query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | metadata (dict) | negatives (list, length 30) | negative_scores (list, length 30) | document_score (string, lengths 4–10) | document_rank (string, 2 classes)
---|---|---|---|---|---|---|
queries SavedHomes using saved_home_id to get longitude
|
def saved_home_latitude(saved_home_id):
    sql = "SELECT latitude FROM saved_homes WHERE saved_home_id = :saved_home_id"
    cursor = db.session.execute(sql, {"saved_home_id": saved_home_id})
    latitude = cursor.fetchone()
    return latitude
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def saved_home_longitude(saved_home_id):\n\n sql = \"SELECT longitude FROM saved_homes WHERE saved_home_id = :saved_home_id\"\n\n cursor = db.session.execute(sql,{\"saved_home_id\": saved_home_id})\n\n longitude = cursor.fetchone()\n\n return longitude",
"def get_address(saved_home_id):\n\n sql = \"SELECT address FROM saved_homes WHERE saved_home_id= :saved_home_id\"\n\n cursor = db.session.execute(sql,{\"saved_home_id\": saved_home_id})\n\n old_address = cursor.fetchone()\n \n address = \" \".join(old_address)\n\n return address",
"def saved_businesses(saved_home_id):\n\n sql = \"SELECT bus_name FROM saved_businesses WHERE saved_home_id = :saved_home_id\"\n\n cursor = db.session.execute(sql,{\"saved_home_id\": saved_home_id})\n\n bus = cursor.fetchone()\n\n return bus",
"def read_long_lat_proxi():\n session = Session()\n # data est une liste de tuple\n long_lat_proxi_data = session.query(Prix_Median.longitude,\n Prix_Median.latitude,\n Prix_Median.ocean_proximity_str,\n Prix_Median.ocean_proximity).all()\n session.close()\n list_long_lat = DataFrame(long_lat_proxi_data)\n list_long_lat = list_long_lat.drop_duplicates()\n return list_long_lat",
"def get_location(geoname):\n\n DB_NAME = global_settings.DB_NAME_GEONAMES\n db_user = global_settings.POSTGRESQL_USERNAME\n db_password = global_settings.POSTGRESQL_PASSWORD\n db_host = global_settings.POSTGRESQL_HOST\n db_port = global_settings.POSTGRESQL_PORT\n\n sql = \"SELECT latitude, longitude FROM {} WHERE name like '{}'\".format(global_settings.TABLE_NAME_GEONAMES, geoname)\n\n resp = sqlExecute(DB_NAME, db_user, db_password, db_host, db_port, sql, True)\n\n if not resp['success']:\n return []\n\n lat_long = []\n\n for data in resp['data']:\n lat_long.append([data[0], data[1]])\n\n return lat_long",
"def set_home_position(self, lat, lon, alt):\n pass",
"def save_new_home(user_id, rm_property_id, longitude, latitude, address):\n home = SavedHomes(\n user_id = user_id,\n rm_property_id = rm_property_id,\n longitude=longitude,\n latitude=latitude,\n address=address,\n )\n \n db.session.add(home)\n db.session.commit()\n\n return home",
"def get_all_locations(self):",
"def get_all_locations():\n rs = run_query('''select * from zlrz_office_location''')\n return [] if rs is None else list(map(lambda t: Location(t[1], t[2], t[3], t[4], t[5], t[0]), rs))",
"def get_longitude(query):\n data = get_data(query)\n longitude = 2.3504873\n try:\n longitude = (\n data.get_data(query)[\"results\"][0][\"geometry\"][\"location\"][\"lng\"])\n except IndexError:\n longitude = 2.3504873\n finally:\n return longitude",
"def geo_locate(cursor: sqlite3.Cursor):\n cursor.execute('''DELETE FROM location_cache''') # Scrub previous results to start over\n\n geo_code = Nominatim(user_agent=\"capstone_project\")\n cursor.execute(\"\"\"SELECT location FROM combined_jobs\"\"\")\n jobs = cursor.fetchall() # Set to .fetchall once development is complete\n\n for location in jobs:\n try:\n full_loc = geo_code.geocode(location[0])\n print(location[0])\n cursor.execute(f\"\"\"INSERT INTO location_cache(location, latitude, longitude)\n VALUES (?,?,?)\"\"\", (location[0], full_loc.latitude, full_loc.longitude))\n except AttributeError:\n print(AttributeError)\n except sqlite3.IntegrityError:\n print(sqlite3.IntegrityError)",
"def getFeaturedLocation(guide):\n photos = guide.photos.all()\n\n x = 0\n y = 0\n z = 0\n\n size = 0\n\n for photo in photos:\n if photo.latitude:\n lat = radians(float(photo.latitude))\n lon = radians(float(photo.longitude))\n x += cos(lat) * cos(lon)\n y += cos(lat) * sin(lon)\n z += sin(lat)\n size+=1\n\n if size is 0:\n return None\n\n x = float(x / size)\n y = float(y / size)\n z = float(z / size)\n\n return {\n 'latitude': degrees(atan2(z, sqrt(x * x + y * y))),\n 'longitude': degrees(atan2(y, x))\n }\n # return atan2(z, sqrt(x * x + y * y)), atan2(y, x)\n\n\n\n # for photo in photos:\n # if photo.latitude:\n # return {\n # 'latitude': photo.latitude,\n # 'longitude': photo.longitude\n # }\n\n # return None",
"def get_home(self):\n return self.complete_address[6]",
"def get_hikedetails_by_userloc(k):\n \n npbyuserloc = Hike.query.filter(Hike.area_name == k).all()\n\n return npbyuserloc",
"def Fetch_station(long, lat, y):\r\n global ddf\r\n dmin = 1000000\r\n rs = 0\r\n i=0\r\n for i in range(len(ddf[y])):\r\n #Calculate the distance between zip code location and weather station location\r\n dnew = Distance_orthonormique(ddf[y]['LON'][i], ddf[y]['LAT'][i], long, lat)\r\n\r\n if(dmin > dnew):\r\n #If the last smaller distance is superior than the current distance :\r\n #the new smaller distance is the current distance\r\n dmin = dnew\r\n rs = i\r\n\r\n #rs = index dataframe weather station\r\n #ddf[y]['STATION NAME'][rs] = Weather station name\r\n #round(dmin, 2) = Distance between weather station and zip code\r\n \r\n return rs, ddf[y]['STATION NAME'][rs], round(dmin,2)",
"def lat_lng_approximation(homes, destination, lat_lng_dest, average_speed):\n\n for home in homes:\n # Stores the lat and lng points for the home\n lat_lng_home = (home.home.latitude, home.home.longitude)\n\n # Returns the distance from the two lat lng points in miles\n distance = geopy.distance.geodesic(lat_lng_home, lat_lng_dest).miles\n\n # If the distance is less than a mile then don't add any distance since it is already so close\n if distance > 1:\n # Extra distance is determined by giving more distance to homes farther away\n extra_distance = EXTRA_DISTANCE_LAT_LNG_APPROX * (1 - 1.0/distance)\n # This normalizes the value since walking needs less of a weight than biking since homes\n # are more direct when walking.\n distance += extra_distance * average_speed/AVERAGE_BICYCLING_SPEED\n if average_speed is not 0:\n # If the speed is not zero (to prevent divide by zero, then add the commute time to\n # the home\n commute_time_hours = distance / average_speed\n commute_time = commute_time_hours * 60\n home.approx_commute_times[destination] = commute_time\n else:\n # If there was a divide by zero then just eliminate the home\n home.eliminate_home()",
"def location_search(self, lat: float, lng: float) -> List[Location]:\n params = {\n \"latitude\": lat,\n \"longitude\": lng,\n # rankToken=c544eea5-726b-4091-a916-a71a35a76474 - self.uuid?\n # fb_access_token=EAABwzLixnjYBABK2YBFkT...pKrjju4cijEGYtcbIyCSJ0j4ZD\n }\n result = self.private_request(\"location_search/\", params=params)\n locations = []\n for venue in result[\"venues\"]:\n if \"lat\" not in venue:\n venue[\"lat\"] = lat\n venue[\"lng\"] = lng\n locations.append(extract_location(venue))\n return locations",
"def search_geoloc_range(request):\n\n distance = float(request.POST['distance'])\n\n latlng = (request.POST['latlng']).replace(\"(\",'').replace(\")\",'').split(', ')\n latitude = float(latlng[0])\n longitude = float(latlng[1])\n print distance\n print latitude\n print longitude\n\n # count range of nowa latlng\n radius_lat = (distance/(69.172)) #count latitude range\n min_lat = latitude - radius_lat\n max_lat = latitude + radius_lat\n print min_lat\n print max_lat\n\n radius_lng = (math.fabs(distance/(math.cos(longitude) * 69.172))) #count longitude range\n min_lng = longitude - radius_lng\n max_lng = longitude + radius_lng\n print min_lng\n print max_lng\n\n # if sys.version_info < (2, 7):\n # min_lat = decimal.Decimal(str(min_lat))\n # max_lat = decimal.Decimal(str(max_lat))\n # min_lng = decimal.Decimal(str(min_lng))\n # max_lng = decimal.Decimal(str(max_lng))\n\n # query db to match the range of dentist work place in db\n total = WorkPlace.objects.filter(latitude__gte=min_lat, latitude__lte=max_lat,\n longitude__gte=min_lng, longitude__lte=max_lng).count()\n\n result = []\n\n # step for how many lines separate per page. then count nowa page's start line no. and end line no.\n if 'page' in request.POST:\n page = request.POST['page']\n else:\n page = 1\n\n step = 10\n end = step * int(page)\n start = step * (int(page)-1)\n is_end = False\n\n if (end - total) < step:\n is_end = False\n WorkPlaceDict = WorkPlace.objects.filter(latitude__gte=min_lat, latitude__lte=max_lat,\n longitude__gte=min_lng, longitude__lte=max_lng).order_by('id')[start:end]\n\n for i in WorkPlaceDict:\n\n dentist_profile = i.dentistid\n did = dentist_profile.user.user.id\n\n latitude = str(i.latitude)\n longitude = str(i.longitude)\n latlng = \"(\"+latitude+\", \"+longitude+\")\"\n\n counts = _relation_counts(request,did,request.user.id)\n\n i_wrap = {\n \"clinic\": i.clinic_name,\n \"work_location\": i.location,\n \"latlng\": latlng,\n \"business_hour\": str(i.business_hour),\n \"dentistid\": did,\n \"dentistname\": _show_obj_name(did),\n \"summary\": dentist_profile.user.summary,\n \"avatar\": settings.MEDIA_URL + str(dentist_profile.user.imagesmall),\n \"patient_count\": counts[\"patient_count\"],\n \"follower_count\": counts[\"follower_count\"],\n \"status\": counts[\"status\"],\n \"is_end\": is_end\n }\n\n result.append(i_wrap)\n\n else:\n is_end = True\n i_wrap = {\n \"is_end\": is_end\n }\n\n result.append(i_wrap)\n\n template_var = {\n \"searchresult\": result\n }\n\n return JsonResponse(template_var)",
"def shot_lon_lat(self) -> list[tuple[float, float]]:\n if self._shot_lon_lat is None:\n if self.parent_granule.product == \"GEDI_L2A\":\n self._shot_lon_lat = list(\n zip(self[\"lon_lowestmode\"], self[\"lat_lowestmode\"])\n )\n elif self.parent_granule.product == \"GEDI_L1B\":\n self._shot_lon_lat = list(\n zip(\n self[\"geolocation/longitude_lastbin\"],\n self[\"geolocation/latitude_lastbin\"],\n )\n )\n else:\n raise NotImplementedError(\n \"No method to get main data for \"\n f\"product {self.parent_granule.product}\"\n )\n return self._shot_lon_lat",
"def test_query_google(self):\n google_api = LocationData()\n latLng = google_api.getLatLong(test_address)\n self.assertEqual(latLng['lat'], 32.625849)",
"def find_5near_hospitals(lon, lat):\r\n engine = get_sql_engine()\r\n hospital5 = text(\r\n \"\"\"\r\n SELECT\r\n \"HOSPITAL_NAME\" AS name, \"STREET_ADDRESS\" as address,\r\n \"PHONE_NUMBER\" as contact, geom,\r\n\t ST_X(geom) AS lon, ST_Y(geom) AS lat,\r\n\t ST_Distance(ST_SetSRID(ST_MakePoint(:lon, :lat), 4326)::geography, geom::geography) AS distance\r\n FROM philly_hospital\r\n ORDER BY 7 ASC\r\n LIMIT 5\r\n \"\"\"\r\n )\r\n near_hospital = gpd.read_postgis(hospital5, con=engine, params={\"lon\": lon, \"lat\": lat})\r\n return near_hospital",
"def get_map_locs(self, CalSwimView):\n # Initialize query list\n query_build = []\n \n if (CalSwimView.lat and CalSwimView.lng): \n # Search query has a specified location thus check against intersection of points and polygons in database\n self.cursor.execute(\"SET @center = GeomFromText('POINT(%s %s)');\",(float(CalSwimView.lat), float(CalSwimView.lng)))\n self.cursor.execute(\"SET @radius = %s;\",(CalSwimView.radius))\n self.cursor.execute(\"\"\"\n SET @bbox = CONCAT('POLYGON((',\n X(@center) - @radius, ' ', Y(@center) - @radius, ',',\n X(@center) + @radius, ' ', Y(@center) - @radius, ',',\n X(@center) + @radius, ' ', Y(@center) + @radius, ',',\n X(@center) - @radius, ' ', Y(@center) + @radius, ',',\n X(@center) - @radius, ' ', Y(@center) - @radius, '))'\n );\n \"\"\")\n query_build.append(\"\"\"\n SELECT gd_id, organization, project_name_short, project_name, project_description, data_type, data_target, AsText(location)\n FROM GeoData\n WHERE Intersects( location, GeomFromText(@bbox) )\n AND\n CASE geometrytype(location)\n WHEN 'POINT' THEN\n SQRT(POW( ABS( X(location) - X(@center)), 2) + POW( ABS(Y(location) - Y(@center)), 2 )) < @radius\n ELSE\n TRUE\n END\n \"\"\")\n # Search query has at least 1 keyword\n if len(CalSwimView.keywords) > 0:\n # Just a few MySQL notes:\n # Default MySQL operation executes an \"OR\" search among terms\n # To make sure all terms are in a given result, \"AND\" search among terms, then just add prefix \"+\" before each term\n # To exclude results with a given term, just add prefix \"-\" before the term\n keyword_query = \"*, \".join(CalSwimView.keywords) +\"*\" \n query_build.append(\"\"\" \n AND\n MATCH (organization, contact, project_name, project_description, project_funder, data_target, location_description, data_collector, data_type, keyword, other)\n AGAINST ('%(KeywordQuery)s' IN BOOLEAN MODE)\n \"\"\" % {\"KeywordQuery\":keyword_query})\n else:\n # Search query does not have a specified location\n query_build.append(\"\"\"\n SELECT gd_id, organization, project_name_short, project_name, project_description, data_type, data_target, AsText(location)\n FROM GeoData\n \"\"\")\n # Search query has at least 1 keyword\n if len(CalSwimView.keywords) > 0:\n # Just a few MySQL notes:\n # Default MySQL operation executes an \"OR\" search among terms\n # To make sure all terms are in a given result, \"AND\" search among terms, then just add prefix \"+\" before each term\n # To exclude results with a given term, just add prefix \"-\" before the term\n keyword_query = \"*, \".join(CalSwimView.keywords) +\"*\" \n query_build.append(\"\"\" \n WHERE\n MATCH (organization, contact, project_name, project_description, project_funder, data_target, location_description, data_collector, data_type, keyword, other)\n AGAINST ('%(KeywordQuery)s' IN BOOLEAN MODE)\n \"\"\" % {\"KeywordQuery\":keyword_query})\n select_query = \"\\n\".join(query_build)\n #print >> CalSwimView.errors, select_query\n \n # execute SQL query using execute() method.\n self.cursor.execute(select_query)\n\n # Fetch a single row using fetchone() method.\n rows = [] \n table_data = {}\n coordinates = []\n while(1):\n row=self.cursor.fetchone()\n if row == None:\n break \n coordinates.append( str(row[7]).replace('POINT(','').replace('POLYGON((','').replace(')','') )\n rows.append( {\"c\":[{\"v\":row[0]}, {\"v\":row[1]}, {\"v\":row[2]}, {\"v\":row[3]}, {\"v\":row[4]}, {\"v\":row[5]}, {\"v\":row[6]}]} )\n \n # Return search values as json\n cols = [{\"id\":'gd_id', \"label\":'gd_id', \"type\":'string'},\n 
{\"id\":'organization', \"label\":'Organization', \"type\":'string'},\n {\"id\":'project_short', \"label\":'Project Short', \"type\":'string'},\n {\"id\":'project', \"label\":'Project', \"type\":'string'},\n {\"id\":'description', \"label\":'Description', \"type\":'string'}, \n {\"id\":'target', \"label\":'Target', \"type\":'string'}]\n table_data[\"cols\"] = cols\n table_data[\"rows\"] = rows\n # Assign table data to json table data container\n json_data = {}\n json_data[\"table_data\"] = table_data\n json_data[\"coordinates\"] = coordinates\n \n # Close DB connections \n self.cursor.close()\n \n # Return results\n return json.dumps(json_data)",
"def lat_lons(self):",
"def _update_home_information(self, homes):\n\n if homes is not None and len(homes) > 0:\n self._home = homes[0]\n self.has_home = True\n self._update_horizon(max(abs(self._home[0]), abs(self._home[1])))\n if self.experimental_home is None:\n self.experimental_home = self._home\n else:\n if self.experimental_home not in self.last_scan['Home']:\n print self, self.experimental_home, \"is not in\", self.last_scan['Home']\n self.experimental_home = self._home\n else:\n self._home = self.experimental_home # Try some reckoning\n\n return",
"def location(bot, update):\n\n bot.send_message(chat_id=update.message.chat_id, text=\"OK you wait ah...\")\n latitude = update.message.location.latitude\n longitude = update.message.location.longitude\n bot.send_message(chat_id=update.message.chat_id, text=\"Just let you know for fun lol - your latitude is {0}, and your longitude is {1}\".format(latitude,longitude))\n try:\n # Read carpark csv as dataframe\n df = pd.read_csv('Parking_withcoords.csv')\n \n # Calculate distance between each carpark and postal code and append it to dataframe\n distance = []\n for coord in df['Coord_rad']: \n carpark = haversine((radians(latitude),radians(longitude)), ast.literal_eval(coord)) #converts string to tuple\n distance.append(carpark)\n df['Distance_km'] = distance\n\n # Sort in ascending order and extract top 5\n top_five = df.sort_values('Distance_km').head(5)\n\n for row in top_five['Info']:\n bot.send_message(chat_id=update.message.chat_id, parse_mode='HTML', text=row.replace(\"\\$\", \"$\"))\n\n bot.send_message(chat_id=update.message.chat_id, text=\"Fast hor! If you want to check other places, type /start again ok :P\")\n except:\n bot.send_message(chat_id=update.message.chat_id, text=\"Jialat liao got error...try again with /start and then use the postal code method can? Paiseh!\")",
"def locate(self):\n \n #CONNECT TO API\n api = GoogleV3(api_key = self.google_key)\n\n #INITALIZE ARRAY\n array = []\n\n #START GEOCODING ADDRESSES\n for i in tqdm(range(len(self.df)), desc='Geocoding Addresses'):\n\n \n row = self.df.iloc[i]\n\n #GET ADDRESS VARIABLES\n st_name = row['street_name']\n st_number = row['house_number']\n city = row['city']\n state = row['state/province']\n listing_number = row['listing_number']\n zip = row['postal_code']\n\n\n #FORMAT ADDRESS FOR API\n full_address = str(\"{} {},{},{},{}\".format(st_number, st_name, city, state, zip))\n\n #TRY TO LOCATE WITH GOOGLE\n try:\n \n location = api.geocode(full_address, timeout=10)\n\n lat = location.latitude\n lon = location.longitude\n \n\n info = [lat,lon, listing_number]\n\n array.append(info)\n\n next \n\n #Go to next if you cant locate\n except:\n\n info = [0,0, listing_number]\n\n array.append(info)\n\n next\n\n #CONVERT SERIES TO DATAFRAME\n geo_data = pd.DataFrame(data = array, columns = ['lat', 'lon', 'listing_number'])\n \n #INNER JOIN DATA TO DATAFRAME\n self.df = pd.merge(self.df, geo_data, on= 'listing_number', how = 'inner')",
"def geo(self):\n return self.query.geo",
"def get_location_by_id(self, location_id):",
"def set_home_locations(self):\n self.swarmie.set_home_gps_location(self.swarmie.get_gps_location())\n\n current_location = self.swarmie.get_odom_location()\n current_pose = current_location.get_pose()\n home_odom = Location(current_location.Odometry)\n\n detections = self.swarmie.get_latest_targets().detections\n try:\n for detection in detections:\n if detection.id == 256:\n see_home_tag = True\n home_detection = self._transform_to_odom(detection)\n\n quat = [home_detection.pose.orientation.x,\n home_detection.pose.orientation.y,\n home_detection.pose.orientation.z,\n home_detection.pose.orientation.w]\n _r, _p, yaw = tf.transformations.euler_from_quaternion(\n quat\n )\n yaw += math.pi / 2\n\n home_odom.Odometry.pose.pose.position.x = float(\n home_detection.pose.position.x + 0.5 * math.cos(yaw)\n )\n home_odom.Odometry.pose.pose.position.y = float(\n home_detection.pose.position.y + 0.5 * math.sin(yaw)\n )\n self.swarmie.set_home_odom_location(home_odom)\n return\n\n except tf.Exception:\n pass # use backup below\n\n # project home_odom location 50cm in front of rover's current location\n home_odom.Odometry.pose.pose.position.x = (\n current_pose.x + 0.5 * math.cos(current_pose.theta)\n )\n home_odom.Odometry.pose.pose.position.y = (\n current_pose.y + 0.5 * math.sin(current_pose.theta)\n )\n self.swarmie.set_home_odom_location(home_odom)\n return",
"def test_fetchlocation(self):\n result = export.processExport(houseId=1,\n locationIds = [1,],\n )\n\n self.assertEqual(result.shape, (2880, 1))\n self.assertEqual(result.columns[0], LOC1)\n\n result = export.processExport(houseId=1,\n locationIds = [2,],\n )\n\n self.assertEqual(result.shape, (2880, 1))\n self.assertEqual(result.columns[0], LOC2)"
] |
[
"0.79902",
"0.6118132",
"0.57440174",
"0.5707932",
"0.5707018",
"0.5700337",
"0.5519437",
"0.55093503",
"0.5499253",
"0.5488462",
"0.5446763",
"0.5413726",
"0.54134554",
"0.5385816",
"0.5265151",
"0.5264347",
"0.5260805",
"0.5260011",
"0.5238127",
"0.5235772",
"0.5224815",
"0.52082443",
"0.52048033",
"0.5200095",
"0.5195372",
"0.5175691",
"0.5159055",
"0.515186",
"0.5150475",
"0.51146257"
] |
0.76028997
|
1
|
Shows businesses near saved home longitude and latitude
|
def list_businesses(property_id):
    # need to query for longitude and latitude
    saved_home_id = property_id
    # SQL query to get longitude using saved_home_id
    longitude = saved_home_longitude(saved_home_id)
    # SQL query to get latitude using saved_home_id
    latitude = saved_home_latitude(saved_home_id)
    API_HOST = 'https://api.yelp.com'
    SEARCH_PATH = '/v3/businesses/search'
    BUSINESS_PATH = '/v3/businesses/'
    # TODO future: save things we got back to the db and filter searches we already have data for
    headers = {
        'Authorization': 'Bearer %s' % YELP_API
    }
    params = {'longitude': longitude, 'latitude': latitude}
    business_search_url = API_HOST + SEARCH_PATH
    req = requests.get(business_search_url, params=params, headers=headers)
    data = json.loads(req.content)
    return data
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def find_near_location():\n return render_template(\"location.html\", latitude=None, longitude=None,\n list_stops=None)",
"def station_viewer():\r\n name = request.args[\"address\"]\r\n stations = get_zipcode_stations(name)\r\n\r\n if len(stations) > 0:\r\n stations['coordinate'] = 'end_point='+stations['name'].astype(str)+'&'+'end_lng=' + stations['lon'].astype(str)+'&'+'end_lat='+stations['lat'].astype(str)\r\n\r\n #genetrate folium map\r\n station_coordinates = stations[[\"lat\", \"lon\"]].values.tolist()\r\n\r\n map=make_folium_map(station_coordinates)\r\n\r\n\r\n # generate interactive map\r\n\r\n return render_template(\r\n \"page3.html\",\r\n num_stations=get_num_stations(name),\r\n address=name,\r\n stations=stations[[\"name\", \"address\", \"available_bikes\", 'coordinate']].values,\r\n map=map._repr_html_()\r\n )\r\n\r\n else:\r\n lng=get_address(name)[1]\r\n lat=get_address(name)[0]\r\n near_bike = find_5near_stations(lng, lat)\r\n near_bike['coordinate'] = 'end_point='+near_bike['name'].astype(str)+'&'+'end_lng=' + near_bike['lon'].astype(str)+'&'+'end_lat='+near_bike['lat'].astype(str)\r\n\r\n return render_template(\r\n \"page3_1b_nobike.html\",\r\n address=name,\r\n near_bike_table=near_bike[[\"name\", \"address\", \"available_bikes\", \"coordinate\", \"distance\"]].values)",
"def get_all_locations(self):",
"def listNatura2000Locations(self):\n vocab = (\n ('inside', 'location_inside'),\n ('near', 'location_near'),\n )\n return DisplayList(vocab)",
"def lat_lons(self):",
"def search(self):\n return self.key.geocode(self.cleanplace)",
"def show_places():\n t0 = time.time()\n print(f\"--- {request}\")\n print(f\"--- {user_session}\")\n # Set context by owner and the data selections\n u_context = UserContext(user_session, current_user, request)\n # Which range of data is shown\n u_context.set_scope_from_request(request, \"place_scope\")\n u_context.count = request.args.get(\"c\", 50, type=int)\n\n with PlaceReader(\"read\", u_context) as service:\n # reader = PlaceReader(readservice, u_context)\n # The 'items' list has Place objects, which include also the lists of\n # nearest upper and lower Places as place[i].upper[] and place[i].lower[]\n res = service.get_place_list()\n\n if res[\"status\"] == Status.NOT_FOUND:\n print(f'bp.scene.routes.show_places: {_(\"No places found\")}')\n elif res[\"status\"] != Status.OK:\n print(\n f'bp.scene.routes.show_places: {_(\"Could not get places\")}: {res.get(\"statustext\")}'\n )\n\n elapsed = time.time() - t0\n stk_logger(\n u_context,\n f\"-> bp.scene.routes.show_places n={len(res.get('items'))} e={elapsed:.3f}\",\n )\n return render_template(\n \"/scene/places.html\",\n places=res[\"items\"],\n menuno=4,\n user_context=u_context,\n elapsed=elapsed,\n )",
"def geocoded(self):\n return self.get_queryset().filter(latitude__isnull=False,\n longitude__isnull=False)",
"def list_near_location(coords):\n latitude, longitude = coords\n # Quick check to ensure coordinates are within range of Great Britain\n if not location.check_bounds(latitude, longitude):\n raise NotFound(\"Latitude and longitude coordinates are too far from \"\n \"Great Britain.\")\n\n stops = models.StopPoint.in_range(latitude, longitude,\n db.undefer(models.StopPoint.lines))\n groups = _group_lines_stops(stops)\n\n return render_template(\"location.html\", latitude=latitude,\n longitude=longitude, list_stops=stops, groups=groups)",
"def set_coordinates(self):\n client = Client(api_key=settings.YANDEX_GEOCODER_KEY)\n address = f'Россия, {self.state}, {self.city}, {self.street}'\n self.longitude, self.latitude = client.coordinates(address)",
"def geolocate_address(self):\n self.geolocator = Nominatim(user_agent=\"fundaft\")\n\n # If latitude / longitude are missing, try to geocode them on the basis\n # of the address \n self.coords = [self.get_coords(address) if np.isnan(lat)\n else (lat, lon) for address, lat, lon in\n zip(self.df_ads['property_title'], \n self.df_ads['latitude'], \n self.df_ads['longitude'])]\n \n df = pd.DataFrame(self.coords, columns=['latitude', 'longitude'])\n \n # If new coordinates are not in Dublin, change to na again\n df = self.is_in_dublin(df)\n\n self.df_ads[[\"latitude\",\"longitude\"]] = df",
"def get_station_boroughs(self):\\",
"def show_results():\n print 'Distancia total: ', total_distance\n print 'Ruta: ', visited_cities",
"def __init__(self, lat, lng):\n self.summary = \"Lorem ipsum\"",
"def get_bus_location():\n\n transit_data = cache.get(\"transit_data\")\n if transit_data:\n return transit_data\n \n routes = get_routes()\n\n live_data_url = \"http://gtfs.edmonton.ca/TMGTFSRealTimeWebService/Vehicle/VehiclePositions.pb\"\n feed = gtfs_realtime_pb2.FeedMessage()\n live_data_response = requests.get(live_data_url)\n feed.ParseFromString(live_data_response.content)\n\n transit_data = {} # contains the informtion for all the busses\n for entity in feed.entity:\n if entity.HasField(\"vehicle\"):\n trip_number = entity.vehicle.trip.trip_id\n # sometimes the live bus trip ids don't show up in the routes database\n if trip_number not in routes:\n continue\n\n bus_number, bus_title = routes[trip_number]\n vehicle = entity.vehicle.vehicle.id\n \n # the higher one seems to be correct - sometimes the vehicle id is too short\n if entity.vehicle.vehicle.label:\n vehicle = max(int(entity.vehicle.vehicle.id),\n int(entity.vehicle.vehicle.label))\n latitude = entity.vehicle.position.latitude\n longitude = entity.vehicle.position.longitude\n bearing = entity.vehicle.position.bearing\n\n vehicle_data = {\"bus_number\": bus_number,\n \"bus_title\": bus_title,\n \"latitude\": latitude,\n \"longitude\": longitude,\n \"bearing\": bearing,\n \"trip_id\": trip_number}\n\n transit_data[vehicle] = vehicle_data\n \n cache.set(\"transit_data\", transit_data, timeout=15)\n return(transit_data)",
"def main():\n #get_lat_long\n place_name = 'Arlington - Arlington St'\n # sec_fun = get_lat_long(place_name)\n # print(sec_fun)\n # get_nearest_station(sec_fun[0], sec_fun[1]) #\n # get_nearest_station(42.350009, -71.076077)\n print(find_stop_near(place_name))",
"def browse(self, lat, lon):\n places = self.filter(active=True).order_by('-id')[:10]\n items = []\n for item in places:\n item.distance = item.compute_distance(lat, lon)\n item.orientation = self.orientation(int(item.compute_orientation(lat,lon)))\n items.append(item)\n return items",
"def locate(self):\n \n #CONNECT TO API\n api = GoogleV3(api_key = self.google_key)\n\n #INITALIZE ARRAY\n array = []\n\n #START GEOCODING ADDRESSES\n for i in tqdm(range(len(self.df)), desc='Geocoding Addresses'):\n\n \n row = self.df.iloc[i]\n\n #GET ADDRESS VARIABLES\n st_name = row['street_name']\n st_number = row['house_number']\n city = row['city']\n state = row['state/province']\n listing_number = row['listing_number']\n zip = row['postal_code']\n\n\n #FORMAT ADDRESS FOR API\n full_address = str(\"{} {},{},{},{}\".format(st_number, st_name, city, state, zip))\n\n #TRY TO LOCATE WITH GOOGLE\n try:\n \n location = api.geocode(full_address, timeout=10)\n\n lat = location.latitude\n lon = location.longitude\n \n\n info = [lat,lon, listing_number]\n\n array.append(info)\n\n next \n\n #Go to next if you cant locate\n except:\n\n info = [0,0, listing_number]\n\n array.append(info)\n\n next\n\n #CONVERT SERIES TO DATAFRAME\n geo_data = pd.DataFrame(data = array, columns = ['lat', 'lon', 'listing_number'])\n \n #INNER JOIN DATA TO DATAFRAME\n self.df = pd.merge(self.df, geo_data, on= 'listing_number', how = 'inner')",
"def geolocate(place): # string\n geolocator = geopy.geocoders.Nominatim()\n location = geolocator.geocode(place)\n # i dati si danno in (latitudine, longitudine), ma vanno intesi come (y, x)\n # ovvero vanno visualizzati come x=longitudine, y=latitudine\n return (location.latitude, location.longitude) # coordinate",
"def locationByCoordinate(latitude, longitude) :\n geoLoc = Nominatim(user_agent=\"GetLoc\")\n coordinateString = f\"{latitude}, {longitude}\"\n locationCoordinates = geoLoc.reverse(coordinateString)\n return locationCoordinates.address",
"def get_nearby_location(request):\n latitude, longitude = latlang(request)\n point = Point(float(longitude), float(latitude), srid=4326)\n locations = Location.objects.filter(point__distance_lte=(point, D(km=100)))\n return JsonResponse(json.dumps([serializer(location) for location in locations]), safe=False)",
"def search_geoloc_range(request):\n\n distance = float(request.POST['distance'])\n\n latlng = (request.POST['latlng']).replace(\"(\",'').replace(\")\",'').split(', ')\n latitude = float(latlng[0])\n longitude = float(latlng[1])\n print distance\n print latitude\n print longitude\n\n # count range of nowa latlng\n radius_lat = (distance/(69.172)) #count latitude range\n min_lat = latitude - radius_lat\n max_lat = latitude + radius_lat\n print min_lat\n print max_lat\n\n radius_lng = (math.fabs(distance/(math.cos(longitude) * 69.172))) #count longitude range\n min_lng = longitude - radius_lng\n max_lng = longitude + radius_lng\n print min_lng\n print max_lng\n\n # if sys.version_info < (2, 7):\n # min_lat = decimal.Decimal(str(min_lat))\n # max_lat = decimal.Decimal(str(max_lat))\n # min_lng = decimal.Decimal(str(min_lng))\n # max_lng = decimal.Decimal(str(max_lng))\n\n # query db to match the range of dentist work place in db\n total = WorkPlace.objects.filter(latitude__gte=min_lat, latitude__lte=max_lat,\n longitude__gte=min_lng, longitude__lte=max_lng).count()\n\n result = []\n\n # step for how many lines separate per page. then count nowa page's start line no. and end line no.\n if 'page' in request.POST:\n page = request.POST['page']\n else:\n page = 1\n\n step = 10\n end = step * int(page)\n start = step * (int(page)-1)\n is_end = False\n\n if (end - total) < step:\n is_end = False\n WorkPlaceDict = WorkPlace.objects.filter(latitude__gte=min_lat, latitude__lte=max_lat,\n longitude__gte=min_lng, longitude__lte=max_lng).order_by('id')[start:end]\n\n for i in WorkPlaceDict:\n\n dentist_profile = i.dentistid\n did = dentist_profile.user.user.id\n\n latitude = str(i.latitude)\n longitude = str(i.longitude)\n latlng = \"(\"+latitude+\", \"+longitude+\")\"\n\n counts = _relation_counts(request,did,request.user.id)\n\n i_wrap = {\n \"clinic\": i.clinic_name,\n \"work_location\": i.location,\n \"latlng\": latlng,\n \"business_hour\": str(i.business_hour),\n \"dentistid\": did,\n \"dentistname\": _show_obj_name(did),\n \"summary\": dentist_profile.user.summary,\n \"avatar\": settings.MEDIA_URL + str(dentist_profile.user.imagesmall),\n \"patient_count\": counts[\"patient_count\"],\n \"follower_count\": counts[\"follower_count\"],\n \"status\": counts[\"status\"],\n \"is_end\": is_end\n }\n\n result.append(i_wrap)\n\n else:\n is_end = True\n i_wrap = {\n \"is_end\": is_end\n }\n\n result.append(i_wrap)\n\n template_var = {\n \"searchresult\": result\n }\n\n return JsonResponse(template_var)",
"def net_xy(street):\r\n\r\n # api-endpoint\r\n URL = \"https://ags.govmap.gov.il/Search/FreeSearch\"\r\n # headers\r\n headers = {\"Content-Type\": \"application/json\", \"charset\": \"utf-8\"}\r\n # location given here\r\n try:\r\n p = \"{\\\"keyword\\\": \\\"\" + street + \"\\\",\\\"LstResult\\\": null}\"\r\n PARAMS = p.encode(\"utf-8\")\r\n\r\n # sending get request and saving the response as response object\r\n r = requests.post(url=URL, data=PARAMS, headers=headers)\r\n\r\n # extracting data in json format\r\n data = r.json()\r\n\r\n # extracting latitude, longitude and formatted address\r\n # of the first matching location\r\n\r\n X = data['data']['Result'][0]['X']\r\n Y = data['data']['Result'][0]['Y']\r\n except Exception as e:\r\n print(e)\r\n # print('exception ddamammnnnnn')\r\n print(street)\r\n return 0,0\r\n return X,Y",
"def location(bot, update):\n\n bot.send_message(chat_id=update.message.chat_id, text=\"OK you wait ah...\")\n latitude = update.message.location.latitude\n longitude = update.message.location.longitude\n bot.send_message(chat_id=update.message.chat_id, text=\"Just let you know for fun lol - your latitude is {0}, and your longitude is {1}\".format(latitude,longitude))\n try:\n # Read carpark csv as dataframe\n df = pd.read_csv('Parking_withcoords.csv')\n \n # Calculate distance between each carpark and postal code and append it to dataframe\n distance = []\n for coord in df['Coord_rad']: \n carpark = haversine((radians(latitude),radians(longitude)), ast.literal_eval(coord)) #converts string to tuple\n distance.append(carpark)\n df['Distance_km'] = distance\n\n # Sort in ascending order and extract top 5\n top_five = df.sort_values('Distance_km').head(5)\n\n for row in top_five['Info']:\n bot.send_message(chat_id=update.message.chat_id, parse_mode='HTML', text=row.replace(\"\\$\", \"$\"))\n\n bot.send_message(chat_id=update.message.chat_id, text=\"Fast hor! If you want to check other places, type /start again ok :P\")\n except:\n bot.send_message(chat_id=update.message.chat_id, text=\"Jialat liao got error...try again with /start and then use the postal code method can? Paiseh!\")",
"def describe_locations():\n pass",
"def saved_businesses(saved_home_id):\n\n sql = \"SELECT bus_name FROM saved_businesses WHERE saved_home_id = :saved_home_id\"\n\n cursor = db.session.execute(sql,{\"saved_home_id\": saved_home_id})\n\n bus = cursor.fetchone()\n\n return bus",
"def short_location(self):\n bc = self.barcamp\n location = AttributeMapper(bc.location)\n if location.name and location.city:\n return \"%s, %s\" %(location.name, location.city)\n else:\n return self.handler._(\"location to be announced\")",
"def query_api(location):\n #bearer_token = obtain_bearer_token(API_HOST, TOKEN_PATH)\n bearer_token ='SHdrjUqMJXqXBKUc7bGIplM8y6tnbwZbXXDbWPCd9wWMP8tX9PdJrC5MZHwJRhb7jMtLjXxT-hsWjNf2OkdiDWd30HsS84AVI5iRnrpxkak3HbWXAdUKvraQ_wgXWXYx'\n response = transaction_search(bearer_token,location)\n response = response.get('businesses')\n return response",
"def weatherstation_nearest_command(latitude, longitude):\n print(stations.get_nearest_station(latitude, longitude))",
"def search(lat, lng, distance, query):\n\n url = SEARCH_URL.format(lat, lng, distance,\n query, F_CLIENT_ID, F_CLIENT_SECRET,\n time.strftime(\"%Y%m%d\"))\n venue_list = []\n\n data = requests.get(url).json()\n for i in range(0, len(data['response']['groups'][0]['items'])):\n try:\n item = data['response']['groups'][0]['items'][i]\n venue = item['venue']\n venue_list.append(Business(venue['name'],\n venue['location']['address'],\n venue['rating'],\n venue['ratingSignals'],\n (venue['location']['lat'], venue['location']['lng'])))\n except:\n pass\n\n return venue_list"
] |
[
"0.6190853",
"0.60117227",
"0.5954512",
"0.5796066",
"0.5763397",
"0.56744766",
"0.566464",
"0.5655306",
"0.56232136",
"0.5610862",
"0.56104386",
"0.5592543",
"0.5587977",
"0.5574137",
"0.557158",
"0.5570936",
"0.55348104",
"0.5516368",
"0.5511563",
"0.54928356",
"0.5488545",
"0.5486171",
"0.54672456",
"0.5423685",
"0.5415258",
"0.53669363",
"0.5349169",
"0.53480965",
"0.53218573",
"0.53176343"
] |
0.6223184
|
0
|
creates a new saved business
|
def save_new_business(user_id, bus_name, yelp_id, latitude, longitude, yelp_url, saved_home_id):
    business = SavedBusinesses(
        user_id=user_id,
        yelp_id=yelp_id,
        bus_name=bus_name,
        latitude=latitude,
        longitude=longitude,
        yelp_url=yelp_url,
        saved_home_id=saved_home_id
    )
    db.session.add(business)
    db.session.commit()
    return business
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def create_business(identifier, legal_type=None, legal_name=None):\n business = Business()\n business.identifier = identifier\n business.legal_type = legal_type\n business.legal_name = legal_name\n business.save()\n return business",
"def register_business(current_user):\n data = request.get_json(force=True)\n\n if not data:\n return make_json_reply(\n 'message', 'Cannot create business due to missing fields'), 400\n\n if (len(data.keys()) != 4):\n return make_json_reply(\n 'message', 'Cannot create business due to missing fields'), 400\n\n user_id = current_user.id\n name = data['name']\n location = data['location']\n category = data['category']\n description = data['description']\n\n if check_validity_of_input(\n name=name,\n location=location,\n category=category,\n description=description) == False:\n return make_json_reply('message', 'Fields cannot be empty'), 400\n\n business = Business(\n user_id=user_id,\n name=name,\n location=location,\n category=category,\n description=description)\n\n db.session.add(business)\n\n return make_json_reply(\n 'message',\n 'Business ' + str(business.name) + ' successfully created'), 201",
"def create(new_attrs: dict) -> BusinessNotification:\n\n new_business_report = BusinessNotification()\n\n db.session.add(new_business_report)\n db.session.commit()\n\n return new_business_report",
"def create(self, validated_data):\n print(validated_data)\n return Booking.objects.create(**validated_data)",
"def create_new_banks():\n\n\tcity = request.form.get('bankCity', '')\n\tname = request.form.get('bankName', '')\n\taddress = request.form.get('bankAddress', '')\n\tinfo = dict(city=city, name=name, address=address)\n\t# print(info)\n\tbank = Bank(city, name, address)\n\tres = bank.save()\n\t# print('res=%d' % res)\n\treturn send_result(info, res, status=\"True\")",
"def test_ach_create_for_business(self):\n\n business = self.client.businesses.create({})\n\n self.ach_model[\"business_token\"] = business.token\n\n funding_source = self.client.funding_sources.ach.create(self.ach_model)\n\n self.verify[\"business_token\"] = business.token\n\n verify_ach_response_model(self, funding_source, self.verify)",
"def post(self):\n FeatureBusiness.add(request.get_json(), user_id=request.user_id)\n\n return {\"status\": 201}, 201",
"def test_add_business_to_mission(self):\n\n self.assertEqual(len(self.mission.businesses), 0)\n\n with self.client as c:\n with c.session_transaction() as sess:\n sess['user_id'] = self.user2.id\n\n db.session.add(self.mission)\n\n resp = c.post(\n f'v1/mission/add_business/{self.mission.id}',\n json=self.business_data)\n\n self.assertEqual(resp.json['success'],\n 'Added!')\n business = Business.query.get('3h939hd798dhjf97')\n self.assertIn(business, self.mission.businesses)\n self.assertEqual(len(self.mission.businesses), 1)\n\n # test repeat adding does nothing\n resp = c.post(\n f'v1/mission/add_business/{self.mission.id}',\n json=self.business_data)\n\n self.assertEqual(resp.json['success'], 'Already Added.')\n self.assertIn(business, self.mission.businesses)\n self.assertEqual(len(self.mission.businesses), 1)",
"def post(self, request): # FIRST EXAMPLE\n model = self._create_booking(\n request=request) # when _create_booking is invoked, historio Client will log model\n print('save me')",
"def create(self, **kwargs):\n return self.save(self.new(**kwargs))",
"def create():",
"def create():",
"def create(self, validated_data):\n user_data = validated_data.pop('user')\n user = UserSerializer.create(UserSerializer(), validated_data=user_data)\n businessuser, created = BusinessUser.objects.update_or_create(user=user,\n company_name=validated_data.pop('company_name'), \n reg=validated_data.pop('reg'), \n vat=validated_data.pop('vat'), )\n return businessuser",
"def create(self):\n\n pass",
"def _add_to_businesses(params):\n print params\n if not Business.query.filter_by(yelp_id=params['yelp_id']).first():\n business = Business()\n cat_list = []\n for key in params:\n # adds elements in category lists to category table if they don't already exist\n if key == \"categories\":\n for cat in params[key]:\n cat_list.append(cat)\n if not Category.query.filter_by(category_name=cat).first():\n category = Category(category_name=cat)\n db.session.add(category)\n # THROUGH LINE 40 REPLACED BY 30-34\n # for group in params[key]:\n # print type(group)\n # for subtype in group:\n # print type(subtype)\n # if not Category.query.filter_by(category_name=subtype).first():\n # category = Category(category_name=subtype)\n # db.session.add(category)\n # cat_list.append(subtype)\n # print cat_list\n elif key == \"yelp_id\":\n business.yelp_id = params[key]\n elif key == \"name\":\n business.name = params[key]\n elif key == \"address_line_1\":\n business.address_line_1 = params[key]\n elif key == \"address_line_2\":\n business.address_line_2 = params[key]\n elif key == \"city\":\n business.city = params[key]\n elif key == \"state\":\n business.state = params[key]\n elif key == \"zipcode\":\n business.zipcode = params[key]\n elif key == \"phone\":\n business.phone = params[key]\n elif key == \"latitude\":\n business.latitude = params[key]\n elif key == \"longitude\":\n business.longitude = params[key]\n try:\n db.session.add(business)\n db.session.commit()\n except:\n db.session.rollback()\n print business.name, \"has insufficient information, skipping.\"\n return None\n # creates rows in reference table\n for cat in cat_list:\n # creates row in reference table\n business = Business.query.filter_by(yelp_id=params['yelp_id']).first()\n catbus = BusinessCategory()\n print business.business_id\n catbus.business_id = business.business_id\n cat_object = Category.query.filter_by(category_name=cat).first()\n print cat_object.category_name\n catbus.category_id = cat_object.category_id\n\n if not BusinessCategory.query.filter_by(business_id=catbus.business_id,\n category_id=catbus.category_id).first():\n db.session.add(catbus)\n db.session.commit()\n\n print \"added \" + business.name + \" to db\"\n\n else:\n print \"Already in Dictionary\"\n return None",
"def create(self):\n pass",
"def create(self):\n pass",
"def create(self):\n pass",
"def create(self):",
"def create(self, validated_data):",
"def create(self):\n ...",
"def create(self,**extra_fields):\r\n print(extra_fields)\r\n data = self.model(**extra_fields)\r\n data.save(using=self._db)",
"def create_bigfirm(bf_data):\n return get_or_create_object(bf_data, BigFirm)",
"def create(self):\n db.session.add(self)\n db.session.commit()",
"def create(self, validated_data):\n return FlightsheetDetails.objects.create(**validated_data)",
"def create_job_detail(company_name, job_title, application_deadline, job_listing_url, state, city, application_listed, salary):\n\n job_detail = JobDetail(company_name = company_name, job_title = job_title, application_deadline = application_deadline, job_listing_url = job_listing_url, state = state , city = city, application_listed = application_listed, salary = salary)\n db.session.add(job_detail)\n db.session.commit()\n\n return job_detail",
"def test_business_model(self):\n\n self.user.save()\n query_user = User.query.filter_by(email='[email protected]').first()\n\n business = Business('CosmasTech', 'Technology', 'Nairobi',\n 'AI is transforming human life', query_user.id)\n business.save()\n\n query_res = Business.query.filter_by(id=1).first()\n self.assertEqual(query_res.name, 'cosmastech')",
"def create(cls, **kwargs):\r\n return cls().fill(**kwargs).save()",
"def create(self, datastore, **kwargs):\n return self.save(datastore, (self.new(**kwargs)))",
"def bank():\n\n bank = Bank.objects.create(name='Random Bank')\n return bank"
] |
[
"0.7183854",
"0.67874414",
"0.66240084",
"0.653262",
"0.63423127",
"0.6336254",
"0.6319564",
"0.63147587",
"0.62548125",
"0.62217283",
"0.6177882",
"0.6177882",
"0.6064845",
"0.60640514",
"0.60619956",
"0.60320467",
"0.60320467",
"0.60320467",
"0.60247517",
"0.6001875",
"0.60015386",
"0.5996598",
"0.59901476",
"0.5931548",
"0.59215534",
"0.5905161",
"0.5889809",
"0.58830196",
"0.5838994",
"0.58311373"
] |
0.757261
|
0
|
r"""VGG 16layer model (configuration "D") with batch normalization
|
def vgg16_bn(model_dir, tag, num_classes, use_cls, batchnorm=True):
    cfg = [[64, 64, 'M'], [128, 128, 'M'], [256, 256, 256, 'M'], [512, 512, 512, 512, 512, 512], [1024, 'D', 1024, 'D']]
    net = VGG(model_dir, tag, cfg, num_classes, use_cls, batchnorm=batchnorm)
    return net
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def vgg16_bn(pretrained,**kwargs):\n model = VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)\n if pretrained:\n model_dict = paddle.load('./pre_model/vgg16_bn.paddle')\n model.set_state_dict(model_dict)\n return model",
"def build_cnn_vgg16(num_classes):\n\n inputs = tf.keras.layers.Input(\n shape=(IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS)\n )\n\n x = inputs\n x = tf.keras.applications.vgg16.preprocess_input(x)\n vgg16 = tf.keras.applications.VGG16(\n input_shape=(IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS),\n weights=\"imagenet\",\n include_top=False\n )\n\n vgg16.trainable = False\n x = vgg16(x, training=False)\n\n x = tf.keras.layers.GlobalAveragePooling2D()(x)\n x = tf.keras.layers.Dense(\n units=num_classes,\n activation=tf.keras.activations.softmax\n )(x)\n\n outputs = x\n\n model = tf.keras.Model(\n inputs=inputs,\n outputs=outputs\n )\n\n return model",
"def vgg16(pretrained=False, **kwargs):\n if pretrained:\n kwargs['init_weights'] = False\n model = VGG(make_layers(cfg['D']), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg16']), strict=False)\n return model",
"def vgg_16(input_shape=(224, 224, 3), output_shape=1000):\n model = Sequential()\n \n # layer 1 ~ 2 (filter: 64)\n model.add(Input(shape=input_shape))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(64, (3, 3), activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(64, (3, 3), activation='relu'))\n model.add(MaxPool2D((2, 2), strides=(2, 2)))\n # output size: 112 x 112 x 64\n \n # layer 3 ~ 4 (filter: 128)\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(128, (3, 3), activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(128, (3, 3), activation='relu'))\n model.add(MaxPool2D((2, 2), strides=(2, 2)))\n # output size: 56 x 56 x 128\n \n # layer 5 ~ 7 (filter: 256)\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(256, (3, 3), activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(256, (3, 3), activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(256, (3, 3), activation='relu'))\n model.add(MaxPool2D((2, 2), strides=(2, 2)))\n # output size: 28 x 28 x 256\n \n # layer 8 ~ 10 (filter: 512)\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(512, (3, 3), activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(512, (3, 3), activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(512, (3, 3), activation='relu'))\n model.add(MaxPool2D((2, 2), strides=(2, 2)))\n # output size: 14 x 14 x 512\n \n # layer 11 ~ 13 (filter: 512)\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(512, (3, 3), activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(512, (3, 3), activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(512, (3, 3), activation='relu'))\n model.add(MaxPool2D((2, 2), strides=(2, 2)))\n # output size: 7 x 7 x 512\n \n # layer 14 ~ 16 (Fully Connected)\n model.add(Flatten())\n # flatten: 7 x 7 x 512 = 25,088\n model.add(Dense(4096, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(4096, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(output_shape, activation='softmax'))\n # categorized by output shape\n \n return model",
"def vgg16(pretrained=False, **kwargs):\n model = VGG(make_layers(cfg['D']), **kwargs)\n if pretrained:\n model_dict = paddle.load('./pre_model/vgg16.paddle')\n model.set_state_dict(model_dict)\n return model",
"def vgg16_bn(pretrained=False, **kwargs):\n if pretrained:\n kwargs['init_weights'] = False\n model = VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg16_bn']), strict=False)\n return model",
"def build_vgg16(self):\n use_batch_norm = self.use_batch_norm\n\n imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)\n is_train = tf.placeholder(tf.bool)\n\n conv1_1_feats = convolution(imgs, 3, 3, 64, 1, 1, 'conv1_1')\n conv1_1_feats = nonlinear(conv1_1_feats, 'relu')\n conv1_2_feats = convolution(conv1_1_feats, 3, 3, 64, 1, 1, 'conv1_2')\n conv1_2_feats = nonlinear(conv1_2_feats, 'relu')\n pool1_feats = max_pool(conv1_2_feats, 2, 2, 2, 2, 'pool1')\n\n conv2_1_feats = convolution(pool1_feats, 3, 3, 128, 1, 1, 'conv2_1')\n conv2_1_feats = nonlinear(conv2_1_feats, 'relu')\n conv2_2_feats = convolution(conv2_1_feats, 3, 3, 128, 1, 1, 'conv2_2')\n conv2_2_feats = nonlinear(conv2_2_feats, 'relu')\n pool2_feats = max_pool(conv2_2_feats, 2, 2, 2, 2, 'pool2')\n\n conv3_1_feats = convolution(pool2_feats, 3, 3, 256, 1, 1, 'conv3_1')\n conv3_1_feats = nonlinear(conv3_1_feats, 'relu')\n conv3_2_feats = convolution(conv3_1_feats, 3, 3, 256, 1, 1, 'conv3_2')\n conv3_2_feats = nonlinear(conv3_2_feats, 'relu')\n conv3_3_feats = convolution(conv3_2_feats, 3, 3, 256, 1, 1, 'conv3_3')\n conv3_3_feats = nonlinear(conv3_3_feats, 'relu')\n pool3_feats = max_pool(conv3_3_feats, 2, 2, 2, 2, 'pool3')\n\n conv4_1_feats = convolution(pool3_feats, 3, 3, 512, 1, 1, 'conv4_1')\n conv4_1_feats = nonlinear(conv4_1_feats, 'relu')\n conv4_2_feats = convolution(conv4_1_feats, 3, 3, 512, 1, 1, 'conv4_2')\n conv4_2_feats = nonlinear(conv4_2_feats, 'relu')\n conv4_3_feats = convolution(conv4_2_feats, 3, 3, 512, 1, 1, 'conv4_3')\n conv4_3_feats = nonlinear(conv4_3_feats, 'relu')\n pool4_feats = max_pool(conv4_3_feats, 2, 2, 2, 2, 'pool4')\n\n conv5_1_feats = convolution(pool4_feats, 3, 3, 512, 1, 1, 'conv5_1')\n conv5_1_feats = nonlinear(conv5_1_feats, 'relu')\n conv5_2_feats = convolution(conv5_1_feats, 3, 3, 512, 1, 1, 'conv5_2')\n conv5_2_feats = nonlinear(conv5_2_feats, 'relu')\n conv5_3_feats = convolution(conv5_2_feats, 3, 3, 512, 1, 1, 'conv5_3')\n conv5_3_feats = nonlinear(conv5_3_feats, 'relu')\n\n conv5_3_feats_flat = tf.reshape(conv5_3_feats, [self.batch_size, 196, 512])\n self.conv_feats = conv5_3_feats_flat\n self.conv_feat_shape = [196, 512]\n self.num_ctx = 196 \n self.dim_ctx = 512\n\n self.imgs = imgs\n self.is_train = is_train",
"def VGGModel(input_shape):\n \n\n X_input = Input(input_shape)\n \n # Creating a Neural Network (VGG-16)\n\n X = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(X_input)\n X = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(X)\n X = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(X)\n\n # Block 2\n X = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(X)\n X = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(X)\n X = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(X)\n\n # Block 3\n X = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(X)\n X = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(X)\n X = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(X)\n X = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(X)\n\n # Block 4\n X = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(X)\n X = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(X)\n X = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(X)\n X = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(X)\n\n # Block 5\n X = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(X)\n X = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(X)\n X = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(X)\n X = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(X)\n \n X = Flatten()(X)\n X = Dense(4096, activation='relu', kernel_initializer = 'he_normal', name='fc')(X)\n X = Dense(4096, activation='relu', kernel_initializer = 'he_normal', name='fc2')(X)\n X = Dense(2048, activation='relu', kernel_initializer = 'he_normal', name='fc3')(X)\n X = Dense(1024, activation='relu', kernel_initializer = 'he_normal', name='fc4')(X)\n X = Dense(512, activation='relu', kernel_initializer = 'he_normal', name='fc5')(X)\n X = Dense(256, activation='relu', kernel_initializer = 'he_normal', name='fc6')(X)\n X = Dense(2, activation='linear', name='regression')(X)\n model = Model(inputs=X_input, outputs = X, name='HappyModel')\n print(model.summary())\n \n return model",
"def get_model_vgg16():\n # base_model.summary():\n # ....\n # block5_conv4 (Conv2D) (None, 15, 15, 512) 2359808\n # _________________________________________________________________\n # block5_pool (MaxPooling2D) (None, 7, 7, 512) 0\n # _________________________________________________________________\n # flatten (Flatten) (None, 25088) 0\n # _________________________________________________________________\n # fc1 (Dense) (None, 4096) 102764544\n # _________________________________________________________________\n # fc2 (Dense) (None, 4096) 16781312\n # _________________________________________________________________\n # predictions (Dense) (None, 1000) 4097000\n #\n base_model = VGG16(weights='imagenet', include_top=True)\n model = Model(inputs=base_model.input,\n outputs=base_model.get_layer('fc2').output)\n return model",
"def vgg_16(inputs,\n num_classes=1000,\n is_training=True,\n dropout_keep_prob=0.5,\n spatial_squeeze=True,\n scope='vgg_16'):\n with tf.name_scope(scope, 'vgg_16', [inputs]) as sc:\n end_points_collection = sc + '_end_points'\n # Collect outputs for conv2d, fully_connected and max_pool2d.\n with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],\n outputs_collections=end_points_collection):\n net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')\n net = slim.max_pool2d(net, [2, 2], scope='pool1')\n net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')\n net = slim.max_pool2d(net, [2, 2], scope='pool2')\n net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')\n net = slim.max_pool2d(net, [2, 2], scope='pool3')\n net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')\n net = slim.max_pool2d(net, [2, 2], scope='pool4')\n net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')\n net = slim.max_pool2d(net, [2, 2], scope='pool5')\n # Use conv2d instead of fully_connected layers.\n net = slim.conv2d(net, 4096, [3, 3], padding='VALID', scope='fc6')\n net = slim.dropout(net, dropout_keep_prob, is_training=is_training,\n scope='dropout6')\n net = slim.conv2d(net, 4096, [1, 1], scope='fc7')\n net = slim.dropout(net, dropout_keep_prob, is_training=is_training,\n scope='dropout7')\n net = slim.conv2d(net, num_classes, [1, 1],\n activation_fn=None,\n normalizer_fn=None,\n scope='fc8')\n # Convert end_points_collection into a end_point dict.\n end_points = slim.utils.convert_collection_to_dict(end_points_collection)\n if spatial_squeeze:\n net = tf.squeeze(net, [1, 2], name='fc8/squeezed')\n end_points[sc + '/fc8'] = net\n return net, end_points",
"def vgg_16(inputs,\n num_classes=1000,\n is_training=True,\n dropout_keep_prob=0.5,\n spatial_squeeze=True,\n scope='vgg_16',\n fc_conv_padding='VALID',\n global_pool=False):\n with tf.variable_scope(scope, 'vgg_16', [inputs]) as sc:\n end_points_collection = sc.original_name_scope + '_end_points'\n # Collect outputs for conv2d, fully_connected and max_pool2d.\n with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],\n outputs_collections=end_points_collection):\n net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')\n net = slim.max_pool2d(net, [2, 2], scope='pool1')\n net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')\n net = slim.max_pool2d(net, [2, 2], scope='pool2')\n net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')\n net = slim.max_pool2d(net, [2, 2], scope='pool3')\n net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')\n net = slim.max_pool2d(net, [2, 2], scope='pool4')\n net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')\n net = slim.max_pool2d(net, [2, 2], scope='pool5')\n\n # if num_classes == 0:\n # return net\n\n # Use conv2d instead of fully_connected layers.\n net = slim.conv2d(net, 4096, [7, 7], padding=fc_conv_padding, scope='fc6')\n net = slim.dropout(net, dropout_keep_prob, is_training=is_training,\n scope='dropout6')\n net = slim.conv2d(net, 4096, [1, 1], scope='fc7')\n # Convert end_points_collection into a end_point dict.\n end_points = slim.utils.convert_collection_to_dict(end_points_collection)\n if global_pool:\n net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')\n end_points['global_pool'] = net\n if num_classes:\n net = slim.dropout(net, dropout_keep_prob, is_training=is_training,\n scope='dropout7')\n net = slim.conv2d(net, num_classes, [1, 1],\n activation_fn=None,\n normalizer_fn=None,\n scope='fc8')\n if spatial_squeeze:\n net = tf.squeeze(net, [1, 2], name='fc8/squeezed')\n end_points[sc.name + '/fc8'] = net\n return net, end_points",
"def build_vgg16_notop(image_dimensions, pooling, size_final_dense, num_classes, trainable=False, weights='imagenet'):\n\n vgg16_base = VGG16(weights=weights\n , include_top=False # Ignore the final dense layers, we'll train our own\n , input_shape=image_dimensions\n , pooling=pooling)\n vgg16_base.trainable=trainable\n\n image_input = Input(shape=image_dimensions)\n\n x = vgg16_base(image_input)\n x = Flatten()(x)\n x = Dense(size_final_dense,activation='relu')(x)\n out = Dense(num_classes,activation='softmax')(x) # Task is classification\n\n model = Model(image_input, out)\n return(model)",
"def instance_norm_model():\n inputs = tf.keras.Input(shape=(16, 16, 3,))\n x = tf.keras.layers.Conv2D(16, (3, 3))(inputs)\n x = tf.contrib.layers.instance_norm(x)\n return x",
"def build_vgg(self, weights=\"imagenet\"): \n \n # Input image to extract features from\n img = Input(shape=(self.img_rows, self.img_cols, 3))\n\n # Mean center and rescale by variance as in PyTorch\n processed = Lambda(lambda x: (x-self.mean) / self.std)(img)\n \n # If inference only, just return empty model \n if self.inference_only:\n model = Model(inputs=img, outputs=[img for _ in range(len(self.vgg_layers))])\n model.trainable = False\n model.compile(loss='mse', optimizer='adam')\n return model\n \n # Get the vgg network from Keras applications\n if weights in ['imagenet', None]:\n vgg = VGG16(weights=weights, include_top=False)\n else:\n vgg = VGG16(weights=None, include_top=False)\n vgg.load_weights(weights, by_name=True)\n\n # Output the first three pooling layers\n vgg.outputs = [vgg.layers[i].output for i in self.vgg_layers] \n \n # Create model and compile\n model = Model(inputs=img, outputs=vgg(processed))\n model.trainable = False\n model.compile(loss='mse', optimizer='adam')\n\n return model",
"def build_vgg():\n input_shape = (256, 256, 3)\n\n vgg = keras.applications.VGG19(include_top = False , input_shape = input_shape , weights=\"imagenet\")\n features = vgg.get_layer(index = 9).output\n\n model = keras.Model(inputs=[vgg.inputs], outputs=[features])\n return model",
"def vgg16_1d(**kwargs):\r\n return VGG16_1d(**kwargs)",
"def vgg16_bn(pretrained=False, **kwargs):\n if pretrained:\n kwargs['init_weights'] = False\n model = VGGBase(make_layers(), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg16_bn']))\n return model",
"def build_model():\n pretrained_model = VGG16(input_shape=(fixed_size[0], fixed_size[1], 3), weights='imagenet', include_top=False)\n # We will not train the layers imported.\n for layer in pretrained_model.layers:\n layer.trainable = False\n transfer_learning_model = Sequential()\n transfer_learning_model.add(pretrained_model)\n transfer_learning_model.add(Flatten())\n transfer_learning_model.add(Dense(128, activation='relu', kernel_initializer='he_uniform'))\n transfer_learning_model.add(Dropout(0.5))\n transfer_learning_model.add(Dense(3, activation='softmax'))\n transfer_learning_model.summary()\n opt = Adam(learning_rate=.0003)\n transfer_learning_model.compile(optimizer=opt, loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n return transfer_learning_model",
"def __init__(self, img_size, multi_layers=False):\n super().__init__()\n\n vgg = VGG16(include_top=False, input_shape=(img_size[0], img_size[1], 3))\n if multi_layers:\n layer_ids = [2, 5, 9, 13, 17]\n layer_outputs = [\n Flatten()(vgg.layers[layer_id].output) for layer_id in layer_ids]\n features = Concatenate(axis=-1)(layer_outputs)\n else:\n layer_ids = [13] # 13 -> conv4_3\n features = [\n Flatten()(vgg.layers[layer_id].output) for layer_id in layer_ids]\n\n self._model = Model(inputs=vgg.input, outputs=features)",
"def forward(self, data_batch):\n\n x = data_batch[0]\n im_info = data_batch[1]\n gt_boxes = data_batch[2]\n num_boxes = data_batch[3]\n rel_mat = data_batch[4]\n\n if self.training:\n self.iter_counter += 1\n\n input_imgs = x.clone()\n\n sources = list()\n loc = list()\n conf = list()\n\n self.batch_size = x.size(0)\n\n # apply vgg up to conv4_3 relu\n if isinstance(self.base, nn.ModuleList):\n for k,v in enumerate(self.base):\n x = v(x)\n else:\n x = self.base(x)\n\n s = self.L2Norm(x)\n sources.append(s)\n base_feat = s\n\n # apply vgg up to fc7\n if isinstance(self.conv5, nn.ModuleList):\n for k,v in enumerate(self.conv5):\n x = v(x)\n else:\n x = self.conv5(x)\n sources.append(x)\n\n # apply extra layers and cache source layer outputs\n for k, v in enumerate(self.extras):\n x = F.relu(v(x), inplace=True)\n if k % 2 == 1:\n sources.append(x)\n\n # apply multibox head to source layers\n for (x, l, c) in zip(sources, self.loc, self.conf):\n loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)\n conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)\n\n loc = loc.view(loc.size(0), -1, 4)\n conf = conf.view(conf.size(0), -1, self.num_classes)\n\n SSD_loss_cls = 0\n SSD_loss_bbox = 0\n if self.training:\n predictions = (\n loc,\n conf,\n self.priors.type_as(loc)\n )\n # targets = torch.cat([gt_boxes[:,:,:4] / self.size, gt_boxes[:,:,4:5]],dim=2)\n targets = gt_boxes\n SSD_loss_bbox, SSD_loss_cls = self.criterion(predictions, targets, num_boxes)\n\n conf = self.softmax(conf)\n\n # online data\n if self.training:\n if self.iter_counter > cfg.TRAIN.VMRN.ONLINEDATA_BEGIN_ITER:\n obj_rois, obj_num = self._obj_det(conf, loc, self.batch_size, im_info)\n obj_rois = obj_rois.type_as(gt_boxes)\n obj_num = obj_num.type_as(num_boxes)\n else:\n obj_rois = torch.FloatTensor([]).type_as(gt_boxes)\n obj_num = torch.LongTensor([]).type_as(num_boxes)\n obj_labels = None\n else:\n # when testing, this is object detection results\n # TODO: SUPPORT MULTI-IMAGE BATCH\n obj_rois, obj_num = self._obj_det(conf, loc, self.batch_size, im_info)\n if obj_rois.numel() > 0:\n obj_labels = obj_rois[:, 5]\n obj_rois = obj_rois[:, :5]\n obj_rois = obj_rois.type_as(gt_boxes)\n obj_num = obj_num.type_as(num_boxes)\n else:\n # there is no object detected\n obj_labels = torch.Tensor([]).type_as(gt_boxes).long()\n obj_rois = obj_rois.type_as(gt_boxes)\n obj_num = obj_num.type_as(num_boxes)\n\n if self.training:\n # offline data\n for i in range(self.batch_size):\n obj_rois = torch.cat([obj_rois,\n torch.cat([(i * torch.ones(num_boxes[i].item(), 1)).type_as(gt_boxes),\n (gt_boxes[i][:num_boxes[i]][:, 0:4])], 1)\n ])\n obj_num = torch.cat([obj_num, torch.Tensor([num_boxes[i]]).type_as(obj_num)])\n\n\n obj_rois = Variable(obj_rois)\n\n VMRN_rel_loss_cls = 0\n rel_cls_prob = torch.Tensor([]).type_as(obj_rois)\n if (obj_num > 1).sum().item() > 0:\n\n obj_pair_feat = self.VMRN_obj_pair_feat_extractor(input_imgs, obj_rois, self.batch_size, obj_num)\n # obj_pair_feat = obj_pair_feat.detach()\n rel_cls_score = self.VMRN_rel_cls_score(obj_pair_feat)\n\n rel_cls_prob = F.softmax(rel_cls_score)\n\n self.rel_batch_size = obj_pair_feat.size(0)\n\n if self.training:\n obj_pair_rel_label = self._generate_rel_labels(obj_rois, gt_boxes, obj_num, rel_mat)\n obj_pair_rel_label = obj_pair_rel_label.type_as(gt_boxes).long()\n\n rel_not_keep = (obj_pair_rel_label == 0)\n # no relationship is kept\n if (rel_not_keep == 0).sum().item() > 
0:\n rel_keep = torch.nonzero(rel_not_keep == 0).view(-1)\n\n rel_cls_score = rel_cls_score[rel_keep]\n\n obj_pair_rel_label = obj_pair_rel_label[rel_keep]\n obj_pair_rel_label -= 1\n VMRN_rel_loss_cls = F.cross_entropy(rel_cls_score, obj_pair_rel_label)\n else:\n if (not cfg.TEST.VMRN.ISEX) and cfg.TRAIN.VMRN.ISEX:\n rel_cls_prob = rel_cls_prob[::2, :]\n\n rel_result = None\n if not self.training:\n if obj_rois.numel() > 0:\n pred_boxes = obj_rois.data[:,1:5]\n pred_boxes[:, 0::2] /= im_info[0][3].item()\n pred_boxes[:, 1::2] /= im_info[0][2].item()\n rel_result = (pred_boxes, obj_labels, rel_cls_prob.data)\n else:\n rel_result = (obj_rois.data, obj_labels, rel_cls_prob.data)\n\n return loc, conf, rel_result, SSD_loss_bbox, SSD_loss_cls, VMRN_rel_loss_cls",
"def vgg16(tensorized, **kwargs):\n return _vgg('vgg16', 'D', False, tensorized, **kwargs)",
"def vgg16_bn(tensorized, **kwargs):\n return _vgg('vgg16_bn', 'D', True, tensorized, **kwargs)",
"def run_vgg_experiment(args, device):\n validation_ratio, record_train_acc, record_val_acc, record_test_acc = utils.configure_training_mode(args)\n\n train_loader, validation_loader, test_loader = datasets.build_cifar10_loaders(args.batch_size,\n validation_ratio=validation_ratio,\n train_validation_split_seed=0)\n local_loss_list = utils.get_loss(args)\n nonlinearity = utils.get_nonlinearity(args)\n\n optimizer_local, local_opt_arguments_dict, local_scheduler_arguments_dict, \\\n optimizer_final, final_opt_arguments_dict, final_scheduler_arguments_dict = \\\n utils.choose_optimizers_and_parameters(args)\n\n conv_sizes = [128, 256, 256, 512, 512, 512]\n\n if args.vgg_conv_size_multiplier != 1:\n for i in range(len(conv_sizes)):\n conv_sizes[i] = conv_sizes[i] * args.vgg_conv_size_multiplier\n do_pooling = [False, True, False, True, True, True]\n\n if args.divisive_norm_conv:\n divisive_norm_list_conv = [networks.DivisiveNorm(args.divnorm_power, args.grouping_dim, args.grouped_var_delta)\n for i in range(len(conv_sizes))]\n else:\n divisive_norm_list_conv = None\n\n kernel_sizes = [3 for i in range(len(conv_sizes))]\n fc_layers = [1024]\n\n if args.divisive_norm_fc:\n divisive_norm_list_fc = [networks.DivisiveNorm(args.divnorm_power, args.grouping_dim,\n args.grouped_var_delta)\n for i in range(len(fc_layers))]\n else:\n divisive_norm_list_fc = None\n\n alt_feedback_type = None\n if args.feedback_alignment:\n alt_feedback_type = 'feedback_alignment'\n elif args.sign_symmetry:\n alt_feedback_type = 'sign_symmetry'\n\n net = networks.Network(nonlinearity, local_loss_list, optimizer_local,\n torch.optim.lr_scheduler.MultiStepLR, conv_sizes, kernel_sizes,\n do_pooling, fc_layers, 'max', 'CIFAR10', bias=False,\n local_opt_arguments_dict=local_opt_arguments_dict,\n local_scheduler_arguments_dict=local_scheduler_arguments_dict,\n dropout_p=args.dropout_p, batch_norm=args.batch_norm,\n divisive_norm_list_conv=divisive_norm_list_conv, divisive_norm_list_fc=divisive_norm_list_fc,\n spatial_dropout=args.spatial_dropout, alt_feedback_type=alt_feedback_type)\n\n net = net.to(device)\n print(net)\n\n final_loss = nn.CrossEntropyLoss()\n\n if args.backprop:\n final_opt = optimizer_final(net.parameters(), **final_opt_arguments_dict)\n compute_local_loss = False\n update_local_loss = False\n else:\n final_opt = optimizer_final(net.softmax_layer.parameters(), **final_opt_arguments_dict)\n compute_local_loss = True\n update_local_loss = True\n\n final_scheduler = torch.optim.lr_scheduler.MultiStepLR(final_opt, **final_scheduler_arguments_dict)\n\n train_acc, val_acc, test_acc = utils.train_network(\n net, device, final_loss, final_opt, final_scheduler, args.n_epochs, train_loader, validation_loader,\n test_loader, compute_local_loss=compute_local_loss, update_local_loss=update_local_loss,\n record_train_acc=record_train_acc, record_val_acc=record_val_acc, record_test_acc=record_test_acc,\n print_results=True, backprop_batch_manhattan=args.backprop_batch_manhattan)\n\n return train_acc, val_acc, test_acc",
"def create_vggvox(embedding_dims, name=\"vggvox\"):\n model = tf.keras.Sequential(name=name)\n model.add(tf.keras.layers.Conv2D(96, (7,7), strides=(2,2), padding=\"valid\", kernel_regularizer=tf.keras.regularizers.L2(5e-4), name=f\"{name}_conv1\"))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.ReLU())\n model.add(tf.keras.layers.MaxPooling2D((3,3), strides=(2,2), name=\"mpool1\"))\n model.add(tf.keras.layers.Conv2D(256, (5,5), strides=(2,2), padding=\"valid\", kernel_regularizer=tf.keras.regularizers.L2(5e-4), name=f\"{name}_conv2\"))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.ReLU())\n model.add(tf.keras.layers.MaxPooling2D((3,3), strides=(2,2), name=\"mpool2\"))\n model.add(tf.keras.layers.Conv2D(384, (3,3), strides=(1,1), padding=\"same\", kernel_regularizer=tf.keras.regularizers.L2(5e-4), name=f\"{name}_conv3\"))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.ReLU())\n model.add(tf.keras.layers.Conv2D(256, (3,3), strides=(1,1), padding=\"same\", kernel_regularizer=tf.keras.regularizers.L2(5e-4), name=f\"{name}_conv4\"))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.ReLU())\n model.add(tf.keras.layers.Conv2D(256, (3,3), strides=(1,1), padding=\"same\", kernel_regularizer=tf.keras.regularizers.L2(5e-4), name=f\"{name}_conv5\"))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.ReLU())\n model.add(tf.keras.layers.MaxPooling2D((5,3), strides=(3,2), name=f\"{name}_mpool5\"))\n model.add(tf.keras.layers.Conv2D(4096, (9,1), strides=1, kernel_regularizer=tf.keras.regularizers.L2(5e-4), padding=\"valid\", name=f\"{name}_fc6\"))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.ReLU())\n model.add(tf.keras.layers.Lambda(lambda x: tf.math.reduce_mean(x, axis=[1,2], name=f\"{name}_apool6\")))\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(embedding_dims, kernel_regularizer=tf.keras.regularizers.L2(5e-4), name=f\"{name}_embeddings\"))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.ReLU())\n return model",
"def instantiate_VGG_model(img_input_shape):\r\n # Load the VGG model\r\n vgg_conv = VGG16(weights='imagenet', include_top=False, input_shape=img_input_shape)\r\n \r\n # Freeze the layers except the last 4 layers\r\n for layer in vgg_conv.layers[:-4]:\r\n layer.trainable = False\r\n \r\n # Create the model\r\n model = models.Sequential()\r\n model.add(vgg_conv)\r\n \r\n # Add new layers\r\n model.add(layers.Flatten())\r\n model.add(layers.Dense(256, activation='relu'))\r\n model.add(layers.Dropout(0.25))\r\n model.add(layers.Dense(nb_class, activation='softmax'))\r\n \r\n model.compile(loss='categorical_crossentropy',\r\n optimizer=optimizers.RMSprop(lr=1e-4),\r\n metrics=['acc'])\r\n return model",
"def vgg16(**kwargs):\r\n return VGG16(**kwargs)",
"def vgg16(*args):\n return _VGGWrapper(models.vgg16(*args))",
"def __init__(self):\n torch.nn.Module.__init__(self)\n ######################### Convolution and pooling layers of VGG-16.\n self.features = torchvision.models.vgg16(pretrained=True).features # fine tune?\n self.features = torch.nn.Sequential(*list(self.features.children())\n [:-22]) # Remove pool2 and rest, lack of computational resource\n # No grad for convVGG\n # for param in self.features.parameters():\n # param.requires_grad = False\n\n #################### Channel Grouping Net\n # self.fc1_ = torch.nn.Linear(128, 128*16)#lack of resource\n # self.fc2_ = torch.nn.Linear(128, 128*16)\n # self.fc3_ = torch.nn.Linear(128, 128*16)\n #\n # torch.nn.init.kaiming_normal_(self.fc1_.weight.data, nonlinearity='relu')\n # if self.fc1_.bias is not None:\n # torch.nn.init.constant_(self.fc1_.bias.data, val=0) # fc层的bias进行constant初始化\n # torch.nn.init.kaiming_normal_(self.fc2_.weight.data, nonlinearity='relu')\n # if self.fc2_.bias is not None:\n # torch.nn.init.constant_(self.fc2_.bias.data, val=0) # fc层的bias进行constant初始化\n # torch.nn.init.kaiming_normal_(self.fc3_.weight.data, nonlinearity='relu')\n # if self.fc3_.bias is not None:\n # torch.nn.init.constant_(self.fc3_.bias.data, val=0) # fc层的bias进行constant初始化\n\n self.fc1 = torch.nn.Linear(128*28*28, 128)\n self.fc2 = torch.nn.Linear(128*28*28, 128)\n self.fc3 = torch.nn.Linear(128*28*28, 128)\n\n\n torch.nn.init.kaiming_normal_(self.fc1.weight.data, nonlinearity='relu')\n if self.fc1.bias is not None:\n torch.nn.init.constant_(self.fc1.bias.data, val=0) # fc层的bias进行constant初始化\n torch.nn.init.kaiming_normal_(self.fc2.weight.data, nonlinearity='relu')\n if self.fc2.bias is not None:\n torch.nn.init.constant_(self.fc2.bias.data, val=0) # fc层的bias进行constant初始化\n torch.nn.init.kaiming_normal_(self.fc3.weight.data, nonlinearity='relu')\n if self.fc3.bias is not None:\n torch.nn.init.constant_(self.fc3.bias.data, val=0) # fc层的bias进行constant初始化\n\n self.layerNorm=nn.LayerNorm([224,224])\n\n # global grad for hook\n self.image_reconstruction = None\n self.register_hooks()\n self.GradWeight=1e-1\n\n # ################### STN input N*3*448*448\n # self.localization = [\n # nn.Sequential(\n # nn.MaxPool2d(4,stride=4),#112\n # nn.ReLU(True),\n #\n # nn.Conv2d(3, 32, kernel_size=5,stride=1,padding=2), # 112\n # nn.MaxPool2d(2, stride=2), # 56\n # nn.ReLU(True),\n #\n # nn.Conv2d(32, 48, kernel_size=3,stride=1,padding=1),\n # nn.MaxPool2d(2, stride=2), # 56/2=28\n # nn.ReLU(True),\n #\n # nn.Conv2d(48, 64, kernel_size=3, stride=1, padding=1),\n # nn.MaxPool2d(2, stride=2), # 28/2=14\n # nn.ReLU(True) #output 64*14*14\n # ).cuda(),\n # nn.Sequential(\n # nn.MaxPool2d(4, stride=4), # 112\n # nn.ReLU(True),\n #\n # nn.Conv2d(3, 32, kernel_size=5, stride=1, padding=2), # 112\n # nn.MaxPool2d(2, stride=2), # 56\n # nn.ReLU(True),\n #\n # nn.Conv2d(32, 48, kernel_size=3, stride=1, padding=1),\n # nn.MaxPool2d(2, stride=2), # 56/2=28\n # nn.ReLU(True),\n #\n # nn.Conv2d(48, 64, kernel_size=3, stride=1, padding=1),\n # nn.MaxPool2d(2, stride=2), # 28/2=14\n # nn.ReLU(True) # output 64*14*14\n # ).cuda(),\n # nn.Sequential(\n # nn.MaxPool2d(4, stride=4), # 112\n # nn.ReLU(True),\n #\n # nn.Conv2d(3, 32, kernel_size=5, stride=1, padding=2), # 112\n # nn.MaxPool2d(2, stride=2), # 56\n # nn.ReLU(True),\n #\n # nn.Conv2d(32, 48, kernel_size=3, stride=1, padding=1),\n # nn.MaxPool2d(2, stride=2), # 56/2=28\n # nn.ReLU(True),\n #\n # nn.Conv2d(48, 64, kernel_size=3, stride=1, padding=1),\n # nn.MaxPool2d(2, stride=2), # 28/2=14\n # nn.ReLU(True) # output 64*14*14\n # 
).cuda()\n # ]\n # # Regressor for the 3 * 2 affine matrix\n # self.fc_loc = [\n # nn.Sequential(\n # nn.Linear(64 * 14 * 14, 32),\n # nn.ReLU(True),\n # nn.Linear(32, 3 * 2)\n # ).cuda(),\n # nn.Sequential(\n # nn.Linear(64 * 14 * 14, 32),\n # nn.ReLU(True),\n # nn.Linear(32, 3 * 2)\n # ).cuda(),\n # nn.Sequential(\n # nn.Linear(64 * 14 * 14, 32),\n # nn.ReLU(True),\n # nn.Linear(32, 3 * 2)\n # ).cuda()\n # ]\n # # Initialize the weights/bias with identity transformation\n # for fc_locx in self.fc_loc:\n # fc_locx[2].weight.data.zero_()\n # fc_locx[2].bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float))\n\n ########################Bilinear CNN output 256 channels\n self.bcnnConv_1=torch.nn.Sequential(*list(torchvision.models.vgg16(pretrained=True).features.children())\n [:-1]) # Remove pool3 and rest.\n self.bcnnConv_2 = torch.nn.Sequential(*list(torchvision.models.vgg16(pretrained=True).features.children())\n [:-1]) # Remove pool3 and rest.\n self.bcnnConv_3 = torch.nn.Sequential(*list(torchvision.models.vgg16(pretrained=True).features.children())\n [:-1]) # Remove pool3 and rest.\n #BCNN Linear classifier.\n self.bfc1 = torch.nn.Linear(512*512, 200)\n self.bfc2 = torch.nn.Linear(512*512, 200)\n self.bfc3 = torch.nn.Linear(512*512, 200)\n torch.nn.init.kaiming_normal_(self.bfc1.weight.data) # 何凯明初始化\n if self.bfc1.bias is not None:\n torch.nn.init.constant_(self.bfc1.bias.data, val=0) # fc层的bias进行constant初始化\n torch.nn.init.kaiming_normal_(self.bfc2.weight.data) # 何凯明初始化\n if self.bfc2.bias is not None:\n torch.nn.init.constant_(self.bfc2.bias.data, val=0) # fc层的bias进行constant初始化\n torch.nn.init.kaiming_normal_(self.bfc3.weight.data) # 何凯明初始化\n if self.bfc3.bias is not None:\n torch.nn.init.constant_(self.bfc3.bias.data, val=0) # fc层的bias进行constant初始化\n\n # self.CBP1 = CompactBilinearPooling(512, 512, 50000)\n # self.CBP2 = CompactBilinearPooling(512, 512, 50000)\n # self.CBP3 = CompactBilinearPooling(512, 512, 50000)",
"def model(pretrained=False, **kwargs):\n model = VGG(make_layers(cfg['D1'], dilation=dilation['D1']), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg16']))\n return model",
"def __init__(self, num_classes):\n super(VGG16, self).__init__()\n self.vgg16_feature_extractor = VGG16FeatureExtraction(weights_update=True)\n self.max_pool = nn.MaxPool2d(kernel_size=2, stride=2)\n self.classifier = VGG16Classfier()\n self.fc3 = _fc(in_channels=4096, out_channels=num_classes)"
] |
[
"0.6655622",
"0.6650276",
"0.664758",
"0.66025937",
"0.65362394",
"0.64770895",
"0.6441213",
"0.6392127",
"0.6389471",
"0.6289369",
"0.62637967",
"0.62584615",
"0.62367207",
"0.62322664",
"0.6110412",
"0.6110049",
"0.6054402",
"0.60233474",
"0.60222334",
"0.6008342",
"0.60014254",
"0.59955025",
"0.59953684",
"0.5973236",
"0.5964242",
"0.595873",
"0.5955753",
"0.5948627",
"0.59477895",
"0.5947363"
] |
0.69133383
|
0
|
Looks ahead one token. If the next token matches one of the given ones, returns true and advances the head pointer.
|
def _match(self, *token_types):
for token in token_types:
if self._check(token):
self._advance()
return True
return False
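
The _match helper above presumes a small set of parser utilities that appear in the neighbouring rows of this dataset (_check, _previous) plus a cursor-advancing _advance. Below is a minimal, self-contained sketch of how those pieces typically fit together in a recursive-descent parser; the Token type, the _peek/_advance/_is_at_end helpers, and the sample token stream are assumptions for illustration only, not part of the dataset's original source.

# Minimal sketch of the scaffolding _match relies on (assumed for illustration;
# only _match, _check and _previous appear verbatim in this dataset).
from collections import namedtuple

Token = namedtuple("Token", ["token_type", "lexeme"])

class Parser:
    def __init__(self, token_list):
        self.token_list = token_list
        self._current = 0

    def _peek(self):
        # Look at the token under the cursor without consuming it.
        return self.token_list[self._current]

    def _is_at_end(self):
        return self._peek().token_type == "EOF"

    def _previous(self):
        # The token most recently consumed by _advance.
        return self.token_list[self._current - 1]

    def _advance(self):
        if not self._is_at_end():
            self._current += 1
        return self._previous()

    def _check(self, token_type):
        if self._is_at_end():
            return False
        return self._peek().token_type == token_type

    def _match(self, *token_types):
        # Consume the next token only if it matches one of the given types.
        for token in token_types:
            if self._check(token):
                self._advance()
                return True
        return False

tokens = [Token("NUMBER", "1"), Token("PLUS", "+"), Token("NUMBER", "2"), Token("EOF", "")]
parser = Parser(tokens)
assert parser._match("MINUS", "NUMBER")      # consumes the leading NUMBER
assert parser._previous().lexeme == "1"      # _previous returns the consumed token
assert not parser._match("STAR")             # PLUS is next, so nothing is consumed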
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def match(self, token):\n try:\n if token == 'S' and is_symbol(self.the_input[self.index]) \\\n or self.the_input[self.index] == token:\n self.index += 1\n return True\n except IndexError:\n print 'Error on checking \\'' + token + \\\n '\\': the next token is empty'\n exit(1)\n print 'No' # there is improper grammar\n exit(1)",
"def IsFirstRequire(self, token):\n return self._require_tokens and token == self._require_tokens[0]",
"def check_token(self, *args) -> bool:\n if len(args) == 1:\n if isinstance(args[0], str):\n return self.token_name == args[0]\n elif isinstance(args[0], _Enum):\n return self.token_name == args[0].name\n elif isinstance(args[0], _Sequence):\n return self.token_name in args[0]\n raise TypeError(\"_check_token() taking 1 argument, type: str, Enum or Sequence\")",
"def containsToken(self, token):\n if token.sentence != self.tokens[0].sentence:\n return False # not in same sentence\n \n return self.tokens[0].index <= token.index and token.index <= self.tokens[-1].index",
"def has_next():",
"def match_any(self, literals):\n for (index, literal) in enumerate(literals):\n if self.read(len(literal)) == literal:\n self.pos += len(literal)\n return index\n\n raise TokenError(\"Expected one of: {}.\".format(\", \".join(literals)))",
"def has_next():\n\n return True",
"def match(self, token):\n\n if self.la == token:\n self.la, self.val = self.next_token()\n else:\n raise ParseError(\"found {} instead of {}\".format(self.la, token))",
"def IsFirstProvide(self, token):\n return self._provide_tokens and token == self._provide_tokens[0]",
"def peek_for_token(self, ch, check_tok, yes_tok, no_tok):\n if self.peek_char() == check_tok:\n first = ch\n self.read_char()\n literal = first + self.char\n return Token(yes_tok, first + self.char)\n else:\n return Token(no_tok, ch)",
"def startsWith(self, prefix: str) -> bool:\n node = self.head\n for c in prefix:\n if c not in node.next:\n return False\n node = node.next[c]\n return True",
"def has_next(self):\n try:\n self.next()\n return True\n except (ParseException, struct.error):\n return False",
"def has_next(self) -> bool:\n return self.peek() != self.sentinel",
"def check_for_token(token):\n try:\n decode_token(token)\n return True\n except:\n return False",
"def match(cls, characters: CharacterStream) -> Optional[\"Token\"]:\n return",
"def startsWith(self, prefix):\n if prefix[0] not in self.trie:\n return False\n cur = self.trie[prefix[0]]\n for char in prefix[1:]:\n if char not in cur.nexts:\n return False\n cur = cur.nexts[char]\n return True",
"def isPrefixOp(tokens):\n stop = SwiftSupport.getLastOpTokenIndex(tokens)\n if stop == -1:\n return False\n start = tokens.index\n prevToken = tokens.get(start - 1)\n nextToken = tokens.get(stop + 1)\n prevIsWS = SwiftSupport.isLeftOperatorWS(prevToken)\n nextIsWS = SwiftSupport.isRightOperatorWS(nextToken)\n result = prevIsWS and not nextIsWS\n return result",
"def is_next_to(index, alist):\n if not index == 0 and alist[index - 1] == alist[index]:\n return True\n elif not index >= len(alist) - 1 and alist[index + 1] == alist[index]:\n return True\n else:\n return False",
"def contains(s, v):\n head = s\n while not empty(head):\n if head.first == v:\n return True\n head = head.rest\n return False",
"def atHead(self):\n return self.cursor == self.head",
"def __contains__(self, token: Hashable) -> bool:\n return token in self._token_to_idx",
"def search(self, x):\n temp = self.head\n while temp:\n if temp.data == x:\n return True\n temp = temp.next\n return False",
"def next_token(self, context, token):",
"def next(self):\r\n\t\tself.index += 1\r\n\t\treturn not self.eof()",
"def search(self, word: str) -> bool:\n node = self.head\n for c in word:\n if c not in node.next:\n return False\n node = node.next[c]\n return node.valid",
"def has_next(self):\n regf = self.first_hbin().parent()\n if regf.hbins_size() + regf.first_hbin_offset() == self._offset_next_hbin:\n return False\n\n try:\n self.next()\n return True\n except (ParseException, struct.error):\n return False",
"def next_token(self):\n p = self.re_token.search(self.remain)\n if not p:\n return None\n # move forward.\n s = p.start()\n self.buffer.append(self.remain[:s].encode(string_escape))\n self.cur += s + len(p.group())\n\n return p",
"def has_next(self):\n # type: () -> bool\n return len(self.buffer) > 0",
"def next(self):\n if not self.tokens:\n return None\n else:\n return self.tokens[0]",
"def match(self, literal):\n if self.read(len(literal)) == literal:\n self.pos += len(literal)\n else:\n raise TokenError(\"Expected {}.\".format(literal))"
] |
[
"0.64136237",
"0.585626",
"0.58293134",
"0.5813293",
"0.5748833",
"0.57118",
"0.56403",
"0.561034",
"0.56024075",
"0.55615383",
"0.54867095",
"0.54227394",
"0.5391972",
"0.5381704",
"0.5378281",
"0.5342942",
"0.5334457",
"0.5324501",
"0.5290606",
"0.5278128",
"0.5262168",
"0.5230846",
"0.5228388",
"0.52049583",
"0.5201311",
"0.5199518",
"0.5198854",
"0.51902544",
"0.5188693",
"0.51560074"
] |
0.62295413
|
1
|
Checks the next token for the given token type.
|
def _check(self, token_type):
if self._is_at_end():
return False
return self._peek().token_type == token_type
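
A brief design note on this row's document: _check is a purely non-consuming lookahead. The _is_at_end guard returns False at the end of the token stream rather than letting the lookahead run past the final token, which is what allows callers such as _match (previous row) to probe several candidate token types in turn without ever moving the cursor on a miss.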
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def eat(self, token_type):\n if self.current_token.type == token_type:\n self.current_token = self.lexer.get_next_token()\n # print(self.current_token)\n else:\n self.error()",
"def _assert_token_type(self, token, expected_type):\n if token and token.type == expected_type:\n return\n # Skip whitespace to make the error message more useful.\n pos = self._skip_whitespace()\n raise CppParsingError(expected_type, self.body, pos, self.file_path,\n self.line_number)",
"def match_type(self, token_type):\n if isinstance(self.cursor(), token_type):\n token = self.cursor()\n self.pos += 1\n else:\n raise ParseError(\"Expected {!s}.\".format(token_type))\n return token",
"def maybe_advance(self, expected_type):\n token = self._get_token()\n if token and token.type == expected_type:\n self.pos = token.pos\n return token.value\n return None",
"def _match(self, *token_types):\n for token in token_types:\n if self._check(token):\n self._advance()\n return True\n\n return False",
"def _consume(self, token_type, msg):\n if self._check(token_type):\n return self._advance()\n\n raise self._error(self._peek(), msg)",
"def here(self, type):\n # Get the token ahead of the current index.\n possibleIndexEosToken = self.getCurrentToken().tokenIndex - 1\n ahead = self._input.get(possibleIndexEosToken)\n\n # Check if the token resides on the HIDDEN channel and if it is of the\n # provided type.\n return (ahead.channel == Lexer.HIDDEN) and (ahead.type == type)",
"def next_token(self, context, token):",
"def consume_if(self, tok_type: str) -> Token:\n curr = self.current\n if curr.tok_type != tok_type:\n raise ExprSyntaxError\n self.pos += 1\n return curr",
"def next_token(self, context: PluginScanContext, token: MarkdownToken) -> None:\n if token.is_atx_heading:\n atx_token = cast(AtxHeadingMarkdownToken, token)\n self.__handle_atx_heading(context, atx_token)\n elif token.is_setext_heading:\n setext_token = cast(SetextHeadingMarkdownToken, token)\n self.__handle_setext_heading(setext_token)\n elif token.is_text:\n text_token = cast(TextMarkdownToken, token)\n self.__handle_text(text_token)\n elif token.is_setext_heading_end:\n end_token = cast(EndMarkdownToken, token)\n self.__handle_setext_heading_end(context, end_token)",
"def token_assert(obj, type_):\n if not isinstance(obj, type_):\n raise TokenizeError(\"Tokenizer Error: Expected {}, got {}\".format(type_, type(obj)))",
"def check_for_token(token):\n try:\n decode_token(token)\n return True\n except:\n return False",
"def advance(self, expected_type):\n token = self._get_token()\n self._assert_token_type(token, expected_type)\n self.pos = token.pos\n return token.value",
"def next_token(self, tok, include_extra=False):\n # type: (Token, bool) -> Token\n i = tok.index + 1\n if not include_extra:\n while is_non_coding_token(self._tokens[i].type):\n i += 1\n return self._tokens[i]",
"def find_token(self, start_token, tok_type, tok_str=None, reverse=False):\n # type: (Token, int, Optional[str], bool) -> Token\n t = start_token\n advance = self.prev_token if reverse else self.next_token\n while not match_token(t, tok_type, tok_str) and not token.ISEOF(t.type):\n t = advance(t, include_extra=True)\n return t",
"def CheckToken(self, token, state):\n # Store some convenience variables\n first_in_line = token.IsFirstInLine()\n last_in_line = token.IsLastInLine()\n last_non_space_token = state.GetLastNonSpaceToken()\n\n type = token.type\n\n # Process the line change.\n if not self._is_html and FLAGS.strict:\n # TODO(robbyw): Support checking indentation in HTML files.\n indentation_errors = self._indentation.CheckToken(token, state)\n for indentation_error in indentation_errors:\n self._HandleError(*indentation_error)\n\n if last_in_line:\n self._CheckLineLength(token, state)\n\n if type == Type.PARAMETERS:\n # Find missing spaces in parameter lists.\n if self.MISSING_PARAMETER_SPACE.search(token.string):\n self._HandleError(errors.MISSING_SPACE, 'Missing space after \",\"',\n token)\n\n # Find extra spaces at the beginning of parameter lists. Make sure\n # we aren't at the beginning of a continuing multi-line list.\n if not first_in_line:\n space_count = len(token.string) - len(token.string.lstrip())\n if space_count:\n self._HandleError(errors.EXTRA_SPACE, 'Extra space after \"(\"',\n token, Position(0, space_count))\n\n elif (type == Type.START_BLOCK and\n token.metadata.context.type == Context.BLOCK):\n self._CheckForMissingSpaceBeforeToken(token)\n\n elif type == Type.END_BLOCK:\n # This check is for object literal end block tokens, but there is no need\n # to test that condition since a comma at the end of any other kind of\n # block is undoubtedly a parse error.\n last_code = token.metadata.last_code\n if last_code.IsOperator(','):\n self._HandleError(errors.COMMA_AT_END_OF_LITERAL,\n 'Illegal comma at end of object literal', last_code,\n Position.All(last_code.string))\n\n if state.InFunction() and state.IsFunctionClose():\n is_immediately_called = (token.next and\n token.next.type == Type.START_PAREN)\n if state.InTopLevelFunction():\n # When the function was top-level and not immediately called, check\n # that it's terminated by a semi-colon.\n if state.InAssignedFunction():\n if not is_immediately_called and (last_in_line or\n not token.next.type == Type.SEMICOLON):\n self._HandleError(errors.MISSING_SEMICOLON_AFTER_FUNCTION,\n 'Missing semicolon after function assigned to a variable',\n token, Position.AtEnd(token.string))\n else:\n if not last_in_line and token.next.type == Type.SEMICOLON:\n self._HandleError(errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,\n 'Illegal semicolon after function declaration',\n token.next, Position.All(token.next.string))\n\n if (state.InInterfaceMethod() and last_code.type != Type.START_BLOCK):\n self._HandleError(errors.INTERFACE_METHOD_CANNOT_HAVE_CODE,\n 'Interface methods cannot contain code', last_code)\n\n elif (state.IsBlockClose() and\n token.next and token.next.type == Type.SEMICOLON):\n self._HandleError(errors.REDUNDANT_SEMICOLON,\n 'No semicolon is required to end a code block',\n token.next, Position.All(token.next.string))\n\n elif type == Type.SEMICOLON:\n if token.previous and token.previous.type == Type.WHITESPACE:\n self._HandleError(errors.EXTRA_SPACE, 'Extra space before \";\"',\n token.previous, Position.All(token.previous.string))\n\n if token.next and token.next.line_number == token.line_number:\n if token.metadata.context.type != Context.FOR_GROUP_BLOCK:\n # TODO(robbyw): Error about no multi-statement lines.\n pass\n\n elif token.next.type not in (\n Type.WHITESPACE, Type.SEMICOLON, Type.END_PAREN):\n self._HandleError(errors.MISSING_SPACE,\n 'Missing space after \";\" in for statement',\n token.next,\n Position.AtBeginning())\n\n 
last_code = token.metadata.last_code\n if last_code and last_code.type == Type.SEMICOLON:\n # Allow a single double semi colon in for loops for cases like:\n # for (;;) { }.\n # NOTE(user): This is not a perfect check, and will not throw an error\n # for cases like: for (var i = 0;; i < n; i++) {}, but then your code\n # probably won't work either.\n for_token = tokenutil.CustomSearch(last_code,\n lambda token: token.type == Type.KEYWORD and token.string == 'for',\n end_func=lambda token: token.type == Type.SEMICOLON,\n distance=None,\n reverse=True)\n\n if not for_token:\n self._HandleError(errors.REDUNDANT_SEMICOLON, 'Redundant semicolon',\n token, Position.All(token.string))\n\n elif type == Type.START_PAREN:\n if token.previous and token.previous.type == Type.KEYWORD:\n self._HandleError(errors.MISSING_SPACE, 'Missing space before \"(\"',\n token, Position.AtBeginning())\n elif token.previous and token.previous.type == Type.WHITESPACE:\n before_space = token.previous.previous\n if (before_space and before_space.line_number == token.line_number and\n before_space.type == Type.IDENTIFIER):\n self._HandleError(errors.EXTRA_SPACE, 'Extra space before \"(\"',\n token.previous, Position.All(token.previous.string))\n\n elif type == Type.START_BRACKET:\n if (not first_in_line and token.previous.type == Type.WHITESPACE and\n last_non_space_token and\n last_non_space_token.type in Type.EXPRESSION_ENDER_TYPES):\n self._HandleError(errors.EXTRA_SPACE, 'Extra space before \"[\"',\n token.previous, Position.All(token.previous.string))\n # If the [ token is the first token in a line we shouldn't complain\n # about a missing space before [. This is because some Ecma script\n # languages allow syntax like:\n # [Annotation]\n # class MyClass {...}\n # So we don't want to blindly warn about missing spaces before [.\n # In the the future, when rules for computing exactly how many spaces\n # lines should be indented are added, then we can return errors for\n # [ tokens that are improperly indented.\n # For example:\n # var someVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongVariableName =\n # [a,b,c];\n # should trigger a proper indentation warning message as [ is not indented\n # by four spaces.\n elif (not first_in_line and token.previous and\n not token.previous.type in (\n [Type.WHITESPACE, Type.START_PAREN, Type.START_BRACKET] +\n Type.EXPRESSION_ENDER_TYPES)):\n self._HandleError(errors.MISSING_SPACE, 'Missing space before \"[\"',\n token, Position.AtBeginning())\n\n elif type in (Type.END_PAREN, Type.END_BRACKET):\n # Ensure there is no space before closing parentheses, except when\n # it's in a for statement with an omitted section, or when it's at the\n # beginning of a line.\n if (token.previous and token.previous.type == Type.WHITESPACE and\n not token.previous.IsFirstInLine() and\n not (last_non_space_token and last_non_space_token.line_number ==\n token.line_number and\n last_non_space_token.type == Type.SEMICOLON)):\n self._HandleError(errors.EXTRA_SPACE, 'Extra space before \"%s\"' %\n token.string, token.previous, Position.All(token.previous.string))\n\n if token.type == Type.END_BRACKET:\n last_code = token.metadata.last_code\n if last_code.IsOperator(','):\n self._HandleError(errors.COMMA_AT_END_OF_LITERAL,\n 'Illegal comma at end of array literal', last_code,\n Position.All(last_code.string))\n\n elif type == Type.WHITESPACE:\n if self.ILLEGAL_TAB.search(token.string):\n if token.IsFirstInLine():\n self._HandleError(errors.ILLEGAL_TAB,\n 'Illegal tab in whitespace before \"%s\"' % 
token.next.string,\n token, Position.All(token.string))\n else:\n self._HandleError(errors.ILLEGAL_TAB,\n 'Illegal tab in whitespace after \"%s\"' % token.previous.string,\n token, Position.All(token.string))\n\n # Check whitespace length if it's not the first token of the line and\n # if it's not immediately before a comment.\n if last_in_line:\n # Check for extra whitespace at the end of a line.\n self._HandleError(errors.EXTRA_SPACE, 'Extra space at end of line',\n token, Position.All(token.string))\n elif not first_in_line and not token.next.IsComment():\n if token.length > 1:\n self._HandleError(errors.EXTRA_SPACE, 'Extra space after \"%s\"' %\n token.previous.string, token,\n Position(1, len(token.string) - 1))\n\n elif type == Type.OPERATOR:\n last_code = token.metadata.last_code\n\n if not self._ExpectSpaceBeforeOperator(token):\n if (token.previous and token.previous.type == Type.WHITESPACE and\n last_code and last_code.type in (Type.NORMAL, Type.IDENTIFIER)):\n self._HandleError(errors.EXTRA_SPACE,\n 'Extra space before \"%s\"' % token.string, token.previous,\n Position.All(token.previous.string))\n\n elif (token.previous and\n not token.previous.IsComment() and\n token.previous.type in Type.EXPRESSION_ENDER_TYPES):\n self._HandleError(errors.MISSING_SPACE,\n 'Missing space before \"%s\"' % token.string, token,\n Position.AtBeginning())\n\n # Check that binary operators are not used to start lines.\n if ((not last_code or last_code.line_number != token.line_number) and\n not token.metadata.IsUnaryOperator()):\n self._HandleError(errors.LINE_STARTS_WITH_OPERATOR,\n 'Binary operator should go on previous line \"%s\"' % token.string,\n token)\n\n elif type == Type.DOC_FLAG:\n flag = token.attached_object\n\n if flag.flag_type == 'bug':\n # TODO(robbyw): Check for exactly 1 space on the left.\n string = token.next.string.lstrip()\n string = string.split(' ', 1)[0]\n\n if not string.isdigit():\n self._HandleError(errors.NO_BUG_NUMBER_AFTER_BUG_TAG,\n '@bug should be followed by a bug number', token)\n\n elif flag.flag_type == 'suppress':\n if flag.type is None:\n # A syntactically invalid suppress tag will get tokenized as a normal\n # flag, indicating an error.\n self._HandleError(errors.INCORRECT_SUPPRESS_SYNTAX,\n 'Invalid suppress syntax: should be @suppress {errortype}. '\n 'Spaces matter.', token)\n elif flag.type not in state.GetDocFlag().SUPPRESS_TYPES:\n self._HandleError(errors.INVALID_SUPPRESS_TYPE,\n 'Invalid suppression type: %s' % flag.type,\n token)\n\n elif FLAGS.strict and flag.flag_type == 'author':\n # TODO(user): In non strict mode check the author tag for as much as\n # it exists, though the full form checked below isn't required.\n string = token.next.string\n result = self.AUTHOR_SPEC.match(string)\n if not result:\n self._HandleError(errors.INVALID_AUTHOR_TAG_DESCRIPTION,\n 'Author tag line should be of the form: '\n '@author [email protected] (Your Name)',\n token.next)\n else:\n # Check spacing between email address and name. Do this before\n # checking earlier spacing so positions are easier to calculate for\n # autofixing.\n num_spaces = len(result.group(2))\n if num_spaces < 1:\n self._HandleError(errors.MISSING_SPACE,\n 'Missing space after email address',\n token.next, Position(result.start(2), 0))\n elif num_spaces > 1:\n self._HandleError(errors.EXTRA_SPACE,\n 'Extra space after email address',\n token.next,\n Position(result.start(2) + 1, num_spaces - 1))\n\n # Check for extra spaces before email address. 
Can't be too few, if\n # not at least one we wouldn't match @author tag.\n num_spaces = len(result.group(1))\n if num_spaces > 1:\n self._HandleError(errors.EXTRA_SPACE,\n 'Extra space before email address',\n token.next, Position(1, num_spaces - 1))\n\n elif (flag.flag_type in state.GetDocFlag().HAS_DESCRIPTION and\n not self._limited_doc_checks):\n if flag.flag_type == 'param':\n if flag.name is None:\n self._HandleError(errors.MISSING_JSDOC_PARAM_NAME,\n 'Missing name in @param tag', token)\n\n if not flag.description or flag.description is None:\n flag_name = token.type\n if 'name' in token.values:\n flag_name = '@' + token.values['name']\n self._HandleError(errors.MISSING_JSDOC_TAG_DESCRIPTION,\n 'Missing description in %s tag' % flag_name, token)\n else:\n self._CheckForMissingSpaceBeforeToken(flag.description_start_token)\n\n # We want punctuation to be inside of any tags ending a description,\n # so strip tags before checking description. See bug 1127192. Note\n # that depending on how lines break, the real description end token\n # may consist only of stripped html and the effective end token can\n # be different.\n end_token = flag.description_end_token\n end_string = htmlutil.StripTags(end_token.string).strip()\n while (end_string == '' and not\n end_token.type in Type.FLAG_ENDING_TYPES):\n end_token = end_token.previous\n if end_token.type in Type.FLAG_DESCRIPTION_TYPES:\n end_string = htmlutil.StripTags(end_token.string).rstrip()\n\n if not (end_string.endswith('.') or end_string.endswith('?') or\n end_string.endswith('!')):\n # Find the position for the missing punctuation, inside of any html\n # tags.\n desc_str = end_token.string.rstrip()\n while desc_str.endswith('>'):\n start_tag_index = desc_str.rfind('<')\n if start_tag_index < 0:\n break\n desc_str = desc_str[:start_tag_index].rstrip()\n end_position = Position(len(desc_str), 0)\n\n self._HandleError(\n errors.JSDOC_TAG_DESCRIPTION_ENDS_WITH_INVALID_CHARACTER,\n ('%s descriptions must end with valid punctuation such as a '\n 'period.' % token.string),\n end_token, end_position)\n\n if flag.flag_type in state.GetDocFlag().HAS_TYPE:\n if flag.type_start_token is not None:\n self._CheckForMissingSpaceBeforeToken(\n token.attached_object.type_start_token)\n\n if flag.type and flag.type != '' and not flag.type.isspace():\n self._CheckJsDocType(token)\n\n if type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):\n if (token.values['name'] not in state.GetDocFlag().LEGAL_DOC and\n token.values['name'] not in FLAGS.custom_jsdoc_tags):\n self._HandleError(errors.INVALID_JSDOC_TAG,\n 'Invalid JsDoc tag: %s' % token.values['name'], token)\n\n if (FLAGS.strict and token.values['name'] == 'inheritDoc' and\n type == Type.DOC_INLINE_FLAG):\n self._HandleError(errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC,\n 'Unnecessary braces around @inheritDoc',\n token)\n\n elif type == Type.SIMPLE_LVALUE:\n identifier = token.values['identifier']\n\n if ((not state.InFunction() or state.InConstructor()) and\n not state.InParentheses() and not state.InObjectLiteralDescendant()):\n jsdoc = state.GetDocComment()\n if not state.HasDocComment(identifier):\n # Only test for documentation on identifiers with .s in them to\n # avoid checking things like simple variables. 
We don't require\n # documenting assignments to .prototype itself (bug 1880803).\n if (not state.InConstructor() and\n identifier.find('.') != -1 and not\n identifier.endswith('.prototype') and not\n self._limited_doc_checks):\n comment = state.GetLastComment()\n if not (comment and comment.lower().count('jsdoc inherited')):\n self._HandleError(errors.MISSING_MEMBER_DOCUMENTATION,\n \"No docs found for member '%s'\" % identifier,\n token);\n elif jsdoc and (not state.InConstructor() or\n identifier.startswith('this.')):\n # We are at the top level and the function/member is documented.\n if identifier.endswith('_') and not identifier.endswith('__'):\n if jsdoc.HasFlag('override'):\n self._HandleError(errors.INVALID_OVERRIDE_PRIVATE,\n '%s should not override a private member.' % identifier,\n jsdoc.GetFlag('override').flag_token)\n # Can have a private class which inherits documentation from a\n # public superclass.\n if jsdoc.HasFlag('inheritDoc') and not jsdoc.HasFlag('constructor'):\n self._HandleError(errors.INVALID_INHERIT_DOC_PRIVATE,\n '%s should not inherit from a private member.' % identifier,\n jsdoc.GetFlag('inheritDoc').flag_token)\n if (not jsdoc.HasFlag('private') and\n not ('underscore' in jsdoc.suppressions)):\n self._HandleError(errors.MISSING_PRIVATE,\n 'Member \"%s\" must have @private JsDoc.' %\n identifier, token)\n if jsdoc.HasFlag('private') and 'underscore' in jsdoc.suppressions:\n self._HandleError(errors.UNNECESSARY_SUPPRESS,\n '@suppress {underscore} is not necessary with @private',\n jsdoc.suppressions['underscore'])\n elif jsdoc.HasFlag('private'):\n self._HandleError(errors.EXTRA_PRIVATE,\n 'Member \"%s\" must not have @private JsDoc' %\n identifier, token)\n\n if ((jsdoc.HasFlag('desc') or jsdoc.HasFlag('hidden'))\n and not identifier.startswith('MSG_')\n and identifier.find('.MSG_') == -1):\n # TODO(user): Update error message to show the actual invalid\n # tag, either @desc or @hidden.\n self._HandleError(errors.INVALID_USE_OF_DESC_TAG,\n 'Member \"%s\" should not have @desc JsDoc' % identifier,\n token)\n\n # Check for illegaly assigning live objects as prototype property values.\n index = identifier.find('.prototype.')\n # Ignore anything with additional .s after the prototype.\n if index != -1 and identifier.find('.', index + 11) == -1:\n equal_operator = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)\n next_code = tokenutil.SearchExcept(equal_operator, Type.NON_CODE_TYPES)\n if next_code and (\n next_code.type in (Type.START_BRACKET, Type.START_BLOCK) or\n next_code.IsOperator('new')):\n self._HandleError(errors.ILLEGAL_PROTOTYPE_MEMBER_VALUE,\n 'Member %s cannot have a non-primitive value' % identifier,\n token)\n\n elif type == Type.END_PARAMETERS:\n # Find extra space at the end of parameter lists. 
We check the token\n # prior to the current one when it is a closing paren.\n if (token.previous and token.previous.type == Type.PARAMETERS\n and self.ENDS_WITH_SPACE.search(token.previous.string)):\n self._HandleError(errors.EXTRA_SPACE, 'Extra space before \")\"',\n token.previous)\n\n jsdoc = state.GetDocComment()\n if state.GetFunction().is_interface:\n if token.previous and token.previous.type == Type.PARAMETERS:\n self._HandleError(errors.INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS,\n 'Interface constructor cannot have parameters',\n token.previous)\n elif (state.InTopLevel() and jsdoc and not jsdoc.HasFlag('see')\n and not jsdoc.InheritsDocumentation()\n and not state.InObjectLiteralDescendant() and not\n jsdoc.IsInvalidated()):\n distance, edit = jsdoc.CompareParameters(state.GetParams())\n if distance:\n params_iter = iter(state.GetParams())\n docs_iter = iter(jsdoc.ordered_params)\n\n for op in edit:\n if op == 'I':\n # Insertion.\n # Parsing doc comments is the same for all languages\n # but some languages care about parameters that don't have\n # doc comments and some languages don't care.\n # Languages that don't allow variables to by typed such as\n # JavaScript care but languages such as ActionScript or Java\n # that allow variables to be typed don't care.\n if not self._limited_doc_checks:\n self.HandleMissingParameterDoc(token, params_iter.next())\n\n elif op == 'D':\n # Deletion\n self._HandleError(errors.EXTRA_PARAMETER_DOCUMENTATION,\n 'Found docs for non-existing parameter: \"%s\"' %\n docs_iter.next(), token)\n elif op == 'S':\n # Substitution\n if not self._limited_doc_checks:\n self._HandleError(errors.WRONG_PARAMETER_DOCUMENTATION,\n 'Parameter mismatch: got \"%s\", expected \"%s\"' %\n (params_iter.next(), docs_iter.next()), token)\n\n else:\n # Equality - just advance the iterators\n params_iter.next()\n docs_iter.next()\n\n elif type == Type.STRING_TEXT:\n # If this is the first token after the start of the string, but it's at\n # the end of a line, we know we have a multi-line string.\n if token.previous.type in (Type.SINGLE_QUOTE_STRING_START,\n Type.DOUBLE_QUOTE_STRING_START) and last_in_line:\n self._HandleError(errors.MULTI_LINE_STRING,\n 'Multi-line strings are not allowed', token)\n\n\n # This check is orthogonal to the ones above, and repeats some types, so\n # it is a plain if and not an elif.\n if token.type in Type.COMMENT_TYPES:\n if self.ILLEGAL_TAB.search(token.string):\n self._HandleError(errors.ILLEGAL_TAB,\n 'Illegal tab in comment \"%s\"' % token.string, token)\n\n trimmed = token.string.rstrip()\n if last_in_line and token.string != trimmed:\n # Check for extra whitespace at the end of a line.\n self._HandleError(errors.EXTRA_SPACE, 'Extra space at end of line',\n token, Position(len(trimmed), len(token.string) - len(trimmed)))\n\n # This check is also orthogonal since it is based on metadata.\n if token.metadata.is_implied_semicolon:\n self._HandleError(errors.MISSING_SEMICOLON,\n 'Missing semicolon at end of line', token)",
"def _get_token(self):\n self._skip()\n\n token = None\n # Checks single-quoted string.\n if self.current_char == \"'\":\n start_position = self.current_position\n while not (self.current_char != \"\\\\\" and self._peek() == \"'\"):\n self._next_char()\n if self.EOF:\n raise LexerError(\n start_position, f\"EOL while scanning string literal at position {start_position}\")\n self._next_char()\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.STRING, self.stream[start_position:self.current_position + 1])\n\n # Checks double-quoted string.\n elif self.current_char == '\"':\n start_position = self.current_position\n while not (self.current_char != \"\\\\\" and self._peek() == '\"'):\n self._next_char()\n if self.EOF:\n raise LexerError(\n start_position, f\"EOL while scanning string literal at position {start_position}\")\n self._next_char()\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.STRING, self.stream[start_position:self.current_position + 1])\n\n # Checks number begins with a digit.\n elif self.current_char.isdigit():\n start_position = self.current_position\n while self._peek().isdigit():\n self._next_char()\n if self._peek() == \".\":\n self._next_char()\n while self._peek().isdigit():\n self._next_char()\n if self._peek() in [\"d\", \"D\", \"f\", \"F\"]:\n self._next_char()\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.NUMBER, self.stream[start_position:self.current_position + 1])\n\n # Checks number begins with a dot.\n elif self.current_char == \".\":\n if self._peek().isdigit():\n start_position = self.current_position\n while self._peek().isdigit():\n self._next_char()\n if self._peek() in [\"d\", \"D\", \"f\", \"F\"]:\n self._next_char()\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.NUMBER, self.stream[start_position:self.current_position + 1])\n else:\n token = Token(self.line_number, self.line_start_position, self.current_position, self.current_position,\n _token_names.Separators(self.current_char).name, self.current_char)\n\n # Checks word begins with an alphabetic letter or an underscore.\n elif self.current_char.isalpha() or self.current_char == \"_\":\n start_position = self.current_position\n while True:\n if (self._peek() in [\" \", \"\\t\", \"\\r\", \"\\n\", \"\\0\"]\n or self._peek() in _token_names.SEPARATORS\n or self._peek() in _token_names.OPERATORS):\n break\n self._next_char()\n word = self.stream[start_position:self.current_position + 1]\n # Checks if word is a keyword.\n if word in _token_names.Keywords.values():\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.Keywords(word).name, word)\n elif word in _token_names.KeywordsType.values():\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.KeywordsType(word).name, word)\n elif word in _token_names.KeywordsAttribute.values():\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.KeywordsAttribute(word).name, word)\n # Otherwise put it as identifier.\n else:\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.IDENTIFIER, word)\n\n # Checks if is a separator.\n elif self.current_char in 
_token_names.Separators.values():\n token = Token(self.line_number, self.line_start_position, self.current_position, self.current_position,\n _token_names.Separators(self.current_char).name, self.current_char)\n\n # Checks if is an operator.\n elif self.current_char in _token_names.Operators.values():\n last_position = self.current_position\n if self.current_char not in [\"&\", \"|\"] and self._peek() == \"=\":\n val = self.current_char + self._peek()\n self._next_char()\n token = Token(self.line_number, self.line_start_position, last_position, self.current_position,\n _token_names.Operators(val).name, val)\n elif self.current_char == \"+\" and self._peek() == \"+\":\n val = self.current_char + self._peek()\n self._next_char()\n token = Token(self.line_number, self.line_start_position, last_position, self.current_position,\n _token_names.Operators(val).name, val)\n elif self.current_char == \"-\" and self._peek() == \"-\":\n val = self.current_char + self._peek()\n self._next_char()\n token = Token(self.line_number, self.line_start_position, last_position, self.current_position,\n _token_names.Operators(val).name, val)\n elif self.current_char == \"&\" and self._peek() == \"&\":\n val = self.current_char + self._peek()\n self._next_char()\n token = Token(self.line_number, self.line_start_position, last_position, self.current_position,\n _token_names.Operators(val).name, val)\n elif self.current_char == \"|\" and self._peek() == \"|\":\n val = self.current_char + self._peek()\n self._next_char()\n token = Token(self.line_number, self.line_start_position, last_position, self.current_position,\n _token_names.Operators(val).name, val)\n else:\n token = Token(self.line_number, self.line_start_position, self.current_position, self.current_position,\n _token_names.Operators(self.current_char).name, self.current_char)\n\n # Checks if is EOF\n elif self.current_char == \"\\0\":\n token = Token(self.line_number, self.line_start_position, self.current_position, self.current_position,\n _token_names.EOF, self.current_char)\n\n # Raise error if is an unknown token.\n else:\n raise LexerError(self.current_position)\n\n self._next_char()\n return token",
"def match_value(self, token_type, token_value):\n if isinstance(self.cursor(), token_type) and self.cursor().token == token_value:\n token = self.cursor()\n self.pos += 1\n else:\n raise ParseError(\"Expected {!s}.\".format(token_value))\n return token",
"async def validate_token(self, token):",
"def check_token(self, *args) -> bool:\n if len(args) == 1:\n if isinstance(args[0], str):\n return self.token_name == args[0]\n elif isinstance(args[0], _Enum):\n return self.token_name == args[0].name\n elif isinstance(args[0], _Sequence):\n return self.token_name in args[0]\n raise TypeError(\"_check_token() taking 1 argument, type: str, Enum or Sequence\")",
"def get_next_token(self):\n\t\t\n\t\tif self.pos > len(self.text)-1:\n\t\t\treturn Token(EOF, None)\n\t\t\t\n\t\tcurrent_char = self.text[self.pos]\n\t\t\n\t\tif current_char.isdigit() or current_char.isalpha():",
"def match(self, token):\n try:\n if token == 'S' and is_symbol(self.the_input[self.index]) \\\n or self.the_input[self.index] == token:\n self.index += 1\n return True\n except IndexError:\n print 'Error on checking \\'' + token + \\\n '\\': the next token is empty'\n exit(1)\n print 'No' # there is improper grammar\n exit(1)",
"def next_token(self, context, token):\n if token.is_unordered_list_start:\n self.__handle_unordered_list_start(context, token)\n elif token.is_ordered_list_start:\n self.__handle_ordered_list_start(token)\n elif token.is_unordered_list_end or token.is_ordered_list_end:\n self.__handle_list_end(context, token)\n elif token.is_new_list_item:\n self.__handle_list_item(context, token)",
"def next_token(self, context: PluginScanContext, token: MarkdownToken) -> None:\n if token.is_atx_heading:\n atx_token = cast(AtxHeadingMarkdownToken, token)\n if not atx_token.remove_trailing_count:\n self.__atx_heading_token = token\n elif token.is_paragraph_end:\n self.__atx_heading_token = None\n elif token.is_text:\n text_token = cast(TextMarkdownToken, token)\n resolved_extracted_whitespace = ParserHelper.remove_all_from_text(\n text_token.extracted_whitespace\n )\n if self.__atx_heading_token and len(resolved_extracted_whitespace) > 1:\n self.report_next_token_error(context, self.__atx_heading_token)",
"def next_token(self) -> T.Optional[Token]:\n if self.has_finished():\n return None\n token_type = None\n token_chars = []\n if is_number_char(self.current):\n token_type = \"N\"\n while not self.has_finished() and is_number_char(self.current):\n token_chars.append(self.consume())\n elif is_char_token(self.current):\n if self.current in [\"(\", \")\"]:\n token_type = self.current\n elif self.current in [\"+\", \"-\"]:\n token_type = \"S\"\n elif self.current in [\"*\", \"/\"]:\n token_type = \"M\"\n else:\n raise ExprSyntaxError\n token_chars.append(self.consume())\n elif self.current.isspace():\n self.consume()\n return self.next_token()\n else:\n raise UnexpectedChar\n return Token(token_type, \"\".join(token_chars))",
"def type(self, tokens):\n if len(tokens) != 1:\n raise Exception(\"Unexpected argument counts\")\n return tokens[0].value",
"def token_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"token_type\")",
"def token_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"token_type\")",
"def _CheckJsDocType(self, token):\n flag = token.attached_object\n type = flag.type\n if type and type is not None and not type.isspace():\n pieces = self.TYPE_SPLIT.split(type)\n if len(pieces) == 1 and type.count('|') == 1 and (\n type.endswith('|null') or type.startswith('null|')):\n self._HandleError(errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL,\n 'Prefer \"?Type\" to \"Type|null\": \"%s\"' % type, token)\n\n for p in pieces:\n if p.count('|') and p.count('?'):\n # TODO(robbyw): We should do actual parsing of JsDoc types. As is,\n # this won't report an error for {number|Array.<string>?}, etc.\n self._HandleError(errors.JSDOC_ILLEGAL_QUESTION_WITH_PIPE,\n 'JsDoc types cannot contain both \"?\" and \"|\": \"%s\"' % p, token)\n\n if FLAGS.strict and (flag.type_start_token.type != Type.DOC_START_BRACE or\n flag.type_end_token.type != Type.DOC_END_BRACE):\n self._HandleError(errors.MISSING_BRACES_AROUND_TYPE,\n 'Type must always be surrounded by curly braces.', token)",
"def test_get_next_token():\n # Create a lexer instance with text and try _get_next_token\n assert lex._lexer(None, None)._load_text(\"test\")._get_next_token(\n [lex_bases.rule(\"TEST\", r\"test\")], []\n ) == lex_bases.token(\"TEST\", \"test\")"
] |
[
"0.7041934",
"0.686519",
"0.66744655",
"0.6639003",
"0.655341",
"0.64888304",
"0.64674366",
"0.64206403",
"0.6393062",
"0.6368189",
"0.6220035",
"0.61547",
"0.60059273",
"0.5962529",
"0.5935847",
"0.5935539",
"0.5929813",
"0.5926765",
"0.59179354",
"0.58905804",
"0.587732",
"0.5780358",
"0.5777853",
"0.57697016",
"0.57663727",
"0.5757975",
"0.5696087",
"0.5696087",
"0.5678276",
"0.56668967"
] |
0.7866306
|
0
|
Returns the previous token in the list.
|
def _previous(self):
return self.token_list[self._current - 1]
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def prev_token(self, tok, include_extra=False):\n # type: (Token, bool) -> Token\n i = tok.index - 1\n if not include_extra:\n while is_non_coding_token(self._tokens[i].type):\n i -= 1\n return self._tokens[i]",
"def get_previous(self):\n return self._next_previous_helper('previous')",
"def previous(self):\n return Reference(\":\".join(self.names[:-2]))",
"def previous(self):\n return self.my_previous",
"def get_prev(self):\n return self.prev",
"def get_previous(self):\n return self.previous",
"def getPrev(self):\n\t\t\treturn self.prev",
"def previous(self):\n if len(self.stack) == 1:\n return None\n self.stack.pop()\n return self.get_node()",
"def getPrevious(self):\n return self.__previous__",
"def get_previous_step(self):\n return self.get_step_by_index(-2)",
"def previous_symbol(self):\r\n if self.position == 0:\r\n return None\r\n return self.rule.rightside[self.position-1]",
"def get_previous_item(self):\n return self.ui.listItemList.model().get_previous(self.selected_item)",
"def previous(self):\n\n pass",
"def prev(self):\n return self.__prev",
"def previous(self):\n return _libsbml.SwigPyIterator_previous(self)",
"def get_prev(self, pos):\n if pos <= 0:\n return None, None\n return self._get_at(pos - 1)",
"def previous(self, type=None):\n i = self.index - 1\n s = self.sentence\n while i > 0:\n if type in (s[i].type, None):\n return s[i]\n i -= 1",
"def back(self):\n return self.sentinel.prev.item",
"def previous(self):\n posts_by_date = self.posts_by_date\n index = bisect.bisect_left(posts_by_date, self)\n if index == 0:\n return None\n return posts_by_date[index - 1]",
"def getPreviousElement(self,currentId):\n\tids = self.getObjectIds()\n\tpreviousId = None\n\tfor id in ids:\n\t if id == currentId:\n\t\treturn previousId\n\t else:\n\t\tpreviousId = id\n\treturn None",
"def previous(self):\n return self._call_player_proxy('Prev', None)",
"def previous():\n releases_list = releases()\n try:\n return releases_list[-2]\n except IndexError:\n return None",
"def previous(self, item) -> LinkedListNode:\n node = self.head\n while node is not self._nil:\n if node.next.item is item:\n return node\n node = node.next\n return node",
"def previous(self):\n return _osgAnimation.SwigPyIterator_previous(self)",
"def previous(self):\n if self.cursor.pref:\n self.cursor = self.cursor.pref\n return self.cursor\n return None",
"def get_previous_block(self):\r\n return self.chain[-1] # Return the previous block\r",
"def nav_prev_sibling(self):\r\n siblings = self.nav_siblings()\r\n prev_sibling = None\r\n for i, sibling in enumerate(siblings):\r\n if sibling == self and i > 0:\r\n prev_sibling = siblings[i-1]\r\n return prev_sibling",
"def previous_player(current_player, players):\n if len(players) == 1:\n return players[0]\n if current_player != players[0]:\n return players[players.index(current_player) - 1]\n return players[-1]",
"def previous_character(self) -> str:\n return self.seek(self.index - 1)",
"def previous(self) -> Optional[Chainable]:\n return None"
] |
[
"0.78973746",
"0.7702691",
"0.7477467",
"0.7417186",
"0.7413403",
"0.7353703",
"0.7339029",
"0.7330643",
"0.72991306",
"0.72103095",
"0.7198118",
"0.71271235",
"0.7043022",
"0.70284075",
"0.7019578",
"0.6980399",
"0.69696885",
"0.6966419",
"0.69032735",
"0.681985",
"0.68124574",
"0.67929536",
"0.67832214",
"0.6778744",
"0.6773626",
"0.67313516",
"0.6662417",
"0.66338784",
"0.6633343",
"0.66296965"
] |
0.89975834
|
0
|
Attempts to consume the next token if it is the given type.
|
def _consume(self, token_type, msg):
if self._check(token_type):
return self._advance()
raise self._error(self._peek(), msg)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def eat(self, token_type):\n if self.current_token.type == token_type:\n self.current_token = self.lexer.get_next_token()\n # print(self.current_token)\n else:\n self.error()",
"def _consume_type(self):\n try:\n self._consume(self.VARIABLE_TYPES)\n except CompilationEngineError:\n self._consume(TokenTypes.IDENTIFIER) # Class name",
"def maybe_advance(self, expected_type):\n token = self._get_token()\n if token and token.type == expected_type:\n self.pos = token.pos\n return token.value\n return None",
"def consume_if(self, tok_type: str) -> Token:\n curr = self.current\n if curr.tok_type != tok_type:\n raise ExprSyntaxError\n self.pos += 1\n return curr",
"def advance(self, expected_type):\n token = self._get_token()\n self._assert_token_type(token, expected_type)\n self.pos = token.pos\n return token.value",
"def match_type(self, token_type):\n if isinstance(self.cursor(), token_type):\n token = self.cursor()\n self.pos += 1\n else:\n raise ParseError(\"Expected {!s}.\".format(token_type))\n return token",
"def _check(self, token_type):\n if self._is_at_end():\n return False\n\n return self._peek().token_type == token_type",
"def consume(self):\n if self.next():\n self.tokens.pop(0)",
"def next_token(self, context: PluginScanContext, token: MarkdownToken) -> None:\n if token.is_atx_heading:\n atx_token = cast(AtxHeadingMarkdownToken, token)\n self.__handle_atx_heading(context, atx_token)\n elif token.is_setext_heading:\n setext_token = cast(SetextHeadingMarkdownToken, token)\n self.__handle_setext_heading(setext_token)\n elif token.is_text:\n text_token = cast(TextMarkdownToken, token)\n self.__handle_text(text_token)\n elif token.is_setext_heading_end:\n end_token = cast(EndMarkdownToken, token)\n self.__handle_setext_heading_end(context, end_token)",
"def next_token(self, context, token):",
"def next(self, type=None):\n i = self.index + 1\n s = self.sentence\n while i < len(s):\n if type in (s[i].type, None):\n return s[i]\n i += 1",
"def next_token(self) -> T.Optional[Token]:\n if self.has_finished():\n return None\n token_type = None\n token_chars = []\n if is_number_char(self.current):\n token_type = \"N\"\n while not self.has_finished() and is_number_char(self.current):\n token_chars.append(self.consume())\n elif is_char_token(self.current):\n if self.current in [\"(\", \")\"]:\n token_type = self.current\n elif self.current in [\"+\", \"-\"]:\n token_type = \"S\"\n elif self.current in [\"*\", \"/\"]:\n token_type = \"M\"\n else:\n raise ExprSyntaxError\n token_chars.append(self.consume())\n elif self.current.isspace():\n self.consume()\n return self.next_token()\n else:\n raise UnexpectedChar\n return Token(token_type, \"\".join(token_chars))",
"def next(self, type=None):\n i = self.stop\n s = self.sentence\n while i < len(s):\n if s[i].chunk is not None and type in (s[i].chunk.type, None):\n return s[i].chunk\n i += 1",
"def consume():\n depth = tokenizer.depth()\n for token in source:\n yield token\n if tokenizer.depth() < depth:\n return",
"def consume():\n depth = tokenizer.depth()\n for token in source:\n yield token\n if tokenizer.depth() < depth:\n return",
"def consume():\n depth = tokenizer.depth()\n for token in source:\n yield token\n if tokenizer.depth() < depth:\n return",
"def _consume(self, expected) -> None:\n raise TypeError(\"Unsupported type: \", type(expected))",
"def next_token(self, tok, include_extra=False):\n # type: (Token, bool) -> Token\n i = tok.index + 1\n if not include_extra:\n while is_non_coding_token(self._tokens[i].type):\n i += 1\n return self._tokens[i]",
"def here(self, type):\n # Get the token ahead of the current index.\n possibleIndexEosToken = self.getCurrentToken().tokenIndex - 1\n ahead = self._input.get(possibleIndexEosToken)\n\n # Check if the token resides on the HIDDEN channel and if it is of the\n # provided type.\n return (ahead.channel == Lexer.HIDDEN) and (ahead.type == type)",
"def _match(self, *token_types):\n for token in token_types:\n if self._check(token):\n self._advance()\n return True\n\n return False",
"def tryRE(regexp,kind,stream,tokens):\n M = regexp.match(stream.text)\n if M:\n block = M.group()\n if not isSkipType(kind):\n if kind == '_keyword':\n token = Token(block)\n else:\n token = Token(kind,attr=block)\n tokens.append(token)\n stream.consumeChars(len(block))\n return True\n else:\n return False",
"def next_token(self, context, token):\n if token.is_unordered_list_start:\n self.__handle_unordered_list_start(context, token)\n elif token.is_ordered_list_start:\n self.__handle_ordered_list_start(token)\n elif token.is_unordered_list_end or token.is_ordered_list_end:\n self.__handle_list_end(context, token)\n elif token.is_new_list_item:\n self.__handle_list_item(context, token)",
"def find_token(self, start_token, tok_type, tok_str=None, reverse=False):\n # type: (Token, int, Optional[str], bool) -> Token\n t = start_token\n advance = self.prev_token if reverse else self.next_token\n while not match_token(t, tok_type, tok_str) and not token.ISEOF(t.type):\n t = advance(t, include_extra=True)\n return t",
"def __next__(self):\n if self.gen is None:\n self.gen = self.token_generator()\n\n tok = next(self.gen)\n while tok.type in self.IGNORED_TOKENS:\n tok = next(self.gen)\n return tok",
"def token_assert(obj, type_):\n if not isinstance(obj, type_):\n raise TokenizeError(\"Tokenizer Error: Expected {}, got {}\".format(type_, type(obj)))",
"def peek_for_token(self, ch, check_tok, yes_tok, no_tok):\n if self.peek_char() == check_tok:\n first = ch\n self.read_char()\n literal = first + self.char\n return Token(yes_tok, first + self.char)\n else:\n return Token(no_tok, ch)",
"def type(self, tokens):\n if len(tokens) != 1:\n raise Exception(\"Unexpected argument counts\")\n return tokens[0].value",
"def next_token(self):\n p = self.re_token.search(self.remain)\n if not p:\n return None\n # move forward.\n s = p.start()\n self.buffer.append(self.remain[:s].encode(string_escape))\n self.cur += s + len(p.group())\n\n return p",
"def next(self):\n if not self.tokens:\n return None\n else:\n return self.tokens[0]",
"def gettok(self):\n try:\n self.next = next(self.tokens)\n except StopIteration:\n self.next = None"
] |
[
"0.7341707",
"0.67438674",
"0.66897255",
"0.65760523",
"0.64632964",
"0.62723833",
"0.6213298",
"0.6211683",
"0.6120097",
"0.6010382",
"0.58728313",
"0.58645236",
"0.58205235",
"0.57960516",
"0.57960516",
"0.57960516",
"0.56326324",
"0.55995536",
"0.55565697",
"0.551926",
"0.54777557",
"0.5477568",
"0.54372585",
"0.54366875",
"0.5422677",
"0.5386332",
"0.53349394",
"0.5299397",
"0.529355",
"0.52869684"
] |
0.78619826
|
0
|
Returns a ParseError and logs an error with the interpreter.
|
def _error(self, token, msg):
self._interpreter.parse_error(token, msg)
return ParseError()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _parse_error(self, err):\r\n self.logger.debug(err)\r\n stack = err.get(\"stack\", [])\r\n if not err[\"message\"].startswith(\"parse error:\"):\r\n err[\"message\"] = \"error: \" + err[\"message\"]\r\n errmsg = \"Octave evaluation error:\\n%s\" % err[\"message\"]\r\n\r\n if not isinstance(stack, StructArray):\r\n return errmsg\r\n\r\n errmsg += \"\\nerror: called from:\"\r\n for item in stack[:-1]:\r\n errmsg += \"\\n %(name)s at line %(line)d\" % item\r\n try: # noqa\r\n errmsg += \", column %(column)d\" % item\r\n except Exception: # noqa\r\n pass\r\n return errmsg",
"def error(self, message, token=None):\n raise ParseException(\n message,\n self.filename,\n line=self._line,\n line_number=self._line_number,\n token=token)",
"def test():\n\ttry:\n\t\tprint \"Raising ParseErrror.\"\n\t\traise ParseError\n\texcept ParseError:\n\t\tprint \"Caught ParseError.\"",
"def add_parse_error(parse_error):\n assert isinstance(parse_error, ParseError)\n ParseError.parse_errors.append(parse_error)\n message = unicode(parse_error)\n if parse_error.type == \"error\":\n ParseError.logger.error(message)\n import settings\n if not settings.settings[\"quiet\"]:\n print>>sys.stderr, message\n else:\n ParseError.logger.warning(message)",
"def _ast_node_or_parse_exception(self):\n # This attribute may also be set by __construct_from_annotated_ast(),\n # in which case this code does not run.\n try:\n return _parse_ast_nodes(\n self.text, self._input_flags, self._auto_flags, \"exec\")\n except Exception as e:\n # Add the filename to the exception message to be nicer.\n if self.text.filename:\n try:\n e = type(e)(\"While parsing %s: %s\" % (self.text.filename, e))\n except TypeError:\n # Exception takes more than one argument\n pass\n # Cache the exception to avoid re-attempting while debugging.\n return e",
"def parse_error(self, message, exc_cls=VisualizerParseError):\n raise exc_cls(\"Error parsing %s '%s' (%s:%i): %s\" % \n (self.tag, self.ref, self.filename, self.lineno, message))",
"def error(self, message):\r\n self._construct_partial_parser().error(message)",
"def syntaxError (self, s) :\r\n report = self.generateReport() + s\r\n raise Exception, report",
"def error(self, msg, elem):\n if elem is not None:\n msg += \" (line %d)\" % elem.sourceline\n if self.ignore_errors:\n return self.warn(msg, elem)\n raise ParserException(msg)",
"def _raise(self,\n message: str,\n token: Optional[Token] = None,\n verbose: bool = True) -> NoReturn:\n if not verbose:\n raise ParserException(message)\n\n if token is None:\n token = self.tokens.peek()\n\n if isinstance(token, EOF):\n message = f\"{message}, but reached end of file\"\n else:\n message = f\"{message}, but found {repr(str(token))} at {token.position!s}\"\n\n raise ParserException(message)",
"def _syntax_error(self, msg, thing):\n raise TempliteSyntaxError(f\"{msg}: {thing!r}\")",
"def test_parseMethodExceptionLogged(self):\n\n class UnhandledException(Exception):\n \"\"\"\n An unhandled exception.\n \"\"\"\n\n def raisesValueError(line):\n raise UnhandledException\n\n self.server.parseState = \"command\"\n self.server.parse_command = raisesValueError\n\n self.server.lineReceived(b\"invalid\")\n\n self.assertTrue(self.flushLoggedErrors(UnhandledException))",
"def get_parse_error(code):\r\n # note that this uses non-public elements from stdlib's tabnanny, because tabnanny\r\n # is (very frustratingly) written only to be used as a script, but using it that way\r\n # in this context requires writing temporarily files, running subprocesses, blah blah blah\r\n code_buffer = StringIO(code)\r\n try:\r\n tabnanny.process_tokens(tokenize.generate_tokens(code_buffer.readline))\r\n except tokenize.TokenError, err:\r\n return \"Could not parse code: %s\" % err\r\n except IndentationError, err:\r\n return \"Indentation error: %s\" % err\r\n except tabnanny.NannyNag, err:\r\n return \"Ambiguous tab at line %d; line is '%s'.\" % (err.get_lineno(), err.get_line())\r\n return None",
"def handle_syntax_error(self, resp, main_line):\r\n errline = '\\n'.join(resp.splitlines()[:])\r\n msg = ('Oct2Py tried to run:\\n\"\"\"\\n%s\\n\"\"\"\\n'\r\n 'Octave returned Syntax Error:\\n\"\"\"\"\\n%s\\n\"\"\"' % (main_line,\r\n errline))\r\n msg += '\\nIf using an m-file script, make sure it runs in Octave'\r\n if not self.use_pty:\r\n msg += '\\nSession Closed by Octave'\r\n self.close()\r\n raise Oct2PyError(msg)\r\n else:\r\n raise Oct2PyError(msg)",
"def error(error):\n print(\"Error\", error)\n erlport.erlang.cast(this.erlang_pid, (erlport.erlterms.Atom(b'python_error'), str(error)))",
"def error ( self , message , *args , **kwargs ) :\n return self.logger.error ( message , *args , **kwargs )",
"def parseError(message):\n print(\"config error in \" + config_file + \": \" + message, file=sys.stderr)",
"def _ps_error(e):\n\n error(None, str(e))",
"def log_error(e):\n\tprint(e)",
"def log_error(e):\n\tprint(e)",
"def parseError( msg ):\n raise Exception( \"Parse error for client object on line {0}: {1}\".format( Campaign.currentLineNumber, msg ) )",
"def parse_error(self, kind=ParseError, args=None): # type: () -> None\n line, col = self._to_linecol(self._idx)\n\n if args:\n return kind(line, col, *args)\n else:\n return kind(line, col)",
"def error(self, message):\n return self.log(\"ERROR\", message)",
"def main():\n\ttest() #test ParseError",
"def log_error(e):\n print(e)",
"def log_error(e):\n print(e)",
"def log_error(e):\n print(e)",
"def log_error(e):\n print(e)",
"def log_error(e):\n print(e)",
"def log_error(e):\n print(e)"
] |
[
"0.6517298",
"0.6387569",
"0.6333749",
"0.6310108",
"0.59419096",
"0.59037",
"0.5853194",
"0.5799247",
"0.5798414",
"0.5780637",
"0.5685462",
"0.56765276",
"0.5633103",
"0.5619426",
"0.55892473",
"0.55616635",
"0.5501678",
"0.54769194",
"0.545392",
"0.545392",
"0.5449951",
"0.54152524",
"0.54145753",
"0.5353801",
"0.53215945",
"0.53215945",
"0.53215945",
"0.53215945",
"0.53215945",
"0.53215945"
] |
0.71268624
|
0
|
Map values with sigmoid function to range [0,1]. Y(t) = 1/(1 + exp(-gain*(values - shift)))
|
def sigmoid(values, gain, shift):
import numpy as np
tiny = 0.000000001
# Make sure argument is a numpy array
if type(values) != np.ndarray:
values = np.array(values)
return 1.0 / (1.0 + np.exp(-gain * (values - shift)) + tiny)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def sigmoid(value):\n return 1.0 / (1.0 + math.exp(-value))",
"def sigmoid(t):\n t[t >= 20] = 20\n t[t <= -20] = -20\n return np.exp(t)/(np.exp(t)+1)",
"def sigmoid(x,shift=0,mult=1):\n return 1 / (1 + math.exp(-(x+shift)*mult))",
"def sigmoid(x):\r\n return 1 / (1 + np.exp(-x))",
"def sigmoid(x):\n\treturn 1 / (1 + m.exp(-x))",
"def sigmoid(x):\n return 1/(1+np.exp(-1*x))",
"def sigmoid(x):\n return 1 / (1 + np.exp(-x))",
"def sigmoid(x):\n return 1 / (1 + np.exp(-x))",
"def sigmoid(x):\n return 1 / (1 + np.exp(-x))",
"def sigmoid(x):\n return 1 / (1 + np.exp(-x))",
"def sigmoid(x):\n return 1 / (1 + np.exp(-x))",
"def sigmoid(x):\n return 1 / (1 + np.exp(-x))",
"def sigmoid(x):\n return 1.0/(1 + np.exp(-x))",
"def sigmoid(t):\n\tprecLim = 10\n\t\n\tt[t<=-precLim] = 0\n\tt[t>-precLim] = 1/ (1 + np.exp(-t))\n\n\treturn t",
"def sigmoid(x):\n return 1. / (1. + np.exp(-x))",
"def sigmoid(x):\r\n\r\n return 1 / (1 + np.exp(-x))",
"def sigmoid(x):\n return 1 / (1 * np.exp(-x))",
"def sigmoid(x):\n\treturn 1.0/(1.0+math.exp(-(x-0.5)*12.0))",
"def sigmoid(x):\n return 1.0 / (1.0 + np.exp(-x))",
"def sigmoid(t):\n return 1 / (1 + np.exp(-t))",
"def activation_sigmoid_custom(self):\n self.value = 1 / (1 + np.e ** (-4.9 * self.value))",
"def sigmoid(x):\n return 1 / (1 + math.exp(-x))",
"def _sigmoid(x):\n return 1 / (1 + np.exp(-x))",
"def sigmoid(x):\n pos_mask = (x >= 0)\n neg_mask = (x < 0)\n z = np.zeros_like(x)\n z[pos_mask] = np.exp(-x[pos_mask])\n z[neg_mask] = np.exp(x[neg_mask])\n top = np.ones_like(x)\n top[neg_mask] = z[neg_mask]\n return top / (1 + z)",
"def sigmoid(x):\n return 1 / (1 + exp(-x))",
"def sigmoid(x):\n return 1 / (1 + exp(-x))",
"def sigmoid(t):\n return np.exp(t)/(1+np.exp(t))",
"def sigmoid(x):\n return 1 / (1 - (power(e,-x)))",
"def sigmoid(x):\n return 1 / (1 + math.exp(-x))",
"def sigmoid(x):\n return 1/(1 + math.exp(-x))"
] |
[
"0.73663825",
"0.71762073",
"0.71002215",
"0.68752146",
"0.6858091",
"0.6832065",
"0.6828366",
"0.6828366",
"0.6828366",
"0.6828366",
"0.6828366",
"0.6828366",
"0.68040353",
"0.6799179",
"0.6792524",
"0.67914814",
"0.6790694",
"0.6784127",
"0.6758304",
"0.67532116",
"0.67454356",
"0.6742623",
"0.6736227",
"0.6729043",
"0.67244035",
"0.67244035",
"0.671442",
"0.6705319",
"0.66991866",
"0.66984147"
] |
0.8068838
|
0
|
Find endpoints in a region of connected vertices. These points are intended to serve as endpoints of fundus curves running along high-likelihood paths within a region (fold). This algorithm iteratively propagates from an initial high-likelihood seed toward the boundary of a region within thresholded subregions of decreasing likelihood. The first boundary point that is reached for each segmented branch serves as an endpoint.
|
def find_segment_endpoints(indices, neighbor_lists, likelihoods, step=1):
import numpy as np
from mindboggle.labels.labels import extract_borders
# Make sure arguments are numpy arrays
if isinstance(likelihoods, list):
likelihoods = np.array(likelihoods)
# Parameters:
min_size = 1
xstep = 1
# Threshold parameters:
use_thresholds = True
threshold_factor = 0.9
min_threshold = 0.1
# Recursive function for segmenting and finding endpoints:
def creep(R, P, X, E, L, B, step, neighbor_lists, min_size=1):
"""
Recursively segment a mesh, creeping toward its edges to find endpoints.
Steps ::
Propagate P into R, and call these new vertices N.
Propagate X into P, R, and N.
Remove points from N and R that are also in the expanded X.
Remove P and N from R.
Reassign P to X.
            If N is empty or reaches the region boundary:
                If N reaches the boundary, choose its highest-likelihood boundary point as an endpoint.
                Return endpoints E and remaining vertices R.
else:
Identify N_i different segments of N.
For each segment N_i:
                    If N_i is large enough, or there is only one segment:
Call recursive function creep() with new arguments.
Return E, R, P, X, and N.
Parameters
----------
R : list of integers
indices of vertices to segment (such as a fold)
P : list of integers
indices of previous segment vertices
X : list of integers
indices of segmented vertices to exclude from endpoint selection
E: list of integers
indices to endpoint vertices
L : numpy array of floats
likelihood values for all vertices
step : integer
number of segmentation steps before assessing segments
neighbor_lists : list of lists of integers
indices to neighboring vertices for each vertex
min_size : integer
minimum number of vertices for an endpoint segment
Returns
-------
R : list of integers
remaining vertices to segment
P : list of integers
previous segment
X : list of integers
excluded segment
E: list of integers
endpoints
"""
import numpy as np
from mindboggle.utils.segment import segment
# Expand X and exclude endpoint selection?:
rmX = False
#-----------------------------------------------------------------------
# Propagate P into R, and call these new vertices N:
#-----------------------------------------------------------------------
PintoR = segment(R, neighbor_lists, min_region_size=1, seed_lists=[P],
keep_seeding=False, spread_within_labels=False,
labels=[], label_lists=[], values=[], max_steps=step)
PN = [i for i,x in enumerate(PintoR) if x != -1]
# Remove P (seeds) from N:
N = list(frozenset(PN).difference(P))
#print(' {0} vertices in the new segment'.format(len(N)))
#-----------------------------------------------------------------------
# Propagate X into R (including P and N):
#-----------------------------------------------------------------------
if rmX:
if X:
RPN = R[:]
RPN.extend(PN)
XintoR = segment(RPN, neighbor_lists, min_region_size=1,
seed_lists=[X], keep_seeding=False,
spread_within_labels=False, labels=[],
label_lists=[], values=[], max_steps=xstep)
X = [i for i,x in enumerate(XintoR) if x != -1]
print(' {0} vertices spread from previously segmented'.format(len(X)))
# Remove points from N and R that are also in the expanded X:
N = list(frozenset(N).difference(X))
R = list(frozenset(R).difference(X))
# Reassign P to X:
X.extend(P)
# Remove P and N from R:
R = list(frozenset(R).difference(P))
R = list(frozenset(R).difference(N))
#-----------------------------------------------------------------------
# If N is empty, return endpoints:
#-----------------------------------------------------------------------
BandN = list(frozenset(B).intersection(N))
if not N:
pass
elif BandN:
            # Choose the highest-likelihood boundary point in N as an endpoint:
E.append(BandN[np.argmax(L[BandN])])
#-----------------------------------------------------------------------
# If N is not empty, assign as P and continue segmenting recursively:
#-----------------------------------------------------------------------
else:
# Identify N_i different segments of N:
N_segments = segment(N, neighbor_lists, min_region_size=1)
unique_N = [x for x in np.unique(N_segments) if x!=-1]
n_segments = len(unique_N)
# For each segment N_i:
for n in unique_N:
N_i = [i for i,x in enumerate(N_segments) if x==n]
                # If N_i is large enough, or there is only one segment:
if len(N_i) >= min_size or n_segments==1:
# Call creep() with new arguments:
R, P, X, E = creep(R, N_i, X, E, L, B, step,
neighbor_lists, min_size)
# Return endpoints E and remaining vertices R:
return R, P, X, E
# Extract boundary:
D = np.ones(len(likelihoods))
D[indices] = 2
B, foo1, foo2 = extract_borders(range(len(likelihoods)), D, neighbor_lists)
# Initialize R, X, and E:
R = []
X = []
E = []
indices_endpoints = []
# Initialize P and threshold with the maximum likelihood point:
L = likelihoods
Imax = indices[np.argmax(L[indices])]
P = [Imax]
threshold = L[Imax]
# Include new vertices with lower likelihood values:
if use_thresholds:
# Iterate endpoint extraction until all vertices have been segmented:
continue_loop = True
while continue_loop:
prev_threshold = threshold
# If threshold above minimum, update R based on the threshold:
if threshold > min_threshold:
#if X: threshold = threshold_factor * np.mean(L[X])
threshold = threshold_factor * threshold
T = [x for x in indices if L[x] >= threshold
if L[x] < prev_threshold]
if not T:
decrease_threshold = True
while decrease_threshold:
threshold *= threshold_factor
T = [x for x in indices if L[x] >= threshold
if L[x] < prev_threshold]
if T or threshold < min_threshold:
decrease_threshold = False
R.extend(T)
# If threshold below minimum, update and exit:
else:
T = [x for x in indices if L[x] < prev_threshold]
R.extend(T)
continue_loop = False
# Run recursive function creep() to return endpoints:
R, P, X, E = creep(R, P, X, E, L, B, step, neighbor_lists, min_size)
E = np.unique(E).tolist()
# Print message:
if len(R) == 1:
str1 = 'vertex'
else:
str1 = 'vertices'
if len(E) == 1:
str2 = 'endpoint'
else:
str2 = 'endpoints'
print(' {0} remaining {1}, {2} {3} (threshold: {4:0.3f})'.
format(len(R), str1, len(E), str2, threshold))
# Don't use thresholds -- include all vertices:
else:
R = indices
print(' Segment {0} vertices'.format(len(R)))
# Run recursive function creep() to return endpoints:
R, P, X, E = creep(R, P, X, E, L, B, step, neighbor_lists, min_size)
indices_endpoints = E
return indices_endpoints
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def find_edges(starting_point, max_dist, hi, lo, bgArray):\n try:\n b = fetch_val(bgArray, starting_point)\n except IndexError:\n return None\n offsets = [(0,1), (1,0), (0,-1), (-1,0)]\n edgePoints = []\n for offset in offsets:\n first_result = find_edge(starting_point, offset, max_dist, hi, lo, bgArray)\n if first_result is not None:\n edgePoints.append(first_result[0])\n if b < lo or b > hi:\n # Try to find second point, since starting click was outside threshold\n second_result = find_edge(first_result[0], offset, max_dist - first_result[1], hi, lo, bgArray)\n if second_result is not None:\n edgePoints.append(second_result[0])\n return edgePoints",
"def endpoints(line_points):\n neighbors = []\n for p in line_points:\n aux = 0\n for q in line_points:\n if np.linalg.norm(p-q) == 1:\n aux += 1\n neighbors.append(aux)\n e_points = np.where(np.array(neighbors)==1)\n return line_points[e_points]",
"def find_isolated_endpoints(lines):\n \n isolated_endpoints = []\n count = len(lines)\n print(\"Finding isolated end points 2/3\")\n pb = pbar.ProgressBar(count)\n for i, line in enumerate(lines):\n pb += 1\n other_lines = lines[:i] + lines[i+1:]\n for q in [0,-1]:\n endpoint = Point(line.coords[q])\n if any(endpoint.touches(another_line) \n for another_line in other_lines):\n continue\n else:\n isolated_endpoints.append(endpoint)\n del pb\n return isolated_endpoints",
"def endpoints(image):\n return _neighbors_conv(image) == 1",
"def _get_end_points(self, segmented_instances, i, stats, idx):\n\n end_points=[]\n\n # find all points intersecting the bbox\n #(tl_x, th_y, width, height, area)\n label_num=i+1\n leftmost_x = stats['bbox'][i][cv2.CC_STAT_LEFT]\n topmost_y = stats['bbox'][i][cv2.CC_STAT_TOP]\n width = stats['bbox'][i][cv2.CC_STAT_WIDTH]\n height = stats['bbox'][i][cv2.CC_STAT_HEIGHT]\n bottom_most_y = topmost_y + height-1\n right_most_x = leftmost_x + width-1\n\n segmented_instances_copy=segmented_instances.copy()\n edge_points = np.zeros(segmented_instances.shape).astype(np.uint8)\n segs = np.zeros(segmented_instances.shape).astype(np.uint8)\n segs[segmented_instances==label_num]=255\n cv2.rectangle(segmented_instances_copy,(leftmost_x, topmost_y), (right_most_x, bottom_most_y), 150, 2)\n\n #Get all points for the current stem segment\n label_points = np.argwhere(segmented_instances.copy()==label_num)\n\n # upper points from (tl_x,th_y) to (th_x, th_y) that instersect with the upper edge of the bouding box\n upper_points = [i for i in label_points if i[0]==topmost_y and i[1]>=leftmost_x and i[1]<=right_most_x]\n x_pts, segs, edge_points = self._update_imgs_and_pt_list(upper_points, edge_points, segs, 1)\n center_upper_pts = sorted(self._get_centeroids(x_pts))\n\n # left side points from (tl_x, tl_y) to (tl_x, th_y) that instersect with the left edge of the bouding box\n left_points = [i for i in label_points if i[1]==leftmost_x and i[0]<=bottom_most_y and i[0]>=topmost_y]\n x_pts, segs, edge_points = self._update_imgs_and_pt_list(left_points, edge_points, segs, 0)\n center_left_pts = sorted(self._get_centeroids(x_pts))\n\n #right side points form (th_x, tl_y) to (th_x, th_y) that instersect with the right edge of the bouding box\n right_points = [i for i in label_points if i[1]==right_most_x and i[0]<=bottom_most_y and i[0]>=topmost_y]\n x_pts, segs, edge_points = self._update_imgs_and_pt_list(right_points, edge_points, segs, 0)\n center_right_pts = sorted(self._get_centeroids(x_pts))\n\n #bottom points from (tl_x, tl_y) to (th_x,tl_y)\n bottom_points = [i for i in label_points if i[1]>=leftmost_x and i[1]<=right_most_x and i[0]==bottom_most_y]\n x_pts, segs, edge_points = self._update_imgs_and_pt_list(bottom_points, edge_points, segs, 1)\n center_bottom_pts = sorted(self._get_centeroids(x_pts))\n\n # If there are corner edges, get the centroid of that\n center_x_lb, center_y_lb, center_left_pts, center_bottom_pts = self._get_corner_centers(center_left_pts, \\\n center_bottom_pts, bottom_most_y, leftmost_x)\n if (center_x_lb != None) and (center_y_lb != None):\n end_points.append([center_x_lb, center_y_lb])\n else:\n if len(center_left_pts)>0:\n for pt_idx in range(0, len(center_left_pts)):\n end_points.append([leftmost_x, center_left_pts[pt_idx]])\n if len(center_bottom_pts)>0:\n for pt_idx in range(0, len(center_bottom_pts)):\n end_points.append([center_bottom_pts[pt_idx], bottom_most_y])\n\n # If there are corner edges, get the centroid of that\n center_x_ur, center_y_ur, center_right_pts, center_upper_pts = self._get_corner_centers(center_right_pts, \\\n center_upper_pts, topmost_y, right_most_x)\n if (center_x_ur != None) and (center_y_ur != None):\n end_points.append([center_x_ur, center_y_ur])\n else:\n if len(center_right_pts)>0:\n for pt_idx in range(0, len(center_right_pts)):\n end_points.append([right_most_x, center_right_pts[pt_idx]])\n if len(center_upper_pts)>0:\n for pt_idx in range(0, len(center_upper_pts)):\n end_points.append([center_upper_pts[pt_idx], topmost_y])\n\n # If there are 
corner edges, get the centroid of that\n center_x_ul, center_y_ul, center_left_pts, center_upper_pts = self._get_corner_centers(center_left_pts, \\\n center_upper_pts, topmost_y, leftmost_x)\n if (center_x_ul != None) and (center_y_ul != None):\n end_points.append([center_x_ul, center_y_ul])\n else:\n if len(center_left_pts)>0:\n for pt_idx in range(0, len(center_left_pts)):\n end_points.append([leftmost_x, center_left_pts[pt_idx]])\n if len(center_upper_pts)>0:\n for pt_idx in range(0, len(center_upper_pts)):\n end_points.append([center_upper_pts[pt_idx], topmost_y])\n\n\n # If there are corner edges, get the centroid of that\n center_x_br, center_y_br, center_right_pts, center_bottom_pts = self._get_corner_centers(center_right_pts, \\\n center_bottom_pts, bottom_most_y, right_most_x)\n if (center_x_br != None) and (center_y_br != None):\n end_points.append([center_x_br, center_y_br])\n else:\n if len(center_right_pts)>0:\n for pt_idx in range(0, len(center_right_pts)):\n end_points.append([right_most_x, center_right_pts[pt_idx]])\n if len(center_bottom_pts)>0:\n for pt_idx in range(0, len(center_bottom_pts)):\n end_points.append([center_bottom_pts[pt_idx], bottom_most_y])\n\n #self.showme(segmented_instances_copy, 'bbox')\n\n return end_points",
"def extract_1d_boundaries(xy, NL, KL, BL, PVx, PVy, check=False):\n if PVx is None and PVy is None:\n raise RuntimeError('Not designed to allow openBC networks.')\n # PVx = np.zeros_like(KL, dtype=float)\n # PVy = np.zeros_like(KL, dtype=float)\n\n # If there are dangling points, remove them for now and adjust indices later\n dangles, xy, NL, KL, BL, backtrans = remove_dangling_points(xy, NL, KL, BL, check=check)\n # If no dangling bonds, no need to translate indices at the end\n translate_at_end = len(dangles) > 0\n\n # Initialize the list of boundary indices to be larger than necessary\n boundaries = []\n for boundaryloc in ['top', 'bottom']:\n # Initialize the boundary list to be as long as possible (will truncate later)\n bb = np.zeros(2 * len(xy), dtype=int)\n if boundaryloc == 'top':\n # Start with the topmost point, which is guaranteed to be\n # at the convex hull and thus also at the top outer edge.\n # Then take the first step to be along the minimum angle bond\n rightIND = np.where(xy[:, 1] == np.max(xy[:, 1]))[0]\n # If there are more than one rightmost point, choose one\n if rightIND.size > 1:\n rightIND = rightIND[0]\n else:\n # Start with the bottom most point, which is guaranteed to be\n # at the convex hull and thus also at the bottom outer edge.\n # Then take the first step to be along the minimum angle bond\n rightIND = np.where(xy[:, 1] == np.min(xy[:, 1]))[0]\n # If there are more than one rightmost point, choose one\n if rightIND.size > 1:\n rightIND = rightIND[0]\n\n if check:\n print 'le.extract_1d_boundaries(): Found extremal pt: ', rightIND\n print 'le.extract_1d_boundaries(): with neighbors: ', NL[rightIND]\n print 'le.extract_1d_boundaries(): with connectns: ', KL[rightIND]\n plt.plot(xy[:, 0], xy[:, 1], 'k.')\n plt.plot(xy[rightIND, 0], xy[rightIND, 1], 'bo')\n for ii in range(len(xy)):\n plt.text(xy[ii, 0] + 0.1, xy[ii, 1], str(ii))\n plt.plot(xy[rightIND, 0], xy[rightIND, 1], 'ro')\n plt.pause(0.01)\n\n # Grab the true neighbors of this starting point\n # print 'le.extract_boundary(): NL[rightIND, :] = ', NL[rightIND, :]\n connect = np.argwhere(np.abs(KL[rightIND]).ravel()).ravel()\n neighbors = NL[rightIND, connect]\n if check:\n print 'le.extract_1d_boundaries(): neighbors = ', neighbors\n print 'le.extract_1d_boundaries(): rightIND = ', rightIND\n\n # Compute the angles of the neighbor bonds\n angles = np.mod(np.arctan2(xy[neighbors, 1] - xy[rightIND, 1] + PVy[rightIND, connect],\n xy[neighbors, 0] - xy[rightIND, 0] + PVx[rightIND, connect]).ravel(),\n 2 * np.pi)\n if check:\n print 'le.extract_1d_boundaries(): KL[rightIND] = ', KL[rightIND]\n print 'le.extract_1d_boundaries(): KL[rightIND,0] = ', KL[rightIND, 0]\n print 'le.extract_1d_boundaries(): KL[rightIND,0] ==0 ', KL[rightIND, 0] == 0\n print 'le.extract_1d_boundaries(): np.argwhere(KL[rightIND]) = ', np.argwhere(KL[rightIND])\n print 'le.extract_1d_boundaries(): np.argwhere(KL[rightIND].ravel())= ', np.argwhere(KL[rightIND].ravel())\n print 'le.extract_1d_boundaries(): neighbors = ', neighbors\n print 'le.extract_1d_boundaries(): angles = ', angles\n\n # Assign this pvx and pvy as pvx_prev and pvy_prev for next time around.\n # Note that this must preceed the redefinition of nextIND\n pvx_prev = PVx[rightIND, connect[angles == min(angles)][0]]\n pvy_prev = PVy[rightIND, connect[angles == min(angles)][0]]\n\n # Take the second particle to be the one with the lowest bond angle (will be >= pi/2)\n nextIND = neighbors[angles == min(angles)][0]\n bb[0] = rightIND\n\n dmyi = 1\n # as long as we haven't 
completed the full outer edge/boundary, add nextIND\n while nextIND != rightIND:\n # print '\\n nextIND = ', nextIND\n # print 'np.argwhere(KL[nextIND]) = ', np.argwhere(KL[nextIND]).ravel()\n bb[dmyi] = nextIND\n connect = np.argwhere(np.abs(KL[nextIND]).ravel())\n n_tmp = NL[nextIND, connect]\n\n # Get position in row of NL where NL == bb[dmyi - 1] (the previous boundary particle/site)\n # and where the PVx and PVy are opposite of the last used PVx and PVy values (to make sure we\n # are looking backwards along the boundary). We will use this to get the 'backward angle' -- the\n # angle of the previous bond in the boundary\n # Note that bb[dmyi - 1] may have been index 0, so there could be multiple matches\n nlpos = np.where(np.logical_and(NL[nextIND] == bb[dmyi - 1],\n np.abs(KL[nextIND]).ravel().astype(bool)))[0]\n if len(nlpos) > 1:\n # There is more than one connection to the previous particle. Check for where PVx and PVy\n # values are opposite the previously used values.\n ind_nlpos = np.where(np.logical_and(PVx[nextIND, nlpos] == -pvx_prev,\n PVy[nextIND, nlpos] == -pvy_prev))[0]\n print 'ind_nlpos = ', ind_nlpos\n nlpos = nlpos[ind_nlpos]\n\n # Exclude previous boundary particle (the copy of that particle in the nlpos position)\n # from the neighbors array, UNLESS IT IS THE ONLY ONE,\n # since its angle with itself is zero!\n\n # Used to remove previous particle, but this assumes that boundary is more than 2\n # particles long, which might not be true for periodic_strip bcs\n if len(n_tmp) == 1:\n print 'le: The bond is a lone bond, not part of a triangle.'\n neighbors = n_tmp\n else:\n print 'n_tmp = ', n_tmp\n neighbors = np.delete(n_tmp, nlpos)\n connect = np.delete(connect, nlpos)\n print 'n_tmp = ', n_tmp\n print 'neighbors = ', neighbors\n\n # print 'le: nlpos = ', nlpos\n forward_angles = np.arctan2(xy[neighbors, 1] - xy[nextIND, 1] + PVy[nextIND, connect],\n xy[neighbors, 0] - xy[nextIND, 0] + PVx[nextIND, connect]).ravel()\n backward_angle = np.arctan2(xy[bb[dmyi - 1], 1] - xy[nextIND, 1] + PVy[nextIND, nlpos],\n xy[bb[dmyi - 1], 0] - xy[nextIND, 0] + PVx[nextIND, nlpos]).ravel()\n if check:\n print 'le: connect = ', connect\n print 'le: forward_angles = ', forward_angles\n print 'le: backward_angle = ', backward_angle\n\n angles = np.mod(forward_angles - backward_angle, 2 * np.pi)\n if check:\n print 'le: angles = ', angles\n print 'le: angles==min--> ', angles == min(angles)\n print 'le: neighbors = ', neighbors\n print 'le.extract_1d_boundaries(): angles==min--> ', angles == min(angles)\n print 'le.extract_1d_boundaries(): neighbors[angles == min(angles)] --> ', neighbors[angles == min(angles)]\n\n # Assign this pvx and pvy as pvx_prev and pvy_prev for next time around.\n # Note that this must preceed the redefinition of nextIND\n pvx_prev = PVx[nextIND, connect[angles == min(angles)][0]]\n pvy_prev = PVy[nextIND, connect[angles == min(angles)][0]]\n # Redefine nextIND to be the new boundary index\n nextIND = neighbors[angles == min(angles)][0]\n # print 'nextIND = ', nextIND\n\n if check:\n # plt.plot(xy[:,0],xy[:,1],'k.')\n XY = np.vstack([xy[bb[dmyi], :], xy[nextIND, :]])\n plt.plot(XY[:, 0], XY[:, 1], 'r-')\n # for i in range(len(xy)):\n # plt.text(xy[i,0]+0.2,xy[i,1],str(i))\n plt.gca().set_aspect('equal')\n plt.pause(0.01)\n\n dmyi += 1\n\n # Truncate the list of boundary indices\n boundary = bb[0:dmyi]\n\n # Since some points were removed from the boundary identification, translate\n # indices back to indices of original xy\n if translate_at_end:\n print 
'le.extract_boundary(): Translating boundary points back into original indices...'\n # print 'boundary = ', boundary\n # print 'translation = ', translation\n # print 'backtrans = ', backtrans\n boundary = backtrans[boundary]\n\n boundaries.append(boundary)\n\n return tuple(boundaries)",
"def find_edge(point, offset, max_dist, hi, lo, bgArray):\n for i in range(1, max_dist):\n next = (point[0] + i * offset[0], point[1] + i * offset[1])\n if is_edge(next, hi, lo, bgArray):\n return (next, i)\n return None",
"def get_path_endpoints(self):\n endpoints = []\n\n # Get the far end of the last path segment\n path, split_ends, position_stack = self.trace()\n endpoint = path[-1][2]\n if split_ends is not None:\n for termination in split_ends:\n endpoints.extend(termination.get_path_endpoints())\n elif endpoint is not None:\n endpoints.append(endpoint)\n\n return endpoints",
"def compute_pair_bounds(self, edges, pair):\n lower_bounds =[]\n upper_bounds = []\n for arc in edges:\n l_e = self.arc_info[arc][\"lower_bound\"]\n u_e = self.arc_info[arc][\"upper_bound\"]\n f_mij = self.compute_f_mij(arc, pair)\n lower_bounds.append(l_e - f_mij)\n upper_bounds.append(u_e - f_mij)\n lb = max(lower_bounds + [0])\n # in case no edges in here, make max of 5,000\n if len(upper_bounds) == 0:\n i = pair[0]\n j = pair[1]\n print(\"Path i ({}): {}\".format(i, self.paths[i]))\n print(\"Path j ({}): {}\".format(j, self.paths[j]))\n ub = min(upper_bounds + [5000])\n #print(\"lower bounds: {}\".format(lower_bounds))\n #print(\"upper bounds: {}\".format(upper_bounds))\n return(lb, ub)",
"def inters_segment(self, s):\r\n x1 = s.start[0] - self.center[0]\r\n y1 = s.start[1] - self.center[1]\r\n x2 = s.end[0] - self.center[0]\r\n y2 = s.end[1] - self.center[1]\r\n dx = x2 - x1\r\n dy = y2 - y1\r\n dr = math.sqrt(dx * dx + dy * dy)\r\n D = x1 * y2 - x2 * y1\r\n dr2 = dr * dr\r\n d = self.radius * self.radius * dr2 - D * D \r\n \r\n if d < 0:\r\n return []\r\n else: \r\n if dy < 0:\r\n sgndy = -1\r\n else:\r\n sgndy = 1 \r\n \r\n Ddy = D * dy\r\n mDdx = -D * dx\r\n sgndydxsqrtd = sgndy * dx * math.sqrt(d)\r\n absdysqrtd = abs(dy) * math.sqrt(d) \r\n \r\n xa = float(Ddy + sgndydxsqrtd) / dr2 + self.center[0]\r\n ya = float(mDdx + absdysqrtd) / dr2 + self.center[1]\r\n \r\n xb = (Ddy - sgndydxsqrtd) / dr2 + self.center[0]\r\n yb = (mDdx - absdysqrtd) / dr2 + self.center[1]\r\n \r\n if (d == 0) or not s.contains_point(xb, yb):\r\n if s.contains_point(xa, ya):\r\n return [(int(xa), int(ya))]\r\n else:\r\n return []\r\n else:\r\n if s.contains_point(xa, ya):\r\n return [(int(xa), int(ya)), (int(xb), int(yb))]\r\n else:\r\n return [(int(xb), int(yb))]",
"def __detect_branching_haghverdi16(\n self, Dseg: np.ndarray, tips: np.ndarray\n ) -> np.ndarray:\n # sort distance from first tip point\n # then the sequence of distances Dseg[tips[0]][idcs] increases\n idcs = np.argsort(Dseg[tips[0]])\n # consider now the sequence of distances from the other\n # two tip points, which only increase when being close to `tips[0]`\n # where they become correlated\n # at the point where this happens, we define a branching point\n if True:\n imax = self.kendall_tau_split(\n Dseg[tips[1]][idcs],\n Dseg[tips[2]][idcs],\n )\n if False:\n # if we were in euclidian space, the following should work\n # as well, but here, it doesn't because the scales in Dseg are\n # highly different, one would need to write the following equation\n # in terms of an ordering, such as exploited by the kendall\n # correlation method above\n imax = np.argmin(\n Dseg[tips[0]][idcs] + Dseg[tips[1]][idcs] + Dseg[tips[2]][idcs]\n )\n # init list to store new segments\n ssegs = [] # noqa: F841 # TODO Look into this\n # first new segment: all points until, but excluding the branching point\n # increasing the following slightly from imax is a more conservative choice\n # as the criterion based on normalized distances, which follows below,\n # is less stable\n if imax > 0.95 * len(idcs) and self.allow_kendall_tau_shift:\n # if \"everything\" is correlated (very large value of imax), a more\n # conservative choice amounts to reducing this\n logg.warning(\n 'shifting branching point away from maximal kendall-tau '\n 'correlation (suppress this with `allow_kendall_tau_shift=False`)'\n )\n ibranch = int(0.95 * imax)\n else:\n # otherwise, a more conservative choice is the following\n ibranch = imax + 1\n return idcs[:ibranch]",
"def detect_branchings(self):\n logg.debug(\n f' detect {self.n_branchings} '\n f'branching{\"\" if self.n_branchings == 1 else \"s\"}',\n )\n # a segment is a subset of points of the data set (defined by the\n # indices of the points in the segment)\n # initialize the search for branchings with a single segment,\n # that is, get the indices of the whole data set\n indices_all = np.arange(self._adata.shape[0], dtype=int)\n # let's keep a list of segments, the first segment to add is the\n # whole data set\n segs = [indices_all]\n # a segment can as well be defined by the two points that have maximal\n # distance in the segment, the \"tips\" of the segment\n #\n # the rest of the points in the segment is then defined by demanding\n # them to \"be close to the line segment that connects the tips\", that\n # is, for such a point, the normalized added distance to both tips is\n # smaller than one:\n # (D[tips[0],i] + D[tips[1],i])/D[tips[0],tips[1] < 1\n # of course, this condition is fulfilled by the full cylindrical\n # subspace surrounding that line segment, where the radius of the\n # cylinder can be infinite\n #\n # if D denotes a euclidian distance matrix, a line segment is a linear\n # object, and the name \"line\" is justified. if we take the\n # diffusion-based distance matrix Dchosen, which approximates geodesic\n # distance, with \"line\", we mean the shortest path between two points,\n # which can be highly non-linear in the original space\n #\n # let us define the tips of the whole data set\n if False: # this is safe, but not compatible with on-the-fly computation\n tips_all = np.array(\n np.unravel_index(\n np.argmax(self.distances_dpt), self.distances_dpt.shape\n )\n )\n else:\n if self.iroot is not None:\n tip_0 = np.argmax(self.distances_dpt[self.iroot])\n else:\n tip_0 = np.argmax(self.distances_dpt[0])\n tips_all = np.array([tip_0, np.argmax(self.distances_dpt[tip_0])])\n # we keep a list of the tips of each segment\n segs_tips = [tips_all]\n segs_connects = [[]]\n segs_undecided = [True]\n segs_adjacency = [[]]\n logg.debug(\n ' do not consider groups with less than '\n f'{self.min_group_size} points for splitting'\n )\n for ibranch in range(self.n_branchings):\n iseg, tips3 = self.select_segment(segs, segs_tips, segs_undecided)\n if iseg == -1:\n logg.debug(' partitioning converged')\n break\n logg.debug(\n f' branching {ibranch + 1}: split group {iseg}',\n ) # [third start end]\n # detect branching and update segs and segs_tips\n self.detect_branching(\n segs,\n segs_tips,\n segs_connects,\n segs_undecided,\n segs_adjacency,\n iseg,\n tips3,\n )\n # store as class members\n self.segs = segs\n self.segs_tips = segs_tips\n self.segs_undecided = segs_undecided\n # the following is a bit too much, but this allows easy storage\n self.segs_adjacency = sp.sparse.lil_matrix((len(segs), len(segs)), dtype=float)\n self.segs_connects = sp.sparse.lil_matrix((len(segs), len(segs)), dtype=int)\n for i, seg_adjacency in enumerate(segs_adjacency):\n self.segs_connects[i, seg_adjacency] = segs_connects[i]\n for i in range(len(segs)):\n for j in range(len(segs)):\n self.segs_adjacency[i, j] = self.distances_dpt[\n self.segs_connects[i, j], self.segs_connects[j, i]\n ]\n self.segs_adjacency = self.segs_adjacency.tocsr()\n self.segs_connects = self.segs_connects.tocsr()",
"def GetInteriorEdgesPent(self):\n\n if not isinstance(self.all_edges,np.ndarray):\n self.GetEdgesPent()\n if not isinstance(self.edges,np.ndarray):\n self.GetBoundaryEdgesPent()\n\n sorted_all_edges = np.sort(self.all_edges,axis=1)\n sorted_boundary_edges = np.sort(self.edges,axis=1)\n\n x = []\n for i in range(self.edges.shape[0]):\n current_sorted_boundary_edge = np.tile(sorted_boundary_edges[i,:],\n self.all_edges.shape[0]).reshape(self.all_edges.shape[0],self.all_edges.shape[1])\n interior_edges = np.linalg.norm(current_sorted_boundary_edge - sorted_all_edges,axis=1)\n pos_interior_edges = np.where(interior_edges==0)[0]\n if pos_interior_edges.shape[0] != 0:\n x.append(pos_interior_edges)\n\n edge_aranger = np.arange(self.all_edges.shape[0])\n edge_aranger = np.setdiff1d(edge_aranger,np.array(x)[:,0])\n interior_edges = self.all_edges[edge_aranger,:]\n\n # GET FLAGS FOR BOUNDRAY AND INTERIOR\n edge_flags = np.ones(self.all_edges.shape[0],dtype=np.int64)\n edge_flags[edge_aranger] = 0\n\n self.interior_edges = interior_edges\n return interior_edges, edge_flags",
"def _convex_hull_side(image, start, end):\n\n convex_points = [start]\n\n x_start, y_start = start\n x_end, y_end = end\n\n side = (x_start <= x_end, y_start <= y_end)\n\n\n ranges = {\n (True, True): [\n [x_start + 1, x_end + 1],\n [y_start, y_end + 1],\n False\n ],\n (False, True): [\n [y_start + 1, y_end + 1],\n [x_start, x_end - 1, -1],\n True\n ],\n (False, False): [\n [x_start - 1, x_end - 1, -1],\n [y_start, y_end - 1, -1],\n False\n ],\n (True, False): [\n [y_start - 1, y_end - 1, -1],\n [x_start, x_end + 1],\n True\n ]\n }\n\n prev = 0\n\n for outer in range(*ranges[side][0]):\n\n curr_pixel = None\n\n for inner in range(*ranges[side][1]):\n if ranges[side][2] and image[outer, inner] == 0:\n curr_pixel = (inner, outer)\n break\n elif not ranges[side][2] and image[inner, outer] == 0:\n curr_pixel = (outer, inner)\n break\n\n if curr_pixel is None:\n continue\n\n while True:\n # slope infinite for first point\n prev_slope = (\n float(\"-inf\") if prev == 0\n else slope(\n convex_points[prev - 1],\n convex_points[prev],\n ranges[side][2]))\n\n # remove previous point if it yields concave segment\n if prev_slope > slope(\n convex_points[prev],\n curr_pixel,\n ranges[side][2]\n ):\n convex_points.pop(prev)\n prev -= 1\n # add point to hull if it yields convex segment\n else:\n convex_points.append(curr_pixel)\n prev += 1\n break\n\n return convex_points[1:]",
"def get_main_points(neuron):\n (branch_index,) = np.where(neuron.branch_order[neuron.n_soma:] == 2)\n (endpoint_index,) = np.where(neuron.branch_order[neuron.n_soma:] == 0)\n selected_index = np.union1d(branch_index + neuron.n_soma,\n endpoint_index + neuron.n_soma)\n selected_index = np.append(range(neuron.n_soma), selected_index)\n return selected_index",
"def GetBoundaryEdgesPent(self):\n\n p = self.InferPolynomialDegree()\n\n # DO NOT COMPUTE IF ALREADY COMPUTED\n if isinstance(self.edges,np.ndarray):\n if self.edges.shape[0] > 1:\n # IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION\n if self.edges.shape[1] == 2 and p > 1:\n pass\n else:\n return\n\n node_arranger = np.array([\n [0,1],\n [1,2],\n [2,3],\n [3,4],\n [4,0],\n ])\n\n # GET ALL EDGES FROM THE ELEMENT CONNECTIVITY\n all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],\n self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]],\n self.elements[:,node_arranger[4,:]]),axis=0).astype(np.uint64)\n\n # GET UNIQUE ROWS\n uniques, idx, inv = unique2d(all_edges,consider_sort=True,order=False,return_index=True,return_inverse=True)\n\n # ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY EDGES\n freqs_inv = itemfreq(inv)\n edges_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]\n # NOT ARRANGED\n self.edges = uniques[edges_ext_flags,:]\n\n # DETERMINE WHICH FACE OF THE ELEMENT THEY ARE\n boundary_edge_to_element = np.zeros((edges_ext_flags.shape[0],2),dtype=np.int64)\n\n # FURTHER RE-ARRANGEMENT / ARANGE THE NODES BASED ON THE ORDER THEY APPEAR\n # IN ELEMENT CONNECTIVITY\n # THIS STEP IS NOT NECESSARY INDEED - ITS JUST FOR RE-ARANGMENT OF EDGES\n all_edges_in_edges = in2d(all_edges,self.edges,consider_sort=True)\n all_edges_in_edges = np.where(all_edges_in_edges==True)[0]\n\n boundary_edge_to_element[:,0] = all_edges_in_edges % self.elements.shape[0]\n boundary_edge_to_element[:,1] = all_edges_in_edges // self.elements.shape[0]\n\n # ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS\n self.edges = self.elements[boundary_edge_to_element[:,0][:,None],node_arranger[boundary_edge_to_element[:,1],:]]\n self.edges = self.edges.astype(np.uint64)\n self.boundary_edge_to_element = boundary_edge_to_element\n\n return self.edges",
"def break_points(graph = None):\n\tstart = 0\n\tstop = nx.number_of_nodes(graph) -1\n\tbreak_points = []\n\tbasepair_list = basepairs(graph = graph)\n\tfor basepair in basepair_list:\n\t\tif (successor_pair(basepair, start, stop) == (-1,-1)) or \\\n\t\t(predecessor_pair(basepair, start, stop) == (-1,-1)) or \\\n\t\tnot (successor_pair(basepair, start, stop) in basepair_list) or \\\n\t\tnot (predecessor_pair(basepair, start, stop) in basepair_list):\n\t\t\tbreak_points.append(basepair)\n\treturn break_points",
"def find_inner_edge(wrap, dist=25, prom=0.08): # used to be named as find_cell_corner\n if len(wrap.shape) == 2:\n wrap_g = wrap\n elif len(wrap.shape) == 3:\n wrap_g = cv.cvtColor(wrap, cv.COLOR_BGR2GRAY)\n\n sum_x = np.sum(wrap_g, axis=0)\n sum_x = sum_x / np.max(sum_x)\n peak_x, _ = signal.find_peaks(-sum_x, distance=dist, prominence=prom)\n\n sum_y = np.sum(wrap_g, axis=1)\n sum_y = sum_y / np.max(sum_y)\n peak_y, _ = signal.find_peaks(-sum_y, distance=dist, prominence=prom)\n\n return peak_x, peak_y",
"def detect_branching(\n self,\n segs: Sequence[np.ndarray],\n segs_tips: Sequence[np.ndarray],\n segs_connects,\n segs_undecided,\n segs_adjacency,\n iseg: int,\n tips3: np.ndarray,\n ):\n seg = segs[iseg]\n # restrict distance matrix to points in segment\n if not isinstance(self.distances_dpt, OnFlySymMatrix):\n Dseg = self.distances_dpt[np.ix_(seg, seg)]\n else:\n Dseg = self.distances_dpt.restrict(seg)\n # given the three tip points and the distance matrix detect the\n # branching on the segment, return the list ssegs of segments that\n # are defined by splitting this segment\n result = self._detect_branching(Dseg, tips3, seg)\n ssegs, ssegs_tips, ssegs_adjacency, ssegs_connects, trunk = result\n # map back to global indices\n for iseg_new, seg_new in enumerate(ssegs):\n ssegs[iseg_new] = seg[seg_new]\n ssegs_tips[iseg_new] = seg[ssegs_tips[iseg_new]]\n ssegs_connects[iseg_new] = list(seg[ssegs_connects[iseg_new]])\n # remove previous segment\n segs.pop(iseg)\n segs_tips.pop(iseg)\n # insert trunk/undecided_cells at same position\n segs.insert(iseg, ssegs[trunk])\n segs_tips.insert(iseg, ssegs_tips[trunk])\n # append other segments\n segs += [seg for iseg, seg in enumerate(ssegs) if iseg != trunk]\n segs_tips += [\n seg_tips for iseg, seg_tips in enumerate(ssegs_tips) if iseg != trunk\n ]\n if len(ssegs) == 4:\n # insert undecided cells at same position\n segs_undecided.pop(iseg)\n segs_undecided.insert(iseg, True)\n # correct edges in adjacency matrix\n n_add = len(ssegs) - 1\n prev_connecting_segments = segs_adjacency[iseg].copy()\n if self.flavor == 'haghverdi16':\n segs_adjacency += [[iseg] for i in range(n_add)]\n segs_connects += [\n seg_connects\n for iseg, seg_connects in enumerate(ssegs_connects)\n if iseg != trunk\n ]\n prev_connecting_points = segs_connects[ # noqa: F841 TODO Evaluate whether to assign the variable or not\n iseg\n ]\n for jseg_cnt, jseg in enumerate(prev_connecting_segments):\n iseg_cnt = 0\n for iseg_new, seg_new in enumerate(ssegs):\n if iseg_new != trunk:\n pos = segs_adjacency[jseg].index(iseg)\n connection_to_iseg = segs_connects[jseg][pos]\n if connection_to_iseg in seg_new:\n kseg = len(segs) - n_add + iseg_cnt\n segs_adjacency[jseg][pos] = kseg\n pos_2 = segs_adjacency[iseg].index(jseg)\n segs_adjacency[iseg].pop(pos_2)\n idx = segs_connects[iseg].pop(pos_2)\n segs_adjacency[kseg].append(jseg)\n segs_connects[kseg].append(idx)\n break\n iseg_cnt += 1\n segs_adjacency[iseg] += list(\n range(len(segs_adjacency) - n_add, len(segs_adjacency))\n )\n segs_connects[iseg] += ssegs_connects[trunk]\n else:\n import networkx as nx\n\n segs_adjacency += [[] for i in range(n_add)]\n segs_connects += [[] for i in range(n_add)]\n kseg_list = [iseg] + list(range(len(segs) - n_add, len(segs)))\n for jseg in prev_connecting_segments:\n pos = segs_adjacency[jseg].index(iseg)\n distances = []\n closest_points_in_jseg = []\n closest_points_in_kseg = []\n for kseg in kseg_list:\n reference_point_in_k = segs_tips[kseg][0]\n closest_points_in_jseg.append(\n segs[jseg][\n np.argmin(\n self.distances_dpt[reference_point_in_k, segs[jseg]]\n )\n ]\n )\n # do not use the tip in the large segment j, instead, use the closest point\n reference_point_in_j = closest_points_in_jseg[\n -1\n ] # segs_tips[jseg][0]\n closest_points_in_kseg.append(\n segs[kseg][\n np.argmin(\n self.distances_dpt[reference_point_in_j, segs[kseg]]\n )\n ]\n )\n distances.append(\n self.distances_dpt[\n closest_points_in_jseg[-1], closest_points_in_kseg[-1]\n ]\n )\n # print(jseg, '(', segs_tips[jseg][0], 
closest_points_in_jseg[-1], ')',\n # kseg, '(', segs_tips[kseg][0], closest_points_in_kseg[-1], ') :', distances[-1])\n idx = np.argmin(distances)\n kseg_min = kseg_list[idx]\n segs_adjacency[jseg][pos] = kseg_min\n segs_connects[jseg][pos] = closest_points_in_kseg[idx]\n pos_2 = segs_adjacency[iseg].index(jseg)\n segs_adjacency[iseg].pop(pos_2)\n segs_connects[iseg].pop(pos_2)\n segs_adjacency[kseg_min].append(jseg)\n segs_connects[kseg_min].append(closest_points_in_jseg[idx])\n # if we split two clusters, we need to check whether the new segments connect to any of the other\n # old segments\n # if not, we add a link between the new segments, if yes, we add two links to connect them at the\n # correct old segments\n do_not_attach_kseg = False\n for kseg in kseg_list:\n distances = []\n closest_points_in_jseg = []\n closest_points_in_kseg = []\n jseg_list = [\n jseg\n for jseg in range(len(segs))\n if jseg != kseg and jseg not in prev_connecting_segments\n ]\n for jseg in jseg_list:\n reference_point_in_k = segs_tips[kseg][0]\n closest_points_in_jseg.append(\n segs[jseg][\n np.argmin(\n self.distances_dpt[reference_point_in_k, segs[jseg]]\n )\n ]\n )\n # do not use the tip in the large segment j, instead, use the closest point\n reference_point_in_j = closest_points_in_jseg[\n -1\n ] # segs_tips[jseg][0]\n closest_points_in_kseg.append(\n segs[kseg][\n np.argmin(\n self.distances_dpt[reference_point_in_j, segs[kseg]]\n )\n ]\n )\n distances.append(\n self.distances_dpt[\n closest_points_in_jseg[-1], closest_points_in_kseg[-1]\n ]\n )\n idx = np.argmin(distances)\n jseg_min = jseg_list[idx]\n if jseg_min not in kseg_list:\n segs_adjacency_sparse = sp.sparse.lil_matrix(\n (len(segs), len(segs)), dtype=float\n )\n for i, seg_adjacency in enumerate(segs_adjacency):\n segs_adjacency_sparse[i, seg_adjacency] = 1\n G = nx.Graph(segs_adjacency_sparse)\n paths_all = nx.single_source_dijkstra_path(G, source=kseg)\n if jseg_min not in paths_all:\n segs_adjacency[jseg_min].append(kseg)\n segs_connects[jseg_min].append(closest_points_in_kseg[idx])\n segs_adjacency[kseg].append(jseg_min)\n segs_connects[kseg].append(closest_points_in_jseg[idx])\n logg.debug(f' attaching new segment {kseg} at {jseg_min}')\n # if we split the cluster, we should not attach kseg\n do_not_attach_kseg = True\n else:\n logg.debug(\n f' cannot attach new segment {kseg} at {jseg_min} '\n '(would produce cycle)'\n )\n if kseg != kseg_list[-1]:\n logg.debug(' continue')\n continue\n else:\n logg.debug(' do not add another link')\n break\n if jseg_min in kseg_list and not do_not_attach_kseg:\n segs_adjacency[jseg_min].append(kseg)\n segs_connects[jseg_min].append(closest_points_in_kseg[idx])\n segs_adjacency[kseg].append(jseg_min)\n segs_connects[kseg].append(closest_points_in_jseg[idx])\n break\n segs_undecided += [False for i in range(n_add)]",
"def interface_endpoints_coords(cell_a, cell_b):\n corners_mask = interface_endpoints_mask(cell_a, cell_b)\n corners_mask = binary_dilation(corners_mask, selem=np.ones((5, 5)))\n if np.all(~corners_mask):\n raise Exception(\"Zero endpoints found between these cells\")\n # Label the corners and use their centroids as coordinates of the cell interface\n corner_labels = label(corners_mask)\n total = np.max(corner_labels)\n if total == 2:\n centroid_0 = regionprops(corner_labels)[0].centroid\n centroid_1 = regionprops(corner_labels)[1].centroid\n endpoints = (centroid_0, centroid_1)\n else:\n raise Exception(f\"Expected 2 corner mask regions; found {total}\")\n\n return endpoints",
"def extract_inner_boundary(xy, NL, KL, BL, inner_pt=None, check=False):\n # Center the points around some point that is inside the inner region to be extracted\n if inner_pt is not None:\n xy -= inner_pt\n else:\n xy -= np.mean(xy, axis=0)\n\n # Clear periodic bonds from KL\n pbonds = np.where(KL.ravel() < 0)[0]\n if len(pbonds) > 0:\n print 'le: Found periodic bonds in le.extract_inner_boundary(), clearing...'\n KLr = KL.ravel()\n KLr[pbonds] = 0\n KL = KLr.reshape(np.shape(KL))\n print 'le: pbonds = ', pbonds\n\n # If there are dangling points, remove them for now and adjust indices later\n dangles, xy, NL, KL, BL, backtrans = remove_dangling_points(xy, NL, KL, BL, check=check)\n translate_at_end = len(dangles) > 0\n\n # Initialize the list of boundary indices to be larger than necessary\n bb = np.zeros(2 * len(xy), dtype=int)\n\n # Start with the centermost point that is on the right side of the y axis, which is guaranteed to be\n # at the convex hull for an annular sample and thus also at the inner edge.\n # Then take the first step to be along the minimum angle bond\n # Compute radial distance of each particle\n distr2 = xy[:, 0] ** 2 + xy[:, 1] ** 2\n xpositive = np.where(xy[:, 0] > 0)[0]\n if translate_at_end:\n # avoid choosing a dangling particle with no bonds\n selection = np.intersect1d(xpositive, nodangles)\n rightIND = np.where(distr2 == np.min(distr2[selection]))[0]\n else:\n rightIND = np.where(distr2 == np.min(distr2[xpositive]))[0]\n # print 'rightIND = ', rightIND\n # plt.plot(xy[:, 0], xy[:, ])\n # for ii in range(len(xy)):\n # plt.text(xy[ii, 0] + 0.1, xy[ii, 1], str(ii))\n # plt.show()\n # sys.exit()\n # If there are more than one rightmost point, choose one\n if rightIND.size > 1:\n rightIND = rightIND[0]\n\n if check:\n print 'le.extract_inner_boundary(): Found innermost pt: ', rightIND\n print 'le.extract_inner_boundary(): with neighbors: ', NL[rightIND]\n print 'le.extract_inner_boundary(): with connectns: ', KL[rightIND]\n plt.plot(xy[:, 0], xy[:, 1], 'k.')\n plt.plot(xy[rightIND, 0], xy[rightIND, 1], 'bo')\n for ii in range(len(xy)):\n plt.text(xy[ii, 0] + 0.1, xy[ii, 1], str(ii))\n plt.plot(xy[rightIND, 0], xy[rightIND, 1], 'ro')\n plt.pause(0.1)\n\n # Grab the true neighbors of this starting point\n print 'le.extract_inner_boundary(): NL[rightIND, :] = ', NL[rightIND, :]\n neighbors = NL[rightIND, np.argwhere(KL[rightIND].ravel()).ravel()]\n print 'le.extract_inner_boundary(): neighbors = ', neighbors\n print 'le.extract_inner_boundary(): rightIND = ', rightIND\n\n # Take the second particle to be the one with the smallest bond angle above pi (might be <= 3pi/2, but not\n # necessarily).\n # Compute the angles of the neighbor bonds and add pi\n angles = np.mod(np.arctan2(xy[neighbors, 1] - xy[rightIND, 1], xy[neighbors, 0] - xy[rightIND, 0]).ravel() + np.pi,\n 2 * np.pi)\n nextIND = neighbors[angles == min(angles)][0]\n bb[0] = rightIND\n dmyi = 1\n\n if check:\n print 'KL[rightIND] = ', KL[rightIND]\n print 'KL[rightIND,0] = ', KL[rightIND, 0]\n print 'KL[rightIND,0] ==0 ', KL[rightIND, 0] == 0\n print 'np.argwhere(KL[rightIND]) = ', np.argwhere(KL[rightIND])\n print 'np.argwhere(KL[rightIND].ravel())= ', np.argwhere(KL[rightIND].ravel())\n print 'neighbors = ', neighbors\n print 'angles = ', angles\n\n # This part, commented out, was a red herring\n # It is possible for the first particle to be attached to only one other site. 
If this is the case, then we need to\n # add its neighbor to the bb array and take the next max angle with respect to that bond instead of the min angle.\n # while len(angles) == 1:\n # print 'le.extract_inner_boundary(): there is only one neighbor for the first identified boundary particle'\n # bb[dmyi] = nextIND\n # angles, neighbors = bond_angles_wrt_bond(bb[dmyi - 1], nextIND, xy, BL, KL)\n # nextIND = neighbors[angles == max(angles)][0]\n # # print 'nextIND = ', nextIND\n\n if check:\n print 'bb = ', bb\n # sys.exit()\n # as long as we haven't completed the full outer edge/boundary, add nextIND\n while nextIND != rightIND:\n # print '\\n nextIND = ', nextIND\n # print 'np.argwhere(KL[nextIND]) = ', np.argwhere(KL[nextIND]).ravel()\n bb[dmyi] = nextIND\n angles, neighbors = bond_angles_wrt_bond(bb[dmyi - 1], nextIND, xy, NL, KL)\n nextIND = neighbors[angles == min(angles)][0]\n # print 'nextIND = ', nextIND\n\n if check:\n plt.plot(xy[:,0],xy[:,1],'k.')\n XY = np.vstack([xy[bb[dmyi], :], xy[nextIND, :]])\n plt.plot(XY[:, 0], XY[:, 1], 'r-')\n for i in range(len(xy)):\n plt.text(xy[i,0] + 0.2, xy[i, 1], str(i))\n plt.gca().set_aspect('equal')\n plt.show()\n\n dmyi += 1\n\n # Truncate the list of boundary indices\n inner_boundary = bb[0:dmyi]\n\n # Since some points were removed from the boundary identification, translate\n # indices back to indices of original xy\n if translate_at_end:\n print 'le.extract_boundary(): Translating boundary points back into original indices...'\n inner_boundary = backtrans[inner_boundary]\n\n return inner_boundary",
"def is_endpoint(color):\n\n img = cv2.cvtColor(color, cv2.COLOR_RGB2BGR)\n blur = cv2.GaussianBlur(img,(5,5),0)\n\n lower_range = np.array([175, 175, 175], dtype=np.uint8)\n upper_range = np.array([255, 255, 255], dtype=np.uint8)\n\n mask = cv2.inRange(blur, lower_range, upper_range)\n res = cv2.bitwise_and(img,img, mask= mask)\n\n bilateral_filtered_image = cv2.bilateralFilter(res, 5, 175, 175)\n edge_detected_image = cv2.Canny(bilateral_filtered_image, 75, 200)\n\n _, contours, _= cv2.findContours(edge_detected_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n contour_list = []\n for contour in contours:\n \tapprox = cv2.approxPolyDP(contour,0.01*cv2.arcLength(contour,True),True)\n \tarea = cv2.contourArea(contour)\n \tif ((len(approx) > 8) & (area > 10000) & (area < 30000)):\n \t\tcontour_list.append(contour)\n\n if not len(contour_list)==0:\n \treturn True\n else:\n \treturn False",
"def searchPairsBinaryOrient(_session, _beg, _end, _const):\n res = []\n it1 = _session.create_iterator(_session.sc_constraint_new(sc_constants.CONSTR_5_a_a_f_a_f, \n sc.SC_NODE | _const, # 2 \n sc.SC_ARC | sc.SC_POS | _const, # 1\n _beg, # 0\n sc.SC_ARC | sc.SC_POS | _const, # 5\n keynodes.n_1), # 6\n True)\n while not it1.is_over():\n # check if node 2 is a sheaf\n if checkIncToSets(_session, it1.value(0), [keynodes.info.stype_sheaf], sc.SC_POS | sc.SC_CONST):\n # finding other nodes to end\n res2 = _session.search_one_shot(_session.sc_constraint_new(sc_constants.CONSTR_5_f_a_f_a_f,\n it1.value(0), # 2\n sc.SC_ARC | sc.SC_POS | _const, # 3\n _end, # 4\n sc.SC_ARC | sc.SC_POS | _const, # 7\n keynodes.n_2\n ), True, 5)\n if res2 is not None:\n res.append((_beg, it1.value(1), it1.value(0), res2[1], _end, \n it1.value(4), keynodes.n_1, res[3], keynodes.n_2))\n it1.next()\n \n if len(res) > 0:\n return res\n \n return None",
"def find_approx_fb(self,min_offset,max_offset,min_cdp,max_cdp,offset_spacing,n_split=100):\n fbs=np.zeros((self.dataset['pred_avg'].shape[0],1))\n min_offset=(min_offset+offset_spacing)*self.scalar_offset\n max_offset=(max_offset-offset_spacing)*self.scalar_offset\n offsets=np.arange(min_offset,max_offset,offset_spacing*self.scalar_offset)\n for i,coffset in enumerate(offsets):\n print('Working on central offset:\\t{}'.format(coffset/self.scalar_offset))\n obin_trcs=np.where(np.logical_and(self.dataset['cdp'][:]<=max_cdp,np.logical_and(self.dataset['cdp'][:]>=min_cdp,np.logical_and(self.dataset['offset'][:]>=coffset-offset_spacing,self.dataset['offset'][:]<coffset+offset_spacing))))[0]\n tmp1=np.array_split(obin_trcs,n_split) \n if len(obin_trcs)>10:\n for k,l in enumerate(tmp1):\n tmp0=self.dataset['pred_avg'][list(tmp1[k]),:]\n tmp2=np.sum(tmp0,axis=0)\n tmp2=np.where(tmp2[:]==np.amax(tmp2))[0]\n for m,n in enumerate(tmp1[k]):\n fbs[n]=np.int(tmp2)\n else:\n print('Not enough traces in a splitted offset bin')",
"def find_crime_areas(segmented_points):\n\n # once all of the interpolated points are loaded into segmented_points\n # loop through them again to find out which places are high crime.\n bad_neighborhood_crime_index = 0.2\n\n for j in range(1, len(segmented_points)):\n print \"segmented_points[j]\", segmented_points[j]\n # ====================================================================\n # waypoint algorithm fleshing out\n # ====================================================================\n if segmented_points[j]['crime_index'] > bad_neighborhood_crime_index:\n # get the center of the geohash\n print \"This is a bad neighborhood\"\n\n # this is probably temporary, for display purposes\n segmented_points[j]['is_high_crime'] = True\n\n # do a conditional that if the bad neighborhood is at\n # len(segmented_points) we need to go get the end dict\n\n # now that we know what the bad neighborhood point is, let's get\n # the latitude, longitude from the point before and the point after\n if 'lat' not in segmented_points[j-1] or 'lng' not in segmented_points[j-1]:\n point_before = (segmented_points[j-1]['data']['start']['lat'],\n segmented_points[j-1]['data']['start']['lng'])\n else:\n point_before = (segmented_points[j-1]['lat'],\n segmented_points[j-1]['lng'])\n\n if 'lat' not in segmented_points[j+1] or 'lng' not in segmented_points[j+1]:\n point_after = (segmented_points[j+1]['data']['end']['lat'],\n segmented_points[j+1]['data']['end']['lng'])\n else:\n point_after = (segmented_points[j+1]['lat'],\n segmented_points[j+1]['lng'])\n\n current_point = (segmented_points[j]['lat'],\n segmented_points[j]['lng'])\n\n # before calling inspect_waypoints, check the deltas for the\n # step before and the step after to determine whether the function\n # needs to be called twice, or four times, and what direction to go\n # get the change in latitude and longitude between the before\n # and current point location\n delta_lat_before_current = current_point[0] - point_before[0]\n delta_lng_before_current = current_point[1] - point_before[1]\n\n # get the change in latitude and longitude between the before\n # and current point location\n delta_lat_after_current = point_after[0] - current_point[0]\n delta_lng_after_current = point_after[1] - current_point[1]\n\n delta_before_after = [delta_lat_before_current, delta_lng_before_current,\n delta_lat_after_current, delta_lng_after_current]\n\n segmented_points = check_directions_find_waypoint(current_point,\n segmented_points[j],\n delta_before_after,\n segmented_points)\n print \"this is segmented_points[0] returned\", segmented_points[0]\n return segmented_points[0]",
"def pathFinder(M, start, end):\r\n point = M[start-1][end-1]\r\n if point != 0:\r\n pathFinder(M, start, point)\r\n print \"V\" + str(point)\r\n pathFinder(M, point, end)",
"def searchDeadEnd(self):\n boundaries = []\n if not self.red:\n i = self.midWidth - 1\n else:\n i = self.midWidth + 1\n boudaries = [(i, j) for j in range(self.height)]\n validPositions = []\n for i in boudaries:\n if not (i[0], i[1]) in self.walls:\n validPositions.append(i)\n\n dangerPos = []\n\n toExpand = self.scanmap.twoEntryPoints()\n for (x,y) in toExpand:\n adjacent = self.scanmap.adjacentValidPoints(x, y)\n if not (x,y) in dangerPos:\n for (u, w) in adjacent:\n visited = []\n visited.append((x, y))\n safe = False\n danger = False\n DFS = util.Stack()\n DFS.push((u,w))\n while not safe and not danger:\n (i,j) = DFS.pop()\n visited.append((i,j))\n adjacents = self.scanmap.adjacentValidPoints(i,j)\n for position in adjacents:\n if not position in visited:\n DFS.push(position)\n if DFS.isEmpty():\n danger = True\n dangerPos = list(set(dangerPos) | set(visited))\n\n if (i,j) in validPositions:\n safe = True\n oneEntry = self.scanmap.oneEntryPoints()\n dangerPos = list(set(oneEntry).union(set(dangerPos)))\n dangerPos.sort()\n return dangerPos",
"def edge_virtualv2(src, point_list=None, degree=3):\n src = cv2.cvtColor(np.asarray(src), cv2.COLOR_RGBA2BGRA)\n xmax = np.amax(point_list, axis=0)[0]\n ymax = np.amax(point_list, axis=0)[1]\n xmin = np.amin(point_list, axis=0)[0]\n ymin = np.amin(point_list, axis=0)[1]\n widthTrans = xmax - xmin\n heightTrans = ymax - ymin\n side = min(widthTrans, heightTrans)\n\n # create initial mask covering the actual image.\n alpha = src[:, :, 3]\n alpha[0, :], alpha[-1, :], alpha[:, 0], alpha[:, -1] = 0, 0, 0, 0\n idx = np.where(src[:, :, 3] > 15)\n mask = np.zeros(src.shape[:2], dtype=np.uint8)\n mask[idx] = 255\n mask[0, :], mask[-1, :], mask[:, 0], mask[:, -1] = 0, 0, 0, 0\n # iteratively erode the mask and assigned the removed region to division factor in descend.\n if side < 100:\n factors = np.linspace(1.5, 1, 5)\n else:\n factors = np.linspace(2, 1, 10)\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))\n for factor in factors:\n dil = cv2.erode(mask, kernel)\n diff1 = mask - dil\n mask = dil\n removed_loc = np.where(diff1 > 0)\n alpha[removed_loc] = alpha[removed_loc] // factor\n\n src[:, :, 3] = alpha\n img = Image.fromarray(cv2.cvtColor(src, cv2.COLOR_BGRA2RGBA))\n return img, point_list",
"def calculate_interior_pts(image_size, corners):\n\n # YOU SHOULDN'T NEED TO CHANGE THIS\n path = Path(corners)\n\n xx, yy = np.meshgrid(range(image_size[1]), range(image_size[0]))\n xxyy = np.stack([xx.ravel(), yy.ravel()], 1)\n\n interior_ind = path.contains_points(xxyy)\n interior_pts = xxyy[interior_ind]\n \n return interior_pts",
"def locate_interesting_segment(binary_matrix, indeces, beats, during_threshold = 5):\n point = np.zeros([1, 4], dtype = int)\n segments = np.empty([0, 4], dtype = int)\n is_segment_bedin = False\n for index in indeces:\n temp = np.diag(binary_matrix, -index)\n for j in range(len(temp)):\n if (temp[j] == 0 and is_segment_bedin == False) or (temp[j] == 1 and is_segment_bedin == True):\n continue\n else:\n if temp[j] == 1:\n point[0, 0] = index + j\n point[0, 1] = j\n is_segment_bedin = True\n else:\n point[0, 2] = index + j\n point[0, 3] = j\n is_segment_bedin = False\n segments = np.append(segments, point, axis = 0)\n\n # using the time during whose default value is 4s to filter segment\n del_indeces = np.array([], dtype = int)\n new_binary_matrix = binary_matrix.copy()\n for i in range(len(segments)):\n\n time_begin = beats[segments[i, 0]]\n time_end = beats[segments[i, 2]]\n if time_end - time_begin < during_threshold:\n del_indeces = np.append(del_indeces, i)\n\n # set the binary matrix\n for row in range(segments[i, 0], segments[i, 2]):\n row_begin = segments[i, 0]\n col_begin = segments[i, 1]\n new_binary_matrix[row, row - row_begin + col_begin] = 0\n\n segments = np.delete(segments, del_indeces, axis=0)\n\n length = len(segments)\n # the matrix which denote if segment is close with each other\n segments_close_matrix = np.zeros([length, length], dtype = int)\n for i in range(length):\n for j in range(length):\n if i == j:\n continue\n x1 = segments[i, :]\n x2 = segments[j, :]\n\n # determine if segment is close with each other\n if x2[0] >= x1[0] - 5 and x2[2] <= x1[2] + 20 and abs(x2[1] - x1[1]) <= 20 and x2[3] <= x1[3] + 5:\n segments_close_matrix[i, j] = 1\n\n # delete some segments with less than 3 closed segment\n del_indeces = np.array([], dtype=int)\n close_count = np.sum(segments_close_matrix, axis = 0)\n for i in range(len(segments)):\n if close_count[i] < 3:\n del_indeces = np.append(del_indeces, i)\n\n # set the binary matrix\n for row in range(segments[i, 0], segments[i, 2]):\n row_begin = segments[i, 0]\n col_begin = segments[i, 1]\n new_binary_matrix[row, row - row_begin + col_begin] = 0\n\n segments = np.delete(segments, del_indeces, axis = 0)\n # plt.matshow(new_binary_matrix, cmap=plt.cm.gray)\n # plt.show()\n\n return segments, new_binary_matrix"
] |
[
"0.63463396",
"0.59707016",
"0.58227015",
"0.5684077",
"0.5558691",
"0.54850435",
"0.5458841",
"0.5346233",
"0.5345577",
"0.5324624",
"0.5304109",
"0.52686787",
"0.5258255",
"0.5256899",
"0.52393764",
"0.52276045",
"0.5224099",
"0.5215032",
"0.5209798",
"0.51924145",
"0.51746273",
"0.5172368",
"0.5134167",
"0.51319176",
"0.513117",
"0.5111521",
"0.5082989",
"0.5062336",
"0.50592095",
"0.505143"
] |
0.640198
|
0
|
Shrink segments in a segmented surface mesh by a fraction of each segment's maximum depth, either for all segments or only for segments in regions with multiple segments.
|
def shrink_segments(regions, segments, depths, shrink_factor=0.25,
only_multiple_segments=False):
import numpy as np
print('Shrink segments')
remove_fraction = 1 - shrink_factor
shrunken_segments = -1 * np.ones(len(depths))
# Make sure arguments are numpy arrays
if not isinstance(segments, np.ndarray):
segments = np.array(segments)
if not isinstance(depths, np.ndarray):
depths = np.array(depths)
# Shrink only segments in regions with multiple segments
if only_multiple_segments:
print(' Shrink each segment to {0:.2f} of its depth for regions with '
'multiple segments'.format(shrink_factor))
# For each region
unique_regions = [x for x in np.unique(regions) if x > -1]
for n_region in unique_regions:
# Check to see if there are multiple segments in the region
indices_region = [i for i,x in enumerate(regions) if x == n_region]
segments_in_region = [x for x in np.unique(segments[indices_region])
if x > -1]
if len(segments_in_region) > 1:
# Shrink each segment in the region
for n_segment in segments_in_region:
indices_segment = [i for i,x in enumerate(segments)
if x == n_segment]
indices_segment = list(frozenset(indices_segment).intersection(indices_region))
depth_threshold = remove_fraction * np.max(depths[indices_segment])
indices_segment = [x for x in indices_segment
if depths[x] > depth_threshold]
shrunken_segments[indices_segment] = n_segment
# Shrink all segments
else:
print(' Shrink each segment to {0:.2f} of its depth'.format(shrink_factor))
unique_segments = [x for x in np.unique(segments) if x != -1]
for n_segment in unique_segments:
indices_segment = [i for i,x in enumerate(segments) if x == n_segment]
depth_threshold = remove_fraction * np.max(depths[indices_segment])
indices_segment = [x for x in indices_segment
if depths[x] > depth_threshold]
shrunken_segments[indices_segment] = n_segment
return shrunken_segments
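# Minimal usage sketch with toy per-vertex region/segment labels and depth
# values (all made up for illustration):
if __name__ == '__main__':
    regions = [0, 0, 0, 0, 1, 1]
    segments = [0, 0, 1, 1, 2, 2]
    depths = [0.2, 0.9, 0.1, 0.8, 0.3, 0.6]
    # Only vertices deeper than 75% of their segment's maximum depth keep
    # their label; expected result: array([-1., 0., -1., 1., -1., 2.])
    print(shrink_segments(regions, segments, depths, shrink_factor=0.25))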
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def subdivide_mesh(\n vertices: np.ndarray, faces: np.ndarray, subdivide_threshold: float\n) -> np.ndarray:\n\n max_edge = get_vertices_bounding_box(vertices)[-1] * subdivide_threshold\n\n return trimesh.remesh.subdivide_to_size(vertices, faces, max_edge)[0]",
"def callback(mesh):\n shrunk = mesh.shrink(0.9)\n mesh.overwrite(shrunk) # must operate \"in-place\" by overwrite",
"def shrink(self):\n for i in range(1, len(self.vertices)):\n self.vertices[i] = self.vertices[0] + self.sigma*(self.vertices[i]-self.vertices[0])",
"def _shrink_secondary(self, amt):\n self._resize_secondary(-amt)",
"def SetSurfaceSegmentMode(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ShapeDivide_SetSurfaceSegmentMode(self, *args)",
"def subdivide_mesh(obj, n_subdiv=2):\n thisfunc = thisfile + '->subdivide_mesh()'\n\n scene = bpy.context.scene\n\n # All objects need to be in 'OBJECT' mode to apply modifiers -- maybe a Blender bug?\n for o in bpy.data.objects:\n scene.objects.active = o\n bpy.ops.object.mode_set(mode='OBJECT')\n o.select = False\n obj.select = True\n scene.objects.active = obj\n\n bpy.ops.object.modifier_add(type='SUBSURF')\n obj.modifiers['Subsurf'].subdivision_type = 'CATMULL_CLARK'\n obj.modifiers['Subsurf'].levels = n_subdiv\n obj.modifiers['Subsurf'].render_levels = n_subdiv\n\n # Apply modifier\n bpy.ops.object.modifier_apply(modifier='Subsurf', apply_as='DATA')\n\n # Scene update necessary, as matrix_world is updated lazily\n scene.update()\n\n logging.info(\"%s: Subdivided mesh of '%s'\", thisfunc, obj.name)",
"def shrink(self):\r\n # first we need to decide how to shrink\r\n choice = rand.choice([0, 1, 2, 3])\r\n # now do it\r\n if ((choice == 0) and (self.xspan > mparam.min_s_xspan)):\r\n # delete first row\r\n self.cells = np.delete(self.cells, (0), axis=0) \r\n elif ((choice == 1) and (self.xspan > mparam.min_s_xspan)):\r\n # delete last row\r\n self.cells = np.delete(self.cells, (-1), axis=0) \r\n elif ((choice == 2) and (self.yspan > mparam.min_s_yspan)):\r\n # delete first column\r\n self.cells = np.delete(self.cells, (0), axis=1) \r\n elif ((choice == 3) and (self.yspan > mparam.min_s_yspan)):\r\n # delete last column\r\n self.cells = np.delete(self.cells, (-1), axis=1) \r\n # now let's update xspan and yspan to the new size\r\n self.xspan = self.cells.shape[0]\r\n self.yspan = self.cells.shape[1]\r\n #\r",
"def setSurfaceSize(xmin, xmax, ymin, ymax):\n dislin.sursze(xmin, xmax, ymin, ymax)",
"def shrink(self):\n self.mass *= 0.8",
"def blurSurf(surface, amt):\n amt = max(1, amt)\n scale = 1.0/float(amt)\n surf_size = surface.get_size()\n scale_size = (int(surf_size[0]*scale), int(surf_size[1]*scale))\n surf = pygame.transform.smoothscale(surface, scale_size)\n surf = pygame.transform.smoothscale(surf, surf_size)\n return surf",
"def resizeZ(self,zMin=None,zMax=None,shrink=False,dryrun=False):\n if zMin is None:\n zMin = self.z[0]\n if zMax is None:\n zMax = self.z[-1]\n if not shrink:\n if zMin > self.z[0]:\n print('zMin not changed from',self.z[0],'to',zMin)\n return\n if zMax < self.z[-1]:\n print('zMax not changed from',self.z[-1],'to',zMax)\n return\n\n self.zbot = zMin\n\n imin = int((zMin-self.z[0])/self.dz)\n imax = int(np.ceil((zMax-self.z[0])/self.dz))\n zMin = imin*self.dz + self.z[0]\n zMax = imax*self.dz + self.z[0]\n ioff = int((self.z[0]-zMin)/self.dz)\n if dryrun: sys.stdout.write('(DRY RUN) ')\n print('Resizing fluctuations field in z-dir from [',\n self.z[0],self.z[-1],'] to [',zMin,zMax,']')\n print(' before:',self.U.shape)\n \n newNZ = imax-imin+1\n Unew = np.zeros((3,self.N,self.NY,newNZ))\n Tnew = np.zeros(( self.N,self.NY,newNZ))\n for iz in range(ioff):\n Unew[:,:,:,iz] = self.U[:,:,:,0]\n Tnew[ :,:,iz] = self.T[ :,:,0]\n if not shrink:\n Unew[:,:,:,ioff:ioff+self.NZ] = self.U\n Tnew[ :,:,ioff:ioff+self.NZ] = self.T\n else:\n iupper = np.min((ioff+self.NZ, newNZ))\n Unew[:,:,:,ioff:iupper] = self.U[:,:,:,:iupper-ioff]\n Tnew[ :,:,ioff:iupper] = self.T[ :,:,:iupper-ioff]\n print(' after:',Unew.shape)\n if not dryrun:\n self.U = Unew\n self.T = Tnew\n self.NZ = newNZ\n\n znew = self.zbot + np.arange(newNZ,dtype=self.realtype)*self.dz\n if not dryrun:\n print('Updating z coordinates')\n self.z = znew\n else:\n print('(DRY RUN) z coordinates:',znew)\n\n if not dryrun:\n print('Resetting scaling function')\n self.scaling = np.ones((3,newNZ))",
"def ResSurfaces(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_SplitSurface_ResSurfaces(self, *args)",
"def SetSurfaceSegmentMode(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_FaceDivide_SetSurfaceSegmentMode(self, *args)",
"def blur_surf(surface, amt):\n\n scale = 1.0/float(amt)\n surf_size = surface.get_size()\n scale_size = (int(surf_size[0]*scale), int(surf_size[1]*scale))\n surf = pygame.transform.smoothscale(surface, scale_size)\n surf = pygame.transform.smoothscale(surf, surf_size)\n return surf",
"def fracture(self, max_points=_max_points, precision=1e-3):\n if max_points > 4:\n ii = 0\n while ii < len(self.polygons):\n if len(self.polygons[ii]) > max_points:\n pts0 = sorted(self.polygons[ii][:, 0])\n pts1 = sorted(self.polygons[ii][:, 1])\n ncuts = len(pts0) // max_points\n if pts0[-1] - pts0[0] > pts1[-1] - pts1[0]:\n # Vertical cuts\n cuts = [\n pts0[int(i * len(pts0) / (ncuts + 1.0) + 0.5)]\n for i in range(1, ncuts + 1)\n ]\n chopped = clipper._chop(self.polygons[ii], cuts, 0,\n 1 / precision)\n else:\n # Horizontal cuts\n cuts = [\n pts1[int(i * len(pts1) / (ncuts + 1.0) + 0.5)]\n for i in range(1, ncuts + 1)\n ]\n chopped = clipper._chop(self.polygons[ii], cuts, 1,\n 1 / precision)\n self.polygons.pop(ii)\n layer = self.layers.pop(ii)\n datatype = self.datatypes.pop(ii)\n self.polygons.extend(\n numpy.array(x)\n for x in itertools.chain.from_iterable(chopped))\n npols = sum(len(c) for c in chopped)\n self.layers.extend(layer for _ in range(npols))\n self.datatypes.extend(datatype for _ in range(npols))\n else:\n ii += 1\n return self",
"def _shrink_main(self, amt):\n self.ratio -= amt\n self.ratio = max(self.min_ratio, self.ratio)",
"def reduceFrame(self, depth, height_ratio = 0.5, sub_sample = 0.3, reduce_to = 'lower'):\n if (height_ratio > 1.0) or (height_ratio < 0.0)\\\n or (sub_sample > 1.0) or (sub_sample < 0.0):\n print('height_ratio and sub_sample must be between 0 and 1')\n exit(1)\n \n depth_copy = depth.copy()\n height = depth_copy.shape[0]\n h = int(height_ratio*(height))\n cols_to_cut = 0\n\n # catches the case when all rows are kept\n if height_ratio == 1:\n d_short = depth_copy\n\n elif reduce_to == 'lower':\n d_short = depth_copy[(height - h):,\\\n cols_to_cut:-(cols_to_cut+1)]\n\n elif reduce_to == 'middle_lower':\n upper_brdr = int(3*(height/4.0) - h/2)\n lower_brdr = upper_brdr + h\n d_short = depth_copy[upper_brdr:lower_brdr,\\\n cols_to_cut:-(cols_to_cut+1)]\n\n elif reduce_to == 'middle':\n upper_brdr = int((height - h)/2.0)\n lower_brdr = upper_brdr + h\n d_short = depth_copy[upper_brdr:lower_brdr,\\\n cols_to_cut:-(cols_to_cut+1)]\n\n elif reduce_to == 'middle_upper':\n upper_brdr = int((height/4.0) - h/2)\n lower_brdr = upper_brdr + h\n d_short = depth_copy[upper_brdr:lower_brdr,\\\n cols_to_cut:-(cols_to_cut+1)]\n\n elif reduce_to == 'upper':\n d_short = depth_copy[:h, cols_to_cut:-(cols_to_cut+1)]\n\n d_short[d_short <= 0] = np.nan\n d_short[d_short > self.max_depth] = np.nan\n \n rescaled = rescale(d_short, sub_sample, mode='reflect', multichannel=False, anti_aliasing=True)\n\n return rescaled",
"def _shrink(self, cidx, amt):\n # get max resizable amount\n margin = self.get_shrink_margin(cidx)\n if amt > margin: # too much\n self.relative_sizes[cidx] -= self._get_relative_size_from_absolute(margin)\n return amt - margin\n else:\n self.relative_sizes[cidx] -= self._get_relative_size_from_absolute(amt)\n return 0",
"def _densify(self, geom, segment):\n # temporary solution for readthedocs fail. - cannot mock osgeo\n try:\n from osgeo import ogr\n except ModuleNotFoundError:\n import warnings\n\n warnings.warn(\"OGR (GDAL) is required.\")\n\n poly = geom\n wkt = geom.wkt # shapely Polygon to wkt\n geom = ogr.CreateGeometryFromWkt(wkt) # create ogr geometry\n geom.Segmentize(segment) # densify geometry by 2 metres\n geom.CloseRings() # fix for GDAL 2.4.1 bug\n wkt2 = geom.ExportToWkt() # ogr geometry to wkt\n try:\n new = loads(wkt2) # wkt to shapely Polygon\n return new\n except Exception:\n return poly",
"def remove_subdivison(self):\n temp_sub_vertices = []\n for index in range(0, len(self.subdivision_list) - 1, 4):\n v0 = Vec3d(0, 0, 0, 0)\n v1 = Vec3d(0, 0, 0, 0)\n v2 = Vec3d(0, 0, 0, 0)\n\n v0.x = self.subdivision_list[index + 1][0].x\n v0.y = self.subdivision_list[index + 1][0].y\n v0.z = self.subdivision_list[index + 1][0].z\n v0.w = self.subdivision_list[index + 1][0].w\n\n v1.x = self.subdivision_list[index + 2][0].x\n v1.y = self.subdivision_list[index + 2][0].y\n v1.z = self.subdivision_list[index + 2][0].z\n v1.w = self.subdivision_list[index + 2][0].w\n\n v2.x = self.subdivision_list[index + 3][0].x\n v2.y = self.subdivision_list[index + 3][0].y\n v2.z = self.subdivision_list[index + 3][0].z\n v2.w = self.subdivision_list[index + 3][0].w\n\n temp_sub_vertices.append([v0, v1, v2])\n\n self.subdivision_list = temp_sub_vertices",
"def minimize_individual_blocks(self):\n i = len(self.blocks) - 1\n while i >= 0:\n u, v = self.blocks[i].bounds\n Lexical.shrink(\n self.shrink_target.buffer[u:v],\n lambda b: self.try_shrinking_blocks((i,), b),\n random=self.random,\n full=False,\n )\n i -= 1",
"def scale(self, size):\n self._surf = pygame.transform.smoothscale(self._surf, size).convert_alpha()",
"def shrink(self):\n if self.focused == 0:\n self._shrink_main(self.change_ratio)\n elif len(self.clients) == 2:\n self._shrink_solo_secondary(self.change_ratio)\n else:\n self._shrink_secondary(self.change_size)\n self.group.layout_all()",
"def scale_depth_disp(self, pred): # TODO\n disp = 1. / pred\n min_disp = 1. / self.hparams.max_depth\n max_disp = 1. / self.hparams.min_depth\n scaled_disp = min_disp + (max_disp - min_disp) * ((disp - np.min(disp)) / (np.max(disp) - np.min(disp)))\n scaled_depth = 1. / scaled_disp\n return scaled_disp, scaled_depth",
"def create_partition(mesh,polygons,enforce_exact=False):",
"def _shrink_arr(self):\n self._resize_arr(self._capacity // self._growth_factor)",
"def shrink_polygon(self,polygon, offset = 1):\r\n \r\n import numpy as np\r\n import copy\r\n import math\r\n \r\n def angle(x1, y1, x2, y2):\r\n numer = (x1*x2 + y1*y2)\r\n denom = np.sqrt((x1**2 + y1**2) * (x2**2 + y2**2))\r\n print(numer)\r\n print(denom)\r\n print( math.acos(numer/denom) )\r\n return math.acos(numer/denom) \r\n \r\n def cross_sign(x1, y1, x2, y2):\r\n return x1*y2 > x2*y1\r\n \r\n # If the polygon is closed, un-close it\r\n closed = False\r\n if np.linalg.norm(polygon[0,:]-polygon[-1,:]) < 1E-10:\r\n polygon = polygon[:-1,:]\r\n closed = True\r\n \r\n # Make sure polygon is counter-clockwise\r\n if self.are_vertices_clockwise(np.row_stack((polygon,polygon[0,:]))):\r\n polygon = np.flipud(polygon)\r\n \r\n polygon_shrinked = copy.copy(polygon)\r\n \r\n for idx in range(polygon.shape[0]):\r\n \r\n if idx == polygon.shape[0]-1:\r\n vtx_before = idx-1\r\n vtx_center = idx\r\n vtx_after = 0\r\n else:\r\n vtx_before = idx-1\r\n vtx_center = idx\r\n vtx_after = idx+1\r\n \r\n side_before = polygon[vtx_center,:] - polygon[vtx_before,:]\r\n side_after = polygon[vtx_after,:] - polygon[vtx_center,:]\r\n \r\n side_before /= np.linalg.norm(side_before)\r\n side_after /= np.linalg.norm(side_after)\r\n \r\n nvec_before = np.asarray([-side_before[1], side_before[0]])\r\n nvec_after = np.asarray([-side_after[1], side_after[0]])\r\n \r\n vtx1_before = polygon[vtx_before,:] + nvec_before*offset\r\n vtx2_before = polygon[vtx_center,:] + nvec_before*offset\r\n \r\n vtx1_after = polygon[vtx_center,:] + nvec_after*offset\r\n vtx2_after = polygon[vtx_after,:] + nvec_after*offset\r\n \r\n p = vtx1_before\r\n r = (vtx2_before-vtx1_before)\r\n \r\n q = vtx1_after\r\n s = (vtx2_after-vtx1_after)\r\n \r\n if np.cross(r,s) == 0:\r\n \r\n # Lines are collinear\r\n polygon_shrinked[idx,:] = vtx2_before\r\n \r\n else:\r\n \r\n # Lines are not collinear\r\n t = np.cross(q - p,s)/(np.cross(r,s))\r\n \r\n # This is the intersection point\r\n polygon_shrinked[idx,:] = p + t*r\r\n \r\n if closed:\r\n polygon_shrinked = np.row_stack((\r\n polygon_shrinked,\r\n polygon_shrinked[0,:]))\r\n \r\n return polygon_shrinked",
"def shrink_polygon(self,polygon, offset = 1):\r\n \r\n import numpy as np\r\n import copy\r\n import math\r\n \r\n def angle(x1, y1, x2, y2):\r\n numer = (x1*x2 + y1*y2)\r\n denom = np.sqrt((x1**2 + y1**2) * (x2**2 + y2**2))\r\n print(numer)\r\n print(denom)\r\n print( math.acos(numer/denom) )\r\n return math.acos(numer/denom) \r\n \r\n def cross_sign(x1, y1, x2, y2):\r\n return x1*y2 > x2*y1\r\n \r\n # If the polygon is closed, un-close it\r\n closed = False\r\n if np.linalg.norm(polygon[0,:]-polygon[-1,:]) < 1E-10:\r\n polygon = polygon[:-1,:]\r\n closed = True\r\n \r\n # Make sure polygon is counter-clockwise\r\n if self.are_vertices_clockwise(np.row_stack((polygon,polygon[0,:]))):\r\n polygon = np.flipud(polygon)\r\n \r\n polygon_shrinked = copy.copy(polygon)\r\n \r\n for idx in range(polygon.shape[0]):\r\n \r\n if idx == polygon.shape[0]-1:\r\n vtx_before = idx-1\r\n vtx_center = idx\r\n vtx_after = 0\r\n else:\r\n vtx_before = idx-1\r\n vtx_center = idx\r\n vtx_after = idx+1\r\n \r\n side_before = polygon[vtx_center,:] - polygon[vtx_before,:]\r\n side_after = polygon[vtx_after,:] - polygon[vtx_center,:]\r\n \r\n side_before /= np.linalg.norm(side_before)\r\n side_after /= np.linalg.norm(side_after)\r\n \r\n nvec_before = np.asarray([-side_before[1], side_before[0]])\r\n nvec_after = np.asarray([-side_after[1], side_after[0]])\r\n \r\n vtx1_before = polygon[vtx_before,:] + nvec_before*offset\r\n vtx2_before = polygon[vtx_center,:] + nvec_before*offset\r\n \r\n vtx1_after = polygon[vtx_center,:] + nvec_after*offset\r\n vtx2_after = polygon[vtx_after,:] + nvec_after*offset\r\n \r\n p = vtx1_before\r\n r = (vtx2_before-vtx1_before)\r\n \r\n q = vtx1_after\r\n s = (vtx2_after-vtx1_after)\r\n \r\n if np.cross(r,s) == 0:\r\n \r\n # Lines are collinear\r\n polygon_shrinked[idx,:] = vtx2_before\r\n \r\n else:\r\n \r\n # Lines are not collinear\r\n t = np.cross(q - p,s)/(np.cross(r,s))\r\n \r\n # This is the intersection point\r\n polygon_shrinked[idx,:] = p + t*r\r\n \r\n if closed:\r\n polygon_shrinked = np.row_stack((\r\n polygon_shrinked,\r\n polygon_shrinked[0,:]))\r\n \r\n return polygon_shrinked",
"def scale(self, size):\n self._surf = pygame.transform.smoothscale(self._surf,\n size).convert_alpha()\n self._version += 1\n return self",
"def shrink_kernel(self, kernel, up_scale):\n up_scale = torch.tensor(up_scale).float()\n # boundary padding based on the scaling law\n pad_in = (torch.ceil(up_scale**2).int())*((kernel.shape[2]-1)//2)\n pad_h = (torch.ceil(up_scale).int())*((kernel.shape[3]-1)//2)\n pad_w = (torch.ceil(up_scale).int())*((kernel.shape[4]-1)//2)\n padded_kernel = F.pad(kernel, (pad_w, pad_w, pad_h, pad_h, pad_in, pad_in))\n delta = up_scale%1\n \n if delta == 0:\n shrink_factor = 1\n else:\n # shrink_factor for coordinates.\n shrink_factor = (((kernel.shape[4]-1))/(padded_kernel.shape[-1]-1)*(up_scale+1))\n \n # Adjustment to deal with weird filtering on the grid sample function.\n shrink_factor = 1.5*(shrink_factor-0.5)**3 + 0.57 \n\n grid = torch.meshgrid(torch.linspace(-1, 1, kernel.shape[2])*(shrink_factor**2),\n torch.linspace(-1, 1, kernel.shape[3])*shrink_factor, \n torch.linspace(-1, 1, kernel.shape[4])*shrink_factor)\n\n grid = torch.cat([grid[2].unsqueeze(0).unsqueeze(-1), \n grid[1].unsqueeze(0).unsqueeze(-1), \n grid[0].unsqueeze(0).unsqueeze(-1)], dim = -1).repeat(kernel.shape[0],1,1,1,1)\n\n new_kernel = F.grid_sample(padded_kernel, grid.to(device))\n if kernel.shape[-1] - 2*up_scale > 0:\n new_kernel = new_kernel * (kernel.shape[-1]**2/((kernel.shape[-1] - 2*up_scale)**2 + 0.01))\n return new_kernel"
] |
[
"0.55230653",
"0.5414716",
"0.52755874",
"0.5205108",
"0.5172866",
"0.51585984",
"0.5103534",
"0.506077",
"0.50528574",
"0.5033383",
"0.49927393",
"0.49777892",
"0.49749196",
"0.49594805",
"0.49082968",
"0.49042964",
"0.4895167",
"0.48624578",
"0.48506373",
"0.47627047",
"0.4760278",
"0.47335526",
"0.46957904",
"0.46858722",
"0.4666461",
"0.463939",
"0.4634908",
"0.4634908",
"0.4605402",
"0.4599849"
] |
0.6899877
|
0
|
Retrieve the PODPAC semantic version as a string. Returns str: the semantic version if outside a git repository, or the output of `git describe --always --tags` if inside the git repository.
|
def version():
version_full = semver()
CWD = os.path.dirname(__file__)
got_git = os.path.exists(os.path.join(os.path.dirname(__file__), "..", ".git"))
if not got_git:
return version_full
try:
# determine git binary
git = "git"
try:
subprocess.check_output([git, "--version"])
except Exception:
git = "/usr/bin/git"
try:
subprocess.check_output([git, "--version"])
except Exception as e:
return version_full
version_full = subprocess.check_output([git, "describe", "--always", "--tags"], cwd=CWD).strip().decode("ascii")
version_full = version_full.replace("-", "+", 1).replace("-", ".") # Make this consistent with PEP440
except Exception as e:
print("Could not determine PODPAC version from git repo.\n" + str(e))
return version_full
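# Minimal usage sketch; assumes the surrounding module provides semver() and
# imports os and subprocess, as the function above implies:
if __name__ == '__main__':
    print(version())  # e.g. "1.2.3" outside git, or a git-describe-based string inside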
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_version():\n try:\n code, output = _run_cmd('git', 'describe', '--tags')\n if code:\n return 'unknown'\n output = output.decode('utf8').strip().split('-')\n if len(output) != 3:\n return 'unknown'\n version = '%s+%s' % (output[0], output[2])\n\n code, _ = _run_cmd('git', 'diff', '--quiet')\n if code:\n version += '+dirty'\n\n return version\n except OSError:\n return 'unknown'",
"def gitversion():\n import os\n from subprocess import Popen, PIPE, STDOUT\n origdir = os.getcwd()\n os.chdir(os.path.dirname(__file__))\n try:\n p = Popen(['git', \"describe\", \"--tags\", \"--dirty\", \"--always\"], stdout=PIPE, stderr=STDOUT)\n except EnvironmentError:\n return 'unknown'\n\n os.chdir(origdir)\n out = p.communicate()[0]\n if p.returncode == 0:\n #- avoid py3 bytes and py3 unicode; get native str in both cases\n return str(out.rstrip().decode('ascii'))\n else:\n return 'unknown'",
"def get_ver():\n import subprocess\n\n proc = subprocess.run(\n [\"git\", \"describe\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n if not proc.returncode == 0:\n return\n v = proc.stdout.decode().strip()\n if \"-\" not in v:\n ret = v\n else:\n csum = v[v.rindex(\"-\") + 1 :]\n base = v[: v.rindex(\"-\")]\n count = base[base.rindex(\"-\") + 1 :]\n tag = base[: base.rindex(\"-\")]\n ret = f\"{tag}.post{count}+{csum}\"\n return ret",
"def version():\n import inspect\n import shlex\n import subprocess\n\n def output(command):\n path = os.path.realpath(os.path.dirname(inspect.stack(0)[0][1]))\n return subprocess.check_output(shlex.split(command), cwd=path).strip()\n\n return (\n output(\"git rev-parse --show-toplevel\"),\n output(\"git remote get-url origin\"),\n output(\"git describe --always\"),\n )",
"def get_version():\n try:\n return check_output(\n \"git describe --tags\".split(\" \")\n ).decode('utf-8').strip()\n except CalledProcessError:\n return check_output(\n \"git rev-parse --short HEAD\".split(\" \")\n ).decode('utf-8').strip()",
"def version():\n\n version = None\n output = gitopen(['--version'])\n m = re.search(br\" version ([\\d\\.A-Za-z]+)\", output)\n if m is not None:\n version = m.group(1).decode('utf-8')\n return version",
"def get_version():\n git_root = find_git_root(dirname(__file__))\n\n if git_root is not None:\n # Get the version using \"git describe\".\n cmd = \"git describe --tags --match [0-9]*\".split()\n try:\n version = subprocess.check_output(cmd).decode().strip()\n except subprocess.CalledProcessError:\n logger.exception(\"Unable to get version number from git tags\")\n exit(1)\n\n # PEP 386 compatibility\n if \"-\" in version:\n version = \".post\".join(version.split(\"-\")[:2])\n\n # Don't declare a version \"dirty\" merely because a time stamp has\n # changed. If it is dirty, append a \".dev1\" suffix to indicate a\n # development revision after the release.\n with open(os.devnull, \"w\") as fd_devnull:\n subprocess.call([\"git\", \"status\"], stdout=fd_devnull, stderr=fd_devnull)\n\n cmd = \"git diff-index --name-only HEAD\".split()\n try:\n dirty = subprocess.check_output(cmd).decode().strip()\n except subprocess.CalledProcessError:\n logger.exception(\"Unable to get git index status\")\n exit(1)\n\n if dirty != \"\":\n version += \".dev1\"\n\n return version\n\n else:\n try:\n return pkg_resources.working_set.by_key[\"graphql-validate\"].version\n except KeyError:\n return \"0.0.0-unreleased\"",
"def git_version():\n def _minimal_ext_cmd(cmd):\n # construct minimal environment\n env = {}\n for k in ['SYSTEMROOT', 'PATH']:\n v = os.environ.get(k)\n if v is not None:\n env[k] = v\n # LANGUAGE is used on win32\n env['LANGUAGE'] = 'C'\n env['LANG'] = 'C'\n env['LC_ALL'] = 'C'\n out = subprocess.Popen(cmd, stdout = subprocess.PIPE, env=env).communicate()[0]\n return out\n\n try:\n out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])\n GIT_REVISION = out.strip().decode('ascii')\n except OSError:\n GIT_REVISION = \"Unknown\"\n return GIT_REVISION",
"def get_base_version():\n if BASE_VERSION is None:\n return shell_output('git describe --tags --abbrev=0')\n return BASE_VERSION",
"def versionstr():\n return \"%d.%d.%d%s\" % (version[0], version[1], version[2],\n '-' + gitstr() if gitstr() else '')",
"def get_setup_version():\n if os.path.isdir(\".git\"):\n process = subprocess.Popen(COMMAND_DESCRIBE_VERSION, **SUBPROCESS_KWARGS)\n process.wait()\n version = process.communicate()[0].decode(\"utf-8\").strip()\n return re.match(re_version, version).group(1)\n else:\n return \"0.1\"",
"def version(self) -> str:\n self.__verify_repo_initialized()\n res = vcompat.get_repository_software_version_spec(self._env.branchenv)\n return str(res)",
"def get_git_version(self) -> str:\n VERSION_PFX = \"git version \"\n version = self.cmd.version()\n if version.startswith(VERSION_PFX):\n version = version[len(VERSION_PFX) :].split()[0]\n else:\n version = \"\"\n return \".\".join(version.split(\".\")[:3])",
"def provide_git_revision(cls):\n version = str(VERSION)\n git_revision = str(GIT_REVISION)\n git_date = str(GIT_DATE)\n if os.path.exists(\".git\"):\n from subprocess import check_output\n command = 'git describe --tags --long --dirty'\n version_string = check_output(command.split()).decode('utf-8').strip()\n if version_string != 'fatal: No names found, cannot describe anything.':\n # git describe -> tag-commits-sha-dirty\n version_string = version_string.replace('-dirty', '')\n version_string = version_string.lstrip('v')\n parts = version_string.split('-')\n parts_len = len(parts)\n # only tag or git sha\n if parts_len == 1:\n if cls.is_git_sha(parts[0]):\n git_revision = parts[0]\n git_revision = git_revision.lstrip('g')\n else:\n version = parts[0]\n if parts_len == 2:\n version = parts[0]\n git_revision = cls.get_git_revision(parts[1])\n if parts_len > 2:\n # git sha\n git_revision = cls.get_git_revision(parts[-1])\n # commits after given tag\n commits = cls.get_commits_count(parts[-2])\n # version based on tag\n version = ''.join(parts[:-1])\n if commits is not None:\n version = ''.join(parts[:-2])\n # normalize rc to rcN for PEP 440 compatibility\n version = version.lower()\n if version.endswith('rc'):\n version += '0'\n else:\n cls.logger.warning(\"Git describe command failed for current git repository\")\n git_date = cls.get_git_date(git_revision)\n else:\n from pkg_resources import get_distribution\n try:\n version, git_revision = get_distribution(\"hivemind\").version.split(\"+\")\n except:\n cls.logger.warning(\"Unable to get version and git revision from package data\")\n cls._save_version_file(version, git_revision, git_date)\n return version, git_revision",
"def semantic_version(self) -> str:\n\n version_core = f\"{self.major_version}.{self.minor_version}.{self.patch_version}\"\n sep = \"-\" if self.pre_release != \"\" else \"\"\n\n return f\"{version_core}{sep}{self.pre_release}\"",
"def version_string():\n git_hash = current_git_hash()\n if git_hash:\n return \"pyhole v%s (%s) - https://github.com/jk0/pyhole\" % (\n __VERSION__, git_hash)\n\n return \"pyhole v%s - https://github.com/jk0/pyhole\" % __VERSION__",
"def get_version():\n return \"0.0.1 (prerelease prototype)\"",
"def get_revision(self) -> str:\n try:\n return self.cmd.rev_parse(verify=True, args=\"HEAD\", check_returncode=True)\n except exc.CommandError:\n return \"initial\"",
"def meta():\n\n meta_version = lain_yaml(ignore_prepare=True).repo_meta_version()\n if meta_version is None:\n error(\"please git commit.\")\n else:\n info(\"meta version : %s\" % lain_yaml(ignore_prepare=True).repo_meta_version())",
"def version():\n version_info = pbr.version.VersionInfo('ardana-service')\n return version_info.version_string_with_vcs()",
"def describe(location):\n ensure_dir(location)\n with utils.cd(location):\n cmd = '/usr/bin/git describe --always'\n return subprocess.check_output(cmd, shell=True).strip()",
"def get_version():\n\n with open('__init__.py') as f:\n for line in f.readlines():\n if '__version__' in line:\n apicem_version = line.strip().split(\"=\")[-1].strip(\" '\")\n if '__first_release_date__' in line:\n first_release_data_str = line.strip().split(\"=\")[-1].strip(\" '\")\n first_release_data = date(*[int(num) for num in first_release_data_str.split('.')])\n num_commits = get_cr_num(first_release_data)\n return '{apicem_version}.{num_commits}'.format(\n apicem_version=apicem_version, num_commits=num_commits)\n\n raise ValueError(\"could not read version\")",
"def get_target_git_version():\n if os.path.exists(os.path.join(ROOT_DIR, '.git_bleeding_edge')):\n git_version_file = 'git_version_bleeding_edge.txt'\n else:\n git_version_file = 'git_version.txt'\n with open(os.path.join(THIS_DIR, git_version_file)) as f:\n return f.read().strip()",
"def getversion(online: bool = True) -> str:\n branches = {\n 'master': 'branches/master',\n 'stable': 'branches/stable',\n }\n data = getversiondict()\n data['cmp_ver'] = 'n/a'\n local_hsh = data.get('hsh', '')\n hsh = {}\n\n if online:\n if not local_hsh:\n data['cmp_ver'] = 'UNKNOWN'\n else:\n for branch, path in branches.items():\n with suppress(Exception):\n hsh[getversion_onlinerepo(path)] = branch\n if hsh:\n data['cmp_ver'] = hsh.get(local_hsh, 'OUTDATED')\n\n data['hsh'] = local_hsh[:7] # make short hash from full hash\n return '{tag} ({hsh}, {rev}, {date}, {cmp_ver})'.format_map(data)",
"def gitstr():\n try:\n return \"%s\" % (open('.git/refs/heads/master').read().strip()[0:10])\n except FileNotFoundError:\n return \"\"\n except IndexError:\n return \"\"",
"def version(version_file=default_version_file, osp_package=default_osp_package):\n\n if os.path.exists(version_file):\n (version_string, version_name) = version_from_file(version_file)\n\n else:\n package_info = get_package_info(osp_package)\n repo_name = get_package_repo_name(package_info)\n version_string = get_version_from_repo_name(repo_name)\n\n if version_string == None:\n version_string = \"unknown\"\n \n return version_string",
"def getversion_git(path=None):\n _program_dir = path or _get_program_dir()\n cmd = 'git'\n try:\n subprocess.Popen([cmd], stdout=subprocess.PIPE).communicate()\n except OSError:\n # some Windows git versions provide git.cmd instead of git.exe\n cmd = 'git.cmd'\n\n with open(os.path.join(_program_dir, '.git/config')) as f:\n tag = f.read()\n # Try 'origin' and then 'gerrit' as remote name; bail if can't find either.\n remote_pos = tag.find('[remote \"origin\"]')\n if remote_pos == -1:\n remote_pos = tag.find('[remote \"gerrit\"]')\n if remote_pos == -1:\n tag = '?'\n else:\n s = tag.find('url = ', remote_pos)\n e = tag.find('\\n', s)\n tag = tag[(s + 6):e]\n t = tag.strip().split('/')\n tag = f\"[{t[0][:-1]}] {'-'.join(t[3:])}\"\n dp = subprocess.Popen([cmd, '--no-pager',\n 'log', '-1',\n '--pretty=format:\"%ad|%an|%h|%H|%d\"',\n '--abbrev-commit',\n '--date=iso'],\n cwd=_program_dir,\n stdout=subprocess.PIPE)\n info, _ = dp.communicate()\n info = info.decode(config.console_encoding).split('|')\n date = info[0][:-6]\n date = time.strptime(date.strip('\"'), '%Y-%m-%d %H:%M:%S')\n dp = subprocess.Popen([cmd, 'rev-list', 'HEAD'],\n cwd=_program_dir,\n stdout=subprocess.PIPE)\n rev, stderr = dp.communicate()\n rev = f'g{len(rev.splitlines())}'\n hsh = info[3] # also stored in '.git/refs/heads/master'\n if (not date or not tag or not rev) and not path:\n raise VersionParseError\n return (tag, rev, date, hsh)",
"def k8s_version(self) -> str:\n stdout, _, _ = RunKubectlCommand(['version', '-o', 'yaml'])\n return yaml.safe_load(stdout)['serverVersion']['gitVersion']",
"async def manage_version():\n\n try:\n repo = git.Repo(search_parent_directories=True)\n version = repo.git.describe('--tags')\n except Exception:\n version = \"v0.0.0\"\n\n base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n creation_time = time.ctime(os.path.getmtime(base_dir))\n\n response = {'version': version, 'deployedOn': creation_time}\n return OK(response)",
"def get_version():\n import subprocess\n proc = subprocess.Popen(\n 'hg log -r tip --template \"{latesttagdistance}\"',\n shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n pending, _ = proc.communicate()\n return \"%(tag)sd%(pending)s\" % dict(tag=config.TAG, pending=pending)"
] |
[
"0.7336789",
"0.7291358",
"0.72601277",
"0.7169582",
"0.70130545",
"0.6905142",
"0.68349826",
"0.6752513",
"0.657856",
"0.655547",
"0.6540561",
"0.64956397",
"0.64951843",
"0.647409",
"0.6458362",
"0.6425985",
"0.6338563",
"0.6262185",
"0.6238744",
"0.62258005",
"0.6216029",
"0.62159336",
"0.6191382",
"0.6167041",
"0.6154774",
"0.61473376",
"0.61277926",
"0.61262137",
"0.61148816",
"0.6113878"
] |
0.83106726
|
0
|
Returns True if chromosome_name sounds like one of our insertion cassettes, False otherwise.
|
def is_cassette_chromosome(chromosome_name):
return ("insertion_cassette" in chromosome_name)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_other_chromosome(chromosome_name):\n if is_cassette_chromosome(chromosome_name): return False\n if chromosome_name.startswith('chr') or chromosome_name.startswith('scaffold'): return False\n else: return True",
"def check_for_chr(sam):\n if 'chr' in sam.references[0]:\n return True\n return False",
"def is_ucsc_reference_name(name):\n return (normalize_reference_name(name) in normalized_ucsc_reference_names)",
"def is_an_oak(name):\n if 'quercus' in name.lower():\n return True\n else:\n return False",
"def check_chromosome_composition(chromosome: str, nurses_number: int) -> bool:\n\n # The number of genes in the chromosome is generated\n genes = 21 * nurses_number\n\n # Here is removed white spaces\n chromosome = chromosome.replace(' ', '')\n\n # If the number of genes on the chromosome is different from the expected\n # number of genes, the function returns False\n if len(chromosome) != genes:\n return False\n\n # For each gene is verified if it is composed by zero or one. Otherwise, the function returns False\n for i in range(len(chromosome)):\n if chromosome[i] != '0' and chromosome[i] != '1':\n return False\n\n # If all the restrictions was satisfied, the function returns True\n return True",
"def _is_reserved_name(content_name: str) -> bool:\n return content_name in RESERVED_NAMES",
"def has_nucleic_acids(self):\n for frag in self.iter_nucleic_acids():\n return True\n return False",
"def is_nucleic_acid(self):\n return True",
"def filter_DNA(c):\n if c in \"ACGTacgt\":\n return True\n else:\n return False",
"def is_nucleic_acid(self):\n return False",
"def has_ascii_name(self):\n return self.unpack_word(0x10) & 1 == 1",
"def _contains_consonants(self, letter_group: str) -> bool:\n return self.consonant_matcher.search(letter_group) is not None",
"def is_allc(self):\n g = self.get_gene().get_seq()\n for i in range(1, len(g)):\n if g[i] is 'd':\n return False\n return True",
"def __contains__(self, chromosome):\n return (to_chromosome(chromosome) in self.chromosome_list)",
"def is_alld(self):\n g = self.get_gene().get_seq()\n for i in range(1, len(g)):\n if g[i] is 'c':\n return False\n return True",
"def has_name(self):\n return self.unpack_word(0x2) != 0",
"def __continas__ (self, name):\n return name in self.containments",
"def has_inspection(fileContent: str) -> bool:\n\t\tif not DISABLE_PYCHARM_INSPECTION_TUPLE:\n\t\t\treturn True\n\n\t\tfor inspectionName in DISABLE_PYCHARM_INSPECTION_TUPLE:\n\t\t\tif inspectionName in fileContent:\n\t\t\t\treturn True\n\t\treturn False",
"def is_covered_class_name(class_name, generated_class_names):\n for generated_class_name in generated_class_names:\n if class_name == generated_class_name or class_name.startswith(generated_class_name + '$'):\n return False\n return True",
"def is_an_oak(name):\n return name.lower().startswith(\"quercus\") # use startswith() to catch bugs in spelling/spacing",
"def is_file_already_processed(file_content, file_name):\n\tchar_found = False\n\tfile_content_to_search = file_content.encode(SOURCE_ENCODING)\n\tfor des_char, char in CHARS_TO_REPLACE:\n\t\tif char in file_content_to_search:\n\t\t\tchar_found = True\n\t\t\tbreak\n\tif char_found:\n\t\tprint \"--> %s already processed, skipping...\" % file_name\n\treturn char_found",
"def is_valid_compound_name(name: str) -> bool:\n return n2s.has_smiles(name)",
"def isProteic(self):\n from MolKit.PDBresidueNames import AAnames\n\n self.AARes = [x for x in self.residues if x.type in AAnames]\n\n water = [x for x in self.residues if x.type in ['HOH', 'WAT']]\n\n if len(self.AARes) and len(self.AARes)+len(water) == len(self.residues):\n return True\n else:\n return False",
"def check_names(treat, control, error_stream):\n tchrnames = set(treat.get_chr_names())\n cchrnames = set(control.get_chr_names())\n commonnames = tchrnames.intersection(cchrnames)\n if len(commonnames)==0:\n error_stream(\"No common chromosome names can be found from treatment and control!\")\n error_stream(\"Please make sure that the treatment and control alignment files were generated by using the same genome assembly!\")\n error_stream(\"Chromosome names in treatment: %s\" % \",\".join(sorted(tchrnames)))\n error_stream(\"Chromosome names in control: %s\" % \",\".join(sorted(cchrnames)))\n sys.exit()",
"def has_any(self, name):\n counter = 0\n for element in self.cards:\n if name in str(element):\n counter += 1\n\n if counter > 0:\n return True\n else:\n return False",
"def is_valid_cname(common_name: str) -> bool:\n return True if Band.band_range(common_name) else False",
"def _contains_in_self_or_parent(self, name: str) -> bool:\n return name in self",
"def has_been_coded_by(self, vname, coder):\n return self.metadata.has_been_coded_by(vname, coder)",
"def has_name_data(self):\n\n matches = set(self.broader_tags()) & LANGUAGES_WITH_NAME_DATA\n return bool(matches)",
"def isDna(self):\n from MolKit.PDBresidueNames import Nucleotides\n\n dnaRes = [x for x in self.residues if \\\n x.type.strip() in Nucleotides]\n\n water = [x for x in self.residues if x.type in ['HOH', 'WAT']]\n\n if len(dnaRes) and len(dnaRes)+len(water) == len(self.residues):\n self.isDNA = True\n return True\n else:\n self.isDNA = False\n return False"
] |
[
"0.7973496",
"0.6904484",
"0.63201296",
"0.5878787",
"0.5870619",
"0.58183515",
"0.581004",
"0.5799723",
"0.5798506",
"0.57770634",
"0.5756595",
"0.5752675",
"0.57503545",
"0.57446855",
"0.5728181",
"0.5641129",
"0.56181234",
"0.5585736",
"0.5577955",
"0.5553379",
"0.55077946",
"0.5451964",
"0.54151565",
"0.5414898",
"0.5399016",
"0.53971785",
"0.53963715",
"0.53599423",
"0.5347867",
"0.5343479"
] |
0.85567
|
0
|
Returns True if chromosome_name is neither cassette nor chromosome/scaffold, False otherwise.
|
def is_other_chromosome(chromosome_name):
if is_cassette_chromosome(chromosome_name): return False
if chromosome_name.startswith('chr') or chromosome_name.startswith('scaffold'): return False
else: return True
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_cassette_chromosome(chromosome_name):\n return (\"insertion_cassette\" in chromosome_name)",
"def check_chromosome_composition(chromosome: str, nurses_number: int) -> bool:\n\n # The number of genes in the chromosome is generated\n genes = 21 * nurses_number\n\n # Here is removed white spaces\n chromosome = chromosome.replace(' ', '')\n\n # If the number of genes on the chromosome is different from the expected\n # number of genes, the function returns False\n if len(chromosome) != genes:\n return False\n\n # For each gene is verified if it is composed by zero or one. Otherwise, the function returns False\n for i in range(len(chromosome)):\n if chromosome[i] != '0' and chromosome[i] != '1':\n return False\n\n # If all the restrictions was satisfied, the function returns True\n return True",
"def check_for_chr(sam):\n if 'chr' in sam.references[0]:\n return True\n return False",
"def is_alld(self):\n g = self.get_gene().get_seq()\n for i in range(1, len(g)):\n if g[i] is 'c':\n return False\n return True",
"def is_allc(self):\n g = self.get_gene().get_seq()\n for i in range(1, len(g)):\n if g[i] is 'd':\n return False\n return True",
"def can_mutate(self, ga, chromosome):\n return len(chromosome.genes) < len(ga._gene_bank)",
"def isValid(self):\n if(not self.name or len(self.name) == 0):\n return False\n return True",
"def has_name(self):\n return self.unpack_word(0x2) != 0",
"def is_nucleic_acid(self):\n return False",
"def has_cargo(self) -> bool:\n return bool(self.proto.cargo_space_taken)",
"def check_coverage_collision(self, row, column):\n\n if self.cov_grid[row][column] == OCCUPIED:\n return True\n else:\n return False",
"def is_valid_compound_name(name: str) -> bool:\n return n2s.has_smiles(name)",
"def is_ucsc_reference_name(name):\n return (normalize_reference_name(name) in normalized_ucsc_reference_names)",
"def invariant(self):\n\t\treturn ((self.name != \"\") and (self.locationId != \"\"))",
"def is_nucleic_acid(self):\n return True",
"def __bool__(self):\n return len(self.atoms) >= 1",
"def is_valid_cname(common_name: str) -> bool:\n return True if Band.band_range(common_name) else False",
"def _quick_and_dirty_glyph_is_empty(font, glyph_name):\n if 'glyf' in font:\n glyph = font['glyf'][glyph_name]\n if not glyph.isComposite():\n if glyph.numberOfContours == 0:\n return True\n return False\n elif 'CFF2' in font:\n top_dict = font['CFF2'].cff.topDictIndex[0]\n else:\n top_dict = font['CFF '].cff.topDictIndex[0]\n char_strings = top_dict.CharStrings\n char_string = char_strings[glyph_name]\n if len(char_string.bytecode) <= 1:\n return True\n return False",
"def treasure_condition(locations):\n return not len(locations) == 0",
"def validate_strand(strand: str) -> bool:\n strand = strand.upper()\n count = dict(Counter(strand))\n for k in count.keys():\n if k not in NUCLEOTIDES:\n raise Exception(\"Invalid DNA sequence\")\n return True",
"def __nonzero__(self):\n # XXX: check the name and the characterID?\n if self.data.get('name'): return 1\n return 0",
"def valid_coverage_cell(self, row, column):\n\n if (row < self.cov_grid.shape[0] and\n row >= 0) and \\\n (column < self.cov_grid.shape[1] and\n column >= 0):\n return True\n else:\n return False",
"def _is_reserved_name(content_name: str) -> bool:\n return content_name in RESERVED_NAMES",
"def isDna(self):\n from MolKit.PDBresidueNames import Nucleotides\n\n dnaRes = [x for x in self.residues if \\\n x.type.strip() in Nucleotides]\n\n water = [x for x in self.residues if x.type in ['HOH', 'WAT']]\n\n if len(dnaRes) and len(dnaRes)+len(water) == len(self.residues):\n self.isDNA = True\n return True\n else:\n self.isDNA = False\n return False",
"def isValid(self):\n return self.file_name != \"\" and self.line_number != 0",
"def _is_clone_snapshot_name(self, snapshot):\n name = snapshot.split('@')[-1]\n return name.startswith('cinder-clone-snapshot-')",
"def isProteic(self):\n from MolKit.PDBresidueNames import AAnames\n\n self.AARes = [x for x in self.residues if x.type in AAnames]\n\n water = [x for x in self.residues if x.type in ['HOH', 'WAT']]\n\n if len(self.AARes) and len(self.AARes)+len(water) == len(self.residues):\n return True\n else:\n return False",
"def has_crossing_len2_ob(self) -> bool:\n fcell = self.first_cell\n scell = self.second_cell\n if self._fuse_row:\n possible_obs = [\n GriddedPerm((0, 1), (fcell, scell)),\n GriddedPerm((1, 0), (scell, fcell)),\n ]\n else:\n possible_obs = [\n GriddedPerm((0, 1), (fcell, scell)),\n GriddedPerm((1, 0), (fcell, scell)),\n ]\n return any(ob in possible_obs for ob in self._tiling.obstructions)",
"def is_constructing_scv(self) -> bool:\n return self.orders and self.orders[0].ability.id in {\n AbilityId.TERRANBUILD_ARMORY,\n AbilityId.TERRANBUILD_BARRACKS,\n AbilityId.TERRANBUILD_BUNKER,\n AbilityId.TERRANBUILD_COMMANDCENTER,\n AbilityId.TERRANBUILD_ENGINEERINGBAY,\n AbilityId.TERRANBUILD_FACTORY,\n AbilityId.TERRANBUILD_FUSIONCORE,\n AbilityId.TERRANBUILD_GHOSTACADEMY,\n AbilityId.TERRANBUILD_MISSILETURRET,\n AbilityId.TERRANBUILD_REFINERY,\n AbilityId.TERRANBUILD_SENSORTOWER,\n AbilityId.TERRANBUILD_STARPORT,\n AbilityId.TERRANBUILD_SUPPLYDEPOT,\n }",
"def is_part_of_split(self):\n return self.from_case or self.split_cases.count() > 0"
] |
[
"0.79243827",
"0.64666486",
"0.60545814",
"0.6003928",
"0.5928852",
"0.57831943",
"0.5766314",
"0.5740043",
"0.5698536",
"0.56569386",
"0.5632043",
"0.5613601",
"0.56032854",
"0.5572482",
"0.5534552",
"0.5523383",
"0.552156",
"0.55130076",
"0.5477924",
"0.54777753",
"0.5452592",
"0.54452133",
"0.541836",
"0.5405929",
"0.54043263",
"0.53669816",
"0.536554",
"0.5358972",
"0.5348353",
"0.5332406"
] |
0.8339933
|
0
|
Takes a (chrom, start_pos, end_pos, strand) tuple - raises MutantError if it's wrong. Start_pos and end_pos should be int, 1-based, inclusive (so in AATTGG, the position of AA is 1-2) - unlike in HTSeq! Strand should be +/-. No checks are done on chrom.
|
def check_valid_position_tuple(pos):
try: chrom, start_pos, end_pos, strand = pos
except (TypeError, ValueError): raise MutantError("Didn't get a correct position tuple! %s"%pos)
if strand not in SEQ_STRANDS: raise MutantError("Invalid strand %s!"%strand)
if start_pos < 1: raise MutantError("Sequence positions must be positive!")
if start_pos > end_pos: raise MutantError("Sequence start can't be after end!")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def overlaps(self, chrom, start, end, strand=None):\n if (self.chrom != chrom \n or min(self.end, end) - max(self.start, start) <= 0 \n or (strand is not None and self.strand != strand)): \n return False\n return True",
"def get_ref_and_start_and_offset(forward_ref_sequence: str,\n strand: bed_pb2.BedRecord.Strand,\n chrom_start: int,\n chrom_end: int) -> Tuple[str, int, int]:\n ref_sequence = forward_ref_sequence\n if strand == bed_pb2.BedRecord.Strand.FORWARD_STRAND:\n start = chrom_start\n offset = 1\n elif strand == bed_pb2.BedRecord.Strand.REVERSE_STRAND:\n start = chrom_end\n offset = -1\n # For the reverse strand, we want the reverse complement.\n ref_sequence = reverse_complement(forward_ref_sequence)\n else:\n raise ValueError('Strand must be set.')\n return ref_sequence, start, offset",
"def test_RNA_position_strand_equality(self):\n \n tool = pybedtools.create_interval_from_list(\"chr1 50 60 ENSMUSG1_1_83;ENSMUSG1_6_83 0 - 60 60\".split())\n location_dict = {\"ENSMUSG1\" : {\"strand\" : \"+\", \"regions\" : [(0,100),\n ] \n }\n }\n \n self.assertRaises(ValueError, RNA_position, tool, location_dict)",
"def __init__(self, chromosome, strand, full_position=None, position_before=None, position_after=None, immutable=False):\n # need to make instance mutable to be able to set anything, due to how __setattr__ is decorated\n self.make_mutable_REMEMBER_CLEANUP_FIRST() \n # now start setting attributes\n self.chromosome = chromosome\n self.strand = strand\n # parse full_position if provided\n if full_position is not None:\n if (position_before is not None) or (position_after is not None):\n raise ValueError(\"If providing full_position, cannot also provide position_before/position_after!\")\n self.position_before, self.position_after = self._parse_full_position(full_position)\n # otherwise use position_before and/or position_after\n else:\n if position_before is None and position_after is None:\n raise ValueError(\"Can't create an Insertion_position object with no known position values!\")\n try:\n self.position_before = None if position_before is None else int(position_before)\n self.position_after = None if position_after is None else int(position_after)\n except TypeError: \n raise ValueError(\"position_before/position_after must be int-castable or None!\")\n if immutable: self.make_immutable()",
"def get_chrom_start_end_from_string(s):\n try:\n chrom, s_e = s.split('__substr__')\n start, end = s_e.split('_')\n return chrom, int(start), int(end)\n except Exception:\n raise ValueError(\"String %s must be of format '{chrom}__substr__{start}_{end}'\" % s)",
"def HTSeq_pos_to_tuple(HTSeq_pos):\n try:\n chrom = HTSeq_pos.chrom\n except AttributeError:\n raise MutantError(\"Invalid position %s! Need an HTSeq iv object. (If empty, maybe read wasn't aligned?)\"%(HTSeq_pos,))\n strand = HTSeq_pos.strand\n # HTSeq is 0-based and I want 1-based, thus the +1; end has no +1 because in HTSeq end is the base AFTER the alignment.\n start_pos = HTSeq_pos.start+1\n end_pos = HTSeq_pos.end\n output_pos = (chrom, start_pos, end_pos, strand)\n check_valid_position_tuple(output_pos)\n return output_pos",
"def coordinates(self, name, start=None, end=None):\n record = self.process(name)\n if not start and not end:\n start = 1\n end = record.end - record.start + 1\n positions = {}\n match_positions = []\n\n if record.strand == '+':\n _start = 1\n for relative, actual in enumerate(range(record.start - 1, record.end),\n start=_start):\n positions[relative] = actual\n for pos in range(start, end + 1):\n match_positions.append(positions[pos])\n return [(record.scaffold, min(match_positions), max(match_positions) + 1,\n record.mirbase_name, 0, record.strand)]\n\n elif record.strand == '-':\n _start = 1\n for relative, actual in enumerate(reversed(range(record.start - 1,\n record.end)), start=_start):\n positions[relative] = actual\n for pos in range(start, end + 1):\n match_positions.append(positions[pos])\n return [(record.scaffold, min(match_positions), max(match_positions) + 1,\n record.mirbase_name, 0, record.strand)]",
"def chrom_and_pos(self, index, index2=None):\n if index2 is None:\n return self._chrom_and_pos(index)\n else:\n start_chrom, start_pos = self._chrom_and_pos(index)\n end_chrom, end_pos = self._chrom_and_pos(index2)\n if start_chrom != end_chrom:\n start_pos = 0 # was end of previous chromosome, so change to beginning of current\n return end_chrom, int(start_pos), int(end_pos) # convert from numpy int type",
"def parse_positions(self, start_pos, end_pos):\r\n\r\n start_column = ord(start_pos[0]) - 97\r\n if len(start_pos) == 2:\r\n start_row = ord(start_pos[1]) - 49\r\n else:\r\n start_row = 9\r\n end_column = ord(end_pos[0]) - 97\r\n if len(end_pos) == 2:\r\n end_row = ord(end_pos[1]) - 49\r\n else:\r\n end_row = 9\r\n return [start_row, start_column, end_row, end_column]",
"def parse_position(chrom_pos: str):\n chrom, pos = chrom_pos.split('_')\n return chrom, int(pos)",
"def test_RNA_position_fail(self):\n \n tool = pybedtools.create_interval_from_list(\"chr1 50 60 ENSMUSG1_1_83;ENSMUSG1_6_83 0 - 10 10\".split())\n location_dict = {\"ENSMUSG1\" : {\"strand\" : \"-\", \"regions\" : [(100, 150),\n (25,50),\n ] \n }\n }\n \n self.assertEqual(RNA_position(tool, location_dict), (None, None))\n \n tool = pybedtools.create_interval_from_list(\"chr1 50 60 ENSMUSG1_1_83;ENSMUSG1_6_83 0 - 175 175\".split())\n \n self.assertEqual(RNA_position(tool, location_dict), (None, None))",
"def check_variant_start_and_end_positions(\n input_df: pd.DataFrame, start_pos_col: str, end_pos_col: str, filename: str\n) -> tuple:\n errors = \"\"\n warnings = \"\"\n\n if any(input_df[start_pos_col] > input_df[end_pos_col]):\n errors = (\n f\"{filename}: Your variants file has record(s) that have an end position \"\n \"value less than the start position value. Please update your file to be consistent. \"\n \"When we annotate using the genome-nexus-annotation-pipeline, the records with this \"\n \"position discrepancy will show a blank reference and variant allele.\\n\"\n )\n return errors, warnings",
"def coordinates(self, name, start=None, end=None):\n if \"|\" in name:\n self.name = name.split(\"|\")[0]\n else:\n self.name = name\n positions = {}\n match_positions = []\n records = []\n segments = []\n result_segments = []\n for record in self.process(self.name):\n records.append(record)\n records.sort(key=lambda x: int(x.exon_number))\n\n if records[0].strand == '+':\n _start = 1\n for record in records:\n for relative, actual in enumerate(range(record.start, record.end + 1),\n start=_start):\n positions[relative] = actual\n _start = relative + 1\n for pos in range(start, end):\n match_positions.append(positions[pos])\n for key, group in groupby(enumerate(match_positions),\n lambda x: x[0] - x[-1]):\n segment = list(map(itemgetter(1), group))\n segments.append([segment[0], segment[-1]])\n for segment in segments:\n for record in records:\n if segment[0] >= record.start and segment[1] <= record.end:\n result_segments.append((record.scaffold, segment[0], segment[1],\n record.transcript_id + '|' + record.gene_name, 0, record.strand))\n\n elif records[0].strand == '-':\n _start = 1\n for record in records:\n for relative, actual in enumerate(reversed(range(record.start,\n record.end + 1)), start=_start):\n positions[relative] = actual\n _start = relative + 1\n for pos in range(start, end):\n match_positions.append(positions[pos])\n for key, group in groupby(enumerate(reversed(match_positions)),\n lambda x: x[0] - x[-1]):\n segment = list(map(itemgetter(1), group))\n segments.append([segment[0], segment[-1]])\n for segment in segments:\n for record in records:\n if segment[0] >= record.start and segment[1] <= record.end:\n result_segments.append((record.scaffold, segment[0], segment[1],\n record.transcript_id + '|' + record.gene_name, 0, record.strand))\n\n if len(result_segments) == 0:\n logger.debug('%s, %s, %s' % (name, start, end))\n logger.debug('%s' % str(segments))\n for r in records:\n logger.debug('%s %s %s %s' % (r.scaffold, r.strand,\n r.start, r.end))\n\n return result_segments",
"def _get_insertion_info(insertion_pos, allowed_strand_vals=SEQ_STRANDS):\n try:\n strand, ins_start, ins_end = insertion_pos.strand, insertion_pos.min_position, insertion_pos.max_position\n except AttributeError:\n strand, ins_start, ins_end = insertion_pos\n if allowed_strand_vals is not None:\n assert strand in allowed_strand_vals, \"Strand should be %s, and is %s!\"%(' or '.join(allowed_strand_vals), strand)\n return strand, ins_start, ins_end",
"def get_ival_start_end(\n coordinate: str, start: int, end: int, cds_start: int,\n errors: List) -> Optional[Tuple[int, int]]:\n try:\n start = int(start)\n if end is None:\n end = start\n end = int(end)\n except (ValueError, TypeError):\n errors.append(\"Start/End must be valid ints\")\n return None\n\n if coordinate == \"c\":\n if cds_start:\n start += cds_start\n end += cds_start\n return start, end",
"def validate_strand(strand: str) -> bool:\n strand = strand.upper()\n count = dict(Counter(strand))\n for k in count.keys():\n if k not in NUCLEOTIDES:\n raise Exception(\"Invalid DNA sequence\")\n return True",
"def ind_pos(position, ind, current_geno, chr_starts, chr_ends):\n ind_starts = chr_starts[ind]\n ind_ends = chr_ends[ind]\n #print [position, ind, current_geno, ind_starts, ind_ends]\n in_interval = False\n for interval in range(len(ind_starts)):\n if position > int(ind_starts[interval]) and position < int(ind_ends[interval]):\n in_interval = True\n break\n if in_interval:\n return(current_geno)\n else:\n return(\"./.\")",
"def _check_basic_pos_inputs(self, get_pos_function):\n # should raise exception for invalid argument (valid arguments: HTSeq position object or (chrom,start,end,strand) tuple\n # (strand must be +/-, and start can't be after end)\n for bad_flanking_region in [None, '', 'aaa', 0, 1, 0.65, [], {}, True, False, ('C',2,3,4),('C',2,3,'x'),('C',3,2,'-')]:\n for cassette_end in SEQ_ENDS:\n for relative_read_direction in RELATIVE_READ_DIRECTIONS:\n self.assertRaises(MutantError, get_pos_function, bad_flanking_region, cassette_end, relative_read_direction)\n # should raise exception for invalid cassette_end or relative_read_direction\n bad_vals = ['','aaa',0,1,[],{},None,True,False,'start','end','middle','read','leftmost','rightmost']\n for bad_val in bad_vals:\n for relative_read_direction in RELATIVE_READ_DIRECTIONS:\n self.assertRaises(MutantError, get_pos_function, ('C',1,5,'+'), bad_val, relative_read_direction)\n for cassette_end in SEQ_ENDS:\n self.assertRaises(MutantError, get_pos_function, ('C',1,5,'+'), cassette_end, bad_val)",
"def find_range_from_cons_pos(my_pos, gpcr_pdb):\n (ext_range,chain)=gpcr_pdb[my_pos]\n pos_range=str(ext_range)\n #pos_range=ext_range+\"-\"+ext_range\n return pos_range",
"def get_label_start_end(\n label_base_positions: Iterable[int],\n strand: bed_pb2.BedRecord.Strand) -> Tuple[Optional[int], Optional[int]]:\n # Gap and padding tokens may have a position of -1, since they are not\n # actually present in the reference. Remove all instances of -1, since we do\n # not want to consider it when computing min/max position.\n valid_label_base_positions = set(label_base_positions)\n valid_label_base_positions.discard(-1)\n\n if not valid_label_base_positions:\n return None, None\n start = min(valid_label_base_positions)\n end = max(valid_label_base_positions)\n if strand == bed_pb2.BedRecord.Strand.FORWARD_STRAND:\n end += 1\n elif strand == bed_pb2.BedRecord.Strand.REVERSE_STRAND:\n start -= 1\n else:\n raise ValueError('Strand must be set.')\n return start, end",
"def __init__(self, chromosome, label, start_base, end_base,\n gstrand=None, sub_bands=None):\n\n if start_base >= end_base:\n raise ValueError('wrong band coordinates '\n '%d-%d' % (start_base, end_base))\n\n self._chromosome = chromosome\n self._label = label\n self._start_base = start_base\n self._end_base = end_base\n\n self._gstrand = gstrand\n self._sub_bands = sub_bands",
"def genomic_to_cds(pos_gm, gene_info):\n strand = gene_info[2]\n rg = gene_info[0]\n head_tail = rg[0][:2] + rg[-1][:2]\n s, e = min(head_tail), max(head_tail)\n irg = [_ for _ in rg if _[0] <= pos_gm <= _[1]]\n\n if len(irg) < 0:\n return ['err', f'{pos_gm} not in cds range head={s}, tail={e}']\n if len(irg) > 1:\n return ['err', f'multiple match found: {pos_gm} \\n{irg}']\n\n irg = irg[0]\n len1 = irg[1] - pos_gm\n if len1 < 0:\n return ['err', f'wrong range order: strand={strand}, gm_pos={pos_gm}, irg={irg}']\n\n if strand == '+':\n if irg[2] > irg[3]:\n return ['err', f'wrong range cds order: strand = plus, irg={irg}']\n return irg[2] + len1\n\n elif strand == '-':\n if irg[2] < irg[3]:\n return ['err', f'wrong range cds order: strand = minus, irg={irg}']\n\n return irg[3] + len1\n\n else:\n return ['err', f'wrong strand {strand}']",
"def extract_from_range(tgt_start, tgt_end, src_start, src_end, max_phrase_len):\n # print(\"rages\", tgt_start, tgt_end, src_start, src_end)\n if tgt_end < 0:\n return \n # If `src_align_idx` out of the `src_start` and `src_target`.\n for src_align_idx, tgt_align_idx in alignment:\n # target align point\n # sorce align point out of range\n if ((tgt_start <= tgt_align_idx <= tgt_end) and \n (src_align_idx < src_start or src_align_idx > src_end)): \n return\n phrase_set = set()\n ts = tgt_start # For increment\n while True:\n te = min(tgt_end, ts+max_phrase_len-1) # For decrement\n # te = tgt_end \n while True:\n # Add phrase pair (src_start, src_end, tgt_start, tgt_end)\n src_phrase = \" \".join(src_sent[i] for i in range(src_start,src_end+1))\n tgt_phrase = \" \".join(tgt_sent[i] for i in range(ts,te+1))\n phrase_set.add(((src_start, src_end+1), src_phrase, tgt_phrase))\n te+= 1\n # Add phrase until `te` aligned or out of range\n if te in tgt_aligned or te == tgt_len:\n break\n ts-=1\n # Add phrase until `te` aligned or out of range\n if ts in tgt_aligned or ts < 0:\n break\n \n return phrase_set",
"def bed_to_interval(contig, bed_start, bed_end, name='', score='', strand='',\n block_ids='', superblock_ids=''):\n try:\n # assure positions to be integers\n # convert from 0,1-based to 1,1-based positions\n start = int(bed_start) + 1\n end = int(bed_end)\n except ValueError:\n raise ValueError(\"'start' and 'end' should be integers\")\n\n # perform sanity check to check for incorrect formatting\n assert (end - start) >= 0, (\"Not a valid BED interval.\"\n \"(bedEnd - bedStart) must be >= 0.\")\n\n # fallback to empty list for optional element ids\n ids = [element_ids.split(',') if element_ids else []\n for element_ids in (block_ids, superblock_ids)]\n\n return BaseInterval(contig, start, end, name, score, strand, *ids)",
"def calculate_global_position(strand, start, end, relative_position):\n if strand == 1:\n global_position = [start + x for x in relative_position]\n elif strand == -1:\n global_position = [end - x for x in relative_position]\n else:\n raise ValueError(\"Strand must be 1 or -1\")\n return global_position",
"def in_range(\n self,\n chrom: Optional[str] = None,\n start: Optional[Numeric] = None,\n end: Optional[Numeric] = None,\n mode: str = \"outer\",\n ):\n starts = [int(start)] if start is not None else None\n ends = [int(end)] if end is not None else None\n results = iter_ranges(self.data, chrom, starts, ends, mode)\n return self.as_dataframe(next(results))",
"def bed_str(self, chrom, strand):\n if len(self.act) == 0:\n act_str = \".\"\n else:\n act_str = \",\".join([str(ai) for ai in sorted(list(self.act))])\n cols = (\n chrom,\n str(int(self.start)),\n str(int(self.end)),\n \".\",\n \"1\",\n strand,\n act_str,\n )\n return \"\\t\".join(cols)",
"def check_subseq_range(subseq_range):\n subseq_range_content = subseq_range.split(\"-\")\n if len(subseq_range_content) != 2:\n err_str = \"A subseq_range must have two arguments (start and stop)\"\n err_str += \" separated by a -\"\n raise ValueError(err_str)\n if int(subseq_range_content[0]) > int(subseq_range_content[1]):\n err_str = \"Start for a subseq_range must be lower than the stop\"\n raise ValueError(err_str)",
"def frame_strand(strand):\n logging.info(\"Framing strand: \" + strand)\n framed_strand = []\n rolling_frame = ['','','']\n frame_begin = -1\n for i in range(0, len(strand)):\n rolling_frame[0] = rolling_frame[1]\n rolling_frame[1] = rolling_frame[2]\n rolling_frame[2] = strand[i]\n if rolling_frame[0]+rolling_frame[1]+rolling_frame[2] == start_codon:\n # We have the frame at this point. Prune the beginning of the string:\n # We are at a pos+2 (c ccc atg ccc ccc c) so cut back 2 and then break\n # ^\n frame_begin = i - 2\n logging.debug(\"Found start codon, strand is now framed at \" + str(frame_begin) + \".\")\n break\n if frame_begin == -1:\n # In this case, there is no valid frame in the strand. Return an empty list.\n logging.info(\"No valid frame in strand.\")\n return [], 0\n pruned_strand = strand[frame_begin:]\n framed_strand = [pruned_strand[i:i+3] for i in range(0, len(pruned_strand), 3)] # Make triples\n logging.debug(\"Framed strand is: \" + str(framed_strand))\n if len(framed_strand[-1]) < 3:\n logging.info(\"Newly framed sequence terminates with non-codon: \" + framed_strand[-1])\n return framed_strand, frame_begin",
"def test_RNA_position_failaure(self):\n \n tool = pybedtools.create_interval_from_list(\"chr1 50 60 ENSMUSG1_1_83;ENSMUSG1_6_83 0 - 50 50\".split())\n location_dict = {\"ENSMUSG2\" : {\"strand\" : \"+\", \"regions\" : [(30, 40),\n (10,20)\n ] \n }\n }\n self.assertRaises(KeyError, RNA_position, tool, location_dict)"
] |
[
"0.6354471",
"0.6235148",
"0.61575633",
"0.593017",
"0.5807833",
"0.56864846",
"0.559741",
"0.55016994",
"0.5491126",
"0.5472953",
"0.5449186",
"0.54104984",
"0.5406418",
"0.54044247",
"0.5370745",
"0.53121483",
"0.5310372",
"0.52801085",
"0.52617604",
"0.5255385",
"0.525042",
"0.5238578",
"0.52193636",
"0.5205579",
"0.5187608",
"0.51718724",
"0.51587975",
"0.5114631",
"0.5113597",
"0.5108089"
] |
0.7372381
|
0
|
Convert an HTSeq.GenomicPosition instance to a (chrom,start_pos,end_pos,strand) tuple. Start_pos and end_pos are 1-based, inclusive (so in AATTGG, the position of AA is 1-2) - unlike in HTSeq!
|
def HTSeq_pos_to_tuple(HTSeq_pos):
try:
chrom = HTSeq_pos.chrom
except AttributeError:
raise MutantError("Invalid position %s! Need an HTSeq iv object. (If empty, maybe read wasn't aligned?)"%(HTSeq_pos,))
strand = HTSeq_pos.strand
# HTSeq is 0-based and I want 1-based, thus the +1; end has no +1 because in HTSeq end is the base AFTER the alignment.
start_pos = HTSeq_pos.start+1
end_pos = HTSeq_pos.end
output_pos = (chrom, start_pos, end_pos, strand)
check_valid_position_tuple(output_pos)
return output_pos
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _make_pos(pos):\n return pos.chromosome, pos.strand, pos.min_position, pos.min_position+20",
"def get_genomic_range( self ):\n return self.snv_chrom + ':' + str( self.snv_start ) + '-' + str( self.snv_end )",
"def chrom_and_pos(self, index, index2=None):\n if index2 is None:\n return self._chrom_and_pos(index)\n else:\n start_chrom, start_pos = self._chrom_and_pos(index)\n end_chrom, end_pos = self._chrom_and_pos(index2)\n if start_chrom != end_chrom:\n start_pos = 0 # was end of previous chromosome, so change to beginning of current\n return end_chrom, int(start_pos), int(end_pos) # convert from numpy int type",
"def parse_position(chrom_pos: str):\n chrom, pos = chrom_pos.split('_')\n return chrom, int(pos)",
"def coordinates(self, name, start=None, end=None):\n record = self.process(name)\n if not start and not end:\n start = 1\n end = record.end - record.start + 1\n positions = {}\n match_positions = []\n\n if record.strand == '+':\n _start = 1\n for relative, actual in enumerate(range(record.start - 1, record.end),\n start=_start):\n positions[relative] = actual\n for pos in range(start, end + 1):\n match_positions.append(positions[pos])\n return [(record.scaffold, min(match_positions), max(match_positions) + 1,\n record.mirbase_name, 0, record.strand)]\n\n elif record.strand == '-':\n _start = 1\n for relative, actual in enumerate(reversed(range(record.start - 1,\n record.end)), start=_start):\n positions[relative] = actual\n for pos in range(start, end + 1):\n match_positions.append(positions[pos])\n return [(record.scaffold, min(match_positions), max(match_positions) + 1,\n record.mirbase_name, 0, record.strand)]",
"def position_tuples(self, protein=False):\n if protein:\n if not self.is_coding():\n raise AttributeError(\n \"Cannot return wild type protein \"\n \"position tuples for non-coding wild \"\n \"type [{}]\".format(self.parent_name)\n )\n else:\n seq = self.protein_seq\n offset = self.protein_offset\n else:\n seq = self.dna_seq\n offset = self.dna_offset\n\n return [(i + offset + 1, seq[i]) for i in range(len(seq))]",
"def get_dna_fragment(self, start_position=0, stop_position=0):\r\n\r\n if start_position != 0:\r\n information = [[\"\".join([chr(data) for data in self._t_strand[0]][start_position: ]),\r\n \"\".join([chr(data) for data in self._t_strand[1]][start_position: ])],\r\n [\"\".join([chr(data) for data in self._c_strand[0]][start_position: ]),\r\n \"\".join([chr(data) for data in self._c_strand[1]][start_position: ])]]\r\n else:\r\n information = [[\"\".join([chr(data) for data in self._t_strand[0]][start_position:]),\r\n \"\".join([chr(data) for data in self._t_strand[1]][start_position:])],\r\n [\"\".join([chr(data) for data in self._c_strand[0]][start_position:]),\r\n \"\".join([chr(data) for data in self._c_strand[1]][start_position:])]]\r\n\r\n return information",
"def get_ref_and_start_and_offset(forward_ref_sequence: str,\n strand: bed_pb2.BedRecord.Strand,\n chrom_start: int,\n chrom_end: int) -> Tuple[str, int, int]:\n ref_sequence = forward_ref_sequence\n if strand == bed_pb2.BedRecord.Strand.FORWARD_STRAND:\n start = chrom_start\n offset = 1\n elif strand == bed_pb2.BedRecord.Strand.REVERSE_STRAND:\n start = chrom_end\n offset = -1\n # For the reverse strand, we want the reverse complement.\n ref_sequence = reverse_complement(forward_ref_sequence)\n else:\n raise ValueError('Strand must be set.')\n return ref_sequence, start, offset",
"def parse_positions(self, start_pos, end_pos):\r\n\r\n start_column = ord(start_pos[0]) - 97\r\n if len(start_pos) == 2:\r\n start_row = ord(start_pos[1]) - 49\r\n else:\r\n start_row = 9\r\n end_column = ord(end_pos[0]) - 97\r\n if len(end_pos) == 2:\r\n end_row = ord(end_pos[1]) - 49\r\n else:\r\n end_row = 9\r\n return [start_row, start_column, end_row, end_column]",
"def _get_insertion_info(insertion_pos, allowed_strand_vals=SEQ_STRANDS):\n try:\n strand, ins_start, ins_end = insertion_pos.strand, insertion_pos.min_position, insertion_pos.max_position\n except AttributeError:\n strand, ins_start, ins_end = insertion_pos\n if allowed_strand_vals is not None:\n assert strand in allowed_strand_vals, \"Strand should be %s, and is %s!\"%(' or '.join(allowed_strand_vals), strand)\n return strand, ins_start, ins_end",
"def getIntPos(self):\n return (int(self.pos[0]),int(self.pos[1]))",
"def getSequence( self,\n contig, \n strand = \"+\", \n start = 0, \n end = 0,\n converter = None,\n as_array = False):\n\n if not self.mIsLoaded: self.__loadIndex()\n\n if contig in self.mSynonyms:\n contig = self.mSynonyms[contig]\n\n if contig not in self.mIndex:\n raise KeyError, \"%s not in index\" % contig\n\n data = self.mIndex[contig]\n # dummy is\n # -> pos_seq for seekable streams\n # -> block_size for unseekable streams\n pos_id, dummy, lsequence = data[:3]\n pos_seq = dummy\n block_size = dummy\n \n if end == 0: end = lsequence\n \n if end > lsequence:\n raise ValueError(\"3' coordinate on %s out of bounds: %i > %i\" % (contig, end, lsequence))\n if start < 0:\n raise ValueError(\"5' coordinate on %s out of bounds: %i < 0\" % (contig, start))\n\n if converter:\n first_pos, last_pos = converter( start, end,\n str(strand) in (\"+\", \"1\"),\n lsequence )\n else:\n first_pos, last_pos = start, end\n if str(strand) in (\"-\", \"0\", \"-1\"):\n first_pos, last_pos = lsequence - last_pos, lsequence - first_pos\n \n assert( first_pos < last_pos )\n \n p = SArray( \"c\" )\n \n if self.mNoSeek:\n ## read directly from position\n p.fromstring( self.mDatabaseFile.read( block_size, data[3], first_pos, last_pos) )\n else:\n first_pos += pos_seq\n last_pos += pos_seq\n\n self.mDatabaseFile.seek( first_pos )\n p.fromstring( self.mDatabaseFile.read( last_pos - first_pos ) )\n\n if str(strand) in (\"-\", \"0\", \"-1\"):\n p.reverse() \n p = SArray(\"c\",\n string.translate( p[:],\n string.maketrans(\"ACGTacgt\", \"TGCAtgca\") ) )\n\n if as_array:\n return p\n else:\n # cast to string\n return p[:]",
"def seq_2_pos(idx):\n\tglobal SEQ2POS\n\tif idx not in SEQ2POS:\n\t\treturn None\n\tcod = SEQ2POS[idx]\n\treturn (cod&0xFFFF) , (cod>>16)",
"def to_tuple(self):\n return (self.row_start, self.row_end, self.col_start, self.col_end)",
"def check_valid_position_tuple(pos):\n try: chrom, start_pos, end_pos, strand = pos\n except (TypeError, ValueError): raise MutantError(\"Didn't get a correct position tuple! %s\"%pos)\n if strand not in SEQ_STRANDS: raise MutantError(\"Invalid strand %s!\"%strand)\n if start_pos < 1: raise MutantError(\"Sequence positions must be positive!\")\n if start_pos > end_pos: raise MutantError(\"Sequence start can't be after end!\")",
"def pack(self) -> Tuple[int, int, str, bool]:\n return (\n self.start.place,\n self.end.place,\n self.label,\n self.has_direction,\n )",
"def calculate_global_position(strand, start, end, relative_position):\n if strand == 1:\n global_position = [start + x for x in relative_position]\n elif strand == -1:\n global_position = [end - x for x in relative_position]\n else:\n raise ValueError(\"Strand must be 1 or -1\")\n return global_position",
"def compute_pos_msa2seq(seq, start, stop):\n new_start = compute_revoffset_pos(seq, start)\n new_stop = compute_revoffset_pos(seq, stop)\n return new_start, new_stop",
"def get_pos(self) -> tuple:\n return self.pos",
"def find_range_from_cons_pos(my_pos, gpcr_pdb):\n (ext_range,chain)=gpcr_pdb[my_pos]\n pos_range=str(ext_range)\n #pos_range=ext_range+\"-\"+ext_range\n return pos_range",
"def _hit_range_get(self):\n return (self.hit_start, self.hit_end)",
"def get_position(self) -> typing.Tuple[int, int]:\n raise NotImplementedError",
"def parse_coordinates(seqfeature):\n start_position = None\n stop_position = None\n start = -1\n stop = -1\n parts = 0\n\n if (isinstance(seqfeature.location, FeatureLocation) or \\\n isinstance(seqfeature.location, CompoundLocation)):\n\n if seqfeature.strand is None:\n pass\n elif isinstance(seqfeature.location, FeatureLocation):\n parts = 1\n start_position = seqfeature.location.start\n stop_position = seqfeature.location.end\n elif isinstance(seqfeature.location, CompoundLocation):\n parts = len(seqfeature.location.parts)\n\n # Skip this compound seqfeature if it is comprised of more\n # than two features (tricky to parse).\n if parts == 2:\n\n # Retrieve compound seqfeature positions based on strand.\n if seqfeature.strand == 1:\n start_position = seqfeature.location.parts[0].start\n stop_position = seqfeature.location.parts[1].end\n elif seqfeature.strand == -1:\n start_position = seqfeature.location.parts[1].start\n stop_position = seqfeature.location.parts[0].end\n else:\n pass\n else:\n pass\n else:\n pass\n else:\n pass\n if isinstance(start_position, ExactPosition):\n start = int(start_position)\n if isinstance(stop_position, ExactPosition):\n stop = int(stop_position)\n return (start, stop, parts)",
"def obtain_seq_pos_info(result,seq_pos,seq_pos_n,chain_name,multiple_chains):\n chain_nm_seq_pos=\"\"\n if multiple_chains:\n chain_nm_seq_pos=chain_name\n for pos in result:\n if pos[0] != \"-\": #Consider only num in the pdb\n seq_pos.append([pos[0][0],pos[0][1],\"\",chain_nm_seq_pos,seq_pos_n]);\n seq_pos_n+=1\n return (seq_pos,seq_pos_n)",
"def extract_sequence(chrom,start,end,fasta_file):\n # extract the sequence from this region with pybedtools\n my_peak = '\\t'.join([chrom,str(start),str(end)])\n bedtool_peak = pybedtools.BedTool(my_peak, from_string=True)\n fasta = pybedtools.example_filename(fasta_file)\n a = a.sequence(fi=fasta)\n #print(open(a.seqfn).read()) ",
"def parse_pos(pos, regexp=POS_REGEXP):\n m = regexp.match(pos)\n return tuple(map(int, m.groups()))",
"def snp2gene(scaffold, pos, gff=dbpaths['gff']):\n\n geneid = 'intergenic'\n geneloc = 'non-coding'\n\n fobj = open(gff, 'rb')\n for line in fobj:\n col = line.split()\n if col[0] == scaffold:\n if col[2] == \"mRNA\":\n if int(col[3])<=int(col[4]):\n if float(col[3]) <= pos <= float(col[4]):\n geneid = re.search('ID=([^;]*);', col[8]).groups()[0]\n else:\n if float(col[4]) <= pos <= float(col[3]):\n geneid = re.search('ID=([^;]*);', col[8]).groups()[0]\n\n if col[2] == \"CDS\":\n if float(col[3]) <= pos <= float(col[4]):\n geneloc = 'coding (exonic)'\n\n fobj.close()\n\n return (geneid, geneloc)",
"def toPosition(self, pos):\n return [ord(pos[0])-ord('a'), int(pos[1])]",
"def toPosition(self, pos):\n return [ord(pos[0])-ord('a'), int(pos[1])]",
"def get_single_location(chrom, pos):\n return CHROMOSOME_TO_CODE[chrom] * int(1e9) + pos"
] |
[
"0.6389698",
"0.61054",
"0.59882236",
"0.5913174",
"0.5900781",
"0.58588254",
"0.58381635",
"0.5809354",
"0.5801149",
"0.57703596",
"0.5639592",
"0.5603268",
"0.55714244",
"0.55568963",
"0.55499804",
"0.5480885",
"0.53992945",
"0.53610545",
"0.53607786",
"0.5334829",
"0.5315633",
"0.5290446",
"0.5281257",
"0.5275482",
"0.527228",
"0.52618915",
"0.52016735",
"0.5166084",
"0.5166084",
"0.51499087"
] |
0.8105561
|
0
|
Initialize all values - chromosome/strand are just copied from arguments; positions are more complicated. You must provide either full_position, OR one or both of position_before/position_after. The two position_ arguments must be castable to ints, or None. The full_position argument must be a string of the form '100-200', '?-200' or '100-?', such as would be generated by self.full_position() - self.position_before and _after are set based on the two parts of the string. Self.min_/max_position are calculated based on self.position_before/_after - both, or whichever one isn't None. If immutable is True, the object is made immutable (by calling self.make_immutable() right after initiation).
|
def __init__(self, chromosome, strand, full_position=None, position_before=None, position_after=None, immutable=False):
# need to make instance mutable to be able to set anything, due to how __setattr__ is decorated
self.make_mutable_REMEMBER_CLEANUP_FIRST()
# now start setting attributes
self.chromosome = chromosome
self.strand = strand
# parse full_position if provided
if full_position is not None:
if (position_before is not None) or (position_after is not None):
raise ValueError("If providing full_position, cannot also provide position_before/position_after!")
self.position_before, self.position_after = self._parse_full_position(full_position)
# otherwise use position_before and/or position_after
else:
if position_before is None and position_after is None:
raise ValueError("Can't create an Insertion_position object with no known position values!")
try:
self.position_before = None if position_before is None else int(position_before)
self.position_after = None if position_after is None else int(position_after)
except TypeError:
raise ValueError("position_before/position_after must be int-castable or None!")
if immutable: self.make_immutable()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __init__(self, *args):\n if len(args) == 1:\n position = args[0]\n if len(position) != 2:\n raise PositionError\n self._position = args\n elif len(args) == 2:\n self._position = args\n else:\n raise PositionError",
"def __init__(self, start =None, end =None):\n self.start = start if start else PositionValue2D()\n self.end = end if end else PositionValue2D()",
"def __init__(self, size=0, position=(0, 0)):\n\n \"\"\"Check for errors in given size.\"\"\"\n if type(size) is not int:\n raise TypeError(\"size must be an integer\")\n if size < 0:\n raise ValueError(\"size must be >= 0\")\n\n \"\"\"Check for errors in given position.\"\"\"\n if type(position) is not tuple:\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n if type(position[0]) is not int or type(position[1]) is not int:\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n if position[0] < 0 or position[1] < 0:\n raise ValueError(\"position must be a tuple of 2 positive integers\")\n\n self.__size = size\n self.__position = position",
"def __init__(\n self,\n feature_type: str,\n location: Union[CompoundLocation, FeatureLocation] = None,\n left_right: Tuple[int, int] = None,\n strand: int = None,\n reference_sequence: Seq = None,\n name: str = None\n ):\n\n # make sure that EITHER range OR location was used, NOT both\n if left_right is not None:\n if not isinstance(left_right, tuple):\n raise TypeError(f'Range parameter must be a tuple of 2 integers for {name}')\n elif len(left_right) != 2:\n raise ValueError(f'Range parameter must be a tuple of length 2 for {name}')\n elif not is_int(left_right[0]) or not is_int(left_right[1]):\n raise TypeError(f'Start and end values for range parameter must be integers for {name}')\n elif left_right[0] > left_right[1]:\n raise ValueError(f'First position of range tuple must be <= to second position for {name}')\n elif location is not None:\n raise ValueError(f'Use either the range + strand parameters, or the location parameter for {name}')\n else:\n start, end = left_right\n # FeatureLocation uses 0-indexing; also wants base ints; FeatureLocation will throw an error if strand\n # is not +1, -1, or None, let it do that\n start, end = int(start), int(end)\n self.location = FeatureLocation(start - 1, end, strand)\n elif location is not None:\n if not isinstance(location, FeatureLocation) and not isinstance(location, CompoundLocation):\n raise TypeError(f'Location parameter must be Biopython object for {name}')\n else:\n self.location = location\n else:\n self.location = None\n\n self.type = feature_type\n self.name = name\n\n self._set_sequence(reference_sequence=reference_sequence)",
"def _parse_full_position(cls, full_position_string):\n try:\n before,after = [cls._parse_single_position(s) for s in full_position_string.split('-')]\n except (ValueError,AttributeError):\n raise ValueError(\"The full_position argument must be a string of the form '100-200', '?-200' or '100-?'!\"\n \"Got '%s'\"%(full_position_string,))\n if before is None and after is None:\n raise ValueError(\"At least one section of the full_position argument must be a number!\")\n return before,after",
"def _init(self, position):\n\t\tself._position = position",
"def position(self, position):\n if type(position) is not tuple or len(position) is not 2\\\n or type(position[0]) is not int or position[0] < 0\\\n or type(position[1]) is not int or position[1] < 0:\n\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n\n self.__position = position",
"def _make_pos(pos):\n return pos.chromosome, pos.strand, pos.min_position, pos.min_position+20",
"def set_initial_position(self, gps_pos, attitude):\r\n self.position = deepcopy(gps_pos)\r\n self.attitude = deepcopy(attitude)",
"def __init__(self, board_dimensions: tuple, initial_position: list = None) -> None:\n assert len(board_dimensions) == 2, \"board dimensions must be 2 digit array\"\n assert all(\n [dim >= 0 for dim in board_dimensions]\n ), \"dimensions must be positive\"\n self.board_dimensions = board_dimensions\n if initial_position:\n assert type(initial_position) == list, \"Position must be length 2 list\"\n assert (\n len(initial_position) == 2\n ), \"Position must be a list of length 2 containing x and y coordinates where top left of the board is [0,0]\"\n assert (\n 0 <= initial_position[0] < self.board_dimensions[0]\n ), \"Invalid initial x position\"\n assert (\n 0 <= initial_position[1] < self.board_dimensions[1]\n ), \"invalid initial y position\"\n self.position = initial_position.copy()\n else:\n self.position = [\n np.random.randint(0, board_dimensions[0] - 1),\n np.random.randint(0, board_dimensions[1] - 1),\n ]",
"def __init__(self, position, is_horizontal, map_state):\n\n self.position = position\n self.spawn_position = position[:]\n self.in_spawn_area = True\n self.is_horizontal = is_horizontal\n self.map_state = map_state\n self.previous_direction = (0, 0)",
"def __init__(self, position, momentum, mass):\n self.position = position\n self.momentum = momentum\n self.mass = mass",
"def __init__(self, size=0, position=(0, 0)):\n if type(size) is not int:\n raise TypeError(\"size must be an integer\")\n elif size < 0:\n raise ValueError(\"size must be >= 0\")\n self.__size = size\n self.position = position",
"def __init__(self, position):\n self.position = position\n self.direction = 'U'\n self.length = 0",
"def __init__(self, chromosome, label, start_base, end_base,\n gstrand=None, sub_bands=None):\n\n if start_base >= end_base:\n raise ValueError('wrong band coordinates '\n '%d-%d' % (start_base, end_base))\n\n self._chromosome = chromosome\n self._label = label\n self._start_base = start_base\n self._end_base = end_base\n\n self._gstrand = gstrand\n self._sub_bands = sub_bands",
"def __init__(self, position):\n if not isinstance(position, Tuple):\n raise Error.ArgumentError(f\"Invalid position ({position}) with the type: `{type(position)}`!\\n\"\n \"Only `Tuple` is supported as the type of vertex position.\",\n ModuleErrorCode,\n FileErrorCode, 1)\n\n self.__position = position # vertex position\n self.__rotation_encryption_angle: float = None # angle for rotation encryption\n self.__flipping_encryption_angletype: float = None # angle for flipping encryption\n self.__commands: List[str] = [] # commands to be executed\n self.__outcome = None # measurement outcome",
"def __init__(self, start_pos, end_pos, direction):\n self.s_pos = start_pos\n self.e_pos = end_pos\n self.dir = direction",
"def initialize(self):\n self.positions = self._generate_initial_positions()\n self.scores = np.array(self.compute_scores(self.positions))\n\n self._pso_data.best_positions = self.positions\n self._pso_data.best_scores = self.scores\n\n magic_constant = 2 # feel free to change FIXME\n max_velocity = (self.upper_bound - self.lower_bound) / magic_constant\n shape = (len(self.positions), len(self.lower_bound))\n self._pso_data.velocities = np.random.uniform(low=-max_velocity, high=max_velocity, size=shape)",
"def __init__(self, size=0, position=(0, 0)):\n self.size = size\n self.position = position",
"def __init__(self, size=0, position=(0, 0)):\n self.size = size\n self.position = position",
"def __init__(self, location: Tuple[int, int]) -> None:\n self.loc = location\n self.can_pass = True\n self.up = None\n self.down = None\n self.left = None\n self.right = None",
"def __init__(self, pos, length, direction, board_size):\n self._pos = pos\n self._x_pos, self._y_pos = self._pos\n self._len = length\n self._dir = direction\n self._bs = board_size\n self._is_hit = False\n self._hit_coors = []\n self._coordinates = self.coordinates()",
"def set_open_positions(self, positions):\n self.positions = positions",
"def __init__(self, start, end, value):\n self.start = start\n self.end = end\n self.value = value",
"def test_RNA_position_placement_split(self):\n \n tool = pybedtools.create_interval_from_list(\"chr1 50 60 ENSMUSG1_1_83;ENSMUSG1_6_83 0 + 125 125\".split())\n location_dict = {\"ENSMUSG1\" : {\"strand\" : \"+\", \"regions\" : [(0, 50),\n (100, 150),\n ] \n }\n }\n \n self.assertEqual(RNA_position(tool, location_dict), (.50, .75) )\n \n tool = pybedtools.create_interval_from_list(\"chr1 50 60 ENSMUSG1_1_83;ENSMUSG1_6_83 0 - 25 25\".split())\n location_dict = {\"ENSMUSG1\" : {\"strand\" : \"-\", \"regions\" : [(100, 150),\n (0, 50),\n ] \n }\n }\n \n self.assertEqual(RNA_position(tool, location_dict), (.50, .75))",
"def __init__(self, *args, **kwds):\n if args or kwds:\n super(ultrasnd_bump_ranges, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.sensor_FL is None:\n self.sensor_FL = sensor_msgs.msg.Range()\n if self.sensor_FR is None:\n self.sensor_FR = sensor_msgs.msg.Range()\n if self.sensor_RR is None:\n self.sensor_RR = sensor_msgs.msg.Range()\n if self.sensor_RL is None:\n self.sensor_RL = sensor_msgs.msg.Range()\n else:\n self.sensor_FL = sensor_msgs.msg.Range()\n self.sensor_FR = sensor_msgs.msg.Range()\n self.sensor_RR = sensor_msgs.msg.Range()\n self.sensor_RL = sensor_msgs.msg.Range()",
"def __init__(self, start=None, size=None, end=None):\n if start is not None:\n if (isinstance(start, bounding_box_pb2.BoundingBox) or\n isinstance(start, BoundingBox)):\n if size is not None or end is not None:\n raise ValueError('a BoundingBox object/proto must be specified alone')\n size = start.size\n start = start.start\n\n if (end is not None) + (start is not None) + (size is not None) != 2:\n raise ValueError('exactly two of start, end, and size must be specified')\n\n if start is not None:\n self.start = geom_utils.ToNumpy3Vector(start)\n if size is not None:\n self.size = geom_utils.ToNumpy3Vector(size)\n if end is not None:\n end = geom_utils.ToNumpy3Vector(end)\n\n if end is not None:\n if size is not None:\n self.start = end - size\n else:\n self.size = end - start",
"def set_initial_position(self, gps_pos, attitude):\r\n self.position, self.attitude = projection(self.mapdata.gps_pos, self.mapdata.attitude, gps_pos, attitude)",
"def check_valid_position_tuple(pos):\n try: chrom, start_pos, end_pos, strand = pos\n except (TypeError, ValueError): raise MutantError(\"Didn't get a correct position tuple! %s\"%pos)\n if strand not in SEQ_STRANDS: raise MutantError(\"Invalid strand %s!\"%strand)\n if start_pos < 1: raise MutantError(\"Sequence positions must be positive!\")\n if start_pos > end_pos: raise MutantError(\"Sequence start can't be after end!\")",
"def __init__(self, *args, **kwds):\n if args or kwds:\n super(observationRPY, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.position is None:\n self.position = pcl_segment.msg.positionRPY()\n if self.is_Known is None:\n self.is_Known = False\n else:\n self.position = pcl_segment.msg.positionRPY()\n self.is_Known = False"
] |
[
"0.5977929",
"0.56178004",
"0.55555886",
"0.5532431",
"0.55157036",
"0.5480008",
"0.5448816",
"0.5439251",
"0.532895",
"0.5309388",
"0.5269337",
"0.52215236",
"0.518906",
"0.5178596",
"0.51294214",
"0.5125572",
"0.5121275",
"0.5102379",
"0.50387585",
"0.50387585",
"0.50236976",
"0.5021037",
"0.50194323",
"0.5012124",
"0.49962944",
"0.4975751",
"0.4963379",
"0.49566057",
"0.49558365",
"0.4936799"
] |
0.75947535
|
0
|
Parse a full_position string to proper (position_before, position_after) value.
|
def _parse_full_position(cls, full_position_string):
try:
before,after = [cls._parse_single_position(s) for s in full_position_string.split('-')]
except (ValueError,AttributeError):
raise ValueError("The full_position argument must be a string of the form '100-200', '?-200' or '100-?'!"
"Got '%s'"%(full_position_string,))
if before is None and after is None:
raise ValueError("At least one section of the full_position argument must be a number!")
return before,after
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def parse_pos(pos, regexp=POS_REGEXP):\n m = regexp.match(pos)\n return tuple(map(int, m.groups()))",
"def parse_position(chrom_pos: str):\n chrom, pos = chrom_pos.split('_')\n return chrom, int(pos)",
"def handle_position(data: bytes) -> Tuple[bytes, str]:\n x, y, z = struct.unpack('fff', data[0:3 * 4])\n return data[20:], f'Current Position (x,y,z): {x} {y} {z}'",
"def parse_positions(self, start_pos, end_pos):\r\n\r\n start_column = ord(start_pos[0]) - 97\r\n if len(start_pos) == 2:\r\n start_row = ord(start_pos[1]) - 49\r\n else:\r\n start_row = 9\r\n end_column = ord(end_pos[0]) - 97\r\n if len(end_pos) == 2:\r\n end_row = ord(end_pos[1]) - 49\r\n else:\r\n end_row = 9\r\n return [start_row, start_column, end_row, end_column]",
"def parsePosition(self, parse):\n\n if len(parse) == 2:\n ch1 = ord(parse[0].lower())\n ch2 = ord(parse[1].lower())\n\n maxNum = 48 + self.board.size # ascii of max row #\n\n # [Row#][ColLetter]] case\n if 48 < ch1 <= maxNum and 97 <= ch2 < (97 + self.board.size):\n return maxNum - ch1, ch2 - 97 # actual grid indexes of desired position\n\n # [ColLetter][Row#] case\n if 48 < ch2 <= maxNum and 97 <= ch1 < (97 + self.board.size):\n return maxNum - ch2, ch1 - 97 # actual grid indexes of desired position\n return False",
"def _parse_location(location_string):\n location_regex = r\"(\\d+)-(\\d+)(\\(+\\)|\\(-\\)|)\"\n match = re.match(location_regex, location_string.strip())\n start, end, strand = match.groups()\n return int(start), int(end), -1 if strand == \"(-)\" else 1",
"def get_aa_pos_range(self,\n parts: List) -> Optional[Tuple[str, str, str, int, bool]]:\n aa_start = None\n aa_end = None\n pos_start = None\n pos_end = None\n used_one_letter = False\n\n if \"_\" in parts[0] and parts[0].count(\"_\") == 1:\n aa_pos_range = parts[0].split(\"_\")\n if len(aa_pos_range) != 2 or \\\n not aa_pos_range[0] or not aa_pos_range[1]:\n return None\n\n start_aa_pos = \\\n self.get_amino_acid_and_pos(\n aa_pos_range[0], used_one_letter\n )\n\n if start_aa_pos:\n used_one_letter = start_aa_pos[2]\n\n end_aa_pos = \\\n self.get_amino_acid_and_pos(\n aa_pos_range[1], used_one_letter\n )\n\n if start_aa_pos and end_aa_pos:\n aa_start = start_aa_pos[0]\n pos_start = start_aa_pos[1]\n aa_end = end_aa_pos[0]\n pos_end = end_aa_pos[1]\n used_one_letter = end_aa_pos[2]\n\n else:\n aa_and_pos = \\\n self.get_amino_acid_and_pos(\n parts[0], used_one_letter\n )\n if aa_and_pos:\n aa_start = aa_and_pos[0]\n pos_start = aa_and_pos[1]\n used_one_letter = aa_and_pos[2]\n\n return aa_start, aa_end, pos_start, pos_end, used_one_letter",
"def parse_pos(self, pos):\r\n\r\n column = ord(pos[0]) - 97\r\n if len(pos) == 2:\r\n row = ord(pos[1]) - 49\r\n else:\r\n row = 9\r\n return [row, column]",
"def _parse_uncompressed_position(data: str) -> Tuple[float, float, int, str, str]:\n # Decode the latitude and ambiguity\n try:\n lat, ambiguity = APRSUtils.decode_uncompressed_latitude(data[0:8])\n\n except ValueError as e:\n raise ParseError(\"Invalid latitude: {}\".format(e))\n\n # Decode the longitude\n try:\n lng = APRSUtils.decode_uncompressed_longitude(data[9:18])\n\n except ValueError as e:\n raise ParseError(\"Invalid longitude: {}\".format(e))\n\n logger.debug(\"Latitude: {} ({}) Longitude: {}\".format(\n lat, ambiguity, lng\n ))\n\n # Parse the symbol table\n symbol_table = data[8]\n logger.debug(\"Symbol table: {}\".format(symbol_table))\n\n try:\n # Parse the symbol ID\n symbol_id = data[18]\n logger.debug(\"Symbol: {}\".format(symbol_id))\n except IndexError:\n raise ParseError(\"Missing symbol identifier\")\n\n return (lat, lng, ambiguity, symbol_table, symbol_id)",
"def get_positions_deleted(self, parts: List) -> Optional[Tuple[str, str]]:\n if \"_\" in parts[0] and parts[0].count(\"_\") == 1:\n positions = self.get_valid_digits(parts[0])\n if not positions:\n return None\n start_pos_del, end_pos_del = positions\n if start_pos_del > end_pos_del:\n return None\n else:\n start_pos_del = parts[0]\n end_pos_del = None\n if not start_pos_del.isdigit():\n return None\n return start_pos_del, end_pos_del",
"def get_position(filestring, position):\n lines = filestring.split(\"\\n\")\n line_number, place, count = 0, 0, 0\n #print \"Number of lines: \", len(lines)\n \n while line_number < len(lines):\n line = lines[line_number]\n new_count = count + len(line) #+ 1 # +1 nes dar newline pridedame\n if position <= new_count:\n place = position - count\n break\n count = new_count # +1 nes dar newline pridedame\n line_number += 1\n \n print \"\\n\".join([\"%s:%s\" % ((\"===> \" if i==line_number else \"\") + str(i), lines[i]) for i in xrange(len(lines))])\n return (line_number, place)",
"def parse_coordinates(seqfeature):\n start_position = None\n stop_position = None\n start = -1\n stop = -1\n parts = 0\n\n if (isinstance(seqfeature.location, FeatureLocation) or \\\n isinstance(seqfeature.location, CompoundLocation)):\n\n if seqfeature.strand is None:\n pass\n elif isinstance(seqfeature.location, FeatureLocation):\n parts = 1\n start_position = seqfeature.location.start\n stop_position = seqfeature.location.end\n elif isinstance(seqfeature.location, CompoundLocation):\n parts = len(seqfeature.location.parts)\n\n # Skip this compound seqfeature if it is comprised of more\n # than two features (tricky to parse).\n if parts == 2:\n\n # Retrieve compound seqfeature positions based on strand.\n if seqfeature.strand == 1:\n start_position = seqfeature.location.parts[0].start\n stop_position = seqfeature.location.parts[1].end\n elif seqfeature.strand == -1:\n start_position = seqfeature.location.parts[1].start\n stop_position = seqfeature.location.parts[0].end\n else:\n pass\n else:\n pass\n else:\n pass\n else:\n pass\n if isinstance(start_position, ExactPosition):\n start = int(start_position)\n if isinstance(stop_position, ExactPosition):\n stop = int(stop_position)\n return (start, stop, parts)",
"def _parse_amino_pos(self, var):\n if re.match('[a-z]', var[-1]):\n var = var[:-1]\n\n amino = ''\n if re.match('^[a-z]', var):\n amino = var[0]\n pos = int(var[1:])\n else:\n pos = int(var)\n\n return amino, pos",
"def get_position(pos):\n if type(pos) is str:\n return list(map(lambda x: float(x),pos.split(\",\")))\n return pos",
"def parse_position_line(line):\n\n match = Response.regex_position.search(line)\n if match is not None:\n result = dict(\n x=float(match.group(\"x\")),\n y=float(match.group(\"y\")),\n z=float(match.group(\"z\")),\n )\n if match.group(\"e\") is not None:\n # report contains only one E\n result[\"e\"] = float(match.group(\"e\"))\n\n elif match.group(\"es\") is not None:\n # report contains individual entries for multiple extruders (\"E0:... E1:... E2:...\")\n es = match.group(\"es\")\n for m in Response.regex_e_positions.finditer(es):\n result[\"e{}\".format(m.group(\"id\"))] = float(m.group(\"value\"))\n\n else:\n # apparently no E at all, should never happen but let's still handle this\n return None\n\n return result\n\n return None",
"def get_text_positions(self, node, padded):\n # type: (AstNode, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]\n if not hasattr(node, 'first_token'):\n return (1, 0), (1, 0)\n\n start = node.first_token.start\n end = node.last_token.end\n if padded and any(match_token(t, token.NEWLINE) for t in self.get_tokens(node)):\n # Set col_offset to 0 to include leading indentation for multiline statements.\n start = (start[0], 0)\n\n return start, end",
"def parse_flanking_region_aln_or_pos(flanking_region_aln_or_pos):\n try: \n check_valid_position_tuple(flanking_region_aln_or_pos)\n return flanking_region_aln_or_pos\n except MutantError:\n try: \n pos = flanking_region_aln_or_pos.iv\n except AttributeError:\n raise MutantError(\"parse_flanking_region_aln_or_pos input should be HTSeq aln or position tuple! \"\n +\"Got %s\"%(flanking_region_aln_or_pos,))\n if pos: return HTSeq_pos_to_tuple(pos) \n # if unaligned, figure out if unaligned or multi-aligned, and just return the appropriate special position code\n else: \n try: XM_val = get_HTSeq_optional_field(flanking_region_aln_or_pos, 'XM')\n except KeyError: return SPECIAL_POSITIONS.unaligned\n if int(XM_val) > 1: return SPECIAL_POSITIONS.multi_aligned\n else: return SPECIAL_POSITIONS.unaligned",
"def decode_pos(pos):\n return pos // 3, pos % 3",
"def is_position(position):\n return isinstance(position, str) and len(position) == 2 and POS_PATTERN.match(position)",
"def parse_move(move):\n if not (len(move) == 2):\n return None, None\n try:\n row = ord(move[0].upper()) - 65\n col = int(move[1])\n except:\n return None, None\n return row, col",
"def get_insertion_pos_from_flanking_region_pos(flanking_region_aln_or_pos, cassette_end, relative_read_direction, \n immutable_position=True):\n # check that basic values aren't weird\n check_valid_end_info(cassette_end, relative_read_direction)\n # parse flanking_region_aln_or_pos arg - it'll either return a tuple with the basics, or a special position code\n parsed_position = parse_flanking_region_aln_or_pos(flanking_region_aln_or_pos)\n try: chrom, start_pos, end_pos, strand = parsed_position\n except (TypeError, ValueError): return parsed_position\n check_valid_position_tuple(parsed_position)\n ### chromosome is always the same as read, so just leave it as is\n ### cassette strand is the same as read strand, OR the opposite if the read is opposite to cassette (happens in two cases)\n if (cassette_end=='5prime' and relative_read_direction=='inward'): pass\n elif (cassette_end=='3prime' and relative_read_direction=='outward'): pass\n else: strand = ('+' if strand=='-' else '-')\n ### cassette position depends on the read position and cassette_end in a somewhat complex way (see docstring)\n if (cassette_end=='5prime' and strand=='+') or (cassette_end=='3prime' and strand=='-'): pos_before, pos_after = end_pos, None\n else: pos_before, pos_after = None, start_pos\n return Insertion_position(chrom, strand, position_before=pos_before, position_after=pos_after, immutable=immutable_position)",
"def __getPointXYs(self, raw_string):\n try:\n pointsRE = re.compile('^\\((\\d*\\D*, *\\D*\\d*)\\)\\D*\\((\\d*\\D*, *\\D*\\d*)\\)$')\n points = pointsRE.search(raw_string.strip()).groups()\n startPoint = (int(points[0].split(',')[0].strip()), int(points[0].split(',')[1].strip()))\n endPoint = (int(points[1].split(',')[0].strip()), int(points[1].split(',')[1].strip()))\n return self.__validatePoint(startPoint), self.__validatePoint(endPoint)\n except AttributeError:\n traceback.print_exc()\n raise ValueError('Failed to get point coordinates.')",
"def __parse_position_data(self):\n self.add_debug('Parse position data ...')\n\n for i in range(len(self._lines)):\n if self.has_errors(): break\n line = self._lines[i]\n if len(line) < 1: continue\n if self.TIMESTAMP_MARKER in line: continue\n if self.RACK_BARCODE_MARKER in line: continue\n\n msg = 'Unexpected content in line %i: %s' % (i + 1, line)\n if not self.SEPARATOR in line: self.add_error(msg)\n tokens = line.split(self.SEPARATOR)\n if not len(tokens) == 2: self.add_error(msg)\n if self.has_errors(): continue\n\n pos_label = tokens[0].strip()\n if self.position_map.has_key(pos_label):\n msg = 'Duplicate position label \"%s\"' % (pos_label)\n self.add_error(msg)\n if self.has_errors(): continue\n\n tube_barcode = tokens[1].strip()\n if tube_barcode == self.NO_TUBE_PLACEHOLDER: tube_barcode = None\n self.position_map[pos_label] = tube_barcode",
"def get_text_positions(self, node, padded):\n # type: (AstNode, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]\n if getattr(node, \"_broken_positions\", None):\n # This node was marked in util.annotate_fstring_nodes as having untrustworthy lineno/col_offset.\n return (1, 0), (1, 0)\n\n if supports_tokenless(node):\n return self._get_text_positions_tokenless(node, padded)\n\n return self.asttokens.get_text_positions(node, padded)",
"def _parse_rcloc(x, string): # figures out string location\n if x == 'x':\n top = rc.get(f'{string}.top', context=True)\n bottom = rc.get(f'{string}.bottom', context=True)\n if top is None and bottom is None:\n return None\n elif top and bottom:\n return 'both'\n elif top:\n return 'top'\n elif bottom:\n return 'bottom'\n else:\n return 'neither'\n else:\n left = rc.get(f'{string}.left', context=True)\n right = rc.get(f'{string}.right', context=True)\n if left is None and right is None:\n return None\n elif left and right:\n return 'both'\n elif left:\n return 'left'\n elif right:\n return 'right'\n else:\n return 'neither'",
"def get_pos(self) -> tuple:\n return self.pos",
"def get_pos(term):\n # pylint: disable=invalid-name\n # Invalid variable name \"Position\"\n Position = collections.namedtuple('Position', ('row', 'column'))\n\n pos = Position(*term.get_location(timeout=5.0))\n\n if -1 in pos:\n print('stdin: not a human', file=sys.stderr)\n exit(2)\n\n return pos",
"def _make_pos(pos):\n return pos.chromosome, pos.strand, pos.min_position, pos.min_position+20",
"def parse_location(location_str):\n def floatify(latlon):\n \"\"\" Turns a latlon string into a float \"\"\"\n sign = -2. * (latlon[-1].lower() in ['s', 'w']) + 1\n return float(latlon[:-1]) * sign\n points = location_str.strip().split(',')\n if not len(points) == 2:\n raise BadQuery(\"Expected four comma seperated values \"\n \"defining a single point.\")\n\n is_lat = lambda x: x[-1].lower() in ['n', 's']\n lat = filter(is_lat, points)\n if not len(lat) == 1:\n raise BadQuery(\"Expected two latitudes (determined by \" +\n \"values ending in 'N' or 'S'\")\n is_lon = lambda x: x[-1].lower() in ['e', 'w']\n lon = filter(is_lon, points)\n if not len(lon) == 1:\n raise BadQuery(\"Expected two longitudes (determined by \" +\n \"values ending in 'E' or 'W'\")\n lat = floatify(lat[0])\n lon = floatify(lon[0])\n\n # make sure latitude is in range.\n if (lat > 90.) or (lat < -90):\n raise BadQuery(\"Latitude must be within -90 and 90, got %s\" %\n str(lat))\n # we let the user use either longitudes of 0 to 360\n # or -180 to 180, then convert to nautical (-180 to 180).\n if lon > 360. or lon < -180.:\n raise BadQuery(\"Longitudes must be within -180 and 360, got %s\" %\n str(lon))\n # make sure lons end up in -180 to 180.\n lon = np.mod(lon + 180., 360.) - 180.\n\n location = {'latitude': lat,\n 'longitude': lon}\n return location",
"def HTSeq_pos_to_tuple(HTSeq_pos):\n try:\n chrom = HTSeq_pos.chrom\n except AttributeError:\n raise MutantError(\"Invalid position %s! Need an HTSeq iv object. (If empty, maybe read wasn't aligned?)\"%(HTSeq_pos,))\n strand = HTSeq_pos.strand\n # HTSeq is 0-based and I want 1-based, thus the +1; end has no +1 because in HTSeq end is the base AFTER the alignment.\n start_pos = HTSeq_pos.start+1\n end_pos = HTSeq_pos.end\n output_pos = (chrom, start_pos, end_pos, strand)\n check_valid_position_tuple(output_pos)\n return output_pos"
] |
[
"0.64782345",
"0.6377305",
"0.62160516",
"0.612528",
"0.6113427",
"0.5846422",
"0.5825414",
"0.5814289",
"0.5801118",
"0.5782579",
"0.57719696",
"0.5761895",
"0.57479566",
"0.5691542",
"0.56005317",
"0.5598498",
"0.5556253",
"0.552903",
"0.54763263",
"0.5399435",
"0.5392648",
"0.5390407",
"0.5371179",
"0.53504306",
"0.5334877",
"0.53206944",
"0.53049225",
"0.5304101",
"0.5301368",
"0.53009063"
] |
0.8495686
|
0
|
Make key for sorting/comparison - based on chromosome/position/strand, with improved chromosome-number sorting. First two fields are chromosome data - splits chromosome into name/number (both optional), so that 'chr2' sorts before 'chr12' (but 'chr' before 'chr1', and 'other_chr1' after 'chr4'), and also so that chromosomes sort first, then other names (cassette, chloroplast/mitochondrial, anything else), then scaffolds. Next two fields are min_/max_position - these are always numerically defined, so ?-101 and 100-? will sort together (as opposed to if we used position_before/_after, which can be None). Next field is strand - we want the sorting on position BEFORE strand, it's more readable/sensible that way. Final two fields are position_before/after, to ensure ?-101 isn't considered equal to 100-101.
|
def _make_key(self):
all_position_values = (chromosome_sort_key(self.chromosome), self.min_position, self.max_position,
self.strand, self.position_before, self.position_after)
return all_position_values
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def sortkey(item):\n chrom, pos, ref, alt = item[0]\n if chrom.startswith('chr'):\n chrom = chrom[3:]\n if chrom.isdigit():\n chrom = int(chrom)\n return (chrom, pos, len(ref), len(alt))",
"def sort(self):\n sort_key = self.data.chromosome.apply(sorter_chrom)\n self.data = (\n self.data.assign(_sort_key_=sort_key)\n .sort_values(by=[\"_sort_key_\", \"start\", \"end\"], kind=\"mergesort\")\n .drop(\"_sort_key_\", axis=1)\n .reset_index(drop=True)\n )",
"def compare(chr1, pos1, chr2, pos2):\n\tpos1 = int(pos1)\n\tpos2 = int(pos2)\n\tif chrsort == 'version':\n\t\tchr1 = mapChrForVersion(chr1)\n\t\tchr2 = mapChrForVersion(chr2)\n\telif chrsort == 'natural':\n\t\tpass # use original chr1, chr2\n\telse:\n\t\tchr1 = chrsort.get(chr1, chr1)\n\t\tchr2 = chrsort.get(chr2, chr2)\n\treturn -1 if (chr1, pos1) < (chr2, pos2) else 1 if (chr1, pos1) > (chr2, pos2) else 0",
"def __init__(self, chromosome, strand, full_position=None, position_before=None, position_after=None, immutable=False):\n # need to make instance mutable to be able to set anything, due to how __setattr__ is decorated\n self.make_mutable_REMEMBER_CLEANUP_FIRST() \n # now start setting attributes\n self.chromosome = chromosome\n self.strand = strand\n # parse full_position if provided\n if full_position is not None:\n if (position_before is not None) or (position_after is not None):\n raise ValueError(\"If providing full_position, cannot also provide position_before/position_after!\")\n self.position_before, self.position_after = self._parse_full_position(full_position)\n # otherwise use position_before and/or position_after\n else:\n if position_before is None and position_after is None:\n raise ValueError(\"Can't create an Insertion_position object with no known position values!\")\n try:\n self.position_before = None if position_before is None else int(position_before)\n self.position_after = None if position_after is None else int(position_after)\n except TypeError: \n raise ValueError(\"position_before/position_after must be int-castable or None!\")\n if immutable: self.make_immutable()",
"def _column_sorting_key(self, c):\n first_index = 0\n if c.startswith('hybrid'):\n first_index = 1\n elif c.startswith('solar'):\n first_index = 2\n elif c.startswith('wind'):\n first_index = 3\n elif c == MERGE_COLUMN:\n first_index = -1\n return first_index, self._hybrid_meta.columns.get_loc(c)",
"def order_chromosomal_contigs(chr_blast_output):\n ordered_chr_contigs = []\n current_contig = \"null\"\n current_contig_direction = 0\n current_contig_hits = 0\n\n with open(chr_blast_output) as blast_matches:\n for hit in blast_matches:\n hit_data = hit.rstrip(\"\\n\").split(\"\\t\")\n core_gene_dir = int(hit_data[0].split(\"|\")[1])\n if float(hit_data[2]) >= 90.0:\n new_contig = hit_data[1]\n new_contig_direction = core_gene_dir*np.sign(int(hit_data[9])-int(hit_data[8]))\n \n if new_contig == current_contig and new_contig_direction == current_contig_direction:\n current_contig_hits += 1\n else: \n contig_tuple = (current_contig, current_contig_direction, current_contig_hits)\n ordered_chr_contigs.append(contig_tuple)\n current_contig = new_contig\n current_contig_direction = new_contig_direction\n current_contig_hits = 1\n\n contig_tuple = (current_contig, current_contig_direction, current_contig_hits)\n ordered_chr_contigs.append(contig_tuple)\n ordered_chr_contigs.pop(0)\n\n #If hits to a contig are not contiguous, keep only the longest run \n chr_contig_dict = {} #stores the longest run for each contig\n remove_list = [] #stores the shorter runs for deletion\n n = -1\n for entry in ordered_chr_contigs:\n n += 1\n contig = entry[0]\n hits = entry[2]\n if contig not in chr_contig_dict:\n chr_contig_dict[contig] = (n, entry)\n elif hits > chr_contig_dict[contig][1][2]:\n remove_list.append(chr_contig_dict[contig])\n chr_contig_dict[contig] = (n, entry)\n else:\n remove_list.append((n, entry))\n\n #The first contig will usually also be the last - both should be kept \n for item in remove_list:\n \n if int(item[0]) == 0 or int(item[0]) == len(ordered_chr_contigs)-1:\n remove_list.remove(item)\n \n remove_list.sort(reverse = True)\n for item in remove_list:\n position = item[0]\n ordered_chr_contigs.pop(position)\n \n return ordered_chr_contigs",
"def _get_sequence_based_sorting_keys(self):\n left_event_defs = self._left_subtree.get_event_definitions()\n right_event_defs = self._right_subtree.get_event_definitions()\n # comparing min and max leaf index of two subtrees\n min_left = min(left_event_defs, key=lambda x: x.index).index # [ { ] } or [ { } ]\n max_left = max(left_event_defs, key=lambda x: x.index).index # { [ } ] or { [ ] }\n min_right = min(right_event_defs, key=lambda x: x.index).index # [ ] { }\n max_right = max(right_event_defs, key=lambda x: x.index).index # { } [ ]\n if max_left < min_right: # 3)\n left_sort, right_sort, rel_op = -1, 0, RelopTypes.SmallerEqual\n elif max_right < min_left: # 4)\n left_sort, right_sort, rel_op = 0, -1, RelopTypes.GreaterEqual\n elif min_left < min_right: # 1)\n left_sort, right_sort, rel_op = 0, 0, RelopTypes.SmallerEqual\n elif min_right < min_left: # 2)\n left_sort, right_sort, rel_op = 0, 0, RelopTypes.GreaterEqual\n if rel_op is None:\n raise Exception(\"rel_op is None, something bad has happened\")\n left_sorting_key = lambda pm: pm.events[left_sort].timestamp\n right_sorting_key = lambda pm: pm.events[right_sort].timestamp\n # left/right_sort == 0 means that left/right subtree will be sorted by first timestamp\n return left_sorting_key, right_sorting_key, rel_op, (left_sort == 0), (right_sort == 0)",
"def GenPositionConsensus(position, cutoff = 0.65,MinPercentBetter = 0.1,GapCutoff = 0.5):\n if GapCutoff - position['-'] < 0.001:#Check to see if it has too many gaps\n return (('-',),position['-'])\n cutoff -= position['-']#otherwise ignore gaps and adjust the cutoff proportionally, basically 1.0 now = 1.0 - %gaps\n threshold = (1-cutoff)*MinPercentBetter#set the threshold, this is the minimum amount better than the next best that a score must be to get accepted, means that ACG = 0.81, AGT = 0.79 returns AGCT not AGC \n for i in [1,2,3,4]:\n permutations = combinations(['A','C','G','T'],i)\n scores = []\n for permutation in permutations:\n total = 0 \n for base in permutation:\n total += position[base] \n scores.append((total,permutation)) \n scores.sort(reverse=True)\n if i == 4:\n return (scores[0][1],scores[0][0],position['-'])#default to returning ACGT (N)\n\n if cutoff - scores[0][0]< 0.001 and \\\n scores[0][0] - scores[1][0] > threshold:\n return (scores[0][1],scores[0][0],position['-'])\n \n return ('*',)",
"def sort_key(self):\n ...",
"def _make_pos(pos):\n return pos.chromosome, pos.strand, pos.min_position, pos.min_position+20",
"def _build_sort1_table(key_itime, keys_map, header_dict,\n form, form_results, form_resultsi,\n disp_dict, stress_dict, strain_dict, force_dict,\n strain_energy_dict, gpstress_dict, log):\n is_results = False\n form_resultsi_subcase = []\n #for key, value in header_dict.items():\n #print(key, value)\n # (isubcase, analysis_code, sort_method,\n # count, ogs, superelement_adaptivity_index) = key\n key_itime0 = key_itime[0]\n key0 = key_itime0[0]\n # (isubcase, analysis_code, sort_method,\n # count, ogs, superelement_adaptivity_index, pval_step) = key\n subcase_id_old = key0[0]\n count_old = key0[3]\n ogs_old = key0[4]\n subtitle_old = key0[5]\n subtitle_old, label_old, superelement_adaptivity_index_old, unused_pval_step_old = keys_map[key0]\n del label_old\n del superelement_adaptivity_index_old\n\n # now that we have the data built, we put it in the form\n # in sorted order\n #\n # TODO: consider pval_step\n for key, itime in key_itime:\n # (isubcase, analysis_code, sort_method,\n # count, ogs, superelement_adaptivity_index, pval_step) = key\n #print('key =', key)\n subcase_id = key[0]\n count = key[3]\n ogs = key[4]\n #print('*ogs =', ogs)\n #subtitle = key[4]\n try:\n subtitle, unused_label, superelement_adaptivity_index, unused_pval_step = keys_map[key]\n except Exception:\n subcase_id = subcase_id_old\n subtitle = subtitle_old + '?'\n superelement_adaptivity_index = '?'\n raise\n\n #print('key =', key)\n if subcase_id != subcase_id_old or subtitle != subtitle_old or ogs != ogs_old:\n count_str = '' if count == 0 else ' ; opt_count=%s' % count_old\n ogs_str = '' if ogs == 0 else '; OGS=%s' % ogs_old\n subcase_str = 'Subcase %s; %s%s%s%s' % (\n subcase_id_old, subtitle_old, superelement_adaptivity_index, count_str, ogs_str)\n #print(subcase_str)\n res = (\n subcase_str.rstrip('; '),\n None,\n form_resultsi_subcase\n )\n form_resultsi.append(res)\n form_resultsi_subcase = []\n subcase_id_old = subcase_id\n subtitle_old = subtitle\n count_old = count\n ogs_old = ogs\n\n\n try:\n header = header_dict[(key, itime)]\n except KeyError: # this hits for strain energy\n msg = 'Missing (key, itime) in header_dict\\n'\n msg += ' key=%s\\n' % str(key)\n\n (subcase, analysis_code, sort_method,\n count, ogs, superelement_adaptivity_index, pval_step) = key\n msg += f' subcase={subcase}\\n'\n msg += f' analysis_code={analysis_code}\\n'\n msg += f' sort_method={sort_method}\\n'\n msg += f' count={count}\\n'\n msg += f' ogs={ogs}\\n'\n msg += f' superelement_adaptivity_index={superelement_adaptivity_index!r}\\n'\n msg += f' pval_step={pval_step!r}\\n'\n\n msg += ' itime=%s\\n' % itime\n msg += ' %s\\n' % str((key, itime))\n msg += 'Possible (key, time):\\n'\n for keyi in header_dict:\n msg += ' %s\\n' % str(keyi)\n #print(msg.rstrip())\n #print('expected = (%s, %r)\\n' % (str(key), itime))\n log.error(msg.rstrip() + '\\n')\n #self.log.error('expected = (%s, %r)\\n' % (str(key), itime))\n continue\n #raise KeyError(msg)\n try:\n header = header.strip()\n except Exception:\n print('header = %r' % header)\n raise\n\n\n form_outi = []\n form_out = (header, None, form_outi)\n disp_formi = disp_dict[(key, itime)]\n stress_formi = stress_dict[(key, itime)]\n strain_formi = strain_dict[(key, itime)]\n force_formi = force_dict[(key, itime)]\n strain_energy_formi = strain_energy_dict[(key, itime)]\n gpstress_formi = gpstress_dict[(key, itime)]\n if disp_formi:\n form_outi += disp_formi\n #form_outi.append(('Disp', None, disp_formi))\n if stress_formi:\n form_outi.append(('Stress', None, stress_formi))\n is_results 
= True\n if strain_formi:\n form_outi.append(('Strain', None, strain_formi))\n is_results = True\n if force_formi:\n form_outi.append(('Force', None, force_formi))\n is_results = True\n if strain_energy_formi:\n form_outi.append(('Strain Energy', None, strain_energy_formi))\n is_results = True\n if gpstress_formi:\n form_outi.append(('Grid Point Stresses', None, gpstress_formi))\n is_results = True\n\n if form_outi:\n is_results = True\n form_resultsi_subcase.append(form_out)\n #break\n\n #print(\"subcase_id = \", subcase_id)\n if subcase_id:\n count_str = '' if count == 0 else ' ; opt_count=%s' % count_old\n ogs_str = '' if ogs == 0 else '; OGS=%s' % ogs_old\n subcase_str = 'Subcase %s; %s%s%s' % (subcase_id, subtitle, count_str, ogs_str)\n #print('*', subcase_str)\n res = (\n subcase_str.strip('; '),\n None,\n form_resultsi_subcase\n )\n form_resultsi.append(res)\n assert len(form_out) > 0, form_out\n form_resultsi_subcase = []\n\n if is_results:\n form.append(form_results)\n assert len(form_out) > 0, form_out\n #print('formi =', formi)\n #print('form_out =', form_out)\n #print('form_resultsi =', form_resultsi)\n #print('form_results =', form_results)\n #print(form)\n #if len(formi):\n #form.append(form0)\n #print(form)\n #aa\n #print('form', form)\n #print('form_results =', form_results)\n return form",
"def karyotypicSortKey(s):\n if s == \"chrM\": return []\n if s == \"MT\": return [\"~\"]\n return naturalSortKey(s)",
"def sortKey(self, p_str): # real signature unknown; restored from __doc__\n return QCollatorSortKey",
"def test_RNA_position_placement_split(self):\n \n tool = pybedtools.create_interval_from_list(\"chr1 50 60 ENSMUSG1_1_83;ENSMUSG1_6_83 0 + 125 125\".split())\n location_dict = {\"ENSMUSG1\" : {\"strand\" : \"+\", \"regions\" : [(0, 50),\n (100, 150),\n ] \n }\n }\n \n self.assertEqual(RNA_position(tool, location_dict), (.50, .75) )\n \n tool = pybedtools.create_interval_from_list(\"chr1 50 60 ENSMUSG1_1_83;ENSMUSG1_6_83 0 - 25 25\".split())\n location_dict = {\"ENSMUSG1\" : {\"strand\" : \"-\", \"regions\" : [(100, 150),\n (0, 50),\n ] \n }\n }\n \n self.assertEqual(RNA_position(tool, location_dict), (.50, .75))",
"def coordinates(self, name, start=None, end=None):\n if \"|\" in name:\n self.name = name.split(\"|\")[0]\n else:\n self.name = name\n positions = {}\n match_positions = []\n records = []\n segments = []\n result_segments = []\n for record in self.process(self.name):\n records.append(record)\n records.sort(key=lambda x: int(x.exon_number))\n\n if records[0].strand == '+':\n _start = 1\n for record in records:\n for relative, actual in enumerate(range(record.start, record.end + 1),\n start=_start):\n positions[relative] = actual\n _start = relative + 1\n for pos in range(start, end):\n match_positions.append(positions[pos])\n for key, group in groupby(enumerate(match_positions),\n lambda x: x[0] - x[-1]):\n segment = list(map(itemgetter(1), group))\n segments.append([segment[0], segment[-1]])\n for segment in segments:\n for record in records:\n if segment[0] >= record.start and segment[1] <= record.end:\n result_segments.append((record.scaffold, segment[0], segment[1],\n record.transcript_id + '|' + record.gene_name, 0, record.strand))\n\n elif records[0].strand == '-':\n _start = 1\n for record in records:\n for relative, actual in enumerate(reversed(range(record.start,\n record.end + 1)), start=_start):\n positions[relative] = actual\n _start = relative + 1\n for pos in range(start, end):\n match_positions.append(positions[pos])\n for key, group in groupby(enumerate(reversed(match_positions)),\n lambda x: x[0] - x[-1]):\n segment = list(map(itemgetter(1), group))\n segments.append([segment[0], segment[-1]])\n for segment in segments:\n for record in records:\n if segment[0] >= record.start and segment[1] <= record.end:\n result_segments.append((record.scaffold, segment[0], segment[1],\n record.transcript_id + '|' + record.gene_name, 0, record.strand))\n\n if len(result_segments) == 0:\n logger.debug('%s, %s, %s' % (name, start, end))\n logger.debug('%s' % str(segments))\n for r in records:\n logger.debug('%s %s %s %s' % (r.scaffold, r.strand,\n r.start, r.end))\n\n return result_segments",
"def parse_position(chrom_pos: str):\n chrom, pos = chrom_pos.split('_')\n return chrom, int(pos)",
"def placementKey( geo):\n def diagcmp( xyA, xyB):\n \"\"\"\n Compare two positions based on x + y. If x + y is the same for the\n two, compare based on x.\n \"\"\"\n return cmp(xyA[0] + xyA[1], xyB[0] + xyB[1]) or cmp(xyA[0], xyB[0])\n\n sorted = [ tuple(geo[i]) for i in xrange(geo.shape[0]) ]\n sorted.sort( diagcmp)\n return hash(tuple(sorted))",
"def __init__(self, row, startpos=None):\r\n\r\n row = row.rstrip(\"\\n\")\r\n\r\n self.row = row\r\n self.fields = row.split(\"\\t\")\r\n\r\n if startpos is not None:\r\n\r\n self.fields = self.fields[startpos:]\r\n\r\n if row[0] != \"#\":\r\n\r\n self.valid_row = True\r\n\r\n self.chrom = self.fields[0]\r\n self.pos = int(self.fields[1])\r\n self.ref = self.fields[3]\r\n\r\n self.chrompos = self.chrom +\"\\t\"+ str(self.pos)\r\n else:\r\n self.valid_row = False\r\n\r\n\r\n ##########################################################\r\n # type questions\r\n \r\n def is_valid_row(self):\r\n \"\"\" test if the row contains all fields \"\"\"\r\n return self.valid_row\r\n \r\n def is_ref_known(self):\r\n \"\"\" test if the ref is actually a base and not just \"N\" \"\"\"\r\n \r\n if \"N\" in self.ref:\r\n return False\r\n else:\r\n return True\r\n \r\n def get_conpos(self):\r\n \r\n return self.conpos\r\n \r\n def get_con(self):\r\n return self.con\r\n \r\n def get_pos(self):\r\n return self.pos\r\n \r\n def get_alt(self):\r\n return self.alt\r\n \r\n def get_ref(self):\r\n return self.ref\r\n \r\n def cutoff_test(self, tested_trait, direction, cutoff):\r\n \"\"\"\r\n Tests if the tested trait is [above/below] the cutoff\r\n \"\"\"\r\n \r\n if direction == \"max\":\r\n if tested_trait <= cutoff:\r\n return True\r\n \r\n elif direction == \"min\":\r\n if tested_trait >= cutoff:\r\n return True\r\n else:\r\n return False",
"def makeRecombMapBooker(snplist, rholist, chrom):\n if chrom == \"X\":\n mapsize = 44.7\n elif chrom == \"3R\":\n mapsize = 90.9\n elif chrom == \"3L\":\n mapsize = 89.1\n elif chrom == \"2L\":\n mapsize = 63.2\n elif chrom == \"2R\":\n mapsize = 94.8\n elif chrom == \"2RL\":\n mapsize = 158\n elif chrom == \"3RL\":\n mapsize = 180\n poslist = []\n rhocum = []\n cMlist = []\n cMMblist = []\n for i, pos in enumerate(snplist):\n if i == 0:\n rhoTemp = (rholist[i] * (pos))\n else:\n rhoTemp = (rholist[i] * (pos - snplist[i-1]))\n if i == 0:\n rhocum.append(rhoTemp)\n else:\n rhocum.append(rhocum[-1] + rhoTemp)\n poslist.append(pos)\n for i, j in enumerate(rhocum):\n cMperSNP = (j / rhocum[-1])\n cMlist.append(cMperSNP)\n cMMblist.append(((cMlist[i] - cMlist[i-1])*mapsize) / ((snplist[i] - snplist[i-1])/1E6))\n return(poslist, cMMblist, cMlist)",
"def preprocessBed(fname):\n res = {}\n iter = parseBed(fname)\n for i in iter:\n res.setdefault(i.chr,[])\n res[i.chr].append(i)\n for k in res.keys():\n res[k].sort()\n return res",
"def CDR_pos_parser(input):\n infile= open(input,'r')\n CDR1_pos=defaultdict(list)\n CDR2_pos=defaultdict(list)\n name_flag=False\n CDR1_fromto=[]\n CDR2_fromto=[]\n for line in infile.readlines():\n # skip empty rows\n if len(line) < 2: continue\n if line[:5] == \"Query\":\n name = line.strip()[7:]\n name_flag = True\n elif line[:5] == 'CDR1' + '\\t':\n CDR1_fromto=line[5:].split('\\t')\n\n elif line[:5] == 'CDR2' + '\\t':\n CDR2_fromto = line[5:].split('\\t')\n\n if (line[:10] == 'Alignments') & name_flag:\n CDR1_pos[name]=CDR1_fromto\n CDR2_pos[name]=CDR2_fromto\n name_flag = False\n CDR1_fromto = []\n CDR2_fromto = []\n return CDR1_pos,CDR2_pos",
"def sortFitness(chromosomes, target):\n done = False\n fittest = []\n for chrom in chromosomes:\n # Decode\n pheno = decode(chrom)\n\n # Try to evaluate\n try:\n result = eval(pheno)\n except ZeroDivisionError:\n result = 0\n\n # Score based on result\n fit = abs(target-result)\n if fit == 0:\n done = True\n fit = -1\n print(\"%s =%s : %s\" %(pheno.rjust(15), str(result).rjust(10),\n str(fit).rjust(15)))\n fittest.append((fit, chrom))\n # Once we have a list of (fitness, chrom) pairs we can sort\n fittest.sort()\n return done, [item[-1] for item in fittest]",
"def sortKey( self, mode, matrix ):\n # distance calculation...\n distance = polygonsort.distances(\n LOCAL_ORIGIN,\n modelView = matrix,\n projection = mode.getProjection(),\n viewport = mode.getViewport(),\n )[0]\n if self.appearance:\n key = self.appearance.sortKey( mode, matrix )\n else:\n key = (False,[],None)\n if key[0]:\n distance = -distance\n return key[0:2]+ (distance,) + key[1:]",
"def _readAndCombine(inputBed, withinBp):\n junct = {}\n\n # collapse a \n count = 0\n for line in open(inputBed):\n count += 1\n #if count % 100000==0: \n # print count \n if line.startswith(\"track\"):\n #out.write(line.strip()) \n #out.write(\" useScore=1\\n\") \n continue\n\n [chr, start, stop, name, score, strand, thStart, thStop, rgb, blockCount, blockSizes, blockStarts] = line.split(\"\\t\")\n score = float(score)\n if not junct.has_key(chr):\n junct[chr] = {}\n\n if int(blockCount) != 2:\n #print \"Illegal line does not have 2 blocks\" \n #print line \n continue\n\n start = int(start)\n stop = int(stop)\n [size1, size2] = [int(x) for x in blockSizes.split(\",\")[:2]]\n [start1, start2] = [int(x) for x in blockStarts.split(\",\")[:2]]\n leftEdge = start + size1\n rightEdge = start + start2 # start2 is relative to chr start \n intronLength = rightEdge - leftEdge\n\n toCombine = []\n for (other) in junct[chr].keys():\n (otherMinLeft, otherMaxLeft, otherMinRight, otherMaxRight, otherLength) = other\n if otherLength != intronLength:\n continue\n\n if otherMaxLeft < (leftEdge-withinBp) or otherMinLeft > (leftEdge+withinBp):\n continue\n\n if otherMaxRight < (rightEdge-withinBp) or otherMinRight > (rightEdge+withinBp):\n continue\n\n toCombine.append(other)\n\n allLines = [ (score, line, leftEdge, rightEdge) ]\n minLeft = maxLeft = leftEdge\n minRight = maxRight = rightEdge\n for (other) in toCombine:\n (otherMinLeft, otherMaxLeft, otherMinRight, otherMaxRight, intronLength) = other\n minLeft = min(minLeft, otherMinLeft)\n maxLeft = max(maxLeft, otherMaxLeft)\n minRight = min(minRight, otherMinRight)\n maxRight = max(maxRight, otherMaxRight)\n\n allLines.extend(junct[chr][other])\n del junct[chr][other]\n\n junct[chr][ (minLeft, maxLeft, minRight, maxRight, intronLength) ] = allLines\n\n return junct",
"def encode_chromosome(in_num):\n convert_dict = {23: \"X\", 24: \"Y\", 25: \"MT\"}\n return convert_dict[in_num] if in_num in convert_dict else str(in_num)",
"def table_key(self, reindex_dict):\n reindexed_marks = []\n for m in self.component1.marks:\n new_m = reindex_dict.get(m)\n if new_m == None:\n if len(reindex_dict) == 0:\n new_m = 0\n else:\n new_m = max(reindex_dict.values())+1\n reindex_dict[m] = new_m\n reindexed_marks.append(new_m)\n return tuple( [self.component1.genus] + sorted(reindexed_marks) )",
"def natsort_key(s):\n # key consists of triplets (type:int, magnitude:int, value:str)\n key = []\n if '~' in s:\n s = s.replace('~', '\\0')\n for frag in _rc.findall(s):\n if frag < '0':\n key.extend((1, 0, frag + '\\1'))\n elif frag < '1':\n key.extend((2, len(frag.lstrip('0')) - len(frag), frag))\n elif frag < ':':\n key.extend((2, len(frag), frag))\n else:\n key.extend((3, 0, frag + '\\1'))\n if not key or key[-3] == 2:\n key.extend((1, 0, '\\1'))\n return tuple(key)",
"def _custom_sorter(self, key1, key2):\n\n col = self._col\n ascending = self._colSortFlag[col]\n real = self.get_real_col(col)\n item1 = self.itemDataMap[key1][real]\n item2 = self.itemDataMap[key2][real]\n\n # Internationalization of string sorting with locale module\n if isinstance(item1, str) and isinstance(item2, str):\n cmpVal = locale.strcoll(item1, item2)\n elif isinstance(item1, bytes) or isinstance(item2, bytes):\n cmpVal = locale.strcoll(str(item1), str(item2))\n else:\n cmpVal = cmp(item1, item2)\n\n # If the items are equal, then pick something else to make the sort value unique\n if cmpVal == 0:\n cmpVal = cmp(*self.GetSecondarySortValues(col, key1, key2))\n\n if ascending:\n return cmpVal\n else:\n return -cmpVal",
"def set_chrom_dict():\n chrom_dict = {\n str(i):'chr' + str(i) for i in range(1, MAXCHROM)\n }\n chrom_dict.update({\n 'X':'chr23',\n 'Y':'chr24',\n 'XY':'chr25',\n 'M':'chr26',\n 'MT':'chr26',\n 'chrX':'chr23',\n 'chrY':'chr24',\n 'chrXY':'chr25',\n 'chrM':'chr26',\n 'chrMT':'chr26'\n })\n return chrom_dict, MAXCHROM",
"def __init__(self, chromosome, starts, ends, labels, gstrands):\n \n if 1 <= chromosome <= 24:\n self._chromosome = chromosome\n else:\n raise ValueError('wrong chromosome number %d' % chromosome)\n\n # Sort bands by starting base\n sorted_bands = sorted(zip(labels, starts, ends, gstrands),\n key=op.itemgetter(1))\n\n self._band_keys = dict((k[0], i) for i, k in enumerate(sorted_bands))\n self._bands = tuple(ChromosomeBand(self._chromosome, *band)\n for band in sorted_bands)"
] |
[
"0.6112682",
"0.6048693",
"0.56508696",
"0.56142724",
"0.5323903",
"0.5252737",
"0.52512056",
"0.5194273",
"0.51627487",
"0.5153749",
"0.509315",
"0.5088636",
"0.50862974",
"0.50861454",
"0.5084419",
"0.50008214",
"0.4992516",
"0.49760285",
"0.49712116",
"0.49694505",
"0.49468428",
"0.49449608",
"0.49139512",
"0.49026617",
"0.48954722",
"0.4891888",
"0.48808905",
"0.48625916",
"0.4832819",
"0.4827659"
] |
0.6810686
|
0
|
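The _make_key row above relies on a chromosome_sort_key helper that is not included in this dump. The sketch below is a hypothetical re-implementation of just the name/number splitting behaviour the query describes (real chromosomes first, then other names, then scaffolds, with 'chr2' sorting before 'chr12'); it is a minimal illustration under those assumptions, not the original function.

import re

def chromosome_sort_key_sketch(chromosome):
    # Hypothetical stand-in: split 'chr12' into ('chr', 12) so numeric comparison
    # puts 'chr2' before 'chr12', and group chromosomes < other names < scaffolds.
    name, digits = re.match(r'^(.*?)(\d*)$', chromosome).groups()
    number = int(digits) if digits else -1    # bare 'chr' sorts before 'chr1'
    if name == 'chr':
        group = 0    # real chromosomes first
    elif not name.startswith('scaffold'):
        group = 1    # cassette, chloroplast/mitochondrial, anything else
    else:
        group = 2    # scaffolds last
    return (group, name, number)

print(sorted(['chr12', 'chr2', 'chr', 'scaffold_3', 'cassette', 'other_chr1'],
             key=chromosome_sort_key_sketch))
# ['chr', 'chr2', 'chr12', 'cassette', 'other_chr1', 'scaffold_3']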
Reversibly make object immutable (reasonably) and hashable.
|
def make_immutable(self):
# just set the flag to make object immutable and hashable
self.immutable = True
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __hash__(self):\n if self.immutable:\n return hash(self._make_key())\n else:\n raise MutantError(\"This %s is currently mutable, and therefore unhashable! \"%repr(self)\n +\"Run self.make_immutable() to change this.\")",
"def make_mutable_REMEMBER_CLEANUP_FIRST(self):\n # UNSET the flag to make object immutable and hashable - need to do it in a roundabout way,\n # because the immutability prevents simply \"self.immutable = False\" from working!\n self.__dict__['immutable'] = False\n # but if I put __slots__ in, self.__dict__ won't exist any more... TODO Options for then:\n # setattr(self, 'immutable', False) - doesn't seem to work?\n # object.__setattr__(self, 'immutable', False) - does that work?",
"def __hash__(self):\n if getattr(self, \"_immutable\", False):\n return hash((tuple(self.states()), tuple(self.transitions())))\n raise TypeError(\"Finite state machines are mutable, \" \\\n \"and thus not hashable.\")",
"def hashable(cls):\n\n # py2\n assert \"__hash__\" in cls.__dict__\n # py3\n assert cls.__dict__[\"__hash__\"] is not None\n assert \"__eq__\" in cls.__dict__\n\n cls.__ne__ = lambda self, other: not self.__eq__(other)\n\n return cls",
"def __hash__(self):\n raise TypeError(\"Transitions are mutable, and thus not hashable.\")",
"def set_immutable(self):\n self._mutable = False",
"def make_mutable(obj):\n _mutable_objs.append(obj)",
"def _make_immutable(value):\n if isinstance(value, dict):\n return Object(value)\n elif isinstance(value, (list, tuple)):\n return Array(value)\n elif (\n value is None or\n isinstance(value, string_types) or\n isinstance(value, (int, float, bool, Document, Object, Array, Link))\n ):\n return value\n\n raise TypeError(\"Invalid type in document. Got '%s'.\" % type(value))",
"def testContainOnlyImmutables(self):\n aset = set()\n \n aset.add(1)\n aset.add(\"cheka\")\n \n # non-hashable object (that is mutable) objects cannot be contained in set\n self.assertRaises(TypeError, lambda : aset.add([]) )",
"def immutable(self) -> bool:\n return self._immutable",
"def __hash__(self):\n raise NotImplementedError",
"def __hash__(self):\n return self.value.__hash__()",
"def __hash__(self):\n return super().__hash__()",
"def __setitem__(self):\n raise ValueError(\"Dataset objects are immutable\")",
"def shallow_copy(self):\n # TODO: Rename this to __copy__()?\n raise NotImplementedError(\"shallow_copy is not implemented\")",
"def isImmutable(self):\n if self.isPrimaryKey():\n return True\n else:\n return self._immutable",
"def make_immutable(mat):\n if issparse(mat):\n mat.data.flags.writeable = False\n if mat.format in {\"csr\", \"csc\", \"bsr\"}:\n mat.indices.flags.writeable = False\n mat.indptr.flags.writeable = False\n elif mat.format == \"coo\":\n mat.row.flags.writeable = False\n mat.col.flags.writeable = False\n else:\n mat.flags.writeable = False",
"def _mkObject(self):\n return ImmutableObject(\n store=self.store,\n hash=u'somehash',\n contentDigest=u'quux',\n content=self.store.newFilePath('foo'),\n contentType=u'application/octet-stream')",
"def __hash__(self):\n return self.to_hash()",
"def __hash__(self):\n return hash((super().__hash__(), self.permeability))",
"def __hash__(self):\n return id(self)",
"def clone(self) -> Mutator:\n raise NotImplementedError",
"def __hash__(self):\r\n return hash(type(self)) ^ hash(self.broadcastable)",
"def __copy__(self):\n raise NotImplementedError",
"def test_dict_merge_immutable():\n x1 = {'one': 1, 'two': 2}\n x1_cop = x1.copy()\n ir.dict_merge(x1, {'three': 3, 'two': None})\n assert x1 == x1_cop\n ir.dict_merge({'ten': 10, 'one': '1'}, x1)\n assert x1 == x1_cop",
"def __delitem__(self):\n raise ValueError(\"Dataset objects are immutable\")",
"def __hash__(self):\n # TODO: v1.2 - The __hash__ override is deprecated\n # For python 3, any override of __cmp__ or __eq__\n # immutable requires explicit redirect of hash\n # function to the parent class\n # See http://docs.python.org/dev/3.0/\n # reference/datamodel.html#object.__hash__\n return super(UID, self).__hash__()",
"def __sub__(self, obj):\n return self ^ obj",
"def make_hash(o):\n if isinstance(o, (set, tuple, list)):\n return hash(tuple([make_hash(e) for e in o]))\n elif not isinstance(o, dict) and o.__class__.__module__ == 'builtins':\n return hash(o)\n elif not isinstance(o, dict):\n return make_hash(o.__dict__)\n\n new_o = copy.deepcopy(o)\n for k, v in new_o.items():\n new_o[k] = make_hash(v)\n return hash(tuple(frozenset(sorted(new_o.items()))))",
"def hashable(obj):\n return bytes(str(obj), \"utf-8\")"
] |
[
"0.6758572",
"0.67053974",
"0.6542986",
"0.6396433",
"0.6321631",
"0.62582093",
"0.61994404",
"0.606839",
"0.6010715",
"0.5959738",
"0.5907642",
"0.58751816",
"0.5747587",
"0.57078564",
"0.5691022",
"0.56675",
"0.5596881",
"0.55961627",
"0.5588229",
"0.55824023",
"0.55514175",
"0.55367726",
"0.5515875",
"0.5513287",
"0.5513235",
"0.55085486",
"0.54951924",
"0.5470002",
"0.5454877",
"0.545275"
] |
0.8096591
|
0
|
Reversibly make object mutable and non-hashable. REMEMBER TO REMOVE SELF FROM SETS/DICTS BEFORE CALLING!
|
def make_mutable_REMEMBER_CLEANUP_FIRST(self):
# UNSET the flag to make object immutable and hashable - need to do it in a roundabout way,
# because the immutability prevents simply "self.immutable = False" from working!
self.__dict__['immutable'] = False
# but if I put __slots__ in, self.__dict__ won't exist any more... TODO Options for then:
# setattr(self, 'immutable', False) - doesn't seem to work?
# object.__setattr__(self, 'immutable', False) - does that work?
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def make_immutable(self):\n # just set the flag to make object immutable and hashable\n self.immutable = True",
"def make_mutable(obj):\n _mutable_objs.append(obj)",
"def __sub__(self, obj):\n return self ^ obj",
"def shallow_copy(self):\n # TODO: Rename this to __copy__()?\n raise NotImplementedError(\"shallow_copy is not implemented\")",
"def force_frozenset(obj): \n # make it a set/tuple of 1 if it is a scalar and not a set already\n return tuple(force_hashable(obj))",
"def __delitem__(self):\n raise ValueError(\"Dataset objects are immutable\")",
"def __reduce_ex__(self, protocol=None):\n return (\n sm.copyreg._reconstructor,\n (type(self), set, list(self)),\n self.__getstate__(),\n )",
"def set_immutable(self):\n self._mutable = False",
"def testContainOnlyImmutables(self):\n aset = set()\n \n aset.add(1)\n aset.add(\"cheka\")\n \n # non-hashable object (that is mutable) objects cannot be contained in set\n self.assertRaises(TypeError, lambda : aset.add([]) )",
"def __setitem__(self):\n raise ValueError(\"Dataset objects are immutable\")",
"def __isub__(self, obj):\n # calls __sub__\n tmp = self - obj\n self.data = tmp.data\n return self",
"def __deepcopy__(self, memodict=None):\n return self.copy()",
"def __hash__(self):\n if self.immutable:\n return hash(self._make_key())\n else:\n raise MutantError(\"This %s is currently mutable, and therefore unhashable! \"%repr(self)\n +\"Run self.make_immutable() to change this.\")",
"def __set__(self, obj, value):\r\n pass",
"def _disabled(self, *args, **kwargs):\n raise TypeError(\"'%s' does not support mutable operations.\" %\n self.__class__.__name__)",
"def _disabled(self, *args, **kwargs):\n raise TypeError(\"'%s' does not support mutable operations.\" %\n self.__class__)",
"def __getstate__(self):\n copy = self.__dict__.copy()\n copy['_workaround'] = None\n return copy",
"def __copy__(self):\n return self.copy()",
"def copy(self):\n copied = super().copy()\n copied.anonymize()\n return copied",
"def __hash__(self):\n raise TypeError(\"Transitions are mutable, and thus not hashable.\")",
"def __copy__(self):\n raise NotImplementedError",
"def unfreeze(obj, ignore_types=[]):\n if obj is None:\n return obj\n\n to_process = [obj]\n while len(to_process) > 0:\n _obj = to_process.pop()\n\n for attr in dir(_obj):\n if attr.startswith(\"__\"):\n continue\n value = getattr(_obj, attr)\n if isinstance(value, FrozenDict):\n value = {k: v for k, v in value.items()}\n to_process.extend(value.values())\n elif isinstance(value, FrozenList):\n value = [x for x in value]\n to_process.extend(value)\n elif not callable(value) and not isinstance(value, tuple(ignore_types)):\n to_process.append(value)\n\n try:\n setattr(_obj, attr, value)\n except BaseException:\n pass\n\n return obj",
"def copy(self):\n import copy as pcopy\n return pcopy.deepcopy(self)",
"def __getstate__(self):\n # construct a list of unpicklable entties and exclude them from pickling\n nope = ['_divisionClassifier', '_assembledObjects']\n d = dict((key, val) for key, val in self.__dict__.items() if key not in nope) # deepcopy needed\n return d",
"def test_deepcopy(self):\n t = Identity()\n t.transform([2])\n copy.deepcopy(t)",
"def copy(self):\r\n # This way of initializing the copy means it works for subclasses, too.\r\n obj = self.__class__(self)\r\n obj.keyOrder = self.keyOrder[:]\r\n return obj",
"def __hash__(self):\n if getattr(self, \"_immutable\", False):\n return hash((tuple(self.states()), tuple(self.transitions())))\n raise TypeError(\"Finite state machines are mutable, \" \\\n \"and thus not hashable.\")",
"def copy(self):\n return self.__class__(dict(self))",
"def __deepcopy__(self, memo):\n from copy import deepcopy\n return self.__class__(deepcopy(self.items(), memo), self.strict)",
"def strict(cls):\n return frozenset()"
] |
[
"0.72401875",
"0.6723461",
"0.65100807",
"0.62228954",
"0.6092488",
"0.608733",
"0.6045231",
"0.6040895",
"0.6037135",
"0.5984441",
"0.59769535",
"0.5902408",
"0.5894151",
"0.5887292",
"0.587385",
"0.5867318",
"0.5857524",
"0.58271",
"0.58166295",
"0.5779478",
"0.576897",
"0.5698476",
"0.56904525",
"0.5687013",
"0.56811935",
"0.5678082",
"0.5673153",
"0.5667313",
"0.56395817",
"0.56388366"
] |
0.7387795
|
0
|
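The make_immutable / make_mutable_REMEMBER_CLEANUP_FIRST pair in the two rows above only makes sense together with a guarded __setattr__ that is not shown in this dump. The toy class below is an assumed reconstruction of that pattern (flag checked in __setattr__, bypassed through __dict__), not the original Insertion_position code.

class FlaggedImmutable(object):
    # Assumed reconstruction: attribute writes are refused while self.immutable
    # is True, which is why un-setting the flag has to go through __dict__ directly.
    def __init__(self):
        self.__dict__['immutable'] = False
        self.value = 0
    def __setattr__(self, name, value):
        if self.__dict__.get('immutable', False):
            raise AttributeError("%s is currently immutable" % type(self).__name__)
        object.__setattr__(self, name, value)
    def make_immutable(self):
        self.immutable = True                      # allowed: flag not set yet
    def make_mutable_REMEMBER_CLEANUP_FIRST(self):
        self.__dict__['immutable'] = False         # bypass the guarded __setattr__

obj = FlaggedImmutable()
obj.make_immutable()
try:
    obj.value = 1                                  # refused while immutable
except AttributeError:
    pass
obj.make_mutable_REMEMBER_CLEANUP_FIRST()
obj.value = 1                                      # allowed again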
Return distance between positions, or NaN if on different chromosomes or on different strands and ignore_strand is False.
|
def get_position_distance(pos1, pos2, ignore_strand=False):
NaN = float('nan')
if pos1 in SPECIAL_POSITIONS.all_undefined: return NaN
elif pos2 in SPECIAL_POSITIONS.all_undefined: return NaN
elif pos1.chromosome != pos2.chromosome: return NaN
elif not ignore_strand and pos1.strand != pos2.strand: return NaN
else: return abs(pos1.min_position - pos2.min_position)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def calc_dist(self, neighboring_pos):\n vec = np.array([i[1] - i[0] for i in zip(self.pos, neighboring_pos)])\n dist = np.linalg.norm(vec)\n return vec, dist",
"def distance(self,b,enforceStrand=False):\n if b.chr != self.chr:\n return -1\n if enforceStrand:\n if self.strand!=b.strand:\n return -1\n else:\n return abs(self.start-b.start)",
"def get_distance(start, end):\n\n\t\tloc_start, loc_end, dst_node = create_distance(start, end)\n\t\tdistance = cmds.getAttr(\"%s.distance\" % dst_node)\n\n\t\tcmds.delete([loc_start, loc_end, dst_node])\n\n\t\treturn distance",
"def distance(self, samples=None, twoD=False, posmin=10e-9,\n dXYZ_factors=None):\n data = self.get_data(traces=['psdXYZ', 'positionXYZ'], samples=samples)\n psdXYZ = data[:, 0:3]\n positionXYZ = data[:, 3:6]\n positionXY = data[:, 3:5]\n calibration = self.calibration\n\n # 2D or 3D calculation of the distance in Z\n if twoD:\n psdXYZ[:, Z] = 0.0\n\n distXYZ = distanceXYZ(positionXYZ, psdXYZ=psdXYZ,\n calibration=calibration,\n dXYZ_factors=dXYZ_factors)\n dist = distance(distXYZ, positionXY, posmin=posmin)\n return dist",
"def get_maze_distance(self, pos1, pos2):\n d = self.distancer.get_distance(pos1, pos2)\n return d",
"def distance(self, position):\n s, r = self.local_coordinates(position)\n return abs(r) + max(s - self.length, 0) + max(0 - s, 0)",
"def distance(pos1, pos2):\n return math.sqrt((pos1[0] - pos2[0])**2. + (pos1[1] - pos2[1])**2.)",
"def _pos_distance(pos1, pos2):\n delta = np.array(pos1).astype(float).flatten(\n ) - np.array(pos2).astype(float).flatten()\n delta %= 1\n return la.norm(np.array(np.minimum(delta, 1 - delta)).astype(float))",
"def distance(self) -> float:\n return self._dist_two_wire() # at this time we only support 2-wire meausre",
"def compute_distance(na_coords,COM):\n dist = math.sqrt((na_coords[0] - COM[0])**2 + (na_coords[1]-COM[1])**2 +(na_coords[2]-COM[2])**2)\n return dist",
"def get_distance(self):\n\n # Activate trigger\n self.trigger()\n\n # Detect rising edge of echo pin\n channel = GPIO.wait_for_edge(self.echo_pin, GPIO.RISING, timeout=2)\n if channel is None:\n # Timeout on wait of rising interrupt\n return None\n else:\n # Rising edge detected, save pulse start\n pulse_start = time.time()\n\n\n # Detect falling edge of echo pin\n channel = GPIO.wait_for_edge(self.echo_pin, GPIO.FALLING, timeout=2)\n if channel is None:\n # Timeout on wait of falling interrupt\")\n return None\n else:\n # Falling edge detected, save pulse end\n pulse_end = time.time()\n\n # Calculated pulse width in microseconds (x1mln)\n pulse_width = (pulse_end - pulse_start)*1000000\n\n # Return distance in cm\n return pulse_width / 58",
"def test_get_distance_to_same_place() -> None:\n meters = location_util.distance(\n COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n )\n\n assert meters == 0",
"def dist(pos1, pos2):\n a, b = pos1\n c, d = pos2\n \n return sqrt((a-c)**2 + (b-d)**2)",
"def get_race_distance(self):\n race_id = self.get_current_race_id(include_horse=False)\n race_id_with_horse = self.get_current_race_id(include_horse=True)\n try:\n distance = self.consolidated_races_db.data.loc[race_id, 'distance']\n return distance\n except KeyError:\n try:\n distance = self.db.data.loc[race_id_with_horse, 'distance']\n return distance\n except KeyError:\n self.verbose_print(f'No race distance info found for {self.current_race_id}')\n return None",
"def compute_distance(self):\n loc = np.extend_dims(self.state[:, :, Boids.Attr.LOC], axis=-1)\n m = np.tile(loc, (1, 1, self.num_boids))\n pos_diff = m-m.transpose(0, 2, 1)\n self.distance = np.linalg.norm(pos_diff, axis=0)",
"def get_dist(row, bstrand=None):\n fields = row.fields\n astart, aend = row.start, row.end\n bstart, bend = int(fields[7]), int(fields[8])\n # TODO: handle ties in distance.\n if bstart >= aend:\n dist = (bstart - aend) + 1\n elif astart >= bend:\n dist = bend - astart\n else:\n 1/0\n if bstrand is None:\n return -dist if row.strand == \"-\" else dist\n else:\n assert row[6 + bstrand] in \"+-\", row[6 + bstrand]\n # dist < 0\n # <--B--- ---A--- (-)\n # ---B--> ---A--- (+)\n if dist < 0: # B leftOf A\n return -dist if row[6 + bstrand] == \"+\" else dist\n \n # dist > 0\n # ---A--- ---B--> (-)\n # ---A--- <--B--- (+)\n if dist > 0: # B rightOf A\n return -dist if row[6 + bstrand] == \"+\" else dist\n\n return dist",
"def _get_distance_by_span(matched_positions, forms):\n if len(set(forms[matched_positions])) < 2:\n return 0\n if len(matched_positions) == 2:\n return _get_trivial_distance(matched_positions)\n start_pos = np.min(matched_positions)\n end_pos = np.max(matched_positions)\n if start_pos != end_pos:\n return np.abs(end_pos - start_pos) + 1\n return 0",
"def _calculate_distances(self):\n all_dists = []\n for ref in range(len(self.atoms)):\n if self.atoms[ref].symbol in self.exclude:\n continue\n indices = list(range(ref+1, len(self.atoms)))\n indices = self._filter_excluded(indices)\n if len(indices) == 0:\n continue\n dists = self.atoms.get_distances(ref, indices, mic=True)\n all_dists += list(dists)\n \n # Normalize by the mean distance\n return np.array(all_dists)/np.mean(all_dists)",
"def distance(p1, p2):\n return None",
"def get_distance(self, coords):\n return distance.distance(coords, (self.lat, self.long)).m",
"def distance(self) -> int:\n return 0",
"def getDistance(pos1, pos2):\r\n return ((pos1[0] - pos2[0]) ** 2 + (pos1[1] - pos2[1]) ** 2) ** 0.5",
"def compute_distance(traj1, traj2, matched_pos):\n distance = np.zeros((len(matched_pos),), dtype=float)\n for i in range(len(matched_pos)):\n if matched_pos[i] == -1:\n continue\n else:\n iou = bbox_overlap(traj1[i, 2:6], traj2[matched_pos[i], 2:6])\n distance[i] = iou\n return distance",
"def _location_distances(self, positions) -> torch.Tensor:\n diff = positions[..., None, :, :] - positions[..., None, :]\n distances = torch.norm(diff, dim=3)\n return distances",
"def get_distance(self, star):\n if self == star:\n return 0\n\n a_car = self.get_cartesian_coords()\n b_car = star.get_cartesian_coords()\n dab = math.degrees(math.acos(a_car[0] * b_car[0] +\n a_car[1] * b_car[1] +\n a_car[2] * b_car[2]))\n return dab",
"def gating_distance(self, mean, covariance, measurements,\n only_position=False):\n mean, covariance = self.project(mean, covariance)\n if only_position:\n mean, covariance = mean[:2], covariance[:2, :2]\n measurements = measurements[:, :2]\n\n cholesky_factor = np.linalg.cholesky(covariance)\n d = measurements - mean\n z = scipy.linalg.solve_triangular(\n cholesky_factor, d.T, lower=True, check_finite=False,\n overwrite_b=True)\n squared_maha = np.sum(z * z, axis=0)\n return squared_maha",
"def distance(self):\n return self.value * len(self.alignment.query)",
"def _chunk_dist(chunk_left, chunk_right):\n left_start = _chunk_start(chunk_left)\n left_end = _chunk_end(chunk_left)\n right_start = _chunk_start(chunk_right)\n right_end = _chunk_end(chunk_right)\n\n if left_start < right_start:\n dist = right_start - left_end\n else:\n dist = left_start - right_end\n if dist < 0:\n raise ValueError(\"unexpected chunk position\")\n return dist",
"def _distance_to(self, coordinates):\n\n return abs(coordinates[0]) + abs(coordinates[1])",
"def calcDistance(self, left, right):\n\n return math.fabs(right-left)"
] |
[
"0.57980406",
"0.5789443",
"0.5782124",
"0.5746988",
"0.57429904",
"0.5669104",
"0.56631744",
"0.56625354",
"0.5643527",
"0.5639918",
"0.5638318",
"0.56260926",
"0.56241703",
"0.5614394",
"0.56075317",
"0.5594846",
"0.558574",
"0.5585104",
"0.5571978",
"0.55256027",
"0.55048877",
"0.55019796",
"0.54927653",
"0.5486564",
"0.5474623",
"0.54740566",
"0.5467566",
"0.5466919",
"0.5437836",
"0.54323125"
] |
0.82225955
|
0
|
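As a quick check of the NaN contract described in the get_position_distance row above, the snippet below runs the same comparison logic on a namedtuple stand-in for Insertion_position carrying only the three fields the distance check reads; the special undefined-position handling is omitted. This is an illustration under those assumptions, not the original class.

import math
from collections import namedtuple

# Hypothetical stand-in with just chromosome/strand/min_position.
Pos = namedtuple('Pos', 'chromosome strand min_position')

def position_distance_sketch(pos1, pos2, ignore_strand=False):
    # Different chromosome -> NaN; different strand -> NaN unless ignore_strand;
    # otherwise the absolute difference of min_position values.
    if pos1.chromosome != pos2.chromosome:
        return float('nan')
    if not ignore_strand and pos1.strand != pos2.strand:
        return float('nan')
    return abs(pos1.min_position - pos2.min_position)

a, b = Pos('chr1', '+', 100), Pos('chr1', '-', 250)
print(math.isnan(position_distance_sketch(a, b)))                       # True (strands differ)
print(position_distance_sketch(a, b, ignore_strand=True))               # 150
print(math.isnan(position_distance_sketch(a, Pos('chr2', '+', 100))))   # True (chromosomes differ)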
Return an Insertion_position instance giving the cassette insertion position based on HTSeq read position. Flanking_region_aln_or_pos should be a HTSeq.Alignment instance or a (chrom,start_pos,end_pos,strand) tuple (the tuple should have 1-based end-inclusive positions, so AA is 1-2 in AATT; HTSeq positions are 0-based end-exclusive); cassette_end gives the side of the insertion the read is on; relative_read_direction should be inward/outward from the cassette. The cassette chromosome will be the same as read chromosome; the cassette strand will be either the same as read strand, or the opposite, depending on cassette_end and relative_read_direction.
|
def get_insertion_pos_from_flanking_region_pos(flanking_region_aln_or_pos, cassette_end, relative_read_direction,
immutable_position=True):
# check that basic values aren't weird
check_valid_end_info(cassette_end, relative_read_direction)
# parse flanking_region_aln_or_pos arg - it'll either return a tuple with the basics, or a special position code
parsed_position = parse_flanking_region_aln_or_pos(flanking_region_aln_or_pos)
try: chrom, start_pos, end_pos, strand = parsed_position
except (TypeError, ValueError): return parsed_position
check_valid_position_tuple(parsed_position)
### chromosome is always the same as read, so just leave it as is
### cassette strand is the same as read strand, OR the opposite if the read is opposite to cassette (happens in two cases)
if (cassette_end=='5prime' and relative_read_direction=='inward'): pass
elif (cassette_end=='3prime' and relative_read_direction=='outward'): pass
else: strand = ('+' if strand=='-' else '-')
### cassette position depends on the read position and cassette_end in a somewhat complex way (see docstring)
if (cassette_end=='5prime' and strand=='+') or (cassette_end=='3prime' and strand=='-'): pos_before, pos_after = end_pos, None
else: pos_before, pos_after = None, start_pos
return Insertion_position(chrom, strand, position_before=pos_before, position_after=pos_after, immutable=immutable_position)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_RISCC_pos_from_read_pos(read_aln_or_pos, cassette_end, relative_read_direction='inward', immutable_position=True):\n check_valid_end_info(cassette_end, relative_read_direction)\n imaginary_relative_direction= ('outward' if relative_read_direction=='inward' else 'inward')\n imaginary_cassette_position = get_insertion_pos_from_flanking_region_pos(read_aln_or_pos, cassette_end, \n imaginary_relative_direction)\n if imaginary_cassette_position in SPECIAL_POSITIONS.all_undefined:\n return imaginary_cassette_position\n real_strand = ('-' if imaginary_cassette_position.strand=='+' else '+')\n return Insertion_position(imaginary_cassette_position.chromosome, real_strand, \n full_position=imaginary_cassette_position.full_position, immutable=immutable_position)",
"def process_read(self, ref, read, ref_offset=0):\n\n if read.alignment.mapping_quality < self.config.min_mapq:\n return\n\n ref_pos = read.alignment.position.position - ref_offset\n read_pos = 0\n # Use set(), as some cigar operations might generate duplicated positions,\n # E.g. for insertions, it extends the candidate positions to\n # [ins_pos - ins_len, ins_pos + ins_len] which might overlap with some\n # nearby mismatches.\n positions = set()\n for cigar in read.alignment.cigar:\n # Break if it reached the end of reference sequence.\n if ref_pos >= len(ref):\n break\n if cigar.operation not in utils.CIGAR_OPS:\n raise ValueError('Unexpected CIGAR operation', cigar, read)\n\n if cigar.operation == cigar_pb2.CigarUnit.ALIGNMENT_MATCH:\n positions.update(\n self._process_align_match(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n ref_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.SEQUENCE_MISMATCH:\n positions.update(\n self._process_seq_mismatch(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n ref_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.INSERT:\n positions.update(\n self._process_insert(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.CLIP_SOFT:\n positions.update(\n self._process_soft_clip(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n elif (cigar.operation == cigar_pb2.CigarUnit.DELETE or\n cigar.operation == cigar_pb2.CigarUnit.SKIP):\n positions.update(\n self._process_delete(cigar, ref, read, ref_pos, read_pos))\n ref_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.SEQUENCE_MATCH:\n ref_pos += cigar.operation_length\n read_pos += cigar.operation_length\n elif (cigar.operation == cigar_pb2.CigarUnit.CLIP_HARD or\n cigar.operation == cigar_pb2.CigarUnit.PAD):\n pass\n\n # Yield positions within the range\n for pos in sorted(positions):\n if pos >= 0 and pos < len(ref):\n yield pos",
"def _get_indel_pos(self, variant_pos, read):\n hardclipped = 0 if read.cigartuples[0][0] != 5 else read.cigartuples[0][1] # read location must be adjusted for\n # number of hardclipped bases represented in cigar but not in read_seq https://www.biostars.org/p/119537/\n iloc = variant_pos - read.reference_start + read.query_alignment_start - 1 + hardclipped\n return iloc",
"def test_RNA_position_placement_split(self):\n \n tool = pybedtools.create_interval_from_list(\"chr1 50 60 ENSMUSG1_1_83;ENSMUSG1_6_83 0 + 125 125\".split())\n location_dict = {\"ENSMUSG1\" : {\"strand\" : \"+\", \"regions\" : [(0, 50),\n (100, 150),\n ] \n }\n }\n \n self.assertEqual(RNA_position(tool, location_dict), (.50, .75) )\n \n tool = pybedtools.create_interval_from_list(\"chr1 50 60 ENSMUSG1_1_83;ENSMUSG1_6_83 0 - 25 25\".split())\n location_dict = {\"ENSMUSG1\" : {\"strand\" : \"-\", \"regions\" : [(100, 150),\n (0, 50),\n ] \n }\n }\n \n self.assertEqual(RNA_position(tool, location_dict), (.50, .75))",
"def get_rel_pos(gRNA, min_anchor_length):\n if gRNA['cassette_label'] == 'Orphan':\n return -gRNA['gene_rel_start']\n\n if gRNA['strand'] == 'coding':\n rel_pos = gRNA['circle_start']-gRNA['forward_end']-gRNA['gene_rel_start']\n else:\n rel_pos = gRNA['reverse_start']-gRNA['gene_rel_start']-gRNA['circle_end']\n if rel_pos is pd.NA:\n rel_pos = 0\n\n if rel_pos < 0:\n # find position of first non-WC in pairing\n # If the resulting shortening of the alignment causes the anchor to be\n # less than the min anchor length, make rel_pos just past the non-WC bp\n match = mm_regex.search(gRNA['pairing'][::-1])\n mm_dist = match.start(0)\n if mm_dist + rel_pos < min_anchor_length:\n rel_pos = -(mm_dist+1)\n return rel_pos",
"def _calculate_position(self, lookup, alignment):\n index = 0 # Index of our split CIGAR string\n if alignment.get_rc() or lookup.get_rc(): # If we're reverse complementing\n qpos = lookup.get_reverse_position() - 1 # Start with the reverse position of the SNP, must subtract one\n else: # Otherwise\n qpos = lookup.get_forward_position() # Start with the forward posittion\n while True: # Endless loop to do weird things...\n try: # While we have a CIGAR string to parse\n old = qpos # Store our previously calculated SNP position\n # Seach the CIGAR string as a list, starting with index 0, for indels\n if re.search('M', alignment.get_cigar()[index]): # If we have a perfect match\n if qpos < int(''.join(re.findall(r'\\d+', alignment.get_cigar()[index]))): # If our SNP is in the perfect match\n break # Exit the loop, we have our position\n if re.search('D', alignment.get_cigar()[index]): # If we have a deletion relative to reference\n qpos += int(''.join(re.findall(r'\\d+', alignment.get_cigar()[index]))) # Add the deletion to our SNP position\n if re.search('[IS]', alignment.get_cigar()[index]): # If we have an insertion relative to reference\n qpos -= int(''.join(re.findall(r'\\d+', alignment.get_cigar()[index]))) # Subtract the insertion from our SNP postion\n index += 1 # Increase the index\n if qpos <= 0 or qpos >= lookup.get_length(): # If we've gone beyond the scope of our lookup: 0 is before the sequence, lookup.get_length() is after\n qpos = old # Go back to our previously calculated SNP postion\n break # Exit the loop, we have our position\n except IndexError: # If we run out of CIGAR string codes\n break # Exit the loop, we have our position\n self._position = alignment.get_position() + qpos # Our SNP position is at the mapping position plus the SNP position",
"def parse_flanking_region_aln_or_pos(flanking_region_aln_or_pos):\n try: \n check_valid_position_tuple(flanking_region_aln_or_pos)\n return flanking_region_aln_or_pos\n except MutantError:\n try: \n pos = flanking_region_aln_or_pos.iv\n except AttributeError:\n raise MutantError(\"parse_flanking_region_aln_or_pos input should be HTSeq aln or position tuple! \"\n +\"Got %s\"%(flanking_region_aln_or_pos,))\n if pos: return HTSeq_pos_to_tuple(pos) \n # if unaligned, figure out if unaligned or multi-aligned, and just return the appropriate special position code\n else: \n try: XM_val = get_HTSeq_optional_field(flanking_region_aln_or_pos, 'XM')\n except KeyError: return SPECIAL_POSITIONS.unaligned\n if int(XM_val) > 1: return SPECIAL_POSITIONS.multi_aligned\n else: return SPECIAL_POSITIONS.unaligned",
"def relative_position(self) -> Tuple[int, int]:\n return self.position[0] - self.region.rect.x, self.position[1] - self.region.rect.y",
"def relative_position(self) -> Tuple[int, int]:\n return self.position[0] - self.region.rect.x, self.position[1] - self.region.rect.y",
"def reading_of(self, ac_pos):\n\n diff = np.subtract(self.pos, ac_pos)\n rng = norm(diff)\n brg = atan2(diff[1], diff[0])\n return rng, brg",
"def nt_pos(self, pos):\n seq_consumed = 0\n if self.coding_blocks is None or len(self.coding_blocks) == 0:\n return int(self.end - pos - 1 if self.rev_strand else pos - self.start)\n for block in (reversed(self.coding_blocks) if self.rev_strand else self.coding_blocks):\n if pos >= block[0] and pos < block[1]:\n if self.rev_strand: return (block[1] - pos - 1 + seq_consumed)\n else: return (pos - block[0] + seq_consumed)\n else: \n seq_consumed += block[1] - block[0]\n raise RuntimeError(\"Position %d not within feature %s\" % (pos, self.seq_record.name))",
"def test_RNA_position_fail(self):\n \n tool = pybedtools.create_interval_from_list(\"chr1 50 60 ENSMUSG1_1_83;ENSMUSG1_6_83 0 - 10 10\".split())\n location_dict = {\"ENSMUSG1\" : {\"strand\" : \"-\", \"regions\" : [(100, 150),\n (25,50),\n ] \n }\n }\n \n self.assertEqual(RNA_position(tool, location_dict), (None, None))\n \n tool = pybedtools.create_interval_from_list(\"chr1 50 60 ENSMUSG1_1_83;ENSMUSG1_6_83 0 - 175 175\".split())\n \n self.assertEqual(RNA_position(tool, location_dict), (None, None))",
"def get_trans_pos(genome):\n pos = random.randint(100, len(genome.seq)-100) # insert position\n if pos in genome.unavail_pos:\n pos = get_trans_pos(genome)\n return pos",
"def test_RNA_position_placement(self):\n \n \n tool = pybedtools.create_interval_from_list(\"chr1 50 60 ENSMUSG1_1_83;ENSMUSG1_6_83 0 + 60 60\".split())\n location_dict = {\"ENSMUSG1\" : {\"strand\" : \"+\", \"regions\" : [(0,100),\n ] \n }\n }\n \n self.assertEqual(RNA_position(tool, location_dict), (.60, .60))\n \n tool = pybedtools.create_interval_from_list(\"chr1 50 60 ENSMUSG1_1_83;ENSMUSG1_6_83 0 - 60 60\".split())\n location_dict = {\"ENSMUSG1\" : {\"strand\" : \"-\", \"regions\" : [(0,100),\n ] \n }\n }\n \n #individual_fraction, total_fraction\n self.assertEqual(RNA_position(tool, location_dict), (.4, .4))",
"def add_RISCC_read(self, seq, new_position, N_errors=None, read_count=1):\n # TODO why are we even using Insertion_position objects here?? Those aren't insertion positions with a start-end, just single positions... But still need to be able to deal with unaligned/multi as well as proper positions.\n if not isinstance(new_position, Insertion_position) and new_position not in SPECIAL_POSITIONS.all_undefined:\n raise MutantError(\"RISCC read position %s is unacceptable - must be Insertion_position object or one of %s!\"%(\n new_position, ', '.join(SPECIAL_POSITIONS.all_undefined)))\n # self.RISCC_genome_side_aligned_reads is a position:data dict\n if new_position not in SPECIAL_POSITIONS.all_undefined:\n try:\n # MAYBE-TODO check that the same seq isn't present in a different position?\n self.RISCC_genome_side_aligned_reads[new_position][1] += read_count\n try: self.RISCC_genome_side_aligned_reads[new_position][2][seq][0] += read_count\n except KeyError: self.RISCC_genome_side_aligned_reads[new_position][2][seq] = [read_count, N_errors]\n except KeyError:\n seq_count_error_dict = {seq: [read_count, N_errors]}\n self.RISCC_genome_side_aligned_reads[new_position] = [new_position, read_count, seq_count_error_dict, \n SPECIAL_GENE_CODES.not_determined, '?', '?', '?']\n # self.RISCC_genome_side_unaligned_reads is a seq:data dict, since the positions aren't usable as keys\n else:\n try:\n self.RISCC_genome_side_unaligned_reads[seq][1] += read_count\n self.RISCC_genome_side_aligned_reads[seq][2][seq][0] += read_count\n except KeyError:\n self.RISCC_genome_side_unaligned_reads[seq] = [new_position, read_count, {seq: [read_count, N_errors]}, \n SPECIAL_GENE_CODES.not_determined, '?', '?', '?']\n # Note: adding gene/annotation info for those is implemented in the dataset methods.",
"def parse_cigar_tuple(self, cigar_code, length, alignment_position, ref_sequence, read_sequence, read_id, quality):\n # get what kind of code happened\n ref_index_increment = length\n read_index_increment = length\n\n # deal different kinds of operations\n if cigar_code == 0:\n # match\n self.parse_match(read_id=read_id,\n alignment_position=alignment_position,\n length=length,\n read_sequence=read_sequence,\n ref_sequence=ref_sequence,\n qualities=quality)\n elif cigar_code == 1:\n # insert\n # alignment position is where the next alignment starts, for insert and delete this\n # position should be the anchor point hence we use a -1 to refer to the anchor point\n self.parse_insert(read_id=read_id,\n alignment_position=alignment_position-1,\n read_sequence=read_sequence,\n qualities=quality)\n ref_index_increment = 0\n elif cigar_code == 2 or cigar_code == 3:\n # delete or ref_skip\n # alignment position is where the next alignment starts, for insert and delete this\n # position should be the anchor point hence we use a -1 to refer to the anchor point\n self.parse_delete(read_id=read_id,\n alignment_position=alignment_position-1,\n ref_sequence=ref_sequence,\n length=length)\n read_index_increment = 0\n elif cigar_code == 4:\n # soft clip\n ref_index_increment = 0\n # print(\"CIGAR CODE ERROR SC\")\n elif cigar_code == 5:\n # hard clip\n ref_index_increment = 0\n read_index_increment = 0\n # print(\"CIGAR CODE ERROR HC\")\n elif cigar_code == 6:\n # pad\n ref_index_increment = 0\n read_index_increment = 0\n # print(\"CIGAR CODE ERROR PAD\")\n else:\n raise(\"INVALID CIGAR CODE: %s\" % cigar_code)\n\n return ref_index_increment, read_index_increment",
"def calculate_global_position(strand, start, end, relative_position):\n if strand == 1:\n global_position = [start + x for x in relative_position]\n elif strand == -1:\n global_position = [end - x for x in relative_position]\n else:\n raise ValueError(\"Strand must be 1 or -1\")\n return global_position",
"def parse_insert(self, read_id, alignment_position, read_sequence, qualities):\n # the allele is the anchor + what's being deleted\n allele = self.reference_dictionary[alignment_position] + read_sequence\n\n # record the insert where it first starts\n self.mismatch_count[alignment_position] += 1\n self._update_read_allele_dictionary(read_id, alignment_position + 1, allele, INSERT_ALLELE, max(qualities))\n self._update_insert_dictionary(read_id, alignment_position, read_sequence, qualities)",
"def seek_commit_position(self, commit_position):\n return self._most_recent_log_matching(\n '^Cr-Commit-Position: %s' % commit_position)",
"def _get_insertion_info(insertion_pos, allowed_strand_vals=SEQ_STRANDS):\n try:\n strand, ins_start, ins_end = insertion_pos.strand, insertion_pos.min_position, insertion_pos.max_position\n except AttributeError:\n strand, ins_start, ins_end = insertion_pos\n if allowed_strand_vals is not None:\n assert strand in allowed_strand_vals, \"Strand should be %s, and is %s!\"%(' or '.join(allowed_strand_vals), strand)\n return strand, ins_start, ins_end",
"def ind_pos(position, ind, current_geno, chr_starts, chr_ends):\n ind_starts = chr_starts[ind]\n ind_ends = chr_ends[ind]\n #print [position, ind, current_geno, ind_starts, ind_ends]\n in_interval = False\n for interval in range(len(ind_starts)):\n if position > int(ind_starts[interval]) and position < int(ind_ends[interval]):\n in_interval = True\n break\n if in_interval:\n return(current_geno)\n else:\n return(\"./.\")",
"def _preposition_for_region_as_goal(self, region: SituationRegion) -> str:\n if region.distance == INTERIOR:\n return \"in\"\n elif (\n region.distance == EXTERIOR_BUT_IN_CONTACT\n and region.direction\n and region.direction.positive\n # TODO: put constraints on the axis\n ):\n return \"on\"\n elif region.distance == PROXIMAL and not region.direction:\n # beside hack for dynamic situations\n if (\n self.situation.actions\n and self.situation.actions[0].during\n and self.situation.actions[0].during.objects_to_paths\n ):\n for _, path in self.situation.actions[\n 0\n ].during.objects_to_paths.items():\n if (\n path.reference_destination_object == region\n and SIDE in path.properties\n ):\n return \"beside\"\n # See: https://github.com/isi-vista/adam/issues/836\n if USE_NEAR in self.situation.syntax_hints:\n return \"near\"\n else:\n return \"to\"\n elif region.direction == GRAVITATIONAL_UP:\n if USE_ABOVE_BELOW in self.situation.syntax_hints:\n return \"above\"\n else:\n return \"over\"\n elif region.direction == GRAVITATIONAL_DOWN:\n if USE_ABOVE_BELOW in self.situation.syntax_hints:\n return \"below\"\n else:\n return \"under\"\n elif region.distance == DISTAL and not region.direction:\n return \"far from\"\n elif region.direction and self.situation.axis_info:\n if not self.situation.axis_info.addressee:\n raise RuntimeError(\n f\"Unable to translate region into a preposition because an addressee is lacking. \"\n f\"Region: {region}\\nSituation: {self.situation}\"\n )\n # HACK, from M3\n # see: https://github.com/isi-vista/adam/issues/573\n if isinstance(region.direction.relative_to_axis, FacingAddresseeAxis):\n # \"in front of\" and \"behind\" is defined without a distance as you can accurate use the phrase\n # regardless of distance example:\n # \"the teacher is in front of your laptop\"\n # (Assuming the laptop is near the back of class and the addressee is facing the front of the room)\n # \"your friend is in front of your laptop\"\n # (Assuming the friend is one row up in the classroom)\n if region.direction.positive:\n return \"in front of\"\n else:\n return \"behind\"\n elif (\n region.direction.relative_to_axis != GRAVITATIONAL_DOWN_TO_UP_AXIS\n and region.distance == PROXIMAL\n ):\n return \"beside\"\n else:\n raise RuntimeError(\n f\"Don't know how to translate {region} to a preposition yet\"\n )\n else:\n raise RuntimeError(\n f\"Don't know how to translate {region} to a preposition yet\"\n )",
"def test_RNA_position_failaure(self):\n \n tool = pybedtools.create_interval_from_list(\"chr1 50 60 ENSMUSG1_1_83;ENSMUSG1_6_83 0 - 50 50\".split())\n location_dict = {\"ENSMUSG2\" : {\"strand\" : \"+\", \"regions\" : [(30, 40),\n (10,20)\n ] \n }\n }\n self.assertRaises(KeyError, RNA_position, tool, location_dict)",
"def coverage_over_region(input_bam, region, reference, min_mapq=40, min_baseq=15, min_anchor=11):\n if reference:\n depth_result = pysam.depth( # pylint: disable=no-member\n \"-Q\", str(min_mapq),\n \"-q\", str(min_baseq),\n \"-l\", str(min_anchor),\n \"-r\", region,\n \"--reference\", reference,\n input_bam,\n )\n else:\n depth_result = pysam.depth( # pylint: disable=no-member\n \"-Q\", str(min_mapq),\n \"-q\", str(min_baseq),\n \"-l\", str(min_anchor),\n \"-r\", region,\n input_bam,\n )\n # start, end are 0-indexed half-open coordinates\n _, start, end = pysam.libcutils.parse_region(region=region)\n region_length = end - start\n if len(depth_result) > 0 and region_length > 0:\n depths = np.loadtxt(io.StringIO(depth_result), dtype=int, usecols=2)\n total_coverage = np.sum(depths)\n return (total_coverage / region_length, total_coverage, region_length)\n else:\n return (0., 0., region_length)",
"def HTSeq_pos_to_tuple(HTSeq_pos):\n try:\n chrom = HTSeq_pos.chrom\n except AttributeError:\n raise MutantError(\"Invalid position %s! Need an HTSeq iv object. (If empty, maybe read wasn't aligned?)\"%(HTSeq_pos,))\n strand = HTSeq_pos.strand\n # HTSeq is 0-based and I want 1-based, thus the +1; end has no +1 because in HTSeq end is the base AFTER the alignment.\n start_pos = HTSeq_pos.start+1\n end_pos = HTSeq_pos.end\n output_pos = (chrom, start_pos, end_pos, strand)\n check_valid_position_tuple(output_pos)\n return output_pos",
"def get(self, offset: int) -> Position:\n line = bisect_right(self.line_starts, offset) - 1\n character = offset - self.line_starts[line]\n return Position(line=line, character=character)",
"def cpos2codon(self, cpos):\n self.ensure_seq()\n cpos = int(cpos)\n if self.strand == \"+\":\n np = []\n for beg, end in self.exons:\n np += list(range(max(beg, self.cds_beg),\n min(self.cds_end, end)+1))\n assert len(np) == len(self.seq)\n\n ni = cpos*3\n if ni <= len(np):\n codon = Codon()\n codon.index = cpos\n codon.locs = tuple(np[ni-3:ni])\n codon.gene = self.gene\n codon.chrm = self.chrm\n codon.strand = self.strand\n codon.seq = self.seq[ni-3:ni]\n return codon\n else:\n raise IncompatibleTranscriptError('invalid_cDNA_position_%d;expect_[0_%d]' % (ni, len(np)))\n else:\n np = []\n for beg, end in reversed(self.exons):\n np += list(range(min(self.cds_end, end),\n max(beg, self.cds_beg)-1,-1))\n assert len(np) == len(self.seq)\n\n ni = cpos*3\n if ni <= len(np):\n codon = Codon()\n codon.index = cpos\n codon.locs = tuple(reversed(np[ni-3:ni]))\n codon.gene = self.gene\n codon.chrm = self.chrm\n codon.strand = self.strand\n codon.seq = self.seq[ni-3:ni]\n return codon\n else:\n raise IncompatibleTranscriptError('invalid_cDNA_position_%d;expect_[0_%d]' % (ni, len(np)))",
"def get_original_span(self, input_processed_span: Span,\n align_mode: str = \"relaxed\"):\n assert align_mode in [\"relaxed\", \"strict\", \"backward\", \"forward\"]\n\n req_begin = input_processed_span.begin\n req_end = input_processed_span.end\n\n def get_original_index(input_index: int, is_begin_index: bool,\n mode: str) -> int:\n r\"\"\"\n Args:\n input_index: begin or end index of the input span\n is_begin_index: if the index is the begin index of the input\n span or the end index of the input span\n mode: alignment mode\n Returns:\n Original index that aligns with input_index\n \"\"\"\n if len(self.processed_original_spans) == 0:\n return input_index\n\n len_processed_text = len(self._text)\n orig_index = None\n prev_end = 0\n for (inverse_span, original_span) in self.processed_original_spans:\n # check if the input_index lies between one of the unprocessed\n # spans\n if prev_end <= input_index < inverse_span.begin:\n increment = original_span.begin - inverse_span.begin\n orig_index = input_index + increment\n # check if the input_index lies between one of the processed\n # spans\n elif inverse_span.begin <= input_index < inverse_span.end:\n # look backward - backward shift of input_index\n if is_begin_index and mode in [\"backward\", \"relaxed\"]:\n orig_index = original_span.begin\n if not is_begin_index and mode == \"backward\":\n orig_index = original_span.begin - 1\n\n # look forward - forward shift of input_index\n if is_begin_index and mode == \"forward\":\n orig_index = original_span.end\n if not is_begin_index and mode in [\"forward\", \"relaxed\"]:\n orig_index = original_span.end - 1\n\n # break if the original index is populated\n if orig_index is not None:\n break\n prev_end = inverse_span.end\n\n if orig_index is None:\n # check if the input_index lies between the last unprocessed\n # span\n inverse_span, original_span = self.processed_original_spans[-1]\n if inverse_span.end <= input_index < len_processed_text:\n increment = original_span.end - inverse_span.end\n orig_index = input_index + increment\n else:\n # check if there input_index is not valid given the\n # alignment mode or lies outside the processed string\n raise ValueError(f\"The input span either does not adhere \"\n f\"to the {align_mode} alignment mode or \"\n f\"lies outside to the processed string.\")\n return orig_index\n\n orig_begin = get_original_index(req_begin, True, align_mode)\n orig_end = get_original_index(req_end - 1, False, align_mode) + 1\n\n return Span(orig_begin, orig_end)",
"def find_read_candidates(self, read):\n self.read_allele_dictionary = {}\n ref_alignment_start = read.reference_start\n ref_alignment_stop = self.get_read_stop_position(read)\n # if the region has reached a very high coverage, we are not going to parse through all the reads\n if self.coverage[ref_alignment_start] > 300:\n return False\n cigar_tuples = read.cigartuples\n read_sequence = read.query_sequence\n read_id = read.query_name\n read_quality = read.query_qualities\n ref_sequence = self.fasta_handler.get_sequence(chromosome_name=self.chromosome_name,\n start=ref_alignment_start,\n stop=ref_alignment_stop+10)\n\n self.read_info[read_id] = (ref_alignment_start, ref_alignment_stop, read.mapping_quality, read.is_reverse)\n for pos in range(ref_alignment_start, ref_alignment_stop):\n self.read_id_by_position[pos].append((read_id, ref_alignment_start, ref_alignment_stop))\n for i, ref_base in enumerate(ref_sequence):\n self.reference_dictionary[ref_alignment_start + i] = ref_base\n\n # read_index: index of read sequence\n # ref_index: index of reference sequence\n read_index = 0\n ref_index = 0\n found_valid_cigar = False\n for cigar in cigar_tuples:\n cigar_code = cigar[0]\n length = cigar[1]\n # get the sequence segments that are effected by this operation\n ref_sequence_segment = ref_sequence[ref_index:ref_index+length]\n read_quality_segment = read_quality[read_index:read_index+length]\n read_sequence_segment = read_sequence[read_index:read_index+length]\n\n if cigar_code != 0 and found_valid_cigar is False:\n read_index += length\n continue\n found_valid_cigar = True\n\n # send the cigar tuple to get attributes we got by this operation\n ref_index_increment, read_index_increment = \\\n self.parse_cigar_tuple(cigar_code=cigar_code,\n length=length,\n alignment_position=ref_alignment_start+ref_index,\n ref_sequence=ref_sequence_segment,\n read_sequence=read_sequence_segment,\n read_id=read_id,\n quality=read_quality_segment)\n\n # increase the read index iterator\n read_index += read_index_increment\n ref_index += ref_index_increment\n\n # after collecting all alleles from reads, update the global dictionary\n for position in self.read_allele_dictionary.keys():\n if position < self.region_start_position or position > self.region_end_position:\n continue\n self.rms_mq[position] += read.mapping_quality * read.mapping_quality\n for record in self.read_allele_dictionary[position]:\n # there can be only one record per position in a read\n allele, allele_type = record\n\n if allele_type == MATCH_ALLELE or allele_type == MISMATCH_ALLELE:\n # If next allele is indel then group it with the current one, don't make a separate one\n if position + 1 <= ref_alignment_stop and position + 1 in self.read_allele_dictionary.keys():\n next_allele, next_allele_type = list(self.read_allele_dictionary[position + 1].keys())[0]\n if next_allele_type == INSERT_ALLELE or next_allele_type == DELETE_ALLELE:\n continue\n self.positional_read_info[position].append(\n (read_id, ref_alignment_start, ref_alignment_stop, read.mapping_quality))\n self._update_positional_allele_dictionary(read_id, position, allele, allele_type,\n read.mapping_quality)\n else:\n # it's an insert or delete, so, add to the previous position\n self.positional_read_info[position-1].append(\n (read_id, ref_alignment_start, ref_alignment_stop, read.mapping_quality))\n self._update_positional_allele_dictionary(read_id, position-1, allele, allele_type,\n read.mapping_quality)\n return True",
"def simulate_read(self):\n\n fastafile = ps.FastaFile(self.genome_fa)\n # left split read\n\n insert = int(np.random.normal(self.insert_size, (self.insert_size / 12), 1))\n start = int(np.random.randint(self.chr_pos_start, (self.chr_pos_end + 1)))\n left_end = start + self.read_length\n total_end = start + int(np.round(insert))\n right_start = total_end - self.read_length\n if total_end > self.chr_pos_end:\n # split read scenario or insert spanning split read scenario\n if left_end > self.chr_pos_end:\n # left read spanning split read scenario\n # left_read\n left_dntps = self.chr_pos_end - start\n right_dntps = self.read_length - left_dntps\n\n # the error could be here\n left_split_read = fastafile.fetch(self.chr, start, self.chr_pos_end)\n right_split_read = fastafile.fetch(self.chr, self.chr_pos_start, (self.chr_pos_start + right_dntps))\n left_read = left_split_read + right_split_read\n\n # right_read\n right_start = self.chr_pos_start + int(round(self.insert_size - left_dntps - self.read_length))\n right_read = fastafile.fetch(self.chr, right_start, (right_start + self.read_length))\n\n # assertion to check the error here\n\n common_id = \"%s|%s|%s:%s-%s:%s|%s:%s|1|%s\" % (\n self.read_number,\n self.chr,\n start,\n self.chr_pos_end,\n self.chr_pos_start,\n (self.chr_pos_start + right_dntps),\n right_start,\n (right_start + self.read_length),\n self.circle_id,\n )\n\n else:\n if right_start > self.chr_pos_end:\n # insert spanning split read scenario\n left_read = fastafile.fetch(self.chr, start, (start + self.read_length))\n right_start = self.chr_pos_start + (right_start - self.chr_pos_end)\n right_read = fastafile.fetch(self.chr, right_start, (right_start + self.read_length))\n common_id = \"%s|%s|%s:%s|%s:%s|3|%s\" % (\n self.read_number,\n self.chr,\n start,\n (start + self.read_length),\n right_start,\n (right_start + self.read_length),\n self.circle_id,\n )\n else:\n # right split read scenario\n assert right_start <= self.chr_pos_end\n assert (right_start + self.read_length) > self.chr_pos_end\n left_read = fastafile.fetch(self.chr, start, (start + self.read_length))\n\n # compute right dntps\n left_dntps = self.chr_pos_end - right_start\n right_dntps = self.read_length - left_dntps\n left_split_read = fastafile.fetch(self.chr, right_start, self.chr_pos_end)\n right_split_read = fastafile.fetch(self.chr, self.chr_pos_start, (self.chr_pos_start + right_dntps))\n right_read = left_split_read + right_split_read\n common_id = \"%s|%s|%s:%s|%s:%s-%s:%s|2|%s\" % (\n self.read_number,\n self.chr,\n start,\n (start + self.read_length),\n right_start,\n self.chr_pos_end,\n self.chr_pos_start,\n (self.chr_pos_start, right_dntps),\n self.circle_id,\n )\n\n else:\n # non split read scenario\n left_read = fastafile.fetch(self.chr, start, (start + self.read_length))\n # correct right read start\n right_read = fastafile.fetch(self.chr, right_start, (right_start + self.read_length))\n common_id = \"%s|%s|%s:%s|%s:%s|0|%s\" % (\n self.read_number,\n self.chr,\n start,\n (start + self.read_length),\n right_start,\n (right_start + self.read_length),\n self.circle_id,\n )\n\n return (right_read, left_read, common_id)"
] |
[
"0.7088554",
"0.5690543",
"0.52616596",
"0.5222307",
"0.5163519",
"0.5146661",
"0.51298785",
"0.5016734",
"0.5016734",
"0.50140184",
"0.49561983",
"0.49236864",
"0.48811024",
"0.4854148",
"0.48433566",
"0.47868958",
"0.4782669",
"0.47505528",
"0.47457227",
"0.47210136",
"0.46688858",
"0.46609023",
"0.46509832",
"0.46500877",
"0.46496737",
"0.4638458",
"0.45983136",
"0.45444614",
"0.45416278",
"0.45345703"
] |
0.7968826
|
0
|
Return an Insertion_position instance giving the position of the far end of the RISCC genome-side read. The output strand should be the same as that of the cassette, given a relative_read_direction of inward/outward from the cassette. See get_insertion_pos_from_flanking_region_pos for how the inputs work, and how this is done for insertion positions. This is essentially calculating an "insertion position" from the OTHER SIDE of this read (so if the read goes inward toward the cassette, the position is calculated as if it went away from the cassette, and vice versa), and then reversing the strand to make it match the strand of the real cassette.
|
def get_RISCC_pos_from_read_pos(read_aln_or_pos, cassette_end, relative_read_direction='inward', immutable_position=True):
check_valid_end_info(cassette_end, relative_read_direction)
imaginary_relative_direction= ('outward' if relative_read_direction=='inward' else 'inward')
imaginary_cassette_position = get_insertion_pos_from_flanking_region_pos(read_aln_or_pos, cassette_end,
imaginary_relative_direction)
if imaginary_cassette_position in SPECIAL_POSITIONS.all_undefined:
return imaginary_cassette_position
real_strand = ('-' if imaginary_cassette_position.strand=='+' else '+')
return Insertion_position(imaginary_cassette_position.chromosome, real_strand,
full_position=imaginary_cassette_position.full_position, immutable=immutable_position)
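# A minimal usage sketch (illustrative only, assuming the functions above and their
# dependencies are in scope); the input is a 1-based (chrom, start_pos, end_pos, strand)
# tuple for the genome-side read.
genome_side_read = ('chromosome_2', 200, 250, '+')   # read runs inward, toward the cassette
riscc_pos = get_RISCC_pos_from_read_pos(genome_side_read, cassette_end='5prime',
                                        relative_read_direction='inward')
# The far end of the read (just before base 200) is reported, on the cassette strand.
print(riscc_pos.strand, riscc_pos.position_after)   # expected: + 200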
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_insertion_pos_from_flanking_region_pos(flanking_region_aln_or_pos, cassette_end, relative_read_direction, \n immutable_position=True):\n # check that basic values aren't weird\n check_valid_end_info(cassette_end, relative_read_direction)\n # parse flanking_region_aln_or_pos arg - it'll either return a tuple with the basics, or a special position code\n parsed_position = parse_flanking_region_aln_or_pos(flanking_region_aln_or_pos)\n try: chrom, start_pos, end_pos, strand = parsed_position\n except (TypeError, ValueError): return parsed_position\n check_valid_position_tuple(parsed_position)\n ### chromosome is always the same as read, so just leave it as is\n ### cassette strand is the same as read strand, OR the opposite if the read is opposite to cassette (happens in two cases)\n if (cassette_end=='5prime' and relative_read_direction=='inward'): pass\n elif (cassette_end=='3prime' and relative_read_direction=='outward'): pass\n else: strand = ('+' if strand=='-' else '-')\n ### cassette position depends on the read position and cassette_end in a somewhat complex way (see docstring)\n if (cassette_end=='5prime' and strand=='+') or (cassette_end=='3prime' and strand=='-'): pos_before, pos_after = end_pos, None\n else: pos_before, pos_after = None, start_pos\n return Insertion_position(chrom, strand, position_before=pos_before, position_after=pos_after, immutable=immutable_position)",
"def get_read_stop_position(read):\n ref_alignment_stop = read.reference_end\n\n # only find the position if the reference end is fetched as none from pysam API\n if ref_alignment_stop is None:\n positions = read.get_reference_positions()\n\n # find last entry that isn't None\n i = len(positions) - 1\n ref_alignment_stop = positions[-1]\n while i > 0 and ref_alignment_stop is None:\n i -= 1\n ref_alignment_stop = positions[i]\n\n return ref_alignment_stop",
"def calculate_global_position(strand, start, end, relative_position):\n if strand == 1:\n global_position = [start + x for x in relative_position]\n elif strand == -1:\n global_position = [end - x for x in relative_position]\n else:\n raise ValueError(\"Strand must be 1 or -1\")\n return global_position",
"def _get_indel_pos(self, variant_pos, read):\n hardclipped = 0 if read.cigartuples[0][0] != 5 else read.cigartuples[0][1] # read location must be adjusted for\n # number of hardclipped bases represented in cigar but not in read_seq https://www.biostars.org/p/119537/\n iloc = variant_pos - read.reference_start + read.query_alignment_start - 1 + hardclipped\n return iloc",
"def get_end_loc(self) -> Tuple[int, int]:\n assert self.pos_marker\n return self.pos_marker.working_loc_after(\n self.raw,\n )",
"def get_rel_pos(gRNA, min_anchor_length):\n if gRNA['cassette_label'] == 'Orphan':\n return -gRNA['gene_rel_start']\n\n if gRNA['strand'] == 'coding':\n rel_pos = gRNA['circle_start']-gRNA['forward_end']-gRNA['gene_rel_start']\n else:\n rel_pos = gRNA['reverse_start']-gRNA['gene_rel_start']-gRNA['circle_end']\n if rel_pos is pd.NA:\n rel_pos = 0\n\n if rel_pos < 0:\n # find position of first non-WC in pairing\n # If the resulting shortening of the alignment causes the anchor to be\n # less than the min anchor length, make rel_pos just past the non-WC bp\n match = mm_regex.search(gRNA['pairing'][::-1])\n mm_dist = match.start(0)\n if mm_dist + rel_pos < min_anchor_length:\n rel_pos = -(mm_dist+1)\n return rel_pos",
"def convert_rpos(self, rp, left=True):\n incr = -1 if left else 1\n if rp < self.rstart:\n print(\"WARNING: position %d is outside reference boundaries\" % rp,\n file=sys.stderr)\n return self.qstart\n elif rp >= self.rend:\n print(\"WARNING: position %d is outside reference boundaries\" % rp,\n file=sys.stderr)\n return self.qend\n else:\n ap = self._rpos_to_apos[rp]\n while 0 <= ap <= self.alen:\n if ap in self._apos_to_qpos:\n return self._apos_to_qpos[ap]\n ap += incr\n print(\"WARNING: position %d is outside alignment\" % ap,\n file=sys.stderr)\n return None",
"def gnuc_roll_right_ins(chrm, pos, gnuc_insseq):\n\n sb = faidx.SeqBuf(chrm, pos)\n chrmlen = faidx.refgenome.chrm2len(chrm)\n _gnuc_insseq_ = deque(gnuc_insseq)\n while True:\n if pos + 1 >= chrmlen:\n break\n right_base = sb.get_base(chrm, pos+1)\n leftmost = _gnuc_insseq_[0]\n if right_base == 'N' or leftmost == 'N':\n break\n if right_base != leftmost:\n break\n _gnuc_insseq_.popleft()\n _gnuc_insseq_.append(right_base)\n pos += 1\n\n return pos, ''.join(_gnuc_insseq_)",
"def affected_end(self):\n types = {alt.type for alt in self.ALT} # set!\n BAD_MIX = {INS, SV, BND, SYMBOLIC} # don't mix well with others\n if (BAD_MIX & types) and len(types) == 1 and list(types)[0] == INS:\n # Only insertions, return 0-based position right of first base\n return self.POS # right of first base\n else: # Return 0-based end position, behind last REF base\n return (self.POS - 1) + len(self.REF)",
"def _calc_relative_move_direction(self, char, direction):\n if char in (\"Left\", \"Right\"):\n di = -1 if self.video.hflip else 1\n else:\n di = -1 if self.video.vflip else 1\n return direction * di",
"def _calculate_position(self, lookup, alignment):\n index = 0 # Index of our split CIGAR string\n if alignment.get_rc() or lookup.get_rc(): # If we're reverse complementing\n qpos = lookup.get_reverse_position() - 1 # Start with the reverse position of the SNP, must subtract one\n else: # Otherwise\n qpos = lookup.get_forward_position() # Start with the forward posittion\n while True: # Endless loop to do weird things...\n try: # While we have a CIGAR string to parse\n old = qpos # Store our previously calculated SNP position\n # Seach the CIGAR string as a list, starting with index 0, for indels\n if re.search('M', alignment.get_cigar()[index]): # If we have a perfect match\n if qpos < int(''.join(re.findall(r'\\d+', alignment.get_cigar()[index]))): # If our SNP is in the perfect match\n break # Exit the loop, we have our position\n if re.search('D', alignment.get_cigar()[index]): # If we have a deletion relative to reference\n qpos += int(''.join(re.findall(r'\\d+', alignment.get_cigar()[index]))) # Add the deletion to our SNP position\n if re.search('[IS]', alignment.get_cigar()[index]): # If we have an insertion relative to reference\n qpos -= int(''.join(re.findall(r'\\d+', alignment.get_cigar()[index]))) # Subtract the insertion from our SNP postion\n index += 1 # Increase the index\n if qpos <= 0 or qpos >= lookup.get_length(): # If we've gone beyond the scope of our lookup: 0 is before the sequence, lookup.get_length() is after\n qpos = old # Go back to our previously calculated SNP postion\n break # Exit the loop, we have our position\n except IndexError: # If we run out of CIGAR string codes\n break # Exit the loop, we have our position\n self._position = alignment.get_position() + qpos # Our SNP position is at the mapping position plus the SNP position",
"def traverse_global_position(strand, reference_position, distance):\n if strand == 1:\n new_position = [x + distance for x in reference_position]\n elif strand == -1:\n new_position = [x - distance for x in reference_position]\n else:\n raise ValueError(\"Strand must be 1 or -1\")\n return new_position",
"def relative_position(self) -> Tuple[int, int]:\n return self.position[0] - self.region.rect.x, self.position[1] - self.region.rect.y",
"def relative_position(self) -> Tuple[int, int]:\n return self.position[0] - self.region.rect.x, self.position[1] - self.region.rect.y",
"def get_new_origin(self, direction=None):\n y, x = 1, 0\n direction_coords = {'origin': (0, 0), 'right': (0, 1), 'left': (0, -1)}\n if direction and direction in direction_coords:\n y, x = direction_coords[direction]\n return (self.origin[0] + y, self.origin[1] + x)",
"def right(self):\r\n z = len(direction_tuple)\r\n if self.d in direction_tuple:\r\n index = direction_tuple.index(self.d)\r\n if index == (z-1):\r\n self.d = direction_tuple[0]\r\n else:\r\n self.d = direction_tuple[index + 1]\r\n else:\r\n print(\"NO VALID ROBOT POSITION\")",
"def get_coord_in_direction(self, position, direction):\n x_r = position[x]\n y_r = position[y]\n if direction == NORTH:\n y_r = y_r + 1\n elif direction == NORTHEAST:\n x_r = x_r + 1\n y_r = y_r + 1\n elif direction == EAST:\n x_r = x_r + 1\n elif direction == SOUTHEAST:\n x_r = x_r + 1\n y_r = y_r - 1\n elif direction == SOUTH:\n y_r = y_r - 1\n elif direction == SOUTHWEST:\n x_r = x_r - 1\n y_r = y_r - 1\n elif direction == WEST:\n x_r = x_r - 1\n else: # direction == NORTHWEST\n x_r = x_r - 1\n y_r = y_r + 1\n\n return (x_r, y_r)",
"def get_relative_position_matrix(length, max_relative_position, direction, offset=True):\n range_vec = torch.arange(length).long()\n if torch.cuda.is_available():\n range_vec = range_vec.cuda()\n range_mat = range_vec[:, None].expand(length, length)\n distance_mat = range_mat - range_mat.transpose(0, 1)\n if max_relative_position is None:\n distance_mat_clipped = distance_mat\n else:\n distance_mat_clipped = torch.clamp(distance_mat,\n -max_relative_position, max_relative_position)\n if direction:\n # Shift values to be >= 0. Each integer still uniquely identifies a relative\n # position difference.\n if offset and max_relative_position is not None:\n final_mat = distance_mat_clipped + max_relative_position\n else:\n final_mat = distance_mat_clipped\n else:\n # Do not distinguish the forward and backward positions.\n # Just leave the absolute relative position representation.\n final_mat = distance_mat_clipped.abs()\n return final_mat",
"def downright(self):\n return Coord([self.x + 1, self.y + 1])",
"def nt_pos(self, pos):\n seq_consumed = 0\n if self.coding_blocks is None or len(self.coding_blocks) == 0:\n return int(self.end - pos - 1 if self.rev_strand else pos - self.start)\n for block in (reversed(self.coding_blocks) if self.rev_strand else self.coding_blocks):\n if pos >= block[0] and pos < block[1]:\n if self.rev_strand: return (block[1] - pos - 1 + seq_consumed)\n else: return (pos - block[0] + seq_consumed)\n else: \n seq_consumed += block[1] - block[0]\n raise RuntimeError(\"Position %d not within feature %s\" % (pos, self.seq_record.name))",
"def upright(self):\n return Coord([self.x + 1, self.y - 1])",
"def find_range_from_cons_pos(my_pos, gpcr_pdb):\n (ext_range,chain)=gpcr_pdb[my_pos]\n pos_range=str(ext_range)\n #pos_range=ext_range+\"-\"+ext_range\n return pos_range",
"def get_new_position(row_delta, column_delta):\n new_row = start_row - row_delta\n new_column = start_column + column_delta\n return new_row, new_column",
"def get_end_plus_coordinate(self):\r\n if self.__orientation == Direction.VERTICAL:\r\n end_plus_coordinate = (self.__location[0] + self.__length,\r\n self.__location[1])\r\n if self.__orientation == Direction.HORIZONTAL:\r\n end_plus_coordinate = (\r\n self.__location[0], self.__location[1] + self.__length)\r\n return end_plus_coordinate",
"def _get_look_right(self):\n view_direction = self.look_to - self.look_from\n right_vec = normalize(cross_product(view_direction, self.look_up))\n return right_vec",
"def get_ref_and_start_and_offset(forward_ref_sequence: str,\n strand: bed_pb2.BedRecord.Strand,\n chrom_start: int,\n chrom_end: int) -> Tuple[str, int, int]:\n ref_sequence = forward_ref_sequence\n if strand == bed_pb2.BedRecord.Strand.FORWARD_STRAND:\n start = chrom_start\n offset = 1\n elif strand == bed_pb2.BedRecord.Strand.REVERSE_STRAND:\n start = chrom_end\n offset = -1\n # For the reverse strand, we want the reverse complement.\n ref_sequence = reverse_complement(forward_ref_sequence)\n else:\n raise ValueError('Strand must be set.')\n return ref_sequence, start, offset",
"def getStartAndEndCoordinates(alignedSegment):\n return alignedSegment.reference_start, getFirstNonClippedPositionInRead(alignedSegment, readSeq), \\\n alignedSegment.reference_end-1, getLastNonClippedPositionInRead(alignedSegment, readSeq)",
"def relative_to_abs(rel_traj, start_pos, axis=0):\n # batch, seq_len, 2\n # the relative xy cumulated across time first\n displacement = np.cumsum(rel_traj, axis=axis)\n abs_traj = displacement + np.array([start_pos]) # [1,2]\n return abs_traj",
"def end_effectors_pos(self):\n def relative_pos_in_egocentric_frame(physics):\n end_effector = physics.bind(self._entity.end_effectors).xpos\n torso = physics.bind(self._entity.root_body).xpos\n xmat = np.reshape(physics.bind(self._entity.root_body).xmat, (3, 3))\n return np.reshape(np.dot(end_effector - torso, xmat), -1)\n return observable.Generic(relative_pos_in_egocentric_frame)",
"def test_RNA_position_placement_split(self):\n \n tool = pybedtools.create_interval_from_list(\"chr1 50 60 ENSMUSG1_1_83;ENSMUSG1_6_83 0 + 125 125\".split())\n location_dict = {\"ENSMUSG1\" : {\"strand\" : \"+\", \"regions\" : [(0, 50),\n (100, 150),\n ] \n }\n }\n \n self.assertEqual(RNA_position(tool, location_dict), (.50, .75) )\n \n tool = pybedtools.create_interval_from_list(\"chr1 50 60 ENSMUSG1_1_83;ENSMUSG1_6_83 0 - 25 25\".split())\n location_dict = {\"ENSMUSG1\" : {\"strand\" : \"-\", \"regions\" : [(100, 150),\n (0, 50),\n ] \n }\n }\n \n self.assertEqual(RNA_position(tool, location_dict), (.50, .75))"
] |
[
"0.690913",
"0.56426275",
"0.5469719",
"0.5355928",
"0.5296487",
"0.52198416",
"0.5152106",
"0.50924796",
"0.49645847",
"0.49341005",
"0.4889715",
"0.48862326",
"0.485524",
"0.485524",
"0.48241213",
"0.47878337",
"0.47712404",
"0.47377774",
"0.4734826",
"0.46962333",
"0.46710476",
"0.46619305",
"0.46355653",
"0.46216738",
"0.461597",
"0.46081305",
"0.46061474",
"0.45985806",
"0.45938957",
"0.4568478"
] |
0.6778229
|
1
|
Transform an Insertion_position instance into a (strand, ins_start, ins_end) tuple if it's not one already.
|
def _get_insertion_info(insertion_pos, allowed_strand_vals=SEQ_STRANDS):
try:
strand, ins_start, ins_end = insertion_pos.strand, insertion_pos.min_position, insertion_pos.max_position
except AttributeError:
strand, ins_start, ins_end = insertion_pos
if allowed_strand_vals is not None:
assert strand in allowed_strand_vals, "Strand should be %s, and is %s!"%(' or '.join(allowed_strand_vals), strand)
return strand, ins_start, ins_end
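# Minimal usage sketch (illustrative only): this helper accepts either an
# Insertion_position-like object (anything exposing .strand, .min_position and
# .max_position) or a plain (strand, start, end) tuple; passing allowed_strand_vals
# explicitly avoids relying on the module-level SEQ_STRANDS constant.
strand, ins_start, ins_end = _get_insertion_info(('+', 100, 101), allowed_strand_vals=('+', '-'))
print(strand, ins_start, ins_end)   # expected: + 100 101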
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def copy(self):\n return Insertion_position(self.chromosome, self.strand, position_before=self.position_before, \n position_after=self.position_after)",
"def __repr__(self):\n return \"Insertion_position('%s', '%s', full_position='%s', immutable=%s)\"%(self.chromosome, self.strand, \n self.full_position, self.immutable)",
"def HTSeq_pos_to_tuple(HTSeq_pos):\n try:\n chrom = HTSeq_pos.chrom\n except AttributeError:\n raise MutantError(\"Invalid position %s! Need an HTSeq iv object. (If empty, maybe read wasn't aligned?)\"%(HTSeq_pos,))\n strand = HTSeq_pos.strand\n # HTSeq is 0-based and I want 1-based, thus the +1; end has no +1 because in HTSeq end is the base AFTER the alignment.\n start_pos = HTSeq_pos.start+1\n end_pos = HTSeq_pos.end\n output_pos = (chrom, start_pos, end_pos, strand)\n check_valid_position_tuple(output_pos)\n return output_pos",
"def getInserts(my_diff, my_cigar):\n my_ranges = list(getDiffLocs(my_cigar, 'I'))\n my_ins = (my_diff[x:y] for x,y in my_ranges)\n return ((x[0], 'I', y) if len(y) == 1 else (x[0], 'S', y)\n for x,y in zip(my_ranges, my_ins))",
"def get_transcript_genomic_inserted_sequence(\n self, parts: List\n ) -> Optional[Tuple[Union[str, int], Union[str, int]]]:\n # Check inserted sequences\n if \"_\" in parts[1] and parts[1].count(\"_\") == 1:\n # Replaced by sequence positions\n inserted_sequences = self.get_valid_digits(parts[1])\n if not inserted_sequences:\n return None\n inserted_sequence1, inserted_sequence2 = inserted_sequences\n if inserted_sequence1 > inserted_sequence2:\n return None\n else:\n # Replaced by nucleotides\n inserted_sequence1 = self.get_sequence(parts[1])\n inserted_sequence2 = None\n return inserted_sequence1, inserted_sequence2",
"def __init__(self, chromosome, strand, full_position=None, position_before=None, position_after=None, immutable=False):\n # need to make instance mutable to be able to set anything, due to how __setattr__ is decorated\n self.make_mutable_REMEMBER_CLEANUP_FIRST() \n # now start setting attributes\n self.chromosome = chromosome\n self.strand = strand\n # parse full_position if provided\n if full_position is not None:\n if (position_before is not None) or (position_after is not None):\n raise ValueError(\"If providing full_position, cannot also provide position_before/position_after!\")\n self.position_before, self.position_after = self._parse_full_position(full_position)\n # otherwise use position_before and/or position_after\n else:\n if position_before is None and position_after is None:\n raise ValueError(\"Can't create an Insertion_position object with no known position values!\")\n try:\n self.position_before = None if position_before is None else int(position_before)\n self.position_after = None if position_after is None else int(position_after)\n except TypeError: \n raise ValueError(\"position_before/position_after must be int-castable or None!\")\n if immutable: self.make_immutable()",
"def get_ref_and_start_and_offset(forward_ref_sequence: str,\n strand: bed_pb2.BedRecord.Strand,\n chrom_start: int,\n chrom_end: int) -> Tuple[str, int, int]:\n ref_sequence = forward_ref_sequence\n if strand == bed_pb2.BedRecord.Strand.FORWARD_STRAND:\n start = chrom_start\n offset = 1\n elif strand == bed_pb2.BedRecord.Strand.REVERSE_STRAND:\n start = chrom_end\n offset = -1\n # For the reverse strand, we want the reverse complement.\n ref_sequence = reverse_complement(forward_ref_sequence)\n else:\n raise ValueError('Strand must be set.')\n return ref_sequence, start, offset",
"def _make_pos(pos):\n return pos.chromosome, pos.strand, pos.min_position, pos.min_position+20",
"def pack(self) -> Tuple[int, int, str, bool]:\n return (\n self.start.place,\n self.end.place,\n self.label,\n self.has_direction,\n )",
"def get_insertion_pos_from_flanking_region_pos(flanking_region_aln_or_pos, cassette_end, relative_read_direction, \n immutable_position=True):\n # check that basic values aren't weird\n check_valid_end_info(cassette_end, relative_read_direction)\n # parse flanking_region_aln_or_pos arg - it'll either return a tuple with the basics, or a special position code\n parsed_position = parse_flanking_region_aln_or_pos(flanking_region_aln_or_pos)\n try: chrom, start_pos, end_pos, strand = parsed_position\n except (TypeError, ValueError): return parsed_position\n check_valid_position_tuple(parsed_position)\n ### chromosome is always the same as read, so just leave it as is\n ### cassette strand is the same as read strand, OR the opposite if the read is opposite to cassette (happens in two cases)\n if (cassette_end=='5prime' and relative_read_direction=='inward'): pass\n elif (cassette_end=='3prime' and relative_read_direction=='outward'): pass\n else: strand = ('+' if strand=='-' else '-')\n ### cassette position depends on the read position and cassette_end in a somewhat complex way (see docstring)\n if (cassette_end=='5prime' and strand=='+') or (cassette_end=='3prime' and strand=='-'): pos_before, pos_after = end_pos, None\n else: pos_before, pos_after = None, start_pos\n return Insertion_position(chrom, strand, position_before=pos_before, position_after=pos_after, immutable=immutable_position)",
"def coordinates(self, name, start=None, end=None):\n record = self.process(name)\n if not start and not end:\n start = 1\n end = record.end - record.start + 1\n positions = {}\n match_positions = []\n\n if record.strand == '+':\n _start = 1\n for relative, actual in enumerate(range(record.start - 1, record.end),\n start=_start):\n positions[relative] = actual\n for pos in range(start, end + 1):\n match_positions.append(positions[pos])\n return [(record.scaffold, min(match_positions), max(match_positions) + 1,\n record.mirbase_name, 0, record.strand)]\n\n elif record.strand == '-':\n _start = 1\n for relative, actual in enumerate(reversed(range(record.start - 1,\n record.end)), start=_start):\n positions[relative] = actual\n for pos in range(start, end + 1):\n match_positions.append(positions[pos])\n return [(record.scaffold, min(match_positions), max(match_positions) + 1,\n record.mirbase_name, 0, record.strand)]",
"def test_RNA_position_placement_split(self):\n \n tool = pybedtools.create_interval_from_list(\"chr1 50 60 ENSMUSG1_1_83;ENSMUSG1_6_83 0 + 125 125\".split())\n location_dict = {\"ENSMUSG1\" : {\"strand\" : \"+\", \"regions\" : [(0, 50),\n (100, 150),\n ] \n }\n }\n \n self.assertEqual(RNA_position(tool, location_dict), (.50, .75) )\n \n tool = pybedtools.create_interval_from_list(\"chr1 50 60 ENSMUSG1_1_83;ENSMUSG1_6_83 0 - 25 25\".split())\n location_dict = {\"ENSMUSG1\" : {\"strand\" : \"-\", \"regions\" : [(100, 150),\n (0, 50),\n ] \n }\n }\n \n self.assertEqual(RNA_position(tool, location_dict), (.50, .75))",
"def affected_start(self):\n types = {alt.type for alt in self.ALT} # set!\n BAD_MIX = {INS, SV, BND, SYMBOLIC} # don't mix well with others\n if (BAD_MIX & types) and len(types) == 1 and list(types)[0] == INS:\n # Only insertions, return 0-based position right of first base\n return self.POS # right of first base\n else: # Return 0-based start position of first REF base\n return self.POS - 1 # left of first base",
"def _get_insertion_state(\n self,\n insertion_state: int\n ) -> Tuple[bool, bool]:\n if not self.insertion._state_initialized:\n self.log.debug('insertion state not initialized, '\n 'scheduling lightpath calcs for later')\n\n schedule_task(self._calc_cache_lightpath_state, delay=2.0)\n raise MirrorLogicError('insertion state not initialized')\n\n x_in = self.insertion.check_inserted(insertion_state)\n x_out = self.insertion.check_removed(insertion_state)\n\n return x_out, x_in",
"def get_position(self) -> typing.Tuple[int, int]:\n raise NotImplementedError",
"def position_tuples(self, protein=False):\n if protein:\n if not self.is_coding():\n raise AttributeError(\n \"Cannot return wild type protein \"\n \"position tuples for non-coding wild \"\n \"type [{}]\".format(self.parent_name)\n )\n else:\n seq = self.protein_seq\n offset = self.protein_offset\n else:\n seq = self.dna_seq\n offset = self.dna_offset\n\n return [(i + offset + 1, seq[i]) for i in range(len(seq))]",
"def getStartPosMapper(seq, subst=None):\n if subst is None:\n subst = make_identity_substitution_matrix(1, -1, alphabet=AALPHABET)\n def findPos(pep):\n d = ssw(pep)\n return int(d['query_begin'] - d['target_begin'])\n \n ssw = StripedSmithWaterman(query_sequence=seq,\n protein=True,\n substitution_matrix=subst)\n return findPos",
"def insert(self, position, insert):\n assert all(new in self.ALPHABET for new in insert)\n if position < 1 or position - 1 > len(self.sequence):\n raise ValueError(f\"Insertion position {position} out of bonds for given sequence.\")\n self.sequence = f\"{self.sequence[: position - 1]}{insert}{self.sequence[position:]}\"\n if \"mutations\" in self.metadata.keys():\n self.metadata[\"mutations\"] += f\" ins{position}{insert}\"\n else:\n self.metadata[\"mutations\"] = f\"ins{position}{insert}\"",
"def obtain_seq_pos_info(result,seq_pos,seq_pos_n,chain_name,multiple_chains):\n chain_nm_seq_pos=\"\"\n if multiple_chains:\n chain_nm_seq_pos=chain_name\n for pos in result:\n if pos[0] != \"-\": #Consider only num in the pdb\n seq_pos.append([pos[0][0],pos[0][1],\"\",chain_nm_seq_pos,seq_pos_n]);\n seq_pos_n+=1\n return (seq_pos,seq_pos_n)",
"def _extract_inserts(self, query) :\n\t\tsparql = self.n.sparql\n\t\t\n\t\t# because the loop below alter's the contents of each insert\n\t\tquery = copy.copy(query)\n\t\t\n\t\t# grab the insert list\n\t\tinserts = query[sparql.insert]\n\t\t\n\t\tnew_inserts = []\n\t\tfor insert in inserts :\n\t\t\tif sparql.create in insert :\n\t\t\t\tvar = insert[sparql.subject]\n\t\t\t\tpredicate = insert[sparql.predicate]\n\t\t\t\t\n\t\t\t\tdel insert[sparql.subject]\n\t\t\t\tdel insert[sparql.predicate]\n\t\t\t\t\n\t\t\t\tif predicate is None :\n\t\t\t\t\tnew_inserts.append(insert)\n\t\t\t\telse :\n\t\t\t\t\tnew_inserts.append({\n\t\t\t\t\t\tsparql.var : var,\n\t\t\t\t\t\tpredicate : insert,\n\t\t\t\t\t})\n\t\treturn new_inserts",
"def get_label_start_end(\n label_base_positions: Iterable[int],\n strand: bed_pb2.BedRecord.Strand) -> Tuple[Optional[int], Optional[int]]:\n # Gap and padding tokens may have a position of -1, since they are not\n # actually present in the reference. Remove all instances of -1, since we do\n # not want to consider it when computing min/max position.\n valid_label_base_positions = set(label_base_positions)\n valid_label_base_positions.discard(-1)\n\n if not valid_label_base_positions:\n return None, None\n start = min(valid_label_base_positions)\n end = max(valid_label_base_positions)\n if strand == bed_pb2.BedRecord.Strand.FORWARD_STRAND:\n end += 1\n elif strand == bed_pb2.BedRecord.Strand.REVERSE_STRAND:\n start -= 1\n else:\n raise ValueError('Strand must be set.')\n return start, end",
"def genInsertPosition(self):\n insize = np.random.normal(self.insertSize, self.insertStdev)\n while True:\n start = random.randint(self.fpstart, self.fpend)\n end = start + insize\n if end < self.fpend and self.isValid(start, end):\n return (start, end - self.readlen)",
"def _get_insertion_state(self, x: float) -> Tuple[bool, bool]:\n if self.x_ranges == []:\n # default case for always-in mirrors\n return False, True\n\n if np.shape(self.x_ranges) != (2, 2):\n # improper ranges for insertion, fail\n raise MirrorLogicError(\n 'Provided x-ranges are the malformed. '\n f'got: {np.shape(self.x_ranges)}, expected (2,2)')\n\n ins_bools = self._find_matching_range_indices(self.x_ranges, x)\n return ins_bools[0], ins_bools[1] # out, in",
"def getStartAndEndCoordinates(alignedSegment):\n return alignedSegment.reference_start, getFirstNonClippedPositionInRead(alignedSegment, readSeq), \\\n alignedSegment.reference_end-1, getLastNonClippedPositionInRead(alignedSegment, readSeq)",
"def get_start_loc(self) -> Tuple[int, int]:\n assert self.pos_marker\n return self.pos_marker.working_loc",
"def position_in_operon(self):\n if self.transcription_units:\n tu_lengths = [len(tu.location) for tu in self.transcription_units]\n longest_tu = self.transcription_units[int(np.argmax(tu_lengths))]\n if longest_tu.location.strand == 1:\n gene_starts = sorted([gene.location.start.position for gene in longest_tu.genes])\n this_gene_start = self.location.start.position\n else:\n gene_starts = sorted([gene.location.end.position for gene in longest_tu.genes])\n gene_starts.reverse()\n this_gene_start = self.location.end.position\n position = np.where(np.array(gene_starts) == this_gene_start)[0][0] + 1\n else:\n position = 1\n return position",
"def coordinates(self, name, start=None, end=None):\n if \"|\" in name:\n self.name = name.split(\"|\")[0]\n else:\n self.name = name\n positions = {}\n match_positions = []\n records = []\n segments = []\n result_segments = []\n for record in self.process(self.name):\n records.append(record)\n records.sort(key=lambda x: int(x.exon_number))\n\n if records[0].strand == '+':\n _start = 1\n for record in records:\n for relative, actual in enumerate(range(record.start, record.end + 1),\n start=_start):\n positions[relative] = actual\n _start = relative + 1\n for pos in range(start, end):\n match_positions.append(positions[pos])\n for key, group in groupby(enumerate(match_positions),\n lambda x: x[0] - x[-1]):\n segment = list(map(itemgetter(1), group))\n segments.append([segment[0], segment[-1]])\n for segment in segments:\n for record in records:\n if segment[0] >= record.start and segment[1] <= record.end:\n result_segments.append((record.scaffold, segment[0], segment[1],\n record.transcript_id + '|' + record.gene_name, 0, record.strand))\n\n elif records[0].strand == '-':\n _start = 1\n for record in records:\n for relative, actual in enumerate(reversed(range(record.start,\n record.end + 1)), start=_start):\n positions[relative] = actual\n _start = relative + 1\n for pos in range(start, end):\n match_positions.append(positions[pos])\n for key, group in groupby(enumerate(reversed(match_positions)),\n lambda x: x[0] - x[-1]):\n segment = list(map(itemgetter(1), group))\n segments.append([segment[0], segment[-1]])\n for segment in segments:\n for record in records:\n if segment[0] >= record.start and segment[1] <= record.end:\n result_segments.append((record.scaffold, segment[0], segment[1],\n record.transcript_id + '|' + record.gene_name, 0, record.strand))\n\n if len(result_segments) == 0:\n logger.debug('%s, %s, %s' % (name, start, end))\n logger.debug('%s' % str(segments))\n for r in records:\n logger.debug('%s %s %s %s' % (r.scaffold, r.strand,\n r.start, r.end))\n\n return result_segments",
"def cis_insertions():\n\n return [\n # 1000 bp upstream of Trp53bp2.\n Insertion(id='INS1', chromosome='1', position=182408172,\n strand=1, support=2, sample='s1',\n metadata=frozendict({'cis_id': 'CIS1'})),\n # Different chromosome.\n Insertion(id='INS2', chromosome='4', position=77843175,\n strand=1, support=2, sample='s1',\n metadata=frozendict({'cis_id': 'CIS2'}))\n ] # yapf: disable",
"def get_gtf_region_position_info(region_gtf_pr):\n ## Check if region has no features\n if region_gtf_pr.empty:\n return dict()\n\n gene_info_dict = dict()\n for name, group in region_gtf_pr.df.groupby(\"transcript_id\"):\n for row in group.itertuples():\n\n ## Add Transcript Info\n if row.Feature == \"transcript\":\n\n if row.gene_id not in gene_info_dict:\n gene_info_dict[row.gene_id] = dict()\n if row.transcript_id not in gene_info_dict[row.gene_id]:\n gene_info_dict[row.gene_id][row.transcript_id] = {\"exons\": []}\n\n gene_info_dict[row.gene_id][row.transcript_id][\"chrom\"] = row.Chromosome\n gene_info_dict[row.gene_id][row.transcript_id][\"start\"] = row.Start\n gene_info_dict[row.gene_id][row.transcript_id][\"end\"] = row.End\n gene_info_dict[row.gene_id][row.transcript_id][\n \"gene_symbol\"\n ] = row.gene_name\n gene_info_dict[row.gene_id][row.transcript_id][\n \"biotype\"\n ] = row.gene_type\n gene_info_dict[row.gene_id][row.transcript_id][\"strand\"] = row.Strand\n\n ## Add exon feature info\n elif row.Feature == \"exon\":\n\n if row.gene_id not in gene_info_dict:\n gene_info_dict[row.gene_id] = dict()\n if row.transcript_id not in gene_info_dict[row.gene_id]:\n gene_info_dict[row.gene_id][row.transcript_id] = {\"exons\": []}\n\n gene_info_dict[row.gene_id][row.transcript_id][\"exons\"].append(\n {\"start\": row.Start, \"end\": row.End, \"exon_number\": row.exon_number}\n )\n\n return gene_info_dict",
"def get_pos(self) -> tuple:\n return self.pos"
] |
[
"0.6175135",
"0.60323584",
"0.5965107",
"0.5956609",
"0.5523276",
"0.5421098",
"0.52681917",
"0.524433",
"0.52264607",
"0.5174197",
"0.5142862",
"0.512147",
"0.50527865",
"0.5031616",
"0.5016128",
"0.5003051",
"0.49822026",
"0.49640447",
"0.49618804",
"0.49489638",
"0.49039945",
"0.48966074",
"0.48959976",
"0.48776954",
"0.48475066",
"0.48303473",
"0.48232368",
"0.48095652",
"0.48002437",
"0.47951424"
] |
0.7646623
|
0
|
Look up insertion_pos in chromosome_GFF_record; return (gene_ID, orientation, subfeature, dist_to_edges). Insertion_pos is an Insertion_position instance, or a (strand, start_pos, end_pos) tuple; insertion_pos is assumed to be in the chromosome given by chromosome_GFF_record (the caller should check that). If insertion_pos overlaps a gene in chromosome_GFF_record, return a 4-tuple of gene_ID, orientation ('sense' if insertion_pos and gene are in the same direction, 'antisense' otherwise), the name of the subfeature (exon/intron/UTR) insertion_pos is in (or '?' if detailed_features is False), and the distances from the 5' and 3' edges of the gene. If insertion_pos is on the edge of two features, subfeature will be 'X/Y'; if it's something more unlikely/complicated, subfeature will be 'X/Y/Z??' and a warning will be printed to STDOUT. If the gene has multiple mRNAs (splice variants) and the insertion is in different features depending on the splice variant, the features will be separated with MULTIPLE_mRNA_JOIN (like "intron|exon").
|
def find_gene_by_pos_gff3(insertion_pos, chromosome_GFF_record, detailed_features=False, nearest_genes_for_intergenic=False,
quiet=False):
from BCBio import GFF # importing here instead of toplevel because sometimes it's missing and I want to be able to use this file
# MAYBE-TODO add gene name as well as ID? But gff parsing doesn't seem to grab those at all - leave it for a separate function.
# (see experiments/arrayed_library/1311_small-lib_mutant-distribution-file/notes.txt)
# MAYBE-TODO add gene lengths? Right now they can be inferred from the distances to gene start/end for mutants in genes,
# which seems sufficient.
# get the needed information from either input format
strand, ins_start, ins_end = _get_insertion_info(insertion_pos, None)
### Go over all the genes in the chromosome record,
# and calculate the distance from the insertion (or 0 if insertion overlaps the gene)
gene_distances = []
for gene in chromosome_GFF_record.features:
# for GFF positions, always add 1 to the gene/feature start, because BCBio uses 0-based and I use 1-based,
# but don't add 1 to the end, because BCBio uses end-exclusive and I use end-inclusive.
gene_start, gene_end = gene.location.start.position+1, gene.location.end.position
if position_test_overlap(gene_start, gene_end, ins_start, ins_end): gene_distances.append((gene, 0))
elif nearest_genes_for_intergenic:
if gene_end < ins_start: gene_distances.append((gene, gene_end-ins_start))
elif gene_start > ins_end: gene_distances.append((gene, gene_start-ins_end))
else: raise MutantError("Gene position confusion!")
### Pick genes to return - either all the genes that overlap the insertion, OR the closest gene on each side if none overlap
if not gene_distances:
nearest_genes = []
elif min(abs(dist) for (gene,dist) in gene_distances) == 0:
nearest_genes = [(gene,dist) for (gene,dist) in gene_distances if dist==0]
elif nearest_genes_for_intergenic:
# note that sometimes there ARE no genes on one/both sides!
nearest_genes = []
genes_upstream = [(gene,dist) for (gene,dist) in gene_distances if dist < 0]
genes_downstream = [(gene,dist) for (gene,dist) in gene_distances if dist > 0]
if genes_upstream: nearest_genes.append(max(genes_upstream, key = lambda gene_and_dist: gene_and_dist[1]))
if genes_downstream: nearest_genes.append(min(genes_downstream, key = lambda gene_and_dist: gene_and_dist[1]))
else:
nearest_genes = []
### Get all the data for each gene
# (see notes_on_GFF_parsing.txt for what a GFF3 record (chromosome_GFF_record) will be like)
gene_data_list = []
for (gene,dist) in nearest_genes:
gene_start, gene_end = gene.location.start.position+1, gene.location.end.position
gene_ID = gene.id
# for mutants in genes, calculate orientation of insertion_pos vs gene
if dist == 0:
if strand=='both': orientation = 'both'
elif gene.strand==1: orientation = 'sense' if strand=='+' else 'antisense'
elif gene.strand==-1: orientation = 'sense' if strand=='-' else 'antisense'
else: orientation = '?'
# for intergenic mutants, check whether the mutant is upstream or downstream of the gene, instead of orientation
else:
if dist * gene.strand < 0: orientation = 'downstream'
else: orientation = 'upstream'
# calculate distances:
# - if insertion is inside gene, give distance to 5' end and 3' end, separated by a comma
# - if it's intergenic, just give the distance to the closest side of the gene (already calculated)
if dist == 0:
dist1 = ins_end - gene_start
dist2 = gene_end - ins_start
if gene.strand==1: distances = '%s,%s'%(dist1,dist2)
else: distances = '%s,%s'%(dist2,dist1)
else: distances = str(abs(dist))
# basic features: intergenic, gene, gene edge
if dist == 0:
# if it overlaps an edge, note that in features_basic by adding 'gene_edge'
# (MAYBE-TODO if I look at things like inner/outer flanking regions, this is where that would go as well)
if position_test_contains(gene_start, gene_end, ins_start,ins_end): features_basic = []
else: features_basic = ['gene_edge']
else: features_basic = ['intergenic']
# figure out which feature of the gene the insertion is in, IF we're looking for detailed ones (it's a lot of code)
if dist != 0: inner_feature = ''
elif not detailed_features: inner_feature = '?'
else:
if len(gene.sub_features)==0: inner_features = ['no_mRNA']
else: inner_features = []
for mRNA in gene.sub_features:
                if mRNA.type != 'mRNA':
if not quiet:
print("Warning: gene %s in gff file has unexpected non-mRNA sub-features! "%gene_ID
+"Returning '??' feature.")
inner_features.append('??')
else:
mRNA_start, mRNA_end = mRNA.location.start.position+1,mRNA.location.end.position
# if insertion_pos is outside the mRNA, use 'outside_mRNA' as inner_feature
if not position_test_overlap(mRNA_start, mRNA_end, ins_start, ins_end):
inner_features.append('outside_mRNA')
# if insertion_pos is inside the mRNA and mRNA has no subfeatures, use 'mRNA_no_exons' as inner_feature
elif len(mRNA.sub_features)==0:
if position_test_contains(mRNA_start, mRNA_end, ins_start, ins_end): inner_features.append('mRNA_no_exons')
else: inner_features.append('mRNA_edge')
else:
# otherwise go over all subfeatures, see which ones contain/overlap insertion_pos
# (check for overlap only if the contains test failed)
features_inside = []
if position_test_contains(mRNA_start, mRNA_end, ins_start, ins_end): features_edge = []
else: features_edge = ['mRNA_edge']
for feature in mRNA.sub_features:
feature_start, feature_end = feature.location.start.position+1, feature.location.end.position
try: feature_type = GENE_FEATURE_NAMES[feature.type]
except KeyError: feature_type = feature.type
if position_test_contains(feature_start, feature_end, ins_start, ins_end):
features_inside.append(feature_type)
elif position_test_overlap(feature_start, feature_end, ins_start, ins_end):
features_edge.append(feature_type)
# MAYBE-TODO may want to treat exons before 5'UTR or after 3'UTR specially?
# Not worth it, none in current file.
# if insertion_pos is inside a single mRNA subfeature, use the type of the subfeature as inner_feature
if len(features_inside)==1 and len(features_edge)==0:
inner_features.append(features_inside[0])
# if insertion_pos is on the edge of two mRNA subfeatures, use 'subfeature1/subfeature2'
elif len(features_inside)==0 and len(features_edge)==2:
inner_features.append('/'.join(features_edge))
# MAYBE-TODO treat insertions CLOSE to an edge specially too? How large is a splice junction?
# if insertion_pos is on the edge of ONE mRNA subfeature, or not touching any subfeatures at all,
# the implied subfeature is either an intron (if between features) or mRNA_before/after_exons,
# (which shouldn't happen in normal files).
elif len(features_inside)==0 and len(features_edge)<=1:
# figure out what the implied feature is - outside intron in CDS (normal) or UTR, or outside all exons
# note that before/after and 5'/3' are swapped if gene is on minus strand!
CDS_features = [feature for feature in mRNA.sub_features if feature.type=='CDS']
if ins_start < min([feature.location.start.position+1 for feature in mRNA.sub_features]):
if gene.strand==1: implied_feature = 'mRNA_before_exons'
elif gene.strand==-1: implied_feature = 'mRNA_after_exons'
elif ins_end > max([feature.location.end.position for feature in mRNA.sub_features]):
if gene.strand==1: implied_feature = 'mRNA_after_exons'
elif gene.strand==-1: implied_feature = 'mRNA_before_exons'
elif ins_start < min([feature.location.start.position+1 for feature in CDS_features]):
if gene.strand==1: implied_feature = "5'UTR_intron"
elif gene.strand==-1: implied_feature = "3'UTR_intron"
elif ins_end > max([feature.location.end.position for feature in CDS_features]):
if gene.strand==1: implied_feature = "3'UTR_intron"
elif gene.strand==-1: implied_feature = "5'UTR_intron"
else:
implied_feature = 'intron'
# set inner_feature based on whether insertion_pos is on a real/implied feature edge
# or completely inside an implied feature
if len(features_edge)==1:
inner_features.append(features_edge[0] + '/' + implied_feature)
elif len(features_edge)==0:
inner_features.append(implied_feature)
# if insertion_pos is inside two features, or inside one and on the edge of another,
# print a warning, and use all the feature names, with a ?? at the end to mark strangeness
else:
inner_features.append('/'.join(features_inside+features_edge) + '??')
if not quiet:
print(("Warning: Location (%s,%s) matched multiple features (%s) "
+"in gene %s!")%(ins_start, ins_end, inner_features[-1], gene_ID))
inner_feature = MULTIPLE_mRNA_JOIN.join(sorted(set(inner_features)))
# prepend whatever gene-level features (edge etc, or []) were found at the start to the full value
full_feature = '/'.join(features_basic + ([inner_feature] if inner_feature else []))
gene_data_list.append([gene_ID, orientation, full_feature, distances])
### Return appropriate value
# if no gene matching insertion_pos was found, return special value
if not gene_data_list:
return [SPECIAL_GENE_CODES.not_found, '-', '-', '-']
# if single gene found, return its info
elif len(gene_data_list) == 1:
return gene_data_list[0]
# if multiple genes found, return data like this: "gene1 & gene2", "sense & antisense", "intron & CDS/4'UTR",
# except that the feature for intergenic insertions should be "intergenic" instead of "intergenic & intergenic".
else:
full_data = [MULTIPLE_GENE_JOIN.join(multiple_vals) for multiple_vals in zip(*gene_data_list)]
if full_data[2] == MULTIPLE_GENE_JOIN.join(['intergenic']*2): full_data[2] = 'intergenic'
return full_data
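
A minimal usage sketch (added for illustration, not part of the original record): it assumes a hypothetical GFF3 path, chromosome name and insertion coordinates, and that find_gene_by_pos_gff3 and SPECIAL_GENE_CODES are available from the surrounding module; BCBio.GFF.parse is used the same way the function itself imports it.

# Hedged usage sketch -- "annotations.gff3", "chromosome_1" and the coordinates are made-up examples.
from BCBio import GFF

def example_insertion_lookup(gff3_path="annotations.gff3", target_chrom="chromosome_1"):
    # Insertion given as a (strand, start, end) tuple, 1-based and end-inclusive,
    # matching what _get_insertion_info expects above.
    insertion = ('+', 12345, 12366)
    with open(gff3_path) as infile:
        for chromosome_record in GFF.parse(infile):
            if chromosome_record.id == target_chrom:
                return find_gene_by_pos_gff3(insertion, chromosome_record, detailed_features=True)
    # chromosome not found in the file - fall back to the same special value the function uses
    return [SPECIAL_GENE_CODES.not_found, '-', '-', '-']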
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_insertion_info(insertion_pos, allowed_strand_vals=SEQ_STRANDS):\n try:\n strand, ins_start, ins_end = insertion_pos.strand, insertion_pos.min_position, insertion_pos.max_position\n except AttributeError:\n strand, ins_start, ins_end = insertion_pos\n if allowed_strand_vals is not None:\n assert strand in allowed_strand_vals, \"Strand should be %s, and is %s!\"%(' or '.join(allowed_strand_vals), strand)\n return strand, ins_start, ins_end",
"def snp2gene(scaffold, pos, gff=dbpaths['gff']):\n\n geneid = 'intergenic'\n geneloc = 'non-coding'\n\n fobj = open(gff, 'rb')\n for line in fobj:\n col = line.split()\n if col[0] == scaffold:\n if col[2] == \"mRNA\":\n if int(col[3])<=int(col[4]):\n if float(col[3]) <= pos <= float(col[4]):\n geneid = re.search('ID=([^;]*);', col[8]).groups()[0]\n else:\n if float(col[4]) <= pos <= float(col[3]):\n geneid = re.search('ID=([^;]*);', col[8]).groups()[0]\n\n if col[2] == \"CDS\":\n if float(col[3]) <= pos <= float(col[4]):\n geneloc = 'coding (exonic)'\n\n fobj.close()\n\n return (geneid, geneloc)",
"def get_gtf_region_position_info(region_gtf_pr):\n ## Check if region has no features\n if region_gtf_pr.empty:\n return dict()\n\n gene_info_dict = dict()\n for name, group in region_gtf_pr.df.groupby(\"transcript_id\"):\n for row in group.itertuples():\n\n ## Add Transcript Info\n if row.Feature == \"transcript\":\n\n if row.gene_id not in gene_info_dict:\n gene_info_dict[row.gene_id] = dict()\n if row.transcript_id not in gene_info_dict[row.gene_id]:\n gene_info_dict[row.gene_id][row.transcript_id] = {\"exons\": []}\n\n gene_info_dict[row.gene_id][row.transcript_id][\"chrom\"] = row.Chromosome\n gene_info_dict[row.gene_id][row.transcript_id][\"start\"] = row.Start\n gene_info_dict[row.gene_id][row.transcript_id][\"end\"] = row.End\n gene_info_dict[row.gene_id][row.transcript_id][\n \"gene_symbol\"\n ] = row.gene_name\n gene_info_dict[row.gene_id][row.transcript_id][\n \"biotype\"\n ] = row.gene_type\n gene_info_dict[row.gene_id][row.transcript_id][\"strand\"] = row.Strand\n\n ## Add exon feature info\n elif row.Feature == \"exon\":\n\n if row.gene_id not in gene_info_dict:\n gene_info_dict[row.gene_id] = dict()\n if row.transcript_id not in gene_info_dict[row.gene_id]:\n gene_info_dict[row.gene_id][row.transcript_id] = {\"exons\": []}\n\n gene_info_dict[row.gene_id][row.transcript_id][\"exons\"].append(\n {\"start\": row.Start, \"end\": row.End, \"exon_number\": row.exon_number}\n )\n\n return gene_info_dict",
"async def test_genomic_insertion(test_handler, genomic_insertion,\n grch38_genomic_insertion):\n resp = await test_handler.normalize(\"NC_000017.10:g.37880993_37880994insGCTTACGTGATG\") # noqa: E501\n assertion_checks(resp.variation_descriptor, grch38_genomic_insertion,\n \"NC_000017.10:g.37880993_37880994insGCTTACGTGATG\")\n\n fixture_id = \\\n \"normalize.variation:NC_000017.10%3Ag.37880993_37880994insGCTTACGTGATG\"\n resp = await test_handler.normalize(\"17-37880993-G-GGCTTACGTGATG\")\n assert resp.variation_descriptor.id == \\\n \"normalize.variation:17-37880993-G-GGCTTACGTGATG\"\n resp.variation_descriptor.id = fixture_id\n assertion_checks(resp.variation_descriptor, grch38_genomic_insertion,\n \"17-37880993-G-GGCTTACGTGATG\")\n\n resp = await test_handler.normalize(\n \"ERBB2 g.37880993_37880994insGCTTACGTGATG\")\n assert resp.variation_descriptor.id ==\\\n \"normalize.variation:ERBB2%20g.37880993_37880994insGCTTACGTGATG\"\n resp.variation_descriptor.id = fixture_id\n assertion_checks(resp.variation_descriptor, genomic_insertion,\n \"ERBB2 g.37880993_37880994insGCTTACGTGATG\")",
"def find_min_gene_distance(sequence_record, starting_values=None):\n min_distance = len(sequence_record.seq) if starting_values is None else starting_values[0]\n min_gene1 = 'none' if starting_values is None else starting_values[1]\n min_gene2 = 'none' if starting_values is None else starting_values[2]\n all_gene_positions = []\n for gene in sequence_record.features:\n # BCBio uses 0-based and end-exclusive positions (first-third base is bases 0,1,2, i.e range 0-3), \n # so add 1 to start and keep end as is to convert to 1-based-end-inclusive\n all_gene_positions.append((gene.location.start.position, gene.location.end.position-1, gene.id))\n all_gene_positions.sort()\n for (_,gene1_end,gene1_name), (gene2_start,_,gene2_name) in itertools.izip(all_gene_positions,all_gene_positions[1:]):\n # subtract 1 from distance, so if gene1 is 1-4 and gene2 is 5-9 the distance is 0\n gene_distance = gene2_start - gene1_end - 1\n if gene_distance < min_distance:\n min_distance = gene_distance \n min_gene1, min_gene2 = gene1_name, gene2_name\n return min_distance, min_gene1, min_gene2",
"def read_gff(gff):\n genome = getseq(args.genome)\n dictoftranscripts = {}\n for k in open(gff):\n if not k.startswith(\"#\"):\n lines = k.strip().split(\"\\t\")\n if lines[2] == \"exon\":\n strand = lines[6]\n chromosome = lines[0]\n start = lines[3]\n end = lines[4]\n transcriptid = re.search(\"Parent=transcript:(.*)\", lines[8]).group(1)\n if transcriptid + \"#\" + chromosome in dictoftranscripts:\n dictoftranscripts[transcriptid + \"#\" + chromosome].extend([start, end])\n else:\n dictoftranscripts[transcriptid + \"#\" + chromosome] = []\n dictoftranscripts[transcriptid + \"#\" + chromosome].extend([start, end])\n\n for key, value in dictoftranscripts.iteritems():\n value.sort()\n print value\n for coord1 in value:\n\n for coord2 in value[1:]:\n #print coord1, coord2\n if int(coord1) != int(value[-1]) and value.index(coord2) != value.index(coord1)+1 and value.index(coord2) > value.index(coord1):\n\n exon1_start = int(coord1)\n exon1_end = int(coord2)\n #print exon1_start, exon1_end\n #print key.split(\"#\")[1]\n #print value.index(coord1), value.index(coord2)\n exon_seq = genome.get(key.split(\"#\")[1],\"NA\")\n\n if exon_seq != \"NA\":\n sequence_exon = exon_seq[exon1_start:exon1_end+1]\n #print exon1_start, exon1_end, sequence_exon\n for start, end, strand, frame, pro in translate(sequence_exon):\n junction =\n print start, end, strand, frame, pro",
"def get_insertion_pos_from_flanking_region_pos(flanking_region_aln_or_pos, cassette_end, relative_read_direction, \n immutable_position=True):\n # check that basic values aren't weird\n check_valid_end_info(cassette_end, relative_read_direction)\n # parse flanking_region_aln_or_pos arg - it'll either return a tuple with the basics, or a special position code\n parsed_position = parse_flanking_region_aln_or_pos(flanking_region_aln_or_pos)\n try: chrom, start_pos, end_pos, strand = parsed_position\n except (TypeError, ValueError): return parsed_position\n check_valid_position_tuple(parsed_position)\n ### chromosome is always the same as read, so just leave it as is\n ### cassette strand is the same as read strand, OR the opposite if the read is opposite to cassette (happens in two cases)\n if (cassette_end=='5prime' and relative_read_direction=='inward'): pass\n elif (cassette_end=='3prime' and relative_read_direction=='outward'): pass\n else: strand = ('+' if strand=='-' else '-')\n ### cassette position depends on the read position and cassette_end in a somewhat complex way (see docstring)\n if (cassette_end=='5prime' and strand=='+') or (cassette_end=='3prime' and strand=='-'): pos_before, pos_after = end_pos, None\n else: pos_before, pos_after = None, start_pos\n return Insertion_position(chrom, strand, position_before=pos_before, position_after=pos_after, immutable=immutable_position)",
"def getInserts(my_diff, my_cigar):\n my_ranges = list(getDiffLocs(my_cigar, 'I'))\n my_ins = (my_diff[x:y] for x,y in my_ranges)\n return ((x[0], 'I', y) if len(y) == 1 else (x[0], 'S', y)\n for x,y in zip(my_ranges, my_ins))",
"def genomic_insertion(erbb2_context):\n params = {\n \"id\": \"normalize.variation:NC_000017.10%3Ag.37880993_37880994insGCTTACGTGATG\", # noqa: E501\n \"type\": \"VariationDescriptor\",\n \"variation_id\": \"ga4gh:VA.nHB0_mpsq2t90S-znr81oCi2cY5CMdUe\",\n \"variation\": {\n \"_id\": \"ga4gh:VA.nHB0_mpsq2t90S-znr81oCi2cY5CMdUe\",\n \"location\": {\n \"_id\": \"ga4gh:VSL.E0o4HCXjy1EUthF1m32oj_Bc45g5YmEm\",\n \"interval\": {\n \"end\": {\"value\": 2500, \"type\": \"Number\"},\n \"start\": {\"value\": 2488, \"type\": \"Number\"},\n \"type\": \"SequenceInterval\"\n },\n \"sequence_id\": \"ga4gh:SQ.y9b4LVMiCXpZxOg9Xt1NwRtssA03MwWM\",\n \"type\": \"SequenceLocation\"\n },\n \"state\": {\n \"sequence\": \"TACGTGATGGCTTACGTGATGGCT\",\n \"type\": \"LiteralSequenceExpression\"\n },\n \"type\": \"Allele\"\n },\n \"molecule_context\": \"transcript\",\n \"structural_type\": \"SO:0000667\",\n \"vrs_ref_allele_seq\": \"TACGTGATGGCT\",\n \"gene_context\": erbb2_context\n }\n return VariationDescriptor(**params)",
"def __repr__(self):\n return \"Insertion_position('%s', '%s', full_position='%s', immutable=%s)\"%(self.chromosome, self.strand, \n self.full_position, self.immutable)",
"def test_ctgs(\n insertions, # type: List[Insertion]\n reference, # type: Reference\n gene_ids=None, # type: Set[str]\n chromosomes=None, # type: Set[str]\n pattern=None, # type: str\n per_sample=True, # type: bool\n window=None #type: Tuple[int, int]\n):\n\n # Default to shared chromosome sequences (typically drops some\n # of the more esoteric extra scaffold/patch sequences).\n if chromosomes is None:\n reference_seq = pyfaidx.Fasta(str(reference.fasta_path))\n reference_gtf = GtfIterator(reference.indexed_gtf_path)\n\n chromosomes = list(\n set(reference_seq.keys()) & set(reference_gtf.contigs))\n\n if len(chromosomes) == 0:\n ValueError('No chromosomes are shared between the reference '\n 'sequence and reference gtf files')\n\n if len(chromosomes) == 0:\n raise ValueError('At least one chromosome must be given')\n\n # Determine gene windows using GTF.\n logging.info('Generating gene windows')\n gene_windows = _build_gene_windows(\n reference.indexed_gtf_path, window=window, chromosomes=chromosomes)\n\n # Subset insertions to gene intervals.\n insertions = _subset_to_windows(insertions, gene_windows)\n\n if gene_ids is None:\n gene_ids = set(ins.metadata['gene_id'] for ins in insertions)\n\n # Collapse insertions per gene/sample (recommended).\n # Corrects for hopping/multiple detection issues.\n if per_sample:\n logging.info('Collapsing insertions')\n insertions = list(_collapse_per_sample(insertions))\n\n # Calculate total number of pattern occurrences within intervals.\n logging.info('Counting pattern occurrences')\n reference_seq = pyfaidx.Fasta(str(reference.fasta_path))\n\n total = count_total(\n reference_seq, pattern=pattern, intervals=gene_windows.values())\n\n # Calculate p-values for each gene.\n logging.info('Calculating significance for genes')\n insertion_trees = GenomicIntervalTree.from_objects_position(\n insertions, chrom_attr='seqname')\n\n p_values = {\n gene_id: test_region(\n insertions=insertions,\n reference_seq=reference_seq,\n region=gene_windows[gene_id],\n total=total,\n pattern=pattern,\n filters=[lambda ins, gid=gene_id: ins.metadata['gene_id'] == gid],\n insertion_trees=insertion_trees)\n for gene_id in gene_ids\n }\n\n # Build result frame.\n result = pd.DataFrame.from_records(\n iter(p_values.items()), columns=['gene_id', 'p_value'])\n\n # Calculate corrected p-value using bonferroni correction.\n result['q_value'] = (result['p_value'] * len(result)).clip_upper(1.0)\n\n # Sort by q-value and p-value.\n result.sort_values(by=['q_value', 'p_value'], inplace=True)\n\n if len(insertions) > 0:\n # Annotate with gene_name if possible.\n if 'gene_name' in insertions[0].metadata:\n name_map = {\n ins.metadata['gene_id']: ins.metadata['gene_name']\n for ins in insertions\n }\n result.insert(1, 'gene_name', result['gene_id'].map(name_map))\n else:\n result['gene_name'] = np.nan\n\n # Annotate with frequency.\n frequency = (Insertion.to_frame(insertions)\n .groupby('gene_id')['sample'].nunique()\n .reset_index(name='n_samples'))\n result = pd.merge(result, frequency, on='gene_id', how='left')\n else:\n result['gene_name'] = np.nan\n result['n_samples'] = np.nan\n\n return result",
"def index_gff(gff, logger):\n f_in = open(gff, \"r\")\n gene_start_stop_dict = dict()\n gene_scaff_dict = dict()\n gene_first_exon_dict = dict()\n gene_direction = dict()\n gene_gff_line = dict()\n gene_set = set([])\n for line in f_in:\n if line.startswith(\"#\"):\n continue\n if not line.strip():\n continue\n assert len(line.split(\"\\t\")) == 9 , \"GFF fields wrong length should be 9\"\n scaff, source, feature, start, stop, score, \\\n direction, frame, gene_info = line.split(\"\\t\")\n gene = split_gene_name(gene_info)\n scaff = scaff.rstrip()\n if feature == \"gene\":\n gene_gff_line[gene] = line\n gene_set.add(gene)\n start_stop = \"%s\\t%s\" % (start, stop)\n gene_start_stop_dict[gene] = start_stop\n gene_scaff_dict[gene] = scaff\n gene_direction[gene] = direction\n if not gene in gene_first_exon_dict.keys():\n if feature == \"exon\" or feature == \"CDS\":\n start_stop = \"%s\\t%s\" % (start, stop)\n gene_first_exon_dict[gene] = start_stop\n f_in.close()\n logger.info(\"Number of genes = %d\", len(gene_set))\n return gene_start_stop_dict, gene_first_exon_dict, \\\n gene_scaff_dict, gene_direction, gene_set, gene_gff_line",
"def process_gff(gff_file, feat='CDS', id_sym=\"gene_id=\"):\n gene_to_gene_length = {}\n with open(gff_file, \"r\") as fh:\n for line in fh:\n line = line.strip()\n if line.startswith('>'):\n break\n elif line.startswith((\"#\", \" \")) or len(line) == 0:\n continue\n elif line.split('\\t')[2] != feat:\n continue\n else:\n start = int(line.split(\"\\t\")[3].strip())\n end = int(line.split(\"\\t\")[4].strip())\n gene_length = abs(end - start)/1000\n #prokka = line.split(\"\\t\")[-1].split(\";\")[0].strip(id_sym)\n prokka = line.split(\"\\t\")[-1].split(id_sym)[1].split(\";\")[0]\n # This would give me the prokka id\n gene_to_gene_length[prokka] = gene_length\n return gene_to_gene_length",
"def get_transcript_genomic_inserted_sequence(\n self, parts: List\n ) -> Optional[Tuple[Union[str, int], Union[str, int]]]:\n # Check inserted sequences\n if \"_\" in parts[1] and parts[1].count(\"_\") == 1:\n # Replaced by sequence positions\n inserted_sequences = self.get_valid_digits(parts[1])\n if not inserted_sequences:\n return None\n inserted_sequence1, inserted_sequence2 = inserted_sequences\n if inserted_sequence1 > inserted_sequence2:\n return None\n else:\n # Replaced by nucleotides\n inserted_sequence1 = self.get_sequence(parts[1])\n inserted_sequence2 = None\n return inserted_sequence1, inserted_sequence2",
"def parse_gt_tuple(vcf_record, sample_index):\n if sample_index is None:\n raise exceptions.G2GVCFError(\"Sample index must contain a value\")\n\n sample_data = vcf_record[sample_index]\n gt = None\n fi = None\n left = None\n right = None\n phase = None\n gt_left = None\n gt_right = None\n\n # check for to see if ALT is <CN*> or something not ACGT\n if vcf_record.alt.find('<') == -1 and sample_data != '.':\n formats = vcf_record.format.split(':')\n gt_index = formats.index('GT')\n fi_index = formats.index('FI') if 'FI' in formats else None\n\n try:\n # parse the GT field\n gt = sample_data.split(':')[gt_index]\n\n # make sure a call can be made\n if gt != '.' and gt != './.' and gt != '.|.':\n if GENOTYPE_PHASED in gt:\n genotypes = lmap(int, gt.split(GENOTYPE_PHASED))\n phase = GENOTYPE_PHASED\n elif GENOTYPE_UNPHASED in gt:\n genotypes = lmap(int, gt.split(GENOTYPE_UNPHASED))\n phase = GENOTYPE_UNPHASED\n else:\n raise ValueError(\"Unknown phase in GT, {0}\".format(gt))\n\n # assuming no triploids for now\n if genotypes[0] == 0:\n left = vcf_record.ref\n else:\n left = vcf_record.alt.split(',')[genotypes[0]-1]\n\n if genotypes[1] == 0:\n right = vcf_record.ref\n else:\n right = vcf_record.alt.split(',')[genotypes[1]-1]\n\n gt_left = genotypes[0]\n gt_right = genotypes[1]\n\n # check for to see if ALT is <CN*> or something not ACGT\n #if not REGEX_ALT.match(left) or not REGEX_ALT.match(right):\n # LOG.error(\"VFC2VCI CN FOUND\")\n # gt = None\n # fi = None\n # left = None\n # right = None\n # phase = None\n # gt_left = None\n # gt_right = None\n\n except ValueError as ve:\n LOG.debug(ve)\n except IndexError as ie:\n LOG.debug(ie)\n try:\n if fi_index:\n fi = sample_data.split(':')[fi_index]\n except ValueError as ve:\n LOG.debug(ve)\n except IndexError as ie:\n LOG.debug(ie)\n\n is_snp = len(vcf_record.ref) == 1 == (len(left) if left else 0) == (len(right) if right else 0)\n return GTData(vcf_record.ref, left, right, gt, fi, phase, gt_left, gt_right, is_snp)",
"def parse_gff(g):\n # We also want to store the mRNA->gene information!\n mrna_par = {}\n # And the CDS->mRNA information\n cds_dat = {}\n with open(g, 'r') as f:\n for line in f:\n # if the line is empty or starts with a #, we will skip it\n if line.startswith('#') or line == '\\n':\n continue\n else:\n tmp = line.strip().split('\\t')\n feat_type = tmp[2]\n if feat_type == 'mRNA':\n meta = tmp[8].split(';')\n for m in meta:\n if m.startswith('ID='):\n tx_id = m.split('=')[1]\n if m.startswith('Parent='):\n tx_par = m.split('=')[1]\n mrna_par[tx_id] = tx_par\n elif feat_type == 'CDS':\n scaf = tmp[0]\n start = tmp[3]\n end = tmp[4]\n strand = tmp[6]\n phase = tmp[7]\n meta = tmp[8].split(';')\n for m in meta:\n if m.startswith('ID='):\n cds_id = m.split('=')[1]\n if m.startswith('Parent='):\n cds_par = m.split('=')[1]\n if strand == '-':\n strand = -1\n else:\n strand = 1\n # Watch out for transcripts where there are multiple CDS.\n # This will require a nested dictionary of lists.\n if cds_par in cds_dat:\n pass\n else:\n cds_dat[cds_par] = {}\n if cds_id in cds_dat[cds_par]:\n pass\n else:\n cds_dat[cds_par][cds_id] = []\n # We want to make a SequenceFeature for each CDS chunk\n # Keep in mind that GFF is 1-based, so we have to adjust\n # the start position!\n cds_feat = SeqFeature(\n FeatureLocation(int(start)-1, int(end), strand=strand),\n type=\"CDS\",\n id=cds_id)\n # Add some qualifiers to modify the behavior\n # Use the \"standard\" genetic code from NCBI\n cds_feat.qualifiers['transl_tabl'] = [1]\n # Then, append it into the corresponding dictionary item\n # keeping the chromosome (scaffold) name and phase with it\n cds_dat[cds_par][cds_id].append((cds_feat, scaf, phase))\n else:\n continue\n return (mrna_par, cds_dat)",
"def check_for_overlapping_genes(sequence_record):\n overlapping_gene_pairs = []\n all_gene_positions = []\n for gene in sequence_record.features:\n # BCBio uses 0-based and end-exclusive positions (first-third base is bases 0,1,2, i.e range 0-3), \n # so add 1 to start and keep end as is to convert to 1-based-end-inclusive\n all_gene_positions.append((gene.location.start.position+1, gene.location.end.position, gene.id))\n all_gene_positions.sort()\n for gene1_data,gene2_data in itertools.izip(all_gene_positions,all_gene_positions[1:]):\n (gene1_start,gene1_end,gene1_name), (gene2_start,gene2_end,gene2_name) = gene1_data, gene2_data\n if gene1_end>=gene2_start:\n overlapping_gene_pairs.append((gene1_name,gene2_name))\n # check for \"gene1 contains gene2\", print a warning, since it can make other things not work right\n if gene1_end>=gene2_end:\n print(\"WARNING: gene %s is completely inside gene %s! \"%(gene1_name, gene2_name)\n +\"Various gene-position-related results may be inaccurate.\")\n return overlapping_gene_pairs\n # MAYBE-TODO rewrite it so it actually detects ALL overlaps? Right now if gene A contains nonoverlapping genes B and C, it'll sort them as (A,B,C) since A starts first, so it'll detect the (A,B) overlap, but it won't detect the (A,C) overlap because it doesn't CHECK (A,C), only (A,B) and (B,C). This could be fixed either by just brute-force checking all gene pairs (and then using DNA_basic_utilities.position_test_overlap), or by writing something prettier. In any case, not a priority, since generally genes DON'T OVERLAP AT ALL.",
"def GFFParse(gff_file):\n genes, utr5, exons=dict(), dict(), dict()\n transcripts, utr3, cds=dict(), dict(), dict()\n # TODO Include growing key words of different non-coding/coding transcripts \n features=['mrna', 'transcript', 'ncrna', 'mirna', 'pseudogenic_transcript', 'rrna', 'snorna', 'snrna', 'trna', 'scrna', 'mrna_te_gene']\n gff_handle=open(gff_file, \"rU\")\n for gff_line in gff_handle:\n gff_line=gff_line.strip('\\n\\r').split('\\t')\n if re.match(r'#|>', gff_line[0]): # skip commented line or fasta identifier line \n continue\n if len(gff_line)==1: # skip fasta sequence/empty line if present \n continue \n assert len(gff_line)==9, '\\t'.join(gff_line) # not found 9 tab-delimited fields in this line \n if '' in gff_line: # skip this line if there any field with an empty value\n print 'Skipping..', '\\t'.join(gff_line)\n continue\n if gff_line[-1][-1]==';': # trim the last ';' character \n gff_line[-1]=gff_line[-1].strip(';')\n if gff_line[2].lower() in ['gene', 'pseudogene', 'transposable_element_gene']:\n gid, gene_info=None, dict()\n gene_info['start']=int(gff_line[3])\n gene_info['stop']=int(gff_line[4])\n gene_info['chr']=gff_line[0]\n gene_info['source']=gff_line[1]\n gene_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=') # gff attributes are separated by key=value pair \n if attb[0]=='ID':\n gid=attb[1]\n break\n genes[(gff_line[0], gid)]=gene_info # store gene information based on the chromosome and gene symbol.\n elif gff_line[2].lower() in features: \n gid, mrna_info=None, dict() \n mrna_info['start']=int(gff_line[3])\n mrna_info['stop']=int(gff_line[4])\n mrna_info['chr']=gff_line[0]\n mrna_info['strand']=gff_line[6]\n mrna_info['type'] = gff_line[2]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n gid=attb[1]\n elif attb[0]=='ID':\n mrna_info[attb[0]]=attb[1]\n for fid in gid.split(','): # child may be mapped to multiple parents ex: Parent=AT01,AT01-1-Protein \n if (gff_line[0], fid) in transcripts:\n transcripts[(gff_line[0], fid)].append(mrna_info)\n else:\n transcripts[(gff_line[0], fid)]=[mrna_info]\n elif gff_line[2].lower() in ['exon', 'pseudogenic_exon']:\n tids, exon_info=None, dict()\n exon_info['start']=int(gff_line[3])\n exon_info['stop']=int(gff_line[4])\n exon_info['chr']=gff_line[0]\n exon_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in exons:\n exons[(gff_line[0], tid)].append(exon_info)\n else:\n exons[(gff_line[0], tid)]=[exon_info]\n elif gff_line[2].lower() in ['five_prime_utr']:\n utr5_info, tids=dict(), None\n utr5_info['start']=int(gff_line[3])\n utr5_info['stop']=int(gff_line[4])\n utr5_info['chr']=gff_line[0]\n utr5_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in utr5:\n utr5[(gff_line[0], tid)].append(utr5_info)\n else:\n utr5[(gff_line[0], tid)]=[utr5_info]\n elif gff_line[2].lower() in ['cds']:\n cds_info, tids=dict(), None\n cds_info['start']=int(gff_line[3])\n cds_info['stop']=int(gff_line[4])\n cds_info['chr']=gff_line[0]\n cds_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in cds:\n cds[(gff_line[0], tid)].append(cds_info)\n else:\n 
cds[(gff_line[0], tid)]=[cds_info]\n elif gff_line[2].lower() in ['three_prime_utr']:\n utr3_info, tids=dict(), None\n utr3_info['start']=int(gff_line[3])\n utr3_info['stop']=int(gff_line[4])\n utr3_info['chr']=gff_line[0]\n utr3_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in utr3:\n utr3[(gff_line[0], tid)].append(utr3_info)\n else:\n utr3[(gff_line[0], tid)]=[utr3_info]\n gff_handle.close()\n return genes, transcripts, exons, utr3, utr5, cds",
"def read_GFF(gff_filename):\n gff_info = {} # loci --> LocusInfo\n tmp = {} # loci PB.X --> list of GFF records for PB.X.Y\n\n for r in collapseGFFReader(gff_filename):\n m = rex_pbid.match(r.seqid)\n if m is None:\n raise Exception(f\"Expected PBID format PB.X.Y but saw {r.seqid}\")\n locus = m.group(1) # ex: PB.1\n if locus not in tmp:\n tmp[locus] = [r]\n gff_info[locus] = LocusInfo(\n chrom=r.chr, strand=r.strand, regions=None, isoforms=None\n )\n else:\n if gff_info[locus].chrom != r.chr:\n logger.warning(\n f\"WARNING: Expected {r.seqid} to be on {gff_info[locus].chrom} but saw {r.chr}. Could be minimap2 multi-mapping inconsistency for repetitive genes. Check later.\\n\"\n )\n tmp[locus].append(r)\n\n # now figure out the exonic regions for each gene PB.X\n for locus, records in tmp.items():\n c = ClusterTree(0, 0)\n for r in records:\n for e in r.ref_exons:\n c.insert(\n max(0, e.start - extra_bp_around_junctions),\n e.end + extra_bp_around_junctions,\n 1,\n )\n\n regions = [(a, b) for (a, b, junk) in c.getregions()]\n regions[0] = (max(0, regions[0][0] - __padding_before_after__), regions[0][1])\n regions[-1] = (\n max(0, regions[-1][0]),\n regions[-1][1] + __padding_before_after__,\n )\n gff_info[locus] = LocusInfo(\n chrom=gff_info[locus].chrom,\n strand=gff_info[locus].strand,\n regions=regions,\n isoforms=[r.seqid for r in records],\n )\n\n return gff_info",
"def test_annotate_from_gff(self):\n from cogent3.parse.fasta import FastaParser\n\n fasta_path = os.path.join(\"data/c_elegans_WS199_dna_shortened.fasta\")\n gff3_path = os.path.join(\"data/c_elegans_WS199_shortened_gff.gff3\")\n name, seq = next(FastaParser(fasta_path))\n\n sequence = Sequence(seq)\n sequence.annotate_from_gff(gff3_path)\n matches = [m for m in sequence.get_annotations_matching(\"*\", extend_query=True)]\n # 13 features with one having 2 parents, so 14 instances should be found\n self.assertEqual(len(matches), 14)",
"def _update_from_exons(self, feature):\n # note that start and end here are in direction of translation\n def start(loc):\n return loc[0][1]\n\n def end(loc):\n if loc[-1][2] == \"+\":\n return loc[-1][1] + loc[-1][3] + 1\n else:\n return loc[-1][1] - loc[-1][3] - 1\n\n if 'exon' in feature:\n # update the feature with the exon locations and sequences\n feature['location'] = [x['location'][0] for x in feature['exon']]\n feature['dna_sequence'] = \"\".join(\n x['dna_sequence'] for x in feature['exon'])\n feature['dna_sequence_length'] = len(feature['dna_sequence'])\n\n # construct feature location from utrs and cdss if present\n elif 'cds' in feature:\n cds = [copy.deepcopy(self.feature_dict[feature['cds']])]\n locs = [] # type: list\n seq = \"\"\n for frag in feature.get('five_prime_UTR', []) + cds + \\\n feature.get('three_prime_UTR', []):\n\n # merge into last location if adjacent\n if locs and abs(end(locs) - start(frag['location'])) == 1:\n # extend the location length by the length of the first\n # location in the fragment\n first = frag['location'].pop(0)\n locs[-1][3] += first[3]\n\n locs.extend(frag['location'])\n seq += frag['dna_sequence']\n\n feature['location'] = locs\n feature['dna_sequence'] = seq\n feature['dna_sequence_length'] = len(seq)\n\n # remove these properties as they are no longer needed\n for x in ['five_prime_UTR', 'three_prime_UTR', 'exon']:\n feature.pop(x, None)\n\n else:\n ValueError('Feature {feature[\"id\"]} must contain either exon or cds data to '\n 'construct an accurate location and sequence')",
"def parse_flanking_region_aln_or_pos(flanking_region_aln_or_pos):\n try: \n check_valid_position_tuple(flanking_region_aln_or_pos)\n return flanking_region_aln_or_pos\n except MutantError:\n try: \n pos = flanking_region_aln_or_pos.iv\n except AttributeError:\n raise MutantError(\"parse_flanking_region_aln_or_pos input should be HTSeq aln or position tuple! \"\n +\"Got %s\"%(flanking_region_aln_or_pos,))\n if pos: return HTSeq_pos_to_tuple(pos) \n # if unaligned, figure out if unaligned or multi-aligned, and just return the appropriate special position code\n else: \n try: XM_val = get_HTSeq_optional_field(flanking_region_aln_or_pos, 'XM')\n except KeyError: return SPECIAL_POSITIONS.unaligned\n if int(XM_val) > 1: return SPECIAL_POSITIONS.multi_aligned\n else: return SPECIAL_POSITIONS.unaligned",
"def genInsertPosition(self):\n insize = np.random.normal(self.insertSize, self.insertStdev)\n while True:\n start = random.randint(self.fpstart, self.fpend)\n end = start + insize\n if end < self.fpend and self.isValid(start, end):\n return (start, end - self.readlen)",
"def ungap_feature_ends(feat, rec):\n if feat.location.start < 0:\n feat.location = FeatureLocation(0, feat.location.end, feat.location.strand)\n\n if feat.location.end < 0:\n feat.location = FeatureLocation(feat.location.start, 0, feat.location.strand)\n\n if feat.location.start > feat.location.end:\n feat.location = FeatureLocation(feat.location.end, feat.location.start, feat.location.strand)\n\n if type(feat.location) == CompoundLocation:\n parts = []\n for part in feat.location.parts:\n part = ungap_feature_ends(SeqFeature(part), rec)\n parts.append(part.location)\n feat.location = CompoundLocation(parts, feat.location.operator)\n\n elif type(feat.location) == FeatureLocation:\n extract = str(feat.extract(rec.seq))\n front_gaps = re.search(\"^-+\", extract)\n if front_gaps:\n if not feat.location.strand or feat.location.strand == 1:\n new_start = feat.location.start + len(front_gaps.group(0))\n feat.location = FeatureLocation(new_start, feat.location.end, 1)\n else:\n new_end = feat.location.end - len(front_gaps.group(0))\n feat.location = FeatureLocation(feat.location.start, new_end, -1)\n\n rear_gaps = re.search(\"-+$\", extract)\n if rear_gaps:\n if not feat.location.strand or feat.location.strand == 1:\n new_end = feat.location.end - len(rear_gaps.group(0))\n feat.location = FeatureLocation(feat.location.start, new_end, 1)\n else:\n new_start = feat.location.start + len(rear_gaps.group(0))\n feat.location = FeatureLocation(new_start, feat.location.end, -1)\n else:\n raise TypeError(\"FeatureLocation or CompoundLocation object required.\")\n return feat",
"def findgene(fname, dbpaths=dbpaths):\n scaf = []\n gbeg = []\n gend = []\n gfor = []\n gsta = []\n gdif = []\n cuffgenes = {}\n\n fobj = open(fname)\n for line in fobj:\n col = line.split()\n scaf.append( re.search('[sCcafold]*[0-9]+', col[3]).group() )\n gbeg.append( int(re.search(':(.*)-', col[3]).groups()[0]) )\n gend.append( int(re.search('-(.*)', col[3]).groups()[0]) )\n gfor.append(float(col[7]))\n gsta.append(float(col[8]))\n gdif.append(float(col[9]))\n\n fobj.close()\n print \"Significant transcripts read\"\n\n\n for result in range(len(scaf)):\n cur_scaf = scaf[result]\n cur_gbeg = gbeg[result]\n cur_gend = gend[result]\n cur_gfor = gfor[result]\n cur_gsta = gsta[result]\n cur_gdif = gdif[result]\n fobj = open(dbpaths['gff'])\n for line in fobj:\n col = line.split()\n if col[2] == \"mRNA\":\n if col[0] == cur_scaf:\n if float(col[3]) <= cur_gend and float(col[4]) >= cur_gbeg:\n try:\n cuffgenes[(cur_scaf, cur_gbeg)] = (re.search('ID=([^;]*);', col[8]).groups()[0], cur_scaf, cur_gbeg, cur_gend, cur_gfor, cur_gsta, cur_gdif)\n except AttributeError:\n print col[8]\n fobj.close()\n\n return cuffgenes",
"def parse_gt(vcf_record, sample_index):\n if sample_index is None:\n raise exceptions.G2GVCFError(\"Sample index must contain a value\")\n\n sample_data = vcf_record.samples[sample_index]\n gt = None\n fi = None\n left = None\n right = None\n phase = None\n\n # check for to see if ALT is <CN*> or something not ACGT\n if vcf_record.alt.find('<') == -1 and sample_data != '.':\n #if sample_data != '.':\n gt_index = vcf_record.format.split(':').index('GT')\n fi_index = vcf_record.format.split(':').index('FI')\n\n try:\n # parse the GT field\n gt = sample_data.split(':')[gt_index]\n\n # make sure a call can be made\n if gt != '.' and gt != './.' and gt != '.|.':\n if GENOTYPE_PHASED in gt:\n genotypes = lmap(int, gt.split(GENOTYPE_PHASED))\n phase = GENOTYPE_PHASED\n elif GENOTYPE_UNPHASED in gt:\n genotypes = lmap(int, gt.split(GENOTYPE_UNPHASED))\n phase = GENOTYPE_UNPHASED\n else:\n raise ValueError(\"Unknown phase in GT, {0}\".format(gt))\n\n # assuming no triploids for now\n if genotypes[0] == 0:\n left = vcf_record.ref\n else:\n left = vcf_record.alt[genotypes[0]-1]\n\n if genotypes[1] == 0:\n right = vcf_record.ref\n else:\n right = vcf_record.alt[genotypes[1]-1]\n\n gt_left = genotypes[0]\n gt_right = genotypes[1]\n\n # check for to see if ALT is <CN*> or something not ACGT\n if not REGEX_ALT.match(gt_left):\n left = None\n gt_left = None\n\n if not REGEX_ALT.match(gt_right):\n right = None\n gt_right = None\n\n except ValueError as ve:\n LOG.debug(ve)\n except IndexError as ie:\n LOG.debug(ie)\n try:\n fi = sample_data.split(':')[fi_index]\n except ValueError as ve:\n LOG.debug(ve)\n except IndexError as ie:\n LOG.debug(ie)\n\n is_snp = len(vcf_record.REF) == 1 == (len(left) if left else 0) == (len(right) if right else 0)\n return GTData(vcf_record.REF, left, right, gt, fi, phase, gt_left, gt_right, is_snp)",
"def extract_genes(seq_record):\n return [f for f in seq_record.features if f.type == \"gene\"]",
"def locus2gene(scaflist, gbeglist, gendlist, gdatalist=False, gff=dbpaths['gff'], comprehensive=True ):\n cuffgenes = {}\n\n for result in range(len(scaflist)):\n if result % 1000 == 0:\n print \"%d genes matched of %d\" % (result, len(scaflist))\n cur_scaf = scaflist[result]\n cur_gbeg = gbeglist[result]\n cur_gend = gendlist[result]\n if gdatalist:\n cur_gdata = gdatalist[result]\n else:\n cur_gdata = 0\n fobj = open(gff, 'rb')\n for line in fobj:\n col = line.split()\n if col[2] == \"mRNA\":\n if col[0] == cur_scaf:\n if float(col[3]) <= cur_gend and float(col[4]) >= cur_gbeg:\n try:\n if (cur_scaf, cur_gbeg) in cuffgenes:\n cuffgenes[(cur_scaf, cur_gbeg, 2)] = (re.search('ID=([^;]*);', col[8]).groups()[0], cur_scaf, cur_gbeg, cur_gend, cur_gdata)\n else:\n cuffgenes[(cur_scaf, cur_gbeg)] = (re.search('ID=([^;]*);', col[8]).groups()[0], cur_scaf, cur_gbeg, cur_gend, cur_gdata)\n if not comprehensive:\n break\n except AttributeError:\n print col[8]\n fobj.close()\n\n return cuffgenes",
"def feature_table(chr_id, source, orient, genes, transcripts, cds, exons, unk):\n for gname, ginfo in genes.items():\n line = [str(chr_id), \n 'gbk_to_gff',\n ginfo[3],\n str(ginfo[0]),\n str(ginfo[1]),\n '.',\n ginfo[2],\n '.',\n 'ID='+str(gname)+';Name='+str(gname)+';Note='+ginfo[-1]]\n print '\\t'.join(line) \n ## construct the transcript line is not defined in the original file \n t_line = [str(chr_id), 'gbk_to_gff', source, 0, 1, '.', ginfo[2], '.'] \n\n if not transcripts:\n t_line.append('ID=Transcript:'+str(gname)+';Parent='+str(gname))\n\n if exons: ## get the entire transcript region from the defined feature\n t_line[3] = str(exons[gname][0][0])\n t_line[4] = str(exons[gname][0][-1])\n elif cds:\n t_line[3] = str(cds[gname][0][0])\n t_line[4] = str(cds[gname][0][-1])\n print '\\t'.join(t_line) \n\n if exons:\n exon_line_print(t_line, exons[gname], 'Transcript:'+str(gname), 'exon')\n\n if cds:\n exon_line_print(t_line, cds[gname], 'Transcript:'+str(gname), 'CDS')\n if not exons:\n exon_line_print(t_line, cds[gname], 'Transcript:'+str(gname), 'exon')\n\n else: ## transcript is defined \n for idx in transcripts[gname]: \n t_line[2] = idx[3]\n t_line[3] = str(idx[0])\n t_line[4] = str(idx[1])\n t_line.append('ID='+str(idx[2])+';Parent='+str(gname))\n print '\\t'.join(t_line) \n \n ## feature line print call \n if exons:\n exon_line_print(t_line, exons[gname], str(idx[2]), 'exon')\n if cds:\n exon_line_print(t_line, cds[gname], str(idx[2]), 'CDS')\n if not exons:\n exon_line_print(t_line, cds[gname], str(idx[2]), 'exon')\n\n if len(genes) == 0: ## feature entry with fragment information \n \n line = [str(chr_id), 'gbk_to_gff', source, 0, 1, '.', orient, '.'] \n fStart = fStop = None \n\n for eid, ex in cds.items(): \n fStart = ex[0][0] \n fStop = ex[0][-1]\n\n for eid, ex in exons.items(): \n fStart = ex[0][0] \n fStop = ex[0][-1]\n\n if fStart or fStart:\n\n line[2] = 'gene'\n line[3] = str(fStart)\n line[4] = str(fStop)\n line.append('ID=Unknown_Gene_' + str(unk) + ';Name=Unknown_Gene_' + str(unk))\n print \"\\t\".join(line)\n\n if not cds:\n line[2] = 'transcript'\n else:\n line[2] = 'mRNA'\n line[8] = 'ID=Unknown_Transcript_' + str(unk) + ';Parent=Unknown_Gene_' + str(unk)\n print \"\\t\".join(line)\n \n if exons:\n exon_line_print(line, cds[None], 'Unknown_Transcript_' + str(unk), 'exon')\n \n if cds:\n exon_line_print(line, cds[None], 'Unknown_Transcript_' + str(unk), 'CDS')\n if not exons:\n exon_line_print(line, cds[None], 'Unknown_Transcript_' + str(unk), 'exon')\n \n unk +=1 \n\n return unk",
"def generate_gff( mapfile, funtax_orf_file ):\n annotation2assembly_map = pd.read_table(mapfile,\n names=['annotation','assembly','length'],\n index_col='annotation')\n funtax_gff = pd.read_table( funtax_orf_file.name, engine='python', encoding='ISO-8859-1', quoting=3)\n funtax_gff['seqid'] = funtax_gff.join(annotation2assembly_map, on='Contig_Name')['assembly']\n funtax_gff['source'] = 'Prodigal_v2.00'\n funtax_gff['type'] = 'CDS'\n funtax_gff['score'] = 100.0\n funtax_gff['phase'] = 0\n funtax_gff['attributes'] = funtax_gff['ORF_ID'].str.replace(r'(.*)', r'ID=\\1;')\n return funtax_gff[['seqid','source', 'type','start', 'end', 'score', 'strand','phase','attributes']]"
] |
[
"0.61753327",
"0.5651202",
"0.5566418",
"0.5268026",
"0.5207312",
"0.51637614",
"0.5153046",
"0.5099371",
"0.4980735",
"0.49739644",
"0.49500814",
"0.49482694",
"0.49347588",
"0.49312872",
"0.4915447",
"0.48882285",
"0.4873452",
"0.48689857",
"0.48640868",
"0.4856279",
"0.48496598",
"0.4811365",
"0.47609866",
"0.4710731",
"0.46941465",
"0.46268988",
"0.46184835",
"0.46036047",
"0.4589743",
"0.45680338"
] |
0.7680272
|
0
|
Set all readcount-related data to 0/empty.
|
def _set_readcount_related_data_to_zero(self):
self.total_read_count = 0
self.perfect_read_count = 0
self.RISCC_genome_side_aligned_reads = {}
self.RISCC_genome_side_unaligned_reads = {}
self.sequences_counts_positions_errors = {}
# TODO should all this really be readcount-related? Well, it IS, but when I have a multi-dataset mutant, do I really want to keep the seq/position/count details and the genome-side RISCC read data per dataset rather than total? Hard to tell, really. In a perfect world I wouldn't be doing multiple RISCC datasets anyway!
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def clear(self):\n self.counts = [0] * len(self.values)\n if HAS_NUMPY:\n self.counts = numpy.array(self.counts)",
"def reset(self):\n for i in range(0, len(self.__counts)):\n self.__counts[i] = 0\n self.__overflow = 0\n self.__total_count = 0\n self.__total_values = 0\n self.__min = None\n self.__max = None",
"def clear(self):\n self.counts = [{} for _ in range(len(self.counts))]",
"def reset_count(self):\n self.count = 0",
"def reset(self):\n self.reset_count += 1\n self._init_data()",
"def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0",
"def reset(self):\n self._total_value = 0.0\n self._count = 0",
"def resetCounters(self):\n self.chain.zero_counters()\n counters = self.session.query(Counter).all()\n self.session.query(Counter).delete()",
"def reset(self):\n self.avg = 0\n self.sum = 0\n self.cnt = 0",
"def _reset_count(self):\n self._triple_count = 0\n self._error_count = 0\n self._ignored_count = 0",
"def reset(self):\n for reader in self.__readers:\n reader.reset()\n if self.__buffer is not None:\n self.__buffer.clear()\n self.__length = 0",
"def resetWriteCount(self):\n self.writeCount = 0",
"def reset(self):\n self.table[:, :] = 0\n self.counts[:] = 0\n self.names = []\n self.hashesperid.resize(0)\n self.dirty = True",
"def reset(self):\n self.correct_count = 0\n self.total_count = 0",
"def reset(self):\n self.count = 0\n self.soft = False\n self.can_double = True\n self.can_split = False\n self.first_card = 0",
"def reset(self):\n self.sum_metric = 0.\n self.num_inst = 0.\n self.metrics.reset_stats()",
"def clear(self):\n self.xi[:] = 0\n self.meanlogr[:] = 0\n self.weight[:] = 0\n self.npairs[:] = 0",
"def reset (self):\n self.counter = 0",
"def reset(self):\n self.total_pulls = 0\n self.total_score = 0\n self.npulls = np.zeros(self.k)\n self.score = np.zeros(self.k)",
"def _clear(self):\n self.xi.ravel()[:] = 0\n self.xi_im.ravel()[:] = 0\n self.meanr.ravel()[:] = 0\n self.meanlogr.ravel()[:] = 0\n self.weight.ravel()[:] = 0\n self.npairs.ravel()[:] = 0\n self._varxi = None\n self._cov = None",
"def reset(self):\n for k in self.data_keys:\n setattr(self, k, [])\n self.size = 0",
"def reset(self):\n self.counter = 0",
"def reset_stats() -> None:\n STATS[\"cleaned\"] = 0\n STATS[\"null\"] = 0\n STATS[\"unknown\"] = 0",
"def reset(self):\n self.cardinality = 0\n self.sax_character = 0\n self.wildcardbits = 0",
"def reset(self) -> None:\n self.true_positives = 0\n self.all_positives = 0",
"def reset(self, complete=False):\n self.sum = 0\n self.n = 0\n if complete:\n self.running_avg = []",
"def clear_data(cls):\n cls.__data.clear()\n cls.__counters.clear()",
"def clear():\n\t\tModel.counter = 0",
"def clear(self):\n self._baseline = 0\n self._sensitivity_im = 0\n self._is_update = False",
"def reset(self) -> None:\n self.statistics = defaultdict(int)"
] |
[
"0.73472196",
"0.71501696",
"0.7069055",
"0.70679396",
"0.7005426",
"0.69340074",
"0.6897218",
"0.68605644",
"0.6838163",
"0.68360454",
"0.6801618",
"0.6767612",
"0.6755343",
"0.67272305",
"0.66804826",
"0.66208565",
"0.65441126",
"0.6536732",
"0.6534575",
"0.6529003",
"0.65287095",
"0.6527668",
"0.65029705",
"0.64986855",
"0.6486056",
"0.6479864",
"0.6477681",
"0.6476028",
"0.6474896",
"0.645456"
] |
0.8255836
|
0
|
Helper function to get the read-info-containing object for both multi-dataset and single mutants. Here this just returns self; the more complicated version is for multi-dataset mutants. Strict is ignored and is only present to make the implementation consistent with the multi-dataset version.
|
def read_info(self, dataset_name=None, strict=False):
if dataset_name is None: return self
else: raise MutantError("This is NOT a multi-dataset mutant - cannot provide dataset_name arg!")
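
An illustrative call pattern (an assumption added for clarity, not taken from the record): because the single-dataset version just returns self, callers can use the same attribute access for single- and multi-dataset mutants.

# Hedged sketch: 'mutant' stands for any single-dataset mutant object with this method.
info = mutant.read_info()            # single-dataset case: returns the mutant itself
total_reads = info.total_read_count
# a multi-dataset mutant would instead be queried as mutant.read_info('dataset_A')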
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def read_info(self, dataset_name=None, strict=False):\n if dataset_name is None: \n raise MutantError(\"This is a multi-dataset mutant - must provide dataset_name arg!\")\n if strict:\n self._check_dataset_presence(dataset_name)\n return self.by_dataset[dataset_name]\n else:\n try: return self.by_dataset[dataset_name]\n except KeyError: return blank_readcount_only_mutant()\n # TODO unit-tests?",
"def __getattribute__(self, name: str) -> Any:\n if name in (FLD_TITLE, FLD_ABSTRACT, FLD_FEES, FLD_ACCESS_CONSTRAINTS, FLD_CONTACT_POSITION, FLD_CONTACT_ORGANISATION):\n return self.read_local_metadata(name)\n elif name == FLD_KEYWORDS:\n kw = self.read_local_metadata(FLD_KEYWORDS)\n if kw:\n return set(kw.split(\",\"))\n else:\n return set()\n elif name == FLD_ATTRIBUTION:\n return self.read_local_metadata(FLD_ATTRIBUTION)\n else:\n return super().__getattribute__(name)",
"def _most_common_mutants_info(self, dataset=None):\n summ = self._get_summary(dataset)\n most_common_mutants = summ.most_common_mutants\n m = most_common_mutants[0]\n # calculate the fraction of total reads per mutant, assuming each mutant has the same readcount\n assert len(set([m.read_info(dataset).total_read_count for m in most_common_mutants])) == 1\n readcount_info = value_and_percentages(m.read_info(dataset).total_read_count, [summ.aligned_read_count])\n if len(most_common_mutants) == 1: return \"%s (%s)\"%(readcount_info, m.position)\n else: return \"%s (%s mutants)\"%(readcount_info, len(most_common_mutants))",
"def GetMaterial(self, *args):\n return _XCAFDoc.XCAFDoc_MaterialTool_GetMaterial(self, *args)",
"def info_materials_raw_get():\n materials = _material_by_group(427) # 427 == intermediate group\n return materials, 200",
"def info_materials_intermediates_get():\n materials = _material_by_group(428) # 428 == intermediate group\n return materials, 200",
"def XCAFDoc_MaterialTool_Set(*args):\n return _XCAFDoc.XCAFDoc_MaterialTool_Set(*args)",
"def __call__(self, read1, read2, info1: ModificationInfo, info2: ModificationInfo):",
"def get_read_labeling (self, savels=False, env=os.environ):\n if savels:\n assert not self.oldls, 'weird, oldls should be None'\n self.oldls = flmo.get_labelset ()\n\n ep_ls = flmo.get_labelset ()\n ep_ls.set_O ()\n\n # if the tag is a read-protect tag, add it to our EP slabel\n # and add its capability to our O label, and get privs so the\n # EP label will be valid.\n for t in self.get_label (flume.LABEL_S, env=env):\n if (t.prefix () & (flume.HANDLE_OPT_GROUP |\n flume.HANDLE_OPT_DEFAULT_ADD |\n flume.HANDLE_OPT_DEFAULT_SUBTRACT |\n flume.HANDLE_OPT_IDENTIFIER)) == 0:\n\n ep_ls.set_S (ep_ls.get_S() + t)\n ep_ls.set_O (ep_ls.get_O() + t.toCapabilities ())\n \n if len (ep_ls.get_O()) > 0:\n self.acquire_capabilities (env=env)\n\n des_ls = self.get_file_labelset (env=env)\n sql_prefix = self._prefix (ep_ls, des_ls)\n return ep_ls, des_ls, sql_prefix",
"def info_materials_polymer_get():\n materials = _material_by_group(974) # 974 == intermediate group\n return materials, 200",
"def MaterialTool(*args):\n return _XCAFDoc.XCAFDoc_DocumentTool_MaterialTool(*args)",
"def XCAFDoc_DocumentTool_MaterialTool(*args):\n return _XCAFDoc.XCAFDoc_DocumentTool_MaterialTool(*args)",
"def get_info(transmogrifier, category=None, section=None, short=True):\n res = []\n try:\n rows = transmogrifier._collected_info\n except AttributeError:\n return res\n else:\n for dic in rows:\n if category is not None and dic['category'] != category:\n continue\n if section is not None and dic['section'] != section:\n continue\n if short:\n res.append(dic['info'])\n else:\n res.append(dic)\n return res",
"def get_mol_info(self):\n return",
"def give_single_dataset_mutant(self, single_dataset_name, force=False):\n if single_dataset_name not in self.by_dataset.keys() and not force:\n raise MutantError(\"This mutant doesn't have a %s dataset! \"%single_dataset_name\n +\"Use force=True argument if you want a zero-readcount mutant returned anyway.\")\n # generate new mutant, fill it with readcount-related data from self.by_dataset[single_dataset_name] \n # and general data from self\n new_mutant = Insertional_mutant()\n new_mutant._copy_non_readcount_data(self)\n new_mutant._copy_readcount_related_data(self.by_dataset[single_dataset_name])\n return new_mutant",
"def protare(self, verbosity=0, lazyParsing=True):\n\n return self.read(verbosity=verbosity, lazyParsing=lazyParsing)",
"def _get_other_set(self, other):\n return other._set if isinstance(other, ReadingSet) else other",
"def simplify(self, ixreader):\r\n return self",
"def get_metadata(self, docname, moderator):\n raise NotImplementedError()",
"def __call__(self, mut_dat, output, variant, item, translocations, fusions, all_except):\n if variant and item:\n mut_dat = self._get_variant(mut_dat, variant, item, all_except=all_except) #get specific variant\n else:\n try:\n mut_dat = self._get_variant(mut_dat, \"Variant_Classification\", \"Silent\", all_except=True)\n except AssertionError:\n pass\n\n cases = {\"gene\": self._single_entity_mutated, #dict of functions to used based on version of handler being used\n \"line\": self._single_entity_mutated,\n \"canc\": self._multiple_entity_mutated,\n \"org\": self._multiple_entity_mutated}\n\n return cases[self.version](mut_dat, output, variant, item, translocations, fusions, all_except) #get mutations",
"def get_mn_info(self):\n\t\treturn self._infoCommonMuscleConnections, self._infoSpecialConnections",
"def object_readers(name, *, specify_reader=False):\n\treturn object_access('read', name, specify_reader)",
"def get_material_set(**kw):\n mat_ids = set()\n volumes = get_volume_list()\n for v in volumes:\n d = volume_metadata( v )\n if( kw.get('with_rho') is True ):\n # rho is undefined for the void material and dagmc may return anything.\n if d['material'] == 0:\n mat_ids.add( (d['material'], 0.0) )\n else:\n mat_ids.add( (d['material'], d['rho']) )\n else:\n mat_ids.add( d['material'] )\n return mat_ids",
"def info_materials_get():\n materials = _material_by_group() # empty means all groups\n return materials, 200",
"def _info(self) -> tfds.core.DatasetInfo:\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n 'id': tfds.features.Text(),\n 'program': tfds.features.Text(),\n 'date': tfds.features.Text(),\n 'url': tfds.features.Text(),\n 'summary': tfds.features.Text(),\n 'utt': tfds.features.Sequence(tfds.features.Text()),\n 'speaker': tfds.features.Sequence(tfds.features.Text()),\n }),\n supervised_keys=('utt', 'summary'),\n homepage='https://github.com/zcgzcgzcg1/MediaSum',\n citation=_CITATION,\n )",
"def wrap_information_object_c_ic_na_1(self, message):\n return self.wrap_qualifier_of_interrogation(message)",
"def data_details_return(data, data_set):\r\n data.update(data_resources[data_set])\r\n return data",
"def soloMaterial(*args, attr: Union[AnyStr, bool]=\"\", last: bool=True, node: Union[AnyStr,\n bool]=\"\", unsolo: bool=True, q=True, query=True, **kwargs)->Union[bool, Any]:\n pass",
"def read_dataset_info(path=None, paths=None, index_col=None, filter_by_min_spacing=False, verbose=False):\n if (path is None and paths is None) or (path is not None and paths is not None):\n raise ValueError(\"Only one of 'path' or 'paths' arguments must be provided\")\n\n dataset_info = get_dicom_info(glob.glob(path) if path is not None else paths, verbose=verbose)\n if filter_by_min_spacing:\n output_indices = (\n dataset_info\n .groupby('AccessionNumber')\n .agg({'SpacingZ': 'idxmin'})\n )\n index_df = dataset_info.loc[output_indices.loc[:, 'SpacingZ'], :]\n else:\n index_df = dataset_info\n return index_df if index_col is None else index_df.set_index(index_col)",
"def _read_data(self) -> MMD:\n\t\tif self.config.source_type == SourceType.LOCAL_FILE:\n\t\t\treturn self._read_files()\n\t\telif self.config.source_type == SourceType.HDFS:\n\t\t\treturn self._read_hdfs()\n\t\telif self.config.source_type == SourceType.NEO4J:\n\t\t\treturn self._read_neo4j(self.config.graph_db)\n\n\t\telse:\n\t\t\traise NotImplementedError(\"The source type {} has not been implemented yet.\".format(loader_config.source_type))"
] |
[
"0.7060582",
"0.49853617",
"0.47985914",
"0.47944397",
"0.47569603",
"0.47407067",
"0.46908507",
"0.4618769",
"0.456103",
"0.45301",
"0.45223302",
"0.45051187",
"0.4483471",
"0.445618",
"0.44560418",
"0.44347364",
"0.44001314",
"0.4360957",
"0.43532157",
"0.4349832",
"0.43496007",
"0.43468922",
"0.43451574",
"0.43365708",
"0.43204838",
"0.4316982",
"0.43094867",
"0.42937717",
"0.429177",
"0.4285075"
] |
0.726781
|
0
|
Raise an error if dataset_name isn't None, with an explanation. Many Insertional_mutant methods take a dataset_name argument for consistency with the multi-dataset subclass, but for non-subclass objects it shouldn't be provided. The only reason for having the argument at all is so that you get a more useful error message when you try to provide a dataset_name, rather than just getting an ArgumentError or such.
|
def _ensure_dataset_None(dataset_name):
if dataset_name is not None:
raise MutantError("Don't try to provide a dataset_name on a single mutant (rather than the multi-dataset subclass)!")
# MAYBE-TODO this could be accomplished with a decorator instead, right?
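A small self-contained sketch of why this guard is useful: it turns a misuse into a readable MutantError instead of an opaque failure. The module-level form below is an illustrative simplification; in the codebase this lives on the mutant class.

class MutantError(Exception):
    pass

def _ensure_dataset_None(dataset_name):
    # single mutants keep their readcounts directly, so a dataset_name never makes sense here
    if dataset_name is not None:
        raise MutantError("Don't try to provide a dataset_name on a single mutant "
                          "(rather than the multi-dataset subclass)!")

_ensure_dataset_None(None)            # normal single-mutant call: nothing happens

try:
    _ensure_dataset_None("sample_A")  # misuse: a targeted, readable error
except MutantError as e:
    print(e)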
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _validate_dataset_name(self, dataset_name: Optional[str]) -> str:\n if dataset_name is None:\n if self.num_datasets > 1:\n raise ValueError(\"`dataset_name` is required if there are \"\n \"more than one datasets.\")\n dataset_name = next(iter(self._datasets))\n if dataset_name not in self._datasets:\n raise ValueError(\"Dataset not found: \", dataset_name)\n return dataset_name",
"def give_single_dataset_mutant(self, single_dataset_name, force=False):\n if single_dataset_name not in self.by_dataset.keys() and not force:\n raise MutantError(\"This mutant doesn't have a %s dataset! \"%single_dataset_name\n +\"Use force=True argument if you want a zero-readcount mutant returned anyway.\")\n # generate new mutant, fill it with readcount-related data from self.by_dataset[single_dataset_name] \n # and general data from self\n new_mutant = Insertional_mutant()\n new_mutant._copy_non_readcount_data(self)\n new_mutant._copy_readcount_related_data(self.by_dataset[single_dataset_name])\n return new_mutant",
"def _check_dataset_consistency(self):\n if not self.multi_dataset: \n raise MutantError(\"_check_dataset_consistency only makes sense for multi-datasets!\")\n def _check_sets_raise_error(set1, set2, set1_name, set2_name):\n if not set1==set2:\n raise MutantError(\"Multi-dataset mutant pool has different %s and %s dataset sets! %s, %s\"%(set1_name, \n set2_name, set1, set2))\n datasets_from_summary = set(self.summary.keys())\n datasets_from_mutants = set.union(*[set(m.by_dataset.keys()) for m in self])\n _check_sets_raise_error(datasets_from_summary, datasets_from_mutants, \"from summary\", \"from mutants\")\n try:\n if self._dataset_order is not None: \n datasets_from_order = set(self._dataset_order)\n _check_sets_raise_error(datasets_from_order, datasets_from_summary, \"from dataset_order\", \"from summary\")\n except AttributeError:\n pass",
"def add_other_mutant_as_dataset(self, other_mutant, other_mutant_dataset_name, \n overwrite=False, check_constant_data=False):\n if other_mutant_dataset_name in self.by_dataset and not overwrite:\n raise MutantError(\"This mutant already has a %s dataset! Can't overwrite it with \"%other_mutant_dataset_name\n +\"new one. Choose a different name for new dataset, or use overwrite=True argument.\")\n\n # if desired, check that the position/gene data matches (and update if own gene data is unknown)\n # (probably should be using ifs rather than asserts, but I think since they're wrapped in a try/except it's fine)\n if check_constant_data:\n if not self.position == other_mutant.position:\n raise MutantError(\"Can't add mutant2 as dataset to mutant1: the mutant position differs! %s and %s\"%(\n self.position, other_mutant.position))\n try:\n self.update_gene_info(other_mutant.gene, other_mutant.orientation, \n other_mutant.gene_feature, other_mutant.gene_distances)\n except MutantError:\n raise MutantError(\"Can't add mutant2 as dataset to mutant1: the mutant gene data differs!\"\n +\" %s, %s, %s and\"%(self.gene, self.orientation, self.gene_feature)\n +\" %s, %s, %s.\"%(other_mutant.gene, other_mutant.orientation, other_mutant.gene_feature))\n\n # make a new empty Insertional_mutant object to hold the readcount-related data from other_mutant, \n # and put it in the self.by_dataset dictionary under other_mutant_dataset_name\n self.by_dataset[other_mutant_dataset_name] = Insertional_mutant_readcount_only()\n # now fill this new object with readcount-related data from other_mutant\n self.by_dataset[other_mutant_dataset_name]._copy_readcount_related_data(other_mutant)",
"def read_info(self, dataset_name=None, strict=False):\n if dataset_name is None: return self\n else: raise MutantError(\"This is NOT a multi-dataset mutant - cannot provide dataset_name arg!\")",
"def _check_dataset_name_return_data(self, dataset_name, strict=False):\n if strict:\n _check_dataset_presence(self, dataset_name)\n elif dataset_name is None:\n raise MutantError(\"Cannot use None as dataset name!\")\n return self.by_dataset[dataset_name]",
"def construct_dataset_name(self, *args):\n raise NotImplementedError",
"def require_dataset(self, name, shape, dtype, exact=False, **kwds) -> DatasetBase:\n shape = tuple(shape)\n dtype = np.dtype(dtype)\n\n if name not in self:\n return self.create_dataset(name, shape, dtype, **kwds)\n\n dset = self[name]\n if not isinstance(dset, DatasetBase):\n raise TypeError(\n \"Incompatible object (%s) already exists\" % dset.__class__.__name__\n )\n\n if not shape == dset.shape:\n raise TypeError(\n \"Shapes do not match (existing %s vs new %s)\" % (dset.shape, shape)\n )\n\n if exact:\n if not dtype == dset.dtype:\n raise TypeError(\n \"Datatypes do not exactly match (existing %s vs new %s)\"\n % (dset.dtype, dtype)\n )\n elif not np.can_cast(dtype, dset.dtype):\n raise TypeError(\n \"Datatypes cannot be safely cast (existing %s vs new %s)\"\n % (dset.dtype, dtype)\n )\n\n return dset",
"def raise_exception_for_dataset(dataset_reference):\n if dataset_reference.dataset_id == non_existing_dataset_id:\n raise cloud.exceptions.NotFound('')",
"def __init__(self, name: UniprotDatasetNames):\n self.name: UniprotDatasetNames = name",
"def on_the_add_dataset_page_input_the_dataset_name_my_acl_dataset(driver, dataset_name):\n assert wait_on_element(driver, 5, '//h3[text()=\"Add Dataset\"]')\n assert wait_on_element(driver, 5, '//input[@ix-auto=\"input__Name\"]', 'inputable')\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Name\"]').clear()\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Name\"]').send_keys(dataset_name)\n assert wait_on_element(driver, 5, '//mat-select[@ix-auto=\"select__Share Type\"]')\n driver.find_element_by_xpath('//mat-select[@ix-auto=\"select__Share Type\"]').click()\n assert wait_on_element(driver, 5, '//mat-option[@ix-auto=\"option__Share Type_SMB\"]', 'clickable')\n driver.find_element_by_xpath('//mat-option[@ix-auto=\"option__Share Type_SMB\"]').click()",
"def get_dataset_name(self):\n raise NotImplementedError",
"def __init__(self,dataset=scripts,group=\"\"):\n self.dataset = dataset\n self.group=group",
"def test_dtype_conflict(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (10, 3), 'f')\n with pytest.raises(TypeError):\n grp.require_dataset('foo', (10, 3), 'S10')",
"def create_dataset(\n self, name, shape=None, dtype=None, data=None, **kwds\n ) -> DatasetBase:\n ancestor, group_names, last_name = self._descend(name)\n parent = ancestor._require_descendant_groups(*group_names)\n if last_name in parent:\n raise FileExistsError(f\"Group or dataset found at '{name}'\")\n return parent._create_child_dataset(name, shape, dtype, data, **kwds)",
"def validate_dataset(self):\n pass",
"def __init__(self, datasets, columns=None, name='DataSetConcatenate'):\n if not len(datasets):\n raise ValueError('datasets list to concatenate is empty!')\n\n # -- Check columns --\n if columns is None:\n columns = [_ for _ in datasets[0].columns_name()]\n for d in datasets[1:]:\n columns = [_ for _ in d.columns_name() if _ in columns]\n if not columns:\n raise ValueError('Provided datasets for concatenations have no columns in common')\n if isinstance(columns, (list, tuple)):\n columns = {_: 0 for _ in columns}\n elif isinstance(columns, str):\n columns = {columns: 0}\n elif not isinstance(columns, dict):\n raise ValueError('Columns must either be of type None, str, list, tuple or dict (not: %s)' % type(columns))\n\n # Check presence and compatiblity\n columns_default = columns\n columns = {}\n for col_name, col_default in columns_default.items():\n col = None\n for d in datasets:\n if col_name in d.columns_name():\n d_column = d.column_by_name(col_name)\n if col is None:\n col = d_column\n # Check default value compatibility\n if col.dtype == str and col_default == 0:\n col_default = ''\n columns_default[col_name] = col_default\n if not isinstance(col_default, np.ndarray):\n columns_default[col_name] = np.full(shape=col.shape, fill_value=col_default, dtype=col.dtype)\n else:\n if col_default.shape != col.shape:\n raise ValueError('Default value shape must match columns shape.\\n'\n 'Column %s expected shape: %s, but the given default value shape is: %s'\n % (repr(col_name), col.shape, col_default.shape))\n if col_default.dtype != col.dtype:\n raise ValueError('Default value dtype must match columns dtype.\\n'\n 'Column %s expected dtype: %s, but the given default value dtype is: %s'\n % (repr(col_name), col.dtype, col_default.dtype))\n else:\n if col.shape != d_column.shape:\n raise ValueError('Columns shape must the same across datasets.\\n'\n 'Column %s expected shape: %s, but shape from dataset %s is: %s'\n % (repr(col_name), col.shape, d.dataset_name, d_column.shape))\n if col.dtype != d_column.dtype:\n raise ValueError('Columns dtype must the same across datasets.\\n'\n 'Column %s expected type: %s, but type from dataset %s is: %s'\n % (repr(col_name), col.dtype, d.dataset_name, d_column.dtype))\n\n if col is None:\n raise ValueError('Column %s is not included in any concatenated datasets.' % col_name)\n columns[col_name] = col.shape, col.dtype\n\n # -- Setup dataset --\n super(DataSetConcatenate, self).__init__(name=name, parent_datasets=datasets, pk_type=str)\n self._columns = [DataSetColumn(name, shape, dtype, self) for name, (shape, dtype) in columns.items()]\n self._columns_default = columns_default\n\n self._datasets_start_index = []\n start = 0\n for d in self.parent_datasets:\n self._datasets_start_index.append(start)\n start += d.size",
"def __init__(__self__, *,\n aws_account_id: Optional[pulumi.Input[str]] = None,\n column_groups: Optional[pulumi.Input[Sequence[pulumi.Input['DataSetColumnGroupArgs']]]] = None,\n column_level_permission_rules: Optional[pulumi.Input[Sequence[pulumi.Input['DataSetColumnLevelPermissionRuleArgs']]]] = None,\n data_set_id: Optional[pulumi.Input[str]] = None,\n data_set_refresh_properties: Optional[pulumi.Input['DataSetRefreshPropertiesArgs']] = None,\n data_set_usage_configuration: Optional[pulumi.Input['DataSetUsageConfigurationArgs']] = None,\n dataset_parameters: Optional[pulumi.Input[Sequence[pulumi.Input['DataSetDatasetParameterArgs']]]] = None,\n field_folders: Optional[pulumi.Input['DataSetFieldFolderMapArgs']] = None,\n import_mode: Optional[pulumi.Input['DataSetImportMode']] = None,\n ingestion_wait_policy: Optional[pulumi.Input['DataSetIngestionWaitPolicyArgs']] = None,\n logical_table_map: Optional[pulumi.Input['DataSetLogicalTableMapArgs']] = None,\n name: Optional[pulumi.Input[str]] = None,\n permissions: Optional[pulumi.Input[Sequence[pulumi.Input['DataSetResourcePermissionArgs']]]] = None,\n physical_table_map: Optional[pulumi.Input['DataSetPhysicalTableMapArgs']] = None,\n row_level_permission_data_set: Optional[pulumi.Input['DataSetRowLevelPermissionDataSetArgs']] = None,\n row_level_permission_tag_configuration: Optional[pulumi.Input['DataSetRowLevelPermissionTagConfigurationArgs']] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input['DataSetTagArgs']]]] = None):\n if aws_account_id is not None:\n pulumi.set(__self__, \"aws_account_id\", aws_account_id)\n if column_groups is not None:\n pulumi.set(__self__, \"column_groups\", column_groups)\n if column_level_permission_rules is not None:\n pulumi.set(__self__, \"column_level_permission_rules\", column_level_permission_rules)\n if data_set_id is not None:\n pulumi.set(__self__, \"data_set_id\", data_set_id)\n if data_set_refresh_properties is not None:\n pulumi.set(__self__, \"data_set_refresh_properties\", data_set_refresh_properties)\n if data_set_usage_configuration is not None:\n pulumi.set(__self__, \"data_set_usage_configuration\", data_set_usage_configuration)\n if dataset_parameters is not None:\n pulumi.set(__self__, \"dataset_parameters\", dataset_parameters)\n if field_folders is not None:\n pulumi.set(__self__, \"field_folders\", field_folders)\n if import_mode is not None:\n pulumi.set(__self__, \"import_mode\", import_mode)\n if ingestion_wait_policy is not None:\n pulumi.set(__self__, \"ingestion_wait_policy\", ingestion_wait_policy)\n if logical_table_map is not None:\n pulumi.set(__self__, \"logical_table_map\", logical_table_map)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if permissions is not None:\n pulumi.set(__self__, \"permissions\", permissions)\n if physical_table_map is not None:\n pulumi.set(__self__, \"physical_table_map\", physical_table_map)\n if row_level_permission_data_set is not None:\n pulumi.set(__self__, \"row_level_permission_data_set\", row_level_permission_data_set)\n if row_level_permission_tag_configuration is not None:\n pulumi.set(__self__, \"row_level_permission_tag_configuration\", row_level_permission_tag_configuration)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)",
"def __init__(self,data_name):\n\t\tif data_name.lower().strip() not in DATASETS.keys():\n\t\t\tprint(f\"{data_name} isn't a valid data name! One of \"+\", \".join(DATASETS.keys()))\n\t\t\traise Exception\n\n\t\tself.data_name = data_name.lower().strip()",
"def test_shape_conflict(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n grp.create_dataset('foo', (10, 3), 'f')\n with pytest.raises(TypeError):\n grp.require_dataset('foo', (10, 4), 'f')",
"def __init__(self, file, sdef, name, path, attrs, parent, value, dtype, compress, link_info=None):\n super(Dataset, self).__init__(file, sdef, name, path, attrs, parent, link_info)\n # print \"Creating Dataset, sdef=\"\n # pp.pprint(sdef)\n if 'attributes' in self.sdef['df']:\n self.attributes = copy.deepcopy(self.sdef['df']['attributes'])\n # del self.sdef['df']['attributes'] # if do this, no need to check for attributes in mk_dsinfo\n # print \"found attributes:\"\n # else:\n # print \"did not find attributes:\"\n # pp.pprint(self.attributes)\n # if self.sdef['df']:\n self.dsinfo = self.mk_dsinfo(value)\n self.merge_attribute_defs(self.attributes, self.dsinfo['atags'])\n # else:\n # definition empty, must be custom dataset\n # self.dsinfo = {}\n self.merge_attrs()\n if self.link_info:\n # this dataset set to link to another. Already done in Node. Nothing to do here\n pass\n else:\n # creating new dataset (normally done)\n self.link_node = None\n # compress = \"gzip\" if compress else None\n # self.h5node = self.h5parent.create_dataset(self.name, data=value,\n # dtype=dtype, compression=compress)\n #- self.file.file_pointer.create_dataset(self.full_path, data=value,\n #- dtype=dtype, compression=compress)\n self.file.create_dataset(self.full_path, data=value, dtype=dtype,\n compress=compress)\n # self.file.h5commands.append(\"create_dataset(%s, %s)\" % (self.full_path, value))\n # if dtype:\n # self.h5node = self.h5parent.create_dataset(self.name, data=value, dtype=dtype)\n # else: # should find out what default value for dtype used in h5py and use that, combine these\n # self.h5node = self.h5parent.create_dataset(self.name, data=value)\n self.set_attr_values()",
"def test_set_bad_name(self):\n dim = Dimension(\"yolo\", \"uniform\", -3, 4, shape=(4, 4))\n with pytest.raises(TypeError):\n dim.name = 4",
"def __init__(self, name_dataset, reduce=False):\n dataset = TUDataset(root='data/TUDataset', name=name_dataset)\n if reduce:\n new_dataset = []\n for i in tqdm(range(len(dataset))):\n aux_graph = copy.deepcopy(dataset[i])\n aux_graph.edge_index = TUDData.reduce_edges(aux_graph.edge_index)\n new_dataset.append(copy.deepcopy(aux_graph))\n dataset = WrapperSynthetic(new_dataset, dataset.num_node_features,\n dataset.num_classes, None)\n super(TUDData, self).__init__(dataset)",
"def builder_cls(\n self,\n name: naming.DatasetName,\n ) -> Type[dataset_builder.DatasetBuilder]:\n if name.namespace not in self.namespaces: # pylint: disable=unsupported-membership-test\n error_msg = f'\\nNamespace {name.namespace} not found.'\n error_msg += (\n f'Note that namespace should be one of: {sorted(self.namespaces)}'\n )\n raise registered.DatasetNotFoundError(error_msg)\n raise NotImplementedError(\n 'builder_cls does not support data_dir-based community datasets. Got: '\n f'{name}'\n )",
"def is_dataset(self):\n return self._dataset is not None",
"def _create_child_dataset(self, name, shape=None, dtype=None, data=None, **kwds):\n pass",
"def __init__(self, dataset: Dataset):\n self.dataset = dataset",
"def find_dataset_using_name(dataset_name):\n\tdataset_filename = \"data.\" + dataset_name + \"_dataset\"\n\tdatasetlib = importlib.import_module(dataset_filename)\n\n\tdataset = None\n\ttarget_dataset_name = dataset_name.replace('_', '') + 'dataset'\n\tfor name, cls in datasetlib.__dict__.items():\n\t\tif name.lower() == target_dataset_name.lower() \\\n\t\t and issubclass(cls, BaseDataset):\n\t\t\tdataset = cls\n\n\tif dataset is None:\n\t\traise NotImplementedError(\"In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase.\" % (dataset_filename, target_dataset_name))\n\n\treturn dataset",
"def __init__(self, df, name):\n try:\n self.df = df\n df.any()\n except AttributeError:\n self.df = df.toPandas()\n\n self.name = name\n\n if self.df.empty:\n raise Exception('Empty Dataset')",
"def set_or_create_dataset(conn: BlitzGateway, project_id: Union[int, None],\n dataset: Union[str, int],\n across_groups: Optional[bool] = True\n ) -> Union[int, None]:\n if isinstance(dataset, str):\n if project_id:\n dataset_id = post_dataset(conn, dataset, project_id=project_id)\n else:\n dataset_id = post_dataset(conn, dataset)\n print(f'Created new Dataset:{dataset_id}')\n elif (isinstance(dataset, int)):\n dataset_id = dataset\n else:\n raise TypeError(\"'dataset' must be str or int\")\n return dataset_id"
] |
[
"0.65393716",
"0.629011",
"0.607913",
"0.6057744",
"0.60284096",
"0.5984428",
"0.5735188",
"0.571796",
"0.5612005",
"0.5570402",
"0.5567123",
"0.55548304",
"0.5546228",
"0.55137175",
"0.5497992",
"0.548842",
"0.5445214",
"0.5429049",
"0.54150707",
"0.5400183",
"0.5380179",
"0.537748",
"0.5372091",
"0.5370649",
"0.5367336",
"0.5305046",
"0.53018695",
"0.528582",
"0.52707994",
"0.5270455"
] |
0.8115274
|
0
|
Add a read to the data (or multiple identical reads, if read_count>1); return True if perfect alignment.
|
def add_read(self, HTSeq_alignment, position=SPECIAL_POSITIONS.unknown, read_count=1, dataset_name=None):
# TODO instead of taking HTSeq_alignment, this could just take the seq and N_errors, like add_RISCC_read does?
self._ensure_dataset_None(dataset_name)
# increment total_read_count, and add read ID to the ID set
self.total_read_count += read_count
# figure out if the read is perfect and increment perfect_read_count if yes; return True if perfect else False.
# TODO may want to come up with a better option than 10 for the "errors" of unaligned seqs
if position in SPECIAL_POSITIONS.all_undefined:
N_errors = 10
else:
N_errors = check_mutation_count_by_optional_NM_field(HTSeq_alignment, negative_if_absent=False)
# add sequence position/readcount data to the detailed dictionary.
seq = HTSeq_alignment.read.seq
try: self.sequences_counts_positions_errors[seq][0] += read_count
except KeyError: self.sequences_counts_positions_errors[seq] = [read_count, position, N_errors]
if N_errors==0:
self.perfect_read_count += read_count
return True
else:
return False
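To make the bookkeeping concrete, here is a runnable sketch with the HTSeq alignment and SPECIAL_POSITIONS machinery replaced by plain arguments; MutantSketch and the tuple positions are illustrative stand-ins, not the real classes.

class MutantSketch(object):
    def __init__(self):
        self.total_read_count = 0
        self.perfect_read_count = 0
        # seq -> [count, position, N_errors], like sequences_counts_positions_errors above
        self.sequences_counts_positions_errors = {}

    def add_read(self, seq, position, N_errors, read_count=1):
        self.total_read_count += read_count
        if seq in self.sequences_counts_positions_errors:
            self.sequences_counts_positions_errors[seq][0] += read_count
        else:
            self.sequences_counts_positions_errors[seq] = [read_count, position, N_errors]
        if N_errors == 0:
            self.perfect_read_count += read_count
            return True
        return False

m = MutantSketch()
print(m.add_read("AAA", ("chr1", "+", 100), N_errors=0, read_count=3))   # True - perfect reads
print(m.add_read("AAG", ("chr1", "+", 100), N_errors=1))                 # False - one mismatch
print(m.total_read_count, m.perfect_read_count)                          # 4 3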
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_read(self, HTSeq_alignment, position=SPECIAL_POSITIONS.unknown, read_count=1, dataset_name=None):\n readcount_data_container = self._check_dataset_name_return_data(dataset_name)\n Insertional_mutant.add_read(readcount_data_container, HTSeq_alignment, position, read_count)",
"def add(self, read):\n self.additionalReads.append(read)\n self._length += 1",
"def add_read(self, read):\n r = Read(read)\n if read not in self.reads:\n self.reads[read] = r\n else:\n self.reads[read].visit_limit += 1\n self.num_reads += 1",
"def add_RISCC_read(self, seq, new_position, N_errors=None, read_count=1):\n # TODO why are we even using Insertion_position objects here?? Those aren't insertion positions with a start-end, just single positions... But still need to be able to deal with unaligned/multi as well as proper positions.\n if not isinstance(new_position, Insertion_position) and new_position not in SPECIAL_POSITIONS.all_undefined:\n raise MutantError(\"RISCC read position %s is unacceptable - must be Insertion_position object or one of %s!\"%(\n new_position, ', '.join(SPECIAL_POSITIONS.all_undefined)))\n # self.RISCC_genome_side_aligned_reads is a position:data dict\n if new_position not in SPECIAL_POSITIONS.all_undefined:\n try:\n # MAYBE-TODO check that the same seq isn't present in a different position?\n self.RISCC_genome_side_aligned_reads[new_position][1] += read_count\n try: self.RISCC_genome_side_aligned_reads[new_position][2][seq][0] += read_count\n except KeyError: self.RISCC_genome_side_aligned_reads[new_position][2][seq] = [read_count, N_errors]\n except KeyError:\n seq_count_error_dict = {seq: [read_count, N_errors]}\n self.RISCC_genome_side_aligned_reads[new_position] = [new_position, read_count, seq_count_error_dict, \n SPECIAL_GENE_CODES.not_determined, '?', '?', '?']\n # self.RISCC_genome_side_unaligned_reads is a seq:data dict, since the positions aren't usable as keys\n else:\n try:\n self.RISCC_genome_side_unaligned_reads[seq][1] += read_count\n self.RISCC_genome_side_aligned_reads[seq][2][seq][0] += read_count\n except KeyError:\n self.RISCC_genome_side_unaligned_reads[seq] = [new_position, read_count, {seq: [read_count, N_errors]}, \n SPECIAL_GENE_CODES.not_determined, '?', '?', '?']\n # Note: adding gene/annotation info for those is implemented in the dataset methods.",
"def add_read(self, new_read): \n if self.sampling:\n self.convert_to_list()\n self.reads.append(new_read)\n self.total+=1",
"def add_read_to_vec(self, read, copy=None):\n\t\tfor i,s in enumerate(read.seq):\n\t\t\t# the i-th non-gapped position for ref_seq_id starting at offset read.offset\n\t\t\tgapped_pos = self.refmap.ungapped_to_gapped(read.ref_seq_id, read.offset + i)\n\t\t\tDF.add_to_vec(self, nt=s, positions=[gapped_pos], counts=[read.copy if copy is None else copy])",
"def check_added_reads_consistency(cls, profile: DataFrame) -> Series[bool]:\n return (\n profile[cls.kraken_assigned_reads] + profile[cls.added_reads]\n == profile[cls.new_est_reads]\n )",
"def test__add_read(self):\n # using fake HTSeq alignment class from deepseq_utilities; defining one perfect and one imperfect alignment\n # note: the detailed mutation-counting methods are imported from deepseq_utilities and unit-tested there.\n position = Insertion_position('chr1', '+', position_before=3)\n perfect_aln = Fake_HTSeq_aln(seq='AAA', optional_field_data={'NM':0})\n imperfect_aln = Fake_HTSeq_aln(seq='GGG', optional_field_data={'NM':1})\n # adding perfect and imperfect to mutant increases all the counts as expected\n mutant = Insertional_mutant(insertion_position=position)\n mutant.add_read(perfect_aln, read_count=3, position=position)\n assert mutant.total_read_count == mutant.perfect_read_count == 3\n assert mutant.sequences_counts_positions_errors == {'AAA': [3, position, 0]}\n mutant.add_read(imperfect_aln, read_count=1, position=position)\n assert mutant.total_read_count == 4\n assert mutant.perfect_read_count == 3\n assert mutant.sequences_counts_positions_errors == {'AAA': [3, position, 0], 'GGG': [1, position, 1]}\n # same for a multi-dataset mutant - this time we need to specify which dataset we're adding to\n mutant = Insertional_mutant_multi_dataset(insertion_position=position)\n assert len(mutant.by_dataset) == 0\n mutant.add_read(perfect_aln, read_count=3, dataset_name='d1', position=position)\n assert len(mutant.by_dataset) == 1\n assert mutant.by_dataset['d1'].total_read_count == mutant.by_dataset['d1'].perfect_read_count == 3\n assert mutant.by_dataset['d1'].sequences_counts_positions_errors == {'AAA': [3, position, 0]}\n mutant.add_read(imperfect_aln, read_count=1, dataset_name='d1', position=position)\n assert len(mutant.by_dataset) == 1\n assert mutant.by_dataset['d1'].total_read_count == 4\n assert mutant.by_dataset['d1'].perfect_read_count == 3\n assert mutant.by_dataset['d1'].sequences_counts_positions_errors == {'AAA': [3, position, 0], 'GGG': [1, position, 1]}\n # now adding a read to another dataset - nothing changes in dataset d1, but we have new dataset d2 numbers\n mutant.add_read(imperfect_aln, read_count=1, dataset_name='d2', position=position)\n assert len(mutant.by_dataset) == 2\n assert mutant.by_dataset['d1'].total_read_count == 4\n assert mutant.by_dataset['d2'].total_read_count == 1\n assert mutant.by_dataset['d2'].perfect_read_count == 0\n assert mutant.by_dataset['d2'].sequences_counts_positions_errors == {'GGG': [1, position, 1]}\n # it should be impossible to add a read to a specific dataset in a single-dataset mutant \n mutant = Insertional_mutant(insertion_position=position)\n self.assertRaises(MutantError, mutant.add_read, perfect_aln, read_count=3, dataset_name='d1')\n # it should be impossible to add a read to a multi-dataset mutant without giving a dataset_name\n mutant = Insertional_mutant_multi_dataset(insertion_position=position)\n self.assertRaises(MutantError, mutant.add_read, perfect_aln, read_count=3)",
"def _decide_if_replace_read(self, new_position, max_distance):\n # if there are no current reads, add new one\n if not len(self.RISCC_genome_side_aligned_reads): return True\n # if new read isn't \"confirming\", it can't be better\n if not self._if_confirming_read(new_position, max_distance): return False\n # if the new one is \"confirming\" and the old one isn't, new one has to be better\n old_position = self.RISCC_genome_side_aligned_reads.values()[0][0]\n if not self._if_confirming_read(old_position, max_distance): return True\n # if both the old and new position meet the basic conditions, pick the highest-distance one\n # TODO what about directionality and weird cases?\n new_dist = abs(new_position.min_position - self.position.min_position)\n old_dist = abs(old_position.min_position - self.position.min_position)\n if new_dist > old_dist: return True\n else: return False",
"def register_read(self):\n self._reads_since_check += 1",
"def __call__(self, read1, read2):\n self.add_to_batch(*read1, self.read1_batch, self.index)\n self.add_to_batch(*read2, self.read2_batch, self.index)\n self.index += self.lines_per_row\n if self.index >= self.bufsize:\n self.flush()",
"def find_read_candidates(self, read):\n self.read_allele_dictionary = {}\n ref_alignment_start = read.reference_start\n ref_alignment_stop = self.get_read_stop_position(read)\n # if the region has reached a very high coverage, we are not going to parse through all the reads\n if self.coverage[ref_alignment_start] > 300:\n return False\n cigar_tuples = read.cigartuples\n read_sequence = read.query_sequence\n read_id = read.query_name\n read_quality = read.query_qualities\n ref_sequence = self.fasta_handler.get_sequence(chromosome_name=self.chromosome_name,\n start=ref_alignment_start,\n stop=ref_alignment_stop+10)\n\n self.read_info[read_id] = (ref_alignment_start, ref_alignment_stop, read.mapping_quality, read.is_reverse)\n for pos in range(ref_alignment_start, ref_alignment_stop):\n self.read_id_by_position[pos].append((read_id, ref_alignment_start, ref_alignment_stop))\n for i, ref_base in enumerate(ref_sequence):\n self.reference_dictionary[ref_alignment_start + i] = ref_base\n\n # read_index: index of read sequence\n # ref_index: index of reference sequence\n read_index = 0\n ref_index = 0\n found_valid_cigar = False\n for cigar in cigar_tuples:\n cigar_code = cigar[0]\n length = cigar[1]\n # get the sequence segments that are effected by this operation\n ref_sequence_segment = ref_sequence[ref_index:ref_index+length]\n read_quality_segment = read_quality[read_index:read_index+length]\n read_sequence_segment = read_sequence[read_index:read_index+length]\n\n if cigar_code != 0 and found_valid_cigar is False:\n read_index += length\n continue\n found_valid_cigar = True\n\n # send the cigar tuple to get attributes we got by this operation\n ref_index_increment, read_index_increment = \\\n self.parse_cigar_tuple(cigar_code=cigar_code,\n length=length,\n alignment_position=ref_alignment_start+ref_index,\n ref_sequence=ref_sequence_segment,\n read_sequence=read_sequence_segment,\n read_id=read_id,\n quality=read_quality_segment)\n\n # increase the read index iterator\n read_index += read_index_increment\n ref_index += ref_index_increment\n\n # after collecting all alleles from reads, update the global dictionary\n for position in self.read_allele_dictionary.keys():\n if position < self.region_start_position or position > self.region_end_position:\n continue\n self.rms_mq[position] += read.mapping_quality * read.mapping_quality\n for record in self.read_allele_dictionary[position]:\n # there can be only one record per position in a read\n allele, allele_type = record\n\n if allele_type == MATCH_ALLELE or allele_type == MISMATCH_ALLELE:\n # If next allele is indel then group it with the current one, don't make a separate one\n if position + 1 <= ref_alignment_stop and position + 1 in self.read_allele_dictionary.keys():\n next_allele, next_allele_type = list(self.read_allele_dictionary[position + 1].keys())[0]\n if next_allele_type == INSERT_ALLELE or next_allele_type == DELETE_ALLELE:\n continue\n self.positional_read_info[position].append(\n (read_id, ref_alignment_start, ref_alignment_stop, read.mapping_quality))\n self._update_positional_allele_dictionary(read_id, position, allele, allele_type,\n read.mapping_quality)\n else:\n # it's an insert or delete, so, add to the previous position\n self.positional_read_info[position-1].append(\n (read_id, ref_alignment_start, ref_alignment_stop, read.mapping_quality))\n self._update_positional_allele_dictionary(read_id, position-1, allele, allele_type,\n read.mapping_quality)\n return True",
"def read_part(self, read_buffer, buffer_name, total_size):\n if buffer_name[0:2] == \"__\":\n # Perform name mangling\n buffer_name = \"_BinarySocket\" + buffer_name\n\n # Calculate the number of bytes still needed\n cursize = len(self.__dict__[buffer_name])\n remain = total_size - cursize\n if remain == 0:\n return True\n\n # Try to read the remaining bytes from the buffer\n try:\n str = read_buffer.read(remain)\n except EOFError:\n return False\n\n # Append the read data to the buffer and check the new size\n self.__dict__[buffer_name] += str\n new_size = len(self.__dict__[buffer_name])\n if new_size < total_size:\n return False\n\n # Buffer is of proper length\n return True",
"def add_nonaligned_reads(self, N_all_non_aligned, N_unaligned, N_multiple_aligned, replace=False):\n if N_all_non_aligned is not None:\n if 'unknown' in (N_all_non_aligned, self.non_aligned_read_count): self.non_aligned_read_count = 'unknown'\n elif replace or self.non_aligned_read_count is None: self.non_aligned_read_count = int(N_all_non_aligned)\n else: self.non_aligned_read_count += int(N_all_non_aligned)\n elif replace: self.non_aligned_read_count = 'unknown'\n if N_unaligned is not None:\n if 'unknown' in (N_unaligned, self.unaligned): self.unaligned = 'unknown'\n elif replace or self.unaligned is None: self.unaligned = int(N_unaligned)\n else: self.unaligned += int(N_unaligned)\n elif replace: self.unaligned = 'unknown'\n if N_multiple_aligned is not None:\n if 'unknown' in (N_multiple_aligned, self.multiple_aligned): self.multiple_aligned = 'unknown'\n elif replace or self.multiple_aligned is None: self.multiple_aligned = int(N_multiple_aligned)\n else: self.multiple_aligned += int(N_multiple_aligned)\n elif replace: self.multiple_aligned = 'unknown'\n # Note: NO special case for when we don't know the specific categories, but we know total non_aligned is 0, \n # because for old-format files non_aligned is initially 0 but gets increased when reading the actual *.sam file, \n # which contains lines for unaligned reads (which are unaligned or multiple, both output the same with bowtie -m option)",
"def process_read(self, ref, read, ref_offset=0):\n\n if read.alignment.mapping_quality < self.config.min_mapq:\n return\n\n ref_pos = read.alignment.position.position - ref_offset\n read_pos = 0\n # Use set(), as some cigar operations might generate duplicated positions,\n # E.g. for insertions, it extends the candidate positions to\n # [ins_pos - ins_len, ins_pos + ins_len] which might overlap with some\n # nearby mismatches.\n positions = set()\n for cigar in read.alignment.cigar:\n # Break if it reached the end of reference sequence.\n if ref_pos >= len(ref):\n break\n if cigar.operation not in utils.CIGAR_OPS:\n raise ValueError('Unexpected CIGAR operation', cigar, read)\n\n if cigar.operation == cigar_pb2.CigarUnit.ALIGNMENT_MATCH:\n positions.update(\n self._process_align_match(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n ref_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.SEQUENCE_MISMATCH:\n positions.update(\n self._process_seq_mismatch(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n ref_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.INSERT:\n positions.update(\n self._process_insert(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.CLIP_SOFT:\n positions.update(\n self._process_soft_clip(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n elif (cigar.operation == cigar_pb2.CigarUnit.DELETE or\n cigar.operation == cigar_pb2.CigarUnit.SKIP):\n positions.update(\n self._process_delete(cigar, ref, read, ref_pos, read_pos))\n ref_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.SEQUENCE_MATCH:\n ref_pos += cigar.operation_length\n read_pos += cigar.operation_length\n elif (cigar.operation == cigar_pb2.CigarUnit.CLIP_HARD or\n cigar.operation == cigar_pb2.CigarUnit.PAD):\n pass\n\n # Yield positions within the range\n for pos in sorted(positions):\n if pos >= 0 and pos < len(ref):\n yield pos",
"def check_readings(self):\n # loading data from log file\n if self.filepath is not None:\n if self.all_read is None:\n return False\n else:\n ei = self.curr_indexi + self.read_step\n if ei >= self.all_read.shape[0]:\n return False\n self.curr_read = self.all_read[self.curr_index: ei, :]\n self.curr_index = ei\n return True\n\n # stream of data from beaglebone\n # check that there is new data avalible\n isnew = not all_data.empty()\n\n if isnew:\n # read most current data\n qsize = all_data.qsize()\n curr_read = [all_data.get_nowait() for _ in range(qsize)]\n self.curr_read = np.concatenate(curr_read)\n\n return isnew",
"def _read_next_alignment(self, stream):",
"def add_item(self, item, index):\n if index in self.d_buffer.keys():\n return True\n elif len(self) < self._size:\n self.d_buffer.update({index: item})\n return True\n else:\n return False",
"def _add_READING(self, w2, row):\n assert self.variant_unit, \"Can't call add_READING if self.variant_unit is None\"\n row['READING'] = self.get_attestation(w2, self.variant_unit)\n return True",
"def add_read_to_vec_using_ref(self, read):\n\t\ti = read.offset\n\t\tfor p in self.refmap.gap_map[read.ref_seq_id][read.offset:(read.offset+len(read.seq))]:\n\t\t\ts = self.refmap.fasta[read.ref_seq_id].seq[i]\n\t\t\tif s=='U': s='T'\n\t\t\tif s not in ('A','T','C','G'): s='N'\n\t\t\tDF.add_to_vec(self, nt=s, positions=[p], counts=[read.copy])\n\t\t\ti += 1",
"def improve_best_RISCC_read(self, seq, new_position, N_errors=None, read_count=1, max_distance=MAX_POSITION_DISTANCE):\n # if there are more than one current reads, you're not using improve_best_RISCC_read consistently!\n if len(self.RISCC_genome_side_aligned_reads) > 1:\n raise MutantError(\"Don't try using the improve_best_RISCC_read when keeping more than one read!\")\n # if decided to replace, discard old genome-side read dict and make new one from just the current read data.\n if self._decide_if_replace_read(new_position, max_distance):\n self.RISCC_genome_side_aligned_reads, self.RISCC_genome_side_unaligned_reads = {}, {}\n self.add_RISCC_read(seq, new_position, N_errors, read_count)\n # TODO make this count unaligned/confirming/non-confirming reads, too, instead of keeping all these counts as functions that read the actual mutant data, which will be missing in this case? I did something like that in mutant_Carette.py.",
"def _align_single_end_reads(self):\n read_aligner = ReadAligner(self._args.segemehl_bin, self._args.progress)\n if self._file_needs_to_be_created(self._pathcreator.index_path):\n read_aligner.build_index(\n self._pathcreator.ref_seq_path_list,\n self._pathcreator.index_path,\n )\n for read_path, output_path, nomatch_path in zip(\n self._pathcreator.processed_read_paths,\n self._pathcreator.read_alignment_bam_paths,\n self._pathcreator.unaligned_reads_paths,\n ):\n if not self._file_needs_to_be_created(output_path):\n continue\n\n read_aligner.run_alignment(\n read_path,\n self._pathcreator.index_path,\n self._pathcreator.ref_seq_path_list,\n output_path,\n nomatch_path,\n int(self._args.processes),\n int(self._args.segemehl_accuracy),\n float(self._args.segemehl_evalue),\n self._args.split,\n paired_end=False,\n )",
"def isaligned(a: np.ndarray, alignment: int) -> bool:\n return (a.ctypes.data % alignment) == 0",
"def read_pair_align(read1, read2):\n r1pos = [x+1 for x in read1.positions]\n r2pos = [x+1 for x in read2.positions]\n if read1.mate_is_reverse and r1pos[0] < r2pos[0]: # read1 is earlier\n read = [r1pos[0], r1pos[-1], r2pos[0], r2pos[-1]]\n elif read2.mate_is_reverse and r2pos[0] < r1pos[0]: # read2 is earlier\n read = [r2pos[0], r2pos[-1], r1pos[0], r1pos[-1]]\n else:\n read = []\n # print(\"Skipping read pair from error in alignment.\")\n # print(\"%s--%s> <%s--%s\" % tuple(read))\n return read",
"def _increment_state(self, bytes_read):\n self._read_state[StateKey.POSITION] += bytes_read",
"def writeOneRead(self, pos, probs, out):\n if not self.isValid(pos, pos + self.readlen):\n return False\n f = self.stream\n f.seek(pos)\n n = 0\n while True:\n b = f.read(1)\n if b not in \"ACGTNXacgtnx\":\n continue\n if random.random() < probs[n]:\n while True:\n nb = random.choice('ACGT')\n if nb != b:\n b = nb\n break\n out.write(b)\n n += 1\n if n == self.readlen:\n break\n return True",
"def needspadding(self):\n return self.datasize % 2 != 0",
"def test_add_seqs_to_alignment(self):\n res = add_seqs_to_alignment(self.seqs2_fp, self.seqs1_aln_fp, RNA)\n self.assertEqual(res.toFasta(), self.add_seqs_aligned)",
"def add_reads(self, new_reads): \n if self.sampling:\n self.convert_to_list()\n self.reads.extend(new_reads)",
"def _is_single_end(self, reads):\n if len([read for read in reads if read.get(\"IsIndexedRead\",\"N\") == \"N\"]) == 1:\n return True\n return False"
] |
[
"0.66703767",
"0.63036084",
"0.60595834",
"0.5847189",
"0.5803102",
"0.5691213",
"0.5633004",
"0.5595244",
"0.5579823",
"0.54536265",
"0.5377282",
"0.5353344",
"0.533654",
"0.53172827",
"0.53101915",
"0.52433026",
"0.52132875",
"0.5118853",
"0.510487",
"0.5064495",
"0.50476015",
"0.5029933",
"0.50297153",
"0.5024232",
"0.5022022",
"0.50092643",
"0.4974453",
"0.49714148",
"0.49447212",
"0.49076247"
] |
0.75120145
|
0
|
Set self.position to be the highest-count DEFINED position; check that all positions are within max_allowed_dist of it.
|
def decide_and_check_position(self, max_allowed_dist=0, ratio_to_ignore=100, OUTPUT=None):
if not self.sequences_counts_positions_errors:
self.position = SPECIAL_POSITIONS.unknown
return
def _order(arg):
s, (c, p, e) = arg
return (p in SPECIAL_POSITIONS.all_undefined, -c, e, s)
main_seq, (main_count, main_pos, main_Nerr) = min(self.sequences_counts_positions_errors.items(), key = _order)
self.position = main_pos
for seq, (count, pos, N_err) in self.sequences_counts_positions_errors.items():
if pos not in SPECIAL_POSITIONS.all_undefined:
if not get_position_distance(main_pos, pos, ignore_strand=False) <= max_allowed_dist:
if count*ratio_to_ignore <= main_count:
# TODO removing these reads is a problem, because we don't remove the genome-side reads!
del self.sequences_counts_positions_errors[seq]
self.total_read_count -= count
if N_err==0: self.perfect_read_count -= count
else:
if OUTPUT is not None:
OUTPUT.write("Warning: Different cassette-side position in same mutant! REMOVING MUTANT. IB %s,"%self.IB
+" %s %s %serr %s reads, %s %s %serr %s reads\n"%(main_pos, main_seq, main_Nerr, main_count,
pos, seq, N_err, count))
return True
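A self-contained sketch of the position-picking step above, with SPECIAL_POSITIONS.all_undefined and Insertion_position objects replaced by a plain set and integers; all names and values here are illustrative stand-ins.

UNDEFINED = {"unaligned", "multi-aligned"}   # stand-in for SPECIAL_POSITIONS.all_undefined

seq_data = {                                 # seq -> (count, position, N_errors)
    "AAA": (10, 100, 0),
    "AAG": (2, 100, 1),
    "CCC": (1, "unaligned", 2),
}

def _order(item):
    # undefined positions sort last, then by descending count, then errors, then sequence
    seq, (count, pos, n_err) = item
    return (pos in UNDEFINED, -count, n_err, seq)

main_seq, (main_count, main_pos, main_err) = min(seq_data.items(), key=_order)
print(main_seq, main_pos)    # AAA 100 - the highest-count defined position wins

# The real method then compares every other defined position against main_pos and either
# drops low-count outliers (count*ratio_to_ignore <= main_count) or flags the mutant for removal.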
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def max_position(self):\n raise NotImplementedError",
"def max_positions(self):\n return None",
"def set_max_position(self, max_us):\n raise NotImplementedError()",
"def __find_max_distance(self):\n return utils.find_max_distance(self.__game)",
"def _update_limits(self):\n if self.pos_x > self.max_x:\n self.max_x = self.pos_x\n if self.pos_y > self.max_y:\n self.max_y = self.pos_y\n if self.pos_x < self.min_x:\n self.min_x = self.pos_x\n if self.pos_y < self.min_y:\n self.min_y = self.pos_y",
"def max_positions(self):\n return self.args.max_positions",
"def max_position_limit(self, value):\n self._write(MX_MAX_POSITION_LIMIT, value)",
"def max_positions(self):\n return 1e6 # an arbitrary large number",
"def max_positions(self):\n return 1e6 # an arbitrary large number",
"def max_positions(self):\n return self.student.max_positions() # also needed in validation runs.",
"def get_max_position(self):\n raise NotImplementedError()",
"def max_positions(self):\n return int(1e5) # an arbitrary large number",
"def max_positions(self):\n return int(1e5) # an arbitrary large number",
"def max_positions(self):\n return int(100000.0)",
"def correct_pos(self, target_pos, last_distance):\n tank_pos = Vec2d(self.tank.body.position)\n current_distance = target_pos.get_distance(tank_pos)\n self.last_distance = current_distance\n if last_distance < current_distance:\n return True\n else:\n return False",
"def max_positions(self):\n if self.embed_positions is None:\n return self.max_target_positions\n return min(self.max_target_positions, self.embed_positions.max_positions)",
"def max_positions(self):\n if self.embed_positions is None:\n return self.max_target_positions\n return min(self.max_target_positions, self.embed_positions.max_positions)",
"def max_positions(self):\n if self.embed_positions is None:\n return self.max_target_positions\n return min(self.max_target_positions, self.embed_positions.max_positions)",
"def max_positions(self):\r\n return (self.args.max_source_positions, self.args.max_target_positions)",
"async def max_distance(self, *args):\n return await self._rpc.max_distance(*args)",
"def max_positions(self):\n if self.embed_positions is None:\n return self.max_target_positions\n return min(self.max_target_positions, self.embed_positions.max_positions())",
"def max_positions(self):\n if self.embed_positions is None:\n return self.max_target_positions\n return min(self.max_target_positions, self.embed_positions.max_positions())",
"def __abs__(self):\n abspos = abs(self.pos)\n absvel = abs(self.vel)\n return np.amax((abspos, absvel))",
"def __get_max_pos(self):\n query = text('select max(song_position) as \"max_position\" from setlist where show_id = :id')\n query = query.bindparams(id=self.id)\n\n result = db.engine.execute(query)\n\n for row in result:\n max_position = row['max_position']\n if max_position is None:\n max_position = 0\n\n result.close()\n\n return max_position",
"def del_max(self):\r\n maxVal = self.find_max()\r\n if maxVal is not None:\r\n self.items[1] = self.items[self.size]\r\n self.items[self.size] = None\r\n self.size -= 1\r\n self.perc_down(1)",
"def max_positions(self):\n return (self.args.max_source_positions, self.args.max_target_positions)",
"def max_positions(self):\n return int(1e5)",
"def SetMaxDistance(self, *args):\n return _BRepAlgo.BRepAlgo_NormalProjection_SetMaxDistance(self, *args)",
"def maxGPSDistance(self):\n # TODO check whether GPS points are already there\n # TODO: move into sl.gps.GPS()\n maxDistance = 0\n gps_point = self.gps_points[0]\n for gpspoint in self.gps_points:\n distance = gpspoint.getGeometry().distance(gps_point.getGeometry())\n gps_point = gps_point\n \n if distance > maxDistance:\n maxDistance = distance\n \n return maxDistance",
"def max_positions(self):\n return (self.cfg.max_source_positions, self.cfg.max_target_positions)"
] |
[
"0.6595603",
"0.6491933",
"0.6433678",
"0.61591154",
"0.6157575",
"0.6144035",
"0.6072857",
"0.5985302",
"0.5985302",
"0.5968435",
"0.5918116",
"0.58851624",
"0.58851624",
"0.58576226",
"0.57616043",
"0.57189703",
"0.57189703",
"0.57189703",
"0.57186514",
"0.5714011",
"0.5707029",
"0.5707029",
"0.56924003",
"0.5678232",
"0.5657943",
"0.56536406",
"0.5651588",
"0.56485975",
"0.5643307",
"0.56291217"
] |
0.6715895
|
0
|
Return the most common sequence in the given data, and its count (or Nth most common sequence if N is provided).
|
def _get_main_sequence_from_data(seqs_to_counts_and_data, N=1, aligned_only=False):
# use key to sort reverse by count but non-reverse by seq
if aligned_only:
filtered_data = [(seq, data) for (seq, data) in seqs_to_counts_and_data.items()
if isinstance(data[1], Insertion_position)]
else:
filtered_data = seqs_to_counts_and_data.items()
sequences_by_count = sorted([(seq,data[0]) for (seq,data) in filtered_data],
key = lambda s_c: (-s_c[1], s_c[0]))
# try returning the Nth sequence and count; return nothing if there are under N sequences.
try: return tuple(sequences_by_count[N-1])
except IndexError: return ('',0)
# MAYBE-TODO should probably make that '-' or something instead of '', empty strings are hard to see.
# On the other hand '-' isn't a valid sequence, and '' is...
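A runnable sketch of the count-then-sequence sorting used above, ignoring the aligned_only filter; the dict contents are made-up example data.

seqs_to_counts_and_data = {
    "AAA": [5, "pos1", 0],    # seq -> [count, position, N_errors]
    "CCC": [5, "pos2", 1],
    "GGG": [2, "pos3", 0],
}

def main_sequence(data, N=1):
    # sort by descending count, then alphabetically by sequence to break ties
    by_count = sorted(((seq, d[0]) for seq, d in data.items()),
                      key=lambda s_c: (-s_c[1], s_c[0]))
    try:
        return by_count[N - 1]
    except IndexError:
        return ("", 0)

print(main_sequence(seqs_to_counts_and_data))        # ('AAA', 5) - count tie broken alphabetically
print(main_sequence(seqs_to_counts_and_data, N=3))   # ('GGG', 2)
print(main_sequence(seqs_to_counts_and_data, N=4))   # ('', 0) - fewer than N sequences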
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _counts(data):\n table = collections.Counter(iter(data)).most_common()\n if not table:\n return table\n maxfreq = table[0][1]\n for i in range(1, len(table)):\n if table[i][1] != maxfreq:\n table = table[:i]\n break\n return table",
"def get_most_common(self, lst):\n data = Counter(lst)\n mc = data.most_common(2) \n #if len(mc) == 1 or (mc[0][1] != (mc[1][1])):\n # return mc[0][0]\n #return \"AMB\"\n return data.most_common(1)[0][0]",
"def most_common(lst):\n return max(set(lst), key=lst.count)",
"def most_common(self):\n # Example ouput : ['so', 6]\n return list(sorted(self.frequencies().items(), key = lambda x: x[1], reverse=True)[0])\n #sorted = sorted(self.frequencies().items(), key = lambda x: x[1], reverse=True)\n #return sorted[0] #not list",
"def most_common(data_word):\n stop_words = set(stopwords.words(\"english\"))\n\n #filter out stop words\n data_filtered = [word for word in data_word if word not in stop_words]\n cnt = Counter(data_filtered)\n\n #count most common words\n common = cnt.most_common(100)\n return common",
"def mostcommon(iterable, n=None):\n #import operator\n bag = {}\n bag_get = bag.get\n for elem in iterable:\n bag[elem] = bag_get(elem, 0) + 1\n if n is None:\n return sorted(bag.iteritems(), key=itemgetter(1), reverse=True)\n it = enumerate(bag.iteritems())\n nl = nlargest(n, ((cnt, i, elem) for (i, (elem, cnt)) in it))\n return [(elem, cnt) for cnt, i, elem in nl]",
"def most_common_words(counts, n=-1):\n\n result = sorted(list(counts.items()), key=lambda x: x[1], reverse=True)\n\n if n == -1:\n return result\n else:\n return result[:n]",
"def mostCommon(self):\n d = [r[-1] for r in self]\n return max(set(d), key=d.count)",
"def most_common(iterable):\n from collections import Counter\n\n data = Counter(iterable)\n return data.most_common(1)[0][0]",
"def longest_common_substring_backtrack(s1, s2, i, j, count):\n if i == len(s1) or j == len(s2):\n return 0\n\n if s1[i-1] == s2[j-1]:\n count = 1 + longest_common_substring_backtrack(s1, s2, i+1, j+1, count)\n\n else:\n count = max(count,\n max(longest_common_substring_backtrack(s1, s2, i+1, j, count),\n longest_common_substring_backtrack(s1, s2, i, j+1, count)))\n\n return count",
"def select_most_common(data: pd.Series, n=9, key=\"Other\", v=1) -> dict:\n counts = collections.Counter(data)\n most_common = dict(counts.most_common(n))\n least_common = counts\n for k in most_common.keys():\n least_common.pop(k)\n\n most_common[key] = sum(least_common.values())\n if v:\n print('\\tCombine %i categories' % len(least_common.keys()))\n return most_common",
"def most_common(iterable):\r\n sorted_iterable = sorted((x, i) for i, x in enumerate(iterable))\r\n groups = itertools.groupby(sorted_iterable, key=operator.itemgetter(0))\r\n def _auxfun(g):\r\n _, it = g\r\n count = 0\r\n min_index = len(iterable)\r\n for _, where in it:\r\n count += 1\r\n min_index = min(min_index, where)\r\n return count, -min_index\r\n return max(groups, key=_auxfun)[0]",
"def consensus(*args):\n counts = map(Counter, zip_longest(*args))\n consensus = \"\"\n for c in counts:\n del c[None]\n consensus += c.most_common(1)[0][0]\n return Seq(consensus, args[0].id)",
"def most_common(filename,n):\n\tfreq_dict = dictionary_creation(filename)\n\tt = []\n\tfor key, value in freq_dict.items():\n\t\tt.append((value,key))\n\t\tt.sort(reverse=True)\n\twordlist = []\n\tfreqlist = []\n\tprint n, 'most common words:'\n\tfor freq,word in t[0:n]:\n\t\tprint word,'\\t', freq\n\t\twordlist.append(word)\n\t\tfreqlist.append(freq)\n\treturn wordlist,freqlist",
"def top_three_letters(string):\n print(Counter(string))\n print(Counter(string).most_common(3))",
"def most_frequent(x):\n return Counter(x).most_common()[0][0]",
"def most_common(elems):\n # get an iterable of (item, iterable) pairs\n sl = sorted((x, i) for i, x in enumerate(elems))\n # print 'SL:', SL\n groups = itertools.groupby(sl, key=operator.itemgetter(0))\n\n # auxiliary function to get \"quality\" for an item\n def _auxfun(g):\n item, iterable = g\n count = 0\n min_index = len(elems)\n for _, where in iterable:\n count += 1\n min_index = min(min_index, where)\n # print 'item %r, count %r, minind %r' % (item, count, min_index)\n return count, -min_index\n\n # pick the highest-count/earliest item\n return max(groups, key=_auxfun)[0]",
"def _get_majority_dc(array: List[int]) -> Optional[int]:\n array_length = len(array)\n if array_length == 1:\n return array[0]\n split_index = array_length // 2\n majority_left = _get_majority_dc(array[:split_index])\n majority_right = _get_majority_dc(array[split_index:])\n if majority_left == majority_right:\n return majority_left\n count_majority_left = 0\n count_majority_right = 0\n for item in array:\n if item == majority_left:\n count_majority_left += 1\n elif item == majority_right:\n count_majority_right +=1\n if count_majority_left > split_index:\n return majority_left\n elif count_majority_right > split_index:\n return majority_right\n else:\n return None",
"def solution(n, array):\n\n counters = [0] * n\n\n # Current greatest value calculated so far\n max_count = 0\n\n for i in range(len(array)):\n if array[i] == n + 1:\n # max_count = max(counters)\n counters = [max_count] * n\n else:\n counters[array[i] - 1] += 1\n\n # To avoid calculating max(), we update the max value at each step\n if counters[array[i] - 1] > max_count:\n max_count = counters[array[i] - 1]\n\n return counters",
"def max_repeats(seq):\n max_item = 0\n item_count = [seq.count(item) for item in seq]\n try:\n max_item = max(item_count)\n except ValueError:\n \"arg is an empty sequence\"\n return max_item",
"def compute(data):\n spoken = collections.defaultdict(lambda: collections.deque(maxlen=2))\n\n starting = map(int, data.split(\",\"))\n last_number = None\n i = 0\n\n for i, last_number in enumerate(starting, start=i + 1):\n spoken[last_number].append(i)\n\n for i in range(i + 1, 30000001):\n if len(spoken[last_number]) <= 1:\n last_number = 0\n else:\n last_number = spoken[last_number][1] - spoken[last_number][0]\n\n spoken[last_number].append(i)\n\n return last_number",
"def _get_majority_def(array: List[int]) -> Optional[int]:\n if len(array) == 0:\n return None\n counter = dict()\n for item in array:\n if item in counter:\n counter[item] += 1\n else:\n counter[item] = 1\n majority = max(counter, key=counter.get)\n if counter[majority] > len(array) // 2:\n return majority\n else:\n return None",
"def most_common(self, number=10):\n\n words_full_list = []\n\n for string in self.__corpora:\n words_full_list += string.split()\n\n print(Counter(words_full_list).most_common(number))",
"def counterFrequency(text):\n dictText = {}\n maxN = 0\n mostFrequent = \"\"\n for item in text:\n if (item not in dictText):\n dictText[item] = 1\n else: \n dictText[item] +=1\n \n if (dictText[item] > maxN):\n mostFrequent = item\n maxN = dictText[item]\n return mostFrequent",
"def printCounterByCount(counter):\n for k, v in counter.most_common():\n print(k, v)",
"def longest_common_substring_memoization(s1, s2, i, j, count, cache):\n\n if i == len(s1) or j == len(s2):\n return 0\n\n if cache[i][j] != -1:\n return cache[i][j]\n\n if s1[i-1] == s2[j-1]:\n count = 1 + longest_common_substring_memoization(s1, s2, i+1, j+1, count, cache)\n\n else:\n count = max(count,\n max(longest_common_substring_memoization(s1, s2, i+1, j, count, cache),\n longest_common_substring_memoization(s1, s2, i, j+1, count, cache)))\n\n cache[i][j] = count\n return cache[i][j]",
"def find_max_nr_doc(data):\n queries = list(set(data[:, 1].astype(int)))\n max_nr = 0\n for query in queries:\n n_max = data[data[:,1] == query].shape[0]\n if n_max > max_nr:\n max_nr = n_max\n return max_nr",
"def majority(x):\n c = Counter(x)\n value, _ = c.most_common()[0]\n return value",
"def find_max_with_count(A):\n\n def frmax(lo, hi):\n \"\"\"Use recursion to find maximum value in A[lo:hi+1] incl. count\"\"\"\n if lo == hi: return (0, A[lo])\n\n mid = (lo+hi)//2\n ctleft,left = frmax(lo, mid)\n ctright,right = frmax(mid+1, hi)\n return (1+ctleft+ctright, max(left, right))\n\n return frmax(0, len(A)-1)",
"def get_mostFrequent(self, n=5):\r\n pass"
] |
[
"0.6817413",
"0.6385164",
"0.6149853",
"0.59647363",
"0.59352905",
"0.5902185",
"0.58569103",
"0.5834017",
"0.5760998",
"0.5708807",
"0.56993324",
"0.5690845",
"0.5657929",
"0.56506425",
"0.56504536",
"0.56015867",
"0.5568998",
"0.5519514",
"0.5517345",
"0.551183",
"0.5487164",
"0.5485151",
"0.5463507",
"0.5457629",
"0.5450169",
"0.54300714",
"0.54039764",
"0.53898346",
"0.53855693",
"0.5366717"
] |
0.6733384
|
1
|
Copy non-readcount-related data from source_mutant to self (making new copies of all objects).
|
def _copy_non_readcount_data(self, source_mutant):
# COPY the position, not just make another name for the same value - I wrote a copy() function for positions
self.position = source_mutant.position.copy()
# strings are immutable and thus safe to "copy" by adding another name to the same value
self.gene = source_mutant.gene
self.orientation = source_mutant.orientation
self.gene_feature = source_mutant.gene_feature
self.gene_distances = source_mutant.gene_distances
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _copy_readcount_related_data(self, source_mutant):\n # integers are immutable and thus safe to \"copy\" by adding another name to the same value\n self.total_read_count = source_mutant.total_read_count\n self.perfect_read_count = source_mutant.perfect_read_count\n # using dict to make a COPY of the dict instead of just creating another name for the same value\n self.sequences_counts_positions_errors = dict(source_mutant.sequences_counts_positions_errors)",
"def copy(self):",
"def __copy__(self):\n return self.__class__(self.m, self.n, self.data)",
"def copy(self):\n pass",
"def copy(self):\n pass",
"def copy(self):\n pass",
"def copy(self):\n return self.__class__(\n self.kind, self.link_ids.copy(), self.included_nodes.copy(), self.mass,\n self.name, self.crossring_cleavages.copy(), self.composition.copy())",
"def clone(self,source):\n self.cardinality = source.cardinality\n self.sax_character = source.sax_character\n self.wildcardbits = source.wildcardbits",
"def __copy__(self):\n return GeneratorDataSet(self._inventory, self._data_encoder, self._target_encoder)",
"def _copy_data_from(self, original):\n raise NotImplementedError()",
"def _copy_(self):\n return copy.copy(self)",
"def copy_from(self, other):\n assert not self.is_final\n if self.parent is not None:\n assert other.parent is not None\n self.parent.copy_from(other.parent)\n self.isolated_names = copy.copy(other.isolated_names)\n self.modified = copy.copy(other.modified)\n self.read = copy.copy(other.read)\n self.deleted = copy.copy(other.deleted)\n self.bound = copy.copy(other.bound)\n self.annotations = copy.copy(other.annotations)\n self.params = copy.copy(other.params)",
"def __copy__(self):\n raise NotImplementedError",
"def copy(self):\n return super().copy()",
"def copy(self):\n return self.__class__(*self.sets)",
"def clone(self):",
"def copy(self):\n kopy = self.__class__()\n # Copy the source net\n kopy.source_net = nx.DiGraph(self.source_net)\n return kopy",
"def clone(self) -> Mutator:\n raise NotImplementedError",
"def copy(self):\n\t\treturn pythoncopy.deepcopy(self)",
"def _shallow_clone_dataset(self: TAvalancheDataset) -> TAvalancheDataset:\n dataset_copy = copy.copy(self)\n dataset_copy._flat_data = self._flat_data._shallow_clone_dataset()\n return dataset_copy",
"def __copy__(self):\n return self.copy()",
"def copy(self):\n copied = super().copy()\n copied.anonymize()\n return copied",
"def copy(self):\n return self.mutate().simple_copy()",
"def _shallow_clone_dataset(self: TDataWTransform) -> TDataWTransform:\n dataset_copy = copy.copy(self)\n dataset_copy._transform_groups = copy.copy(dataset_copy._transform_groups)\n dataset_copy._frozen_transform_groups = copy.copy(\n dataset_copy._frozen_transform_groups\n )\n return dataset_copy",
"def copy(self):\n return self.__class__(\n group_list=self.group_list, bits_except_last=self.bits_except_last,\n max_value=self.max_value\n )",
"def __copy__(self, *args, **kwargs):\n return self.copy()",
"def copy(self):\r\n return copy.copy(self)",
"def clone(self):\n raise NotImplementedError",
"def copy(self):\n import copy as pcopy\n return pcopy.deepcopy(self)",
"def deepcopy(self):\n return copymod.deepcopy(self)"
] |
[
"0.6806262",
"0.64745337",
"0.6368794",
"0.6264078",
"0.6264078",
"0.6264078",
"0.6224812",
"0.6219349",
"0.6204146",
"0.61696106",
"0.60658664",
"0.6017084",
"0.59997535",
"0.5997415",
"0.5995374",
"0.5987567",
"0.5922086",
"0.5911224",
"0.58600986",
"0.585722",
"0.5854316",
"0.58408755",
"0.58275604",
"0.58129394",
"0.5799576",
"0.57989526",
"0.57808596",
"0.5770631",
"0.5770401",
"0.57684654"
] |
0.80433446
|
0
|
Copy readcount-related data from source_mutant to self (making new copies of all objects).
|
def _copy_readcount_related_data(self, source_mutant):
# integers are immutable and thus safe to "copy" by adding another name to the same value
self.total_read_count = source_mutant.total_read_count
self.perfect_read_count = source_mutant.perfect_read_count
# using dict to make a COPY of the dict instead of just creating another name for the same value
self.sequences_counts_positions_errors = dict(source_mutant.sequences_counts_positions_errors)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _copy_non_readcount_data(self, source_mutant):\n # COPY the position, not just make another name for the same value - I wrote a copy() function for positions\n self.position = source_mutant.position.copy() \n # strings are immutable and thus safe to \"copy\" by adding another name to the same value\n self.gene = source_mutant.gene\n self.orientation = source_mutant.orientation\n self.gene_feature = source_mutant.gene_feature\n self.gene_distances = source_mutant.gene_distances",
"def clone(self,source):\n self.cardinality = source.cardinality\n self.sax_character = source.sax_character\n self.wildcardbits = source.wildcardbits",
"def get_source_counts(self):\n return deepcopy(self._source_counts)",
"def copy(self):",
"def __copy__(self):\n return self.__class__(self.m, self.n, self.data)",
"def _set_readcount_related_data_to_zero(self):\n self.total_read_count = 0\n self.perfect_read_count = 0\n self.RISCC_genome_side_aligned_reads = {}\n self.RISCC_genome_side_unaligned_reads = {}\n self.sequences_counts_positions_errors = {}\n # TODO should all this really be readcount-related? Well, it IS, but when I have a multi-dataset mutant, do I really want to keep the seq/position/count details and the genome-side RISCC read data per dataset rather than total? Hard to tell, really. In a perfect world I wouldn't be doing multiple RISCC datasets anyway!",
"def copy(self):\n return Counter(dict.copy(self))",
"def copy(self):\n return Counter(dict.copy(self))",
"def copy(self):\n pass",
"def copy(self):\n pass",
"def copy(self):\n pass",
"def copy(self):\n return self.__class__(\n self.kind, self.link_ids.copy(), self.included_nodes.copy(), self.mass,\n self.name, self.crossring_cleavages.copy(), self.composition.copy())",
"def _copy_data_from(self, original):\n raise NotImplementedError()",
"def copy_from(self, other):\n assert not self.is_final\n if self.parent is not None:\n assert other.parent is not None\n self.parent.copy_from(other.parent)\n self.isolated_names = copy.copy(other.isolated_names)\n self.modified = copy.copy(other.modified)\n self.read = copy.copy(other.read)\n self.deleted = copy.copy(other.deleted)\n self.bound = copy.copy(other.bound)\n self.annotations = copy.copy(other.annotations)\n self.params = copy.copy(other.params)",
"def __copy__(self):\n return GeneratorDataSet(self._inventory, self._data_encoder, self._target_encoder)",
"def rebuild(self):\n self.from_samples(self.samples)",
"def __init__(self, sourceCollection = None):\n\n self._size = 0\n self._modCount = 0\n\n if sourceCollection:\n for item in sourceCollection:\n self.add(item)",
"def copy(self) -> KeyedEffect:\n # getattr(self, '_last_uid_setter', uid_setter_instance) ??\n return deepcopy(self)",
"def copy(self):\n return self.__class__(*self.sets)",
"def _copy_(self):\n return copy.copy(self)",
"def copy(self) -> \"SampleMetadata\":\n return type(self)(\n samples=self.__internal_samples.copy(),\n axis=0,\n metadata=self.metadata,\n name=self.name,\n )",
"def copy(self):\n return super().copy()",
"def copy():\n copy2(per, per_old)",
"def __copy__(self):\n raise NotImplementedError",
"def copy(self): # real signature unknown; restored from __doc__\n pass",
"def copy(self): # real signature unknown; restored from __doc__\n pass",
"def copy(self): # real signature unknown; restored from __doc__\n pass",
"def clone(self):",
"def copy(self):\n return self.__class__(\n self.xs.copy(), self.ys.copy(),\n self.gauge_length,\n self.sample_width,\n self.sample_thickness,\n self.name\n )",
"def _copy(source, track, filter_f=lambda x: True, coef=1000):\n for msg in source:\n if filter_f(msg):\n track.append(msg.copy(time=int(msg.time*coef)))"
] |
[
"0.74078166",
"0.5897654",
"0.5787232",
"0.57613033",
"0.56062573",
"0.558503",
"0.5516049",
"0.5516049",
"0.5508036",
"0.5508036",
"0.5508036",
"0.5491282",
"0.54707116",
"0.53941333",
"0.537508",
"0.53534603",
"0.53087145",
"0.530071",
"0.52822745",
"0.5266203",
"0.5209987",
"0.5204292",
"0.51974195",
"0.5190359",
"0.51871896",
"0.51871896",
"0.51871896",
"0.51414376",
"0.51321363",
"0.5127235"
] |
0.81424654
|
0
|
Add new genome-side read to RISCC genome-side aligned or unaligned reads, or increment the existing one if present.
|
def add_RISCC_read(self, seq, new_position, N_errors=None, read_count=1):
# TODO why are we even using Insertion_position objects here?? Those aren't insertion positions with a start-end, just single positions... But still need to be able to deal with unaligned/multi as well as proper positions.
if not isinstance(new_position, Insertion_position) and new_position not in SPECIAL_POSITIONS.all_undefined:
raise MutantError("RISCC read position %s is unacceptable - must be Insertion_position object or one of %s!"%(
new_position, ', '.join(SPECIAL_POSITIONS.all_undefined)))
# self.RISCC_genome_side_aligned_reads is a position:data dict
if new_position not in SPECIAL_POSITIONS.all_undefined:
try:
# MAYBE-TODO check that the same seq isn't present in a different position?
self.RISCC_genome_side_aligned_reads[new_position][1] += read_count
try: self.RISCC_genome_side_aligned_reads[new_position][2][seq][0] += read_count
except KeyError: self.RISCC_genome_side_aligned_reads[new_position][2][seq] = [read_count, N_errors]
except KeyError:
seq_count_error_dict = {seq: [read_count, N_errors]}
self.RISCC_genome_side_aligned_reads[new_position] = [new_position, read_count, seq_count_error_dict,
SPECIAL_GENE_CODES.not_determined, '?', '?', '?']
# self.RISCC_genome_side_unaligned_reads is a seq:data dict, since the positions aren't usable as keys
else:
try:
self.RISCC_genome_side_unaligned_reads[seq][1] += read_count
            self.RISCC_genome_side_unaligned_reads[seq][2][seq][0] += read_count
except KeyError:
self.RISCC_genome_side_unaligned_reads[seq] = [new_position, read_count, {seq: [read_count, N_errors]},
SPECIAL_GENE_CODES.not_determined, '?', '?', '?']
# Note: adding gene/annotation info for those is implemented in the dataset methods.
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add(self, read):\n self.additionalReads.append(read)\n self._length += 1",
"def add_read(self, read):\n r = Read(read)\n if read not in self.reads:\n self.reads[read] = r\n else:\n self.reads[read].visit_limit += 1\n self.num_reads += 1",
"def add_read(self, new_read): \n if self.sampling:\n self.convert_to_list()\n self.reads.append(new_read)\n self.total+=1",
"def add_read(self, HTSeq_alignment, position=SPECIAL_POSITIONS.unknown, read_count=1, dataset_name=None):\n # TODO instead of taking HTSeq_alignment, this could just take the seq and N_errors, like add_RISCC_read does?\n self._ensure_dataset_None(dataset_name)\n # increment total_read_count, and add read ID to the ID set\n self.total_read_count += read_count\n # figure out if the read is perfect and increment perfect_read_count if yes; return True if perfect else False.\n # TODO may want to come up with a better option than 10 for the \"errors\" of unaligned seqs\n if position in SPECIAL_POSITIONS.all_undefined:\n N_errors = 10\n else:\n N_errors = check_mutation_count_by_optional_NM_field(HTSeq_alignment, negative_if_absent=False)\n # add sequence position/readcount data the detailed dictionary.\n seq = HTSeq_alignment.read.seq\n try: self.sequences_counts_positions_errors[seq][0] += read_count\n except KeyError: self.sequences_counts_positions_errors[seq] = [read_count, position, N_errors]\n if N_errors==0: \n self.perfect_read_count += read_count\n return True\n else:\n return False",
"def add_read(self, HTSeq_alignment, position=SPECIAL_POSITIONS.unknown, read_count=1, dataset_name=None):\n readcount_data_container = self._check_dataset_name_return_data(dataset_name)\n Insertional_mutant.add_read(readcount_data_container, HTSeq_alignment, position, read_count)",
"def add_read_to_vec(self, read, copy=None):\n\t\tfor i,s in enumerate(read.seq):\n\t\t\t# the i-th non-gapped position for ref_seq_id starting at offset read.offset\n\t\t\tgapped_pos = self.refmap.ungapped_to_gapped(read.ref_seq_id, read.offset + i)\n\t\t\tDF.add_to_vec(self, nt=s, positions=[gapped_pos], counts=[read.copy if copy is None else copy])",
"def add_read_to_vec_using_ref(self, read):\n\t\ti = read.offset\n\t\tfor p in self.refmap.gap_map[read.ref_seq_id][read.offset:(read.offset+len(read.seq))]:\n\t\t\ts = self.refmap.fasta[read.ref_seq_id].seq[i]\n\t\t\tif s=='U': s='T'\n\t\t\tif s not in ('A','T','C','G'): s='N'\n\t\t\tDF.add_to_vec(self, nt=s, positions=[p], counts=[read.copy])\n\t\t\ti += 1",
"def add_reads(self, new_reads): \n if self.sampling:\n self.convert_to_list()\n self.reads.extend(new_reads)",
"def improve_best_RISCC_read(self, seq, new_position, N_errors=None, read_count=1, max_distance=MAX_POSITION_DISTANCE):\n # if there are more than one current reads, you're not using improve_best_RISCC_read consistently!\n if len(self.RISCC_genome_side_aligned_reads) > 1:\n raise MutantError(\"Don't try using the improve_best_RISCC_read when keeping more than one read!\")\n # if decided to replace, discard old genome-side read dict and make new one from just the current read data.\n if self._decide_if_replace_read(new_position, max_distance):\n self.RISCC_genome_side_aligned_reads, self.RISCC_genome_side_unaligned_reads = {}, {}\n self.add_RISCC_read(seq, new_position, N_errors, read_count)\n # TODO make this count unaligned/confirming/non-confirming reads, too, instead of keeping all these counts as functions that read the actual mutant data, which will be missing in this case? I did something like that in mutant_Carette.py.",
"def _increment_state(self, bytes_read):\n self._read_state[StateKey.POSITION] += bytes_read",
"def register_read(self):\n self._reads_since_check += 1",
"def parse_insert(self, read_id, alignment_position, read_sequence, qualities):\n # the allele is the anchor + what's being deleted\n allele = self.reference_dictionary[alignment_position] + read_sequence\n\n # record the insert where it first starts\n self.mismatch_count[alignment_position] += 1\n self._update_read_allele_dictionary(read_id, alignment_position + 1, allele, INSERT_ALLELE, max(qualities))\n self._update_insert_dictionary(read_id, alignment_position, read_sequence, qualities)",
"def process(self):\n self.reader += 1",
"def process_read(self, ref, read, ref_offset=0):\n\n if read.alignment.mapping_quality < self.config.min_mapq:\n return\n\n ref_pos = read.alignment.position.position - ref_offset\n read_pos = 0\n # Use set(), as some cigar operations might generate duplicated positions,\n # E.g. for insertions, it extends the candidate positions to\n # [ins_pos - ins_len, ins_pos + ins_len] which might overlap with some\n # nearby mismatches.\n positions = set()\n for cigar in read.alignment.cigar:\n # Break if it reached the end of reference sequence.\n if ref_pos >= len(ref):\n break\n if cigar.operation not in utils.CIGAR_OPS:\n raise ValueError('Unexpected CIGAR operation', cigar, read)\n\n if cigar.operation == cigar_pb2.CigarUnit.ALIGNMENT_MATCH:\n positions.update(\n self._process_align_match(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n ref_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.SEQUENCE_MISMATCH:\n positions.update(\n self._process_seq_mismatch(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n ref_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.INSERT:\n positions.update(\n self._process_insert(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.CLIP_SOFT:\n positions.update(\n self._process_soft_clip(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n elif (cigar.operation == cigar_pb2.CigarUnit.DELETE or\n cigar.operation == cigar_pb2.CigarUnit.SKIP):\n positions.update(\n self._process_delete(cigar, ref, read, ref_pos, read_pos))\n ref_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.SEQUENCE_MATCH:\n ref_pos += cigar.operation_length\n read_pos += cigar.operation_length\n elif (cigar.operation == cigar_pb2.CigarUnit.CLIP_HARD or\n cigar.operation == cigar_pb2.CigarUnit.PAD):\n pass\n\n # Yield positions within the range\n for pos in sorted(positions):\n if pos >= 0 and pos < len(ref):\n yield pos",
"def __call__(self, read1, read2):\n self.add_to_batch(*read1, self.read1_batch, self.index)\n self.add_to_batch(*read2, self.read2_batch, self.index)\n self.index += self.lines_per_row\n if self.index >= self.bufsize:\n self.flush()",
"def add(self, left: int, right: int, put_idx: int) -> None:\n self.write(left + right, put_idx)",
"def add_RISCC_alignment_files_to_data(self, cassette_side_flank_aligned_file, genome_side_aligned_file, IB_fastq_file, \n allowed_IBs=None, IB_cluster_file=None, \n best_genome_side_only=False, ignore_unaligned=False, \n max_allowed_cassette_side_dist=1, max_cassette_side_ratio_to_ignore=100, \n skip_checks=False, removed_mutant_file='/dev/null', quiet=False):\n # TODO finish docstring\n # MAYBE-TODO add option for not including IBs at all, and making the mutant dict by cassette-side alignment position like before?\n # MAYBE-TODO at some point, maybe add full parsing of multiple alignments, to compare their positions to those of \n # unique-aligned cases, rather than just marking them as multiple but treating them as unaligned?\n # Might not be worth the effort, since if we have unique-aligned cases anyway, we can use those.\n # MAYBE-TODO add ignore_cassette, cassette_only options?\n # MAYBE-TODO add collapsed_readcounts option? That doesn't make much sense for paired-end reads.\n\n if self.multi_dataset: raise MutantError(\"add_RISCC_alignment_files_to_data not implemented for multi-datasets!\")\n if self.summary.cassette_end not in SEQ_ENDS:\n raise MutantError(\"Cannot add data from an alignment reader if cassette_end isn't specified! Please set the \"\n +\"summary.cassette_end attribute of this Insertional_mutant_pool_dataset instance to one of %s first.\"%SEQ_ENDS)\n if self.summary.relative_read_direction not in RELATIVE_READ_DIRECTIONS:\n raise MutantError(\"Cannot add data from an alignment reader if relative_read_direction isn't set! \"\n +\"Please set the relative_read_direction attribute of this Insertional_mutant_pool_dataset instance \"\n +\"to one of %s first.\"%RELATIVE_READ_DIRECTIONS)\n\n # read the IB cluster file; make a read_seq:centroid_seq dictionary for fast lookup.\n if IB_cluster_file is not None:\n if type(IB_cluster_file) == dict:\n IB_centroid_to_seqs = IB_cluster_file\n elif IB_cluster_file.endswith('.pickle'):\n IB_centroid_to_seqs = unpickle(IB_cluster_file)\n else:\n raise MutantError(\"Unknown IB_cluster_file format in add_RISCC_alignment_files_to_data - must be .pickle filename \"\n +\"or a dictionary. Value is %s\"%IB_cluster_file)\n IB_seq_to_centroid = invert_listdict_nodups(IB_centroid_to_seqs)\n\n # set up IB checks - return True if IB is in allowed_IBs or if no allowed_IBs was given.\n if allowed_IBs is None: _IB_check = lambda IB: True\n else: _IB_check = lambda IB: IB in allowed_IBs\n\n for (readname, IB_seq, cassette_side_aln, genome_side_aln) in self._parse_3files_parallel(\n IB_fastq_file, cassette_side_flank_aligned_file, genome_side_aligned_file):\n # get the cassette insertion position (as an Insertion_position object)\n # MAYBE-TODO instead of generating cassette_side_position all the time, even with multiple identical reads, \n # check if seq is already present in mutant, or something? 
To save time.\n\n # if the IB isn't in the allowed set, skip this read (unless there is no allowed set, then just keep going)\n if not _IB_check(IB_seq): continue\n\n cassette_side_position = get_insertion_pos_from_flanking_region_pos(cassette_side_aln, self.summary.cassette_end, \n self.summary.relative_read_direction, immutable_position=True)\n if ignore_unaligned and cassette_side_position in SPECIAL_POSITIONS.all_undefined:\n continue\n # TODO should probably still count it\n # grab mutant based on IB (clustered or not)\n try: IB_centroid_seq = IB_seq_to_centroid[IB_seq]\n except NameError: IB_centroid_seq = IB_seq\n except KeyError: raise MutantError(\"IB seq %s not found in cluster dict!\"%IB_seq)\n mutant = self.get_mutant(IB_centroid_seq)\n mutant.add_read(cassette_side_aln, cassette_side_position, read_count=1, dataset_name=None)\n # Parse the genome-side alignment result to figure out position; add that to the mutant\n # MAYBE-TODO make an option for the genome-side reads to be outward from the cassette? Unlikely to be needed.\n genome_side_position = get_RISCC_pos_from_read_pos(genome_side_aln, self.summary.cassette_end, 'inward')\n if genome_side_position in SPECIAL_POSITIONS.all_undefined:\n N_errors = 10\n else:\n N_errors = check_mutation_count_by_optional_NM_field(genome_side_aln, negative_if_absent=False)\n if best_genome_side_only:\n mutant.improve_best_RISCC_read(genome_side_aln.read.seq, genome_side_position, N_errors, read_count=1, \n max_distance=MAX_POSITION_DISTANCE)\n else:\n if not ignore_unaligned or genome_side_position not in SPECIAL_POSITIONS.all_undefined:\n mutant.add_RISCC_read(genome_side_aln.read.seq, genome_side_position, N_errors, read_count=1)\n # MAYBE-TODO if ignore_unaligned is True, do we still want to keep a count of unaligned seqs somehow?\n\n # check that all mutants have consistent cassette positions; remove ones that don't.\n if not skip_checks:\n IBs_to_remove = []\n with open(removed_mutant_file, 'w') as REMOVED_MUTANT_FILE:\n for mutant in self:\n if_remove = mutant.decide_and_check_position(max_allowed_cassette_side_dist, \n ratio_to_ignore=max_cassette_side_ratio_to_ignore, OUTPUT=REMOVED_MUTANT_FILE)\n if if_remove: IBs_to_remove.append(mutant.IB)\n summary_text = (\"Removed %s/%s mutants due to different flanking seq positions in one mutant \"\n +\"(if distance >%s and some are within %sx reads of each other).\")%(\n len(IBs_to_remove), len(self), max_allowed_cassette_side_dist, max_cassette_side_ratio_to_ignore)\n REMOVED_MUTANT_FILE.write(\"SUMMARY: \" + summary_text + '\\n')\n if not quiet: print(summary_text + \" - see %s for details.\"%removed_mutant_file)\n for IB in IBs_to_remove:\n self.remove_mutant(IB)\n\n # TODO do we want to add different read category counts to the summary, or make that stuff properties?\n # MAYBE-TODO it might be good to just generate two separate mutant-sets, normal and cassette, with an option called separate_cassette or something, and print them to separate files - but that's more complicated, and right now I don't have the setup for a single dataset having multiple mutant-sets (although I guess I will have to eventually, for removed mutants etc). Right now I do it in mutant_count_alignments.py, which works but there's a lot of code repetition...",
"def addReader(self, reader):\n # This accesses private variables on purpose\n # pylint: disable=W0212\n reader.__context = current()\n BaseReactor.addReader(self, reader)",
"def _align_single_end_reads(self):\n read_aligner = ReadAligner(self._args.segemehl_bin, self._args.progress)\n if self._file_needs_to_be_created(self._pathcreator.index_path):\n read_aligner.build_index(\n self._pathcreator.ref_seq_path_list,\n self._pathcreator.index_path,\n )\n for read_path, output_path, nomatch_path in zip(\n self._pathcreator.processed_read_paths,\n self._pathcreator.read_alignment_bam_paths,\n self._pathcreator.unaligned_reads_paths,\n ):\n if not self._file_needs_to_be_created(output_path):\n continue\n\n read_aligner.run_alignment(\n read_path,\n self._pathcreator.index_path,\n self._pathcreator.ref_seq_path_list,\n output_path,\n nomatch_path,\n int(self._args.processes),\n int(self._args.segemehl_accuracy),\n float(self._args.segemehl_evalue),\n self._args.split,\n paired_end=False,\n )",
"def set_book_read(self, id, read):\n\n try:\n with self._db as db:\n cur = db.cursor()\n cur.execute('UPDATE books SET read = ? WHERE rowid = ?', (read, id))\n if not cur.rowcount:\n raise BookError('Tried to modify book that doesn\\'t exist')\n except sqlite3.Error as e:\n raise BookError(f'Error setting book {id} to read={read}') from e",
"def add_genome(self, genome):\n self.genomes.append(genome)",
"def simulate_read(self):\n\n fastafile = ps.FastaFile(self.genome_fa)\n # left split read\n\n insert = int(np.random.normal(self.insert_size, (self.insert_size / 12), 1))\n start = int(np.random.randint(self.chr_pos_start, (self.chr_pos_end + 1)))\n left_end = start + self.read_length\n total_end = start + int(np.round(insert))\n right_start = total_end - self.read_length\n if total_end > self.chr_pos_end:\n # split read scenario or insert spanning split read scenario\n if left_end > self.chr_pos_end:\n # left read spanning split read scenario\n # left_read\n left_dntps = self.chr_pos_end - start\n right_dntps = self.read_length - left_dntps\n\n # the error could be here\n left_split_read = fastafile.fetch(self.chr, start, self.chr_pos_end)\n right_split_read = fastafile.fetch(self.chr, self.chr_pos_start, (self.chr_pos_start + right_dntps))\n left_read = left_split_read + right_split_read\n\n # right_read\n right_start = self.chr_pos_start + int(round(self.insert_size - left_dntps - self.read_length))\n right_read = fastafile.fetch(self.chr, right_start, (right_start + self.read_length))\n\n # assertion to check the error here\n\n common_id = \"%s|%s|%s:%s-%s:%s|%s:%s|1|%s\" % (\n self.read_number,\n self.chr,\n start,\n self.chr_pos_end,\n self.chr_pos_start,\n (self.chr_pos_start + right_dntps),\n right_start,\n (right_start + self.read_length),\n self.circle_id,\n )\n\n else:\n if right_start > self.chr_pos_end:\n # insert spanning split read scenario\n left_read = fastafile.fetch(self.chr, start, (start + self.read_length))\n right_start = self.chr_pos_start + (right_start - self.chr_pos_end)\n right_read = fastafile.fetch(self.chr, right_start, (right_start + self.read_length))\n common_id = \"%s|%s|%s:%s|%s:%s|3|%s\" % (\n self.read_number,\n self.chr,\n start,\n (start + self.read_length),\n right_start,\n (right_start + self.read_length),\n self.circle_id,\n )\n else:\n # right split read scenario\n assert right_start <= self.chr_pos_end\n assert (right_start + self.read_length) > self.chr_pos_end\n left_read = fastafile.fetch(self.chr, start, (start + self.read_length))\n\n # compute right dntps\n left_dntps = self.chr_pos_end - right_start\n right_dntps = self.read_length - left_dntps\n left_split_read = fastafile.fetch(self.chr, right_start, self.chr_pos_end)\n right_split_read = fastafile.fetch(self.chr, self.chr_pos_start, (self.chr_pos_start + right_dntps))\n right_read = left_split_read + right_split_read\n common_id = \"%s|%s|%s:%s|%s:%s-%s:%s|2|%s\" % (\n self.read_number,\n self.chr,\n start,\n (start + self.read_length),\n right_start,\n self.chr_pos_end,\n self.chr_pos_start,\n (self.chr_pos_start, right_dntps),\n self.circle_id,\n )\n\n else:\n # non split read scenario\n left_read = fastafile.fetch(self.chr, start, (start + self.read_length))\n # correct right read start\n right_read = fastafile.fetch(self.chr, right_start, (right_start + self.read_length))\n common_id = \"%s|%s|%s:%s|%s:%s|0|%s\" % (\n self.read_number,\n self.chr,\n start,\n (start + self.read_length),\n right_start,\n (right_start + self.read_length),\n self.circle_id,\n )\n\n return (right_read, left_read, common_id)",
"def align_reads(self):\n self._test_folder_existance(\n self._pathcreator.required_read_alignment_folders()\n )\n assert self._args.paired_end in [True, False]\n self._pathcreator.set_ref_seq_paths_by_species()\n self._ref_seq_files = self._pathcreator.get_ref_seq_files()\n self._pathcreator.set_ref_seq_path_list()\n self._test_align_file_existance()\n if not self._args.paired_end:\n # Single end reads\n self._read_files = self._pathcreator.get_read_files()\n self._lib_names = self._pathcreator.get_lib_names_single_end()\n self._pathcreator.set_read_files_dep_file_lists_single_end(\n self._read_files, self._lib_names\n )\n self._prepare_reads_single_end()\n print(f\"controller align_single_end_reads start {datetime.now()}\")\n self._align_single_end_reads()\n print(f\"controller align_single_end_reads stop {datetime.now()}\")\n else:\n # Paired end reads\n self._read_file_pairs = self._pathcreator.get_read_file_pairs()\n self._lib_names = self._pathcreator.get_lib_names_paired_end()\n self._pathcreator.set_read_files_dep_file_lists_paired_end(\n self._read_file_pairs, self._lib_names\n )\n self._prepare_reads_paired_end()\n print(f\"controller align_paired_end_reads start {datetime.now()}\")\n self._align_paired_end_reads()\n print(f\"controller align_paired_end_reads stop {datetime.now()}\")\n print(\n f\"controller generate_read_alignment_stats start {datetime.now()}\"\n )\n self._generate_read_alignment_stats(\n self._lib_names,\n self._pathcreator.read_alignment_bam_paths,\n self._pathcreator.unaligned_reads_paths,\n self._pathcreator.read_alignments_stats_path,\n self._args.paired_end,\n )\n print(f\"controller generate_read_alignment_stats stop {datetime.now()}\")\n if self._args.crossalign_cleaning:\n self._remove_crossaligned_reads()\n\n if self._args.paired_end:\n # Build a bam file containing fragments merged from read\n # pairs\n if not self._args.no_fragment_building:\n fragments = True\n # sort the bam files by name and sam tag hit index to\n # accelerate fragment building\n print(\n f\"controller sort bams by name and index start {datetime.now()}\"\n )\n self._sort_bams_by_name_and_index()\n print(\n f\"controller sort bams by name and index end {datetime.now()}\"\n )\n # build the fragments bam file\n print(f\"controller build_fragments start {datetime.now()}\")\n self._build_fragments()\n print(f\"controller build_fragments stop {datetime.now()}\")\n # generate fragment alignment stats\n print(\n f\"controller generate_fragment_alignmnet_stats start {datetime.now()}\"\n )\n self._generate_read_alignment_stats(\n self._lib_names,\n self._pathcreator.aligned_fragments_bam_paths,\n self._pathcreator.unaligned_reads_paths,\n self._pathcreator.fragment_alignments_stats_path,\n self._args.paired_end,\n fragments,\n )\n print(\n f\"controller generate_fragment_alignmnet_stats stop {datetime.now()}\"\n )\n # write fragment stats table\n print(\n f\"controller write_alignment_stats_table fragments start {datetime.now()}\"\n )\n self._write_alignment_stat_table(\n self._pathcreator.fragment_alignments_stats_path,\n self._pathcreator.fragment_alignment_stats_table_path,\n self._pathcreator.fragment_alignment_stats_table_transposed_path,\n fragments,\n )\n print(\n f\"controller write_alignment_stats_table fragments stop {datetime.now()}\"\n )\n print(\n f\"controller write_alignment_stats_table reads start {datetime.now()}\"\n )\n self._write_alignment_stat_table(\n self._pathcreator.read_alignments_stats_path,\n self._pathcreator.read_alignment_stats_table_path,\n 
self._pathcreator.read_alignment_stats_table_transposed_path,\n )\n print(\n f\"controller write_alignment_stats_table reads stop {datetime.now()}\"\n )",
"def _increment_state(self, increment):\n self._read_state[StateKey.POSITION] += increment",
"def mergeChainedAlignedSegments(chainedAlignedSegments, refSequence, readSequence):\n cAR = pysam.AlignedSegment()\n aR = chainedAlignedSegments[0]\n cAR.query_name = aR.query_name\n \n #Parameters we don't and therefore set properly\n #cAR.flag = aR.flag\n #cAR.mapq = aR.mapq\n #cAR.mrnm = 0\n #cAR.mpos=0\n #cAR.isize=0\n #cAR.qual = \"<\" * len(readSequence)\n #cAR.tags = aR.tags \n cAR.next_reference_id = -1\n cAR.reference_start = aR.reference_start #Reference start\n cAR.is_reverse = aR.is_reverse\n cAR.query_sequence = reverseComplement(readSequence) if cAR.is_reverse else readSequence\n cAR.reference_id = aR.reference_id\n cigarList = []\n pPos = aR.reference_start\n #Iterate from the other end of the sequence if reversed\n pQPos = -(len(readSequence)-1) if cAR.is_reverse else 0 \n \n for aR in chainedAlignedSegments:\n assert cAR.is_reverse == aR.is_reverse\n #Add a deletion representing the preceding unaligned reference positions\n assert aR.reference_start >= pPos\n if aR.reference_start > pPos:\n cigarList.append((2, aR.reference_start - pPos))\n pPos = aR.reference_start \n \n #Add an insertion representing the preceding unaligned read positions\n #make it a soft clip if it is the first chained alignment\n qPos = getFirstNonClippedPositionInRead(aR, readSequence)\n assert qPos >= pQPos\n if qPos > pQPos:\n cigarList.append((4 if aR == chainedAlignedSegments[0] else 1, qPos - pQPos)) \n pQPos = qPos\n \n #Add the operations of the cigar, filtering hard and soft clipping\n for op, length in aR.cigar:\n assert op in (0, 1, 2, 4, 5)\n if op in (0, 1, 2):\n cigarList.append((op, length))\n if op in (0, 2): #Is match or deletion\n pPos += length\n if op in (0, 1): #Is match or insertion\n pQPos += length\n \n assert pPos <= len(refSequence)\n \n #Set reference end coordinate (which is exclusive)\n #cAR.reference_end = pPos #We don't do this because it is set by cigar string\n \n #Now add any trailing, necessary soft clipping\n if cAR.is_reverse:\n assert pQPos <= 1\n if pQPos < 1:\n cigarList.append((4, -pQPos + 1))\n else:\n assert pQPos <= len(readSequence)\n if pQPos < len(readSequence):\n cigarList.append((4, len(readSequence) - pQPos))\n \n cAR.cigar = tuple(cigarList)\n \n #Check ops\n for op, length in cAR.cigar: #We should have no hard clipped ops\n assert op in (0, 1, 2, 4)\n \n #Reference sequence check coordinates\n assert sum([ length for op, length in cigarList if op in (0, 2)]) == cAR.reference_end - cAR.reference_start\n assert cAR.reference_start >= 0 and cAR.reference_start < len(refSequence)\n assert cAR.reference_end >= 0 and cAR.reference_end <= len(refSequence)\n \n #Read sequence check coordinates\n assert cAR.query_alignment_start >= 0 and cAR.query_alignment_start < len(readSequence)\n assert cAR.query_alignment_end >= 0 and cAR.query_alignment_end <= len(readSequence)\n assert cAR.query_alignment_start + sum([ length for op, length in cigarList if op in (0, 1)]) == cAR.query_alignment_end\n \n return cAR",
"def find_read_candidates(self, read):\n self.read_allele_dictionary = {}\n ref_alignment_start = read.reference_start\n ref_alignment_stop = self.get_read_stop_position(read)\n # if the region has reached a very high coverage, we are not going to parse through all the reads\n if self.coverage[ref_alignment_start] > 300:\n return False\n cigar_tuples = read.cigartuples\n read_sequence = read.query_sequence\n read_id = read.query_name\n read_quality = read.query_qualities\n ref_sequence = self.fasta_handler.get_sequence(chromosome_name=self.chromosome_name,\n start=ref_alignment_start,\n stop=ref_alignment_stop+10)\n\n self.read_info[read_id] = (ref_alignment_start, ref_alignment_stop, read.mapping_quality, read.is_reverse)\n for pos in range(ref_alignment_start, ref_alignment_stop):\n self.read_id_by_position[pos].append((read_id, ref_alignment_start, ref_alignment_stop))\n for i, ref_base in enumerate(ref_sequence):\n self.reference_dictionary[ref_alignment_start + i] = ref_base\n\n # read_index: index of read sequence\n # ref_index: index of reference sequence\n read_index = 0\n ref_index = 0\n found_valid_cigar = False\n for cigar in cigar_tuples:\n cigar_code = cigar[0]\n length = cigar[1]\n # get the sequence segments that are effected by this operation\n ref_sequence_segment = ref_sequence[ref_index:ref_index+length]\n read_quality_segment = read_quality[read_index:read_index+length]\n read_sequence_segment = read_sequence[read_index:read_index+length]\n\n if cigar_code != 0 and found_valid_cigar is False:\n read_index += length\n continue\n found_valid_cigar = True\n\n # send the cigar tuple to get attributes we got by this operation\n ref_index_increment, read_index_increment = \\\n self.parse_cigar_tuple(cigar_code=cigar_code,\n length=length,\n alignment_position=ref_alignment_start+ref_index,\n ref_sequence=ref_sequence_segment,\n read_sequence=read_sequence_segment,\n read_id=read_id,\n quality=read_quality_segment)\n\n # increase the read index iterator\n read_index += read_index_increment\n ref_index += ref_index_increment\n\n # after collecting all alleles from reads, update the global dictionary\n for position in self.read_allele_dictionary.keys():\n if position < self.region_start_position or position > self.region_end_position:\n continue\n self.rms_mq[position] += read.mapping_quality * read.mapping_quality\n for record in self.read_allele_dictionary[position]:\n # there can be only one record per position in a read\n allele, allele_type = record\n\n if allele_type == MATCH_ALLELE or allele_type == MISMATCH_ALLELE:\n # If next allele is indel then group it with the current one, don't make a separate one\n if position + 1 <= ref_alignment_stop and position + 1 in self.read_allele_dictionary.keys():\n next_allele, next_allele_type = list(self.read_allele_dictionary[position + 1].keys())[0]\n if next_allele_type == INSERT_ALLELE or next_allele_type == DELETE_ALLELE:\n continue\n self.positional_read_info[position].append(\n (read_id, ref_alignment_start, ref_alignment_stop, read.mapping_quality))\n self._update_positional_allele_dictionary(read_id, position, allele, allele_type,\n read.mapping_quality)\n else:\n # it's an insert or delete, so, add to the previous position\n self.positional_read_info[position-1].append(\n (read_id, ref_alignment_start, ref_alignment_stop, read.mapping_quality))\n self._update_positional_allele_dictionary(read_id, position-1, allele, allele_type,\n read.mapping_quality)\n return True",
"def shift(reads: List[reads_pb2.Read], op: cigar_pb2.CigarUnit.Operation,\n index) -> None:\n\n op_char = cigar_utils.CIGAR_OPS_TO_CHAR[op]\n for read in reads:\n expanded_cigar = get_expanded_cigar(read)\n if index < len(read.aligned_sequence) and index >= len(expanded_cigar):\n raise ValueError(\n 'Index len: %d, cigar len: %d, seq len: %d, molecule: %s' %\n (index, len(expanded_cigar), len(\n read.aligned_sequence), read.fragment_name))\n if index < len(read.aligned_sequence) and expanded_cigar[index] != op_char:\n seq = read.aligned_sequence\n read.aligned_sequence = seq[:index] + dc_constants.GAP_OR_PAD + seq[index:]\n new_expanded_cigar = expanded_cigar[:\n index] + dc_constants.GAP_OR_PAD + expanded_cigar[\n index:]\n struct_utils.set_string_field(read.info, 'expanded_cigar',\n new_expanded_cigar)",
"def test__add_read(self):\n # using fake HTSeq alignment class from deepseq_utilities; defining one perfect and one imperfect alignment\n # note: the detailed mutation-counting methods are imported from deepseq_utilities and unit-tested there.\n position = Insertion_position('chr1', '+', position_before=3)\n perfect_aln = Fake_HTSeq_aln(seq='AAA', optional_field_data={'NM':0})\n imperfect_aln = Fake_HTSeq_aln(seq='GGG', optional_field_data={'NM':1})\n # adding perfect and imperfect to mutant increases all the counts as expected\n mutant = Insertional_mutant(insertion_position=position)\n mutant.add_read(perfect_aln, read_count=3, position=position)\n assert mutant.total_read_count == mutant.perfect_read_count == 3\n assert mutant.sequences_counts_positions_errors == {'AAA': [3, position, 0]}\n mutant.add_read(imperfect_aln, read_count=1, position=position)\n assert mutant.total_read_count == 4\n assert mutant.perfect_read_count == 3\n assert mutant.sequences_counts_positions_errors == {'AAA': [3, position, 0], 'GGG': [1, position, 1]}\n # same for a multi-dataset mutant - this time we need to specify which dataset we're adding to\n mutant = Insertional_mutant_multi_dataset(insertion_position=position)\n assert len(mutant.by_dataset) == 0\n mutant.add_read(perfect_aln, read_count=3, dataset_name='d1', position=position)\n assert len(mutant.by_dataset) == 1\n assert mutant.by_dataset['d1'].total_read_count == mutant.by_dataset['d1'].perfect_read_count == 3\n assert mutant.by_dataset['d1'].sequences_counts_positions_errors == {'AAA': [3, position, 0]}\n mutant.add_read(imperfect_aln, read_count=1, dataset_name='d1', position=position)\n assert len(mutant.by_dataset) == 1\n assert mutant.by_dataset['d1'].total_read_count == 4\n assert mutant.by_dataset['d1'].perfect_read_count == 3\n assert mutant.by_dataset['d1'].sequences_counts_positions_errors == {'AAA': [3, position, 0], 'GGG': [1, position, 1]}\n # now adding a read to another dataset - nothing changes in dataset d1, but we have new dataset d2 numbers\n mutant.add_read(imperfect_aln, read_count=1, dataset_name='d2', position=position)\n assert len(mutant.by_dataset) == 2\n assert mutant.by_dataset['d1'].total_read_count == 4\n assert mutant.by_dataset['d2'].total_read_count == 1\n assert mutant.by_dataset['d2'].perfect_read_count == 0\n assert mutant.by_dataset['d2'].sequences_counts_positions_errors == {'GGG': [1, position, 1]}\n # it should be impossible to add a read to a specific dataset in a single-dataset mutant \n mutant = Insertional_mutant(insertion_position=position)\n self.assertRaises(MutantError, mutant.add_read, perfect_aln, read_count=3, dataset_name='d1')\n # it should be impossible to add a read to a multi-dataset mutant without giving a dataset_name\n mutant = Insertional_mutant_multi_dataset(insertion_position=position)\n self.assertRaises(MutantError, mutant.add_read, perfect_aln, read_count=3)",
"def addReader(self, reader):\n if reader in self._readers:\n # Don't add the reader if it's already there\n return\n self._readers[reader] = True\n fd = reader.fileno()\n if fd in self._fds:\n (_, writer) = self._fds[fd]\n self._fds[fd] = (reader, writer)\n if writer:\n # We already registered this fd for write events,\n # update it for read events as well.\n self._ioloop.update_handler(fd, IOLoop.READ | IOLoop.WRITE)\n else:\n with NullContext():\n self._fds[fd] = (reader, None)\n self._ioloop.add_handler(fd, self._invoke_callback,\n IOLoop.READ)",
"def _align_paired_end_reads(self):\n read_aligner = ReadAligner(self._args.segemehl_bin, self._args.progress)\n if self._file_needs_to_be_created(self._pathcreator.index_path):\n read_aligner.build_index(\n self._pathcreator.ref_seq_path_list,\n self._pathcreator.index_path,\n )\n for read_path_pair, output_path, nomatch_path in zip(\n self._pathcreator.processed_read_path_pairs,\n self._pathcreator.read_alignment_bam_paths,\n self._pathcreator.unaligned_reads_paths,\n ):\n if not self._file_needs_to_be_created(output_path):\n continue\n read_aligner.run_alignment(\n read_path_pair,\n self._pathcreator.index_path,\n self._pathcreator.ref_seq_path_list,\n output_path,\n nomatch_path,\n int(self._args.processes),\n int(self._args.segemehl_accuracy),\n float(self._args.segemehl_evalue),\n self._args.split,\n paired_end=True,\n )"
] |
[
"0.69418913",
"0.6814255",
"0.6499232",
"0.64566016",
"0.6419348",
"0.6335957",
"0.6130465",
"0.5879859",
"0.58513033",
"0.5830645",
"0.56137127",
"0.5548496",
"0.54873645",
"0.5420207",
"0.5263395",
"0.5173211",
"0.5139469",
"0.51312757",
"0.5119528",
"0.51152825",
"0.50759816",
"0.50720227",
"0.5063757",
"0.50040984",
"0.49611533",
"0.49126738",
"0.49049303",
"0.48926616",
"0.48626488",
"0.48575178"
] |
0.70845866
|
0
|
Compare the read to the current best one; replace the best one if this is better. "Better" means furthest away from the cassette-side read, while still remaining on the same chromosome and strand, and within max_distance of it. If both reads are on a different chromosome or strand than the cassette-side position (both are bad, really), the first one is kept. If there is no current read, the new one is always used.
|
def improve_best_RISCC_read(self, seq, new_position, N_errors=None, read_count=1, max_distance=MAX_POSITION_DISTANCE):
# if there are more than one current reads, you're not using improve_best_RISCC_read consistently!
if len(self.RISCC_genome_side_aligned_reads) > 1:
raise MutantError("Don't try using the improve_best_RISCC_read when keeping more than one read!")
# if decided to replace, discard old genome-side read dict and make new one from just the current read data.
if self._decide_if_replace_read(new_position, max_distance):
self.RISCC_genome_side_aligned_reads, self.RISCC_genome_side_unaligned_reads = {}, {}
self.add_RISCC_read(seq, new_position, N_errors, read_count)
# TODO make this count unaligned/confirming/non-confirming reads, too, instead of keeping all these counts as functions that read the actual mutant data, which will be missing in this case? I did something like that in mutant_Carette.py.
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _decide_if_replace_read(self, new_position, max_distance):\n # if there are no current reads, add new one\n if not len(self.RISCC_genome_side_aligned_reads): return True\n # if new read isn't \"confirming\", it can't be better\n if not self._if_confirming_read(new_position, max_distance): return False\n # if the new one is \"confirming\" and the old one isn't, new one has to be better\n old_position = self.RISCC_genome_side_aligned_reads.values()[0][0]\n if not self._if_confirming_read(old_position, max_distance): return True\n # if both the old and new position meet the basic conditions, pick the highest-distance one\n # TODO what about directionality and weird cases?\n new_dist = abs(new_position.min_position - self.position.min_position)\n old_dist = abs(old_position.min_position - self.position.min_position)\n if new_dist > old_dist: return True\n else: return False",
"def get_best_read_position(ref_genome, read, positions, thresh):\n least = 100\n best_pos = None\n for p in positions:\n num_mismatches = get_num_mismatches(read, ref_genome, p)\n if num_mismatches < thresh and num_mismatches < least:\n least = num_mismatches\n best_pos = p\n\n return best_pos",
"def min_best_case(new_node, old_node):\n return new_node.g_val[0] < old_node.g_val[0]",
"def remove_cds_and_remap_reads(self, cds_aln):\n super(GreedySolver, self).remove_cds_and_remap_reads(cds_aln)\n # Dictionary where key is read_id and value is cds alignment to which it maps.\n # If it does not map to any cds alignment then value is None.\n new_read_mappings = {}\n\n for aln_reg in cds_aln.aligned_regions.values():\n if aln_reg.active:\n # Find alternative cds alignment with highest coverage\n best_alt_cds_aln = None\n for alt_cds_aln in self._cds_aln_container.read2cds[aln_reg.read_id]:\n if best_alt_cds_aln == None or self._get_coverage(alt_cds_aln) > self._get_coverage(best_alt_cds_aln): \n best_alt_cds_aln = alt_cds_aln\n # Activate it in best alternative cds alignment (if there is one)\n if (best_alt_cds_aln != None):\n best_alt_cds_aln.aligned_regions[aln_reg.read_id].active = True\n # Add mapping to output dictionary\n new_read_mappings[aln_reg.read_id] = best_alt_cds_aln\n\n # Delete original cds alignment\n del self._cds_aln_container.cds_repository[cds_aln.cds]\n # Remove original cds alignment from read2cds\n for cds_alns in self._cds_aln_container.read2cds.values():\n if cds_aln in cds_alns: cds_alns.remove(cds_aln)\n\n # Force recalculation of coverage for updated cds alignments by forgeting coverage\n for updated_cds_aln in set(filter(lambda x: x != None, new_read_mappings.values())):\n del self._coverages[updated_cds_aln]\n\n return new_read_mappings",
"def get_best(self) -> Chromosome:\n if not (self._best_chromosome is None): # if the best chromosome is unchanged since the last calculation\n return self._best_chromosome\n\n best = None\n best_fitness = None\n\n for chromosome in self._population:\n chromosome_fitness = chromosome.get_fitness()\n\n if best_fitness is None or self._is_fitter(chromosome_fitness, best_fitness):\n best = chromosome\n best_fitness = chromosome_fitness\n\n return best",
"def is_better(self, curr, best, **kwargs):\r\n score_threshold = kwargs.pop('score_threshold', 1e-3)\r\n relative_eps = 1.0 + score_threshold\r\n return curr >= best*relative_eps",
"def best_last_option(self):\n \n # get essential values\n board = self.get_game_space()\n affinity = self.get_affinity()\n \n # pick the right check for the game we are playing\n if isinstance(board, Gomoku):\n \n # get all possible blocks to make a move in\n winning_blocks = board.get_winning_blocks(affinity)\n print('total winning blocks:'+str(len(winning_blocks)))\n best_blocks = []\n best_block = None\n\n # find the largest blocks to place a stone in\n for block in winning_blocks:\n if affinity == BLUE_TILE():\n if len(best_blocks) == 0: best_blocks.append(block)\n elif len(block.blue) > len(best_blocks[0].blue):\n best_blocks = []\n best_blocks.append(block)\n elif len(block.blue) == len(best_blocks[0].blue):\n best_blocks.append(block)\n elif affinity ==RED_TILE():\n if len(best_blocks) == 0: best_blocks.append(block)\n if len(block.red) > len(best_blocks[0].red):\n best_blocks = []\n best_blocks.append(block)\n elif len(block.red) == len(best_blocks[0].red):\n best_blocks.append(block)\n\n # find the best block to place a stone in\n for block in best_blocks:\n if best_block is None: best_block = block \n elif block.tiles[0][0] <= best_block.tiles[0][0]: \n if (block.tiles[0][1] != block.tiles[1][1]):\n if block.direction == 'vertical':\n if block.tiles[WINNING_ROW_SIZE()-1][1] >= best_block.tiles[WINNING_ROW_SIZE()-1][1]:\n if affinity == RED_TILE(): \n if len(block.red) >= len(best_block.red):\n print('considered block:'+str(block.tiles))\n best_block = block \n if affinity == BLUE_TILE(): \n if len(block.blue) >= len(best_block.blue):\n print('considered block:'+str(block.tiles))\n best_block = block\n else:\n if block.tiles[0][1] >= best_block.tiles[0][1]:\n if affinity == RED_TILE(): \n if len(block.red) >= len(best_block.red):\n print('considered block:'+str(block.tiles))\n best_block = block \n if affinity == BLUE_TILE(): \n if len(block.blue) >= len(best_block.blue):\n print('considered block:'+str(block.tiles))\n best_block = block \n else:\n if block.tiles[0][1] >= best_block.tiles[0][1] and block.tiles[1][0] <= best_block.tiles[1][0]:\n if affinity == RED_TILE(): \n if len(block.red) >= len(best_block.red):\n print('considered block:'+str(block.tiles))\n best_block = block \n if affinity == BLUE_TILE(): \n if len(block.blue) >= len(best_block.blue):\n print('considered block:'+str(block.tiles))\n best_block = block \n\n # find the best move to make out of the best block \n # print('best block:'+str(best_block.tiles))\n best_move = (7,-1)\n for tile_i in range(len(best_block.tiles)):\n tile = best_block.tiles[tile_i]\n next_tile = None\n prev_tile = None \n if tile_i+1 in range(len(best_block.tiles)):\n next_tile = best_block.tiles[tile_i+1]\n if tile_i-1 in range(len(best_block.tiles)):\n prev_tile = best_block.tiles[tile_i-1]\n if board.get_tile(tile[0],tile[1]) == BLANK_TILE():\n if prev_tile is not None and next_tile is None:\n if board.get_tile(prev_tile[0],prev_tile[1]) == affinity:\n if tile[0] <= best_move[0]: \n if tile[1] >= tile[1]:\n best_move = tile \n elif next_tile is not None and prev_tile is None:\n if board.get_tile(next_tile[0],next_tile[1]) == affinity:\n if tile[0] <= best_move[0]: \n if tile[1] >= tile[1]:\n best_move = tile \n elif next_tile is not None and prev_tile is not None:\n if board.get_tile(prev_tile[0],prev_tile[1]) == affinity or \\\n board.get_tile(next_tile[0],next_tile[1]) == affinity:\n if tile[0] <= best_move[0]: \n if tile[1] >= tile[1]:\n best_move = tile \n \n return best_move",
"def get_reranker_best(self):\n if len(self.parses):\n return min(self, key=lambda parse: parse.reranker_rank)\n else:\n return None",
"def compareToLocalBest(self):\n if self.functionValue> self.bestXYZ[2]:\n self.bestXYZ[2] = self.functionValue\n self.bestXYZ[0:2] = self.position\n self.bestXYZ = np.array(self.bestXYZ)",
"def best_genome(self) -> Genome:\n return self._candidate",
"def best_match(beer):\n # get a list of donuts that match sugar content for beer\n candidates = get_candidates(beer)\n span = tracer.current_span()\n span.set_tag('donuts.candidates', candidates)\n\n # send the remaining candidates to our taster and pick the best\n max_score = -1\n best_match = None\n\n for candidate in candidates:\n try:\n resp = requests.get(\n \"http://taster:5001/taste\",\n params={\"beer\": beer.name, \"donut\": candidate},\n timeout=2,\n )\n except requests.exceptions.Timeout:\n continue\n\n score = resp.json()[\"score\"]\n if score > max_score:\n max_score = score\n best_match = candidate\n\n return best_match",
"def best_bat(self):\n\n i = 0\n j = 0\n for i in range(self.NP):\n if self.Fitness[i] < self.Fitness[j]:\n j = i\n for i in range(self.D):\n self.best[i] = self.Sol[j][i]\n self.f_min = self.Fitness[j]",
"def get_best_fitness(self):\n f = max(self.characters, key=operator.attrgetter('fitness'))\n self.best_fitness = round(f.fitness, 3)\n self.best_candidate = f",
"def min_worst_case(new_node, old_node):\n return new_node.g_val[1] < old_node.g_val[1]",
"def gbetter(res1,res2):\n \n better = -1 # default unless proven wrong \n dbic = 0 # same to start with\n \n rms1,noise1,par1 = res1.get('rms'),res1.get('noise'),res1.get('par')\n rms2,noise2,par2 = res2.get('rms'),res2.get('noise'),res2.get('par')\n \n # Calculate Bayesian Information Criterion (BIC)\n # lower BICs are better\n bic1 = utils.bayesinfocrit(res1)\n bic2 = utils.bayesinfocrit(res2)\n dbic = bic1-bic2\n\n # Solution 1 is better\n if dbic <= 0:\n better = 0\n # Solution 2 is better\n if dbic > 0 :\n better = 1\n\n return better,dbic\n\n # ---------- OLD CODE, NOT USED ANYMORE ----------\n \n # In case either one is -1 (bad)\n if par1 is not None and par2 is not None:\n if (rms1 == -1) and (rms2 != -1): \n better = 1\n if (rms1 != -1) and (rms2 == -1): \n better = 0 \n if (rms1 == -1) and (rms2 == -1): \n better = -1 \n if (rms1 == -1) or (rms2 == -1): \n return better,dbic\n if (len(par1) < 3) and (len(par2) >= 3): \n better = 1 \n if (len(par2) < 3) and (len(par1) >= 3): \n better = 0 \n if (len(par1) < 3) or (len(par2) < 3): \n return better,dbic\n\n # One is bad, second is better\n if par1 is None:\n return -1,dbic\n \n # Two is bad, first is better \n if par2 is None:\n return -1,dbic\n \n drms1 = rms1-noise1 \n drms2 = rms2-noise2 \n n1 = len(par1)/3 \n n2 = len(par2)/3 \n \n # Clear cut, rms better, n equal or less \n if (drms1 < drms2) and (n1 <= n2): \n better = 0 \n if (drms1 > drms2) and (n1 >= n2): \n better = 1 \n \n # RMS same, N different \n if (drms1 == drms2) and (n1 <= n2): \n better = 0 \n if (drms1 == drms2) and (n1 > n2): \n better = 1 \n \n # Mixed bag, lower RMS but higher N\n if (drms1 < drms2) and (n1 > n2): \n ddrms = drms2-drms1 \n rdrms = ddrms/drms2 # ratio compared to worse one \n dn = n1-n2 \n \n better = 1 # default \n if (dn == 1) and (rdrms > 0.2) : \n better = 0 \n if (dn == 2) and (rdrms > 0.5) : \n better = 0 \n if (dn == 3) and (rdrms > 1.0) : \n better = 0 \n if (dn >= 4) and (rdrms > 2.0) : \n better = 0 \n \n if (drms2 < drms1) and (n2 > n1): \n ddrms = drms1-drms2 \n rdrms = ddrms/drms1 # ratio compared to worse one \n dn = n2-n1 \n \n better = 0 # default \n if (dn == 1) and (rdrms > 0.2) : \n better = 1 \n if (dn == 2) and (rdrms > 0.5) : \n better = 1 \n if (dn == 3) and (rdrms > 1.0) : \n better = 1 \n if (dn >= 4) and (rdrms > 2.0) : \n better = 1 \n \n return better,dbic",
"def check_solution(self, new_protein):\n new_score = new_protein.getscore()\n old_value = self.best_value\n\n if new_score >= old_value:\n self.best_solution = new_protein\n self.best_value = new_score",
"def apply_test_if_better(self):\n\n if self.__genes_test is None or self.__fitness_test is None:\n raise ValueError(\"Test values should not be None.\")\n\n # if test is better\n if self.__fitness_test < self.__fitness:\n self.genes = self.__genes_test\n self.__fitness = self.__fitness_test\n\n self.__genes_test = None\n self.__fitness_test = None\n\n return True\n\n # if original is better\n else:\n self.__genes_test = None\n self.__fitness_test = None\n\n return False",
"def is_better(self, other_model_results):\n buffer_pct = specs.TUNING_BUFFER_PCT\n return self._harvest_stats_dict[buffer_pct].is_better(\n other_model_results._harvest_stats_dict[buffer_pct])",
"def get_best(self):\n scores, ids = self.sort_best()\n return scores[1], ids[1]",
"def it_got_better(new_metric_val, old_metric_val, metric):\n to_maximize = is_metric_to_maximize(metric)\n if to_maximize:\n got_better = new_metric_val > old_metric_val\n else:\n got_better = new_metric_val < old_metric_val\n return got_better",
"def test_close_goes_first_on_loading(self):\n st, frontend_setup = self.get_st_and_fill_frontends()\n closest = np.argmin(frontend_setup['remoteness'])\n len_from_main_st = len(st.get_array(self.run_id, self.target))\n\n for sf_i, sf in enumerate(st.storage):\n st_compare = st.new_context()\n st_compare.storage = [sf]\n len_from_compare = len(st_compare.get_array(self.run_id,\n self.target))\n if sf_i == closest:\n self.assertEqual(len_from_compare, len_from_main_st)\n # else:\n # self.assertNotEqual(len_from_compare, len_from_main_st)",
"def getBestOption(self):\n if len(self.Data) < 1:\n return None\n else:\n bestR = max(self.Data.items(), key=lambda x: x[1]['SPat'].I)\n return bestR[1]",
"def is_new_node_better(self, new_node, old_node, objective):\n switcher = {\n 'min_best_case': self.min_best_case,\n 'min_worst_case': self.min_worst_case\n }\n\n compare_func = switcher[objective]\n return compare_func(new_node, old_node)",
"def _is_better_than(self, metric_name, new, ref):\n if metric_name == f'{self.name}_mean':\n return ref is None or new > ref\n return None",
"def __find_best(self):\n # First look for offensive moves\n for i in range(0, 3):\n col = self.__get_col(i)\n if len(col.get('empty')) == 1:\n if col.get(self.opponent_char) == 2:\n return col.get('empty')[0]\n for i in range(0, 3):\n row = self.__get_row(i)\n if len(row.get('empty')) == 1:\n if row.get(self.opponent_char) == 2:\n return row.get('empty')[0]\n for i in range(0, 2):\n diag = self.__get_diag(i)\n if len(diag.get('empty')) == 1:\n if diag.get(self.opponent_char) == 2:\n return diag.get('empty')[0]\n\n # Then check again looking for defensive moves\n for i in range(0, 3):\n col = self.__get_col(i)\n if len(col.get('empty')) == 1:\n if col.get(self.player_char) == 2:\n return col.get('empty')[0]\n for i in range(0, 3):\n row = self.__get_row(i)\n if len(row.get('empty')) == 1:\n if row.get(self.player_char) == 2:\n return row.get('empty')[0]\n for i in range(0, 2):\n diag = self.__get_diag(i)\n if len(diag.get('empty')) == 1:\n if diag.get(self.player_char) == 2:\n return diag.get('empty')[0]\n\n ##### CLEAN THIS METHOD UP LATER #####\n return None",
"def is_best(self, val) -> bool:\n if self.val is None or (val > self.val):\n self.val = val\n print(\"Updating Best\")\n return True\n else:\n return False",
"def better_sample(self, sample):\n new_logprob_model, new_loglik_data = self._logprob(sample)\n # if there's no best sample recorded yet\n if self.best_sample[0] is None:\n self.best_sample = (sample, new_logprob_model, new_loglik_data)\n self.logprob_model, self.loglik_data = new_logprob_model, new_loglik_data\n logging.info('Initial sample generated, logprob of model: {0}, loglik: {1}'.format(new_logprob_model, new_loglik_data))\n return\n\n # if there's a best sample\n if self.search_data_fit_only:\n better = new_loglik_data - self.best_sample[2]\n else:\n better = new_logprob_model + new_loglik_data - (self.best_sample[1] + self.best_sample[2])\n if better > 0:\n self.no_improv = 0\n self.best_diff.append(better)\n self.logprob_model, self.loglik_data = new_logprob_model, new_loglik_data\n self.best_sample = (copy.deepcopy(sample), new_logprob_model, new_loglik_data)\n logging.info('New best sample found, logprob of model: {0} loglik: {1}'.format(new_logprob_model, new_loglik_data))\n return True\n else:\n self.no_improv += 1\n return False",
"def get_global_best(self, b1, b2):\n if b2[1] < b1[1]:\n return b2\n return b1",
"def check_best(self):\n # Get the most profitable network based on our current data\n new_best = max(self.profit_data.iteritems(),\n key=operator.itemgetter(1))[0]\n\n if self.current_network is None:\n self.logger.info(\n \"No active network, so switching to {} with profit of {:,.4f}\"\n .format(new_best, self.profit_data[new_best]))\n self.next_network = new_best\n self.switch_network()\n return\n\n # If the currently most profitable network is 120% the profitability\n # of what we're mining on, we should switch immediately\n margin_switch = self.config['margin_switch']\n if (margin_switch and\n self.profit_data[self.next_network] >\n (self.profit_data[self.current_network] * margin_switch)):\n self.logger.info(\n \"Network {} {:,.4f} now more profitable than current network \"\n \"{} {:,.4f} by a fair margin. Switching NOW.\"\n .format(new_best, self.profit_data[new_best], self.current_network,\n self.profit_data[self.current_network]))\n self.next_network = new_best\n self.switch_network()\n return\n\n if new_best != self.next_network:\n self.logger.info(\n \"Network {} {:,.4f} now more profitable than current best \"\n \"{} {:,.4f}. Switching on next block from current network {}.\"\n .format(new_best, self.profit_data[new_best], self.next_network,\n self.profit_data[self.next_network], self.current_network))\n self.next_network = new_best\n return\n\n self.logger.debug(\"Network {} {:,.4f} still most profitable\"\n .format(new_best, self.profit_data[new_best]))",
"def find_most_compatible_match(self, candidate):\n best_matchIdx = -1\n best_matchVal = 0\n len_of_match = len(self.match)\n if not candidate.any():\n return None\n for i in candidate:\n if self.W[len_of_match][i] > best_matchVal:\n best_matchVal = self.W[len_of_match][i]\n best_matchIdx = i\n return best_matchIdx"
] |
[
"0.6512735",
"0.5417415",
"0.5378827",
"0.529838",
"0.52264994",
"0.5187538",
"0.51316535",
"0.5130922",
"0.51114696",
"0.50881255",
"0.507212",
"0.50677633",
"0.50491476",
"0.5042516",
"0.5040115",
"0.5025903",
"0.50158477",
"0.5013767",
"0.50123703",
"0.5004463",
"0.49945885",
"0.494152",
"0.4916886",
"0.4898641",
"0.48955983",
"0.48952955",
"0.4891834",
"0.48897728",
"0.4878954",
"0.4874354"
] |
0.6453743
|
1
|
Return the number of unique aligned genome-side positions that confirm the cassette-side position. ("Confirm" means same chromosome, consistent strand, and at most max_distance away)
|
def RISCC_N_confirming_seqs(self, max_distance=MAX_POSITION_DISTANCE):
N = 0
for read_data in self.RISCC_genome_side_aligned_reads.values():
# skip non-aligned reads; check aligned reads for confirming.
try: chrom = read_data[0].chromosome
except AttributeError: continue
if self._if_confirming_read(read_data[0], max_distance):
N += 1
return N
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def RISCC_N_non_confirming_seqs(self, max_distance=MAX_POSITION_DISTANCE):\n return len(self.RISCC_genome_side_aligned_reads) - self.RISCC_N_confirming_seqs(max_distance)",
"def RISCC_N_confirming_reads(self, max_distance=MAX_POSITION_DISTANCE):\n N = 0\n for read_data in self.RISCC_genome_side_aligned_reads.values():\n # skip non-aligned reads; check aligned reads for confirming.\n try: chrom = read_data[0].chromosome\n except AttributeError: continue\n if self._if_confirming_read(read_data[0], max_distance): \n N += read_data[1]\n return N",
"def RISCC_N_non_confirming_reads(self, max_distance=MAX_POSITION_DISTANCE):\n return sum(x[1] for x in self.RISCC_genome_side_aligned_reads.values()) - self.RISCC_N_confirming_reads(max_distance)",
"def RISCC_N_distinct_regions(self, max_distance=MAX_POSITION_DISTANCE):\n # TODO add options for minimum #seqs and #reads to count a region as valid!\n positions_by_chrom_strand = defaultdict(list)\n # add the cassette-side position (single)\n try:\n positions_by_chrom_strand[(self.position.chromosome, self.position.strand)].append(self.position.min_position)\n except AttributeError:\n pass\n # add all the genome-side read positions; skip unaligned ones.\n for read_data in self.RISCC_genome_side_aligned_reads.values():\n pos = read_data[0]\n try: positions_by_chrom_strand[(pos.chromosome, pos.strand)].append(pos.min_position)\n except AttributeError: continue\n # count total number of dictinct regions - different chromosomes or strands, or distance > max_distance\n total_distinct_regions_genome = 0\n total_distinct_regions_cassette = 0\n # for each chromosome, go over all positions and only count ones every MAX_POSITION_DISTANCE as distinct\n for chrom_strand, positions in positions_by_chrom_strand.items():\n positions.sort()\n distinct_regions = [positions[0]]\n for pos in positions[1:]:\n if (pos-distinct_regions[-1]) > max_distance:\n distinct_regions.append(pos)\n if is_cassette_chromosome(chrom_strand[0]): total_distinct_regions_cassette += len(distinct_regions)\n else: total_distinct_regions_genome += len(distinct_regions)\n return total_distinct_regions_genome, total_distinct_regions_cassette",
"def RISCC_max_confirmed_distance(self, max_distance=MAX_POSITION_DISTANCE):\n distances = []\n if self.RISCC_N_confirming_seqs(max_distance) + self.RISCC_N_non_confirming_seqs(max_distance) == 0:\n return float('NaN')\n for RISCC_read_data in self.RISCC_genome_side_aligned_reads.values():\n # Only look at the genome-side reads that match the cassette-side read position!\n # There's a try/except because unaligned reads don't have proper positions.\n try: \n if (RISCC_read_data[0].chromosome == self.position.chromosome \n and RISCC_read_data[0].strand == self.position.strand):\n pos_difference = abs(RISCC_read_data[0].min_position - self.position.min_position)\n else:\n continue\n except AttributeError: \n continue\n if pos_difference <= max_distance:\n distances.append(pos_difference)\n try:\n return max(distances)\n except ValueError:\n return 0\n # this is basically unit-tested by the tests for add_RISCC_read and improve_best_RISCC_read",
"def _getNumberOfAlignedNucleotides(self, dominant, subdominant):\n\t\tnumAligned = 0\n\t\tfor i in range(len(dominant)):\n\t\t\tif dominant[i] is not \" \" and subdominant[i] is not \" \":\n\t\t\t\tif dominant[i] == subdominant[i]:\n\t\t\t\t\tnumAligned += 1\n\t\treturn numAligned",
"def get_number_of_atoms_to_optimize(self):\n v = self.c.get(simulation_cell=True)\n return len(v.data.stoichiometry)",
"def decide_and_check_position(self, max_allowed_dist=0, ratio_to_ignore=100, OUTPUT=None):\n if not self.sequences_counts_positions_errors:\n self.position = SPECIAL_POSITIONS.unknown\n return\n def _order(arg):\n s, (c, p, e) = arg\n return (p in SPECIAL_POSITIONS.all_undefined, -c, e, s)\n main_seq, (main_count, main_pos, main_Nerr) = min(self.sequences_counts_positions_errors.items(), key = _order)\n self.position = main_pos\n for seq, (count, pos, N_err) in self.sequences_counts_positions_errors.items():\n if pos not in SPECIAL_POSITIONS.all_undefined:\n if not get_position_distance(main_pos, pos, ignore_strand=False) <= max_allowed_dist:\n if count*ratio_to_ignore <= main_count:\n # TODO removing these reads is a problem, because we don't remove the genome-side reads!\n del self.sequences_counts_positions_errors[seq]\n self.total_read_count -= count\n if N_err==0: self.perfect_read_count -= count\n else:\n if OUTPUT is not None:\n OUTPUT.write(\"Warning: Different cassette-side position in same mutant! REMOVING MUTANT. IB %s,\"%self.IB \n +\" %s %s %serr %s reads, %s %s %serr %s reads\\n\"%(main_pos, main_seq, main_Nerr, main_count, \n pos, seq, N_err, count))\n return True",
"def position_counter(strains):\n with database.make_connection() as connection:\n pos = []\n for strain in strains:\n # Get every variant position\n cursor = r.table(TABLE).filter({'StrainID': strain}).pluck(\n 'Position').run(connection)\n cur = [strain['Position'] for strain in cursor]\n pos = pos+cur\n common = filter_counts(pos, len(strains))\n return common",
"def RISCC_percent_confirming_seqs(self, max_distance=MAX_POSITION_DISTANCE, round_to_int=False):\n if not self.RISCC_N_aligned_seqs: return float('nan')\n else: \n percent = self.RISCC_N_confirming_seqs(max_distance) / self.RISCC_N_aligned_seqs * 100\n if round_to_int: percent = int(round(percent))\n return percent",
"def number_of_atoms_within_radius(self, distance_cutoff):\n n_atoms = 0\n atom_ids = []\n for contact in self.nearby_atoms:\n other_id = contact.atom_id_no_altloc()\n if (not other_id in atom_ids):\n if (contact.distance() < distance_cutoff):\n n_atoms += 1\n atom_ids.append(other_id) # check for alt confs.\n return n_atoms",
"def n_charged_atoms(mol: Mol) -> int:\n return sum([at.GetFormalCharge() != 0 for at in mol.GetAtoms()])",
"def get_num_mismatches(sequence, ref_genome, position):\n characters = list(sequence)\n num_mismatches = 0\n for i in range(0, len(characters)):\n if position + i >= len(ref_genome):\n break\n if characters[i] != ref_genome[position + i]:\n num_mismatches += 1\n\n return num_mismatches",
"def query_coverage(self):\n s = self.query_aln.replace(\"=\", \"\")\n return len(s)",
"def looks_like_nucleotide_phosphate_site(self,\n min_phosphate_oxygen_atoms = 2,\n distance_cutoff = 2.5) : # XXX wild guess\n n_phosphate_oxygens = 0\n for contact in self.nearby_atoms :\n atom_name = contact.atom_name()\n if (len(atom_name) < 3) or (contact.element not in [\"O\"]):\n continue\n if ((atom_name[0:2] in [\"O1\",\"O2\",\"O3\"]) and\n (atom_name[2] in [\"A\",\"B\",\"G\"])):\n if (contact.distance() <= distance_cutoff):\n n_phosphate_oxygens += 1\n return (n_phosphate_oxygens == min_phosphate_oxygen_atoms)",
"def count_suboptimal_atom_positions(self, lowerBound, upperBound):\n counter = 0\n for i in range(self.conformer.GetNumAtoms()):\n center = self.conformer.GetAtomPosition(i)\n point = [center.x, center.y, center.z]\n surroundingLow = self.kd_tree.query_ball_point(point, lowerBound)\n surroundingHigh = self.kd_tree.query_ball_point(point, upperBound)\n\n if len(surroundingHigh) - len(surroundingLow) > 0:\n counter += 1\n\n return counter / 2",
"def countingPointMutations(seq1, seq2):\n seqLength = len(list(seq1))\n \n hammingDistance=0;\n for i in range(0,seqLength):\n if list(seq1)[i]!=list(seq2)[i]:\n hammingDistance = hammingDistance+1;\n return hammingDistance",
"def hit_coverage(self):\n s = self.hit_aln.replace(\"=\", \"\")\n return len(s)",
"def distance(base_strand, comparison_strand):\n hamming_distance = 0\n\n for nucleotide in range(len(base_strand)):\n if base_strand[nucleotide] != comparison_strand[nucleotide]:\n hamming_distance += 1\n \n return hamming_distance",
"def N(self):\n return len(self.cavity_grid.cavities) + 1",
"def heuristic_misplaced(self):\n misplaced = 0\n\n for i in range(self.PUZZLE_NUM_ROWS):\n for j in range(self.PUZZLE_NUM_COLUMNS):\n if self.position[i][j] != self.PUZZLE_END_POSITION[i][j]:\n misplaced += 1\n\n return misplaced",
"def get_identity(alignment_list):\n size_alignement = len(alignment_list[1])\n identical_nucleotide = 0\n for i in range(len(alignment_list[0])):\n if alignment_list[0][i] == alignment_list[1][i]:\n identical_nucleotide += 1\n\n return (identical_nucleotide / size_alignement) * 100",
"def verifyDistinct( options, data ):\n tot = 0\n for c in data.chrNames:\n s = set()\n d = mafDataOrNone( data.mafBlocksByChrom, c )\n if d is None:\n continue\n for mb in d:\n for i in xrange( mb.refStart, mb.refEnd + 1):\n if i in s:\n sys.stderr.write('duplicate base found! %s %d [%d-%d], %s [%d-%d]\\n'\n % (mb.refChr, i, mb.refStart, mb.refEnd, \n mb.pairChr, mb.pairStart, mb.pairEnd ))\n sys.exit( 1 )\n else:\n s.add( i )\n tot += len( s )\n sys.stderr.write( 'Verify all bases sent to be binned are distinct: Found %s distinct bases in the alignment to the reference genome, no duplicates, OK.\\n' % tot)",
"def count_total_mutations(seqs, database):\n total = 0\n for seq in seqs:\n total += count_minimum_mutations(seq, database)\n return total",
"def get_corrected_index(seq,\n aligned_index):\n \n # Counts the number of nucleotides in aligned sequence, returns\n # count of nucleotides occuring before aligned index reached\n slice_seq=seq[0:aligned_index]\n # If different gap characters used, may need to modify this\n # In current form, it is optimized for speed\n corrected_index=\\\n aligned_index - (slice_seq.count(\"-\") + slice_seq.count(\".\"))\n \n\n \n return corrected_index",
"def _count_concordant_pairs(preds: Tensor, target: Tensor) ->Tensor:\n return torch.cat([_concordant_element_sum(preds, target, i) for i in range(preds.shape[0])]).sum(0)",
"def count_nucleic_acids(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_nucleic_acids()\n return n",
"def distance(individual):\n count=0\n val=0\n for i in individual:\n if viajes[val][6]==True and False==vehiculos_esp[i]:\n count+=1\n val+=1\n return count",
"def brute_force_search_solution():\n return len(coin_search(TOTAL, COINS))",
"def num_mutations(self):\n return len(self.fuzz_library)"
] |
[
"0.7093234",
"0.7000722",
"0.6597465",
"0.64881676",
"0.6190726",
"0.61127645",
"0.61078817",
"0.5944024",
"0.59060484",
"0.5810861",
"0.5786735",
"0.57316",
"0.5727625",
"0.56644326",
"0.5658276",
"0.55661505",
"0.5564351",
"0.55567706",
"0.5492828",
"0.54168594",
"0.5399307",
"0.5361001",
"0.5360026",
"0.5353898",
"0.5351313",
"0.5347649",
"0.53475785",
"0.53190154",
"0.5306748",
"0.5303747"
] |
0.74679756
|
0
|
Return the number of unique aligned genome-side positions that DON'T confirm the cassette-side position. ("Confirm" means same chromosome, consistent strand, and at most max_distance away)
|
def RISCC_N_non_confirming_seqs(self, max_distance=MAX_POSITION_DISTANCE):
return len(self.RISCC_genome_side_aligned_reads) - self.RISCC_N_confirming_seqs(max_distance)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def RISCC_N_confirming_seqs(self, max_distance=MAX_POSITION_DISTANCE):\n N = 0\n for read_data in self.RISCC_genome_side_aligned_reads.values():\n # skip non-aligned reads; check aligned reads for confirming.\n try: chrom = read_data[0].chromosome\n except AttributeError: continue\n if self._if_confirming_read(read_data[0], max_distance): \n N += 1\n return N",
"def RISCC_N_non_confirming_reads(self, max_distance=MAX_POSITION_DISTANCE):\n return sum(x[1] for x in self.RISCC_genome_side_aligned_reads.values()) - self.RISCC_N_confirming_reads(max_distance)",
"def RISCC_N_confirming_reads(self, max_distance=MAX_POSITION_DISTANCE):\n N = 0\n for read_data in self.RISCC_genome_side_aligned_reads.values():\n # skip non-aligned reads; check aligned reads for confirming.\n try: chrom = read_data[0].chromosome\n except AttributeError: continue\n if self._if_confirming_read(read_data[0], max_distance): \n N += read_data[1]\n return N",
"def decide_and_check_position(self, max_allowed_dist=0, ratio_to_ignore=100, OUTPUT=None):\n if not self.sequences_counts_positions_errors:\n self.position = SPECIAL_POSITIONS.unknown\n return\n def _order(arg):\n s, (c, p, e) = arg\n return (p in SPECIAL_POSITIONS.all_undefined, -c, e, s)\n main_seq, (main_count, main_pos, main_Nerr) = min(self.sequences_counts_positions_errors.items(), key = _order)\n self.position = main_pos\n for seq, (count, pos, N_err) in self.sequences_counts_positions_errors.items():\n if pos not in SPECIAL_POSITIONS.all_undefined:\n if not get_position_distance(main_pos, pos, ignore_strand=False) <= max_allowed_dist:\n if count*ratio_to_ignore <= main_count:\n # TODO removing these reads is a problem, because we don't remove the genome-side reads!\n del self.sequences_counts_positions_errors[seq]\n self.total_read_count -= count\n if N_err==0: self.perfect_read_count -= count\n else:\n if OUTPUT is not None:\n OUTPUT.write(\"Warning: Different cassette-side position in same mutant! REMOVING MUTANT. IB %s,\"%self.IB \n +\" %s %s %serr %s reads, %s %s %serr %s reads\\n\"%(main_pos, main_seq, main_Nerr, main_count, \n pos, seq, N_err, count))\n return True",
"def RISCC_N_distinct_regions(self, max_distance=MAX_POSITION_DISTANCE):\n # TODO add options for minimum #seqs and #reads to count a region as valid!\n positions_by_chrom_strand = defaultdict(list)\n # add the cassette-side position (single)\n try:\n positions_by_chrom_strand[(self.position.chromosome, self.position.strand)].append(self.position.min_position)\n except AttributeError:\n pass\n # add all the genome-side read positions; skip unaligned ones.\n for read_data in self.RISCC_genome_side_aligned_reads.values():\n pos = read_data[0]\n try: positions_by_chrom_strand[(pos.chromosome, pos.strand)].append(pos.min_position)\n except AttributeError: continue\n # count total number of dictinct regions - different chromosomes or strands, or distance > max_distance\n total_distinct_regions_genome = 0\n total_distinct_regions_cassette = 0\n # for each chromosome, go over all positions and only count ones every MAX_POSITION_DISTANCE as distinct\n for chrom_strand, positions in positions_by_chrom_strand.items():\n positions.sort()\n distinct_regions = [positions[0]]\n for pos in positions[1:]:\n if (pos-distinct_regions[-1]) > max_distance:\n distinct_regions.append(pos)\n if is_cassette_chromosome(chrom_strand[0]): total_distinct_regions_cassette += len(distinct_regions)\n else: total_distinct_regions_genome += len(distinct_regions)\n return total_distinct_regions_genome, total_distinct_regions_cassette",
"def RISCC_max_confirmed_distance(self, max_distance=MAX_POSITION_DISTANCE):\n distances = []\n if self.RISCC_N_confirming_seqs(max_distance) + self.RISCC_N_non_confirming_seqs(max_distance) == 0:\n return float('NaN')\n for RISCC_read_data in self.RISCC_genome_side_aligned_reads.values():\n # Only look at the genome-side reads that match the cassette-side read position!\n # There's a try/except because unaligned reads don't have proper positions.\n try: \n if (RISCC_read_data[0].chromosome == self.position.chromosome \n and RISCC_read_data[0].strand == self.position.strand):\n pos_difference = abs(RISCC_read_data[0].min_position - self.position.min_position)\n else:\n continue\n except AttributeError: \n continue\n if pos_difference <= max_distance:\n distances.append(pos_difference)\n try:\n return max(distances)\n except ValueError:\n return 0\n # this is basically unit-tested by the tests for add_RISCC_read and improve_best_RISCC_read",
"def get_number_of_atoms_to_optimize(self):\n v = self.c.get(simulation_cell=True)\n return len(v.data.stoichiometry)",
"def looks_like_nucleotide_phosphate_site(self,\n min_phosphate_oxygen_atoms = 2,\n distance_cutoff = 2.5) : # XXX wild guess\n n_phosphate_oxygens = 0\n for contact in self.nearby_atoms :\n atom_name = contact.atom_name()\n if (len(atom_name) < 3) or (contact.element not in [\"O\"]):\n continue\n if ((atom_name[0:2] in [\"O1\",\"O2\",\"O3\"]) and\n (atom_name[2] in [\"A\",\"B\",\"G\"])):\n if (contact.distance() <= distance_cutoff):\n n_phosphate_oxygens += 1\n return (n_phosphate_oxygens == min_phosphate_oxygen_atoms)",
"def n_charged_atoms(mol: Mol) -> int:\n return sum([at.GetFormalCharge() != 0 for at in mol.GetAtoms()])",
"def _getNumberOfAlignedNucleotides(self, dominant, subdominant):\n\t\tnumAligned = 0\n\t\tfor i in range(len(dominant)):\n\t\t\tif dominant[i] is not \" \" and subdominant[i] is not \" \":\n\t\t\t\tif dominant[i] == subdominant[i]:\n\t\t\t\t\tnumAligned += 1\n\t\treturn numAligned",
"def get_num_mismatches(sequence, ref_genome, position):\n characters = list(sequence)\n num_mismatches = 0\n for i in range(0, len(characters)):\n if position + i >= len(ref_genome):\n break\n if characters[i] != ref_genome[position + i]:\n num_mismatches += 1\n\n return num_mismatches",
"def number_of_atoms_within_radius(self, distance_cutoff):\n n_atoms = 0\n atom_ids = []\n for contact in self.nearby_atoms:\n other_id = contact.atom_id_no_altloc()\n if (not other_id in atom_ids):\n if (contact.distance() < distance_cutoff):\n n_atoms += 1\n atom_ids.append(other_id) # check for alt confs.\n return n_atoms",
"def count_suboptimal_atom_positions(self, lowerBound, upperBound):\n counter = 0\n for i in range(self.conformer.GetNumAtoms()):\n center = self.conformer.GetAtomPosition(i)\n point = [center.x, center.y, center.z]\n surroundingLow = self.kd_tree.query_ball_point(point, lowerBound)\n surroundingHigh = self.kd_tree.query_ball_point(point, upperBound)\n\n if len(surroundingHigh) - len(surroundingLow) > 0:\n counter += 1\n\n return counter / 2",
"def heuristic_misplaced(self):\n misplaced = 0\n\n for i in range(self.PUZZLE_NUM_ROWS):\n for j in range(self.PUZZLE_NUM_COLUMNS):\n if self.position[i][j] != self.PUZZLE_END_POSITION[i][j]:\n misplaced += 1\n\n return misplaced",
"def verifyDistinct( options, data ):\n tot = 0\n for c in data.chrNames:\n s = set()\n d = mafDataOrNone( data.mafBlocksByChrom, c )\n if d is None:\n continue\n for mb in d:\n for i in xrange( mb.refStart, mb.refEnd + 1):\n if i in s:\n sys.stderr.write('duplicate base found! %s %d [%d-%d], %s [%d-%d]\\n'\n % (mb.refChr, i, mb.refStart, mb.refEnd, \n mb.pairChr, mb.pairStart, mb.pairEnd ))\n sys.exit( 1 )\n else:\n s.add( i )\n tot += len( s )\n sys.stderr.write( 'Verify all bases sent to be binned are distinct: Found %s distinct bases in the alignment to the reference genome, no duplicates, OK.\\n' % tot)",
"def check_min_guide_pairs(df, min_pairs):\n pair_count = (df[['anchor_guide', 'target_guide']]\n .drop_duplicates()\n .groupby('anchor_guide')\n .apply(lambda d: d.shape[0])\n .reset_index(name='n'))\n guides_no_pairs = pair_count.anchor_guide[~(pair_count.n >= min_pairs)].to_list()\n return guides_no_pairs",
"def RISCC_percent_confirming_seqs(self, max_distance=MAX_POSITION_DISTANCE, round_to_int=False):\n if not self.RISCC_N_aligned_seqs: return float('nan')\n else: \n percent = self.RISCC_N_confirming_seqs(max_distance) / self.RISCC_N_aligned_seqs * 100\n if round_to_int: percent = int(round(percent))\n return percent",
"def position_counter(strains):\n with database.make_connection() as connection:\n pos = []\n for strain in strains:\n # Get every variant position\n cursor = r.table(TABLE).filter({'StrainID': strain}).pluck(\n 'Position').run(connection)\n cur = [strain['Position'] for strain in cursor]\n pos = pos+cur\n common = filter_counts(pos, len(strains))\n return common",
"def get_del_pos(genome):\n start_pos = random.randint(100,len(genome.seq)-5100) # positions 100bp from start or end will not be variable\n end_pos = start_pos + random.randint(100,5000)\n unavail = False\n for n in range(start_pos, end_pos):\n if n in genome.unavail_pos:\n unavail = True\n break\n if unavail:\n start_pos, end_pos = get_del_pos(genome)\n return (start_pos, end_pos)",
"def mutants_in_chromosome(self, chromosome):\n return sum(1 for m in self.dataset if m.read_info(self.dataset_name).total_read_count \n and m.position not in SPECIAL_POSITIONS.all_undefined and m.position.chromosome==chromosome)",
"def query_coverage(self):\n s = self.query_aln.replace(\"=\", \"\")\n return len(s)",
"def countingPointMutations(seq1, seq2):\n seqLength = len(list(seq1))\n \n hammingDistance=0;\n for i in range(0,seqLength):\n if list(seq1)[i]!=list(seq2)[i]:\n hammingDistance = hammingDistance+1;\n return hammingDistance",
"def get_corrected_index(seq,\n aligned_index):\n \n # Counts the number of nucleotides in aligned sequence, returns\n # count of nucleotides occuring before aligned index reached\n slice_seq=seq[0:aligned_index]\n # If different gap characters used, may need to modify this\n # In current form, it is optimized for speed\n corrected_index=\\\n aligned_index - (slice_seq.count(\"-\") + slice_seq.count(\".\"))\n \n\n \n return corrected_index",
"def nashaat_strategy(self):\n\n # Disagreement factor, buckets with labeling function conflicts\n has_conflicts = ~(\n (\n self.unique_combs.sum(axis=1) == 0\n ) | (\n self.unique_combs.sum(axis=1) == self.unique_combs.shape[1]\n )\n )\n bucket_margins = self.margin(self.bucket_probs)\n\n # Select buckets with highest uncertainty and disagreeing weak labels\n return np.where(\n np.logical_and.reduce(\n (\n bucket_margins == np.min(\n bucket_margins[self.is_valid_bucket & has_conflicts]),\n self.is_valid_bucket, has_conflicts\n )\n )\n )[0]",
"def depiction_score(self):\n\n collision_penalty = 1\n degenerated_penalty = 0.4\n\n bond_collisions = self.count_bond_collisions()\n degenerated_atoms = self.count_suboptimal_atom_positions(0.0, 0.5)\n\n score = (\n collision_penalty * bond_collisions\n + degenerated_penalty * degenerated_atoms\n )\n\n return round(score, 1)",
"def distance(base_strand, comparison_strand):\n hamming_distance = 0\n\n for nucleotide in range(len(base_strand)):\n if base_strand[nucleotide] != comparison_strand[nucleotide]:\n hamming_distance += 1\n \n return hamming_distance",
"def purgeHis(atoms):\n for a in atoms:\n if getAtype(a) == \"N\" or getAtype(a) == \"NA\":\n found = 0\n for c in atoms:\n if not c == a and dist(c,a) < COVALENT_BOND_DIST:\n found = 1\n break\n if not found:\n atoms.remove(a)\n return atoms\n if DEBUG: print \"Warning! Residue %s appears to be incomplete\" % (atoms[0][17:20]+atoms[0][22:26]+atoms[0][21])\n return False",
"def matchPos(self, allAtoms, pbcCount, forbiddenPosList, forbiddenAtomsList):\n length = len(allAtoms) / pbcCount\n for atomIndex in range(len(allAtoms)):\n atom = allAtoms[atomIndex]\n if atomIndex in forbiddenAtomsList:\n self._places[atomIndex] = (-1)\n self._distances[atomIndex] = (-1000)\n else:\n if self._places[atomIndex] in forbiddenPosList or self._places[atomIndex] == -1:\n self._places[atomIndex] = (self.findPlace(atom, self.lattice\n , forbiddenPosList))\n if not self._places[atomIndex] is None:\n self._distances[atomIndex] = distance(\n self.lattice.positions[self._places[atomIndex]].x0\n , [atom.x, atom.y])\n \n \n minDists = [min([self._distances[i + n * length] for n in range(pbcCount)]) for i in range(length)]\n posMinDists = [posmin([self._distances[i + n * length] for n in range(pbcCount)]) for i in range(length)]\n indexOfAtom = posmax(minDists)\n indexOfAtom += posMinDists[indexOfAtom] * length \n return (indexOfAtom, self._places[indexOfAtom])",
"def hit_coverage(self):\n s = self.hit_aln.replace(\"=\", \"\")\n return len(s)",
"def get_clashes (fixed_struc, moving_struc, minimum_clash_distance):\n clash_set=set()\n \n NS = Bio.PDB.NeighborSearch(list(fixed_struc.get_atoms()))\n clashes =0\n for atoms in moving_struc.get_atoms():\n close = NS.search(atoms.get_coord(), minimum_clash_distance)\n if len(close) > 0:\n for item in close:\n clash_set.add(item.get_parent().get_parent().id)\n clashes +=1\n return clashes"
] |
[
"0.71052176",
"0.6717986",
"0.6702725",
"0.6263347",
"0.60687333",
"0.6005679",
"0.59082055",
"0.5855646",
"0.58537626",
"0.58399194",
"0.56932026",
"0.56851655",
"0.56245494",
"0.56210035",
"0.54615045",
"0.5412676",
"0.5403656",
"0.53759927",
"0.53530586",
"0.5343061",
"0.52985716",
"0.5296417",
"0.5249578",
"0.5248789",
"0.5246178",
"0.5230149",
"0.52209496",
"0.52071226",
"0.5195341",
"0.51916134"
] |
0.7175001
|
0
|
Return the number of aligned genome-side READS that confirm the cassette-side position. ("Confirm" means same chromosome, consistent strand, and at most max_distance away)
|
def RISCC_N_confirming_reads(self, max_distance=MAX_POSITION_DISTANCE):
N = 0
for read_data in self.RISCC_genome_side_aligned_reads.values():
# skip non-aligned reads; check aligned reads for confirming.
try: chrom = read_data[0].chromosome
except AttributeError: continue
if self._if_confirming_read(read_data[0], max_distance):
N += read_data[1]
return N
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def RISCC_N_confirming_seqs(self, max_distance=MAX_POSITION_DISTANCE):\n N = 0\n for read_data in self.RISCC_genome_side_aligned_reads.values():\n # skip non-aligned reads; check aligned reads for confirming.\n try: chrom = read_data[0].chromosome\n except AttributeError: continue\n if self._if_confirming_read(read_data[0], max_distance): \n N += 1\n return N",
"def RISCC_N_non_confirming_seqs(self, max_distance=MAX_POSITION_DISTANCE):\n return len(self.RISCC_genome_side_aligned_reads) - self.RISCC_N_confirming_seqs(max_distance)",
"def RISCC_N_non_confirming_reads(self, max_distance=MAX_POSITION_DISTANCE):\n return sum(x[1] for x in self.RISCC_genome_side_aligned_reads.values()) - self.RISCC_N_confirming_reads(max_distance)",
"def RISCC_max_confirmed_distance(self, max_distance=MAX_POSITION_DISTANCE):\n distances = []\n if self.RISCC_N_confirming_seqs(max_distance) + self.RISCC_N_non_confirming_seqs(max_distance) == 0:\n return float('NaN')\n for RISCC_read_data in self.RISCC_genome_side_aligned_reads.values():\n # Only look at the genome-side reads that match the cassette-side read position!\n # There's a try/except because unaligned reads don't have proper positions.\n try: \n if (RISCC_read_data[0].chromosome == self.position.chromosome \n and RISCC_read_data[0].strand == self.position.strand):\n pos_difference = abs(RISCC_read_data[0].min_position - self.position.min_position)\n else:\n continue\n except AttributeError: \n continue\n if pos_difference <= max_distance:\n distances.append(pos_difference)\n try:\n return max(distances)\n except ValueError:\n return 0\n # this is basically unit-tested by the tests for add_RISCC_read and improve_best_RISCC_read",
"def RISCC_percent_confirming_reads(self, max_distance=MAX_POSITION_DISTANCE, round_to_int=False):\n if not self.RISCC_N_aligned_reads: return float('nan')\n else:\n percent = self.RISCC_N_confirming_reads(max_distance) / self.RISCC_N_aligned_reads * 100\n if round_to_int: percent = int(round(percent))\n return percent",
"def RISCC_percent_confirming_seqs(self, max_distance=MAX_POSITION_DISTANCE, round_to_int=False):\n if not self.RISCC_N_aligned_seqs: return float('nan')\n else: \n percent = self.RISCC_N_confirming_seqs(max_distance) / self.RISCC_N_aligned_seqs * 100\n if round_to_int: percent = int(round(percent))\n return percent",
"def RISCC_N_distinct_regions(self, max_distance=MAX_POSITION_DISTANCE):\n # TODO add options for minimum #seqs and #reads to count a region as valid!\n positions_by_chrom_strand = defaultdict(list)\n # add the cassette-side position (single)\n try:\n positions_by_chrom_strand[(self.position.chromosome, self.position.strand)].append(self.position.min_position)\n except AttributeError:\n pass\n # add all the genome-side read positions; skip unaligned ones.\n for read_data in self.RISCC_genome_side_aligned_reads.values():\n pos = read_data[0]\n try: positions_by_chrom_strand[(pos.chromosome, pos.strand)].append(pos.min_position)\n except AttributeError: continue\n # count total number of dictinct regions - different chromosomes or strands, or distance > max_distance\n total_distinct_regions_genome = 0\n total_distinct_regions_cassette = 0\n # for each chromosome, go over all positions and only count ones every MAX_POSITION_DISTANCE as distinct\n for chrom_strand, positions in positions_by_chrom_strand.items():\n positions.sort()\n distinct_regions = [positions[0]]\n for pos in positions[1:]:\n if (pos-distinct_regions[-1]) > max_distance:\n distinct_regions.append(pos)\n if is_cassette_chromosome(chrom_strand[0]): total_distinct_regions_cassette += len(distinct_regions)\n else: total_distinct_regions_genome += len(distinct_regions)\n return total_distinct_regions_genome, total_distinct_regions_cassette",
"def query_coverage(self):\n s = self.query_aln.replace(\"=\", \"\")\n return len(s)",
"def get_number_of_atoms_to_optimize(self):\n v = self.c.get(simulation_cell=True)\n return len(v.data.stoichiometry)",
"def decide_and_check_position(self, max_allowed_dist=0, ratio_to_ignore=100, OUTPUT=None):\n if not self.sequences_counts_positions_errors:\n self.position = SPECIAL_POSITIONS.unknown\n return\n def _order(arg):\n s, (c, p, e) = arg\n return (p in SPECIAL_POSITIONS.all_undefined, -c, e, s)\n main_seq, (main_count, main_pos, main_Nerr) = min(self.sequences_counts_positions_errors.items(), key = _order)\n self.position = main_pos\n for seq, (count, pos, N_err) in self.sequences_counts_positions_errors.items():\n if pos not in SPECIAL_POSITIONS.all_undefined:\n if not get_position_distance(main_pos, pos, ignore_strand=False) <= max_allowed_dist:\n if count*ratio_to_ignore <= main_count:\n # TODO removing these reads is a problem, because we don't remove the genome-side reads!\n del self.sequences_counts_positions_errors[seq]\n self.total_read_count -= count\n if N_err==0: self.perfect_read_count -= count\n else:\n if OUTPUT is not None:\n OUTPUT.write(\"Warning: Different cassette-side position in same mutant! REMOVING MUTANT. IB %s,\"%self.IB \n +\" %s %s %serr %s reads, %s %s %serr %s reads\\n\"%(main_pos, main_seq, main_Nerr, main_count, \n pos, seq, N_err, count))\n return True",
"def get_num_mismatches(sequence, ref_genome, position):\n characters = list(sequence)\n num_mismatches = 0\n for i in range(0, len(characters)):\n if position + i >= len(ref_genome):\n break\n if characters[i] != ref_genome[position + i]:\n num_mismatches += 1\n\n return num_mismatches",
"def _getNumberOfAlignedNucleotides(self, dominant, subdominant):\n\t\tnumAligned = 0\n\t\tfor i in range(len(dominant)):\n\t\t\tif dominant[i] is not \" \" and subdominant[i] is not \" \":\n\t\t\t\tif dominant[i] == subdominant[i]:\n\t\t\t\t\tnumAligned += 1\n\t\treturn numAligned",
"def number_of_atoms_within_radius(self, distance_cutoff):\n n_atoms = 0\n atom_ids = []\n for contact in self.nearby_atoms:\n other_id = contact.atom_id_no_altloc()\n if (not other_id in atom_ids):\n if (contact.distance() < distance_cutoff):\n n_atoms += 1\n atom_ids.append(other_id) # check for alt confs.\n return n_atoms",
"def hit_coverage(self):\n s = self.hit_aln.replace(\"=\", \"\")\n return len(s)",
"def carn_count(self):\n return len(self.carnivores)",
"def count_matches(sam_input):\n logging.info(\"Counting aligned bases in %s ...\", sam_input.name)\n\n total_bases = 0\n with pysam.AlignmentFile(sam_input, \"r\") as sam:\n for read in sam:\n total_bases += aligned_bases(read.cigar)\n return total_bases",
"def position_counter(strains):\n with database.make_connection() as connection:\n pos = []\n for strain in strains:\n # Get every variant position\n cursor = r.table(TABLE).filter({'StrainID': strain}).pluck(\n 'Position').run(connection)\n cur = [strain['Position'] for strain in cursor]\n pos = pos+cur\n common = filter_counts(pos, len(strains))\n return common",
"def n_charged_atoms(mol: Mol) -> int:\n return sum([at.GetFormalCharge() != 0 for at in mol.GetAtoms()])",
"def get_number_seqs_for_primer(percent_match,\n seq_count):\n \n total_seq_use=int((1-percent_match)*seq_count)\n \n return total_seq_use",
"def _decide_if_replace_read(self, new_position, max_distance):\n # if there are no current reads, add new one\n if not len(self.RISCC_genome_side_aligned_reads): return True\n # if new read isn't \"confirming\", it can't be better\n if not self._if_confirming_read(new_position, max_distance): return False\n # if the new one is \"confirming\" and the old one isn't, new one has to be better\n old_position = self.RISCC_genome_side_aligned_reads.values()[0][0]\n if not self._if_confirming_read(old_position, max_distance): return True\n # if both the old and new position meet the basic conditions, pick the highest-distance one\n # TODO what about directionality and weird cases?\n new_dist = abs(new_position.min_position - self.position.min_position)\n old_dist = abs(old_position.min_position - self.position.min_position)\n if new_dist > old_dist: return True\n else: return False",
"def number_of_favored_ligand_residues(self, ion_params, distance = 3.0,\n exclude_atoms = ()):\n n_res = 0\n resids = []\n for contact in self.nearby_atoms:\n if (contact.atom_name() in exclude_atoms):\n continue\n if (contact.distance() < distance):\n labels = contact.atom.fetch_labels()\n other_resname = contact.resname()\n other_resid = labels.chain_id + labels.resid()\n if ((ion_params.allowed_coordinating_residues is not None) and\n (other_resname in ion_params.allowed_coordinating_residues) and\n (not other_resid in resids)):\n n_res += 1\n resids.append(other_resid)\n return n_res",
"def get_num_strains():\n strains = get_required_strains(None)\n strain_count = len(strains)\n with database.make_connection() as connection:\n # In case reference is included in run\n # Supports current reference\n ref_id = get_current_reference_id()\n for e in strains:\n if e.find(ref_id) != -1:\n strain_count = strain_count-1\n break\n return strain_count",
"def check_seqs(fasta_out, fasta_files, starting_ix, valid_map, qual_mappings,\r\n filters, barcode_len, keep_primer, keep_barcode, barcode_type,\r\n max_bc_errors, retain_unassigned_reads, attempt_bc_correction,\r\n primer_seqs_lens, all_primers, max_primer_mm, disable_primer_check,\r\n reverse_primers, rev_primers, qual_out, qual_score_window=0,\r\n discard_bad_windows=False, min_qual_score=25, min_seq_len=200,\r\n median_length_filtering=None, added_demultiplex_field=None,\r\n reverse_primer_mismatches=0, truncate_ambi_bases=False):\r\n\r\n seq_lengths = {}\r\n\r\n # Record complete barcode + primer + sequence lengths\r\n raw_seq_lengths = {}\r\n # Record sequence lengths after all optional removal of components\r\n final_seq_lengths = {}\r\n\r\n bc_counts = defaultdict(list)\r\n curr_ix = starting_ix\r\n corr_ct = 0 # count of corrected barcodes\r\n\r\n # get the list of barcode lengths in reverse order\r\n barcode_length_order =\\\r\n sorted(set([len(bc.split(',')[0]) for bc in valid_map]))\r\n barcode_length_order = barcode_length_order[::-1]\r\n\r\n primer_mismatch_count = 0\r\n all_primers_lens = sorted(set(all_primers.values()))\r\n\r\n reverse_primer_not_found = 0\r\n\r\n sliding_window_failed = 0\r\n trunc_ambi_base_counts = 0\r\n\r\n below_seq_min_after_trunc = 0\r\n below_seq_min_after_ambi_trunc = 0\r\n\r\n for fasta_in in fasta_files:\r\n for curr_id, curr_seq in parse_fasta(fasta_in):\r\n curr_rid = curr_id.split()[0]\r\n curr_seq = upper(curr_seq)\r\n\r\n curr_len = len(curr_seq)\r\n curr_qual = qual_mappings.get(curr_rid, None)\r\n\r\n # if qual_out:\r\n # curr_qual_out_score = \\\r\n # \"%2.2f\" % float(float(sum(curr_qual))/float(len(curr_qual)))\r\n seq_lengths[curr_rid] = curr_len\r\n failed = False\r\n\r\n for f in filters:\r\n failed = failed or f(curr_rid, curr_seq, curr_qual)\r\n if failed: # if we failed any of the checks, bail out here\r\n bc_counts['#FAILED'].append(curr_rid)\r\n continue\r\n\r\n if barcode_type == 'variable_length':\r\n # Reset the raw_barcode, raw_seq, and barcode_len -- if\r\n # we don't match a barcode from the mapping file, we want\r\n # these values to be None\r\n raw_barcode, raw_seq, barcode_len = (None, None, None)\r\n\r\n curr_valid_map =\\\r\n [curr_bc.split(',')[0] for curr_bc in valid_map]\r\n # Iterate through the barcode length from longest to shortest\r\n for l in barcode_length_order:\r\n # extract the current length barcode from the sequence\r\n bc, seq = get_barcode(curr_seq, l)\r\n # check if the sliced sequence corresponds to a valid\r\n # barcode, and if so set raw_barcode, raw_seq, and\r\n # barcode_len for use in the next steps\r\n if bc in curr_valid_map:\r\n raw_barcode, raw_seq = bc, seq\r\n barcode_len = len(raw_barcode)\r\n break\r\n # if we haven't found a valid barcode, log this sequence as\r\n # failing to match a barcode, and move on to the next sequence\r\n if not raw_barcode:\r\n bc_counts['#FAILED'].append(curr_rid)\r\n continue\r\n\r\n else:\r\n # Get the current barcode to look up the associated primer(s)\r\n raw_barcode, raw_seq = get_barcode(curr_seq, barcode_len)\r\n\r\n if not disable_primer_check:\r\n try:\r\n current_primers = primer_seqs_lens[raw_barcode]\r\n # In this case, all values will be the same, i.e. 
the length\r\n # of the given primer, or degenerate variations thereof.\r\n primer_len = current_primers.values()[0]\r\n\r\n if primer_exceeds_mismatches(raw_seq[:primer_len],\r\n current_primers, max_primer_mm):\r\n bc_counts['#FAILED'].append(curr_rid)\r\n primer_mismatch_count += 1\r\n continue\r\n except KeyError:\r\n # If the barcode read does not match any of those in the\r\n # mapping file, the situation becomes more complicated. We do\r\n # not know the length the sequence to slice out to compare to\r\n # our primer sets, so, in ascending order of all the given\r\n # primer lengths, a sequence will the sliced out and compared\r\n # to the primer set.\r\n current_primers = all_primers\r\n found_match = False\r\n for seq_slice_len in all_primers_lens:\r\n if not(\r\n primer_exceeds_mismatches(raw_seq[:seq_slice_len],\r\n current_primers, max_primer_mm)):\r\n primer_len = seq_slice_len\r\n found_match = True\r\n break\r\n if not found_match:\r\n bc_counts['#FAILED'].append(curr_rid)\r\n primer_mismatch_count += 1\r\n continue\r\n except IndexError:\r\n # Try to raise meaningful error if problem reading primers\r\n raise IndexError('Error reading primer sequences. If ' +\r\n 'primers were purposefully not included in the mapping ' +\r\n 'file, disable usage with the -p option.')\r\n else:\r\n # Set primer length to zero if primers are disabled.\r\n primer_len = 0\r\n\r\n # split seqs\r\n cbc, cpr, cres = split_seq(curr_seq, barcode_len,\r\n primer_len)\r\n\r\n total_bc_primer_len = len(cbc) + len(cpr)\r\n\r\n # get current barcode\r\n try:\r\n bc_diffs, curr_bc, corrected_bc = \\\r\n check_barcode(cbc, barcode_type, valid_map.keys(),\r\n attempt_bc_correction, added_demultiplex_field, curr_id)\r\n if bc_diffs > max_bc_errors:\r\n raise ValueError(\"Too many errors in barcode\")\r\n corr_ct += bool(corrected_bc)\r\n except Exception as e:\r\n bc_counts[None].append(curr_rid)\r\n continue\r\n\r\n curr_samp_id = valid_map.get(curr_bc, 'Unassigned')\r\n\r\n new_id = \"%s_%d\" % (curr_samp_id, curr_ix)\r\n # check if writing out primer\r\n write_seq = cres\r\n\r\n if reverse_primers == \"truncate_only\":\r\n try:\r\n rev_primer = rev_primers[curr_bc]\r\n mm_tested = {}\r\n for curr_rev_primer in rev_primer:\r\n # Try to find lowest count of mismatches for all\r\n # reverse primers\r\n rev_primer_mm, rev_primer_index = \\\r\n local_align_primer_seq(curr_rev_primer, cres)\r\n mm_tested[rev_primer_mm] = rev_primer_index\r\n\r\n rev_primer_mm = min(mm_tested.keys())\r\n rev_primer_index = mm_tested[rev_primer_mm]\r\n if rev_primer_mm <= reverse_primer_mismatches:\r\n write_seq = write_seq[0:rev_primer_index]\r\n if qual_out:\r\n curr_qual = curr_qual[0:barcode_len +\r\n primer_len + rev_primer_index]\r\n else:\r\n reverse_primer_not_found += 1\r\n except KeyError:\r\n pass\r\n elif reverse_primers == \"truncate_remove\":\r\n try:\r\n rev_primer = rev_primers[curr_bc]\r\n mm_tested = {}\r\n for curr_rev_primer in rev_primer:\r\n # Try to find lowest count of mismatches for all\r\n # reverse primers\r\n rev_primer_mm, rev_primer_index = \\\r\n local_align_primer_seq(curr_rev_primer, cres)\r\n mm_tested[rev_primer_mm] = rev_primer_index\r\n\r\n rev_primer_mm = min(mm_tested.keys())\r\n rev_primer_index = mm_tested[rev_primer_mm]\r\n if rev_primer_mm <= reverse_primer_mismatches:\r\n write_seq = write_seq[0:rev_primer_index]\r\n if qual_out:\r\n curr_qual = curr_qual[0:barcode_len +\r\n primer_len + rev_primer_index]\r\n else:\r\n reverse_primer_not_found += 1\r\n write_seq = False\r\n except 
KeyError:\r\n bc_counts['#FAILED'].append(curr_rid)\r\n continue\r\n\r\n # Check for quality score windows, truncate or remove sequence\r\n # if poor window found. Previously tested whole sequence-now\r\n # testing the post barcode/primer removed sequence only.\r\n if qual_score_window:\r\n passed_window_check, window_index =\\\r\n check_window_qual_scores(curr_qual, qual_score_window,\r\n min_qual_score)\r\n # Throw out entire sequence if discard option True\r\n if discard_bad_windows and not passed_window_check:\r\n sliding_window_failed += 1\r\n write_seq = False\r\n # Otherwise truncate to index of bad window\r\n elif not discard_bad_windows and not passed_window_check:\r\n sliding_window_failed += 1\r\n write_seq = write_seq[0:window_index]\r\n if qual_out:\r\n curr_qual = curr_qual[0:barcode_len +\r\n primer_len + window_index]\r\n # Check for sequences that are too short after truncation\r\n if len(write_seq) + total_bc_primer_len < min_seq_len:\r\n write_seq = False\r\n below_seq_min_after_trunc += 1\r\n\r\n if truncate_ambi_bases and write_seq:\r\n write_seq_ambi_ix = True\r\n # Skip if no \"N\" characters detected.\r\n try:\r\n ambi_ix = write_seq.index(\"N\")\r\n write_seq = write_seq[0:ambi_ix]\r\n except ValueError:\r\n write_seq_ambi_ix = False\r\n pass\r\n if write_seq_ambi_ix:\r\n # Discard if too short after truncation\r\n if len(write_seq) + total_bc_primer_len < min_seq_len:\r\n write_seq = False\r\n below_seq_min_after_ambi_trunc += 1\r\n else:\r\n trunc_ambi_base_counts += 1\r\n if qual_out:\r\n curr_qual = curr_qual[0:barcode_len +\r\n primer_len + ambi_ix]\r\n\r\n # Slice out regions of quality scores that correspond to the\r\n # written sequence, i.e., remove the barcodes/primers and reverse\r\n # primers if option is enabled.\r\n if qual_out:\r\n qual_barcode, qual_primer, qual_scores_out = \\\r\n split_seq(curr_qual, barcode_len, primer_len)\r\n # Convert to strings instead of numpy arrays, strip off\r\n # brackets\r\n qual_barcode = format_qual_output(qual_barcode)\r\n qual_primer = format_qual_output(qual_primer)\r\n qual_scores_out = format_qual_output(qual_scores_out)\r\n\r\n if not write_seq:\r\n bc_counts['#FAILED'].append(curr_rid)\r\n continue\r\n\r\n if keep_primer:\r\n write_seq = cpr + write_seq\r\n if qual_out:\r\n qual_scores_out = qual_primer + qual_scores_out\r\n if keep_barcode:\r\n write_seq = cbc + write_seq\r\n if qual_out:\r\n qual_scores_out = qual_barcode + qual_scores_out\r\n\r\n # Record number of seqs associated with particular barcode.\r\n bc_counts[curr_bc].append(curr_rid)\r\n\r\n if retain_unassigned_reads and curr_samp_id == \"Unassigned\":\r\n fasta_out.write(\r\n \">%s %s orig_bc=%s new_bc=%s bc_diffs=%s\\n%s\\n\" %\r\n (new_id, curr_rid, cbc, curr_bc, int(bc_diffs), write_seq))\r\n if qual_out:\r\n qual_out.write(\r\n \">%s %s orig_bc=%s new_bc=%s bc_diffs=%s\\n%s\" %\r\n (new_id, curr_rid, cbc, curr_bc, int(bc_diffs),\r\n qual_scores_out))\r\n elif not retain_unassigned_reads and curr_samp_id == \"Unassigned\":\r\n bc_counts['#FAILED'].append(curr_rid)\r\n else:\r\n fasta_out.write(\r\n \">%s %s orig_bc=%s new_bc=%s bc_diffs=%s\\n%s\\n\" %\r\n (new_id, curr_rid, cbc, curr_bc, int(bc_diffs), write_seq))\r\n if qual_out:\r\n qual_out.write(\r\n \">%s %s orig_bc=%s new_bc=%s bc_diffs=%s\\n%s\" %\r\n (new_id, curr_rid, cbc, curr_bc, int(bc_diffs),\r\n qual_scores_out))\r\n\r\n curr_len = len(write_seq)\r\n\r\n #seq_lengths[curr_rid] = curr_len\r\n\r\n curr_ix += 1\r\n\r\n # Record the raw and written seq length of everything 
passing\r\n # filters\r\n raw_seq_lengths[curr_rid] = len(curr_seq)\r\n final_seq_lengths[curr_id] = curr_len\r\n\r\n if median_length_filtering:\r\n # Read original fasta file output to get sequence lengths\r\n fasta_out.close()\r\n fasta_out = open(fasta_out.name, \"U\")\r\n\r\n # Record sequence lengths for median/mad calculation\r\n sequence_lens = []\r\n for label, seq in parse_fasta(fasta_out):\r\n sequence_lens.append(len(seq))\r\n\r\n '''# Create a temporary file to copy the contents of the fasta file, will\r\n # need to delete once operations complete.\r\n fasta_temp = open(fasta_out.name + \"_tmp.fasta\", \"w\")\r\n\r\n sequence_lens = []\r\n for label, seq in parse_fasta(fasta_lens):\r\n sequence_lens.append(len(seq))\r\n fasta_temp.write(\">%s\\n%s\\n\" % (label, seq))\r\n\r\n fasta_temp.close()\r\n fasta_temp = open(fasta_out.name + \"_tmp.fasta\", \"U\")\r\n\r\n fasta_lens.close()\r\n # Overwrite seqs.fna with length filtered data\r\n fasta_out = open(fasta_out.name, \"w\")'''\r\n\r\n med_abs_dev, med_length = median_absolute_deviation(sequence_lens)\r\n\r\n min_corrected_len = med_length - med_abs_dev *\\\r\n float(median_length_filtering)\r\n max_corrected_len = med_length + med_abs_dev *\\\r\n float(median_length_filtering)\r\n seqs_discarded_median = 0\r\n\r\n fasta_out.seek(0)\r\n\r\n final_written_lens = []\r\n\r\n # Create final seqs.fna\r\n final_fasta_out = open(fasta_out.name.replace('.tmp', ''), \"w\")\r\n\r\n for label, seq in parse_fasta(fasta_out):\r\n curr_len = len(seq)\r\n if curr_len < min_corrected_len or curr_len > max_corrected_len:\r\n seqs_discarded_median += 1\r\n else:\r\n final_fasta_out.write(\">%s\\n%s\\n\" % (label, seq))\r\n final_written_lens.append(len(seq))\r\n\r\n final_fasta_out.close()\r\n fasta_out.close()\r\n remove_files([fasta_out.name])\r\n\r\n else:\r\n min_corrected_len = 0\r\n max_corrected_len = 0\r\n seqs_discarded_median = 0\r\n final_written_lens = 0\r\n\r\n # Copy tmp seqs file to final seqs.fna file\r\n fasta_out.close()\r\n fasta_out = open(fasta_out.name, \"U\")\r\n\r\n # Create final seqs.fna\r\n final_fasta_out = open(fasta_out.name.replace('.tmp', ''), \"w\")\r\n\r\n for label, seq in parse_fasta(fasta_out):\r\n final_fasta_out.write(\">%s\\n%s\\n\" % (label, seq))\r\n\r\n final_fasta_out.close()\r\n fasta_out.close()\r\n remove_files([fasta_out.name])\r\n\r\n median_results = (median_length_filtering, min_corrected_len,\r\n max_corrected_len, seqs_discarded_median, final_written_lens)\r\n\r\n raw_seq_lengths = raw_seq_lengths.values()\r\n final_seq_lengths = final_seq_lengths.values()\r\n\r\n log_out = format_log(bc_counts, corr_ct, valid_map, seq_lengths, filters,\r\n retain_unassigned_reads, attempt_bc_correction, primer_mismatch_count,\r\n max_primer_mm, reverse_primers, reverse_primer_not_found,\r\n sliding_window_failed, below_seq_min_after_trunc, qual_score_window,\r\n discard_bad_windows, min_seq_len, raw_seq_lengths,\r\n final_seq_lengths, median_results, truncate_ambi_bases,\r\n below_seq_min_after_ambi_trunc, )\r\n\r\n #all_seq_lengths, good_seq_lengths = get_seq_lengths(seq_lengths, bc_counts)\r\n\r\n return log_out, seq_lengths.values(), raw_seq_lengths, final_seq_lengths",
"def improve_best_RISCC_read(self, seq, new_position, N_errors=None, read_count=1, max_distance=MAX_POSITION_DISTANCE):\n # if there are more than one current reads, you're not using improve_best_RISCC_read consistently!\n if len(self.RISCC_genome_side_aligned_reads) > 1:\n raise MutantError(\"Don't try using the improve_best_RISCC_read when keeping more than one read!\")\n # if decided to replace, discard old genome-side read dict and make new one from just the current read data.\n if self._decide_if_replace_read(new_position, max_distance):\n self.RISCC_genome_side_aligned_reads, self.RISCC_genome_side_unaligned_reads = {}, {}\n self.add_RISCC_read(seq, new_position, N_errors, read_count)\n # TODO make this count unaligned/confirming/non-confirming reads, too, instead of keeping all these counts as functions that read the actual mutant data, which will be missing in this case? I did something like that in mutant_Carette.py.",
"def nremaining(self) -> int:\n return self._nmines - self._nfound",
"def get_num_carn_landscape(self):\n return len(self.carn_pop)",
"def _calc_coverage(self, cds_aln):\n # Aligned region is part of a read that intersects with cds.\n coverage = 0\n for aln_reg in cds_aln.aligned_regions.values(): # aln_reg is of type CdsAlnSublocation\n location = aln_reg.location # location is of type Location\n coverage += location.length()\n coverage = coverage / float(Location.from_location_str(cds_aln.cds.location).length())\n return coverage",
"def reads_in_chromosome(self, chromosome):\n return sum(m.read_info(self.dataset_name).total_read_count \n for m in self.dataset if m.position not in SPECIAL_POSITIONS.all_undefined and m.position.chromosome==chromosome)",
"def countReadCoverage(bam,chrom,start,end,strand=None):\n\n coverage = []\n start = int(start)\n end = int(end)\n for i in range(end-start+1):\n coverage.append(0.0)\n\n i = 0\n if chrom in bam.references:\n for pcol in bam.pileup(chrom,start,end):\n n = 0\n if pcol.pos >= start and pcol.pos <= end:\n for read in pcol.pileups:\n if strand == '+':\n if not read.alignment.is_reverse and read.alignment.mapq >= 0 and not read.alignment.is_duplicate:\n n += 1\n elif strand == '-':\n if read.alignment.is_reverse and read.alignment.mapq >= 0 and not read.alignment.is_duplicate:\n n += 1\n else:\n if read.alignment.mapq >= 0 and not read.alignment.is_duplicate:\n n += 1\n coverage[i] = n\n i += 1\n\n return coverage",
"def test_perceive_residues_natoms_cyxteine(self, strict_chirality):\n offmol = Molecule.from_file(get_data_file_path(\"proteins/MainChain_CYX.sdf\"))\n # Perceive residue substructures\n offmol.perceive_residues(strict_chirality=strict_chirality)\n counter = 0 # matched atom counter\n for atom in offmol.atoms:\n if atom.metadata:\n counter += 1\n assert counter == offmol.n_atoms"
] |
[
"0.82452977",
"0.76317364",
"0.7584211",
"0.6724946",
"0.62195504",
"0.6124197",
"0.6110739",
"0.5961448",
"0.59085256",
"0.58062243",
"0.57387584",
"0.5732832",
"0.57009476",
"0.56621265",
"0.5628521",
"0.5623645",
"0.56162757",
"0.55914694",
"0.5548447",
"0.54905534",
"0.53993154",
"0.5384139",
"0.5381465",
"0.5379249",
"0.5340579",
"0.53391814",
"0.5311551",
"0.53075826",
"0.5290151",
"0.52737975"
] |
0.8187665
|
1
|
% of unique genome-side sequences that confirm the cassette-side position (same chrom/strand, within max_distance).
|
def RISCC_percent_confirming_seqs(self, max_distance=MAX_POSITION_DISTANCE, round_to_int=False):
if not self.RISCC_N_aligned_seqs: return float('nan')
else:
percent = self.RISCC_N_confirming_seqs(max_distance) / self.RISCC_N_aligned_seqs * 100
if round_to_int: percent = int(round(percent))
return percent
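A minimal standalone sketch of the same percentage logic, using plain counts instead of the mutant object (percent_confirming is a hypothetical helper, assuming Python 3 division; it is not part of the original class):

def percent_confirming(n_confirming, n_aligned, round_to_int=False):
    # NaN when nothing aligned, otherwise confirming / aligned * 100, optionally rounded
    if not n_aligned:
        return float('nan')
    percent = n_confirming / n_aligned * 100
    return int(round(percent)) if round_to_int else percent

# e.g. percent_confirming(3, 4) -> 75.0; percent_confirming(3, 4, round_to_int=True) -> 75; percent_confirming(0, 0) -> nan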
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def RISCC_N_distinct_regions(self, max_distance=MAX_POSITION_DISTANCE):\n # TODO add options for minimum #seqs and #reads to count a region as valid!\n positions_by_chrom_strand = defaultdict(list)\n # add the cassette-side position (single)\n try:\n positions_by_chrom_strand[(self.position.chromosome, self.position.strand)].append(self.position.min_position)\n except AttributeError:\n pass\n # add all the genome-side read positions; skip unaligned ones.\n for read_data in self.RISCC_genome_side_aligned_reads.values():\n pos = read_data[0]\n try: positions_by_chrom_strand[(pos.chromosome, pos.strand)].append(pos.min_position)\n except AttributeError: continue\n # count total number of dictinct regions - different chromosomes or strands, or distance > max_distance\n total_distinct_regions_genome = 0\n total_distinct_regions_cassette = 0\n # for each chromosome, go over all positions and only count ones every MAX_POSITION_DISTANCE as distinct\n for chrom_strand, positions in positions_by_chrom_strand.items():\n positions.sort()\n distinct_regions = [positions[0]]\n for pos in positions[1:]:\n if (pos-distinct_regions[-1]) > max_distance:\n distinct_regions.append(pos)\n if is_cassette_chromosome(chrom_strand[0]): total_distinct_regions_cassette += len(distinct_regions)\n else: total_distinct_regions_genome += len(distinct_regions)\n return total_distinct_regions_genome, total_distinct_regions_cassette",
"def RISCC_N_non_confirming_seqs(self, max_distance=MAX_POSITION_DISTANCE):\n return len(self.RISCC_genome_side_aligned_reads) - self.RISCC_N_confirming_seqs(max_distance)",
"def RISCC_N_confirming_seqs(self, max_distance=MAX_POSITION_DISTANCE):\n N = 0\n for read_data in self.RISCC_genome_side_aligned_reads.values():\n # skip non-aligned reads; check aligned reads for confirming.\n try: chrom = read_data[0].chromosome\n except AttributeError: continue\n if self._if_confirming_read(read_data[0], max_distance): \n N += 1\n return N",
"def _get_single_seq_completeness(self, mapped_record, gene_code='dna'):\n\n map_ref_record = self.map_ref_records[self._get_og_id(mapped_record.id)]\n map_ref_seq = str(map_ref_record.seq).upper()\n map_seq = str(mapped_record.seq).upper()\n if self.ref_records and self._get_og_id(mapped_record.id) in \\\n self.ref_records.keys():\n ref_record = self.ref_records[self._get_og_id(mapped_record.id)]\n ref_seq = str(ref_record.seq).upper()\n else:\n ref_seq = map_ref_seq\n if gene_code == 'dna':\n ref_seq_len = len(ref_seq)\n map_seq_len = len(map_ref_seq)\n non_n_len = len(map_ref_seq) - str(map_seq).count('N')\n map_seq_completeness = non_n_len / map_seq_len\n ref_seq_completeness = non_n_len / ref_seq_len\n elif gene_code == 'aa':\n ref_seq_len = len(ref_seq)\n map_seq_len = len(map_seq)\n non_n_len = len(map_seq) - str(map_seq).count('X')\n map_seq_completeness = non_n_len / map_seq_len\n ref_seq_completeness = non_n_len / ref_seq_len\n return [map_seq_completeness, ref_seq_completeness,\n non_n_len, map_seq_len, ref_seq_len]",
"def RISCC_N_non_confirming_reads(self, max_distance=MAX_POSITION_DISTANCE):\n return sum(x[1] for x in self.RISCC_genome_side_aligned_reads.values()) - self.RISCC_N_confirming_reads(max_distance)",
"def calc_conservation(aln):\n\n length = len(aln.values()[0])\n seqs = aln.values()\n percids = []\n\n # find identity positions\n for i in xrange(length):\n chars = util.hist_dict(util.cget(seqs, i))\n if \"-\" in chars:\n del chars[\"-\"]\n\n if len(chars) == 0:\n percids.append(0.0)\n else:\n pid = max(chars.values()) / float(len(aln))\n percids.append(pid)\n return percids",
"def get_identity(alignment_list):\n size_alignement = len(alignment_list[1])\n identical_nucleotide = 0\n for i in range(len(alignment_list[0])):\n if alignment_list[0][i] == alignment_list[1][i]:\n identical_nucleotide += 1\n\n return (identical_nucleotide / size_alignement) * 100",
"def RISCC_N_confirming_reads(self, max_distance=MAX_POSITION_DISTANCE):\n N = 0\n for read_data in self.RISCC_genome_side_aligned_reads.values():\n # skip non-aligned reads; check aligned reads for confirming.\n try: chrom = read_data[0].chromosome\n except AttributeError: continue\n if self._if_confirming_read(read_data[0], max_distance): \n N += read_data[1]\n return N",
"def RISCC_max_confirmed_distance(self, max_distance=MAX_POSITION_DISTANCE):\n distances = []\n if self.RISCC_N_confirming_seqs(max_distance) + self.RISCC_N_non_confirming_seqs(max_distance) == 0:\n return float('NaN')\n for RISCC_read_data in self.RISCC_genome_side_aligned_reads.values():\n # Only look at the genome-side reads that match the cassette-side read position!\n # There's a try/except because unaligned reads don't have proper positions.\n try: \n if (RISCC_read_data[0].chromosome == self.position.chromosome \n and RISCC_read_data[0].strand == self.position.strand):\n pos_difference = abs(RISCC_read_data[0].min_position - self.position.min_position)\n else:\n continue\n except AttributeError: \n continue\n if pos_difference <= max_distance:\n distances.append(pos_difference)\n try:\n return max(distances)\n except ValueError:\n return 0\n # this is basically unit-tested by the tests for add_RISCC_read and improve_best_RISCC_read",
"def get_all_guides_that_cut_in_cds(self,pam, seq_len_around_cut,\n min_mut_pos_in_guide, max_mut_pos_in_guide,\n excluded_seqs, mapping_cmd, sort_by = '5to3'):\n \n ordered_lefts = self.cds_lefts\n ordered_rights = self.cds_rights\n if (self.is_neg_strand()):\n ordered_lefts = ordered_lefts[::-1]\n ordered_rights = ordered_rights[::-1]\n \n ######\n # search positive strand for pam\n ######\n cur_cds_nt_start = 0\n exon_num = 0\n guides0_chr_pos = np.empty(0,dtype=int)\n guides_cut_chr_pos = np.empty(0,dtype=int)\n guides_cut_gene_dna_pos = np.empty(0,dtype=int)\n guides_exon_num = np.empty(0,dtype=int)\n\n for left,right in zip(ordered_lefts,ordered_rights):\n \n # cut is to the right of the nucleotide\n cur_left_for_pam = left + (self.CRISPR_CUT_INDEX + len(pam) - 1) + (1 * self.is_neg_strand()) \n cur_right_for_pam = right + (self.CRISPR_CUT_INDEX + len(pam) - 1) + (1 * self.is_neg_strand()) \n \n\n \n seq = self.genome_seq[self.chrom].seq[cur_left_for_pam:cur_right_for_pam]\n \n # returns a list of all the positions in that cut in cds\n cur_pam_dists = np.array([m.start() for m in re.finditer(\"(?=\"+pam+\")\", str(seq))])\n \n # removing guides that are not entirely in the CDS\n if ( (not np.isnan(min_mut_pos_in_guide)) and (not np.isnan(max_mut_pos_in_guide)) ):\n min_mut_pos_in_guide = int(min_mut_pos_in_guide)\n max_mut_pos_in_guide = int(max_mut_pos_in_guide)\n \n cur_pam_dists = cur_pam_dists[cur_pam_dists >= (-min_mut_pos_in_guide) - (self.CRISPR_CUT_INDEX + 1 * self.is_neg_strand() ) ]\n cur_pam_dists = cur_pam_dists[cur_pam_dists <= (len(seq) - 1 + len(pam) - 1 ) + ( (-max_mut_pos_in_guide) - (self.CRISPR_CUT_INDEX + 1 * self.is_neg_strand()) ) ]\n \n \n cur_guides0_chr_pos = (cur_pam_dists-1) + cur_left_for_pam \n \n if (self.is_neg_strand()): # negative\n cur_guides_cut_gene_dna_pos = (len(seq)-1-cur_pam_dists) + cur_cds_nt_start\n cur_guides_cut_chr_pos = cur_guides0_chr_pos - (self.CRISPR_CUT_INDEX + 1) # the cut is right of the nt\n else:\n cur_guides_cut_gene_dna_pos = cur_pam_dists + cur_cds_nt_start\n cur_guides_cut_chr_pos = cur_guides0_chr_pos - self.CRISPR_CUT_INDEX # the cut is left of the nt\n \n \n cur_guides_exon_num = np.full_like(cur_guides_cut_gene_dna_pos,exon_num)\n \n \n guides0_chr_pos = np.concatenate((guides0_chr_pos,cur_guides0_chr_pos))\n guides_cut_chr_pos = np.concatenate((guides_cut_chr_pos,cur_guides_cut_chr_pos))\n guides_cut_gene_dna_pos = np.concatenate((guides_cut_gene_dna_pos,cur_guides_cut_gene_dna_pos))\n guides_exon_num = np.concatenate((guides_exon_num,cur_guides_exon_num))\n \n \n cur_cds_nt_start = cur_cds_nt_start + (right - left)\n exon_num = exon_num + 1\n \n \n pos_strand_guides_df = self.__guide_positions_to_df(pam, False, seq_len_around_cut, excluded_seqs, \\\n guides0_chr_pos, guides_cut_chr_pos, guides_cut_gene_dna_pos, guides_exon_num) \n \n ######\n # search negative strand for pam\n ######\n cur_cds_nt_start = 0\n exon_num = 0\n guides0_chr_pos = np.empty(0,dtype=int)\n guides_cut_chr_pos = np.empty(0,dtype=int)\n guides0_gene_dna_pos = np.empty(0,dtype=int)\n guides_cut_gene_dna_pos = np.empty(0,dtype=int)\n guides_exon_num = np.empty(0,dtype=int)\n \n for left,right in zip(ordered_lefts,ordered_rights):\n \n \n cur_left_for_pam = int(left) - (self.CRISPR_CUT_INDEX + len(pam)+1) + (1 * self.is_neg_strand())\n cur_right_for_pam = int(right) - (self.CRISPR_CUT_INDEX + len(pam)+1) + (1 * self.is_neg_strand())\n \n seq = self.genome_seq[self.chrom].seq[cur_left_for_pam:cur_right_for_pam]\n \n revcomp_pam = 
Seq(pam,generic_dna).reverse_complement()\n \n # returns a list of all the positions in that cut in cds\n cur_pam_dists = np.array([m.start() for m in re.finditer(\"(?=\"+str(revcomp_pam)+\")\", str(seq))])\n \n \n if ( (not np.isnan(min_mut_pos_in_guide)) and (not np.isnan(max_mut_pos_in_guide)) ):\n min_mut_pos_in_guide = int(min_mut_pos_in_guide)\n max_mut_pos_in_guide = int(max_mut_pos_in_guide)\n \n cur_pam_dists = cur_pam_dists[cur_pam_dists >= (-min_mut_pos_in_guide) - (self.CRISPR_CUT_INDEX + 1 * self.is_neg_strand() ) ]\n cur_pam_dists = cur_pam_dists[cur_pam_dists <= (len(seq) - 1 + len(pam) - 1 ) + ( (-max_mut_pos_in_guide) - (self.CRISPR_CUT_INDEX + 1 * self.is_neg_strand()) ) ]\n \n \n \n cur_guides0_chr_pos = (cur_pam_dists+2) + cur_left_for_pam\n \n if (self.is_neg_strand()): # negative \n cur_guides_cut_gene_dna_pos = (len(seq)-1-cur_pam_dists) + cur_cds_nt_start\n cur_guides_cut_chr_pos = cur_guides0_chr_pos + self.CRISPR_CUT_INDEX # the cut is right of the nt\n else: # positive\n cur_guides_cut_gene_dna_pos = cur_pam_dists + cur_cds_nt_start\n cur_guides_cut_chr_pos = cur_guides0_chr_pos + self.CRISPR_CUT_INDEX + 1 # the cut is leftot the nt\n \n \n cur_guides_exon_num = np.full_like(cur_guides_cut_gene_dna_pos,exon_num)\n \n \n guides0_chr_pos = np.concatenate((guides0_chr_pos,cur_guides0_chr_pos))\n guides_cut_chr_pos = np.concatenate((guides_cut_chr_pos,cur_guides_cut_chr_pos))\n guides_cut_gene_dna_pos = np.concatenate((guides_cut_gene_dna_pos,cur_guides_cut_gene_dna_pos))\n guides_exon_num = np.concatenate((guides_exon_num,cur_guides_exon_num))\n \n cur_cds_nt_start = cur_cds_nt_start + (right - left)\n exon_num = exon_num + 1\n \n \n neg_strand_guides_df = self.__guide_positions_to_df(pam, True, seq_len_around_cut, excluded_seqs, \\\n guides0_chr_pos, guides_cut_chr_pos, guides_cut_gene_dna_pos, guides_exon_num)\n \n \n # concating the positive and negative strands guides\n guides_df = pd.concat([pos_strand_guides_df, neg_strand_guides_df])\n \n # adding for each guide its location in the gene (5' -> 3'; fraction)\n guides_df[\"guide_cut_gene_pos_frac\"] = guides_df[\"guide_cut_gene_nt_pos\"] / guides_df[\"CDS_len_nts\"]\n\n \n # if the 'sort' method is onlyStopCodon then leave only guide that cut the stop codon\n if sort_by == 'onlyStopCodon':\n guides_df = guides_df.ix[( ( (guides_df['CDS_len_nts']).values / 3) == ( (guides_df['guide_cut_gene_aa_pos']).values + 1) ) ,:]\n \n # calculating Azimuth score\n #print \"Calculating Azimuth score\"\n guides_df = cal_azimuth_score(guides_df, output_filename_GUIDE_withScores = \"\", guides_PAMm4p3_col_name=\"guide_PAM_m4p3\")\n \n # calculating off targets\n #print \"Testing off targets\"\n guides_df = eval_guides_off_targets(guides_df, self.genome_seq, 'guide_id', 'guide_noPAM', pam, mapping_cmd)\n \n \n return (guides_df)",
"def get_identity(alignment_list):\n min_length = min(len(alignment_list[0]),len(alignment_list[1]))\n count = 0\n for i in range(min_length):\n if alignment_list[0][i] == alignment_list[1][i]:\n count += 1\n percent = count/min_length * 100\n return percent",
"def get_number_seqs_for_primer(percent_match,\n seq_count):\n \n total_seq_use=int((1-percent_match)*seq_count)\n \n return total_seq_use",
"def distance(base_strand, comparison_strand):\n hamming_distance = 0\n\n for nucleotide in range(len(base_strand)):\n if base_strand[nucleotide] != comparison_strand[nucleotide]:\n hamming_distance += 1\n \n return hamming_distance",
"def gc_content(seq):\n result = float(str(seq).count('G') + str(seq).count('C'))/len(seq) *100\n return result",
"def gc_content(sequence):\n gc = sequence.count('G') + sequence.count('C')\n atgc = sequence.count('A') + sequence.count('T') + sequence.count('G') + sequence.count('C')\n \n return (gc/atgc) * 100",
"def codon_counts(self):\n # Removing 5' UTR and 3' UTR sequences\n sequence = self.sequence.replace(self.five_prime_utr_sequence, \"\").replace(self.three_prime_utr_sequence, \"\")\n return len(sequence) / 3",
"def calculate_gc_content(sequence):\n sequence = sequence.upper()\n sc = Counter(sequence)\n return round((sc['C'] + sc['G']) / (sc['A'] + sc['C'] + sc['G'] + sc['T']) * 100, 2)",
"def measure_gcd_success():\n for size in range(2,16):\n print(\"--------- samplesize = %d\" % size)\n d = dict()\n for _ in range(1000):\n q = findpoly(size)\n d.setdefault(q,0)\n d[q] += 1\n for k,v in sorted(d.items(), key=lambda x: x[1]):\n print(\"%5d: %8s\" % (v, k))",
"def get_ccdf(degseq):\n uniques, counts = np.unique(degseq, return_counts=True)\n cumprob = np.cumsum(counts).astype(np.double) / (degseq.size)\n return uniques[::-1], (1. - cumprob)[::-1]",
"def calculate_coverage(path, alignment, number_of_fastas):\n\n path_to_alignment = path + 'Modeling/fasta_alns_and_identities/' + alignment\n fastas_iterator = parse_multifasta_file(path_to_alignment, number_of_fastas)\n fastas = []\n targer_name, target_seq = next(fastas_iterator)\n fastas.append(target_seq)\n length_of_target = 0\n for i in target_seq:\n if i != '-':\n length_of_target += 1\n for i in range(1, number_of_fastas):\n name, seq = next(fastas_iterator)\n fastas.append(seq)\n coverage = 0\n for i in range(len(fastas[0])):\n for j in range(1, len(fastas)):\n if fastas[0][i] != '-' and fastas[j][i] != '-':\n coverage += 1\n break\n coverage_percent = round(coverage / length_of_target * 100, 2)\n return coverage_percent",
"def RISCC_percent_confirming_reads(self, max_distance=MAX_POSITION_DISTANCE, round_to_int=False):\n if not self.RISCC_N_aligned_reads: return float('nan')\n else:\n percent = self.RISCC_N_confirming_reads(max_distance) / self.RISCC_N_aligned_reads * 100\n if round_to_int: percent = int(round(percent))\n return percent",
"def percent_identity(align_1, align_2):\n matches = 0\n for i in range(len(align_1)):\n if align_1[i] == align_2[i]:\n matches+= 1\n percent_identity = matches / len(align_1)\n return percent_identity",
"def countingPointMutations(seq1, seq2):\n seqLength = len(list(seq1))\n \n hammingDistance=0;\n for i in range(0,seqLength):\n if list(seq1)[i]!=list(seq2)[i]:\n hammingDistance = hammingDistance+1;\n return hammingDistance",
"def check_seqs(fasta_out, fasta_files, starting_ix, valid_map, qual_mappings,\r\n filters, barcode_len, keep_primer, keep_barcode, barcode_type,\r\n max_bc_errors, retain_unassigned_reads, attempt_bc_correction,\r\n primer_seqs_lens, all_primers, max_primer_mm, disable_primer_check,\r\n reverse_primers, rev_primers, qual_out, qual_score_window=0,\r\n discard_bad_windows=False, min_qual_score=25, min_seq_len=200,\r\n median_length_filtering=None, added_demultiplex_field=None,\r\n reverse_primer_mismatches=0, truncate_ambi_bases=False):\r\n\r\n seq_lengths = {}\r\n\r\n # Record complete barcode + primer + sequence lengths\r\n raw_seq_lengths = {}\r\n # Record sequence lengths after all optional removal of components\r\n final_seq_lengths = {}\r\n\r\n bc_counts = defaultdict(list)\r\n curr_ix = starting_ix\r\n corr_ct = 0 # count of corrected barcodes\r\n\r\n # get the list of barcode lengths in reverse order\r\n barcode_length_order =\\\r\n sorted(set([len(bc.split(',')[0]) for bc in valid_map]))\r\n barcode_length_order = barcode_length_order[::-1]\r\n\r\n primer_mismatch_count = 0\r\n all_primers_lens = sorted(set(all_primers.values()))\r\n\r\n reverse_primer_not_found = 0\r\n\r\n sliding_window_failed = 0\r\n trunc_ambi_base_counts = 0\r\n\r\n below_seq_min_after_trunc = 0\r\n below_seq_min_after_ambi_trunc = 0\r\n\r\n for fasta_in in fasta_files:\r\n for curr_id, curr_seq in parse_fasta(fasta_in):\r\n curr_rid = curr_id.split()[0]\r\n curr_seq = upper(curr_seq)\r\n\r\n curr_len = len(curr_seq)\r\n curr_qual = qual_mappings.get(curr_rid, None)\r\n\r\n # if qual_out:\r\n # curr_qual_out_score = \\\r\n # \"%2.2f\" % float(float(sum(curr_qual))/float(len(curr_qual)))\r\n seq_lengths[curr_rid] = curr_len\r\n failed = False\r\n\r\n for f in filters:\r\n failed = failed or f(curr_rid, curr_seq, curr_qual)\r\n if failed: # if we failed any of the checks, bail out here\r\n bc_counts['#FAILED'].append(curr_rid)\r\n continue\r\n\r\n if barcode_type == 'variable_length':\r\n # Reset the raw_barcode, raw_seq, and barcode_len -- if\r\n # we don't match a barcode from the mapping file, we want\r\n # these values to be None\r\n raw_barcode, raw_seq, barcode_len = (None, None, None)\r\n\r\n curr_valid_map =\\\r\n [curr_bc.split(',')[0] for curr_bc in valid_map]\r\n # Iterate through the barcode length from longest to shortest\r\n for l in barcode_length_order:\r\n # extract the current length barcode from the sequence\r\n bc, seq = get_barcode(curr_seq, l)\r\n # check if the sliced sequence corresponds to a valid\r\n # barcode, and if so set raw_barcode, raw_seq, and\r\n # barcode_len for use in the next steps\r\n if bc in curr_valid_map:\r\n raw_barcode, raw_seq = bc, seq\r\n barcode_len = len(raw_barcode)\r\n break\r\n # if we haven't found a valid barcode, log this sequence as\r\n # failing to match a barcode, and move on to the next sequence\r\n if not raw_barcode:\r\n bc_counts['#FAILED'].append(curr_rid)\r\n continue\r\n\r\n else:\r\n # Get the current barcode to look up the associated primer(s)\r\n raw_barcode, raw_seq = get_barcode(curr_seq, barcode_len)\r\n\r\n if not disable_primer_check:\r\n try:\r\n current_primers = primer_seqs_lens[raw_barcode]\r\n # In this case, all values will be the same, i.e. 
the length\r\n # of the given primer, or degenerate variations thereof.\r\n primer_len = current_primers.values()[0]\r\n\r\n if primer_exceeds_mismatches(raw_seq[:primer_len],\r\n current_primers, max_primer_mm):\r\n bc_counts['#FAILED'].append(curr_rid)\r\n primer_mismatch_count += 1\r\n continue\r\n except KeyError:\r\n # If the barcode read does not match any of those in the\r\n # mapping file, the situation becomes more complicated. We do\r\n # not know the length the sequence to slice out to compare to\r\n # our primer sets, so, in ascending order of all the given\r\n # primer lengths, a sequence will the sliced out and compared\r\n # to the primer set.\r\n current_primers = all_primers\r\n found_match = False\r\n for seq_slice_len in all_primers_lens:\r\n if not(\r\n primer_exceeds_mismatches(raw_seq[:seq_slice_len],\r\n current_primers, max_primer_mm)):\r\n primer_len = seq_slice_len\r\n found_match = True\r\n break\r\n if not found_match:\r\n bc_counts['#FAILED'].append(curr_rid)\r\n primer_mismatch_count += 1\r\n continue\r\n except IndexError:\r\n # Try to raise meaningful error if problem reading primers\r\n raise IndexError('Error reading primer sequences. If ' +\r\n 'primers were purposefully not included in the mapping ' +\r\n 'file, disable usage with the -p option.')\r\n else:\r\n # Set primer length to zero if primers are disabled.\r\n primer_len = 0\r\n\r\n # split seqs\r\n cbc, cpr, cres = split_seq(curr_seq, barcode_len,\r\n primer_len)\r\n\r\n total_bc_primer_len = len(cbc) + len(cpr)\r\n\r\n # get current barcode\r\n try:\r\n bc_diffs, curr_bc, corrected_bc = \\\r\n check_barcode(cbc, barcode_type, valid_map.keys(),\r\n attempt_bc_correction, added_demultiplex_field, curr_id)\r\n if bc_diffs > max_bc_errors:\r\n raise ValueError(\"Too many errors in barcode\")\r\n corr_ct += bool(corrected_bc)\r\n except Exception as e:\r\n bc_counts[None].append(curr_rid)\r\n continue\r\n\r\n curr_samp_id = valid_map.get(curr_bc, 'Unassigned')\r\n\r\n new_id = \"%s_%d\" % (curr_samp_id, curr_ix)\r\n # check if writing out primer\r\n write_seq = cres\r\n\r\n if reverse_primers == \"truncate_only\":\r\n try:\r\n rev_primer = rev_primers[curr_bc]\r\n mm_tested = {}\r\n for curr_rev_primer in rev_primer:\r\n # Try to find lowest count of mismatches for all\r\n # reverse primers\r\n rev_primer_mm, rev_primer_index = \\\r\n local_align_primer_seq(curr_rev_primer, cres)\r\n mm_tested[rev_primer_mm] = rev_primer_index\r\n\r\n rev_primer_mm = min(mm_tested.keys())\r\n rev_primer_index = mm_tested[rev_primer_mm]\r\n if rev_primer_mm <= reverse_primer_mismatches:\r\n write_seq = write_seq[0:rev_primer_index]\r\n if qual_out:\r\n curr_qual = curr_qual[0:barcode_len +\r\n primer_len + rev_primer_index]\r\n else:\r\n reverse_primer_not_found += 1\r\n except KeyError:\r\n pass\r\n elif reverse_primers == \"truncate_remove\":\r\n try:\r\n rev_primer = rev_primers[curr_bc]\r\n mm_tested = {}\r\n for curr_rev_primer in rev_primer:\r\n # Try to find lowest count of mismatches for all\r\n # reverse primers\r\n rev_primer_mm, rev_primer_index = \\\r\n local_align_primer_seq(curr_rev_primer, cres)\r\n mm_tested[rev_primer_mm] = rev_primer_index\r\n\r\n rev_primer_mm = min(mm_tested.keys())\r\n rev_primer_index = mm_tested[rev_primer_mm]\r\n if rev_primer_mm <= reverse_primer_mismatches:\r\n write_seq = write_seq[0:rev_primer_index]\r\n if qual_out:\r\n curr_qual = curr_qual[0:barcode_len +\r\n primer_len + rev_primer_index]\r\n else:\r\n reverse_primer_not_found += 1\r\n write_seq = False\r\n except 
KeyError:\r\n bc_counts['#FAILED'].append(curr_rid)\r\n continue\r\n\r\n # Check for quality score windows, truncate or remove sequence\r\n # if poor window found. Previously tested whole sequence-now\r\n # testing the post barcode/primer removed sequence only.\r\n if qual_score_window:\r\n passed_window_check, window_index =\\\r\n check_window_qual_scores(curr_qual, qual_score_window,\r\n min_qual_score)\r\n # Throw out entire sequence if discard option True\r\n if discard_bad_windows and not passed_window_check:\r\n sliding_window_failed += 1\r\n write_seq = False\r\n # Otherwise truncate to index of bad window\r\n elif not discard_bad_windows and not passed_window_check:\r\n sliding_window_failed += 1\r\n write_seq = write_seq[0:window_index]\r\n if qual_out:\r\n curr_qual = curr_qual[0:barcode_len +\r\n primer_len + window_index]\r\n # Check for sequences that are too short after truncation\r\n if len(write_seq) + total_bc_primer_len < min_seq_len:\r\n write_seq = False\r\n below_seq_min_after_trunc += 1\r\n\r\n if truncate_ambi_bases and write_seq:\r\n write_seq_ambi_ix = True\r\n # Skip if no \"N\" characters detected.\r\n try:\r\n ambi_ix = write_seq.index(\"N\")\r\n write_seq = write_seq[0:ambi_ix]\r\n except ValueError:\r\n write_seq_ambi_ix = False\r\n pass\r\n if write_seq_ambi_ix:\r\n # Discard if too short after truncation\r\n if len(write_seq) + total_bc_primer_len < min_seq_len:\r\n write_seq = False\r\n below_seq_min_after_ambi_trunc += 1\r\n else:\r\n trunc_ambi_base_counts += 1\r\n if qual_out:\r\n curr_qual = curr_qual[0:barcode_len +\r\n primer_len + ambi_ix]\r\n\r\n # Slice out regions of quality scores that correspond to the\r\n # written sequence, i.e., remove the barcodes/primers and reverse\r\n # primers if option is enabled.\r\n if qual_out:\r\n qual_barcode, qual_primer, qual_scores_out = \\\r\n split_seq(curr_qual, barcode_len, primer_len)\r\n # Convert to strings instead of numpy arrays, strip off\r\n # brackets\r\n qual_barcode = format_qual_output(qual_barcode)\r\n qual_primer = format_qual_output(qual_primer)\r\n qual_scores_out = format_qual_output(qual_scores_out)\r\n\r\n if not write_seq:\r\n bc_counts['#FAILED'].append(curr_rid)\r\n continue\r\n\r\n if keep_primer:\r\n write_seq = cpr + write_seq\r\n if qual_out:\r\n qual_scores_out = qual_primer + qual_scores_out\r\n if keep_barcode:\r\n write_seq = cbc + write_seq\r\n if qual_out:\r\n qual_scores_out = qual_barcode + qual_scores_out\r\n\r\n # Record number of seqs associated with particular barcode.\r\n bc_counts[curr_bc].append(curr_rid)\r\n\r\n if retain_unassigned_reads and curr_samp_id == \"Unassigned\":\r\n fasta_out.write(\r\n \">%s %s orig_bc=%s new_bc=%s bc_diffs=%s\\n%s\\n\" %\r\n (new_id, curr_rid, cbc, curr_bc, int(bc_diffs), write_seq))\r\n if qual_out:\r\n qual_out.write(\r\n \">%s %s orig_bc=%s new_bc=%s bc_diffs=%s\\n%s\" %\r\n (new_id, curr_rid, cbc, curr_bc, int(bc_diffs),\r\n qual_scores_out))\r\n elif not retain_unassigned_reads and curr_samp_id == \"Unassigned\":\r\n bc_counts['#FAILED'].append(curr_rid)\r\n else:\r\n fasta_out.write(\r\n \">%s %s orig_bc=%s new_bc=%s bc_diffs=%s\\n%s\\n\" %\r\n (new_id, curr_rid, cbc, curr_bc, int(bc_diffs), write_seq))\r\n if qual_out:\r\n qual_out.write(\r\n \">%s %s orig_bc=%s new_bc=%s bc_diffs=%s\\n%s\" %\r\n (new_id, curr_rid, cbc, curr_bc, int(bc_diffs),\r\n qual_scores_out))\r\n\r\n curr_len = len(write_seq)\r\n\r\n #seq_lengths[curr_rid] = curr_len\r\n\r\n curr_ix += 1\r\n\r\n # Record the raw and written seq length of everything 
passing\r\n # filters\r\n raw_seq_lengths[curr_rid] = len(curr_seq)\r\n final_seq_lengths[curr_id] = curr_len\r\n\r\n if median_length_filtering:\r\n # Read original fasta file output to get sequence lengths\r\n fasta_out.close()\r\n fasta_out = open(fasta_out.name, \"U\")\r\n\r\n # Record sequence lengths for median/mad calculation\r\n sequence_lens = []\r\n for label, seq in parse_fasta(fasta_out):\r\n sequence_lens.append(len(seq))\r\n\r\n '''# Create a temporary file to copy the contents of the fasta file, will\r\n # need to delete once operations complete.\r\n fasta_temp = open(fasta_out.name + \"_tmp.fasta\", \"w\")\r\n\r\n sequence_lens = []\r\n for label, seq in parse_fasta(fasta_lens):\r\n sequence_lens.append(len(seq))\r\n fasta_temp.write(\">%s\\n%s\\n\" % (label, seq))\r\n\r\n fasta_temp.close()\r\n fasta_temp = open(fasta_out.name + \"_tmp.fasta\", \"U\")\r\n\r\n fasta_lens.close()\r\n # Overwrite seqs.fna with length filtered data\r\n fasta_out = open(fasta_out.name, \"w\")'''\r\n\r\n med_abs_dev, med_length = median_absolute_deviation(sequence_lens)\r\n\r\n min_corrected_len = med_length - med_abs_dev *\\\r\n float(median_length_filtering)\r\n max_corrected_len = med_length + med_abs_dev *\\\r\n float(median_length_filtering)\r\n seqs_discarded_median = 0\r\n\r\n fasta_out.seek(0)\r\n\r\n final_written_lens = []\r\n\r\n # Create final seqs.fna\r\n final_fasta_out = open(fasta_out.name.replace('.tmp', ''), \"w\")\r\n\r\n for label, seq in parse_fasta(fasta_out):\r\n curr_len = len(seq)\r\n if curr_len < min_corrected_len or curr_len > max_corrected_len:\r\n seqs_discarded_median += 1\r\n else:\r\n final_fasta_out.write(\">%s\\n%s\\n\" % (label, seq))\r\n final_written_lens.append(len(seq))\r\n\r\n final_fasta_out.close()\r\n fasta_out.close()\r\n remove_files([fasta_out.name])\r\n\r\n else:\r\n min_corrected_len = 0\r\n max_corrected_len = 0\r\n seqs_discarded_median = 0\r\n final_written_lens = 0\r\n\r\n # Copy tmp seqs file to final seqs.fna file\r\n fasta_out.close()\r\n fasta_out = open(fasta_out.name, \"U\")\r\n\r\n # Create final seqs.fna\r\n final_fasta_out = open(fasta_out.name.replace('.tmp', ''), \"w\")\r\n\r\n for label, seq in parse_fasta(fasta_out):\r\n final_fasta_out.write(\">%s\\n%s\\n\" % (label, seq))\r\n\r\n final_fasta_out.close()\r\n fasta_out.close()\r\n remove_files([fasta_out.name])\r\n\r\n median_results = (median_length_filtering, min_corrected_len,\r\n max_corrected_len, seqs_discarded_median, final_written_lens)\r\n\r\n raw_seq_lengths = raw_seq_lengths.values()\r\n final_seq_lengths = final_seq_lengths.values()\r\n\r\n log_out = format_log(bc_counts, corr_ct, valid_map, seq_lengths, filters,\r\n retain_unassigned_reads, attempt_bc_correction, primer_mismatch_count,\r\n max_primer_mm, reverse_primers, reverse_primer_not_found,\r\n sliding_window_failed, below_seq_min_after_trunc, qual_score_window,\r\n discard_bad_windows, min_seq_len, raw_seq_lengths,\r\n final_seq_lengths, median_results, truncate_ambi_bases,\r\n below_seq_min_after_ambi_trunc, )\r\n\r\n #all_seq_lengths, good_seq_lengths = get_seq_lengths(seq_lengths, bc_counts)\r\n\r\n return log_out, seq_lengths.values(), raw_seq_lengths, final_seq_lengths",
"def hit_coverage(self):\n s = self.hit_aln.replace(\"=\", \"\")\n return len(s)",
"def calculate_distance(seq1,seq2):\r\n mmcounter = 0 #mismatchcount\r\n seqlen = 0 #sequence length\r\n \r\n #cout the sequence length and mismatches\r\n for i in range(len(seq1)):\r\n if seq1[i]!='-' and seq2[i]!='-':\r\n seqlen += 1\r\n if seq1[i] != seq2[i]:\r\n mmcounter += 1\r\n #compute p\r\n p = (mmcounter/seqlen)\r\n #adjust p \r\n if p >= 0.75:\r\n pcorr = float(30)\r\n else:\r\n pcorr = (-3/4)*np.log(1-((4/3)*p))\r\n \r\n return(pcorr)",
"def gc(sequence):\n sequence = sequence.upper()\n return (sequence.count('G') + sequence.count('C')) / float(len(sequence))",
"def query_coverage(self):\n s = self.query_aln.replace(\"=\", \"\")\n return len(s)",
"def syn_gid(self, src_gid, tgt_gid):\n i = 0\n if (src_gid < self.num_mit): # Target is granule\n i = (tgt_gid * self.num_mit + src_gid + 1 + \n self.num_mit + self.num_gran) * 100 + 1\n else: # Target is mitral\n i = (src_gid * self.num_mit + tgt_gid + 1 + \n self.num_mit + self.num_gran) * 100\n return i",
"def evaluate(genome):\n # base fitness\n fit = 1.0\n # promote 1001 starting motif\n matches = 0\n if genome.sequence_A[0] == 1:\n matches += 1\n if genome.sequence_A[1] == 0:\n matches += 1\n if genome.sequence_A[2] == 0:\n matches += 1\n if genome.sequence_A[3] == 1:\n matches += 1\n fit += matches * 0.1\n # finish\n return fit"
] |
[
"0.62409633",
"0.60837936",
"0.600813",
"0.5950237",
"0.59181535",
"0.57900447",
"0.57895535",
"0.57555425",
"0.5730142",
"0.5715034",
"0.5656748",
"0.559014",
"0.5582517",
"0.5577179",
"0.55734116",
"0.5556264",
"0.55458",
"0.54965055",
"0.54712254",
"0.54615307",
"0.5460545",
"0.5456316",
"0.5448914",
"0.5444054",
"0.5443217",
"0.5412772",
"0.54115057",
"0.54112965",
"0.540938",
"0.5408426"
] |
0.6144114
|
1
|
Return number of distinct insertion regions implied by RISCC data (genomic, cassette, and chromosomes). The output is a 3-tuple giving the number of distinct genome and cassette regions, and the number of distinct non-cassette chromosomes the regions are in. Positions on different chromosomes/strands are always counted as distinct; positions on the same chromosome/strand are counted as distinct regions if the distance between them is >= max_distance (THIS IS SLIGHTLY ROUGH). Data used to generate the regions includes the cassette-side position (single) and all the genome-side RISCC positions. Unaligned reads are ignored.
|
def RISCC_N_distinct_regions(self, max_distance=MAX_POSITION_DISTANCE):
# TODO add options for minimum #seqs and #reads to count a region as valid!
positions_by_chrom_strand = defaultdict(list)
# add the cassette-side position (single)
try:
positions_by_chrom_strand[(self.position.chromosome, self.position.strand)].append(self.position.min_position)
except AttributeError:
pass
# add all the genome-side read positions; skip unaligned ones.
for read_data in self.RISCC_genome_side_aligned_reads.values():
pos = read_data[0]
try: positions_by_chrom_strand[(pos.chromosome, pos.strand)].append(pos.min_position)
except AttributeError: continue
# count total number of distinct regions - different chromosomes or strands, or distance > max_distance
total_distinct_regions_genome = 0
total_distinct_regions_cassette = 0
# for each chromosome, go over all positions and only count ones every MAX_POSITION_DISTANCE as distinct
for chrom_strand, positions in positions_by_chrom_strand.items():
positions.sort()
distinct_regions = [positions[0]]
for pos in positions[1:]:
if (pos-distinct_regions[-1]) > max_distance:
distinct_regions.append(pos)
if is_cassette_chromosome(chrom_strand[0]): total_distinct_regions_cassette += len(distinct_regions)
else: total_distinct_regions_genome += len(distinct_regions)
return total_distinct_regions_genome, total_distinct_regions_cassette
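As a standalone illustration of the clustering step above (positions assumed already grouped by chromosome/strand; count_distinct_regions is a hypothetical helper, not part of the original class):

def count_distinct_regions(positions, max_distance):
    # sorted positions are merged into one region while they stay within
    # max_distance of the first position of the current region
    if not positions:
        return 0
    positions = sorted(positions)
    regions = [positions[0]]
    for pos in positions[1:]:
        if pos - regions[-1] > max_distance:
            regions.append(pos)
    return len(regions)

# e.g. count_distinct_regions([100, 150, 5000, 5100], max_distance=1000) -> 2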
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def num_complementary_regions(self):\n g = self._get_puncturefinder_graph()\n # return g.connected_components_number()\n return nx.number_connected_components(g)",
"def test_count_genomic_region_sizes(self):\n \n regions = OrderedDict()\n regions[\"exons\"] = \"Exon\"\n regions[\"utr3\"] = \"3' UTR\"\n regions[\"utr5\"] = \"5' UTR\"\n regions[\"proxintron500\"] = \"Proximal\\nIntron\"\n regions[\"distintron500\"] = \"Distal\\nIntron\"\n results = count_genomic_region_sizes(os.path.join(clipper.test_dir(), \"regions\"), regions, \"mm9\")",
"def get_inter_cds_regions(annotations):\n # Determine locations of inter-CDS regions for each chromosome\n inter_cds_regions = {}\n\n for chr_id, chromosome in annotations.items():\n # get chromosome dimensions (the first feature represents the\n # chromosome itself)\n ch_end = int(chromosome.features[0].location.end)\n\n # filter out everything except for genes\n genes = [x for x in chromosome.features if x.type == 'gene']\n\n # order by position along chromosome\n genes.sort(key=lambda x: x.location.start)\n\n # add range before first gene\n start = 0\n\n # keep track of strand of polycistronic transcriptional unit\n strand = None\n\n inter_cds_regions[chr_id] = {\n -1: [],\n +1: []\n }\n\n # iterate through genes and store the ranges between them;\n # for TriTrypDB files, the gene boundaries are generally the same\n # as the CDS boundaries.\n for gene in genes:\n # Determine location for the region up to start of the gene\n end = int(gene.location.start)\n\n # Skip over snoRNAs, etc. that are nested inside of other genes\n # For example: TcCLB TcChr22-2 179,000:180,000\n if end <= start:\n continue\n\n # Add CDS to relevant list based on strand\n if strand is None:\n # Left-most gene\n inter_cds_regions[chr_id][gene.location.strand].append((start, end))\n elif strand != gene.location.strand:\n # Add ORFs in both directions at transcription switch sites (TSSs)\n inter_cds_regions[chr_id][+1].append((start, end))\n inter_cds_regions[chr_id][-1].append((start, end))\n else:\n # Within PTU; look for ORFs on same strand\n inter_cds_regions[chr_id][strand].append((start, end))\n\n # update start counter and strand\n start = int(gene.location.end)\n strand = gene.location.strand\n\n # add region after last gene\n inter_cds_regions[chr_id][strand].append((start, ch_end))\n\n return inter_cds_regions",
"def get_regions_counts(fname, seglen, mincounts):\n counts = defaultdict(int)\n seglen=int(seglen)\n with open(fname) as fin:\n infile = csv.DictReader(fin, delimiter='\\t')\n for line in infile:\n if int(line['interactions']) < mincounts:\n continue\n t_reg = (\n line['RNA1 chromosome'],int(int(line['Start of RNA1 first read'])/seglen)*seglen,\n line['RNA1 strand'], \n line['RNA2 chromosome'],int(int(line['Start of RNA2 last read'])/seglen)*seglen,\n line['RNA2 strand'])\n\n counts[t_reg] = int(line['interactions'])\n return counts",
"def num_cusps_of_regions(self):\n G = self._get_puncturefinder_graph()\n # return [sum(G.subgraph(vertices=region).edge_labels())\n # for region in G.connected_components()]\n return [sum(edge[2]['weight']\n for edge in subgraph.edges(data=True))\n for subgraph in nx.connected_component_subgraphs(G)]",
"def get_cds_regions(annotations):\n # Determine locations of CDS regions for each chromosome\n cds_regions = {}\n\n for chr_id, chromosome in annotations.items():\n # get chromosome dimensions (the first feature represents the\n # chromosome itself)\n ch_end = int(chromosome.features[0].location.end)\n\n # filter out everything except for genes\n genes = [x for x in chromosome.features if x.type == 'gene']\n\n # order by position along chromosome\n genes.sort(key=lambda x: x.location.start)\n\n # add range before first gene\n start = 0\n\n # keep track of strand of polycistronic transcriptional unit\n strand = None\n\n cds_regions[chr_id] = {\n -1: [],\n +1: []\n }\n\n # iterate over genes and add CDS coordinates\n for gene in genes:\n coords = (int(gene.location.start), int(gene.location.end))\n cds_regions[chr_id][gene.location.strand].append(coords)\n\n return cds_regions",
"def test_build_genomic_regions(self):\n\n CDS = pybedtools.BedTool(\"\"\"chr1\\t7700\\t7900\\tfoo\\t0\\t+\\n\n chr1\\t7999\\t8500\\tfoo\\t0\\t+\\n\"\"\", from_string = True)\n UTR5 = pybedtools.BedTool(\"\"\"chr1\\t7499\\t7700\\tfoo\\t0\\t+\\n\"\"\", from_string = True)\n UTR3 = pybedtools.BedTool(\"\"\"chr1\\t8500\\t9000\\tfoo\\t0\\t+\\n\"\"\", from_string = True)\n proxintron = pybedtools.BedTool(\"\"\"chr1\\t100\\t300\\tfoo\\t0\\t+\\n\n chr1\\t798\\t998\\tfoo\\t0\\t+\\n\n chr1\\t2000\\t2200\\tfoo\\t0\\t+\\n\n chr1\\t2798\\t2998\\tfoo\\t0\\t+\\n\n chr1\\t6000\\t6200\\tfoo\\t0\\t+\\n\n chr1\\t6798\\t6998\\tfoo\\t0\\t+\\n\n chr1\\t7900\\t7998\\tfoo\\t0\\t+\\n\"\"\", from_string = True\n )\n distintron = pybedtools.BedTool(\"\"\"chr1\\t301\\t797\\tfoo\\t0\\t+\\n\n chr1\\t2201\\t2797\\tfoo\\t0\\t+\\n\n chr1\\t6201\\t6797\\tfoo\\t0\\t+\\n\"\"\", from_string = True)\n \n regions = build_genomic_regions(pybedtools.BedTool(clipper.test_file(\"test.gtf\")), prox_distance=200) \n \n #print UTR3\n\n #print regions['UTR3']\n print proxintron\n print regions['proxintron']\n #print regions['distintron']\n \n self.assertEqual(len(CDS.intersect(regions['CDS'], f= 1.0, r = True)), 2)\n self.assertEqual(len(UTR5.intersect(regions['UTR5'], f= 1.0, r = True)), 1)\n self.assertEqual(len(UTR3.intersect(regions['UTR3'], f= 1.0, r = True)), 1)\n self.assertEqual(len(proxintron.intersect(regions['proxintron'], f= 1.0, r = True)), 7)\n self.assertEqual(len(distintron.intersect(regions['distintron'], f= 1.0, r = True)), 3)",
"def get_unique_covered_percentage(fuzzer_row_covered_regions,\n fuzzer_col_covered_regions):\n\n unique_region_count = 0\n for region in fuzzer_col_covered_regions:\n if region not in fuzzer_row_covered_regions:\n unique_region_count += 1\n return unique_region_count",
"def test_get_coverage_of_region_split_read(self):\n\n # turn of read extension\n self.c.extendPairedEnds = False\n self.c.bamFilesList = [self.bamFile1]\n self.c.binLength = 10\n self.c.stepSize = 10\n resp, _ = self.c.count_reads_in_region('chr_cigar', 0, 100)\n nt.assert_array_equal(resp, np.array([[0.],\n [1.],\n [1.],\n [0.],\n [1.],\n [0.],\n [0.],\n [0.],\n [0.],\n [0.]]))",
"def number_of_carnivores_island(self):\n return np.sum(self.carnivores_on_island)",
"def get_gc_in_islands(cst, nmsk, segs):\n assert isinstance(cst, ChromStruct)\n # load the reference chromosome\n # ref = fasta_array(cst.chrom, cst.refg_files)\n ref = fasta_array(cst.chrom, cst.ancs_files)\n\n # set empty array to fill with CpG sites\n cpg_isl = np.zeros(shape=cst.chlen, dtype=bool)\n\n # go through each CpG island and flip array to one on CpG island segments\n f_in = cst.data + '/coords/cpg_islands/{}.cpg.islands.bed'.format(cst.chrom)\n with open(f_in, 'r') as f:\n for line in f:\n start, end = map(int, line.split()[1:3])\n cpg_isl[start:end] = True\n\n # get the fraction of GC out of all neutral sites in CpG islands\n gc_isl_count = []\n for (start, end) in segs:\n # get the segment of the neutral mask\n cur_msk = nmsk[start:end]\n # if there are no neutral sites in the segment, record a 0\n if not np.sum(cur_msk > 0):\n gc_isl_count.append(0)\n continue\n\n # get the segment of the reference genome\n cur_ref = ref[start:end]\n # get the neutral subset of the segment\n cur_neut = cur_ref[cur_msk > 0]\n # get a mask of GC sites for the neutral sites\n cur_gc = np.in1d(cur_neut, ['C', 'G'])\n\n # get the segment of the CpG islands array\n isl_seg = cpg_isl[start:end]\n # get the neutral subset of the CpG islands array\n isl_neut = isl_seg[cur_msk > 0]\n # if there are no neutral island sites in the segment, record a 0\n if not np.sum(isl_neut > 0):\n gc_isl_count.append(0)\n continue\n\n # check that these masks are all the same size\n assert len(cur_neut) == len(cur_gc) == len(isl_neut)\n # get the count of GC neutral sites in islands\n gc_count = (isl_neut & cur_gc)\n\n # get the fraction of neutral GC sites in islands out of all in islands\n gc_isl_count.append(1.0 * gc_count.sum() / isl_neut.sum())\n\n return np.array(gc_isl_count)",
"def RISCC_N_non_confirming_reads(self, max_distance=MAX_POSITION_DISTANCE):\n return sum(x[1] for x in self.RISCC_genome_side_aligned_reads.values()) - self.RISCC_N_confirming_reads(max_distance)",
"def _discoverNewZincRegions(self):\n newRegionCount = 0\n zincChildRef = self._zincRegion.getFirstChild()\n while zincChildRef.isValid():\n childName = zincChildRef.getName()\n neonChild = self._findChildByName(childName)\n if not neonChild:\n neonChild = NeonRegion(childName, zincChildRef, self)\n neonChild._ancestorModelSourceCreated = True\n self._children.append(neonChild)\n newRegionCount += (1 + neonChild._discoverNewZincRegions())\n zincChildRef = zincChildRef.getNextSibling()\n return newRegionCount",
"def RISCC_N_confirming_seqs(self, max_distance=MAX_POSITION_DISTANCE):\n N = 0\n for read_data in self.RISCC_genome_side_aligned_reads.values():\n # skip non-aligned reads; check aligned reads for confirming.\n try: chrom = read_data[0].chromosome\n except AttributeError: continue\n if self._if_confirming_read(read_data[0], max_distance): \n N += 1\n return N",
"def RISCC_N_non_confirming_seqs(self, max_distance=MAX_POSITION_DISTANCE):\n return len(self.RISCC_genome_side_aligned_reads) - self.RISCC_N_confirming_seqs(max_distance)",
"def get_gc_content(regions, fasta):\r\n\tnuc_count = {\"T\":0, \"t\":0, \"A\":0, \"a\":0, \"G\":1, \"g\":1, \"C\":1, \"c\":1}\r\n\r\n\tgc = 0\r\n\ttotal = 0\r\n\tfasta_obj = pysam.FastaFile(fasta)\r\n\tfor region in regions:\r\n\t\tseq = fasta_obj.fetch(region.chrom, region.start, region.end)\r\n\t\tgc += sum([nuc_count.get(nuc, 0.5) for nuc in seq])\r\n\t\ttotal += region.end - region.start\r\n\tfasta_obj.close()\r\n\tgc_content = gc / float(total)\r\n\r\n\treturn(gc_content)",
"def get_coverage(self):\n coverage = np.zeros(self.Set.Shape, dtype=np.int8)\n for ig in self.Set:\n igram = self.load_ma(ig)\n coverage[~igram.mask] += 1\n\n return coverage",
"def chromosome_lengths(self):\n chr_lens = {}\n for r in self.regions(lazy=True):\n if chr_lens.get(r.chromosome) is None:\n chr_lens[r.chromosome] = r.end\n continue\n if r.end > chr_lens[r.chromosome]:\n chr_lens[r.chromosome] = r.end\n return chr_lens",
"def RISCC_N_confirming_reads(self, max_distance=MAX_POSITION_DISTANCE):\n N = 0\n for read_data in self.RISCC_genome_side_aligned_reads.values():\n # skip non-aligned reads; check aligned reads for confirming.\n try: chrom = read_data[0].chromosome\n except AttributeError: continue\n if self._if_confirming_read(read_data[0], max_distance): \n N += read_data[1]\n return N",
"def trace_region_count(self):\n cmd = enums.JLinkTraceCommand.GET_NUM_REGIONS\n data = ctypes.c_uint32(0)\n res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))\n if (res == 1):\n raise errors.JLinkException('Failed to get trace region count.')\n return data.value",
"def countReadCoverage(bam,chrom,start,end,strand=None):\n\n coverage = []\n start = int(start)\n end = int(end)\n for i in range(end-start+1):\n coverage.append(0.0)\n\n i = 0\n if chrom in bam.references:\n for pcol in bam.pileup(chrom,start,end):\n n = 0\n if pcol.pos >= start and pcol.pos <= end:\n for read in pcol.pileups:\n if strand == '+':\n if not read.alignment.is_reverse and read.alignment.mapq >= 0 and not read.alignment.is_duplicate:\n n += 1\n elif strand == '-':\n if read.alignment.is_reverse and read.alignment.mapq >= 0 and not read.alignment.is_duplicate:\n n += 1\n else:\n if read.alignment.mapq >= 0 and not read.alignment.is_duplicate:\n n += 1\n coverage[i] = n\n i += 1\n\n return coverage",
"def count_coverage(self, contig=None, start=None, stop=None, region=None,\n quality_threshold=15, read_callback='all',\n reference=None, end=None, base_quality_threshold=0):\n\n signature = locals()\n for key in ['self', 'quality_threshold', 'read_callback', 'base_quality_threshold']:\n signature.pop(key)\n\n adenine = array.array('L', [0] * (stop - start + 1))\n cytosine = adenine[:]\n guanine = adenine[:]\n thymine = adenine[:]\n\n for read in self.fetch(**signature):\n if read.cigarstring is not None and read.mapq >= quality_threshold:\n if filter_read(read, read_callback):\n for base, index in cigar_alignment(seq=read.seq, cigar=read.cigarstring,\n start_pos=read.pos, qualities=read.query_qualities,\n base_qual_thresh=base_quality_threshold):\n if start <= index <= stop:\n if base == 'A':\n adenine[index - start] += 1\n elif base == 'G':\n guanine[index - start] += 1\n elif base == 'C':\n guanine[index - start] += 1\n elif base == 'T':\n thymine[index - start] += 1\n else:\n raise ValueError('Read base was {}, not A, T, C, or G'.format(base))\n\n return adenine, cytosine, guanine, thymine",
"def get_number_of_atoms_to_optimize(self):\n v = self.c.get(simulation_cell=True)\n return len(v.data.stoichiometry)",
"def count_nucleic_acids(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_nucleic_acids()\n return n",
"def cluster_regions( coords, dist ):\n\n gaps = get_gaps( coords )\n blocks = get_block_size( coords )\n\n max_block = max( blocks )\n center = blocks.index(max_block)\n\n stblock, endblock = scan( gaps, center, dist )\n return get_range( coords[stblock:endblock] )",
"def count_region(\n reference_seq, # type: pyfaidx.Fasta\n region, # type: Tuple[str, int, int]\n pattern=None # type: Optional[str]\n): # type: (...) -> int\n\n chrom, start, end = region\n seq = reference_seq[chrom][int(start):int(end)]\n\n return _count_sequence(seq, regex=_build_regex(pattern))",
"def interval_cardinality(self):\n return len(list(self.lower_contained_intervals()))",
"def check_confidence_regions(sigma1, sigma2, sigma3, samples, mean):\n\tsigma1_count = 0\n\tsigma2_count = 0\n\tsigma3_count = 0\n\t\n\tfor sample in samples[1000:,:]:\n\t\ttest1 = ellipse_boundary(sigma1, sample, mean)\n\t\ttest2 = ellipse_boundary(sigma2, sample, mean)\n\t\ttest3 = ellipse_boundary(sigma3, sample, mean)\n\n\t\tif test1 < 1:\n\t\t\tsigma1_count += 1\n\t\t\tsigma2_count += 1\n\t\t\tsigma3_count += 1\n\t\telif test2 < 1:\n\t\t\tsigma2_count += 1\n\t\t\tsigma3_count += 1\n\t\telif test3 < 1:\n\t\t\tsigma3_count += 1\n\t\n\tregion_count = {'1': sigma1_count, '2': sigma2_count, '3': sigma3_count}\n\tprint('region count')\n\tprint(region_count)\n\tprint('sigma1')\n\tprint(sigma1)\n\tprint('sigma2')\n\tprint(sigma2)\n\tprint('sigma3')\n\tprint(sigma3)\n\t\n\treturn region_count",
"def count_total_mutations_cpp(seqs):\n folder = \"/gpfs/group/cdm/IPRO_Suite/modules/CPP/humanization/\"\n name = \"humanization.out\"\n shutil.copyfile(folder + name, name)\n cmd = \"chmod a+x \" + name\n os.system(cmd)\n seqFile = \"sequences.txt\"\n f = open(seqFile, 'w')\n for s in seqs:\n f.write(s + \"\\n\")\n f.close()\n cmd = \"./humanization.out \" + seqFile\n os.system(cmd)\n countFile = \"counts.txt\"\n if os.path.exists(countFile):\n f = open(countFile, 'r')\n firstline = f.readline().strip(' \\t\\n')\n return int(firstline)\n else:\n text = \"humanization.out cpp code do not give the right counts of the mutations, please check\"\n raise DeimmunizationError(text)",
"def get_singles_counts(sname, seglen, mincounts):\n counts = defaultdict(int)\n with open(sname) as fin:\n infile = csv.DictReader(fin, delimiter='\\t')\n for line in infile:\n ints = int(line['interactions'])\n if ints < mincounts:\n continue\n r1_reg = (\n line['RNA1 chromosome'],int(int(line['Start of RNA1 first read'])/seglen)*seglen,\n line['RNA1 strand'])\n r2_reg = (\n line['RNA2 chromosome'],int(int(line['Start of RNA2 last read'])/seglen)*seglen,\n line['RNA2 strand'])\n counts[r1_reg] += ints\n counts[r2_reg] += ints\n return counts"
] |
[
"0.61226624",
"0.5953533",
"0.59534246",
"0.5841964",
"0.583921",
"0.5546408",
"0.5498247",
"0.54521364",
"0.54442143",
"0.54352367",
"0.5194002",
"0.51646143",
"0.51610243",
"0.51343906",
"0.51083446",
"0.5099048",
"0.5060705",
"0.5036289",
"0.50274146",
"0.50229055",
"0.50040555",
"0.50003016",
"0.49777448",
"0.49748802",
"0.4974611",
"0.49522588",
"0.49330223",
"0.49233595",
"0.48757884",
"0.48737526"
] |
0.82966596
|
0
|
Helper function to get the read-info-containing object for both multi-dataset and single mutants. For multi-dataset mutants, return self.by_dataset[dataset_name] if present; if not present, raise an exception if strict, otherwise return an empty read-info object.
|
def read_info(self, dataset_name=None, strict=False):
if dataset_name is None:
raise MutantError("This is a multi-dataset mutant - must provide dataset_name arg!")
if strict:
self._check_dataset_presence(dataset_name)
return self.by_dataset[dataset_name]
else:
try: return self.by_dataset[dataset_name]
except KeyError: return blank_readcount_only_mutant()
# TODO unit-tests?
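The same strict/lenient lookup pattern can be sketched with a plain dict (lookup and the default argument are illustrative stand-ins for by_dataset and blank_readcount_only_mutant(), not part of the original code):

def lookup(by_dataset, dataset_name, strict=False, default=None):
    # strict mode: a missing dataset name is an error (KeyError)
    if strict:
        return by_dataset[dataset_name]
    # lenient mode: fall back to a default placeholder
    return by_dataset.get(dataset_name, default)

# e.g. lookup({'exp1': 42}, 'exp1') -> 42; lookup({'exp1': 42}, 'exp2', default=0) -> 0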
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def read_info(self, dataset_name=None, strict=False):\n if dataset_name is None: return self\n else: raise MutantError(\"This is NOT a multi-dataset mutant - cannot provide dataset_name arg!\")",
"def _check_dataset_name_return_data(self, dataset_name, strict=False):\n if strict:\n _check_dataset_presence(self, dataset_name)\n elif dataset_name is None:\n raise MutantError(\"Cannot use None as dataset name!\")\n return self.by_dataset[dataset_name]",
"def _most_common_mutants_info(self, dataset=None):\n summ = self._get_summary(dataset)\n most_common_mutants = summ.most_common_mutants\n m = most_common_mutants[0]\n # calculate the fraction of total reads per mutant, assuming each mutant has the same readcount\n assert len(set([m.read_info(dataset).total_read_count for m in most_common_mutants])) == 1\n readcount_info = value_and_percentages(m.read_info(dataset).total_read_count, [summ.aligned_read_count])\n if len(most_common_mutants) == 1: return \"%s (%s)\"%(readcount_info, m.position)\n else: return \"%s (%s mutants)\"%(readcount_info, len(most_common_mutants))",
"def __getattribute__(self, name: str) -> Any:\n if name in (FLD_TITLE, FLD_ABSTRACT, FLD_FEES, FLD_ACCESS_CONSTRAINTS, FLD_CONTACT_POSITION, FLD_CONTACT_ORGANISATION):\n return self.read_local_metadata(name)\n elif name == FLD_KEYWORDS:\n kw = self.read_local_metadata(FLD_KEYWORDS)\n if kw:\n return set(kw.split(\",\"))\n else:\n return set()\n elif name == FLD_ATTRIBUTION:\n return self.read_local_metadata(FLD_ATTRIBUTION)\n else:\n return super().__getattribute__(name)",
"def read_dataset(self, dataset):\n uri = URITemplate(self.baseuri + '/{owner}/{id}').expand(\n owner=self.username, id=dataset)\n return self.session.get(uri)",
"def mutants_in_dataset(self, dataset_name=None):\n return [mutant for mutant in self if dataset_name is None or mutant.by_dataset[dataset_name].total_read_count>0]",
"def get_dataset_details(name, analyst):\n\n template = None\n allowed_sources = user_sources(analyst)\n dataset_object = Dataset.objects(name = name,\n source__name__in=allowed_sources).first()\n if not dataset_object:\n error = (\"Either no data exists for this dataset\"\n \" or you do not have permission to view it.\")\n template = \"error.html\"\n args = {'error': error}\n return template, args\n\n dataset_object.sanitize_sources(username=\"%s\" % analyst,\n sources=allowed_sources)\n\n # remove pending notifications for user\n remove_user_from_notification(\"%s\" % analyst, dataset_object.id, 'Dataset')\n\n # subscription\n subscription = {\n 'type': 'Dataset',\n 'id': dataset_object.id,\n 'subscribed': is_user_subscribed(\"%s\" % analyst,\n 'Dataset',\n dataset_object.id),\n }\n\n #objects\n objects = dataset_object.sort_objects()\n\n #relationships\n relationships = dataset_object.sort_relationships(\"%s\" % analyst, meta=True)\n\n # relationship\n relationship = {\n 'type': 'Datset',\n 'value': dataset_object.id\n }\n\n #comments\n comments = {'comments': dataset_object.get_comments(),\n 'url_key':dataset_object.name}\n\n # favorites\n favorite = is_user_favorite(\"%s\" % analyst, 'Dataset', dataset_object.id)\n\n # services\n service_list = get_supported_services('Dataset')\n\n # analysis results\n service_results = dataset_object.get_analysis_results()\n\n args = {'dataset': dataset_object,\n 'objects': objects,\n 'relationships': relationships,\n 'comments': comments,\n 'favorite': favorite,\n 'relationship': relationship,\n 'subscription': subscription,\n 'name': dataset_object.name,\n 'service_list': service_list,\n 'service_results': service_results}\n\n return template, args",
"def read_dataset_info(path=None, paths=None, index_col=None, filter_by_min_spacing=False, verbose=False):\n if (path is None and paths is None) or (path is not None and paths is not None):\n raise ValueError(\"Only one of 'path' or 'paths' arguments must be provided\")\n\n dataset_info = get_dicom_info(glob.glob(path) if path is not None else paths, verbose=verbose)\n if filter_by_min_spacing:\n output_indices = (\n dataset_info\n .groupby('AccessionNumber')\n .agg({'SpacingZ': 'idxmin'})\n )\n index_df = dataset_info.loc[output_indices.loc[:, 'SpacingZ'], :]\n else:\n index_df = dataset_info\n return index_df if index_col is None else index_df.set_index(index_col)",
"def info_materials_intermediates_get():\n materials = _material_by_group(428) # 428 == intermediate group\n return materials, 200",
"def data_details_return(data, data_set):\r\n data.update(data_resources[data_set])\r\n return data",
"def read_one(family_id, material_id):\n # Query the database for the material\n material = (\n Material.query.join(Family, Family.family_id == Material.family_id)\n .filter(Family.family_id == family_id)\n .filter(Material.material_id == material_id)\n .one_or_none()\n )\n\n # Was a material found?\n if material is not None:\n material_schema = MaterialSchema()\n data = material_schema.dump(material).data\n return data\n\n # Otherwise, nope, didn't find that material\n else:\n abort(404, f\"Material not found for Id: {material_id}\")",
"def object_readers(name, *, specify_reader=False):\n\treturn object_access('read', name, specify_reader)",
"def fetch_dataset(\n dataset,\n ignore_hashinfo: bool = False,\n verify: bool = False,\n read_only: bool = False,\n verbose: bool = False,\n pre_scan: bool = True,\n) -> Union[bool, dict[str, Any]]:\n if dataset not in dials_data.datasets.definition:\n return False\n definition = dials_data.datasets.definition[dataset]\n\n target_dir: Path = dials_data.datasets.repository_location() / dataset\n if read_only and not target_dir.is_dir():\n return False\n\n integrity_info = definition.get(\"hashinfo\")\n if not integrity_info or ignore_hashinfo:\n integrity_info = dials_data.datasets.create_integrity_record(dataset)\n\n if \"verify\" not in integrity_info:\n integrity_info[\"verify\"] = [{} for _ in definition[\"data\"]]\n filelist: list[dict[str, Any]] = [\n {\n \"url\": source[\"url\"],\n \"file\": target_dir / os.path.basename(urlparse(source[\"url\"]).path),\n \"files\": source.get(\"files\"),\n \"verify\": hashinfo,\n }\n for source, hashinfo in zip(definition[\"data\"], integrity_info[\"verify\"])\n ]\n\n if pre_scan or read_only:\n if all(\n item[\"file\"].is_file()\n and item[\"verify\"].get(\"size\")\n and item[\"verify\"][\"size\"] == item[\"file\"].stat().st_size\n for item in filelist\n ):\n return True\n if read_only:\n return False\n\n # Obtain a (cooperative) lock on a dataset-specific lockfile, so only one\n # (cooperative) process can enter this context at any one time. The lock\n # file sits in the directory above the dataset directory, as to not\n # interfere with dataset files.\n target_dir.mkdir(parents=True, exist_ok=True)\n with target_dir.with_name(f\".lock.{dataset}\").open(mode=\"w\") as fh:\n with _file_lock(fh):\n verification_records = _fetch_filelist(filelist)\n\n # If any errors occured during download then don't trust the dataset.\n if verify and not all(verification_records):\n return False\n\n integrity_info[\"verify\"] = verification_records\n return integrity_info",
"def __init__(self, dataset, cassette_end, relative_read_direction, dataset_name=None):\n # make sure the arguments are valid values\n if not cassette_end in SEQ_ENDS+['?']: \n raise ValueError(\"The cassette_end variable must be one of %s or '?'!\"%SEQ_ENDS)\n if relative_read_direction not in RELATIVE_READ_DIRECTIONS+['?']: \n raise ValueError(\"The relative_read_direction variable must be %s, or '?'!\"%(', '.join(RELATIVE_READ_DIRECTIONS)))\n # reference to the containing dataset (for read-counting purposes etc), \n # and the dataset name (None if it's a single dataset, string for multi-datasets)\n self.dataset_name = dataset_name\n self.dataset = dataset\n # information on reads that aren't included in the dataset mutants - None or 0 by default\n # TODO I should really go over this and figure out what should be None and what should be 0 and why!!\n self.discarded_read_count, self.discarded_wrong_start, self.discarded_no_cassette = None, None, None\n self.discarded_other_end = 0\n self.non_aligned_read_count, self.unaligned, self.multiple_aligned = 0, 0, 0\n self.ignored_region_read_counts = defaultdict(int)\n # MAYBE-TODO should cassette_end and relative_read_direction be specified for the whole dataset, or just for each set of data added, in add_RISCC_alignment_files_to_data? The only real issue with this would be that then I wouldn't be able to print this information in the summary - or I'd have to keep track of what the value was for each alignment reader added and print that in the summary if it's a single value, or 'varied' if it's different values. Might also want to keep track of how many alignment readers were involved, and print THAT in the summary! Or even print each (infile_name, cassette_end, relative_read_direction) tuple as a separate line in the header.\n self.cassette_end = cassette_end\n self.relative_read_direction = relative_read_direction",
"def test_read(self):\n self.assertArrayEqual(self.dset['a'], self.data['a'])",
"def find_one(cls, dataset_id):\n return super(cls, cls).find_one({DATASET_ID: dataset_id})",
"def get_dataset(self, name, multi_instance=0):\n return [elem for elem in self._data_list\n if elem.name == name and elem.multi_id == multi_instance][0]",
"def get_by_name(self, name: str) -> Optional[\"Dataset\"]:\n raise NotImplementedError",
"def GetMaterial(self, *args):\n return _XCAFDoc.XCAFDoc_MaterialTool_GetMaterial(self, *args)",
"def get_data_set(self, data_set_name):\n return self._data_sets[data_set_name]",
"def data_source_set_info(self) -> Optional[pulumi.Input['DatasourceSetArgs']]:\n return pulumi.get(self, \"data_source_set_info\")",
"def GetAssocDataFromDataset(ReaderAllReturn,datasetname,ErrorOnNoSep=True):\n default = ReaderAllReturn[datasetname]\n defaultData = default.dataset\n defaultNote = HDF5Util.GetHDF5NoteFromDataTuple(default)\n # XXX get rid of hard-coding\n sepExt = \"sep\"\n forceExt = \"force\"\n makeObj = lambda data,note : ProcessSingleWave.WaveObj(DataY=data,\n Note=note)\n # try to get the force and separation\n try:\n AssocData = {\n sepExt:makeObj(defaultData[:,HDF5Util.COLUMN_SEP],defaultNote),\n forceExt:makeObj(defaultData[:,HDF5Util.COLUMN_FORCE],defaultNote)\n }\n except IndexError as e:\n if (ErrorOnNoSep):\n # throw an error if we were asked to\n raise e\n else:\n # assume *only* column is first\n assert len(defaultData.shape) is 1\n AssocData = {\n forceExt:makeObj(defaultData[:],\n defaultNote)\n }\n return AssocData",
"def _info(self) -> tfds.core.DatasetInfo:\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n 'id': tfds.features.Text(),\n 'program': tfds.features.Text(),\n 'date': tfds.features.Text(),\n 'url': tfds.features.Text(),\n 'summary': tfds.features.Text(),\n 'utt': tfds.features.Sequence(tfds.features.Text()),\n 'speaker': tfds.features.Sequence(tfds.features.Text()),\n }),\n supervised_keys=('utt', 'summary'),\n homepage='https://github.com/zcgzcgzcg1/MediaSum',\n citation=_CITATION,\n )",
"def queryMetadata(self, datasetType, key, format, dataId):\n\n func = getattr(self, 'query_' + datasetType)\n return func(key, format, dataId)",
"def look_for_read_in_analysis(read, int_descr, sim_row, analysis_info, args):\n\t\n\treadID = read['qname']\n\t\n\t# get id side and type from int_descr\n\tif int_descr is not None:\n\t\tid, side, type = int_descr.split('_')\n\t\tif id == '':\n\t\t\tid = None\n\t\tif side == '':\n\t\t\tside = None\n\telse:\n\t\tid, side, type = (None, None, None)\n\t\n\t# side and type will be None if the read wasn't found in the simulation information\n\t# but we're looking for it anyway in the analysis results\n\tassert side in ['left', 'right', None]\n\tassert type in ['chimeric', 'discord', None]\n\t\n\t# if the type is chimeric, we're looking only for a read with the read number appended\n\t# unless read was merged\n\tif type == 'chimeric':\n\t\tif not read['merged']:\n\t\t\treadID = f\"{readID}/{read['num']}\"\n\t\t\t\n\t# does this read cross an integration?\n\tcross_int = (sim_row is not None)\n\t\n\t# dictionary to store matches for this read\n\tsim_matches = {'readID' : readID,\n\t\t\t\t\t'merged': read['merged'],\n\t\t\t\t\t'intID' : id,\n\t\t\t\t\t'found' : False,\n\t\t\t\t\t'n_found' : 0,\n\t\t\t\t\t'side' : side,\n\t\t\t\t\t'correct_side' : None,\n\t\t\t\t\t'type' : type,\n\t\t\t\t\t'correct_type' : None,\n\t\t\t\t\t'correct_host_chr' : None,\n\t\t\t\t\t'host_start_dist' : None, \n\t\t\t\t\t'host_stop_dist' : None, \n\t\t\t\t\t'correct_virus' : None,\n\t\t\t\t\t'virus_start_dist' : None,\n\t\t\t\t\t'virus_stop_dist' : None, \n\t\t\t\t\t'ambig_diff' : None\n\t\t\t\t\t}\n\tmatches = []\n\t\n\t\n\t# look through rows of analysis for matches\n\tfor analysis_row in analysis_info:\n\t\n\t\t# check for readID\n\t\tif analysis_row['ReadID'] == readID:\n\t\t\t\n\t\t\tsim_matches['found'] = True\n\t\t\tsim_matches['n_found'] = 1\n\t\t\t\n\t\t\t# check for correct side\n\t\t\tif analysis_row['Orientation'] == 'hv':\n\t\t\t\tanalysis_side = 'left'\n\t\t\telif analysis_row['Orientation'] == 'vh':\n\t\t\t\tanalysis_side = 'right'\n\t\t\telse:\n\t\t\t\traise ValueError(f'unknown Orientation in analysis results for read {readID}')\n\t\t\tsim_matches['correct_side'] = (analysis_side == side)\n\t\t\t\n\t\t\t# check for correct type\n\t\t\tif analysis_row['OverlapType'] == 'discordant':\n\t\t\t\tanalysis_type = 'discord'\n\t\t\telif analysis_row['OverlapType'] in ['gap', 'overlap', 'none']:\n\t\t\t\tanalysis_type = 'chimeric'\n\t\t\telse:\n\t\t\t\traise ValueError(f'unknown OverlapType in analysis results for read {readID}')\n\t\t\tsim_matches['correct_type'] = (analysis_type == type)\t\t\t\n\t\t\t\n\t\t\t#if this read crosses a simulated int, check for matches between sim and analysis properties\n\t\t\tif cross_int:\n\t\t\t\t# check for correct host chromosome, \n\t\t\t\tsim_matches['correct_host_chr'] = (analysis_row['Chr'] == sim_row['chr'])\n\t\t\t\n\t\t\t\t# check distance between sim and analysis integration sites in host\n\t\t\t\tif sim_matches['correct_host_chr']:\n\t\t\t\t\tif side == 'left':\n\t\t\t\t\t\tsim_start = int(sim_row['hPos'])\n\t\t\t\t\t\tsim_ambig = int(sim_row['juncLengths'].split(',')[0])\n\t\t\t\t\t\tsim_stop = sim_start + sim_ambig\n\t\t\t\t\telse:\n\t\t\t\t\t\tsim_left_ambig = int(sim_row['juncLengths'].split(',')[0])\n\t\t\t\t\t\tsim_right_ambig = int(sim_row['juncLengths'].split(',')[1])\n\t\t\t\t\t\tsim_start = int(sim_row['hPos']) + sim_left_ambig + int(sim_row['hDeleted'])\n\t\t\t\t\t\tsim_stop = sim_start + sim_right_ambig\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\tanalysis_start = int(analysis_row['IntStart'])\n\t\t\t\t\tanalysis_stop = 
int(analysis_row['IntStop'])\n\t\t\t\t\t\n\t\t\t\t\t# check distance between starts and stops\t\n\t\t\t\t\tsim_matches['host_start_dist'] = abs(sim_start - analysis_start)\n\t\t\t\t\tsim_matches['host_stop_dist'] = abs(sim_stop - analysis_stop)\n\t\t\t\t\t\n\t\t\t\t\t# check if analysis and sim coordates overlap (allowing some wiggle room)\n\t\t\t\t\tanalysis_start -= args.wiggle_room\n\t\t\t\t\tanalysis_stop += args.wiggle_room\n\t\t\t\t\tsim_matches['host_coords_overlap'] = intersect(sim_start, sim_stop, analysis_start, analysis_stop)\t\n\t\t\t\n\t\t\t\t# check for correct virus\n\t\t\t\tsim_matches['correct_virus'] = (analysis_row['VirusRef'] == sim_row['virus'])\n\t\t\t\t\n\t\t\t\t# check viral coordinates are correct\n\t\t\t\tif sim_matches['correct_virus']:\n\t\t\t\t\tsim_start, sim_stop = get_virus_coordinates(sim_row, side)\n\t\t\t\t\tanalysis_start = int(analysis_row['VirusStart'])\n\t\t\t\t\tanalysis_stop = int(analysis_row['VirusStop'])\n\t\t\t\t\t\n\t\t\t\t\t# check distance between starts and stops\n\t\t\t\t\tsim_matches['virus_start_dist'] = abs(sim_start - analysis_start)\n\t\t\t\t\tsim_matches['virus_stop_dist'] = abs(sim_stop - analysis_stop)\n\t\t\t\t\t\n\t\t\t\t\t# check if analysis and sim coordates overlap (allowing some wiggle room)\n\t\t\t\t\tanalysis_start -= args.wiggle_room\n\t\t\t\t\tanalysis_stop += args.wiggle_room\n\t\t\t\t\tsim_matches['virus_coords_overlap'] = intersect(sim_start, sim_stop, analysis_start, analysis_stop)\n\t\t\t\t\t\n\t\t\t\t\t\t\t\t\n\t\t\t# append a copy of sim_matches, so that in the case of multiple matches\n\t\t\t# we can check for the best one\n\t\t\tmatches.append(dict(sim_matches))\n\n\t# if we didn't get any matches\n\tif len(matches) == 0:\n\t\tmatches.append(sim_matches)\n\t\t\n\t# if we found more than one match, need to update n_found in each\n\tif len(matches) > 1:\n\t\tprint(f\"WARNING: read {readID} found more than once in analysis matches\")\n\t\tfor match_dict in matches:\n\t\t\tmatch_dict['n_found'] = len(matches)\n\t\t\t\n\t# discordant pairs that were merged are a special case\n\t# if a discordant pair crossing an integration is merged, without correction\n\t# it will be scored as fp for chimeric and fn for discordant, but it should\n\t# actually be tp for chimeric and tn for discordant\n\t\n\treturn matches",
"def give_single_dataset_mutant(self, single_dataset_name, force=False):\n if single_dataset_name not in self.by_dataset.keys() and not force:\n raise MutantError(\"This mutant doesn't have a %s dataset! \"%single_dataset_name\n +\"Use force=True argument if you want a zero-readcount mutant returned anyway.\")\n # generate new mutant, fill it with readcount-related data from self.by_dataset[single_dataset_name] \n # and general data from self\n new_mutant = Insertional_mutant()\n new_mutant._copy_non_readcount_data(self)\n new_mutant._copy_readcount_related_data(self.by_dataset[single_dataset_name])\n return new_mutant",
"def _read_data(self) -> MMD:\n\t\tif self.config.source_type == SourceType.LOCAL_FILE:\n\t\t\treturn self._read_files()\n\t\telif self.config.source_type == SourceType.HDFS:\n\t\t\treturn self._read_hdfs()\n\t\telif self.config.source_type == SourceType.NEO4J:\n\t\t\treturn self._read_neo4j(self.config.graph_db)\n\n\t\telse:\n\t\t\traise NotImplementedError(\"The source type {} has not been implemented yet.\".format(loader_config.source_type))",
"def dataset_read(self, name):\n\n # Checks inputs\n check_type(value=name, allowed_types=str, var_name=\"name\", raise_exception=True)\n\n ret = {\n 'ts_list': [],\n 'description': None\n }\n\n response = self.send(root_url=self.session.dm_url + self.root_url,\n verb=GenericClient.VERB.GET,\n template=TEMPLATES['dataset_read'],\n uri_params={\n 'name': name\n })\n\n is_404(response=response, msg=\"Dataset %s not found in database\" % name)\n\n if response.status_code == 200:\n if 'fids' in response.json:\n ret['ts_list'] = response.json['fids']\n\n if 'description' in response.json:\n ret['description'] = response.json['description']\n\n return ret\n raise SystemError(\"Something wrong happened\")",
"def query(self, dataset):\n\n host = self.options.host\n debug = self.options.verbose\n idx = self.options.idx\n limit = self.options.limit\n \n def check(ds):\n query = 'dataset=%s' % ds\n result = Das.get_data(host, query, idx, limit, debug)\n result = result.replace('null','None')\n result = result.replace('true','True')\n result = result.replace('false','False')\n data = eval(result)\n if data['status'] != 'ok':\n raise Exception(\"Das query failed: Output is '%s'\" % data)\n return (data['data'],data)\n\n data = None\n exists = False\n \n if self.options.name is None:\n #guess the dataset name in DBS\n tokens = [t for t in dataset.split(os.sep) if t]\n if len(tokens) >= 3:\n #DBS names always have three entries\n ds = os.sep + os.sep.join(tokens[0:3])\n if ds:\n exists, data = check(ds)\n self.options.name = ds\n else:\n exists, data = check(self.options.name)\n if not exists:\n raise Exception(\"Specified dataset '%s' not found in Das. Please check.\" % self.options.name)\n \n if data is None:\n raise Exception(\"Dataset '%s' not found in Das. Please check.\" % self.dataset)\n return data",
"def _check_dataset_consistency(self):\n if not self.multi_dataset: \n raise MutantError(\"_check_dataset_consistency only makes sense for multi-datasets!\")\n def _check_sets_raise_error(set1, set2, set1_name, set2_name):\n if not set1==set2:\n raise MutantError(\"Multi-dataset mutant pool has different %s and %s dataset sets! %s, %s\"%(set1_name, \n set2_name, set1, set2))\n datasets_from_summary = set(self.summary.keys())\n datasets_from_mutants = set.union(*[set(m.by_dataset.keys()) for m in self])\n _check_sets_raise_error(datasets_from_summary, datasets_from_mutants, \"from summary\", \"from mutants\")\n try:\n if self._dataset_order is not None: \n datasets_from_order = set(self._dataset_order)\n _check_sets_raise_error(datasets_from_order, datasets_from_summary, \"from dataset_order\", \"from summary\")\n except AttributeError:\n pass"
] |
[
"0.7426683",
"0.5558325",
"0.52826",
"0.5180236",
"0.5094579",
"0.49507886",
"0.49337375",
"0.490193",
"0.48570403",
"0.48174632",
"0.48094055",
"0.48014075",
"0.47814924",
"0.47698608",
"0.47466514",
"0.47369713",
"0.47281137",
"0.47247413",
"0.47166374",
"0.47152117",
"0.47028297",
"0.47005934",
"0.46964386",
"0.4674528",
"0.46697232",
"0.4662211",
"0.46502057",
"0.4632756",
"0.46133193",
"0.46108338"
] |
0.7791701
|
0
|
Add read to given dataset (see docstring for Insertional_mutant version); dataset_name is required.
|
def add_read(self, HTSeq_alignment, position=SPECIAL_POSITIONS.unknown, read_count=1, dataset_name=None):
    readcount_data_container = self._check_dataset_name_return_data(dataset_name)
    Insertional_mutant.add_read(readcount_data_container, HTSeq_alignment, position, read_count)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_to_dataset(self, dataset: Dataset):\n pass",
"def add_reference(self, dataset=None):\n if not dataset:\n raise aspecd.exceptions.MissingDatasetError\n dataset_reference = aspecd.dataset.DatasetReference()\n dataset_reference.from_dataset(dataset=dataset)\n self.references.append(dataset_reference)",
"def on_the_add_dataset_page_input_the_dataset_name_my_acl_dataset(driver, dataset_name):\n assert wait_on_element(driver, 5, '//h3[text()=\"Add Dataset\"]')\n assert wait_on_element(driver, 5, '//input[@ix-auto=\"input__Name\"]', 'inputable')\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Name\"]').clear()\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Name\"]').send_keys(dataset_name)\n assert wait_on_element(driver, 5, '//mat-select[@ix-auto=\"select__Share Type\"]')\n driver.find_element_by_xpath('//mat-select[@ix-auto=\"select__Share Type\"]').click()\n assert wait_on_element(driver, 5, '//mat-option[@ix-auto=\"option__Share Type_SMB\"]', 'clickable')\n driver.find_element_by_xpath('//mat-option[@ix-auto=\"option__Share Type_SMB\"]').click()",
"def update_dataset(self, dataset, name=None, description=None):\n uri = URITemplate(self.baseuri + '/{owner}/{id}').expand(\n owner=self.username, id=dataset)\n return self.session.patch(uri, json=self._attribs(name, description))",
"def get_dataset_reference(self, dataset_name):\n\n print_debug(\"Geting dataset :\" + dataset_name)\n dataset = DatasetFactory.get(dataset_file_name=dataset_name)\n return dataset",
"def add_dataset(self, task_name, dataset=None, *, aliases=None):\n self._datasets.append(dataset if dataset is not None else TaskData())\n last_index = len(self._datasets) - 1\n self._aliases[task_name] = last_index\n\n if aliases is not None:\n for alias in aliases:\n self._aliases[alias] = last_index\n\n if len(self._datasets) == 1:\n self._default_index = 0",
"def with_input(self, dataset_name, project_key=None, role=\"main\"):\n return self._with_input(dataset_name, project_key, role)",
"def switch_to_dataset(self, dataset_name: Optional[str] = None):\n self._current_dataset_name = self._validate_dataset_name(dataset_name)",
"def load_data(self, dataset, dataset_name):\n with open(dataset, \"r\", encoding=\"utf-8\") as f:\n self.data = json.load(f)\n self.dataset_name = dataset_name",
"def add_merged_dataset(self, new_dataset):\n self._add_linked_data(self.MERGED_DATASETS, self.merged_dataset_ids,\n new_dataset.dataset_id)",
"def read_info(self, dataset_name=None, strict=False):\n if dataset_name is None: return self\n else: raise MutantError(\"This is NOT a multi-dataset mutant - cannot provide dataset_name arg!\")",
"def add(self, datasetName, rows):\r\n\r\n rows = np.vstack(rows)\r\n\r\n\t\t# Add the sample into the buffer\r\n try:\r\n self.datasetBuffer[datasetName].extend(rows)\r\n except:\r\n # Initialize the buffer\r\n self.datasetBuffer[datasetName] = []\r\n self.datasetBuffer[datasetName].extend(rows)\r\n\r\n # Create the dataset\r\n self._createDatasets(datasetName)\r\n\r\n # Initiliaze dataset index count\r\n self.idxs[datasetName] = 0\r\n\r\n # Update the number of samples in the buffer\r\n self.totalFeatures += len(rows)\r\n\r\n\t\t# Check to see if we have reached the maximum buffer size\r\n if self.totalFeatures >= self.maxBufferSize:\r\n\r\n\t\t\t# write the buffers to file\r\n self._writeBuffers()",
"def add_tag(self, dataset: \"Dataset\", tag: \"DatasetTag\"):\n raise NotImplementedError",
"def update_dataset(self, data_name: str, append: pd.DataFrame):\n df = getattr(self, data_name)\n setattr(self, data_name, df.join(append, how='left'))",
"def add_other_mutant_as_dataset(self, other_mutant, other_mutant_dataset_name, \n overwrite=False, check_constant_data=False):\n if other_mutant_dataset_name in self.by_dataset and not overwrite:\n raise MutantError(\"This mutant already has a %s dataset! Can't overwrite it with \"%other_mutant_dataset_name\n +\"new one. Choose a different name for new dataset, or use overwrite=True argument.\")\n\n # if desired, check that the position/gene data matches (and update if own gene data is unknown)\n # (probably should be using ifs rather than asserts, but I think since they're wrapped in a try/except it's fine)\n if check_constant_data:\n if not self.position == other_mutant.position:\n raise MutantError(\"Can't add mutant2 as dataset to mutant1: the mutant position differs! %s and %s\"%(\n self.position, other_mutant.position))\n try:\n self.update_gene_info(other_mutant.gene, other_mutant.orientation, \n other_mutant.gene_feature, other_mutant.gene_distances)\n except MutantError:\n raise MutantError(\"Can't add mutant2 as dataset to mutant1: the mutant gene data differs!\"\n +\" %s, %s, %s and\"%(self.gene, self.orientation, self.gene_feature)\n +\" %s, %s, %s.\"%(other_mutant.gene, other_mutant.orientation, other_mutant.gene_feature))\n\n # make a new empty Insertional_mutant object to hold the readcount-related data from other_mutant, \n # and put it in the self.by_dataset dictionary under other_mutant_dataset_name\n self.by_dataset[other_mutant_dataset_name] = Insertional_mutant_readcount_only()\n # now fill this new object with readcount-related data from other_mutant\n self.by_dataset[other_mutant_dataset_name]._copy_readcount_related_data(other_mutant)",
"def add(self, read):\n self.additionalReads.append(read)\n self._length += 1",
"def give_single_dataset_mutant(self, single_dataset_name, force=False):\n if single_dataset_name not in self.by_dataset.keys() and not force:\n raise MutantError(\"This mutant doesn't have a %s dataset! \"%single_dataset_name\n +\"Use force=True argument if you want a zero-readcount mutant returned anyway.\")\n # generate new mutant, fill it with readcount-related data from self.by_dataset[single_dataset_name] \n # and general data from self\n new_mutant = Insertional_mutant()\n new_mutant._copy_non_readcount_data(self)\n new_mutant._copy_readcount_related_data(self.by_dataset[single_dataset_name])\n return new_mutant",
"def set_dataset(self, role, dset):\n\n\t\tself.datasets[role] = dset",
"def add(self, dataset: Dataset,\n with_lineage: bool = True, archive_less_mature: Optional[int] = None) -> Dataset:\n\n def process_bunch(dss, main_ds, transaction):\n edges = []\n\n # First insert all new datasets\n for ds in dss:\n is_new = transaction.insert_dataset(ds.metadata_doc_without_lineage(), ds.id, ds.product.id)\n sources = ds.sources\n if is_new and sources is not None:\n edges.extend((name, ds.id, src.id)\n for name, src in sources.items())\n\n # Second insert lineage graph edges\n for ee in edges:\n transaction.insert_dataset_source(*ee)\n\n # Finally update location for top-level dataset only\n if main_ds.uris is not None:\n self._ensure_new_locations(main_ds, transaction=transaction)\n\n _LOG.info('Indexing %s', dataset.id)\n\n if with_lineage:\n ds_by_uuid = flatten_datasets(dataset)\n all_uuids = list(ds_by_uuid)\n\n present = {k: v for k, v in zip(all_uuids, self.bulk_has(all_uuids))}\n\n if present[dataset.id]:\n _LOG.warning('Dataset %s is already in the database', dataset.id)\n return dataset\n\n dss = [ds for ds in [dss[0] for dss in ds_by_uuid.values()] if not present[ds.id]]\n else:\n if self.has(dataset.id):\n _LOG.warning('Dataset %s is already in the database', dataset.id)\n return dataset\n\n dss = [dataset]\n\n with self._db_connection(transaction=True) as transaction:\n process_bunch(dss, dataset, transaction)\n if archive_less_mature is not None:\n self.archive_less_mature(dataset, archive_less_mature)\n\n return dataset",
"def load(name):\n if name in datasets:\n\n return pd.read_csv(os.path.join(datasets_path, \"%s.csv\" % name))\n else:\n raise ValueError(\"Dataset not found!\")",
"def load_dataset(dataset_name):\n url = METADATA[dataset_name][\"url\"]\n f = urlopen(url)\n data = _read_rows(f)\n f.close()\n return data",
"def add_read(self, read):\n r = Read(read)\n if read not in self.reads:\n self.reads[read] = r\n else:\n self.reads[read].visit_limit += 1\n self.num_reads += 1",
"def create_dataset(project, dataset_name):\n dataset = dataset_name\n get_dataset = project.datasets.get(dataset_name=dataset)\n project.datasets.create(dataset_name=dataset_name)\n \n return get_dataset",
"def add_or_remove(self, dataset: \"Dataset\") -> None:\n raise NotImplementedError",
"def add_data(self, dataset, library):\n _elements = dataset.get_files()\n parent_folder = dataset._data[\"source_dir\"].split('/')[-1:][0]\n # for every dataset element, determine if its a list or a string\n for _e in _elements:\n _val = _elements[_e]\n # add dataset if its a simple string\n if isinstance(_val, str):\n self._datasets.append(self._add_dataset(_val, library, parent_folder))\n # add dataset collection if it's a list\n elif isinstance(_val, list):\n self._dataset_collections.append(self._add_dataset_collection(_val, library, parent_folder, _e))\n else:\n self.err(\"Dataset has weird file {} whose type {} is strange\".format(_e, type(_val)))",
"def assign_dataset(self, dataset_uuid):\n assert os.path.exists(os.path.join(DATASETS_PATH, dataset_uuid))\n self.dataset_uuid = dataset_uuid\n if self.name is None:\n self.set_name(dataset_uuid)",
"def update_dataset(\n self,\n dataset: DatasetDB,\n ) -> DatasetDB:\n dataset_id = dataset.id\n\n self._es.update_document(\n index=DATASETS_INDEX_NAME,\n doc_id=dataset_id,\n document=self._dataset_to_es_doc(dataset),\n )\n return dataset",
"def add_read(self, new_read): \n if self.sampling:\n self.convert_to_list()\n self.reads.append(new_read)\n self.total+=1",
"def resolve_dataset(self, which_set, dataset_name):\n p = path.join(self.dataset_root, dataset_name + \"/\")\n\n if not(path.isdir(serial.preprocess(p))):\n raise IOError(\"MRI dataset directory %s not found.\"\n % serial.preprocess(p))\n\n if which_set == 'train':\n data_path = p + 'train.npy'\n label_path = p + 'train_labels.npy'\n elif which_set == 'test':\n data_path = p + 'test.npy'\n label_path = p + 'test_labels.npy'\n else:\n if which_set != \"full\":\n raise ValueError(\"dataset \\'%s\\' not supported.\" % which_set)\n data_path = p + \"full_unshuffled.npy\"\n label_path = p + \"full_labels_unshuffled.npy\"\n \n data_path = serial.preprocess(data_path)\n label_path = serial.preprocess(label_path)\n\n if not(path.isfile(data_path)):\n raise ValueError(\"Dataset %s not found in %s\" %(which_set,\n serial.preprocess(p)))\n return data_path, label_path",
"def dataset_read(self, name):\n\n # Checks inputs\n check_type(value=name, allowed_types=str, var_name=\"name\", raise_exception=True)\n\n ret = {\n 'ts_list': [],\n 'description': None\n }\n\n response = self.send(root_url=self.session.dm_url + self.root_url,\n verb=GenericClient.VERB.GET,\n template=TEMPLATES['dataset_read'],\n uri_params={\n 'name': name\n })\n\n is_404(response=response, msg=\"Dataset %s not found in database\" % name)\n\n if response.status_code == 200:\n if 'fids' in response.json:\n ret['ts_list'] = response.json['fids']\n\n if 'description' in response.json:\n ret['description'] = response.json['description']\n\n return ret\n raise SystemError(\"Something wrong happened\")"
] |
[
"0.6095995",
"0.59818745",
"0.5971621",
"0.58394057",
"0.57641375",
"0.5694613",
"0.56901795",
"0.5672957",
"0.56374884",
"0.5603721",
"0.5561695",
"0.54902554",
"0.5469454",
"0.5448146",
"0.5352664",
"0.53225374",
"0.5190837",
"0.51891935",
"0.51786965",
"0.5148901",
"0.51347345",
"0.5099838",
"0.5060369",
"0.5045753",
"0.49629095",
"0.49520972",
"0.49484602",
"0.4933467",
"0.49273857",
"0.49203593"
] |
0.6288449
|
0
|
Add counts to given dataset (see docstring for Insertional_mutant version); dataset_name is required.
|
def add_counts(self, total_count, perfect_count, sequence_variant_count, assume_new_sequences=False, dataset_name=None):
    readcount_data_container = self._check_dataset_name_return_data(dataset_name)
    Insertional_mutant.add_counts(readcount_data_container, total_count, perfect_count, sequence_variant_count,
                                  assume_new_sequences, dataset_name=None)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_to_dataset(self, dataset: Dataset):\n pass",
"def add_sequence_and_counts(self, seq, seq_count, add_to_uniqseqcount=True, dataset_name=None):\n readcount_data_container = self._check_dataset_name_return_data(dataset_name)\n Insertional_mutant.add_sequence_and_counts(readcount_data_container, seq, seq_count, add_to_uniqseqcount, dataset_name=None)",
"def add_dataset(self, task_name, dataset=None, *, aliases=None):\n self._datasets.append(dataset if dataset is not None else TaskData())\n last_index = len(self._datasets) - 1\n self._aliases[task_name] = last_index\n\n if aliases is not None:\n for alias in aliases:\n self._aliases[alias] = last_index\n\n if len(self._datasets) == 1:\n self._default_index = 0",
"def update_dataset(self, dataset, name=None, description=None):\n uri = URITemplate(self.baseuri + '/{owner}/{id}').expand(\n owner=self.username, id=dataset)\n return self.session.patch(uri, json=self._attribs(name, description))",
"def datasets(self, datasets):\n self.__datasetsAll = datasets\n self.__datasets = list(datasets)\n self.__axisDomains = None\n for ds in self.__datasetsAll:\n self.__datasetsPerClass[ds[-1]] = self.__datasetsPerClass.get(ds[-1], 0) + 1\n self.dataChanged.emit()",
"def add_count_data(self, counts: Dict[datetime, int]):\n raise NotImplementedError()",
"def increment(self, count_name):\n prop_name = 'count_' + count_name\n setattr(self, prop_name, getattr(self, prop_name, 0) + 1)",
"def add_data(self, file_name: str, fabricated_count: dict) -> None:\n\n assert file_name not in self._meta_data_dict, \"Error, filename has already been used.\"\n\n self._meta_data_dict[file_name] = fabricated_count",
"def add_counter(self, data, metric_id=None):\n self._post_data(prefix_id='counters', data=data, metric_id=metric_id)",
"def record_count(self, name, value, tags=None):\n identity = self.create_identity(name, tags, \"count\")\n with self._lock:\n self._batch[identity] = self._batch.get(identity, 0) + value",
"def get_num_datasets(self, data):\n dsets = set()\n for items in data:\n dsetid = items[3]\n dsets.add(dsetid)\n return len(dsets)",
"def add_data(ss, y):\n K = len(ss['counts'])\n if y >= 0 and y < K:\n ss['counts'][y] += 1\n elif y == K:\n ss['counts'].append(1)\n else:\n raise ValueError(\"k is not valid: \" + str(y))",
"def add_count(self):\n self.count += 1",
"def on_the_add_dataset_page_input_the_dataset_name_my_acl_dataset(driver, dataset_name):\n assert wait_on_element(driver, 5, '//h3[text()=\"Add Dataset\"]')\n assert wait_on_element(driver, 5, '//input[@ix-auto=\"input__Name\"]', 'inputable')\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Name\"]').clear()\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Name\"]').send_keys(dataset_name)\n assert wait_on_element(driver, 5, '//mat-select[@ix-auto=\"select__Share Type\"]')\n driver.find_element_by_xpath('//mat-select[@ix-auto=\"select__Share Type\"]').click()\n assert wait_on_element(driver, 5, '//mat-option[@ix-auto=\"option__Share Type_SMB\"]', 'clickable')\n driver.find_element_by_xpath('//mat-option[@ix-auto=\"option__Share Type_SMB\"]').click()",
"def count(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.count,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def update_counters(counter: dict[str, int], new_counts: dict[str, int]) -> dict[str, int]:\n for (name, count) in new_counts.items():\n counter[name] += count\n return counter",
"def update_dataset(\n self,\n dataset: DatasetDB,\n ) -> DatasetDB:\n dataset_id = dataset.id\n\n self._es.update_document(\n index=DATASETS_INDEX_NAME,\n doc_id=dataset_id,\n document=self._dataset_to_es_doc(dataset),\n )\n return dataset",
"def scatter_count(input: torch.Tensor):\n return scatter_add(torch.ones_like(input, dtype=torch.long), input.long())",
"def count_elements_in_dataset(dataset):\n return dataset.count()",
"def add_count(self, denom: CashDenomination, count: int) -> None:\n if not self.__open:\n raise RuntimeError(\"Cash drawer must be open to modify.\")\n if count < 0:\n raise ValueError(\"Count must not be negative.\")\n self.__contents[denom] += count",
"def dataCount(self, collectionName):\n count = collectionName.find().count()\n return count",
"def increment_all(collection: Collection, data):\n return collection.update_many({}, {'$inc': data}).modified_count",
"def count(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.count,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def switch_to_dataset(self, dataset_name: Optional[str] = None):\n self._current_dataset_name = self._validate_dataset_name(dataset_name)",
"def increment_count(self, data, suffix=''):\n # Just to show data coming in...\n assert data['hello'] == 'world'\n\n self.count += 1\n return {\"count\": self.count}",
"def dataCountBy(self, collectionName, catagory, data):\n count = collectionName.find({catagory: data}).count()\n return count",
"def calculate_dataset_stats(dsets):\n dsinfo = {\n 'nfiles': 0,\n 'nfilesfinished': 0,\n 'nfilesfailed': 0,\n 'nfilesmissing': 0,\n 'pctfinished': 0.0,\n 'pctfailed': 0,\n 'neventsTot': 0,\n 'neventsUsedTot': 0,\n 'neventsRemaining': 0,\n 'neventsOutput': 0,\n }\n if not dsets or len(dsets) == 0:\n return dsets, dsinfo\n\n if len(dsets) > 0:\n for ds in dsets:\n if 'datasetname' in ds and len(ds['datasetname']) > 0:\n if not str(ds['datasetname']).startswith('user'):\n scope = str(ds['datasetname']).split('.')[0]\n else:\n scope = '.'.join(str(ds['datasetname']).split('.')[:2])\n if ':' in scope:\n scope = str(scope).split(':')[0]\n ds['scope'] = scope\n\n # input primary datasets\n if 'type' in ds and ds['type'] in ['input', 'pseudo_input'] and 'masterid' in ds and ds['masterid'] is None:\n if 'nevents' in ds and ds['nevents'] is not None and int(ds['nevents']) > 0:\n dsinfo['neventsTot'] += int(ds['nevents'])\n if 'neventsused' in ds and ds['neventsused'] is not None and int(ds['neventsused']) > 0:\n dsinfo['neventsUsedTot'] += int(ds['neventsused'])\n\n if 'nfiles' in ds and int(ds['nfiles']) > 0:\n dsinfo['nfiles'] += int(ds['nfiles'])\n dsinfo['nfilesfinished'] += int(ds['nfilesfinished']) if 'nfilesfinished' in ds else 0\n dsinfo['nfilesfailed'] += int(ds['nfilesfailed']) if 'nfilesfailed' in ds else 0\n ds['percentfinished'] = round_to_n_digits(100. * int(ds['nfilesfinished']) / int(ds['nfiles']), 1, method='floor')\n\n # nfilesmissing is not counted in nfiles in the DB\n if 'nfilesmissing' in ds and ds['nfilesmissing'] is not None:\n dsinfo['nfilesmissing'] += int(ds['nfilesmissing'])\n\n elif 'type' in ds and ds['type'] in ('output', ) and 'streamname' in ds and ds['streamname'] is not None and ds['streamname'] == 'OUTPUT0':\n # OUTPUT0 - the first and the main steam of outputs\n dsinfo['neventsOutput'] += int(ds['nevents']) if 'nevents' in ds and ds['nevents'] and ds['nevents'] > 0 else 0\n\n dsinfo['neventsRemaining'] = dsinfo['neventsTot'] - dsinfo['neventsUsedTot']\n dsinfo['pctfinished'] = round_to_n_digits(100.*dsinfo['nfilesfinished']/dsinfo['nfiles'], 0, method='floor') if dsinfo['nfiles'] > 0 else 0\n dsinfo['pctfailed'] = round_to_n_digits(100.*dsinfo['nfilesfailed']/dsinfo['nfiles'], 0, method='floor') if dsinfo['nfiles'] > 0 else 0\n\n return dsets, dsinfo",
"def add_data(self, name, data_dir, tasks):\n assert isinstance(name, str), \"Must input a valid dataset name.\"\n assert isinstance(data_dir, str), \"Must input a valid data directory.\"\n assert isinstance(tasks, dict), \"Must input a valid tasks.\"\n\n new_data = {\n \"data_dir\": data_dir,\n \"keywords\": self._get_keywords_from_tasks(tasks),\n \"tasks\": tasks\n }\n self.data[\"dataset\"][name] = new_data\n self.update_categories()\n self.write_data_cache(self.data)",
"def add_reference(self, dataset=None):\n if not dataset:\n raise aspecd.exceptions.MissingDatasetError\n dataset_reference = aspecd.dataset.DatasetReference()\n dataset_reference.from_dataset(dataset=dataset)\n self.references.append(dataset_reference)",
"def train(self, corpus):\n for sentence in corpus.corpus:\n for datum in sentence.data: \n self.unigramCounts[datum.word] += 1\n self.totalCount += 1"
] |
[
"0.59968686",
"0.56138575",
"0.5464458",
"0.5406013",
"0.5392766",
"0.5368051",
"0.5356496",
"0.5332156",
"0.53183275",
"0.52841157",
"0.5158277",
"0.5155341",
"0.5072814",
"0.5054961",
"0.5034717",
"0.5012029",
"0.49825618",
"0.49813405",
"0.4977953",
"0.49759346",
"0.4974931",
"0.49707726",
"0.49647957",
"0.4944028",
"0.49371576",
"0.49349806",
"0.49253485",
"0.49019036",
"0.4899425",
"0.48878223"
] |
0.57066137
|
1
|
Add seqs/counts to given dataset (see docstring for Insertional_mutant version); dataset_name is required.
|
def add_sequence_and_counts(self, seq, seq_count, add_to_uniqseqcount=True, dataset_name=None):
    readcount_data_container = self._check_dataset_name_return_data(dataset_name)
    Insertional_mutant.add_sequence_and_counts(readcount_data_container, seq, seq_count, add_to_uniqseqcount, dataset_name=None)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_to_dataset(self, dataset: Dataset):\n pass",
"def add_dataset(self, task_name, dataset=None, *, aliases=None):\n self._datasets.append(dataset if dataset is not None else TaskData())\n last_index = len(self._datasets) - 1\n self._aliases[task_name] = last_index\n\n if aliases is not None:\n for alias in aliases:\n self._aliases[alias] = last_index\n\n if len(self._datasets) == 1:\n self._default_index = 0",
"def add_counts(self, total_count, perfect_count, sequence_variant_count, assume_new_sequences=False, dataset_name=None):\n readcount_data_container = self._check_dataset_name_return_data(dataset_name)\n Insertional_mutant.add_counts(readcount_data_container, total_count, perfect_count, sequence_variant_count, \n assume_new_sequences, dataset_name=None)",
"def datasets(self, datasets):\n self.__datasetsAll = datasets\n self.__datasets = list(datasets)\n self.__axisDomains = None\n for ds in self.__datasetsAll:\n self.__datasetsPerClass[ds[-1]] = self.__datasetsPerClass.get(ds[-1], 0) + 1\n self.dataChanged.emit()",
"def add_reference(self, dataset=None):\n if not dataset:\n raise aspecd.exceptions.MissingDatasetError\n dataset_reference = aspecd.dataset.DatasetReference()\n dataset_reference.from_dataset(dataset=dataset)\n self.references.append(dataset_reference)",
"def on_the_add_dataset_page_input_the_dataset_name_my_acl_dataset(driver, dataset_name):\n assert wait_on_element(driver, 5, '//h3[text()=\"Add Dataset\"]')\n assert wait_on_element(driver, 5, '//input[@ix-auto=\"input__Name\"]', 'inputable')\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Name\"]').clear()\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Name\"]').send_keys(dataset_name)\n assert wait_on_element(driver, 5, '//mat-select[@ix-auto=\"select__Share Type\"]')\n driver.find_element_by_xpath('//mat-select[@ix-auto=\"select__Share Type\"]').click()\n assert wait_on_element(driver, 5, '//mat-option[@ix-auto=\"option__Share Type_SMB\"]', 'clickable')\n driver.find_element_by_xpath('//mat-option[@ix-auto=\"option__Share Type_SMB\"]').click()",
"def switch_to_dataset(self, dataset_name: Optional[str] = None):\n self._current_dataset_name = self._validate_dataset_name(dataset_name)",
"def update_dataset(self, dataset, name=None, description=None):\n uri = URITemplate(self.baseuri + '/{owner}/{id}').expand(\n owner=self.username, id=dataset)\n return self.session.patch(uri, json=self._attribs(name, description))",
"def add_data(self, name, data_dir, tasks):\n assert isinstance(name, str), \"Must input a valid dataset name.\"\n assert isinstance(data_dir, str), \"Must input a valid data directory.\"\n assert isinstance(tasks, dict), \"Must input a valid tasks.\"\n\n new_data = {\n \"data_dir\": data_dir,\n \"keywords\": self._get_keywords_from_tasks(tasks),\n \"tasks\": tasks\n }\n self.data[\"dataset\"][name] = new_data\n self.update_categories()\n self.write_data_cache(self.data)",
"def iterate_data(self):\n if \"single\" in self.dataset_name:\n # Index 0 for list of sentence lengths, index 1 for list of token lengths\n self.stat_dict = {'question': [[], []], 'summary': [[], []], 'article': [[], []]}\n for answer_id in self.data:\n summary = self.data[answer_id]['summary']\n articles = self.data[answer_id]['articles']\n question = self.data[answer_id]['question']\n if args.tokenize:\n self._get_token_cnts(summary, 'summary')\n self._get_token_cnts(articles, 'article')\n self._get_token_cnts(question, 'question')\n self._write_stats(\"token_counts\")\n\n if \"multi\" in self.dataset_name:\n self.stat_dict = {'question': [[], []], 'summary': [[], []], 'article': [[], []]}\n for q_id in self.data:\n summary = self.data[q_id]['summary']\n question = self.data[q_id]['question']\n if args.tokenize:\n self._get_token_cnts(summary, 'summary')\n self._get_token_cnts(question, 'question')\n question = self.data[q_id]['question']\n for answer_id in self.data[q_id]['articles']:\n articles = self.data[q_id]['articles'][answer_id][0]\n if args.tokenize:\n self._get_token_cnts(articles, 'article')\n self._write_stats(\"token_counts\")\n\n if self.dataset_name == \"complete_dataset\":\n self.stat_dict = {'urls': [], 'sites': []}\n article_dict = {}\n print(\"Counting answers, sites, unique urls, and tokenized counts of unique articles\")\n answer_cnt = 0\n for q_id in self.data:\n for a_id in self.data[q_id]['answers']:\n answer_cnt += 1\n url = self.data[q_id]['answers'][a_id]['url']\n article = self.data[q_id]['answers'][a_id]['article']\n if url not in article_dict:\n article_dict[url] = article\n self.stat_dict['urls'].append(url)\n assert \"//\" in url, url\n site = url.split(\"//\")[1].split(\"/\")\n self.stat_dict['sites'].append(site[0])\n print(\"# of Answers:\", answer_cnt)\n print(\"Unique articles: \", len(article_dict)) # This should match up with count written to file\n self._write_stats(\"full collection\")\n\n # Get token/sent averages of unique articles\n if args.tokenize:\n self.stat_dict = {'article': [[], []]}\n for a in article_dict:\n self._get_token_cnts(article_dict[a], 'article')\n self._write_stats(\"token_counts\")",
"def add(self, datasetName, rows):\r\n\r\n rows = np.vstack(rows)\r\n\r\n\t\t# Add the sample into the buffer\r\n try:\r\n self.datasetBuffer[datasetName].extend(rows)\r\n except:\r\n # Initialize the buffer\r\n self.datasetBuffer[datasetName] = []\r\n self.datasetBuffer[datasetName].extend(rows)\r\n\r\n # Create the dataset\r\n self._createDatasets(datasetName)\r\n\r\n # Initiliaze dataset index count\r\n self.idxs[datasetName] = 0\r\n\r\n # Update the number of samples in the buffer\r\n self.totalFeatures += len(rows)\r\n\r\n\t\t# Check to see if we have reached the maximum buffer size\r\n if self.totalFeatures >= self.maxBufferSize:\r\n\r\n\t\t\t# write the buffers to file\r\n self._writeBuffers()",
"def datasets(self, datasets):\n\n self._datasets = datasets",
"def train(self, corpus):\n for sentence in corpus.corpus:\n for datum in sentence.data: \n self.unigramCounts[datum.word] += 1\n self.totalCount += 1",
"def add_merged_dataset(self, new_dataset):\n self._add_linked_data(self.MERGED_DATASETS, self.merged_dataset_ids,\n new_dataset.dataset_id)",
"def add_tag(self, dataset: \"Dataset\", tag: \"DatasetTag\"):\n raise NotImplementedError",
"def set_dataset(self, role, dset):\n\n\t\tself.datasets[role] = dset",
"def update_dataset(\n self,\n dataset: DatasetDB,\n ) -> DatasetDB:\n dataset_id = dataset.id\n\n self._es.update_document(\n index=DATASETS_INDEX_NAME,\n doc_id=dataset_id,\n document=self._dataset_to_es_doc(dataset),\n )\n return dataset",
"def update_data(self, name, cache_dir=None, data_dir=None, tasks=None):\n assert name, \"Must input a valid dataset name.\"\n assert name in self.data[\"dataset\"], \"The dataset \\'{}\\' does not exist in the cache.\" \\\n .format(name)\n if cache_dir:\n self.data[\"dataset\"][name][\"cache_dir\"] = cache_dir\n if data_dir:\n self.data[\"dataset\"][name][\"data_dir\"] = data_dir\n if tasks:\n self.data[\"dataset\"][name][\"tasks\"] = tasks\n self.data[\"dataset\"][name][\"keywords\"] = self._get_keywords_from_tasks(tasks)\n if cache_dir or data_dir or tasks:\n self.update_categories()\n self.write_data_cache(self.data)",
"def assign_dataset(self, dataset_uuid):\n assert os.path.exists(os.path.join(DATASETS_PATH, dataset_uuid))\n self.dataset_uuid = dataset_uuid\n if self.name is None:\n self.set_name(dataset_uuid)",
"def update_dataset(self, data_name: str, append: pd.DataFrame):\n df = getattr(self, data_name)\n setattr(self, data_name, df.join(append, how='left'))",
"def sum_datasets(dslist):\n #Assume all same length, same axis values\n newds = zeros_like(dslist[0])\n AddCifMetadata.add_standard_metadata(newds)\n title_info = \"\"\n proc_info = \"\"\"This dataset was created by summing points from multiple datasets. Points were \n assumed to coincide exactly. Data reduction information for the individual source datasets is as follows:\"\"\"\n for one_ds in dslist:\n newds += one_ds\n title_info = title_info + one_ds.title + \"+\"\n proc_info += \"\\n\\n===Dataset %s===\\n\" % str(one_ds.title) \n try:\n proc_info += one_ds.harvest_metadata(\"CIF\")[\"_pd_proc_info_data_reduction\"]\n except KeyError,AttributeError:\n pass\n newds.title = title_info[:-1] #chop off trailing '+'\n newds.axes[0] = dslist[0].axes[0]\n # Add some basic metadata based on metadata of first dataset\n newds.copy_cif_metadata(dslist[0])\n newds.add_metadata('_pd_proc_info_data_reduction',proc_info,\"CIF\")\n return newds",
"def add_data(self, file_name: str, fabricated_count: dict) -> None:\n\n assert file_name not in self._meta_data_dict, \"Error, filename has already been used.\"\n\n self._meta_data_dict[file_name] = fabricated_count",
"def learnDataset(self, data_loader):\n\n print(\"learning dataset\")\n # we have 127940 sentences in total\n count = 0\n for sample in data_loader:\n input_sentence = sample[\"input\"][0]\n target_sentence = sample[\"target\"][0]\n\n # NOTE: target_word & input_word are actually indecies of words, instead of word strings\n # NOTE: the first word has index 1\n first_target = int(target_sentence[1])\n first_input = int(input_sentence[1])\n\n self.emiss_factors[0][(first_input, first_target)] += 1\n\n prev_target = first_target\n for word_idx in range(2, 16):\n # note that word_idx is 0 is always <BOS>\n target_word = int(target_sentence[word_idx])\n input_word = int(input_sentence[word_idx])\n\n self.emiss_factors[word_idx - 1][(input_word, target_word)] += 1\n self.trans_factors[word_idx - 2][(prev_target, target_word)] += 1\n prev_target = target_word\n\n print(\"{}/127940\".format(count), end = \"\\r\")\n count += 1\n print(\"127940/127940\")\n\n # all data updated, no need to do any insertion\n for i in range(15):\n self.emiss_factors[i].fixed()\n for i in range(14):\n self.trans_factors[i].fixed()",
"def add_other_mutant_as_dataset(self, other_mutant, other_mutant_dataset_name, \n overwrite=False, check_constant_data=False):\n if other_mutant_dataset_name in self.by_dataset and not overwrite:\n raise MutantError(\"This mutant already has a %s dataset! Can't overwrite it with \"%other_mutant_dataset_name\n +\"new one. Choose a different name for new dataset, or use overwrite=True argument.\")\n\n # if desired, check that the position/gene data matches (and update if own gene data is unknown)\n # (probably should be using ifs rather than asserts, but I think since they're wrapped in a try/except it's fine)\n if check_constant_data:\n if not self.position == other_mutant.position:\n raise MutantError(\"Can't add mutant2 as dataset to mutant1: the mutant position differs! %s and %s\"%(\n self.position, other_mutant.position))\n try:\n self.update_gene_info(other_mutant.gene, other_mutant.orientation, \n other_mutant.gene_feature, other_mutant.gene_distances)\n except MutantError:\n raise MutantError(\"Can't add mutant2 as dataset to mutant1: the mutant gene data differs!\"\n +\" %s, %s, %s and\"%(self.gene, self.orientation, self.gene_feature)\n +\" %s, %s, %s.\"%(other_mutant.gene, other_mutant.orientation, other_mutant.gene_feature))\n\n # make a new empty Insertional_mutant object to hold the readcount-related data from other_mutant, \n # and put it in the self.by_dataset dictionary under other_mutant_dataset_name\n self.by_dataset[other_mutant_dataset_name] = Insertional_mutant_readcount_only()\n # now fill this new object with readcount-related data from other_mutant\n self.by_dataset[other_mutant_dataset_name]._copy_readcount_related_data(other_mutant)",
"def add_read(self, HTSeq_alignment, position=SPECIAL_POSITIONS.unknown, read_count=1, dataset_name=None):\n readcount_data_container = self._check_dataset_name_return_data(dataset_name)\n Insertional_mutant.add_read(readcount_data_container, HTSeq_alignment, position, read_count)",
"def _write_dataset(name, dataset, indices, num_shards):\n tf.logging.info(\"Writing dataset %s\", name)\n borders = np.int32(np.linspace(0, len(indices), num_shards + 1))\n for i in range(num_shards):\n filename = os.path.join(FLAGS.output_dir, \"%s-%.5d-of-%.5d\" % (name, i,\n num_shards))\n shard_indices = indices[borders[i]:borders[i + 1]]\n _write_shard(filename, dataset, shard_indices)\n tf.logging.info(\"Wrote dataset indices [%d, %d) to output shard %s\",\n borders[i], borders[i + 1], filename)\n tf.logging.info(\"Finished writing %d sentences in dataset %s.\",\n len(indices), name)",
"def get_simple_dataset(seq_len, uid_len, uid_colname, count_dict, tokens):\n \n cat_lst = []\n \n if \"n_ppp_adverse\" in count_dict:\n ppp = get_sequences(\n adverse=1,\n helper=1,\n unhelper=0,\n seq_len=seq_len,\n label=1,\n uid_len=uid_len,\n uid_colname=uid_colname,\n n_seq=count_dict[\"n_ppp_adverse\"],\n tokens=tokens,\n )\n cat_lst.append(ppp)\n \n if \"n_pp_adverse\" in count_dict: \n pp = get_sequences(\n adverse=1,\n helper=0,\n unhelper=0,\n seq_len=seq_len,\n label=1,\n uid_len=uid_len,\n uid_colname=uid_colname,\n n_seq=count_dict[\"n_pp_adverse\"],\n tokens=tokens,\n )\n cat_lst.append(pp)\n \n if \"n_p_adverse\" in count_dict:\n p = get_sequences(\n adverse=0,\n helper=3,\n unhelper=0,\n seq_len=seq_len,\n label=1,\n uid_len=uid_len,\n uid_colname=uid_colname,\n n_seq=count_dict[\"n_p_adverse\"],\n tokens=tokens,\n )\n cat_lst.append(p)\n \n if \"n_nnn_adverse\" in count_dict:\n nnn = get_sequences(\n adverse=0,\n helper=0,\n unhelper=3,\n seq_len=seq_len,\n label=0,\n uid_len=uid_len,\n uid_colname=uid_colname,\n n_seq=count_dict[\"n_nnn_adverse\"],\n tokens=tokens,\n )\n cat_lst.append(nnn)\n \n if \"n_nn_adverse\" in count_dict:\n nn = get_sequences(\n adverse=0,\n helper=1,\n unhelper=2,\n seq_len=seq_len,\n label=0,\n uid_len=uid_len,\n uid_colname=uid_colname,\n n_seq=count_dict[\"n_nn_adverse\"],\n tokens=tokens,\n )\n cat_lst.append(nn)\n \n if \"n_n_adverse\" in count_dict:\n n = get_sequences(\n adverse=0,\n helper=2,\n unhelper=1,\n seq_len=seq_len,\n label=0,\n uid_len=uid_len,\n uid_colname=uid_colname,\n n_seq=count_dict[\"n_n_adverse\"],\n tokens=tokens,\n )\n cat_lst.append(n)\n\n dataset = pd.concat(cat_lst, axis=0)\n dataset.reset_index(inplace=True)\n indexes = [idx for idx in range(dataset.shape[0])]\n random.shuffle(indexes)\n dataset = dataset.iloc[indexes, :]\n #dataset = dataset.sample(frac=1).reset_index(drop=True)\n\n print(f\"dataset: {dataset.shape}\")\n print(f\"ratio:\\n{dataset.label.value_counts(normalize=True)}\\n\")\n\n return dataset",
"def update(self, name, cache_dir=None, data_dir=None, tasks=None):\n assert name, \"Must input a valid dataset name.\"\n self.manager.update_data(name, cache_dir, data_dir, tasks)",
"def make_code(self, datasetname):\n try:\n if(datasetname):\n for i in range(1, len(self.extract()) + 1):\n self.formated = datasetname + \"%03d\" % (i,)\n yield self.formated \n except:\n raise",
"def add(self, name, data_dir, tasks):\n assert isinstance(name, str), \"Must input a valid dataset name.\"\n assert isinstance(data_dir, str), \"Must input a valid directory (data_dir).\"\n assert isinstance(tasks, dict), \"Must input a valid tasks.\"\n\n self.manager.add_data(name, data_dir, tasks)"
] |
[
"0.58334297",
"0.56141096",
"0.5360643",
"0.52961487",
"0.5008856",
"0.5008631",
"0.49966735",
"0.49872187",
"0.49399087",
"0.49057576",
"0.48849142",
"0.48333997",
"0.48133266",
"0.48062634",
"0.47892147",
"0.47850326",
"0.47840506",
"0.47757092",
"0.4765327",
"0.47402558",
"0.47347546",
"0.47232547",
"0.4710057",
"0.46814713",
"0.46553826",
"0.46493378",
"0.4634546",
"0.46255523",
"0.4614123",
"0.46036807"
] |
0.65377134
|
0
|
Copy all readcount-related data from other_mutant into self.by_dataset[other_mutant_dataset_name]. If self isn't a multi-dataset mutant, raise an exception. If check_constant_data is True, check that the position/gene data of self and other_mutant match. If self already has an other_mutant_dataset_name dataset, raise MutantError, unless overwrite=True, in which case overwrite it.
|
def add_other_mutant_as_dataset(self, other_mutant, other_mutant_dataset_name,
                                overwrite=False, check_constant_data=False):
    if other_mutant_dataset_name in self.by_dataset and not overwrite:
        raise MutantError("This mutant already has a %s dataset! Can't overwrite it with "%other_mutant_dataset_name
                          +"new one. Choose a different name for new dataset, or use overwrite=True argument.")
    # if desired, check that the position/gene data matches (and update if own gene data is unknown)
    # (probably should be using ifs rather than asserts, but I think since they're wrapped in a try/except it's fine)
    if check_constant_data:
        if not self.position == other_mutant.position:
            raise MutantError("Can't add mutant2 as dataset to mutant1: the mutant position differs! %s and %s"%(
                self.position, other_mutant.position))
        try:
            self.update_gene_info(other_mutant.gene, other_mutant.orientation,
                                  other_mutant.gene_feature, other_mutant.gene_distances)
        except MutantError:
            raise MutantError("Can't add mutant2 as dataset to mutant1: the mutant gene data differs!"
                              +" %s, %s, %s and"%(self.gene, self.orientation, self.gene_feature)
                              +" %s, %s, %s."%(other_mutant.gene, other_mutant.orientation, other_mutant.gene_feature))
    # make a new empty Insertional_mutant object to hold the readcount-related data from other_mutant,
    # and put it in the self.by_dataset dictionary under other_mutant_dataset_name
    self.by_dataset[other_mutant_dataset_name] = Insertional_mutant_readcount_only()
    # now fill this new object with readcount-related data from other_mutant
    self.by_dataset[other_mutant_dataset_name]._copy_readcount_related_data(other_mutant)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def give_single_dataset_mutant(self, single_dataset_name, force=False):\n if single_dataset_name not in self.by_dataset.keys() and not force:\n raise MutantError(\"This mutant doesn't have a %s dataset! \"%single_dataset_name\n +\"Use force=True argument if you want a zero-readcount mutant returned anyway.\")\n # generate new mutant, fill it with readcount-related data from self.by_dataset[single_dataset_name] \n # and general data from self\n new_mutant = Insertional_mutant()\n new_mutant._copy_non_readcount_data(self)\n new_mutant._copy_readcount_related_data(self.by_dataset[single_dataset_name])\n return new_mutant",
"def _check_dataset_consistency(self):\n if not self.multi_dataset: \n raise MutantError(\"_check_dataset_consistency only makes sense for multi-datasets!\")\n def _check_sets_raise_error(set1, set2, set1_name, set2_name):\n if not set1==set2:\n raise MutantError(\"Multi-dataset mutant pool has different %s and %s dataset sets! %s, %s\"%(set1_name, \n set2_name, set1, set2))\n datasets_from_summary = set(self.summary.keys())\n datasets_from_mutants = set.union(*[set(m.by_dataset.keys()) for m in self])\n _check_sets_raise_error(datasets_from_summary, datasets_from_mutants, \"from summary\", \"from mutants\")\n try:\n if self._dataset_order is not None: \n datasets_from_order = set(self._dataset_order)\n _check_sets_raise_error(datasets_from_order, datasets_from_summary, \"from dataset_order\", \"from summary\")\n except AttributeError:\n pass",
"def _copy_readcount_related_data(self, source_mutant):\n # integers are immutable and thus safe to \"copy\" by adding another name to the same value\n self.total_read_count = source_mutant.total_read_count\n self.perfect_read_count = source_mutant.perfect_read_count\n # using dict to make a COPY of the dict instead of just creating another name for the same value\n self.sequences_counts_positions_errors = dict(source_mutant.sequences_counts_positions_errors)",
"def remove_mutants_in_other_dataset(self, other_dataset, readcount_min=1, perfect_reads=False):\n # TODO do I want this to be based on non-exact position equality instead?\n if perfect_reads: get_readcount = lambda m: m.perfect_read_count\n else: get_readcount = lambda m: m.total_read_count\n # go over all mutants in self; need to convert the iterator to a list to make a separate copy, \n # otherwise we'd be modifying the iterator while iterating through it, which isn't allowed.\n for mutant in list(self):\n if get_readcount(other_dataset.get_mutant(mutant.IB)) >= readcount_min:\n self.remove_mutant(mutant.IB)\n # TODO really I shouldn't be removing mutants outright, just noting them as removed or something... In that case should they or should they not show up in \"for m in self\"? Probably not - they should have a separate dictionary?\n # TODO should I keep track of removed reads, and print in summary? PROBABLY.\n # LATER-TODO unit-test - it does have run-tests though.",
"def remove_mutants_not_in_other_dataset(self, other_dataset, readcount_min=1, perfect_reads=False):\n # TODO do I want this to be based on non-exact position equality instead?\n if perfect_reads: get_readcount = lambda m: m.perfect_read_count\n else: get_readcount = lambda m: m.total_read_count\n # go over all mutants in self; need to convert the iterator to a list to make a separate copy, \n # otherwise we'd be modifying the iterator while iterating through it, which isn't allowed.\n for mutant in list(self):\n if get_readcount(other_dataset.get_mutant(mutant.IB)) < readcount_min:\n self.remove_mutant(mutant.IB)\n # TODO really I shouldn't be removing mutants outright, just noting them as removed or something... In that case should they or should they not show up in \"for m in self\"? Probably not - they should have a separate dictionary?\n # TODO should I keep track of removed reads, and print in summary? PROBABLY.\n # LATER-TODO unit-test - it does have run-tests though.",
"def merge(self, other):\n if other.n_points != self.n_points:\n raise ValueError(\n 'Deduplicator size mismatch: '\n f'{self.n_points} != {other.n_points}'\n )\n self.data_reduced.extend(other.data_reduced)\n self.data_kd.extend(other.data_kd)",
"def _copy_non_readcount_data(self, source_mutant):\n # COPY the position, not just make another name for the same value - I wrote a copy() function for positions\n self.position = source_mutant.position.copy() \n # strings are immutable and thus safe to \"copy\" by adding another name to the same value\n self.gene = source_mutant.gene\n self.orientation = source_mutant.orientation\n self.gene_feature = source_mutant.gene_feature\n self.gene_distances = source_mutant.gene_distances",
"def create_dataset_like(self, name, other, **kwupdate) -> DatasetBase:\n for k in (\"shape\", \"dtype\", \"chunks\", \"fillvalue\"):\n kwupdate.setdefault(k, getattr(other, k))\n\n # Special case: the maxshape property always exists, but if we pass it\n # to create_dataset, the new dataset will automatically get chunked\n # layout. So we copy it only if it is different from shape.\n if other.maxshape != other.shape:\n kwupdate.setdefault(\"maxshape\", other.maxshape)\n\n return self.create_dataset(name, **kwupdate)",
"def read_info(self, dataset_name=None, strict=False):\n if dataset_name is None: \n raise MutantError(\"This is a multi-dataset mutant - must provide dataset_name arg!\")\n if strict:\n self._check_dataset_presence(dataset_name)\n return self.by_dataset[dataset_name]\n else:\n try: return self.by_dataset[dataset_name]\n except KeyError: return blank_readcount_only_mutant()\n # TODO unit-tests?",
"def combine(self, other) -> None:\n assert self.id_ == other.id_\n assert self.type_ == other.type_\n self.count += other.count",
"def mutants_in_dataset(self, dataset_name=None):\n return [mutant for mutant in self if dataset_name is None or mutant.by_dataset[dataset_name].total_read_count>0]",
"def set_all_data_internal(self, check_data=True):\n for key, dataset in self.datasets.items():\n if (\n isinstance(dataset, mfdataarray.MFArray)\n or (\n isinstance(dataset, mfdatalist.MFList)\n and dataset.structure.type == DatumType.recarray\n )\n and dataset.enabled\n ):\n dataset.store_internal(check_data=check_data)",
"def test__add_read(self):\n # using fake HTSeq alignment class from deepseq_utilities; defining one perfect and one imperfect alignment\n # note: the detailed mutation-counting methods are imported from deepseq_utilities and unit-tested there.\n position = Insertion_position('chr1', '+', position_before=3)\n perfect_aln = Fake_HTSeq_aln(seq='AAA', optional_field_data={'NM':0})\n imperfect_aln = Fake_HTSeq_aln(seq='GGG', optional_field_data={'NM':1})\n # adding perfect and imperfect to mutant increases all the counts as expected\n mutant = Insertional_mutant(insertion_position=position)\n mutant.add_read(perfect_aln, read_count=3, position=position)\n assert mutant.total_read_count == mutant.perfect_read_count == 3\n assert mutant.sequences_counts_positions_errors == {'AAA': [3, position, 0]}\n mutant.add_read(imperfect_aln, read_count=1, position=position)\n assert mutant.total_read_count == 4\n assert mutant.perfect_read_count == 3\n assert mutant.sequences_counts_positions_errors == {'AAA': [3, position, 0], 'GGG': [1, position, 1]}\n # same for a multi-dataset mutant - this time we need to specify which dataset we're adding to\n mutant = Insertional_mutant_multi_dataset(insertion_position=position)\n assert len(mutant.by_dataset) == 0\n mutant.add_read(perfect_aln, read_count=3, dataset_name='d1', position=position)\n assert len(mutant.by_dataset) == 1\n assert mutant.by_dataset['d1'].total_read_count == mutant.by_dataset['d1'].perfect_read_count == 3\n assert mutant.by_dataset['d1'].sequences_counts_positions_errors == {'AAA': [3, position, 0]}\n mutant.add_read(imperfect_aln, read_count=1, dataset_name='d1', position=position)\n assert len(mutant.by_dataset) == 1\n assert mutant.by_dataset['d1'].total_read_count == 4\n assert mutant.by_dataset['d1'].perfect_read_count == 3\n assert mutant.by_dataset['d1'].sequences_counts_positions_errors == {'AAA': [3, position, 0], 'GGG': [1, position, 1]}\n # now adding a read to another dataset - nothing changes in dataset d1, but we have new dataset d2 numbers\n mutant.add_read(imperfect_aln, read_count=1, dataset_name='d2', position=position)\n assert len(mutant.by_dataset) == 2\n assert mutant.by_dataset['d1'].total_read_count == 4\n assert mutant.by_dataset['d2'].total_read_count == 1\n assert mutant.by_dataset['d2'].perfect_read_count == 0\n assert mutant.by_dataset['d2'].sequences_counts_positions_errors == {'GGG': [1, position, 1]}\n # it should be impossible to add a read to a specific dataset in a single-dataset mutant \n mutant = Insertional_mutant(insertion_position=position)\n self.assertRaises(MutantError, mutant.add_read, perfect_aln, read_count=3, dataset_name='d1')\n # it should be impossible to add a read to a multi-dataset mutant without giving a dataset_name\n mutant = Insertional_mutant_multi_dataset(insertion_position=position)\n self.assertRaises(MutantError, mutant.add_read, perfect_aln, read_count=3)",
"def test_write(self):\n data2 = self.data.copy()\n data2['a'] *= 2\n self.dset['a'] = data2\n self.assertTrue(np.all(self.dset[...] == data2))\n data2['b'] *= 4\n self.dset['b'] = data2\n self.assertTrue(np.all(self.dset[...] == data2))\n data2['a'] *= 3\n data2['c'] *= 3\n self.dset['a','c'] = data2\n self.assertTrue(np.all(self.dset[...] == data2))",
"def read_info(self, dataset_name=None, strict=False):\n if dataset_name is None: return self\n else: raise MutantError(\"This is NOT a multi-dataset mutant - cannot provide dataset_name arg!\")",
"def PassData(self, other):\n for this,that in zip(self.DataSet, other.DataSet):\n for assoc in [ArrayAssociation.POINT, ArrayAssociation.CELL, ArrayAssociation.ROW]:\n if this.HasAttributes(assoc) and that.HasAttributes(assoc):\n this.GetAttributes(assoc).PassData(that.GetAttributes(assoc))",
"def test_write_noncompound(self):\n data2 = self.data.copy()\n data2['b'] = 1.0\n self.dset['b'] = 1.0\n self.assertTrue(np.all(self.dset[...] == data2))",
"def test_duplicate_images_error(self):\n with self.assertRaises(AssertionError):\n disk.merge_datasets(self.input_datasets, self.output_dataset)\n\n # Original dataset shouldn't be modified.\n self.assertEqual(0, len(self.output_dataset.metadata()))",
"def _check_compatible_fill_values(self, other: \"FlattenedStorage\"):\n for k in set(self._fill_values).intersection(other._fill_values):\n if np.isnan(self._fill_values[k]) and np.isnan(other._fill_values[k]):\n continue\n else:\n if self._fill_values[k] != other._fill_values[k]:\n raise ValueError(\n \"Fill values for arrays in storages don't match, can't perform requested operation\"\n )",
"def resplit_datasets(dataset, other_dataset, random_seed=None, split=None):\n # Prevent circular dependency\n from torchnlp.datasets import Dataset\n\n concat = dataset.rows + other_dataset.rows\n shuffle(concat, random_seed=random_seed)\n if split is None:\n return Dataset(concat[:len(dataset)]), Dataset(concat[len(dataset):])\n else:\n split = max(min(round(len(concat) * split), len(concat)), 0)\n return Dataset(concat[:split]), Dataset(concat[split:])",
"def merge(self, other_corpus, warnings: bool = True):\n utts1 = list(self.iter_utterances())\n utts2 = list(other_corpus.iter_utterances())\n\n combined_utts = self._merge_utterances(utts1, utts2, warnings=warnings)\n new_corpus = Corpus(utterances=list(combined_utts))\n\n # Note that we collect Users from the utt sets directly instead of the combined utts, otherwise\n # differences in User meta will not be registered for duplicate Utterances (because utts would be discarded\n # during merging)\n all_users_meta, all_users_meta_conflict = self._collect_user_data([utts1, utts2])\n Corpus._update_corpus_user_data(new_corpus, all_users_meta, all_users_meta_conflict, warnings=warnings)\n\n # Merge CORPUS metadata\n new_corpus.meta = self.meta\n for key, val in other_corpus.meta.items():\n if key in new_corpus.meta and new_corpus.meta[key] != val:\n if warnings: print(warning(\"Found conflicting values for corpus metadata: {}. \"\n \"Overwriting with other corpus's metadata.\".format(key)))\n new_corpus.meta[key] = val\n\n # Merge CONVERSATION metadata\n convos1 = self.iter_conversations()\n convos2 = other_corpus.iter_conversations()\n\n for convo in convos1:\n new_corpus.get_conversation(convo.id).meta = convo.meta\n\n for convo in convos2:\n for key, val in convo.meta.items():\n curr_meta = new_corpus.get_conversation(convo.id).meta\n if key in curr_meta and curr_meta[key] != val:\n if warnings: print(warning(\"Found conflicting values for conversation: {} for meta key: {}. \"\n \"Overwriting with other corpus's conversation metadata\".format(convo.id, key)))\n curr_meta[key] = val\n\n new_corpus.update_users_data()\n\n return new_corpus",
"def _set_readcount_related_data_to_zero(self):\n self.total_read_count = 0\n self.perfect_read_count = 0\n self.RISCC_genome_side_aligned_reads = {}\n self.RISCC_genome_side_unaligned_reads = {}\n self.sequences_counts_positions_errors = {}\n # TODO should all this really be readcount-related? Well, it IS, but when I have a multi-dataset mutant, do I really want to keep the seq/position/count details and the genome-side RISCC read data per dataset rather than total? Hard to tell, really. In a perfect world I wouldn't be doing multiple RISCC datasets anyway!",
"def proc_dataset_v2(write=False):\n \n path = load_config()\n M = pd.read_csv(path['metadata_file'])\n T = pd.read_csv(path['rna_file'])\n mCH = pd.read_csv(path['mCH_file'])\n CH = pd.read_csv(path['CH_file'])\n\n def format_df(df):\n \"\"\"The inputs are genes x cells. Transpose data and rename columns\"\"\"\n df = df.transpose()\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop('gene', inplace=True)\n df.index.rename('sample_id', inplace=True)\n return df\n\n T = format_df(T)\n mCH = format_df(mCH)\n CH = format_df(CH)\n\n #Update metadata\n M = pd.read_csv(path['metadata_file'])\n M.rename(columns={'Unnamed: 0': 'sample_id'}, inplace=True)\n M.set_index(keys='sample_id', drop=True, inplace=True)\n\n #Sort cells by metadata\n sorted_index = M.sort_values(by='SubClusterAnno').index\n M = M.loc[sorted_index]\n T = T.loc[sorted_index]\n mCH = mCH.loc[sorted_index]\n CH = CH.loc[sorted_index]\n\n assert np.array_equal(CH.columns, mCH.columns), \"Genes are not in the same order\"\n assert np.array_equal(T.columns, mCH.columns), \"Genes are not in the same order\"\n assert M.index.equals(T.index), \"Cells are not in the same order\"\n assert M.index.equals(CH.index), \"Cells are not in the same order\"\n assert M.index.equals(mCH.index), \"Cells are not in the same order\"\n\n # CPM-normalize counts\n X = T.values.astype(np.float32)\n X = (X/np.sum(X, axis=1, keepdims=True))*1e6\n X = np.log1p(X)\n T = pd.DataFrame(data=X, index=T.index, columns=T.columns)\n print('Completed T processing')\n\n # For methylation data\n X = CH.values.astype(float)\n Y = mCH.values.astype(float)\n Z = np.divide(Y,X+1e-10)\n E = pd.DataFrame(data=Z, index=mCH.index, columns=mCH.columns)\n print('Completed E processing')\n\n # select genes based on variance in log normalized CPM values in T\n def calc_highvar_genes(df):\n vars = np.var(df.values, axis=0)\n order_vars = np.argsort(-vars) # descending order\n sorted_highvar_genes = df.columns.values[order_vars]\n return sorted_highvar_genes\n\n data = {'sorted_highvar_T_genes': calc_highvar_genes(T),\n 'sorted_highvar_E_genes': calc_highvar_genes(E)}\n print('Completed finding high variance features')\n\n if write:\n feather.write_dataframe(T, path['data_dir'] / 'T_dat_v2.feather')\n feather.write_dataframe(E, path['data_dir'] / 'E_dat_v2.feather')\n feather.write_dataframe(M, path['data_dir'] / 'Meta_v2.feather')\n sio.savemat(path['data_dir'] / 'highvar_genes_v2.mat', data)\n return T, E, M, data",
"def check_dataset(self):\n url = self.metadata.metadata_url\n document = Document.objects.get(\n metadata=self.metadata,\n document_type=DocumentEnum.METADATA.value,\n is_original=True,\n )\n original_document = document.content\n self.check_document(url, original_document)",
"def update(self, dataset: Dataset, updates_allowed=None, archive_less_mature=None):\n existing = self.get(dataset.id)\n can_update, safe_changes, unsafe_changes = self.can_update(dataset, updates_allowed)\n\n if not safe_changes and not unsafe_changes:\n self._ensure_new_locations(dataset, existing)\n _LOG.info(\"No changes detected for dataset %s\", dataset.id)\n return dataset\n\n for offset, old_val, new_val in safe_changes:\n _LOG.info(\"Safe change in %s from %r to %r\", _readable_offset(offset), old_val, new_val)\n\n for offset, old_val, new_val in unsafe_changes:\n _LOG.warning(\"Unsafe change in %s from %r to %r\", _readable_offset(offset), old_val, new_val)\n\n if not can_update:\n raise ValueError(f\"Unsafe changes in {dataset.id}: \" + (\n \", \".join(\n _readable_offset(offset)\n for offset, _, _ in unsafe_changes\n )\n ))\n\n _LOG.info(\"Updating dataset %s\", dataset.id)\n\n product = self.types.get_by_name(dataset.product.name)\n with self._db_connection(transaction=True) as transaction:\n if not transaction.update_dataset(dataset.metadata_doc_without_lineage(), dataset.id, product.id):\n raise ValueError(\"Failed to update dataset %s...\" % dataset.id)\n if archive_less_mature is not None:\n self.archive_less_mature(dataset, archive_less_mature)\n\n self._ensure_new_locations(dataset, existing)\n\n return dataset",
"def test_read_write_2(dset_full):\n file_name = \"test.hdf5\"\n dset_full.write(file_name)\n\n dset_new = dataset.Dataset.read(file_name)\n\n # Test internal references in the dataset\n assert id(dset_new.site_pos.other) == id(dset_new.sat_pos)\n assert id(dset_new.site_delta.ref_pos) == id(dset_new.site_pos)\n assert id(dset_new.site_posvel.other) == id(dset_new.sat_posvel)\n assert id(dset_new.site_posvel_delta.ref_pos) == id(dset_new.site_posvel)\n\n assert id(dset_new.group.site_pos.other) == id(dset_new.group.sat_pos)\n assert id(dset_new.group.site_delta.ref_pos) == id(dset_new.group.site_pos)\n assert id(dset_new.group.site_posvel.other) == id(dset_new.group.sat_posvel)\n assert id(dset_new.group.site_posvel_delta.ref_pos) == id(dset_new.group.site_posvel)\n\n # Verify that new dataset have different references than original object\n for field_name, field in dset_full._fields.items():\n assert id(field.data) != id(dset_new._fields[field_name].data)\n try:\n for group_field_name, group_field in field.data._fields.items():\n assert id(group_field.data) != id(dset_new._fields[field_name].data._fields[group_field_name].data)\n except AttributeError:\n # Field is not a group\n pass\n\n os.remove(file_name)",
"def _update_dataset(lc, geno, dataset, delete_resources=False):\n package_update_required = False\n if not _dataset_match(geno, dataset):\n dataset.update(_dataset_fields(geno))\n package_update_required = True\n\n chromos = dict(\n (chromo['resource_name'], chromo) for chromo in geno['resources'])\n\n # migrate recombinant1 datasets which had no resource\n # name to identify resource\n if (len(chromos) == 1 and len(dataset['resources']) == 1\n and dataset['resources'][0]['name'] == 'data'):\n dataset['resources'][0]['name'] = geno['resources'][0]['resource_name']\n package_update_required = True\n\n # collect updated resources\n out_resources = []\n for resource in dataset['resources']:\n if resource['name'] not in chromos:\n if not delete_resources:\n out_resources.append(resource)\n continue\n\n r = chromos.pop(resource['name'])\n\n if not _resource_match(r, resource):\n resource.update(_resource_fields(r))\n package_update_required = True\n\n out_resources.append(resource)\n\n # missing resources\n if chromos:\n out_resources.extend(\n # dummy url for old ckan compatibility reasons\n dict(_resource_fields(chromo), url='http://')\n for chromo in chromos.values())\n package_update_required = True\n\n if (package_update_required or\n len(out_resources) != len(dataset['resources'])):\n dataset['resources'] = out_resources\n dataset = lc.call_action('package_update', dataset)\n\n return dataset",
"def merge(self, other):\n from .dataset import Dataset\n\n if other is None:\n return self.to_dataset()\n else:\n other_vars = getattr(other, 'variables', other)\n coords = merge_coords_without_align([self.variables, other_vars])\n return Dataset._from_vars_and_coord_names(coords, set(coords))",
"def CopyData(self, p_int, vtkDataSetAttributes, p_int_1, vtkDataSetAttributes_1, p_int_2):\n ...",
"def read_stats(self, filename, new_times=None, read_full=False):\n input_ds = xr.open_mfdataset(filename, parallel=self.PARALLEL_IO,\n chunks=self.CHUNK_STATS_IO)\n vars_to_read = self.list_of_vars\n if read_full:\n vars_to_read = self.list_of_full_vars\n covs_ds = input_ds[vars_to_read]\n \n if self.num_cross_covs != 0 and covs_ds.dims['dstn'] != self.num_cross_covs:\n raise ValueError(\"Number of separation distances in the input \",\n \"stats file {} \".format(filename),\n \"is {} \".format(covs_ds.dims['dstn']),\n \"which is different to the number requested \",\n \"in this calculation {}\".format(self.num_cross_covs))\n\n if self.variable not in covs_ds[\"variable\"]:\n raise ValueError(\"Variable name in the \",\n \"input dataset {} \".format(self.variable),\n \"is not in the existing \",\n \"stats file {}\".format(covs_ds[\"variable\"]))\n\n if new_times is not None:\n if np.intersect1d(covs_ds[\"times\"].values, new_times.values) is not None:\n raise ValueError(\"Some of the times in the input model error \",\n \"fields {} \".format(new_times.values),\n \"already exist in the output stats \",\n \"file {}\".format(covs_ds[\"times\"].values))\n new_times_ds = xr.Dataset({\"times\": xr.DataArray(new_times, dims=[self.time_dim])})\n self.covs_ds = xr.concat([covs_ds, new_times_ds],\n dim=self.time_dim,\n coords=[self.time_varname],\n data_vars=[\"times\"])\n else:\n self.covs_ds = covs_ds"
] |
[
"0.57595855",
"0.57518274",
"0.56194407",
"0.55894625",
"0.5359541",
"0.5177784",
"0.5133202",
"0.48773158",
"0.48567188",
"0.47647667",
"0.47475588",
"0.47311136",
"0.47105187",
"0.46874717",
"0.46292877",
"0.45662835",
"0.44594386",
"0.44543228",
"0.44340476",
"0.44145992",
"0.44144058",
"0.44108063",
"0.44043908",
"0.43935663",
"0.43818924",
"0.4370831",
"0.43593583",
"0.43573168",
"0.43423688",
"0.43399057"
] |
0.77522314
|
0
|
Return a single-dataset mutant based on single_dataset_name; don't modify the current mutant. If single_dataset_name is not in the current mutant's by_dataset dictionary, raise an exception, unless force is True, in which case return a new mutant with zero readcount.
|
def give_single_dataset_mutant(self, single_dataset_name, force=False):
if single_dataset_name not in self.by_dataset.keys() and not force:
raise MutantError("This mutant doesn't have a %s dataset! "%single_dataset_name
+"Use force=True argument if you want a zero-readcount mutant returned anyway.")
# generate new mutant, fill it with readcount-related data from self.by_dataset[single_dataset_name]
# and general data from self
new_mutant = Insertional_mutant()
new_mutant._copy_non_readcount_data(self)
new_mutant._copy_readcount_related_data(self.by_dataset[single_dataset_name])
return new_mutant
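A brief usage sketch, not from the source; 'multi' is assumed to be an Insertional_mutant_multi_dataset that already holds a 'sample_A' dataset, and 'sample_X' is a deliberately missing name.
single_A = multi.give_single_dataset_mutant('sample_A')              # plain mutant carrying only the sample_A readcounts
missing = multi.give_single_dataset_mutant('sample_X', force=True)   # zero-readcount mutant instead of a MutantError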
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def read_info(self, dataset_name=None, strict=False):\n if dataset_name is None: return self\n else: raise MutantError(\"This is NOT a multi-dataset mutant - cannot provide dataset_name arg!\")",
"def _ensure_dataset_None(dataset_name):\n if dataset_name is not None:\n raise MutantError(\"Don't try to provide a dataset_name on a single mutant (rather than the multi-dataset subclass)!\")\n # MAYBE-TODO this could be accomplished with a decorator instead, right?",
"def read_info(self, dataset_name=None, strict=False):\n if dataset_name is None: \n raise MutantError(\"This is a multi-dataset mutant - must provide dataset_name arg!\")\n if strict:\n self._check_dataset_presence(dataset_name)\n return self.by_dataset[dataset_name]\n else:\n try: return self.by_dataset[dataset_name]\n except KeyError: return blank_readcount_only_mutant()\n # TODO unit-tests?",
"def _check_dataset_name_return_data(self, dataset_name, strict=False):\n if strict:\n _check_dataset_presence(self, dataset_name)\n elif dataset_name is None:\n raise MutantError(\"Cannot use None as dataset name!\")\n return self.by_dataset[dataset_name]",
"def add_other_mutant_as_dataset(self, other_mutant, other_mutant_dataset_name, \n overwrite=False, check_constant_data=False):\n if other_mutant_dataset_name in self.by_dataset and not overwrite:\n raise MutantError(\"This mutant already has a %s dataset! Can't overwrite it with \"%other_mutant_dataset_name\n +\"new one. Choose a different name for new dataset, or use overwrite=True argument.\")\n\n # if desired, check that the position/gene data matches (and update if own gene data is unknown)\n # (probably should be using ifs rather than asserts, but I think since they're wrapped in a try/except it's fine)\n if check_constant_data:\n if not self.position == other_mutant.position:\n raise MutantError(\"Can't add mutant2 as dataset to mutant1: the mutant position differs! %s and %s\"%(\n self.position, other_mutant.position))\n try:\n self.update_gene_info(other_mutant.gene, other_mutant.orientation, \n other_mutant.gene_feature, other_mutant.gene_distances)\n except MutantError:\n raise MutantError(\"Can't add mutant2 as dataset to mutant1: the mutant gene data differs!\"\n +\" %s, %s, %s and\"%(self.gene, self.orientation, self.gene_feature)\n +\" %s, %s, %s.\"%(other_mutant.gene, other_mutant.orientation, other_mutant.gene_feature))\n\n # make a new empty Insertional_mutant object to hold the readcount-related data from other_mutant, \n # and put it in the self.by_dataset dictionary under other_mutant_dataset_name\n self.by_dataset[other_mutant_dataset_name] = Insertional_mutant_readcount_only()\n # now fill this new object with readcount-related data from other_mutant\n self.by_dataset[other_mutant_dataset_name]._copy_readcount_related_data(other_mutant)",
"def mutants_in_dataset(self, dataset_name=None):\n return [mutant for mutant in self if dataset_name is None or mutant.by_dataset[dataset_name].total_read_count>0]",
"def get_dataset(self, name, enforce=None, download=True):\n if self._meta is None:\n self._get_meta()\n return self._get_dataset(name, enforce=enforce, download=download)",
"def find_one(cls, dataset_id):\n return super(cls, cls).find_one({DATASET_ID: dataset_id})",
"def require_dataset(self, name, shape, dtype, exact=False, **kwds) -> DatasetBase:\n shape = tuple(shape)\n dtype = np.dtype(dtype)\n\n if name not in self:\n return self.create_dataset(name, shape, dtype, **kwds)\n\n dset = self[name]\n if not isinstance(dset, DatasetBase):\n raise TypeError(\n \"Incompatible object (%s) already exists\" % dset.__class__.__name__\n )\n\n if not shape == dset.shape:\n raise TypeError(\n \"Shapes do not match (existing %s vs new %s)\" % (dset.shape, shape)\n )\n\n if exact:\n if not dtype == dset.dtype:\n raise TypeError(\n \"Datatypes do not exactly match (existing %s vs new %s)\"\n % (dset.dtype, dtype)\n )\n elif not np.can_cast(dtype, dset.dtype):\n raise TypeError(\n \"Datatypes cannot be safely cast (existing %s vs new %s)\"\n % (dset.dtype, dtype)\n )\n\n return dset",
"def get_dataset_reference(self, dataset_name):\n\n print_debug(\"Geting dataset :\" + dataset_name)\n dataset = DatasetFactory.get(dataset_file_name=dataset_name)\n return dataset",
"def create_dataset_like(self, name, other, **kwupdate) -> DatasetBase:\n for k in (\"shape\", \"dtype\", \"chunks\", \"fillvalue\"):\n kwupdate.setdefault(k, getattr(other, k))\n\n # Special case: the maxshape property always exists, but if we pass it\n # to create_dataset, the new dataset will automatically get chunked\n # layout. So we copy it only if it is different from shape.\n if other.maxshape != other.shape:\n kwupdate.setdefault(\"maxshape\", other.maxshape)\n\n return self.create_dataset(name, **kwupdate)",
"def dataset(self):\n with self._lock:\n if self._dataset is None:\n if isinstance(self._orig_dataset, DaskLazyIndexer):\n self._orig_dataset = self._orig_dataset.dataset\n dataset = dask_getitem(self._orig_dataset, self.keep)\n for transform in self.transforms:\n dataset = transform(dataset)\n self._dataset = dataset\n self._orig_dataset = None\n return self._dataset",
"def create_simple_restriction(datasets, datset_name, restriction):\n if datset_name in datasets and restriction is not None:\n dataset = datasets[datset_name]\n return get_restriction(restriction, dataset)",
"def get(dataset_name: str, redownload: bool = False) -> Dataset:\n return Dataset._from_url(dataset_name, force=redownload)",
"def _make_test_mutant_dataset(positions_and_readcounts_string, raw_chrom_names=False):\n dataset = Insertional_mutant_pool_dataset()\n if not positions_and_readcounts_string: \n return dataset\n for N, string in enumerate(positions_and_readcounts_string.split(', ')):\n raw_pos, readcount = string.split(' ')\n if '/' in readcount: readcount, perfect = [int(x) for x in readcount.split('/')]\n else: readcount = perfect = int(readcount)\n assert readcount >= perfect, \"In mutant string %s, perfect readcount is over total - not allowed!\"%string\n if '+' in raw_pos: strand = '+'\n elif '-' in raw_pos: strand = '-'\n else: raise Exception(\"Short-position %s has no strand!\"%raw_pos)\n chrom, pos = raw_pos.split(strand)\n pos = int(pos)\n if not raw_chrom_names:\n if chrom: chrom = 'chromosome_%s'%chrom\n else: chrom = 'chromosome_1'\n elif not chrom:\n raise Exception(\"Short-position %s has no chromosome name - can't use with raw_chrom_names!\")\n full_pos = Insertion_position(chrom, strand, position_before=pos, immutable=True)\n mutant = Insertional_mutant(IB=str(N), insertion_position=full_pos)\n mutant.total_read_count = readcount\n mutant.perfect_read_count = perfect\n dataset.add_mutant(mutant)\n return dataset",
"def get_by_name(self, name: str) -> Optional[\"Dataset\"]:\n raise NotImplementedError",
"def flatten(self, in_place=True):\n new_dataset = TaskData()\n\n for i, dataset in enumerate(self._datasets):\n if i != self._default_index:\n new_dataset.merge(dataset)\n\n new_dataset.merge(self.default_dataset)\n\n # point all aliases to the new, single dataset\n new_aliases = {alias: 0 for alias, _ in self._aliases.items()}\n\n # replace existing datasets or return a new MultiTaskData object\n if in_place:\n self._datasets = [new_dataset]\n self._aliases = new_aliases\n self._default_index = 0\n else:\n return MultiTaskData(dataset=new_dataset, aliases=list(new_aliases.keys()))",
"def mount_multi_dataset(argv):\n if len(argv) >= 1:\n try:\n bimpl = sdm_backends.Backends.get_backend_instance(backend, config.get_backend_config(backend))\n\n for d in argv:\n dataset = d.strip().lower()\n mount_path = bimpl.make_default_mount_path(dataset, config.get_backend_config(backend).default_mount_path)\n abs_mount_path = sdm_util.get_abs_path(mount_path)\n res = process_mount_dataset(dataset, abs_mount_path)\n if res > 0:\n return res\n return 0\n except sdm_absbackends.AbstractBackendException, e:\n sdm_util.print_message(\"Cannot mount dataset - %s\" % dataset, True, sdm_util.LogLevel.ERROR)\n sdm_util.print_message(e, True, sdm_util.LogLevel.ERROR)\n return 1\n else:\n show_help([\"mmount\"])\n return 1",
"def _single_entity_mutated(self, mut_dat, output, variant, item, translocations, fusions, all_except):\n out_dict = {\"names\": lambda x: list(set(x[self.by[self.version]])), #functions for returning specific data types\n \"dataframe\": lambda x: x,\n \"dict\": lambda x: dict(zip(x[self.by[self.version]], x[variant]))}\n\n return out_dict[output](mut_dat)",
"def default_dataset(self):\n return self.get_by_index(self._default_index)",
"def _check_dataset_consistency(self):\n if not self.multi_dataset: \n raise MutantError(\"_check_dataset_consistency only makes sense for multi-datasets!\")\n def _check_sets_raise_error(set1, set2, set1_name, set2_name):\n if not set1==set2:\n raise MutantError(\"Multi-dataset mutant pool has different %s and %s dataset sets! %s, %s\"%(set1_name, \n set2_name, set1, set2))\n datasets_from_summary = set(self.summary.keys())\n datasets_from_mutants = set.union(*[set(m.by_dataset.keys()) for m in self])\n _check_sets_raise_error(datasets_from_summary, datasets_from_mutants, \"from summary\", \"from mutants\")\n try:\n if self._dataset_order is not None: \n datasets_from_order = set(self._dataset_order)\n _check_sets_raise_error(datasets_from_order, datasets_from_summary, \"from dataset_order\", \"from summary\")\n except AttributeError:\n pass",
"def get_main_sequence(self, N=1, dataset_name=None, aligned_only=False):\n if dataset_name is not None:\n seqs_to_counts_and_data = self.by_dataset[dataset_name].sequences_counts_positions_errors\n else:\n seqs_to_counts_and_data = self.sequences_counts_positions_errors\n # MAYBE-TODO print a warning if different dataset mutants have different main sequences?\n return Insertional_mutant._get_main_sequence_from_data(seqs_to_counts_and_data, N, aligned_only)\n # MAYBE-TODO should there be a warning/failure/something if it's a multi-dataset mutant and the user wants\n # an overall main sequence and only some of the mutants have any sequence data?",
"def get_dataset(self, name, multi_instance=0):\n return [elem for elem in self._data_list\n if elem.name == name and elem.multi_id == multi_instance][0]",
"def generate_dataset(self):\n if self.training:\n dataset = UnpairedDataset(self.opt, self.training)\n datasetA, datasetB = dataset.generate(cacheA='./dataA.tfcache', cacheB='./dataB.tfcache')\n dataA_iter = datasetA.make_initializable_iterator()\n dataB_iter = datasetB.make_initializable_iterator()\n\n return dataA_iter, dataB_iter, dataA_iter.get_next(), dataB_iter.get_next()\n else: # only need shadow dataset for testing\n dataset = SingleDataset(self.opt, self.training)\n datasetA = dataset.generate()\n dataA_iter = datasetA.make_initializable_iterator()\n\n return dataA_iter, dataA_iter.get_next()",
"def get_dataset_option_setter(dataset_name):\n dataset_class = find_dataset_using_name(dataset_name)\n return dataset_class.modify_commandline_options",
"def get(dataset, treemaker, force_reload=False):\n global treemakers\n treemaker_name, treemaker = get_treemaker_name_and_class(treemaker)\n if not hasattr(treemaker, '__version__'):\n raise RuntimeError(\"Please add a __version__ attribute to treemaker %s\" % treemaker_name)\n minitree_filename = \"%s_%s.root\" % (dataset,\n treemaker_name)\n\n try:\n minitree_path = find_file_in_folders(minitree_filename, hax.config['minitree_paths'])\n print(\"Found minitree at %s\" % minitree_path)\n\n # Check the version of the minitree file\n f = ROOT.TFile(minitree_path, 'UPDATE')\n metadata = json.loads(f.Get('metadata').GetTitle())\n if LooseVersion(metadata['version']) < treemaker.__version__:\n print(\"Minitreefile %s is outdated (version %s, treemaker is version %s), will be recreated\" % (minitree_path,\n metadata['version'],\n treemaker.__version__))\n minitree_path = None\n f.Close()\n\n except FileNotFoundError:\n minitree_path = None\n\n if minitree_path is None or force_reload:\n # We have to make the minitree file\n # This will raise FileNotFoundError if the root file is not found\n skimmed_data = treemaker().get_data(dataset)\n print(\"Created minitree %s for dataset %s\" % (treemaker.__name__, dataset))\n\n # Make a minitree\n minitree_path = hax.config['minitree_paths'][0] + minitree_filename\n root_numpy.array2root(skimmed_data.to_records(), minitree_path,\n treename=treemaker.__name__, mode='recreate')\n\n # Write metadata\n f = ROOT.TFile(minitree_path, 'UPDATE')\n ROOT.TNamed('metadata', json.dumps(dict(version=treemaker.__version__,\n created_by=get_user_id(),\n documentation=treemaker.__doc__,\n timestamp=str(datetime.now())))).Write()\n f.Close()\n\n return minitree_path",
"def resolve_dataset(self, which_set, dataset_name):\n p = path.join(self.dataset_root, dataset_name + \"/\")\n\n if not(path.isdir(serial.preprocess(p))):\n raise IOError(\"MRI dataset directory %s not found.\"\n % serial.preprocess(p))\n\n if which_set == 'train':\n data_path = p + 'train.npy'\n label_path = p + 'train_labels.npy'\n elif which_set == 'test':\n data_path = p + 'test.npy'\n label_path = p + 'test_labels.npy'\n else:\n if which_set != \"full\":\n raise ValueError(\"dataset \\'%s\\' not supported.\" % which_set)\n data_path = p + \"full_unshuffled.npy\"\n label_path = p + \"full_labels_unshuffled.npy\"\n \n data_path = serial.preprocess(data_path)\n label_path = serial.preprocess(label_path)\n\n if not(path.isfile(data_path)):\n raise ValueError(\"Dataset %s not found in %s\" %(which_set,\n serial.preprocess(p)))\n return data_path, label_path",
"def get_dataset(self, dataset_path=None, normalize=True, return_original=False):\n if dataset_path is None:\n dataset_path = self.dir\n \n if \"mocap\" in dataset_path.lower():\n print(\"Loading Mocap dataset.\")\n df = get_mocap()\n df_orig = df\n elif \"profi\" in dataset_path.lower():\n print(\"Loading Profiset dataset.\")\n df = get_profiset()\n df_orig = df\n else:\n print(\"Loading CoPhIR dataset.\")\n df_orig, attr_lengths = get_objects_with_indexes(self.labels, f'{dataset_path}/level-{str(self.n_levels)}.txt', f'{dataset_path}/objects.txt')\n if normalize:\n df = scale_per_descriptor(df_orig, self.labels, attr_lengths)\n else:\n df = df_orig\n \n assert df.shape[1] == self.descriptor_values + self.n_levels + len([\"object_id\"])\n logging.info(f\"Loaded dataset of shape: {df.shape}\")\n if return_original:\n return df, df_orig\n else:\n return df",
"def test_download_dataset_full_already_exists(tmp_path, force, expect_data):\n\n with open(tmp_path / \"dataset\", \"wb\") as f:\n f.write(b\"This is local data\")\n\n pennylane.data.data_manager._download_dataset(\n \"dataset/path\", tmp_path / \"dataset\", attributes=None, force=force\n )\n\n with open(tmp_path / \"dataset\", \"rb\") as f:\n assert f.read() == expect_data",
"def get_dataset(self):\n return datasets.get_dataset(self.dataset_id)"
] |
[
"0.6210885",
"0.61094046",
"0.5971368",
"0.59341747",
"0.5776455",
"0.54970247",
"0.534117",
"0.50612116",
"0.49933696",
"0.4967775",
"0.4955689",
"0.4900989",
"0.47795683",
"0.47575963",
"0.47566655",
"0.47380063",
"0.470888",
"0.46375087",
"0.46355745",
"0.46318737",
"0.45837763",
"0.45312226",
"0.45251635",
"0.45243064",
"0.45224953",
"0.44849873",
"0.44663602",
"0.4461722",
"0.44608572",
"0.44571665"
] |
0.8597811
|
0
|
Add not-None arg values to non_aligned_read_count, unaligned, and multiple_aligned (or replace them). If the original values are None, or replace is True, replace instead of adding. If either the original or new value is 'unknown', the result is 'unknown' as well. If any of the args is None, don't modify the original value, unless replace is True, in which case set it to 'unknown'.
|
def add_nonaligned_reads(self, N_all_non_aligned, N_unaligned, N_multiple_aligned, replace=False):
if N_all_non_aligned is not None:
if 'unknown' in (N_all_non_aligned, self.non_aligned_read_count): self.non_aligned_read_count = 'unknown'
elif replace or self.non_aligned_read_count is None: self.non_aligned_read_count = int(N_all_non_aligned)
else: self.non_aligned_read_count += int(N_all_non_aligned)
elif replace: self.non_aligned_read_count = 'unknown'
if N_unaligned is not None:
if 'unknown' in (N_unaligned, self.unaligned): self.unaligned = 'unknown'
elif replace or self.unaligned is None: self.unaligned = int(N_unaligned)
else: self.unaligned += int(N_unaligned)
elif replace: self.unaligned = 'unknown'
if N_multiple_aligned is not None:
if 'unknown' in (N_multiple_aligned, self.multiple_aligned): self.multiple_aligned = 'unknown'
elif replace or self.multiple_aligned is None: self.multiple_aligned = int(N_multiple_aligned)
else: self.multiple_aligned += int(N_multiple_aligned)
elif replace: self.multiple_aligned = 'unknown'
# Note: NO special case for when we don't know the specific categories, but we know total non_aligned is 0,
# because for old-format files non_aligned is initially 0 but gets increased when reading the actual *.sam file,
# which contains lines for unaligned reads (which are unaligned or multiple, both output the same with bowtie -m option)
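The three per-field branches above apply the same merge rule; the standalone helper below is not part of the source, it just re-expresses that rule to make the None/'unknown' handling easier to follow.
def _merge_readcount(current, new_value, replace=False):
    # new_value is None: leave the current value alone, unless replace=True, then mark it 'unknown'
    if new_value is None:
        return 'unknown' if replace else current
    # an 'unknown' on either side makes the result 'unknown'
    if 'unknown' in (new_value, current):
        return 'unknown'
    # otherwise either replace the current value or accumulate into it
    if replace or current is None:
        return int(new_value)
    return current + int(new_value)
# e.g. _merge_readcount(5, 3) == 8, _merge_readcount(5, 'unknown') == 'unknown',
# _merge_readcount(5, None) == 5, _merge_readcount(5, None, replace=True) == 'unknown'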
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_values(self, *values, replace=False):\n\n if replace: self.reset_values()\n for value in values: self.values += [tuple(value)]",
"def add_inplace(a, b):",
"def replace_na(data, replace=\"average\", remove=False, columns):\n \n return",
"def _post_processing(\n kwargs, skip_translate, invalid\n): # pylint: disable=unused-argument\n # If any defaults were not expicitly passed, add them\n for item in DEFAULTS:\n if item not in kwargs:\n kwargs[item] = DEFAULTS[item]",
"def replace_typeval(self, combined, replacement):\n raise NotImplementedError(\"This is an abstract method.\")",
"def add_discarded_reads(self, N_all_discarded, N_wrong_start, N_no_cassette, N_other_end, replace=False):\n # if self doesn't have an N_other_end attribute (some older datasets don't, this is for those), set it to 0\n try: self.discarded_other_end\n except AttributeError: self.discarded_other_end = 0\n # set everything\n if N_all_discarded is not None:\n if 'unknown' in (N_all_discarded, self.discarded_read_count): self.discarded_read_count = 'unknown'\n elif replace or self.discarded_read_count is None: self.discarded_read_count = int(N_all_discarded)\n else: self.discarded_read_count += int(N_all_discarded)\n elif replace: self.discarded_read_count = 'unknown'\n if N_wrong_start is not None:\n if 'unknown' in (N_wrong_start, self.discarded_wrong_start): self.discarded_wrong_start = 'unknown'\n elif replace or self.discarded_wrong_start is None: self.discarded_wrong_start = int(N_wrong_start)\n else: self.discarded_wrong_start += int(N_wrong_start)\n elif replace: self.discarded_wrong_start = 'unknown'\n if N_no_cassette is not None:\n if 'unknown' in (N_no_cassette, self.discarded_no_cassette): self.discarded_no_cassette = 'unknown'\n elif replace or self.discarded_no_cassette is None: self.discarded_no_cassette = int(N_no_cassette)\n else: self.discarded_no_cassette += int(N_no_cassette)\n elif replace: self.discarded_no_cassette = 'unknown'\n if N_other_end is not None:\n if 'unknown' in (N_other_end, self.discarded_other_end): self.discarded_other_end = 'unknown'\n elif replace or self.discarded_other_end is None: self.discarded_other_end = int(N_other_end)\n else: self.discarded_other_end += int(N_other_end)\n elif replace: self.discarded_other_end = 'unknown'\n # special case for when we don't know the specific discarded categories, but we know total discarded is 0, \n # so the specific categories must be 0 too:\n if self.discarded_read_count == 0: \n self.discarded_wrong_start, self.discarded_no_cassette, self.discarded_other_end = 0, 0, 0",
"def or__inplace(a,b):",
"def _none_subst(self, *args):\n\n # Imports\n import numpy as np\n\n # Initialize argument list return value, and as None not found\n arglist = [a for a in args]\n none_found = False\n\n # Check for None values\n none_vals = list(map(lambda e: isinstance(e, type(None)), arglist))\n\n # Error if more than one None; handle if exactly one; pass through if\n # none.\n if np.count_nonzero(none_vals) > 1:\n raise ValueError(\n \"Multiple 'None' values [indices {0}] not supported\"\n .format(tuple(np.nonzero(none_vals)[0])))\n elif np.count_nonzero(none_vals) == 1:\n # Must be no iterables that are not strings. Thus, an element-wise\n # test for iterability and an element-wise test for stringiness\n # must give matching arrays\n if not all(np.equal(list(map(np.iterable, arglist)),\n list(map(lambda e: isinstance(e, str), arglist)))):\n raise ValueError(\n \"'None' as parameter invalid with non-str iterables\")\n ## end if\n\n # Parameters okay; replace the None with the appropriate range()\n none_found = True\n none_loc = np.nonzero(none_vals)[0][0]\n arglist[none_loc] = \\\n range(self.num_geoms if none_loc == 0 else self.num_atoms)\n ## end if\n\n # Return the arguments list and the none-found value\n return arglist",
"def add_values(self, *values, replace=False):\n\n if replace: self.reset_values()\n for value in values: self.values = np.append(self.values, value)",
"def _update(self, *keys_and_val):\n if len(xxx) < 2:\n raise NotEnoughInfo\n value, *location = xxx[::-1]\n location.reverse()\n final_key = location.pop()\n ptr__target_dct = get_target_dct(location)\n ptr__target_dct[final_key] = value\n return",
"def update(self, *others):\r\n return self.r.sunion(self.r_key, *[o.r_key for o in others])",
"def __add_xx_args(self, other_xx_args_dict):\n self.__merge_xx_switch_args(other_xx_args_dict)\n self.__merge_xx_value_args(other_xx_args_dict)",
"def le_inplace(a,b):",
"def replace(self, *args, **kwargs): # real signature unknown\r\n pass",
"def __merge_xx_value_args(self, other_xx_args):\n if 'value' in other_xx_args:\n other_value_args = other_xx_args['value']\n if 'value' in self.__xx_args:\n my_value_args = self.__xx_args['value']\n for key, value in other_value_args.iteritems():\n my_value_args[key] = value\n else:\n self.__xx_args['value'] = copy.deepcopy(other_value_args)",
"def replace_with_arg(self, src_arg, tgt_arg):",
"def _merge_values(self, values: Iterable[Tuple[Any, Any]], merged_size: int, **kwargs) -> Optional[Any]:\n raise NotImplementedError()",
"def __adjust(self, *args):\n return \"adjust\"",
"def replaceWithAndMaybeDelete(self, *args):\n return _libsbml.Replacing_replaceWithAndMaybeDelete(self, *args)",
"def add_default_args(kwargs_old, **kwargs_new):\n for key in kwargs_new:\n if key not in kwargs_old:\n kwargs_old[key] = kwargs_new[key]",
"def _UpdateWithKwargs(base, **kwargs):\n conflicts = set(kwargs.keys()) & set(base.keys())\n if conflicts:\n raise GanetiApiError(\"Required fields can not be specified as\"\n \" keywords: %s\" % \", \".join(conflicts))\n\n base.update((key, value) for key, value in kwargs.items()\n if key != \"dry_run\")",
"def monkey_set_params(self, **args):\n self._monkey_set_params_counter += 1\n assert self._args == (args,), 'unexpected additional arguments. Keep the type in mind'",
"def _assign_fields_to_params(cls, fields, params):\n if fields is None:\n fields = cls.get_default_read_fields()\n if fields:\n params['fields'] = ','.join(fields)",
"def replace(self, **kwargs) -> FactorValuesMetadata:\n if not kwargs:\n return self\n return replace(self, **kwargs)",
"def updateFromNamespace(self, args: argparse.Namespace, *, remove: bool = False):\n undefined = object()\n\n for field in dataclasses.fields(self):\n # In case a user wants to overwrite a field with None,\n # we use not None but `undefined` as the default value\n member = getattr(args, field.name, undefined)\n if member is undefined:\n continue\n setattr(self, field.name, member)\n if remove:\n delattr(args, field.name)",
"def and__inplace(a,b):",
"def align(self, *, skip_corners=False, return_on_invalid_result=False, warpwarnings=False, **kwargs):\n #load the images for all HPFs and keep them in memory as long as\n #the AlignSample is active\n self.getDAPI()\n self.logger.info(\"starting alignment\")\n\n weighted_sum_mse = 0.\n sum_weights = 0.\n done = set()\n\n for i, overlap in enumerate(self.overlaps, start=1):\n if skip_corners and overlap.tag in [1,3,7,9] :\n continue\n self.logger.debug(f\"aligning overlap {overlap.n} ({i}/{len(self.overlaps)})\")\n result = None\n #check if the inverse overlap has already been aligned\n #(e.g. if the current overlap is between (1, 2), check the overlap between (2, 1))\n #if so, we don't have to align again\n if self.inverseoverlapsdictkey(overlap) in done:\n inverseoverlap = self.overlapsdict[self.inverseoverlapsdictkey(overlap)]\n if hasattr(inverseoverlap, \"result\"):\n result = overlap.getinversealignment(inverseoverlap)\n #do the alignment\n if result is None:\n result = overlap.align(gputhread=self.gputhread, gpufftdict=self.gpufftdict, **kwargs)\n done.add(self.overlapsdictkey(overlap))\n\n #contribution of the mean squared difference after alignment\n #to the weighted sum\n if result is not None and result.exit == 0: \n w = (overlap.cutimages[0].shape[0]*overlap.cutimages[0].shape[1])\n weighted_sum_mse+=w*result.mse[2]\n sum_weights+=w\n else :\n if result is None:\n reason = \"is None\"\n else:\n reason = f\"has exit status {result.exit}\"\n if return_on_invalid_result :\n if warpwarnings: self.logger.warningglobal(f'Overlap number {i} alignment result {reason}: returning 1e10!!')\n return 1e10\n else :\n if warpwarnings: self.logger.warningglobal(f'Overlap number {i} alignment result {reason}: adding 1e10 to sum_mse!!')\n w = (overlap.cutimages[0].shape[0]*overlap.cutimages[0].shape[1])\n weighted_sum_mse+=w*1e10\n sum_weights+=w\n\n self.logger.info(\"finished align loop for \"+self.SlideID)\n return weighted_sum_mse/sum_weights",
"def addAllNumericHas (self, other):\n \n if self.hasOutErrorPackets():\n if other.hasOutErrorPackets():\n self.outErrorPackets += other.outErrorPackets\n \n if self.hasInErrorPackets():\n if other.hasInErrorPackets():\n self.inErrorPackets += other.inErrorPackets\n \n if self.hasInDiscardPackets():\n if other.hasInDiscardPackets():\n self.inDiscardPackets += other.inDiscardPackets\n \n if self.hasOutUnicastPackets():\n if other.hasOutUnicastPackets():\n self.outUnicastPackets += other.outUnicastPackets\n \n if self.hasInMulticastPackets():\n if other.hasInMulticastPackets():\n self.inMulticastPackets += other.inMulticastPackets\n \n if self.hasOutBroadcastPackets():\n if other.hasOutBroadcastPackets():\n self.outBroadcastPackets += other.outBroadcastPackets\n \n if self.hasInBroadcastPackets():\n if other.hasInBroadcastPackets():\n self.inBroadcastPackets += other.inBroadcastPackets\n \n if self.hasOutMulticastPackets():\n if other.hasOutMulticastPackets():\n self.outMulticastPackets += other.outMulticastPackets\n \n if self.hasInUnknownProtocolPackets():\n if other.hasInUnknownProtocolPackets():\n self.inUnknownProtocolPackets += other.inUnknownProtocolPackets\n \n if self.hasOutDiscardPackets():\n if other.hasOutDiscardPackets():\n self.outDiscardPackets += other.outDiscardPackets\n \n if self.hasInUnicastPackets():\n if other.hasInUnicastPackets():\n self.inUnicastPackets += other.inUnicastPackets\n \n if self.hasOutOctets():\n if other.hasOutOctets():\n self.outOctets += other.outOctets\n \n if self.hasInOctets():\n if other.hasInOctets():\n self.inOctets += other.inOctets\n \n \n pass",
"def _adjust(self, offset, size, *keep_refs):\n for basic_block in self._cfg.values():\n for instr in basic_block:\n instr.adjust(offset, size, instr in keep_refs)",
"def _handle_load_unknown(self, data, original):\n for key, val in original.items():\n if key not in self.fields:\n data[key] = val\n return data"
] |
[
"0.48802876",
"0.48767427",
"0.48273054",
"0.47863755",
"0.46374926",
"0.45955205",
"0.45936954",
"0.45863825",
"0.45304078",
"0.4525031",
"0.4487904",
"0.4453184",
"0.44289082",
"0.43912235",
"0.43337944",
"0.43334466",
"0.43315616",
"0.4321398",
"0.4320638",
"0.43204042",
"0.42595962",
"0.42585775",
"0.42525008",
"0.4250814",
"0.4250751",
"0.42282867",
"0.42270666",
"0.4205432",
"0.41817945",
"0.41714472"
] |
0.66629004
|
0
|
Return total number of reads in given chromosome.
|
def reads_in_chromosome(self, chromosome):
return sum(m.read_info(self.dataset_name).total_read_count
for m in self.dataset if m.position not in SPECIAL_POSITIONS.all_undefined and m.position.chromosome==chromosome)
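A possible usage sketch, assuming 'summary' is an instance of the class this method lives on (i.e. something exposing self.dataset and self.dataset_name); the chromosome names are made up.
chromosomes = ['chromosome_1', 'chromosome_2', 'chromosome_3']
reads_per_chrom = {chrom: summary.reads_in_chromosome(chrom) for chrom in chromosomes}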
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def count(self, chromosome):\n return self.chromosome_list.count(to_chromosome(chromosome))",
"def test_count_total_reads(self):\n \n bam = pybedtools.BedTool(clipper.test_file(\"allup_test.bam\"))\n gene_dfn = pybedtools.BedTool(clipper.test_file(\"hg19_genes.bed\"))\n \n result = count_total_reads(bam, gene_dfn)\n \n self.assertEqual(result, 2086)",
"def countReadCoverage(bam,chrom,start,end,strand=None):\n\n coverage = []\n start = int(start)\n end = int(end)\n for i in range(end-start+1):\n coverage.append(0.0)\n\n i = 0\n if chrom in bam.references:\n for pcol in bam.pileup(chrom,start,end):\n n = 0\n if pcol.pos >= start and pcol.pos <= end:\n for read in pcol.pileups:\n if strand == '+':\n if not read.alignment.is_reverse and read.alignment.mapq >= 0 and not read.alignment.is_duplicate:\n n += 1\n elif strand == '-':\n if read.alignment.is_reverse and read.alignment.mapq >= 0 and not read.alignment.is_duplicate:\n n += 1\n else:\n if read.alignment.mapq >= 0 and not read.alignment.is_duplicate:\n n += 1\n coverage[i] = n\n i += 1\n\n return coverage",
"def numAlleles(self, chrom):\n return self._max[chrom]",
"def test_count_reads_in_region_total(self):\n self.c.skipZeros = False\n self.c.stepSize = 200\n self.c.binLength = 200\n resp, _ = self.c.count_reads_in_region(self.chrom, 0, 200)\n nt.assert_equal(resp, np.array([[2, 4.]]))",
"def count_number_of_reads(filename: Path) -> int:\n\tif filename.suffix == '.gz':\n\t\tcommand = f\"zcat {filename}\"\n\telse:\n\t\tcommand = f\"cat {filename}\"\n\tprocess = subprocess.Popen(command.split(), stdout = subprocess.PIPE)\n\toutput = subprocess.check_output([\"wc\", \"-l\"], stdin = process.stdout)\n\n\treads = int(output.strip()) / 4\n\treturn int(reads)",
"def contig_count(contig):\n return sum([1 for line in open(contig, 'rU').readlines() if line.startswith('>')])",
"def get_chromosome_reads(bam):\n stats = bam.get_index_statistics()\n mapped_reads = {}\n for stat in stats:\n mapped_reads[stat[0]] = [stat[1], stat[2], stat[3]]\n if stat[2] != 0:\n warnings.warn(\"Unmapped reads found in chromosome \" + stat[0])\n\n return mapped_reads",
"def get_count(bam, max_workers):\n print (\"Count total number of paired reads in %s ...\"%bam)\n cmd = ['samtools','view','-c','-f', '3','-@',str(max_workers),bam]\n out, err = subprocess.Popen(cmd, stdin = subprocess.PIPE, stdout=subprocess.PIPE).communicate()\n return int(out.split()[0])",
"def get_chromosome_length(genome):\n \n chr_list = {}\n \n for key in genome:\n chr_list[key] = len(genome[key])\n\n return chr_list",
"def get_rom_page_count(self, page, page_size):\n total_roms = Roms(self._connection).page_size(page_size).page_offset(page).get_count()\n return int(float(total_roms) / page_size)",
"def gatherReadCounts(samplesList, scriptsDir, threads, alignmentPath, outRoot, stype, mode):\n reads = 0\n ext = \".pruned.bam\"\n if mode == \"all_reads\":\n ext = \".bam\"\n for i in range(len(samplesList)):\n bam = os.path.join(alignmentPath, outRoot) + \".\" + stype + \".\" + str(i) + ext\n reads += int(subprocess.run([os.path.join(scriptsDir, \"get_readcount.sh\"), bam, str(threads)], capture_output=True, text=True).stdout.strip(\"\\n\"))\n return reads",
"def count(self):\n return len(self.read_ints())",
"def get_chrom_length(request, genome, chrom):\n logger.debug(\"annotation_server.get_chrom_length called for genome: %s chromosome: %s\" % (genome, chrom)) \n \n if genome in SUPPORTED_GENOMES:\n current_table = eval(genome+ \"_ChromInfo\")\n curr_vals = current_table.objects.filter(chrom__iexact=chrom).values('chrom', 'size')\n data = ValuesQuerySetToDict(curr_vals)\n return HttpResponse(data, 'application/json')\n else:\n return HttpResponse(status=400)\n \n # TODO: return genome lengths according to chrom order i.e. 1,2,3 etc. \n #cursor = connection.cursor() \n #if (chrom):\n # query = \"\"\"SELECT chrom, size from %s.chrominfo where chrom ilike '%s'\"\"\" % (genome, chrom)\n #cursor.execute(query)\n #return HttpResponse(cursor_to_json(cursor), 'application/javascript')",
"def count(self):\n\t\treturn sum(read.copy for read in self.__iter__())",
"def get_sequence_length(bam):\n\n chr_lengths = {} # 'I' | 230218, 'II' | 813184, ...\n chr_lengths_cumsum = {} # 'I' | 0, 'II' | 230218, 'III' | 1043402, ...\n ref_summedlength = 0\n ref_tid = get_chromosome_names(bam)\n for key in ref_tid.keys():\n ref_length = bam.get_reference_length(key)\n chr_lengths[key] = bam.get_reference_length(key)\n chr_lengths_cumsum[key] = ref_summedlength\n ref_summedlength += ref_length\n\n return chr_lengths, chr_lengths_cumsum",
"def _chrom_sizes(fasta_file):\n from pysam import FastaFile\n fa = FastaFile(fasta_file)\n chrom_lens = OrderedDict([(name, l) for name, l in zip(fa.references, fa.lengths)])\n if len(chrom_lens) == 0:\n raise ValueError(f\"no chromosomes found in fasta file: {fasta_file}. \"\n \"Make sure the file path is correct and that the fasta index \"\n \"file {fasta_file}.fai is up to date\")\n fa.close()\n return chrom_lens",
"def total_number():\r\n total_number = 0\r\n file_read = read_file()\r\n for key in file_read:\r\n total_number = total_number + len(file_read[key])\r\n return total_number",
"def get_total_coverage(bam_file, outfile):\n # Run samtools idxstats (this get the coverage for all transcripts:\n # assigne the outfile with the temp folder to keep thing more tidy\n oufile_dir_file = os.path.join(\"temp_reads_per_base\",\n outfile)\n cmd = \" \".join(['samtools',\n 'idxstats',\n bam_file,\n '>',\n oufile_dir_file])\n # data was saved in idxstats_filename\n # call the func\n pipe = subproces_func(cmd)\n # creat a dictioanry to hold all the total expression values for the\n # transcripts.\n overall_expression_dic = dict()\n with open(oufile_dir_file, \"r\") as handle:\n for line in handle:\n data = line.rstrip(\"\\n\").split(\"\\t\")\n transcript = data[0]\n overall_expression_dic[transcript] = [int(x) for x in data[1:]]\n # print overall_expression_dic[\"Mp_O_20647_c0_seq2\"]\n # returns a dictionary: key[transcript], vals = ['577', '274', '0'] len,\n # reads_mapped, last_coloumn\n return overall_expression_dic",
"def get_read_length(read_fn, n_read=10000):\n with GzipFile(read_fn, mode='rb') as f:\n h = SeqIO.QualityIO.FastqGeneralIterator(f)\n i = 0\n l = []\n while i < n_read:\n try:\n t = h.next()\n l.append(len(t[1]))\n i += 1\n except StopIteration:\n logger.warning(\"Requested %d reads but reached the end of the file after %d\", n_read, i)\n return int(np.round(np.mean(l)))",
"def get_number_seqs_for_primer(percent_match,\n seq_count):\n \n total_seq_use=int((1-percent_match)*seq_count)\n \n return total_seq_use",
"def count(self, base):\n return self._dna.count(base)",
"def count_matches(sam_input):\n logging.info(\"Counting aligned bases in %s ...\", sam_input.name)\n\n total_bases = 0\n with pysam.AlignmentFile(sam_input, \"r\") as sam:\n for read in sam:\n total_bases += aligned_bases(read.cigar)\n return total_bases",
"def written_reads(self) -> int:\n return sum(self._written_lengths1.values())",
"def test_get_coverage_of_region_split_read(self):\n\n # turn of read extension\n self.c.extendPairedEnds = False\n self.c.bamFilesList = [self.bamFile1]\n self.c.binLength = 10\n self.c.stepSize = 10\n resp, _ = self.c.count_reads_in_region('chr_cigar', 0, 100)\n nt.assert_array_equal(resp, np.array([[0.],\n [1.],\n [1.],\n [0.],\n [1.],\n [0.],\n [0.],\n [0.],\n [0.],\n [0.]]))",
"def countgenes():\n directory = openfile('db_directory.txt')\n no_genes_file = directory+'GENES_IN_HPO.txt'\n GENES_IN_HPO = openfile(no_genes_file)\n #GENES_IN_HPO = openfile(numbergenes_file)\n return int(GENES_IN_HPO)",
"def chromosome_lengths(self):\n chr_lens = {}\n for r in self.regions(lazy=True):\n if chr_lens.get(r.chromosome) is None:\n chr_lens[r.chromosome] = r.end\n continue\n if r.end > chr_lens[r.chromosome]:\n chr_lens[r.chromosome] = r.end\n return chr_lens",
"def get_population_count(data_path:str, roi):\n try: # open population file\n df_pop = pd.read_csv(data_path / 'population_estimates.csv')\n except:\n print(\"Missing population_estimates.csv in data-path\")\n\n try:\n population = df_pop.query('roi == \"{}\"'.format(roi))['population'].values\n except:\n print(\"{} population estimate not found in population_estimates.csv\".format(args.roi))\n\n return int(population)",
"def hit_coverage(self):\n s = self.hit_aln.replace(\"=\", \"\")\n return len(s)",
"def get_count(cls):\n total = 0\n for counter in SimpleCounterShard.objects.all():\n total += counter.count\n return total"
] |
[
"0.7780695",
"0.66113275",
"0.6425552",
"0.6353458",
"0.631541",
"0.6280178",
"0.6219694",
"0.6006945",
"0.59613764",
"0.5910651",
"0.58733356",
"0.5851103",
"0.58273345",
"0.5808138",
"0.57850856",
"0.5781013",
"0.5725598",
"0.5714295",
"0.5690985",
"0.56745404",
"0.5669577",
"0.5632503",
"0.56005377",
"0.5582175",
"0.5573894",
"0.5568932",
"0.55387676",
"0.55368185",
"0.549829",
"0.549234"
] |
0.8146626
|
0
|
Return total number of mutants in given chromosome.
|
def mutants_in_chromosome(self, chromosome):
return sum(1 for m in self.dataset if m.read_info(self.dataset_name).total_read_count
and m.position not in SPECIAL_POSITIONS.all_undefined and m.position.chromosome==chromosome)
|
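A minimal, self-contained sketch of the same counting idea, for illustration only: Mutant, total_read_count and chromosome are hypothetical stand-ins for the real mutant objects, and the undefined-position filtering of the original method is omitted.

from collections import namedtuple

# Hypothetical stand-in for the dataset's mutant objects (illustration only).
Mutant = namedtuple("Mutant", ["chromosome", "total_read_count"])

def count_mutants_in_chromosome(mutants, chromosome):
    # Count mutants that have at least one read and lie on the given chromosome.
    return sum(1 for m in mutants if m.total_read_count and m.chromosome == chromosome)

mutants = [Mutant("chromosome_1", 5), Mutant("chromosome_1", 0), Mutant("chromosome_2", 3)]
assert count_mutants_in_chromosome(mutants, "chromosome_1") == 1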
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def count(self, chromosome):\n return self.chromosome_list.count(to_chromosome(chromosome))",
"def N_genes_with_multiple_mutants(self):\n return len(self.genes_with_multiple_mutants)",
"def reads_in_chromosome(self, chromosome):\n return sum(m.read_info(self.dataset_name).total_read_count \n for m in self.dataset if m.position not in SPECIAL_POSITIONS.all_undefined and m.position.chromosome==chromosome)",
"def count_total_mutations(seqs, database):\n total = 0\n for seq in seqs:\n total += count_minimum_mutations(seq, database)\n return total",
"def __len__(self) -> int:\n return len(self.__mutants)",
"def num_mutations(self):\n return sum(len(site.mutations) for site in self.sites())",
"def member_count(self) -> int:\n return sum([g.member_count for g in self.guilds])",
"def __len__(self):\n return len(self.chromosome_list)",
"def mutation_count(variant):\n _validate_str(variant)\n if variant == WILD_TYPE_VARIANT:\n return 0\n elif variant == SYNONYMOUS_VARIANT:\n return 0\n else:\n result = [x.strip() for x in variant.split(\",\")]\n if len(set(result)) != len(result):\n raise ValueError(\"Duplicate mutant substrings found in variant\")\n return len(result)",
"def num_mutations(self, this_node=None, path=()):\n\n if this_node is None:\n this_node = self.root\n self.total_num_mutations = 0\n\n if isinstance(path, tuple):\n path = list(path)\n\n for edge in self.edges_from(this_node.id):\n next_node = self.nodes[edge.dst]\n self.total_num_mutations += next_node.num_mutations()\n\n if edge.src != self.root.id:\n path.append(edge)\n\n self.num_mutations(next_node, path)\n\n # finished with the last node on the path, pop it off the path stack.\n if path:\n path.pop()\n\n return self.total_num_mutations",
"def get_chromosome_length(genome):\n \n chr_list = {}\n \n for key in genome:\n chr_list[key] = len(genome[key])\n\n return chr_list",
"def to_number_of_molecules(self, total_substance_molecules, tolerance=None):\n raise NotImplementedError()",
"def count_nucleotides(mat):\n\n final_counts = np.ones((4, mat.shape[1]))\n\n for i in range(len(mat[0, :])):\n cur_nucleotides = np.ones((4, 1))\n a_count = 0\n c_count = 0\n g_count = 0\n t_count = 0\n for j in range(len(mat[:, 0])):\n if mat[j, i] == 'A':\n a_count = a_count + 1\n elif mat[j, i] == 'C':\n c_count = c_count + 1\n elif mat[j, i] == 'G':\n g_count = g_count + 1\n elif mat[j, i] == 'T':\n t_count = t_count + 1\n cur_nucleotides = np.array([a_count, c_count, g_count, t_count])\n final_counts[:, i] = cur_nucleotides\n return final_counts",
"def count_total_mutations_cpp(seqs):\n folder = \"/gpfs/group/cdm/IPRO_Suite/modules/CPP/humanization/\"\n name = \"humanization.out\"\n shutil.copyfile(folder + name, name)\n cmd = \"chmod a+x \" + name\n os.system(cmd)\n seqFile = \"sequences.txt\"\n f = open(seqFile, 'w')\n for s in seqs:\n f.write(s + \"\\n\")\n f.close()\n cmd = \"./humanization.out \" + seqFile\n os.system(cmd)\n countFile = \"counts.txt\"\n if os.path.exists(countFile):\n f = open(countFile, 'r')\n firstline = f.readline().strip(' \\t\\n')\n return int(firstline)\n else:\n text = \"humanization.out cpp code do not give the right counts of the mutations, please check\"\n raise DeimmunizationError(text)",
"def count(self, volume):\n\n countResult = 0\n\n for x in range(volume.shape[0]):\n for y in range(volume.shape[1]):\n for z in range(volume.shape[2]):\n if self.isMember(volume[x,y,z]):\n countResult += 1\n\n return countResult",
"def member_count(ctx, verbosity):\n\n if verbosity is not None:\n logging.basicConfig(level=getattr(logging, verbosity))\n else:\n logging.getLogger(__name__).addHandler(logging.NullHandler())\n\n ma = MailmanAdmin(os.environ['GEOUSAGE_MAILMAN_ADMIN_URL'],\n os.environ['GEOUSAGE_MAILMAN_ADMIN_PASSWORD'])\n\n click.echo(ma.member_count)",
"def getNumReactants(self):\n return _libsbml.Reaction_getNumReactants(self)",
"def get_members_count(self, *args, **kwargs):\n return self.bot.get_chat_members_count(self.id, *args, **kwargs)",
"def count_nucleotides(dna, nucleotide):\n return dna.count(nucleotide)",
"def number_of_permutations(self) -> int:\n perms = math.factorial(len(self._word))\n for v in self._char_counts.values():\n if v > 1:\n perms /= math.factorial(v)\n return perms",
"def count_nucleic_acids(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_nucleic_acids()\n return n",
"def numAlleles(self, chrom):\n return self._max[chrom]",
"def getTotalMancount(self, playerID):\n count=0\n for camp in self.__camps:\n if( camp.getOwner() == playerID ):\n count = count + camp.getMancount()\n for army in self.__armies:\n if( army.getOwner() == playerID ):\n count = count + army.getMancount()\n return count",
"def member_count(self):\n return len(self.members)",
"def calculate_protein_mass(protein: str):\n result = 0\n for p in protein:\n result += monoisotopic_mass_table[p]\n return result",
"def membership_count(self, S_go, S_gene):\n return self.go_count(S_gene, S_go)\n #c=self.go_count(S_gene)\n #if type(S_go)!=set:\n # S_go=set(S_go)\n #c={ k:v for k,v in c.items() if k in S_go }\n #return c",
"def _get_mutation_amount(self):\n return self._get_sign() * self._get_number()",
"def count_nucleotides(dna, nucleotide):\n count = 0\n for char in dna:\n if char == nucleotide:\n count += 1\n return count",
"def num_mutations(self):\n return len(self.fuzz_library)",
"def get_num_plants(self) -> int:\r\n\r\n return len(self.plants)"
] |
[
"0.7149963",
"0.66784513",
"0.622585",
"0.6020063",
"0.5979922",
"0.5883878",
"0.57367784",
"0.5664344",
"0.5582601",
"0.55447096",
"0.54473275",
"0.54364485",
"0.5431855",
"0.53666407",
"0.53529334",
"0.5345837",
"0.5343593",
"0.53417605",
"0.5323589",
"0.52852833",
"0.524494",
"0.52282846",
"0.5182455",
"0.51731426",
"0.5170275",
"0.5162833",
"0.514295",
"0.512322",
"0.5120981",
"0.51100975"
] |
0.7370951
|
0
|
Return (gene_feature,count) list, biologically sorted, optionally with all "boundary" features counted as one. The source gene feature counts are based on the self.mutant_counts_by_feature dict. If merge_confusing_features==True, any locations containing '??' will be listed as '??'. If merge_multi_splice_variants==True, any locations containing '|' and no '??' will be listed as 'multiple_splice_variants'. If merge_boundary_features==True, any locations containing '/' and no '??' will be listed as 'boundary'.
|
def merged_gene_feature_counts(self, merge_multi_splice_variants=True, merge_boundary_features=True,
merge_confusing_features=False):
merged_feature_count_dict = defaultdict(int)
for feature, count in self.mutant_counts_by_feature.items():
# note that anything containing '??' AND '/' never gets merged as boundary
if '??' in feature:
if merge_confusing_features: merged_feature_count_dict['??'] += count
else: merged_feature_count_dict[feature] += count
        elif '|' in feature and merge_multi_splice_variants: merged_feature_count_dict['multiple_splice_variants'] += count
elif '/' in feature and merge_boundary_features: merged_feature_count_dict['boundary'] += count
else: merged_feature_count_dict[feature] += count
return merged_feature_count_dict
|
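A standalone sketch of the merging rules above, run on a made-up feature-count dict; the function name and toy values are assumptions for illustration, not the original class method.

from collections import defaultdict

def merged_feature_counts(counts_by_feature, merge_multi_splice_variants=True,
                          merge_boundary_features=True, merge_confusing_features=False):
    merged = defaultdict(int)
    for feature, count in counts_by_feature.items():
        # '??' is checked first, so confusing features are never merged as 'boundary'.
        if '??' in feature:
            merged['??' if merge_confusing_features else feature] += count
        elif '|' in feature and merge_multi_splice_variants:
            merged['multiple_splice_variants'] += count
        elif '/' in feature and merge_boundary_features:
            merged['boundary'] += count
        else:
            merged[feature] += count
    return dict(merged)

toy_counts = {"CDS": 10, "CDS/intron": 2, "CDS|five_prime_UTR": 1, "intron??": 3}
print(merged_feature_counts(toy_counts))
# {'CDS': 10, 'boundary': 2, 'multiple_splice_variants': 1, 'intron??': 3}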
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def compute_feature_properties(self):\n\n self.valuecounts = {}\n self.unique_values = {}\n self.missing_ratios = {}\n self.counts = {}\n self.codemaps = {}\n for f in self.features:\n # Compute various things\n all_values = [self.data[l].get(f,\"?\") for l in self.data]\n missing_data_ratio = all_values.count(\"?\") / (1.0*len(all_values))\n non_q_values = [v for v in all_values if v != \"?\"]\n counts = {}\n for v in non_q_values:\n counts[v] = non_q_values.count(v)\n unique_values = list(set(non_q_values))\n # Sort unique_values carefully.\n # Possibly all feature values are numeric strings, e.g. \"1\", \"2\", \"3\".\n # If we sort these as strings then we get weird things like \"10\" < \"2\".\n # This can actually matter for things like ordinal models.\n # So convert these to ints first...\n if all([v.isdigit() for v in unique_values]):\n unique_values = list(map(int, unique_values))\n unique_values.sort()\n unique_values = list(map(str, unique_values))\n # ...otherwise, just sort normally\n else:\n unique_values.sort()\n self.unique_values[f] = unique_values\n\n N = len(unique_values)\n self.valuecounts[f] = N\n self.missing_ratios[f] = missing_data_ratio\n self.counts[f] = counts\n self.codemaps[f] = self.build_codemap(unique_values)",
"def getFeatures(self, gameState, action):\r\n features = util.Counter()\r\n successor = self.getSuccessor(gameState, action)\r\n foodList = self.getFood(successor).asList() \r\n features['successorScore'] = -len(foodList)#self.getScore(successor)\r\n\r\n if len(foodList) > 0: # This should always be True, but better safe than sorry\r\n myPos = successor.getAgentState(self.index).getPosition()\r\n minDistance = min([self.getMazeDistance(myPos, food) for food in foodList])\r\n features['distanceToFood'] = minDistance\r\n return features",
"def _merge_image_features(feature_class_type: Type[Union[kapture.Keypoints,\n kapture.Descriptors,\n kapture.GlobalFeatures]],\n feature_type: str,\n features_list: Union[List[Optional[kapture.Keypoints]],\n List[Optional[kapture.Descriptors]],\n List[Optional[kapture.GlobalFeatures]]],\n features_paths: List[str],\n output_path: str,\n tar_handlers: List[TarCollection]\n ) -> Union[kapture.Keypoints, kapture.Descriptors, kapture.GlobalFeatures]:\n assert len(features_list) > 0\n assert len(features_paths) == len(features_list)\n\n # find no none value\n val = [(i, d) for i, d in enumerate(features_list) if d is not None]\n assert len(val) > 0\n\n merged_features = val[0][1]\n for j, (i, features) in enumerate(val):\n assert isinstance(features, feature_class_type)\n assert features.type_name == merged_features.type_name\n assert features.dtype == merged_features.dtype\n assert features.dsize == merged_features.dsize\n if feature_class_type == kapture.Descriptors or feature_class_type == kapture.GlobalFeatures:\n assert not isinstance(features, kapture.Keypoints) # IDE type check help\n assert not isinstance(merged_features, kapture.Keypoints) # IDE type check help\n assert features.metric_type == merged_features.metric_type\n if feature_class_type == kapture.Descriptors:\n assert isinstance(features, kapture.Descriptors) # IDE type check help\n assert isinstance(merged_features, kapture.Descriptors) # IDE type check help\n assert features.keypoints_type == merged_features.keypoints_type\n for name in features:\n if j > 0 and name in merged_features:\n getLogger().warning(f'{name} was found multiple times.')\n else:\n merged_features.add(name)\n if output_path:\n # TODO: uses kapture.io.features_list.get_image_features_dirpath()\n in_path = kapture.io.features.get_features_fullpath(feature_class_type,\n feature_type,\n features_paths[i],\n name,\n tar_handlers[i])\n out_path = kapture.io.features.get_features_fullpath(feature_class_type,\n feature_type,\n output_path,\n name)\n if in_path != out_path:\n # skip actual copy if file does not actually move.\n os.makedirs(os.path.dirname(out_path), exist_ok=True)\n if isinstance(in_path, str):\n shutil.copy(in_path, out_path)\n else:\n # in_path is a tuple [str, TarHandler]\n # keypoints are not stored in a file, have to read them to be able to copy them\n array = in_path[1].get_array_from_tar(in_path[0], features.dtype, features.dsize)\n array_to_file(out_path, array)\n return merged_features",
"def build_by_features(self, features):\n v = [0] * self.f\n masks = [1 << i for i in range(self.f)]\n if isinstance(features, dict):\n features = features.items()\n for f in features:\n if isinstance(f, basestring):\n h = self.hashfunc(f.encode('utf-8'))\n w = 1\n else:\n assert isinstance(f, collections.Iterable)\n h = self.hashfunc(f[0].encode('utf-8'))\n w = f[1]\n for i in range(self.f):\n v[i] += w if h & masks[i] else -w\n ans = 0\n for i in range(self.f):\n if v[i] >= 0:\n ans |= masks[i]\n self.value = ans",
"def build_by_features(self, features):\n v = [0] * self.f\n masks = [1 << i for i in range(self.f)]\n if isinstance(features, dict):\n features = features.items()\n for f in features:\n if isinstance(f, basestring):\n h = self.hashfunc(f.encode('utf-8'))\n w = 1\n else:\n assert isinstance(f, collections.Iterable)\n h = self.hashfunc(f[0].encode('utf-8'))\n w = f[1]\n for i in range(self.f):\n v[i] += w if h & masks[i] else -w\n ans = 0\n for i in range(self.f):\n if v[i] >= 0:\n ans |= masks[i]\n self.value = ans",
"def get_feature_set_SB(tweet):\n #pos-tag frequencies\n# print \"Tagged words in tweet: \", tweet.tagged_words\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# if tag=='PRtinf':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJS':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJ':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='NP':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='DET':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='P':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n continue\n# print \"Tag frequencies: \", pos_tag_freq\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n# print \"Additional frequencies: \", additional_freq\n# raw_input(\"Continue?\")\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n# print \"All features: \", features\n# raw_input(\"Continue?\")\n return features",
"def feature_list(self) -> google.protobuf.internal.containers.MessageMap[builtins.str, global___FeatureList]:",
"def used_features(self) -> List[str]:\n mapped = map_pipeline_names(self.input_features, self.output_features)\n return list(set(mapped))",
"def add_neighbor_count_features(edge_df, X_df, structures_df):\n cnt_df = edge_df.groupby(\n ['molecule_name', 'atom_index_0']).size().to_frame('EF_neighbor_count')\n cnt_df.reset_index(inplace=True)\n cnt_df.rename({'atom_index_0': 'atom_index_zero'}, inplace=True, axis=1)\n\n X_df = X_df.reset_index()\n X_df = pd.merge(\n X_df,\n cnt_df,\n how='left',\n left_on=['molecule_name', 'atom_index_1'],\n right_on=['molecule_name', 'atom_index_zero'])\n\n X_df.rename({\n 'EF_neighbor_count': 'EF_atom_index_1_neighbor_count'\n },\n inplace=True,\n axis=1)\n X_df.drop(['atom_index_zero'], inplace=True, axis=1)\n X_df.set_index('id', inplace=True)\n incorrect_absence = 100 * X_df['EF_atom_index_1_neighbor_count'].isna(\n ).sum() / X_df.shape[0]\n print('[EdgeFeatures] Setting following percentage of edges to 0:',\n incorrect_absence)\n\n X_df['EF_atom_index_1_neighbor_count'] = X_df[\n 'EF_atom_index_1_neighbor_count'].fillna(0).astype(np.uint8)\n return X_df",
"def finalize(param, input_files='count_files'):\n\n import csv\n HELPER.writeLog('Collecting featureCount raw counts ... \\n', param)\n\n #check which of these files are actually available\n working_files = [iFile for iFile in param[input_files] if iFile != '']\n\n if len(working_files) > 0:\n #get feature ID using the first column in the first file in the list of working files\n csv_file = open(working_files[0])\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n\n #For featureCount output, we want to skip the first two lines as they\n #include the featureCount call and the headers which we don't want\n next(csv_reader, None)\n next(csv_reader, None)\n\n #Now start by taking the list of identifier,\n #which is the first column in the file\n counts = [row[0] for row in csv_reader]\n csv_file.close()\n\n #get all the expression values\n header = 'ID'\n for idx in range(param['num_samples']):\n if param[input_files] != '':\n header = header+'\\t'+param['stub'][idx]\n csv_file = open(param[input_files][idx])\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n\n #Here too we want to skip the first two lines, before getting the counts\n next(csv_reader, None)\n next(csv_reader, None)\n #Now start getting the counts (row[6]) and add in the ID (counts[i]) before it\n idx = 0\n for row in csv_reader:\n counts[idx] = counts[idx]+'\\t'+row[6]\n idx += 1\n csv_file.close()\n\n #output the file\n out_file = param['working_dir']+'deliverables/featureCount_raw_counts.txt'\n out_handle = open(out_file, 'w')\n out_handle.write(header+'\\n')\n\n for i in range(len(counts)):\n out_handle.write(counts[i]+'\\n')\n out_handle.close()\n\n #output_phenotype_file\n HELPER.writeLog('Writing phenotype data ... \\n', param)\n MODULE_HELPER.output_sample_info(param)\n\n #write summary stats\n #featureCount does this on its own so we can just fetch each summary file\n #check which of these files are actually available\n working_files = [iFile+'.summary' for iFile in param[input_files] if iFile != '']\n\n if len(working_files) > 0:\n #get Status column from summary file using the first column in\n #the first file in the list of working files\n csv_file = open(working_files[0])\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n #Here, we want to skip the first line, as it simply points to the\n #alignment file used when running featureCount\n next(csv_reader, None)\n\n #Now start by taking the list of identifier,\n #which is the first column in the file\n entry = [row[0] for row in csv_reader]\n csv_file.close()\n\n #get all the summary stats for each sample\n header = 'Status'\n\n for idx in range(param['num_samples']):\n if param[input_files] != '':\n header = header+'\\t'+param['stub'][idx]\n #Fetch the corresponding sample's summary file\n csv_file = open(param[input_files][idx]+'.summary')\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n #Again, we want to skip the first line, as it simply points\n #to the alignment file used when running featureCount\n next(csv_reader, None)\n\n #Now start getting the stats (row[1]) and add in the Status\n # (counts[i]) before it\n i = 0\n for row in csv_reader:\n entry[i] = entry[i]+'\\t'+row[1]\n i += 1\n csv_file.close()\n #output the file\n out_handle = open(param['working_dir']+\n 'results/featureCount/featureCount_stats.txt',\n 'w')\n out_handle.write(header+'\\n')\n\n for i in range(len(entry)):\n out_handle.write(entry[i]+'\\n')\n out_handle.close()\n else:\n print 'featureCount was not run successfully on any of the files..\\n'",
"def bag_of_features(self, word, normalize=True):\n word_features = self.word_fts(word, normalize)\n features = [v + f for f in self.names for v in ['+', '0', '-']]\n bag = collections.OrderedDict()\n for f in features:\n bag[f] = 0\n vdict = {-1: '-', 0: '0', 1: '+'}\n for w in word_features:\n for (f, v) in w.items():\n bag[vdict[v] + f] += 1\n return numpy.array(list(bag.values()))",
"def getFeatures(self, gameState, action):\n # features = util.Counter()\n # successor = self.getSuccessor(gameState, action)\n # features['successorScore'] = self.getScore(successor)\n # return features\n if self.isOffensive:\n return self.getOffensiveFeatures(gameState, action)\n else:\n return self.getDefensiveFeatures(gameState, action)",
"def match_features(self):\n type_of_None = type(None)\n if type(self.featureDesA) != type_of_None and type(self.featureDesB) != type_of_None:\n matches = self.bfMatcher.match(self.featureDesA, self.featureDesB)\n self.match = sorted(matches, key=lambda x: x.distance)\n self.match = self.match[:50]\n else:\n self.match = []",
"def _get_frequent_features(self):\n feature_terms = [sub_items for items in self.data['noun_and_np'].values for sub_items in items]\n C1 = apriori.createC1(feature_terms)\n D = map(set, feature_terms)\n L1, support_data = apriori.scanD(D,C1,0.01) # minimum support 0.01\n self.frequent_features = map(lambda x: \"\".join(list(x)), L1)",
"def __generate_all_features_indices__(self):\n features = self.features_dict\n histories = self.histories_dict\n for k in range(self.data.getSentencesSize()):\n sentence = self.data.getSentenceByIndex(k)\n tags = self.data.getTagsByIndex(k)\n for i in range(len(sentence)):\n history = HistoryTuple(k, sentence, tags, i)\n history_key = (tags[i], history.getTupleKey())\n features_indices = self.getFeaturesIndices(tags[i], history, True)\n features_key = tuple(features_indices)\n features[features_key] += 1\n if len(features_indices) == 0:\n self.null_histories_set.add(history_key)\n histories[history_key] = features_indices",
"def format_used_features(model_dir):\n feature_keys = {\n \"indel_complexity\": \"ICP\",\n \"dissimilarity\": \"DSM\",\n \"indel_size\": \"ISZ\",\n \"repeat\": \"REP\",\n \"is_uniq_mapped\": \"UQM\",\n \"is_near_boundary\": \"NEB\",\n \"equivalence_exists\": \"EQX\",\n \"is_bidirectional\": \"BID\",\n \"is_multiallelic\": \"MTA\",\n \"is_inframe\": \"FRM\",\n \"is_splice\": \"SPL\",\n \"is_truncating\": \"TRN\",\n \"is_in_cdd\": \"CDD\",\n \"indel_location\": \"LOC\",\n \"is_nmd_insensitive\": \"NMD\",\n \"ipg\": \"IPG\",\n \"cds_length\": \"LEN\",\n \"lc\": \"LC\",\n \"local_lc\": \"LLC\",\n \"gc\": \"GC\",\n \"local_gc\": \"LGC\",\n \"strength\": \"SG\",\n \"local_strength\": \"LSG\",\n \"is_ins\": \"INS\",\n \"is_at_ins\": \"ATI\",\n \"is_at_del\": \"ATD\",\n \"is_gc_ins\": \"GCI\",\n \"is_gc_del\": \"GCD\",\n \"ref_count\": \"REFC\",\n \"alt_count\": \"ALTC\",\n \"is_on_db\": \"SNP\",\n }\n\n feature_dict = make_feature_dict(model_dir)\n\n features_used_for_sni = [\n feature_keys[f] for f in feature_dict[\"single_nucleotide_indels\"]\n ]\n features_used_for_mni = [\n feature_keys[f] for f in feature_dict[\"multi_nucleotide_indels\"]\n ]\n features_used_for_sni.sort()\n features_used_for_mni.sort()\n\n d = {}\n d[\"##features_used_for_single_nucleotide_indels\"] = \";\".join(features_used_for_sni)\n d[\"##features_used_for_multi_nucleotide_indels\"] = \";\".join(features_used_for_mni)\n\n return d",
"def _parse_features(chrom: str, db: FeatureDB, feature_types: List[str]) -> List[Dict]:\n feature_collections = []\n for top_level_feature in _find_all_top_level_non_gene_features(chrom, db, feature_types):\n children = list(db.children(top_level_feature, level=1))\n\n # extract parent locus tag to compare to children\n locus_tag = None\n if BioCantorQualifiers.LOCUS_TAG.value in top_level_feature.attributes:\n locus_tag = top_level_feature.attributes[BioCantorQualifiers.LOCUS_TAG.value][0]\n\n if not children:\n # treat this isolated feature as both FeatureIntervalCollection and FeatureInterval\n feature = _parse_child_features_to_feature_interval([top_level_feature])\n # infer a FeatureCollection from the information on the FeatureInterval\n feature_collection = dict(\n feature_intervals=[feature],\n feature_collection_name=feature[\"feature_name\"],\n feature_collection_id=feature[\"feature_id\"],\n feature_collection_type=top_level_feature.featuretype,\n locus_tag=locus_tag,\n sequence_name=chrom,\n qualifiers=feature[\"qualifiers\"],\n )\n # remove qualifiers from feature\n del feature[\"qualifiers\"]\n else:\n # combine all children into a FeatureInterval\n feature = _parse_child_features_to_feature_interval(children, locus_tag=locus_tag)\n feature_collection_name, feature_collection_id = extract_feature_name_id(top_level_feature.attributes)\n\n feature_collection = dict(\n feature_intervals=[feature],\n feature_collection_name=feature_collection_name,\n feature_collection_id=feature_collection_id,\n feature_collection_type=top_level_feature.featuretype,\n locus_tag=locus_tag,\n sequence_name=chrom,\n qualifiers=filter_and_sort_qualifiers(top_level_feature.attributes),\n )\n\n feature_collections.append(feature_collection)\n return feature_collections",
"def _concatenate_features(features):\n pass",
"def mergeGeometries(self):\n self.geometry = reduce(lambda p1,p2 : p1.union(p2) ,map(lambda tax : tax.biomeGeometry,self.taxonomies))\n return self.geometry",
"def get_feature_count(self, feature, category):\r\n if feature in self.feature_count and category in self.feature_count[feature]:\r\n return float(self.feature_count[feature][category])\r\n else:\r\n return 0.0",
"def to_features(self):\n to_return = dict()\n\n to_return['bias'] = 1.0\n to_return['user:' + self.user] = 1.0\n to_return['format:' + self.format] = 1.0\n to_return['token:' + self.token.lower()] = 1.0\n\n to_return['part_of_speech:' + self.part_of_speech] = 1.0\n for morphological_feature in self.morphological_features:\n to_return['morphological_feature:' + morphological_feature] = 1.0\n to_return['dependency_label:' + self.dependency_label] = 1.0\n\n return to_return",
"def feature_index(self, feature: Text) -> int:\n count = 0\n for feature_name in self.vectorizer.get_feature_names():\n if(feature == feature_name):\n return count\n count += 1",
"def num_features(self) -> Dict[NodeType, int]:\n return self.num_node_features",
"def getFeatures(self, gameState, action):\n\t\tfeatures = util.Counter()\n\t\tfeatures['successorScore'] = self.getScore(gameState)\n\t\treturn features",
"def _get_feature_ids(self):\n # return union of all used features by slave classifiers\n feature_ids = set([])\n for clf in self.__clfs:\n feature_ids = feature_ids.union(set(clf.ca.feature_ids))\n return list(feature_ids)",
"def _get_feature2field(self):\n fea_id = 0\n for names in self.feature_names:\n if names is not None:\n for name in names:\n self.feature2id[name] = fea_id\n fea_id += 1\n\n if self.fields is None:\n field_id = 0\n for key, value in self.feature2id.items():\n self.feature2field[self.feature2id[key]] = field_id\n field_id += 1\n else:\n for key, value in self.fields.items():\n for v in value:\n try:\n self.feature2field[self.feature2id[v]] = key\n except:\n pass",
"def count_unique_features(self):\n return N_UNIQUE_FEATS",
"def calculate_bottleneck_features(self):\n if self.args.histogram == True:\n self.feature_count = self.args.tensor_neurons + self.args.bins\n else:\n self.feature_count = self.args.tensor_neurons",
"def getFeatures(self, gameState, action):\n features = util.Counter()\n successor = self.getSuccessor(gameState, action)\n features['successorScore'] = self.getScore(successor)\n return features",
"def _extract_features(self):\n # print(os.getpid())\n return {n:self._extract_feature(f) for (n,f) in self.features.items()}"
] |
[
"0.55473757",
"0.53517133",
"0.5245574",
"0.51562196",
"0.51562196",
"0.50710744",
"0.5049784",
"0.5049486",
"0.50493675",
"0.5034017",
"0.5019584",
"0.4977286",
"0.4977015",
"0.49388385",
"0.49324378",
"0.49211287",
"0.490738",
"0.49024293",
"0.48971713",
"0.48739794",
"0.48725185",
"0.48638523",
"0.48591813",
"0.48589927",
"0.4853473",
"0.48481977",
"0.48395213",
"0.48352972",
"0.4816848",
"0.480543"
] |
0.7783929
|
0
|
Return the list of mutants with the most total reads (counted within the given dataset if this is a multi-dataset object).
|
def most_common_mutants(self):
highest_readcount = max([mutant.read_info(self.dataset_name).total_read_count for mutant in self.dataset])
highest_readcount_mutants = [mutant for mutant in self.dataset
if mutant.read_info(self.dataset_name).total_read_count==highest_readcount]
return highest_readcount_mutants
|
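The tie-aware lookup above boils down to a max-then-filter pattern; a toy version with made-up readcounts:

mutant_readcounts = {"mutant_A": 120, "mutant_B": 45, "mutant_C": 120}

highest = max(mutant_readcounts.values())
most_common = [name for name, reads in mutant_readcounts.items() if reads == highest]
assert sorted(most_common) == ["mutant_A", "mutant_C"]  # ties are kept, not broken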
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _most_common_mutants_info(self, dataset=None):\n summ = self._get_summary(dataset)\n most_common_mutants = summ.most_common_mutants\n m = most_common_mutants[0]\n # calculate the fraction of total reads per mutant, assuming each mutant has the same readcount\n assert len(set([m.read_info(dataset).total_read_count for m in most_common_mutants])) == 1\n readcount_info = value_and_percentages(m.read_info(dataset).total_read_count, [summ.aligned_read_count])\n if len(most_common_mutants) == 1: return \"%s (%s)\"%(readcount_info, m.position)\n else: return \"%s (%s mutants)\"%(readcount_info, len(most_common_mutants))",
"def mutants_in_dataset(self, dataset_name=None):\n return [mutant for mutant in self if dataset_name is None or mutant.by_dataset[dataset_name].total_read_count>0]",
"def get_most_popular_merchants(self):\n if self.model:\n return self.model.wv.index_to_key[: self.num_rec]\n else:\n print(\"train the model before performing this step\")\n return None",
"def most_read_book(self):\n reading_max = 0\n most_reads = \"\"\n for book in self.books.keys():\n rating = book.get_average_rating()\n if rating > reading_max:\n most_reads = book\n reading_max = rating\n else:\n continue\n return most_reads",
"def get_medoids(self):\r\n\r\n return self.__optimal_medoids",
"def get_medoids(self):\n\n return self.__optimal_medoids",
"def mode(self):\r\n\t\t_set\t= set(self.sample)\r\n\t\t_list\t= [self.sample.count(i) for i in _set]\r\n\t\treturn list(_set)[_list.index(max(_list))]",
"def mutants_in_chromosome(self, chromosome):\n return sum(1 for m in self.dataset if m.read_info(self.dataset_name).total_read_count \n and m.position not in SPECIAL_POSITIONS.all_undefined and m.position.chromosome==chromosome)",
"def how_popular_most_popular(data):\r\n #list of artists\r\n artists = []\r\n for key in data:\r\n if key[-1] != \"$\":\r\n for x in data[key]:\r\n artists += [x]\r\n sorted_artists = selection_sort(artists)\r\n count = 1\r\n max_count = 1\r\n max_artists = []\r\n for i in range(len(sorted_artists)-1):\r\n #ends at second to last index because I use i and i + 1\r\n if sorted_artists[i] == sorted_artists[i+1]:\r\n count += 1\r\n else:\r\n if count == max_count:\r\n max_artists += [sorted_artists[i]]\r\n count = 1\r\n elif count > max_count:\r\n max_artists = []\r\n max_artists += [sorted_artists[i]]\r\n max_count = count\r\n count = 1\r\n return max_count",
"def getMostUsedCount( self, limit ):\n cur = self.__conn.cursor()\n cur.execute( \"\"\"SELECT Data, COUNT(Data) AS UseCount\n FROM PrivilegeUse\n GROUP BY Data\n ORDER BY UseCount DESC\n LIMIT %d\"\"\", limit )\n class Use:\n def __init__( self, faq, count ):\n self.faq = faq\n self.count = count\n \n return [ Use(row[0], row[1]) for row in cur.fetchall() ]",
"def N_genes_with_multiple_mutants(self):\n return len(self.genes_with_multiple_mutants)",
"def most_popular_artist(our_data):\n counter_dict = {}\n for artist in all_artists(our_data):\n if artist in counter_dict:\n counter_dict[artist] += 1\n else:\n counter_dict[artist] = 1\n maximum_albums = max(counter_dict.values())\n artist_lists = []\n for keys, values in counter_dict.items():\n if values == maximum_albums:\n artist_lists.append(keys) \n return artist_lists",
"def parse_reads_and_select_candidates(self, reads):\n st_time = time.time()\n # read_id_list = []\n total_reads = 0\n read_unique_id = 0\n for read in reads:\n # check if the read is usable\n if read.mapping_quality >= DEFAULT_MIN_MAP_QUALITY and read.is_secondary is False \\\n and read.is_supplementary is False and read.is_unmapped is False and read.is_qcfail is False:\n\n read.query_name = read.query_name + '_' + str(read_unique_id)\n if self.find_read_candidates(read=read):\n # read_id_list.append(read.query_name)\n total_reads += 1\n read_unique_id += 1\n\n if total_reads == 0:\n return []\n\n selected_allele_list = []\n postprocess_read_id_list = set()\n for pos in self.positional_allele_dictionary:\n if pos < self.region_start_position or pos > self.region_end_position:\n continue\n ref = self.reference_dictionary[pos]\n\n all_allele_dictionary = self.positional_allele_dictionary[pos]\n all_mismatch_count = 0\n for allele in all_allele_dictionary:\n all_mismatch_count += all_allele_dictionary[allele]\n\n # pick the top 2 most frequent allele\n allele_frequency_list = list(sorted(all_allele_dictionary.items(), key=operator.itemgetter(1, 0),\n reverse=True))[:PLOIDY]\n allele_list = self._filter_alleles(pos, allele_frequency_list)\n alt1 = allele_list[0] if len(allele_list) >= 1 else None\n alt2 = allele_list[1] if len(allele_list) >= 2 else '.'\n if alt1 is None:\n continue\n mq_rms = round(math.sqrt(self.rms_mq[pos]/self.coverage[pos]), 3) if self.coverage[pos] > 0 else 0\n dp = self.coverage[pos]\n ref_count = self.coverage[pos] - all_mismatch_count\n candidate_record = [self.chromosome_name] + self._get_record(pos, alt1, alt2, ref, ref_count) + [mq_rms] + [dp]\n postprocess_read_id_list.update(self.read_id_by_position[pos])\n selected_allele_list.append(candidate_record)\n\n postprocess_read_id_list = list(postprocess_read_id_list)\n if len(selected_allele_list) > 0:\n self.postprocess_reference()\n self.postprocess_reads(postprocess_read_id_list)\n\n return selected_allele_list",
"def count_allbest_mirbase_reads(bam, counts_file):\n count_ref_hits(bam, counts_file)",
"def get_mostcommon(path, n, i=3):\n allroles = []\n with open(path, 'rt', encoding='mac_roman') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=' ', quotechar='\"')\n for row in csvreader:\n try:\n role = clean(row[i])\n allroles.append(''.join(role))\n except IndexError:\n pass\n mostc = collections.Counter(allroles)\n roles = mostc.most_common(n)\n mostcroles = [x[0] for x in roles]\n return mostcroles",
"def get_most_popular_authors():\n\tdb = psycopg2.connect(database=DBNAME)\n\tc = db.cursor()\n\tc.execute(\" select t1.name,count(*) as total from authors as t1, articles as t2,log as t3 where t3.path=concat('/article/',t2.slug) and t1.id=t2.author group by t1.name order by total desc limit 3;\")\n\tdata = c.fetchall()\n\tdb.close()\n\n\treturn data",
"def top_50():\r\n file_read = read_file()\r\n vacabulary_list = []\r\n for key in file_read:\r\n vacabulary_list.extend(file_read[key])\r\n top_50 = Counter(vacabulary_list).most_common(50)\r\n return (top_50)",
"def get_most_popular_artists(data):\r\n #list of artists\r\n artists = []\r\n for key in data:\r\n if key[-1] != \"$\":\r\n for x in data[key]:\r\n artists += [x]\r\n sorted_artists = selection_sort(artists)\r\n count = 1\r\n max_count = 1\r\n max_artists = []\r\n for i in range(len(sorted_artists)-1):\r\n #ends at second to last index because I use i and i + 1\r\n if sorted_artists[i] == sorted_artists[i+1]:\r\n count += 1\r\n else:\r\n if count == max_count:\r\n max_artists += [sorted_artists[i]]\r\n count = 1\r\n elif count > max_count:\r\n max_artists = []\r\n max_artists += [sorted_artists[i]]\r\n max_count = count\r\n count = 1\r\n return max_artists",
"def n_most_massive(data, massive_count=1000):\n m = np.array(data['m'][0])\n i = np.argsort(m)\n ind = i[:-1 - massive_count:-1]\n return ind",
"def remove_mutants_below_readcount(self, min_readcount, perfect_reads=False):\n if perfect_reads: get_readcount = lambda m: m.perfect_read_count\n else: get_readcount = lambda m: m.total_read_count\n # go over all mutants in self; need to convert dataset to a list to make a separate copy, \n # otherwise we'd be modifying the dataset while iterating through it, which isn't allowed.\n for mutant in list(self):\n if get_readcount(mutant) < min_readcount:\n self.remove_mutant(mutant.IB)\n # TODO really I shouldn't be removing mutants outright, just noting them as removed or something... In that case should they or should they not show up in \"for m in self\"? Probably not - they should have a separate dictionary?\n # TODO should I keep track of removed reads, and print in summary? MAYBE.",
"def get_most_popular_trend_setters(cls):\n\n authors = cls._get_authors_list()\n trend_setters = cls._get_sorted_trend_setters(authors)\n cls._write_trend_setters_to_file(trend_setters)",
"def most_missed_creators(self, cache_max_age=0):\n expected_items = []\n query = u'CLAIM[195:%s] AND NOCLAIM[170]' % \\\n ',195:'.join(self.collections) # collection\n wd_queryset = wdquery.QuerySet(query)\n\n wd_query = wdquery.WikidataQuery(cacheMaxAge=cache_max_age)\n data = wd_query.query(wd_queryset)\n\n if data.get('status').get('error') == 'OK':\n expected_items = data.get('items')\n\n creator_dict = {}\n counter = 0\n for q_val in expected_items:\n q_item = self.wd.QtoItemPage(q_val)\n data = q_item.get()\n claims = data.get('claims')\n if u'P170' in claims:\n continue\n descr = data.get('descriptions').get('en')\n if descr and descr.startswith(u'painting by '):\n creator = descr[len(u'painting by '):]\n if '(' in creator: # to get rid of disambiguation addition\n creator = creator[:creator.find('(')].strip()\n if creator in creator_dict.keys():\n creator_dict[creator] += 1\n else:\n creator_dict[creator] = 1\n counter += 1\n pywikibot.output(u'Found %d mentions of %d creators' %\n (counter, len(creator_dict)))\n # output\n f = codecs.open(u'creatorHitlist.csv', 'w', 'utf-8')\n for k, v in creator_dict.iteritems():\n f.write(u'%d|%s\\n' % (v, k))\n f.close()",
"def get_mostFrequent(self, n=5):\r\n pass",
"def get_mostFrequent(self, n=5):\r\n pass",
"def get_term_max_coverage(prime_implicants, m_terms):\n term_max_coverage = max(prime_implicants, key=lambda prime_implicant: len(\n [i for i in prime_implicant.get_covered_terms() if i in m_terms]))\n\n return term_max_coverage",
"def CountChildMetadata(self):\n\n assert self.dir\n d = self.descendants = {(self.uid, self.gid, self.mode, None, self.selabel, self.capabilities): 1}\n for i in self.children:\n if i.dir:\n for k, v in i.CountChildMetadata().iteritems():\n d[k] = d.get(k, 0) + v\n else:\n k = (i.uid, i.gid, None, i.mode, i.selabel, i.capabilities)\n d[k] = d.get(k, 0) + 1\n\n # Find the (uid, gid, dmode, fmode, selabel, capabilities)\n # tuple that matches the most descendants.\n\n # First, find the (uid, gid) pair that matches the most\n # descendants.\n ug = {}\n for (uid, gid, _, _, _, _), count in d.iteritems():\n ug[(uid, gid)] = ug.get((uid, gid), 0) + count\n ug = MostPopularKey(ug, (0, 0))\n\n # Now find the dmode, fmode, selabel, and capabilities that match\n # the most descendants with that (uid, gid), and choose those.\n best_dmode = (0, 0755)\n best_fmode = (0, 0644)\n best_selabel = (0, None)\n best_capabilities = (0, None)\n for k, count in d.iteritems():\n if k[:2] != ug: continue\n if k[2] is not None and count >= best_dmode[0]: best_dmode = (count, k[2])\n if k[3] is not None and count >= best_fmode[0]: best_fmode = (count, k[3])\n if k[4] is not None and count >= best_selabel[0]: best_selabel = (count, k[4])\n if k[5] is not None and count >= best_capabilities[0]: best_capabilities = (count, k[5])\n self.best_subtree = ug + (best_dmode[1], best_fmode[1], best_selabel[1], best_capabilities[1])\n\n return d",
"def getSize(self):\n return reduce(max, [m.getSize() for m in self.members])",
"def getBestConcepts(self):\n maxScore = -1\n bestConcepts = []\n chunkTokens = self.getTokens()\n for token in chunkTokens:\n for concept in token.umlsConcepts:\n if concept.score > maxScore:\n maxScore = concept.score\n\n if maxScore > 0:\n for tokens in chunkTokens:\n for concept in token.umlsConcepts:\n if concept.score == maxScore:\n bestConcepts.append(concept)\n return bestConcepts",
"def get_mostPopularAuthors():\n\n query = \"\"\"\n SELECT authors.name,COUNT(*) as views\n FROM articles join authors\n ON articles.author=authors.id\n JOIN log ON log.path LIKE ('/article/' || articles.slug)\n GROUP BY authors.name\n ORDER BY views DESC\n \"\"\"\n\n posts = execute_query(query)\n print('\\nWho are the most popular article authors of all time?')\n for author, views in posts:\n print(author + \" - \" + str(views) + \" views\")",
"def most_abundant(ids, seqs='ignored'):\r\n id_groups = [len(groups[seq_to_group[i]]) for i in ids]\r\n return ids[argmax(id_groups)]"
] |
[
"0.6645123",
"0.60265857",
"0.58677876",
"0.5824081",
"0.57380396",
"0.56142575",
"0.5474463",
"0.54287463",
"0.53988177",
"0.53356403",
"0.5325521",
"0.5312236",
"0.5306984",
"0.5294959",
"0.52739036",
"0.52482295",
"0.523597",
"0.5229304",
"0.5223293",
"0.5181166",
"0.517451",
"0.5165043",
"0.5153543",
"0.5153543",
"0.51488584",
"0.5145326",
"0.5142113",
"0.5138491",
"0.51300263",
"0.51251745"
] |
0.8272326
|
0
|
The set of all genes with at least one mutant in the dataset.
|
def all_genes_in_dataset(self):
# the empty-set argument is needed in case there are no mutants in the dataset - set.union() with empty args is an error.
return set.union(set(), *[set(genes) for N_mutants,genes
in self.dataset.get_gene_dict_by_mutant_number(self.dataset_name).items() if N_mutants>0])
|
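The set.union(set(), *iterables) idiom above can be exercised on a toy mutant-number-to-genes mapping (the dict is invented for illustration); the leading empty set keeps the call valid even when nothing passes the filter.

gene_dict_by_mutant_number = {1: ["geneA", "geneB"], 2: ["geneB", "geneC"]}

all_genes = set.union(set(), *[set(genes) for n_mutants, genes
                               in gene_dict_by_mutant_number.items() if n_mutants > 0])
assert all_genes == {"geneA", "geneB", "geneC"}

# With no qualifying entries the result is the empty set rather than a TypeError.
assert set.union(set()) == set()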
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def genes_with_multiple_mutants(self):\n # the empty-set argument is needed in case there are no mutants in the dataset - set.union() with empty args is an error.\n return set.union(set(), *[set(genes) for N_mutants,genes \n in self.dataset.get_gene_dict_by_mutant_number(self.dataset_name).items() if N_mutants>1])",
"def genes():\n data=pd.read_csv(config['stan'], sep=\" \")\n return list(set(data['Gene_id']))",
"def mutants_in_dataset(self, dataset_name=None):\n return [mutant for mutant in self if dataset_name is None or mutant.by_dataset[dataset_name].total_read_count>0]",
"def contains_generators(self):\n group = self.group\n gens = set()\n for syllable in self.array_form:\n gens.add(group.dtype(((syllable[0], 1),)))\n return set(gens)",
"def mel_gene_set(dict): # this uses the flanking genes, specifically\n\tmel_gene_set = set()\n\tfor k, v in dict.iteritems():\n\t\t#v[0] is up, v[1] is down\n\t\t#print \"this is v:\", v\n\t\tfor mg in v[0]:\n\t\t\tmel_gene_set.add(mg)\n\t\tfor mg in v[1]:\n\t\t\tmel_gene_set.add(mg)\n\treturn mel_gene_set",
"def N_genes_with_multiple_mutants(self):\n return len(self.genes_with_multiple_mutants)",
"def make_set(g, nodes):\n s = Set()\n names = nodes['names']\n for ii,name in enumerate(names):\n \"\"\" \n We will assume node is entirely contained\n in group if they have one atom in common\n \"\"\" \n atoms = mdn.dic2list(nodes[name]['atoms'])\n atom0 = atoms[0]\n if (atom0 in mdn.dic2list(g['atoms'])):\n s.add(ii)\n return s",
"def _get_obj_geneset(self, obj):\n obj_geneset = set(obj.input.get(\"mutations\", []))\n if not obj_geneset:\n # Geneset is given via geneset input:\n gs = self.resolwe.geneset.get(obj.input[\"geneset\"])\n obj_geneset = set(gs.genes)\n\n # Convert to gene symbols in case genes are given as feature ID's\n if gs.output[\"source\"] != \"UCSC\":\n qs = self.resolwe.feature.filter(feature_id__in=list(obj_geneset))\n id_2_name = {obj.feature_id: obj.name for obj in qs}\n obj_geneset = set([id_2_name[gene] for gene in obj_geneset])\n\n return obj_geneset",
"def genes_GT():\n df1=pd.read_csv(config['geneInfo'], sep=\" \")\n df1=df1[df1.chr == '22']\n df2=pd.read_csv(config['counts'], sep=\" \")\n genes=df1.merge(df2.gene_id, on=\"gene_id\")\n return list(set(genes['gene_id']))",
"def get_gene_sets(table, dominant):\n \n known = table[table[\"hgnc\"].isin(dominant)]\n gwide = set(known[\"hgnc\"][known[\"genomewide\"]])\n sugg = set(known[\"hgnc\"][known[\"suggestive\"]])\n \n gene_sets = {\"genomewide\": gwide, \"suggestive\": sugg}\n \n return gene_sets",
"def mutants_in_chromosome(self, chromosome):\n return sum(1 for m in self.dataset if m.read_info(self.dataset_name).total_read_count \n and m.position not in SPECIAL_POSITIONS.all_undefined and m.position.chromosome==chromosome)",
"def _get_genres(self):\n separated = self.movies['genre'].apply(self.separate_genre)\n return {g: True for x in separated for g in x}.keys()",
"def atoms(self):\n return set(self.array_form)",
"def whichSet(self):\n result = []\n # go through all members and if any isSet -- return True\n for index,v in self._items.iteritems():\n if v.isSet:\n result.append(index)\n return result",
"def ngram_eles(self):\n return set(self.ele_freqs)",
"def genotypes(self):\n return self.data.genotypes.values",
"def chromosomes(self):\n chromosomes_set = set()\n chromosomes = []\n for region in self.regions:\n if region.chromosome not in chromosomes_set:\n chromosomes_set.add(region.chromosome)\n chromosomes.append(region.chromosome)\n return chromosomes",
"def crisp_set(self):\n return self.memberships.keys()",
"def get_all_possible_genotypes(self):\n # Get all genotypes.\n return mutations_to_genotypes(self.mutations, wildtype=self.wildtype)",
"def getChemTorsions(self):\n dataDict = self.__dict__\n atomsIncluded = self.chemAtoms.issuperset\n result = frozenset(xx for xx in self.chemComp.chemTorsions if atomsIncluded(xx.chemAtoms))\n return result",
"def sets(self):\n\n return self._collection.distinct('set')",
"def sets(self):\n return self._sets",
"def available_sets(session, player):\n excluded_sets = set(session.taken.keys())\n for grouping in session.exclusives:\n if player.sets.intersection(grouping):\n excluded_sets.update(grouping)\n return [s for s in session.sets if s not in excluded_sets]",
"def finalSubsets(self):\n subs = self.allSubsets()\n for s in self.graph.observed:\n subs = subs[subs[:,s] == 1,] # remove subsets where values in s are not True\n return subs",
"def mutant(self):\n _mutant = []\n _wt = self.wildtype\n for i in range(0, len(self.mutations)):\n site = _wt[i]\n options = self.mutations[i]\n if options is None:\n _mutant.append(_wt[i])\n else:\n for o in options:\n if o != site:\n _mutant.append(o)\n return \"\".join(_mutant)",
"def single(self):\r\n\t\treturn list(set(self.sample))",
"def unique(self):\n return self.element_wise(lambda seqs: list(set(seqs)))",
"def subsets(self):\n return set(self.subset_map.values())",
"def potential_values(self) -> Set[Hashable]:\n\t\treturn set(self.iter_potential_values())",
"def remove_empty_genes(self):\n to_remove = []\n for gene in self.genes:\n if not gene.mrnas:\n to_remove.append(gene)\n if to_remove:\n for gene in to_remove:\n self.genes.remove(gene)\n sys.stderr.write(\"Removed empty gene \" + gene.identifier + \"\\n\")\n self.removed_genes.extend(to_remove)\n return to_remove"
] |
[
"0.8123009",
"0.68949455",
"0.6383538",
"0.6354511",
"0.6219542",
"0.60233015",
"0.5985441",
"0.59377563",
"0.5882648",
"0.5832713",
"0.5786872",
"0.57491076",
"0.574446",
"0.57376844",
"0.5717273",
"0.57007015",
"0.5654419",
"0.56425226",
"0.56347936",
"0.5625611",
"0.5609768",
"0.5603526",
"0.55825424",
"0.556981",
"0.55646574",
"0.5551778",
"0.5545316",
"0.5544925",
"0.5539691",
"0.55000556"
] |
0.84716815
|
0
|
The set of all genes with TWO OR MORE mutants in the dataset.
|
def genes_with_multiple_mutants(self):
# the empty-set argument is needed in case there are no mutants in the dataset - set.union() with empty args is an error.
return set.union(set(), *[set(genes) for N_mutants,genes
in self.dataset.get_gene_dict_by_mutant_number(self.dataset_name).items() if N_mutants>1])
|
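Same idiom with the threshold raised to two or more mutants, again on an invented mapping:

gene_dict_by_mutant_number = {1: ["geneA", "geneB"], 2: ["geneB", "geneC"], 3: ["geneC"]}

multi_mutant_genes = set.union(set(), *[set(genes) for n_mutants, genes
                                        in gene_dict_by_mutant_number.items() if n_mutants > 1])
assert multi_mutant_genes == {"geneB", "geneC"}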
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def all_genes_in_dataset(self):\n # the empty-set argument is needed in case there are no mutants in the dataset - set.union() with empty args is an error.\n return set.union(set(), *[set(genes) for N_mutants,genes \n in self.dataset.get_gene_dict_by_mutant_number(self.dataset_name).items() if N_mutants>0])",
"def N_genes_with_multiple_mutants(self):\n return len(self.genes_with_multiple_mutants)",
"def genes():\n data=pd.read_csv(config['stan'], sep=\" \")\n return list(set(data['Gene_id']))",
"def mel_gene_set(dict): # this uses the flanking genes, specifically\n\tmel_gene_set = set()\n\tfor k, v in dict.iteritems():\n\t\t#v[0] is up, v[1] is down\n\t\t#print \"this is v:\", v\n\t\tfor mg in v[0]:\n\t\t\tmel_gene_set.add(mg)\n\t\tfor mg in v[1]:\n\t\t\tmel_gene_set.add(mg)\n\treturn mel_gene_set",
"def genes(self) -> Tuple[Gene, ...]:\n return tuple(self.influence_graph.genes)",
"def genes_GT():\n df1=pd.read_csv(config['geneInfo'], sep=\" \")\n df1=df1[df1.chr == '22']\n df2=pd.read_csv(config['counts'], sep=\" \")\n genes=df1.merge(df2.gene_id, on=\"gene_id\")\n return list(set(genes['gene_id']))",
"def _get_obj_geneset(self, obj):\n obj_geneset = set(obj.input.get(\"mutations\", []))\n if not obj_geneset:\n # Geneset is given via geneset input:\n gs = self.resolwe.geneset.get(obj.input[\"geneset\"])\n obj_geneset = set(gs.genes)\n\n # Convert to gene symbols in case genes are given as feature ID's\n if gs.output[\"source\"] != \"UCSC\":\n qs = self.resolwe.feature.filter(feature_id__in=list(obj_geneset))\n id_2_name = {obj.feature_id: obj.name for obj in qs}\n obj_geneset = set([id_2_name[gene] for gene in obj_geneset])\n\n return obj_geneset",
"def _get_genres(self):\n separated = self.movies['genre'].apply(self.separate_genre)\n return {g: True for x in separated for g in x}.keys()",
"def genotypes(self):\n return self.data.genotypes.values",
"def get_my_mutations(quality_cutoff, coverage_cutoff):\n\n # my_mutations = {}\n # with open('/home/perry/Projects/loh/working/murim.exome.aa_chg.vars') as f:\n # for line in f:\n # my_mutations[line.strip()] = True\n # return my_mutations\n\n bed_file = 'data/nimblegen/2.1M_Human_Exome_Annotation/2.1M_Human_Exome.bed'\n bed_chr2st2end, bed_chr2posLs = bed_tools.load_bed(bed_file, \n 'NimbleGen Tiled Regions')\n # NimbleGen Tiled Regions\n # Target Regions\n\n use_data_dir = '/home/perry/Projects/loh/data/all_non_ref_hg18/'\n all_somatic = {}\n all_inherited = {}\n cancer_qualities = mutations.get_consensus_qualities(use_data_dir + 'yusanT.ann')\n normal_qualities = mutations.get_consensus_qualities(use_data_dir + 'yusanN.ann')\n for exome in global_settings.exome_types:\n data_file = use_data_dir + exome\n inherited, somatic, murim = mutations.get_mutations(data_file, normal_qualities,\n cancer_qualities, quality_cutoff,\n False, coverage_cutoff)\n # only use the bed_tools NimbleGen\n # restriction for hg18 data\n for s in somatic['yusan']: \n chr, pos = s.split(':')\n if bed_tools.find_location_in_bed(chr, int(pos), \n bed_chr2posLs,\n bed_chr2st2end):\n all_somatic[s] = True\n for i in inherited['yusan']: \n chr, pos = s.split(':')\n if bed_tools.find_location_in_bed(chr, int(pos), \n bed_chr2posLs,\n bed_chr2st2end):\n all_inherited[i] = True\n return (set(all_somatic.keys()) & set(get_murim_covered(quality_cutoff)), set(all_inherited.keys()) & set(get_murim_covered(quality_cutoff)))",
"def mutant(self):\n _mutant = []\n _wt = self.wildtype\n for i in range(0, len(self.mutations)):\n site = _wt[i]\n options = self.mutations[i]\n if options is None:\n _mutant.append(_wt[i])\n else:\n for o in options:\n if o != site:\n _mutant.append(o)\n return \"\".join(_mutant)",
"def get_all_possible_genotypes(self):\n # Get all genotypes.\n return mutations_to_genotypes(self.mutations, wildtype=self.wildtype)",
"def getAllEdges(mutations):\n\tallEdges = []\n\tfor mutation in mutations: \n\t\tprint mutation\n\t\tfor mutation2 in mutations:\n\t\t\tif not (mutation == mutation2): # No edges connecting to themselves.\n\t\t\t\ttmp = []\n\t\t\t\ttmp.append(mutation)\n\t\t\t\ttmp.append(mutation2)\n\t\t\t\tallEdges.append(tmp)\n\treturn allEdges",
"def mutants_in_chromosome(self, chromosome):\n return sum(1 for m in self.dataset if m.read_info(self.dataset_name).total_read_count \n and m.position not in SPECIAL_POSITIONS.all_undefined and m.position.chromosome==chromosome)",
"def mutants_in_dataset(self, dataset_name=None):\n return [mutant for mutant in self if dataset_name is None or mutant.by_dataset[dataset_name].total_read_count>0]",
"def contains_generators(self):\n group = self.group\n gens = set()\n for syllable in self.array_form:\n gens.add(group.dtype(((syllable[0], 1),)))\n return set(gens)",
"def get_mutual_friends(person1_friends, person2_friends):\n return list(set(person1_friends) & set(person2_friends))",
"def get_gene_sets(table, dominant):\n \n known = table[table[\"hgnc\"].isin(dominant)]\n gwide = set(known[\"hgnc\"][known[\"genomewide\"]])\n sugg = set(known[\"hgnc\"][known[\"suggestive\"]])\n \n gene_sets = {\"genomewide\": gwide, \"suggestive\": sugg}\n \n return gene_sets",
"def get_all_mutagens(self):\n return [self.learning_rate, self.beta1, self.beta2, self.weight_init]",
"def make_set(g, nodes):\n s = Set()\n names = nodes['names']\n for ii,name in enumerate(names):\n \"\"\" \n We will assume node is entirely contained\n in group if they have one atom in common\n \"\"\" \n atoms = mdn.dic2list(nodes[name]['atoms'])\n atom0 = atoms[0]\n if (atom0 in mdn.dic2list(g['atoms'])):\n s.add(ii)\n return s",
"def ngram_eles(self):\n return set(self.ele_freqs)",
"def chromosomes(self):\n chromosomes_set = set()\n chromosomes = []\n for region in self.regions:\n if region.chromosome not in chromosomes_set:\n chromosomes_set.add(region.chromosome)\n chromosomes.append(region.chromosome)\n return chromosomes",
"def mating_pairs(male: set, female: set) -> Compoundset:\n set_of_pairs=set()\n found =True\n while found:\n if len(male)>0 and len(female)>0:\n malegerbil=male.pop()\n femalegerbil=female.pop()\n pairs=(malegerbil,femalegerbil)\n set_of_pairs.add(pairs)\n else:\n found=False\n return set_of_pairs",
"def potential_new_obs(self) -> Set[GriddedPerm]:\n subobs: Set[GriddedPerm] = set()\n for ob in self._tiling.obstructions:\n subobs.update(ob.all_subperms(proper=True))\n subobs.remove(GriddedPerm.empty_perm())\n return subobs",
"def mutations(self, mu):\n # make a copy of the data, and make it an integer\n new_alleles = np.copy(self.geno)\n\n # for an array of the same shape as newAlleles, draw mutations at each\n # position with probability mu.\n vals = np.random.binomial(1, mu, self.size * self.nloci * 2)\n mutate = np.reshape(vals, [ self.size, self.nloci, 2])\n mutate = (mutate == 1)\n # swap zeroes and ones.\n new_alleles[mutate] = 1 - new_alleles[mutate] \n\n # Apply to geno_probs\n new_geno_probs = calculate_geno_probs(new_alleles, mu=mu)\n\n output = genotypeArray(\n geno = new_alleles,\n geno_probs = new_geno_probs,\n names = self.names,\n mothers= self.mothers,\n fathers = self.fathers\n )\n\n return output",
"def permissions(self):\n perms = set()\n for g in self.groups:\n perms = perms | set(g.permissions)\n return perms",
"def mut_space(graph: nx.Graph) -> t.Iterator[t.Tuple[int, t.List[str]]]:\n genes = get_attr(graph, 'gene')\n xs = sorted(chain.from_iterable(((g.P1, g.A1), (g.P2, g.A2)) for g in genes))\n return ((g, sorted(set(x[1] for x in gg))) for g, gg in groupby(xs, key=op.itemgetter(0)))",
"def _generator():\n filename_1 = 'gene.txt'\n filename_2 = 'geneSynonym.txt'\n gene_set_1 = gene_names(filename_1)\n gene_syn = gene_names(filename_2, complete=False)\n genes = gene_set_1 | gene_syn\n return genes",
"def get_group_interests(self):\r\n common_tags = set()\r\n for mem in self.members:\r\n if len(common_tags) == 0:\r\n common_tags = self.get_interests_each_member(mem)\r\n else:\r\n common_tags = common_tags.intersection(self.get_interests_each_member(mem))\r\n return list(common_tags)",
"def genes():\n return [\"b2935\", \"b0723\", \"b0451\"]"
] |
[
"0.80721563",
"0.6948248",
"0.6710061",
"0.6352312",
"0.6285758",
"0.61776936",
"0.5969346",
"0.5953357",
"0.59360653",
"0.5895782",
"0.58800334",
"0.5838031",
"0.5808292",
"0.57483584",
"0.57375014",
"0.5725144",
"0.5679366",
"0.5675286",
"0.5651269",
"0.5647856",
"0.56414944",
"0.5610721",
"0.5601009",
"0.5555911",
"0.5472638",
"0.544807",
"0.5444766",
"0.5431049",
"0.5416614",
"0.53994"
] |
0.8549979
|
0
|
The number of genes with TWO OR MORE mutants in the dataset.
|
def N_genes_with_multiple_mutants(self):
return len(self.genes_with_multiple_mutants)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def n(self):\n return len(self.genotypes)",
"def getNumGenerations(self):\n return 1 + max(m.getNumGenerations() for m in self.members)",
"def __len__(self) -> int:\n return len(self.__mutants)",
"def N_genes_in_dataset(self):\n return len(self.all_genes_in_dataset)",
"def generator_count(self, gen):\n if len(gen) != 1 or gen.array_form[0][1] < 0:\n raise ValueError(\"gen must be a generator\")\n s = gen.array_form[0]\n return s[1]*sum([abs(i[1]) for i in self.array_form if i[0] == s[0]])",
"def mutants_in_chromosome(self, chromosome):\n return sum(1 for m in self.dataset if m.read_info(self.dataset_name).total_read_count \n and m.position not in SPECIAL_POSITIONS.all_undefined and m.position.chromosome==chromosome)",
"def membership_count(self, S_go, S_gene):\n return self.go_count(S_gene, S_go)\n #c=self.go_count(S_gene)\n #if type(S_go)!=set:\n # S_go=set(S_go)\n #c={ k:v for k,v in c.items() if k in S_go }\n #return c",
"def num_protogenes(self):\n return len(self.protogenes.keys())",
"def num_mutations(self):\n return len(self.fuzz_library)",
"def count_genotypes(genotypeList,StateGenPosData, x, y):\r\n allMos = 0\r\n nonEggs = 0\r\n Adults = 0\r\n for i in range(len(genotypeList)):\r\n gt = genotypeList[i]\r\n b = sum(1 for item in StateGenPosData if not 'new' in item[0] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y))\r\n c = sum(1 for item in StateGenPosData if 'adult' in item[0] and 'XX' in item[1] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y))\r\n d = sum(1 for item in StateGenPosData if 'adult' in item[0] and gt in item[1] and item[2]==(x,y))\r\n## for item in StateGenPosData:\r\n## print(item[0],item[1],item[2])\r\n## if 'adult' in item[0] and gt in item[1] and item[2]==(x,y):\r\n## d+=1\r\n## print('yay')\r\n## if not 'new' in item[0] and not 'egg' in item[0] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y):\r\n## c+=1\r\n## if not 'new' in item[0] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y):\r\n## b+=1\r\n allMos = allMos + b\r\n nonEggs = nonEggs + c\r\n Adults = Adults + d\r\n return allMos, nonEggs, Adults",
"def num_mutations(self):\n return sum(len(site.mutations) for site in self.sites())",
"def genes_with_multiple_mutants(self):\n # the empty-set argument is needed in case there are no mutants in the dataset - set.union() with empty args is an error.\n return set.union(set(), *[set(genes) for N_mutants,genes \n in self.dataset.get_gene_dict_by_mutant_number(self.dataset_name).items() if N_mutants>1])",
"def __len__(self):\n return len(self._opts) + len(self._groups)",
"def get_num_cams(self, data):\n cams = set()\n for items in data:\n camid = items[2]\n cams.add(camid)\n return len(cams)",
"def count_pegs(self):\r\n count = 0\r\n\r\n for i in range(0, len(self.matrix)):\r\n for j in range(0, len(self.matrix[i])):\r\n if self.matrix[i][j] == \"1\":\r\n count += 1\r\n\r\n return count",
"def __len__(self):\n return len(self._ngrams)",
"def max_individuals(self) -> int:\n return self.group_size.upper * self.groups_allowed",
"def __len__(self):\n return len(self.dataset) * self.samples_per_pair",
"def numberOfNodes( gen ):\n return int( scipy.sum( [ 3.**i for i in range( 1, gen + 2 ) ] ) )",
"def count_male_teams(self):\n return len(self.df['Adult male 11v11 (16-45)'].dropna())",
"def mutation_count(variant):\n _validate_str(variant)\n if variant == WILD_TYPE_VARIANT:\n return 0\n elif variant == SYNONYMOUS_VARIANT:\n return 0\n else:\n result = [x.strip() for x in variant.split(\",\")]\n if len(set(result)) != len(result):\n raise ValueError(\"Duplicate mutant substrings found in variant\")\n return len(result)",
"def many(self):\n return self.opt.NonZeroCount(self._trg)",
"def get_max_individuals_count_of_generation_number(self, generation_number):\n\n\t\treturn self.individuals_per_generation[generation_number-1]",
"def num_grna(self) -> int:\n return len(self.gRNAs)",
"def __len__(self):\n # Header + len(group id) + group id + generation id\n size = self.HEADER_LEN + 2 + len(self.group_id) + 4\n # + len(member id) + member id + len(group assignment)\n size += 2 + len(self.member_id) + 4\n # group assignment tuples\n for member_assignment in self.group_assignment:\n # + len(member id) + member id + len(member assignment) + member assignment\n size += 2 + len(member_assignment.member_id) + 4 + len(member_assignment)\n return size",
"def member_count(self) -> int:\n return sum([g.member_count for g in self.guilds])",
"def __len__(self):\n gen_len = len(self.image_ids) // self.batch_size\n if len(self.image_ids) % self.batch_size != 0:\n gen_len += 1\n return gen_len",
"def num_vertex_sets(self, r):\n return sage.all.binomial(self.n, r)",
"def __len__(self):\n return len(self.beams)",
"def __len__(self):\n return len(self.chromosome_list)"
] |
[
"0.67937684",
"0.67046225",
"0.66733754",
"0.66237825",
"0.647608",
"0.6352982",
"0.62766045",
"0.6200753",
"0.605528",
"0.6032482",
"0.6026197",
"0.59705",
"0.5969641",
"0.595732",
"0.59484786",
"0.5943661",
"0.5912949",
"0.59030277",
"0.5875975",
"0.5866802",
"0.5833311",
"0.57568014",
"0.57490194",
"0.5746329",
"0.5744933",
"0.5731503",
"0.57112473",
"0.56980866",
"0.5680815",
"0.56673527"
] |
0.8153984
|
0
|
Add mutant to dataset.
|
def add_mutant(self, mutant, overwrite=False):
if mutant.IB in self._mutants_by_IB.keys() and not overwrite:
raise MutantError("Can't add mutant that would overwrite previous mutant with same IB! "
+"Pass overwrite=True argument if you want to overwrite.")
self._mutants_by_IB[mutant.IB] = mutant
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_to_dataset(self, dataset: Dataset):\n pass",
"def add_other_mutant_as_dataset(self, other_mutant, other_mutant_dataset_name, \n overwrite=False, check_constant_data=False):\n if other_mutant_dataset_name in self.by_dataset and not overwrite:\n raise MutantError(\"This mutant already has a %s dataset! Can't overwrite it with \"%other_mutant_dataset_name\n +\"new one. Choose a different name for new dataset, or use overwrite=True argument.\")\n\n # if desired, check that the position/gene data matches (and update if own gene data is unknown)\n # (probably should be using ifs rather than asserts, but I think since they're wrapped in a try/except it's fine)\n if check_constant_data:\n if not self.position == other_mutant.position:\n raise MutantError(\"Can't add mutant2 as dataset to mutant1: the mutant position differs! %s and %s\"%(\n self.position, other_mutant.position))\n try:\n self.update_gene_info(other_mutant.gene, other_mutant.orientation, \n other_mutant.gene_feature, other_mutant.gene_distances)\n except MutantError:\n raise MutantError(\"Can't add mutant2 as dataset to mutant1: the mutant gene data differs!\"\n +\" %s, %s, %s and\"%(self.gene, self.orientation, self.gene_feature)\n +\" %s, %s, %s.\"%(other_mutant.gene, other_mutant.orientation, other_mutant.gene_feature))\n\n # make a new empty Insertional_mutant object to hold the readcount-related data from other_mutant, \n # and put it in the self.by_dataset dictionary under other_mutant_dataset_name\n self.by_dataset[other_mutant_dataset_name] = Insertional_mutant_readcount_only()\n # now fill this new object with readcount-related data from other_mutant\n self.by_dataset[other_mutant_dataset_name]._copy_readcount_related_data(other_mutant)",
"def give_single_dataset_mutant(self, single_dataset_name, force=False):\n if single_dataset_name not in self.by_dataset.keys() and not force:\n raise MutantError(\"This mutant doesn't have a %s dataset! \"%single_dataset_name\n +\"Use force=True argument if you want a zero-readcount mutant returned anyway.\")\n # generate new mutant, fill it with readcount-related data from self.by_dataset[single_dataset_name] \n # and general data from self\n new_mutant = Insertional_mutant()\n new_mutant._copy_non_readcount_data(self)\n new_mutant._copy_readcount_related_data(self.by_dataset[single_dataset_name])\n return new_mutant",
"def test_add():\n data = io.create_sample_Dataset()\n tmp = data + data\n assert tmp[\"u\"][0, 0, 0] == 2.0",
"def set_dataset(self, role, dset):\n\n\t\tself.datasets[role] = dset",
"def __setitem__(self):\n raise ValueError(\"Dataset objects are immutable\")",
"def mutate_append(self, gene):\n gene.chromosome.append(self.tactics.mutate_select())",
"def add_data(self, data):\n self.data = self.data + data",
"def add_or_remove(self, dataset: \"Dataset\") -> None:\n raise NotImplementedError",
"def add_data(self, data):\n self._data += data",
"def add_muscle_stimulations(self, act):\n self.muscle_stimulations = act",
"def test__add_read(self):\n # using fake HTSeq alignment class from deepseq_utilities; defining one perfect and one imperfect alignment\n # note: the detailed mutation-counting methods are imported from deepseq_utilities and unit-tested there.\n position = Insertion_position('chr1', '+', position_before=3)\n perfect_aln = Fake_HTSeq_aln(seq='AAA', optional_field_data={'NM':0})\n imperfect_aln = Fake_HTSeq_aln(seq='GGG', optional_field_data={'NM':1})\n # adding perfect and imperfect to mutant increases all the counts as expected\n mutant = Insertional_mutant(insertion_position=position)\n mutant.add_read(perfect_aln, read_count=3, position=position)\n assert mutant.total_read_count == mutant.perfect_read_count == 3\n assert mutant.sequences_counts_positions_errors == {'AAA': [3, position, 0]}\n mutant.add_read(imperfect_aln, read_count=1, position=position)\n assert mutant.total_read_count == 4\n assert mutant.perfect_read_count == 3\n assert mutant.sequences_counts_positions_errors == {'AAA': [3, position, 0], 'GGG': [1, position, 1]}\n # same for a multi-dataset mutant - this time we need to specify which dataset we're adding to\n mutant = Insertional_mutant_multi_dataset(insertion_position=position)\n assert len(mutant.by_dataset) == 0\n mutant.add_read(perfect_aln, read_count=3, dataset_name='d1', position=position)\n assert len(mutant.by_dataset) == 1\n assert mutant.by_dataset['d1'].total_read_count == mutant.by_dataset['d1'].perfect_read_count == 3\n assert mutant.by_dataset['d1'].sequences_counts_positions_errors == {'AAA': [3, position, 0]}\n mutant.add_read(imperfect_aln, read_count=1, dataset_name='d1', position=position)\n assert len(mutant.by_dataset) == 1\n assert mutant.by_dataset['d1'].total_read_count == 4\n assert mutant.by_dataset['d1'].perfect_read_count == 3\n assert mutant.by_dataset['d1'].sequences_counts_positions_errors == {'AAA': [3, position, 0], 'GGG': [1, position, 1]}\n # now adding a read to another dataset - nothing changes in dataset d1, but we have new dataset d2 numbers\n mutant.add_read(imperfect_aln, read_count=1, dataset_name='d2', position=position)\n assert len(mutant.by_dataset) == 2\n assert mutant.by_dataset['d1'].total_read_count == 4\n assert mutant.by_dataset['d2'].total_read_count == 1\n assert mutant.by_dataset['d2'].perfect_read_count == 0\n assert mutant.by_dataset['d2'].sequences_counts_positions_errors == {'GGG': [1, position, 1]}\n # it should be impossible to add a read to a specific dataset in a single-dataset mutant \n mutant = Insertional_mutant(insertion_position=position)\n self.assertRaises(MutantError, mutant.add_read, perfect_aln, read_count=3, dataset_name='d1')\n # it should be impossible to add a read to a multi-dataset mutant without giving a dataset_name\n mutant = Insertional_mutant_multi_dataset(insertion_position=position)\n self.assertRaises(MutantError, mutant.add_read, perfect_aln, read_count=3)",
"def add(self, item):\n item = self._prepare_item(len(self), item)\n if item not in self._data:\n self._data.append(item)\n self.__log__.append(SetAdd(value=item))",
"def add_joined_dataset(self, new_data):\n self._add_linked_data(self.JOINED_DATASETS, self.joined_dataset_ids,\n new_data)",
"def update_dataset(self, data_name: str, append: pd.DataFrame):\n df = getattr(self, data_name)\n setattr(self, data_name, df.join(append, how='left'))",
"def add_data(self, new_data, *args):\n raise NotImplementedError",
"def add(self, data):\n if self._filter(data):\n id = self.db._generate_id(data)\n \n if not id == None:\n if self.db._store:\n self.db.append(id, str(data))\n print id, \"stored to\", self.db._generate_path(id)\n else:\n print id\n print data.show2()",
"def store_data(self, data):\n self.data.append(data)",
"def put(self, data: Analyzable, data_category: DatasetCategory = None):\n\n if self.finalized:\n raise RuntimeError(\"Cannot add more elements to a finalized database.\")\n\n # If full\n if self.__get_total_size() >= self.max_data:\n return\n\n # Automatically split sets using split ratio\n if data_category is None:\n if self.__get_training_size() == 0:\n self.__add_to_training(data)\n elif self.__get_testing_size() == 0:\n self.__add_to_testing(data)\n else:\n self.__add_to_training(data) \\\n if self.__get_current_split_ratio() <= self.split_ratio \\\n else self.__add_to_testing(data)\n\n # Manually assign data\n elif data_category == DatasetCategory.TRAINING:\n self.__add_to_training(data)\n elif data_category == DatasetCategory.TESTING:\n self.__add_to_testing(data)",
"def add_data(self, data):\n self.data = data",
"def add_data_tuple(self, stream_id, new_data_tuple, tuple_size_in_bytes):\n if (self.current_data_tuple_set is None) or \\\n (self.current_data_tuple_set.stream.id != stream_id) or \\\n (len(self.current_data_tuple_set.tuples) >= self.data_tuple_set_capacity) or \\\n (self.current_data_tuple_size_in_bytes >= self.max_data_tuple_size_in_bytes):\n self._init_new_data_tuple(stream_id)\n\n added_tuple = self.current_data_tuple_set.tuples.add()\n added_tuple.CopyFrom(new_data_tuple)\n\n self.current_data_tuple_size_in_bytes += tuple_size_in_bytes\n self.total_data_emitted_in_bytes += tuple_size_in_bytes",
"def test_write(self):\n data2 = self.data.copy()\n data2['a'] *= 2\n self.dset['a'] = data2\n self.assertTrue(np.all(self.dset[...] == data2))\n data2['b'] *= 4\n self.dset['b'] = data2\n self.assertTrue(np.all(self.dset[...] == data2))\n data2['a'] *= 3\n data2['c'] *= 3\n self.dset['a','c'] = data2\n self.assertTrue(np.all(self.dset[...] == data2))",
"def add(self, data):\n if data.shape != self.shape:\n self.shape = data.shape\n if isinstance(self.child, vmedian):\n self.child.add(data)\n if (self.child.index == 0):\n self.buffer[self.index, :] = self.child.get(reshape=False)\n self.index = self.index + 1\n else:\n self.buffer[self.index, :] = np.ravel(data)\n self.index = self.index + 1\n\n if self.index == 3:\n self.index = 0\n self.initialized = True",
"def mutator(mutate):\r\n @functools.wraps(mutate)\r\n def ecspy_mutator(random, candidates, args):\r\n mutants = []\r\n for i, cs in enumerate(candidates):\r\n mutants.append(mutate(random, cs, args))\r\n return mutants\r\n ecspy_mutator.single_mutation = mutate\r\n return ecspy_mutator",
"def add_plant(self, plant: 'Plant') -> None:\r\n self.plants.append(plant)\r\n self.centers.append(plant.get_center())\r\n self.make_rows()",
"def addDataField(self, name, value):\n if name in self.__examples: \n raise ValueError(\"Field already exists: \" + name)\n \n self.__storeDataField(name, value)",
"def __add__(self,sample):\n self.add(sample)",
"def __call__(self, mutant, rng):\n raise NotImplementedError",
"def add(self, ds, del_first=False, priority=DatasetActionPriority.DEFAULT):\n if not del_first and ds.is_stored(self._db):\n raise DSIDExists('{} - {}'.format(ds.id, ds.name))\n self._post_sm_msg(ds=ds, action=DatasetAction.ADD, priority=priority, del_first=del_first)",
"def add_data_single(self, pt, val):\n raise NotImplementedError('Abstract Method')"
] |
[
"0.6655881",
"0.6413176",
"0.6019999",
"0.5857906",
"0.56514895",
"0.55309415",
"0.54926395",
"0.54362386",
"0.5384364",
"0.53043133",
"0.52475846",
"0.5245322",
"0.5234252",
"0.5210368",
"0.51780087",
"0.51474565",
"0.5071842",
"0.50533676",
"0.5048791",
"0.50422263",
"0.5017486",
"0.500427",
"0.50008005",
"0.49985775",
"0.4992785",
"0.49901494",
"0.498555",
"0.49796626",
"0.49791732",
"0.49781767"
] |
0.6503815
|
1
|
Remove mutant (by IB); can take a mutant or an IB.
|
def remove_mutant(self, mutant_or_IB):
try: IB = mutant_or_IB.IB
except AttributeError: IB = mutant_or_IB
del self._mutants_by_IB[IB]
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_mutant(self, IB):\n return self._mutants_by_IB[IB]",
"def _should_remove(self, mac, obj):\n ret = False\n if getattr(obj, self.toggle_val) == self.toggle_check\\\n and self.toggle.state == 'down':\n ret = True\n return ret",
"def remove_tactic(self):\n tactic_removed = input(\"Enter a tactic to be removed: \")\n self.proof.tactics.remove(tactic_removed)\n for gene in self.population:\n gene.chromosome = [e for e in gene.chromosome if e != tactic_removed]",
"def canRemove(self, p_int): # real signature unknown; restored from __doc__\n return False",
"def test_remove_already_not_subbed(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=11,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=False,\n invite_only=False,\n target_users_subbed=False,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 0)\n self.assert_length(json[\"not_removed\"], 1)",
"def __contains__(self, IB):\n return IB in self._mutants_by_IB",
"def test_remove_coach_specific_for_coach_pt2(self):\n self.assertFalse(self.coach1.has_perm(self.AUTH_REMOVE_COACH, self.classrooms[1]))",
"def remove_mutants_not_in_other_dataset(self, other_dataset, readcount_min=1, perfect_reads=False):\n # TODO do I want this to be based on non-exact position equality instead?\n if perfect_reads: get_readcount = lambda m: m.perfect_read_count\n else: get_readcount = lambda m: m.total_read_count\n # go over all mutants in self; need to convert the iterator to a list to make a separate copy, \n # otherwise we'd be modifying the iterator while iterating through it, which isn't allowed.\n for mutant in list(self):\n if get_readcount(other_dataset.get_mutant(mutant.IB)) < readcount_min:\n self.remove_mutant(mutant.IB)\n # TODO really I shouldn't be removing mutants outright, just noting them as removed or something... In that case should they or should they not show up in \"for m in self\"? Probably not - they should have a separate dictionary?\n # TODO should I keep track of removed reads, and print in summary? PROBABLY.\n # LATER-TODO unit-test - it does have run-tests though.",
"def has_remove_permissions(self, obj):\n return True",
"def remove_mutants_in_other_dataset(self, other_dataset, readcount_min=1, perfect_reads=False):\n # TODO do I want this to be based on non-exact position equality instead?\n if perfect_reads: get_readcount = lambda m: m.perfect_read_count\n else: get_readcount = lambda m: m.total_read_count\n # go over all mutants in self; need to convert the iterator to a list to make a separate copy, \n # otherwise we'd be modifying the iterator while iterating through it, which isn't allowed.\n for mutant in list(self):\n if get_readcount(other_dataset.get_mutant(mutant.IB)) >= readcount_min:\n self.remove_mutant(mutant.IB)\n # TODO really I shouldn't be removing mutants outright, just noting them as removed or something... In that case should they or should they not show up in \"for m in self\"? Probably not - they should have a separate dictionary?\n # TODO should I keep track of removed reads, and print in summary? PROBABLY.\n # LATER-TODO unit-test - it does have run-tests though.",
"def test_remove_learner_specific_for_coach_pt2(self):\n self.assertFalse(self.coach1.has_perm(self.AUTH_REMOVE_LEARNER, self.learner_groups[1]))",
"def test_remove_coach_specific_for_coach_pt1(self):\n self.assertTrue(self.coach2.has_perm(self.AUTH_REMOVE_COACH, self.classrooms[1]))",
"def unfriend(self, remove):\n remover_friends_list = self # person terminating the friendship \n \n # remove friend from remover friend list\n remover_friends_list.remove_friend(removee)\n\n #remove friend from removee friend list\n friends_list = FriendList.objects.get(user=removee)\n friend_list.remove_friend(self.user)",
"def test_realm_admin_remove_others_from_subbed_private_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=17,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=True,\n invite_only=True,\n target_users_subbed=True,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)",
"async def massremove(\n self,\n ctx,\n role: discord.Role,\n member: commands.Greedy[discord.Member],\n ):\n\n role = discord.utils.get(ctx.guild.roles, id=role.id)\n\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to remove this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not remove *{role}* role using this command.\",\n description=\"For more information run ```.help massremove```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n for i in member:\n if role not in i.roles:\n await ctx.channel.send(\n embed=discord.Embed(\n title=f\"*{i}* doesn't have *{role}* Role!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.greyple(),\n )\n )\n\n await i.remove_roles(role)\n\n await ctx.send(\n embed=discord.Embed(\n title=f\"*{role}* has been removed from **{len(member)}** members!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )",
"def remove_member(self, persona):\n if persona in self.members:\n self.members.remove(persona)",
"def test_remove_classroom_specific_for_coach_pt2(self):\n self.assertFalse(self.coach1.has_perm('auth.remove_classroom', self.classrooms[1]))",
"def test_realm_admin_remove_others_from_unsubbed_private_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=17,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=False,\n invite_only=True,\n target_users_subbed=True,\n other_sub_users=[self.example_user(\"othello\")],\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)",
"def test_cant_remove_other_users_from_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=8,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=False,\n is_subbed=True,\n invite_only=False,\n target_users_subbed=True,\n )\n self.assert_json_error(result, \"Insufficient permission\")",
"def test_remove_facility_pt2(self):\n self.assertFalse(self.coach1.has_perm('auth.remove_facility'))",
"def remove_deletions(murim_mutations):\n\n pass",
"def test_remove_learner_group_specific_for_coach_pt2(self):\n self.assertFalse(self.coach1.has_perm('auth.remove_learner_group', self.learner_groups[1]))",
"def test_remove_learner_specific_for_coach_pt1(self):\n self.assertTrue(self.coach2.has_perm(self.AUTH_REMOVE_LEARNER, self.learner_groups[1]))",
"def removeMultiInstance(*args, allChildren: bool=True, b: bool=True, **kwargs)->bool:\n pass",
"def removeEquate(self, name: unicode) -> bool:\n ...",
"async def unmute(self, ctx, member: discord.Member):\n for channel in ctx.guild.text_channels:\n permissions = channel.permissions_for(member)\n\n if permissions.read_messages:\n # This removes the PermissionOverwrite on the channel, it\n # does not grant send_messages=True\n await channel.set_permissions(member, overwrite=None)",
"def remove_chromosome(mutated_genome):\n index = random.randint(0,max(0,len(mutated_genome)-1))\n del mutated_genome[index]",
"def remove():",
"def test_handle_remove_not_admin(self):\n test_user = User(\"userid\")\n team = Team(\"BRS\", \"brs\", \"web\")\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n with self.app.app_context():\n self.assertTupleEqual(self.testcommand.handle(\"team remove\"\n \" brs ID\", user),\n (self.testcommand.permission_error, 200))\n self.db.store.assert_not_called()\n self.gh.remove_team_member.assert_not_called()",
"def removeReactant(self, *args):\n return _libsbml.Reaction_removeReactant(self, *args)"
] |
[
"0.60149795",
"0.59993804",
"0.5992167",
"0.59284204",
"0.5794964",
"0.5737003",
"0.5682585",
"0.56353986",
"0.5630366",
"0.5546742",
"0.55403596",
"0.5524952",
"0.54973835",
"0.54950535",
"0.5481185",
"0.54774433",
"0.54470277",
"0.543319",
"0.5393304",
"0.5376256",
"0.5375506",
"0.5372235",
"0.53628325",
"0.53154707",
"0.5295667",
"0.52849877",
"0.5282824",
"0.5274945",
"0.52744204",
"0.52742577"
] |
0.79588664
|
0
|
Return the mutant with given IB. If mutant doesn't exist, create a new one with no position/reads/sequences.
|
def get_mutant(self, IB):
return self._mutants_by_IB[IB]
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_mutant(self, mutant, overwrite=False):\n if mutant.IB in self._mutants_by_IB.keys() and not overwrite:\n raise MutantError(\"Can't add mutant that would overwrite previous mutant with same IB! \"\n +\"Pass overwrite=True argument if you want to overwrite.\")\n self._mutants_by_IB[mutant.IB] = mutant",
"def remove_mutant(self, mutant_or_IB):\n try: IB = mutant_or_IB.IB\n except AttributeError: IB = mutant_or_IB\n del self._mutants_by_IB[IB]",
"def __init__(self, IB=None, insertion_position=SPECIAL_POSITIONS.unknown):\n # \"standard\" mutants need general attributes and readcount-related attributes\n self._set_general_attributes(IB, insertion_position)\n self._set_readcount_related_data_to_zero()",
"def __getitem__(self, uuid: UUID) -> Mutant:\n return self.__mutants[uuid]",
"def _mutant(self, idx, F):\r\n # Generate random indices\r\n r = torch.randint(self.pop_size, (3,))\r\n # Re-generate if it contains candidate index\r\n while r[1] == r[0] or r[2] == r[0] or r[2] == r[1] or (idx in r):\r\n r = torch.randint(0, self.pop_size, (3,))\r\n \r\n\r\n # Compute mutant\r\n mutant = self.population[r[0]] + \\\r\n self.k_val * (self.population[self.best] - self.population[r[0]]) + \\\r\n F * (self.population[r[2]] - self.population[r[1]])\r\n #mutant = mutant.to(self.device)\r\n # Crossover\r\n probs = torch.rand(mutant.shape[0],device = self.device)\r\n \r\n return torch.where(probs >= self.cross_prob,self.population[idx],mutant)",
"def mutation(self, i: int) -> Character:\n chance = random.uniform(0, 1)\n if chance <= self.mutation_rate:\n return self.mutate_candidate(i)\n else:\n return self.characters[i]",
"def mutation(self, ind):\n if random.random() > self.mutationRate:\n return\n indexA = random.randint(0, self.genSize-1)\n indexB = random.randint(0, self.genSize-1)\n\n tmp = ind.genes[indexA]\n ind.genes[indexA] = ind.genes[indexB]\n ind.genes[indexB] = tmp\n\n ind.computeFitness()\n self.updateBest(ind)",
"def get_mutated_sequence(focus_seq, mutant, start_idx=1, AA_vocab=AA_vocab):\n mutated_seq = list(focus_seq)\n for mutation in mutant.split(\":\"):\n try:\n from_AA, position, to_AA = mutation[0], int(mutation[1:-1]), mutation[-1]\n except:\n print(\"Issue with mutant: \"+str(mutation))\n relative_position = position - start_idx\n assert (from_AA==focus_seq[relative_position]), \"Invalid from_AA or mutant position: \"+str(mutation)+\" from_AA: \"+str(from_AA) + \" relative pos: \"+str(relative_position) + \" focus_seq: \"+str(focus_seq)\n assert (to_AA in AA_vocab) , \"Mutant to_AA is invalid: \"+str(mutation)\n mutated_seq[relative_position] = to_AA\n return \"\".join(mutated_seq)",
"def interm_to_smt2_ib(ib, env):\n assert isinstance(ib, InstrBlock)\n smt_tr = SmtlibTranslator(env.show_comments, env.assignments_as_lets)\n program_constr = smt_tr.produce_constr_lists(ib)[0]\n # CODE_SMT = utils.conjunct_constrs_smt2(program_constr)\n # CODE_SMT = smt_tr.produce_text(ib)\n return ProgramSmt2(program_constr, smt_tr.let_declarations)",
"def test_mut_replace_terminal_none_available(self):\n ind = self.individuals[self.ind_strings[0]]\n ind_clone = self.gama._toolbox.clone(ind)\n\n with self.assertRaises(ValueError) as error:\n mut_replace_terminal(ind_clone, self.gama._pset)\n\n self.assertTrue(\"Individual could not be mutated\" in str(error.exception))",
"def make_mutant(self,\n pose: pyrosetta.Pose,\n mutation: str,\n chain='A',\n distance: int = 10,\n cycles: int = 5\n ) -> pyrosetta.Pose:\n if pose is None:\n mutant = self.pose.clone()\n else:\n mutant = pose.clone()\n pose2pdb = pose.pdb_info().pdb2pose\n rex = re.match('(\\w)(\\d+)(\\w)', mutation)\n r = pose2pdb(res=int(rex.group(2)), chain=chain)\n rn = pose.residue(r).name1()\n if self.strict_about_starting_residue:\n assert rn == rex.group(1), f'residue {r}(pose)/{rex.group(2)}(pdb) is a {rn}, not a {rex.group()}'\n MutateResidue = pyrosetta.rosetta.protocols.simple_moves.MutateResidue\n MutateResidue(target=r, new_res=self._name3[rex.group(3)]).apply(mutant)\n self.relax_around_mover(mutant, int(rex.group(2)), chain, distance=distance, cycles=cycles,\n own_chain_only=False)\n return mutant",
"def __contains__(self, IB):\n return IB in self._mutants_by_IB",
"def __imatmul__(self, B):\n if isinstance(B, dict):\n if B['leg'] not in self.indexes:\n raise ValueError('Leg of singular values not an indexes '\n 'of self')\n\n if B['symmetries'] != self.symmetries:\n raise ValueError('Not same symmetries')\n\n x, y = self.coupling_id(B['leg'])\n for k in self:\n newshape = [1] * len(self[k].shape)\n newshape[self.indexes.index(B['leg'])] = -1\n self[k] *= B[k[x][y]].reshape(newshape)\n return self\n\n connections = self.connections(B)\n if not connections:\n raise ValueError(f'No connections found between {self} and {B}')\n\n return self.contract(B, (list(connections),) * 2).simplify()",
"def get_mutated_id(arb_id, arb_id_bitmap):\n for i in range(MAX_ID_LENGTH - len(arb_id_bitmap)):\n arb_id_bitmap.append(True)\n\n old_arb_id = \"0\" * (MAX_ID_LENGTH - len(arb_id)) + arb_id\n new_arb_id = \"\"\n\n for i in range(len(arb_id_bitmap)):\n if arb_id_bitmap[i] and i == 0:\n new_arb_id += random.choice(LEAD_ID_CHARACTERS)\n elif arb_id_bitmap[i]:\n new_arb_id += random.choice(CHARACTERS)\n else:\n new_arb_id += old_arb_id[i]\n\n for j in range(MAX_ID_LENGTH - len(arb_id_bitmap)):\n new_arb_id += old_arb_id[len(arb_id_bitmap) + j]\n return new_arb_id",
"def _mutual_friends_motif(self) -> DataFrame:\n\n return self._graph.find(\"(a)-[]->(b); (a)-[]->(c)\").select(\n col(\"a.id\").alias(\"mutual_friend\"),\n col(\"b.id\").alias(\"character_1\"),\n col(\"c.id\").alias(\"character_2\"),\n )",
"def give_single_dataset_mutant(self, single_dataset_name, force=False):\n if single_dataset_name not in self.by_dataset.keys() and not force:\n raise MutantError(\"This mutant doesn't have a %s dataset! \"%single_dataset_name\n +\"Use force=True argument if you want a zero-readcount mutant returned anyway.\")\n # generate new mutant, fill it with readcount-related data from self.by_dataset[single_dataset_name] \n # and general data from self\n new_mutant = Insertional_mutant()\n new_mutant._copy_non_readcount_data(self)\n new_mutant._copy_readcount_related_data(self.by_dataset[single_dataset_name])\n return new_mutant",
"def __call__(self, mutant, rng):\n raise NotImplementedError",
"def mutate_candidate(self, i: int) -> Character:\n candidate = self.characters[i]\n # candidate.dna_generator.dna.print_dna()\n mutation = random.randint(0, 8)\n # mutation = 0\n if mutation == 0:\n candidate.dna_generator.mutate_ability_scores()\n elif mutation == 1:\n candidate.dna_generator.mutate_race()\n elif mutation == 2:\n candidate.dna_generator.mutate_proficiencies()\n elif mutation == 3:\n candidate.dna_generator.mutate_background()\n elif mutation == 4:\n candidate.dna_generator.mutate_fighting_style()\n elif mutation == 5:\n candidate.dna_generator.mutate_armor()\n elif mutation == 6:\n candidate.dna_generator.mutate_weapon()\n elif mutation == 7:\n candidate.dna_generator.mutate_shield()\n elif mutation == 8:\n candidate.dna_generator.mutate_second_weapon()\n return candidate",
"def mut(base,det=False):\n\n bases = ('A','T','C','G')\n base = base.upper()\n if base not in bases:\n raise ValueError(\"base passed to mut(): \" + str(base) + \" not one of (A,T,C,G)\")\n\n if det:\n if base == 'A':\n return 'T'\n elif base == 'T':\n return 'A'\n elif base == 'G':\n return 'C'\n elif base == 'C':\n return 'G'\n\n else:\n mut = base\n while mut == base:\n mut = bases[int(random.uniform(0,4))]\n return mut",
"def bijector(self):\n return self._bijector",
"def __matmul__(self, B):\n if isinstance(B, dict):\n X = self.metacopy()\n\n if B['leg'] not in X.indexes:\n raise ValueError('Leg of singular values not an indexes '\n 'of self')\n\n if B['symmetries'] != X.symmetries:\n raise ValueError('Not same symmetries')\n\n x, y = X.coupling_id(B['leg'])\n for k in self:\n newshape = [1] * len(self[k].shape)\n newshape[X.indexes.index(B['leg'])] = -1\n X[k] = self[k] * B[k[x][y]].reshape(newshape)\n\n return X\n\n connections = self.connections(B)\n if not connections:\n raise ValueError(f'No connections found between {self} and {B}')\n\n return self.contract(B, (list(connections),) * 2).simplify()",
"def generate_mutString(s):\r\n test_seq = Seq.MutableSeq(str(template))\r\n #Introduce the mutation in a test string\r\n for mut in s.split():\r\n pos = int(mut[1:-1]) - 1 #Numbering from 0 in strings \r\n old = mut[0]\r\n new = mut[-1]\r\n if old == test_seq[pos]:\r\n test_seq[pos] = new \r\n else:\r\n print('Initial mutation didnt match')\r\n \r\n return test_seq",
"def set_mi_in_body_frame_optimized(pa):\n # no of bodies\n nb = pa.nb[0]\n # loop over all the bodies\n for i in range(nb):\n fltr = np.where(pa.body_id == i)[0]\n cm_i = pa.cm[3 * i:3 * i + 3]\n\n I = np.zeros(9)\n for j in fltr:\n # Ixx\n I[0] += pa.m[j] * (\n (pa.y[j] - cm_i[1])**2. + (pa.z[j] - cm_i[2])**2.)\n\n # Iyy\n I[4] += pa.m[j] * (\n (pa.x[j] - cm_i[0])**2. + (pa.z[j] - cm_i[2])**2.)\n\n # Izz\n I[8] += pa.m[j] * (\n (pa.x[j] - cm_i[0])**2. + (pa.y[j] - cm_i[1])**2.)\n\n # Ixy\n I[1] -= pa.m[j] * (pa.x[j] - cm_i[0]) * (pa.y[j] - cm_i[1])\n\n # Ixz\n I[2] -= pa.m[j] * (pa.x[j] - cm_i[0]) * (pa.z[j] - cm_i[2])\n\n # Iyz\n I[5] -= pa.m[j] * (pa.y[j] - cm_i[1]) * (pa.z[j] - cm_i[2])\n\n I[3] = I[1]\n I[6] = I[2]\n I[7] = I[5]\n # find the eigen vectors and eigen values of the moi\n vals, R = np.linalg.eigh(I.reshape(3, 3))\n # find the determinant of R\n determinant = np.linalg.det(R)\n if determinant == -1.:\n R[:, 0] = -R[:, 0]\n\n # recompute the moment of inertia about the new coordinate frame\n # if flipping of one of the axis due the determinant value\n R = R.ravel()\n\n if determinant == -1.:\n I = np.zeros(9)\n for j in fltr:\n dx = pa.x[j] - cm_i[0]\n dy = pa.y[j] - cm_i[1]\n dz = pa.z[j] - cm_i[2]\n\n dx0 = (R[0] * dx + R[3] * dy + R[6] * dz)\n dy0 = (R[1] * dx + R[4] * dy + R[7] * dz)\n dz0 = (R[2] * dx + R[5] * dy + R[8] * dz)\n\n # Ixx\n I[0] += pa.m[j] * (\n (dy0)**2. + (dz0)**2.)\n\n # Iyy\n I[4] += pa.m[j] * (\n (dx0)**2. + (dz0)**2.)\n\n # Izz\n I[8] += pa.m[j] * (\n (dx0)**2. + (dy0)**2.)\n\n # Ixy\n I[1] -= pa.m[j] * (dx0) * (dy0)\n\n # Ixz\n I[2] -= pa.m[j] * (dx0) * (dz0)\n\n # Iyz\n I[5] -= pa.m[j] * (dy0) * (dz0)\n\n I[3] = I[1]\n I[6] = I[2]\n I[7] = I[5]\n\n # set the inverse inertia values\n vals = np.array([I[0], I[4], I[8]])\n\n pa.mibp[3 * i:3 * i + 3] = 1. / vals\n\n # get the quaternion from the rotation matrix\n r = Rot.from_dcm(R.reshape(3, 3))\n q_tmp = r.as_quat()\n q = np.zeros(4)\n q[0] = q_tmp[3]\n q[1] = q_tmp[0]\n q[2] = q_tmp[1]\n q[3] = q_tmp[2]\n\n normalize_q_orientation(q)\n pa.q[4 * i:4 * i + 4] = q\n\n # also set the rotation matrix\n pa.R[9 * i:9 * i + 9] = R",
"def launch(self):\n out_log, err_log = fu.get_logs(path=self.path, mutation=self.mut, step=self.step)\n if self.mutation is not None:\n # Read structure with Biopython\n parser = PDBParser(PERMISSIVE=1)\n st = parser.get_structure('s', self.input_pdb_path) # s random id never used\n\n # Remove the side chain of the AA to be mutated\n if self.mutation['chain']!='*':\n chains = [self.mutation['chain']]\n else:\n chains = [chain.id for chain in st[0]]\n\n resnum = int(self.mutation['resnum'])\n\n for chain in chains:\n residue = st[0][chain][(' ', resnum, ' ')]\n backbone_atoms = ['N', 'CA', 'C', 'O', 'CB']\n not_backbone_atoms = []\n\n # The following formula does not work. Biopython bug?\n # for atom in residue:\n # if atom.id not in backbone_atoms:\n # residue.detach_child(atom.id)\n\n for atom in residue:\n if atom.id not in backbone_atoms:\n not_backbone_atoms.append(atom)\n for atom in not_backbone_atoms:\n residue.detach_child(atom.id)\n\n # Change residue name\n residue.resname = self.mutation['mt'].upper()\n\n # Write resultant structure\n w = PDBIO()\n w.set_structure(st)\n prepared_file_path = self.mut+self.step+'prepared.pdb'\n w.save(prepared_file_path)\n else:\n prepared_file_path = self.input_pdb_path\n\n scrwl = 'Scwrl4' if self.scwrl4_path is None else self.scwrl4_path\n cmd = [scrwl, '-i', prepared_file_path, '-o', self.output_pdb_path]\n\n command = cmd_wrapper.CmdWrapper(cmd, out_log, err_log)\n return command.launch()",
"def mute2(individual):\n mutatePt=random.randint(0,len(individual)-1)\n if mutatePt==0:\n individual[mutatePt]=random.uniform(0.0, 0.02)\n elif mutatePt>=2 and mutatePt<=4:\n individual[mutatePt]=random.uniform(0.0, 0.005)\n elif mutatePt==5:\n individual[mutatePt]=random.uniform(0.0, 0.07)\n \n return individual,",
"def test_mut_replace_terminal(self):\n ind1 = self.individuals[self.ind_strings[1]]\n self._test_mutation(ind1, mut_replace_terminal, self._mut_replace_terminal_is_applied)",
"def get_index(uid, i):\n return _SHARED_SEQUENCES[uid][i]",
"def mute(self, factor):\n assert isinstance(factor, (float, int)), \\\n \"'factor' has to be a number. Not a %s.\" \\\n % type(factor).__name__\n assert 0 <= factor <= 1, \"The factor must be between 0 and 1.\"\n\n new_gene = self.copy()\n mutation_tensor = np.random.uniform(0, 1, size=new_gene.bias.shape) < factor\n new_gene.bias += mutation_tensor * np.random.normal(0, 1, size=new_gene.bias.shape)\n new_gene.bias = np.where( 1<new_gene.bias, 1, new_gene.bias)\n new_gene.bias = np.where( 0>new_gene.bias, 0, new_gene.bias)\n # new_gene.bias = np.where(max(0, min(1, new_gene.bias)))\n return new_gene",
"def generate(self, snapshot: Bug, mutations: List[Mutation]) -> Mutant:\n logger.info(\"generating mutant of snapshot '%s' by applying mutations: %s\", # noqa: pycodestyle\n snapshot.name,\n ', '.join([repr(m) for m in mutations]))\n bz = self.__bugzoo\n assert len(mutations) <= 1, \\\n \"higher-order mutation is currently unsupported\"\n\n # NOTE this is *incredibly* unlikely to conflict\n logging.debug(\"generating UUID for mutant...\")\n uuid = uuid4()\n logger.debug(\"generated UUID for mutant: %s\", uuid.hex)\n try:\n assert uuid not in self.__mutants, \"UUID already in use.\"\n except AssertionError:\n logger.exception(\"automatically generated UUID is already in use: %s\", # noqa: pycodestyle\n uuid)\n raise\n logger.debug(\"constructing mutation description...\")\n mutant = Mutant(uuid, snapshot.name, mutations)\n logger.debug(\"constructed mutant description: %s\", mutant)\n\n # generate a diff for the mutant\n logger.debug(\"generating a unified diff for mutant\")\n diff = \\\n self.__sources.mutations_to_diff(snapshot, list(mutant.mutations))\n logger.debug(\"generated unified diff for mutant\")\n\n # generate the Docker image on the BugZoo server\n logger.debug(\"provisioning container to persist mutant as a snapshot\")\n container = bz.containers.provision(snapshot)\n logger.debug(\"provisioned container [%s] for mutant [%s].\",\n container.uid, mutant.uuid.hex)\n try:\n logger.debug(\"applying mutation patch to original source code.\")\n bz.containers.patch(container, diff)\n logger.debug(\"applied mutation patch to original source code.\")\n try:\n logger.debug(\"attempting to build source code for mutant.\")\n outcome = bz.containers.build(container)\n logger.debug(\"built source code for mutant.\")\n except BugZooException:\n raise BuildFailure\n bz.containers.persist(container, mutant.docker_image)\n logger.debug(\"persisted mutant to Docker image\")\n finally:\n del bz.containers[container.uid]\n logger.debug(\"destroyed temporary container [%s] for mutant [%s].\",\n container.uid, mutant.uuid.hex)\n\n # build and register a BugZoo snapshot\n instructions_coverage = snapshot.instructions_coverage\n snapshot_mutated = Bug(name=mutant.snapshot,\n image=mutant.docker_image,\n program=snapshot.program,\n dataset=None,\n source=None,\n source_dir=snapshot.source_dir,\n languages=snapshot.languages,\n compiler=snapshot.compiler,\n tests=snapshot.tests,\n instructions_coverage=instructions_coverage)\n logger.debug(\"Registering snapshot for mutant with BugZoo: %s\",\n mutant.uuid.hex)\n bz.bugs.register(snapshot_mutated)\n logger.debug(\"Registered snapshot for mutant with BugZoo: %s\",\n mutant.uuid.hex)\n\n # track the mutant\n logger.debug(\"Registering mutant with UUID '%s': %s\",\n mutant.uuid.hex,\n mutant)\n self.__mutants[mutant.uuid] = mutant\n logger.debug(\"Registered mutant with UUID '%s'\", mutant.uuid.hex)\n logger.info(\"Generated mutant: %s\", mutant)\n return mutant",
"def dual(self, I=None) -> 'MultiVector':\n if I is None:\n return self.layout.MultiVector(value=self.layout.dual_func(self.value))\n else:\n Iinv = I.inv()\n\n return self * Iinv"
] |
[
"0.6065721",
"0.5824929",
"0.57916296",
"0.5720389",
"0.55480236",
"0.50881934",
"0.47943345",
"0.4750953",
"0.46834594",
"0.4654471",
"0.46411258",
"0.45736966",
"0.45549503",
"0.45424083",
"0.44928995",
"0.44865704",
"0.44850048",
"0.44817802",
"0.44811586",
"0.44272077",
"0.4387678",
"0.43837744",
"0.438113",
"0.43796295",
"0.43627444",
"0.43521196",
"0.4345073",
"0.43445632",
"0.43223146",
"0.43095514"
] |
0.7926696
|
0
|
Check if dataset contains mutant with given IB. You can only check "IB in dataset" at present, not "mutant in dataset", since the latter would probably also just check by IB, so the syntax would be misleading.
|
def __contains__(self, IB):
return IB in self._mutants_by_IB
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_improper(i0, i1, i2, i3, bond_set):\n if len(set([i0, i1, i2, i3])) == 4:\n if not ((i0, i1) in bond_set and (i1, i2) in bond_set and (i2, i3) in bond_set):\n return True\n return False",
"def is_in(batch, data):\n _id = batch[-1]\n for d in data:\n if d[-1] == _id:\n return True\n return False",
"def is_proper(i0, i1, i2, i3, bond_set):\n if (i0, i1) in bond_set and (i1, i2) in bond_set and (i2, i3) in bond_set and len(set([i0, i1, i2, i3])) == 4:\n return True\n return False",
"def has(self, item):\n return item in self.mut",
"def get_mutant(self, IB):\n return self._mutants_by_IB[IB]",
"def _contains(self, element):\n if not isinstance(element, Tuple) or len(element) != 2:\n return S.false\n\n if not element[1].is_Integer:\n return S.false\n\n if element[1] >= len(self.sets) or element[1] < 0:\n return S.false\n\n return self.sets[element[1]]._contains(element[0])",
"def __contains__(self, item):\n return item in self.default_dataset",
"def _dataset_match(geno, dataset):\n return all(dataset[k] == v for (k, v) in _dataset_fields(geno).items())",
"def __contains__(self, i):\n if not isinstance(i, Permutation):\n raise TypeError(\"A SymmetricPermutationGroup contains only Permutations as \"\n \"elements, not elements of type %s\" % type(i))\n return i.size == self.degree",
"def __contains__(self, item):\n return item in self._data",
"def _valid_sbu_combination(self, incidence, sbu_set):\n if incidence is None:\n return len([i for i in sbu_set if i.is_metal]) == \\\n self.options.metal_sbu_per_structure\n else:\n if set(sorted([i.degree for i in sbu_set])) == set(sorted(incidence)):\n return len([i for i in sbu_set if i.is_metal]) == \\\n self.options.metal_sbu_per_structure\n else:\n return False",
"def checker(self, product):\n for item in self.instock:\n if item == product:\n return True\n return False",
"def is_apriori(Ck_item, Lksub1):\n for item in Ck_item:\n sub_Ck = Ck_item - frozenset([item])\n if sub_Ck not in Lksub1:\n return False\n return True",
"def is_valid_arg(self, muts, arg):\n for mut in muts:\n if arg in mut.args():\n return True\n\n return False",
"def __contains__(self, i):\n if not isinstance(i, Permutation):\n raise TypeError(\"A PermutationGroup contains only Permutations as \"\n \"elements, not elements of type %s\" % type(i))\n return self.contains(i)",
"def is_apriori(Ck_item, Lksub1):\r\n for item in Ck_item:\r\n sub_Ck = Ck_item - frozenset([item])\r\n if sub_Ck not in Lksub1:\r\n return False\r\n return True",
"def __contains__(self, idx):\n return idx in self._data",
"def __contains__(self, i):\n for j in self:\n if j == i:\n return True\n return False",
"def __contains__(self, item):\r\n if isinstance(item, six.string_types):\r\n return item in self.iternames()\r\n else:\r\n # let's assume we were given a column\r\n return item in self.iterall()",
"def __contains__(self, i):\n if not isinstance(i, FreeGroupElement):\n return False\n group = i.group\n return self == group",
"def __inStructure(self, umls_cui):\n return (umls_cui in self.structure)",
"def checkInBinaryPairAttr(_session, _end, _attr, _const):\n res = searchBinPairsAttrToNode(_session, _end, _attr, _const)\n return len(res) > 0",
"def __contains__(self, i):\n return i in self._ar",
"def is_in_bag(self, item):\n return item in self._bag",
"def contains(self, bag: \"Bag\") -> bool:\n\n for sub_bag_specifier in self.containing_bags:\n sub_bag = bag_registry[sub_bag_specifier[1]]\n if sub_bag == bag:\n return True\n if sub_bag.contains(bag):\n return True\n return False",
"def check_completeness(ISM):\n for item in ISM:\n if item not in ['A', 'T', 'C', 'G', '-']:\n return False\n return True",
"def exists_dataset(self, dataset):\n assert dataset, \"Must input a valid dataset name.\"\n return any(self.get_by_dataset(dataset))",
"def is_same_set(self, item1, item2):\n res = False\n for s in self._data:\n if item1 in s and item2 in s:\n res = True\n break\n return res",
"def _check_attribute_in_list(self, check_attribs, component_attribs):\n getattr = attrgetter('attribute_id')\n for key, group in groupby(component_attribs, getattr):\n if set(check_attribs).intersection([x.id for x in group]):\n return True\n return False",
"def __contains__(self, rq):\n return rq in self._data"
] |
[
"0.59588236",
"0.5774592",
"0.57421535",
"0.56637824",
"0.5647611",
"0.56438285",
"0.55813533",
"0.55634737",
"0.55409455",
"0.5475241",
"0.54204434",
"0.5393025",
"0.5378124",
"0.5369297",
"0.5363346",
"0.5348287",
"0.5324083",
"0.52803415",
"0.5260735",
"0.52119124",
"0.5175179",
"0.51732767",
"0.51656973",
"0.5158864",
"0.5157157",
"0.515568",
"0.5119542",
"0.510329",
"0.50946003",
"0.5076449"
] |
0.7593823
|
0
|
Grab the next alignment object from the generator until the read name matches ref_name.
|
def _next_until_name_match(generator, ref_name):
curr_name = ''
while not curr_name == ref_name:
curr_aln = generator.next()
curr_name = curr_aln.read.name.split()[0]
return curr_aln
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __next__(self):\n try:\n stream = self._stream\n except AttributeError:\n raise StopIteration from None\n alignment = self._read_next_alignment(stream)\n if alignment is None:\n raise StopIteration\n return alignment",
"def __next__(self):\n handle = self.handle\n\n if self._header is None:\n line = handle.readline()\n else:\n # Header we saved from when we were parsing\n # the previous alignment.\n line = self._header\n self._header = None\n\n if not line:\n raise StopIteration\n\n while line.rstrip() != \"#=======================================\":\n line = handle.readline()\n if not line:\n raise StopIteration\n\n length_of_seqs = None\n number_of_seqs = None\n ids = []\n header_dict = {}\n\n while line[0] == \"#\":\n # Read in the rest of this alignment header,\n # try and discover the number of records expected\n # and their length\n parts = line[1:].split(\":\", 1)\n key = parts[0].lower().strip()\n if key == \"aligned_sequences\":\n number_of_seqs = int(parts[1].strip())\n assert len(ids) == 0\n # Should now expect the record identifiers...\n for i in range(number_of_seqs):\n line = handle.readline()\n parts = line[1:].strip().split(\":\", 1)\n assert i + 1 == int(parts[0].strip())\n ids.append(parts[1].strip())\n assert len(ids) == number_of_seqs\n if key == \"length\":\n length_of_seqs = int(parts[1].strip())\n\n # Parse the rest of the header\n if key == \"identity\":\n header_dict[\"identity\"] = int(parts[1].strip().split(\"/\")[0])\n if key == \"similarity\":\n header_dict[\"similarity\"] = int(parts[1].strip().split(\"/\")[0])\n if key == \"gaps\":\n header_dict[\"gaps\"] = int(parts[1].strip().split(\"/\")[0])\n if key == \"score\":\n header_dict[\"score\"] = float(parts[1].strip())\n\n # And read in another line...\n line = handle.readline()\n\n if number_of_seqs is None:\n raise ValueError(\"Number of sequences missing!\")\n if length_of_seqs is None:\n raise ValueError(\"Length of sequences missing!\")\n\n if (\n self.records_per_alignment is not None\n and self.records_per_alignment != number_of_seqs\n ):\n raise ValueError(\n \"Found %i records in this alignment, told to expect %i\"\n % (number_of_seqs, self.records_per_alignment)\n )\n\n seqs = [\"\" for id in ids]\n seq_starts = []\n index = 0\n\n # Parse the seqs\n while line:\n if len(line) > 21:\n id_start = line[:21].strip().split(None, 1)\n seq_end = line[21:].strip().split(None, 1)\n if len(id_start) == 2 and len(seq_end) == 2:\n # identifier, seq start position, seq, seq end position\n # (an aligned seq is broken up into multiple lines)\n id, start = id_start\n seq, end = seq_end\n if start >= end:\n # Special case, either a single letter is present,\n # or no letters at all.\n if seq.replace(\"-\", \"\") == \"\":\n start = int(start)\n end = int(end)\n else:\n start = int(start) - 1\n end = int(end)\n else:\n assert seq.replace(\"-\", \"\") != \"\", repr(line)\n start = int(start) - 1 # python counting\n end = int(end)\n\n if index < 0 or index >= number_of_seqs:\n raise ValueError(\n \"Expected index %i in range [0,%i)\"\n % (index, number_of_seqs)\n )\n # The identifier is truncated...\n assert id == ids[index] or id == ids[index][: len(id)]\n\n if len(seq_starts) == index:\n # Record the start\n seq_starts.append(start)\n\n # Check the start...\n if start >= end:\n assert seq.replace(\"-\", \"\") == \"\", line\n elif start - seq_starts[index] != len(seqs[index].replace(\"-\", \"\")):\n raise ValueError(\n \"Found %i chars so far for sequence %i (%s, %r), line says start %i:\\n%s\"\n % (\n len(seqs[index].replace(\"-\", \"\")),\n index,\n id,\n seqs[index],\n start,\n line,\n )\n )\n seqs[index] += seq\n\n # Check the end ...\n if end != seq_starts[index] + len(seqs[index].replace(\"-\", \"\")):\n raise ValueError(\n 
\"Found %i chars so far for sequence %i (%s, %r, start=%i), file says end %i:\\n%s\"\n % (\n len(seqs[index].replace(\"-\", \"\")),\n index,\n id,\n seqs[index],\n seq_starts[index],\n end,\n line,\n )\n )\n\n index += 1\n if index >= number_of_seqs:\n index = 0\n else:\n # just a start value, this is just alignment annotation (?)\n # print \"Skipping: \" + line.rstrip()\n pass\n elif line.strip() == \"\":\n # Just a spacer?\n pass\n else:\n raise ValueError(\"Unrecognised EMBOSS pairwise line: %r\\n\" % line)\n\n line = handle.readline()\n if (\n line.rstrip() == \"#---------------------------------------\"\n or line.rstrip() == \"#=======================================\"\n ):\n # End of alignment\n self._header = line\n break\n\n assert index == 0\n\n if (\n self.records_per_alignment is not None\n and self.records_per_alignment != len(ids)\n ):\n raise ValueError(\n \"Found %i records in this alignment, told to expect %i\"\n % (len(ids), self.records_per_alignment)\n )\n\n records = []\n for id, seq in zip(ids, seqs):\n if len(seq) != length_of_seqs:\n # EMBOSS 2.9.0 is known to use spaces instead of minus signs\n # for leading gaps, and thus fails to parse. This old version\n # is still used as of Dec 2008 behind the EBI SOAP webservice:\n # http://www.ebi.ac.uk/Tools/webservices/wsdl/WSEmboss.wsdl\n raise ValueError(\n \"Error parsing alignment - sequences of \"\n \"different length? You could be using an \"\n \"old version of EMBOSS.\"\n )\n records.append(SeqRecord(Seq(seq), id=id, description=id))\n return MultipleSeqAlignment(records, annotations=header_dict)",
"def _read_next_alignment(self, stream):",
"def __next__(self):\n\n read = bamnostic.AlignedSegment(self)\n if not read:\n raise StopIteration\n return read",
"def next(self):\n return next(self.gen)",
"def next(self):\n\n read = bamnostic.AlignedSegment(self)\n if not read:\n raise StopIteration\n return read",
"def _assignAlignment(self, aln):\n self.sequence = None\n for i in range(self.nChildren()):\n self.children[i]._assignAlignment(aln)\n for seq in aln.seqs:\n if seq.name == self.label:\n self.sequence = seq\n break",
"def __next__(self):\n self._k += 1\n if self._k < len(self._seq):\n return(self._seq[self._k])\n else:\n # print('*** End of iteration. ***')\n raise StopIteration()",
"def next():",
"def next():",
"def advance(cls, seq_name, first=1):\n doc = cls.objects.coll.find_and_modify(\n {'name':seq_name},\n {'$inc': {'_next': 1}},\n upsert=True,\n new=True)\n return doc['_next']",
"def getNext(self):",
"def fasta_iter_py3(fasta_name):\n rec = None\n for line in open(fasta_name, \"r\"):\n if line[0] == \">\":\n if rec:\n yield rec\n rec = FastaRecord(line.strip()[1:])\n else:\n rec.sequence += line.strip()\n\n if rec:\n yield rec",
"def _get_next_name(self, flow):\n parent_ref = '%s ASSEMBLY' % flow.external_ref\n try:\n self.fg.get(parent_ref)\n except EntityNotFound:\n return parent_ref\n _ac = 0\n while True:\n parent_ref = '%s ASSEMBLY alt %d' % (flow.external_ref, _ac)\n try:\n self.fg.get(parent_ref)\n _ac += 1\n except EntityNotFound:\n return parent_ref",
"def alignment_start(sample_name='alignment'):\n\n smi = SMI_Beamline()\n yield from smi.modeAlignment()\n\n # Set direct beam ROI\n yield from smi.setDirectBeamROI()\n\n sample_id(user_name='test', sample_name=sample_name)\n proposal_id('2023_2', '311564_test')",
"def next(self):\n nextattr = self.iterobj.next()\n return (nextattr.name, self.attrs[nextattr.name])",
"def __next__(self):\n\t\treturn next()",
"def __next__(self):\n return self.next()",
"def __next__(self):\n return self.next()",
"def __next__(self):\n return self.next()",
"def get_next(self, name=None):\n self._get_next_call_count += 1\n if self._get_next_call_count > GET_NEXT_CALL_WARNING_THRESHOLD:\n warnings.warn(GET_NEXT_CALL_WARNING_MESSAGE)\n\n # TODO(b/169442955): Investigate the need for this colocation constraint.\n with ops.colocate_with(self._iterator_resource):\n # pylint: disable=protected-access\n flat_ret = gen_dataset_ops.iterator_get_next(\n self._iterator_resource,\n output_types=self._flat_tensor_types,\n output_shapes=self._flat_tensor_shapes,\n name=name)\n return structure.from_tensor_list(self._element_spec, flat_ret)",
"def lookup_alignment_frag(self, name):\n raise NotImplementedError()",
"def seqFromName(self, name):\n\t\tseqDict = self.sequenceDict()\n\t\treturn seqDict[name]",
"def matchmaker(samfile, semaphore=None):\n #reader = DictReader(samfile)\n labels = ['qname', 'flag', 'rname', 'pos', 'mapq', 'cigar', 'rnext', 'pnext',\n 'tlen', 'seq', 'qual']\n cached_rows = {}\n for line in samfile:\n if line.startswith('@'):\n continue # skip header line\n if 'HCV' not in line:\n continue # skip reads that mapped to another reference\n\n items = line.strip('\\n').split('\\t')\n row = dict(zip(labels, items[:11]))\n qname = row['qname']\n old_row = cached_rows.pop(qname, None)\n if old_row is None:\n cached_rows[qname] = row\n else:\n if semaphore is not None:\n semaphore.acquire()\n # current row should be the second read of the pair\n yield old_row, row",
"def next( self ):\n next(self)",
"def next(self):\r\n pass",
"def next(self):\n pass",
"def next(self):\n pass",
"def next(self):\n pass",
"def next(self):\n pass"
] |
[
"0.63519776",
"0.5905411",
"0.5886675",
"0.5681925",
"0.55885965",
"0.5567959",
"0.5484562",
"0.54833126",
"0.54559267",
"0.54559267",
"0.54290843",
"0.542644",
"0.5404576",
"0.53900135",
"0.5364065",
"0.5349766",
"0.53343314",
"0.533246",
"0.533246",
"0.533246",
"0.53069097",
"0.53029734",
"0.52572596",
"0.52311003",
"0.5229628",
"0.5225165",
"0.52192235",
"0.52192235",
"0.52192235",
"0.52192235"
] |
0.7791878
|
0
|
Parse fastq+sam+sam files in parallel; a generator yielding (name, seq1, aln2, aln3) tuples. It checks that the read names match (ignoring the paired-end side suffix or anything else after a space). It assumes that file1 is the reference, and that the other two files may have some extra names (those will be ignored).
|
def _parse_3files_parallel(cls, file1_fastx, file2_sam, file3_sam):
    generator1 = name_seq_generator_from_fasta_fastq(file1_fastx)
    generator2 = iter(HTSeq.SAM_Reader(file2_sam))
    generator3 = iter(HTSeq.SAM_Reader(file3_sam))
    if_finished_1, if_finished_2, if_finished_3 = False, False, False
    while True:
        try: name1, seq1 = next(generator1)
        except StopIteration: if_finished_1, name1 = True, 'NOTHING_HERE'
        name1 = name1.split()[0]
        try: aln2 = cls._next_until_name_match(generator2, name1)
        except StopIteration: if_finished_2 = True
        try: aln3 = cls._next_until_name_match(generator3, name1)
        except StopIteration: if_finished_3 = True
        # if all the files still contained data, yield it
        if not any([if_finished_1, if_finished_2, if_finished_3]):
            yield (name1, seq1, aln2, aln3)
        # if file1 was finished, we're done - it's okay if the other files had some extra reads
        elif if_finished_1:
            return
        # if file1 WASN'T finished but one of the others was, that's a problem!
        else:
            raise MutantError("Parsing seq/aln files in parallel - inconsistent finished states! "
                              +"(If finished: %s %s, %s %s, %s %s)"%(file1_fastx, if_finished_1,
                                                                     file2_sam, if_finished_2, file3_sam, if_finished_3))
    # TODO unit-tests! There are some in experiments/arrayed_library/internal_barcode_processing/code/clustering_tools.py for a similar function - test__parse_3seq_parallel
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def fusion(first_fh, fused_fh, compare_file):\r\n # initialize\r\n ha_seq = \"\"\r\n ha_header = \"\"\r\n # parse through file\r\n for line in first_fh:\r\n # if a > is found assume it is header\r\n if line[0] == \">\":\r\n # ha_header = line\r\n # if the header is found (length > 0)\r\n if len(ha_header) > 0:\r\n # pull needed information from header to make new one\r\n matches = re.findall(\"(Strain Name:[AB]\\/[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n subtype_match = re.findall(\"(Subtype:[A-Za-z0-9]+)\", ha_header)\r\n organ = re.findall(\"(Organism:[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n ha_header = \">\" + organ[0] + \"|\" + matches[0] + \"|\" + subtype_match[0]\r\n # print(ha_header)\r\n # Call find_match function, input the file to search and the new header created.\r\n na_header, na_seq = find_match(compare_file, ha_header)\r\n # if return is equal then write to new file with two sequences fused\r\n if na_header == ha_header:\r\n write_data_2(fused_fh, ha_header, ha_seq.strip() + \"\\n\" + na_seq.strip())\r\n # reset variables\r\n ha_header = line\r\n ha_seq = \"\"\r\n\r\n else:\r\n # if it is part of the sequence\r\n ha_seq = ha_seq + line\r\n\r\n # To return/write the last entries in the files, won't get written in loop\r\n matches = re.findall(\"(Strain Name:[AB]\\/[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n subtype_match = re.findall(\"(Subtype:[A-Za-z0-9]+)\", ha_header)\r\n organ = re.findall(\"(Organism:[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n ha_header = \">\" + organ[0] + \"|\" + matches[0] + \"|\" + subtype_match[0]\r\n na_header, na_seq = find_match(compare_file, ha_header)\r\n if na_header == ha_header:\r\n # print(\"matches2\")\r\n # print(ha_header)\r\n write_data_2(fused_fh, ha_header, ha_seq.strip() + \"\\n\" + na_seq.strip())\r\n\r\n # Close Files\r\n first_fh.close()\r\n fused_fh.close()",
"def process(\n self,\n name_and_reads: Tuple[str, List[reads_pb2.Read]],\n ) -> Iterable[Tuple[str, List[reads_pb2.Read]]]:\n name, reads = name_and_reads[0], list(name_and_reads[1])\n reads_copy = copy.deepcopy(reads)\n # Indent sequence strings by starting position.\n for read in reads_copy:\n indent = dc_constants.GAP_OR_PAD * read.alignment.position.position\n read.aligned_sequence = indent + read.aligned_sequence\n indented_cigar_str = indent + struct_utils.get_string_field(\n read.info, 'expanded_cigar')[0]\n struct_utils.set_string_field(read.info, 'expanded_cigar',\n indented_cigar_str)\n yield name, reads_copy",
"def parse_sam(rows):\n row1, row2 = rows\n mseqs = {}\n failed_list = []\n insert_list = []\n rname = row1['rname']\n qname = row1['qname']\n cigar1 = row1['cigar']\n cigar2 = row2['cigar']\n\n # filtering criteria\n reason = None\n if cigar1 == '*':\n reason = 'R1 unmapped'\n if int(row1['mapq']) < read_mapping_cutoff:\n reason = 'R1 low mapq'\n\n if cigar2 == '*':\n reason = 'R2 unmapped'\n if int(row2['mapq']) < read_mapping_cutoff:\n reason = 'R2 low mapq'\n\n genotype1, genotype2 = None, None\n try:\n genotype1 = row1['rname'].split('-')[1][0]\n genotype2 = row2['rname'].split('-')[1][0]\n except:\n reason = 'discordant map'\n pass\n\n if genotype1 != genotype2:\n reason = 'map conflict'\n\n if reason:\n failed_list.append({'qname': qname,\n 'rname1': row1['rname'],\n 'rname2': row2['rname'],\n 'reason': reason})\n else:\n pos1 = int(row1['pos'])-1 # convert 1-index to 0-index\n _, seq1, qual1, inserts = apply_cigar(cigar1, row1['seq'], row1['qual'])\n \n # report insertions relative to sample consensus\n for left, (iseq, iqual) in inserts.iteritems():\n insert_list.append({'qname': qname,\n 'fwd_rev': 'F' if is_first_read(row1['flag']) else 'R',\n 'refname': rname,\n 'pos': pos1+left,\n 'insert': iseq,\n 'qual': iqual})\n \n seq1 = '-'*pos1 + seq1 # pad sequence on left\n qual1 = '!'*pos1 + qual1 # assign lowest quality to gap prefix so it does not override mate\n \n \n # now process the mate\n pos2 = int(row2['pos'])-1 # convert 1-index to 0-index\n _, seq2, qual2, inserts = apply_cigar(cigar2, row2['seq'], row2['qual'])\n for left, (iseq, iqual) in inserts.iteritems():\n insert_list.append({'qname': qname,\n 'fwd_rev': 'F' if is_first_read(row2['flag']) else 'R',\n 'refname': rname,\n 'pos': pos2+left,\n 'insert': iseq,\n 'qual': iqual})\n seq2 = '-'*pos2 + seq2\n qual2 = '!'*pos2 + qual2\n \n # merge reads\n for qcut in sam2aln_q_cutoffs:\n mseq = merge_pairs(seq1, seq2, qual1, qual2, qcut)\n prop_N = mseq.count('N') / float(len(mseq.strip('-')))\n if prop_N > max_prop_N:\n # fail read pair\n failed_list.append({'qname': qname,\n 'reason': 'merge failure'})\n continue\n mseqs[qcut] = mseq\n\n return rname, mseqs, insert_list, failed_list",
"def parse_multifasta_file(file, number_of_fastas):\n\n with open(file) as file:\n for i in range(number_of_fastas):\n fasts_seq = ''\n fasta_name = file.readline().strip()[1:]\n end_of_file = False\n end_of_seq = False\n while not end_of_seq and not end_of_file:\n x = file.tell()\n seq = file.readline()\n if not seq:\n end_of_file = True\n elif '>' not in seq:\n fasts_seq = fasts_seq + seq\n else:\n file.seek(x)\n end_of_seq = True\n fasts_seq = re.sub(r'\\n', '', fasts_seq)\n yield fasta_name, fasts_seq",
"def read_several_fasta(input_files):\n pb_seq = []\n pb_name = []\n for name in input_files:\n header, seq = read_fasta(name)\n pb_name += header\n pb_seq += seq\n return pb_name, pb_seq",
"def chainSamFile(samFile, outputSamFile, readFastqFile, referenceFastaFile, \n chainFn=chainFn):\n sam = pysam.Samfile(samFile, \"r\" )\n refSequences = getFastaDictionary(referenceFastaFile) #Hash of names to sequences\n \n alignmentsHash = {}\n for aR in samIterator(sam): #Iterate on the sam lines and put into buckets by read\n #This should be improved, because the whole sam file is being stored in memory\n if aR.query_name not in alignmentsHash:\n alignmentsHash[aR.query_name] = {}\n if aR.reference_id not in alignmentsHash[aR.query_name]:\n alignmentsHash[aR.query_name][aR.reference_id] = []\n alignmentsHash[aR.query_name][aR.reference_id].append(aR)\n\n #Now write out the sam file\n outputSam = pysam.Samfile(outputSamFile, \"wh\", template=sam)\n \n #Chain together the reads\n chainedAlignedSegments = []\n for readName, readSeq, qualValues in fastqRead(readFastqFile):\n readName = readName.split()[0] #Remove any white space from the name\n if readName in alignmentsHash:\n for refID in alignmentsHash[readName].keys():\n alignedSegments = alignmentsHash[readName][refID]\n refSeq = refSequences[sam.getrname(refID)]\n chainedAlignedSegments.append(mergeChainedAlignedSegments(chainFn(alignedSegments, \n refSeq, readSeq), refSeq, readSeq))\n alignmentsHash.pop(readName)\n assert len(alignmentsHash) == 0 #All reads in the sam file should be in the input sequence file\n \n #Sort chained alignments by reference coordinates\n chainedAlignedSegments.sort(key=lambda aR : (sam.getrname(aR.reference_id), \\\n aR.reference_start, aR.reference_end)) \n \n for cAR in chainedAlignedSegments:\n outputSam.write(cAR)\n sam.close()\n outputSam.close()",
"def fastq_reader(fastq):\n group_gen = grouper(fastq, 4)\n for record in group_gen:\n # drop the @ before the name and any text after a whitespace\n name = record[0].split(' ')[0][1:].strip()\n seq = record[1].strip()\n yield name, seq",
"def read_in_file():\n\t# Declare variables\n\treads = []\n\n\t# Get command line arguments\n\targuments = sys.argv\n\targuments_length = len(arguments)\n\n\t# Read file is the first argument\n\tread_file_name = arguments[1]\n\n\t# Process read file \n\tread_file = open(read_file_name, 'r')\n\tfor line in read_file:\n\t\tread_info = line.split()\n\t\tread_string = read_info[2].replace('\\'', '')\n\t\tnew_read = GenerativeRead(read_string, [], read_info[5], read_info[3], None, [], read_info[0], read_info[1], read_info[4]) \n\t\treads.append(new_read)\n\tread_file.close()\n\n\t# Repeat regions file in the second argument\n\trepeat_file_name = arguments[2]\n\n\t# Process repeat file\n\trepeat_file = open(repeat_file_name, 'r')\n\talignments = [[]]\n\talignment_index = -1\n\tprevious_line = ''\n\n\n\tfor line in repeat_file:\n\t\talignment_info = line.split()\n\n\t\t# This consists of a tuple of alignment string, alignment start position and alignment chromosome\n\t\t#new_align = alignment_info[2], alignment_info[4], alignment_info[3]\n\n\t\tnew_align = Alignment(alignment_info[2], None, alignment_info[4], alignment_info[3])\n\n\t\tif previous_line != alignment_info[0]:\n\t\t\t# It is not a repeat\n\t\t\talignment_index = alignment_index + 1\n\t\t\talignments.append([])\n\t\t\tprevious_line = alignment_info[0]\n\n\t\talignments[alignment_index].append(new_align)\n\n\trepeat_file.close()\n\n\t# Associate each read with the other alignments\n\tfor read in reads:\n\t\t# Find the other alignments\n\t\tpos = read.get_position()\n\t\tfound = False\n\t\tfound_index = -1\n\n\t\tfor a_index, alignment_lists in enumerate(alignments):\n\t\t\t# find matching alignments\n\t\t\t# TODO: Don't add alignment already have\n\t\t\t# TODO: Make functional with filter\n\t\t\tfor align in alignment_lists:\n\t\t\t\tif align.get_position() == pos:\n\t\t\t\t\tfound = True\n\t\t\t\t\tfound_index = a_index\n\t\t\t\t\tbreak\n\n\t\t\tif found is True:\n\t\t\t\tbreak\n\n\t\tif found is True:\n\t\t\tfor new_align in alignments[found_index]:\n\t\t\t\tread.add_alignment(new_align)\n\t\t\t\n\n\n\t# SNP files are the remaining ones\n\tsnp_file_names = [arguments[file_id] for file_id in range(3, arguments_length) ]\n\n\t# Process SNP files\n\tfor file_name in snp_file_names:\n\t\tsnp_file = open(file_name, 'r')\n\n\t\tfor line in snp_file:\n\t\t\tsnp_info = line.split()\n\t\t\tsnps = snp_info[3].split('/')\n\t\t\tsnp_pos = int(float(snp_info[2]))\n\n\t\t\t# Ignore alleles that are longer than one base\n\n\t\t\t\n\t\t\tif all(len(x) < 2 for x in snps):\n\n\t\t\t\t# Iterate through reads and determine whether or not it contains this SNP\n\t\t\t\tpos_low = snp_pos - 49\n\t\t\t\n\n\t\t\t\tfor read in reads:\n\t\t\t\t\tpositions = read.get_alignment_positions()\n\n\t\t\t\t\tfor p_index, p in enumerate(positions):\n\t\t\t\t\t\tp = int(float(p))\n\t\t\t\t\t\tif p >= pos_low and p <= snp_pos:\n\t\t\t\t\t\t\t# Get index of snp\n\t\t\t\t\t\t\toffset = snp_pos - p\n\t\t\t\t\t\t\tcalls = [0, 0, 0, 0]\n\t\t\t\t\t\t\tfor snp in snps:\n\t\t\t\t\t\t\t\tcall_index = get_base_num(snp)\n\t\t\t\t\t\t\t\tcalls[call_index] = 1\n\n\t\t\t\t\t\t\t# Add the SNP to the read\n\t\t\t\t\t\t\tread.add_snp(p_index, offset, calls)\n\t\t\t\t\t\t\t\n\t\tsnp_file.close()\n\treturn reads",
"def getReadSamFile(read_file,rnameList):\n size = len(rnameList)\n prev = 0\n ends = range(0, size, 20)\n ends += [size]\n ends.pop(0)\n \n \n \n for i in ends:\n chrs = rnameList[prev:i]\n f = []\n ch_p = ''\n jj = 0\n for j in range(0,i-prev):\n samfile = os.path.join(working_dir, 'MappedRead.'+chrs[j]+'.sam')\n log.info('Generating ' + samfile)\n f.append(open(samfile, \"w\"))\n for line in open(read_file, \"r\"):\n \n itemList = line[:-1].split('\\t')\n \n if len(itemList) < 11:\n continue\n #print itemList\n if itemList[0][0:1] == '@':\n continue\n line_ch = itemList[2]\n if line_ch == '*':\n continue\n if int(itemList[1]) & 0b100 != 0:\n continue\n \n if ch_p != line_ch:\n for j in range(0,i-prev):\n if chrs[j] == line_ch:\n f[j].write(line)\n jj = j\n ch_p = line_ch\n continue\n #end for j in range(0,i-prev):\n elif ch_p == line_ch:\n f[jj].write(line)\n '''\n for j in range(0,i-prev):\n if chrs[j] == line_ch:\n f[j].write(line)\n continue\n '''\n for fp in f:\n fp.close()\n prev = i",
"def iterative_align(fq_in, tmp_dir, ref, n_cpu, sam_out, minimap2=False, min_len=20):\n # initial length of the fragments to align\n n = min_len\n # set with the name of the unaligned reads :\n remaining_reads = set()\n total_reads = 0\n # Store path of SAM containing aligned reads at each iteration.\n iter_out = []\n\n # If there is already a file with the same name as the output file,\n # remove it. Otherwise, ignore.\n with contextlib.suppress(FileNotFoundError):\n os.remove(sam_out)\n\n # Bowtie only accepts uncompressed fastq: uncompress it into a temp file\n if not minimap2 and ct.is_compressed(fq_in):\n uncomp_path = os.path.join(tmp_dir, os.path.basename(fq_in) + \".tmp\")\n with ct.read_compressed(fq_in) as inf:\n with open(uncomp_path, \"w\") as uncomp:\n st.copyfileobj(inf, uncomp)\n else:\n uncomp_path = fq_in\n\n # Index genome if using bowtie2\n index = False\n if not minimap2:\n index = os.path.join(tmp_dir, os.path.basename(ref))\n cmd = \"bowtie2-build {0} {1}\".format(ref, index)\n sp.call(cmd, shell=True)\n\n # Counting reads\n with ct.read_compressed(uncomp_path) as inf:\n for line in inf:\n total_reads += 1\n total_reads /= 4\n\n # Use first read to guess read length.\n with ct.read_compressed(uncomp_path) as inf:\n size = inf.readline()\n # Stripping newline.\n size = len(inf.readline().rstrip())\n\n print(\"{0} reads to parse\".format(total_reads))\n\n # iterative alignment per se\n while n <= size:\n print(\"\\n\" + \"-\" * 10 + \"\\nn = {0}\".format(n))\n iter_out += [os.path.join(tmp_dir, \"trunc_{0}.sam\".format(str(n)))]\n # Generate a temporary input fastq file with the n first nucleotids\n # of the reads.\n print(\"Generating truncated reads\")\n truncated_reads = truncate_reads(\n tmp_dir, uncomp_path, remaining_reads, n, min_len\n )\n\n # Align the truncated reads on reference genome\n print(\"Aligning reads\")\n temp_alignment = \"{0}/temp_alignment.sam\".format(tmp_dir)\n map_args = {\n \"fa\": ref,\n \"threads\": n_cpu,\n \"sam\": temp_alignment,\n \"fq\": truncated_reads,\n \"idx\": index,\n }\n if minimap2:\n cmd = \"minimap2 -x sr -a -t {threads} {fa} {fq} > {sam}\".format(**map_args)\n else:\n cmd = \"bowtie2 -x {idx} -p {threads} --rdg 500,3 --rfg 500,3 --quiet --very-sensitive -S {sam} {fq}\".format(\n **map_args\n )\n sp.call(cmd, shell=True)\n\n # filter the reads: the reads whose truncated end was aligned are written\n # to the output file.\n # The reads whose truncated end was not aligned are kept for the next round.\n print(\"Reporting aligned reads\")\n remaining_reads = filter_samfile(temp_alignment, iter_out[-1])\n\n n += 20\n\n # one last round without trimming\n print(\"\\n\" + \"-\" * 10 + \"\\nn = {0}\".format(size))\n print(\"Generating truncated reads\")\n truncated_reads = truncate_reads(\n tmp_dir, uncomp_path, remaining_reads, size, min_len\n )\n print(\"Aligning reads\")\n if minimap2:\n cmd = \"minimap2 -x sr -a -t {1} {0} {3} > {2}\".format(\n ref, n_cpu, temp_alignment, truncated_reads\n )\n else:\n cmd = \"bowtie2 -x {0} -p {1} --rdg 500,3 --rfg 500,3 --quiet --very-sensitive -S {2} {3}\".format(\n index, n_cpu, temp_alignment, truncated_reads\n )\n sp.call(cmd, shell=True)\n print(\"Reporting aligned reads\")\n iter_out += [os.path.join(tmp_dir, \"trunc_{0}.sam\".format(str(n)))]\n remaining_reads = filter_samfile(temp_alignment, iter_out[-1])\n n_remaining = len(remaining_reads)\n\n # Report unaligned reads as well\n iter_out += [os.path.join(tmp_dir, \"unaligned.sam\")]\n temp_sam = ps.AlignmentFile(temp_alignment, \"r\")\n 
unmapped = ps.AlignmentFile(iter_out[-1], \"w\", template=temp_sam)\n for r in temp_sam:\n # Do not write supplementary alignments (keeping 1 alignment/read)\n if r.query_name in remaining_reads and not r.is_supplementary:\n unmapped.write(r)\n unmapped.close()\n temp_sam.close()\n\n # Merge all aligned reads and unmapped reads into a single sam\n ps.merge(\"-O\", \"SAM\", \"-@\", str(n_cpu), sam_out, *iter_out)\n print(\n \"{0} reads aligned / {1} total reads.\".format(\n total_reads - len(remaining_reads), total_reads\n )\n )\n\n return 0",
"def parse_sam(in_file, out_file, read_type , strand):\n out_handle = open(out_file , 'a')\n if strand == 'watson':\n nt = ['C']\n else:\n nt = ['G']\n count = 0\n # print 'Warning, only works for forward mapped reads'\n mismatch = 0\n clip_count_total = 0\n for line in open(in_file, 'r'):\n modulo_line_no = count % 2\n #alternates between 0 and 1\n if line.startswith('@'):\n continue\n split_line = line.rstrip('\\n').split('\\t')\n #skip read pairs with improper flags.\n #TODO: do this filtering in mark_PCR_duplicates or elsewhere with access to pysam.\n if split_line[1] not in ['0', '99', '147']:\n mismatch += 1\n count += 1\n # continue\n char_count = ''\n clip_count = 0\n for char in split_line[5]:\n if not char.isalpha():\n char_count += char\n elif char == 'S':\n clip_count += int(char_count)\n else:\n char_count = ''\n if clip_count > 6:\n clip_count_total += 1\n count += 1\n # continue\n header = split_line[0].split('|')\n #meth_post list can be present for both R1 and R2 the last Samtools tag added should be the RN:Z: tag, look\n #to the right of this tag only\n meth_pos_list = split_line[0][split_line[0].rindex(':Z:'):].split('|')[1:]\n out_line = [header[0]]\n out_line += split_line[1:9]\n seq = list(split_line[9])\n try:\n meth_pos = [int(n) for n in meth_pos_list[-modulo_line_no].split(',')]\n for n in meth_pos:\n if n >= len(seq):\n break\n if seq[n] not in ['T','A']:\n break\n seq[n] = nt[-modulo_line_no]\n except ValueError:\n pass\n out_line += [''.join(seq)]\n out_line += split_line[10:]\n for item in header[1:]:\n if ':' in item and item not in out_line:\n out_line.append(item)\n # out_line += header[3:6]\n out_handle.write('\\t'.join(out_line) + '\\n')\n count += 1\n print('%s mismatches out of %s' % (mismatch, count))\n print('%s reads out of %s soft clipped more than 5' % (clip_count_total, count))",
"def process(\n self,\n name_and_reads: Tuple[str, Iterable[reads_pb2.Read]],\n ) -> Iterable[Tuple[str, List[reads_pb2.Read]]]:\n\n name, reads = name_and_reads[0], list(name_and_reads[1])\n # Note, examples will only be included in one of the initial counters since\n # we are returning early.\n if not reads:\n self.no_reads_counter.inc()\n return\n\n # Do not error for labels that have multiple alignments to correct molecule.\n # One of the alignments may be a supplementary alignment.\n if self.is_label and len(reads) > 1:\n logging.info('Unexpected: %d labels for %s', len(reads),\n reads[0].fragment_name)\n self.multiple_alignments_counter.inc()\n\n reads_copy = copy.deepcopy(reads)\n for read in reads_copy:\n assert read.aligned_sequence\n base_index = 0\n expanded_sequence = ''\n expanded_cigar_str = ''\n new_cigar_ops = []\n if not self.is_label:\n pw = struct_utils.get_int_field(read.info, 'pw')\n ip = struct_utils.get_int_field(read.info, 'ip')\n new_pw = []\n new_ip = []\n\n for op in read.alignment.cigar:\n # Skip over ops we don't want, such as soft clips.\n if op.operation not in dc_constants.OPS_TO_CONSIDER:\n base_index += op.operation_length\n continue\n if op.operation in dc_constants.READ_ADVANCING_OPS:\n start = base_index\n end = start + op.operation_length\n expanded_sequence += read.aligned_sequence[start:end]\n base_index += op.operation_length\n if not self.is_label:\n new_pw += pw[start:end]\n new_ip += ip[start:end]\n else:\n # Add a special token in sequence where we have deletion.\n expanded_sequence += dc_constants.GAP_OR_PAD * op.operation_length\n\n new_cigar_ops.append(op)\n op_char = cigar_utils.CIGAR_OPS_TO_CHAR[op.operation]\n expanded_cigar_str += op_char * op.operation_length\n\n # Update the read sequence.\n read.aligned_sequence = expanded_sequence\n assert len(read.aligned_sequence) == len(expanded_cigar_str)\n\n # Update the read cigar to only include ops that were kept.\n del read.alignment.cigar[:]\n read.alignment.cigar.extend(new_cigar_ops)\n\n # Save pw, ip, and expanded cigar string to be used downstream.\n if not self.is_label:\n struct_utils.set_int_field(read.info, 'pw', new_pw)\n struct_utils.set_int_field(read.info, 'ip', new_ip)\n # PW and IP won't be the same length as read.aligned_sequence here\n # because we haven't yet spaced out PW and IP based on gaps/padding.\n struct_utils.set_string_field(read.info, 'expanded_cigar',\n expanded_cigar_str)\n yield name, reads_copy",
"def matchmaker(samfile, semaphore=None):\n #reader = DictReader(samfile)\n labels = ['qname', 'flag', 'rname', 'pos', 'mapq', 'cigar', 'rnext', 'pnext',\n 'tlen', 'seq', 'qual']\n cached_rows = {}\n for line in samfile:\n if line.startswith('@'):\n continue # skip header line\n if 'HCV' not in line:\n continue # skip reads that mapped to another reference\n\n items = line.strip('\\n').split('\\t')\n row = dict(zip(labels, items[:11]))\n qname = row['qname']\n old_row = cached_rows.pop(qname, None)\n if old_row is None:\n cached_rows[qname] = row\n else:\n if semaphore is not None:\n semaphore.acquire()\n # current row should be the second read of the pair\n yield old_row, row",
"def find_match(second_file, title):\r\n # Initialize variables/ open files\r\n seq2 = \"\"\r\n header2 = \"\"\r\n match_fh = open(second_file, \"r\")\r\n # parse through lines of file\r\n for lines in match_fh:\r\n # If > found assume its header\r\n if lines[0] == \">\":\r\n # header2 = lines\r\n # If a header has been found, pull strain name, orgainism and subtype for new header\r\n if len(header2) > 0:\r\n matches2 = re.findall(\"(Strain Name:[AB]\\/[\\/A-Za-z 0-9()\\\\-_'.]+)\", header2)\r\n subtype_match2 = re.findall(\"(Subtype:[A-Za-z0-9]+)\", header2)\r\n organ2 = re.findall(\"(Organism:[\\/A-Za-z 0-9()\\\\-_'.]+)\", header2)\r\n header2 = \">\" + organ2[0] + \"|\" + matches2[0] + \"|\" + subtype_match2[0]\r\n # if new header equals input header then return it and the sequence\r\n if header2 == title:\r\n match_fh.close()\r\n print(\"match\")\r\n return header2, seq2\r\n # Reset the header and seq\r\n header2 = lines\r\n seq2 = \"\"\r\n\r\n else:\r\n # if it is part of the sequence\r\n seq2 = seq2 + lines\r\n\r\n # to return the last entry in the file, since loop won't be able to return it\r\n matches2 = re.findall(\"(Strain Name:[AB]\\/[\\/A-Za-z 0-9()\\\\-_'.]+)\", header2)\r\n subtype_match2 = re.findall(\"(Subtype:[A-Za-z0-9]+)\", header2)\r\n organ2 = re.findall(\"(Organism:[\\/A-Za-z 0-9()\\\\-_'.]+)\", header2)\r\n header2 = \">\" + organ2[0] + \"|\" + matches2[0] + \"|\" + subtype_match2[0]\r\n match_fh.close()\r\n return header2, seq2",
"def readFasta(self, fastaFile):\t\n\t\tname, seq = None, []\n\t\tfor line in fastaFile:\n\t\t\tline = line.rstrip()\n\t\t\tif (line.startswith(\">\")):\n\t\t\t\tif name: yield (name, ''.join(seq))\n\t\t\t\tname, seq = line, []\n\t\t\telse:\n\t\t\t\tseq.append(line)\n\t\tif name: yield (name, ''.join(seq))",
"def parse_fasta_file(filename):\n if filename.endswith('.gz'):\n opener = lambda filename: gzip.open(filename, 'rt')\n else:\n opener = lambda filename: open(filename, 'r')\n\n with opener(filename) as f:\n fasta_iter = (it[1] for it in itertools.groupby(f, is_header))\n for name in fasta_iter:\n name = name.__next__()[1:].strip()\n sequences = ''.join(seq.strip() for seq in fasta_iter.__next__())\n yield name, sequences",
"def read_fasta(fasta_name):\n \n \"\"\"first open the file outside \"\"\"\n file_handler = open(fasta_name)\n\n # ditch the boolean (x[0]) and just keep the header or sequence since\n # we know they alternate.\n fasta_iter = (x[1] for x in groupby(file_handler, lambda line: line[0] == \">\"))\n\n for header in fasta_iter:\n # drop the \">\"\n headerStr = header.__next__()[1:].strip()\n\n # join all sequence lines to one.\n seq = \"\".join(s.strip() for s in fasta_iter.__next__())\n\n # yield (headerStr, seq)\n result_record = {'header':headerStr,'seqRecord':seq}\n return result_record",
"def seqs_from_file(filename, exit_on_err=False, return_qual=False):\n # VALIDATE INPUT\n if not isinstance(filename, str):\n msg = 'Filename has to be a string.'\n if exit_on_err:\n sys.stderr.write('Error: %s\\n'%msg)\n sys.exit(1)\n else: raise IOError(msg)\n if not os.path.exists(filename):\n msg = 'File \"%s\" does not exist.'%filename\n if exit_on_err:\n sys.stderr.write('Error: %s\\n'%msg)\n sys.exit(1)\n else: raise IOError(msg)\n \n # EXTRACT DATA\n with open_(filename,\"rt\") as f:\n query_seq_segments = []\n seq, name, desc, qual = '', '', '', ''\n add_segment = query_seq_segments.append\n for l in f:\n if len(l.strip()) == 0: continue\n #sys.stderr.write(\"%s\\n\"%line)\n fields=l.strip().split()\n if l.startswith(\">\"):\n # FASTA HEADER FOUND\n if query_seq_segments != []:\n # YIELD SEQUENCE AND RESET\n seq = ''.join(query_seq_segments)\n yield (seq, name, desc)\n seq, name, desc = '', '', ''\n del query_seq_segments[:]\n name = fields[0][1:]\n desc = ' '.join(fields[1:])\n \n elif l.startswith(\"@\"):\n # FASTQ HEADER FOUND\n name = fields[0][1:]\n desc = ' '.join(fields[1:])\n try:\n # EXTRACT FASTQ SEQUENCE\n seq = next(f).strip().split()[0]\n # SKIP SECOND HEADER LINE AND QUALITY SCORES\n l = next(f)\n qual = next(f).strip() # Qualities\n except:\n break\n else:\n # YIELD SEQUENCE AND RESET\n if return_qual:\n yield (seq, qual, name, desc)\n else:\n yield (seq, name, desc)\n seq, name, desc, qual = '', '', '', ''\n \n elif len(fields[0])>0:\n # EXTRACT FASTA SEQUENCE\n add_segment(fields[0])\n \n # CHECK FOR LAST FASTA SEQUENCE\n if query_seq_segments != []:\n # YIELD SEQUENCE\n seq = ''.join(query_seq_segments)\n yield (seq, name, desc)",
"def parse_fastqc_pair(zip_filename1, zip_filename2, allow_missing=True):\n\n fastqc_r1 = parse_fastqc_output(zip_filename1)\n fastqc_r2 = parse_fastqc_output(zip_filename2)\n\n seq_tot = binop(fastqc_r1['total_sequences'], fastqc_r2['total_sequences'], lambda x,y:x+y)\n flagged_tot = binop(fastqc_r1['flagged_sequences'], fastqc_r2['flagged_sequences'], lambda x,y:x+y)\n read_pairs = binop(fastqc_r1['total_sequences'], fastqc_r2['total_sequences'], min)\n\n summary = { }\n for text in list(fastqc_r1['summary'].keys()) + list(fastqc_r2['summary'].keys()):\n flavor1 = fastqc_r1['summary'].get(text, 'PASS')\n flavor2 = fastqc_r2['summary'].get(text, 'PASS')\n\n summary[text] = 'PASS'\n if 'WARN' in [flavor1,flavor2]:\n summary[text] = 'WARN'\n if 'FAIL' in [flavor1,flavor2]:\n summary[text] = 'FAIL'\n\n if fastqc_r1['total_sequences'] != fastqc_r2['total_sequences']:\n summary['R1/R2 read count mismatch'] = 'FAIL'\n\n if (flagged_tot is not None) and (flagged_tot > 0):\n summary[f'{flagged_tot} sequences flagged as poor quality'] = 'WARN'\n\n return { 'total_sequences': seq_tot,\n 'flagged_sequences': flagged_tot,\n 'read_pairs': read_pairs,\n 'summary': summary }",
"def test_split_fasta_equal_num_seqs_per_file(self):\r\n fd, filename_prefix = mkstemp(dir=get_qiime_temp_dir(),\r\n prefix='split_fasta_tests',\r\n suffix='')\r\n close(fd)\r\n infile = ['>seq1', 'AACCTTAA', '>seq2', 'TTAACC', 'AATTAA',\r\n '>seq3', 'CCTT--AA']\r\n\r\n actual = split_fasta(infile, 1, filename_prefix)\r\n actual_seqs = []\r\n for fp in actual:\r\n actual_seqs += list(open(fp))\r\n remove_files(actual)\r\n\r\n expected = ['%s.%d.fasta' % (filename_prefix, i) for i in range(3)]\r\n\r\n self.assertEqual(actual, expected)\r\n self.assertEqual(\r\n SequenceCollection.from_fasta_records(parse_fasta(infile), DNA),\r\n SequenceCollection.from_fasta_records(parse_fasta(actual_seqs), DNA))",
"def test_assign_seqs_two_fastas(self):\r\n\r\n # Handles two fasta files alone\r\n file_data = {}\r\n file_data['fasta_files'] = [self.valid_fasta_file_no_errors,\r\n self.valid_fasta_file_no_errors]\r\n file_data['qual_files'] = []\r\n #file_data['mapping_file'] = self.valid_mapping_data_golay_upper\r\n file_data['demultiplexed_seqs_f'] = FakeOutFile()\r\n\r\n ids_bcs_added_field = {('AACTCGTCGATG', ''): 's1',\r\n ('AGCAGCACTTGT', ''): 's2', ('ACCGCAGAGTCA', ''): 's3'}\r\n bc_lens = [12]\r\n all_bcs = ['AACTCGTCGATG', 'AGCAGCACTTGT', 'ACCGCAGAGTCA']\r\n keep_barcode = False\r\n barcode_type = \"golay_12\"\r\n max_bc_errors = 1.5\r\n start_index = 1\r\n write_unassigned_reads = False\r\n disable_bc_correction = False\r\n added_demultiplex_field = None\r\n\r\n log_data, bc_freqs, seq_counts, corrected_bc_count =\\\r\n assign_seqs(file_data, ids_bcs_added_field, bc_lens, all_bcs,\r\n keep_barcode, barcode_type, max_bc_errors, start_index,\r\n write_unassigned_reads, disable_bc_correction,\r\n added_demultiplex_field)\r\n\r\n expected_demultiplexed_fasta_seq = '>s1_1 ABCD0001 orig_bc=AACTCGTCGATG new_bc=AACTCGTCGATG bc_diffs=0\\nCAGGACGAGACGAGGTT\\n>s3_2 EFGH0002 orig_bc=ACCGCAGAGTCA new_bc=ACCGCAGAGTCA bc_diffs=0\\nCCAGATTACGAGATTA\\n>s2_3 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\nGACCGATTACGATAACG\\n>s1_4 ABCD0001 orig_bc=AACTCGTCGATG new_bc=AACTCGTCGATG bc_diffs=0\\nCAGGACGAGACGAGGTT\\n>s3_5 EFGH0002 orig_bc=ACCGCAGAGTCA new_bc=ACCGCAGAGTCA bc_diffs=0\\nCCAGATTACGAGATTA\\n>s2_6 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\nGACCGATTACGATAACG\\n'\r\n self.assertEqual(file_data['demultiplexed_seqs_f'].data,\r\n expected_demultiplexed_fasta_seq)\r\n\r\n expected_log_data = {'ACCGCAGAGTCA,s3': 2, 'AACTCGTCGATG,s1': 2,\r\n 'AGCAGCACTTGT,s2': 2}\r\n expected_bc_freqs = {'AACTCGTCGATG': 2, 'AGCAGCACTTGT': 2,\r\n 'ACCGCAGAGTCA': 2}\r\n expected_seq_counts = 6\r\n expected_corrected_bc_count = [0, 0]\r\n\r\n self.assertEqual(log_data, expected_log_data)\r\n self.assertEqual(bc_freqs, expected_bc_freqs)\r\n self.assertEqual(seq_counts, expected_seq_counts)\r\n self.assertEqual(corrected_bc_count, expected_corrected_bc_count)",
"def FASTA_iterator(filename):\n fasta_file=open(filename, \"r\")\n id_fasta=\"\"\n seq_fasta=\"\"\n\n for line in fasta_file:\n if line.startswith(\">\"):\n if id_fasta == \"\":\n id_fasta=line.strip()\n continue\n fasta = id_fasta , seq_fasta\n yield fasta\n seq_fasta=\"\"\n id_fasta=line.strip()\n\n else:\n seq_fasta += line.strip()\n\n if seq_fasta != \"\":\n yield id_fasta, seq_fasta",
"def create_read_list_paired(samfile):\n read_sampler = ReadSampler()\n while True: \n line1 = samfile.readline()\n line2 = samfile.readline()\n if not line2: \n break\n line1 = sam_utils.SamAlignment(line1)\n line2 = sam_utils.SamAlignment(line2)\n if line1.QNAME != line2.QNAME:\n raise ValueError(\"Unpaired read or read with more than one pair\\\n encountered. Check your input file. File must\\\n be sorted by read name, every read must have\\\n a single pair and each pair must have one\\\n mapping. %s %s\"%(line1.QNAME, line2.QNAME))\n try:\n read_sampler.add_read(get_paired_blocks(line1,line2))\n except ValueError as err:\n logging.error(\"Skipping pair %s\"%err)\n except RuntimeError as err:\n logging.error(\"Skipping pair %s\"%err)\n return read_sampler",
"def gene_aligner(fq1_files, smp_name, args, fq2_files=None):\n project_path = init_rnaseq_project(args['path_out'], analysis_type=1)\n gene_align_path = project_path['gene']\n\n ## qc-report\n qc_path = os.path.join(gene_align_path['report'], 'qc')\n # QC_reporter(fq1_files, qc_path).run()\n\n ## update args\n args['fq1'] = fq1_files\n args['fq2'] = fq2_files\n args['path_out'] = gene_align_path['mapping']\n args['smp_name'] = smp_name\n args['align_to_te'] = False\n\n ## run alignment\n map_bam_list = Alignment(**args).run()\n\n ## filt map_genome\n map_bam = []\n for i in map_bam_list:\n for k in i:\n if k.endswith('map_' + args['genome'] + '.bam'):\n map_bam.append(k)\n\n # # create bigWig files\n # for bam in map_bam:\n # bam2bigwig(\n # bam=bam, \n # genome=args['genome'], \n # path_out=gene_align_path['bigWig'],\n # strandness=args['s'], \n # binsize=args['bin_size'],\n # overwrite=args['overwrite']) \n\n return map_bam",
"def test_write_Fasta_from_name_seqs_pairs(self):\r\n\r\n seqs = [('1', \"AAA\"), ('2', \"CCCCC\"), ('3', \"GGGG\")]\r\n\r\n # None fh raises Error\r\n self.assertRaises(\r\n ValueError,\r\n write_Fasta_from_name_seq_pairs,\r\n seqs,\r\n None)\r\n\r\n fd, tmp_filename = mkstemp(prefix=\"test_write_Fasta\",\r\n suffix=\".fna\")\r\n close(fd)\r\n fh = open(tmp_filename, \"w\")\r\n write_Fasta_from_name_seq_pairs(seqs, fh)\r\n fh.close()\r\n actual_seqs = list(parse_fasta(open(tmp_filename, \"U\")))\r\n remove(tmp_filename)\r\n\r\n self.assertEqual(actual_seqs, seqs)",
"def process(\n self,\n name_and_reads: Tuple[str, List[reads_pb2.Read]],\n ) -> Iterable[Tuple[str, List[reads_pb2.Read]]]:\n\n name, subreads = name_and_reads\n subreads_copy = copy.deepcopy(subreads)\n for read in subreads_copy:\n pw = struct_utils.get_int_field(read.info, 'pw')\n ip = struct_utils.get_int_field(read.info, 'ip')\n new_pw = []\n new_ip = []\n pw_ip_index = 0\n\n for base in read.aligned_sequence:\n # Padding and gap tokens are strings and cannot directly be added to pw\n # and ip, which are lists of ints. Instead, integer representations of\n # each must be added.\n if base == dc_constants.GAP_OR_PAD:\n new_pw.append(dc_constants.GAP_OR_PAD_INT)\n new_ip.append(dc_constants.GAP_OR_PAD_INT)\n # If base is neither padding nor gap, copy over the existing pw and ip.\n else:\n assert pw_ip_index < len(pw)\n assert pw_ip_index < len(ip)\n new_pw.append(pw[pw_ip_index])\n new_ip.append(ip[pw_ip_index])\n pw_ip_index += 1\n\n # pw, ip, and sequence should all be of the same length.\n assert len(new_pw) == len(read.aligned_sequence)\n assert len(new_ip) == len(read.aligned_sequence)\n struct_utils.set_int_field(read.info, 'pw', new_pw)\n struct_utils.set_int_field(read.info, 'ip', new_ip)\n\n yield name, subreads_copy",
"def process_reads_joined(args):\n\n watson_joined_r1 = tempfile.NamedTemporaryFile(suffix=\".fastq\", prefix='watson_joined_r1', dir=args.tmpdir,\n delete=False)\n watson_joined_r2 = tempfile.NamedTemporaryFile(suffix=\".fastq\", prefix='watson_joined_r2', dir=args.tmpdir,\n delete=False)\n crick_joined_r1 = tempfile.NamedTemporaryFile(suffix=\".fastq\", prefix='crick_joined_r1', dir=args.tmpdir,\n delete=False)\n crick_joined_r2 = tempfile.NamedTemporaryFile(suffix=\".fastq\", prefix='crick_joined_r2', dir=args.tmpdir,\n delete=False)\n args.watson_joined_r1 = watson_joined_r1.name\n args.watson_joined_r2 = watson_joined_r2.name\n args.crick_joined_r1 = crick_joined_r1.name\n args.crick_joined_r2 = crick_joined_r2.name\n\n print('Started processing joined reads')\n if args.reads_R1.endswith('.gz'):\n r1_handle = gzip.open(args.reads_R1, 'rt')\n r2_handle = gzip.open(args.reads_R2, 'rt')\n else:\n r1_handle = open(args.reads_R1, 'rt')\n r2_handle = open(args.reads_R2, 'rt')\n #make 4 file handles for forward and reverse watson and crick\n watson_r1_handle = open(args.watson_joined_r1, 'w')\n watson_r2_handle = open(args.watson_joined_r2, 'w')\n crick_r1_handle = open(args.crick_joined_r1, 'w')\n crick_r2_handle = open(args.crick_joined_r2, 'w')\n j = 0\n while True:\n read_r1 = []\n read_r2 = []\n for i in range(4):\n try:\n read_r1.append(next(r1_handle))\n read_r2.append(next(r2_handle))\n except StopIteration:\n break\n j += 1\n try:\n if int(args.sequences) == j:\n break\n except TypeError:\n pass\n if not j % 1000000:\n print('Processed %s reads' % (j))\n if not read_r1:\n break\n if 'watson' in read_r1[0].lower():\n convert_r1 = read_r1[1].upper().replace('C', 'T')\n convert_r2 = read_r2[1].upper().replace('G', 'A')\n c_pos = [str(n) for n, i in enumerate(read_r1[1]) if i.upper() == 'C']\n g_pos = [str(n) for n, i in enumerate(read_r2[1].rstrip('\\n')[::-1]) if i.upper() == 'G']\n header = '@%s' % (read_r1[0][1:-1].replace(' ', '|').replace('\\t', '|'))\n header += '|%s\\n' % (','.join(c_pos) + '|' + ','.join(g_pos))\n watson_r1_handle.write(header + convert_r1 + '+\\n' + read_r1[3])\n watson_r2_handle.write(header + convert_r2 + '+\\n' + read_r2[3])\n else:\n convert_r1 = read_r1[1].upper().replace('G', 'A')\n convert_r2 = read_r2[1].upper().replace('C', 'T')\n g_pos = [str(n) for n, i in enumerate(read_r1[1]) if i.upper() == 'G']\n c_pos = [str(n) for n, i in enumerate(read_r2[1].rstrip('\\n')[::-1]) if i.upper() == 'C']\n header = '@%s' % (read_r1[0][1:-1].replace(' ', '|').replace('\\t', '|'))\n header += '|%s\\n' % (','.join(g_pos) + '|' + ','.join(c_pos))\n crick_r1_handle.write(header + convert_r1 + '+\\n' + read_r1[3])\n crick_r2_handle.write(header + convert_r2 + '+\\n' + read_r2[3])\n crick_r1_handle.close()\n crick_r2_handle.close()\n watson_r1_handle.close()\n watson_r2_handle.close()\n return args",
"def parse(self, paired=False, shift=100):\n for read in self.handle:\n if read.is_unmapped or read.is_qcfail or read.is_secondary \\\n or read.is_supplementary:\n continue\n if paired:\n if not read.is_paired:\n logger.debug(\n f\"Skipped single-end read: {read.to_string()!r}\")\n continue\n if read.is_read1 and read.is_proper_pair \\\n and not read.mate_is_unmapped:\n chrom1 = read.reference_name\n start1 = read.reference_start\n end1 = read.reference_end\n chrom2 = read.next_reference_name\n if read.is_reverse:\n start = end1 + read.template_length\n end = end1\n else:\n start = start1\n end = start1 + read.template_length\n if read.template_length == 0:\n logger.debug(\n f\"Detected read with TLEN=0: {read.to_string()!r}\")\n if chrom1 == chrom2:\n yield chrom1, (start + end) // 2\n else:\n continue\n else:\n continue\n else:\n if read.is_paired:\n logger.debug(\n f\"Skipped paired-end read: {read.to_string()!r}\")\n continue\n if read.is_unmapped:\n continue\n else:\n chrom = read.reference_name\n start = read.reference_start\n end = read.reference_end\n if read.is_reverse:\n pos = end - shift\n else:\n pos = start + shift\n yield chrom, pos\n self.handle.close()",
"def align(aligner, reads):\n counter = 0\n for read in SeqIO.parse(reads, \"fasta\"): \n try:\n alignInfo = next(aligner.map(str(read.seq)))\n print(alignInfo) \n except StopIteration:\n print(read.format(\"fasta\"), end='')",
"def combine_alignments(fp1, fp2):\n seqs1 = dict(MinimalFastaParser(fp1))\n seqs2 = dict(MinimalFastaParser(fp2))\n\n if set(seqs1.keys()).intersection(set(seqs2.keys())):\n raise ValueError, \"Conflicting sequence ids in fp1 and fp2\"\n\n combined = seqs1\n combined.update(seqs2)\n\n return combined"
] |
[
"0.6319581",
"0.61748374",
"0.61733645",
"0.61505145",
"0.60918087",
"0.6071285",
"0.6036485",
"0.60167056",
"0.59617764",
"0.5954194",
"0.5929781",
"0.59196126",
"0.58673304",
"0.57962865",
"0.5796238",
"0.5788439",
"0.5781865",
"0.57712334",
"0.57695675",
"0.57692546",
"0.5760806",
"0.57387674",
"0.57206446",
"0.57145786",
"0.5704492",
"0.5691304",
"0.56908625",
"0.56752425",
"0.56750715",
"0.5673794"
] |
0.7638636
|
0
|
Add paired-end RISCC reads to dataset mutants, based on IB clustering.
|
def add_RISCC_alignment_files_to_data(self, cassette_side_flank_aligned_file, genome_side_aligned_file, IB_fastq_file,
allowed_IBs=None, IB_cluster_file=None,
best_genome_side_only=False, ignore_unaligned=False,
max_allowed_cassette_side_dist=1, max_cassette_side_ratio_to_ignore=100,
skip_checks=False, removed_mutant_file='/dev/null', quiet=False):
# TODO finish docstring
# MAYBE-TODO add option for not including IBs at all, and making the mutant dict by cassette-side alignment position like before?
# MAYBE-TODO at some point, maybe add full parsing of multiple alignments, to compare their positions to those of
# unique-aligned cases, rather than just marking them as multiple but treating them as unaligned?
# Might not be worth the effort, since if we have unique-aligned cases anyway, we can use those.
# MAYBE-TODO add ignore_cassette, cassette_only options?
# MAYBE-TODO add collapsed_readcounts option? That doesn't make much sense for paired-end reads.
if self.multi_dataset: raise MutantError("add_RISCC_alignment_files_to_data not implemented for multi-datasets!")
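        # cassette_end and relative_read_direction must be set before parsing, since the
        # insertion-position calculations below (get_insertion_pos_from_flanking_region_pos) depend on both.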
if self.summary.cassette_end not in SEQ_ENDS:
raise MutantError("Cannot add data from an alignment reader if cassette_end isn't specified! Please set the "
+"summary.cassette_end attribute of this Insertional_mutant_pool_dataset instance to one of %s first."%SEQ_ENDS)
if self.summary.relative_read_direction not in RELATIVE_READ_DIRECTIONS:
raise MutantError("Cannot add data from an alignment reader if relative_read_direction isn't set! "
+"Please set the relative_read_direction attribute of this Insertional_mutant_pool_dataset instance "
+"to one of %s first."%RELATIVE_READ_DIRECTIONS)
# read the IB cluster file; make a read_seq:centroid_seq dictionary for fast lookup.
if IB_cluster_file is not None:
if type(IB_cluster_file) == dict:
IB_centroid_to_seqs = IB_cluster_file
elif IB_cluster_file.endswith('.pickle'):
IB_centroid_to_seqs = unpickle(IB_cluster_file)
else:
raise MutantError("Unknown IB_cluster_file format in add_RISCC_alignment_files_to_data - must be .pickle filename "
+"or a dictionary. Value is %s"%IB_cluster_file)
IB_seq_to_centroid = invert_listdict_nodups(IB_centroid_to_seqs)
# set up IB checks - return True if IB is in allowed_IBs or if no allowed_IBs was given.
if allowed_IBs is None: _IB_check = lambda IB: True
else: _IB_check = lambda IB: IB in allowed_IBs
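        # Main loop: walk the IB fastq and the cassette-side/genome-side alignment files in parallel,
        # one (read name, IB seq, cassette-side alignment, genome-side alignment) tuple at a time.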
for (readname, IB_seq, cassette_side_aln, genome_side_aln) in self._parse_3files_parallel(
IB_fastq_file, cassette_side_flank_aligned_file, genome_side_aligned_file):
# get the cassette insertion position (as an Insertion_position object)
# MAYBE-TODO instead of generating cassette_side_position all the time, even with multiple identical reads,
# check if seq is already present in mutant, or something? To save time.
# if the IB isn't in the allowed set, skip this read (unless there is no allowed set, then just keep going)
if not _IB_check(IB_seq): continue
cassette_side_position = get_insertion_pos_from_flanking_region_pos(cassette_side_aln, self.summary.cassette_end,
self.summary.relative_read_direction, immutable_position=True)
if ignore_unaligned and cassette_side_position in SPECIAL_POSITIONS.all_undefined:
continue
# TODO should probably still count it
# grab mutant based on IB (clustered or not)
try: IB_centroid_seq = IB_seq_to_centroid[IB_seq]
except NameError: IB_centroid_seq = IB_seq
except KeyError: raise MutantError("IB seq %s not found in cluster dict!"%IB_seq)
mutant = self.get_mutant(IB_centroid_seq)
mutant.add_read(cassette_side_aln, cassette_side_position, read_count=1, dataset_name=None)
# Parse the genome-side alignment result to figure out position; add that to the mutant
# MAYBE-TODO make an option for the genome-side reads to be outward from the cassette? Unlikely to be needed.
genome_side_position = get_RISCC_pos_from_read_pos(genome_side_aln, self.summary.cassette_end, 'inward')
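            # unaligned/multi-aligned genome-side reads have no meaningful error count - use a high placeholder;
            # for aligned reads, take the error count from the optional NM field of the alignment.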
if genome_side_position in SPECIAL_POSITIONS.all_undefined:
N_errors = 10
else:
N_errors = check_mutation_count_by_optional_NM_field(genome_side_aln, negative_if_absent=False)
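            # either keep only the single best genome-side read per mutant, or accumulate all of them
            # (optionally skipping reads whose genome-side position is undefined).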
if best_genome_side_only:
mutant.improve_best_RISCC_read(genome_side_aln.read.seq, genome_side_position, N_errors, read_count=1,
max_distance=MAX_POSITION_DISTANCE)
else:
if not ignore_unaligned or genome_side_position not in SPECIAL_POSITIONS.all_undefined:
mutant.add_RISCC_read(genome_side_aln.read.seq, genome_side_position, N_errors, read_count=1)
# MAYBE-TODO if ignore_unaligned is True, do we still want to keep a count of unaligned seqs somehow?
# check that all mutants have consistent cassette positions; remove ones that don't.
if not skip_checks:
IBs_to_remove = []
with open(removed_mutant_file, 'w') as REMOVED_MUTANT_FILE:
for mutant in self:
if_remove = mutant.decide_and_check_position(max_allowed_cassette_side_dist,
ratio_to_ignore=max_cassette_side_ratio_to_ignore, OUTPUT=REMOVED_MUTANT_FILE)
if if_remove: IBs_to_remove.append(mutant.IB)
summary_text = ("Removed %s/%s mutants due to different flanking seq positions in one mutant "
+"(if distance >%s and some are within %sx reads of each other).")%(
len(IBs_to_remove), len(self), max_allowed_cassette_side_dist, max_cassette_side_ratio_to_ignore)
REMOVED_MUTANT_FILE.write("SUMMARY: " + summary_text + '\n')
if not quiet: print(summary_text + " - see %s for details."%removed_mutant_file)
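            # remove the flagged mutants now that the consistency check above is done.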
for IB in IBs_to_remove:
self.remove_mutant(IB)
# TODO do we want to add different read category counts to the summary, or make that stuff properties?
# MAYBE-TODO it might be good to just generate two separate mutant-sets, normal and cassette, with an option called separate_cassette or something, and print them to separate files - but that's more complicated, and right now I don't have the setup for a single dataset having multiple mutant-sets (although I guess I will have to eventually, for removed mutants etc). Right now I do it in mutant_count_alignments.py, which works but there's a lot of code repetition...
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _assign_reads( medians, centroids ):\n log.info(\"Assigning subreads reads to the closet amplicon cluster\")\n assignments = {'5p':set(), '3p':set()}\n five_prime, three_prime = centroids\n for read, median in medians.iteritems():\n five_prime_diff = abs(median - five_prime)\n three_prime_diff = abs(median - three_prime)\n if five_prime_diff < three_prime_diff:\n assignments['5p'].add( read )\n else:\n assignments['3p'].add( read )\n return assignments",
"def add_RISCC_read(self, seq, new_position, N_errors=None, read_count=1):\n # TODO why are we even using Insertion_position objects here?? Those aren't insertion positions with a start-end, just single positions... But still need to be able to deal with unaligned/multi as well as proper positions.\n if not isinstance(new_position, Insertion_position) and new_position not in SPECIAL_POSITIONS.all_undefined:\n raise MutantError(\"RISCC read position %s is unacceptable - must be Insertion_position object or one of %s!\"%(\n new_position, ', '.join(SPECIAL_POSITIONS.all_undefined)))\n # self.RISCC_genome_side_aligned_reads is a position:data dict\n if new_position not in SPECIAL_POSITIONS.all_undefined:\n try:\n # MAYBE-TODO check that the same seq isn't present in a different position?\n self.RISCC_genome_side_aligned_reads[new_position][1] += read_count\n try: self.RISCC_genome_side_aligned_reads[new_position][2][seq][0] += read_count\n except KeyError: self.RISCC_genome_side_aligned_reads[new_position][2][seq] = [read_count, N_errors]\n except KeyError:\n seq_count_error_dict = {seq: [read_count, N_errors]}\n self.RISCC_genome_side_aligned_reads[new_position] = [new_position, read_count, seq_count_error_dict, \n SPECIAL_GENE_CODES.not_determined, '?', '?', '?']\n # self.RISCC_genome_side_unaligned_reads is a seq:data dict, since the positions aren't usable as keys\n else:\n try:\n self.RISCC_genome_side_unaligned_reads[seq][1] += read_count\n self.RISCC_genome_side_aligned_reads[seq][2][seq][0] += read_count\n except KeyError:\n self.RISCC_genome_side_unaligned_reads[seq] = [new_position, read_count, {seq: [read_count, N_errors]}, \n SPECIAL_GENE_CODES.not_determined, '?', '?', '?']\n # Note: adding gene/annotation info for those is implemented in the dataset methods.",
"def __init__(self, dataset, cassette_end, relative_read_direction, dataset_name=None):\n # make sure the arguments are valid values\n if not cassette_end in SEQ_ENDS+['?']: \n raise ValueError(\"The cassette_end variable must be one of %s or '?'!\"%SEQ_ENDS)\n if relative_read_direction not in RELATIVE_READ_DIRECTIONS+['?']: \n raise ValueError(\"The relative_read_direction variable must be %s, or '?'!\"%(', '.join(RELATIVE_READ_DIRECTIONS)))\n # reference to the containing dataset (for read-counting purposes etc), \n # and the dataset name (None if it's a single dataset, string for multi-datasets)\n self.dataset_name = dataset_name\n self.dataset = dataset\n # information on reads that aren't included in the dataset mutants - None or 0 by default\n # TODO I should really go over this and figure out what should be None and what should be 0 and why!!\n self.discarded_read_count, self.discarded_wrong_start, self.discarded_no_cassette = None, None, None\n self.discarded_other_end = 0\n self.non_aligned_read_count, self.unaligned, self.multiple_aligned = 0, 0, 0\n self.ignored_region_read_counts = defaultdict(int)\n # MAYBE-TODO should cassette_end and relative_read_direction be specified for the whole dataset, or just for each set of data added, in add_RISCC_alignment_files_to_data? The only real issue with this would be that then I wouldn't be able to print this information in the summary - or I'd have to keep track of what the value was for each alignment reader added and print that in the summary if it's a single value, or 'varied' if it's different values. Might also want to keep track of how many alignment readers were involved, and print THAT in the summary! Or even print each (infile_name, cassette_end, relative_read_direction) tuple as a separate line in the header.\n self.cassette_end = cassette_end\n self.relative_read_direction = relative_read_direction",
"def mergeClusters(self,c1,c2):\n combinedBaseRecords = (self.c2b[c1] | self.c2b[c2])\n # Remove these two clusters.\n del self.c2b[c1]\n del self.c2b[c2]\n cNew = self._newClusterId()\n self.c2b[cNew] = combinedBaseRecords\n for baseRecord in combinedBaseRecords:\n self.b2c[baseRecord] = cNew",
"def augment_by_additive_noise(ds, noise_datadir, snr_list, copy_noise_files_to_tmpdir=False):\n logger.info(\"Augmenting dataset with additive noise from '%s'.\", noise_datadir)\n if not os.path.isdir(noise_datadir):\n logger.error(\"Noise source dir '%s' does not exist.\", noise_datadir)\n return\n\n id2type = dict(lidbox.iter_metadata_file(os.path.join(noise_datadir, \"id2label\"), 2))\n type2paths = collections.defaultdict(list)\n for noise_id, path in lidbox.iter_metadata_file(os.path.join(noise_datadir, \"id2path\"), 2):\n type2paths[id2type[noise_id]].append(path)\n del id2type\n\n if copy_noise_files_to_tmpdir:\n tmpdir = os.path.join(os.environ.get(\"TMPDIR\", \"/tmp\"), \"lidbox_noise_signals\")\n logger.info(\"Copying all noise files to TMPDIR '%s'\", tmpdir)\n for noise_type, paths in list(type2paths.items()):\n new_paths = []\n for src in paths:\n dst = os.path.join(tmpdir, noise_type, os.path.basename(src))\n logger.debug(\"%s -> %s\", src, dst)\n os.makedirs(os.path.dirname(dst), exist_ok=True)\n shutil.copyfile(src, dst)\n new_paths.append(dst)\n type2paths[noise_type] = new_paths\n\n type2paths = {t: tf.constant(paths, tf.string) for t, paths in type2paths.items()}\n\n def _update_element_meta(new_id, mixed_signal, x):\n return dict(x, id=new_id, signal=mixed_signal)\n\n def _add_random_noise_and_flatten(x):\n \"\"\"\n Using snr_list, choose len(snr_list) noise signals randomly and create new signal samples by mixing the chosen noise signals with x[\"signal\"] using random SNR dB levels.\n \"\"\"\n # Random noise path indexes and random snr levels\n rand_noise = [\n (noise_type,\n tf.random.uniform([], 0, tf.size(type2paths[noise_type]), tf.int32),\n tf.random.uniform([], snr_low, snr_high, tf.float32))\n for noise_type, snr_low, snr_high in snr_list]\n # Select random noise signals by drawn indexes and read contents from files\n rand_noise = [\n (audio_features.read_wav(type2paths[noise_type][rand_index]), snr)\n for noise_type, rand_index, snr in rand_noise]\n\n # Assert sample rates\n # TODO maybe add inline resampling of noise signals so they match the speech sr\n for (noise, sample_rate), snr in rand_noise:\n tf.debugging.assert_equal(sample_rate, x[\"sample_rate\"], message=\"Invalid noise signals are being used, all noise signals must have same sample rate as speech signals that are being augmented\")\n\n # Fix noise signal length to match x[\"signal\"] by repeating the noise signal if it is too short and then slicing it\n rand_noise = [\n # How many multiples of `noise` fits in x[\"signal\"]\n (tf.cast(tf.size(x[\"signal\"]) / tf.size(noise), tf.int32), noise, snr)\n for (noise, _), snr in rand_noise]\n rand_noise = [\n # Repeat noise and slice\n (tf.tile(noise, [1 + noise_length_ratio])[:tf.size(x[\"signal\"])], snr)\n for noise_length_ratio, noise, snr in rand_noise]\n\n # Mix x[\"signal\"] and chosen noise signals\n mixed_signals = [audio_features.snr_mixer(x[\"signal\"], noise, snr)[2] for noise, snr in rand_noise]\n # Create new utterance ids that contain the mixed noise type and SNR level\n new_ids = [\n tf.strings.join((\n \"augmented\",\n x[\"id\"],\n noise_type,\n tf.strings.join((\"snr\", tf.strings.as_string(snr, precision=2)))),\n separator=\"-\")\n for (noise_type, _, _), (_, snr) in zip(snr_list, rand_noise)]\n\n # Create new elements from the mixed signals and return as dataset\n return (tf.data.Dataset\n .zip((tf.data.Dataset.from_tensor_slices(new_ids),\n tf.data.Dataset.from_tensor_slices(mixed_signals),\n 
tf.data.Dataset.from_tensors(x).repeat(len(mixed_signals))))\n .map(_update_element_meta))\n\n return ds.interleave(\n _add_random_noise_and_flatten,\n block_length=len(snr_list),\n num_parallel_calls=TF_AUTOTUNE)",
"def add_reads(self, new_reads): \n if self.sampling:\n self.convert_to_list()\n self.reads.extend(new_reads)",
"def proc_dataset_v2(write=False):\n \n path = load_config()\n M = pd.read_csv(path['metadata_file'])\n T = pd.read_csv(path['rna_file'])\n mCH = pd.read_csv(path['mCH_file'])\n CH = pd.read_csv(path['CH_file'])\n\n def format_df(df):\n \"\"\"The inputs are genes x cells. Transpose data and rename columns\"\"\"\n df = df.transpose()\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop('gene', inplace=True)\n df.index.rename('sample_id', inplace=True)\n return df\n\n T = format_df(T)\n mCH = format_df(mCH)\n CH = format_df(CH)\n\n #Update metadata\n M = pd.read_csv(path['metadata_file'])\n M.rename(columns={'Unnamed: 0': 'sample_id'}, inplace=True)\n M.set_index(keys='sample_id', drop=True, inplace=True)\n\n #Sort cells by metadata\n sorted_index = M.sort_values(by='SubClusterAnno').index\n M = M.loc[sorted_index]\n T = T.loc[sorted_index]\n mCH = mCH.loc[sorted_index]\n CH = CH.loc[sorted_index]\n\n assert np.array_equal(CH.columns, mCH.columns), \"Genes are not in the same order\"\n assert np.array_equal(T.columns, mCH.columns), \"Genes are not in the same order\"\n assert M.index.equals(T.index), \"Cells are not in the same order\"\n assert M.index.equals(CH.index), \"Cells are not in the same order\"\n assert M.index.equals(mCH.index), \"Cells are not in the same order\"\n\n # CPM-normalize counts\n X = T.values.astype(np.float32)\n X = (X/np.sum(X, axis=1, keepdims=True))*1e6\n X = np.log1p(X)\n T = pd.DataFrame(data=X, index=T.index, columns=T.columns)\n print('Completed T processing')\n\n # For methylation data\n X = CH.values.astype(float)\n Y = mCH.values.astype(float)\n Z = np.divide(Y,X+1e-10)\n E = pd.DataFrame(data=Z, index=mCH.index, columns=mCH.columns)\n print('Completed E processing')\n\n # select genes based on variance in log normalized CPM values in T\n def calc_highvar_genes(df):\n vars = np.var(df.values, axis=0)\n order_vars = np.argsort(-vars) # descending order\n sorted_highvar_genes = df.columns.values[order_vars]\n return sorted_highvar_genes\n\n data = {'sorted_highvar_T_genes': calc_highvar_genes(T),\n 'sorted_highvar_E_genes': calc_highvar_genes(E)}\n print('Completed finding high variance features')\n\n if write:\n feather.write_dataframe(T, path['data_dir'] / 'T_dat_v2.feather')\n feather.write_dataframe(E, path['data_dir'] / 'E_dat_v2.feather')\n feather.write_dataframe(M, path['data_dir'] / 'Meta_v2.feather')\n sio.savemat(path['data_dir'] / 'highvar_genes_v2.mat', data)\n return T, E, M, data",
"def constella(cur_plms, pc_starscape, group_iter, outfile_prefix):\n # Copy dataframe to avoid modifying the input dataframe\n cur_plms_copy = cur_plms.copy(deep=True)\n\n sanity_check_pos = 2 # Needs to point at days in image identifier!\n\n singleton_no = pc_starscape.shape[0]\n\n if params.debug is not None:\n print(f'{singleton_no} plms to group')\n\n plm_links = linkage(pc_starscape.loc[:, pc_starscape.columns[2:len(pc_starscape.columns)]].values, 'ward')\n\n # For n-1 to 2 leaves on the current hierarchical cluster dendrogram...\n for c in np.arange(singleton_no - 1, 2, -1):\n # Extract current number of clusters for the agglomeration step\n cutree = cut_tree(plm_links, n_clusters=c)\n # Generate a list of all current clusters identified\n group_list = np.unique(cutree)\n\n # For the current cluster being queried...\n for g in group_list:\n # Create list of current clusters row indices in pandas dataframe\n cur_index = [i for i, x in enumerate(cutree == g) if x]\n # Create list of current clusters present group identity assignments\n cur_index_id = np.array(cur_plms_copy.iloc[cur_index, 0])\n # Are any of the plms in the current cluster unnamed, how many?\n empty_count = np.count_nonzero(cur_index_id == None)\n empty_index = [i for (i, v) in zip(cur_index, cur_plms_copy.iloc[cur_index, 0].values == None) if v]\n # Are any of the plms in the current cluster already assigned an identity, what are those identities?\n unique_ids = np.unique(cur_index_id[np.array(cur_index_id) != None])\n\n # If cluster is two unnamed plms exactly, assign this group their own identity as a pair\n if empty_count == 2:\n pair_names = cur_plms_copy.iloc[empty_index, 1].values\n # Sanity check! Pairs must be on different days\n if pair_names[0].split('_')[sanity_check_pos] != pair_names[1].split('_')[sanity_check_pos]:\n cur_plms_copy.iloc[empty_index, 0] = group_iter\n group_iter = group_iter + 1\n else:\n cur_plms_copy.iloc[empty_index[0], 0] = group_iter\n cur_plms_copy.iloc[empty_index[1], 0] = group_iter + 1\n group_iter = group_iter + 2\n\n # For the identities that already exist...\n for uid in unique_ids:\n # If only one plm assigned a name in current cluster and a second unnamed plm exists\n # transfer ID over to create a pair\n if np.count_nonzero(np.array(cur_index_id) == uid) < 2 and empty_count == 1:\n # Store boolean positions for plms with IDs matching current id out of current cluster\n match_ids = [i for i, x in enumerate(cur_plms_copy.iloc[cur_index, 0].values == uid) if x]\n # Store boolean positions for plms which are unnamed out of current cluster\n null_ids = [i for i, x in enumerate(cur_plms_copy.iloc[cur_index, 0].values == None) if x]\n # If exactly 1 matching ID and 1 null ID (i.e. 2 plms total)\n # continue to pass ID name to the unnamed plm\n if len(match_ids) + len(null_ids) == 2:\n # Sanity check! 
Pairs must be on different days\n pair_names = cur_plms_copy.iloc[[cur_index[i] for i in match_ids + null_ids], 1].values\n if pair_names[0].split('_')[sanity_check_pos] != pair_names[1].split('_')[sanity_check_pos]:\n # Transfer identities to the unnamed plm\n cur_plms_copy.iloc[[cur_index[i] for i in null_ids], 0] = uid\n\n # Now that all groups that can be linked are formed, name rogues...\n rogues = [i for i, x in enumerate(cur_plms_copy.loc[:, 'group'].values == None) if x]\n for rogue in rogues:\n cur_plms_copy.iloc[[rogue], 0] = group_iter\n group_iter = group_iter + 1\n\n grpnames = cur_plms_copy.loc[:, ['group']].values\n plmnames = cur_plms_copy.loc[:, ['plmname']].values\n\n labelnames = []\n\n for li in range(0, len(plmnames)):\n labelnames.append(''.join(plmnames[li] + ' (' + str(int(grpnames[li])) + ')'))\n\n if params.debug is not None:\n plt.figure()\n plt.title('')\n plt.xlabel('')\n plt.ylabel('')\n dendrogram(plm_links, color_threshold=100, orientation=\"left\", leaf_font_size=10, labels=np.array(labelnames))\n plt.tight_layout()\n\n if params.debug == \"print\":\n plt.savefig(outfile_prefix + '_plmHCA.png')\n plt.close()\n elif params.debug == \"plot\":\n plt.show()\n\n return cur_plms_copy, group_iter",
"def mapRev2Cluster(self):\n\n # For each condition, operating on the side effect matching file to reduce down into\n # the more general categories\n clusterMapping = pd.read_csv('ClusteredSideEffects.csv', sep='$', index_col=0)\n for condition in self.conditions:\n print(\"I'm working on {:s}\".format(condition))\n files = glob.glob('ReviewsMatched2SideEffects/{:s}*csv'.format(condition))\n files = np.sort(files)\n\n for i,f in enumerate(files):\n df = pd.read_csv(f, sep='$', index_col=0)\n\n for cluster in np.unique(clusterMapping['Cluster']):\n # Finding the relevant SEs for the cluster\n SEs = clusterMapping[clusterMapping['Cluster'].eq(cluster)]['Side effect']\n\n # Summing across all those SEs in the dataframe and creating a new column\n match = [SE for SE in SEs if SE in df.columns]\n df[cluster] = (df[match].sum(axis=1) > 0)\n \n if not match:\n df[cluster] = [0]*len(df)\n \n # Stacking to allow for the depression split\n if i == 0:\n master_df = df.copy()\n else:\n master_df = master_df.append(df, ignore_index=0, sort=False)\n\n\n # Dropping all columns not in clusters\n clusters = list(np.unique(clusterMapping['Cluster']))\n keepers = ['Medication','Positive polarity','Negative polarity','Medication mentions','Effectiveness']\n keepers += clusters\n master_df = master_df[keepers]\n \n # Writing the stack to a file to load on to AWS\n master_df.to_csv('FinalProcessedReviews/{:s}_processed.csv'.format(condition), sep='$')\n print(\"I've saved the clustered file\\n\")",
"def mergeChainedAlignedSegments(chainedAlignedSegments, refSequence, readSequence):\n cAR = pysam.AlignedSegment()\n aR = chainedAlignedSegments[0]\n cAR.query_name = aR.query_name\n \n #Parameters we don't and therefore set properly\n #cAR.flag = aR.flag\n #cAR.mapq = aR.mapq\n #cAR.mrnm = 0\n #cAR.mpos=0\n #cAR.isize=0\n #cAR.qual = \"<\" * len(readSequence)\n #cAR.tags = aR.tags \n cAR.next_reference_id = -1\n cAR.reference_start = aR.reference_start #Reference start\n cAR.is_reverse = aR.is_reverse\n cAR.query_sequence = reverseComplement(readSequence) if cAR.is_reverse else readSequence\n cAR.reference_id = aR.reference_id\n cigarList = []\n pPos = aR.reference_start\n #Iterate from the other end of the sequence if reversed\n pQPos = -(len(readSequence)-1) if cAR.is_reverse else 0 \n \n for aR in chainedAlignedSegments:\n assert cAR.is_reverse == aR.is_reverse\n #Add a deletion representing the preceding unaligned reference positions\n assert aR.reference_start >= pPos\n if aR.reference_start > pPos:\n cigarList.append((2, aR.reference_start - pPos))\n pPos = aR.reference_start \n \n #Add an insertion representing the preceding unaligned read positions\n #make it a soft clip if it is the first chained alignment\n qPos = getFirstNonClippedPositionInRead(aR, readSequence)\n assert qPos >= pQPos\n if qPos > pQPos:\n cigarList.append((4 if aR == chainedAlignedSegments[0] else 1, qPos - pQPos)) \n pQPos = qPos\n \n #Add the operations of the cigar, filtering hard and soft clipping\n for op, length in aR.cigar:\n assert op in (0, 1, 2, 4, 5)\n if op in (0, 1, 2):\n cigarList.append((op, length))\n if op in (0, 2): #Is match or deletion\n pPos += length\n if op in (0, 1): #Is match or insertion\n pQPos += length\n \n assert pPos <= len(refSequence)\n \n #Set reference end coordinate (which is exclusive)\n #cAR.reference_end = pPos #We don't do this because it is set by cigar string\n \n #Now add any trailing, necessary soft clipping\n if cAR.is_reverse:\n assert pQPos <= 1\n if pQPos < 1:\n cigarList.append((4, -pQPos + 1))\n else:\n assert pQPos <= len(readSequence)\n if pQPos < len(readSequence):\n cigarList.append((4, len(readSequence) - pQPos))\n \n cAR.cigar = tuple(cigarList)\n \n #Check ops\n for op, length in cAR.cigar: #We should have no hard clipped ops\n assert op in (0, 1, 2, 4)\n \n #Reference sequence check coordinates\n assert sum([ length for op, length in cigarList if op in (0, 2)]) == cAR.reference_end - cAR.reference_start\n assert cAR.reference_start >= 0 and cAR.reference_start < len(refSequence)\n assert cAR.reference_end >= 0 and cAR.reference_end <= len(refSequence)\n \n #Read sequence check coordinates\n assert cAR.query_alignment_start >= 0 and cAR.query_alignment_start < len(readSequence)\n assert cAR.query_alignment_end >= 0 and cAR.query_alignment_end <= len(readSequence)\n assert cAR.query_alignment_start + sum([ length for op, length in cigarList if op in (0, 1)]) == cAR.query_alignment_end\n \n return cAR",
"def connection(self, sampleseq, num):\n self.Adjmatrix = np.zeros((self.nodenum, self.nodenum), dtype = int)\n \n for i in range(self.supplynum):\n minindex = np.array(sf.minimumk(self.Dismatrix[self.supplyseries[i], self.trandemandseries], sampleseq[self.supplyseries[i]]))\n self.Adjmatrix[self.supplyseries[i], self.trandemandseries[minindex]] = 1\n# self.Adjmatrix[minindex, self.supplyseries[i]] = 1\n \n for i in range(self.trannum):\n if(np.sum(self.Adjmatrix[self.supplyseries, self.transeries[i]]) == 0):\n minindex = np.array(sf.minimumk(self.Dismatrix[self.supplyseries, self.transeries[i]], num))\n self.Adjmatrix[minindex, self.transeries[i]] = 1\n# self.Adjmatrix[self.transeries[i], minindex] = 1\n \n \n# for i in range(self.supplynum):\n# minindex = np.array(sf.minimumk(self.Dismatrix[self.supplyseries[i], self.supplyseries], num))\n# self.Adjmatrix[self.supplyseries[i], minindex] = 1\n# self.Adjmatrix[minindex, self.supplyseries[i]] = 1\n \n# for i in range(self.trannum):\n# if(np.sum(self.Adjmatrix[self.supplyseries, self.transeries[i]]) != 0):\n# continue\n# minindex = np.array(sf.minimumk(self.Dismatrix[self.supplyseries, self.transeries[i]], num))\n# self.Adjmatrix[minindex, self.transeries[i]] = 1\n## self.Adjmatrix[self.transeries[i], minindex] = 1\n# \n for i in range(self.trannum):\n minindex = np.array(sf.minimumk(self.Dismatrix[self.transeries[i], self.demandseries], min(sampleseq[self.transeries[i]], self.demandnum))) + self.supplynum + self.trannum\n self.Adjmatrix[self.transeries[i], minindex] = 1\n# self.Adjmatrix[minindex, self.transeries[i]] = 1\n \n# for i in range(self.demandnum):\n# if(np.sum(self.Adjmatrix[self.transeries, self.demandseries[i]]) == 0):\n# minindex = np.array(sf.minimumk(self.Dismatrix[self.transeries, self.demandseries[i]], 1)) + self.supplynum\n# self.Adjmatrix[minindex, self.demandseries[i]] = 1\n \n# for i in range(self.trannum):\n# minindex = np.array(sf.minimumk(self.Dismatrix[self.transeries[i], self.transeries], num)) + self.supplynum\n# self.Adjmatrix[self.transeries[i], minindex] = 1\n \n for i in range(self.demandnum):\n if(np.sum(self.Adjmatrix[self.transeries, self.demandseries[i]]) == 0):\n minindex = np.array(sf.minimumk(self.Dismatrix[self.transeries, self.demandseries[i]], num)) + self.supplynum\n self.Adjmatrix[minindex, self.demandseries[i]] = 1\n# self.Adjmatrix[self.demandseries[i], minindex] = 1\n \n for i in range(self.demandnum):\n minindex = np.array(sf.minimumk(self.Dismatrix[self.demandseries[i], self.demandseries], min(sampleseq[self.demandseries[i]] + 1, self.demandnum))) + self.supplynum + self.trannum\n minindex = minindex[1:-1]\n for j in range(len(minindex)):\n if(self.Adjmatrix[self.demandseries[i], minindex[j]] == 1 or self.Adjmatrix[minindex[j], self.demandseries[i]] == 1):\n continue\n self.Adjmatrix[self.demandseries[i], minindex[j]] = 1",
"def process_barcode_paired_stitched(read_data,\r\n output_bc_fastq,\r\n output_fastq,\r\n bc1_len=6,\r\n bc2_len=6,\r\n rev_comp_bc1=False,\r\n rev_comp_bc2=False,\r\n attempt_read_orientation=False,\r\n forward_primers=None,\r\n reverse_primers=None,\r\n output_bc_not_oriented=None,\r\n fastq_out_not_oriented=None,\r\n switch_bc_order=False):\r\n\r\n header_index = 0\r\n sequence_index = 1\r\n quality_index = 2\r\n\r\n read_seq = read_data[sequence_index]\r\n read_qual = read_data[quality_index]\r\n\r\n found_primer_match = False\r\n # Break from orientation search as soon as a match is found\r\n if attempt_read_orientation:\r\n for curr_primer in forward_primers:\r\n if curr_primer.search(read_data[sequence_index]):\r\n found_primer_match = True\r\n break\r\n if not found_primer_match:\r\n for curr_primer in reverse_primers:\r\n if curr_primer.search(read_data[sequence_index]):\r\n read_seq = str(DNA(read_seq).rc())\r\n read_qual = read_qual[::-1]\r\n found_primer_match = True\r\n break\r\n\r\n if not found_primer_match and attempt_read_orientation:\r\n output_bc = output_bc_not_oriented\r\n output_read = fastq_out_not_oriented\r\n else:\r\n output_bc = output_bc_fastq\r\n output_read = output_fastq\r\n\r\n bc_read1 = read_seq[0:bc1_len]\r\n bc_read2 = read_seq[-bc2_len:]\r\n bc_qual1 = read_qual[0:bc1_len]\r\n bc_qual2 = read_qual[-bc2_len:]\r\n\r\n if rev_comp_bc1:\r\n bc_read1 = str(DNA(bc_read1).rc())\r\n bc_qual1 = bc_qual1[::-1]\r\n if rev_comp_bc2:\r\n bc_read2 = str(DNA(bc_read2).rc())\r\n bc_qual2 = bc_qual2[::-1]\r\n\r\n if switch_bc_order:\r\n bc_read1, bc_read2 = bc_read2, bc_read1\r\n bc_qual1, bc_qual2 = bc_qual2, bc_qual1\r\n\r\n bc_lines = format_fastq_record(read_data[header_index],\r\n bc_read1 + bc_read2,\r\n np.hstack([bc_qual1, bc_qual2]))\r\n output_bc.write(bc_lines)\r\n seq_lines = format_fastq_record(read_data[header_index],\r\n read_seq[bc1_len:-bc2_len], read_qual[bc1_len:-bc2_len])\r\n output_read.write(seq_lines)\r\n\r\n return",
"def connect_fluct(self):\n \n if self.do_run:\n \n for m in self.flucts:\n del m \n del self.flucts\n \n for m in self.noises:\n del m \n del self.noises\n \n self.flucts = []\n self.noises = []\n \n for n in range(self.n_celltypes):\n \n for i, gid in enumerate(self.gidlist[n]): # for every cell in the gidlist \n \n #h.mcell_ran4_init(gid)\n \n noiseRandObj = h.Random() # provides NOISE with random stream\n self.noises.append(noiseRandObj) # has to be set here not inside the nmodl function!! \n \n # print str(gid) + \": \" + str(noiseRandObj.normal(0,1))\n \n fluct = h.Ifluct2(self.cells[n][i].soma(0.5))\n fluct.m = self.fluct_m/nA # [nA]\n fluct.s = self.fluct_s[n]/nA # [nA]\n fluct.tau = self.fluct_tau/ms # [ms]\n self.flucts.append(fluct) # add to list \n self.flucts[-1].noiseFromRandom(self.noises[-1]) # connect random generator!\n \n self.noises[-1].MCellRan4(1, gid+1) # set lowindex to gid+1, set highindex to > 0 \n self.noises[-1].normal(0,1)",
"def mutate_random(DNA,AminoAcid,distance,pdic,rev,header,Random,outputpath):\r\n ##debug vals \r\n start = [] # list of start positions of mutations ( start means first mutation in balanced case)\r\n both = [] # start and end position\r\n fobj2= open(outputpath+header+\"_CompleteLog.txt\",\"a\")\r\n fobj2.write(\"BalancedMutation\"+\"\\t\"+\"NewAA\" + \"\\t\" + \"OldAA\"+\"\\t\"+\"NewAAPos\"+\"\\t\"+\"OldAAPos\" +\"\\t\"+ \"NewDNA\"+\"\\t\"+ \"OldDNA\"+ \"\\t\"+\"NewDNAPos\"+\"\\t\"+\"OldDNAPos\"+\"\\n\")\r\n fobj2.close()\r\n \r\n \r\n # generate start positions for mutation (the samplespace)\r\n samplespace = []\r\n for i in range (2,len(AminoAcid),distance/3):\r\n samplespace.append(i)\r\n \r\n \r\n ##random_modification\r\n if (Random ==1):\r\n r.shuffle(samplespace)\r\n else:\r\n pass\r\n \r\n dna_list = list(DNA)\r\n AminoAcid_list = list(AminoAcid)\r\n \r\n '''the lookup dictionary for the aa triplets '''\r\n lookup_dic = INI.createdic(AminoAcid)\r\n\r\n #gotit indicator if a possibility was found to revert the initial changes (start of mutation)\r\n gotit=False\r\n # stat variables\r\n succ_counter = 0\r\n fail_counter = 0 \r\n skip = 0\r\n \r\n ''' Main loop over the AminoAcid'''\r\n for i in samplespace:\r\n ''' no triplet left --> break '''\r\n if(i+2 >len(AminoAcid)):\r\n print(\"\\t(finished...exceeded length of AA)\")\r\n continue\r\n \r\n ''' AA which is going to be mutated'''\r\n AA = AminoAcid_list[i]\r\n \r\n '''index for dna : i*3 --> AminoAcid --> DNA\r\n #not i*3+3 because i starts at AA 2 since we need a right and left neighbor'''\r\n iprime = i*3\r\n \r\n '''AA and corresponding DNA triplet for the middle AA '''\r\n AA_triplet= AminoAcid_list[i-1]+AminoAcid_list[i]+AminoAcid_list[i+1]\r\n DNA_triplet = DNA[iprime:iprime+3]\r\n\r\n # get temporary list of all mutations. Iterate over it to find best possible substitution\r\n mutationsliste,aaliste = getMutation(AA,DNA_triplet)\r\n \r\n \r\n # isvalidposition returns 1 if the position isforbidden, else 0\r\n val = isvalidposition(pdic, iprime, distance)\r\n if (val ==1):\r\n skip+=1\r\n fobj2= open(outputpath+header+\"_CompleteLog.txt\",\"a\")\r\n fobj2.write(str(0)+\"\\t\"+new_AA_triplet + \"\\t\" + \"' '\"+\"\\t\"+str(i)+\"\\t\"+\"' '\" +\"\\t\"+ new_triplet+\"\\t\"+ \"' '\"+ \"\\t\"+str(iprime+position)+\"\\t\"+\"'(skipped)'\"+\"\\n\")\r\n fobj2.close()\r\n continue\r\n \r\n else:\r\n pass\r\n \r\n\r\n for q,item in enumerate(mutationsliste):\r\n \r\n if gotit==True:\r\n break\r\n else:\r\n pass\r\n \r\n ''' old and new variables for before/after the mutation '''\r\n new_triplet = mutationsliste[q]\r\n new_AA = aaliste[q]\r\n new_N,old_N,position = getdifference(DNA_triplet,new_triplet)\r\n new_AA_triplet = AA_triplet[0]+new_AA+AA_triplet[2]\r\n tempdic = pdic\r\n tempdic[iprime+position]=\"M\"\r\n \r\n if (new_AA_triplet in lookup_dic):\r\n '''templist--> contains all starting positions of the \"new_AA_triplet\" which we want to substitute back '''\r\n templist = lookup_dic[new_AA_triplet]\r\n \r\n \r\n # add potential mutation to dictionary\r\n tempposition = [iprime+position,\"M\"]\r\n for l in range(0,len(templist)):\r\n posi = templist[l]\r\n # i*3 --> protein nach DNA, +3 betrachten IMMER mittlere AA\r\n ''' suitable dna position found? 
'''\r\n if (new_triplet == dna_list[posi*3+3]+dna_list[posi*3+3+1]+dna_list[posi*3+3+2]):\r\n val = isvalidposition(tempdic, posi*3+3+position, distance)\r\n \r\n if (val ==1):\r\n skip+=1\r\n continue\r\n else:\r\n pass\r\n \r\n '''back substitution & do subs on 1st position'''\r\n pdic[posi*3+3+position]=\"R\"\r\n dna_list[posi*3+3+position]= old_N\r\n \r\n pdic[iprime+position]=\"M\"\r\n dna_list[iprime+position]= new_N\r\n \r\n AminoAcid_list[i]= new_AA\r\n AminoAcid_list[posi+1]= AA\r\n \r\n gotit = True\r\n succ_counter+=1\r\n #lookup_dic[new_AA_triplet] = [i for i in lookup_dic[new_AA_triplet] if i!=posi]\r\n lookup_dic[new_AA_triplet].remove(posi)\r\n \r\n '''writing the log file '''\r\n fobj= open(outputpath+header+\"_CompleteLog.txt\",\"a\")\r\n fobj.write(str(1)+\"\\t\"+AA_triplet + \"\\t\" + new_AA_triplet+\"\\t\"+str(i)+\"\\t\"+str(posi) +\"\\t\"+ DNA_triplet+\"\\t\"+ str(new_triplet)+ \"\\t\"+str(iprime+position)+\"\\t\"+str(posi*3+3+position)+\"\\n\")\r\n fobj.close()\r\n \r\n ## statistics\r\n start.append(iprime+position)\r\n both.extend([iprime+position,posi*3+3+position])\r\n break\r\n \r\n # no possible triplet positions for back substitution in lookup_dic \r\n else:\r\n continue\r\n \r\n # after loop \r\n if (gotit==False):\r\n fobj2= open(outputpath+header+\"_CompleteLog.txt\",\"a\")\r\n fobj2.write(str(0)+\"\\t\"+new_AA_triplet + \"\\t\" + \"' '\"+\"\\t\"+str(i)+\"\\t\"+\"' '\" +\"\\t\"+ new_triplet+\"\\t\"+ \"' '\"+ \"\\t\"+str(iprime+position)+\"\\t\"+\"'(tried)'\"+\"\\n\")\r\n fobj2.close()\r\n fail_counter+=1\r\n # reverse substitutions on? (=1) off (=0). If one dont change first mutation in the first place. Else: just change it.. \r\n if (rev==0):\r\n pdic[iprime+position]=\"M\"\r\n dna_list[iprime+position]= new_N\r\n AminoAcid_list[i]= new_AA\r\n start.append(iprime+position)\r\n both.extend([iprime+position]) \r\n elif (gotit==True):\r\n gotit = False\r\n \r\n # stats (INI.savepickle(pdic,header+\"_pdic_e\"))\r\n print(\"\\r\\n########Some stats:########\")\r\n print(\"DNA length:\\t\" + str(len(DNA)))\r\n print(\"max substitutions:\\t\" + str(len(DNA)/distance))\r\n print(\"#Balanced Mutations:\\t\" + str(succ_counter))\r\n \r\n \r\n return (\"\".join(dna_list))",
"def ispcr(args):\n p = OptionParser(ispcr.__doc__)\n p.add_option(\n \"-r\",\n dest=\"rclip\",\n default=1,\n type=\"int\",\n help=\"pair ID is derived from rstrip N chars\",\n )\n opts, args = p.parse_args(args)\n\n if len(args) != 1:\n sys.exit(not p.print_help())\n\n (fastafile,) = args\n ispcrfile = fastafile + \".isPcr\"\n fw = open(ispcrfile, \"w\")\n\n N = opts.rclip\n strip_name = lambda x: x[:-N] if N else str\n\n npairs = 0\n fastaiter = SeqIO.parse(fastafile, \"fasta\")\n for a, b in grouper(fastaiter, 2):\n\n aid, bid = [strip_name(x) for x in (a.id, b.id)]\n assert aid == bid, \"Name mismatch {0}\".format((aid, bid))\n\n print(\"\\t\".join((aid, str(a.seq), str(b.seq))), file=fw)\n npairs += 1\n\n fw.close()\n logging.debug(\"A total of {0} pairs written to `{1}`.\".format(npairs, ispcrfile))",
"def improve_best_RISCC_read(self, seq, new_position, N_errors=None, read_count=1, max_distance=MAX_POSITION_DISTANCE):\n # if there are more than one current reads, you're not using improve_best_RISCC_read consistently!\n if len(self.RISCC_genome_side_aligned_reads) > 1:\n raise MutantError(\"Don't try using the improve_best_RISCC_read when keeping more than one read!\")\n # if decided to replace, discard old genome-side read dict and make new one from just the current read data.\n if self._decide_if_replace_read(new_position, max_distance):\n self.RISCC_genome_side_aligned_reads, self.RISCC_genome_side_unaligned_reads = {}, {}\n self.add_RISCC_read(seq, new_position, N_errors, read_count)\n # TODO make this count unaligned/confirming/non-confirming reads, too, instead of keeping all these counts as functions that read the actual mutant data, which will be missing in this case? I did something like that in mutant_Carette.py.",
"def ssBIRCH(self, n_clusters):\n self.classifier = \"Spectral-Spatial-BIRCH\"\n print \"TODO\"",
"def run(self, max_clusters):\n sample_dist_matrix = self.matrix_dist()\n self.link.print_link()\n first_clus = self.clusters[0] # initialize first cluster to merge into\n second_clus = self.clusters[0] # initialize second cluster to merge\n max_samples_dist = max(sample_dist_matrix.values())\n # initialize minimun distance between two samples\n min_dist = max_samples_dist\n while len(self.clusters) > max_clusters: # clustering loop\n for clus in self.clusters: # iterate over every cluster\n for other_clus in self.clusters: # iterate over other clusters\n if clus.c_id > other_clus.c_id: # avoid duplicates and make sure to pass correct key to dictionary\n # compute distance between two clusters according to current link\n clus_dist = self.link.compute(clus, other_clus, sample_dist_matrix)\n if clus_dist < min_dist: # keep the minimum distance and its clusters\n min_dist = clus_dist\n first_clus = other_clus\n second_clus = clus\n self.clusters.remove(second_clus) # remove the cluster that's getting merged from clusters list\n first_clus.merge(second_clus) # merge the cluster with higher id into the other\n min_dist = max_samples_dist # restore high distance in order to start the search again\n\n sum_sil = self.compute_summery_silhouette(sample_dist_matrix)\n # print results\n for clus in self.clusters:\n clus.print_details(sum_sil[clus.c_id])\n print(f'Whole data: silhouette = {sum_sil[0]}, RI = {self.compute_rand_index()}')",
"def proc_dataset_v1(write=False):\n \n path = load_config()\n M = pd.read_csv(path['metadata_file'])\n T = pd.read_csv(path['rna_file'])\n mCH = pd.read_csv(path['mCH_file'])\n CH = pd.read_csv(path['CH_file'])\n\n def format_df(df):\n \"\"\"The inputs are genes x cells. Transpose data and rename columns\"\"\"\n df = df.transpose()\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop('gene', inplace=True)\n df.index.rename('sample_id', inplace=True)\n return df\n\n T = format_df(T)\n mCH = format_df(mCH)\n CH = format_df(CH)\n\n #Update metadata\n M = pd.read_csv(path['metadata_file'])\n M.rename(columns={'Unnamed: 0': 'sample_id'}, inplace=True)\n M.set_index(keys='sample_id', drop=True, inplace=True)\n\n #Sort cells by metadata\n sorted_index = M.sort_values(by='SubClusterAnno').index\n M = M.loc[sorted_index]\n T = T.loc[sorted_index]\n mCH = mCH.loc[sorted_index]\n CH = CH.loc[sorted_index]\n\n assert np.array_equal(CH.columns, mCH.columns), \"Genes are not in the same order\"\n assert np.array_equal(T.columns, mCH.columns), \"Genes are not in the same order\"\n assert M.index.equals(T.index), \"Cells are not in the same order\"\n assert M.index.equals(CH.index), \"Cells are not in the same order\"\n assert M.index.equals(mCH.index), \"Cells are not in the same order\"\n\n # CPM-normalize counts\n X = T.values.astype(np.float32)\n X = (X/np.sum(X, axis=1, keepdims=True))*1e6\n X = np.log1p(X)\n T = pd.DataFrame(data=X, index=T.index, columns=T.columns)\n print('Completed T processing')\n\n # For methylation data\n X = CH.values.astype(float)\n Y = mCH.values.astype(float)\n Z = np.log1p(Y) - np.log1p(X)\n E = pd.DataFrame(data=Z, index=mCH.index, columns=mCH.columns)\n print('Completed E processing')\n\n # select genes based on variance in log normalized CPM values in T\n def calc_highvar_genes(df):\n vars = np.var(df.values, axis=0)\n order_vars = np.argsort(-vars) # descending order\n sorted_highvar_genes = df.columns.values[order_vars]\n return sorted_highvar_genes\n\n data = {'sorted_highvar_T_genes': calc_highvar_genes(T),\n 'sorted_highvar_E_genes': calc_highvar_genes(E)}\n print('Completed finding high variance features')\n\n if write:\n feather.write_dataframe(T, path['data_dir'] / 'T_dat.feather')\n feather.write_dataframe(E, path['data_dir'] / 'E_dat.feather')\n feather.write_dataframe(M, path['data_dir'] / 'Meta.feather')\n sio.savemat(path['data_dir'] / 'highvar_genes.mat', data)\n return T, E, M, data",
"def conn_ii(data, y, roi=None, times=None, mi_type='cc', gcrn=True, dt=1,\n verbose=None, **kw_links):\n set_log_level(verbose)\n\n # _________________________________ INPUTS ________________________________\n # inputs conversion\n kw_links.update({'directed': False, 'net': False})\n data, cfg = conn_io(\n data, y=y, times=times, roi=roi, agg_ch=False, win_sample=None,\n name='II', verbose=verbose, kw_links=kw_links\n )\n\n # extract variables\n x, attrs = data.data, cfg['attrs']\n y, roi, times = data['y'].data, data['roi'].data, data['times'].data\n x_s, x_t = cfg['x_s'], cfg['x_t']\n roi_p, n_pairs = cfg['roi_p'], len(x_s)\n\n # build the indices when using multi-variate mi\n assert dt >= 1\n idx = np.mgrid[0:len(times) - dt + 1, 0:dt].sum(0)\n times = times[idx].mean(1)\n _, n_roi, n_times = len(y), len(roi), len(times)\n\n # copnorm the data\n if gcrn:\n logger.info(\" Apply the Gaussian Copula Rank Normalization\")\n x = copnorm_nd(x, axis=0)\n if mi_type == 'cc':\n y = copnorm_nd(y, axis=0)\n\n # transpose the data to be (n_roi, n_times, 1, n_trials)\n x = np.transpose(x, (1, 2, 0))\n\n logger.info(f\"Compute II on {n_pairs} connectivity pairs\")\n\n # __________________________________ II __________________________________\n # optional argument of gcmi\n kw_mi = CONFIG['KW_GCMI'].copy()\n kw_mi['minorm'] = False\n\n # compute mi on each node of the network\n pbar = ProgressBar(range(n_roi + n_pairs),\n mesg='Estimating MI on each node I(X;S)')\n\n mi_node = np.zeros((n_roi, n_times), dtype=float)\n for n_r in range(n_roi):\n mi_node[n_r, :] = _conn_mi(x[n_r, idx, :], y, mi_type, **kw_mi)\n pbar.update_with_increment_value(1)\n\n pbar._tqdm.desc = 'Estimating total information I(X,Y;S)'\n infotot = np.zeros((n_pairs, n_times))\n for n_p, (s, t) in enumerate(zip(x_s, x_t)):\n _x_s, _x_t = x[s, ...], x[t, ...]\n\n # total information estimation\n x_st = np.concatenate((_x_s[idx, ...], _x_t[idx, ...]), axis=1)\n infotot[n_p, :] = _conn_mi(x_st, y, mi_type, **kw_mi)\n\n pbar.update_with_increment_value(1)\n\n # interaction information\n interinfo = infotot - mi_node[x_s, :] - mi_node[x_t, :]\n\n # _______________________________ OUTPUTS _________________________________\n attrs['mi_type'] = mi_type\n attrs['gcrn'] = gcrn\n attrs['dt'] = dt\n attrs['unit'] = 'Bits'\n interinfo = xr.DataArray(\n interinfo, dims=('roi', 'times'), coords=(roi_p, times), name='II',\n attrs=check_attrs(attrs)\n )\n\n return interinfo",
"def _align_paired_end_reads(self):\n read_aligner = ReadAligner(self._args.segemehl_bin, self._args.progress)\n if self._file_needs_to_be_created(self._pathcreator.index_path):\n read_aligner.build_index(\n self._pathcreator.ref_seq_path_list,\n self._pathcreator.index_path,\n )\n for read_path_pair, output_path, nomatch_path in zip(\n self._pathcreator.processed_read_path_pairs,\n self._pathcreator.read_alignment_bam_paths,\n self._pathcreator.unaligned_reads_paths,\n ):\n if not self._file_needs_to_be_created(output_path):\n continue\n read_aligner.run_alignment(\n read_path_pair,\n self._pathcreator.index_path,\n self._pathcreator.ref_seq_path_list,\n output_path,\n nomatch_path,\n int(self._args.processes),\n int(self._args.segemehl_accuracy),\n float(self._args.segemehl_evalue),\n self._args.split,\n paired_end=True,\n )",
"def cluster_mols(rd_mols, mols, target):\n id_mols = [x.pk for x in mols]\n out_data = run_lig_cluster(rd_mols, id_mols)\n for clust_type in out_data:\n for cluster in out_data[clust_type]:\n # look for molgroup with same coords - need to implement tolerance?\n mol_group = search_for_molgroup_by_coords(coords=[out_data[clust_type][cluster][\"centre_of_mass\"][0],\n out_data[clust_type][cluster][\"centre_of_mass\"][1],\n out_data[clust_type][cluster][\"centre_of_mass\"][2]],\n target=target.title)\n if not mol_group:\n mol_group = MolGroup()\n if clust_type != \"c_of_m\":\n mol_group.group_type = \"PC\"\n else:\n mol_group.group_type = \"MC\"\n mol_group.target_id = target\n mol_group.x_com = out_data[clust_type][cluster][\"centre_of_mass\"][0]\n mol_group.y_com = out_data[clust_type][cluster][\"centre_of_mass\"][1]\n mol_group.z_com = out_data[clust_type][cluster][\"centre_of_mass\"][2]\n mol_group.description = clust_type\n mol_group.save()\n for mol_id in out_data[clust_type][cluster][\"mol_ids\"]:\n if mol_id not in [a['id'] for a in mol_group.mol_id.values()]:\n this_mol = Molecule.objects.get(id=mol_id)\n mol_group.mol_id.add(this_mol)",
"def interconnect(self):\n assert len(self._readdata) == len(self._readdatavalid)\n ndevs = len(self._readdata)\n av = self\n\n @always_seq(self.clk.posedge, reset=self.reset)\n def rtl_or_combine():\n rddats, valids, waits = 0, 0, 0\n for ii in range(ndevs):\n rddats = rddats | av._readdata[ii]\n valids = valids | av._readdatavalid[ii]\n waits = waits | av._waitrequest[ii]\n\n av.readdata.next = rddats\n av.readdatavalid.next = valids\n av.waitrequest.next = waits\n\n return rtl_or_combine",
"def add_mutant(self, mutant, overwrite=False):\n if mutant.IB in self._mutants_by_IB.keys() and not overwrite:\n raise MutantError(\"Can't add mutant that would overwrite previous mutant with same IB! \"\n +\"Pass overwrite=True argument if you want to overwrite.\")\n self._mutants_by_IB[mutant.IB] = mutant",
"def add_clusterings(self, clustering):\n self.clustering.append(clustering)",
"def update_cds(self, line, cds):\n args = self.extract_cds_args(line)\n cds.add_indices(args['indices'])\n cds.add_phase(args['phase'])\n cds.add_identifier(args['identifier'])\n if 'score' in args:\n cds.add_score(args['score'])",
"def SecondaryComplex_to_Bid_Alternate():\n Parameter('RIP3_0' , 2.0e4) # molecules per cell\n Parameter('BidK_0' , 5.0e3) # molecules per cell\n \n alias_model_components()\n Initial(RIP3(bRHIM = None, state = 'unmod'), RIP3_0) # RIP3\n Initial(BidK(bf = None), BidK_0)\n # ==============================================================\n # Assembly of Complex II, Riptosome and Necrosome\n # --------------------------------------------------------------\n # FADD + TRADD[active] <-> FADD:TRADD[active]\n # FADD + RIP1 <-> FADD:RIP1\n # TRADD + RIP1 <-> TRADD:RIP1\n \n # CD95_to_secondary complex contains the rules for recruitment of proC8 to FADD.\n # (RIP1 or TRADD):FADD + proC8 <-> (RIP1 or TRADD):FADD:proC8\n # (RIP1 or TRADD):FADD:proC8 + proC8 <-> (RIP1 or TRADD):FADD:proC8:proC8\n # (RIP1 or TRADD):FADD:proC8 + flip_L <-> (RIP1 or TRADD):FADD:proC8:flip_L\n # (RIP1 or TRADD):FADD:proC8 + flip_S <-> (RIP1 or TRADD):proC8:flip_S\n \n # RIP1%ProC8%ProC8(in a complex) >> RIP1[trunc] + C8 + (remains of the complex)\n # RIP1%ProC8%cFlip[L](in a complex) >> RIP1[trunc] + remains of the complex)\n # RIP1%cFlip[S](in a complex) + RIP3 >> RIP1:RIP3(in a complex, i.e. necrosome)\n \n # RIP1 + C8 <-> RIP1:C8 >> RIP1[trunc] + C8\n # RIP3 + C8 <-> RIP3:C8 >> RIP3[trunc] + C8\n # Bid + C8 <-> Bid:C8 >> Bid[trunc] + C8\n \n # -------------Assembling Complex II-----------------\n Parameter('Ka_RIP1_FADD', 1e-7) # Biochemica et Biophysica Acta 1834(2013) 292-300\n Parameter('Kd_RIP1_FADD', 1e-8) # Biochemica et Biophysica Acta 1834(2013) 292-300\n alias_model_components()\n \n #Assembling TRADD dependent Complex II\n bind(FADD(bDD = None, bDED1 = None, bDED2 = None), 'bDD', TRADD(bDD1=None, state = 'active'), 'bDD1', [1e-6, 1e-3])\n bind(FADD(bDD = None, bDED1 = None, bDED2 = None), 'bDD', RIP1(bDD = None, state = 'deub'), 'bDD', [1e-8, 1e-1])\n \n #Recruiting RIP1 to secondary complex and TRADD dependent Complex II\n bind(FADD(bDD = None, bDED1 = ANY, bDED2 = ANY), 'bDD', RIP1(bDD=None, bRHIM = None, state = 'unmod'), 'bDD', [Ka_RIP1_FADD, Kd_RIP1_FADD])\n bind(FADD(bDD = None, bDED1 = ANY, bDED2 = ANY), 'bDD', RIP1(bDD=None, bRHIM = None, state = 'deub'), 'bDD', [Ka_RIP1_FADD, Kd_RIP1_FADD])\n \n #bind(TRADD(bDD2 = None, state = 'active'),'bDD2', RIP1(bDD = None, bRHIM = None, state = 'unmod'), 'bDD', [1e-6, 1e-1])\n bind(TRADD(bDD2 = None, state = 'active'),'bDD2', RIP1(bDD = None, bRHIM = None, state = 'deub'), 'bDD', [1e-6, 1e-1])\n # For simplicity, I am neglecting the binary intereaction that occurs between proC8 and RIP1.\n # Binding of proC8 and c-flip to FADD is accomplished in CD95_to_Secondary complex.\n \n #--------------RIP1 Truncation reactions-------------\n #---Truncation by C8---------------------------------\n RIP_CIIA_proC8 = RIP1(bDD=ANY, bRHIM = None, state = 'unmod')% TRADD(bDD2 = None, bDD1 = ANY, state = 'active') % FADD(bDD=ANY, bDED1=ANY, bDED2=ANY)%proC8(bDED=ANY)%proC8(bDED=ANY)\n RIP_CIIA_proC8_alt = RIP1(bDD=ANY, bRHIM = None, state = 'deub')% TRADD(bDD2 = None, bDD1 = ANY, state = 'active') % FADD(bDD=ANY, bDED1=ANY, bDED2=ANY)%proC8(bDED=ANY)%proC8(bDED=ANY)\n \n RIP_CIIB_proC8 = RIP1(bDD=ANY, bRHIM = None, state = 'unmod')% FADD(bDD=ANY, bDED1=ANY, bDED2=ANY)%proC8(bDED=ANY)%proC8(bDED=ANY)\n RIP_CIIB_proC8_alt = RIP1(bDD=ANY, bRHIM = None, state = 'deub')% FADD(bDD=ANY, bDED1=ANY, bDED2=ANY)%proC8(bDED=ANY)%proC8(bDED=ANY)\n \n CIIA = TRADD(bDD2 = None, bDD1 = ANY, state = 'active') % FADD(bDD=ANY, bDED1=None, bDED2=None)\n \n Rule('RIP1_truncation_CIIA', RIP_CIIA_proC8 
>> CIIA + C8(bf = None, state = 'A') + RIP1(bDD=None, bRHIM = None, state = 'trunc'), Parameter('k11',1e-1))\n Rule('RIP1_truncation_CIIA_alt', RIP_CIIA_proC8_alt >> CIIA + C8(bf = None, state = 'A') + RIP1(bDD=None, bRHIM = None, state = 'trunc'), Parameter('k11a',1e-6))\n \n Rule('RIP1_truncation_CIIB', RIP_CIIB_proC8 >> FADD(bDD=None, bDED1=None, bDED2=None)+ C8(bf = None, state = 'A') + RIP1(bDD=None, bRHIM = None, state = 'trunc'), Parameter('k12', 1e-1))\n Rule('RIP1_truncation_CIIB_alt', RIP_CIIB_proC8_alt >> FADD(bDD=None, bDED1=None, bDED2=None)+ C8(bf = None, state = 'A') + RIP1(bDD=None, bRHIM = None, state = 'trunc'), Parameter('k12a', 1e-6))\n \n catalyze_state(C8(bf = None, state = 'A'), 'bf', RIP1(bDD=None), 'bRHIM', 'state', 'unmod', 'trunc', [1e-6, 1e-3, 1e-1])\n catalyze_state(C8(bf = None, state = 'A'), 'bf', RIP1(bDD=None), 'bRHIM', 'state', 'deub', 'trunc', [1e-6, 1e-3, 1e-1])\n \n #---Truncation by proC8:cFlip_L---------------------\n Riptosome_FADD = RIP1(bDD=1, bRHIM = None, state = 'unmod')%FADD(bDD=1, bDED1=ANY, bDED2=ANY)%proC8(bDED = ANY)%flip_L(bDED = ANY)\n Riptosome_FADD_alt = RIP1(bDD=1, bRHIM = None, state = 'deub')%FADD(bDD=1, bDED1=ANY, bDED2=ANY)%proC8(bDED = ANY)%flip_L(bDED = ANY)\n \n Riptosome_TRADD = RIP1(bDD=1, bRHIM = None, state = 'unmod')%TRADD(bDD1=ANY, bDD2=1)%FADD(bDD=ANY, bDED1=ANY, bDED2=ANY)%proC8(bDED = ANY)%flip_L(bDED = ANY)\n Riptosome_TRADD_alt = RIP1(bDD=1, bRHIM = None, state = 'deub')%TRADD(bDD1=ANY, bDD2=1)%FADD(bDD=ANY, bDED1=ANY, bDED2=ANY)%proC8(bDED = ANY)%flip_L(bDED = ANY)\n \n Rule('RIP1_truncation_FADD', Riptosome_FADD >> FADD(bDD=None, bDED1=ANY, bDED2=ANY)%proC8(bDED = ANY)%flip_L(bDED = ANY) + RIP1(bDD=None, bRHIM = None, state = 'trunc'), Parameter('k13', 1e-1))\n Rule('RIP1_truncation_FADD_alt', Riptosome_FADD_alt >> FADD(bDD=None, bDED1=ANY, bDED2=ANY)%proC8(bDED = ANY)%flip_L(bDED = ANY) + RIP1(bDD=None, bRHIM = None, state = 'trunc'), Parameter('k13a', 1e-1))\n Rule('RIP1_truncation_TRADD', Riptosome_TRADD >> FADD(bDD=None, bDED1=ANY, bDED2=ANY)%proC8(bDED = ANY)%flip_L(bDED = ANY) + RIP1(bDD=None, bRHIM = None, state = 'trunc'), Parameter('k14', 10))\n Rule('RIP1_truncation_TRADD_alt', Riptosome_TRADD_alt >> FADD(bDD=None, bDED1=ANY, bDED2=ANY)%proC8(bDED = ANY)%flip_L(bDED = ANY) + RIP1(bDD=None, bRHIM = None, state = 'trunc'), Parameter('k14a', 10))\n \n # -------------RIP3 Binding Interactions----------------\n Ripto1_Flip_S = FADD(bDD=ANY, bDED1=ANY, bDED2=ANY) % RIP1(bDD=ANY, bRHIM=None, state='unmod') % TRADD(bDD1=ANY, bDD2=ANY, state='active') % flip_S(bDED=ANY) % proC8(bDED=ANY)\n Ripto2_Flip_S = FADD(bDD=ANY, bDED1=ANY, bDED2=ANY) % RIP1(bDD=ANY, bRHIM=None, state='unmod') % flip_S(bDED=ANY) % proC8(bDED=ANY)\n Necrosome1 = FADD(bDD=ANY, bDED1=ANY, bDED2=ANY) % RIP1(bDD=ANY, bRHIM=6, state='unmod') % TRADD(bDD1=ANY, bDD2=ANY, state='active') % flip_S(bDED=ANY) % proC8(bDED=ANY) % RIP3(bRHIM= 6, state = 'unmod')\n Necrosome2 = FADD(bDD=ANY, bDED1=ANY, bDED2=ANY) % RIP1(bDD=ANY, bRHIM=5, state='unmod') % flip_S(bDED=ANY) % proC8(bDED=ANY) % RIP3(bRHIM= 5, state = 'unmod')\n \n Ripto1_Flip_S_alt = FADD(bDD=ANY, bDED1=ANY, bDED2=ANY) % RIP1(bDD=ANY, bRHIM=None, state='deub') % TRADD(bDD1=ANY, bDD2=ANY, state='active') % flip_S(bDED=ANY) % proC8(bDED=ANY)\n Ripto2_Flip_S_alt = FADD(bDD=ANY, bDED1=ANY, bDED2=ANY) % RIP1(bDD=ANY, bRHIM=None, state='deub') % flip_S(bDED=ANY) % proC8(bDED=ANY)\n Necrosome1_alt = FADD(bDD=ANY, bDED1=ANY, bDED2=ANY) % RIP1(bDD=ANY, bRHIM=6, state='deub') % TRADD(bDD1=ANY, bDD2=ANY, 
state='active') % flip_S(bDED=ANY) % proC8(bDED=ANY) % RIP3(bRHIM= 6, state = 'unmod')\n Necrosome2_alt = FADD(bDD=ANY, bDED1=ANY, bDED2=ANY) % RIP1(bDD=ANY, bRHIM=5, state='deub') % flip_S(bDED=ANY) % proC8(bDED=ANY) % RIP3(bRHIM= 5, state = 'unmod')\n \n Rule('RIP3_binding1', Ripto1_Flip_S + RIP3(bRHIM= None, state = 'unmod') <> Necrosome1, Parameter('k15', 1e-6), Parameter('k16', 1e-3))\n Rule('RIP3_binding2', Ripto2_Flip_S + RIP3(bRHIM= None, state = 'unmod') <> Necrosome2, Parameter('k17', 1e-6), Parameter('k18', 1e-3))\n Rule('RIP3_binding1_alt', Ripto1_Flip_S_alt + RIP3(bRHIM= None, state = 'unmod') <> Necrosome1_alt, Parameter('k15a', 1e-6), Parameter('k16a', 1e-3))\n Rule('RIP3_binding2_alt', Ripto2_Flip_S_alt + RIP3(bRHIM= None, state = 'unmod') <> Necrosome2_alt, Parameter('k17a', 1e-6), Parameter('k18a', 1e-3))\n \n #RIP3 Truncation\n catalyze_state(C8(bf = None, state = 'A'), 'bf', RIP3(), 'bRHIM', 'state', 'unmod', 'trunc', [1e-6, 1e-3, 1e-1])\n \n #-------------Bid Interactions--------------------------\n # Bid Phosphorylation and Truncation\n catalyze_state(BidK(), 'bf', Bid(), 'bf', 'state', 'U', 'po4', [1e-6, 1e-3, 1e-1])\n catalyze_state(C8(bf = None, state = 'A'), 'bf', Bid(), 'bf', 'state', 'U', 'T', [1.04e-5, 0.005, 0.1])\n \n # Bid-PO4 sequestering RIP1\n bind(RIP1(bDD = None, bRHIM = None, state = 'unmod'), 'bRHIM', Bid(bf = None, state = 'po4'), 'bf', [1e-6, 1e-3])\n bind(RIP1(bDD = None, bRHIM = None, state = 'deub'), 'bRHIM', Bid(bf = None, state = 'po4'), 'bf', [1e-6, 1e-3])",
"def read_mir(self, filepath, isource=None, irec=None, isb=None, corrchunk=None):\n # Use the mir_parser to read in metadata, which can be used to select data.\n mir_data = mir_parser.MirParser(filepath)\n\n # Select out data that we want to work with.\n if isource is None:\n isource = mir_data.in_read[\"isource\"][0]\n if irec is None:\n irec = mir_data.bl_read[\"irec\"][0]\n if isb is None:\n isb = mir_data.bl_read[\"isb\"][0]\n if corrchunk is None:\n corrchunk = mir_data.sp_read[\"corrchunk\"][0]\n\n mir_data.use_in = mir_data.in_read[\"isource\"] == isource\n mir_data.use_bl = np.logical_and(\n np.logical_and(\n mir_data.bl_read[\"isb\"] == isb, mir_data.bl_read[\"ipol\"] == 0\n ),\n mir_data.bl_read[\"irec\"] == irec,\n )\n mir_data.use_sp = mir_data.sp_read[\"corrchunk\"] == corrchunk\n\n # Load up the visibilities into the MirParser object. This will also update the\n # filters, and will make sure we're looking at the right metadata.\n mir_data._update_filter()\n if len(mir_data.in_data) == 0:\n raise IndexError(\"No valid records matching those selections!\")\n\n mir_data.load_data(load_vis=True, load_raw=True)\n\n # Create a simple array/list for broadcasting values stored on a\n # per-intergration basis in MIR into the (tasty) per-blt records in UVDATA.\n bl_in_maparr = [mir_data.inhid_dict[idx] for idx in mir_data.bl_data[\"inhid\"]]\n\n # Derive Nants_data from baselines.\n self.Nants_data = len(\n np.unique(\n np.concatenate((mir_data.bl_data[\"iant1\"], mir_data.bl_data[\"iant2\"]))\n )\n )\n\n self.Nants_telescope = 8\n self.Nbls = int(self.Nants_data * (self.Nants_data - 1) / 2)\n self.Nblts = len(mir_data.bl_data)\n self.Nfreqs = int(mir_data.sp_data[\"nch\"][0])\n self.Npols = 1 # todo: We will need to go back and expand this.\n self.Nspws = 1 # todo: We will need to go back and expand this.\n self.Ntimes = len(mir_data.in_data)\n self.ant_1_array = mir_data.bl_data[\"iant1\"] - 1\n self.ant_2_array = mir_data.bl_data[\"iant2\"] - 1\n self.antenna_names = [\n \"Ant 1\",\n \"Ant 2\",\n \"Ant 3\",\n \"Ant 4\",\n \"Ant 5\",\n \"Ant 6\",\n \"Ant 7\",\n \"Ant 8\",\n ]\n self.antenna_numbers = np.arange(8)\n\n # Prepare the XYZ coordinates of the antenna positions.\n antXYZ = np.zeros([self.Nants_telescope, 3])\n for idx in range(self.Nants_telescope):\n if (idx + 1) in mir_data.antpos_data[\"antenna\"]:\n antXYZ[idx] = mir_data.antpos_data[\"xyz_pos\"][\n mir_data.antpos_data[\"antenna\"] == (idx + 1)\n ]\n\n # Get the coordinates from the entry in telescope.py\n lat, lon, alt = get_telescope(\"SMA\")._telescope_location.lat_lon_alt()\n self.telescope_location_lat_lon_alt = (lat, lon, alt)\n # Calculate antenna postions in EFEF frame. 
Note that since both\n # coordinate systems are in relative units, no subtraction from\n # telescope geocentric position is required , i.e we are going from\n # relRotECEF -> relECEF\n self.antenna_positions = uvutils.ECEF_from_rotECEF(antXYZ, lon)\n self.baseline_array = self.antnums_to_baseline(\n self.ant_1_array, self.ant_2_array, attempt256=False\n )\n\n fsky = mir_data.sp_data[\"fsky\"][0] * 1e9 # GHz -> Hz\n fres = mir_data.sp_data[\"fres\"][0] * 1e6 # MHz -> Hz\n nch = mir_data.sp_data[\"nch\"][0]\n\n self.channel_width = fres\n # Need the half-channel offset below because of the weird way\n # in which MIR identifies the \"center\" of the band\n self.freq_array = fsky + fres * (np.arange(nch) - (nch / 2 - 0.5))\n\n # TODO: This will need to be fixed when spw > 1\n self.freq_array = np.reshape(self.freq_array, (1, -1))\n self.history = \"Raw Data\"\n self.instrument = \"SWARM\"\n\n # todo: This won't work when we have multiple spectral windows.\n self.integration_time = mir_data.sp_data[\"integ\"]\n\n # todo: Using MIR V3 convention, will need to be V2 compatible eventually.\n self.lst_array = (\n mir_data.in_data[\"lst\"][bl_in_maparr].astype(float) + (0.0 / 3600.0)\n ) * (np.pi / 12.0)\n\n # todo: We change between xx yy and rr ll, so we will need to update this.\n self.polarization_array = np.asarray([-5])\n\n self.spw_array = np.asarray([0])\n\n self.telescope_name = \"SMA\"\n time_array_mjd = mir_data.in_read[\"mjd\"][bl_in_maparr]\n self.time_array = time_array_mjd + 2400000.5\n\n # Need to flip the sign convention here on uvw, since we use a1-a2 versus the\n # standard a2-a1 that uvdata expects\n self.uvw_array = (-1.0) * np.transpose(\n np.vstack(\n (mir_data.bl_data[\"u\"], mir_data.bl_data[\"v\"], mir_data.bl_data[\"w\"])\n )\n )\n\n # todo: Raw data is in correlation coefficients, we may want to convert to Jy.\n self.vis_units = \"uncalib\"\n\n self._set_phased()\n\n sou_list = mir_data.codes_data[mir_data.codes_data[\"v_name\"] == b\"source\"]\n\n self.object_name = sou_list[sou_list[\"icode\"] == isource][\"code\"][0].decode(\n \"utf-8\"\n )\n\n self.phase_center_ra = mir_data.in_data[\"rar\"][0]\n self.phase_center_dec = mir_data.in_data[\"decr\"][0]\n self.phase_center_epoch = mir_data.in_data[\"epoch\"][0]\n\n self.phase_center_epoch = float(self.phase_center_epoch)\n self.antenna_diameters = np.zeros(self.Nants_telescope) + 6\n self.blt_order = (\"time\", \"baseline\")\n self.data_array = np.reshape(\n np.array(mir_data.vis_data),\n (self.Nblts, self.Nspws, self.Nfreqs, self.Npols),\n )\n # Don't need the data anymore, so drop it\n mir_data.unload_data()\n self.flag_array = np.zeros(self.data_array.shape, dtype=bool)\n self.nsample_array = np.ones(self.data_array.shape, dtype=np.float32)",
"def add_communites(self):\n\n query = '''\n MATCH (c1:)-[r:INTERACTS]->(c2:)\n RETURN c1.name, c2.name, r.weight AS weight\n '''\n ig = IGraph.TupleList(self.graph.run(query), weights=True)\n\n clusters = IGraph.community_walktrap(ig, weights=\"weight\").as_clustering()\n\n nodes = [{\"name\": node[\"name\"]} for node in ig.vs]\n for node in nodes:\n idx = ig.vs.find(name=node[\"name\"]).index\n node[\"community\"] = clusters.membership[idx]\n\n write_clusters_query = '''\n UNWIND {nodes} AS n\n MATCH (c:) WHERE c.name = n.name\n SET c.community = toInt(n.community)\n '''\n\n self.graph.run(write_clusters_query, nodes=nodes)",
"def insert_bicluster_info( self, db, db_file, run2id, row2id, col2id ):\n\t\t# Get all biclusters from cmonkey run\n\t\tconn = sqlite3.connect(db_file)\n\t \tc = conn.cursor()\n\t \tc.execute(\"SELECT max(iteration) FROM cluster_stats;\")\n\t \tlast_run = c.fetchone()[0] # i think there is an indexing problem in cMonkey python!! \n\t \tw = (last_run,)\n\t \tc.execute(\"SELECT cluster FROM cluster_stats WHERE iteration = ?;\",w)\n\t\tbiclusters = [self.assemble_bicluster_info_single( db, db_file, c, last_run, i[0], run2id, row2id, col2id ) for i in c.fetchall()]\n\t\tbicluster_info_collection = self.db.bicluster_info\n\n\t\t# Check whether documents are already present in the collection before insertion\n\t\tif bicluster_info_collection.count() > 0:\n\t\t\td_f = filter( None, [ self.check4existence( bicluster_info_collection, i, \"run_id\", i[\"run_id\"], \"cluster\", i[\"cluster\"] ) for i in biclusters ] )\n\t\telse:\n\t\t\td_f = biclusters\n\t\t\n\n\t\tprint \"%s new records to write\" % len( d_f )\n\n\t\tif len(d_f) > 0:\n\t\t\tbicluster_info_collection.insert( d_f )\n\n\t\treturn bicluster_info_collection"
] |
[
"0.5384356",
"0.50701267",
"0.50394577",
"0.49323264",
"0.48745298",
"0.4874417",
"0.4799774",
"0.47634873",
"0.47457853",
"0.47029266",
"0.46941206",
"0.46837968",
"0.46686867",
"0.46466538",
"0.4642227",
"0.46303773",
"0.46147153",
"0.46107095",
"0.46027693",
"0.46007156",
"0.45975694",
"0.4596363",
"0.45772687",
"0.45677117",
"0.45584443",
"0.45488274",
"0.45468804",
"0.45406237",
"0.45402387",
"0.4532312"
] |
0.6346291
|
0
|
Set self.gene_annotation_header and self.total_genes_in_genome based on inputs. Set to blank if all inputs are blank; otherwise to the single unique nonblank value on the list; if there are multiple distinct nonblank values, raise exception.
|
def _set_joint_genome_info(self, gene_annotation_header_values, total_genes_in_genome_values):
# Merge any pieces of global information that's not per-dataset
self.gene_annotation_header = merge_values_to_unique(gene_annotation_header_values, blank_value=[], convert_for_set=tuple,
value_name='gene_annotation_header', context='datasets in multi-dataset')
self.total_genes_in_genome = merge_values_to_unique(total_genes_in_genome_values, blank_value=0,
value_name='total_genes_in_genome', context='datasets in multi-dataset')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def fill_empty_geomean_vals(product):\n\theaders = ['chemaxon', 'epi', 'test', 'sparc', 'geomean', 'measured']\n\tfor prop_data_list in product['data']:\n\t\tif len(prop_data_list) < len(headers):\n\t\t\tprop_data_list.append('')\n\treturn product",
"def coerce_empty_numeric_values(self):\n if \"numeric\" in self.annot_types:\n numeric_columns = self.file.xs(\n \"numeric\", axis=1, level=1, drop_level=False\n ).columns.tolist()\n self.file[numeric_columns].replace(\"\", np.nan, inplace=True)",
"def apply_check_annotation_list(row):\n if len(row) == 0:\n return(['blank'])\n else:\n return(row)",
"def mark_empty_annotations(df):\n\n def apply_check_annotation_list(row):\n \"\"\" apply function to test if len(annotations) = 0 \"\"\"\n if len(row) == 0:\n return(['blank'])\n else:\n return(row)\n\n df['annotation_list_with_empty'] = df['annotation_list'].apply(lambda row:apply_check_annotation_list(row))\n return(df)",
"def clear(self):\r\n self.firstname_value.set('')\r\n self.lastname_value.set('')\r\n self.id_number_value.set('')\r\n self.country_value.set('')",
"def clearData(self):\r\n self.title.setVal(\"\"),\r\n self.first.setVal(\"\"),\r\n self.middle.setVal(\"\"),\r\n self.last.setVal(\"\"),\r\n self.suffix.setVal(\"\"),\r\n self.phone.setVal(\"\"),\r\n self.ext.setVal(\"\"),\r\n self.email.setVal(\"\"),\r\n self.affiliation.setVal(\"\")\r\n self.fullName.setVal(\"\")",
"def _genotype_updated(self):\n if self.data.get(\"GT\", None) is None:\n self.gt_alleles = None\n self.called = None\n self.ploidy = None\n else:\n self.gt_alleles = []\n for allele in ALLELE_DELIM.split(str(self.data[\"GT\"])):\n if allele == \".\":\n self.gt_alleles.append(None)\n else:\n self.gt_alleles.append(int(allele))\n self.called = all([al is not None for al in self.gt_alleles])\n self.ploidy = len(self.gt_alleles)",
"def resize_invalid_genes(self):\n\n for i in range(self.chromosome_size):\n if self.genes[i] > 1:\n self.genes[i] = 1\n elif self.genes[i] < 0:\n self.genes[i] = 0",
"def resize_invalid_genes(self):\n\n for i in range(self.chromosome_size):\n if self.genes[i] > 1:\n self.genes[i] = 1\n elif self.genes[i] < 0:\n self.genes[i] = 0",
"def check_input_empty(user_inputs):\n\n is_empty = True\n index = 0\n for item in user_inputs:\n if item == \"\":\n user_inputs[index] = None\n else:\n is_empty = False\n index += 1\n\n return is_empty, user_inputs",
"def check_empty_fields_before_bounds(header,\r\n mapping_data,\r\n warnings):\r\n\r\n desc_field = \"Description\"\r\n correction = 1\r\n primer_field = \"LinkerPrimerSequence\"\r\n\r\n try:\r\n desc_field_ix = header.index(desc_field) + correction\r\n primer_field_ix = header.index(primer_field) + correction\r\n except ValueError:\r\n # Skip if Description field not present, already get header error\r\n return warnings\r\n\r\n for curr_row in range(len(mapping_data)):\r\n for curr_col in range(primer_field_ix, desc_field_ix):\r\n curr_field = mapping_data[curr_row][curr_col].replace('\\n', '')\r\n if not curr_field:\r\n warnings.append('Empty data field ' +\r\n '%s found\\t%d,%d' %\r\n (mapping_data[\r\n curr_row][curr_col].replace('\\n', ''),\r\n curr_row + correction, curr_col))\r\n\r\n return warnings",
"def _set_unique_and_null_vals(self):\n self.unique_vals = {}\n \n df_col = self.df[self.col]\n u_vals = pandas.unique( df_col[ df_col.notnull() ] )\n \n for val in u_vals:\n self.unique_vals[val] = np.where( df_col==val)[0]\n \n null_inds = np.where(self.df.isnull()[self.col]) [0]\n if null_inds.size:\n self.unique_vals['NULL__'] = null_inds",
"def _validate(self, queryset):\n values_distinct = queryset.values(\n *self._invoice_report_common_fields\n ).distinct()\n if values_distinct.count() != 1:\n raise ValidationError(self._get_non_unique_error(queryset))\n if not all(values_distinct[0].values()):\n raise ValidationError(\"None of {} can't be empty\".format(', '.join(\n self._invoice_report_common_fields\n )))",
"def clear(self):\n self.chromosome_list = []",
"def remove_empty_genes(self):\n to_remove = []\n for gene in self.genes:\n if not gene.mrnas:\n to_remove.append(gene)\n if to_remove:\n for gene in to_remove:\n self.genes.remove(gene)\n sys.stderr.write(\"Removed empty gene \" + gene.identifier + \"\\n\")\n self.removed_genes.extend(to_remove)\n return to_remove",
"def _check_mandatory(self):\n for subtoken in self.subtokens:\n if subtoken.mandatory != 0:\n self.mandatory = np.random.uniform()\n return\n self.mandatory = 0",
"def _populate_always_present_fields(self, field):\n defaults = [\n (\"label\", \"\"),\n (\"instructions\", \"\"),\n (\"placeholder\", \"\"),\n (\"defaultValue\", \"\"),\n (\"restrictions\", {}),\n (\"errorMessages\", {}),\n ]\n field.update({\n key: value\n for key, value in defaults if key not in field\n })",
"def setNone(self):\n self.setValue([])",
"def _none_subst(self, *args):\n\n # Imports\n import numpy as np\n\n # Initialize argument list return value, and as None not found\n arglist = [a for a in args]\n none_found = False\n\n # Check for None values\n none_vals = list(map(lambda e: isinstance(e, type(None)), arglist))\n\n # Error if more than one None; handle if exactly one; pass through if\n # none.\n if np.count_nonzero(none_vals) > 1:\n raise ValueError(\n \"Multiple 'None' values [indices {0}] not supported\"\n .format(tuple(np.nonzero(none_vals)[0])))\n elif np.count_nonzero(none_vals) == 1:\n # Must be no iterables that are not strings. Thus, an element-wise\n # test for iterability and an element-wise test for stringiness\n # must give matching arrays\n if not all(np.equal(list(map(np.iterable, arglist)),\n list(map(lambda e: isinstance(e, str), arglist)))):\n raise ValueError(\n \"'None' as parameter invalid with non-str iterables\")\n ## end if\n\n # Parameters okay; replace the None with the appropriate range()\n none_found = True\n none_loc = np.nonzero(none_vals)[0][0]\n arglist[none_loc] = \\\n range(self.num_geoms if none_loc == 0 else self.num_atoms)\n ## end if\n\n # Return the arguments list and the none-found value\n return arglist",
"def set_default_values_as_needed(self):\n if self.verbose:\n click.echo('Updating required default values')\n for field in ARGUMENTS_DEFAULT_VALUES:\n if self.__class__.__name__ in ARGUMENTS_DEFAULT_VALUES[field][1]:\n self.data[field] = ARGUMENTS_DEFAULT_VALUES[field][0]",
"def clear(self):\n self.counts = [0] * len(self.values)\n if HAS_NUMPY:\n self.counts = numpy.array(self.counts)",
"def setNull(self):\n self.components = [0 for i in range(len(self.components))]",
"def test_process_id_map_empty_data_fields(self):\r\n\r\n header, mapping_data, comments, errors, warnings =\\\r\n process_id_map(self.empty_fields_fp)\r\n\r\n expected_header = [\r\n 'SampleID',\r\n 'BarcodeSequence',\r\n 'LinkerPrimerSequence',\r\n 'Treatment',\r\n 'ReversePrimer',\r\n 'Description']\r\n expected_mapping_data = [['PC.354',\r\n 'AGCACGAGCCTA',\r\n 'YATGCTGCCTCCCGTAGGAGT',\r\n 'Control',\r\n 'ATGACCGATTRGACCAG',\r\n 'Control_mouse_I.D._354'],\r\n ['PC.355',\r\n 'AACTCGTCGATG',\r\n 'YATGCTGCCTCCCGTAGGAGT',\r\n 'Control',\r\n '',\r\n ''],\r\n ['PC.356',\r\n 'ACAGACCACTCA',\r\n 'YATGCTGCCTCCCGTAGGAGT',\r\n 'Control',\r\n 'ATGACCGATTRGACCAG',\r\n 'Control_mouse_I.D._356']]\r\n expected_comments = [\r\n 'Example mapping file for the QIIME analysis package. These 9 samples are from a study of the effects of exercise and diet on mouse cardiac physiology (Crawford, et al, PNAS, 2009).']\r\n expected_errors = ['Missing expected DNA sequence\\t2,4']\r\n expected_warnings = [\r\n 'Empty data field found\\t2,4',\r\n 'Empty data field found\\t2,5']\r\n\r\n self.assertEqual(header, expected_header)\r\n self.assertEqual(mapping_data, expected_mapping_data)\r\n self.assertEqual(comments, expected_comments)\r\n self.assertEqual(errors, expected_errors)\r\n self.assertEqual(warnings, expected_warnings)",
"def autosomes(self, also=None):\n is_auto = self.chromosome.str.match(r\"(chr)?\\d+$\", na=False)\n if not is_auto.any():\n # The autosomes, if any, are not named with plain integers\n return self\n if also is not None:\n if isinstance(also, pd.Series):\n is_auto |= also\n else:\n # The assumption is that `also` is a single chromosome name or an iterable thereof.\n if isinstance(also, str):\n also = [also]\n for a_chrom in also:\n is_auto |= self.chromosome == a_chrom\n return self[is_auto]",
"def fill_hom(patient, gene):\n\n first = 'HR_' + patient + '_First_' + gene + '_Split'\n second = 'HR_' + patient + '_Second_' + gene + '_Split'\n\n for column in data.columns:\n f = re.match(second, column)\n if f:\n data[second] = data[second].fillna(data[first])\n else:\n pass",
"def fill_missing(self) -> None:\n\n self.fill_missing_rows()\n self.fill_missing_source_parameters()\n return",
"def reset_annotations(self):\n # FIXME: this state does not make sense\n self.annotation_date_set = False\n self.annotation_comment_set = False\n self.annotation_type_set = False\n self.annotation_spdx_id_set = False",
"def set_values(self):\n\n if self.featureType != \"gene\":\n self.transcriptId = self.meta['transcript_id']\n self.transcriptName = self.meta['transcript_name']\n self.transcriptBioType = self.meta['transcript_biotype']\n if self.featureType == 'exon':\n self.exonNum = self.meta['exon_number']\n self.exonId = self.meta['exon_id']\n elif self.featureType == 'CDS' or self.featureType == 'intron':\n self.exonNum = self.meta['exon_number']",
"def fill_blanks_randomly(grid):\n for row in grid:\n for i in range(len(row)):\n if row[i] is None:\n row[i] = get_random_char()",
"def clear_inputs(self):\n self.root.ids.input_title.text, self.root.ids.input_artist.text, self.root.ids.input_year.text = (\"\", \"\", \"\")"
] |
[
"0.5443785",
"0.48637012",
"0.48046562",
"0.47582003",
"0.4712509",
"0.46778277",
"0.4662032",
"0.4654403",
"0.4654403",
"0.45855978",
"0.45841703",
"0.45520344",
"0.45424688",
"0.45242676",
"0.451611",
"0.4511423",
"0.44895178",
"0.44530323",
"0.43801317",
"0.4379961",
"0.4358364",
"0.4343787",
"0.43366528",
"0.43291447",
"0.43278405",
"0.42919356",
"0.42870334",
"0.42645735",
"0.42570102",
"0.42525303"
] |
0.6543988
|
0
|
List of all mutants with nonzero reads in dataset_name (or all mutants if dataset_name=None).
|
def mutants_in_dataset(self, dataset_name=None):
return [mutant for mutant in self if dataset_name is None or mutant.by_dataset[dataset_name].total_read_count>0]
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def all_genes_in_dataset(self):\n # the empty-set argument is needed in case there are no mutants in the dataset - set.union() with empty args is an error.\n return set.union(set(), *[set(genes) for N_mutants,genes \n in self.dataset.get_gene_dict_by_mutant_number(self.dataset_name).items() if N_mutants>0])",
"def genes_with_multiple_mutants(self):\n # the empty-set argument is needed in case there are no mutants in the dataset - set.union() with empty args is an error.\n return set.union(set(), *[set(genes) for N_mutants,genes \n in self.dataset.get_gene_dict_by_mutant_number(self.dataset_name).items() if N_mutants>1])",
"def most_common_mutants(self):\n highest_readcount = max([mutant.read_info(self.dataset_name).total_read_count for mutant in self.dataset])\n highest_readcount_mutants = [mutant for mutant in self.dataset \n if mutant.read_info(self.dataset_name).total_read_count==highest_readcount]\n return highest_readcount_mutants",
"def mutants_in_chromosome(self, chromosome):\n return sum(1 for m in self.dataset if m.read_info(self.dataset_name).total_read_count \n and m.position not in SPECIAL_POSITIONS.all_undefined and m.position.chromosome==chromosome)",
"def read_info(self, dataset_name=None, strict=False):\n if dataset_name is None: \n raise MutantError(\"This is a multi-dataset mutant - must provide dataset_name arg!\")\n if strict:\n self._check_dataset_presence(dataset_name)\n return self.by_dataset[dataset_name]\n else:\n try: return self.by_dataset[dataset_name]\n except KeyError: return blank_readcount_only_mutant()\n # TODO unit-tests?",
"def test_names_no_mats(self):\n m = mats.Materials()\n self.assertEqual([], m.names())",
"def reactants(self):\n return [k for k, v in iteritems(self._metabolites) if v < 0]",
"def remove_mutants_not_in_other_dataset(self, other_dataset, readcount_min=1, perfect_reads=False):\n # TODO do I want this to be based on non-exact position equality instead?\n if perfect_reads: get_readcount = lambda m: m.perfect_read_count\n else: get_readcount = lambda m: m.total_read_count\n # go over all mutants in self; need to convert the iterator to a list to make a separate copy, \n # otherwise we'd be modifying the iterator while iterating through it, which isn't allowed.\n for mutant in list(self):\n if get_readcount(other_dataset.get_mutant(mutant.IB)) < readcount_min:\n self.remove_mutant(mutant.IB)\n # TODO really I shouldn't be removing mutants outright, just noting them as removed or something... In that case should they or should they not show up in \"for m in self\"? Probably not - they should have a separate dictionary?\n # TODO should I keep track of removed reads, and print in summary? PROBABLY.\n # LATER-TODO unit-test - it does have run-tests though.",
"def all_minimal():\n results = DatalabData.filter_minimal(None, None, None, False)\n return results",
"def _check_dataset_name_return_data(self, dataset_name, strict=False):\n if strict:\n _check_dataset_presence(self, dataset_name)\n elif dataset_name is None:\n raise MutantError(\"Cannot use None as dataset name!\")\n return self.by_dataset[dataset_name]",
"def filterDataset(dat, dataset):\n #\n dat = dat[dat['organism'].isin(dataset)]\n no_mmei_index = dat['mmei']=='no'\n nonstop_index = dat['mutstop']=='no'\n zerofit_index = dat['fitness'].abs()>1e-4\n mutwt_index = dat['mutwt']=='no'\n dat = dat[no_mmei_index & nonstop_index & zerofit_index & mutwt_index]\n #print \"Filtered data\"\n return dat",
"def _most_common_mutants_info(self, dataset=None):\n summ = self._get_summary(dataset)\n most_common_mutants = summ.most_common_mutants\n m = most_common_mutants[0]\n # calculate the fraction of total reads per mutant, assuming each mutant has the same readcount\n assert len(set([m.read_info(dataset).total_read_count for m in most_common_mutants])) == 1\n readcount_info = value_and_percentages(m.read_info(dataset).total_read_count, [summ.aligned_read_count])\n if len(most_common_mutants) == 1: return \"%s (%s)\"%(readcount_info, m.position)\n else: return \"%s (%s mutants)\"%(readcount_info, len(most_common_mutants))",
"def empty(self, name, condition=None):\n empty = []\n if not isinstance(name, list): name = [name]\n return_bool = len(name) == 1\n if condition:\n df = pd.DataFrame(self[self.take(condition), name])\n else:\n df = self._data\n for n in name:\n if df[n].count() == 0:\n empty.append(n)\n if return_bool:\n return bool(empty)\n else:\n return empty",
"def _ensure_dataset_None(dataset_name):\n if dataset_name is not None:\n raise MutantError(\"Don't try to provide a dataset_name on a single mutant (rather than the multi-dataset subclass)!\")\n # MAYBE-TODO this could be accomplished with a decorator instead, right?",
"def get_all_individuals(self):\n return [individual for node in self.nodes[1:] for individual in node.individuals if len(individual.data_records) > 0]",
"def discard_none_targets(dataset):\r\n indices = []\r\n for (ii,sample) in enumerate(dataset):\r\n target = sample[1]\r\n if target is not None:\r\n indices.append(ii)\r\n\r\n return Subset(dataset,indices)",
"def get_all_muted(cls, community_id):\n return DB.query_col(\"\"\"SELECT name FROM hive_accounts\n WHERE id IN (SELECT account_id FROM hive_roles\n WHERE community_id = :community_id\n AND role_id < 0)\"\"\",\n community_id=community_id)",
"def read_info(self, dataset_name=None, strict=False):\n if dataset_name is None: return self\n else: raise MutantError(\"This is NOT a multi-dataset mutant - cannot provide dataset_name arg!\")",
"def missing_samples(self):\n missing = [s for s in self.subjects if len(s.samples) == 0]\n if len(missing) == 0:\n return None\n return missing",
"def whichSet(self):\n result = []\n # go through all members and if any isSet -- return True\n for index,v in self._items.iteritems():\n if v.isSet:\n result.append(index)\n return result",
"def mutant(self):\n _mutant = []\n _wt = self.wildtype\n for i in range(0, len(self.mutations)):\n site = _wt[i]\n options = self.mutations[i]\n if options is None:\n _mutant.append(_wt[i])\n else:\n for o in options:\n if o != site:\n _mutant.append(o)\n return \"\".join(_mutant)",
"def get_material_set(**kw):\n mat_ids = set()\n volumes = get_volume_list()\n for v in volumes:\n d = volume_metadata( v )\n if( kw.get('with_rho') is True ):\n # rho is undefined for the void material and dagmc may return anything.\n if d['material'] == 0:\n mat_ids.add( (d['material'], 0.0) )\n else:\n mat_ids.add( (d['material'], d['rho']) )\n else:\n mat_ids.add( d['material'] )\n return mat_ids",
"def all_datasets(self) -> set[str]:\n return set(self.frames.idx.get_dataset_ids())",
"def all_flat(cls, materials):\n if isinstance(materials, dict):\n return all([m.is_freq_flat() for m in materials.values()])\n else:\n return all([m.is_freq_flat() for m in materials])",
"def getNotMyCamps(self):\n r = []\n for p in self.__camps:\n if(p.getOwner() != 1):\n r.append(p)\n return r",
"def remove_mutants_in_other_dataset(self, other_dataset, readcount_min=1, perfect_reads=False):\n # TODO do I want this to be based on non-exact position equality instead?\n if perfect_reads: get_readcount = lambda m: m.perfect_read_count\n else: get_readcount = lambda m: m.total_read_count\n # go over all mutants in self; need to convert the iterator to a list to make a separate copy, \n # otherwise we'd be modifying the iterator while iterating through it, which isn't allowed.\n for mutant in list(self):\n if get_readcount(other_dataset.get_mutant(mutant.IB)) >= readcount_min:\n self.remove_mutant(mutant.IB)\n # TODO really I shouldn't be removing mutants outright, just noting them as removed or something... In that case should they or should they not show up in \"for m in self\"? Probably not - they should have a separate dictionary?\n # TODO should I keep track of removed reads, and print in summary? PROBABLY.\n # LATER-TODO unit-test - it does have run-tests though.",
"def get_mask(self, dataset_name):\n p = path.join(self.dataset_root, dataset_name + \"/\")\n mask_path = serial.preprocess(p + \"mask.npy\")\n mask = np.load(mask_path)\n if not np.all(np.bitwise_or(mask == 0, mask == 1)):\n raise ValueError(\"Mask has incorrect values.\")\n return mask",
"def get_masks(data):\n return [patient[0] for i, patient in enumerate(data) if i in good_patients]",
"def getNeutralCamps(self):\n r = []\n for p in self.__camps:\n if(p.getOwner() == 0):\n r.append(p)\n return r",
"def mask_nodata(self):\n ds_out = self._obj\n for var in self.vars:\n ds_out[var] = ds_out[var].raster.mask_nodata()\n return ds_out"
] |
[
"0.6230707",
"0.537474",
"0.53254586",
"0.52580535",
"0.5242609",
"0.4971017",
"0.4961417",
"0.49199918",
"0.49074063",
"0.49041936",
"0.48865938",
"0.48811442",
"0.48632625",
"0.48450068",
"0.48434752",
"0.4763551",
"0.47558308",
"0.4752522",
"0.4736801",
"0.47363997",
"0.47269368",
"0.47003445",
"0.4692156",
"0.46887556",
"0.46634033",
"0.46627712",
"0.46500158",
"0.46472153",
"0.46320838",
"0.46319714"
] |
0.8349818
|
0
|
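Note on the record above: it pairs the mutants_in_dataset docstring with its one-line filter over a multi-dataset mutant pool. The stub below is a minimal, self-contained sketch of that filter's behaviour; the _Readcount/_Mutant/_Pool names are hypothetical stand-ins (only by_dataset and total_read_count mirror the record's code), not the library's real API.

from collections import defaultdict

class _Readcount:
    def __init__(self, total_read_count=0):
        self.total_read_count = total_read_count

class _Mutant:
    def __init__(self, counts):
        # counts: dataset_name -> read count; missing datasets default to zero reads
        self.by_dataset = defaultdict(_Readcount,
                                      {name: _Readcount(n) for name, n in counts.items()})

class _Pool(list):
    def mutants_in_dataset(self, dataset_name=None):
        # same filter as the record's document: keep mutants with nonzero reads,
        # or everything when no dataset name is given
        return [m for m in self
                if dataset_name is None or m.by_dataset[dataset_name].total_read_count > 0]

pool = _Pool([_Mutant({'A': 5}), _Mutant({'A': 0, 'B': 3})])
print(len(pool.mutants_in_dataset('A')))   # -> 1
print(len(pool.mutants_in_dataset()))      # -> 2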
Raise MutantError if self.summary, mutants, and self.dataset_order don't all have the same set of datasets!
|
def _check_dataset_consistency(self):
if not self.multi_dataset:
raise MutantError("_check_dataset_consistency only makes sense for multi-datasets!")
def _check_sets_raise_error(set1, set2, set1_name, set2_name):
if not set1==set2:
raise MutantError("Multi-dataset mutant pool has different %s and %s dataset sets! %s, %s"%(set1_name,
set2_name, set1, set2))
datasets_from_summary = set(self.summary.keys())
datasets_from_mutants = set.union(*[set(m.by_dataset.keys()) for m in self])
_check_sets_raise_error(datasets_from_summary, datasets_from_mutants, "from summary", "from mutants")
try:
if self._dataset_order is not None:
datasets_from_order = set(self._dataset_order)
_check_sets_raise_error(datasets_from_order, datasets_from_summary, "from dataset_order", "from summary")
except AttributeError:
pass
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _ensure_dataset_None(dataset_name):\n if dataset_name is not None:\n raise MutantError(\"Don't try to provide a dataset_name on a single mutant (rather than the multi-dataset subclass)!\")\n # MAYBE-TODO this could be accomplished with a decorator instead, right?",
"def test_duplicate_images_error(self):\n with self.assertRaises(AssertionError):\n disk.merge_datasets(self.input_datasets, self.output_dataset)\n\n # Original dataset shouldn't be modified.\n self.assertEqual(0, len(self.output_dataset.metadata()))",
"def add_other_mutant_as_dataset(self, other_mutant, other_mutant_dataset_name, \n overwrite=False, check_constant_data=False):\n if other_mutant_dataset_name in self.by_dataset and not overwrite:\n raise MutantError(\"This mutant already has a %s dataset! Can't overwrite it with \"%other_mutant_dataset_name\n +\"new one. Choose a different name for new dataset, or use overwrite=True argument.\")\n\n # if desired, check that the position/gene data matches (and update if own gene data is unknown)\n # (probably should be using ifs rather than asserts, but I think since they're wrapped in a try/except it's fine)\n if check_constant_data:\n if not self.position == other_mutant.position:\n raise MutantError(\"Can't add mutant2 as dataset to mutant1: the mutant position differs! %s and %s\"%(\n self.position, other_mutant.position))\n try:\n self.update_gene_info(other_mutant.gene, other_mutant.orientation, \n other_mutant.gene_feature, other_mutant.gene_distances)\n except MutantError:\n raise MutantError(\"Can't add mutant2 as dataset to mutant1: the mutant gene data differs!\"\n +\" %s, %s, %s and\"%(self.gene, self.orientation, self.gene_feature)\n +\" %s, %s, %s.\"%(other_mutant.gene, other_mutant.orientation, other_mutant.gene_feature))\n\n # make a new empty Insertional_mutant object to hold the readcount-related data from other_mutant, \n # and put it in the self.by_dataset dictionary under other_mutant_dataset_name\n self.by_dataset[other_mutant_dataset_name] = Insertional_mutant_readcount_only()\n # now fill this new object with readcount-related data from other_mutant\n self.by_dataset[other_mutant_dataset_name]._copy_readcount_related_data(other_mutant)",
"def _check_heterogeneous_mutations(self):\n # Currently, frontend assigns empty list if this value is not entered.\n mutations = {str(d.input.get(\"mutations\", [])) for d in self._data}\n genesets = {str(d.input.get(\"geneset\", \"\")) for d in self._data}\n\n if len(mutations) > 1:\n name = \"mutations\"\n multiple = mutations\n elif len(genesets) > 1:\n name = \"genesets\"\n multiple = genesets\n else:\n return\n\n raise ValueError(\n f\"Variants should be computed with the same {name} input. \"\n f\"Variants of samples in collection {self.collection.name} \"\n f\"have been computed with {', '.join(list(multiple))}.\\n\"\n \"Use geneset filter in the VariantTables constructor.\\n\"\n )",
"def _check_dataset(self, dataset):\n if not isinstance(dataset, Dataset):\n raise ValueError('wrong training_set or validation_set are not instances of the nn.Dataset class')\n\n if dataset.inputs.shape[1] != self.arch[0]:\n raise ValueError('dataset inputs shape is inconsistent with number of network input nodes.')\n\n if dataset.targets.shape[1] != self.arch[-1]:\n raise ValueError('dataset targets shape is inconsistent with number of network output nodes.')",
"def give_single_dataset_mutant(self, single_dataset_name, force=False):\n if single_dataset_name not in self.by_dataset.keys() and not force:\n raise MutantError(\"This mutant doesn't have a %s dataset! \"%single_dataset_name\n +\"Use force=True argument if you want a zero-readcount mutant returned anyway.\")\n # generate new mutant, fill it with readcount-related data from self.by_dataset[single_dataset_name] \n # and general data from self\n new_mutant = Insertional_mutant()\n new_mutant._copy_non_readcount_data(self)\n new_mutant._copy_readcount_related_data(self.by_dataset[single_dataset_name])\n return new_mutant",
"def _check_consistency(self) -> None:\n lbl_vals_from_metadata = set(self.infos.keys())\n lbl_vals_from_data = set(np.unique(self.data))\n # TODO: check if numerical datatype shenanigans ruin the day\n # i.e. something along the lines of 1.0 != 1\n symm_diff = lbl_vals_from_data ^ lbl_vals_from_metadata\n\n if len(symm_diff) != 0:\n msg = (f'Label mismatch between data and metadata! Expected vanishing '\n f'symmetric difference but got: {symm_diff}')\n raise ValueError(msg)",
"def check_invalid_datasets_derivation(fix, dataset_gateway: IDatasetGateway, **_):\n invalid_datasets = []\n\n def fix_or_report(dataset):\n if fix:\n dataset.unfreeze()\n dataset.derived_from = None\n dataset.freeze()\n communication.info(f\"Fixing dataset '{dataset.name}'\")\n else:\n invalid_datasets.append(dataset.name)\n\n for dataset in dataset_gateway.get_provenance_tails():\n while dataset.derived_from is not None and dataset.derived_from.url_id is not None:\n if dataset.same_as or dataset.derived_from.url_id == dataset.id:\n fix_or_report(dataset)\n break\n\n try:\n dataset = dataset_gateway.get_by_id(dataset.derived_from.url_id)\n except errors.ObjectNotFoundError:\n fix_or_report(dataset)\n break\n\n if not invalid_datasets:\n return True, False, None\n\n problems = (\n WARNING\n + \"There are invalid dataset metadata in the project (use 'renku doctor --fix' to fix them):\"\n + \"\\n\\n\\t\"\n + \"\\n\\t\".join(click.style(name, fg=\"yellow\") for name in invalid_datasets)\n + \"\\n\"\n )\n\n return False, True, problems",
"def test_verify_set_multi(self):\n self._verify([self.applied_commands['setm']])",
"def validate_dataset(self):\n pass",
"def validate_dataset(self):\n if np.all(self.L_bpe == self.bpe_l):\n pass\n\n super(StandardDataset, self).validate_dataset()",
"def verify_pandas(self):\n self.check_dataset_duplicate_ids(self.vertices)\n # self.check_dataset_children_ids()\n self.check_dataset_litter_ids()\n self.check_dataset_dates()",
"def test_DistanceMatrices_setter_wrong_number(self):\r\n self.assertRaises(ValueError, setattr, self.mc, 'DistanceMatrices',\r\n [self.overview_dm])\r\n self.assertRaises(ValueError, setattr, self.mc, 'DistanceMatrices',\r\n [self.overview_dm, self.overview_dm, self.overview_dm])",
"def test_make_compatible_taxa_summaries_incompatible(self):\r\n self.assertRaises(ValueError, _make_compatible_taxa_summaries,\r\n self.taxa_summary3, self.taxa_summary4)\r\n self.assertRaises(ValueError, _make_compatible_taxa_summaries,\r\n self.taxa_summary1, self.taxa_summary2)",
"def test_shape_conflict(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n grp.create_dataset('foo', (10, 3), 'f')\n with pytest.raises(TypeError):\n grp.require_dataset('foo', (10, 4), 'f')",
"def test_DistanceMatrices_setter_wrong_number(self):\r\n self.assertRaises(ValueError, setattr, self.overview_mantel,\r\n 'DistanceMatrices', [self.overview_dm])\r\n self.assertRaises(ValueError, setattr, self.overview_mantel,\r\n 'DistanceMatrices', [self.overview_dm, self.overview_dm,\r\n self.overview_dm])",
"def test_DistanceMatrices_setter_wrong_dims(self):\r\n self.assertRaises(ValueError, setattr, self.cs, 'DistanceMatrices',\r\n [self.overview_dm, self.single_ele_dm])\r\n # Also test that constructor raises this error.\r\n self.assertRaises(ValueError, CorrelationStats, [self.overview_dm,\r\n self.single_ele_dm])",
"def _check_group(self):\n if len(self.groups) != 2:\n raise ValueError(\"There have to be two groups!\")\n\n # Check the number of atoms in each group is the same\n n_group1 = 0\n for key, value in self.groups[0].items():\n n_group1 += value\n\n n_group2 = 0\n for key, value in self.groups[1].items():\n n_group2 += value\n\n if n_group1 != n_group2:\n f1 = self._group2formula(self.groups[0])\n f2 = self._group2formula(self.groups[1])\n msg = \"The two groups have to have the same number of atoms.\\n\"\n msg += \"Group 1: {} Group 2: {}\".format(f1, f2)\n raise ValueError(msg)",
"def _validate_random_seeds(self):\n if self.random_seeds:\n if len(self.random_seeds) != len(self.sampler):\n raise ValueError(\"Number of given range objects in random_seeds\"\\\n \"and number of sampler objects need to be equal!\")\n if len(set(list(map(len,self.random_seeds)))) != 1:\n raise ValueError(\"Length of range objects in random_seeds\"\\\n \"list must be equal!\")",
"def _most_common_mutants_info(self, dataset=None):\n summ = self._get_summary(dataset)\n most_common_mutants = summ.most_common_mutants\n m = most_common_mutants[0]\n # calculate the fraction of total reads per mutant, assuming each mutant has the same readcount\n assert len(set([m.read_info(dataset).total_read_count for m in most_common_mutants])) == 1\n readcount_info = value_and_percentages(m.read_info(dataset).total_read_count, [summ.aligned_read_count])\n if len(most_common_mutants) == 1: return \"%s (%s)\"%(readcount_info, m.position)\n else: return \"%s (%s mutants)\"%(readcount_info, len(most_common_mutants))",
"def test_compute_correlation_invalid_num_permutations(self):\r\n self.assertRaises(ValueError, _compute_correlation,\r\n self.taxa_summary1, self.taxa_summary1, 'paired',\r\n 'spearman', 'high', -10, 0.22222)",
"def test_call_incompatible_data(self):\r\n self.cs_overview.DistanceMatrices = [self.single_ele_dm,\r\n self.single_ele_dm]\r\n self.assertRaises(ValueError, self.cs_overview)",
"def _validate_compatibility(self):\r\n for dm in self.DistanceMatrices:\r\n for samp_id in dm.ids:\r\n if samp_id not in self.MetadataMap.SampleIds:\r\n raise ValueError(\"The sample ID '%s' was not found in the \"\r\n \"metadata map.\" % samp_id)\r\n for cat in self.Categories:\r\n if cat not in self.MetadataMap.CategoryNames:\r\n raise ValueError(\"The category '%s' was not found in the \"\r\n \"metadata map.\" % cat)",
"def _check_result(self, tesselation, orig_gdf, unique_id):\n # check against input layer\n ids_original = list(orig_gdf[unique_id])\n ids_generated = list(tesselation[unique_id])\n if len(ids_original) != len(ids_generated):\n\n self.collapsed = set(ids_original).difference(ids_generated)\n warnings.warn(\n f\"Tessellation does not fully match buildings. \"\n f\"{len(self.collapsed)} element(s) collapsed \"\n f\"during generation - unique_id: {self.collapsed}\"\n )\n\n # check MultiPolygons - usually caused by error in input geometry\n self.multipolygons = tesselation[tesselation.geometry.type == \"MultiPolygon\"][\n unique_id\n ]\n if len(self.multipolygons) > 0:\n warnings.warn(\n \"Tessellation contains MultiPolygon elements. Initial objects should \"\n f\"be edited. unique_id of affected elements: {list(self.multipolygons)}\"\n )",
"def test_DistanceMatrices_setter_wrong_number(self):\r\n self.assertRaises(ValueError, setattr, self.pm,\r\n 'DistanceMatrices', [self.overview_dm])\r\n self.assertRaises(ValueError, setattr, self.pm,\r\n 'DistanceMatrices', [self.overview_dm, self.overview_dm])",
"def test_set_molecule_error(self):\n mol = Molecule.from_smiles(\"CCO\")\n atom = Atom(6, 0, False)\n atom.molecule = mol\n with pytest.raises(AssertionError, match=\"already has an associated molecule\"):\n atom.molecule = mol",
"def test_compare_taxa_summaries_invalid_input(self):\r\n # Invalid comparison mode.\r\n self.assertRaises(ValueError, compare_taxa_summaries,\r\n self.taxa_summary_obs1, self.taxa_summary_exp1, 'foo',\r\n 'pearson')\r\n # Invalid correlation type.\r\n self.assertRaises(ValueError, compare_taxa_summaries,\r\n self.taxa_summary_obs1, self.taxa_summary_exp1, 'paired',\r\n 'foo')\r\n # Invalid tail type.\r\n self.assertRaises(ValueError, compare_taxa_summaries,\r\n self.taxa_summary_obs1, self.taxa_summary_exp1, 'paired',\r\n 'spearman', 'foo')\r\n # Invalid number of permutations.\r\n self.assertRaises(ValueError, compare_taxa_summaries,\r\n self.taxa_summary_obs1, self.taxa_summary_exp1, 'paired',\r\n 'spearman', 'high', -1)\r\n # Invalid confidence level.\r\n self.assertRaises(ValueError, compare_taxa_summaries,\r\n self.taxa_summary_obs1, self.taxa_summary_exp1, 'paired',\r\n 'spearman', 'high', 0, 1)",
"def test_summary_stats_inconsistent_activities_provided(self):\n athlete_df = pd.DataFrame({\n 'id': [1, 2],\n 'name': ['foo', 'bar']\n })\n activity_df = pd.DataFrame({\n 'athlete_id': [1, 2, 1],\n 'average_speed': [25, 25, 12],\n 'unused_measurement': [1, 1, 1],\n 'type': ['Ride', 'Ride', 'Run'] # Mixture of rides and runs\n })\n\n self.assertRaises(ValueError, summary_stats, athlete_df, activity_df)",
"def test_06_self_cannot_upgrade_group(self):\n meowers = self.meowers\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.VIEW)\n self.assertFalse(dog in meowers.gaccess.edit_users)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_group_with_user(\n meowers, dog, PrivilegeCodes.VIEW)\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_group_with_user(\n meowers, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))",
"def check_integrity(self) -> None:\n for subset in self.subsets:\n if not self._check_subset_integrity(subset):\n raise ValueError(f\"subset {subset} not found or corrupt\")"
] |
[
"0.60590947",
"0.5892247",
"0.58170164",
"0.5749973",
"0.5669064",
"0.56428903",
"0.56069684",
"0.5565046",
"0.55602545",
"0.5522278",
"0.55144644",
"0.55016494",
"0.5482615",
"0.54678094",
"0.5438125",
"0.54357666",
"0.54034233",
"0.5400385",
"0.538548",
"0.53822416",
"0.53796005",
"0.5377455",
"0.5372003",
"0.53562105",
"0.5345576",
"0.5343572",
"0.53413117",
"0.5334759",
"0.53220415",
"0.53219664"
] |
0.78113204
|
0
|
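Note on the record above: its _check_dataset_consistency document enforces one rule, that the dataset-name sets seen in the summary, in the per-mutant data, and in the optional explicit ordering all coincide. Below is a hedged sketch of that rule as a standalone helper; the function name and the use of ValueError (instead of the library's MutantError) are assumptions for illustration only.

def check_same_dataset_sets(summary_names, mutant_names, order_names=None):
    # all three inputs must describe the same set of dataset names
    summary_set, mutant_set = set(summary_names), set(mutant_names)
    if summary_set != mutant_set:
        raise ValueError("summary/mutant dataset sets differ: %s vs %s" % (summary_set, mutant_set))
    if order_names is not None and set(order_names) != summary_set:
        raise ValueError("dataset_order set differs from summary: %s vs %s"
                         % (set(order_names), summary_set))

check_same_dataset_sets(['d1', 'd2'], ['d2', 'd1'])                 # passes
check_same_dataset_sets(['d1', 'd2'], ['d2', 'd1'], ['d1', 'd2'])   # passes
# check_same_dataset_sets(['d1'], ['d1', 'd2'])                     # would raise ValueError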
A specific order of datasets for printing; can be set directly, defaults to alphabetical sort.
|
def dataset_order(self):
self._check_dataset_consistency()
try: return self._dataset_order
except AttributeError: return sorted(self.summary.keys())
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def sort_by_default(self):\n self.data.sort()",
"def test_categories_are_sorted(self):\n self.data_sorted(self.test_data['shirts'], self.test_data['pants'])",
"def sort_music_data(sort_by = None):\n for lists in read_file():\n print(lists)\n pass",
"def _reorder_collected(self, data):\n priority = {\n 'post': 1,\n 'get': 2,\n 'put': 2,\n 'patch': 2,\n 'head': 2,\n 'options': 2,\n 'delete': 3,\n }\n data = sorted(\n data,\n key=lambda x: priority.get(getattr(x, 'name', ''), 4))\n return data",
"def sortby(self):\n ...",
"def sort_results(self):\n pass",
"def order_data(self, data, order):\n return data",
"def orderList(dataSource,**kwargs):\n\treturn sorted(dataSource)",
"def set_trec_order(self):\n for k in self._run:\n self._run[k].sort(key=lambda x:(x.get_score(),x.get_doc()),reverse=True)\n for r in self._run[k]:\n print r.get_str()",
"def order(self):\n raise NotImplementedError()",
"def sort(self,desc):\n\tself.__sort(\"\",\"\",desc)",
"def orderby():\n pass",
"def data_for_sorting() -> NoReturn:\n raise NotImplementedError",
"def data_for_sorting() -> NoReturn:\n raise NotImplementedError",
"def sort_entries(self):\n if not len(self.student_list):\n print('There is no contents to sort')\n return\n\n opt = self.input_options(['n', 'a', 'g'], 1, 'Sort by name(n) or average(a) or grade(g)')\n if opt.upper() == 'N':\n self.print_dataframe(self.student_list.sort_values(by=['name', 'average'], ascending=[True,False]))\n elif opt.upper() == 'A' or opt.upper() == 'G':\n self.print_dataframe(self.student_list.sort_values(by=['average', 'name'], ascending=[False,True]))",
"def test_query_sort_default_sort_order(self):\n doc_count = 10\n field_to_be_sorted_by = \"data\"\n prefix = get_rand_string()\n\n data = [prefix + \"-\" + str(x) for x in range(10)]\n\n # Same user_id for all documents\n user_id = get_rand_string()\n\n for datum in data:\n self.conn.add(id=get_rand_string(), user_id=user_id, data=datum)\n self.conn.commit()\n\n results = self.conn.query(q=\"user_id:\" + user_id, sort=\"data\").results\n\n self.assertEquals(len(results), doc_count,\n \"There should be %d documents returned, got:%d, results:%s\" % (\n doc_count, len(results), results))\n\n query_data = [doc[\"data\"] for doc in results]\n\n for idx, datum in enumerate(sorted(data)):\n self.assertEquals(datum, query_data[idx],\n \"Expected %s instead of %s on position %s in query_data:%s\" % (\n datum, query_data[idx], idx, query_data))",
"def sorted(self): \n pass",
"def list(self):\n return list(sorted(self.manager.data[\"dataset\"].keys()))",
"def data_sort(gdf,str):\n gdf = gdf.sort_values(by = [str])\n \n return gdf",
"def setOrder(self, order):\n\t\tself.orderInData = order",
"def order_output(self):\n self.output = self.output[self.cols]",
"def sortLoadFiles(self):\n self.loadFiles.sort()\n self.loadFiles.sort(lambda a,b: cmp(a[-3:].lower(), b[-3:].lower()))",
"def sorted_outputs(self):\n return self.outputs.order_by(\"dataset_idx\")",
"def sortChoices(self):\n self.formatList.sort()",
"def sort(self, Ncol, order):\n self.emit(SIGNAL(\"layoutAboutToBeChanged()\"))\n self.arraydata = sorted(self.arraydata, key=operator.itemgetter(Ncol)) \n if order != Qt.DescendingOrder:\n self.arraydata.reverse()\n self.emit(SIGNAL(\"layoutChanged()\"))",
"def sort(self, Ncol, order):\n self.emit(SIGNAL(\"layoutAboutToBeChanged()\"))\n self.arraydata = sorted(self.arraydata, key=operator.itemgetter(Ncol)) \n if order == Qt.DescendingOrder:\n self.arraydata.reverse()\n self.emit(SIGNAL(\"layoutChanged()\"))",
"def test_query_sort_nondefault_sort_order(self):\n doc_count = 10\n field_to_be_sorted_by = \"data\"\n prefix = get_rand_string()\n\n data = [prefix + \"-\" + str(x) for x in range(10)]\n\n # Same user_id for all documents\n user_id = get_rand_string()\n\n for datum in data:\n self.conn.add(id=get_rand_string(), user_id=user_id, data=datum)\n self.conn.commit()\n\n results = self.conn.query(q=\"user_id:\" + user_id, sort=\"data\",\n sort_order=\"desc\").results\n\n self.assertEquals(len(results), doc_count,\n \"There should be %d documents returned, got:%d, results:%s\" % (\n doc_count, len(results), results))\n\n query_data = [doc[\"data\"] for doc in results]\n\n for idx, datum in enumerate(reversed(sorted(data))):\n self.assertEquals(datum, query_data[idx],\n \"Expected %s instead of %s on position %s in query_data:%s\" % (\n datum, query_data[idx], idx, query_data))",
"def dataSort(self, collectionName, catagory, method='ASCENDING'):\n if method == 'ASCENDING':\n results = collectionName.find().sort(catagory, pymongo.ASCENDING)\n elif method == 'DESCENDING':\n results = collectionName.find().sort(catagory, pymongo.DESCENDING)\n return results",
"def sort_key(self):\n ...",
"def asc(self):\n self.get_output = sorted((value, key) for (key, value) in self.get_output.items())"
] |
[
"0.6724266",
"0.6340067",
"0.6126065",
"0.6017532",
"0.6009489",
"0.59664404",
"0.59582067",
"0.59187067",
"0.5870945",
"0.5865429",
"0.5843606",
"0.584108",
"0.5812359",
"0.5812359",
"0.5776693",
"0.577544",
"0.57472616",
"0.56993455",
"0.56975144",
"0.56738526",
"0.56730783",
"0.5662983",
"0.56561226",
"0.56410366",
"0.5618658",
"0.56149733",
"0.5604886",
"0.56003684",
"0.5598525",
"0.55891484"
] |
0.74094456
|
0
|
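Note on the record above: the dataset_order property returns an explicitly set order if one exists and otherwise falls back to the summary keys in alphabetical order. The sketch below illustrates just that fallback logic; the _OrderedPool class is a hypothetical stand-in and the real property's consistency check is deliberately omitted.

class _OrderedPool:
    def __init__(self, summary, explicit_order=None):
        self.summary = summary
        if explicit_order is not None:
            self._dataset_order = explicit_order

    @property
    def dataset_order(self):
        # explicit order wins; otherwise sort the summary's dataset names
        try:
            return self._dataset_order
        except AttributeError:
            return sorted(self.summary.keys())

print(_OrderedPool({'b': 1, 'a': 2}).dataset_order)               # -> ['a', 'b']
print(_OrderedPool({'b': 1, 'a': 2}, ['b', 'a']).dataset_order)   # -> ['b', 'a']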
Remove any mutants with at least readcount_min reads in other_dataset (or perfect reads, if perfect_reads=True)
|
def remove_mutants_in_other_dataset(self, other_dataset, readcount_min=1, perfect_reads=False):
# TODO do I want this to be based on non-exact position equality instead?
if perfect_reads: get_readcount = lambda m: m.perfect_read_count
else: get_readcount = lambda m: m.total_read_count
# go over all mutants in self; need to convert the iterator to a list to make a separate copy,
# otherwise we'd be modifying the iterator while iterating through it, which isn't allowed.
for mutant in list(self):
if get_readcount(other_dataset.get_mutant(mutant.IB)) >= readcount_min:
self.remove_mutant(mutant.IB)
# TODO really I shouldn't be removing mutants outright, just noting them as removed or something... In that case should they or should they not show up in "for m in self"? Probably not - they should have a separate dictionary?
# TODO should I keep track of removed reads, and print in summary? PROBABLY.
# LATER-TODO unit-test - it does have run-tests though.
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def remove_mutants_not_in_other_dataset(self, other_dataset, readcount_min=1, perfect_reads=False):\n # TODO do I want this to be based on non-exact position equality instead?\n if perfect_reads: get_readcount = lambda m: m.perfect_read_count\n else: get_readcount = lambda m: m.total_read_count\n # go over all mutants in self; need to convert the iterator to a list to make a separate copy, \n # otherwise we'd be modifying the iterator while iterating through it, which isn't allowed.\n for mutant in list(self):\n if get_readcount(other_dataset.get_mutant(mutant.IB)) < readcount_min:\n self.remove_mutant(mutant.IB)\n # TODO really I shouldn't be removing mutants outright, just noting them as removed or something... In that case should they or should they not show up in \"for m in self\"? Probably not - they should have a separate dictionary?\n # TODO should I keep track of removed reads, and print in summary? PROBABLY.\n # LATER-TODO unit-test - it does have run-tests though.",
"def remove_mutants_below_readcount(self, min_readcount, perfect_reads=False):\n if perfect_reads: get_readcount = lambda m: m.perfect_read_count\n else: get_readcount = lambda m: m.total_read_count\n # go over all mutants in self; need to convert dataset to a list to make a separate copy, \n # otherwise we'd be modifying the dataset while iterating through it, which isn't allowed.\n for mutant in list(self):\n if get_readcount(mutant) < min_readcount:\n self.remove_mutant(mutant.IB)\n # TODO really I shouldn't be removing mutants outright, just noting them as removed or something... In that case should they or should they not show up in \"for m in self\"? Probably not - they should have a separate dictionary?\n # TODO should I keep track of removed reads, and print in summary? MAYBE.",
"def filter_umis(\n molecule_table: pd.DataFrame, min_reads_per_umi: int = 100\n) -> pd.DataFrame:\n return molecule_table[molecule_table[\"readCount\"] >= min_reads_per_umi]",
"def add_filter_min_reads(self, min_reads):\n mr_filter = self.existence_array > min_reads\n self.filter &= mr_filter",
"def prune(self, min_count):\n if not self.sorted:\n self.sort()\n for k, count in enumerate(self.Nx):\n if count < min_count:\n self.truncate(k)\n break",
"def prune(read_objects):\r\n\t\timport numpy as np\r\n\t\tbest_reads = []\r\n\t\tread_name_reads = {}\r\n\t\tfor read in read_objects:\r\n\t\t\t\tif read.name in read_name_reads:\r\n\t\t\t\t\t\tread_name_reads[read.name].append(read)\r\n\t\t\t\telse:\r\n\t\t\t\t\t\tread_name_reads[read.name] = [read]\r\n\t\tfor read_name, read_objects in read_name_reads.items():\r\n\t\t\t\te_scores = [read.e_score for read in read_objects]\r\n\t\t\t\ti = np.argmin(e_scores)\r\n\t\t\t\tbest_reads.append(read_objects[i])\r\n\t\treturn best_reads",
"def remove_low_quality_for_matched(matches, read_count, phreds, min_phred_score, ditched_f=None):\n\tcount = count_unique = 0\n\tkk = matches.keys()\n\tfor k in kk:\n\t\tm = matches[k]\n\t\tif any( x < min_phred_score for x in phreds[m.read.tostring()] ):\n\t\t\tcount += read_count[m.read.tostring()]\n\t\t\tcount_unique += 1\n\t\t\tif ditched_f is not None:\n\t\t\t\tditched_f.write(\"@{id}\\n{seq}\\n+{id}\\n{qual}\\n\".format( id=k, seq=m.read, \\\n\t\t\t\t\tqual=m.quality ))\n\t\t\tdel matches[k]\n\t\t\tdel read_count[m.read.tostring()]\n\t\t\tdel phreds[m.read.tostring()]\n\treturn count, count_unique",
"def trim_texts_by_count(self, min_count=100):\n\n for tid, text in self.graph.nodes(data=True):\n if text['count'] < min_count:\n self.graph.remove_node(tid)",
"def remove_matching_reads(filename, cont_file):\n if not os.path.exists(cont_file + '.bwt'):\n cml = shlex.split('bwa index %s' % cont_file)\n subprocess.call(cml)\n cml = 'bwa mem -t 2 %s %s 2> /dev/null | samtools view -f 4 -h - | samtools bam2fq - ' % (cont_file, filename)\n cml += '| seqtk seq -A - > clean_reads.fasta'\n\n subprocess.call(cml, shell=True)\n return 'clean_reads.fasta'",
"def mutants_in_dataset(self, dataset_name=None):\n return [mutant for mutant in self if dataset_name is None or mutant.by_dataset[dataset_name].total_read_count>0]",
"def filter_rare_genes(data, *extra_data, cutoff=0, min_cells=5):\n gene_sums = measure.gene_capture_count(data, cutoff=cutoff)\n keep_genes_idx = gene_sums >= min_cells\n data = select.select_cols(data, *extra_data, idx=keep_genes_idx)\n return data",
"def remove_pruned_supersets(supersets, max_non_deps):\n for n in supersets[:]:\n if max_non_deps.contains_subset(n.attrs):\n supersets.remove(n)",
"def filter_data():\n df_rating = pd.read_csv(MAIN_FOLDER.parent / 'rating.csv')\n df_tag = pd.read_csv(MAIN_FOLDER.parent / 'genome_scores.csv')\n\n n_users = int(len(df_rating[DatasetColumnName.USER_ID.value].unique()) * 0.01)\n print(f'Initial dataset size: {df_rating.shape[0]} ratings')\n\n grouped = df_rating.groupby([DatasetColumnName.MOVIE_ID.value])\n n_movies = grouped.size()\n index_names = n_movies[n_movies > n_users].index\n\n filtered_df_rating = df_rating[df_rating[DatasetColumnName.MOVIE_ID.value].isin(index_names)]\n filtered_df_tag = df_tag[df_tag[DatasetColumnName.MOVIE_ID.value].isin(index_names)]\n\n print(f'Filtered dataset size: {filtered_df_rating.shape[0]} ratings')\n print(f'Reduced dataset size on {np.round((df_rating.shape[0] - filtered_df_rating.shape[0]) / df_rating.shape[0], 2) * 100}%')\n\n filtered_df_rating.to_csv(MAIN_FOLDER.parent / 'filtered_rating.csv', index=False)\n filtered_df_tag.to_csv(MAIN_FOLDER.parent / 'filtered_tag.csv', index=False)",
"def mask_large_samples(data, thres, obs_min, static=None, return_mask=False):\n result_data = []\n n = len(data) #number of data views of compact format (values, times, indices, ..)\n mask = data[8] <= thres\n min_mask = data[8] >= obs_min #mask patients with less than n_mc_smps many num_obs_values\n print('-> {} patients have less than {} observation values'.format(np.sum(~min_mask),obs_min))\n mask = np.logical_and(mask, min_mask)\n print('---> Removing {} patients'.format(np.sum(~mask)))\n for i in np.arange(n):\n result_data.append(data[i][mask])\n if static is not None:\n result_static = static[mask]\n if return_mask:\n return result_data, result_static, mask \n else:\n return result_data, result_static\n else:\n if return_mask:\n return result_data, mask\n else:\n return result_data",
"def filterSamples(self, sample_ids_to_keep, strict=True):\r\n for sid in self.SampleIds:\r\n if sid not in sample_ids_to_keep:\r\n del self._metadata[sid]\r\n\r\n if strict:\r\n extra_samples = set(sample_ids_to_keep) - set(self.SampleIds)\r\n\r\n if extra_samples:\r\n raise ValueError(\"Could not find the following sample IDs in \"\r\n \"metadata map: %s\" % ', '.join(extra_samples))",
"def remove_pruned_subsets(subsets, min_deps):\n for n in subsets[:]:\n if min_deps.contains_superset(n.attrs):\n subsets.remove(n)",
"def remove_distance_extremes(scan, low, high):\n scan.samples[:] = [sample for sample in scan.samples if (\n sample.distance >= low and sample.distance <= high)]",
"def mappability(store, cutoff=.50, filter_srrs=None, keep_srrs=None):\n\n se = store['prealn/workflow/hisat2'][['num_reads', 'num_reads_unpaired', 'num_unaligned']].copy()\n se.dropna(inplace=True)\n se['prop_unaligned'] = se['num_unaligned'] / se['num_reads']\n\n pe = store['prealn/workflow/hisat2'][['num_reads', 'num_reads_paired', 'num_concordant_reads_unaligned']].copy()\n pe.dropna(inplace=True)\n pe['prop_unaligned'] = pe['num_concordant_reads_unaligned'] / pe['num_reads']\n\n df = pd.concat([se, pe])\n df.reset_index(inplace=True)\n\n df = remove_rows(df, 'srr', filter_srrs)\n df = keep_rows(df, 'srr', keep_srrs)\n\n return df.loc[df['prop_unaligned'] <= cutoff, ['srx', 'srr']]",
"def _clean_hits(reads):\n new_reads = defaultdict(realign)\n for r in reads:\n world = {}\n sc = 0\n for p in reads[r].precursors:\n world[p] = reads[r].precursors[p].get_score(len(reads[r].sequence))\n if sc < world[p]:\n sc = world[p]\n new_reads[r] = reads[r]\n for p in world:\n logger.debug(\"score %s %s %s\" % (r, p, world[p]))\n if sc != world[p]:\n logger.debug(\"remove %s %s %s\" % (r, p, world[p]))\n new_reads[r].remove_precursor(p)\n\n return new_reads",
"def filter_cells(\n molecule_table: pd.DataFrame,\n min_umi_per_cell: int = 10,\n min_avg_reads_per_umi: float = 2.0,\n) -> pd.DataFrame:\n # Detect if the UMI column contains UMI counts or the actual UMI sequence\n umi_count = molecule_table[\"UMI\"].dtype != object\n\n cell_groups = molecule_table.groupby(\"cellBC\")\n umis_per_cell = (\n cell_groups[\"UMI\"].sum() if umi_count else cell_groups.size()\n )\n umis_per_cell_mask = umis_per_cell >= min_umi_per_cell\n avg_reads_per_umi = cell_groups[\"readCount\"].sum() / umis_per_cell\n avg_read_per_umi_mask = avg_reads_per_umi >= min_avg_reads_per_umi\n\n umis_per_cell_passing = set(umis_per_cell_mask.index[umis_per_cell_mask])\n avg_read_per_umi_passing = set(\n avg_read_per_umi_mask.index[avg_read_per_umi_mask]\n )\n passing_cells = umis_per_cell_passing & avg_read_per_umi_passing\n passing_mask = molecule_table[\"cellBC\"].isin(passing_cells)\n n_cells = molecule_table[\"cellBC\"].nunique()\n logger.info(\n f\"Filtered out {n_cells - len(passing_cells)} cells with too few UMIs \"\n \"or too few average number of reads per UMI.\"\n )\n molecule_table_filt = molecule_table[~passing_mask]\n n_umi_filt = (\n molecule_table_filt[\"UMI\"].sum()\n if umi_count\n else molecule_table_filt.shape[0]\n )\n logger.info(f\"Filtered out {n_umi_filt} UMIs as a result.\")\n return molecule_table[passing_mask].copy()",
"def truncate_reads(tmp_dir, infile, unaligned_set, n, min_len):\n\n outfile = \"{0}/truncated.fastq\".format(tmp_dir)\n with ps.FastxFile(infile, \"r\") as inf, open(outfile, \"w\") as outf:\n for entry in inf:\n if entry.name in unaligned_set or n == min_len:\n entry.sequence = entry.sequence[:n]\n entry.quality = entry.quality[:n]\n outf.write(str(entry) + \"\\n\")\n return outfile",
"def contamination(store, cutoff=50, filter_srrs=None, keep_srrs=None):\n\n df = store['prealn/workflow/fastq_screen'].copy()\n df.reset_index(inplace=True)\n df = df[['srx', 'srr', 'reference', 'one_hit_one_library_percent']].set_index(['srx', 'srr', 'reference']).unstack()\n df.columns = df.columns.droplevel(0)\n df.reset_index(inplace=True)\n\n df = remove_rows(df, 'srr', filter_srrs)\n df = keep_rows(df, 'srr', keep_srrs)\n\n return df.loc[df['dm6'] >= cutoff, ['srx', 'srr']]",
"def test_wrong_length_with_filter(multi_mol_system_irregular, sequence):\n # We exclude the second molecule. The filter excludes it based on the\n # number of nodes, which is 15 because it has 5 residues with 3 nodes\n # each.\n processor = dssp.AnnotateResidues(\n \"test\",\n sequence,\n molecule_selector=lambda mol: len(mol.nodes) != (5 * 3),\n )\n with pytest.raises(ValueError):\n processor.run_system(multi_mol_system_irregular)",
"def _zero_out_most_similar_conformer(self):\n n_confs = len(self._coor_set)\n\n # Make a square matrix for pairwise RMSDs, where\n # - the lower triangle (and diagonal) are np.inf\n # - the upper triangle contains the pairwise RMSDs (k=1 to exclude diagonal)\n pairwise_rmsd_matrix = np.zeros((n_confs,) * 2)\n pairwise_rmsd_matrix[np.tril_indices(n_confs)] = np.inf\n for i, j in zip(*np.triu_indices(n_confs, k=1)):\n pairwise_rmsd_matrix[i, j] = calc_rmsd(self._coor_set[i], self._coor_set[j])\n\n # Which coords have the lowest RMSD?\n # `idx_low_rmsd` will contain the coordinates of the lowest value in the pairwise matrix\n # a.k.a. the indices of the closest confs\n idx_low_rmsd = np.array(\n np.unravel_index(\n np.argmin(pairwise_rmsd_matrix), pairwise_rmsd_matrix.shape\n )\n )\n low_rmsd = pairwise_rmsd_matrix[tuple(idx_low_rmsd)]\n logger.debug(\n f\"Lowest RMSD between conformers {idx_low_rmsd.tolist()}: {low_rmsd:.06f} Å\"\n )\n\n # Of these, which has the lowest occupancy?\n occs_low_rmsd = self._occupancies[idx_low_rmsd]\n idx_to_zero, idx_to_keep = idx_low_rmsd[occs_low_rmsd.argsort()]\n\n # Assign conformer we want to remove with an occupancy of 0\n logger.debug(\n f\"Zeroing occupancy of conf {idx_to_zero} (of {n_confs}): \"\n f\"occ={self._occupancies[idx_to_zero]:.06f} vs {self._occupancies[idx_to_keep]:.06f}\"\n )\n if (\n self.options.write_intermediate_conformers\n ): # Output all conformations before we remove them\n self._write_intermediate_conformers(prefix=\"cplex_remove\")\n self._occupancies[idx_to_zero] = 0",
"def remove_intersection(cls, first_df: pd.DataFrame, second_df: pd.DataFrame):\n\n first_df = first_df[~first_df.isin(second_df)].dropna()\n cls.logger.info(f'{len(first_df)} emails left to spam after removing already spammed')\n return first_df",
"def test_filter_samples_from_distance_matrix_negate(self):\r\n actual = filter_samples_from_distance_matrix(\r\n parse_distmat(self.input_dm1),\r\n [\"ABC blah\", \"DEF\"],\r\n negate=True)\r\n self.assertEqual(actual, expected_dm1a)\r\n actual = filter_samples_from_distance_matrix(\r\n parse_distmat(self.input_dm1),\r\n [\"ABC\", \"XYZ\"],\r\n negate=True)\r\n self.assertEqual(actual, expected_dm1b)",
"def mask_test_train_count(data, split, rating_threshold): \n # create a copy of the full data for reduction\n training_set = data.copy()\n\n # create max split\n max_split = int(split*(training_set.nnz))\n\n # find index of values which are not empty and over threshold\n rating_inds = np.nonzero(training_set > rating_threshold)\n \n # create list of index pairs\n rating_pairs = list(zip(rating_inds[0], rating_inds[1]))\n\n # Split ration, based on threshold\n thres_max = len(rating_pairs)\n\n if thres_max > max_split:\n masking_ratio = max_split / thres_max\n else:\n sys.exit('Your threshold for rating is too high, please recalculate and lower down the threshold')\n\n # calculate the number of samples to be removed in training set\n num_samples = int(np.ceil(masking_ratio*len(rating_pairs)))\n\n # get random samples\n samples = random.sample(rating_pairs, num_samples)\n\n # remove selected samples in training set\n user_inds = [index[0] for index in samples]\n item_inds = [index[1] for index in samples]\n training_set[user_inds, item_inds] = 0 \n\n return training_set, list(set(user_inds)), np.array(samples)",
"def trim(args) :\n from trimmer import trim_reads\n trim_reads(args)",
"def filter_reads(filename, max_n=100000, min_len=129):\n # run seqtk trimfq to trim low quality ends\n logging.info('Trimming reads with seqtk, subsample, and delete reads shorter than %d', min_len)\n r1 = 'seqtk trimfq %s | seqtk seq -L %d | seqtk sample - %d > high_quality.fastq' % (filename, min_len, max_n)\n subprocess.call(r1, shell=True, universal_newlines=True)\n return 'high_quality.fastq'",
"def prune(pybel_list, min_RMSD):\n #Set up OBAling object\n align = openbabel.OBAlign()\n #Loop\n i = 0\n total_removed = 0\n while i < len(pybel_list):\n referens = pybel_list[i].OBMol #reference\n align.SetRefMol(referens)\n j = i + 1\n while j < len(pybel_list):\n target = pybel_list[j].OBMol #target\n align.SetTargetMol(target)\n #Align and ret rmsd\n if align.Align():\n rmsd = align.GetRMSD()\n if rmsd < min_RMSD:\n pybel_list.pop(j) #remove from both lists\n total_removed += 1\n else:\n j = j + 1\n else:\n print \"Couldn't align\"\n raise Exception()\n #end of inner loop\n i = i + 1\n #end of outer loop\n print \"finished deleting, total number of \\\n removed conformers is\", total_removed\n return pybel_list"
] |
[
"0.7992664",
"0.7603158",
"0.6634517",
"0.5842482",
"0.5786048",
"0.563014",
"0.55709016",
"0.5474332",
"0.5413037",
"0.5322938",
"0.52375",
"0.5231341",
"0.51369655",
"0.5125441",
"0.5116463",
"0.5109765",
"0.5081535",
"0.5081438",
"0.5073213",
"0.5072249",
"0.50319",
"0.50147116",
"0.49960336",
"0.49836162",
"0.49806264",
"0.49644986",
"0.4936267",
"0.49342322",
"0.49262124",
"0.49238518"
] |
0.7930307
|
1
|
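Note on the preceding record and the one that follows: remove_mutants_in_other_dataset drops mutants with at least readcount_min reads in the other dataset, while remove_mutants_not_in_other_dataset drops those below the threshold. The sketch below isolates that threshold split with plain dicts standing in for the real dataset objects; the helper name and data are hypothetical.

def split_by_other_readcount(readcounts_in_other, readcount_min=1):
    # which insertion barcodes each method would remove, given readcounts in the other dataset
    removed_if_in = [ib for ib, n in readcounts_in_other.items() if n >= readcount_min]
    removed_if_not_in = [ib for ib, n in readcounts_in_other.items() if n < readcount_min]
    return removed_if_in, removed_if_not_in

other = {'IB1': 0, 'IB2': 3, 'IB3': 1}
print(split_by_other_readcount(other, readcount_min=2))  # -> (['IB2'], ['IB1', 'IB3'])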
Remove any mutants with less than readcount_min reads in other_dataset (or perfect reads, if perfect_reads=True)
|
def remove_mutants_not_in_other_dataset(self, other_dataset, readcount_min=1, perfect_reads=False):
# TODO do I want this to be based on non-exact position equality instead?
if perfect_reads: get_readcount = lambda m: m.perfect_read_count
else: get_readcount = lambda m: m.total_read_count
# go over all mutants in self; need to convert the iterator to a list to make a separate copy,
# otherwise we'd be modifying the iterator while iterating through it, which isn't allowed.
for mutant in list(self):
if get_readcount(other_dataset.get_mutant(mutant.IB)) < readcount_min:
self.remove_mutant(mutant.IB)
# TODO really I shouldn't be removing mutants outright, just noting them as removed or something... In that case should they or should they not show up in "for m in self"? Probably not - they should have a separate dictionary?
# TODO should I keep track of removed reads, and print in summary? PROBABLY.
# LATER-TODO unit-test - it does have run-tests though.
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def remove_mutants_in_other_dataset(self, other_dataset, readcount_min=1, perfect_reads=False):\n # TODO do I want this to be based on non-exact position equality instead?\n if perfect_reads: get_readcount = lambda m: m.perfect_read_count\n else: get_readcount = lambda m: m.total_read_count\n # go over all mutants in self; need to convert the iterator to a list to make a separate copy, \n # otherwise we'd be modifying the iterator while iterating through it, which isn't allowed.\n for mutant in list(self):\n if get_readcount(other_dataset.get_mutant(mutant.IB)) >= readcount_min:\n self.remove_mutant(mutant.IB)\n # TODO really I shouldn't be removing mutants outright, just noting them as removed or something... In that case should they or should they not show up in \"for m in self\"? Probably not - they should have a separate dictionary?\n # TODO should I keep track of removed reads, and print in summary? PROBABLY.\n # LATER-TODO unit-test - it does have run-tests though.",
"def remove_mutants_below_readcount(self, min_readcount, perfect_reads=False):\n if perfect_reads: get_readcount = lambda m: m.perfect_read_count\n else: get_readcount = lambda m: m.total_read_count\n # go over all mutants in self; need to convert dataset to a list to make a separate copy, \n # otherwise we'd be modifying the dataset while iterating through it, which isn't allowed.\n for mutant in list(self):\n if get_readcount(mutant) < min_readcount:\n self.remove_mutant(mutant.IB)\n # TODO really I shouldn't be removing mutants outright, just noting them as removed or something... In that case should they or should they not show up in \"for m in self\"? Probably not - they should have a separate dictionary?\n # TODO should I keep track of removed reads, and print in summary? MAYBE.",
"def filter_umis(\n molecule_table: pd.DataFrame, min_reads_per_umi: int = 100\n) -> pd.DataFrame:\n return molecule_table[molecule_table[\"readCount\"] >= min_reads_per_umi]",
"def add_filter_min_reads(self, min_reads):\n mr_filter = self.existence_array > min_reads\n self.filter &= mr_filter",
"def prune(self, min_count):\n if not self.sorted:\n self.sort()\n for k, count in enumerate(self.Nx):\n if count < min_count:\n self.truncate(k)\n break",
"def prune(read_objects):\r\n\t\timport numpy as np\r\n\t\tbest_reads = []\r\n\t\tread_name_reads = {}\r\n\t\tfor read in read_objects:\r\n\t\t\t\tif read.name in read_name_reads:\r\n\t\t\t\t\t\tread_name_reads[read.name].append(read)\r\n\t\t\t\telse:\r\n\t\t\t\t\t\tread_name_reads[read.name] = [read]\r\n\t\tfor read_name, read_objects in read_name_reads.items():\r\n\t\t\t\te_scores = [read.e_score for read in read_objects]\r\n\t\t\t\ti = np.argmin(e_scores)\r\n\t\t\t\tbest_reads.append(read_objects[i])\r\n\t\treturn best_reads",
"def remove_low_quality_for_matched(matches, read_count, phreds, min_phred_score, ditched_f=None):\n\tcount = count_unique = 0\n\tkk = matches.keys()\n\tfor k in kk:\n\t\tm = matches[k]\n\t\tif any( x < min_phred_score for x in phreds[m.read.tostring()] ):\n\t\t\tcount += read_count[m.read.tostring()]\n\t\t\tcount_unique += 1\n\t\t\tif ditched_f is not None:\n\t\t\t\tditched_f.write(\"@{id}\\n{seq}\\n+{id}\\n{qual}\\n\".format( id=k, seq=m.read, \\\n\t\t\t\t\tqual=m.quality ))\n\t\t\tdel matches[k]\n\t\t\tdel read_count[m.read.tostring()]\n\t\t\tdel phreds[m.read.tostring()]\n\treturn count, count_unique",
"def trim_texts_by_count(self, min_count=100):\n\n for tid, text in self.graph.nodes(data=True):\n if text['count'] < min_count:\n self.graph.remove_node(tid)",
"def remove_matching_reads(filename, cont_file):\n if not os.path.exists(cont_file + '.bwt'):\n cml = shlex.split('bwa index %s' % cont_file)\n subprocess.call(cml)\n cml = 'bwa mem -t 2 %s %s 2> /dev/null | samtools view -f 4 -h - | samtools bam2fq - ' % (cont_file, filename)\n cml += '| seqtk seq -A - > clean_reads.fasta'\n\n subprocess.call(cml, shell=True)\n return 'clean_reads.fasta'",
"def mutants_in_dataset(self, dataset_name=None):\n return [mutant for mutant in self if dataset_name is None or mutant.by_dataset[dataset_name].total_read_count>0]",
"def filter_rare_genes(data, *extra_data, cutoff=0, min_cells=5):\n gene_sums = measure.gene_capture_count(data, cutoff=cutoff)\n keep_genes_idx = gene_sums >= min_cells\n data = select.select_cols(data, *extra_data, idx=keep_genes_idx)\n return data",
"def remove_pruned_supersets(supersets, max_non_deps):\n for n in supersets[:]:\n if max_non_deps.contains_subset(n.attrs):\n supersets.remove(n)",
"def filter_data():\n df_rating = pd.read_csv(MAIN_FOLDER.parent / 'rating.csv')\n df_tag = pd.read_csv(MAIN_FOLDER.parent / 'genome_scores.csv')\n\n n_users = int(len(df_rating[DatasetColumnName.USER_ID.value].unique()) * 0.01)\n print(f'Initial dataset size: {df_rating.shape[0]} ratings')\n\n grouped = df_rating.groupby([DatasetColumnName.MOVIE_ID.value])\n n_movies = grouped.size()\n index_names = n_movies[n_movies > n_users].index\n\n filtered_df_rating = df_rating[df_rating[DatasetColumnName.MOVIE_ID.value].isin(index_names)]\n filtered_df_tag = df_tag[df_tag[DatasetColumnName.MOVIE_ID.value].isin(index_names)]\n\n print(f'Filtered dataset size: {filtered_df_rating.shape[0]} ratings')\n print(f'Reduced dataset size on {np.round((df_rating.shape[0] - filtered_df_rating.shape[0]) / df_rating.shape[0], 2) * 100}%')\n\n filtered_df_rating.to_csv(MAIN_FOLDER.parent / 'filtered_rating.csv', index=False)\n filtered_df_tag.to_csv(MAIN_FOLDER.parent / 'filtered_tag.csv', index=False)",
"def mask_large_samples(data, thres, obs_min, static=None, return_mask=False):\n result_data = []\n n = len(data) #number of data views of compact format (values, times, indices, ..)\n mask = data[8] <= thres\n min_mask = data[8] >= obs_min #mask patients with less than n_mc_smps many num_obs_values\n print('-> {} patients have less than {} observation values'.format(np.sum(~min_mask),obs_min))\n mask = np.logical_and(mask, min_mask)\n print('---> Removing {} patients'.format(np.sum(~mask)))\n for i in np.arange(n):\n result_data.append(data[i][mask])\n if static is not None:\n result_static = static[mask]\n if return_mask:\n return result_data, result_static, mask \n else:\n return result_data, result_static\n else:\n if return_mask:\n return result_data, mask\n else:\n return result_data",
"def filterSamples(self, sample_ids_to_keep, strict=True):\r\n for sid in self.SampleIds:\r\n if sid not in sample_ids_to_keep:\r\n del self._metadata[sid]\r\n\r\n if strict:\r\n extra_samples = set(sample_ids_to_keep) - set(self.SampleIds)\r\n\r\n if extra_samples:\r\n raise ValueError(\"Could not find the following sample IDs in \"\r\n \"metadata map: %s\" % ', '.join(extra_samples))",
"def remove_pruned_subsets(subsets, min_deps):\n for n in subsets[:]:\n if min_deps.contains_superset(n.attrs):\n subsets.remove(n)",
"def mappability(store, cutoff=.50, filter_srrs=None, keep_srrs=None):\n\n se = store['prealn/workflow/hisat2'][['num_reads', 'num_reads_unpaired', 'num_unaligned']].copy()\n se.dropna(inplace=True)\n se['prop_unaligned'] = se['num_unaligned'] / se['num_reads']\n\n pe = store['prealn/workflow/hisat2'][['num_reads', 'num_reads_paired', 'num_concordant_reads_unaligned']].copy()\n pe.dropna(inplace=True)\n pe['prop_unaligned'] = pe['num_concordant_reads_unaligned'] / pe['num_reads']\n\n df = pd.concat([se, pe])\n df.reset_index(inplace=True)\n\n df = remove_rows(df, 'srr', filter_srrs)\n df = keep_rows(df, 'srr', keep_srrs)\n\n return df.loc[df['prop_unaligned'] <= cutoff, ['srx', 'srr']]",
"def remove_distance_extremes(scan, low, high):\n scan.samples[:] = [sample for sample in scan.samples if (\n sample.distance >= low and sample.distance <= high)]",
"def _clean_hits(reads):\n new_reads = defaultdict(realign)\n for r in reads:\n world = {}\n sc = 0\n for p in reads[r].precursors:\n world[p] = reads[r].precursors[p].get_score(len(reads[r].sequence))\n if sc < world[p]:\n sc = world[p]\n new_reads[r] = reads[r]\n for p in world:\n logger.debug(\"score %s %s %s\" % (r, p, world[p]))\n if sc != world[p]:\n logger.debug(\"remove %s %s %s\" % (r, p, world[p]))\n new_reads[r].remove_precursor(p)\n\n return new_reads",
"def filter_cells(\n molecule_table: pd.DataFrame,\n min_umi_per_cell: int = 10,\n min_avg_reads_per_umi: float = 2.0,\n) -> pd.DataFrame:\n # Detect if the UMI column contains UMI counts or the actual UMI sequence\n umi_count = molecule_table[\"UMI\"].dtype != object\n\n cell_groups = molecule_table.groupby(\"cellBC\")\n umis_per_cell = (\n cell_groups[\"UMI\"].sum() if umi_count else cell_groups.size()\n )\n umis_per_cell_mask = umis_per_cell >= min_umi_per_cell\n avg_reads_per_umi = cell_groups[\"readCount\"].sum() / umis_per_cell\n avg_read_per_umi_mask = avg_reads_per_umi >= min_avg_reads_per_umi\n\n umis_per_cell_passing = set(umis_per_cell_mask.index[umis_per_cell_mask])\n avg_read_per_umi_passing = set(\n avg_read_per_umi_mask.index[avg_read_per_umi_mask]\n )\n passing_cells = umis_per_cell_passing & avg_read_per_umi_passing\n passing_mask = molecule_table[\"cellBC\"].isin(passing_cells)\n n_cells = molecule_table[\"cellBC\"].nunique()\n logger.info(\n f\"Filtered out {n_cells - len(passing_cells)} cells with too few UMIs \"\n \"or too few average number of reads per UMI.\"\n )\n molecule_table_filt = molecule_table[~passing_mask]\n n_umi_filt = (\n molecule_table_filt[\"UMI\"].sum()\n if umi_count\n else molecule_table_filt.shape[0]\n )\n logger.info(f\"Filtered out {n_umi_filt} UMIs as a result.\")\n return molecule_table[passing_mask].copy()",
"def truncate_reads(tmp_dir, infile, unaligned_set, n, min_len):\n\n outfile = \"{0}/truncated.fastq\".format(tmp_dir)\n with ps.FastxFile(infile, \"r\") as inf, open(outfile, \"w\") as outf:\n for entry in inf:\n if entry.name in unaligned_set or n == min_len:\n entry.sequence = entry.sequence[:n]\n entry.quality = entry.quality[:n]\n outf.write(str(entry) + \"\\n\")\n return outfile",
"def contamination(store, cutoff=50, filter_srrs=None, keep_srrs=None):\n\n df = store['prealn/workflow/fastq_screen'].copy()\n df.reset_index(inplace=True)\n df = df[['srx', 'srr', 'reference', 'one_hit_one_library_percent']].set_index(['srx', 'srr', 'reference']).unstack()\n df.columns = df.columns.droplevel(0)\n df.reset_index(inplace=True)\n\n df = remove_rows(df, 'srr', filter_srrs)\n df = keep_rows(df, 'srr', keep_srrs)\n\n return df.loc[df['dm6'] >= cutoff, ['srx', 'srr']]",
"def test_wrong_length_with_filter(multi_mol_system_irregular, sequence):\n # We exclude the second molecule. The filter excludes it based on the\n # number of nodes, which is 15 because it has 5 residues with 3 nodes\n # each.\n processor = dssp.AnnotateResidues(\n \"test\",\n sequence,\n molecule_selector=lambda mol: len(mol.nodes) != (5 * 3),\n )\n with pytest.raises(ValueError):\n processor.run_system(multi_mol_system_irregular)",
"def _zero_out_most_similar_conformer(self):\n n_confs = len(self._coor_set)\n\n # Make a square matrix for pairwise RMSDs, where\n # - the lower triangle (and diagonal) are np.inf\n # - the upper triangle contains the pairwise RMSDs (k=1 to exclude diagonal)\n pairwise_rmsd_matrix = np.zeros((n_confs,) * 2)\n pairwise_rmsd_matrix[np.tril_indices(n_confs)] = np.inf\n for i, j in zip(*np.triu_indices(n_confs, k=1)):\n pairwise_rmsd_matrix[i, j] = calc_rmsd(self._coor_set[i], self._coor_set[j])\n\n # Which coords have the lowest RMSD?\n # `idx_low_rmsd` will contain the coordinates of the lowest value in the pairwise matrix\n # a.k.a. the indices of the closest confs\n idx_low_rmsd = np.array(\n np.unravel_index(\n np.argmin(pairwise_rmsd_matrix), pairwise_rmsd_matrix.shape\n )\n )\n low_rmsd = pairwise_rmsd_matrix[tuple(idx_low_rmsd)]\n logger.debug(\n f\"Lowest RMSD between conformers {idx_low_rmsd.tolist()}: {low_rmsd:.06f} Å\"\n )\n\n # Of these, which has the lowest occupancy?\n occs_low_rmsd = self._occupancies[idx_low_rmsd]\n idx_to_zero, idx_to_keep = idx_low_rmsd[occs_low_rmsd.argsort()]\n\n # Assign conformer we want to remove with an occupancy of 0\n logger.debug(\n f\"Zeroing occupancy of conf {idx_to_zero} (of {n_confs}): \"\n f\"occ={self._occupancies[idx_to_zero]:.06f} vs {self._occupancies[idx_to_keep]:.06f}\"\n )\n if (\n self.options.write_intermediate_conformers\n ): # Output all conformations before we remove them\n self._write_intermediate_conformers(prefix=\"cplex_remove\")\n self._occupancies[idx_to_zero] = 0",
"def remove_intersection(cls, first_df: pd.DataFrame, second_df: pd.DataFrame):\n\n first_df = first_df[~first_df.isin(second_df)].dropna()\n cls.logger.info(f'{len(first_df)} emails left to spam after removing already spammed')\n return first_df",
"def test_filter_samples_from_distance_matrix_negate(self):\r\n actual = filter_samples_from_distance_matrix(\r\n parse_distmat(self.input_dm1),\r\n [\"ABC blah\", \"DEF\"],\r\n negate=True)\r\n self.assertEqual(actual, expected_dm1a)\r\n actual = filter_samples_from_distance_matrix(\r\n parse_distmat(self.input_dm1),\r\n [\"ABC\", \"XYZ\"],\r\n negate=True)\r\n self.assertEqual(actual, expected_dm1b)",
"def mask_test_train_count(data, split, rating_threshold): \n # create a copy of the full data for reduction\n training_set = data.copy()\n\n # create max split\n max_split = int(split*(training_set.nnz))\n\n # find index of values which are not empty and over threshold\n rating_inds = np.nonzero(training_set > rating_threshold)\n \n # create list of index pairs\n rating_pairs = list(zip(rating_inds[0], rating_inds[1]))\n\n # Split ration, based on threshold\n thres_max = len(rating_pairs)\n\n if thres_max > max_split:\n masking_ratio = max_split / thres_max\n else:\n sys.exit('Your threshold for rating is too high, please recalculate and lower down the threshold')\n\n # calculate the number of samples to be removed in training set\n num_samples = int(np.ceil(masking_ratio*len(rating_pairs)))\n\n # get random samples\n samples = random.sample(rating_pairs, num_samples)\n\n # remove selected samples in training set\n user_inds = [index[0] for index in samples]\n item_inds = [index[1] for index in samples]\n training_set[user_inds, item_inds] = 0 \n\n return training_set, list(set(user_inds)), np.array(samples)",
"def trim(args) :\n from trimmer import trim_reads\n trim_reads(args)",
"def filter_reads(filename, max_n=100000, min_len=129):\n # run seqtk trimfq to trim low quality ends\n logging.info('Trimming reads with seqtk, subsample, and delete reads shorter than %d', min_len)\n r1 = 'seqtk trimfq %s | seqtk seq -L %d | seqtk sample - %d > high_quality.fastq' % (filename, min_len, max_n)\n subprocess.call(r1, shell=True, universal_newlines=True)\n return 'high_quality.fastq'",
"def prune(pybel_list, min_RMSD):\n #Set up OBAling object\n align = openbabel.OBAlign()\n #Loop\n i = 0\n total_removed = 0\n while i < len(pybel_list):\n referens = pybel_list[i].OBMol #reference\n align.SetRefMol(referens)\n j = i + 1\n while j < len(pybel_list):\n target = pybel_list[j].OBMol #target\n align.SetTargetMol(target)\n #Align and ret rmsd\n if align.Align():\n rmsd = align.GetRMSD()\n if rmsd < min_RMSD:\n pybel_list.pop(j) #remove from both lists\n total_removed += 1\n else:\n j = j + 1\n else:\n print \"Couldn't align\"\n raise Exception()\n #end of inner loop\n i = i + 1\n #end of outer loop\n print \"finished deleting, total number of \\\n removed conformers is\", total_removed\n return pybel_list"
] |
[
"0.7931165",
"0.76038706",
"0.66349447",
"0.5843144",
"0.5782819",
"0.5628872",
"0.5569599",
"0.5472574",
"0.54124075",
"0.53238225",
"0.5237388",
"0.52301025",
"0.5135954",
"0.51233846",
"0.5114688",
"0.51076555",
"0.5081119",
"0.5079994",
"0.50722814",
"0.50722796",
"0.50311095",
"0.5013998",
"0.49969313",
"0.49823013",
"0.4978954",
"0.49627352",
"0.49343827",
"0.49319857",
"0.49268484",
"0.49206612"
] |
0.79933906
|
0
|
Remove any mutants with fewer than min_readcount reads (counting only perfect reads if perfect_reads=True)
|
def remove_mutants_below_readcount(self, min_readcount, perfect_reads=False):
if perfect_reads: get_readcount = lambda m: m.perfect_read_count
else: get_readcount = lambda m: m.total_read_count
# go over all mutants in self; need to convert dataset to a list to make a separate copy,
# otherwise we'd be modifying the dataset while iterating through it, which isn't allowed.
for mutant in list(self):
if get_readcount(mutant) < min_readcount:
self.remove_mutant(mutant.IB)
# TODO really I shouldn't be removing mutants outright, just noting them as removed or something... In that case should they or should they not show up in "for m in self"? Probably not - they should have a separate dictionary?
# TODO should I keep track of removed reads, and print in summary? MAYBE.
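Illustrative usage sketch (not part of the original dataset record): the Mutant and MutantDataset classes below are simplified stand-ins with assumed names, shown only to demonstrate the "snapshot with list() before removing" pattern the method above depends on.

# Minimal, self-contained sketch of the remove-while-iterating pattern used above.
# Mutant/MutantDataset are assumptions for illustration, not the original classes.
class Mutant:
    def __init__(self, IB, total_read_count, perfect_read_count=0):
        self.IB = IB
        self.total_read_count = total_read_count
        self.perfect_read_count = perfect_read_count

class MutantDataset:
    def __init__(self, mutants):
        # keyed by insertion barcode (IB), mirroring remove_mutant(mutant.IB) above
        self._by_IB = {m.IB: m for m in mutants}

    def __iter__(self):
        return iter(self._by_IB.values())

    def remove_mutant(self, IB):
        del self._by_IB[IB]

    def remove_mutants_below_readcount(self, min_readcount, perfect_reads=False):
        get_readcount = (lambda m: m.perfect_read_count) if perfect_reads \
            else (lambda m: m.total_read_count)
        # list() snapshots the mutants so removals don't disturb the iteration
        for mutant in list(self):
            if get_readcount(mutant) < min_readcount:
                self.remove_mutant(mutant.IB)

ds = MutantDataset([Mutant("A", 5), Mutant("B", 1), Mutant("C", 3)])
ds.remove_mutants_below_readcount(min_readcount=3)
print(sorted(m.IB for m in ds))  # ['A', 'C']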
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def remove_mutants_not_in_other_dataset(self, other_dataset, readcount_min=1, perfect_reads=False):\n # TODO do I want this to be based on non-exact position equality instead?\n if perfect_reads: get_readcount = lambda m: m.perfect_read_count\n else: get_readcount = lambda m: m.total_read_count\n # go over all mutants in self; need to convert the iterator to a list to make a separate copy, \n # otherwise we'd be modifying the iterator while iterating through it, which isn't allowed.\n for mutant in list(self):\n if get_readcount(other_dataset.get_mutant(mutant.IB)) < readcount_min:\n self.remove_mutant(mutant.IB)\n # TODO really I shouldn't be removing mutants outright, just noting them as removed or something... In that case should they or should they not show up in \"for m in self\"? Probably not - they should have a separate dictionary?\n # TODO should I keep track of removed reads, and print in summary? PROBABLY.\n # LATER-TODO unit-test - it does have run-tests though.",
"def remove_mutants_in_other_dataset(self, other_dataset, readcount_min=1, perfect_reads=False):\n # TODO do I want this to be based on non-exact position equality instead?\n if perfect_reads: get_readcount = lambda m: m.perfect_read_count\n else: get_readcount = lambda m: m.total_read_count\n # go over all mutants in self; need to convert the iterator to a list to make a separate copy, \n # otherwise we'd be modifying the iterator while iterating through it, which isn't allowed.\n for mutant in list(self):\n if get_readcount(other_dataset.get_mutant(mutant.IB)) >= readcount_min:\n self.remove_mutant(mutant.IB)\n # TODO really I shouldn't be removing mutants outright, just noting them as removed or something... In that case should they or should they not show up in \"for m in self\"? Probably not - they should have a separate dictionary?\n # TODO should I keep track of removed reads, and print in summary? PROBABLY.\n # LATER-TODO unit-test - it does have run-tests though.",
"def filter_umis(\n molecule_table: pd.DataFrame, min_reads_per_umi: int = 100\n) -> pd.DataFrame:\n return molecule_table[molecule_table[\"readCount\"] >= min_reads_per_umi]",
"def prune(self, min_count):\n if not self.sorted:\n self.sort()\n for k, count in enumerate(self.Nx):\n if count < min_count:\n self.truncate(k)\n break",
"def remove_matching_reads(filename, cont_file):\n if not os.path.exists(cont_file + '.bwt'):\n cml = shlex.split('bwa index %s' % cont_file)\n subprocess.call(cml)\n cml = 'bwa mem -t 2 %s %s 2> /dev/null | samtools view -f 4 -h - | samtools bam2fq - ' % (cont_file, filename)\n cml += '| seqtk seq -A - > clean_reads.fasta'\n\n subprocess.call(cml, shell=True)\n return 'clean_reads.fasta'",
"def remove_low_quality_for_matched(matches, read_count, phreds, min_phred_score, ditched_f=None):\n\tcount = count_unique = 0\n\tkk = matches.keys()\n\tfor k in kk:\n\t\tm = matches[k]\n\t\tif any( x < min_phred_score for x in phreds[m.read.tostring()] ):\n\t\t\tcount += read_count[m.read.tostring()]\n\t\t\tcount_unique += 1\n\t\t\tif ditched_f is not None:\n\t\t\t\tditched_f.write(\"@{id}\\n{seq}\\n+{id}\\n{qual}\\n\".format( id=k, seq=m.read, \\\n\t\t\t\t\tqual=m.quality ))\n\t\t\tdel matches[k]\n\t\t\tdel read_count[m.read.tostring()]\n\t\t\tdel phreds[m.read.tostring()]\n\treturn count, count_unique",
"def trim_texts_by_count(self, min_count=100):\n\n for tid, text in self.graph.nodes(data=True):\n if text['count'] < min_count:\n self.graph.remove_node(tid)",
"def add_filter_min_reads(self, min_reads):\n mr_filter = self.existence_array > min_reads\n self.filter &= mr_filter",
"def prune(read_objects):\r\n\t\timport numpy as np\r\n\t\tbest_reads = []\r\n\t\tread_name_reads = {}\r\n\t\tfor read in read_objects:\r\n\t\t\t\tif read.name in read_name_reads:\r\n\t\t\t\t\t\tread_name_reads[read.name].append(read)\r\n\t\t\t\telse:\r\n\t\t\t\t\t\tread_name_reads[read.name] = [read]\r\n\t\tfor read_name, read_objects in read_name_reads.items():\r\n\t\t\t\te_scores = [read.e_score for read in read_objects]\r\n\t\t\t\ti = np.argmin(e_scores)\r\n\t\t\t\tbest_reads.append(read_objects[i])\r\n\t\treturn best_reads",
"def trim_quality(self, reads):\n cut = self.quality_cutoff * 3\n start = 0\n qscores = reads[0][3]\n qual = ord(qscores[0]) + ord(qscores[1]) + ord(qscores[2]) - 99\n while qual < cut:\n start += 1\n try:\n qual += ord(qscores[start + 2]) - ord(qscores[start - 1])\n except IndexError:\n break\n stop = len(qscores)\n qual = ord(qscores[-1]) + ord(qscores[-2]) + ord(qscores[-3]) - 99\n while qual < cut:\n stop -= 1\n try:\n qual += ord(qscores[stop - 3]) - ord(qscores[stop])\n except IndexError:\n break\n reads[0][1] = reads[0][1][start:stop]\n reads[0][3] = reads[0][3][start:stop]",
"def truncate_reads(tmp_dir, infile, unaligned_set, n, min_len):\n\n outfile = \"{0}/truncated.fastq\".format(tmp_dir)\n with ps.FastxFile(infile, \"r\") as inf, open(outfile, \"w\") as outf:\n for entry in inf:\n if entry.name in unaligned_set or n == min_len:\n entry.sequence = entry.sequence[:n]\n entry.quality = entry.quality[:n]\n outf.write(str(entry) + \"\\n\")\n return outfile",
"def _clean_hits(reads):\n new_reads = defaultdict(realign)\n for r in reads:\n world = {}\n sc = 0\n for p in reads[r].precursors:\n world[p] = reads[r].precursors[p].get_score(len(reads[r].sequence))\n if sc < world[p]:\n sc = world[p]\n new_reads[r] = reads[r]\n for p in world:\n logger.debug(\"score %s %s %s\" % (r, p, world[p]))\n if sc != world[p]:\n logger.debug(\"remove %s %s %s\" % (r, p, world[p]))\n new_reads[r].remove_precursor(p)\n\n return new_reads",
"def filter_reads(filename, max_n=100000, min_len=129):\n # run seqtk trimfq to trim low quality ends\n logging.info('Trimming reads with seqtk, subsample, and delete reads shorter than %d', min_len)\n r1 = 'seqtk trimfq %s | seqtk seq -L %d | seqtk sample - %d > high_quality.fastq' % (filename, min_len, max_n)\n subprocess.call(r1, shell=True, universal_newlines=True)\n return 'high_quality.fastq'",
"def supportingReadsFilter(spot, args):\n if spot.tags[\"label\"] == \"INS\":\n errId = 1\n errLab = 'insertion'\n elif spot.tags[\"label\"] == \"DEL\":\n errId = 2\n errLab = 'deletion'\n else:#don't worry about other types\n return False\n\n begin, ending = spot.fetchbounds()\n begin -= args.buffer #abs(begin-ending)*.5\n ending += args.buffer #abs(begin-ending)*.5\n #do the hard work\n reads = args.bam.fetch(str(spot.chrom), begin, ending)\n totSizes = []\n coverage = 0\n nReadsErr = 0\n #For tandem\n strandCnt = {True: 0, False: 0}\n \n #count reads and errSizes\n for i in reads:\n mySize = 0\n coverage += 1\n start = i.pos - 1\n cigar = expandCigar(i.cigar)\n curSize = 0\n extraSize = 0\n readHasErr = False\n \n #What if I just intersect any stretches of errors with my boundaries.\n #Then for insertions I'll keep coordinates\n #For deletions I'll user outer bounds?\n for code in cigar: \n if code != 1:\n start += 1\n #must be in region\n if start < begin:\n continue\n if start >= ending:\n break\n \n if code == errId:\n curSize += 1\n if curSize != 0 and code != errId:\n if curSize >= args.minIndelErr:\n readHasErr = True\n mySize += curSize\n elif curSize > 1:#1bp errors will inflate\n extraSize += curSize\n curSize = 0\n \n\n if readHasErr and mySize >= args.minIndelSize:\n nReadsErr += 1\n totSizes.append(mySize + extraSize)\n strandCnt[i.is_reverse] += 1\n \n spot.tags[\"strandCnt\"] = \"%d,%d\" % (strandCnt[False], strandCnt[True])\n if len(totSizes) == 0:\n logging.debug(\"no %s found!? %s\" % (errLab, str(spot)))\n return True # true you should filter\n \n if len(totSizes) < max(math.ceil(coverage * args.minIndelPct), args.minErrReads):\n logging.debug(\"not large cnt %s found %s \" % (errLab, str(spot)))\n return True\n \n totSizes.sort()\n totSizes = numpy.array(totSizes)\n mean = totSizes.mean()\n median = numpy.percentile(totSizes, 50)\n firstQ = numpy.percentile(totSizes, 25)\n thirdQ = numpy.percentile(totSizes, 75)\n \n logging.debug(\"PassFilt %s\" % (str(spot))) \n logging.debug(\"cov %d\" % coverage )\n logging.debug(\"size %d %s\" % (len(totSizes), str(totSizes)))\n logging.debug(\"mean %d\" % mean )\n logging.debug(\"median %d\" % median)\n logging.debug(\"firstQ %d\" % firstQ)\n logging.debug(\"thirdQ %d\" % thirdQ)\n \n spot.tags[\"szCount\"] = int(nReadsErr)\n spot.tags[\"szMean\"] = int(mean)\n spot.tags[\"szMedian\"] = int(median)\n spot.tags[\"sz1stQ\"] = int(firstQ)\n spot.tags[\"sz3rdQ\"] = int(thirdQ)\n return False",
"def filterfn(read):\n return (read.is_proper_pair and read.is_paired and read.tlen > 0 and not read.is_supplementary and not read.is_duplicate and not read.is_unmapped and not read.mate_is_unmapped)",
"def trim_fastq_files_to_length(desired_read_length = 36):\n\n mkdir(READ_LENGTH_TRIMMED_FASTQ_DIR)\n\n template = \"\"\"zcat {input_fastq} | fastx_trimmer -l {desired_read_length} -z -o {output_fastq}\"\"\"\n\n printp(\"\"\"\\n#\\n# trim reads to a certain length\\n#\"\"\")\n printp(\"\"\"\\n# drmr:label read-length-trimming\"\"\")\n printp(\"\"\"\\n# drmr:job time_limit=2h working_directory={}\"\"\".format(READ_LENGTH_TRIMMED_FASTQ_DIR))\n\n for sample, info in DATA.items():\n for x in ['treatment', 'control']:\n input_fastq = get_fastq(get_srr(sample)) if x == 'treatment' else get_fastq(get_input_control_srr(sample))\n output_fastq = get_read_length_trimmed_fastq(get_srr(sample)) if x == 'treatment' else get_read_length_trimmed_fastq(get_input_control_srr(sample))\n printp(template.format(**locals()))\n\n printp(\"\"\"\\n# drmr:wait\"\"\")",
"def prune(pybel_list, min_RMSD):\n #Set up OBAling object\n align = openbabel.OBAlign()\n #Loop\n i = 0\n total_removed = 0\n while i < len(pybel_list):\n referens = pybel_list[i].OBMol #reference\n align.SetRefMol(referens)\n j = i + 1\n while j < len(pybel_list):\n target = pybel_list[j].OBMol #target\n align.SetTargetMol(target)\n #Align and ret rmsd\n if align.Align():\n rmsd = align.GetRMSD()\n if rmsd < min_RMSD:\n pybel_list.pop(j) #remove from both lists\n total_removed += 1\n else:\n j = j + 1\n else:\n print \"Couldn't align\"\n raise Exception()\n #end of inner loop\n i = i + 1\n #end of outer loop\n print \"finished deleting, total number of \\\n removed conformers is\", total_removed\n return pybel_list",
"def contamination(store, cutoff=50, filter_srrs=None, keep_srrs=None):\n\n df = store['prealn/workflow/fastq_screen'].copy()\n df.reset_index(inplace=True)\n df = df[['srx', 'srr', 'reference', 'one_hit_one_library_percent']].set_index(['srx', 'srr', 'reference']).unstack()\n df.columns = df.columns.droplevel(0)\n df.reset_index(inplace=True)\n\n df = remove_rows(df, 'srr', filter_srrs)\n df = keep_rows(df, 'srr', keep_srrs)\n\n return df.loc[df['dm6'] >= cutoff, ['srx', 'srr']]",
"def filter_reads(reads, positions):\n\n\tfiltered_reads = []\n\tread_array = array(reads)\n\n\tfor position in positions:\n\t\tlow_position = position - 25\n\t\thigh_position = position + 25\n\t\tp = []\n\n\t\tfor r in reads:\n\t\t\tpos = int(float(r.get_position()))\n\t\t\tif pos > low_position and pos < high_position:\n\t\t\t\tp.append(r) \n\n\t\tfiltered_reads.extend(p)\n\n\treturn filtered_reads",
"def remove_stuck(traj,size):\n from numpy import sqrt, where\n \n r_min = traj.groupby('particle').first()\n r_max = traj.groupby('particle').last()\n\n pos_columns = ['x','y']\n dist = r_min[pos_columns] - r_max[pos_columns]\n dist_eu = sqrt(dist['x']**2+dist['y']**2)\n\n index_remove = dist_eu.index[where(dist_eu < size)]\n \n traj_new = traj\n for i in range(len(index_remove)):\n traj_new = traj_new[(traj_new['particle'] != index_remove[i])]\n \n return traj_new",
"def prune(self, upper, lower):\n # max_count = sorted([self.counts[key] for key in self.counts.keys()])[::-1][upper]\n max_count = upper\n\n print('Removed all words that occur less than {} times and more than {} times'.format(lower, upper))\n for i, doc in enumerate(self.docs):\n new_doc = []\n for word in doc:\n if self.counts[word] <= max_count and self.counts[word] > lower:\n new_doc.append(word)\n self.docs[i] = new_doc",
"def trimDups( options, data ):\n for c in data.chrNames:\n prevBlock = MafBlock()\n replacement = []\n if c not in data.mafBlocksByChrom:\n data.mafBlocksByChrom[ c ] = replacement\n continue\n for m in data.mafBlocksByChrom[ c ]:\n if m.refStart <= prevBlock.refEnd:\n if m.refEnd > prevBlock.refEnd:\n # only add in the new, distinct, bases\n m.refStart = prevBlock.refEnd + 1\n else:\n # this block is totally covered by the previous block\n continue\n replacement.append( m )\n prevBlock = m\n data.mafBlocksByChrom[ c ] = replacement",
"def trim(args) :\n from trimmer import trim_reads\n trim_reads(args)",
"def mappability(store, cutoff=.50, filter_srrs=None, keep_srrs=None):\n\n se = store['prealn/workflow/hisat2'][['num_reads', 'num_reads_unpaired', 'num_unaligned']].copy()\n se.dropna(inplace=True)\n se['prop_unaligned'] = se['num_unaligned'] / se['num_reads']\n\n pe = store['prealn/workflow/hisat2'][['num_reads', 'num_reads_paired', 'num_concordant_reads_unaligned']].copy()\n pe.dropna(inplace=True)\n pe['prop_unaligned'] = pe['num_concordant_reads_unaligned'] / pe['num_reads']\n\n df = pd.concat([se, pe])\n df.reset_index(inplace=True)\n\n df = remove_rows(df, 'srr', filter_srrs)\n df = keep_rows(df, 'srr', keep_srrs)\n\n return df.loc[df['prop_unaligned'] <= cutoff, ['srx', 'srr']]",
"def remove_distance_extremes(scan, low, high):\n scan.samples[:] = [sample for sample in scan.samples if (\n sample.distance >= low and sample.distance <= high)]",
"def discard_samples(chain_length):\n return min(chain_length / 10, MAX_GEN_DISCARD)",
"def remove_outliers(seqs, num_stds, fraction_seqs_for_stats=.95):\r\n # load the alignment and compute the consensus sequence\r\n aln = Alignment.from_fasta_records(parse_fasta(seqs), DNA)\r\n consensus_seq = aln.majority_consensus()\r\n # compute the hamming distance between all sequences in the alignment\r\n # and the consensus sequence\r\n dists_to_consensus = [s.distance(consensus_seq) for s in aln]\r\n # compute the average and standard deviation distance from the consensus\r\n average_distance = mean(dists_to_consensus)\r\n std_distance = std(dists_to_consensus)\r\n # compute the distance cutoff\r\n dist_cutoff = average_distance + num_stds * std_distance\r\n # for all sequences, determine if they're distance to the consensus\r\n # is less then or equal to the cutoff distance. if so, add the sequence's\r\n # identifier to the list of sequence identifiers to keep\r\n seqs_to_keep = []\r\n for seq_id, dist_to_consensus in izip(aln.ids(), dists_to_consensus):\r\n if dist_to_consensus <= dist_cutoff:\r\n seqs_to_keep.append(seq_id)\r\n # filter the alignment to only keep the sequences identified in the step\r\n # above\r\n filtered_aln = aln.subalignment(seqs_to_keep=seqs_to_keep)\r\n # and return the filtered alignment\r\n return filtered_aln",
"def prune(mapq=30):\n\n mkdir(PRUNE_DIR)\n\n #\n # samtools filters:\n # -f 3: keep properly paired and mapped reads\n # -F 4: filter out unmapped reads\n # -F 8: filter out unmapped mates\n # -F 256: filter out secondary reads\n # -F 1024: filter out duplicates marked by Picard above\n # -F 2048: filter out supplementary reads\n #\n\n template = \"\"\"samtools view -b -h -F 4 -F 256 -F 1024 -F 2048 -q {mapq} {input_bam} {autosomes} > {output_bam}; samtools index {output_bam}\"\"\"\n\n printp(\"\"\"\\n# drmr:label prune\\n\"\"\")\n printp(\"\"\"# drmr:job nodes=1 processors=1 memory=4g time_limit=4h working_directory={}\"\"\".format(PRUNE_DIR))\n printp(\"\"\"\\n#\\n# prune the BAM files with marked duplicates down to properly paired\"\"\")\n printp(\"\"\"# and mapped primary autosomal alignments of good quality, for peak calling\\n#\\n\"\"\")\n\n for sample, info in DATA.items():\n for x in ['treatment', 'control']:\n input_bam = get_md_bam(sample, control = False) if x == 'treatment' else get_md_bam(sample, control = True)\n output_bam = get_pruned_bam(sample, control = False) if x == 'treatment' else get_pruned_bam(sample, control = True)\n autosomes = ' '.join(AUTOSOMAL_REFERENCES[get_genome(sample)])\n printp(template.format(**locals()), timed=True)\n\n printp(\"\"\"\\n# drmr:wait\"\"\")",
"def dereplication_fulllength(amplicon_file, minseqlen, mincount):\n\n seq_list = []\n for seq in read_fasta(amplicon_file, minseqlen):\n seq_list.append(seq)\n\n for o in Counter(seq_list).most_common():\n if o[1] > mincount:\n yield o",
"def trim(self, ratio=10000):\n trimmed, total = 0, 0\n for sources in self.sources():\n for s in (self.tp_by_source_and_text[sources],\n self.fp_by_source_and_text[sources],\n self.fn_by_source_and_text[sources],\n self.overlap_by_source_and_text[sources]):\n try:\n max_count = s.most_common(1)[0][1]\n except IndexError:\n continue\n for k, v in list(s.items()):\n if v * ratio < max_count:\n trimmed += 1\n del s[k]\n total += 1\n print(f'trimmed {trimmed}/{total} ({trimmed/total:.1%})',\n file=sys.stderr, flush=True)"
] |
[
"0.7271599",
"0.71542186",
"0.65226847",
"0.6235168",
"0.5892789",
"0.5843919",
"0.58214986",
"0.5782074",
"0.5752648",
"0.5536262",
"0.5524399",
"0.551734",
"0.5517231",
"0.5339049",
"0.5330954",
"0.53044456",
"0.5239596",
"0.5233504",
"0.5209945",
"0.51907754",
"0.5190602",
"0.5183533",
"0.5181003",
"0.51709265",
"0.51698405",
"0.5159008",
"0.51407576",
"0.5105104",
"0.5093646",
"0.50872695"
] |
0.837361
|
0
|
To each mutant in the dataset, add the gene it's in (look up gene positions for each mutant using genefile). ALSO add gene data to all the RISCC genome-side read sub-mutants inside each mutant! If detailed_features is True, also look up whether the mutant is in an exon/intron/UTR. Read the file in N_run_groups passes to avoid using up too much memory/CPU.
|
def find_genes_for_mutants(self, genome_version, genefile, detailed_features=True, include_RISCC_reads=False,
nearest_genes_for_intergenic=False, N_run_groups=3, verbosity_level=1):
if self.multi_dataset: raise MutantError("find_genes_for_mutants not implemented for multi-datasets!")
# MAYBE-TODO implement for multi-datasets? The actual gene-finding would be easy, since it'd just work on
# multi-dataset mutants instead of single-dataset ones; adding stuff to summary would be harder.
# Group all the mutants by chromosome, so that I can go over each chromosome in genefile separately
# instead of reading in all the data at once (which uses a lot of memory)
        # Include both the main mutants, AND all the RISCC genome-side read sub-mutants if wanted.
insertion_data_by_chromosome = defaultdict(list)
for mutant in self:
if mutant.position not in SPECIAL_POSITIONS.all_undefined:
insertion_data_by_chromosome[mutant.position.chromosome].append(mutant)
if include_RISCC_reads:
for RISCC_read_data in mutant.RISCC_genome_side_aligned_reads.values():
insertion_data_by_chromosome[RISCC_read_data[0].chromosome].append(RISCC_read_data)
self._find_genes_for_list(insertion_data_by_chromosome, genome_version, genefile,
detailed_features, nearest_genes_for_intergenic, N_run_groups, verbosity_level)
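Illustrative sketch (not part of the original dataset record): the chromosome-grouping step that find_genes_for_mutants relies on, with InsertionPosition as an assumed stand-in for the real position objects.

# Minimal sketch of grouping insertions by chromosome so each chromosome of the
# gene annotation can be read and matched separately, keeping memory use bounded.
from collections import defaultdict

class InsertionPosition:
    def __init__(self, chromosome, min_position):
        self.chromosome = chromosome
        self.min_position = min_position

insertions = [
    InsertionPosition("chromosome_1", 120),
    InsertionPosition("chromosome_2", 4500),
    InsertionPosition("chromosome_1", 98000),
]

insertion_data_by_chromosome = defaultdict(list)
for pos in insertions:
    insertion_data_by_chromosome[pos.chromosome].append(pos)

# Each chromosome's insertions can now be matched against that chromosome's
# gene records without loading the whole annotation file at once.
for chromosome, hits in sorted(insertion_data_by_chromosome.items()):
    print(chromosome, [h.min_position for h in hits])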
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def proc_dataset_v2(write=False):\n \n path = load_config()\n M = pd.read_csv(path['metadata_file'])\n T = pd.read_csv(path['rna_file'])\n mCH = pd.read_csv(path['mCH_file'])\n CH = pd.read_csv(path['CH_file'])\n\n def format_df(df):\n \"\"\"The inputs are genes x cells. Transpose data and rename columns\"\"\"\n df = df.transpose()\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop('gene', inplace=True)\n df.index.rename('sample_id', inplace=True)\n return df\n\n T = format_df(T)\n mCH = format_df(mCH)\n CH = format_df(CH)\n\n #Update metadata\n M = pd.read_csv(path['metadata_file'])\n M.rename(columns={'Unnamed: 0': 'sample_id'}, inplace=True)\n M.set_index(keys='sample_id', drop=True, inplace=True)\n\n #Sort cells by metadata\n sorted_index = M.sort_values(by='SubClusterAnno').index\n M = M.loc[sorted_index]\n T = T.loc[sorted_index]\n mCH = mCH.loc[sorted_index]\n CH = CH.loc[sorted_index]\n\n assert np.array_equal(CH.columns, mCH.columns), \"Genes are not in the same order\"\n assert np.array_equal(T.columns, mCH.columns), \"Genes are not in the same order\"\n assert M.index.equals(T.index), \"Cells are not in the same order\"\n assert M.index.equals(CH.index), \"Cells are not in the same order\"\n assert M.index.equals(mCH.index), \"Cells are not in the same order\"\n\n # CPM-normalize counts\n X = T.values.astype(np.float32)\n X = (X/np.sum(X, axis=1, keepdims=True))*1e6\n X = np.log1p(X)\n T = pd.DataFrame(data=X, index=T.index, columns=T.columns)\n print('Completed T processing')\n\n # For methylation data\n X = CH.values.astype(float)\n Y = mCH.values.astype(float)\n Z = np.divide(Y,X+1e-10)\n E = pd.DataFrame(data=Z, index=mCH.index, columns=mCH.columns)\n print('Completed E processing')\n\n # select genes based on variance in log normalized CPM values in T\n def calc_highvar_genes(df):\n vars = np.var(df.values, axis=0)\n order_vars = np.argsort(-vars) # descending order\n sorted_highvar_genes = df.columns.values[order_vars]\n return sorted_highvar_genes\n\n data = {'sorted_highvar_T_genes': calc_highvar_genes(T),\n 'sorted_highvar_E_genes': calc_highvar_genes(E)}\n print('Completed finding high variance features')\n\n if write:\n feather.write_dataframe(T, path['data_dir'] / 'T_dat_v2.feather')\n feather.write_dataframe(E, path['data_dir'] / 'E_dat_v2.feather')\n feather.write_dataframe(M, path['data_dir'] / 'Meta_v2.feather')\n sio.savemat(path['data_dir'] / 'highvar_genes_v2.mat', data)\n return T, E, M, data",
"def proc_dataset_v1(write=False):\n \n path = load_config()\n M = pd.read_csv(path['metadata_file'])\n T = pd.read_csv(path['rna_file'])\n mCH = pd.read_csv(path['mCH_file'])\n CH = pd.read_csv(path['CH_file'])\n\n def format_df(df):\n \"\"\"The inputs are genes x cells. Transpose data and rename columns\"\"\"\n df = df.transpose()\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop('gene', inplace=True)\n df.index.rename('sample_id', inplace=True)\n return df\n\n T = format_df(T)\n mCH = format_df(mCH)\n CH = format_df(CH)\n\n #Update metadata\n M = pd.read_csv(path['metadata_file'])\n M.rename(columns={'Unnamed: 0': 'sample_id'}, inplace=True)\n M.set_index(keys='sample_id', drop=True, inplace=True)\n\n #Sort cells by metadata\n sorted_index = M.sort_values(by='SubClusterAnno').index\n M = M.loc[sorted_index]\n T = T.loc[sorted_index]\n mCH = mCH.loc[sorted_index]\n CH = CH.loc[sorted_index]\n\n assert np.array_equal(CH.columns, mCH.columns), \"Genes are not in the same order\"\n assert np.array_equal(T.columns, mCH.columns), \"Genes are not in the same order\"\n assert M.index.equals(T.index), \"Cells are not in the same order\"\n assert M.index.equals(CH.index), \"Cells are not in the same order\"\n assert M.index.equals(mCH.index), \"Cells are not in the same order\"\n\n # CPM-normalize counts\n X = T.values.astype(np.float32)\n X = (X/np.sum(X, axis=1, keepdims=True))*1e6\n X = np.log1p(X)\n T = pd.DataFrame(data=X, index=T.index, columns=T.columns)\n print('Completed T processing')\n\n # For methylation data\n X = CH.values.astype(float)\n Y = mCH.values.astype(float)\n Z = np.log1p(Y) - np.log1p(X)\n E = pd.DataFrame(data=Z, index=mCH.index, columns=mCH.columns)\n print('Completed E processing')\n\n # select genes based on variance in log normalized CPM values in T\n def calc_highvar_genes(df):\n vars = np.var(df.values, axis=0)\n order_vars = np.argsort(-vars) # descending order\n sorted_highvar_genes = df.columns.values[order_vars]\n return sorted_highvar_genes\n\n data = {'sorted_highvar_T_genes': calc_highvar_genes(T),\n 'sorted_highvar_E_genes': calc_highvar_genes(E)}\n print('Completed finding high variance features')\n\n if write:\n feather.write_dataframe(T, path['data_dir'] / 'T_dat.feather')\n feather.write_dataframe(E, path['data_dir'] / 'E_dat.feather')\n feather.write_dataframe(M, path['data_dir'] / 'Meta.feather')\n sio.savemat(path['data_dir'] / 'highvar_genes.mat', data)\n return T, E, M, data",
"def cluster_build_trees(\n identity, set_name, cluster_file=None, click_loguru=None\n):\n options = click_loguru.get_global_options()\n user_options = click_loguru.get_user_global_options()\n parallel = user_options[\"parallel\"]\n set_path = Path(set_name)\n # read and possibly update proteomes\n proteomes_path = set_path / PROTEOMES_FILE\n proteomes_in = read_tsv_or_parquet(proteomes_path)\n proteomes = sort_proteome_frame(proteomes_in)\n if not proteomes_in.equals(proteomes):\n logger.info(\"proteomes sort order changed, writing new proteomes file\")\n write_tsv_or_parquet(proteomes, proteomes_path)\n n_proteomes = len(proteomes)\n # read and update fragment ID's\n frags = read_tsv_or_parquet(set_path / FRAGMENTS_FILE)\n frags[\"frag.idx\"] = pd.array(frags.index, dtype=pd.UInt32Dtype())\n frag_frames = {}\n for dotpath, subframe in frags.groupby(by=[\"path\"]):\n frag_frames[dotpath] = subframe.copy().set_index(\"frag.orig_id\")\n arg_list = []\n concat_fasta_path = set_path / \"proteins.fa\"\n for i, row in proteomes.iterrows():\n arg_list.append((row, concat_fasta_path, frag_frames[row[\"path\"]]))\n file_idx = {}\n stem_dict = {}\n for i, row in proteomes.iterrows():\n stem = row[\"path\"]\n file_idx[stem] = i\n stem_dict[i] = stem\n if cluster_file is None:\n if concat_fasta_path.exists():\n concat_fasta_path.unlink()\n if not options.quiet:\n logger.info(\n f\"Renaming fragments and concatenating sequences for {len(arg_list)}\"\n \" proteomes:\"\n )\n for args in arg_list:\n write_protein_fasta(args)\n del arg_list\n cwd = Path.cwd()\n os.chdir(set_path)\n n_clusters, run_stats, cluster_hist = homology_cluster(\n \"proteins.fa\",\n identity,\n write_ids=True,\n delete=False,\n cluster_stats=False,\n outname=\"homology\",\n click_loguru=click_loguru,\n )\n log_path = Path(\"homology.log\")\n log_dir_path = Path(\"logs\")\n log_dir_path.mkdir(exist_ok=True)\n shutil.copy2(log_path, \"logs/homology.log\")\n log_path.unlink()\n os.chdir(cwd)\n logger.info(f\"Number of clusters: {n_clusters}\")\n del cluster_hist\n del run_stats\n concat_fasta_path.unlink()\n else: # use pre-existing clusters\n homology_path = set_path / \"homology\"\n if homology_path.exists():\n shutil.rmtree(homology_path)\n inclusts = pd.read_csv(cluster_file, sep=\"\\t\")\n for col in [\"cluster_id\", \"members\"]:\n if col not in inclusts.columns:\n logger.error(\n f'Column named \"{col}\" not found in external homology cluster file'\n )\n sys.exit(1)\n cluster_counts = inclusts[\"cluster_id\"].value_counts()\n cluster_map = pd.Series(\n range(len(cluster_counts)), index=cluster_counts.index\n )\n cluster_ids = inclusts[\"cluster_id\"].map(cluster_map)\n cluster_sizes = inclusts[\"cluster_id\"].map(cluster_counts)\n predef_clusters = pd.DataFrame(\n {\n \"cluster_id\": cluster_ids,\n \"size\": cluster_sizes,\n \"members\": inclusts[\"members\"],\n }\n )\n predef_clusters.sort_values(by=[\"cluster_id\"], inplace=True)\n predef_clusters.drop(\n predef_clusters[predef_clusters[\"size\"] < 2].index,\n axis=0,\n inplace=True,\n )\n n_clusters = predef_clusters[\"cluster_id\"].max() + 1\n predef_clusters.index = range(len(predef_clusters))\n external_cluster_path = set_path / EXTERNAL_CLUSTERS_FILE\n logger.info(\n f\"Writing {external_cluster_path} with {len(predef_clusters)} genes\"\n + f\" in {n_clusters} homology clusters\"\n )\n predef_clusters.to_csv(external_cluster_path, sep=\"\\t\")\n del cluster_counts, cluster_map, cluster_sizes, inclusts\n homology_path = set_path / \"homology\"\n 
homology_path.mkdir(exist_ok=True)\n if not options.quiet:\n logger.info(\n f\"Creating cluster files for for {len(arg_list)}\" \" proteomes:\"\n )\n proteome_no = 0\n for args in arg_list:\n logger.info(f\"doing proteome {proteome_no}\")\n write_protein_fasta(\n args, fasta_dir=homology_path, clusters=predef_clusters\n )\n proteome_no += 1\n del arg_list\n logger.info(\n \"Checking that all cluster files are present (gene-id mismatch)\"\n )\n missing_files = False\n for i in range(n_clusters):\n if not (homology_path / f\"{i}.fa\").exists():\n logger.error(f\"External cluster {i} is missing.\")\n missing_files = True\n if missing_files:\n sys.exit(1)\n #\n # Write homology info back into proteomes\n #\n click_loguru.elapsed_time(\"Alignment/tree-building\")\n hom_mb = DataMailboxes(\n n_boxes=n_proteomes,\n mb_dir_path=(set_path / \"mailboxes\" / \"clusters2proteomes\"),\n file_extension=\"tsv\",\n )\n hom_mb.write_tsv_headers(HOMOLOGY_COLS)\n cluster_paths = [\n set_path / \"homology\" / f\"{i}.fa\" for i in range(n_clusters)\n ]\n bag = db.from_sequence(cluster_paths)\n cluster_stats = []\n if not options.quiet:\n logger.info(\n f\"Calculating MSAs and trees for {len(cluster_paths)} homology\"\n \" clusters:\"\n )\n ProgressBar(dt=SPINNER_UPDATE_PERIOD).register()\n if parallel:\n cluster_stats = bag.map(\n parse_cluster,\n file_dict=file_idx,\n file_writer=hom_mb.locked_open_for_write,\n )\n else:\n for clust_fasta in cluster_paths:\n cluster_stats.append(\n parse_cluster(\n clust_fasta,\n file_dict=file_idx,\n file_writer=hom_mb.locked_open_for_write,\n )\n )\n n_clust_genes = 0\n clusters_dict = {}\n for cluster_id, cluster_dict in cluster_stats:\n n_clust_genes += cluster_dict[\"size\"]\n clusters_dict[cluster_id] = cluster_dict\n del cluster_stats\n clusters = pd.DataFrame.from_dict(clusters_dict).transpose()\n del clusters_dict\n clusters.sort_index(inplace=True)\n grouping_dict = {}\n for i in range(n_proteomes): # keep numbering of single-file clusters\n grouping_dict[f\"[{i}]\"] = i\n grouping_dict[str(list(range(n_proteomes)))] = 0\n for n_members, subframe in clusters.groupby([\"n_memb\"]):\n if n_members == 1:\n continue\n if n_members == n_proteomes:\n continue\n member_counts = pd.DataFrame(subframe[\"n_members\"].value_counts())\n member_counts[\"key\"] = range(len(member_counts))\n for newcol in range(n_members):\n member_counts[f\"memb{newcol}\"] = \"\"\n for member_string, row in member_counts.iterrows():\n grouping_dict[member_string] = row[\"key\"]\n member_list = json.loads(member_string)\n for col in range(n_members):\n member_counts.loc[member_string, f\"memb{col}\"] = stem_dict[\n member_list[col]\n ]\n member_counts = member_counts.set_index(\"key\")\n write_tsv_or_parquet(\n member_counts, set_path / group_key_filename(n_members)\n )\n clusters[\"n_members\"] = clusters[\"n_members\"].map(grouping_dict)\n clusters = clusters.rename(columns={\"n_members\": \"group_key\"})\n n_adj = clusters[\"n_adj\"].sum()\n adj_pct = n_adj * 100.0 / n_clust_genes\n n_adj_clust = sum(clusters[\"adj_groups\"] != 0)\n adj_clust_pct = n_adj_clust * 100.0 / len(clusters)\n logger.info(\n f\"{n_adj} ({adj_pct:.1f}%) out of {n_clust_genes}\"\n + \" clustered genes are adjacent\"\n )\n logger.info(\n f\"{n_adj_clust} ({adj_clust_pct:.1f}%) out of \"\n + f\"{len(clusters)} clusters contain adjacency\"\n )\n write_tsv_or_parquet(clusters, set_path / CLUSTERS_FILE)\n # join homology cluster info to proteome info\n click_loguru.elapsed_time(\"Joining\")\n arg_list = []\n for i, row in 
proteomes.iterrows():\n arg_list.append(\n (\n i,\n dotpath_to_path(row[\"path\"]),\n )\n )\n bag = db.from_sequence(arg_list)\n hom_stats = []\n if not options.quiet:\n logger.info(f\"Joining homology info to {n_proteomes} proteomes:\")\n ProgressBar(dt=SPINNER_UPDATE_PERIOD).register()\n if parallel:\n hom_stats = bag.map(\n join_homology_to_proteome, mailbox_reader=hom_mb.open_then_delete\n ).compute()\n else:\n for args in arg_list:\n hom_stats.append(\n join_homology_to_proteome(\n args, mailbox_reader=hom_mb.open_then_delete\n )\n )\n hom_mb.delete()\n hom_frame = pd.DataFrame.from_dict(hom_stats)\n hom_frame.set_index([\"prot.idx\"], inplace=True)\n hom_frame.sort_index(inplace=True)\n logger.info(\"Homology cluster coverage:\")\n with pd.option_context(\n \"display.max_rows\", None, \"display.float_format\", \"{:,.2f}%\".format\n ):\n logger.info(hom_frame)\n proteomes = pd.concat([proteomes, hom_frame], axis=1)\n write_tsv_or_parquet(\n proteomes, set_path / PROTEOMOLOGY_FILE, float_format=\"%5.2f\"\n )\n click_loguru.elapsed_time(None)",
"def generate_gene_set_data(\n data,\n genes,\n gene_name_type=\"entrez\",\n gene_set_category=\"c6.all\",\n metric=\"mean\",\n standardize=False,\n data_dir=\"../../Data/examples/Gene_Sets/MSigDB.v7.0/\",\n):\n\n sample_name = None\n if isinstance(data, pd.DataFrame):\n sample_name = data.index\n data = data.values\n elif not isinstance(data, np.ndarray):\n print(\"Input data must be a numpy array or pandas data frame\")\n sys.exit(1)\n\n if standardize:\n scaler = StandardScaler()\n data = scaler.fit_transform(data)\n\n genes = [str(i) for i in genes]\n\n if gene_name_type == \"entrez\":\n gene_set_category = gene_set_category + \".v7.0.entrez.gmt\"\n if gene_name_type == \"symbols\":\n gene_set_category = gene_set_category + \".v7.0.symbols.gmt\"\n f = open(data_dir + gene_set_category, \"r\")\n x = f.readlines()\n gene_sets = {}\n for i in range(len(x)):\n temp = x[i].split(\"\\n\")[0].split(\"\\t\")\n gene_sets[temp[0]] = temp[2:]\n\n gene_set_data = np.empty((data.shape[0], len(gene_sets)))\n gene_set_data.fill(np.nan)\n gene_set_names = np.array(list(gene_sets.keys()))\n for i in range(len(gene_set_names)):\n idi = np.where(np.isin(genes, gene_sets[gene_set_names[i]]))[0]\n if len(idi) > 0:\n if metric == \"sum\":\n gene_set_data[:, i] = np.nansum(data[:, idi], axis=1)\n elif metric == \"max\":\n gene_set_data[:, i] = np.nanmax(data[:, idi], axis=1)\n elif metric == \"min\":\n gene_set_data[:, i] = np.nanmin(data[:, idi], axis=1)\n elif metric == \"abs_mean\":\n gene_set_data[:, i] = np.nanmean(np.absolute(data[:, idi]), axis=1)\n elif metric == \"abs_maximum\":\n gene_set_data[:, i] = np.nanmax(np.absolute(data[:, idi]), axis=1)\n else: # 'mean'\n gene_set_data[:, i] = np.nanmean(data[:, idi], axis=1)\n\n if sample_name is None:\n gene_set_data = pd.DataFrame(gene_set_data, columns=gene_set_names)\n else:\n gene_set_data = pd.DataFrame(\n gene_set_data, columns=gene_set_names, index=sample_name\n )\n keep_id = np.where(np.sum(np.invert(pd.isna(gene_set_data)), axis=0) > 0)[0]\n gene_set_data = gene_set_data.iloc[:, keep_id]\n\n return gene_set_data",
"def batchAnalysis(groupfil):\n groups = []\n with open(groupfil, 'r') as fIn:\n for line in fIn:\n groups.append(line.strip().split(','))\n \n checks = ['maxV', 'maxDerivV', 'maxDerivdV', 'minDerivV',\n 'minDerivdV', 'preMinV', 'postMinV', 'preMaxCurveV',\n 'preMaxCurveK', 'postMaxCurveV', 'postMaxCurveK',\n 'height', 'repolarizationV', 'intervals', 'frequencies']\n props = {ch: {gr: {} for gr in list(set([g[1] for g in groups]))}\n for ch in checks} # A dict of dicts\n # props [properties] [group name] [cell name]\n cells = [f[0].split('/')[-1].split('_')[0] for f in groups]\n \n # Add a few more keys\n props['activity'] = {gr: {} for gr in list(set([g[1] for g in groups]))}\n \n # Assign all the properties to the props dict\n for g in groups:\n df = pd.read_csv(g[0])\n df = df.drop('Unnamed: 33', 1) # Garbage\n df = df.drop('freq', 1) # These are downsampled\n df = df.dropna() # Dropna\n \n # If there are multiple clusters, add them in order\n if max(df.clust_inds) == 1: # Two clusters\n numClusts = int(max(df.clust_inds)+1)\n for ch in checks:\n for clust in range(numClusts):\n try:\n props[ch][g[1]][cells[groups.index(g)]].append(df[df['clust_inds']==clust][ch].dropna().values)\n except:\n props[ch][g[1]][cells[groups.index(g)]] = [df[df['clust_inds']==clust][ch].dropna().values]\n else: # Just one cluster\n for ch in checks:\n props[ch][g[1]][cells[groups.index(g)]] = [df[ch].dropna().values]\n # Get activity profile\n tIn, cBouts = timeInClusters(df)\n props['activity'][g[1]][cells[groups.index(g)]] = [tIn, cBouts]\n \n return props",
"def _add_transform_genes(self):\n pass",
"def _add_transform_genes(self):\n self._alleles.add(pu.make_int_gene(1, 1, 10, 1)) # 'AR' backshift (p)\n self._alleles.add(pu.make_choice_gene(1, [0, 1, 2])) # 'I' backshift (d) \n self._alleles.add(pu.make_choice_gene(1, [1, 2, 3])) # 'MA' backshift (q)\n self._loci_list += ['AR_order', 'I_order', 'MA_order']",
"def import_data(hdfile, didx=0, dpath=BASE_PATH, PASS=False, dname='LMD'):\n dset = Dataset(hdfile)\n \n genres = {}\n for f in collect_mat_files(dpath,dname, didx):\n data = loadmat(f)\n g = data['genre'][0]\n print f\n if not g in genres:\n genres[g] = len(genres)\n \n if not PASS:\n x = data['features'].T\n dset.add_data(X=[x],y=[genres[g]],\n metadata=[{'filename':os.path.split(f)[-1]}])\n \n return genres",
"def _add_transform_genes(self):\n self._alleles.add(pu.make_int_gene(1, 1, 10, 1)) # 'AR' backshift (p)\n self._alleles.add(pu.make_choice_gene(1, [0, 1, 2])) # 'I' backshift (d) \n self._alleles.add(pu.make_choice_gene(1, [1, 2, 3])) # 'MA' backshift (q)\n self._alleles.add(pu.make_int_gene(1, 1, 10, 1)) # Seasonal 'AR' backshift (p)\n self._alleles.add(pu.make_choice_gene(1, [0, 1, 2])) # Seasonal 'I' backshift (d) \n self._alleles.add(pu.make_choice_gene(1, [1, 2, 3])) # Seasonal 'MA' backshift (q)\n self._loci_list += ['AR_order', 'I_order', 'MA_order',\n 'ssn_AR_order', 'ssn_I_order', 'ssn_MA_order']",
"def import_gene_information(inputargs):\n\n global chainnams, chain\n chain = inputargs['chain']\n\n if inputargs['tags'] == \"extended\" and inputargs['species'] == \"mouse\":\n print(\"Please note that there is currently no extended tag set for mouse TCR genes.\\n\" \\\n \"Decombinator will now switch the tag set in use from \\'extended\\' to \\'original\\'.\\n\" \\\n \"In future, consider editing the script to change the default, \" \\\n \"or use the appropriate flags (-sp mouse -tg original).\")\n inputargs['tags'] = \"original\"\n\n if inputargs['tags'] == \"extended\" and (chain == 'g' or chain == 'd'):\n print(\"Please note that there is currently no extended tag set for gamma/delta TCR genes.\\n\" \\\n \"Decombinator will now switch the tag set in use from \\'extended\\' to \\'original\\'.\\n\" \\\n \"In future, consider editing the script to change the default, or use the appropriate flags.\")\n inputargs['tags'] = \"original\"\n\n # Check species information\n if inputargs['species'] not in [\"human\", \"mouse\"]:\n print(\"Species not recognised. Please select either \\'human\\' (default) or \\'mouse\\'.\\n\" \\\n \"If mouse is required by default, consider changing the default value in the script.\")\n sys.exit()\n\n # Look for tag and V/J fasta and cysteine position files: if these cannot be found in the working directory,\n # source them from GitHub repositories\n # Note that fasta/tag files fit the pattern \"species_tagset_gene.[fasta/tags]\"\n # I.e. \"[human/mouse]_[extended/original]_TR[A/B/G/D][V/J].[fasta/tags]\"\n\n for gene in ['v', 'j']:\n # Get FASTA data\n fasta_file = read_tcr_file(inputargs['species'], inputargs['tags'], gene, \"fasta\", inputargs['tagfastadir'])\n globals()[gene + \"_genes\"] = list(SeqIO.parse(fasta_file, \"fasta\"))\n\n globals()[gene + \"_regions\"] = [str( item.seq.upper()) for item in globals()[gene + \"_genes\"]]\n globals()[gene + \"_names\"] = [str(item.id.upper().split(\"|\")[1]) for item in globals()[gene + \"_genes\"]]\n\n # Get conserved translation residue sites and functionality data\n translation_file = open(read_tcr_file(inputargs['species'], inputargs['tags'], gene, \"translate\",\n inputargs['tagfastadir']),\"rt\")\n translate_data = [x.rstrip() for x in list(translation_file)]\n\n globals()[gene + \"_translate_position\"] = [int(x.split(\",\")[1]) for x in translate_data]\n globals()[gene + \"_translate_residue\"] = [x.split(\",\")[2] for x in translate_data]\n globals()[gene + \"_functionality\"] = [x.split(\",\")[3] for x in translate_data]\n\n if gene == 'v':\n \n if inputargs['species'] == \"human\":\n # Get germline CDR data\n cdr_file = open(read_tcr_file(inputargs['species'], inputargs['tags'], gene, \"cdrs\", inputargs['tagfastadir']), \"rt\")\n cdr_data = [x.rstrip() for x in list(cdr_file)]\n cdr_file.close()\n v_cdr1 = [x.split(\" \")[1] for x in cdr_data]\n v_cdr2 = [x.split(\" \")[2] for x in cdr_data]\n else:\n # cdr_file only exists for human - CDR1 and CDR2 only written to output tsv\n # for human. Otherwise create empty lists fo v_cdr1 and v_cdr2, to write empty\n # fields to output tsv\n v_cdr1 = [\"\"]*len(globals()[gene + \"_genes\"])\n v_cdr2 = [\"\"]*len(globals()[gene + \"_genes\"])\n\n return v_regions, j_regions, v_names, j_names, v_translate_position, v_translate_residue, \\\n j_translate_position, j_translate_residue, v_functionality, j_functionality, v_cdr1, v_cdr2",
"def genes_file_creation(input_folder):\n file_paths = {}\n for file_name in os.listdir(input_folder):\n file_paths[file_name] = input_folder + '/' + file_name\n\n df = pa.DataFrame()\n \n for file_name in file_paths:\n df_temp = pa.read_csv(file_paths[file_name], sep='\\t', header=None)\n print(df_temp.columns)\n gene_column = 0\n df_temp = df_temp[[gene_column]]\n df_temp.columns = ['Gene_Name_DE']\n row = []\n file_extension = os.path.splitext(file_name)[1]\n row.append(file_name.replace(file_extension, \"\"))\n row.extend(df_temp['Gene_Name_DE'].tolist())\n df = df.append([row], ignore_index=True)\n\n df.insert(1, 'Description', 'Genes_DE')\n\n df.to_csv('DE_gene.gmt', sep='\\t', index=False, header=False)",
"def anno_gene_stats(anno_gene, loc_file, gene_file, isConvert):\r\n LocationNum = collections.Counter()\r\n LocationGene = collections.defaultdict(list)\r\n\r\n\r\n GeneCatSample = collections.defaultdict(lambda: collections.defaultdict(list))\r\n CatGeneSample = collections.defaultdict(lambda: collections.defaultdict(list))\r\n\r\n allLocations = set()\r\n anno_h = open(anno_gene, \"r\")\r\n for line in anno_h:\r\n lines = line.strip().split(\"\\t\")\r\n sample, location, number, gene = lines[:4]\r\n number = int(number)\r\n\r\n ### whether convert the category to \"Exon\" or \"Intron\"\r\n if isConvert == \"True\":\r\n if location == \"Intron\":\r\n newLoc = \"Intron\"\r\n else:\r\n newLoc = \"Exon\"\r\n elif isConvert == \"False\":\r\n newLoc = location\r\n else:\r\n print(\"Please check whether convert the original category to 'Intron' or 'Exon' based on True of False.\")\r\n sys.exit(1)\r\n\r\n allLocations.add(newLoc)\r\n ### get the dict of gene -> location -> sample\r\n genes = gene.split(\",\")\r\n for g in genes:\r\n GeneCatSample[g][newLoc].append(sample)\r\n\r\n ### get the location -> gene -> sample\r\n CatGeneSample[newLoc][g].append(sample)\r\n anno_h.close()\r\n\r\n\r\n ## output gene and number in samples\r\n ### sort all locations\r\n sortedAllLocation = sorted(list(allLocations))\r\n\r\n gene_h = open(gene_file, \"w\")\r\n\r\n headerSample = [l + \"_samples\" for l in sortedAllLocation]\r\n gene_h.write(\"Gene\\tTotal\\t%s\\t%s\\n\" % (\"\\t\".join(sortedAllLocation), \"\\t\".join(headerSample)))\r\n\r\n GeneRecord = {}\r\n GeneNumber = {}\r\n\r\n allGenes = sorted(list(GeneCatSample.keys()))\r\n for ge in allGenes:\r\n ### get the number and samples for each location of each gene\r\n GeneNum = []\r\n GeneSample = []\r\n\r\n for loc in sortedAllLocation:\r\n if loc in GeneCatSample[ge]:\r\n samples = GeneCatSample[ge][loc]\r\n ##############################\r\n ####### unique for samples\r\n samples = sorted(list(set(samples)))\r\n sampleNum = len(samples)\r\n else:\r\n sampleNum = 0\r\n samples = [\"-\"]\r\n\r\n GeneNum.append(sampleNum)\r\n GeneSample.append(samples)\r\n\r\n GeneNumSum = sum(GeneNum)\r\n CatNumOut = \"\\t\".join([str(g) for g in GeneNum])\r\n CatSampleOut = \"\\t\".join([\",\".join(s) for s in GeneSample])\r\n\r\n record = \"%s\\t%d\\t%s\\t%s\\t\" % (ge, GeneNumSum, CatNumOut, CatSampleOut)\r\n GeneNumber[ge] = GeneNumSum\r\n GeneRecord[ge] = record\r\n \r\n ### output\r\n GeneNumSorted = sort_dict_value(GeneNumber)\r\n for g, n in GeneNumSorted:\r\n r = GeneRecord[g]\r\n gene_h.write(\"%s\\n\" % r)\r\n\r\n gene_h.close() \r\n\r\n\r\n ### location and genes\r\n loc_h = open(loc_file, \"w\")\r\n loc_h.write(\"Location\\tGeneNumber\\tGenes\\tSampleNumber\\tSamples\\n\")\r\n for loc in sortedAllLocation:\r\n geneSample = CatGeneSample[loc]\r\n genes = sorted(list(geneSample.keys()))\r\n geneNum = len(genes)\r\n samNum = 0\r\n samList = []\r\n for ge in geneSample:\r\n sam = geneSample[ge]\r\n samList.append(sam)\r\n samNum += len(sam)\r\n samOut = \";\".join([\",\".join(s) for s in samList])\r\n loc_h.write(\"%s\\t%d\\t%s\\t%d\\t%s\\n\" % (loc, geneNum, \",\".join(genes), samNum, samOut))\r\n loc_h.close()",
"def multiple_matlab_csv_to_teacher_data(short_runs_dirname):\n subdirname = 'Run-'\n data = None\n data_length = 0\n for i in range(10):\n dirname = os.path.join(short_runs_dirname, subdirname+str(i+1))\n run_data = matlab_csv_to_teacher_data(dirname)\n if i == 0:\n data = run_data\n else:\n for i, phoneme_data in enumerate(run_data):\n data[i] = np.vstack((data[i], phoneme_data))\n\n data_length += run_data[0].shape[0]\n\n for i, phoneme_data in enumerate(data):\n assert phoneme_data.shape[0] == data_length\n\n return data",
"def geneProcess(self, name):\n self.fileHandle = open(self.fileName, 'r+b')\n self.mm = mmap.mmap(self.fileHandle.fileno(), 0)\n positions = self.geneFeatures[name]\n exons = []\n for position in positions:\n self.mm.seek(position)\n row = self.mm.readline().decode('utf-8').rstrip().split(\"\\t\")\n attributes = row[-1].split(\"; \")\n for attribute in attributes:\n if attribute.startswith(\"gene_type\"):\n _gt = attribute.split(\" \")[-1][1:-1]\n elif attribute.startswith(\"gene_id\"):\n _gid = attribute.split(\" \")[-1][1:-1]\n elif attribute.startswith(\"gene_name\"):\n _gn = attribute.split(\" \")[-1][1:-1]\n exons.append((row[0], int(row[3]), int(row[4]), row[6], _gt, _gid, _gn))\n self.fileHandle.close()\n exons_df = pd.DataFrame(exons, columns=['scaffold', 'start', 'end',\n 'strand', 'gene_type', 'gene_id', 'gene_name'])\n\n for record in self.geneExonicRegions(exons_df):\n yield record",
"def run_gsea_experiments(self, perc_redundant):\n print('Perc redundant: {}'.format(perc_redundant))\n\n for i in range(self.iterations):\n print('\\ti = {}'.format(i))\n\n modified_gene_sets = copy.copy(self.gene_sets)\n\n redundant_genes = random.sample(self.uniq_genes, int(perc_redundant * len(self.uniq_genes)))\n\n for gene in redundant_genes:\n including_gsets = [\n gs_name for gs_name, gs_entry in modified_gene_sets.items()\n if gene in gs_entry['genes']\n ]\n new_gene_name = gene + '_REDUNDANT'\n mod_gsets = random.sample(including_gsets, int(0.5 * len(including_gsets)))\n\n for gs in mod_gsets:\n orig_genes = modified_gene_sets[gs]['genes']\n modified_gene_sets[gs]['genes'] = [\n new_gene_name if g == gene else g for g in orig_genes\n ]\n\n # write modified gene sets to disk\n gmt_file = os.path.join(\n self.base_dir,\n 'output',\n 'gsea_{0:.2f}'.format(perc_redundant),\n 'reactome_gene_sets_{0:.2f}.gmt'.format(perc_redundant)\n )\n\n self.write_gmt_file(gmt_file, modified_gene_sets)\n\n # run GSEA\n cls_file = os.path.join(self.base_dir, 'output', 'gsea_exp.cls')\n gct_file = os.path.join(self.base_dir, 'output', 'gsea_exp.gct')\n\n gsea_dir = os.path.join(self.base_dir, 'output', 'gsea_{0:.2f}'.format(perc_redundant), 'gsea_output')\n shutil.rmtree(gsea_dir)\n os.mkdir(gsea_dir)\n\n self._run_gsea(gct_file, gmt_file, cls_file, gsea_dir)\n\n # gsea output files to process\n tumor_all_leading_genes_file = os.path.join(\n gsea_dir,\n 'syngsea.all.leading.genes.TUMOR.gmt'\n )\n\n tumor_leading_genes_file = os.path.join(\n gsea_dir,\n 'syngsea.leading.genes.TUMOR.gct'\n )\n\n tumor_summary_results_file = os.path.join(\n gsea_dir,\n 'syngsea.SUMMARY.RESULTS.REPORT.TUMOR.txt'\n )\n\n tumor_leading_genes = self.process_all_leading_genes(tumor_all_leading_genes_file)\n tumor_leading_gene_occurrences = self.process_leading_genes(tumor_leading_genes_file)\n tumor_summary_results = self.process_results_file(tumor_summary_results_file)\n\n gsea_output_dict = {\n 'leading_genes': tumor_leading_genes,\n 'leading_genes_by_occurrence': tumor_leading_gene_occurrences,\n 'summary': tumor_summary_results,\n 'gene_sets': modified_gene_sets\n }\n\n # save to pickle\n gsea_pickle_file = os.path.join(\n self.base_dir,\n 'output',\n 'gsea_{0:.2f}'.format(perc_redundant),\n 'trial_{}.pkl'.format(i)\n )\n\n pickle.dump(gsea_output_dict, open(gsea_pickle_file, 'wb'))",
"def take_unique_features_large(filename_data,filename_features,filename_data_save,filename_features_save,rm_features=None,block_size=1000):\n # read the features from file\n features_org=np.loadtxt(filename_features,delimiter='\\t',dtype=object)\n \n # create a new file to save processed data\n filename_data_save_handle=open(filename_data_save,'w')\n filename_data_save_handle.close()\n # open the new file to save data sequentially\n filename_data_save_handle=open(filename_data_save,'a')\n \n filename_data_handle=open(filename_data,'r')\n\n count=0\n start=0\n data_block=[]\n end_of_file=False\n print(\"Start processing ...\")\n while not end_of_file:\n line=filename_data_handle.readline()\n if line=='':\n end_of_file=True\n else:\n if start==0:\n data_block=[]\n # remove \"\\n\" at the end\n data_line=line[0:-1]\n # split the string to substrings\n data_line=data_line.split('\\t')\n # append the current line to the block \n data_block.append(data_line)\n # increase total count\n count=count+1\n # get a full block or partial block at the end\n if start==block_size-1 or (end_of_file and start!=0):\n print(\"processing the %d-th line ...\" %count)\n \n ### process the block ###\n data_block=np.array(data_block,dtype=str)\n data_block,features=take_unique_features(data_block,features_org,rm_features)\n # append to file\n np.savetxt(filename_data_save_handle,data_block,fmt='%s',delimiter='\\t')\n ### finished processing the block ###\n \n # reset the counts of lines in the block (0-based)\n start=0\n else:\n start=start+1\n filename_data_handle.close() \n filename_data_save_handle.close()\n print(\"Done! %d lines are processed.\" %count)\n print(\"The features are:\")\n print(features)\n\n # save feature list\n np.savetxt(filename_features_save,features,fmt='%s',delimiter='\\t')",
"def main(raw_filepath, interim_filepath, processed_filepath):\n raw_filepath = Path(raw_filepath)\n interim_filepath = Path(interim_filepath)\n processed_filepath = Path(processed_filepath)\n\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n\n years = ['2010', '2011', '2012', '2013', '2014']\n\n #############################################################\n ################ Life Expectancy Outcome ####################\n #############################################################\n\n le_birth = pd.read_csv(raw_filepath / 'US_A.csv',\n usecols=['Tract ID', 'e(0)'],\n dtype={'Tract ID': \"object\"}) \\\n .rename(columns={'Tract ID': 't10_cen_uid_u_2010'}) \\\n .set_index('t10_cen_uid_u_2010')\n\n le_other = pd.read_csv(raw_filepath / 'US_B.csv',\n usecols=['Tract ID', 'Age Group', 'e(x)'],\n dtype={'Tract ID': \"object\"}) \\\n .rename(columns={'Tract ID': 't10_cen_uid_u_2010'}) \\\n .set_index(['t10_cen_uid_u_2010', 'Age Group']) \\\n .sort_index() \\\n .loc[(slice(None), ['15-24', '35-44', '55-64']), :] \\\n .unstack() \\\n .reindex(le_birth.index) # use the same tracts for all experiments\n\n le_other.columns = ['e(20)', 'e(40)', 'e(60)']\n\n # le_birth.to_csv(processed_filepath / 'y_00.csv', header=True)\n # le_other['e(20)'].to_csv(processed_filepath / 'y_20.csv', header=True)\n # le_other['e(40)'].to_csv(processed_filepath / 'y_40.csv', header=True)\n # le_other['e(60)'].to_csv(processed_filepath / 'y_60.csv', header=True)\n\n\n ##############################################################\n ################## Priority Dataset ##########################\n ##############################################################\n\n with open(raw_filepath / 'T10_Priority_Wide_Interpolated.csv', 'r') as f:\n cols = f.readline().strip().split(',')\n\n proj_cols = [x for x in cols if x[-4:] in years]# and\n # get all the priority NETS columns for later\n net_cols = ['t10_cen_uid_u_2010'] + [x[:11] + '_d_' + x[14:] for x in cols if '_net_' in x]\n\n data_X = pd.read_csv(raw_filepath / 'T10_Priority_Wide_Interpolated.csv', usecols=proj_cols,\n dtype={'t10_cen_uid_u_2010': \"object\"}) \\\n .set_index('t10_cen_uid_u_2010')\n\n # Create % younger than 25 (this method is far less than ideal)\n ag25up = data_X.filter(regex='.*(_pop_c_|ag25up).*')\n ag25up_coltuples = [(x[:-4], x[-4:]) for x in ag25up.columns]\n ag25up.columns = pd.MultiIndex.from_tuples(ag25up_coltuples)\n ag25up_long = ag25up.stack()\n ag25dwn_p = ((ag25up_long['t10_ldb_pop_c_'] - ag25up_long['t10_ldb_ag25up_c_'])\n / ag25up_long['t10_ldb_pop_c_']).unstack()\n ag25dwn_p.columns = ['t10_ldb_ag25dwn_p_' + x for x in ag25dwn_p.columns]\n\n # Create % older than 65\n ag65up = data_X.filter(regex='.*(_pop_c_|a60up).*')\n ag65up_coltuples = [(x[:-4], x[-4:]) for x in ag65up.columns]\n ag65up.columns = pd.MultiIndex.from_tuples(ag65up_coltuples)\n ag65up_long = ag65up.stack()\n ag65up_p = (ag65up_long['t10_ldb_a60up_c_'] / ag65up_long['t10_ldb_pop_c_']) \\\n .unstack()\n ag65up_p.columns = ['t10_ldb_ag60up_p_' + x for x in ag65up_p.columns]\n\n # Add our new measure\n data_X = pd.concat([data_X, ag25dwn_p, ag65up_p], axis=1)\n\n # Get rid of all count variables, including nets\n no_count_cols = [x for x in data_X.columns if '_c_' not in x]\n data_X = data_X[no_count_cols]\n\n\n drop_cols = ['t10_gis_area_l_2010',\n 'm10_cen_uid_u_2010',\n 'm10_cen_memi_x_2010',\n 'c10_cen_uid_u_2010',\n 'z10_cen_uid_u_2010']\n\n data_X = data_X.drop(columns=drop_cols) \\\n .reindex(le_birth.index)\n\n 
data_X.columns = pd.Index([(x[:-5], int(x[-4:])) for x in data_X.columns])\n\n X_priority = data_X.groupby(axis=1, level=0).mean()\n X_priority.to_csv(interim_filepath / 'X_priority.csv')\n\n ###########################################################\n #################### NETS Dataset #########################\n ###########################################################\n\n X_nets_allyrs = pd.read_csv(raw_filepath / 'recvd_t10_vars_v8_20190607.csv', usecols=net_cols,\n dtype={'t10_cen_uid_u_2010': \"object\"}) \\\n .set_index('t10_cen_uid_u_2010') \\\n .reindex(le_birth.index)\n\n X_nets_allyrs.columns = pd.Index([(x[:-5], int(x[-4:])) for x in X_nets_allyrs.columns])\n X_nets = X_nets_allyrs.groupby(axis=1, level=0).mean()\n X_nets.to_csv(interim_filepath / 'X_nets.csv')\n\n # Split predictive data by Variable Set\n X_all = pd.concat([X_priority, X_nets], axis=1) \\\n .dropna(how='any')\n\n final_index = le_birth.index.intersection(X_all.index)\n X_all = X_all.reindex(final_index)\n le_birth = le_birth.reindex(final_index)\n le_other = le_other.reindex(final_index)\n\n le_birth.to_csv(processed_filepath / 'y_00.csv', header=True)\n le_other['e(20)'].to_csv(processed_filepath / 'y_20.csv', header=True)\n le_other['e(40)'].to_csv(processed_filepath / 'y_40.csv', header=True)\n le_other['e(60)'].to_csv(processed_filepath / 'y_60.csv', header=True)\n\n # Var Set 1\n p1_features = ['t10_ldb_hinci_m',\n 't10_ldb_pop_d',\n 't10_ldb_nhblk_p',\n 't10_ldb_hisp_p',\n 't10_ldb_col_p']\n X_p1 = X_all[p1_features]\n X_p1.to_csv(processed_filepath / 'X_varGroup1.csv')\n\n # Var Set 2\n p2_features = [\n \"t10_ldb_hinci_m\",\n \"t10_ldb_pop_d\",\n \"t10_ldb_ag25dwn_p\",\n \"t10_ldb_ag60up_p\",\n \"t10_ldb_nhblk_p\",\n \"t10_ldb_hisp_p\",\n \"t10_ldb_col_p\",\n \"t10_ldb_lep_p\",\n \"t10_ldb_mrenti_m\",\n \"t10_ldb_multi_p\",\n \"t10_ldb_nhwht_p\",\n \"t10_ldb_asian_p\",\n \"t10_ldb_fb_p\",\n \"t10_ldb_hs_p\",\n \"t10_ldb_unemp_p\",\n \"t10_ldb_npov_p\",\n \"t10_ldb_vac_p\",\n \"t10_ldb_own_p\",\n \"t10_ldb_mhmvali_m\"\n ]\n X_p2 = X_all[p2_features]\n X_p2.to_csv(processed_filepath / 'X_varGroup2.csv')\n\n # Var Set 3\n X_p3 = X_nets.reindex(final_index)\n X_p3.to_csv(processed_filepath / 'X_varGroup3.csv')\n\n # Var Set 4\n X_p4 = X_all\n X_p4.to_csv(processed_filepath / 'X_varGroup4.csv')",
"def chunk(wb_run,sample_run,ei_guess,rebin,mapingfile,nchunk,**kwargs):\n global reducer,rm_zero,inst_name,van_mass,bleed_switch,rate,pixels\n print 'DGreduce run for ',inst_name,'run number ',sample_run\n try:\n n,r=lhs('both')\n wksp_out=r[0]\n except:\n if sample_run == 0:\n #deal with the current run being parsed as 0 rather than 00000\n sample_run='00000'\n wksp_out=inst_name+str(sample_run)+'.spe'\n if kwargs.has_key('sum') and kwargs.get('sum')==True:\n wksp_out=inst_name+str(sample_run[0])+'sum'+'.spe'\n \n start_time=time.time()\n \n if sample_run=='00000' and mtd.doesExist(inst_name+'00000.raw')==True:\n print 'Deleteing previous instance of temp data'\n DeleteWorkspace(Workspace=inst_name+'00000.raw')\n \n \n reducer.energy_bins = rebin\n \n mon_list1=reducer.ei_mon_spectra\n mon_list2=reducer.mon1_norm_spec\n mon_list1.append(mon_list2)\n #mon_list1.sort()\n print 'Monitors for this chunk are: ',mon_list1\n # monitors for merlin[69634,69638]\n \n if inst_name == 'MER':\n #number of spectrums per instrument and where the detectors start (i.e. 5 for mari but 1 for merlin)\n numspec=69632\n spectrum_start=1\n if inst_name == 'MAP':\n #number of spectrums per instrument and where the detectors start (i.e. 5 for mari but 1 for merlin)\n numspec=41472\n spectrum_start=1\n \n if kwargs.has_key('det_cal_file'):\n cal_file = kwargs.get('det_cal_file') \n else:\n print 'Setting detector calibration to detector block info from ', sample_run\n \n reducer.det_cal_file =None\n reducer.relocate_dets = False\n nums=range(spectrum_start,numspec,nchunk)\n output_wkspName=wksp_out\n for i in nums:\n print '=========================================================================='\n print 'start spectra for this chunk',i\n chunk=range(i,i+nchunk)\n endIndex=nchunk-1\n if i+nchunk > numspec:\n chunk=range(i,numspec+1)\n endIndex=len(chunk)-1\n print 'end spectra for this chunk ', i+endIndex\n \n speclist=mon_list1+chunk\n #print speclist\n LoadRaw(Filename=wb_run,OutputWorkspace=\"wb_wksp\",LoadLogFiles=\"0\",SpectrumList=speclist)\n \n LoadRaw(Filename=sample_run,OutputWorkspace=\"run_wksp\",LoadLogFiles=\"0\",SpectrumList=speclist)\n \n tmp=arb_units(\"wb_wksp\",\"run_wksp\",ei_guess,rebin,'none_for_this_run_type',one2one=True,bleed=False,**kwargs)\n \n \n DeleteWorkspace(Workspace=\"wb_wksp\")\n DeleteWorkspace(Workspace=\"run_wksp\")\n #DeleteWorkspace(\"_wksp.spe\")\n #DeleteWorkspace(\"_wksp.spe-white\")\n \n if i == spectrum_start:\n #crop the workspace to remove the monitors, the workpsace seems sorted on specnumber so this is ok for instruments where the monitors are at the end of the \n # spectrum list\n CropWorkspace(InputWorkspace=tmp,OutputWorkspace=wksp_out,StartWorkSpaceIndex=0,EndWorkSpaceIndex=endIndex)\n else:\n CropWorkspace(InputWorkspace=tmp,OutputWorkspace=tmp,StartWorkSpaceIndex=0,EndWorkSpaceIndex=endIndex)\n ConjoinWorkspaces(InputWorkspace1=wksp_out,InputWorkspace2=tmp,CheckOverlapping='0')\n print int(((float(i+endIndex))/float(numspec))*100),'% complete'\n print '===============================================================================' \n \n GroupDetectors(InputWorkspace=output_wkspName,OutputWorkspace=output_wkspName,MapFile=mapingfile)\n\n \n \n print 'Elapsed time =',time.time()-start_time, 's'\n return mtd[wksp_out]",
"def main():\n dir_path='.'\n meas_file='magic_measurements.txt'\n samp_file=\"er_samples.txt\"\n out_file='magic_measurements.txt'\n if '-h' in sys.argv:\n print(main.__doc__)\n sys.exit()\n if '-WD' in sys.argv:\n ind = sys.argv.index('-WD')\n dir_path=sys.argv[ind+1]\n if '-f' in sys.argv:\n ind = sys.argv.index('-f')\n meas_file=sys.argv[ind+1]\n if '-fsa' in sys.argv:\n ind = sys.argv.index('-fsa')\n samp_file=sys.argv[ind+1]\n if '-F' in sys.argv:\n ind = sys.argv.index('-F')\n out_file=sys.argv[ind+1]\n # read in measurements file\n meas_file=dir_path+'/'+meas_file\n out_file=dir_path+'/'+out_file\n samp_file=dir_path+'/'+samp_file\n data,file_type=pmag.magic_read(meas_file)\n samps,file_type=pmag.magic_read(samp_file)\n MeasRecs=[]\n sampnames,sflag=[],0\n for rec in data:\n for samp in samps:\n if samp['er_sample_name'].lower()==rec['er_sample_name'].lower():\n if samp['er_sample_name'] not in sampnames:sampnames.append(samp['er_sample_name'].lower())\n rec['er_site_name']=samp['er_site_name']\n rec['er_location_name']=samp['er_location_name']\n MeasRecs.append(rec)\n break\n if rec['er_sample_name'].lower() not in sampnames:\n sampnames.append(rec['er_sample_name'].lower())\n sflag=1\n SampRec={}\n for key in list(samps[0].keys()):SampRec[key]=\"\"\n SampRec['er_sample_name']=rec['er_sample_name']\n SampRec['er_citation_names']=\"This study\"\n SampRec['er_site_name']='MISSING'\n SampRec['er_location_name']='MISSING'\n SampRec['sample_desription']='recorded added by update_measurements - edit as needed'\n samps.append(SampRec)\n print(rec['er_sample_name'],' missing from er_samples.txt file - edit orient.txt file and re-import')\n rec['er_site_name']='MISSING'\n rec['er_location_name']='MISSING'\n MeasRecs.append(rec)\n pmag.magic_write(out_file,MeasRecs,'magic_measurements')\n print(\"updated measurements file stored in \", out_file)\n if sflag==1:\n pmag.magic_write(samp_file,samps,'er_samples')\n print(\"updated sample file stored in \", samp_file)",
"def main():\n\n logger.info('Process initiated - Building dataset')\n\n if os.path.isfile(train_path) and os.path.isfile(test_path):\n logger.info('Loading pickled data')\n return pd.read_pickle(train_path), pd.read_pickle(test_path)\n\n logger.info('Reading COSMIC Cancer Gene Census')\n gene_census = cancer_gene_census()\n gene_census.extend(civic_cancer_genes())\n\n gene_census = set(gene_census)\n\n training_data = pd.DataFrame()\n testing_data = pd.DataFrame()\n\n for cancer_type in cancer_types:\n data_file_name = cancer_type + \".meth.by_mean.data.txt\"\n data_file_location = os.path.join(data_location, data_file_name)\n\n logger.info('Reading Methylation data for {}'.format(cancer_type))\n\n methyl_data = pd.read_csv(data_file_location, delimiter='\\t', skiprows=[1], index_col=0)\n\n logger.info(\n 'Number of Genes: {0} | Number of Patients: {1}'.format(methyl_data.shape[0], methyl_data.shape[1]))\n logger.info('Preprocessing Methylation data')\n\n methyl_data = genes_feature_selection(methyl_data, gene_census)\n\n logger.info('Number of Genes after processing: {0}\\n'.format(methyl_data.shape[0]))\n\n methyl_data = add_classification_label(methyl_data)\n methyl_data = methyl_data.transpose()\n\n normal_cases = methyl_data[methyl_data['Tumor'] == 0]\n logger.info(normal_cases.shape)\n train_normal_cases = normal_cases.sample(frac=0.7, random_state=200)\n logger.info(train_normal_cases.shape)\n test_normal_cases = normal_cases.drop(train_normal_cases.index)\n logger.info(train_normal_cases.shape)\n\n tumor_cases = methyl_data[methyl_data['Tumor'] != 0]\n logger.info(tumor_cases.shape)\n train_tumor_cases = tumor_cases.sample(frac=0.7, random_state=200)\n logger.info(train_tumor_cases.shape)\n\n test_tumor_cases = tumor_cases.drop(train_tumor_cases.index)\n logger.info(test_tumor_cases.shape)\n\n training_data = training_data.append(train_normal_cases)\n training_data = training_data.append(train_tumor_cases)\n\n testing_data = testing_data.append(test_normal_cases)\n testing_data = testing_data.append(test_tumor_cases)\n\n training_data = training_data.sample(frac=1)\n testing_data = testing_data.sample(frac=1)\n\n logger.info('Pickling training and testing data')\n training_data.to_pickle(train_path)\n testing_data.to_pickle(test_path)\n\n logger.info('Processing completed!')\n visualize_data(training_data)\n\n return training_data, testing_data",
"def read_file(self, reader):\n # Open files to write comments, invalid and ignored entries\n comments = open('genome.comments.gff', 'w')\n invalid = open('genome.invalid.gff', 'w')\n ignored = open('genome.ignored.gff', 'w')\n\n # First pass, pulling out all genes and mRNAs\n # and placing child features if possible\n for line in reader:\n if len(line) == 0 or line[0].startswith('#'):\n comments.write(line)\n continue\n splitline = self.validate_line(line)\n if not splitline:\n invalid.write(line)\n else:\n line_added = self.process_line(splitline)\n if not line_added:\n ignored.write(line)\n\n # Second pass, placing child features which \n # preceded their parents in the first pass\n orphans = copy.deepcopy(self.orphans)\n for splitline in orphans:\n self.process_line(splitline)\n \n # Add mRNAs to their parent genes\n for mrna in self.mrnas.values():\n parent_gene = self.genes[mrna.parent_id]\n parent_gene.mrnas.append(mrna)\n\n if self.skipped_features > 0:\n sys.stderr.write(\"Warning: skipped \"+str(self.skipped_features)+\" uninteresting features.\\n\")\n return self.genes.values()",
"def add_pacbio_run(run_set, sample_entry):\n\n ### Init ###\n\n # Check protocol\n if sample_entry['Protocol'] != 'hifi' and sample_entry['Protocol'] != 'clr':\n raise RuntimeError('Protocol not supported: ' + sample_entry['Protocol'])\n\n UPLOAD_FILE_NAME_FORMAT = UPLOAD_FILE_NAME_FORMAT_BAM\n\n # Check MD5\n file_md5 = sample_entry['MD5'].strip() if not pd.isnull(sample_entry['MD5']) else ''\n\n if not file_md5:\n raise RuntimeError('Missing MD5: sample={Sample}, proto={Protocol}, run={Run}, cell={Cell}'.format(**sample_entry))\n\n # Get experiment\n experiment_alias = 'HGSVC_Reads_{sample}_{proto}_{center}'.format(\n sample=sample_entry['Sample'],\n proto=PROTOCOL_NAME[sample_entry['Protocol']],\n center=sample_entry['Center']\n )\n\n# ena_experiment_accession = get_hifi_experiment(sample_entry['Sample']).strip() if not pd.isnull(sample_entry['Sample']) else ''\n#\n# if not ena_experiment_accession:\n# raise RuntimeError('No experiment accession: sample={Sample}, proto={Protocol}, run={Run}, cell={Cell}'.format(**sample_entry))\n\n\n ### Construct entry ###\n\n # Add run element\n run_element = ET.SubElement(\n run_set, 'RUN',\n alias='{sample}-{proto}-{run}-{cell}'.format(\n sample=sample_entry['Sample'],\n run=sample_entry['Run'],\n cell=sample_entry['Cell'],\n proto=PROTOCOL_NAME[sample_entry['Protocol']]\n ),\n center_name=CENTER_NAME[sample_entry['Center']]\n )\n\n # Add title\n if sample_entry['Protocol'] == 'hifi':\n ET.SubElement(run_element, 'TITLE').text = 'HGSVC HiFi {Sample} - {Run} {Cell}'.format(**sample_entry)\n \n elif sample_entry['Protocol'] == 'clr':\n ET.SubElement(run_element, 'TITLE').text = 'HGSVC CLR {Sample} - {Run} {Cell}'.format(**sample_entry)\n \n else:\n raise RuntimeError('Unknown protocol: ' + sample_entry['protocol'])\n\n # Add experiment reference\n #exp_ref = ET.SubElement(run_element, 'EXPERIMENT_REF', accession=ena_experiment_accession)\n exp_ref = ET.SubElement(run_element, 'EXPERIMENT_REF', refname=experiment_alias)\n\n #ET.SubElement(ET.SubElement(exp_ref, 'IDENTIFIERS'), 'PRIMARY_ID').text = ena_experiment_accession\n\n # Data block\n files_element = ET.SubElement(ET.SubElement(run_element, 'DATA_BLOCK'), 'FILES')\n\n ET.SubElement(\n files_element, 'FILE',\n filename=UPLOAD_FILE_NAME_FORMAT.format(**sample_entry),\n filetype='bam',\n checksum_method='MD5',\n checksum=file_md5\n )\n\n\n ### Attributes (optional) ###\n\n # Parse attributes\n attrib_list = list()\n \n attr_center = CENTER_NAME[sample_entry['Center']]\n attr_lib = sample_entry['Library name'].strip() if not pd.isnull(sample_entry['Library name']) else None\n attr_chem = sample_entry['Chemistry'].strip() if not pd.isnull(sample_entry['Chemistry']) else None\n attr_lib_len = sample_entry['Library Len (Mean bp)'] if not pd.isnull(sample_entry['Library Len (Mean bp)']) else None\n attr_notes = sample_entry['Notes'].strip() if not pd.isnull(sample_entry['Notes']) else None\n \n attrib_list.append(('Center', attr_center))\n \n if attr_lib is not None:\n attrib_list.append(('LibraryName', attr_lib))\n\n if attr_chem is not None:\n attrib_list.append(('Chemistry', attr_chem))\n\n if attr_lib_len is not None:\n\n try:\n attr_lib_len = '{:,d} bp'.format(np.int32(attr_lib_len))\n except ValueError:\n attr_lib_len = str(attr_lib_len).strip()\n\n if attr_lib_len:\n attrib_list.append(('LibraryInsertLength', attr_lib_len))\n\n if attr_notes is not None:\n attrib_list.append(('Notes', attr_notes))\n\n # Add attributes\n if attrib_list:\n\n attrib_element_container = ET.SubElement(run_element, 
'RUN_ATTRIBUTES')\n\n for attr_key, attr_val in attrib_list:\n\n attrib_element = ET.SubElement(attrib_element_container, 'RUN_ATTRIBUTE')\n\n ET.SubElement(attrib_element, 'TAG').text = attr_key\n ET.SubElement(attrib_element, 'VALUE').text = attr_val",
"def add_genesets(snp_dict,gene_file):\n inf = open(gene_file,\"r\")\n for i in snp_dict.keys():\n snp_dict[i]['genes']=np.empty(len(snp_dict[i]['bps']), dtype=set)\n for line in inf:\n if re.match(\"\\#\",line):\n continue\n line.rstrip()\n fields=line.split()\n if len(fields) < 3:\n continue\n bps=int(fields[1])\n if fields[0] in snp_dict.keys():\n idx = snp_dict[fields[0]]['bps'].searchsorted(bps)\n if (idx < len(snp_dict[fields[0]]['bps'])) and snp_dict[fields[0]]['bps'][idx] == bps:\n snp_dict[fields[0]]['genes'][idx]=set([ x for x in fields[2:] ])\n return True",
"def featMatGenerator(dirName, trajfile, trajFilter):\n \n #load the data and extract feature vectors for each trajectory and plate summary for each chunk\n featMatTraj = {}\n featMatPlate = pd.DataFrame()\n try:\n if len(trajfile.split('_'))<10:\n fshort = '_'.join(trajfile.split('_')[0:-2:6])\n else:\n fshort = '_'.join(trajfile.split('_')[0:-1:7])\n featMatPlate = pd.DataFrame()\n with pd.HDFStore(os.path.join(dirName, trajfile), 'r') as fid:\n nChunks = list(fid.keys())\n for chunk in nChunks:\n chunkno = [int(s) for s in chunk.split('_') if s.isdigit()]\n chunkno = chunkno[0]\n\n featMatTraj[chunkno] = pd.DataFrame()\n nWorms = np.unique(fid[chunk]['worm_index'])\n for w in nWorms:\n if fid[chunk][fid[chunk]['worm_index']==w].shape[0]>=trajFilter:\n featMatTraj[chunkno] = featMatTraj[chunkno].append(\\\n fid[chunk][fid[chunk]['worm_index']==w].mean(),ignore_index=True)\n \n featMatTraj[chunkno].reset_index(drop=True)\n \n temp = featMatTraj[chunkno].median()\n temp = temp.drop(['worm_index', 'timestamp']).rename(lambda x: x +'_med').to_frame().transpose()\n \n temp2 = featMatTraj[chunkno].quantile(0.75) - featMatTraj[chunkno].quantile(0.25)\n temp2 = temp2.drop(['worm_index', 'timestamp']).rename(lambda x: x + '_iqr').to_frame().transpose()\n \n tempfinal = pd.concat([temp, temp2], axis = 1)\n tempfinal ['exp'] = fshort\n tempfinal['Chunk'] = chunk\n tempfinal ['drug'] = fshort.split('_')[0]\n \n featMatPlate = featMatPlate.append(tempfinal, ignore_index=True)\n del temp, temp2, tempfinal\n del nWorms\n del nChunks\n \n featMatPlate.reset_index(drop=True) \n featMatPlate.drop(featMatPlate.columns[np.sum(featMatPlate.isna()>featMatPlate.shape[0]/2)], \\\n axis=1, inplace = True)\n except OSError:\n print (trajfile + 'is invalid file format') \n\n #write the featMatPlate to a .csv file\n featMatPlate.to_csv(os.path.join(os.path.dirname(dirName), fshort + '_FeatMatPlate.csv'))\n\n #save the featMatTraj to an excel file\n writer = pd.ExcelWriter(os.path.join(os.path.dirname(dirName), fshort + '_FatMatTraj.xlsx'))\n for chunk in featMatTraj.keys():\n featMatTraj[chunk].to_excel(writer, sheet_name = str(chunk))\n writer.save()\n \n return featMatTraj, featMatPlate",
"def import_gff(file, genome_version, verbose = False):\n \n from tridentdb import models\n import re\n from django.db.utils import DatabaseError\n \n if genome_version == None:\n print(\"Genome Version is needed for loading a gff file.\")\n return\n\n genomes = models.Genome.objects.filter(genome_ver = genome_version)\n if len(genomes) == 0:\n print(\"Unknown Genome Version: %s\" % genome_version)\n return\n \n lineno = 1\n for line in file:\n if verbose:\n print(\"Line Number: %d\" % lineno)\n lineno += 1\n if not line:\n continue\n line = line.strip()\n if line[0] == '#':\n continue\n info = line.split('\\t')\n chromosome = info[0].replace(\"chr\", \"\")\n is_primary_transcript = (info[2] == 'miRNA_primary_transcript')\n genomic_mir_start = info[3]\n genomic_mir_end = info[4]\n is_on_positive_strand = (info[6] == '+')\n \n mirbase_id = mirbase_acc = mirbase_name = mirbase_derives_from = None\n mirbase = info[8].split(';')\n for tag in mirbase:\n (name, val) = tag.split('=')\n if name == \"ID\":\n mirbase_id = val\n elif name == \"accession_number\":\n mirbase_acc = val\n elif name == \"Alias\":\n # Use alias for accession_number IFF accession_number\n # is not used\n if not mirbase_acc:\n mirbase_acc = val\n elif name == \"Name\":\n mirbase_name = val\n elif name == \"derives_from\":\n mirbase_derives_from = val\n else:\n print(\"Unknown Mirbase tag: \\\"%s\\\"\" % name)\n continue\n\n mirna = models.MicroRNA(chromosome=chromosome, is_primary_transcript = is_primary_transcript, genomic_mir_start = genomic_mir_start, genomic_mir_end = genomic_mir_end, is_on_positive_strand = is_on_positive_strand, mirbase_id = mirbase_id, mirbase_acc = mirbase_acc, mirbase_name = mirbase_name, mirbase_derives_from = mirbase_derives_from, genome = genomes[0] )\n \n try:\n mirna.save()\n except DatabaseError as de:\n from sys import stderr\n stderr.write(\"Error loading GFF line: {0}\\n\".format(line))\n raise de\n ##end of import_gff",
"def get_mult_gene_RNA(ensemble, genes, grouping, max_points='10000'):\n\n\t# Prevent SQL injected since column names cannot be parameterized.\n\tif \";\" in ensemble or \";\" in grouping:\n\t\treturn None\n\n\tgenes = [gene+\"%\" for gene in genes]\n\n\t# This query is just to fix gene id's missing the ensemble version number.\n\t# Necessary because the table name must match exactly with whats on the MySQL database.\n\t# Ex. ENSMUSG00000026787 is fixed to ENSMUSG00000026787.3\n\tfirst_query = \"SELECT gene_id FROM genes WHERE gene_id LIKE %s\" + \" OR gene_id LIKE %s\" * (len(genes)-1)\n\tresult = db.get_engine(current_app, 'methylation_data').execute(first_query, (genes,)).fetchall()\n\n\tgene_table_names = ['gene_' + gene_id[0].replace('.','_') for gene_id in result]\n\n\tdf_all = pd.DataFrame()\n\n\tfirst = True\n\tfor gene_table_name in gene_table_names:\n\t\tquery = \"SELECT cells.cell_id, cells.cell_name, cells.dataset, \\\n\t\t\t%(ensemble)s.annotation_RNA, %(ensemble)s.cluster_RNA, \\\n\t\t\t%(ensemble)s.tsne_x_RNA, %(ensemble)s.tsne_y_RNA, \\\n\t\t\t%(gene_table_name)s.normalized_counts, \\\n\t\t\tdatasets.target_region \\\n\t\t\tFROM cells \\\n\t\t\tINNER JOIN %(ensemble)s ON cells.cell_id = %(ensemble)s.cell_id \\\n\t\t\tLEFT JOIN %(gene_table_name)s ON %(ensemble)s.cell_id = %(gene_table_name)s.cell_id \\\n\t\t\tLEFT JOIN datasets ON cells.dataset = datasets.dataset\" % {'ensemble': ensemble,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'gene_table_name': gene_table_name}\n\t\tif max_points.isdigit():\n\t\t\tquery = query+\" ORDER BY RAND() LIMIT %(max_points)s()\" % {'max_points': max_points}\n\n\t\ttry:\n\t\t\tdf_all = df_all.append(pd.read_sql(query, db.get_engine(current_app, 'RNA_data')))\n\t\texcept exc.ProgrammingError as e:\n\t\t\tnow = datetime.datetime.now()\n\t\t\tprint(\"[{}] ERROR in app(get_mult_gene_RNA): {}\".format(str(now), e))\n\t\t\tsys.stdout.flush()\n\t\t\treturn None\n\n\t\tif first:\n\t\t\tdf_coords = df_all\n\t\tfirst = False\n\n\tif df_all.empty: # If no data in column, return None\n\t\tnow = datetime.datetime.now()\n\t\tprint(\"[{}] ERROR in app(get_gene_RNA): No RNA data for {}\".format(str(now), ensemble))\n\t\tsys.stdout.flush()\n\t\treturn None\n\n\tdf_all['normalized_counts'].fillna(0, inplace=True)\n\n\tdf_avg_methylation = df_all.groupby(by='cell_id', as_index=False)['normalized_counts'].mean()\n\tdf_coords.update(df_avg_methylation)\n\n\tif grouping == 'annotation':\n\t\tdf_coords.fillna({'annotation_RNA': 'None'}, inplace=True)\n\t\tdf_coords['annotation_cat'] = pd.Categorical(df_coords['annotation_RNA'], cluster_annotation_order)\n\t\tdf_coords.sort_values(by='annotation_cat', inplace=True)\n\t\tdf_coords.drop('annotation_cat', axis=1, inplace=True)\n\telif grouping == 'cluster':\n\t\tdf_coords.sort_values(by='cluster_RNA', inplace=True)\n\treturn df_coords",
"def permute_data(self):\n\n if not eval(self.args.Segment_Permutation_File):\n \"\"\"\n User has selected no permutation outputs and should not be here so kick them out.\n \"\"\"\n self.log.error(\"--Segment_Permutation_File must be set True\")\n return\n\n cell_list = []\n\n for cell in self.args.Cell_Name.strip().split(','):\n cell_list.append(cell)\n\n \"\"\"\n This block is designed to do Segment Permutation Analysis. Randomly select groups of\n self.permutation_group_size from the Genome space, in this case the list of ID's from the\n self.bin_tracking_list, and intersect them with the Target Bed data.\n \"\"\"\n self.log.info(\"Spawning {0} jobs to process {1} iterations each for segment permutation analysis.\"\n .format(self.args.Spawn, self.args.Iteration_Count))\n\n self.target_bed_map_array = self.target_mapping()\n selection_space = self.bin_tracking_array[:, 0]\n intersect_space = self.target_bed_map_array[:, 0].tolist()\n\n p = pathos.multiprocessing.Pool(int(self.args.Spawn))\n p.starmap(self.intersection_iteration, zip(itertools.repeat(selection_space),\n itertools.repeat(intersect_space),\n itertools.repeat(self),\n cell_list))\n\n self.log.info(\"Segment Permutation jobs done. Compile any temporary data files into final results.\")",
"def _make_test_mutant_dataset(positions_and_readcounts_string, raw_chrom_names=False):\n dataset = Insertional_mutant_pool_dataset()\n if not positions_and_readcounts_string: \n return dataset\n for N, string in enumerate(positions_and_readcounts_string.split(', ')):\n raw_pos, readcount = string.split(' ')\n if '/' in readcount: readcount, perfect = [int(x) for x in readcount.split('/')]\n else: readcount = perfect = int(readcount)\n assert readcount >= perfect, \"In mutant string %s, perfect readcount is over total - not allowed!\"%string\n if '+' in raw_pos: strand = '+'\n elif '-' in raw_pos: strand = '-'\n else: raise Exception(\"Short-position %s has no strand!\"%raw_pos)\n chrom, pos = raw_pos.split(strand)\n pos = int(pos)\n if not raw_chrom_names:\n if chrom: chrom = 'chromosome_%s'%chrom\n else: chrom = 'chromosome_1'\n elif not chrom:\n raise Exception(\"Short-position %s has no chromosome name - can't use with raw_chrom_names!\")\n full_pos = Insertion_position(chrom, strand, position_before=pos, immutable=True)\n mutant = Insertional_mutant(IB=str(N), insertion_position=full_pos)\n mutant.total_read_count = readcount\n mutant.perfect_read_count = perfect\n dataset.add_mutant(mutant)\n return dataset",
"def gene_heritability(\n input_snp_filename: \"Data Input, use the SNPs file from dataParse\",\n output_genes_filename: 'output file for gene-level results, use .csv',\n output_summary_filename: 'output file for the genomewide results summary, use .csv',\n logger_filename: 'file for the logger, use a txt',\n sweeps: \"number of samples for each chain\" = 1000,\n burnin: \"number of burnin samples\" = 1000,\n n_chains: \"number of chains of the sampler\" = 4,\n n_cores: \"number of parallel cores to use\" = 4,\n N_1kG: \"number of SNPs onwhich the LD-score is calculated\" = 1290028,\n chromosome: \"chromosome on which the analysis is run\" = \"all\",\n snp_thr: \"threshold for the minimum number of SNPs in a gene\" = 10,\n sep: \"separator for the input files, use t for tab separated (not \\t)\" = \",\",\n model: 'specify the model for the regression, one betwenn normal/gamma' = 'normal',\n fix_intercept = False,\n ):\n\n # Initialisation of the logger\n output_logger = log.setup_logger(\"output_logger\", logger_filename)\n log.initialise_log(output_logger,\n 'gene level regression, model: %s' %model,\n [input_snp_filename],\n [output_genes_filename,output_summary_filename],\n sweeps,\n burnin,\n chromosome = str(chromosome),\n other_params_diz = {'chains': n_chains, 'cores': n_cores, 'SNP threshold': snp_thr})\n\n # Initialisation function, it reads the summary stats file, filters the SNPs,\n # creates the output files\n\n logging.info(\"Start Analysis\")\n\n snps = s.Snps()\n # read table\n logging.info(\"Reading SNP file: %s,\\n\\t with %s delimiter\"%(input_snp_filename, sep))\n snps.read_table(input_snp_filename, separator=sep)\n # generate chi squared stats\n snps.generate_stats()\n # update the summary stats\n snps.update_summary()\n output_logger.info(\" Sample size \" + str(snps.n_patients) + \"\\n\")\n\n\n\n snps.apply_filter_table(s.baghera_filter)\n snps.update_summary()\n output_logger.info(\"After baghera init filter.\\n\\t Number of SNPs: %s\\n\\t Number of genes: %s\\n\" \\\n %(str(snps.n_snps), str(snps.n_genes)) )\n\n # Non coding SNPs are assigned to a dummy gene, such that the regression is done on the entire SNPs' set\n snps.rename_non_annotated(name='NonCoding')\n\n if chromosome != \"all\":\n snps.apply_filter_table(snps.cut_single_chrom, **{'chromosome': chromosome})\n output_logger.info(\n \"Analysis restricted to chr %s\" %str(chromosome) )\n\n snps.update_summary()\n output_logger.info(\"Analysis. 
Number of SNPs: %s\\n, Number of genes: %s\\n\" \\\n %(str(snps.n_snps), str(snps.n_genes)) )\n\n # Creates the genes table with the number of SNPs for each gene and the basic stats values\n genes=g.Genes()\n genes.initialise_genes(snps.table.copy(), snps_thr=snp_thr)\n\n output_logger.info(\"Output gene table initialised:\\nNumber of genes: %s\\n\" \\\n %(str(genes.n_genes)) )\n\n snps.set_non_annotated(genes.cut_genes, 'NonCoding')\n\n if model == 'gamma':\n result = gr.analyse_gamma(snps, output_summary_filename, output_logger,\n sweeps, burnin, n_chains, n_cores, N_1kG, fix_intercept,\n )\n else:\n result = gr.analyse_normal(snps, output_summary_filename, output_logger,\n sweeps, burnin, n_chains, n_cores, N_1kG, fix_intercept,\n )\n\n logging.info(\"Saving genes table\")\n genes.table = genes.table.merge(\n result, left_index=False, left_on=\"name\", right_on=\"name\")\n\n k = genes.table.n_snps / float(N_1kG)\n genes.table[\"h2g\"] = genes.table.bg_mean.astype(\"float\") * k\n\n genes.table = genes.table.sort_values(by=[\"P\", \"bg_median\"])\n\n genes.save_table(output_genes_filename)\n\n non_coding = genes.table[genes.table.name == \"NonCoding\"]\n h2g_tot = np.sum(genes.table[\"h2g\"].values) - non_coding[\"h2g\"].values\n\n output_logger.info(\" Non coding heritability : \" +\n str(non_coding[\"h2g\"].values) + \"\\n\")\n output_logger.info(\" Coding heritability : \" + str(h2g_tot) + \"\\n\")",
"def run_all2(protgroup, memornot, subsequences, base_outdir,\n protgroup_dict, protein_feathers_dir, date, errfile, impute_counts=True,\n cutoff_num_proteins=0, core_only_genes=None,\n length_filter_pid=.8, remove_correlated_feats=True,\n force_rerun_counts=False, force_rerun_percentages=False, force_rerun_pca=False):\n import ssbio.utils\n\n # Need to set multiprocessing limit for scipy/numpy stuff if parallelizing anything\n import os\n os.environ['OMP_NUM_THREADS'] = '1'\n\n # First, filter down the protein group to the membrane/nonmembrane definition\n prots_filtered_feathers = get_protein_feather_paths(protgroup=protgroup, memornot=memornot,\n protgroup_dict=protgroup_dict,\n protein_feathers_dir=protein_feathers_dir,\n core_only_genes=core_only_genes)\n num_proteins = len(prots_filtered_feathers)\n if num_proteins <= cutoff_num_proteins:\n return\n\n # Make output directories\n protscale = 'proteome_unscaled'\n outdir_d0 = ssbio.utils.make_dir(op.join(base_outdir, protscale))\n outdir_d1 = ssbio.utils.make_dir(op.join(outdir_d0, '-'.join(memornot)))\n outdir_final = ssbio.utils.make_dir(op.join(outdir_d1, '-'.join(protgroup)))\n\n if impute_counts:\n big_strain_counts_df = get_proteome_counts_impute_missing(prots_filtered_feathers=prots_filtered_feathers,\n outpath=op.join(outdir_final,\n '{}-subsequence_proteome_IMP.fthr'.format(\n date)),\n length_filter_pid=length_filter_pid,\n force_rerun=force_rerun_counts)\n\n big_strain_percents_df = get_proteome_percentages(counts_df=big_strain_counts_df,\n outpath=op.join(outdir_final,\n '{}-subsequence_proteome_perc_IMP.fthr'.format(\n date)),\n force_rerun=force_rerun_percentages)\n pca_pickle = op.join(outdir_final, '{}-subsequence_pca.pckl'.format(date))\n # Divide by totals to get percentages in a new dataframe\n else:\n try:\n big_strain_percents_df = get_proteome_correct_percentages(prots_filtered_feathers=prots_filtered_feathers,\n outpath=op.join(outdir_final,\n '{}-subsequence_proteome_perc_AVG.fthr'.format(\n date)),\n length_filter_pid=length_filter_pid,\n force_rerun=force_rerun_percentages)\n pca_pickle = op.join(outdir_final, '{}-subsequence_pca_AVG.pckl'.format(date))\n except:\n with open(errfile, \"a\") as myfile:\n myfile.write('PERCENTAGES ERR: ' + '-'.join(memornot) + '\\t' + '-'.join(protgroup) + \"\\n\")\n return\n\n\n\n if ssbio.utils.force_rerun(flag=force_rerun_pca, outfile=pca_pickle):\n\n # Then, get filters for rows of the loaded feathers for interested subsequences\n keep_subsequences = get_interested_subsequences(subsequences=subsequences)\n\n # Some numbers: number of features\n num_feats = len(big_strain_percents_df)\n\n # Make an unwieldy title\n big_title = 'LOC={0}; PROTGROUP={1};\\n' \\\n 'NUMPROTS={2}; NUMFEATS={3}'.format('-'.join(memornot),\n '-'.join(protgroup),\n num_proteins,\n num_feats)\n\n # Run PCA and make plots\n runner = PCAMultiROS(features_df=big_strain_percents_df, observations_df=pd.DataFrame(), plot_title=big_title)\n try:\n runner.clean_data(keep_features=keep_subsequences, remove_correlated_feats=remove_correlated_feats)\n except:\n with open(errfile, \"a\") as myfile:\n myfile.write(\n 'CLEAN ERR: ' + '-'.join(memornot) + '\\t' + '-'.join(protgroup) + \"\\n\")\n return\n # try:\n runner.run_pca()\n # except:\n # with open(errfile, \"a\") as myfile:\n # myfile.write(\n # 'PCA ERR: ' + '-'.join(memornot) + '\\t' + '-'.join(protgroup) + \"\\n\")\n # return\n with open(pca_pickle, 'wb') as f:\n pickle.dump(runner, f)\n else:\n with open(pca_pickle, 'rb') as f:\n runner = pickle.load(f)"
] |
[
"0.5643232",
"0.56289685",
"0.54290247",
"0.5406451",
"0.5230023",
"0.52227926",
"0.52050686",
"0.5192278",
"0.51909274",
"0.51697755",
"0.5094837",
"0.50822145",
"0.5081997",
"0.50698656",
"0.5034037",
"0.50327355",
"0.5028977",
"0.5021501",
"0.50142425",
"0.4978028",
"0.4974297",
"0.496632",
"0.49662673",
"0.49513277",
"0.49447608",
"0.4940831",
"0.49368367",
"0.49346974",
"0.49315926",
"0.49281064"
] |
0.74304503
|
0
|
Add gene annotation data to mutant.gene_annotation, including multiple-gene cases; return True if annotations were found. If mutant.gene is a single gene, this just gets the annotation list for it from gene_annotation_dict and puts that in mutant.gene_annotation (or [] if the gene isn't in gene_annotation_dict); if mutant.gene is a MULTIPLE_GENE_JOIN-separated string with multiple gene IDs, this finds the annotations for all of them, zips them together and joins them similarly with MULTIPLE_GENE_JOIN. For instance, if gene was "A | B" and the annotations for A were [a, 1, 100] and for B were [b, 2, 100], the resulting annotations would be ["a | b", "1 | 2", "100 | 100"]. No duplicate-removal is done; genes with no annotation are skipped.
|
def _get_annotation_for_gene(gene, gene_annotation_dict):
# grab annotations for each gene
annotations = []
for gene in gene.split(MULTIPLE_GENE_JOIN):
try: annotations.append(gene_annotation_dict[gene])
except KeyError: pass
# make joint annotation (each field for all genes);
    # make this look better by dealing with empty data specially: turn " & " into "" and " & x" into "- & x".
joint_annotations = []
for ann in zip(*annotations):
if any(ann):
ann = [a if a else '-' for a in ann]
joint_annotations.append(MULTIPLE_GENE_JOIN.join(ann))
else:
joint_annotations.append('')
# MAYBE-TODO do duplicate-removal etc? But that might just make things confusing - not obvious what goes with which gene.
return joint_annotations
# TODO unit-test!
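
A minimal usage sketch of the function above, exercising the three cases described in the query. It assumes the function is in scope and that MULTIPLE_GENE_JOIN is the ' | ' separator shown in the query's "A | B" example (the comment in the code hints the real constant may be ' & '; the joining logic is the same either way). The gene_annotation_dict values here are made-up placeholders.

MULTIPLE_GENE_JOIN = ' | '   # assumed separator, inferred from the query's example

gene_annotation_dict = {'A': ['a', '1', '100'],
                        'B': ['b', '2', '100']}

# multiple-gene case: per-field annotations are zipped and re-joined with the separator
print(_get_annotation_for_gene('A | B', gene_annotation_dict))   # ['a | b', '1 | 2', '100 | 100']

# single-gene case: the gene's own annotation list comes back unchanged
print(_get_annotation_for_gene('A', gene_annotation_dict))       # ['a', '1', '100']

# unknown gene: nothing to annotate, so the result is an empty list
print(_get_annotation_for_gene('C', gene_annotation_dict))       # []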
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_gene_annotation(self, genome_version, include_RISCC_reads=False, print_info=False):\n # add the annotation info to each mutant (or nothing, if gene has no annotation)\n # MAYBE-TODO should I even store gene annotation in each mutant (AND in each genome-side LEAPseq read), or just keep a separate per-gene dictionary to save space?\n gene_annotation_dict, gene_annotation_header = get_all_gene_annotation(genome_version, print_info=False)\n if gene_annotation_header: self.gene_annotation_header = gene_annotation_header\n else: self.gene_annotation_header = 'GENE_ANNOTATION_DATA'\n # add the annotation info to each mutant (or nothing, if gene has no annotation) \n N_annotated = 0\n for mutant in self:\n annotation = self._get_annotation_for_gene(mutant.gene, gene_annotation_dict)\n mutant.gene_annotation = annotation\n if annotation: N_annotated += 1\n if include_RISCC_reads:\n for RISCC_data in mutant.RISCC_genome_side_aligned_reads.values():\n annotation = self._get_annotation_for_gene(RISCC_data[3], gene_annotation_dict)\n RISCC_data[7:] = annotation\n if annotation: N_annotated += 1\n if print_info: print(\"Added %s annotations\"%N_annotated)\n elif not N_annotated: print(\"Warning: No gene annotations found!\")\n # LATER-TODO add this to the gene-info run-test case! But the get_all_gene_annotation method has tests.",
"def GoAnnot(prots, gos, onlyProts=False):\r\n with resources.open_text(\"autoprot.data\",\"Homo_sapiens.gene_info\") as d:\r\n geneInfo = pd.read_csv(d, sep='\\t')\r\n with resources.open_text(\"autoprot.data\",\"gene2go_alt\") as d:\r\n gene2go = pd.read_csv(d, sep='\\t')\r\n prots = pd.DataFrame(pd.Series([str(i).upper().split(';')[0] for i in prots]), columns=[\"Gene names\"])\r\n prots = prots.merge(geneInfo[[\"Symbol\", \"GeneID\"]], left_on=\"Gene names\", right_on=\"Symbol\", how='inner')\r\n \r\n prots = prots.merge(gene2go[[\"GeneID\", \"GO_ID\", \"GO_term\"]], on=\"GeneID\", how='inner')\r\n if onlyProts == True:\r\n for idx, go in enumerate(gos):\r\n if idx == 0:\r\n redProts = prots[\"Symbol\"][prots[\"GO_term\"].str.contains(go)]\r\n else:\r\n redProts = redProts.append(prots[\"Symbol\"][prots[\"GO_term\"].str.contains(go)])\r\n return redProts.drop_duplicates()\r\n else: \r\n for idx, go in enumerate(gos):\r\n if idx == 0:\r\n redProts = prots[prots[\"GO_term\"]==go]\r\n else:\r\n redProts = redProts.append(prots[prots[\"GO_term\"]==go])\r\n return redProts.drop_duplicates()",
"def bulk_update_gene_annotations(c, bulk_annotations):\n\n cols = \" (\" + \", \".join([str_wrap_double(x) for x in [\"ID\",\"annot_name\",\n \"source\", \"attribute\", \"value\"]]) + \") \"\n command = 'INSERT INTO \"gene_annotations\"' + cols + \"VALUES \" + \\\n '(?,?,?,?,?)'\n c.executemany(command, bulk_annotations)\n\n return",
"def addOmimAnnotation(merged_data, OmimAnnotationFile):\n omim_genes = dict.fromkeys(list(OmimAnnotationFile['ENSID']))\n has_omim = []\n for index, row in merged_data.iterrows():\n human_ensid = str(row['Human ENSID'])\n if human_ensid in omim_genes:\n has_omim.append('t')\n else:\n has_omim.append('f')\n\n merged_data['Has Omim Annotation'] = has_omim\n return",
"def add_annotations(self, annotations):\n for annotation in annotations:\n logging.info(\"Annotation received on: '%s'\" % annotation.communication.id)\n self.annotations.extend(annotations)\n return True",
"def addAnnotation(clean_data, annotationFile, newColumnName):\n avgPeakScore = []\n for index, row in clean_data.iterrows():\n # get all the annotation data associated with this gene's ensid\n gene_ensid = str(row['ENSID'])\n peaks = annotationFile.loc[(annotationFile['Nearest Ensembl'] == gene_ensid) & (annotationFile['Annotation'].str.contains('promoter-TSS'))]\n\n if peaks.shape[0] == 0:\n avgPeakScore.append('') # no peaks associated with this gene\n elif peaks.shape[0] == 1:\n avgPeakScore.append(str(float(peaks.iloc[0]['Peak Score'])))\n elif peaks.shape[0] > 1:\n # take the avg\n peakscores = []\n for subindex, subrow in peaks.iterrows():\n peakscores.append(float(subrow['Peak Score']))\n avg = sum(peakscores) / float(len(peakscores))\n #print(\"multiple, had to average: \" + str(avg))\n avgPeakScore.append(avg)\n \n clean_data[newColumnName] = avgPeakScore\n \n return",
"def addGene(self, *args):\n return _libsbml.Association_addGene(self, *args)",
"def parse_anno_from_gff3(options, contigs):\n\n anno = dict()\n idx2gene = dict()\n gene2idx = dict()\n\n if options.verbose:\n print >> sys.stderr, \"Parsing annotation from %s ...\" % options.anno\n \n ### initial run to get the transcript to gene mapping\n if options.verbose:\n print >> sys.stderr, \"... init structure\"\n\n trans2gene = dict() ### dict with: keys = transcript IDs, values = gene IDs\n for line in open(options.anno, 'r'):\n if line[0] == '#':\n continue\n sl = line.strip().split('\\t')\n if sl[2] in ['mRNA', 'transcript', 'mrna', 'miRNA', 'tRNA', 'snRNA', 'snoRNA', 'ncRNA', 'mRNA_TE_gene', 'rRNA', 'pseudogenic_transcript', 'transposon_fragment']:\n tags = get_tags_gff(sl[8])\n trans2gene[tags['ID']] = tags['Parent']\n\n ### init genome structure\n for c in contigs:\n if options.verbose:\n print >> sys.stderr, 'reserving memory for contig %s of len %s' % (c, contigs[c])\n anno[c] = sp.zeros((contigs[c] + 1,), dtype = 'int32')\n\n ### init list of considered GFF fields\n fields = options.fields.split(',')\n\n ### generate a list of exons with attached gene/transcript information\n ### one list per chromsome\n counter = 1\n gene_counter = 2 ### 0 is default for no coverage and 1 is mask for overlap\n\n exons = dict() # contains the exon list per transcript, only need this for mask_alternative_overlap\n\n t0 = time.time()\n for line in open(options.anno, 'r'):\n if options.verbose and counter % 10000 == 0:\n print >> sys.stderr, '.',\n if counter % 100000 == 0:\n t1 = time.time() - t0\n print >> sys.stderr, \"%i - took %.2f secs\" % (counter, t1)\n t0 = time.time()\n counter += 1 \n\n if line[0] == '#':\n continue\n sl = line.strip().split('\\t')\n \n if not sl[2] in fields:\n continue\n\n tags = get_tags_gff(sl[8])\n if sl[2] == 'exon':\n trans_id = tags['Parent']\n gene_id = trans2gene[trans_id]\n else:\n print >> sys.stderr, 'Currently only >exon< is supported'\n sys.exit(1)\n\n if not gene2idx.has_key(tuple([gene_id])):\n gene2idx[tuple([gene_id])] = gene_counter\n idx2gene[gene_counter] = tuple([gene_id])\n gene_counter += 1\n\n ### store for each position of the transcriptome a tuple containing all overlapping gene IDs\n ### assume positions are 1 based and in closed intervals\n try:\n start = int(sl[3]) - 1\n except ValueError:\n start = 0\n try:\n stop = int(sl[4])\n except ValueError:\n stop = 1\n\n if not sl[0] in exons:\n exons[sl[0]] = dict()\n\n if options.mask_alternative_overlap:\n try:\n exons[sl[0]][trans_id].append([start, stop])\n except KeyError:\n exons[sl[0]][trans_id] = [[start, stop]]\n\n ### check, if there is already a different gene ID present, form a combination ID\n if sp.any(anno[sl[0]][start:stop] > 0):\n for p in range(start, stop):\n if anno[sl[0]][p] == 0:\n new_set = tuple([gene_id])\n else:\n new_set = tuple(set(idx2gene[anno[sl[0]][p]]) | set([gene_id]))\n try:\n anno[sl[0]][p] = gene2idx[new_set]\n except KeyError:\n anno[sl[0]][p] = gene_counter\n gene2idx[new_set] = gene_counter\n idx2gene[gene_counter] = new_set\n gene_counter += 1\n else:\n anno[sl[0]][start:stop] = sp.array([gene2idx[tuple([gene_id])]] * (stop - start), dtype = 'int32')\n if options.verbose:\n print >> sys.stderr, \"... 
done\"\n\n ### mask all positions in the genome, where we have more than one annotated gene\n if options.mask_gene_overlap:\n total_pos = 0\n total_masked = 0\n if options.verbose:\n print >> sys.stderr, '\\nMasking positions due to gene overlap:'\n for c in anno:\n masked_pos = 0\n p_idx = sp.where(anno[c] > 1)[0]\n pos = p_idx.shape[0]\n for p in p_idx:\n if len(idx2gene[anno[c][p]]) > 1:\n anno[c][p] = 1\n masked_pos += 1\n total_pos += pos\n total_masked += masked_pos\n if options.verbose:\n print >> sys.stderr, '\\t%s: %i (%i) masked (total) - %.2f %%' % (c, masked_pos, pos, masked_pos / float(max(1, pos)) * 100)\n if options.verbose:\n print >> sys.stderr, \"Total positions: %i\\nMasked positions: %i (%.2f %%)\" % (total_pos, total_masked, total_masked / float(max(1, total_pos)) * 100)\n print >> sys.stderr, \"... done\"\n\n ### mask all positions in the genome, where exonic and intronic positions are annotated\n if options.mask_alternative_overlap:\n if options.verbose:\n print >> sys.stderr, '\\nMasking positions due to exon/intron overlap:'\n for c in exons:\n masked_pos = 0\n for t in exons[c]:\n if len(exons[c][t]) < 2:\n continue\n ### pre-process exon\n tmp = sp.array(exons[c][t], dtype='int')\n s_idx = sp.argsort(tmp[:, 0])\n tmp = tmp[s_idx, :]\n ### mask positions that are intronic and exonic\n for e in range(1, tmp.shape[0]):\n p_idx = sp.where(anno[c][tmp[e - 1, 1] + 1:tmp[e, 0]] > 1)[0]\n if p_idx.shape[0] > 0:\n anno[c][p_idx + tmp[e - 1, 1] + 1] = 1\n masked_pos += p_idx.shape[0]\n total_masked += masked_pos\n if options.verbose:\n print >> sys.stderr, '\\t%s: %i pos masked' % (c, masked_pos)\n if options.verbose:\n print >> sys.stderr, 'Masked positions: %i' % total_masked\n print >> sys.stderr, \"... done\"\n\n \n if options.verbose:\n print >> sys.stderr, \"Storing exon array in HDF5 %s ...\" % (options.anno_hdf5 + '.exons.hdf5')\n\n ### store annotation in hdf5\n hdf_out = h5py.File(options.anno_hdf5 + '.exons.hdf5', 'w')\n for c in anno.keys():\n hdf_out.create_dataset(name = c, data = anno[c])\n hdf_out.close()\n\n if options.verbose:\n print >> sys.stderr, \"... pickling gene ID map\"\n\n cPickle.dump((idx2gene, gene2idx), open(options.anno_hdf5 + '.pickle', 'w'))\n\n if options.verbose:\n print >> sys.stderr, \"... done\"\n\n return (anno, idx2gene, gene2idx)",
"def get_annotation_gene_types(args):\n ref_mgr = ReferenceManager(args.reference_path)\n tss = BedTool(ref_mgr.tss_track)\n if tss.field_count() == 7:\n return TRANSCRIPT_ANNOTATION_GENE_TYPES\n else:\n return None",
"def parse_anno_from_gtf(options, contigs):\n\n anno = dict()\n idx2gene = dict()\n gene2idx = dict()\n\n if options.verbose:\n print >> sys.stderr, \"Parsing annotation from %s ...\" % options.anno\n \n ### init genome structure\n for c in contigs:\n if options.verbose:\n print >> sys.stderr, 'reserving memory for chr %s of len %s' % (c, contigs[c])\n anno[c] = sp.zeros((contigs[c] + 1, ), dtype = 'int32')\n\n ### init list of considered GFF fields\n fields = options.fields.split(',')\n\n ### generate a list of exons with attached gene/transcript information\n ### one list per chromsome\n counter = 1\n gene_counter = 2 ### 0 is default for no coverage and 1 is mask for overlap\n\n exons = dict()\n\n t0 = time.time()\n for line in open(options.anno, 'r'):\n if options.verbose and counter % 10000 == 0:\n print >> sys.stderr, '.',\n if counter % 100000 == 0:\n t1 = time.time() - t0\n print >> sys.stderr, \"%i - took %.2f secs\" % (counter, t1)\n t0 = time.time()\n counter += 1 \n\n if line[0] == '#':\n continue\n sl = line.strip().split('\\t')\n \n if not sl[2] in fields:\n continue\n\n if sl[2] != 'exon':\n print >> sys.stderr, 'Currently only >exon< is supported'\n sys.exit(1)\n\n tags = get_tags_gtf(sl[8])\n gene_id = tags['gene_id']\n trans_id = tags['transcript_id']\n\n if not gene2idx.has_key(tuple([gene_id])):\n gene2idx[tuple([gene_id])] = gene_counter\n idx2gene[gene_counter] = tuple([gene_id])\n gene_counter += 1\n\n try:\n start = int(sl[3]) - 1\n except ValueError:\n start = 0\n try:\n stop = int(sl[4])\n except ValueError:\n stop = 1\n\n chrm = sl[0]\n if chrm == 'chrM_rCRS':\n chrm = 'chrM'\n\n if not chrm in exons:\n exons[chrm] = dict()\n\n if options.mask_alternative_overlap:\n try:\n exons[chrm][trans_id].append([start, stop])\n except KeyError:\n exons[chrm][trans_id] = [[start, stop]]\n\n ### check, if there is already a different gene ID present, form a combination ID\n if sp.any(anno[chrm][start:stop] > 0):\n for p in range(start, stop):\n if anno[chrm][p] == 0:\n new_set = tuple([gene_id])\n else:\n new_set = tuple(set(idx2gene[anno[chrm][p]]) | set([gene_id]))\n try:\n anno[chrm][p] = gene2idx[new_set]\n except KeyError:\n anno[chrm][p] = gene_counter\n gene2idx[new_set] = gene_counter\n idx2gene[gene_counter] = new_set\n gene_counter += 1\n else:\n anno[chrm][start:stop] = sp.array([gene2idx[tuple([gene_id])]] * (stop - start), dtype = 'int32')\n if options.verbose:\n print >> sys.stderr, \"... done\"\n\n ### mask all positions in the genome, where we have more than one annotated gene\n if options.mask_gene_overlap:\n total_pos = 0\n total_masked = 0\n if options.verbose:\n print >> sys.stderr, '\\nMasking positions due to gene overlap:'\n for c in anno:\n masked_pos = 0\n p_idx = sp.where(anno[c] > 1)[0]\n pos = p_idx.shape[0]\n #print >> sys.stderr, 'found %i positions' % p_idx.shape[0]\n for p in p_idx:\n if len(idx2gene[anno[c][p]]) > 1:\n anno[c][p] = 1\n masked_pos += 1\n total_pos += pos\n total_masked += masked_pos\n if options.verbose:\n print >> sys.stderr, '\\t%s: %i (%i) masked (total) - %.2f %%' % (c, masked_pos, pos, masked_pos / float(max(1, pos)) * 100)\n if options.verbose:\n print >> sys.stderr, \"Total positions: %i\\nMasked positions: %i (%.2f %%)\" % (total_pos, total_masked, total_masked / float(max(1, total_pos)) * 100)\n print >> sys.stderr, \"... 
done\"\n\n ### mask all positions in the genome, where exonic and intronic positions are annotated\n if options.mask_alternative_overlap:\n total_masked = 0\n if options.verbose:\n print >> sys.stderr, '\\nMasking positions due to exon/intron overlap:'\n for c in exons:\n masked_pos = 0\n for t in exons[c]:\n if len(exons[c][t]) < 2:\n continue\n ### pre-process exon\n tmp = sp.array(exons[c][t], dtype='int')\n s_idx = sp.argsort(tmp[:, 0])\n tmp = tmp[s_idx, :]\n ### mask positions that are intronic and exonic\n for e in range(1, tmp.shape[0]):\n p_idx = sp.where(anno[c][tmp[e - 1, 1] + 1:tmp[e, 0]] > 1)[0]\n if p_idx.shape[0] > 0:\n anno[c][p_idx + tmp[e - 1, 1] + 1] = 1\n masked_pos += p_idx.shape[0]\n total_masked += masked_pos\n if options.verbose:\n print >> sys.stderr, '\\t%s: %i pos masked' % (c, masked_pos)\n if options.verbose:\n print >> sys.stderr, 'Masked positions: %i' % total_masked\n print >> sys.stderr, \"... done\"\n\n if options.verbose:\n print >> sys.stderr, \"Storing exon array in HDF5 %s ...\" % (options.anno_hdf5 + '.exons.hdf5')\n\n ### store annotation in hdf5\n hdf_out = h5py.File(options.anno_hdf5 + '.exons.hdf5', 'w')\n for c in anno.keys():\n hdf_out.create_dataset(name = c, data = anno[c])\n hdf_out.close()\n\n if options.verbose:\n print >> sys.stderr, \"... pickling gene ID map\"\n\n cPickle.dump((idx2gene, gene2idx), open(options.anno_hdf5 + '.pickle', 'w'))\n\n if options.verbose:\n print >> sys.stderr, \"... done\"\n\n return (anno, idx2gene, gene2idx)",
"def _set_joint_genome_info(self, gene_annotation_header_values, total_genes_in_genome_values):\n # Merge any pieces of global information that's not per-dataset\n self.gene_annotation_header = merge_values_to_unique(gene_annotation_header_values, blank_value=[], convert_for_set=tuple, \n value_name='gene_annotation_header', context='datasets in multi-dataset')\n self.total_genes_in_genome = merge_values_to_unique(total_genes_in_genome_values, blank_value=0, \n value_name='total_genes_in_genome', context='datasets in multi-dataset')",
"def addIntronAnnotation(clean_data, annotationFile, newColumnName):\n avgPeakScore = []\n for index, row in clean_data.iterrows():\n # get all the annotation data associated with this gene's ensid\n gene_ensid = str(row['ENSID'])\n peaks = annotationFile.loc[(annotationFile['Nearest Ensembl'] == gene_ensid) & (annotationFile['Annotation'].str.contains('intron'))]\n\n if peaks.shape[0] == 0:\n avgPeakScore.append('') # no peaks associated with this gene\n elif peaks.shape[0] == 1:\n avgPeakScore.append(str(float(peaks.iloc[0]['Peak Score'])))\n elif peaks.shape[0] > 1:\n # take the avg\n peakscores = []\n for subindex, subrow in peaks.iterrows():\n peakscores.append(float(subrow['Peak Score']))\n avg = sum(peakscores) / float(len(peakscores))\n #print(\"multiple, had to average: \" + str(avg))\n avgPeakScore.append(avg)\n \n clean_data[newColumnName] = avgPeakScore\n \n return",
"def compatibility_g_a(gen, anot):\n print(\"Checking compatibility of genome with annotation file\")\n r_code = 0\n for seq in gen:\n if seq not in anot:\n print(\"WARN\\t{} sequence not found in annotaion file\".format(seq))\n r_code = 1\n for seq in anot:\n if seq not in gen:\n print(\"FAIL\\t{} sequence in annotation \"\n \"but not in genome.\".format(seq))\n r_code = 2\n elif anot[seq] > gen[seq]:\n print(\"FAIL\\tannotation interval on {} sequence is out of \"\n \"reference range.\".format(seq))\n r_code = 2\n print()\n return r_code",
"def addGeneAssociation(self, *args):\n return _libsbml.FbcModelPlugin_addGeneAssociation(self, *args)",
"def Annotate(self, request, context):\n\n session_id = uuid.uuid4()\n mnemonic = encode(session_id)\n\n try:\n pid = os.getpid()\n self.logger.info(\"Current PID: \" + str(pid))\n payload = parse_payload(request.annotations, request.genes)\n response, check = check_genes(payload=payload)\n self.logger.warning(response)\n\n if check:\n response = start_annotation(session_id=session_id, mnemonic=mnemonic, payload=payload)\n if response:\n url = \"{MOZI_RESULT_URI}/?id={mnemonic}\".format(MOZI_RESULT_URI=MOZI_RESULT_URI, mnemonic=mnemonic)\n return annotation_pb2.AnnotationResponse(result=url)\n else:\n msg = \"an internal error occured. please try again\"\n context.set_details(msg)\n context.set_code(grpc.StatusCode.INTERNAL)\n return annotation_pb2.AnnotationResponse(result=msg)\n else:\n self.logger.warning(\"The following genes were not found in the atomspace %s\", response)\n context.set_details(response)\n context.set_code(grpc.StatusCode.INVALID_ARGUMENT)\n return annotation_pb2.AnnotationResponse(result=response)\n\n except Exception as ex:\n self.logger.exception(traceback.format_exc())\n context.set_code(grpc.StatusCode.INVALID_ARGUMENT)\n context.set_details(\"Error occurred in while trying to perform request: \" + ex.__str__())\n return annotation_pb2.AnnotationResponse(result=\"url\")",
"def _annotation_iter_helper(graph):\n return (\n key\n for _, _, data in graph.edges(data=True)\n if ANNOTATIONS in data\n for key in data[ANNOTATIONS]\n )",
"def load_gene_annotation(self, file_path):\n\t\tpass",
"def _load_gene(self, gene, batch) -> None:\n try:\n assert Gene(**gene)\n except pydantic.error_wrappers.ValidationError as e:\n logger.warning(f\"Unable to load {gene} due to validation error: \"\n f\"{e}\")\n else:\n concept_id = gene['concept_id'].lower()\n gene['label_and_type'] = f\"{concept_id}##identity\"\n gene['src_name'] = \\\n PREFIX_LOOKUP[gene['concept_id'].split(':')[0].lower()]\n gene['item_type'] = 'identity'\n\n for attr_type, item_type in ITEM_TYPES.items():\n if attr_type in gene:\n value = gene[attr_type]\n if value is not None and value != []:\n if isinstance(value, str):\n items = [value.lower()]\n else:\n gene[attr_type] = list(set(value))\n items = {item.lower() for item in value}\n for item in items:\n batch.put_item(Item={\n 'label_and_type': f\"{item}##{item_type}\",\n 'concept_id': concept_id,\n 'src_name': gene['src_name'],\n 'item_type': item_type\n })\n else:\n del gene[attr_type]\n batch.put_item(Item=gene)\n self._processed_ids.append(concept_id)",
"def check_annotation(config):\n # Check whether to set annotation downstream of assembly\n tools = [config[\"annotation\"][key] for key in config[\"annotation\"].keys()]\n assems = [config[\"assembly\"][\"metaspades\"], config[\"assembly\"][\"megahit\"]]\n config[\"run_assembly\"] = False\n if True in tools and True in assems:\n config[\"run_annotation\"] = True\n # if True also assume the user wants assembly\n config[\"run_assembly\"] = True\n # Set megahit as default unless metaspades is set\n if not config[\"assembly\"][\"megahit\"] and not config[\"assembly\"][\n \"metaspades\"]:\n config[\"assembly\"][\"megahit\"] = True\n return config",
"def add_annotation(self,\n node_attrs: Dict[str, Dict[str, Any]],\n edge_attrs: Dict[str, Dict[str, Any]],\n sentence_ids: Dict[str, str]) -> None:\n for node, attrs in node_attrs.items():\n self._add_node_annotation(node, attrs)\n\n for edge, attrs in edge_attrs.items():\n self._add_edge_annotation(edge, attrs, sentence_ids)",
"def submit_annotation(\n self, sample_id: Union[int, str], annotator: str, key: str,\n value_numeric: Optional[Union[int, float]], value_string: Optional[str], comment: str,\n ) -> bool:\n annotation = {\n 'sample_id': sample_id,\n 'annotator': annotator,\n 'annotation_timestamp': datetime.datetime.now(),\n 'key': key,\n 'value_numeric': value_numeric,\n 'value_string': value_string,\n 'comment': comment,\n }\n self.annotations.append(annotation)\n return True",
"def link_genes(self, genes: List[Gene]):\n\n # do a double-check to make sure we don't add duplicate genes\n for gene in genes:\n if gene.locus_tag is not None:\n if gene.locus_tag not in [gene.locus_tag for gene in self.genes]:\n self.genes.append(gene)\n gene.link_transcription_unit(self)\n elif gene.id is not None:\n if gene.id not in [gene.id for gene in self.genes]:\n self.genes.append(gene)\n gene.link_transcription_unit(self)",
"def mutate_append(self, gene):\n gene.chromosome.append(self.tactics.mutate_select())",
"def kegg_mapping_and_metadata(self, kegg_organism_code, custom_gene_mapping=None, outdir=None,\n set_as_representative=False, force_rerun=False):\n\n # First map all of the organism's KEGG genes to UniProt\n kegg_to_uniprot = ssbio.databases.kegg.map_kegg_all_genes(organism_code=kegg_organism_code, target_db='uniprot')\n\n successfully_mapped_counter = 0\n\n for g in tqdm(self.genes):\n if custom_gene_mapping:\n kegg_g = custom_gene_mapping[g.id]\n else:\n kegg_g = g.id\n\n if kegg_g not in kegg_to_uniprot:\n log.debug('{}: unable to map to KEGG'.format(g.id))\n continue\n\n # Download both FASTA and KEGG metadata files\n kegg_prop = g.protein.load_kegg(kegg_id=kegg_g, kegg_organism_code=kegg_organism_code,\n download=True, outdir=outdir, set_as_representative=set_as_representative,\n force_rerun=force_rerun)\n\n # Update potentially old UniProt ID\n if kegg_g in kegg_to_uniprot.keys():\n kegg_prop.uniprot = kegg_to_uniprot[kegg_g]\n if g.protein.representative_sequence:\n if g.protein.representative_sequence.kegg == kegg_prop.kegg:\n g.protein.representative_sequence.uniprot = kegg_to_uniprot[kegg_g]\n\n # Keep track of missing mappings - missing is defined by no available sequence\n if kegg_prop.sequence_file:\n successfully_mapped_counter += 1\n\n log.debug('{}: loaded KEGG information for gene'.format(g.id))\n\n log.info('{}/{}: number of genes mapped to KEGG'.format(successfully_mapped_counter, len(self.genes)))\n log.info('Completed ID mapping --> KEGG. See the \"df_kegg_metadata\" attribute for a summary dataframe.')",
"def add_annotations(annot_tuples, ref_data, annot_type):\n\n for annot in ref_data.annotations.select_type(annot_type):\n annot_begin, annot_end = annot.spans[0]\n annot_tuples.append((annot_begin, annot_end, annot.id))",
"def _need_genes(config):\n need_genes = []\n for t in ['gene', 'gene1', 'gene2']:\n if (t in config.keys()) and config[t]:\n need_genes.append(config[t])\n if ('adj_gene' in config.keys()) and config['adj_gene']:\n if config['adj_gene'] == 'CTL':\n need_genes.extend(['CD8A', 'CD8B', 'PRF1', 'GZMA', 'GZMB'])\n else:\n need_genes.append(config['adj_gene'])\n if ('protein_gene' in config.keys()) and config['protein_gene']:\n need_genes.extend(config['protein_gene'])\n return(need_genes)",
"def tag_conjunction_entities(annotated_pages):\n for page_id in annotated_pages:\n page = Page.objects(id=page_id).first()\n #page = db_conn.pages.find_one({\"_id\":page_id}) # TODO: refactor\n annotation_ids = [p.id for p in page[\"annotations_ids\"]]\n all_annotations = list(Annotation.objects(id__in=annotation_ids))\n # retrieve meta-annotations from that page\n meta_annotations = list(Annotation.objects(id__in=annotation_ids, entity_type=\"meta-annotation\"))\n #all_annotations = list(db_conn.annotations.find({\"_id\":{\"$in\":annotation_ids}})) # TODO: refactor\n #meta_annotations = list(db_conn.annotations.find({\"_id\":{\"$in\":annotation_ids} # TODO: refactor\n # ,\"entity_type\":\"meta-annotation\"}))\n if(len(meta_annotations)>0):\n logger.debug(\"Meta-annotations: %s\"%meta_annotations)\n for meta_annotation in meta_annotations:\n logger.info(\"Processing meta-annotation %s\"%meta_annotation[\"id\"])\n line_span = sorted(list(set([(position[\"page_id\"], position[\"line_n\"]) \n for position in meta_annotation[\"positions\"]])))\n top_entities_ids = [ann.id for ann in meta_annotation[\"top_entities\"]]\n top_entities = list(Annotation.objects(id__in=top_entities_ids))\n #top_entities = [db_conn.annotations.find_one({\"_id\":top_annotation_id}) \n # for top_annotation_id in meta_annotation[\"top_entities\"]]\n tokens = []\n for page_obj, line_n in line_span:\n page = Page.objects(id=page_obj.id).first()\n #page = db_conn.pages.find_one({\"_id\":page_id})\n for line in page[\"lines\"]:\n if line[\"line_number\"]==line_n:\n tokens.append((page_obj,line_n,line[\"tokens\"]))\n try:\n for entity in top_entities:\n assert entity is not None\n true_conjunctions = []\n meta_annotation_start = (top_entities[0][\"positions\"][0][\"page_id\"]\n ,top_entities[0][\"positions\"][0][\"line_n\"]\n ,top_entities[0][\"positions\"][0][\"start\"])\n meta_annotation_end = (top_entities[-1][\"positions\"][-1][\"page_id\"]\n ,top_entities[-1][\"positions\"][-1][\"line_n\"]\n ,top_entities[-1][\"positions\"][-1][\"end\"])\n conjunctions = [(token,page,line) for page,line,toks in tokens for token in toks\n if(token[\"offset_start\"] >= meta_annotation_start[2] and token[\"offset_end\"] <= meta_annotation_end[2])]\n true_conjunctions += [(page,line,token) for token,page,line in conjunctions \n if not is_annotated(page,line,token,all_annotations)]\n if(len(true_conjunctions)>0):\n logger.debug(\"Conjunctions found: %s\"%true_conjunctions)\n conjunction_annotations = []\n all_ann_ids = [annotation[\"ann_id\"] for annotation in all_annotations \n if '+' not in annotation[\"ann_id\"] ]\n identifier_counter = int(sorted(all_ann_ids, key=lambda x: int(x.replace('T','')))[-1].replace(\"T\",\"\"))\n logger.debug(sorted(all_ann_ids, key=lambda x: int(x.replace('T','')))[-1])\n for page_obj, line_n, token in true_conjunctions:\n identifier_counter += 1\n conjunction_annotation = Annotation(entity_type=\"conjunction\"\n , ingestion_timestamp=datetime.utcnow()\n , annotation_ingester_version=__version__\n , pageid=meta_annotation.pageid\n , filename=meta_annotation.filename\n , bid=meta_annotation.bid)\n conjunction_annotation.surface = token[\"surface\"]\n conjunction_annotation.ann_id = \"T%i\"%identifier_counter\n conjunction_annotation.positions.append(PagePosition(page_id = page_obj\n , start = token[\"offset_start\"]\n , end = token[\"offset_end\"]\n , line_n = line_n))\n conjunction_annotation.save()\n conjunction_annotations.append(conjunction_annotation)\n logger.info(\"(Page: %s) %i conjunction annotations 
were created and stored in MongoDB\"%(page_obj.id\n , len(conjunction_annotations)))\n logger.debug(\"N %i of top entities before adding conjunction entities\"%len(meta_annotation[\"top_entities\"]))\n meta_annotation[\"top_entities\"] += conjunction_annotations\n logger.debug(\"N %i of top entities after adding conjunction entities\"%len(meta_annotation[\"top_entities\"]))\n Annotation.objects(id=meta_annotation.id).update_one(set__top_entities = meta_annotation[\"top_entities\"])\n for conj_annotation in conjunction_annotations:\n for position in conj_annotation[\"positions\"]:\n page = Page.objects(id=position.page_id.id).first()\n page[\"annotations_ids\"].append(conj_annotation)\n page.save()\n except AssertionError as e:\n #raise e\n logger.warning(\"The meta-annotation %s has no top-level entities and generated the following error: %s\"%(meta_annotation[\"_id\"],e))\n except Exception as e:\n raise e",
"def _gene_ann(gene_ann_path):\n gene_ann = pd.read_csv(gene_ann_path)\n protein_gene = gene_ann[gene_ann.gene_type ==\n 'protein_coding'].gene_name.tolist()\n return(protein_gene)",
"def _is_annotated(nodes: List[Node]):\n annotated = False\n for node in nodes:\n annotated = annotated or (\n \"quantization_annotation\" in node.meta\n and node.meta[\"quantization_annotation\"]._annotated\n )\n return annotated",
"def submit_annotation(\n self, sample_id: Union[int, str], annotator: str, key: str,\n value_numeric: Optional[Union[int, float]], value_string: Optional[str], comment: str,\n ) -> bool:"
] |
[
"0.6410096",
"0.55775297",
"0.5299767",
"0.52662283",
"0.5193791",
"0.51118064",
"0.5048886",
"0.5045176",
"0.50307804",
"0.501942",
"0.5001189",
"0.49972197",
"0.4995329",
"0.4965884",
"0.48947483",
"0.48524985",
"0.48483244",
"0.48313758",
"0.4829132",
"0.48207685",
"0.4760366",
"0.47474402",
"0.4725664",
"0.46628696",
"0.46603763",
"0.46408442",
"0.46389872",
"0.46301582",
"0.4600401",
"0.458971"
] |
0.7736066
|
0
|
Add gene annotation to each mutant, based on multiple annotation_files for that genome version.
|
def add_gene_annotation(self, genome_version, include_RISCC_reads=False, print_info=False):
# add the annotation info to each mutant (or nothing, if gene has no annotation)
# MAYBE-TODO should I even store gene annotation in each mutant (AND in each genome-side LEAPseq read), or just keep a separate per-gene dictionary to save space?
gene_annotation_dict, gene_annotation_header = get_all_gene_annotation(genome_version, print_info=False)
if gene_annotation_header: self.gene_annotation_header = gene_annotation_header
else: self.gene_annotation_header = 'GENE_ANNOTATION_DATA'
# add the annotation info to each mutant (or nothing, if gene has no annotation)
N_annotated = 0
for mutant in self:
annotation = self._get_annotation_for_gene(mutant.gene, gene_annotation_dict)
mutant.gene_annotation = annotation
if annotation: N_annotated += 1
if include_RISCC_reads:
for RISCC_data in mutant.RISCC_genome_side_aligned_reads.values():
annotation = self._get_annotation_for_gene(RISCC_data[3], gene_annotation_dict)
RISCC_data[7:] = annotation
if annotation: N_annotated += 1
if print_info: print("Added %s annotations"%N_annotated)
elif not N_annotated: print("Warning: No gene annotations found!")
# LATER-TODO add this to the gene-info run-test case! But the get_all_gene_annotation method has tests.
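The positive document above attaches per-gene annotation fields to each mutant through a gene-to-annotation dictionary. The sketch below is a minimal, self-contained illustration of that pattern under assumed names (MutantStub, load_gene_annotation_table, and a simple TSV layout); it is not the real class or helper from the source codebase.

# Minimal sketch of the annotation-attachment pattern shown above.
# MutantStub, load_gene_annotation_table, and the TSV layout are assumed
# stand-ins for illustration only.
import csv
from dataclasses import dataclass, field
from typing import Dict, List, Tuple

@dataclass
class MutantStub:
    gene: str
    gene_annotation: List[str] = field(default_factory=list)

def load_gene_annotation_table(path: str) -> Tuple[List[str], Dict[str, List[str]]]:
    """Return (annotation header, {gene_id: annotation fields}) from a TSV file."""
    with open(path, newline="") as handle:
        reader = csv.reader(handle, delimiter="\t")
        header = next(reader)
        table = {row[0]: row[1:] for row in reader if row}
    return header[1:], table

def add_gene_annotation(mutants, annotation_table) -> int:
    """Attach annotation fields to each mutant; return how many got an annotation."""
    n_annotated = 0
    for mutant in mutants:
        annotation = annotation_table.get(mutant.gene, [])
        mutant.gene_annotation = annotation
        if annotation:
            n_annotated += 1
    return n_annotated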
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
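The metadata above declares a triplet objective over (query, document, negatives); in this dump the negatives appear to be listed by descending similarity score. As a hedged sketch, not tied to any particular training library, one record could be expanded into training triples as follows, assuming a record dict that simply mirrors the fields shown here:

# Hedged sketch: expanding one record of this dump into (anchor, positive, negative)
# triples per the "triplet": [["query", "document", "negatives"]] objective.
# The record layout is assumed from the fields shown in this dump.
def expand_triplets(record, max_negatives=None):
    """Yield (query, document, negative) triples for a single record."""
    anchor = record["query"]
    positive = record["document"]
    negatives = record["negatives"]
    if max_negatives is not None:
        negatives = negatives[:max_negatives]
    for negative in negatives:
        yield (anchor, positive, negative)

# Example: keep only the five highest-scored (hardest) negatives.
# triples = list(expand_triplets(record, max_negatives=5))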
|
[
"def overall_annotation_function(path_to_species_trees, path_to_gene_trees, project_name = \"myproject\", path_to_ranger_outputs = \"\", ):\r\n\r\n #initially gather the names of the datasets from the species_trees folder\r\n dataset_names = gather_dataset_names(path_to_species_trees, project_name, \"_CC\")\r\n #create an object of class PhyloData for each unique dataset\r\n phylodata_objects = []\r\n for name in dataset_names:\r\n phylodata_objects.append(annotate_classes.PhyloData(name))\r\n #for each object, have it try and assign itself the correct files\r\n print(\"populating phylodata objects\")\r\n populate_objects(phylodata_objects, project_name, path_to_species_trees, path_to_gene_trees, path_to_ranger_outputs)\r\n #run the visualizer for each object\r\n parse_and_visualize(phylodata_objects, project_name)",
"def updateAnnotations(self):\n self.backupDatafiles()\n print(\"Updating annotation files \", self.field(\"trainDir\"))\n listOfDataFiles = QDir(self.field(\"trainDir\")).entryList(['*.data'])\n for file in listOfDataFiles:\n # Read the annotation\n segments = Segment.SegmentList()\n newsegments = Segment.SegmentList()\n segments.parseJSON(os.path.join(self.field(\"trainDir\"), file))\n allSpSegs = np.arange(len(segments)).tolist()\n newsegments.metadata = segments.metadata\n for segix in allSpSegs:\n seg = segments[segix]\n if self.field(\"species\") not in [fil[\"species\"] for fil in seg[4]]:\n newsegments.addSegment(seg) # leave non-target segments unchanged\n else:\n for seg2 in self.segments:\n if seg2[1] == seg:\n # find the index of target sp and update call type\n seg[4][[fil[\"species\"] for fil in seg[4]].index(self.field(\"species\"))][\"calltype\"] = self.clusters[seg2[-1]]\n newsegments.addSegment(seg)\n newsegments.saveJSON(os.path.join(self.field(\"trainDir\"), file))",
"def bulk_update_gene_annotations(c, bulk_annotations):\n\n cols = \" (\" + \", \".join([str_wrap_double(x) for x in [\"ID\",\"annot_name\",\n \"source\", \"attribute\", \"value\"]]) + \") \"\n command = 'INSERT INTO \"gene_annotations\"' + cols + \"VALUES \" + \\\n '(?,?,?,?,?)'\n c.executemany(command, bulk_annotations)\n\n return",
"def _get_annotation_for_gene(gene, gene_annotation_dict):\n # grab annotations for each gene\n annotations = []\n for gene in gene.split(MULTIPLE_GENE_JOIN):\n try: annotations.append(gene_annotation_dict[gene])\n except KeyError: pass\n # make joint annotation (each field for all genes); \n # make this look better by dealing with empty data specially - turn \" & \" into \"\" and \" & x\" into \"- & x\", \n joint_annotations = []\n for ann in zip(*annotations):\n if any(ann):\n ann = [a if a else '-' for a in ann]\n joint_annotations.append(MULTIPLE_GENE_JOIN.join(ann))\n else:\n joint_annotations.append('')\n # MAYBE-TODO do duplicate-removal etc? But that might just make things confusing - not obvious what goes with which gene.\n return joint_annotations\n # TODO unit-test!",
"def addAnnotation(clean_data, annotationFile, newColumnName):\n avgPeakScore = []\n for index, row in clean_data.iterrows():\n # get all the annotation data associated with this gene's ensid\n gene_ensid = str(row['ENSID'])\n peaks = annotationFile.loc[(annotationFile['Nearest Ensembl'] == gene_ensid) & (annotationFile['Annotation'].str.contains('promoter-TSS'))]\n\n if peaks.shape[0] == 0:\n avgPeakScore.append('') # no peaks associated with this gene\n elif peaks.shape[0] == 1:\n avgPeakScore.append(str(float(peaks.iloc[0]['Peak Score'])))\n elif peaks.shape[0] > 1:\n # take the avg\n peakscores = []\n for subindex, subrow in peaks.iterrows():\n peakscores.append(float(subrow['Peak Score']))\n avg = sum(peakscores) / float(len(peakscores))\n #print(\"multiple, had to average: \" + str(avg))\n avgPeakScore.append(avg)\n \n clean_data[newColumnName] = avgPeakScore\n \n return",
"def load_gene_annotation(self, file_path):\n\t\tpass",
"def parse_anno_from_gff3(options, contigs):\n\n anno = dict()\n idx2gene = dict()\n gene2idx = dict()\n\n if options.verbose:\n print >> sys.stderr, \"Parsing annotation from %s ...\" % options.anno\n \n ### initial run to get the transcript to gene mapping\n if options.verbose:\n print >> sys.stderr, \"... init structure\"\n\n trans2gene = dict() ### dict with: keys = transcript IDs, values = gene IDs\n for line in open(options.anno, 'r'):\n if line[0] == '#':\n continue\n sl = line.strip().split('\\t')\n if sl[2] in ['mRNA', 'transcript', 'mrna', 'miRNA', 'tRNA', 'snRNA', 'snoRNA', 'ncRNA', 'mRNA_TE_gene', 'rRNA', 'pseudogenic_transcript', 'transposon_fragment']:\n tags = get_tags_gff(sl[8])\n trans2gene[tags['ID']] = tags['Parent']\n\n ### init genome structure\n for c in contigs:\n if options.verbose:\n print >> sys.stderr, 'reserving memory for contig %s of len %s' % (c, contigs[c])\n anno[c] = sp.zeros((contigs[c] + 1,), dtype = 'int32')\n\n ### init list of considered GFF fields\n fields = options.fields.split(',')\n\n ### generate a list of exons with attached gene/transcript information\n ### one list per chromsome\n counter = 1\n gene_counter = 2 ### 0 is default for no coverage and 1 is mask for overlap\n\n exons = dict() # contains the exon list per transcript, only need this for mask_alternative_overlap\n\n t0 = time.time()\n for line in open(options.anno, 'r'):\n if options.verbose and counter % 10000 == 0:\n print >> sys.stderr, '.',\n if counter % 100000 == 0:\n t1 = time.time() - t0\n print >> sys.stderr, \"%i - took %.2f secs\" % (counter, t1)\n t0 = time.time()\n counter += 1 \n\n if line[0] == '#':\n continue\n sl = line.strip().split('\\t')\n \n if not sl[2] in fields:\n continue\n\n tags = get_tags_gff(sl[8])\n if sl[2] == 'exon':\n trans_id = tags['Parent']\n gene_id = trans2gene[trans_id]\n else:\n print >> sys.stderr, 'Currently only >exon< is supported'\n sys.exit(1)\n\n if not gene2idx.has_key(tuple([gene_id])):\n gene2idx[tuple([gene_id])] = gene_counter\n idx2gene[gene_counter] = tuple([gene_id])\n gene_counter += 1\n\n ### store for each position of the transcriptome a tuple containing all overlapping gene IDs\n ### assume positions are 1 based and in closed intervals\n try:\n start = int(sl[3]) - 1\n except ValueError:\n start = 0\n try:\n stop = int(sl[4])\n except ValueError:\n stop = 1\n\n if not sl[0] in exons:\n exons[sl[0]] = dict()\n\n if options.mask_alternative_overlap:\n try:\n exons[sl[0]][trans_id].append([start, stop])\n except KeyError:\n exons[sl[0]][trans_id] = [[start, stop]]\n\n ### check, if there is already a different gene ID present, form a combination ID\n if sp.any(anno[sl[0]][start:stop] > 0):\n for p in range(start, stop):\n if anno[sl[0]][p] == 0:\n new_set = tuple([gene_id])\n else:\n new_set = tuple(set(idx2gene[anno[sl[0]][p]]) | set([gene_id]))\n try:\n anno[sl[0]][p] = gene2idx[new_set]\n except KeyError:\n anno[sl[0]][p] = gene_counter\n gene2idx[new_set] = gene_counter\n idx2gene[gene_counter] = new_set\n gene_counter += 1\n else:\n anno[sl[0]][start:stop] = sp.array([gene2idx[tuple([gene_id])]] * (stop - start), dtype = 'int32')\n if options.verbose:\n print >> sys.stderr, \"... 
done\"\n\n ### mask all positions in the genome, where we have more than one annotated gene\n if options.mask_gene_overlap:\n total_pos = 0\n total_masked = 0\n if options.verbose:\n print >> sys.stderr, '\\nMasking positions due to gene overlap:'\n for c in anno:\n masked_pos = 0\n p_idx = sp.where(anno[c] > 1)[0]\n pos = p_idx.shape[0]\n for p in p_idx:\n if len(idx2gene[anno[c][p]]) > 1:\n anno[c][p] = 1\n masked_pos += 1\n total_pos += pos\n total_masked += masked_pos\n if options.verbose:\n print >> sys.stderr, '\\t%s: %i (%i) masked (total) - %.2f %%' % (c, masked_pos, pos, masked_pos / float(max(1, pos)) * 100)\n if options.verbose:\n print >> sys.stderr, \"Total positions: %i\\nMasked positions: %i (%.2f %%)\" % (total_pos, total_masked, total_masked / float(max(1, total_pos)) * 100)\n print >> sys.stderr, \"... done\"\n\n ### mask all positions in the genome, where exonic and intronic positions are annotated\n if options.mask_alternative_overlap:\n if options.verbose:\n print >> sys.stderr, '\\nMasking positions due to exon/intron overlap:'\n for c in exons:\n masked_pos = 0\n for t in exons[c]:\n if len(exons[c][t]) < 2:\n continue\n ### pre-process exon\n tmp = sp.array(exons[c][t], dtype='int')\n s_idx = sp.argsort(tmp[:, 0])\n tmp = tmp[s_idx, :]\n ### mask positions that are intronic and exonic\n for e in range(1, tmp.shape[0]):\n p_idx = sp.where(anno[c][tmp[e - 1, 1] + 1:tmp[e, 0]] > 1)[0]\n if p_idx.shape[0] > 0:\n anno[c][p_idx + tmp[e - 1, 1] + 1] = 1\n masked_pos += p_idx.shape[0]\n total_masked += masked_pos\n if options.verbose:\n print >> sys.stderr, '\\t%s: %i pos masked' % (c, masked_pos)\n if options.verbose:\n print >> sys.stderr, 'Masked positions: %i' % total_masked\n print >> sys.stderr, \"... done\"\n\n \n if options.verbose:\n print >> sys.stderr, \"Storing exon array in HDF5 %s ...\" % (options.anno_hdf5 + '.exons.hdf5')\n\n ### store annotation in hdf5\n hdf_out = h5py.File(options.anno_hdf5 + '.exons.hdf5', 'w')\n for c in anno.keys():\n hdf_out.create_dataset(name = c, data = anno[c])\n hdf_out.close()\n\n if options.verbose:\n print >> sys.stderr, \"... pickling gene ID map\"\n\n cPickle.dump((idx2gene, gene2idx), open(options.anno_hdf5 + '.pickle', 'w'))\n\n if options.verbose:\n print >> sys.stderr, \"... done\"\n\n return (anno, idx2gene, gene2idx)",
"def _set_joint_genome_info(self, gene_annotation_header_values, total_genes_in_genome_values):\n # Merge any pieces of global information that's not per-dataset\n self.gene_annotation_header = merge_values_to_unique(gene_annotation_header_values, blank_value=[], convert_for_set=tuple, \n value_name='gene_annotation_header', context='datasets in multi-dataset')\n self.total_genes_in_genome = merge_values_to_unique(total_genes_in_genome_values, blank_value=0, \n value_name='total_genes_in_genome', context='datasets in multi-dataset')",
"def add_annotations(self, annotations):\n for annotation in annotations:\n logging.info(\"Annotation received on: '%s'\" % annotation.communication.id)\n self.annotations.extend(annotations)\n return True",
"def add_annotations(annot_tuples, ref_data, annot_type):\n\n for annot in ref_data.annotations.select_type(annot_type):\n annot_begin, annot_end = annot.spans[0]\n annot_tuples.append((annot_begin, annot_end, annot.id))",
"def addIntronAnnotation(clean_data, annotationFile, newColumnName):\n avgPeakScore = []\n for index, row in clean_data.iterrows():\n # get all the annotation data associated with this gene's ensid\n gene_ensid = str(row['ENSID'])\n peaks = annotationFile.loc[(annotationFile['Nearest Ensembl'] == gene_ensid) & (annotationFile['Annotation'].str.contains('intron'))]\n\n if peaks.shape[0] == 0:\n avgPeakScore.append('') # no peaks associated with this gene\n elif peaks.shape[0] == 1:\n avgPeakScore.append(str(float(peaks.iloc[0]['Peak Score'])))\n elif peaks.shape[0] > 1:\n # take the avg\n peakscores = []\n for subindex, subrow in peaks.iterrows():\n peakscores.append(float(subrow['Peak Score']))\n avg = sum(peakscores) / float(len(peakscores))\n #print(\"multiple, had to average: \" + str(avg))\n avgPeakScore.append(avg)\n \n clean_data[newColumnName] = avgPeakScore\n \n return",
"def ref_lamanno(\n fasta_path,\n gtf_path,\n cdna_path,\n intron_path,\n index_path,\n t2g_path,\n cdna_t2c_path,\n intron_t2c_path,\n temp_dir='tmp',\n overwrite=False,\n):\n results = {}\n if not os.path.exists(index_path) or overwrite:\n fasta_path = decompress_file(fasta_path, temp_dir=temp_dir)\n sorted_fasta_path, fasta_chromosomes = sort_fasta(\n fasta_path, os.path.join(temp_dir, SORTED_FASTA_FILENAME)\n )\n gtf_path = decompress_file(gtf_path, temp_dir=temp_dir)\n sorted_gtf_path, gtf_chromosomes = sort_gtf(\n gtf_path, os.path.join(temp_dir, SORTED_GTF_FILENAME)\n )\n logger.info('Splitting genome into cDNA at {}'.format(cdna_path))\n chromosomes = check_chromosomes(fasta_chromosomes, gtf_chromosomes)\n cdna_fasta_path = generate_cdna_fasta(\n sorted_fasta_path,\n sorted_gtf_path,\n cdna_path,\n chromosomes=chromosomes\n )\n results.update({'cdna_fasta': cdna_fasta_path})\n logger.info(\n 'Creating cDNA transcripts-to-capture at {}'.format(cdna_t2c_path)\n )\n cdna_t2c_result = create_t2c(cdna_fasta_path, cdna_t2c_path)\n results.update({'cdna_t2c': cdna_t2c_result['t2c']})\n logger.info('Splitting genome into introns at {}'.format(intron_path))\n intron_fasta_path = generate_intron_fasta(\n sorted_fasta_path,\n sorted_gtf_path,\n intron_path,\n chromosomes=chromosomes\n )\n results.update({'intron_fasta': intron_fasta_path})\n logger.info(\n 'Creating intron transcripts-to-capture at {}'.\n format(cdna_t2c_path)\n )\n intron_t2c_result = create_t2c(intron_fasta_path, intron_t2c_path)\n results.update({'intron_t2c': intron_t2c_result['t2c']})\n logger.info('Concatenating cDNA and intron FASTAs')\n combined_path = concatenate_files(\n cdna_fasta_path,\n intron_fasta_path,\n out_path=os.path.join(temp_dir, COMBINED_FILENAME),\n temp_dir=temp_dir\n )\n t2g_result = create_t2g_from_fasta(combined_path, t2g_path)\n results.update(t2g_result)\n index_result = kallisto_index(combined_path, index_path)\n results.update(index_result)\n else:\n logger.info(\n 'Skipping kallisto index because {} already exists. Use the --overwrite flag to overwrite.'\n .format(index_path)\n )\n\n return results",
"def add_annotation(self,\n node_attrs: Dict[str, Dict[str, Any]],\n edge_attrs: Dict[str, Dict[str, Any]],\n sentence_ids: Dict[str, str]) -> None:\n for node, attrs in node_attrs.items():\n self._add_node_annotation(node, attrs)\n\n for edge, attrs in edge_attrs.items():\n self._add_edge_annotation(edge, attrs, sentence_ids)",
"def _add_transform_genes(self):\n self._alleles.add(pu.make_int_gene(1, 1, 10, 1)) # 'AR' backshift (p)\n self._alleles.add(pu.make_choice_gene(1, [0, 1, 2])) # 'I' backshift (d) \n self._alleles.add(pu.make_choice_gene(1, [1, 2, 3])) # 'MA' backshift (q)\n self._loci_list += ['AR_order', 'I_order', 'MA_order']",
"def _annotate(self, mfccs):\n if self.slices is None or self.ipa_regions is None:\n raise ValueError(\"No IPA regions. Call setup_regions() prior\")\n\n # Define some short hands\n slices = self.slices\n ipa_regions = self.ipa_regions\n\n annotation_regions = []\n mfcc_len = mfccs.shape[1]\n sample_ann = [None] * mfcc_len\n\n # Convert slices into sample points.\n for s in slices:\n mfcc_rate = mfcc_len / self.length\n annotation_regions.append(round(s * mfcc_rate))\n annotation_regions.append(mfcc_len)\n\n # Loop through the annotation regions, and set them.\n for i in range(len(annotation_regions) - 1):\n low = annotation_regions[i]\n high = annotation_regions[i+1]\n for sample_ind in range(low, high):\n if sample_ind >= len(sample_ann):\n print(sample_ind)\n print(len(sample_ann))\n sample_ann[sample_ind] = ct.IPA_MAP[ipa_regions[i]]\n self.annotated_samples = sample_ann",
"def parse_anno_from_gtf(options, contigs):\n\n anno = dict()\n idx2gene = dict()\n gene2idx = dict()\n\n if options.verbose:\n print >> sys.stderr, \"Parsing annotation from %s ...\" % options.anno\n \n ### init genome structure\n for c in contigs:\n if options.verbose:\n print >> sys.stderr, 'reserving memory for chr %s of len %s' % (c, contigs[c])\n anno[c] = sp.zeros((contigs[c] + 1, ), dtype = 'int32')\n\n ### init list of considered GFF fields\n fields = options.fields.split(',')\n\n ### generate a list of exons with attached gene/transcript information\n ### one list per chromsome\n counter = 1\n gene_counter = 2 ### 0 is default for no coverage and 1 is mask for overlap\n\n exons = dict()\n\n t0 = time.time()\n for line in open(options.anno, 'r'):\n if options.verbose and counter % 10000 == 0:\n print >> sys.stderr, '.',\n if counter % 100000 == 0:\n t1 = time.time() - t0\n print >> sys.stderr, \"%i - took %.2f secs\" % (counter, t1)\n t0 = time.time()\n counter += 1 \n\n if line[0] == '#':\n continue\n sl = line.strip().split('\\t')\n \n if not sl[2] in fields:\n continue\n\n if sl[2] != 'exon':\n print >> sys.stderr, 'Currently only >exon< is supported'\n sys.exit(1)\n\n tags = get_tags_gtf(sl[8])\n gene_id = tags['gene_id']\n trans_id = tags['transcript_id']\n\n if not gene2idx.has_key(tuple([gene_id])):\n gene2idx[tuple([gene_id])] = gene_counter\n idx2gene[gene_counter] = tuple([gene_id])\n gene_counter += 1\n\n try:\n start = int(sl[3]) - 1\n except ValueError:\n start = 0\n try:\n stop = int(sl[4])\n except ValueError:\n stop = 1\n\n chrm = sl[0]\n if chrm == 'chrM_rCRS':\n chrm = 'chrM'\n\n if not chrm in exons:\n exons[chrm] = dict()\n\n if options.mask_alternative_overlap:\n try:\n exons[chrm][trans_id].append([start, stop])\n except KeyError:\n exons[chrm][trans_id] = [[start, stop]]\n\n ### check, if there is already a different gene ID present, form a combination ID\n if sp.any(anno[chrm][start:stop] > 0):\n for p in range(start, stop):\n if anno[chrm][p] == 0:\n new_set = tuple([gene_id])\n else:\n new_set = tuple(set(idx2gene[anno[chrm][p]]) | set([gene_id]))\n try:\n anno[chrm][p] = gene2idx[new_set]\n except KeyError:\n anno[chrm][p] = gene_counter\n gene2idx[new_set] = gene_counter\n idx2gene[gene_counter] = new_set\n gene_counter += 1\n else:\n anno[chrm][start:stop] = sp.array([gene2idx[tuple([gene_id])]] * (stop - start), dtype = 'int32')\n if options.verbose:\n print >> sys.stderr, \"... done\"\n\n ### mask all positions in the genome, where we have more than one annotated gene\n if options.mask_gene_overlap:\n total_pos = 0\n total_masked = 0\n if options.verbose:\n print >> sys.stderr, '\\nMasking positions due to gene overlap:'\n for c in anno:\n masked_pos = 0\n p_idx = sp.where(anno[c] > 1)[0]\n pos = p_idx.shape[0]\n #print >> sys.stderr, 'found %i positions' % p_idx.shape[0]\n for p in p_idx:\n if len(idx2gene[anno[c][p]]) > 1:\n anno[c][p] = 1\n masked_pos += 1\n total_pos += pos\n total_masked += masked_pos\n if options.verbose:\n print >> sys.stderr, '\\t%s: %i (%i) masked (total) - %.2f %%' % (c, masked_pos, pos, masked_pos / float(max(1, pos)) * 100)\n if options.verbose:\n print >> sys.stderr, \"Total positions: %i\\nMasked positions: %i (%.2f %%)\" % (total_pos, total_masked, total_masked / float(max(1, total_pos)) * 100)\n print >> sys.stderr, \"... 
done\"\n\n ### mask all positions in the genome, where exonic and intronic positions are annotated\n if options.mask_alternative_overlap:\n total_masked = 0\n if options.verbose:\n print >> sys.stderr, '\\nMasking positions due to exon/intron overlap:'\n for c in exons:\n masked_pos = 0\n for t in exons[c]:\n if len(exons[c][t]) < 2:\n continue\n ### pre-process exon\n tmp = sp.array(exons[c][t], dtype='int')\n s_idx = sp.argsort(tmp[:, 0])\n tmp = tmp[s_idx, :]\n ### mask positions that are intronic and exonic\n for e in range(1, tmp.shape[0]):\n p_idx = sp.where(anno[c][tmp[e - 1, 1] + 1:tmp[e, 0]] > 1)[0]\n if p_idx.shape[0] > 0:\n anno[c][p_idx + tmp[e - 1, 1] + 1] = 1\n masked_pos += p_idx.shape[0]\n total_masked += masked_pos\n if options.verbose:\n print >> sys.stderr, '\\t%s: %i pos masked' % (c, masked_pos)\n if options.verbose:\n print >> sys.stderr, 'Masked positions: %i' % total_masked\n print >> sys.stderr, \"... done\"\n\n if options.verbose:\n print >> sys.stderr, \"Storing exon array in HDF5 %s ...\" % (options.anno_hdf5 + '.exons.hdf5')\n\n ### store annotation in hdf5\n hdf_out = h5py.File(options.anno_hdf5 + '.exons.hdf5', 'w')\n for c in anno.keys():\n hdf_out.create_dataset(name = c, data = anno[c])\n hdf_out.close()\n\n if options.verbose:\n print >> sys.stderr, \"... pickling gene ID map\"\n\n cPickle.dump((idx2gene, gene2idx), open(options.anno_hdf5 + '.pickle', 'w'))\n\n if options.verbose:\n print >> sys.stderr, \"... done\"\n\n return (anno, idx2gene, gene2idx)",
"def _add_transform_genes(self):\n self._alleles.add(pu.make_int_gene(1, 1, 10, 1)) # 'AR' backshift (p)\n self._alleles.add(pu.make_choice_gene(1, [0, 1, 2])) # 'I' backshift (d) \n self._alleles.add(pu.make_choice_gene(1, [1, 2, 3])) # 'MA' backshift (q)\n self._alleles.add(pu.make_int_gene(1, 1, 10, 1)) # Seasonal 'AR' backshift (p)\n self._alleles.add(pu.make_choice_gene(1, [0, 1, 2])) # Seasonal 'I' backshift (d) \n self._alleles.add(pu.make_choice_gene(1, [1, 2, 3])) # Seasonal 'MA' backshift (q)\n self._loci_list += ['AR_order', 'I_order', 'MA_order',\n 'ssn_AR_order', 'ssn_I_order', 'ssn_MA_order']",
"def __gen_annoset_file(self):\n paula_id = '{}.{}.anno'.format(self.corpus_name, self.name)\n E, tree = gen_paula_etree(paula_id)\n\n slist = E('structList', {'type': 'annoSet'})\n # NOTE: we could group all the annotations into different structs\n # but I don't see the point. We're already using namespaces, after all\n struct = E('struct', {'id': 'anno_all_annotations'})\n for i, file_id in enumerate(self.files):\n struct.append(E('rel',\n {'id': 'rel_{}'.format(i),\n XLINKHREF: file_id+'.xml'}))\n slist.append(struct)\n tree.append(slist)\n self.files[paula_id] = tree\n self.file2dtd[paula_id] = PaulaDTDs.struct\n return paula_id",
"def add_annotator(self, annotator):\n self.sequence.append(annotator)",
"def run_annotators_mp(self):\n # Determine number of worker processes\n num_workers = au.get_max_num_concurrent_annotators_per_job()\n if self.args.mp is not None:\n try:\n self.args.mp = int(self.args.mp)\n if self.args.mp >= 1:\n num_workers = self.args.mp\n except:\n self.logger.exception(\"error handling mp argument:\")\n self.logger.info(\"num_workers: {}\".format(num_workers))\n # Create arguments for each annotator\n run_args = {}\n for module in self.run_annotators.values():\n # Select correct input file for annotator\n if module.level == \"variant\":\n if \"input_format\" in module.conf:\n input_format = module.conf[\"input_format\"]\n if input_format == \"crv\":\n inputpath = self.crvinput\n elif input_format == \"crx\":\n inputpath = self.crxinput\n else:\n raise Exception(\"Incorrect input_format value\")\n else:\n inputpath = self.crvinput\n elif module.level == \"gene\":\n inputpath = self.crginput\n # Assign secondary inputs from sub-annotators\n secondary_inputs = []\n if \"secondary_inputs\" in module.conf:\n secondary_module_names = module.conf[\"secondary_inputs\"]\n for secondary_module_name in secondary_module_names:\n secondary_module = self.annotators[secondary_module_name]\n secondary_output_path = self.get_module_output_path(\n secondary_module\n )\n secondary_inputs.append(\n secondary_module.name.replace(\"=\", r\"\\=\")\n + \"=\"\n + os.path.join(self.output_dir, secondary_output_path).replace(\n \"=\", r\"\\=\"\n )\n )\n # Assemble argument dictionary\n kwargs = {\n \"script_path\": module.script_path,\n \"input_file\": inputpath,\n \"secondary_inputs\": secondary_inputs,\n \"silent\": self.args.silent,\n \"log_path\": self.log_path,\n }\n if self.run_name != None:\n kwargs[\"run_name\"] = self.run_name\n if self.output_dir != None:\n kwargs[\"output_dir\"] = self.output_dir\n if module.name in self.cravat_conf:\n kwargs[\"conf\"] = self.cravat_conf[module.name]\n run_args[module.name] = (module, kwargs)\n # Run annotator workers\n # Annotator workers receive annotators to run in start_queue. When an \n # annotator is finished, it's name is placed in end_queue. This process\n # schedules annotators to run by placing them in start_queue. Annotators\n # that depend on other annotators results are not placed in start_queue \n # until the dependent annotators are finished. When all annotators have \n # been placed in start_queue, the queue_populated semaphore is set to \n # True. Once queue_populated is True and start_queue is empty, the \n # workers will exit. 
\n self.logger.removeHandler(self.log_handler)\n start_queue = self.manager.Queue()\n end_queue = self.manager.Queue()\n all_mnames = set(self.run_annotators)\n queued_mnames = set()\n done_annots = []\n done_mnames = set(self.done_annotators)\n queue_populated = self.manager.Value(\"c_bool\", False)\n pool_args = [\n [start_queue, end_queue, queue_populated, self.status_writer]\n ] * num_workers\n with mp.Pool(num_workers, init_worker) as pool:\n results = pool.starmap_async(\n annot_from_queue,\n pool_args,\n error_callback=lambda e, mp_pool=pool: mp_pool.terminate(),\n )\n pool.close()\n for mname, module in self.run_annotators.items():\n annotator_not_queue = mname not in queued_mnames\n secondaries_done = set(module.secondary_module_names) <= done_mnames\n if (annotator_not_queue and secondaries_done):\n start_queue.put(run_args[mname])\n queued_mnames.add(mname)\n # Loop until all annotators are put in start_queue\n # TODO not handling case where parent annotator errors out\n while (queued_mnames != all_mnames): \n # Block until item availble in end_queue\n done_annots.append(end_queue.get(True))\n # Queue any annotators that now have requirements complete\n done_mnames.add(done_annots[-1]['module'])\n for mname, module in self.run_annotators.items():\n annotator_not_queue = mname not in queued_mnames\n secondaries_done = set(module.secondary_module_names) <= done_mnames\n if (annotator_not_queue and secondaries_done):\n start_queue.put(run_args[mname])\n queued_mnames.add(mname)\n queue_populated = True\n pool.join()\n # Retrieve finished annotators from end_queue\n while True:\n try:\n done_annots.append(end_queue.get(False))\n except Empty:\n break\n # Write annotator metrics\n for done_annot in done_annots:\n annotator = {}\n annotator['name'] = done_annot['module']\n annotator['version'] = done_annot['version']\n annotator['runtime'] = done_annot['runtime']\n self.metricObj.set_job_annotator(annotator)\n self.log_path = os.path.join(self.output_dir, self.run_name + \".log\")\n self.log_handler = logging.FileHandler(self.log_path, \"a\")\n formatter = logging.Formatter(\n \"%(asctime)s %(name)-20s %(message)s\", \"%Y/%m/%d %H:%M:%S\"\n )\n self.log_handler.setFormatter(formatter)\n self.logger.addHandler(self.log_handler)\n if len(self.run_annotators) > 0:\n self.annotator_ran = True",
"def _add_transform_genes(self):\n pass",
"def main():\n args = get_args()\n FILE = args.FILE\n annotations = args.annotations\n outfile = args.outfile\n \n \n if not os.path.isfile(FILE):\n die('\"{}\" is not a file'.format(FILE))\n if not os.path.isfile(annotations):\n die('\"{}\" is not a file'.format(annotations))\n if os.path.isfile(FILE) and os.path.isfile(annotations):\n reader = csv.DictReader(open(FILE), delimiter = '\\t', fieldnames = (\"qseqid\", \"sseqid\", \"pident\", \"length\", \"mismatch\", \"gapopen\", \"qstart\", \"qend\", \"sstart\", \"send\", \"evalue\", \"bitscore\"))\n reader_a = csv.DictReader(open(annotations), fieldnames = (\"centroid\", \"domain\", \"kingdom\", \"phylum\", \"class\", \"order\", \"genus\", \"species\"))\n reader_b = csv.reader(open(annotations, 'r'))\n anno_dict = {}\n for row in reader_b:\n key1 = row[0]\n anno_dict[key1] = row[1:]\n\n #print(anno_dict)\n \n \"\"\"for dct in map(dict, reader_a):\n genus = (f\"{dct['genus']}\")\n species = (f\"{dct['species']}\")\n if genus == \"\": \n print(\"NA\")\n else:\n print(genus)\n if species == \"\":\n print(\"NA\")\n else:\n print(species)\"\"\"\n for dct in map(dict, reader):\n seq_id = (f\"{dct['sseqid']}\") \n pident = (f\"{dct['pident']}\")\n #print(seq_id)\n for dct_a in map(dict, reader_a):\n genus = (f\"{dct_a['genus']}\")\n species = (f\"{dct_a['species']}\")\n if any(seq_id == key for key in anno_dict): \n \"\"\"print(seq_id)\n print(pident)\n print(genus)\n print(species)\n #find a way to print genus and species of seq_id\n \"\"\"\n \n else:\n warn('Cannot find seq \"{}\" in lookup'.format(seq_id))\n \"\"\"for line_a in reader_a:\n an_id = (line_a['centroid']) \n print('\"{}\" is an_id'.format(an_id)) \n for line in reader:\n seq_id = (line['sseqid'])\n print('\"{}\" is seq_id'.format(seq_id))\n if seq_id == an_id:\n print(\"hi\")\n else:\n warn('Cannot find seq \"{}\" in lookup'.format(seq_id))\n \"\"\"\n #pprint.pprint(dict_list)\n #pprint.pprint(dict_list_a)\n #for key, value in d1.items():\n #if key is 'sseqid':\n #print(value)\n #print(dict_list_a['centroid']) ",
"def get_msms_annotations(self, representatives_only=True, force_rerun=False):\n for g in tqdm(self.genes):\n g.protein.get_msms_annotations(representative_only=representatives_only, force_rerun=force_rerun)",
"def add_locus_in_fasta(self, fasta, output_file):\n fasta_record = FastA(fasta)\n ids_list = self._get_seq_ids()\n\n # check if both files have same number of contigs\n if len(fasta_record) != len(ids_list): # pragma: no cover\n print(\n \"fasta and annotation files don't have the same number of \"\n \"contigs. Found {} and {}\".format(len(fasta_record), len(ids_list))\n )\n sys.exit(1)\n\n # check if directory exist\n output_dir = os.path.dirname(output_file)\n os.makedirs(output_dir, exist_ok=True)\n\n if sorted(fasta_record.names) == sorted(ids_list):\n logger.info(\"Files have same sequence id.\")\n if os.path.isfile(output_file): # pragma: no cover\n os.remove(output_file)\n os.symlink(os.path.realpath(fasta), output_file)\n return\n else:\n logger.info(\n \"fasta and GFF seem to have different IDs. Creating a\"\n \"new coherent fasta file assuming the chromsome names appear \"\n \"in the same order in the fasta and gff\"\n )\n\n with open(output_file, \"w\") as fp:\n # write fasta with seqid of annotation file\n for n in range(len(fasta_record)):\n seq_id = \">{0} {1}\\n\".format(ids_list[n], fasta_record.names[n])\n seq = fasta_record.sequences[n]\n sequence = \"\\n\".join([seq[i : min(i + 80, len(seq))] for i in range(0, len(seq), 80)]) + \"\\n\"\n contigs = seq_id + sequence\n fp.write(contigs)",
"def get_annotation_download_links(self, name, **kwargs):\n genome = self.genomes[safe(name)]\n division, is_vertebrate = self.get_division(name)\n\n # base directory of the genome\n ftp = \"http://ftp.ensemblgenomes.org\"\n if is_vertebrate:\n ftp = \"http://ftp.ensembl.org\"\n version = self.get_version(name, kwargs.get(\"version\"))\n div_path = \"\" if is_vertebrate else f\"/{division}\"\n lwr_name = genome[\"name\"]\n ftp_directory = f\"{ftp}/pub/release-{version}{div_path}/gtf/{lwr_name}\"\n\n # specific gtf file\n cap_name = lwr_name.capitalize()\n asm_name = re.sub(r\"\\.p\\d+$\", \"\", safe(genome[\"assembly_name\"]))\n\n ftp_file = f\"{cap_name}.{asm_name}.{version}.gtf.gz\"\n\n # combine\n link = f\"{ftp_directory}/{ftp_file}\"\n if name == \"GRCh37\":\n link = genome[\"annotation\"].format(version)\n return [link] if check_url(link, max_tries=2) else []",
"def _get_annotations(self) -> List[Dict[int, Dict[str, Any]]]:\n annotations = []\n for item in self.collector:\n data_file_type = os.path.basename(item).split(\".\")[-1]\n annotations.append(\n load_annotation_file(\n os.path.join(\n self.annotation_folder,\n os.path.basename(item).replace(data_file_type, \"json\"),\n )\n )\n )\n\n return annotations",
"def map_annotated_to_annot(annotated_files: Union[list, np.array],\n annot_list: list[crowsetta.Annotation],\n annot_format: str,\n annotated_ext: str | None = None) -> dict[pathlib.Path : crowsetta.Annotation]:\n if type(annotated_files) == np.ndarray: # e.g., vak DataFrame['spect_path'].values\n annotated_files = annotated_files.tolist()\n\n if annot_format in ('birdsong-recognition-dataset', 'yarden', 'generic-seq'):\n annotated_annot_map = _map_using_notated_path(annotated_files, annot_list)\n else:\n try:\n annotated_annot_map = _map_using_ext(annotated_files, annot_list, annot_format, method='remove')\n except MapUsingExtensionError:\n try:\n annotated_annot_map = _map_using_ext(annotated_files, annot_list, annot_format, method='replace',\n annotated_ext=annotated_ext)\n except MapUsingExtensionError as e:\n raise ValueError(\n 'Could not map annotated files to annotations.\\n'\n 'Please see this section in the `vak` documentation:\\n'\n 'https://vak.readthedocs.io/en/latest/howto/howto_prep_annotate.html'\n '#how-does-vak-know-which-annotations-go-with-which-annotated-files'\n ) from e\n\n return annotated_annot_map",
"def extract_annotations(xml_path, tsv_path):\n xml_opener = utilities.get_opener(xml_path)\n csv_opener = utilities.get_opener(tsv_path)\n with xml_opener(xml_path, \"rb\") as xml_file, csv_opener(tsv_path, \"wt\") as tsv_file:\n fieldnames = ['pubmed_id', 'type', 'identifier', 'offset', 'end']\n writer = csv.DictWriter(tsv_file, fieldnames=fieldnames, delimiter='\\t')\n writer.writeheader()\n tag_generator = ET.iterparse(xml_file, tag=\"document\")\n\n for event, document in tqdm.tqdm(tag_generator):\n pubmed_id = document[0].text\n\n # cycle through all the annotation tags contained within document tag\n for annotation in document.iter('annotation'):\n\n # not all annotations will contain an ID\n if len(annotation) <= 3:\n continue\n\n for infon in annotation.iter('infon'):\n if infon.attrib[\"key\"] == \"type\":\n ant_type = infon.text\n else:\n ant_id = infon.text\n\n location, = annotation.iter('location')\n offset = int(location.attrib['offset'])\n end = offset + int(location.attrib['length'])\n row = {'pubmed_id': pubmed_id, 'type': ant_type, 'identifier': ant_id, 'offset': offset, 'end': end}\n writer.writerow(row)\n\n # prevent memory overload\n document.clear()",
"def update_annot(cls, ind):\n gen = ind + FigureControl.minPossibleGenNumber\n for cplot in gs.cloud_plots:\n fitness = cplot.update_annot(gen)\n\n text = \"{}\".format(gen)\n gs.fitness_plot.floating_annot.xy = (gen, fitness)\n gs.fitness_plot.floating_annot.set_text(text)",
"def makeAMixOf2Annotations(inputAnnotPath1, inputAnnotPath2, outputMixPath):\n # make sure the paths end in a slash\n if inputAnnotPath1[-1] != u'/':\n inputAnnotPath1 = u'{0}/'.format(inputAnnotPath1)\n if inputAnnotPath2[-1] != u'/':\n inputAnnotPath2 = u'{0}/'.format(inputAnnotPath2)\n if outputMixPath[-1] != u'/':\n outputMixPath = u'{0}/'.format(outputMixPath)\n # for each input open\n for inPath in [inputAnnotPath1, inputAnnotPath2]:\n # open the file, read the lines\n with open(u'{0}sample.en'.format(inPath)) as inEnFile:\n enLns = inEnFile.readlines()\n with open(u'{0}sample.fr'.format(inPath)) as inFrFile:\n frLns = inFrFile.readlines()\n with open(u'{0}sampleAnnotation.tsv'.format(inPath)) as inAnnotFile:\n annotLns = inAnnotFile.readlines()\n with open(u'{0}sampleReference.tsv'.format(inPath)) as inRefFile:\n refLns = inRefFile.readlines()\n with open(u'{0}scores.tsv'.format(inPath)) as inScFile:\n scLns = inScFile.readlines()\n with open(u'{0}scoresAndMetaData.tsv'.format(inPath)) as inScMetaFile:\n scMetaLns = inScMetaFile.readlines()\n # choose and index randomly\n dejaVus = set([])\n while len(dejaVus) < int(len(enLns)/2.0):\n randomInd = randint(0, len(enLns)-1)\n while randomInd in dejaVus:\n randomInd = randint(0, len(enLns)-1)\n # add to dejavus\n dejaVus.add(randomInd)\n # dump to output file\n utilsOs.appendLineToFile(enLns[randomInd], u'{0}sample.en'.format(outputMixPath), addNewLine=False)\n utilsOs.appendLineToFile(frLns[randomInd], u'{0}sample.fr'.format(outputMixPath), False)\n utilsOs.appendLineToFile(annotLns[randomInd], u'{0}sampleAnnotation.tsv'.format(outputMixPath), False)\n utilsOs.appendLineToFile(refLns[randomInd], u'{0}sampleReference.tsv'.format(outputMixPath), False)\n utilsOs.appendLineToFile(scLns[randomInd], u'{0}scores.tsv'.format(outputMixPath), False)\n utilsOs.appendLineToFile(scMetaLns[randomInd], u'{0}scoresAndMetaData.tsv'.format(outputMixPath), False)"
] |
[
"0.63150656",
"0.6087792",
"0.5909039",
"0.5808526",
"0.57750446",
"0.57686114",
"0.5724871",
"0.57081753",
"0.56716824",
"0.5651769",
"0.5627074",
"0.5604145",
"0.55674523",
"0.5563889",
"0.55331784",
"0.55304426",
"0.55254304",
"0.55189997",
"0.5511156",
"0.5480355",
"0.54790604",
"0.54583526",
"0.5430836",
"0.5428522",
"0.53844225",
"0.5382018",
"0.5377503",
"0.5354985",
"0.53521407",
"0.5331601"
] |
0.7471653
|
0
|
Return a string containing details for the most common mutant, or a count of the most common mutants if there are multiple.
|
def _most_common_mutants_info(self, dataset=None):
summ = self._get_summary(dataset)
most_common_mutants = summ.most_common_mutants
m = most_common_mutants[0]
# calculate the fraction of total reads per mutant, assuming each mutant has the same readcount
assert len(set([m.read_info(dataset).total_read_count for m in most_common_mutants])) == 1
readcount_info = value_and_percentages(m.read_info(dataset).total_read_count, [summ.aligned_read_count])
if len(most_common_mutants) == 1: return "%s (%s)"%(readcount_info, m.position)
else: return "%s (%s mutants)"%(readcount_info, len(most_common_mutants))
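The method above formats readcounts with a value_and_percentages helper that is not shown in this excerpt; the stand-in below is an assumed, simplified version included only to make the output format concrete.

# Assumed stand-in for the value_and_percentages helper referenced above
# (the real implementation is not part of this excerpt).
def value_and_percentages(value, totals):
    """Format a count followed by its percentage of each total, e.g. '120 (1.2%)'."""
    percentages = ", ".join("%.1f%%" % (100.0 * value / total) for total in totals)
    return "%s (%s)" % (value, percentages)

# Example: the top mutant holds 120 of 10,000 aligned reads.
# value_and_percentages(120, [10000])  ->  '120 (1.2%)'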
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def most_common_mutants(self):\n highest_readcount = max([mutant.read_info(self.dataset_name).total_read_count for mutant in self.dataset])\n highest_readcount_mutants = [mutant for mutant in self.dataset \n if mutant.read_info(self.dataset_name).total_read_count==highest_readcount]\n return highest_readcount_mutants",
"def most_popular_gender(data):\n answer = \"\"\n genders = count_gender(data)\n if genders[0] == genders[1]:\n answer = \"Equal\"\n elif genders[0] > genders[1]:\n answer = \"Male\"\n else:\n answer = \"Female\"\n return answer",
"def get_most_common(self, lst):\n data = Counter(lst)\n mc = data.most_common(2) \n #if len(mc) == 1 or (mc[0][1] != (mc[1][1])):\n # return mc[0][0]\n #return \"AMB\"\n return data.most_common(1)[0][0]",
"def get_consensus_string(motifs):\n profile = get_profile(motifs)\n return ''.join(max(position.items(), key=operator.itemgetter(1))[0] for position in profile)",
"def most_common(self):\n # Example ouput : ['so', 6]\n return list(sorted(self.frequencies().items(), key = lambda x: x[1], reverse=True)[0])\n #sorted = sorted(self.frequencies().items(), key = lambda x: x[1], reverse=True)\n #return sorted[0] #not list",
"def mutual_information_max(self):\n return np.log2(special.comb(self.Nr, self.coding_receptors))",
"def print_most_frequent(ngrams, num=10):\n for n in sorted(ngrams):\n print('----- {} most common {}-grams -----'.format(num, n))\n for gram, count in ngrams[n].most_common(num):\n print('{0}: {1}'.format(' '.join(gram), count))\n print('')",
"def print_most_frequent(ngrams, num=10):\n for n in sorted(ngrams):\n print('----- {} most common {}-grams -----'.format(num, n))\n for gram, count in ngrams[n].most_common(num):\n print('{0}: {1}'.format(' '.join(gram), count))\n print('')",
"def get_movie_most_nominations(movies: list) -> str:\n pass",
"def print_most_frequent(ngrams, num=10):\r\n for n in sorted(ngrams):\r\n print('----- {} most common {}-grams -----'.format(num, n))\r\n for gram, count in ngrams[n].most_common(num):\r\n print('{0}: {1}'.format(' '.join(gram), count))\r\n print('')",
"def most_common(self, number=10):\n\n words_full_list = []\n\n for string in self.__corpora:\n words_full_list += string.split()\n\n print(Counter(words_full_list).most_common(number))",
"def _majority(data_set):\r\n pair = _count_parties(data_set)\r\n democrats = pair[0]\r\n republicans = pair[1]\r\n if democrats > republicans: return \"D\"\r\n if democrats < republicans: return \"R\"\r\n else: return None",
"def print_most_frequent(ngrams, num=10):\n for n in sorted(ngrams):\n print('----- {} most common {}-grams -----'.format(num, n))\n for gram, count in ngrams[n].most_common(num):\n print('{0}: {1}'.format(' '.join(gram), count))\n print('')",
"def character_statistics(file_name):\n from operator import itemgetter\n import collections\n cnt = collections.Counter()\n\n try:\n fsock = open(file_name,'r')\n except IOError:\n print (\"The file does not exist, exiting gracefully\")\n\n for line in fsock:\n for c in line.rstrip().lower():\n if c.isalpha():\n cnt[c] += 1\n\n lessAbundant = cnt.most_common()[len(cnt)-1][1]\n #print(type(cnt.most_common()[len(cnt)-1]))\n #print(lessAbundant)\n #print (cnt.most_common()[-4:len(cnt)])\n #print (sorted(cnt.items(), key=itemgetter(1))[0])\n #print (cnt.most_common())\n\n # list comprehension\n #lessCommon = sorted([k for (k,v) in cnt.most_common() if v == lessAbundant])[0]\n # tuple unpacking, filter and map\n lessCommon = sorted(list(filter( lambda t: t[1] == lessAbundant, cnt.most_common())))[0][0]\n #lessCommon = map( lambda (keyLetter,_): keyLetter, filter( lambda (_,freqVal): freqVal == lessAbundant, cnt.most_common()) )\n #print(lessCommon)\n\n return (cnt.most_common()[0][0], lessCommon)",
"def top_three_letters(string):\n print(Counter(string))\n print(Counter(string).most_common(3))",
"def common():\r\n full_song = \"\"\r\n albums = simple_album_list()\r\n for album in albums:\r\n songs = simple_songs_list(album)\r\n for song in songs:\r\n full_song += str(song_lyrics(song))\r\n split_lyrics = full_song.lower().split()\r\n counter = collections.Counter(split_lyrics)\r\n most_words = counter.most_common(50)\r\n return most_words",
"def plurality_value(examples):\n common = defaultdict(int)\n for example_dict in examples:\n common[example_dict['class']] += 1\n return max(common.items(), key=itemgetter(1))[0]",
"def get_mutual_information(c_wic, c_wioc, c_owic, c_owioc):\n # total word count\n c_total = c_wic + c_wioc + c_owic + c_owioc\n\n mi_1 = (c_wic / float(c_total)) * log10((c_total * c_wic) /\n float((c_wic + c_wioc) * (c_wic + c_owic)))\n mi_2 = (c_owic / float(c_total)) * log10((c_total * c_owic) /\n float((c_owic + c_owioc) * (c_wic + c_owic)))\n mi_3 = (c_wioc / float(c_total)) * log10((c_total * c_wioc) /\n float((c_wic + c_wioc) * (c_wioc + c_owioc)))\n mi_4 = (c_owioc / float(c_total)) * log10((c_total * c_owioc) /\n float((c_owic + c_owioc) * (c_wioc + c_owioc)))\n\n return mi_1 + mi_2 + mi_3 + mi_4",
"def most_reducible(wordlist):\n\n\t# We create a memo for reducible words since is_reducible is \n\t# recursive. The keys are the words and the values are the \n\t# number of characters\n\tglobal reducible_words\n\treducible_words = dict()\n\treducible_words['a'], reducible_words['i'] = 1, 1\n\t\n\tword_dict = to_dictionary(wordlist)\n\tfor line in word_dict:\n\t\tis_reducible(line, word_dict)\n\n\t# Varible that will search the memo for the longest word\n\tcurrent_greatest = ''\n\tfor word in reducible_words:\n\t\tif reducible_words[word] > len(current_greatest):\n\t\t\tcurrent_greatest = word\n\tprint(current_greatest)",
"def most_similar_word(self,word, word_set):\n\t max_sim = -1.0\n\t sim_word = \"\"\n\t for ref_word in word_set:\n\t sim = self.word_similarity(word, ref_word)\n\t if sim > max_sim:\n\t max_sim = sim\n\t sim_word = ref_word\n\t return sim_word, max_sim",
"def print_stats(kmer_table):\n print(\"MY OUTPUT\")\n res = kmer_table\n unique = [i for i, j in res.items() if j == 1]\n print(\"Unique: {}\".format(len(unique)))\n print(\"Distinct: {}\".format(len(res)))\n total = sum(res.values())\n print(\"Total: {}\".format(total))\n max_count = max(res.values())\n print(\"Max count: {}\".format(max_count))\n for k, v in res.items():\n if v == max_count:\n print(k, v)\n print('----')\n return None",
"def findMutations(trimmed_consensus, trimmed_mutant, counting_direction):\n\tmutations = \"\"\n\tcount = 0 \n\tif counting_direction == \"r\":\n\t\ttrimmed_consensus = invertString(trimmed_consensus)\n\t\ttrimmed_mutant = invertString(trimmed_mutant)\n\tfor i in range(len(trimmed_consensus)):\n\t\tconsensus = trimmed_consensus[i]\n\t\tmutant = trimmed_mutant[i]\n\t\tif mutant != consensus:\n\t\t\tcount += 1\n\t\t\t# Currently will count first base as 'base 1'\n\t\t\tmutations = mutations + str(i+1) + mutant + \":\"\n\tif count > len(trimmed_consensus)/2:\n\t\treturn \"UPPER_LIM\", float('nan')\n\telse:\n\t\t# Trim off the last ':'\n\t\treturn mutations[:-1], count",
"def get_most_popular_merchants(self):\n if self.model:\n return self.model.wv.index_to_key[: self.num_rec]\n else:\n print(\"train the model before performing this step\")\n return None",
"def summary(self):\n if not self: return u''\n s = u'Character\\n=====\\nName: %s\\n' % \\\n self.get('name', u'')\n bio = self.get('biography')\n if bio:\n s += u'Biography: %s\\n' % bio[0]\n filmo = self.get('filmography')\n if filmo:\n a_list = [x.get('long imdb canonical title', u'')\n for x in filmo[:5]]\n s += u'Last movies with this character: %s.\\n' % u'; '.join(a_list)\n return s",
"def part1_answer_counter(people: list[str]) -> int:\n return len(set(\"\".join(people)))",
"def mode(self):\r\n\t\t_set\t= set(self.sample)\r\n\t\t_list\t= [self.sample.count(i) for i in _set]\r\n\t\treturn list(_set)[_list.index(max(_list))]",
"def most_common_passwords_graph(creds: list, num: int):\n\tc = collections.Counter(creds)\n\tdata = {x[0]:x[1] for x in c.most_common(num)} \n\t# I am not sure this really makes a difference or not with spacing... will check back on this\n\tb = plt.bar([' ' + x + ' ' for x in data.keys()], data.values(), align='center')\n\tplt.title(f\"Top {num} most common passwords\")\n\tplt.xlabel(\"Password\")\n\tplt.ylabel(\"Number of occurances\")\n\tplt.show()",
"def _name_champion(self):\n # TODO BREAK TIES\n return max(self.teams, key=lambda team: len(team.wins))",
"def score_motif(motifs):\n\n score = 0\n count_matrix = count_nucleotides(motifs)\n for i in range(len(count_matrix[0, :])):\n col = count_matrix[:, i]\n max_count = np.max(col)\n col_score = np.sum(col) - max_count\n score = score + col_score\n return score",
"def most_common_passwords(creds: list, num: int):\n\treturn collections.Counter(creds).most_common(num)"
] |
[
"0.66965836",
"0.6349603",
"0.62805444",
"0.6122623",
"0.57724065",
"0.5698563",
"0.56918824",
"0.56918824",
"0.5688887",
"0.5688197",
"0.5687104",
"0.5631871",
"0.5602505",
"0.5602353",
"0.557191",
"0.5551334",
"0.5520492",
"0.54514843",
"0.53785723",
"0.5359715",
"0.53501165",
"0.53204745",
"0.5295799",
"0.5277597",
"0.52464783",
"0.5227166",
"0.52243364",
"0.5221388",
"0.52178013",
"0.5212853"
] |
0.7904284
|
0
|
Sort the mutants by position or readcount, or leave unsorted.
|
def _sort_data(self, sort_data_by='position'):
all_mutants = iter(self)
if sort_data_by=='position':
sorted_data = sorted(all_mutants, key = lambda m: (m.position, m.IB))
# x.position here is an Insertion_position object and has a sensible cmp function
# TODO do unaligned/multi-aligned/unknown positions sort sensibly here?
elif sort_data_by=='read_count':
if self.multi_dataset:
raise MutantError("Sorting by readcount in print_data not implemented for multi-datasets!")
sorted_data = sorted(all_mutants, key = lambda m: (m.total_read_count, m.perfect_read_count, m.position, m.IB),
reverse=True)
else:
raise MutantError("Can't sort mutants by %s - only position or readcount are implemented!"%sort_data_by)
return sorted_data
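
A minimal, self-contained sketch of the two sort orders used above, with a simplified stand-in record (the real objects are Insertion_position instances with their own comparison; the names below are illustrative only, not the project's classes):

from collections import namedtuple

# Hypothetical stand-in for the real mutant objects.
Mutant = namedtuple('Mutant', 'position IB total_read_count perfect_read_count')

mutants = [Mutant(('chr1', 200), 'IB2', 5, 4),
           Mutant(('chr1', 100), 'IB1', 50, 50),
           Mutant(('chr2', 100), 'IB3', 5, 5)]

# sort_data_by == 'position': ascending by (position, IB)
by_position = sorted(mutants, key=lambda m: (m.position, m.IB))
# sort_data_by == 'read_count': descending by (total, perfect, position, IB)
by_readcount = sorted(mutants, key=lambda m: (m.total_read_count, m.perfect_read_count, m.position, m.IB),
                      reverse=True)

print([m.IB for m in by_position])    # ['IB1', 'IB2', 'IB3']
print([m.IB for m in by_readcount])   # ['IB1', 'IB3', 'IB2']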
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def sort_reads(self): \n if not self.sampling:\n self.convert_to_array()\n self.reads = self.reads[self.reads[:,0].argsort()]",
"def sort_mutations(self, mutations):\n return sorted(mutations, key=operator.itemgetter(\"offset\"))",
"def sort_cards(self):\n self.cards.sort(key=operator.attrgetter('persona', 'rank'))\n self.update_position()",
"def _sort(self):\n self.rows.sort(key=lambda x: (x['PERC1'], x['EQ'], x['PASS'], x['W2']),\n reverse=True)\n\n rank = 0\n prev_perc = 0\n prev_rank = 0\n for row in self.rows:\n if row[\"NR\"] == 0:\n # Something has already populated NR as 0 - so we set rank as\n # 0 too\n row['_RANK'] = 0\n row['_NR'] = 0\n continue\n\n # Increment our count\n rank += 1\n if row['PERC1'] == prev_perc:\n row['NR'] = \"\"\n row['_NR'] = prev_rank # I.e. joint 6th will be 6 here\n row['_RANK'] = rank # I.e. joint 6th could be 7, or 8 etc. here\n else:\n row['NR'] = rank\n row['_NR'] = rank\n row['_RANK'] = rank\n prev_perc = row['PERC1']\n prev_rank = rank",
"def reorder( self ):\n self.sorted.sort(self.compareFunction)",
"def _sort_compounds(self):\n self.sorted_molecules = sorted(self.values(), key=operator.attrgetter('criterion'))",
"def sortby(self):\n ...",
"def _sort(self):\n self.population.sort()\n self.population.reverse()",
"def calculate_movement_order(self):\n # type: () -> List[SquadDrone]\n return _.sortByAll(self.members, 'name')",
"def sort(self):\n # Sort here actually uses the tuple comparison we defined in the Card class\n self.cards.sort()",
"def reorderReadPair(read1, read2):\n\n if (isCisInteraction(read1, read2) and read1.left_pos > read2.left_pos):\n r1_reorder = read2\n r2_reorder = read1\n else:\n r1_reorder = read1\n r2_reorder = read2\n return r1_reorder, r2_reorder",
"def sort():\n return -1",
"def sort(self, *args, **kwargs):\n self._sequence.sort(*args, **kwargs)",
"def sort(self):\n self.notes.sort()",
"def sort(self):\n self.cards.sort()",
"def sort(self):\n self.cards.sort()",
"def sort(self):\n sort_key = self.data.chromosome.apply(sorter_chrom)\n self.data = (\n self.data.assign(_sort_key_=sort_key)\n .sort_values(by=[\"_sort_key_\", \"start\", \"end\"], kind=\"mergesort\")\n .drop(\"_sort_key_\", axis=1)\n .reset_index(drop=True)\n )",
"def sort(self):\n for i in range(self.num):\n for j in range(i,0,-1):\n if self.genepool[0][j] < self.genepool[0][j-1]:\n self.genepool[0][j], self.genepool[0][j-1] = self.genepool[0][j-1], self.genepool[0][j]\n else:\n break\n for i in range(self.num):\n for j in range(i,0,-1):\n if self.genepool[1][j] < self.genepool[1][j-1]:\n self.genepool[1][j], self.genepool[1][j-1] = self.genepool[1][j-1], self.genepool[1][j]\n else:\n break",
"def _sort_measurements(self):\n if self._unsorted:\n sorted_ndxs = np.argsort(self._angles)\n self._distances = self._distances[sorted_ndxs]\n self._angles = self._angles[sorted_ndxs]\n self._intensities = self._intensities[sorted_ndxs]\n self._error_codes = self._error_codes[sorted_ndxs]\n self._unsorted = False",
"def sort(self):\n tmp = list(zip(self.user_points, self.user_ids));\n tmp = sorted(tmp, reverse=True);\n self.user_points, self.user_ids = list(zip(*tmp));\n \n self.user_points = list(self.user_points);\n self.user_ids = list(self.user_ids);",
"def sort(self):\n self.fragment_list.sort()",
"def sort_by_parser_scores(self):\n self.parses.sort(key=lambda parse: -parse.parser_score)",
"def sort(self,desc):\n\tself.__sort(\"\",\"\",desc)",
"def sort(self):\r\n\t\treturn sorted(self.sample)",
"def sort(self):\r\n self.candidates.sort(key=self.sortFitness)\r\n return",
"def sort_and_reduce(self):\n self.data = sorted(self.data, key=lambda item: item.pubDate)\n if len(self.data) > MAX_SIZE:\n self.data = self.data[-MAX_SIZE:]",
"def _sort_by_duration(self) -> None:\n total_samples = len(self.paths)\n if total_samples == 0:\n return\n samples = zip(self.paths, self.durations, self.transcriptions)\n sorted_samples = sorted(samples, key=lambda sample: sample[1])\n self.paths, self.durations, self.transcriptions = [\n list(c) for c in zip(*sorted_samples)\n ]\n assert (\n total_samples\n == len(self.paths)\n == len(self.durations)\n == len(self.transcriptions)\n ), \"_sort_by_duration len mis-match\"",
"def sort(self):\n for i in range(self.num):\n for j in range(i,0,-1):\n if self.genepool[0][j].fitness < self.genepool[0][j-1].fitness:\n self.genepool[0][j], self.genepool[0][j-1] = self.genepool[0][j-1], self.genepool[0][j]\n else:\n break\n for i in range(self.num):\n for j in range(i,0,-1):\n if self.genepool[1][j].fitness < self.genepool[1][j-1].fitness:\n self.genepool[1][j], self.genepool[1][j-1] = self.genepool[1][j-1], self.genepool[1][j]\n else:\n break",
"def sort_by_default(self):\n self.data.sort()",
"def remove_mutants_below_readcount(self, min_readcount, perfect_reads=False):\n if perfect_reads: get_readcount = lambda m: m.perfect_read_count\n else: get_readcount = lambda m: m.total_read_count\n # go over all mutants in self; need to convert dataset to a list to make a separate copy, \n # otherwise we'd be modifying the dataset while iterating through it, which isn't allowed.\n for mutant in list(self):\n if get_readcount(mutant) < min_readcount:\n self.remove_mutant(mutant.IB)\n # TODO really I shouldn't be removing mutants outright, just noting them as removed or something... In that case should they or should they not show up in \"for m in self\"? Probably not - they should have a separate dictionary?\n # TODO should I keep track of removed reads, and print in summary? MAYBE."
] |
[
"0.5964748",
"0.58288926",
"0.5742678",
"0.57124084",
"0.5697459",
"0.56780016",
"0.5552178",
"0.5547821",
"0.5540576",
"0.5532421",
"0.5527822",
"0.54764074",
"0.5459857",
"0.54162335",
"0.53919667",
"0.53919667",
"0.53723526",
"0.5371294",
"0.5365716",
"0.53613347",
"0.5360197",
"0.5359721",
"0.5333421",
"0.53311646",
"0.530137",
"0.5297495",
"0.5258744",
"0.5239833",
"0.523144",
"0.52175516"
] |
0.72632295
|
0
|
Write detailed RISCC data (all reads per mutant) to separate file.
|
def print_detailed_RISCC_data(self, OUTPUT=sys.stdout, sort_data_by=None, max_distance=MAX_POSITION_DISTANCE):
# TODO docstring!
# TODO should probably add header
# TODO add annotation!
# TODO change this to be a proper tab-separated file?
### sort all mutants by position or readcount (for single datasets only for now), or don't sort at all
sorted_mutants = self._sort_data(sort_data_by)
### Quick summary
N_total = len(self)
# using sum because you can't do len on generators
N_single_genomic = sum(1 for m in self if m.RISCC_N_distinct_regions(max_distance)[0]==1)
N_single_chrom = sum(1 for m in self if m.RISCC_N_genomic_chromosomes==1)
N_cassette = sum(1 for m in self if m.RISCC_N_distinct_regions(max_distance)[1]>0)
OUTPUT.write("# %s mutants total; %s have one genomic location; "%(N_total,
value_and_percentages(N_single_genomic, [N_total]))
+"%s have locations on only one genomic chromosome; "%(value_and_percentages(N_single_chrom, [N_total]))
+"%s also have one or more cassette locations.\n"%(value_and_percentages(N_cassette, [N_total])) )
# TODO add info on how many actually have ANY genomic-side reads! And out of those, how many are confirmed vs not.
### Print data for all mutants
for mutant in sorted_mutants:
mutant.RISCC_print_detail(OUTPUT, max_distance)
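
The summary line above relies on a value_and_percentages helper from the same codebase; the sketch below only assumes its apparent contract (render a count together with its percentage of each listed total) and is not the project's actual implementation:

# Hypothetical re-implementation for illustration only.
def value_and_percentages(value, totals):
    percents = ', '.join('%.0f%%' % (100.0 * value / t) if t else 'N/A' for t in totals)
    return '%s (%s)' % (value, percents)

print(value_and_percentages(42, [200]))   # 42 (21%)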
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def save_output(pris):\n pris.to_csv('reactors_pris_2016.csv',\n index=False,\n sep=',',\n )",
"def write_rcm(self,rcm_filename):\n \n if(self.buildingsAdded != True):\n self.run_nc.add_building_output_locations({}, 0, 0,0) #Set building locations # 0 in NETCDF file\n \n if(self.keyPointsAdded != True):\n self.run_nc.add_key_points_output_locations([], 0, 0, 0, 0) #Set key points to 0 in netcdf file\n \n self.ricom.write_rcm(rcm_filename)\n #self.run_nc.close()",
"def writeCC(self, fileName, allSCC):\n f = open(fileName,'w')\n\n for compNumber in range(0,len(allSCC)):\n f.write(\"Component number %s: \" % (compNumber))\n f.write(\"%s\\n\" % (str(allSCC[compNumber])))\n f.close()",
"def write_rcm(self, rcmFilename = \"tsunami.rcm\"):\n \n outfile = open(rcmFilename,\"w\")\n outfile.write(\"%s\\n%s\\n\" % (self.title,self.gridfilename))\n \n outfile.write(\"%s\\n\" % (self.run_filename))\n\n outfile.write(\"%s %s %s %s %s nprt,nsbc,nitn,isolve,nson\\n\" % \n (self.nprt,self.nsbc,self.nitn,self.isolve,self.nson))\n outfile.write(\"%s %s %s %s %s %s maxiter, maxNMiter, ifill, itopt, epsi, epsiNM\\n\" % \n (self.maxiter,self.maxNMiter,self.ifill,self.itopt,self.epsi,self.epsiNM))\n \n outfile.write(\"%s %s %s %s omega0, elev, depmin, zminq\\n\" % (self.omega0, self.elev, self.depmin, self.zminq))\n outfile.write(\"%s %s %s %s %s %s %s iomega,Tload,eqfac,nload,neqtide,fn,nu0\\n\" %\n (self.iomega, self.tload, self.eqfac, self.nload, self.neqtide, self.fn, self.nu0))\n \n outfile.write(\"%s %s %s %s %s icoord,lat0,long0,latoff,longoff\\n\" % \n (self.icoord, self.lat0,self.long0, self.latoff, self.longoff))\n \n outfile.write(\"%s %s %s %s delt, tmax, tscale, gamma0\\n\" % (self.delt, self.tmax, self.tscale, self.gamma0))\n \n #--------------------Write FRICTION TYPES-------------------------\n if self.ntype != len(self.friction):\n self.ntype = len(self.friction)\n \n outfile.write(\"%s ntype\\n\" % self.ntype)\n if self.ntype > 0: #write friction types in .rcm file\n for f in self.friction:\n outfile.write(\"%s %s %s %s %s %s %s %s\\n\" % (f[0], f[1], f[2], f[3], f[4], f[5], f[6], f[7])) \n \n \n \n #elif self.friction.ntype < 0: #write friction types in external file\n # self.friction.write_friction_file(self.run_dir + \"/\" + self.frictionFile)\n # outfile.write(self.frictionFile + \"\\n\")\n #------------------------------------------------------------------\n\n \n outfile.write(\"%s %s %s npvx, izcoord, izgrid\\n\" % (self.npvx, self.izcoord, self.izgrid))\n outfile.write(\"%s %s %s %s %s %s nbx, ibcfile, ncon, nbxfile, irampa, irampq\\n\" % \n (self.nbx, self.ibcfile, self.ncon, self.nbxfile, self.irampa, self.irampq))\n\n outfile.write(\"%s iwind\\n%s nsed\\n%s nsol\\n\" %\n (self.iwind, self.nsed, self.nsol))\n \n outfile.write(\"%s %s %s %s %s %s %s ifr, itn, iwn, ivfr, ivsf, ihfr, ifdrag\\n\" %\n (self.ifr, self.itn, self.iwn, self.ivfr, self.ivsf, self.ihfr, self.ifdrag))\n \n #--------------------------OUTPUT OPTIONS-------------------------------\n\n outfile.write(\"%s %s irst, irstout\\n\" % (self.irst, self.irstout)) #set up intial conditions and secify restart\n outfile.write(\"%s %s %s nopt noptstart nskip\\n\" % (self.nopt, self.noptstart, self.nskip))\n if self.nopt > 0:\n outfile.write(\"%s\\n\" % self.outputFileFull)\n \n #output ETA at specified elements\n outfile.write(\"%s %s %s ntsdata, ntsskip, jPprofle\\n\" % (self.ntsdata, self.ntsskip, self.jPprofile))\n if self.ntsdata > 0: #element numbers\n for ele in self.outputElementsETA:\n outfile.write(\"%s\" % ele)\n outfile.write(\"\\n\") \n\n #output ETA at specified elements\n outfile.write(\"%s %s %s ntsUdata, ntsUskip, jUPprofle\\n\" % (self.ntsUdata, self.ntsUskip, self.jUprofile))\n if self.ntsdata > 0: #element numbers\n for ele in self.outputElementsVEL:\n outfile.write(\"%s\" % ele)\n outfile.write(\"\\n\") \n \n \n #output ETA at specified elements\n outfile.write(\"%s %s %s ntsCdata, ntsCskip, jCPprofle\\n\" % (self.ntsCdata, self.ntsCskip, self.jCprofile))\n if self.ntsdata > 0: #element numbers\n for ele in self.outputElementsC:\n outfile.write(\"%s \" % ele)\n outfile.write(\"\\n\") \n \n \n outfile.close() \n \n #-----------------------------------------------------------------------",
"def save_data (mdp):\n for num,key in zip(mdp.corr_num,mdp.key):\n lnum = find_corr(mdp,int(num)) # get the line number of the correlator, if possible\n if lnum > -1:\n cdat = extract_data(mdp,lnum) # found the correlator, save to array\n try: # write it to file\n ## -- organizing is too slow, just write to end of file\n mdp.save_file.seek(0,2) # seek the end of file\n mdp.save_file.write( key + ' ' + \\\n ' '.join('{:e}'.format(cdat[x]) for x in range(0,mdp.corr_len))+'\\n')\n #lsec = uf.find_data_section(mdp.save_file,key)\n #mdp.save_file.write( key + ' ' + ' '.\\\n #uf.ins_line(mdp.save_file, key + ' ' + ' '.\\\n # join('{:e}'.format(cdat[x]) for x in range(0,mdp.corr_len))\\\n # , lsec[1]+1\\\n # )\n #write_fname(mdp,lsec[0])\n except IndexError:\n print \"-- In file\",mdp.corr_file.name\n print \"Could not extract data from file\"\n else:\n print \"-- In file\",mdp.corr_file.name\n print \"Failed to find correlator #\",num",
"def to_file(self, file_path, smirnoff_data):\n pass",
"def write_output(self) -> None:\n self.home.round(2).to_csv(var.indicators_base_cumsum + \"home_\" + str(self.year) + \".csv\")\n self.away.round(2).to_csv(var.indicators_base_cumsum + \"away_\" + str(self.year) + \".csv\")",
"def write(self, f):\n if self.best_mhc_align:\n mhc_align_str = self.best_mhc_align.subject_str()\n mhc_score_str = str(self.best_mhc_align.bit_score)\n else:\n mhc_align_str = \".\"\n mhc_score_str = \"0\"\n\n if self.best_non_mhc_align:\n non_mhc_align_str = self.best_non_mhc_align.subject_str()\n non_mhc_score_str = str(self.best_non_mhc_align.bit_score)\n else:\n non_mhc_align_str = \".\"\n non_mhc_score_str = \"0\"\n \n f.write(\"\\t\".join([self.locus, self.short_samp_id, self.name,\n str(self.length), mhc_align_str, non_mhc_align_str,\n mhc_score_str, non_mhc_score_str,\n str(self.n_mhc_align), str(self.n_non_mhc_align)]) + \"\\n\")",
"def write_file(self,f=None):\n nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper\n # Open file for writing\n if f is None:\n f = open(self.fn_path, 'w')\n # First line: heading\n f.write('{}\\n'.format(self.heading))\n # write dataset 1\n f.write('{} {} {} {} {} {} {}\\n'.format(self.ipakcb, self.iswtoc,\n self.nsystm, self.ithk,\n self.ivoid, self.istpcs,\n self.icrcc))\n # write dataset 2\n t = self.lnwt.array\n for tt in t:\n f.write('{} '.format(tt + 1))\n f.write('\\n')\n\n # write dataset 3\n f.write(\n '{} {} {} {} {} {} {} {} {} {}\\n'.format(self.izcfl, self.izcfm,\n self.iglfl, self.iglfm,\n self.iestfl, self.iestfm,\n self.ipcsfl, self.ipcsfm,\n self.istfl, self.istfm))\n\n # write dataset 4\n f.write(self.gl0.get_file_entry())\n\n # write dataset 5\n f.write(self.sgm.get_file_entry())\n\n # write dataset 6\n f.write(self.sgs.get_file_entry())\n\n # write datasets 7 to 13\n for k in range(self.nsystm):\n f.write(self.thick[k].get_file_entry())\n if self.icrcc != 0:\n f.write(self.sse[k].get_file_entry())\n f.write(self.ssv[k].get_file_entry())\n else:\n f.write(self.cr[k].get_file_entry())\n f.write(self.cc[k].get_file_entry())\n f.write(self.void[k].get_file_entry())\n f.write(self.sub[k].get_file_entry())\n\n # write datasets 14 and 15\n for k in range(nlay):\n if self.istpcs != 0:\n f.write(self.pcsoff[k].get_file_entry())\n else:\n f.write(self.pcs[k].get_file_entry())\n\n # write dataset 16 and 17\n if self.iswtoc > 0:\n # dataset 16\n for i in self.ids16:\n f.write('{} '.format(i))\n f.write(' #dataset 16\\n')\n\n # dataset 17\n for k in range(self.iswtoc):\n t = self.ids17[k, :].copy()\n t[0:4] += 1\n for i in t:\n f.write('{} '.format(i))\n f.write(' #dataset 17 iswtoc {}\\n'.format(k + 1))\n\n # close swt file\n f.close()",
"def write_stats(self):\n with open(self.log_file,'a') as output:\n writer = csv.writer(output)\n n_comps,comp_size = self.connected_component() # Calculate number of connected components (sub-colonies)\n writer.writerow([self.pop_size,\n self.get_average_age(),\n self.get_average_survival(),\n # Nearest neighbor logging disabled for speed\n # Use c++ tool to calculate nearest neighbors after runs\n # or uncomment line below to calculate in python (slower)\n # self.get_average_repro()] + [self.get_average_neighbors(r) for r in range(0,16)] +\n self.get_average_repro()] +\n [n_comps,\",\".join(map(str,comp_size))])",
"def to_file(c, path, seq_types=None):\n with open(path, \"w\") as f:\n f.write(circuit_to_verilog(c, seq_types))",
"def writeOut(self):\n # import time\n self.outHeader = self.srcHeader\n for line in self.outHeader:\n self.outFile.write(line + '\\n')\n # now = time.asctime(time.localtime(time.time()))\n # self.outFile.write('%% -- %s -- Written to new alog' % now)\n for time_s in sorted(self.outData):\n for sens in self.outData[time_s]:\n for meas in self.outData[time_s][sens]:\n valu = self.outData[time_s][sens][meas]\n msg_list = [str(time_s), meas, sens, str(valu)]\n line_string = reconstructLine(msg_list)\n self.outFile.write(line_string + '\\n')",
"def write_stats(self, filestream):\n if not self.summary:\n self.summarize()\n\n print(self.scores, file=filestream)",
"def write(self, filename=None):\n if filename == None:\n filename = self.ofilename\n\n ofile = open(filename, 'w')\n\n ofile.write('# Susceptibility: %E d(susc): %E Coercivity: %E d(coer): %E\\n' % (self.susceptibility_mean, self.susceptibility_std, self.coercivity_mean, self.coercivity_std) )\n ofile.write('# H[] M[] Mfit[]\\n')\n\n #for i in range(len(self.h)):\n # ofile.write(\" %12.10f %12.10f %12.10f\\n\" % ( self.h[i], self.m[i], self.m_fit[i] ) )\n\n ofile.close()",
"def write_seq_cs(protein_stats, seq_cs_file):\n\n with open(seq_cs_file, 'w+') as fid:\n writer = csv.writer(fid, delimiter=',',\n quotechar='\"',\n quoting=csv.QUOTE_NONNUMERIC)\n stat_columns = ['mode', 'avg', 'std', 'count', 'piqc']\n for seq in protein_stats:\n stats = [round(protein_stats[seq][x], 2) for x in stat_columns]\n writer.writerow([seq[0], seq[1]] + stats)",
"def Writefile(self, outfile, verbose=True):\n \n self.outfile = outfile\n \n # Write SUNTANS grid to file\n nc = Dataset(outfile, 'w', format='NETCDF3_CLASSIC')\n nc.Description = 'SUNTANS subsetted history file'\n nc.Author = ''\n nc.Created = datetime.now().isoformat()\n nc.type = 'SUNTANS HIS file'\n #pdb.set_trace()\n nc.createDimension('Nc', self.Nc)\n nc.createDimension('Np', self.Np)\n nc.createDimension('Ne', self.Ne)\n nc.createDimension('Nk', self.Nk)\n nc.createDimension('numsides', self.numsides)\n \n nc.createDimension('time', None)\n \n def write_nc_var(var, name, dimensions, units=None):\n nc.createVariable(name, 'f8', dimensions)\n if units is not None:\n nc.variables[name].units = units\n nc.variables[name][:] = var\n if verbose:\n print ' ... wrote ', name\n \n def create_nc_var(name, dimensions, units=None):\n nc.createVariable(name, 'f8', dimensions)\n if units is not None:\n nc.variables[name].units = units\n if verbose:\n print ' ... wrote ', name\n \n # Grid variables\n write_nc_var(self.xv, 'xv', ('Nc'))\n write_nc_var(self.yv, 'yv', ('Nc'))\n write_nc_var(self.xp, 'xp', ('Np'))\n write_nc_var(self.yp, 'yp', ('Np'))\n write_nc_var(self.xe, 'xe', ('Ne'))\n write_nc_var(self.ye, 'ye', ('Ne'))\n write_nc_var(self.dz, 'dz', ('Nk'))\n write_nc_var(self.dv, 'dv', ('Nc'))\n write_nc_var(self.Ac, 'Ac', ('Nc'))\n write_nc_var(self.Nk, 'Nk', ('Nc'))\n write_nc_var(self.face, 'face', ('Nc','numsides'))\n write_nc_var(self.mark, 'mark', ('Ne'))\n write_nc_var(self.cells, 'cells', ('Nc','numsides'))\n \n \n # Create the data variables\n create_nc_var('time',('time'),'seconds since 1990-01-01 00:00:00')\n create_nc_var('salt',('time','Nk','Nc'),'psu')\n create_nc_var('temp',('time','Nk','Nc'),'degrees C')\n create_nc_var('uc',('time','Nk','Nc'),'meter second-1')\n create_nc_var('vc',('time','Nk','Nc'),'meter second-1')\n create_nc_var('nu_v',('time','Nk','Nc'),'m2 s-1')\n create_nc_var('rho',('time','Nk','Nc'),'kg m-3')\n create_nc_var('tau_x',('time','Nc'),'N m-2')\n create_nc_var('tau_y',('time','Nc'),'N m-2')\n create_nc_var('eta',('time','Nc'),'m')\n \n nc.close()",
"def write(self, outfile, rebasings=None):\r\n raise NotImplementedError()",
"def save(self,fout):\n\n # only process 0 should save\n if COMM_WORLD.rank == 0:\n\n # The file format is:\n # L,nterms,masks,signs,coefficients\n # where each is just a binary blob, one after the other.\n\n # do this first so that we haven't already created the file if\n # it fails for some reason\n msc = self.get_MSC()\n\n with open(fout,mode='wb') as f:\n\n # write the chain length to the file. This is the only parameter\n # that we save other than the MSC representation.\n L = self.L\n if L is None:\n raise ValueError('L must be set before saving to disk.')\n\n # cast it to the type that C will be looking for\n int_t = msc.dtype[0].type\n L = int_t(L)\n\n f.write(L.tobytes())\n\n # write out the length of the MSC representation\n size = int_t(msc.size)\n f.write(size.tobytes())\n\n f.write(msc['masks'].tobytes())\n f.write(msc['signs'].tobytes())\n f.write(msc['coeffs'].tobytes())\n\n COMM_WORLD.barrier()",
"def write_data():",
"def write_output_summary(outfile, read_scores, args):\n\theader = ['sim_info_file', 'sim_sam_file', 'analysis_info_file', 'results_file', 'junc_type', 'score_type', \n\t\t\t 'true_positives', 'true_negatives', 'false_positives', 'false_negatives']\n\t\t\t \n\tfilenames = [args.sim_info, args.sim_sam, args.analysis_info, args.output]\n\ttypes = ['tp', 'tn', 'fp', 'fn']\n\t\t\t \n\twith open(args.output_summary, \"w\") as outfile:\n\t\toutfile.write(\"\\t\".join(header) + \"\\n\")\n\t\t\n\t\tfor score_type in read_scores:\n\t\t\tfor junc_type in read_scores[score_type]:\n\t\t\t\tif junc_type == 'discord':\n\t\t\t\t\tscores = [str(read_scores[score_type][junc_type][type]/2) for type in types]\n\t\t\t\telse:\n\t\t\t\t\tscores = [str(read_scores[score_type][junc_type][type]) for type in types]\n\t\t\t\tline = filenames + [junc_type, score_type] + scores\n\t\t\t\toutfile.write(\"\\t\".join(line) + \"\\n\")",
"def write_info_to_file(self):\n\n self.info.write_mission_info()\n\n self.logger.info(\"Mission instance write succeeded.\")",
"def write_data():\n\n data_location = os.path.realpath(os.path.join(os.path.dirname(__file__), \"..\", DATA_DIR))\n\n sbi_file_name = os.path.join(data_location, SBI_FILE)\n\n sbi = SbiInfo(sbi_file_name)\n\n # the test file is stored in the same directory as the script\n test_file = os.path.splitext(os.path.join(os.path.dirname(__file__), SBI_FILE))[0] + \".pkl\"\n _logger.info(\"Writing header object to {}\".format(os.path.join(os.path.dirname(__file__),\n test_file)))\n sbi.data.to_pickle(test_file)",
"def write_output(basis, filename):\n\n logging.info('Writing output to {}'.format(filename))\n\n basis.to_csv(filename)",
"def write_cn_cards(bc_file, bc_class):\n cn = bc_class.constituent_properties\n bc_file.write('! Constituent Properties\\n')\n if not cn.general_constituents.empty:\n # bc_file.write(cn.general_constituents.to_csv(sep=' ', index=False, header=False).replace('\\r\\n', '\\n'))\n for index, row in bc_class.constituent_properties.general_constituents.iterrows():\n bc_file.write(\n 'CN CON {} {}\\n'.format(row['ID'].astype('int'), row['CONC']))\n if not cn.sand.empty:\n # bc_file.write(cn.sand.to_csv(sep=' ', index=False, header=False).replace('\\r\\n', '\\n'))\n for index, row in bc_class.constituent_properties.sand.iterrows():\n bc_file.write(\n 'CN SND {} {} {} {} {}\\n'.format(row['ID'].astype('int'), *row[['C_0', 'C_1', 'C_2', 'C_3']].values))\n if not cn.clay.empty:\n # bc_file.write(cn.clay.to_csv(sep=' ', index=False, header=False).replace('\\r\\n', '\\n'))\n for index, row in bc_class.constituent_properties.clay.iterrows():\n bc_file.write(\n 'CN CLA {} {} {} {} {}\\n'.format(row['ID'].astype('int'), *row[['C_0', 'C_1', 'C_2', 'C_3']].values))\n if cn.salinity:\n bc_file.write('CN SAL {} {}\\n'.format(cn.salinity_id, cn.reference_concentration))\n if cn.temperature:\n bc_file.write('CN TMP {} {}\\n'.format(cn.temperature_id, cn.reference_temperature))\n if cn.vorticity:\n bc_file.write('CN VOR {} {} {} {}\\n'.format(cn.vorticity_id, cn.vorticity_normalization,\n cn.vorticity_as_term, cn.vorticity_ds_term))\n\n bc_file.write('\\n') # blank line at the end of the Constituent Properties",
"def write(self, FN, crd, title='', append=False, \\\n multiplier=None, trajectory=False):\n if (append and os.path.isfile(FN)):\n if FN.endswith('.gz'):\n import gzip\n F = gzip.open(FN, 'a')\n else:\n F = open(FN, 'a')\n else:\n if os.path.isfile(FN):\n os.rename(FN, FN + '.BAK')\n if FN.endswith('.gz'):\n import gzip\n F = gzip.open(FN, 'w')\n else:\n F = open(FN, 'w')\n # Write the header\n F.write(title + '\\n') # Title\n if not trajectory:\n F.write('%d\\n' % crd.shape[0])\n\n if not trajectory:\n flattened = np.vstack(crd).flatten()\n if multiplier is not None:\n flattened = multiplier * flattened\n for n in range(0, len(flattened), 6):\n F.write(''.join(['%12.7f' % val for val in flattened[n:n + 6]]) + '\\n')\n else:\n for c in crd:\n flattened = c.flatten()\n if multiplier is not None:\n flattened = multiplier * flattened\n for n in range(0, len(flattened), 10):\n F.write(''.join(['%8.3f' % val\n for val in flattened[n:n + 10]]) + '\\n')\n\n F.close()",
"def writeReport(sourceFile, sub, scan, ic, stats, fout, delim='\\t'):\n data = [ic, sourceFile, sub, scan] + stats.split()\n fout.write(delim.join(data) + '\\n')",
"def write_results_dat(self, output_path):\n\n def fstr(nb):\n data = '%E' % nb\n if data == 'NAN':\n nb, power = 0,0\n else:\n nb, power = data.split('E')\n nb = float(nb) /10\n power = int(power) + 1\n return '%.5fE%+03i' %(nb,power)\n\n line = '%s %s %s %i %i %i %i %s %s %s %s %s %i\\n' % (fstr(self.axsec), fstr(self.xerru), \n fstr(self.xerrc), self.nevents, self.nw, self.maxit, self.nunwgt,\n fstr(self.luminosity), fstr(self.wgt), fstr(self.xsec), fstr(self.maxwgt),\n fstr(self.th_maxwgt), self.th_nunwgt) \n fsock = open(output_path,'w') \n fsock.writelines(line)\n for i in range(len(self.ysec_iter)):\n line = '%s %s %s %s %s %s\\n' % (i+1, self.ysec_iter[i], self.yerr_iter[i], \n self.eff_iter[i], self.maxwgt_iter[i], self.yasec_iter[i]) \n fsock.writelines(line)",
"def writeChronListToFile(self):\n ## write header\n for header_line in self.outData['header']:\n self.outFile.write(header_line + '\\n')\n ##loop through each msg list\n for msg_list in self.outData_temp:\n ## create line\n msg_line = reconstructLine(msg_list)\n ## write to file\n self.outFile.write(msg_line + '\\n')",
"def write_cs_stats(cs_stats, cs_stats_file):\n with open(cs_stats_file, 'w+') as fid:\n writer = csv.writer(\n fid, delimiter=',', quotechar='\"', quoting=csv.QUOTE_NONNUMERIC)\n stat_columns = ['mode', 'avg', 'std', 'min95', 'max95']\n for corr in cs_stats:\n atom_type = [corr.aa, corr.atoms[0].strip(), corr.ss]\n stats = [round(cs_stats[corr][x], 2) for x in stat_columns]\n writer.writerow(atom_type + stats)",
"def write_file(self):\n\n # -open file for writing\n f = open(self.fn_path, 'w')\n\n # header not supported\n # # -write header\n # f.write('{}\\n'.format(self.heading))\n\n # dataset 1 - WEL1flag QSUMflag SYNDflag\n line = '{:10d}'.format(self.wel1flag)\n line += '{:10d}'.format(self.qsumflag)\n line += '{:10d}\\n'.format(self.byndflag)\n f.write(line)\n\n # dataset 2 - MNWOBS\n f.write('{:10d}\\n'.format(self.mnwobs))\n\n # dataset 3 - WELLID UNIT QNDflag QBHflag {CONCflag}\n # (Repeat MNWOBS times)\n nitems = len(self.wellid_unit_qndflag_qhbflag_concflag[0])\n for i, t in enumerate(self.wellid_unit_qndflag_qhbflag_concflag):\n wellid = t[0]\n unit = t[1]\n qndflag = t[2]\n qhbflag = t[3]\n assert qndflag >= 0, 'QNDflag must be greater than or equal to zero.'\n assert qhbflag >= 0, 'QHBflag must be greater than or equal to zero.'\n line = '{:20s} '.format(wellid)\n line += '{:5d} '.format(unit)\n line += '{:5d} '.format(qndflag)\n line += '{:5d} '.format(qhbflag)\n if nitems == 5:\n concflag = t[4]\n assert 0 <= concflag <= 3, \\\n 'CONCflag must be an integer between 0 and 3.'\n assert isinstance(concflag, int), \\\n 'CONCflag must be an integer between 0 and 3.'\n line += '{:5d} '.format(concflag)\n line += '\\n'\n f.write(line)\n\n f.close()"
] |
[
"0.5820573",
"0.5751511",
"0.57284915",
"0.56944907",
"0.5619381",
"0.56188095",
"0.55729157",
"0.55714667",
"0.55704516",
"0.55553466",
"0.5552754",
"0.55219156",
"0.5521283",
"0.5508681",
"0.5494961",
"0.54939115",
"0.548014",
"0.5462899",
"0.5447348",
"0.5435847",
"0.54278606",
"0.5404955",
"0.54029274",
"0.53970605",
"0.53804106",
"0.5370587",
"0.5348203",
"0.53421617",
"0.533256",
"0.5307868"
] |
0.63001347
|
0
|
Read mutant input file, return as new dataset (.pickle format only).
|
def read_mutant_file(infile):
return unpickle(infile)
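
unpickle is presumably a thin wrapper around the standard pickle module; a minimal sketch of the assumed behaviour (the project's actual helper lives elsewhere and may add extra checks):

import pickle

def unpickle(infile):
    # Assumed behaviour: load a single pickled dataset object from the given path.
    with open(infile, 'rb') as f:
        return pickle.load(f)

def pickle_to_file(obj, outfile):
    # Matching writer, so a dataset saved this way round-trips through read_mutant_file.
    with open(outfile, 'wb') as f:
        pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)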
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def read_data_set():\n # shapes of datasets -- [] means expanded form:\n # - X: J\n # - net.R: J [x J x 1]\n # - F_DIST: J x J x num_features\n # - F_DIST_w1: J x J x num_features\n # - w['except_first'][-1]: (last weights) J x num_features [x 1]\n # - w['except_first'][1:-1]: (second to last weights) J x J x num_features\n # - first weights **were** also J x J x num_features\n # - w['first_for_r']: J x 1 x num_features\n\n read_X()\n read_weights(read_FDIST())",
"def _read_dataset(self, dataset_path):\n dataset = pd.read_pickle(dataset_path)\n return dataset",
"def _read_data(self) -> MMD:\n\t\tif self.config.source_type == SourceType.LOCAL_FILE:\n\t\t\treturn self._read_files()\n\t\telif self.config.source_type == SourceType.HDFS:\n\t\t\treturn self._read_hdfs()\n\t\telif self.config.source_type == SourceType.NEO4J:\n\t\t\treturn self._read_neo4j(self.config.graph_db)\n\n\t\telse:\n\t\t\traise NotImplementedError(\"The source type {} has not been implemented yet.\".format(loader_config.source_type))",
"def read_data_set(self, path):\n data = self._preprocessor.create_empty_input_target_data()\n\n for filename in glob.iglob(os.path.join(path, \"*\" + NoiseReader.file_extension())):\n exp_name = os.path.splitext(os.path.basename(filename))[0]\n\n experiment = self.read_experiment(path, exp_name)\n data = self._preprocessor.concat_input_target_data(data, experiment)\n\n return data",
"def load_and_pickle_mnist():\n\n if os.path.exists(pickle_file):\n print(\"Pickle file found! Unpickling...\")\n with open(pickle_file, \"rb\") as pf:\n mnist = pickle.load(pf)\n else:\n mnist = read_data_sets(data_dir, one_hot=True)\n\n with open(pickle_file, \"wb\") as pf:\n pickle.dump(mnist, pf, pickle.HIGHEST_PROTOCOL)\n\n # Remove .gz files from the mnist download.\n for ptr in glob.glob(os.path.join(data_dir, \"*.gz\")):\n os.remove(ptr)\n\n return mnist",
"def input_data(self):\n return read_file(self.file_path)",
"def inputfile(fileName):\n with open(fileName, \"rb\") as file:\n data = pickle.load(file)\n return data",
"def unpickle_mnist(filename):\n\n # Unpickle files (uses latin switch for py2.x to py3.x compatibility)\n if sys.version_info[0] < 3:\n train, valid, test = pickle.load(open(filename, \"rb\"))\n else:\n train, valid, test = pickle.load(open(filename, \"rb\"), encoding=\"latin1\")\n X_train, y_train = map(torch.from_numpy, train)\n X_valid, y_valid = map(torch.from_numpy, valid)\n X_test, y_test = map(torch.from_numpy, test)\n\n # Convert to tensors\n train_data = TensorDataset(X_train, y_train)\n valid_data = TensorDataset(X_valid, y_valid)\n test_data = TensorDataset(X_test, y_test)\n\n return train_data, valid_data, test_data",
"def load_testset(filename):\n pickle_name = filename + \".pickle\"\n try:\n print(\"trying to load \" + filename + \" from pickle\")\n dataset = pickle.load(open(pickle_name, \"rb\"))\n except:\n with open(filename, 'r') as csv_file:\n print(\"no pickle exists. parsing file \" + filename)\n dataset = [DataPoint(item[0:], \" \")\n for item\n in csv.reader(csv_file, delimiter=',')]\n pickle.dump(dataset, open(pickle_name, \"wb\"))\n print(\"loaded \" + filename)\n return dataset",
"def _read_pkl(self, input_file):\n data = pickle.load(open(input_file, 'rb'))\n return data",
"def deserialize(self):\n with open(self.path+self.name, \"rb\") as pfile:\n dataSet = pickle.load(pfile)\n return dataSet",
"def read(train_path, test_path, label_name):\n train_dataset = pd.read_csv(train_path)\n test_dataset = pd.read_csv(test_path)\n\n train_labels = train_dataset.pop(label_name)\n\n imputer = DataFrameImputer().fit(train_dataset)\n train_dataset = imputer.transform(train_dataset)\n test_dataset = imputer.transform(test_dataset)\n\n train_dataset = pd.get_dummies(train_dataset)\n test_dataset = pd.get_dummies(test_dataset)\n\n train_dataset = train_dataset.drop(train_dataset.columns.difference(test_dataset.columns), axis=1)\n test_dataset = test_dataset.drop(test_dataset.columns.difference(train_dataset.columns), axis=1)\n\n scaler = StandardScaler().fit(train_dataset)\n train_dataset = scaler.transform(train_dataset)\n test_dataset = scaler.transform(test_dataset)\n\n return train_dataset, train_labels, test_dataset",
"def load_data_pickle(PATH, dataset, filename):\n with open(PATH + '/' + dataset + \"_\" + filename + \".pkl\",\"rb\") as f:\n new_data = pickle.load(f)\n\n # print(filename, \"opened\")\n return new_data",
"def readMNISTData():\n mnist = input_data.read_data_sets(\"MNIST_data\",one_hot=True) \n return mnist",
"def read_data(feature_file, label_file):",
"def read_data(cls, input_file,quotechar = None):\n if 'pkl' in str(input_file):\n lines = load_pickle(input_file)\n else:\n lines = input_file\n return lines",
"def read(self):\n\n if self.filename.endswith(\".pkl\"):\n logging.debug(\"Loading pickle file %s\", self.filename)\n data = pd.read_pickle(self.filename)\n\n elif self.filename.endswith(\".hdf5\"):\n logging.debug(\"Loading HDF5 file %s\", self.filename)\n with h5py.File(self.filename, \"r\") as data_file:\n\n data = pd.DataFrame(\n {\n column: data_file[\"/haloTrees/%s\" % column].value\n for column in self.columns\n }\n ).set_index(\"nodeIndex\")\n\n # with open(\"./data/cache.pkl\", \"w\") as pickle_file:\n # data.to_pickle(pickle_file)\n\n else:\n raise TypeError(\"Unknown filetype %s\" % self.filename)\n\n return data",
"def load_data_pickle(self, load_full=False):\n self.train = pd.read_pickle('../input/train_mod.pkl')\n self.test = pd.read_pickle('../input/test_mod.pkl')\n if load_full:\n self.train_full = pd.read_pickle('../input/train_full_mod.pkl')",
"def make(input_filepath, output_filepath) -> None:\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')",
"def read_and_save_raw_data(dataPath, filename):\n # Read in Raw data\n datafileNm = os.path.join(dataPath, 'nyt-ingredients-snapshot-2015.csv')\n nytData = pd.read_csv(datafileNm, index_col=None)\n nytData.drop(columns='index', inplace=True)\n\n # Generate training data from NY Times Ingredient Tagging Model\n cleaned_dat = reshape_data(Cli(nytData).df)\n cleaned_dat.to_pickle(os.path.join(dataPath, filename))",
"def load_data_set_from_pickle(file_name=None):\n if not file_name:\n try:\n file_name = max(glob.glob(os.path.join(__pickled_data_directory__, '*.chars74k-lite.gz')), key=os.path.getctime)\n except ValueError as e:\n log.error('Unable to load data set from file since no pickled files could be found, ')\n return None\n\n log.debug('Loading data set from file: %s' % file_name)\n return unpickle_data(file_name)",
"def read_info(self, dataset_name=None, strict=False):\n if dataset_name is None: return self\n else: raise MutantError(\"This is NOT a multi-dataset mutant - cannot provide dataset_name arg!\")",
"def load_data():\n data = gzip.open(\"mnist.pkl.gz\", \"rb\")\n train_set, valid_set, test_set = cPickle.load(data)\n data.close()\n\n # Combine validation and train folds to recreate the master 60k set.\n new_images = numpy.concatenate((train_set[0], valid_set[0]))\n new_labels = numpy.concatenate((train_set[1], valid_set[1]))\n\n train_set = (new_images, new_labels)\n \n return (train_set, test_set)",
"def readdata(self, fname):\n\t\treturn self.__readtsv(fname)",
"def get_local_dataset(\n self, \n file_name: str\n ):\n pd.read_csv(file_name)\n #save",
"def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')",
"def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')",
"def read_data(cls, input_file, quotechar=None):\r\n if 'pkl' in str(input_file):\r\n lines = load_pickle(input_file)\r\n else:\r\n lines = input_file\r\n return lines",
"def _read_input_file(self):\n pass",
"def load_data():\n f = gzip.open('../data/mnist.pkl.gz', mode='rb')\n\n # NOTE: I get errors when I don't use encoding='latin1' because of Python 2 vs Python 3 compatibility issues\n # training_data, validation_data, test_data = pickle.load(f, encoding='latin1')\n training_data, validation_data, test_data = pickle.load(f)\n\n f.close()\n\n return training_data, validation_data, test_data"
] |
[
"0.6097754",
"0.59648234",
"0.59588504",
"0.58907497",
"0.5883596",
"0.5860291",
"0.5846655",
"0.5843215",
"0.57745373",
"0.57522935",
"0.57493174",
"0.5684077",
"0.5683538",
"0.5667257",
"0.5662685",
"0.56449974",
"0.5636096",
"0.5633263",
"0.56238824",
"0.5590493",
"0.55816096",
"0.5557574",
"0.5557163",
"0.55286855",
"0.55182374",
"0.55167466",
"0.55167466",
"0.55059433",
"0.549098",
"0.54742616"
] |
0.7560404
|
0
|
Check that the function returns the right SPECIAL_POSITIONS object when given an unaligned HTSeq aln.
|
def _check_unaligned_alns(self, aln_parse_function, *extra_args):
fake_aln_unaligned_1 = Fake_HTSeq_aln('AAA', 'name', unaligned=True, optional_field_data={'XM':1})
fake_aln_unaligned_2 = Fake_HTSeq_aln('AAA', 'name', unaligned=True, optional_field_data={})
fake_aln_multi_aligned_1 = Fake_HTSeq_aln('AAA', 'name', unaligned=True, optional_field_data={'XM':2})
fake_aln_multi_aligned_2 = Fake_HTSeq_aln('AAA', 'name', unaligned=True, optional_field_data={'XM':20})
assert aln_parse_function(fake_aln_unaligned_1, *extra_args) == SPECIAL_POSITIONS.unaligned
assert aln_parse_function(fake_aln_unaligned_2, *extra_args) == SPECIAL_POSITIONS.unaligned
assert aln_parse_function(fake_aln_multi_aligned_1, *extra_args) == SPECIAL_POSITIONS.multi_aligned
assert aln_parse_function(fake_aln_multi_aligned_2, *extra_args) == SPECIAL_POSITIONS.multi_aligned
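
The test relies on a Fake_HTSeq_aln test double and on how the parse functions read the SAM-style XM optional field; the sketch below is a hypothetical re-creation of that setup (the real fake object and the real decision rule live in the test module and may differ):

class FakeRead(object):
    def __init__(self, seq, name):
        self.seq, self.name = seq, name

class FakeAln(object):
    # Mimics the few HTSeq alignment attributes the parsers appear to look at.
    def __init__(self, seq, name, unaligned=False, optional_field_data=None):
        self.read = FakeRead(seq, name)
        self.aligned = not unaligned
        self.iv = None if unaligned else 'genomic-interval-placeholder'
        self.optional_field_data = dict(optional_field_data or {})
    def optional_field(self, field):
        return self.optional_field_data[field]

def classify(aln):
    # Assumed rule matching the asserts above: an unaligned record whose XM count
    # is greater than 1 is treated as multi-aligned, otherwise as unaligned.
    if aln.aligned:
        return 'aligned'
    if aln.optional_field_data.get('XM', 0) > 1:
        return 'multi_aligned'
    return 'unaligned'

print(classify(FakeAln('AAA', 'name', unaligned=True, optional_field_data={'XM': 20})))  # multi_aligned
print(classify(FakeAln('AAA', 'name', unaligned=True, optional_field_data={})))          # unaligned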
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_align_unaligned_seqs(self):\n res = align_unaligned_seqs(self.seqs1_fp, RNA)\n self.assertEqual(res.toFasta(), self.seqs1_aln)",
"def test_get_align_coords(self):\n # 01234 5\n # ACGGT--A\n # 012345\n # --GGTTTA\n m1, seq1 = DNA.make_seq(\"ACGGT--A\").parse_out_gaps()\n m2, seq2 = DNA.make_seq(\"--GGTTTA\").parse_out_gaps()\n path = get_align_coords(m1, m2)\n expect = [2, 4, None, 5, 5], [0, 2, None, 5, 5]\n self.assertEqual(path.get_coords(), expect)\n\n # we have no gaps, so coords will be None\n m1, s1 = seq1.parse_out_gaps()\n m2, s2 = seq2.parse_out_gaps()\n path = get_align_coords(m1, m2)\n self.assertEqual(path.get_coords(), ([], []))\n\n # unless we indicate the seqs came from an Alignment\n m1, seq1 = DNA.make_seq(\"ACGGTTTA\").parse_out_gaps()\n m2, seq2 = DNA.make_seq(\"GGGGTTTA\").parse_out_gaps()\n paths = get_align_coords(m1, m2, aligned=True)\n self.assertEqual(paths.get_coords(), ([0, len(seq1)], [0, len(seq1)]))\n\n # raises an exception if the Aligned seqs are different lengths\n m1, seq1 = DNA.make_seq(\"ACGGTTTA\").parse_out_gaps()\n m2, seq2 = DNA.make_seq(\"GGGGTT\").parse_out_gaps()\n with self.assertRaises(AssertionError):\n get_align_coords(m1, m2, aligned=True)",
"def test_get_subalignment_sequence_order_maintained(self):\n result = AlignedSeq.get_sub_alignment_by_list_id([\"s3\", \"s1\"], self.alignment)\n expected = MultipleSeqAlignment([self.alignment[0], self.alignment[2]])\n self.assertTrue(msas_equal(expected, result))",
"def get_corrected_index(seq,\n aligned_index):\n \n # Counts the number of nucleotides in aligned sequence, returns\n # count of nucleotides occuring before aligned index reached\n slice_seq=seq[0:aligned_index]\n # If different gap characters used, may need to modify this\n # In current form, it is optimized for speed\n corrected_index=\\\n aligned_index - (slice_seq.count(\"-\") + slice_seq.count(\".\"))\n \n\n \n return corrected_index",
"def is_aligned_dna(sequence):\r\n #ensure that the given sequence is uppercase\r\n sequence = sequence.upper()\r\n \r\n #replace all A C G and T and compare length with 0\r\n if len(sequence.replace(\"A\", \"\").replace(\"C\", \"\").replace(\"G\",\"\").replace(\"T\",\"\").replace(\"-\",\"\")) == 0:\r\n return True\r\n else:\r\n return False",
"def __init__(self,\n seq,\n aligned_index,\n unaligned_index):\n \n self.seq=seq\n self.aligned_index=aligned_index\n self.unaligned_index=unaligned_index\n self.numeric_seq=convert_to_numeric(self.seq)\n self.upstream_regions=[]\n self.downstream_regions=[]\n self.labels=[]\n self.match_count=0\n self.percent_match=0\n self.non_specific_hits=0\n self.non_specific_percent=0\n \n self.std_index = False\n self.f_std_index = None\n self.r_std_index = None",
"def test_align_idx(self):\n self.amp4.rotateAng([5, 5, 5], ang='deg')\n al = align(self.amp3, self.amp4, mv=[0, 1, 2, 3], sv=[0, 1, 2, 3], method='idxPoints')\n all(self.assertAlmostEqual(al.m.vert[i, 0], al.s.vert[i, 0], delta=0.1) for i in range(al.s.vert.shape[0]))",
"def test_is_gapped(self):\n assert not self.RNA(\"\").is_gapped()\n assert not self.RNA(\"ACGUCAGUACGUCAGNRCGAUcaguaguacYRNRYRN\").is_gapped()\n assert self.RNA(\"-\").is_gapped()\n assert self.PROT(\"--\").is_gapped()\n assert self.RNA(\"CAGUCGUACGUCAGUACGUacucauacgac-caguACUG\").is_gapped()\n assert self.RNA(\"CA--CGUAUGCA-----g\").is_gapped()\n assert self.RNA(\"CAGU-\").is_gapped()",
"def test_RNA_position_fail(self):\n \n tool = pybedtools.create_interval_from_list(\"chr1 50 60 ENSMUSG1_1_83;ENSMUSG1_6_83 0 - 10 10\".split())\n location_dict = {\"ENSMUSG1\" : {\"strand\" : \"-\", \"regions\" : [(100, 150),\n (25,50),\n ] \n }\n }\n \n self.assertEqual(RNA_position(tool, location_dict), (None, None))\n \n tool = pybedtools.create_interval_from_list(\"chr1 50 60 ENSMUSG1_1_83;ENSMUSG1_6_83 0 - 175 175\".split())\n \n self.assertEqual(RNA_position(tool, location_dict), (None, None))",
"def CheckSeq(Seq):\n OkNucleo = (\"A\", \"C\", \"G\", \"T\")\n for i in Seq:\n if i not in OkNucleo:\n raise InputError(Seq,\"malformed input\")",
"def test_align_sanity(self):\n # QWERTY resemblance matrix:\n R = qwerty_distance()\n diff, u, r = min_difference_align(\"polynomial\", \"exponential\", R)\n # Warning: we may (read: 'will') use another matrix!\n self.assertEqual(diff, 15)\n # Warning: there may be other optimal matchings!\n self.assertEqual(u, '--polyn-om-ial')\n self.assertEqual(r, 'exp-o-ne-ntial')",
"def test___get_sub_alignment_by_list_id___GivenUnorderedIds_SubalignmentStillInSequenceOrder(\n self, *uninteresting_mocks\n ):\n self.setup()\n expected = MSA([self.alignment[0], self.alignment[2]])\n actual = NodeFactory._get_sub_alignment_by_list_id(self.alignment, [\"s3\", \"s1\"])\n self.assertTrue(equal_msas(expected, actual))",
"def cleanAlign(align, badaa=None):\n return align.loc[[isvalidpeptide(s, badaa) for s in align]]",
"def test_add_seqs_to_alignment(self):\n res = add_seqs_to_alignment(self.seqs2_fp, self.seqs1_aln_fp, RNA)\n self.assertEqual(res.toFasta(), self.add_seqs_aligned)",
"def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.inframe_cds_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq, in_frame=False), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)",
"def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)",
"def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)",
"def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)",
"def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)",
"def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)",
"def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)",
"def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)",
"def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)",
"def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)",
"def test_align_without_gaps(self):\n aln = ArrayAlignment(\n {\"seq1\": \"ACGG\", \"seq2\": \"CGCA\", \"seq3\": \"CCG-\"}, moltype=\"dna\"\n )\n aln_plot = aln.dotplot(\"seq1\")\n self.assertNotEqual(aln_plot._aligned_coords, None)",
"def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq, in_frame=False), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)",
"def verify_page_alignment(toc):\n if len({len(toc_entry[2]) for toc_entry in toc}) != 1:\n return False\n return True",
"def test_alignments(self):\n # test against the correct input file\n parser = Lav(self.__correct_file)\n for alignment in parser.alignments():\n self.assertEqual(len(alignment), 7)\n for alignment in parser.alignments(gapped=False):\n self.assertEqual(len(alignment), 8)\n # test againts incorrect input files\n for lav_file in self.__incorrect_files:\n parser = Lav(os.path.join(self.__incorrect_file_dir,\n lav_file))\n with self.assertRaises(LavError):\n for alignment in parser.alignments():\n self.assertIsInstance(alignment,\n Lav.GapFreeAlignment)",
"def copy_annotations_from_unaligned(aligned_seqrec: SeqRecord, unaligned_seqrec: SeqRecord):\n # NCBI Blast id includes description, whereas alignment does not\n assert aligned_seqrec.id in unaligned_seqrec.id, f\"{aligned_seqrec.id} <> {unaligned_seqrec.id}\"\n # copy annotations from previous\n newrec = deepcopy(aligned_seqrec)\n newrec.annotations = unaligned_seqrec.annotations\n # clear any letter annotations added during deepcopy\n newrec.letter_annotations = dict()\n # original sequence and letter annotations\n seq = unaligned_seqrec.seq\n letter_annotations = unaligned_seqrec.letter_annotations\n # index to track position in original sequence\n i = 0\n for j, letter in enumerate(aligned_seqrec.seq):\n if letter in [gap_letter, stop_letter]:\n for key, values in letter_annotations.items():\n # convert strings into lists of characters,\n # then combine into string at end of loop\n if key == \"seqnums\":\n letter_annotation = None\n elif all(isinstance(value, str) for value in values):\n letter_annotation = gap_letter\n else:\n letter_annotation = None\n newrec.letter_annotations.setdefault(key, list()).append(letter_annotation)\n else:\n while seq[i] in [gap_letter, stop_letter]:\n i += 1\n assert letter == seq[i], f\"letter {letter} at {j} <> seq {seq[i]} at {i}\"\n for key in letter_annotations.keys():\n newrec.letter_annotations.setdefault(key, list()).append(letter_annotations[key][i])\n i += 1\n # convert list of chars into string\n for key, values in letter_annotations.items():\n if isinstance(values, str):\n newrec.letter_annotations[key] = \"\".join(newrec.letter_annotations[key])\n return newrec",
"def unaligned(self):\n new_alignment = Alignment()\n new_alignment.datatype = self.datatype\n for name, seq in self.items():\n new_seq = re.sub(_INDEL, '', str(seq))\n if new_seq != '':\n new_alignment[name] = new_seq\n return new_alignment"
] |
[
"0.63133895",
"0.55383784",
"0.5371149",
"0.5285918",
"0.5271198",
"0.51925683",
"0.5169525",
"0.5145871",
"0.5103414",
"0.5064759",
"0.50444007",
"0.4977112",
"0.49596253",
"0.4956812",
"0.49547723",
"0.49426582",
"0.49426582",
"0.49426582",
"0.49426582",
"0.49426582",
"0.49426582",
"0.49426582",
"0.49426582",
"0.49426582",
"0.49274868",
"0.49214298",
"0.49204168",
"0.4911957",
"0.48921326",
"0.48791412"
] |
0.68770176
|
0
|
Check basic inputs for function that takes (read_aln_or_pos, cassette_end, relative_read_direction).
|
def _check_basic_pos_inputs(self, get_pos_function):
# should raise exception for invalid argument (valid arguments: HTSeq position object or (chrom,start,end,strand) tuple
# (strand must be +/-, and start can't be after end)
for bad_flanking_region in [None, '', 'aaa', 0, 1, 0.65, [], {}, True, False, ('C',2,3,4),('C',2,3,'x'),('C',3,2,'-')]:
for cassette_end in SEQ_ENDS:
for relative_read_direction in RELATIVE_READ_DIRECTIONS:
self.assertRaises(MutantError, get_pos_function, bad_flanking_region, cassette_end, relative_read_direction)
# should raise exception for invalid cassette_end or relative_read_direction
bad_vals = ['','aaa',0,1,[],{},None,True,False,'start','end','middle','read','leftmost','rightmost']
for bad_val in bad_vals:
for relative_read_direction in RELATIVE_READ_DIRECTIONS:
self.assertRaises(MutantError, get_pos_function, ('C',1,5,'+'), bad_val, relative_read_direction)
for cassette_end in SEQ_ENDS:
self.assertRaises(MutantError, get_pos_function, ('C',1,5,'+'), cassette_end, bad_val)
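
A hedged sketch of the validation the tested functions are expected to perform on a (chrom, start, end, strand) tuple, mirroring the bad inputs listed above; this is hypothetical illustration code, not the project's actual parser:

class MutantError(Exception):
    # Stand-in for the project's own MutantError.
    pass

def check_flanking_region(flanking_region):
    # Assumed rules mirrored from the test: must be a 4-tuple, strand must be +/-,
    # and start cannot be after end.
    try:
        chrom, start, end, strand = flanking_region
    except (TypeError, ValueError):
        raise MutantError("flanking region must be a (chrom, start, end, strand) tuple")
    if strand not in ('+', '-'):
        raise MutantError("strand must be '+' or '-'")
    if start > end:
        raise MutantError("start position cannot be after end position")
    return chrom, start, end, strand

print(check_flanking_region(('C', 1, 5, '+')))   # ('C', 1, 5, '+')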
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def check_common_args(args, function_name,\n valid_functions=['gaperture', 'gmap', 'gfind'],\n allow_no_coords=False):\n\n try:\n function_name = function_name.strip().lower()\n except AttributeError:\n raise gPhotonArgsError(\"Invalid function: {f}\".format(f=function_name))\n\n if not function_name in valid_functions:\n raise gPhotonArgsError(\"Invalid function: {f}\".format(f=function_name))\n\n try:\n args.band = args.band.strip()\n except AttributeError:\n raise SystemExit(\"Invalid band: {b}\".format(b=args.band))\n\n # This will ensure calpath has a trailing '/'.\n if function_name in ['gaperture', 'gmap']:\n args.calpath = os.path.join(args.calpath, '')\n # [Future]: Consider fixing this statement. This is breaking nosetests,\n # but it's not a bad idea...\n # if not os.path.isdir(args.calpath):\n # raise SystemExit(\"Calibration path not found: \" + args.calpath)\n\n if (not (args.ra and args.dec) and not args.skypos and\n not allow_no_coords):\n raise SystemExit(\"Must specify either both RA/DEC or SKYPOS.\")\n elif (args.ra and args.dec) and args.skypos:\n if not (args.ra == args.skypos[0] and args.dec == args.skypos[1]):\n raise SystemExit(\"Must specify either RA/DEC or SKYPOS, not both.\")\n elif (args.ra and args.dec) and not args.skypos:\n args.skypos = [args.ra, args.dec]\n elif not (args.ra and args.dec) and args.skypos:\n args.ra, args.dec = args.skypos\n\n if args.suggest and function_name in ['gfind', 'gaperture']:\n (args.ra, args.dec, args.radius, args.annulus1,\n args.annulus2) = dbt.suggest_parameters(args.band, args.skypos,\n verbose=0)\n args.skypos = [args.ra, args.dec]\n if args.verbose:\n print(\"Recentering on [\"+str(args.ra)+\", \"+str(args.dec)+\"]\")\n print(\"Setting radius to \"+str(args.radius))\n print(\"Setting annulus to [\"+str(args.annulus1)+\", \"+\n str(args.annulus2)+\"]\")\n\n if args.skypos:\n if np.array(args.skypos).shape != (2,):\n raise gPhotonArgsError(\n \"Skypos (--skypos) must be a 2-element array.\")\n args.ra, args.dec = args.skypos\n\n if args.ra and not 0. 
<= args.ra <= 360.:\n raise SystemExit(\n \"RA of {ra} does not satisfy 0 <= RA <= 360\".format(ra=args.ra))\n\n if args.dec and not -90 <= args.dec <= 90:\n raise SystemExit(\n \"Dec of {dec} does not satisfy -90 <= DEC <= 90\".format(\n dec=args.dec))\n\n if args.detsize and args.detsize <= 0.:\n raise SystemExit(\"Effective field diameter (--detsize) must be > 0\")\n\n if args.maxgap and args.maxgap <= 0.:\n raise SystemExit(\"Maximum gap length (--maxgap) must be > 0 seconds.\")\n if args.minexp and args.minexp <= 0.:\n raise SystemExit(\"Minimum valid exposure depth (--minexp) must be > 0\"\n \" seconds.\")\n\n if args.retries and args.retries <= 0.:\n raise SystemExit(\"Number of retries (--retries) must be > 0.\")\n\n # tmin / tmax must be defined and reasonable\n if not args.tmin or args.tmin <= 0.:\n raise SystemExit(\"T0 (--t0) must be > 0.\")\n if not args.tmax or args.tmax <= 0.:\n raise SystemExit(\"T1 (--t1) must be > 0.\")\n if args.tmin >= args.tmax:\n raise SystemExit(\"Minimum time (--t0) must be < maximum time (--t1).\")\n\n if args.trange:\n if np.array(args.trange).shape == (2, ):\n args.trange = [args.trange]\n if not (len(np.array(args.trange).shape) == 2 and\n np.array(args.trange).shape[1] == 2):\n raise SystemExit(\"trange (--trange) must be a pairwise list.\")\n # Individually check the entries for sanity\n for t in args.trange:\n if t[0] <= 0 or t[1] <= 0:\n raise SystemExit('Times must be positive: {t}'.format(t=t))\n if t[1] <= t[0]:\n raise SystemExit('Start time ({t0}) must preceed end time'\n ' ({t1})'.format(t0=t[0], t1=t[1]))\n elif not allow_no_coords and function_name in ['gmap', 'gaperture']:\n args.trange = dbt.fGetTimeRanges(args.band, args.skypos,\n trange=[args.tmin, args.tmax],\n maxgap=args.maxgap, minexp=args.minexp,\n detsize=args.detsize,\n skyrange=args.skyrange)\n else:\n # If no coordinates specified then use a huge time range for now.\n args.trange = [args.tmin, args.tmax]\n\n return args",
"def _check_input(self, func, args, kwargs):\n fullargspec = inspect.getfullargspec(func)\n return_msg = ''\n if fullargspec.varkw is None:\n for key in kwargs:\n if not key in fullargspec.kwonlyargs:\n return_msg += f'[Error]: not support param `{key}`. \\n'\n if fullargspec.varargs is None:\n if len(fullargspec.args) == 0:\n max_args_len = 0\n else:\n max_args_len = len(fullargspec.args)-1 if fullargspec.args[0] == 'self' else len(fullargspec.args)\n defaults_nums = 0 if fullargspec.defaults is None else len(fullargspec.defaults)\n min_args_len = max_args_len - defaults_nums\n if len(args) < min_args_len:\n return_msg += f'[Error]: have min {min_args_len} input, but you input {len(args)} args. \\n'\n if max_args_len < len(args):\n return_msg += f'[Error]: have max {max_args_len} input, but you input {len(args)} args. \\n'\n return return_msg",
"def check_args():\n schema = Schema({\n 'FOLDREC': Use(open, error='FOLDREC file should be readable'),\n 'CLUSTAL': Use(open, error='CLUSTAL file should be readable'),\n 'CCMPRED': Use(open, error='CCMPRED file should be readable'),\n '--metafold': Use(open, error='METAFOLD_FILE should be readable'),\n '--nb_pdb': And(Use(int), lambda n: 1 <= n <= 405,\n error='--nb_pdb=NUM should be integer 1 <= N <= 405'),\n '--dssp': Use(open, error='dssp/mkdssp should be readable'),\n '--dope': Use(open, error='dope file should be readable'),\n '--benchmark': Use(open, error='BENCHMARK_FILE should be readable'),\n '--cpu': And(Use(int), lambda n: 0 <= n <= cpu_count(),\n error='--cpus=NUM should be integer 1 <= N <= ' + str(cpu_count())),\n # The output PATH is created (if not exists) at the end of the program\n # so we skip the check.\n object: object})\n try:\n schema.validate(ARGUMENTS)\n except SchemaError as err:\n exit(err)",
"def _check_input(self, **kwargs):\n\n combi_a = ['nxny', 'dxdy', 'ul_corner']\n combi_b = ['nxny', 'dxdy', 'll_corner']\n if all(kwargs[k] is not None for k in combi_a):\n nx, ny = kwargs['nxny']\n dx, dy = kwargs['dxdy']\n x0, y0 = kwargs['ul_corner']\n if (dx <= 0.) or (dy >= 0.):\n raise ValueError('dxdy and input params not compatible')\n origin = 'upper-left'\n elif all(kwargs[k] is not None for k in combi_b):\n nx, ny = kwargs['nxny']\n dx, dy = kwargs['dxdy']\n x0, y0 = kwargs['ll_corner']\n if (dx <= 0.) or (dy <= 0.):\n raise ValueError('dxdy and input params not compatible')\n origin = 'lower-left'\n else:\n raise ValueError('Input params not compatible')\n\n self._nx = int(nx)\n self._ny = int(ny)\n if (self._nx <= 0) or (self._ny <= 0):\n raise ValueError('nxny not valid')\n self._dx = float(dx)\n self._dy = float(dy)\n self._x0 = float(x0)\n self._y0 = float(y0)\n self._origin = origin\n\n # Check for pixel ref\n self._pixel_ref = kwargs['pixel_ref'].lower()\n if self._pixel_ref not in ['corner', 'center']:\n raise ValueError('pixel_ref not recognized')",
"def CheckRead(read_name, seq, break_line, quality, fastq_fp, line_num):\n if not read_name[0] == \"@\":\n raise Exception(\"Read name does not start with @, line # {}\\n File: {}\".format(\n line_num, fastq_fp))\n for x in seq.upper():\n if x not in [\"A\",\"C\",\"T\",\"G\",\"N\"]:\n raise Exception(\"Sequence value {} not recognized. Line # {}\\n File: {}\".format(\n x, line_num + 1, fastq_fp))\n if not break_line[0] == \"+\":\n raise Exception(\"Break line not '+'. Instead '{}'. Line # {}\\n File: {}\".format(\n break_line[0],line_num + 2, fastq_fp))\n if not len(quality) == len(seq):\n raise Exception(\"Quality line wrong length. Lines # {}\\n File: {}\".format(\n line_num + 3, fastq_fp))",
"def checkArguments(logger: logging.Logger):\n cFuncName = colored(os.path.basename(__file__), 'yellow') + ' - ' + colored(sys._getframe().f_code.co_name, 'green')\n\n # change to the directory dirRnx if it exists\n workDir = os.getcwd()\n if amc.dRTK['args']['rinexDir'] != '.':\n workDir = os.path.normpath(os.path.join(workDir, amc.dRTK['args']['rinexDir']))\n logger.info('{func:s}: working directory is {dir:s}'.format(func=cFuncName, dir=colored('{:s}'.format(workDir), 'green')))\n\n if not os.path.exists(workDir):\n logger.error('{func:s}: directory {dir:s} does not exists.'.format(func=cFuncName, dir=colored(workDir, 'red')))\n sys.exit(amc.E_DIR_NOT_EXIST)\n else:\n os.chdir(workDir)\n logger.info('{func:s}: changed to directory {dir:s}'.format(func=cFuncName, dir=colored('{:s}'.format(workDir), 'green')))\n\n # check existence of RINEX observation file\n if not os.access(amc.dRTK['args']['obs_name'], os.R_OK):\n logger.error('{func:s}: RINEX observation file {rinex:s} not accessible.\\n'.format(func=cFuncName, rinex=colored('{!s}'.format(amc.dRTK['args']['obs_name']), 'red')))\n return amc.E_FILE_NOT_EXIST\n logger.info('{func:s}: RINEX observation file {obs:s} accessible'.format(func=cFuncName, obs=colored('{!s}'.format(amc.dRTK['args']['obs_name']), 'green')))",
"def check_arguments(self):\n self.check_num_arguments()\n self.are_readable_files(self.args)",
"def checkArguments(args, log):\n\n\t\n\n \t\n\tif not args.variant_caller or not args.genome_ref or not args.bam or not args.bed or not args.vcf:\n\t\tlog.error(\"necessary pre-requisite arguments\")\n\t\tsys.exit()\n\n\t\n\tif args.genome_ref:\n\t\tif not os.path.isfile(args.genome_ref): \n \t\t\n \t\t\tlog.error(\"it does not exist file corresponding to the reference genome\")\n \t\t\tsys.exit()\n\n \t\tif not os.access(args.genome_ref, os.R_OK):\n \t\t\tlog.error(\"permission to read the reference genome file is not accorded\")\n \t\t\tsys.exit()\n\n\t \n \tif args.bam:\n \t\tif not os.path.isfile(args.bam): \n \t\t\n \t\t\tlog.error(\"it does not exist file corresponding to the bam\")\n\n \t\t\tsys.exit()\n\n \t\tif not os.access(args.bam, os.R_OK):\n \t\t\tlog.error(\"permission to read the bam file is not accorded\")\n \t\t\tsys.exit()\n\n\n \tif args.bed:\n \t\tif not os.path.isfile(args.bed):\n \t\t\tlog.error(\"it does not exist file corresponding to the target regions\")\n \t\t\tsys.exit()\n\n \t\tif not os.access(args.bed, os.R_OK):\n \t\t\tlog.error(\"permission to read the target regions file is not accorded\")\n \t\t\tsys.exit()",
"def checkArgs():\n\n #-- 1 --\n # [ if sys.argv[1:] has exactly four elements ->\n # rawAltAz, rawLat, rawLon, rawDT := those elements\n # else ->\n # sys.stderr +:= error message\n # stop execution ]\n argList = sys.argv[1:]\n if len(argList) != 4:\n usage (\"Incorrect command line argument count.\" )\n else:\n rawAltAz, rawLat, rawLon, rawDT = argList\n #-- 2 --\n # [ if rawAltAz is a valid set of horizon coordinates ->\n # altAz := those coordinates as a sidereal.AltAz instance\n altAz = checkAltAz ( rawAltAz )\n\n #-- 3 --\n # [ if rawLat is a valid latitude ->\n # lat := that latitude in radians\n # else ->\n # sys.stderr +:= error message\n # stop execution ]\n try:\n lat = sidereal.parseLat ( rawLat )\n except SyntaxError, detail:\n usage ( \"Invalid latitude: %s\" % detail )\n\n #-- 4 --\n # [ if rawLon is a valid longitude ->\n # lon := that longitude in radians\n # else ->\n # sys.stderr +:= error message\n # stop execution ]\n try:\n lon = sidereal.parseLon ( rawLon )\n except SyntaxError, detail:\n usage ( \"Invalid longitude: %s\" % detail )\n\n #-- 5 --\n # [ if rawDT is a valid date-time string ->\n # dt := that date-time as a datetime.datetime instance\n # else ->\n # sys.stderr +:= error message\n # stop execution ]\n try:\n dt = sidereal.parseDatetime ( rawDT )\n except SyntaxError, detail:\n usage ( \"Invalid timestamp: %s\" % detail )\n\n #-- 6 --\n latLon = sidereal.LatLon ( lat, lon )\n return (altAz, latLon, dt)",
"def _validateInputs(self):\n if self.args[\"Counties\"] == [] and self.args[\"BBox\"] == None:\n raise Exception(\"Invalid arguments provided. Must provide either a geographical bounding box or a list of counties.\")\n\n if self.args[\"StartDateTime\"] > self.args[\"EndDateTime\"]:\n raise Exception(\"Invalid arguments provided. StartDateTime cannot be after EndDateTime\")",
"def validate_input(self, *args):\n return",
"def _expected_inputs():",
"def __check_errors(self):\n if not(\"input\" in self.passedArgs or \"source\" in self.passedArgs):\n raise ArgError(\"Program did not receive any of mandatory arguments! (--source=file, --input=file)\")",
"def parse_stdin_args():\n \n #Initialize parser\n parser = argparse.ArgumentParser(description = \"Creates synthetic paired end DNA from given fraction of Human, Bacteria, Phix174 and Virus/Phage. \\\n Note: List of Human, Bacteria, Phix174 and Virus/Phage fractions should be space separated. Example: -hu 0.5 0.2 \\\n -b 0.3 0.01 -x 0.1 0.01. The Virus/Phage fraction is taken 1 - Human - Bacteria - Phix174.\")\n \n #Required command line arguments\n parser.add_argument(\"-s\", help=\"Number of total reads\", required = True, type = int)\n parser.add_argument(\"-p\", help=\"Path to directory for output files\", required = True)\n parser.add_argument(\"-hu\", help=\"Human DNA percentage. Default: [0.5, 0.1, 0.01, 0.001]\", required = False, type= float, nargs = \"+\", default = [0.5, 0.1, 0.01, 0.001])\n parser.add_argument(\"-b\", help=\"Bacterial DNA percentage. Default: [0.4, 0.25, 0.1, 0.5]\", required = False, type= float, nargs = \"+\", default = [0.4, 0.25, 0.1, 0.5])\n parser.add_argument(\"-x\", help=\"Phix174 DNA percentage. Default: [0.01, 0.001]\", required = False, type= float, nargs = \"+\", default = [0.01, 0.001])\n parser.add_argument(\"-isfastq\", help=\"Specify output format 1=FASTQ 0=FASTA (Default: 1)\", required = False, type= int, default = 1)\n parser.add_argument(\"-n\", help=\"Read length. Default: 250\", required = False, type= int, default = 250)\n parser.add_argument(\"-err\", help=\"Error rate. Default: 0\", required = False, type= int, default = 0)\n \n #Read the command line arguments\n args = parser.parse_args()\n \n #check parameters\n if args.s < 100:\n print \"Number of total reads must be greater than 100.\"\n sys.exit(0)\n \n if args.n < 35:\n print \"Read length should be greater than 35\"\n sys.exit(0)\n\n return args",
"def validate_args(self, in_args, cmd_call):\n valid_1, valid_2 = None, None\n\n if len(in_args) > 0 and type(in_args) is not list:\n args = in_args.split()\n valid_1 = args[0]\n elif type(in_args) is list and len(in_args) > 0:\n args = in_args\n valid_1 = args[0]\n else:\n args = []\n\n if cmd_call in ['default']:\n # Default : Returns a valid cui type for an input cui\n # checks to see if there is more than 2 arguments\n # if so, arg[0] may be a valid code\n # arg[1] may be a valid code type\n # if not ask the user what type of code type arg[0] is\n # valid_1 = valid cui type\n # valid_2 = None\n while True:\n if len(args) >= 2 and len(args) <= 3:\n input_type = args[1].upper()\n else:\n input_type = input(\"What type of id is '{0}'? [LOCAL/RXCUI/NDC/SNOMED]\".format(args[0])).upper()\n\n # Confirm it's a valid code type\n valid_type = self.validate_id_type(input_type)\n # Valid type is a boolean of True\n if isinstance(valid_type, str) or valid_type is None:\n return None\n elif valid_type:\n break\n elif not valid_type:\n print('Invalid Option, Please Try Again')\n continue\n valid_1 = input_type\n\n elif cmd_call in self.cmd_config_default:\n # valid_1 : Valid Cui , valid_2 : Valid Cui Type\n valid_2, _ = self.validate_args(args, 'default')\n valid_1 = args[0]\n\n elif cmd_call == 'code_lookup':\n # args[0] : Initial CUI, args[1] : Initial CUI Type, args[2] : Target CUI Type\n # valid_1 : valid cui, valid_2 : list valid source and target\n _dict_opts = util.OPTIONS_CUI_TYPES.copy()\n _avail = list(set(smores.get_dict_sources()) & set(_dict_opts))\n if len(_avail) == 0 and len(args) < 2:\n print('There are no available starting cui types that can be crosswalked.\\n'\n 'Please load a file containing valid cui types: {0}'.format(_dict_opts))\n return False, None\n\n if len(args) >= 2:\n if len(args) == 3:\n # provided cui, cui source, and target\n valid_2, _ = self.validate_args(args, 'default')\n source, target = args[1].upper(), args[2].upper()\n else:\n source, target = args[0].upper(), args[1].upper()\n valid_1 = simple_input(\"Is {0} the correct starting source? 
\".format(source), ['YES', 'NO', 'exit'])\n if valid_1 == 'exit':\n return False, None\n # TODO need path for valid_2\n else:\n valid_1 = simple_input(\"Which code set do you want to start with?\", _avail)\n if valid_1 != 'exit':\n _dict_opts.remove(valid_1) # Don't lookup what we've already got\n valid_2 = simple_input(\"Which code set do you want to get results for?\", _dict_opts)\n if valid_2 == 'exit':\n return False, None\n else:\n return False, None\n\n elif cmd_call == 'errors':\n _current_err = list(self.errors.keys())\n if len(args) > 1:\n smores_error('#Cx001.7', console_p=True)\n return\n elif len(args) == 1 and args[0].lower() in _current_err:\n valid_1 = args[0]\n elif len(args) == 1:\n print('There are currently no errors logged for that command.')\n return\n else:\n valid_1 = simple_input(\"Please choose a command from the list to see errors: \", _current_err)\n\n elif cmd_call in ['csv', 'remap', 'fhir', 'json']:\n # Format: [File] [Output]\n if not self.inputs['loaded']:\n print(\"No Files Loaded!\\nYou Must load a file containing local medications first\")\n else:\n _file_opts = list(self.inputs['files'].keys()) + ['All']\n _dict_opts = list(smores.get_dict_sources(True)) #+ ['All']\n _file_or_dict = None\n\n if cmd_call in ['csv', 'json']:\n if len(args) == 0:\n _file_or_dict = simple_input(\"Do you want results for a File or a constructed Dictionary?\",\n ['File', 'Dictionary', 'exit'], True)\n elif args[0] not in _file_opts and args[0] not in _dict_opts:\n print('That option was not recognized as a valid source.')\n _file_or_dict = simple_input(\"Do you want results for a File or a constructed Dictionary?\",\n ['File', 'Dictionary', 'exit'], True)\n else:\n valid_1 = args[0]\n\n if _file_or_dict.upper() == 'FILE':\n valid_1 = 'FILE|' + simple_input(\"Please choose a loaded file\", _file_opts, True)\n\n elif _file_or_dict.upper() == 'DICTIONARY':\n valid_1 = 'DICT|' + simple_input(\"Please choose a code dictionary to output\", _dict_opts, True)\n elif _file_or_dict.upper() == 'EXIT':\n return None, None\n\n else:\n valid_1 = simple_input(\"Please choose a loaded file\", _file_opts, True)\n\n if cmd_call in ['csv', 'json', 'fhir']:\n if len(args) == 2 and len(args[1]) > 0:\n valid_2 = args[1]\n else:\n valid_2= input(\"Please provide an output file name:\").strip()\n\n if len(valid_2) > 0:\n if \".\" in valid_2:\n valid_2, ext = valid_2.split(\".\")\n else:\n valid_2 = ''\n print('Empty file name provided, using default.')\n else:\n valid_2 = args[0]\n\n elif cmd_call == 'file':\n re_use = False\n if self.inputs['loaded'] and len(in_args) == 0:\n print(\"The following file(s) have already been loaded: \\n\" + str(self.inputs['files']))\n _load_more = simple_input(\"Would you like to load an additional file?\", ['Y', 'N', 'exit'])\n if _load_more == 'Y':\n pass\n elif _load_more == 'N':\n _re_use = simple_input(\"Would you like to re-use a loaded file?\", ['Y', 'N', 'exit'])\n if _re_use == 'Y':\n re_use = True\n else:\n return False, None\n else:\n return False, None\n\n if in_args is not None and len(in_args) > 0:\n valid_1 = in_args\n else:\n valid_1 = input(\"Please enter the name of the file to load: \") if not re_use else simple_input(\n 'Select the file to be used: ', list(self.inputs['files'].keys()), index=True)\n\n while True:\n if valid_1 in self.inputs['files']:\n if not re_use:\n print(\"It looks like you've already loaded that file. 
Please try a different file.\")\n valid_1, valid_2 = input(\"Please enter the name of the file to load: \")\n else:\n break\n elif len(valid_1) == 0:\n smores_error('#Cx001.7', logger=smoresLog)\n valid_1, valid_2 = input(\"Please enter the name of the file to load: \")\n else:\n break\n\n if not resolve_target_path(valid_1):\n valid_1, valid_2 = self.validate_args('', 'file')\n\n elif '.smr' in valid_1:\n if len(self.inputs['files']) > 0:\n print(\n 'It looks like you are trying to load a session, this will replace the current session and '\n 'all previous work.')\n _save = simple_input('Do you want to save the current session first?', ['Y', 'N', 'EXIT'])\n if _save == 'Y':\n smores.save_session(self.__version__)\n elif _save == 'EXIT':\n return False, None\n valid_2 = 'session'\n else:\n valid_2 = 'file'\n\n smoresLog.debug('Args: {0}, Validated as: {1}'.format(valid_1, valid_2))\n return valid_1, valid_2",
"def __verify_arguments(self):\n if len(self.__pointer_data) == 0:\n raise ValueError(\n \"Input data is empty (size: '%d').\" % len(self.__pointer_data)\n )\n\n if self.__number_clusters <= 0:\n raise ValueError(\n \"Amount of cluster (current value: '%d') for allocation should be greater than 0.\"\n % self.__number_clusters\n )\n\n if self.__numlocal < 0:\n raise ValueError(\n \"Local minima (current value: '%d') should be greater or equal to 0.\"\n % self.__numlocal\n )\n\n if self.__maxneighbor < 0:\n raise ValueError(\n \"Maximum number of neighbors (current value: '%d') should be greater or \"\n \"equal to 0.\" % self.__maxneighbor\n )",
"def check_args():\n assert os.path.exists(FLAGS.datadir)\n assert os.path.exists(FLAGS.trainlogdir)\n assert os.path.exists(FLAGS.split_fn)\n assert os.path.exists(FLAGS.labels_fname)\n assert FLAGS.snippet_len >= 1\n assert FLAGS.frameskip >= 1\n assert FLAGS.test_stride == 1 or FLAGS.test_stride == FLAGS.snippet_len, \\\n 'test_stride has to be either 1 or snippet_len (for vanilla+)'\n pass",
"def ValidateInputs(lat_min, lat_max, lon_min, lon_max, lonres, latres, basepath, \\\n GFED_path, EDGAR_path, CAMS_path, behaviour_settings):\n # Assert sure extents fall within boundary\n assert -180 <= lon_min < 180 and -180 < lon_max <= 180, 'Longitude should be within range -180 -- 180!'\n assert -90 <= lat_min < 90 and -90 < lat_max <= 90, 'latitude should be within range -90 -- 90!'\n assert lon_min < lon_max, 'maximum longitude cannot be smaller than or equal to minimum!'\n assert lat_min < lat_max, 'maximum latitude cannot be smaller than or equal to minimum!'\n \n # Assert resolution is larger than TROPOMI minimum:\n assert lonres > 7, 'TROPOMI minimum longitude resolution is 7 km!'\n assert latres > 7, 'TROPOMI minimum latitude resolution is 7 km!'\n \n # Assert if given directories exist\n if behaviour_settings[1] == True:\n assert os.path.isdir(CAMS_path), f'Directory {CAMS_path} was not found!'\n if behaviour_settings[2] == True:\n assert os.path.isdir(GFED_path), f'Directory {GFED_path} was not found!'\n assert os.path.isdir(EDGAR_path), f'Directory {EDGAR_path} was not found!'\n\n \n return",
"def check_arguments(logger: logging.Logger = None):\n cFuncName = colored(os.path.basename(__file__), 'yellow') + ' - ' + colored(sys._getframe().f_code.co_name, 'green')\n\n # check & change working dir\n dStat['dir'] = os.path.dirname(Path(dStat['cli']['obsstatf']).resolve())\n dStat['obsstatf'] = os.path.basename(dStat['cli']['obsstatf'])\n\n if not amutils.changeDir(dStat['dir']):\n if logger is not None:\n logger.error('{func:s}: changing to directory {dir:s} failed'.format(dir=dStat['dir'], func=cFuncName))\n sys.exit(amc.E_DIR_NOT_EXIST)\n\n # check accessibilty of observation statistics file\n if not amutils.file_exists(fname=dStat['obsstatf'], logger=logger):\n if logger is not None:\n logger.error('{func:s}: observation file {file:s} not accessible'.format(file=dStat['obsstatf'], func=cFuncName))\n sys.exit(amc.E_FILE_NOT_EXIST)\n\n # create dir for storing the latex sections\n dStat['ltx']['path'] = os.path.join(dStat['dir'], 'ltx')\n if not amutils.mkdir_p(dStat['ltx']['path']):\n if logger is not None:\n logger.error('{func:s}: cannot create directory {dir:s} failed'.format(dir=dStat['ltx']['path'], func=cFuncName))\n sys.exit(amc.E_CREATE_DIR_ERROR)\n\n # check for accessibility of CVS database\n if not amutils.path_writable(os.path.dirname(dStat['cli']['cvsdb'])):\n if logger is not None:\n logger.error('{func:s}: cannot write to directory {dir:s} failed'.format(dir=colored(os.path.dirname(dStat['cli']['cvsdb']), 'red'), func=cFuncName))\n sys.exit(amc.E_PATH_NOT_WRITABLE)\n\n # check whether selected freq is available\n for clifreq in dStat['cli']['freqs']:\n if clifreq not in dStat['info']['freqs']:\n if logger is not None:\n logger.error('{func:s}: selected frequency {clifreq:s} is not available'.format(clifreq=colored(clifreq, 'red'), func=cFuncName))\n sys.exit(amc.E_NOAVAIL_FREQ)\n\n # extract YY and DOY from filename\n dStat['time']['YYYY'] = int(dStat['obsstatf'][12:16])\n dStat['time']['DOY'] = int(dStat['obsstatf'][16:19])\n # converting to date\n dStat['time']['date'] = datetime.strptime('{year:04d}-{doy:03d}'.format(year=dStat['time']['YYYY'], doy=dStat['time']['DOY']), \"%Y-%j\")",
"def input_check(self):\n\n if self.species == 'He': assert self.line_model == 'voigt'\n n_upper_range, e_dens_range, temp_range, b_field_range = get_param_ranges(self.line_model)\n\n if np.isnan(n_upper_range).sum() <= 1:\n assert (self.n_upper in range(n_upper_range[0], n_upper_range[1]))\n if np.isnan(e_dens_range).sum() <= 1:\n assert (e_dens_range[0] <= self.e_dens <= e_dens_range[1])\n if np.isnan(temp_range).sum() <= 1:\n assert (temp_range[0] <= self.temp <= temp_range[1])\n if np.isnan(b_field_range).sum() <= 1:\n assert (b_field_range[0] <= self.b_field <= b_field_range[1])",
"def check_args(self):\n parser = get_base_arguments(get_parser())\n parser = get_tc_arguments(parser)\n # Disable \"Do not use len(SEQ) as condition value\"\n # pylint: disable=C1801\n if len(sys.argv) < 2:\n self.logger.error(\"Icetea called with no arguments! \")\n parser.print_help()\n return False\n elif not self.args.ignore_invalid_params and self.unknown:\n self.logger.error(\"Unknown parameters received, exiting. \"\n \"To ignore this add --ignore_invalid_params flag.\")\n self.logger.error(\"Following parameters were unknown: {}\".format(self.unknown))\n parser.print_help()\n return False\n return True",
"def validate_arguments(args):\n if not args.input_file[-4:] == \".pdb\":\n exit(\"ERROR: Input file should be in PDB format\")\n if args.n_decoys < 0:\n exit(\"ERROR: Number of decoys must be a non-negative value\")\n if args.n_steps < 0:\n exit(\"ERROR: Number of PASSO steps must be a non-negative value\")",
"def check_num_arguments(self):\n if len(self.args) != 3:\n self.cli_parser.error(\n \"Please provide paths to an interactions file, \"\n \"an annotations file, and an expressions file.\"\n )",
"def check_input(para_dic, parser):\n # for prokarya\n if para_dic['kingdom'] in [\"prokarya\", \"both\"]:\n if para_dic[\"fasta_prok\"] is None:\n parser.error(\"\"\"prokarya requires fasta file to be specified using\n fasta_prok in config file\"\"\")\n else:\n if os.path.exists(para_dic[\"fasta_prok\"]) is False:\n parser.error(\"\"\"reference genome doesnt exist\"\"\")\n if para_dic[\"gff_prok\"] is None:\n parser.error(\"\"\"prokarya requires gff file to be specified using\n gff_prok in config file\"\"\")\n else:\n if os.path.exists(para_dic[\"gff_prok\"]) is False:\n parser.error(\"\"\"reference genome gff doesnt exist\"\"\")\n # for eukarya\n elif para_dic['kingdom'] in [\"eukarya\", \"both\"]:\n if para_dic[\"fasta_euk\"] is None:\n parser.error(\"\"\"eukarya requires fasta file to be specified using\n fasta_euk in config file\"\"\")\n else:\n if os.path.exists(para_dic[\"fasta_euk\"]) is False:\n parser.error(\"\"\"eukaryotic reference genome doesnt exist\"\"\")\n if para_dic[\"gff_euk\"] is None:\n parser.error(\"\"\"eukarya requires gff file to be specified using\n gff_euk in config file\"\"\") \n else:\n if os.path.exists(para_dic[\"gff_euk\"]) is False:\n parser.error(\"\"\"eukaryotic reference genome gff doesnt exist\"\"\")\n\n # if \"ballgown\" in para_dic['method'] and para_dic[\"kingdom\"] == \"prokarya\":\n # sys.exit(\"\"\"Ballgown does not work for prokaryotic genomes,\n # pick method that does not have ballgown!\"\"\")",
"def test_read_correct_inputs():\n bids_path = 'sub-01_ses-01_meg.fif'\n with pytest.raises(RuntimeError, match='\"bids_path\" must be a '\n 'BIDSPath object'):\n read_raw_bids(bids_path)\n\n with pytest.raises(RuntimeError, match='\"bids_path\" must be a '\n 'BIDSPath object'):\n get_head_mri_trans(bids_path)",
"def check(self, input, ast):\n assert False # Must be redefined",
"def __check_supplied_params(event, resp):\n\n use_start_end = False\n use_range = False\n\n # check if start/end was provided in the request, \n # indicate subseq-type is 'start-end' in response data dict\n if event['queryStringParameters']:\n params = event['queryStringParameters']\n if \"start\" in params.keys() or \"end\" in params.keys():\n use_start_end = True\n resp.put_data(\"subseq-type\", \"start-end\")\n \n # check if Range header was provided in the request,\n # indicate subseq-type is 'Range' in response data dict\n if \"Range\" in event['headers']:\n use_range = True\n resp.put_data(\"subseq-type\", \"range\")\n \n # if both start/end and AND Range header, this is a BAD REQUEST\n if use_start_end and use_range:\n resp.set_status_code(SC.BAD_REQUEST)\n resp.set_body(json.dumps({\n \"message\": \"Cannot provide both sequence start/end AND Range\"\n }))",
"def __verify_arguments(self):\r\n if len(self.__pointer_data) == 0:\r\n raise ValueError(\"Input data is empty (size: '%d').\" % len(self.__pointer_data))\r\n\r\n if self.__number_clusters <= 0:\r\n raise ValueError(\"Amount of cluster (current value: '%d') for allocation should be greater than 0.\" %\r\n self.__number_clusters)\r\n\r\n if self.__numlocal < 0:\r\n raise ValueError(\"Local minima (current value: '%d') should be greater or equal to 0.\" % self.__numlocal)\r\n\r\n if self.__maxneighbor < 0:\r\n raise ValueError(\"Maximum number of neighbors (current value: '%d') should be greater or \"\r\n \"equal to 0.\" % self.__maxneighbor)",
"def get_rna_coord(rna_gene_name):\n user_input_required = True\n deduction_attempt_made = False\n\n rna_info = gene_to_coord(rna_gene_name)\n\n is_success = rna_info['success']\n if is_success:\n rna_chr_no = rna_info['chr_n']\n rna_start_chr_coord = rna_info['start_coord']\n rna_end_chr_coord = rna_info['end_coord']\n\n deduction_attempt_made = True\n print(\n \"We have automatically deduced that this gene lies on chromosome \"\n + str(rna_chr_no) + \" from \" + str(rna_start_chr_coord) + \" to \"\n + str(rna_end_chr_coord) + \" (with length \"\n + str(rna_end_chr_coord - rna_start_chr_coord) + \" bases)\"\n )\n print(\"Are you okay with the above coordinates? [y/n]: \")\n user_input = input()\n while len(user_input) != 1 or user_input.lower() not in \"yn\":\n print(\"\")\n print(\"Please type 'y' or 'n'\")\n print(\"Are you okay with the above coordinates? [y/n]: \")\n user_input = input()\n print(\"\")\n if user_input.lower() == \"y\":\n print(\"Thank you!\")\n user_input_required = False\n else:\n print(\"Alright, please give us the coordinates: \")\n print(\"\")\n user_input_required = True\n\n if user_input_required:\n if not deduction_attempt_made:\n print(\"\")\n print(\n \"Sorry, we are having trouble figuring out the location of this\"\n \" gene on the genome. Could you tell us?\"\n )\n print(\"\")\n\n print(\"Chromosome number (1-23): > \")\n rna_chr_no = input()\n print(\"\")\n print(\n \"Thanks! What about the start coordinate of this gene on chromosome\"\n + \" \" + rna_chr_no + \"?:\"\n )\n print(\"\")\n print(\"Start coordinate: > \")\n rna_start_chr_coord = input()\n print(\"\")\n print(\n \"Thanks! What about the end coordinate of this gene on chromosome\"\n + \" \" + rna_chr_no + \"?:\"\n )\n print(\"\")\n print(\"End coordinate: > \")\n rna_end_chr_coord = input()\n return rna_chr_no, rna_start_chr_coord, rna_end_chr_coord",
"def check_params(self, name, fs_in, fs_out, window):\n if not isinstance(name, str):\n raise TypeError('name must be a string, not %s' % name)\n if fs_in <= 0:\n raise ValueError('fs_in should not be less than 0.')\n if fs_out <= 0:\n raise ValueError('fs_out should not be less than 0.')\n if window <= 0:\n raise ValueError('window must be greater than than 0.')"
] |
[
"0.5927966",
"0.59214574",
"0.5877408",
"0.5834172",
"0.57960546",
"0.5730392",
"0.57066596",
"0.56749415",
"0.5674913",
"0.5639737",
"0.56005573",
"0.557201",
"0.5553413",
"0.5532597",
"0.5499041",
"0.54693806",
"0.5468923",
"0.54674524",
"0.54220945",
"0.5385928",
"0.5384334",
"0.53813565",
"0.53632987",
"0.5357685",
"0.53529876",
"0.53497094",
"0.533956",
"0.5334583",
"0.5324026",
"0.5319446"
] |
0.68425304
|
0
|
Convenience function to make the args to Fake_HTSeq_genomic_pos from an Insertion_position
|
def _make_pos(pos):
    # Returns the (chrom, strand, start, end) tuple; end is min_position+20, i.e. a fixed 20 bp stand-in read length.
    return pos.chromosome, pos.strand, pos.min_position, pos.min_position+20
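A minimal usage sketch, assuming Insertion_position from the mutant-analysis code referenced throughout this file is importable; Fake_HTSeq_genomic_pos is only named in a comment because its exact constructor signature is an assumption here, not confirmed API.

pos = Insertion_position('chromosome_1', '+', position_before=100, immutable=True)
chrom, strand, start, end = _make_pos(pos)
# min_position equals position_before for a 5'-side position, so this yields (..., 100, 120).
assert (chrom, strand, start, end) == ('chromosome_1', '+', 100, 120)
# fake = Fake_HTSeq_genomic_pos(chrom, strand, start, end)   # constructor signature assumed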
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __init__(self, chromosome, strand, full_position=None, position_before=None, position_after=None, immutable=False):\n # need to make instance mutable to be able to set anything, due to how __setattr__ is decorated\n self.make_mutable_REMEMBER_CLEANUP_FIRST() \n # now start setting attributes\n self.chromosome = chromosome\n self.strand = strand\n # parse full_position if provided\n if full_position is not None:\n if (position_before is not None) or (position_after is not None):\n raise ValueError(\"If providing full_position, cannot also provide position_before/position_after!\")\n self.position_before, self.position_after = self._parse_full_position(full_position)\n # otherwise use position_before and/or position_after\n else:\n if position_before is None and position_after is None:\n raise ValueError(\"Can't create an Insertion_position object with no known position values!\")\n try:\n self.position_before = None if position_before is None else int(position_before)\n self.position_after = None if position_after is None else int(position_after)\n except TypeError: \n raise ValueError(\"position_before/position_after must be int-castable or None!\")\n if immutable: self.make_immutable()",
"def setPosition(*args):",
"def setPosition(*args):",
"def setPosition(*args):",
"def setPosition(*args):",
"def setPosition(*args):",
"def setPosition(*args):",
"def setPosition(*args):",
"def setPosition(*args):",
"def setPosition(*args):",
"def setPosition(*args):",
"def setPosition(*args):",
"def getPosition(*args):",
"def getPosition(*args):",
"def getPosition(*args):",
"def getPosition(*args):",
"def getPosition(*args):",
"def getPosition(*args):",
"def getPosition(*args):",
"def getPosition(*args):",
"def getPosition(*args):",
"def getPosition(*args):",
"def getPosition(*args):",
"def __repr__(self):\n return \"Insertion_position('%s', '%s', full_position='%s', immutable=%s)\"%(self.chromosome, self.strand, \n self.full_position, self.immutable)",
"def insert(self, position, insert):\n assert all(new in self.ALPHABET for new in insert)\n if position < 1 or position - 1 > len(self.sequence):\n raise ValueError(f\"Insertion position {position} out of bonds for given sequence.\")\n self.sequence = f\"{self.sequence[: position - 1]}{insert}{self.sequence[position:]}\"\n if \"mutations\" in self.metadata.keys():\n self.metadata[\"mutations\"] += f\" ins{position}{insert}\"\n else:\n self.metadata[\"mutations\"] = f\"ins{position}{insert}\"",
"def HTSeq_pos_to_tuple(HTSeq_pos):\n try:\n chrom = HTSeq_pos.chrom\n except AttributeError:\n raise MutantError(\"Invalid position %s! Need an HTSeq iv object. (If empty, maybe read wasn't aligned?)\"%(HTSeq_pos,))\n strand = HTSeq_pos.strand\n # HTSeq is 0-based and I want 1-based, thus the +1; end has no +1 because in HTSeq end is the base AFTER the alignment.\n start_pos = HTSeq_pos.start+1\n end_pos = HTSeq_pos.end\n output_pos = (chrom, start_pos, end_pos, strand)\n check_valid_position_tuple(output_pos)\n return output_pos",
"def create(self, pos):\n self.pos = pos",
"def get_position(self, position):",
"def _make_args(self, args, defaults=[], vararg=None, kwonlyargs=[],\n kw_defaults=[], kwarg=None):\n # On Python 2 convert vararg and kwarg to raw name, raise error using\n # lineno stored on the node and lexer from self.\n # On Python 3.3 extract name and annotation\n # After should be straight forward\n raise NotImplementedError()",
"def make_position(data) -> Position:\n return (data[\"x\"], data[\"y\"])"
] |
[
"0.5913934",
"0.5540507",
"0.5540507",
"0.5540507",
"0.5540507",
"0.5540507",
"0.5540507",
"0.5540507",
"0.5540507",
"0.5540507",
"0.5540507",
"0.5540507",
"0.55009013",
"0.55009013",
"0.55009013",
"0.55009013",
"0.55009013",
"0.55009013",
"0.55009013",
"0.55009013",
"0.55009013",
"0.55009013",
"0.55009013",
"0.521999",
"0.5194045",
"0.5162728",
"0.51096994",
"0.5108628",
"0.5108053",
"0.51015997"
] |
0.5741503
|
1
|
Helper method to quickly make a dataset based on a string of mutant positions/readcounts. Comma-separated mutants; in each, the first word is the position and the second is the readcount, e.g. "+100 5, A100 10/10, cassette+400 1". Position is chromosome+strand+minpos, with the chromosome part optional (the full format is more complicated). If raw_chrom_names, just take the chromosome name as given; otherwise prepend 'chromosome_' to what is given, or just use 'chromosome_1' if the chromosome string is empty. If readcount is a single number, it's the total read count; if it's two numbers, it's total/perfect.
|
def _make_test_mutant_dataset(positions_and_readcounts_string, raw_chrom_names=False):
dataset = Insertional_mutant_pool_dataset()
if not positions_and_readcounts_string:
return dataset
for N, string in enumerate(positions_and_readcounts_string.split(', ')):
raw_pos, readcount = string.split(' ')
if '/' in readcount: readcount, perfect = [int(x) for x in readcount.split('/')]
else: readcount = perfect = int(readcount)
assert readcount >= perfect, "In mutant string %s, perfect readcount is over total - not allowed!"%string
if '+' in raw_pos: strand = '+'
elif '-' in raw_pos: strand = '-'
else: raise Exception("Short-position %s has no strand!"%raw_pos)
chrom, pos = raw_pos.split(strand)
pos = int(pos)
if not raw_chrom_names:
if chrom: chrom = 'chromosome_%s'%chrom
else: chrom = 'chromosome_1'
elif not chrom:
            raise Exception("Short-position %s has no chromosome name - can't use with raw_chrom_names!"%raw_pos)
full_pos = Insertion_position(chrom, strand, position_before=pos, immutable=True)
mutant = Insertional_mutant(IB=str(N), insertion_position=full_pos)
mutant.total_read_count = readcount
mutant.perfect_read_count = perfect
dataset.add_mutant(mutant)
return dataset
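A hedged usage sketch, assuming Insertional_mutant_pool_dataset and Insertional_mutant from the surrounding test module are importable and that the dataset iterates over its mutants (as the related find_genes_for_mutants code does). The middle entry below uses an explicit '-' strand, since the parser requires a '+' or '-' in every short position.

dataset = _make_test_mutant_dataset("+100 5, A-100 10/10, cassette+400 1")
for mutant in dataset:
    print(mutant.position, mutant.total_read_count, mutant.perfect_read_count)
# With raw_chrom_names=False the chromosomes become chromosome_1, chromosome_A and chromosome_cassette;
# the readcounts are 5/5, 10/10 and 1/1 (total/perfect), per the parsing rules in the docstring above.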
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def findMutations(trimmed_consensus, trimmed_mutant, counting_direction):\n\tmutations = \"\"\n\tcount = 0 \n\tif counting_direction == \"r\":\n\t\ttrimmed_consensus = invertString(trimmed_consensus)\n\t\ttrimmed_mutant = invertString(trimmed_mutant)\n\tfor i in range(len(trimmed_consensus)):\n\t\tconsensus = trimmed_consensus[i]\n\t\tmutant = trimmed_mutant[i]\n\t\tif mutant != consensus:\n\t\t\tcount += 1\n\t\t\t# Currently will count first base as 'base 1'\n\t\t\tmutations = mutations + str(i+1) + mutant + \":\"\n\tif count > len(trimmed_consensus)/2:\n\t\treturn \"UPPER_LIM\", float('nan')\n\telse:\n\t\t# Trim off the last ':'\n\t\treturn mutations[:-1], count",
"def parse_mutants(self, mutant_names=None, mutation_number=None):\n if not mutant_names and not mutation_number:\n raise ValueError('Please specify either mutant_names or mutation_number')\n elif not mutant_names:\n mutant_names = [f\"Mutant {elt4}\" for elt4 in range(1, 2 ** mutation_number + 1)]\n elif not mutation_number:\n mutation_number = len(mutant_names)\n else:\n assert len(mutant_names) == 2 ** mutation_number, f'There are {len(mutant_names)} names, but was expecting {2 ** mutation_number}'\n return mutant_names, 2 ** mutation_number",
"def test_fastq_read_length():\n cluster = clust.Clustering.from_fastq(TMP + 'simple.fastq', 4, 'ACGT',\n threshold=0, prefix=1, read_length=25)\n uid1_expect = 'AAAACCCC'\n uid2_expect = 'CCCCAAAA'\n seq1_expect = 'ACCTCTCCCTGTGGGTCATGTGACT'\n seq2_expect = 'TTGTTTGAAAAACCTCGAAAGTAAC'\n\n assert uid1_expect in cluster, \"%r not in %r\" % (uid1_expect, list(cluster.keys()))\n assert uid2_expect in cluster, \"%r not in %r\" % (uid2_expect, list(cluster.keys()))\n assert cluster[uid1_expect].sequence.sequence == seq1_expect, \\\n \"%r != %r\" % (cluster[uid1_expect].sequence.sequence, seq1_expect)\n assert cluster[uid2_expect].sequence.sequence == seq2_expect, \\\n \"%r != %r\" % (cluster[uid2_expect].sequence.sequence, seq2_expect)",
"def __init__(self, dataset, cassette_end, relative_read_direction, dataset_name=None):\n # make sure the arguments are valid values\n if not cassette_end in SEQ_ENDS+['?']: \n raise ValueError(\"The cassette_end variable must be one of %s or '?'!\"%SEQ_ENDS)\n if relative_read_direction not in RELATIVE_READ_DIRECTIONS+['?']: \n raise ValueError(\"The relative_read_direction variable must be %s, or '?'!\"%(', '.join(RELATIVE_READ_DIRECTIONS)))\n # reference to the containing dataset (for read-counting purposes etc), \n # and the dataset name (None if it's a single dataset, string for multi-datasets)\n self.dataset_name = dataset_name\n self.dataset = dataset\n # information on reads that aren't included in the dataset mutants - None or 0 by default\n # TODO I should really go over this and figure out what should be None and what should be 0 and why!!\n self.discarded_read_count, self.discarded_wrong_start, self.discarded_no_cassette = None, None, None\n self.discarded_other_end = 0\n self.non_aligned_read_count, self.unaligned, self.multiple_aligned = 0, 0, 0\n self.ignored_region_read_counts = defaultdict(int)\n # MAYBE-TODO should cassette_end and relative_read_direction be specified for the whole dataset, or just for each set of data added, in add_RISCC_alignment_files_to_data? The only real issue with this would be that then I wouldn't be able to print this information in the summary - or I'd have to keep track of what the value was for each alignment reader added and print that in the summary if it's a single value, or 'varied' if it's different values. Might also want to keep track of how many alignment readers were involved, and print THAT in the summary! Or even print each (infile_name, cassette_end, relative_read_direction) tuple as a separate line in the header.\n self.cassette_end = cassette_end\n self.relative_read_direction = relative_read_direction",
"def base_composition(reads, base):\n assert base.upper() in set(\"ACGT\")\n\n \"\"\" Reports nucelotide frequencies at each position in the\n sam sequences\n \"\"\"\n # DNA_Alphabet=[\"A\",\"C\",\"T\",\"G\",\"N\"]\n all_nucs = []\n for read in reads:\n nucs = {} # Dictionary to store nucleotide data.\n seq = read[9]\n for i in range(0, len(seq)):\n nucs[str(i + 1)] = seq[i]\n all_nucs.append(nucs)\n all_items = []\n counts = []\n for dicts in all_nucs:\n for item in dicts.items():\n all_items.append(item)\n all_items.sort(key=operator.itemgetter(0))\n groups = [map(operator.itemgetter(1), list(group))\n for key, group in itertools.groupby(\n all_items, operator.itemgetter(0))]\n for group in groups:\n counts.append(group.count(base))\n\n pos = range(1, len(seq) + 1)\n\n # Create plot.\n plt.figure(1, figsize=(8, 8))\n plt.axes([0.1, 0.1, 0.8, 0.8])\n plt.bar(pos, counts, facecolor='g')\n plt.xlabel(\"Position\")\n plt.ylabel(\"number of mapped reads\")\n plt.title(base)\n plt.show()",
"def parse_sam(rows):\n row1, row2 = rows\n mseqs = {}\n failed_list = []\n insert_list = []\n rname = row1['rname']\n qname = row1['qname']\n cigar1 = row1['cigar']\n cigar2 = row2['cigar']\n\n # filtering criteria\n reason = None\n if cigar1 == '*':\n reason = 'R1 unmapped'\n if int(row1['mapq']) < read_mapping_cutoff:\n reason = 'R1 low mapq'\n\n if cigar2 == '*':\n reason = 'R2 unmapped'\n if int(row2['mapq']) < read_mapping_cutoff:\n reason = 'R2 low mapq'\n\n genotype1, genotype2 = None, None\n try:\n genotype1 = row1['rname'].split('-')[1][0]\n genotype2 = row2['rname'].split('-')[1][0]\n except:\n reason = 'discordant map'\n pass\n\n if genotype1 != genotype2:\n reason = 'map conflict'\n\n if reason:\n failed_list.append({'qname': qname,\n 'rname1': row1['rname'],\n 'rname2': row2['rname'],\n 'reason': reason})\n else:\n pos1 = int(row1['pos'])-1 # convert 1-index to 0-index\n _, seq1, qual1, inserts = apply_cigar(cigar1, row1['seq'], row1['qual'])\n \n # report insertions relative to sample consensus\n for left, (iseq, iqual) in inserts.iteritems():\n insert_list.append({'qname': qname,\n 'fwd_rev': 'F' if is_first_read(row1['flag']) else 'R',\n 'refname': rname,\n 'pos': pos1+left,\n 'insert': iseq,\n 'qual': iqual})\n \n seq1 = '-'*pos1 + seq1 # pad sequence on left\n qual1 = '!'*pos1 + qual1 # assign lowest quality to gap prefix so it does not override mate\n \n \n # now process the mate\n pos2 = int(row2['pos'])-1 # convert 1-index to 0-index\n _, seq2, qual2, inserts = apply_cigar(cigar2, row2['seq'], row2['qual'])\n for left, (iseq, iqual) in inserts.iteritems():\n insert_list.append({'qname': qname,\n 'fwd_rev': 'F' if is_first_read(row2['flag']) else 'R',\n 'refname': rname,\n 'pos': pos2+left,\n 'insert': iseq,\n 'qual': iqual})\n seq2 = '-'*pos2 + seq2\n qual2 = '!'*pos2 + qual2\n \n # merge reads\n for qcut in sam2aln_q_cutoffs:\n mseq = merge_pairs(seq1, seq2, qual1, qual2, qcut)\n prop_N = mseq.count('N') / float(len(mseq.strip('-')))\n if prop_N > max_prop_N:\n # fail read pair\n failed_list.append({'qname': qname,\n 'reason': 'merge failure'})\n continue\n mseqs[qcut] = mseq\n\n return rname, mseqs, insert_list, failed_list",
"def _most_common_mutants_info(self, dataset=None):\n summ = self._get_summary(dataset)\n most_common_mutants = summ.most_common_mutants\n m = most_common_mutants[0]\n # calculate the fraction of total reads per mutant, assuming each mutant has the same readcount\n assert len(set([m.read_info(dataset).total_read_count for m in most_common_mutants])) == 1\n readcount_info = value_and_percentages(m.read_info(dataset).total_read_count, [summ.aligned_read_count])\n if len(most_common_mutants) == 1: return \"%s (%s)\"%(readcount_info, m.position)\n else: return \"%s (%s mutants)\"%(readcount_info, len(most_common_mutants))",
"def test__init(self):\n for chromosome in ['chr1', 'chromosome_2', 'chrom3', 'a', 'adfads', '100', 'scaffold_88']:\n for strand in ['+','-']:\n for position in [1,2,5,100,10000,4323423]:\n ins_pos_5prime = Insertion_position(chromosome,strand,position_before=position)\n ins_pos_3prime = Insertion_position(chromosome,strand,position_after=position)\n # test \"normal\" mutants - check all details, including position\n mutant_5prime = Insertional_mutant(insertion_position=ins_pos_5prime)\n mutant_3prime = Insertional_mutant(insertion_position=ins_pos_3prime)\n mutant_readcount_only = Insertional_mutant_readcount_only()\n mutant_multi_dataset = Insertional_mutant_multi_dataset(insertion_position=ins_pos_5prime)\n # test position details (only for the two \"normal\" mutants)\n assert mutant_5prime.position.min_position == position\n assert mutant_3prime.position.min_position == position-1\n assert mutant_5prime.position.max_position == position+1\n assert mutant_3prime.position.max_position == position\n assert mutant_5prime.position.full_position == \"%s-?\"%(position)\n assert mutant_3prime.position.full_position == \"?-%s\"%position\n # test non-readcount-related info for all mutants except mutant_readcount_only\n for mutant in [mutant_5prime, mutant_3prime, mutant_multi_dataset]:\n assert mutant.position.chromosome == chromosome\n assert mutant.position.strand == strand\n assert mutant.gene == SPECIAL_GENE_CODES.not_determined\n assert mutant.orientation == '?'\n assert mutant.gene_feature == '?'\n assert mutant.gene_distances == '?'\n # test readcount-related info for all mutants except mutant_multi_dataset\n for mutant in [mutant_5prime, mutant_3prime, mutant_readcount_only]:\n assert mutant.total_read_count == 0\n assert mutant.perfect_read_count == 0\n assert mutant.sequences_counts_positions_errors == {}\n # test readcount-related info for mutant_multi_dataset\n assert all([x.total_read_count == 0 for x in mutant_multi_dataset.by_dataset.values()])\n assert all([x.perfect_read_count == 0 for x in mutant_multi_dataset.by_dataset.values()])\n assert all([x.sequences_counts_positions_errors == {} for x in mutant_multi_dataset.by_dataset.values()])",
"def __parse_sample_name(self):\n pattern = '(.*)(P53)(XR|NT)(\\d+)([A-Z]?|Ctr)?.*'\n vals = re.findall(pattern, self.sample_name.replace('_', ''))[0]\n self.cell_type = vals[0]\n self.treatment_type = vals[2]\n self.treatment_time = vals[3]\n if vals[3]:\n self.treatment_repeat = vals[4]",
"def process(\n self,\n name_and_reads: Tuple[str, Iterable[reads_pb2.Read]],\n ) -> Iterable[Tuple[str, List[reads_pb2.Read]]]:\n\n name, reads = name_and_reads[0], list(name_and_reads[1])\n # Note, examples will only be included in one of the initial counters since\n # we are returning early.\n if not reads:\n self.no_reads_counter.inc()\n return\n\n # Do not error for labels that have multiple alignments to correct molecule.\n # One of the alignments may be a supplementary alignment.\n if self.is_label and len(reads) > 1:\n logging.info('Unexpected: %d labels for %s', len(reads),\n reads[0].fragment_name)\n self.multiple_alignments_counter.inc()\n\n reads_copy = copy.deepcopy(reads)\n for read in reads_copy:\n assert read.aligned_sequence\n base_index = 0\n expanded_sequence = ''\n expanded_cigar_str = ''\n new_cigar_ops = []\n if not self.is_label:\n pw = struct_utils.get_int_field(read.info, 'pw')\n ip = struct_utils.get_int_field(read.info, 'ip')\n new_pw = []\n new_ip = []\n\n for op in read.alignment.cigar:\n # Skip over ops we don't want, such as soft clips.\n if op.operation not in dc_constants.OPS_TO_CONSIDER:\n base_index += op.operation_length\n continue\n if op.operation in dc_constants.READ_ADVANCING_OPS:\n start = base_index\n end = start + op.operation_length\n expanded_sequence += read.aligned_sequence[start:end]\n base_index += op.operation_length\n if not self.is_label:\n new_pw += pw[start:end]\n new_ip += ip[start:end]\n else:\n # Add a special token in sequence where we have deletion.\n expanded_sequence += dc_constants.GAP_OR_PAD * op.operation_length\n\n new_cigar_ops.append(op)\n op_char = cigar_utils.CIGAR_OPS_TO_CHAR[op.operation]\n expanded_cigar_str += op_char * op.operation_length\n\n # Update the read sequence.\n read.aligned_sequence = expanded_sequence\n assert len(read.aligned_sequence) == len(expanded_cigar_str)\n\n # Update the read cigar to only include ops that were kept.\n del read.alignment.cigar[:]\n read.alignment.cigar.extend(new_cigar_ops)\n\n # Save pw, ip, and expanded cigar string to be used downstream.\n if not self.is_label:\n struct_utils.set_int_field(read.info, 'pw', new_pw)\n struct_utils.set_int_field(read.info, 'ip', new_ip)\n # PW and IP won't be the same length as read.aligned_sequence here\n # because we haven't yet spaced out PW and IP based on gaps/padding.\n struct_utils.set_string_field(read.info, 'expanded_cigar',\n expanded_cigar_str)\n yield name, reads_copy",
"def parse_sam(in_file, out_file, read_type , strand):\n out_handle = open(out_file , 'a')\n if strand == 'watson':\n nt = ['C']\n else:\n nt = ['G']\n count = 0\n # print 'Warning, only works for forward mapped reads'\n mismatch = 0\n clip_count_total = 0\n for line in open(in_file, 'r'):\n modulo_line_no = count % 2\n #alternates between 0 and 1\n if line.startswith('@'):\n continue\n split_line = line.rstrip('\\n').split('\\t')\n #skip read pairs with improper flags.\n #TODO: do this filtering in mark_PCR_duplicates or elsewhere with access to pysam.\n if split_line[1] not in ['0', '99', '147']:\n mismatch += 1\n count += 1\n # continue\n char_count = ''\n clip_count = 0\n for char in split_line[5]:\n if not char.isalpha():\n char_count += char\n elif char == 'S':\n clip_count += int(char_count)\n else:\n char_count = ''\n if clip_count > 6:\n clip_count_total += 1\n count += 1\n # continue\n header = split_line[0].split('|')\n #meth_post list can be present for both R1 and R2 the last Samtools tag added should be the RN:Z: tag, look\n #to the right of this tag only\n meth_pos_list = split_line[0][split_line[0].rindex(':Z:'):].split('|')[1:]\n out_line = [header[0]]\n out_line += split_line[1:9]\n seq = list(split_line[9])\n try:\n meth_pos = [int(n) for n in meth_pos_list[-modulo_line_no].split(',')]\n for n in meth_pos:\n if n >= len(seq):\n break\n if seq[n] not in ['T','A']:\n break\n seq[n] = nt[-modulo_line_no]\n except ValueError:\n pass\n out_line += [''.join(seq)]\n out_line += split_line[10:]\n for item in header[1:]:\n if ':' in item and item not in out_line:\n out_line.append(item)\n # out_line += header[3:6]\n out_handle.write('\\t'.join(out_line) + '\\n')\n count += 1\n print('%s mismatches out of %s' % (mismatch, count))\n print('%s reads out of %s soft clipped more than 5' % (clip_count_total, count))",
"def full_chromosomes(reader):\n for line in reader.header.get_lines(\"contig\"):\n if line.id in CHROMS:\n name = line.id\n length = line.length or 1_000_000_000\n yield \"{}:{}-{}\".format(name, 1, length)",
"def parse_position(chrom_pos: str):\n chrom, pos = chrom_pos.split('_')\n return chrom, int(pos)",
"def get_singles_counts(sname, seglen, mincounts):\n counts = defaultdict(int)\n with open(sname) as fin:\n infile = csv.DictReader(fin, delimiter='\\t')\n for line in infile:\n ints = int(line['interactions'])\n if ints < mincounts:\n continue\n r1_reg = (\n line['RNA1 chromosome'],int(int(line['Start of RNA1 first read'])/seglen)*seglen,\n line['RNA1 strand'])\n r2_reg = (\n line['RNA2 chromosome'],int(int(line['Start of RNA2 last read'])/seglen)*seglen,\n line['RNA2 strand'])\n counts[r1_reg] += ints\n counts[r2_reg] += ints\n return counts",
"def illumina_data_to_fastq(record_data, number_of_bases=None):\r\n seq_index = 8\r\n qual_index = 9\r\n pass_filter_index = 10\r\n\r\n try:\r\n pass_filter = int(record_data[pass_filter_index])\r\n except IndexError:\r\n pass_filter = 2\r\n\r\n if number_of_bases is None:\r\n seq = record_data[seq_index].replace('.', 'N')\r\n qual = record_data[qual_index]\r\n else:\r\n seq = record_data[seq_index][:number_of_bases].replace('.', 'N')\r\n qual = record_data[qual_index][:number_of_bases]\r\n\r\n header = '%s_%s:%s:%s:%s:%s#%s/%s' % (\r\n record_data[0],\r\n record_data[1],\r\n record_data[2],\r\n record_data[3],\r\n record_data[4],\r\n record_data[5],\r\n record_data[6],\r\n record_data[7])\r\n\r\n return '@%s\\n%s\\n+\\n%s' % (header, seq, qual), pass_filter",
"def find_genes_for_mutants(self, genome_version, genefile, detailed_features=True, include_RISCC_reads=False, \n nearest_genes_for_intergenic=False, N_run_groups=3, verbosity_level=1): \n if self.multi_dataset: raise MutantError(\"find_genes_for_mutants not implemented for multi-datasets!\")\n # MAYBE-TODO implement for multi-datasets? The actual gene-finding would be easy, since it'd just work on \n # multi-dataset mutants instead of single-dataset ones; adding stuff to summary would be harder.\n\n # Group all the mutants by chromosome, so that I can go over each chromosome in genefile separately\n # instead of reading in all the data at once (which uses a lot of memory)\n # Inclue both the main mutants, AND all the RISCC genome-side read sub-mutants if wanted.\n insertion_data_by_chromosome = defaultdict(list)\n for mutant in self:\n if mutant.position not in SPECIAL_POSITIONS.all_undefined:\n insertion_data_by_chromosome[mutant.position.chromosome].append(mutant)\n if include_RISCC_reads:\n for RISCC_read_data in mutant.RISCC_genome_side_aligned_reads.values():\n insertion_data_by_chromosome[RISCC_read_data[0].chromosome].append(RISCC_read_data)\n self._find_genes_for_list(insertion_data_by_chromosome, genome_version, genefile, \n detailed_features, nearest_genes_for_intergenic, N_run_groups, verbosity_level)",
"def test_fastq_map():\n cluster = clust.Clustering.from_fastq(TMP + 'map.fastq', 4, 'ACGT',\n threshold=2, prefix=1)\n uid1_expect = 'AAAACCCC'\n uid2_expect = 'CCCCAAAA'\n seq1_expect = 'ACCTCTCCCTGTGGGTCATGTGACT'\n seq2_expect = 'TTGTTTGAAAAACCTCGAAAGTAAC'\n\n assert uid1_expect in cluster, \"%r not in %r\" % (uid1_expect, list(cluster.keys()))\n assert uid2_expect in cluster, \"%r not in %r\" % (uid2_expect, list(cluster.keys()))\n assert cluster[uid1_expect].sequence.sequence == seq1_expect, \\\n \"%r != %r\" % (cluster[uid1_expect].sequence.sequence, seq1_expect)\n assert cluster[uid2_expect].sequence.sequence == seq2_expect, \\\n \"%r != %r\" % (cluster[uid2_expect].sequence.sequence, seq2_expect)\n assert cluster[uid1_expect].size == 5, \"%r != %r\" % (cluster[uid1_expect].size, 5)\n assert cluster[uid2_expect].size == 5, \"%r != %r\" % (cluster[uid2_expect].size, 5)",
"def get_read_subsets(filein, fileout, region_string, target, bool_array=None):\n\n if bool_array is None:\n bool_array = [True, True, False, False]\n bam = pysam.AlignmentFile(filein, 'rb')\n countM = 0 # fragments that span\n countN = 0 # fragments that don't span\n countL = 0 # fragments that don't span, on left\n countR = 0 # fragments that don't span, on right\n counter = 0 # count total number of reads\n fileM = fileout + \"_M.bam\"\n fileN = fileout + \"_N.bam\"\n fileL = fileout + \"_L.bam\"\n fileR = fileout + \"_R.bam\"\n readsM = pysam.AlignmentFile(fileM, \"wb\", template=bam)\n readsN = pysam.AlignmentFile(fileN, \"wb\", template=bam)\n readsL = pysam.AlignmentFile(fileL, \"wb\", template=bam)\n readsR = pysam.AlignmentFile(fileR, \"wb\", template=bam)\n for read1, read2 in read_pair_generator(bam, region_string):\n read = read_pair_align(read1, read2)\n if not read:\n continue\n counter += 1\n if read[0] < target < read[3]: # fragments that span\n countM += 1\n readsM.write(read1)\n readsM.write(read2)\n elif target + 5 >= read[0] >= target: # fragments that begin 5bp of cleavage site\n countR += 1\n readsR.write(read1)\n readsR.write(read2)\n countN += 1\n readsN.write(read1)\n readsN.write(read2)\n elif target - 5 <= read[-1] <= target: # fragments that end 5bp of cleavage site\n countL += 1\n readsL.write(read1)\n readsL.write(read2)\n countN += 1\n readsN.write(read1)\n readsN.write(read2)\n readsM.close()\n readsN.close()\n readsL.close()\n readsR.close()\n file_array = [fileM, fileN, fileL, fileR]\n for i, boo in enumerate(bool_array):\n file_i = file_array[i]\n if boo:\n pysam.sort(\"-o\", file_i, file_i)\n os.system(\"samtools index \" + file_i)\n else:\n os.system(\"rm \" + file_i)\n print(\"%i span | %i end/start | %i window total | %i mapped total\"\n % (countM, countN, counter, bam.mapped))\n bam.close()",
"def _assign_reads( medians, centroids ):\n log.info(\"Assigning subreads reads to the closet amplicon cluster\")\n assignments = {'5p':set(), '3p':set()}\n five_prime, three_prime = centroids\n for read, median in medians.iteritems():\n five_prime_diff = abs(median - five_prime)\n three_prime_diff = abs(median - three_prime)\n if five_prime_diff < three_prime_diff:\n assignments['5p'].add( read )\n else:\n assignments['3p'].add( read )\n return assignments",
"def process(self,\n read: reads_pb2.Read) -> Iterable[Tuple[str, reads_pb2.Read]]:\n pacbio_molecule_name = preprocess_utils.get_pacbio_molecule_name(\n read.fragment_name)\n if pacbio_molecule_name is not None:\n yield pacbio_molecule_name, read\n else:\n raise ValueError(str(read))",
"def proc_dataset_v1(write=False):\n \n path = load_config()\n M = pd.read_csv(path['metadata_file'])\n T = pd.read_csv(path['rna_file'])\n mCH = pd.read_csv(path['mCH_file'])\n CH = pd.read_csv(path['CH_file'])\n\n def format_df(df):\n \"\"\"The inputs are genes x cells. Transpose data and rename columns\"\"\"\n df = df.transpose()\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop('gene', inplace=True)\n df.index.rename('sample_id', inplace=True)\n return df\n\n T = format_df(T)\n mCH = format_df(mCH)\n CH = format_df(CH)\n\n #Update metadata\n M = pd.read_csv(path['metadata_file'])\n M.rename(columns={'Unnamed: 0': 'sample_id'}, inplace=True)\n M.set_index(keys='sample_id', drop=True, inplace=True)\n\n #Sort cells by metadata\n sorted_index = M.sort_values(by='SubClusterAnno').index\n M = M.loc[sorted_index]\n T = T.loc[sorted_index]\n mCH = mCH.loc[sorted_index]\n CH = CH.loc[sorted_index]\n\n assert np.array_equal(CH.columns, mCH.columns), \"Genes are not in the same order\"\n assert np.array_equal(T.columns, mCH.columns), \"Genes are not in the same order\"\n assert M.index.equals(T.index), \"Cells are not in the same order\"\n assert M.index.equals(CH.index), \"Cells are not in the same order\"\n assert M.index.equals(mCH.index), \"Cells are not in the same order\"\n\n # CPM-normalize counts\n X = T.values.astype(np.float32)\n X = (X/np.sum(X, axis=1, keepdims=True))*1e6\n X = np.log1p(X)\n T = pd.DataFrame(data=X, index=T.index, columns=T.columns)\n print('Completed T processing')\n\n # For methylation data\n X = CH.values.astype(float)\n Y = mCH.values.astype(float)\n Z = np.log1p(Y) - np.log1p(X)\n E = pd.DataFrame(data=Z, index=mCH.index, columns=mCH.columns)\n print('Completed E processing')\n\n # select genes based on variance in log normalized CPM values in T\n def calc_highvar_genes(df):\n vars = np.var(df.values, axis=0)\n order_vars = np.argsort(-vars) # descending order\n sorted_highvar_genes = df.columns.values[order_vars]\n return sorted_highvar_genes\n\n data = {'sorted_highvar_T_genes': calc_highvar_genes(T),\n 'sorted_highvar_E_genes': calc_highvar_genes(E)}\n print('Completed finding high variance features')\n\n if write:\n feather.write_dataframe(T, path['data_dir'] / 'T_dat.feather')\n feather.write_dataframe(E, path['data_dir'] / 'E_dat.feather')\n feather.write_dataframe(M, path['data_dir'] / 'Meta.feather')\n sio.savemat(path['data_dir'] / 'highvar_genes.mat', data)\n return T, E, M, data",
"def process(cline, rline, file1, file2, file3, library_sizes):\n cparts = cline.split(\"\\t\")\n rparts = rline.split(\"\\t\")\n\n # confirm that the two lines being processed are for the same locus\n assert(cparts[0] == rparts[0] and cparts[1] == rparts[1])\n\n # split first column (locus) into three columns containing its\n # consituent parts (chromosome, start base, and end base)\n chr = rparts[0].split(\":\")[0]\n start = rparts[0].split(\":\")[1].split(\"-\")[0]\n end = rparts[0].split(\":\")[1].split(\"-\")[1]\n\n line1 = [chr, start, end] + rparts[1:] + cparts[2:] # counts in reads\n line2 = [chr, start, end] + rparts[1:] + [cparts[2]] # counts in rpm\n line3 = [chr, start, end] + rparts[1:] + [cparts[2]] # counts in rpkm\n\n gene_length = int(rparts[2])\n\n for i in range(3, len(cparts)):\n\n index = i - 3\n lib_size = library_sizes[index][1]\n\n mapped_reads = int(cparts[i])\n\n if lib_size == 0: # Prevent DIVBYZERO error\n rpm = 0\n rpkm = 0\n elif gene_length == 0:\n rpkm = 0\n else:\n rpm = ((mapped_reads * (10 ** 6)) / lib_size)\n rpkm = ((mapped_reads * (10 ** 9)) / (lib_size * gene_length))\n\n line2 += [str(rpm)]\n line3 += [str(rpkm)]\n\n out1 = \"\\t\".join(line1) + \"\\n\"\n out2 = \"\\t\".join(line2) + \"\\n\"\n out3 = \"\\t\".join(line3) + \"\\n\"\n\n file1.write(out1)\n file2.write(out2)\n file3.write(out3)",
"def count_unique_mirbase_reads(bam, counts_file):\n count_ref_hits(bam, counts_file)",
"def variants_in_read(bamrecord, counter, mqmin=20, bqmin=20):\n \n if not bamrecord.cigar:\n return 0, ()\n\n for op, count in bamrecord.cigar:\n if op:\n return 0, ()\n\n if (bamrecord.is_duplicate or bamrecord.is_unmapped or\n bamrecord.is_qcfail or bamrecord.is_secondary or\n bamrecord.mapq < mqmin):\n return 0, ()\n if not bamrecord.is_proper_pair:\n return 0, ()\n \n try:\n md = bamrecord.opt('MD')\n except KeyError:\n return 0, ()\n if md.isdigit(): # read is reference\n for c, q in izip(bamrecord.seq, bamrecord.query_qualities):\n if q >= bqmin:\n counter[c] += 1\n return 1, ()\n\n qref = get_ref(bamrecord.seq, md)\n qlen = len(bamrecord.seq) - 1\n start = bamrecord.pos\n variants = []\n for idx, (r, a, q) in enumerate(izip(qref, bamrecord.seq,\n bamrecord.query_qualities)):\n if q >= bqmin:\n counter[r] += 1\n if r != a:\n if idx > 0:\n prev = qref[idx - 1]\n else:\n prev = 'N'\n if idx < qlen:\n next = qref[idx + 1]\n else:\n next = 'N'\n variants.append((start + idx, r, a, prev, next))\n\n return 1, variants",
"def parse_reads_and_select_candidates(self, reads):\n st_time = time.time()\n # read_id_list = []\n total_reads = 0\n read_unique_id = 0\n for read in reads:\n # check if the read is usable\n if read.mapping_quality >= DEFAULT_MIN_MAP_QUALITY and read.is_secondary is False \\\n and read.is_supplementary is False and read.is_unmapped is False and read.is_qcfail is False:\n\n read.query_name = read.query_name + '_' + str(read_unique_id)\n if self.find_read_candidates(read=read):\n # read_id_list.append(read.query_name)\n total_reads += 1\n read_unique_id += 1\n\n if total_reads == 0:\n return []\n\n selected_allele_list = []\n postprocess_read_id_list = set()\n for pos in self.positional_allele_dictionary:\n if pos < self.region_start_position or pos > self.region_end_position:\n continue\n ref = self.reference_dictionary[pos]\n\n all_allele_dictionary = self.positional_allele_dictionary[pos]\n all_mismatch_count = 0\n for allele in all_allele_dictionary:\n all_mismatch_count += all_allele_dictionary[allele]\n\n # pick the top 2 most frequent allele\n allele_frequency_list = list(sorted(all_allele_dictionary.items(), key=operator.itemgetter(1, 0),\n reverse=True))[:PLOIDY]\n allele_list = self._filter_alleles(pos, allele_frequency_list)\n alt1 = allele_list[0] if len(allele_list) >= 1 else None\n alt2 = allele_list[1] if len(allele_list) >= 2 else '.'\n if alt1 is None:\n continue\n mq_rms = round(math.sqrt(self.rms_mq[pos]/self.coverage[pos]), 3) if self.coverage[pos] > 0 else 0\n dp = self.coverage[pos]\n ref_count = self.coverage[pos] - all_mismatch_count\n candidate_record = [self.chromosome_name] + self._get_record(pos, alt1, alt2, ref, ref_count) + [mq_rms] + [dp]\n postprocess_read_id_list.update(self.read_id_by_position[pos])\n selected_allele_list.append(candidate_record)\n\n postprocess_read_id_list = list(postprocess_read_id_list)\n if len(selected_allele_list) > 0:\n self.postprocess_reference()\n self.postprocess_reads(postprocess_read_id_list)\n\n return selected_allele_list",
"def genotype_measure(path_dataset, snp_ids=None, gene_names=None,\n subject_ids=None, count_A1=True, path_log=None,\n timeout=10, nb_tries=3, metagen_url=DEFAULT_METAGEN_URL):\n if gene_names is not None:\n metagen_snps_of_gene = metagen_get_snps_of_genes(\n gene_names=gene_names,\n metagen_url=metagen_url,\n timeout=timeout,\n nb_tries=nb_tries)\n metagen_snp_ids = [snp.rs_id for snps in metagen_snps_of_gene.values()\n for snp in snps]\n if len(metagen_snp_ids) == 0:\n raise ValueError(\"Metagen returned 0 snp for the requested genes.\")\n else:\n snp_ids = (snp_ids or []) + metagen_snp_ids\n else:\n metagen_snps_of_gene = None\n\n # Load the genotypes\n dataframe = load_plink_bed_bim_fam_dataset(path_dataset=path_dataset,\n snp_ids=snp_ids,\n subject_ids=subject_ids,\n count_A1=count_A1)\n return dataframe, metagen_snps_of_gene",
"def test_fastq_simple():\n cluster = clust.Clustering.from_fastq(TMP + 'simple.fastq', 4, 'ACGT',\n threshold=0, prefix=1)\n uid1_expect = 'AAAACCCC'\n uid2_expect = 'CCCCAAAA'\n seq1_expect = 'ACCTCTCCCTGTGGGTCATGTGACT'\n seq2_expect = 'TTGTTTGAAAAACCTCGAAAGTAAC'\n\n assert uid1_expect in cluster, \"%r not in %r\" % (uid1_expect, list(cluster.keys()))\n assert uid2_expect in cluster, \"%r not in %r\" % (uid2_expect, list(cluster.keys()))\n assert cluster[uid1_expect].sequence.sequence == seq1_expect, \\\n \"%r != %r\" % (cluster[uid1_expect].sequence.sequence, seq1_expect)\n assert cluster[uid2_expect].sequence.sequence == seq2_expect, \\\n \"%r != %r\" % (cluster[uid2_expect].sequence.sequence, seq2_expect)",
"def multiple_chromosome_set_generator(random, args):\n keys = args.get('keys')\n candidate = MultipleChromosomeGenome(keys=keys)\n for key in keys:\n key_args = {\n 'representation': args.get(\"%s_representation\" % key),\n 'max_size': args.get(\"%s_max_size\" % key),\n 'variable_size': args.get('variable_size')\n }\n candidate[key] = set_generator(random, key_args)\n\n return candidate",
"def proc_dataset_v2(write=False):\n \n path = load_config()\n M = pd.read_csv(path['metadata_file'])\n T = pd.read_csv(path['rna_file'])\n mCH = pd.read_csv(path['mCH_file'])\n CH = pd.read_csv(path['CH_file'])\n\n def format_df(df):\n \"\"\"The inputs are genes x cells. Transpose data and rename columns\"\"\"\n df = df.transpose()\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop('gene', inplace=True)\n df.index.rename('sample_id', inplace=True)\n return df\n\n T = format_df(T)\n mCH = format_df(mCH)\n CH = format_df(CH)\n\n #Update metadata\n M = pd.read_csv(path['metadata_file'])\n M.rename(columns={'Unnamed: 0': 'sample_id'}, inplace=True)\n M.set_index(keys='sample_id', drop=True, inplace=True)\n\n #Sort cells by metadata\n sorted_index = M.sort_values(by='SubClusterAnno').index\n M = M.loc[sorted_index]\n T = T.loc[sorted_index]\n mCH = mCH.loc[sorted_index]\n CH = CH.loc[sorted_index]\n\n assert np.array_equal(CH.columns, mCH.columns), \"Genes are not in the same order\"\n assert np.array_equal(T.columns, mCH.columns), \"Genes are not in the same order\"\n assert M.index.equals(T.index), \"Cells are not in the same order\"\n assert M.index.equals(CH.index), \"Cells are not in the same order\"\n assert M.index.equals(mCH.index), \"Cells are not in the same order\"\n\n # CPM-normalize counts\n X = T.values.astype(np.float32)\n X = (X/np.sum(X, axis=1, keepdims=True))*1e6\n X = np.log1p(X)\n T = pd.DataFrame(data=X, index=T.index, columns=T.columns)\n print('Completed T processing')\n\n # For methylation data\n X = CH.values.astype(float)\n Y = mCH.values.astype(float)\n Z = np.divide(Y,X+1e-10)\n E = pd.DataFrame(data=Z, index=mCH.index, columns=mCH.columns)\n print('Completed E processing')\n\n # select genes based on variance in log normalized CPM values in T\n def calc_highvar_genes(df):\n vars = np.var(df.values, axis=0)\n order_vars = np.argsort(-vars) # descending order\n sorted_highvar_genes = df.columns.values[order_vars]\n return sorted_highvar_genes\n\n data = {'sorted_highvar_T_genes': calc_highvar_genes(T),\n 'sorted_highvar_E_genes': calc_highvar_genes(E)}\n print('Completed finding high variance features')\n\n if write:\n feather.write_dataframe(T, path['data_dir'] / 'T_dat_v2.feather')\n feather.write_dataframe(E, path['data_dir'] / 'E_dat_v2.feather')\n feather.write_dataframe(M, path['data_dir'] / 'Meta_v2.feather')\n sio.savemat(path['data_dir'] / 'highvar_genes_v2.mat', data)\n return T, E, M, data",
"def populate(infile, stype, Nmax, Sratio):\n seqnum = 0\n in_hdl = open(infile, 'rU')\n for i, rec in enumerate(seq_iter(in_hdl, stype)):\n head, seq, qual = split_rec(rec, stype)\n if Sratio < random.random():\n continue\n seqnum += 1\n for i in range(0, min(len(seq), Nmax)):\n if seq[i] == \"A\":\n a[i] += 1\n elif seq[i] == \"C\":\n c[i] += 1\n elif seq[i] == \"G\":\n g[i] += 1\n elif seq[i] == \"T\":\n t[i] += 1\n elif seq[i] == \"N\":\n n[i] += 1\n in_hdl.close()\n return seqnum"
] |
[
"0.49266934",
"0.48673043",
"0.48296267",
"0.4801651",
"0.47721255",
"0.47520983",
"0.4717786",
"0.46513838",
"0.4617326",
"0.45733854",
"0.4565814",
"0.4553901",
"0.45357215",
"0.4524307",
"0.45105764",
"0.44691744",
"0.44661796",
"0.44487408",
"0.44387537",
"0.44363236",
"0.4435886",
"0.44354016",
"0.44209352",
"0.44094077",
"0.44019946",
"0.43979898",
"0.43889323",
"0.4377381",
"0.43408558",
"0.43364182"
] |
0.7758807
|
0
|
Want to make sure that any changes I make still allow the datasets to be pickled/unpickled correctly, since some stuff like lambdas and __slots__ etc. interferes with that.
|
def test__pickle_unpickle(self):
pass
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_pickle():\r\n M = Module()\r\n M.x = (T.dmatrix())\r\n M.y = (T.dmatrix())\r\n a = T.dmatrix()\r\n M.f = Method([a], a + M.x + M.y)\r\n M.g = Method([a], a * M.x * M.y)\r\n\r\n mode = get_mode()\r\n m = M.make(x=numpy.zeros((4,5)), y=numpy.ones((2,3)), mode=mode)\r\n\r\n m_dup = cPickle.loads(cPickle.dumps(m, protocol=-1))\r\n\r\n assert numpy.all(m.x == m_dup.x) and numpy.all(m.y == m_dup.y)\r\n\r\n m_dup.x[0,0] = 3.142\r\n assert m_dup.f.input_storage[1].data[0,0] == 3.142\r\n assert m.x[0,0] == 0.0 #ensure that m is not aliased to m_dup\r\n\r\n #check that the unpickled version has the same argument/property aliasing\r\n assert m_dup.x is m_dup.f.input_storage[1].data\r\n assert m_dup.y is m_dup.f.input_storage[2].data\r\n assert m_dup.x is m_dup.g.input_storage[1].data\r\n assert m_dup.y is m_dup.g.input_storage[2].data",
"def detect_serialized_datasets(self):\n prepared_data_dir = str(utils.prepared_data_folder / self.dir_str / self.period)\n os.makedirs(prepared_data_dir, exist_ok=True)\n self.prepared_data_dir = prepared_data_dir\n print(f'Looking for pickles in {self.prepared_data_dir}')\n\n if len(utils.find('*serialized.pkl', self.prepared_data_dir)) == 2:\n print('This domain-period combination has been serialized before, loading objects...')\n for pkl in utils.find('*.pkl', self.prepared_data_dir):\n if \"input_ds\" in pkl: self.input_ds_serialized_path = pkl\n elif \"rf_ds\" in pkl: self.rf_ds_serialized_path = pkl\n else: \n print('Proceeding to load & serialize raw data. ')\n self.raw_input_dir = prepare.get_raw_input_data(self)\n self.raw_rf_dir = prepare.get_raw_target_data(self)\n print(f'Raw input datasets taken from @: \\n{self.raw_input_dir}')\n print(f'Raw rainfall datasets taken from @: \\n{self.raw_rf_dir}')\n self.input_ds_serialized_path, self.rf_ds_serialized_path = prepare.prepare_dataset(self, self.prepared_data_dir)\n print(f'Serialized raw input datasets @: \\n{self.input_ds_serialized_path}')\n print(f'Serialized raw RF datasets @: \\n{self.rf_ds_serialized_path}')",
"def test_pickle(self):\n X,Y,Z = self.generate_data(nrows=200)\n task = mmSCHPOLY()\n task.fit(X,Y,Z)\n with tempfile.TemporaryFile(mode='w+b') as tf:\n cPickle.dump(task, tf)\n\n X,Y,Z = self.generate_data(nrows=200)\n task = mmSCH2W()\n task.fit(X,Y,Z)\n with tempfile.TemporaryFile(mode='w+b') as tf:\n cPickle.dump(task, tf)",
"def configure():\n Vector.__getstate__ = partialmethod(pickling.getstate, add_dict=False)\n Vector.__setstate__ = partialmethod(pickling.setstate, add_dict=False)",
"def test_pickle(self):\n X = self.generate_X()\n task = mmRDTR()\n task.fit(X)\n with tempfile.TemporaryFile(mode='w+b') as tf:\n cPickle.dump(task, tf)",
"def __getstate__(self):\n # construct a list of unpicklable entties and exclude them from pickling\n nope = ['_divisionClassifier', '_assembledObjects']\n d = dict((key, val) for key, val in self.__dict__.items() if key not in nope) # deepcopy needed\n return d",
"def __reduce_ex__(self, protocol):\n return (_safe_pickle_load, (self.__module__, self.__class__.__name__, self.name))",
"def test_pickle_save(self):\n l = [1, 2, 3, 4, 5]\n self.plugin.save_data(l)",
"def _deserialiseLightweight(self,unpickled):\n if self.sampleid != unpickled['sampleId']:\n raise RuntimeError('sampleids do not match: '+self.sampleid+' '+unpickled['sampleId'])\n if self.condition != unpickled['condition']:\n raise RuntimeError('conditions do not match: '+self.condition+' '+unpickled['condition'])\n if self.wellids != unpickled['wellIds']:\n raise RuntimeError('wellids do not match: '+self.wellids+' '+unpickled['wellIds'])\n if self._wellIndices != unpickled['wellIndices']:\n raise RuntimeError('wellIndices do not match: '+self._wellIndices+' '+unpickled['wellIndices'])\n self._activeWellIndices=unpickled['activeWellIndices']",
"def test_pickling_tensors(free_alg):\n\n dr = free_alg\n p = dr.names\n x = IndexedBase('x')\n v = Vec('v')\n b = Vec('b')\n\n tensor = dr.einst(x[p.i] * v[p.i])\n def_ = dr.define(b, tensor)\n serialized = pickle.dumps([tensor, def_])\n\n with pytest.raises(ValueError):\n pickle.loads(serialized)\n\n with dr.pickle_env():\n res = pickle.loads(serialized)\n\n assert res[0] == tensor\n assert res[1] == def_",
"def __reduce_ex__(self, protocol):\n if not hasattr(self, '__setstate__'):\n raise ValueError(\"If __reduce_ex__ is used from modelBaseUnpickler, \"\n \"must also use __setstate__ from modelBaseUnpickler!\")\n\n # Constructor kwargs\n c = {}\n for k in self.PICKLE_INIT:\n c[k] = getattr(self, k, None)\n\n # Runtime information\n d = {}\n for p in self.PICKLE_STATE:\n d[p] = getattr(self, p, None)\n if hasattr(self, '__dict__'):\n for k, v in self.__dict__.items():\n if k in self.PICKLE_STATE or k in self.PICKLE_INIT:\n continue\n d[k] = v\n\n # Convert memoryviews to arrays, which can be pickled\n for k, v in d.items():\n d[k] = _fixMemoryview(v)\n\n return (_unpickle, (self.__class__, c), d)",
"def test_pickle_load(self):\n l = [1, 2, 3, 4, 5]\n self.plugin.save_data(l)\n\n l = self.plugin.load_data()\n self.assertIn(4, l)",
"def test_pickle_serialization(self, molecule):\n serialized = pickle.dumps(molecule)\n molecule_copy = pickle.loads(serialized)\n assert molecule == molecule_copy\n assert molecule_copy.n_conformers == molecule.n_conformers\n assert np.allclose(molecule_copy.conformers[0], molecule.conformers[0])",
"def load_synthetic_data():\n\n pickle_object = FM().data_file \n\n with pickle_object.open('rb') as data_file: \n return pickle.load(data_file)",
"def freeze(self):\r\n\r\n # this code is probably rather ickier than it needs to be!\r\n for i in range(len(self.data)):\r\n e = self.data[i]\r\n if not isinstance(e.code, str):\r\n self.data[i] = type(e)((label(e.code),) + e[1:])\r\n if e.calls:\r\n for j in range(len(e.calls)):\r\n se = e.calls[j]\r\n if not isinstance(se.code, str):\r\n e.calls[j] = type(se)((label(se.code),) + se[1:])",
"def test_pickle_serialization(self, molecule):\n serialized = pickle.dumps(molecule)\n molecule_copy = pickle.loads(serialized)\n assert molecule == molecule_copy",
"def __reduce__(self) -> NoReturn:\n raise TypeError(\n \"can't pickle {} objects\".format(self.__class__.__name__)\n )",
"def pickle_objects(feat_extrs, feat_lex, dataset_splits, goldstandard_splits, a_file):\n start = time.time()\n print 'Pickling feature extraction functions, feature lexicon, dataset_splits batch examples and goldstandard_splits labels...'\n assert type(feat_extrs) == list, 'Expected a list of callables as the 1st object to be pickled'\n for _ in feat_extrs:\n assert callable(_) is True, 'Element {} of 1st object is not callable'.format(_)\n assert isinstance(feat_lex, FeatureLexicon), \"Expected an instance of FeatureLexicon as the 2nd object to be pickled. Got '{}' instead\".format(type(feat_lex))\n assert type(dataset_splits) == dict, 'Expected a dict as the 3rd object to be pickled'\n for _ in dataset_splits:\n assert _ in ['train', 'test', 'valid'], \"The dict expected as the 3rd object to be pickled, has key '{}' not in ['train', 'test', 'valid']\".format(_)\n assert type(goldstandard_splits) == dict, 'Expected a dict as the 4th object to be pickled'\n for _ in goldstandard_splits:\n assert _ in ['train', 'test', 'valid'], \"The dict expected as the 4th object to be pickled, has key '{}' not in ['train', 'test', 'valid']\".format(_)\n with open(a_file, 'wb') as pkl_file:\n pickle.dump(feat_extrs, pkl_file, protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(feat_lex, pkl_file, protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(dataset_splits, pkl_file, protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(goldstandard_splits, pkl_file, protocol=pickle.HIGHEST_PROTOCOL)\n print ' Done in {:.2f} sec'.format(time.time() - start)",
"def sync(self):\n if not self.is_readonly():\n deser = self._deserialize()\n orig = getattr(self.model, self.name)\n if (orig != deser):\n if isinstance(orig, list):\n # first remove the original triples, instead of doing sophisticated\n # set manipulations\n setattr(self.model, self.name, [])\n setattr(self.model, self.name, deser)",
"def _numpy_ufunc_pickle_support():\n # Remove this when numpy.ufuncs themselves support pickling.\n # Code from Robert Kern; see:\n #http://news.gmane.org/find-root.php?group=gmane.comp.python.numeric.general&article=13400\n from numpy import ufunc\n import copy_reg\n\n def ufunc_pickler(ufunc):\n \"\"\"Return the ufunc's name\"\"\"\n return ufunc.__name__\n\n copy_reg.pickle(ufunc,ufunc_pickler)",
"def test_serialization_no_conformers(self):\n mol = Molecule.from_smiles(\"CCO\")\n\n dict_copy = Molecule.from_dict(mol.to_dict())\n assert mol == dict_copy\n\n # TODO: yaml_copy = Molecule.from_yaml(mol.to_yaml())\n with pytest.raises(NotImplementedError):\n mol.to_toml()\n\n bson_copy = Molecule.from_bson(mol.to_bson())\n assert mol == bson_copy\n\n json_copy = Molecule.from_json(mol.to_json())\n assert mol == json_copy\n\n # TODO: round-trip when from_xml is implemented\n mol_as_xml = mol.to_xml()\n with pytest.raises(NotImplementedError):\n Molecule.from_xml(mol_as_xml)\n\n messagepack_copy = Molecule.from_messagepack(mol.to_messagepack())\n assert mol == messagepack_copy\n\n pickle_copy = pickle.loads(pickle.dumps(mol))\n assert mol == pickle_copy",
"def test_serialization_no_conformers(self):\n mol = Molecule.from_smiles(\"CCO\")\n\n dict_copy = Molecule.from_dict(mol.to_dict())\n assert mol == dict_copy\n\n # TODO: yaml_copy = Molecule.from_yaml(mol.to_yaml())\n with pytest.raises(NotImplementedError):\n mol.to_toml()\n\n bson_copy = Molecule.from_bson(mol.to_bson())\n assert mol == bson_copy\n\n json_copy = Molecule.from_json(mol.to_json())\n assert mol == json_copy\n\n # TODO: round-trip when from_xml is implemented\n mol_as_xml = mol.to_xml()\n with pytest.raises(NotImplementedError):\n Molecule.from_xml(mol_as_xml)\n\n messagepack_copy = Molecule.from_messagepack(mol.to_messagepack())\n assert mol == messagepack_copy\n\n pickle_copy = pickle.loads(pickle.dumps(mol))\n assert mol == pickle_copy",
"def test_pickle_limit_continue(self):\n l = []\n for i in range(0, 30):\n l.append(i)\n self.plugin.save_data(l, 10)\n l = self.plugin.load_data()\n self.assertEqual(20, l[0])",
"def __post_init__(self) -> None:\n setattr(self, _FROZEN, True)",
"def test_instmap_picklable(self):\n instmap = FakeAthens().defaults().instruction_schedule_map\n\n ser_obj = pickle.dumps(instmap)\n deser_instmap = pickle.loads(ser_obj)\n\n self.assertEqual(instmap, deser_instmap)",
"def loadData(self):\n infile = open(PublicTransit.PICKLE_SAVE_FILE, 'rb')\n self.nodesDict = cPickle.load(infile)\n self.linksDict = cPickle.load(infile)\n self.stopsByRoute = cPickle.load(infile)\n self.stopsByNode = cPickle.load(infile)\n self.routeXref = cPickle.load(infile)\n self.transitRoutes = cPickle.load(infile)\n infile.close()\n self.spIndex = index.Index(PublicTransit.SPATIAL_INDEX_FILE)\n #last step is to reconcile all of the nodes into single objects\n #use routePattern dictionary as the master\n self.stopsDict = {}\n for routePattern in self.stopsByRoute:\n for stop in self.stopsByRoute[routePattern]:\n if stop.stopPointId in self.stopsDict:\n self.stopsDict[stop.stopPointId].append(stop)\n else:\n self.stopsDict[stop.stopPointId] = [stop]\n if stop.tanaNode in self.stopsByNode:\n for i in range(len(self.stopsByNode[stop.tanaNode])):\n nodeStop = self.stopsByNode[stop.tanaNode][i]\n if nodeStop.basicEqual(stop):\n self.stopsByNode[stop.tanaNode][i] = stop",
"def load_data(self):\n raise NotImplementedError()",
"def __getstate__(self):\n if self.tag is not None:\n raise pickle.PicklingError()\n state = self.__dict__.copy()\n del state['_SharedNumpyArray__shared']\n return state",
"def __setstate__(self, state):\n\n if 'version' not in state or state['version'] != CURRENT_DATASET_VERSION:\n raise RuntimeError('Pickled dataset has older file format, please regenerate!')\n\n self.token_embeddings = state['token_embeddings']\n self.fee_embeddings = state['fee_embeddings']\n self.all_fee_spans = state['all_fee_spans']\n self.fee_indices = state['fee_indices']\n self.labels = state['labels']\n self.lemma_pos = state['lemma_pos'] \n self.all_dependency_labels = state['dependency_labels']\n self.all_dependency_heads = state['dependency_heads']\n self.n = state['n']\n self.dim = state['dim']\n self.loaded = state['loaded']\n self.statistics = state['statistics']\n self.version = state['version']",
"def _deserialize_data(self):\n try:\n self._func_name, self._instance, self._args, self._kwargs = self.serializer.loads(self.data)\n except Exception as e:\n raise DeserializationError() from e"
] |
[
"0.62170494",
"0.61310524",
"0.59855705",
"0.5935278",
"0.5901065",
"0.58910406",
"0.58880556",
"0.5863765",
"0.5853036",
"0.5848496",
"0.5726836",
"0.572173",
"0.5718709",
"0.56999195",
"0.5698243",
"0.56816524",
"0.56652796",
"0.56592995",
"0.56532854",
"0.5632119",
"0.56064785",
"0.56064785",
"0.5604769",
"0.55819297",
"0.5574903",
"0.555397",
"0.5552391",
"0.5542785",
"0.554088",
"0.5498515"
] |
0.6965079
|
0
|
Ask user what style of drink they like
|
def drink_style_input():
# Define answered style questions dictionary
answers_style = {}
# Loop through style questions
for style, question in questions.iteritems():
# Ask whether they like a drink style and set to lower case
# Test if answer is yes, then add drink style and boolean to dictionary
answers_style[style] = raw_input(questions[style] + " Please answer (y)es or (n)o.").lower() in ["y","yes"]
return answers_style
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def main():\n answers_style = drink_style_input()\n drink = drink_make(answers_style)\n print \"\"\n print \"Your drink includes:\"\n for ingredient in drink:\n print \"A {}\".format(ingredient)",
"def drink_make(drink_styles): \n # Define empty list for drink\n drink_ingredients = [] \n # Loop through styles and add random ingredient to drink list\n for style, selected in drink_styles.iteritems():\n # Test whether style selected by user\n if selected == True:\n drink_ingredients.append(random.choice(ingredients[style]))\n # Return drink\n return drink_ingredients",
"def pirate ():\n \n print \"I'm a {}, I will ask you what you like\".format(pirate.__name__)\n \n for taste in questions:\n print questions[taste]\n x =raw_input().lower()\n if x == \"y\" or x == \"yes\":\n Prefer.append(taste)\n print Prefer",
"def match_style(self, input_style: str) -> str:\r\n try: # Try to get from the dictionary\r\n return self.get_style_from_styles(input_style)\r\n except KeyError: # If you get a key error, it is not in the dictionary\r\n new_style = input(input_style + '\\nWhat style is this?') # Ask the user what style it is\r\n self.add_style_to_styles(input_style, new_style) # Add this style to the dictionary\r\n return new_style # Return the more readable style\r",
"def user_choice():\n\n OPTIONS = \"\"\"\n a) See ratings\n b) Add rating\n c) Quit\n \"\"\"\n\n while True:\n print OPTIONS\n user_choice = raw_input(\"What would you like to do?: \")\n\n if user_choice == \"a\":\n restaurant_rating(filename)\n elif user_choice == \"b\":\n user_input()\n elif user_choice == \"c\":\n print \"Good-bye!\"\n break\n else:\n print \"Invalid input\"",
"def choose_display():\n\n choices = [\"d\",\"theta\",\"both\"]\n\n temp_choice = \"false\"\n\n while temp_choice not in choices:\n temp_choice = input(\"Please choose the scale to display.\\nd, theta, both\\n\")\n if temp_choice not in choices:\n print(\"incorrect choice\\n\")\n\n return temp_choice",
"def user_question():\n return input('What would you like? (espresso/latte/cappuccino): ')",
"def menu():\r\n cont = False\r\n while cont == False:\r\n choice = input(\"Enter a letter to choose an option:\\n\" +\r\n \"e - Enter preferences\\nr - Get recommendations\\n\" +\r\n \"p - Show most popular artists\\nh - How popular is the most popular\\n\" +\r\n \"m - Which user has the most likes\\nq - Save and quit\\n\")\r\n if isinstance(choice, str):\r\n cont = True\r\n else:\r\n print(\"please enter one of the choices above\")\r\n return choice",
"def customer_wants_condiments(self):\n answer = raw_input(\"Would you like Lemon? (y/n)\").lower()\n if answer.startswith('y'):\n return True\n else:\n return False",
"def make_drink ():\n \n customer_pref = customer_order.drink_order()\n drink = []\n \n for pref in customer_pref:\n if customer_pref[pref] == True:\n drink.append(random.choice(ingredients[pref]))\n \n return drink",
"def getPetalColor():\n return input(\"What color do you want the petals to be?\")",
"def attack_choice(self, user_choice):\n\n if 'A' in user_choice:\n return 'What is the name of your item?'\n\n elif 'B' in user_choice:\n # IDEA: Should there be limit on flee?\n if randint(1, 4) == 3:\n return False\n else:\n return \"Well looks like your escape attempt failed.\"\n else:\n return \"Please choose either 'A' or 'B'\"",
"def pay(drink):\n qtr_pay = int(input(\"How many quarters? \"))\n dime_pay = int(input(\"How many dimes? \"))\n nickel_pay = int(input(\" How many nickels? \"))\n penny_pay = int(input(\"How many pennies? \"))\n payment_given = (qtr_pay * qtr) + (dime_pay * dime) + (nickel_pay * nickel) + (penny_pay * penny)\n print(payment_given)\n if payment_given >= drink['cost']:\n balance = payment_given - drink['cost']\n print(f\"Here is your USD{balance} back. \")\n print(f\"Here is your {drink_choice} == Enjoy\")\n make_drink(drink)\n elif payment_given < drink['cost']:\n print(\"Sorry that's not enough\")",
"def choose_slime():\n\n did_choose = False\n slime = None\n \n while did_choose == False:\n slime_type = input(\"Which one would you like? \")\n if slime_type.lower() == \"cheap slime\":\n print(\"Great choice!\")\n print(\"* Cheap slime is given *\")\n print(\"Have fun!\")\n slime = CheapSlime()\n did_choose = True\n else:\n print(\"We don't have this kind of slime, please choose another one\")\n print(\"* Leaving store *\")\n\n to_try = input(\"Would you like to try your slime? \")\n if to_try.lower() == \"yes\" or to_try.lower() == \"y\":\n print(\"Great!\")\n elif to_try.lower() == \"no\" or to_try.lower() == \"n\":\n print(\"That wasn't a question, you *will* play with your slime and you *will* enjoy.\")\n print(\"Anyways\", end=\", \")\n else:\n print(\"I didn't understand your answer so I'll take it as a yes.\")\n use_slime(slime)",
"def handle_case():\n jackets, pants, shirts, k = [int(x) for x in input().split(\" \")]\n result = fashion_small(jackets, pants, shirts, k)\n\n return result",
"def weight_input():\r\n\tif bool(eval(input('Do you want to use different weights? (y/n) '))):\r\n\t\treturn float(input('payload weight (lbm): ')), \\\r\n\t\t\t\tfloat(input('avionics bay weight (lbm): ')), \\\r\n\t\t\t\tfloat(input('booster weight (lbm): '))\r\n\r\n\telse:\r\n\t\treturn 9.489, 4.083, 11.483 #2016-17 PDR Weights\r",
"def main():\n\n tea_bag = Flavour('Tea')\n hot_water = Water('Hot Water')\n semi_skimmed = Milk('Semi-Skimmed Milk')\n no_sugar = Sugar('No Sugar')\n\n print make_drink(tea_bag, hot_water, semi_skimmed, no_sugar)\n\n sour_milk = Milk.BAD_MILK\n print make_drink(tea_bag, hot_water, sour_milk, no_sugar)\n\n salt = Sugar.INVALID_SUGAR\n print make_drink(tea_bag, hot_water, semi_skimmed, salt)",
"def prompt_style():\r\n font_numbers = {'0', '1', '2', '3', '4', '5', '6'}\r\n print(\"Background Color\")\r\n background_color = str.lower(input(\"Choose the name of a color, or in format '#XXXXXX':\\t\"))\r\n if len(background_color) != 7 or background_color[0] != '#':\r\n while background_color not in COLORS:\r\n print(\"Illegal format\")\r\n background_color = str.lower(input(\"Choose the color name or #XXXXXX\\t\"))\r\n if len(background_color) == 7 and background_color[0] == '#':\r\n break\r\n see_font = str.lower(input(\"Do you want to see what the fonts look like? [yes]\\t\"))\r\n if see_font == \"yes\" or see_font == \"\":\r\n print(\"Close the window when you have made your choice\")\r\n turtle_fonts()\r\n print(\"Choose a font by its number\",\r\n \"0: Arial, size 14\",\r\n \"1: Comic Sans MS, size 14\",\r\n \"2: Lucida Grande, size 14\",\r\n \"3: Tahoma, size 14\",\r\n \"4: Verdana, size 14\",\r\n \"5: Helvetica, size 14\",\r\n \"6: Times New Roman, size 14\", sep='\\n')\r\n font = input(\">> \")\r\n while font not in font_numbers:\r\n font = input(\"Invalid font number, enter from 0 - 6\\t\")\r\n if font == \"0\":\r\n font = \"Arial\"\r\n elif font == \"1\":\r\n font = \"Comic Sans MS\"\r\n elif font == \"2\":\r\n font = \"Lucida Grande\"\r\n elif font == \"3\":\r\n font = \"Tahoma\"\r\n elif font == \"4\":\r\n font = \"Verdana\"\r\n elif font == \"5\":\r\n font = \"Helvetica\"\r\n elif font == \"6\":\r\n font = \"Times New Roman\"\r\n print(\"Paragraph Text Color\")\r\n paragraph_color = str.lower(input(\"Choose the name of a color, or in format '#XXXXXX':\\t\"))\r\n if len(paragraph_color) != 7 or paragraph_color[0] != '#':\r\n while paragraph_color not in COLORS:\r\n print(\"Illegal format\")\r\n paragraph_color = str.lower(input(\"Choose the color name or #XXXXXX\\t\"))\r\n if len(paragraph_color) == 7 and paragraph_color[0] == '#':\r\n break\r\n print(\"Heading Color\")\r\n head_color = str.lower(input(\"Choose the name of a color, or in format '#XXXXXX':\\t\"))\r\n if len(head_color) != 7 or head_color[0] != '#':\r\n while head_color not in COLORS:\r\n print(\"Illegal format\")\r\n head_color = str.lower(input(\"Choose the color name or #XXXXXX\\t\"))\r\n if len(head_color) == 7 and head_color[0] == '#':\r\n break\r\n return background_color, font, paragraph_color, head_color",
"def todays_choice():\n while True: #Run until a suitable input is passed.\n question = input(\"Deposit(D) or Withdrawal(W) or History(H) or Balance(B) >>> \")\n if question == \"D\": #if savings account\n return \"deposit\"\n elif question == \"W\": #if current account\n return \"withdraw\"\n elif question == \"H\":\n return \"history\"\n elif question == \"B\":\n return \"balance\"",
"def card_type():\n while True: #Run until a suitable input is passed.\n question = input(\"Savings(S) or Current(C) >>> \")\n if question == \"S\": #if savings account\n return \"savings\"\n elif question == \"C\": #if current account\n return \"current\"",
"def user_wants_to_search_for_specific_song():\n while True:\n try:\n option = int(input(\"How would you like search for a song?\\n\"\n \"1. Enter the name of the song I want to find.\\n\"\n \"2. Search by Hot, New, or Most Difficult.\\n\\n\"\n \"Enter: \"))\n except ValueError:\n print(\"\\nSorry, that is not number.\\n\")\n continue\n\n options = [True, False]\n if option in range(1, 3):\n return options[option - 1]\n\n print(f\"Sorry, \\\"{option}\\\" is not a valid option.\")",
"def use(self):\n return_string = ''\n item = input(f\"What do you want to use?\\n>\")\n if item in self.backpack:\n if self.backpack[item].type is \"Food\":\n if (self.health + self.backpack[item].heal_amount) > standard_health:\n self.health = standard_health\n else:\n self.health += self.backpack[item].heal_amount\n self.backpack[item].charges -= 1\n return_string = f\"You ate {self.backpack[item].name}. {self.backpack[item].heal_amount} health restored\"\n if self.backpack[item].charges == 0:\n del self.backpack[item]\n return return_string\n else:\n return \"You cant eat this\"\n else:\n return \"You dont have this\"",
"def select_favorite(favoris_dict):\r\n choice = user_choix_input(len(favoris_dict))\r\n # Extract the specifitions of the product to display it\r\n product = extract_product(favoris_dict[choice][0])\r\n # Extract the specifitions of the substitute to display it\r\n substitute = extract_product(favoris_dict[choice][1])\r\n print_product(product)\r\n print('\\n Vous pouvez remplacer ceci par: \\n')\r\n print_product(substitute)",
"def do_drink(self, args):\n\n name = pick(\"Agent\", self.model.get_agent_names())\n if name is not None:\n fluid = int(input(\"Fluid to drink?\"))\n agent = self.model.get_agent(name)\n agent.drink(fluid)",
"def prompt_user_money_to_withdrawl():\n print('What amount of money do you want to withdrawl?:')\n return input()",
"def make_drink(beverage_type, water, milk, sugar):\n\n drink = Drink(beverage_type)\n if water.is_hot():\n drink.with_water(water)\n if not milk.is_off():\n drink.with_milk(milk)\n try:\n drink.with_sugar(sugar)\n except ValueError as e:\n return e.message\n\n drink.stir()\n\n return drink\n else:\n return 'ERROR: Bad Milk'\n else:\n return 'ERROR: Cold Water'",
"def ask_for_blessing(test):\n print()\n msg = \"Is this output correct ([Y]es/[N]o/[S]top)? \"\n print(Ansi.in_color(msg, Ansi.WHITE), end=\"\")\n choice = input().lower()\n if choice.startswith(\"y\"):\n test.bless_output()\n test.result = NEW_OUTPUT\n elif choice.startswith(\"s\"):\n sys.exit(2)\n else:\n test.result = WRONG_OUTPUT",
"def validate_sweeps_type_menu(user_input):\n switcher = {\n 1: (True, 1),\n 2: (True, 2),\n 3: (True, 3),\n }\n return switcher.get(user_input, (False, None))",
"def prompt_user_account_to_get_interest():\n print('What account do you want 0.5% automatic interest?:')\n return input()",
"def dish_get_info() -> Dish:\r\n return Dish(input(\"Please enter the dish's name: \"),\r\n float(input(\"Please enter the price of the dish: \")),\r\n int(input(\"Please enter the number of calories of the dish: \")))"
] |
[
"0.639712",
"0.63034374",
"0.59951705",
"0.5833803",
"0.57443935",
"0.57302874",
"0.55725485",
"0.5533288",
"0.549301",
"0.54182047",
"0.5369108",
"0.5357277",
"0.5327648",
"0.5317922",
"0.5311883",
"0.5305208",
"0.5300284",
"0.52954954",
"0.5273153",
"0.52573305",
"0.52358514",
"0.52214205",
"0.5219739",
"0.5211998",
"0.5200053",
"0.51985055",
"0.5193831",
"0.5192633",
"0.5189034",
"0.51643145"
] |
0.7765335
|
0
|
Construct drink using user's style preferences and ingredients by style
|
def drink_make(drink_styles):
# Define empty list for drink
drink_ingredients = []
# Loop through styles and add random ingredient to drink list
for style, selected in drink_styles.iteritems():
# Test whether style selected by user
if selected == True:
drink_ingredients.append(random.choice(ingredients[style]))
# Return drink
return drink_ingredients
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def main():\n answers_style = drink_style_input()\n drink = drink_make(answers_style)\n print \"\"\n print \"Your drink includes:\"\n for ingredient in drink:\n print \"A {}\".format(ingredient)",
"def make_drink ():\n \n customer_pref = customer_order.drink_order()\n drink = []\n \n for pref in customer_pref:\n if customer_pref[pref] == True:\n drink.append(random.choice(ingredients[pref]))\n \n return drink",
"def make_coffee(drink_type):\r\n for resource in MENU[drink_type][\"ingredients\"]:\r\n resources[resource] -= MENU[drink_type][\"ingredients\"][resource]\r\n print(f\"Here is your {drink_type} ☕. Enjoy!\")",
"def make_coffee(self, drink):\n for ingredient in drink.ingredients:\n self.resources[ingredient] -= drink.ingredients[ingredient]\n self.profit += drink.cost",
"def make_coffee(drink, resources, menu):\r\n resources['Water'] -= menu[drink]['ingredients']['water']\r\n resources['Milk'] -= menu[drink]['ingredients']['milk']\r\n resources['Coffee'] -= menu[drink]['ingredients']['coffee']\r\n print(f'Here is your {drink} ☕.Enjoy!')",
"def make_coffee(drink):\n ingredients_to_deduct = (MENU[drink][\"ingredients\"])\n for key in ingredients_to_deduct:\n resources[key] -= ingredients_to_deduct[key]",
"def make_coffee(drink, ingredients):\n for item in ingredients:\n resources[item] -= ingredients[item]\n print(f\"Here is your {drink}. Enjoy!\")",
"def make_drink(beverage_type, water, milk, sugar):\n\n drink = Drink(beverage_type)\n if water.is_hot():\n drink.with_water(water)\n if not milk.is_off():\n drink.with_milk(milk)\n try:\n drink.with_sugar(sugar)\n except ValueError as e:\n return e.message\n\n drink.stir()\n\n return drink\n else:\n return 'ERROR: Bad Milk'\n else:\n return 'ERROR: Cold Water'",
"def main():\n\n tea_bag = Flavour('Tea')\n hot_water = Water('Hot Water')\n semi_skimmed = Milk('Semi-Skimmed Milk')\n no_sugar = Sugar('No Sugar')\n\n print make_drink(tea_bag, hot_water, semi_skimmed, no_sugar)\n\n sour_milk = Milk.BAD_MILK\n print make_drink(tea_bag, hot_water, sour_milk, no_sugar)\n\n salt = Sugar.INVALID_SUGAR\n print make_drink(tea_bag, hot_water, semi_skimmed, salt)",
"def makecoffee(drink_name, ingredients):\n for item in ingredients:\n resources[item] -= ingredients[item]\n print(f\"Please enjoy your {drink_name}\")",
"def drink_style_input():\n # Define answered style questions dictionary\n answers_style = {} \n # Loop through style questions\n for style, question in questions.iteritems():\n # Ask whether they like a drink style and set to lower case\n # Test if answer is yes, then add drink style and boolean to dictionary\n answers_style[style] = raw_input(questions[style] + \" Please answer (y)es or (n)o.\").lower() in [\"y\",\"yes\"]\n return answers_style",
"def make_cup(drink_name, order_ingredients):\n for item in order_ingredients:\n resources[item] -= order_ingredients[item]\n print(f\"Here is your {drink_name} ☕️Enjoy!\")",
"def __init__(self, _name, _drink=menu.water, _food=menu.bread):\n self.name = _name\n self.drinks = []\n self.food = []\n self.drinks.append(_drink)\n self.food.append(_food)",
"def prepare_recipe(self):\n self.boil_water()\n self.brew()\n self.pour_in_cup()\n if self.customer_wants_condiments():\n self.add_condiments()",
"def generateSuit(self):\n\n dna = self.style\n self.headParts = []\n \n # most heads do not need different poly color or texture\n self.headColor = None\n self.headTexture = None\n\n # For suit death animation\n self.loseActor = None\n\n # Have we become a skelecog?\n self.isSkeleton = 0\n \n # Suit heights have been determined empirically; see\n # RoguesGallery.py or the magic word ~rogues.\n\n # corporate dept\n if (dna.name == 'f'):\n # flunky\n self.scale = 4.0/cSize\n self.handColor = SuitDNA.corpPolyColor\n self.generateBody()\n # this suit has two head parts\n self.generateHead(\"flunky\")\n self.generateHead(\"glasses\") \n self.setHeight(4.88)\n elif (dna.name == 'p'):\n # pencil pusher\n self.scale = 3.35/bSize\n self.handColor = SuitDNA.corpPolyColor\n self.generateBody()\n self.generateHead(\"pencilpusher\")\n self.setHeight(5.00)\n elif (dna.name == 'ym'):\n # yes man\n self.scale = 4.125/aSize\n self.handColor = SuitDNA.corpPolyColor \n self.generateBody()\n self.generateHead(\"yesman\")\n self.setHeight(5.28)\n elif (dna.name == 'mm'):\n # micromanager\n self.scale = 2.5/cSize\n self.handColor = SuitDNA.corpPolyColor \n self.generateBody()\n self.generateHead(\"micromanager\")\n self.setHeight(3.25)\n elif (dna.name == 'ds'):\n # downsizer - DEFAULT\n self.scale = 4.5/bSize\n self.handColor = SuitDNA.corpPolyColor \n self.generateBody()\n self.generateHead(\"beancounter\")\n self.setHeight(6.08)\n elif (dna.name == 'hh'):\n # head hunter\n self.scale = 6.5/aSize\n self.handColor = SuitDNA.corpPolyColor \n self.generateBody()\n self.generateHead(\"headhunter\")\n self.setHeight(7.45)\n elif (dna.name == 'cr'):\n # corporate raider\n self.scale = 6.75/cSize\n self.handColor = VBase4(0.85, 0.55, 0.55, 1.0) \n self.generateBody()\n self.headTexture = \"corporate-raider.jpg\"\n self.generateHead(\"flunky\")\n self.setHeight(8.23)\n elif (dna.name == 'tbc'):\n # the big cheese\n self.scale = 7.0/aSize\n self.handColor = VBase4(0.75, 0.95, 0.75, 1.0)\n self.generateBody()\n self.generateHead(\"bigcheese\")\n self.setHeight(9.34)\n \n # legal dept\n elif (dna.name == 'bf'):\n # bottom feeder\n self.scale = 4.0/cSize\n self.handColor = SuitDNA.legalPolyColor \n self.generateBody()\n self.headTexture = \"bottom-feeder.jpg\"\n self.generateHead(\"tightwad\")\n self.setHeight(4.81)\n elif (dna.name == 'b'):\n # blood sucker\n self.scale = 4.375/bSize\n self.handColor = VBase4(0.95, 0.95, 1.0, 1.0)\n self.generateBody()\n self.headTexture = \"blood-sucker.jpg\" \n self.generateHead(\"movershaker\")\n self.setHeight(6.17)\n elif (dna.name == 'dt'):\n # double talker\n self.scale = 4.25/aSize\n self.handColor = SuitDNA.legalPolyColor \n self.generateBody()\n self.headTexture = \"double-talker.jpg\" \n self.generateHead(\"twoface\")\n self.setHeight(5.63)\n elif (dna.name == 'ac'):\n # ambulance chaser\n self.scale = 4.35/bSize\n self.handColor = SuitDNA.legalPolyColor \n self.generateBody()\n self.generateHead(\"ambulancechaser\")\n self.setHeight(6.39)\n elif (dna.name == 'bs'):\n # back stabber\n self.scale = 4.5/aSize\n self.handColor = SuitDNA.legalPolyColor \n self.generateBody()\n self.generateHead(\"backstabber\")\n self.setHeight(6.71)\n elif (dna.name == 'sd'):\n # spin doctor\n self.scale = 5.65/bSize\n self.handColor = VBase4(0.5, 0.8, 0.75, 1.0) \n self.generateBody()\n self.headTexture = \"spin-doctor.jpg\" \n self.generateHead(\"telemarketer\")\n self.setHeight(7.90)\n elif (dna.name == 'le'):\n # legal eagle\n self.scale = 7.125/aSize\n self.handColor = VBase4(0.25, 0.25, 
0.5, 1.0) \n self.generateBody()\n self.generateHead(\"legaleagle\")\n self.setHeight(8.27)\n elif (dna.name == 'bw'):\n # bigwig\n self.scale = 7.0/aSize\n self.handColor = SuitDNA.legalPolyColor\n self.generateBody()\n self.generateHead(\"bigwig\")\n self.setHeight(8.69)\n \n # money dept\n elif (dna.name == 'sc'):\n # short changer\n self.scale = 3.6/cSize\n self.handColor = SuitDNA.moneyPolyColor \n self.generateBody()\n self.generateHead(\"coldcaller\")\n self.setHeight(4.77)\n elif (dna.name == 'pp'):\n # penny pincher\n self.scale = 3.55/aSize\n self.handColor = VBase4( 1.0, 0.5, 0.6, 1.0) \n self.generateBody()\n self.generateHead(\"pennypincher\")\n self.setHeight(5.26)\n elif (dna.name == 'tw'):\n # tightwad\n self.scale = 4.5/cSize\n self.handColor = SuitDNA.moneyPolyColor \n self.generateBody()\n self.generateHead(\"tightwad\")\n self.setHeight(5.41)\n elif (dna.name == 'bc'):\n # bean counter\n self.scale = 4.4/bSize\n self.handColor = SuitDNA.moneyPolyColor\n self.generateBody()\n self.generateHead(\"beancounter\")\n self.setHeight(5.95)\n elif (dna.name == 'nc'):\n # number cruncher\n self.scale = 5.25/aSize\n self.handColor = SuitDNA.moneyPolyColor \n self.generateBody()\n self.generateHead(\"numbercruncher\")\n self.setHeight(7.22)\n elif (dna.name == 'mb'):\n # money bags\n self.scale = 5.3/cSize\n self.handColor = SuitDNA.moneyPolyColor \n self.generateBody()\n self.generateHead(\"moneybags\")\n self.setHeight(6.97)\n elif (dna.name == 'ls'):\n # load shark\n self.scale = 6.5/bSize\n self.handColor = VBase4(0.5, 0.85, 0.75, 1.0) \n self.generateBody()\n self.generateHead(\"loanshark\")\n self.setHeight(8.58)\n elif (dna.name == 'rb'):\n # robber baron\n self.scale = 7.0/aSize\n self.handColor = SuitDNA.moneyPolyColor \n self.generateBody()\n self.headTexture = \"robber-baron.jpg\"\n self.generateHead(\"yesman\")\n self.setHeight(8.95)\n\n # sales dept\n elif (dna.name == 'cc'):\n # cold caller\n self.scale = 3.5/cSize\n self.handColor = VBase4(0.55, 0.65, 1.0, 1.0)\n self.headColor = VBase4(0.25, 0.35, 1.0, 1.0)\n self.generateBody() \n self.generateHead(\"coldcaller\")\n self.setHeight(4.63)\n elif (dna.name == 'tm'):\n # telemarketer\n self.scale = 3.75/bSize\n self.handColor = SuitDNA.salesPolyColor\n self.generateBody()\n self.generateHead(\"telemarketer\")\n self.setHeight(5.24)\n elif (dna.name == 'nd'):\n # name dropper\n self.scale = 4.35/aSize\n self.handColor = SuitDNA.salesPolyColor \n self.generateBody()\n self.headTexture = \"name-dropper.jpg\"\n self.generateHead(\"numbercruncher\")\n self.setHeight(5.98)\n elif (dna.name == 'gh'):\n # glad hander\n self.scale = 4.75/cSize\n self.handColor = SuitDNA.salesPolyColor \n self.generateBody()\n self.generateHead(\"gladhander\")\n self.setHeight(6.40)\n elif (dna.name == 'ms'):\n # mover & shaker\n self.scale = 4.75/bSize\n self.handColor = SuitDNA.salesPolyColor \n self.generateBody()\n self.generateHead(\"movershaker\")\n self.setHeight(6.70)\n elif (dna.name == 'tf'):\n # two-face\n self.scale = 5.25/aSize\n self.handColor = SuitDNA.salesPolyColor \n self.generateBody()\n self.generateHead(\"twoface\")\n self.setHeight(6.95)\n elif (dna.name == 'm'):\n # the mingler\n self.scale = 5.75/aSize\n self.handColor = SuitDNA.salesPolyColor \n self.generateBody()\n self.headTexture = \"mingler.jpg\" \n self.generateHead(\"twoface\")\n self.setHeight(7.61)\n elif (dna.name == 'mh'):\n # Mr. 
Hollywood\n self.scale = 7.0/aSize\n self.handColor = SuitDNA.salesPolyColor\n self.generateBody()\n self.generateHead(\"yesman\")\n self.setHeight(8.95)\n \n self.setName(SuitBattleGlobals.SuitAttributes[dna.name]['name'])\n self.getGeomNode().setScale(self.scale)\n self.generateHealthBar()\n self.generateCorporateMedallion()",
"def __init__(self, date: dt_date, style: str, partners: list, notes: str, climb: Climb):\r\n self._date = date\r\n self._styles = {\r\n 'Lead RP': 'read point',\r\n 'AltLd O/S': 'onsight',\r\n 'Solo O/S': 'onsight',\r\n 'Lead rpt': 'no log',\r\n 'Lead O/S': 'onsight',\r\n '2nd β': 'flash',\r\n 'Solo rpt': 'no log',\r\n 'Lead Flash': 'flash',\r\n 'Lead dog': 'no send',\r\n '2nd O/S': 'onsight',\r\n 'AltLd rpt': 'no log',\r\n 'AltLd': 'no log',\r\n '2nd': 'no log',\r\n 'Sent x': 'read point',\r\n 'Sent Flash': 'flash',\r\n '-': 'summit',\r\n 'Solo': 'no log',\r\n 'Sent O/S': 'onsight',\r\n 'AltLd dnf': 'no send',\r\n 'Lead dnf': 'no send',\r\n 'DWS': 'no log',\r\n '2nd rpt': 'no log',\r\n '2nd dog': 'no send',\r\n 'AltLd dog': 'no send',\r\n 'Sent rpt': 'no log',\r\n 'Lead G/U': 'ground up',\r\n 'Sent': 'no log',\r\n 'Solo dnf': 'no send',\r\n 'Lead': 'no log'} # A matcher of different style types\r\n self._style = self.match_style(style) # Correct the style for a more readable format\r\n self._partners = partners\r\n self._notes = notes\r\n self._climb = climb",
"def make_sandwich(*ingredients):\n print(\"\\nMaking sandwich with the following ingredients:\")\n for ingredient in ingredients:\n print(\"- \" + ingredient)",
"def make_coffee(self, order):\n for item in order.ingredients:\n self.resources[item] -= order.ingredients[item]\n print(f\"Here is your {order.name} ☕️. Enjoy!\")",
"def make_coffee(self, order):\n for item in order.ingredients:\n self.resources[item] -= order.ingredients[item]\n print(f\"Here is your {order.name} ☕️. Enjoy!\")",
"def _create_ingredient(self, data):\n return Ingredient(**data)",
"def generate_food() -> FoodItem:\n presets = random.choice(FOOD_BANK)\n return FoodItem(presets['name'], presets['hp'], presets['msg'])",
"def create_pizza(pizza_type):\n pass",
"def create_food(self):\n self.penup()\n self.shape(\"circle\")\n self.color(\"green\")\n self.x_cordinates = random.randint(-210, 210)\n self.y_cordinates = random.randint(-210, 210)\n self.goto(self.x_cordinates, self.y_cordinates)\n print(f\"This Is Food {self.x_cordinates} and {self.y_cordinates}\")\n # self.stamp()",
"def _deduct_inventory(self, drink_type: str, drink_details: Beverages) -> None:\n drink_composition = drink_details.get_receipe(drink_type=drink_type)\n for ingredient in drink_composition:\n self._inventory.deduct_ingredient(\n ingredient=ingredient,\n quantity=drink_composition.get(ingredient)\n )",
"def the_drinking(self):\n self.log.info('The drinking starts')\n self.hide_give_buttons()\n self.show_people()\n\n #\n state = 'day-{0}'.format(self.end_of_day)\n conversation_item = self.conversation.getNext(situation='drinking', state=state)\n if conversation_item:\n if conversation_item.person == 'x':\n conversation = self.show_callout_sequence(situation='drinking', state=state, title='The Drinking')\n else:\n conversation = self.show_conversation_sequence(situation='drinking', state=state)\n for delay in conversation:\n yield delay\n #\n yield S['drinking-start-delay']\n #\n self.log.info('Performing the drinking')\n for name in sorted(self.people.keys()):\n person = self.people[name]\n if not person.isDead():\n #\n self.log.debug('{0} trying to drink'.format(name))\n conversation_item = None\n #\n try:\n person.drinkWater()\n except model.person.NoWater:\n self.log.debug('{0} has no water'.format(name))\n self.health_panels[name].set_drinking_state(healthpanel.S_NOT_DRINKING)\n if random.random() < S['drinking-talk-probability']:\n conversation_item = self.conversation.getAndUseNext(person=name, situation='no-drink-at-drinking')\n sounds.not_drinking.play()\n else:\n self.health_panels[name].set_drinking_state(healthpanel.S_DRINKING)\n if random.random() < S['drinking-talk-probability']:\n conversation_item = self.conversation.getAndUseNext(person=name, situation='drink-at-drinking')\n sounds.drink.play()\n #\n person.eatFood()\n person.newDay()\n #\n self.log.info('{0} finished day with health {1} and water {2}'.format(name, person.health, person.water))\n if person.isDead() and not self.health_panels[name].is_dead:\n self.record_death(name)\n #\n if conversation_item:\n self.show_conversation(conversation_item)\n self.update_panel_displays()\n #\n yield S['drinking-individual-delay']\n #\n self.health_panels[name].set_drinking_state(healthpanel.S_OFF)\n #\n yield S['drinking-end-delay']\n self.show_give_buttons()\n #\n self.end_of_day = None",
"def test_visualize_recipe_taste(self):\n pass",
"def create_meal():",
"def sample_ingredient(user, name = 'Cinnamon'):\n return Ingredient.objects.create(user=user, name=name)",
"def sample_ingredient(user, name='Cinemon'):\n return Ingredient.objects.create(user=user, name=name)",
"def drinks_new():\n return render_template('drinks_new.html', drink={})"
] |
[
"0.64675385",
"0.625151",
"0.6151498",
"0.6134938",
"0.61196727",
"0.61048126",
"0.60791534",
"0.607719",
"0.60167503",
"0.5968282",
"0.59542197",
"0.5853176",
"0.5558934",
"0.54750603",
"0.52756566",
"0.5242397",
"0.5206469",
"0.5190345",
"0.5181955",
"0.51619166",
"0.51209444",
"0.5119621",
"0.5119257",
"0.50873",
"0.5085282",
"0.50399214",
"0.50241476",
"0.49936134",
"0.49936032",
"0.4987585"
] |
0.793936
|
0
|
Run Pirate Bartender: ask user for styles and return drink ingredients
|
def main():
answers_style = drink_style_input()
drink = drink_make(answers_style)
print ""
print "Your drink includes:"
for ingredient in drink:
print "A {}".format(ingredient)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def drink_make(drink_styles): \n # Define empty list for drink\n drink_ingredients = [] \n # Loop through styles and add random ingredient to drink list\n for style, selected in drink_styles.iteritems():\n # Test whether style selected by user\n if selected == True:\n drink_ingredients.append(random.choice(ingredients[style]))\n # Return drink\n return drink_ingredients",
"def brew_process() -> str:\r\n brew_section = []\r\n batch_recipe = request.args.get(\"recipe_ask\")\r\n batch_tank = str(request.args.get(\"tank_question\"))\r\n if brewer_tanks[batch_tank][\"Activity_Status\"] == \"Nothing\":\r\n if \"Fermenter\" in brewer_tanks[batch_tank][\"Capabilities\"]:\r\n batch_number = assign_batch_number()\r\n now = datetime.now()\r\n starting_point = now.strftime(\"%d/%m/%Y %H:%M:%S\")\r\n expected_end = datetime.now() + timedelta(days=28)\r\n batch_amount = brewer_tanks[str(batch_tank)][\"Volume\"]\r\n #Appends info about batch being brewed in batch\r\n brew_section.append(batch_number)\r\n brew_section.append(starting_point)\r\n brew_section.append(batch_recipe)\r\n brew_section.append(int(batch_amount))\r\n brew_section.append(\"Fermentation\")\r\n brew_section.append(str(batch_tank))\r\n brew_section.append(expected_end)\r\n current_brewings.append(brew_section)\r\n brewer_tanks[str(batch_tank)][\"Batch_Content\"] = batch_number\r\n brewer_tanks[str(batch_tank)][\"Activity_Status\"] = \"Fermenting\"\r\n brew_message = \"Brew for this Batch has Started\"\r\n else:\r\n brew_message = \"This Tank is not Capable of this Activity\"\r\n else:\r\n brew_message = \"This Tank is Full\"\r\n\r\n return render_template(\"singular_message.html\",\r\n user_display=brew_message)",
"def use(self):\n return_string = ''\n item = input(f\"What do you want to use?\\n>\")\n if item in self.backpack:\n if self.backpack[item].type is \"Food\":\n if (self.health + self.backpack[item].heal_amount) > standard_health:\n self.health = standard_health\n else:\n self.health += self.backpack[item].heal_amount\n self.backpack[item].charges -= 1\n return_string = f\"You ate {self.backpack[item].name}. {self.backpack[item].heal_amount} health restored\"\n if self.backpack[item].charges == 0:\n del self.backpack[item]\n return return_string\n else:\n return \"You cant eat this\"\n else:\n return \"You dont have this\"",
"def drink_style_input():\n # Define answered style questions dictionary\n answers_style = {} \n # Loop through style questions\n for style, question in questions.iteritems():\n # Ask whether they like a drink style and set to lower case\n # Test if answer is yes, then add drink style and boolean to dictionary\n answers_style[style] = raw_input(questions[style] + \" Please answer (y)es or (n)o.\").lower() in [\"y\",\"yes\"]\n return answers_style",
"def main():\n\n tea_bag = Flavour('Tea')\n hot_water = Water('Hot Water')\n semi_skimmed = Milk('Semi-Skimmed Milk')\n no_sugar = Sugar('No Sugar')\n\n print make_drink(tea_bag, hot_water, semi_skimmed, no_sugar)\n\n sour_milk = Milk.BAD_MILK\n print make_drink(tea_bag, hot_water, sour_milk, no_sugar)\n\n salt = Sugar.INVALID_SUGAR\n print make_drink(tea_bag, hot_water, semi_skimmed, salt)",
"def make_sandwich(*ingredients):\n print(\"\\nMaking sandwich with the following ingredients:\")\n for ingredient in ingredients:\n print(\"- \" + ingredient)",
"def _get_user_beverage_request(self):\n while True:\n # display options\n self._beverage_options_impl.display_options()\n try:\n # read an option from stdin\n input_option = self._beverage_options_impl.read_option()\n num_beverages = self._beverage_impl.num_beverages\n if input_option == num_beverages + 1:\n #refill request\n refill_ingredient_request = read_ingredient_refill_request(list(self._ingredient_stock_impl.get_inventory_ingredient_dict().keys()))\n self._ingredient_stock_impl.refill_ingredient_stock(refill_ingredient_request)\n logger.info(\"Successfully refilled\")\n logger.debug(self._ingredient_stock_impl.get_inventory_details_msg())\n elif input_option == num_beverages + 2:\n logger.info(self._ingredient_stock_impl.get_inventory_details_msg())\n else:\n requested_beverage_name = self._beverage_dict[input_option].name\n seek_beverage = SeekBeverage(requested_beverage_name)\n break;\n #serve beverage\n except InvalidInputOptionException as e:\n print(\"Please enter a valid option\")\n return seek_beverage",
"async def bestiary(self, ctx: commands.Context, *, fish_name: str = None):\r\n\r\n # See if we want to list all of the fish\r\n if not fish_name:\r\n fields = []\r\n embed = discord.Embed(title=\"All Fish\")\r\n for rarity, fish_types in self.bot.fish.items():\r\n fish_string = [f\"**{' '.join(fish_type.split('_')).title()}**\" for fish_type, fish_info in fish_types.items()]\r\n fields.append((rarity.title(), \"\\n\".join(fish_string)))\r\n return await utils.paginate(ctx, fields, ctx.author, \"**Bestiary**\\n\")\r\n\r\n # Find the fish they asked for\r\n selected_fish = None\r\n for rarity, fish_types in self.bot.fish.items():\r\n for _, fish_info in fish_types.items():\r\n if fish_info[\"name\"] == str(fish_name.title()):\r\n selected_fish = fish_info\r\n break\r\n if selected_fish:\r\n break\r\n else:\r\n return await ctx.send(\"That fish doesn't exist.\")\r\n\r\n # Make and send an embed\r\n embed = discord.Embed(title=selected_fish[\"name\"])\r\n embed.set_image(url=\"attachment://new_fish.png\")\r\n embed.add_field(name='Rarity:', value=f\"{selected_fish['rarity']}\", inline=False)\r\n embed.add_field(name='Base Sell Price:', value=f\"{int(int(selected_fish['cost']) / 2)} <:sand_dollar:877646167494762586>\", inline=False)\r\n embed.add_field(name='Size:', value=f\"{selected_fish['size']}\", inline=False)\r\n embed.color = {\r\n \"common\": 0xFFFFFE, # White - FFFFFF doesn't work with Discord\r\n \"uncommon\": 0x75FE66, # Green\r\n \"rare\": 0x4AFBEF, # Blue\r\n \"epic\": 0xE379FF, # Light Purple\r\n \"legendary\": 0xFFE80D, # Gold\r\n \"mythic\": 0xFF0090, # Hot Pink\r\n }[selected_fish['rarity']]\r\n fish_file = discord.File(selected_fish[\"image\"], \"new_fish.png\")\r\n await ctx.send(file=fish_file, embed=embed)",
"def trader(backpack):\n loot = [[\"gold coin\", \"other\", 1]]\n loot2 = [[\"corn\", \"food\", 1]]\n print(\"\\nTrader says: \")\n if \"corn\" in backpack:\n x = input(\"-Hey! So you want sell some corn mate?\\n(write yes or no): \")\n x = x.lower()\n if x == \"yes\":\n try:\n remove_corn = int(input(\"-How much u wanna sell?: \"))\n if remove_corn > backpack[\"corn\"][0]:\n print(\"-You dont have that much corn in ur backpack \")\n enter()\n else:\n print(\"-Thanks for corn :) \")\n inve.remove_item(backpack, loot2, remove_corn)\n inve.add_to_inventory(backpack, loot, remove_corn)\n enter()\n except ValueError:\n print(\"(U need to write a number): \")\n enter()\n elif x == \"no\":\n print(\"-Come to me when u wanna sell corn \")\n enter()\n else:\n print(\"(Your answer need to be yes or no) \")\n enter()\n else:\n print(\"-You dont have any corn, come to me when u get some \")\n enter()\n return backpack",
"def make_drink ():\n \n customer_pref = customer_order.drink_order()\n drink = []\n \n for pref in customer_pref:\n if customer_pref[pref] == True:\n drink.append(random.choice(ingredients[pref]))\n \n return drink",
"def make_coffee(drink, ingredients):\n for item in ingredients:\n resources[item] -= ingredients[item]\n print(f\"Here is your {drink}. Enjoy!\")",
"def makecoffee(drink_name, ingredients):\n for item in ingredients:\n resources[item] -= ingredients[item]\n print(f\"Please enjoy your {drink_name}\")",
"async def drinkify(self,ctx):\n artist = ctx.message.content[9:]\n drink = \"\"\n response = requests.get('http://drinkify.org/{}'.format(artist))\n if response.status_code == 200:\n root = html.fromstring(response.content)\n recipe = root.xpath('//ul[@class=\"recipe\"]/li/text()')\n for alc in recipe:\n drink +=\"{}\\n\".format(alc)\n instructions = root.xpath('normalize-space(//p[@class=\"instructions\"]/text())')\n drink += \"\\nInstructions:\\n{}\".format(instructions)\n else:\n return\n await ctx.bot.send_message(ctx.message.channel, drink)",
"def the_drinking(self):\n self.log.info('The drinking starts')\n self.hide_give_buttons()\n self.show_people()\n\n #\n state = 'day-{0}'.format(self.end_of_day)\n conversation_item = self.conversation.getNext(situation='drinking', state=state)\n if conversation_item:\n if conversation_item.person == 'x':\n conversation = self.show_callout_sequence(situation='drinking', state=state, title='The Drinking')\n else:\n conversation = self.show_conversation_sequence(situation='drinking', state=state)\n for delay in conversation:\n yield delay\n #\n yield S['drinking-start-delay']\n #\n self.log.info('Performing the drinking')\n for name in sorted(self.people.keys()):\n person = self.people[name]\n if not person.isDead():\n #\n self.log.debug('{0} trying to drink'.format(name))\n conversation_item = None\n #\n try:\n person.drinkWater()\n except model.person.NoWater:\n self.log.debug('{0} has no water'.format(name))\n self.health_panels[name].set_drinking_state(healthpanel.S_NOT_DRINKING)\n if random.random() < S['drinking-talk-probability']:\n conversation_item = self.conversation.getAndUseNext(person=name, situation='no-drink-at-drinking')\n sounds.not_drinking.play()\n else:\n self.health_panels[name].set_drinking_state(healthpanel.S_DRINKING)\n if random.random() < S['drinking-talk-probability']:\n conversation_item = self.conversation.getAndUseNext(person=name, situation='drink-at-drinking')\n sounds.drink.play()\n #\n person.eatFood()\n person.newDay()\n #\n self.log.info('{0} finished day with health {1} and water {2}'.format(name, person.health, person.water))\n if person.isDead() and not self.health_panels[name].is_dead:\n self.record_death(name)\n #\n if conversation_item:\n self.show_conversation(conversation_item)\n self.update_panel_displays()\n #\n yield S['drinking-individual-delay']\n #\n self.health_panels[name].set_drinking_state(healthpanel.S_OFF)\n #\n yield S['drinking-end-delay']\n self.show_give_buttons()\n #\n self.end_of_day = None",
"def run_im_bored():\n \n greet_user()\n \n bored = True\n \n while bored:\n generate_suggestion()\n bored = ask_to_continue()",
"def run(input_date=None, input_meal=None):\n query_date, query_meal = _parse_args(input_date, input_meal)\n # Get the data and instantiate the required classes\n html = scrapper.fetch_data(query_date)\n meal_date = scrapper.get_meal_date(html)\n available_dates = scrapper.get_available_dates(html)\n meals = scrapper.get_meals(html)\n result = \"\"\n result += format_for_terminal(\"Cardápio do dia {}\\n\".format(format_date(meal_date)),\n constants.TERMINAL_GREEN)\n # Automatically decide which meal should be shown if none was specified\n if not query_meal:\n if meal_date != date.today():\n query_meal = constants.MEAL_LUNCH\n elif (datetime.now(timezone(constants.BANDECO_TIMEZONE)).time() <\n time(hour=constants.LUNCH_END_HOUR)):\n query_meal = constants.MEAL_LUNCH\n else:\n query_meal = constants.MEAL_DINNER\n # Formats the meal menu\n if query_meal == constants.MEAL_LUNCH:\n result += meals[0].combine(meals[1]).format()\n elif query_meal == constants.MEAL_DINNER:\n result += meals[2].combine(meals[3]).format()\n else:\n for meal in meals:\n result += meal.format()\n # Show other dates available for fetching\n result += format_for_terminal(\n \"Datas disponíves: {}\".format(\", \".join(list(map(format_date, available_dates)))),\n constants.TERMINAL_BLUE)\n # Shows a warning if the menu is not from today\n today = date.today()\n if not query_date and meal_date != today:\n result += format_for_terminal(\"\\nAviso: este cardápio não é de hoje, mas para daqui {} dias\"\n .format((meal_date - today).days), constants.TERMINAL_RED)\n return result",
"def beer():\r\n global cheated\r\n\r\n if enter_four == config.confus(config.config4):\r\n player.grab(helpful.Item('SixPack',10,0,0,6))\r\n cheated = True\r\n print '<achievement unlocked>\\n'\r\n\r\n if player.get_money() >= 17:\r\n\r\n player.set_health(100)\r\n player.lose_money(17)\r\n\r\n raw_input('You take out your money.\\n')\r\n raw_input(bartender_name + ' chuckles.\\n')\r\n raw_input('\"I guess we have this stuff, if you really need a drink.\"\\n')\r\n\r\n raw_input(\"The 'beer' healed you!\\n\")\r\n raw_input('It also cost $17.\\n')\r\n \r\n else:\r\n print bartender_name + ' chuckles and looks pointedly at his empty tip jar.\\n'\r\n raw_input('\"' +\"We're out of beer.\" + '\"\\n')\r\n raw_input('\"Nice try.\"\\n')",
"def make_coffee(drink_type):\r\n for resource in MENU[drink_type][\"ingredients\"]:\r\n resources[resource] -= MENU[drink_type][\"ingredients\"][resource]\r\n print(f\"Here is your {drink_type} ☕. Enjoy!\")",
"def pirate ():\n \n print \"I'm a {}, I will ask you what you like\".format(pirate.__name__)\n \n for taste in questions:\n print questions[taste]\n x =raw_input().lower()\n if x == \"y\" or x == \"yes\":\n Prefer.append(taste)\n print Prefer",
"def sandwiches(*items):\n print(\"\\nThe following items were selected for a sandwich:\")\n for item in items:\n print(\"- \" + item.upper())",
"def start_brew() -> str:\r\n recipe_names = [\"Organic Red Helles\", \"Organic Pilsner\",\r\n \"Organic Dunkel\"]\r\n return render_template(\"start_brew.html\",\r\n recipes=recipe_names,\r\n tank_options=TANKS)",
"def bum(backpack):\n loot = [[\"vodka\", \"food\", 1]]\n loot2 = [[\"gold coin\", \"other\", 1]]\n print(\"\\nBum says: \")\n if \"gold coin\" in backpack:\n if backpack['gold coin'][0] >= 1:\n vodka_sell = input(\n \"-I see gold in your pocket!\\nDo u wanna change 1 gold coin for 1 vodka?\\n(write yes to accept or no for reject)\\n\")\n if vodka_sell == \"yes\":\n try:\n vodka_ask = int(input(\"-How much vodka u need my friend?\\n\"))\n if vodka_ask <= backpack[\"gold coin\"][0]:\n print(\"GLUP \")\n inve.remove_item(backpack, loot2, vodka_ask) # removing coins from backpack\n inve.add_to_inventory(backpack, loot, vodka_ask) # adding vodka\n enter()\n else: # handling situation when u have no gold coins\n print(\"-U dont have coins for this...god's drink, f... off! \")\n enter()\n except ValueError: # handling bugs with writing some other stuff then int\n print(\"(U need to write a number) \")\n enter()\n elif vodka_sell == \"no\":\n print(\"-Bye, come to papa again :0 \")\n enter()\n else:\n print(\"-I dont know what you talking about \")\n enter()\n else:\n print(\"-You must have at least 1 gold coin to buy vodka! \")\n enter()\n else:\n print(\"-U dont have coins for this...god's drink, f... off! \")\n enter()\n return backpack",
"def prepare_recipe(self):\n self.boil_water()\n self.brew()\n self.pour_in_cup()\n if self.customer_wants_condiments():\n self.add_condiments()",
"def main():\n deli = troll_fight()\n if not empty_stack(deli):\n pick_berries(deli)\n else:\n print(\"The Troll has defeated the Goats! /sadface\")",
"async def cast(self, ctx:commands.Context, bait_type:str):\r\n\r\n if not await self.IsSpecialized(ctx.guild, ctx.channel.id, POOL_CHANNEL):\r\n return\r\n profile = self.config.member(ctx.message.author)\r\n\r\n await profile.currently_fishing.set(True)\r\n modified_fish_weights = await self.startfishing(ctx, profile, bait_type)\r\n\r\n embed = Embed(title=f'{ctx.message.author.display_name} cast their rod into the shimmering waves at {ctx.channel}', color=0x7300ff)\r\n embed.set_footer(text='Not even a nibble yet...')\r\n msg = await ctx.send(embed=embed)\r\n start_adding_reactions(msg, ['🎣'])\r\n\r\n pred = ReactionPredicate.with_emojis(['🎣'], msg, ctx.author)\r\n time_left = await self.GetSetting(ctx.guild, 'max_fishing_length')\r\n min_pause = await self.GetSetting(ctx.guild, 'min_fishing_wait')\r\n max_pause = await self.GetSetting(ctx.guild, 'max_fishing_wait')\r\n curr_fish = None\r\n rarity = None\r\n while time_left >= 0:\r\n try:\r\n timer = time_left if time_left < max_pause else randint(min_pause, max_pause)\r\n time_left -= timer\r\n await ctx.bot.wait_for('reaction_add', check=pred, timeout=timer)\r\n except asyncio.TimeoutError:\r\n if curr_fish is None:\r\n rarity = choices(FISH_RARITIES, modified_fish_weights)[0]\r\n rarity_list = self.fishing_rarities.get(rarity)\r\n curr_fish = rarity_list[randint(0, len(rarity_list) - 1)] if not await profile.bryan_mode() else self.SEA_BASS\r\n embed.set_footer(text=RARITY_DESCRIPTIONS[rarity])\r\n else:\r\n curr_fish = None\r\n embed.set_footer(text='The rod drifts in the water')\r\n await msg.edit(embed=embed)\r\n\r\n if pred.result == 0:\r\n break\r\n\r\n if curr_fish is None or time_left <= 0:\r\n embed.set_footer(text='You feel a twist as the line snaps :(')\r\n await msg.edit(embed=embed)\r\n await msg.clear_reactions()\r\n else:\r\n new_fish = curr_fish.ToFishCatch(RARITY_VALUES[rarity])\r\n embed.set_footer(text=f'You pulled a {new_fish[\"name\"]} ({new_fish[\"size\"]} inches) out of the water!\\nDo you want to keep or release?')\r\n embed.set_thumbnail(url=curr_fish.image)\r\n await msg.edit(embed=embed)\r\n await msg.clear_reactions()\r\n\r\n start_adding_reactions(msg, ['🥤', '🐟'])\r\n\r\n pred = ReactionPredicate.with_emojis(['🥤', '🐟'], msg, ctx.author)\r\n try:\r\n await ctx.bot.wait_for(\"reaction_add\", check=pred, timeout=15)\r\n except asyncio.TimeoutError:\r\n if await self.AddFish(ctx.message.author, new_fish):\r\n embed.set_footer(text=f'Timed out, {new_fish[\"name\"]} was added to your bucket')\r\n else:\r\n embed.set_footer(text=f'Timed out and your bucket was full, so {new_fish[\"name\"]} was released :(')\r\n await msg.edit(embed=embed)\r\n await msg.clear_reactions()\r\n else:\r\n if pred.result == 0:\r\n if await self.AddFish(ctx.message.author, new_fish):\r\n embed.set_footer(text=f'{new_fish[\"name\"]} was added to your bucket!')\r\n else:\r\n embed.set_footer(text=f'Your bucket was full, so you had to release {new_fish[\"name\"]} :(')\r\n else:\r\n embed.set_footer(text=f'You let {new_fish[\"name\"]} swim away...')\r\n await msg.edit(embed=embed)\r\n await msg.clear_reactions()\r\n\r\n if randint(0, 100) < 100 * await self.GetSetting(ctx.guild, 'bait_recovery_chance'):\r\n await ctx.send(f'Your {bait_type} is still on the end of the rod! 
(+1 {bait_type})')\r\n else:\r\n user_bait = await profile.bait()\r\n user_bait[bait_type] -= 1\r\n await profile.bait.set(user_bait)\r\n\r\n await profile.currently_fishing.set(False)\r\n #if not await profile.mawiam_mode():\r\n #await profile.nextcast.set(time() + await self.GetSetting(ctx.guild, 'fishing_delay'))\r\n\r\n await self.CheckSchools(ctx)",
"def want_to_use(self, plan):\n # Get list of all ingredients available\n # (we do not care about stones - i.e. multiple paths, these will be\n # taken care later when brewing the potion)\n ingredients = {}\n for path in plan.get_paths_for_strat(self.id):\n for cell in path:\n for ingredient in plan[cell[0]][cell[1]]:\n if ingredient not in ingredients:\n ingredients[ingredient] = []\n ingredients[ingredient].append(cell)\n ###print \">>> want_to_use: Ingredients:\", ingredients\n # Now compile list of ingrediens we are interested in\n # i.e. if whe have not brew basic potion it is only these from basic\n # potion, if we brew it already, it is these from cookbook\n interesting = []\n if self.potion_done:\n for potion in self.get_cookbook():\n interesting += potion\n else:\n interesting = self.potion\n ###print \">>> want_to_use: Interesting:\", interesting\n # Now only get ingredients we are interested in\n interested_ingredients = {}\n for ingredient in ingredients:\n if ingredient in interesting:\n interested_ingredients[ingredient] = ingredients[ingredient]\n ###print \">>> want_to_use: Iterested in these ingredients:\", interested_ingredients\n return interested_ingredients",
"async def quickcast(self, ctx:commands.Context, bait_type:str):\r\n\r\n if not await self.IsSpecialized(ctx.guild, ctx.channel.id, POOL_CHANNEL):\r\n return\r\n profile = self.config.member(ctx.message.author)\r\n\r\n await profile.currently_fishing.set(True)\r\n modified_fish_weights = await self.startfishing(ctx, profile, bait_type)\r\n\r\n rarity = choices(FISH_RARITIES, modified_fish_weights)[0]\r\n rarity_list = self.fishing_rarities.get(rarity)\r\n curr_fish = rarity_list[randint(0, len(rarity_list) - 1)] if not await profile.bryan_mode() else self.SEA_BASS\r\n new_fish = curr_fish.ToFishCatch(RARITY_VALUES[rarity])\r\n\r\n embed = Embed(title=f'{ctx.message.author.display_name} cast their rod into the shimmering waves at {ctx.channel}', color=0x7300ff)\r\n embed.set_footer(text=f'You pulled a {new_fish[\"name\"]} ({new_fish[\"size\"]} inches) out of the water!\\nDo you want to keep or release?')\r\n embed.set_thumbnail(url=curr_fish.image)\r\n msg = await ctx.send(embed=embed)\r\n start_adding_reactions(msg, ['🥤', '🐟'])\r\n\r\n pred = ReactionPredicate.with_emojis(['🥤', '🐟'], msg, ctx.author)\r\n try:\r\n await ctx.bot.wait_for('reaction_add', check=pred, timeout=15)\r\n except asyncio.TimeoutError:\r\n pred.result = 0\r\n\r\n if pred.result == 0:\r\n if await self.AddFish(ctx.message.author, new_fish):\r\n embed.set_footer(text=f'{new_fish[\"name\"]} was added to your bucket!')\r\n else:\r\n embed.set_footer(text=f'Your bucket was full, so you had to release {new_fish[\"name\"]} :(')\r\n else:\r\n embed.set_footer(text=f'You let {new_fish[\"name\"]} swim away...')\r\n await msg.edit(embed=embed)\r\n await msg.clear_reactions()\r\n\r\n user_bait = await profile.bait()\r\n user_bait[bait_type] -= 1\r\n await profile.bait.set(user_bait)\r\n\r\n await profile.currently_fishing.set(False)\r\n #if not await profile.mawiam_mode():\r\n #await profile.nextcast.set(time() + await self.GetSetting(ctx.guild, 'fishing_delay'))\r\n\r\n await self.CheckSchools(ctx)",
"def main():\n splitted_file = convert_input_to_list()\n encyclopedia_of_pizza = parse_pizza_info(splitted_file)\n pizza_winner = choose_pizza(encyclopedia_of_pizza)\n print_winner(pizza_winner)",
"def make_cup(drink_name, order_ingredients):\n for item in order_ingredients:\n resources[item] -= order_ingredients[item]\n print(f\"Here is your {drink_name} ☕️Enjoy!\")",
"def bids_cli():"
] |
[
"0.6478046",
"0.5842075",
"0.5710219",
"0.56645936",
"0.56446636",
"0.5596086",
"0.5570129",
"0.551248",
"0.550472",
"0.5448311",
"0.54283273",
"0.54248184",
"0.5390015",
"0.5384944",
"0.5377172",
"0.53676957",
"0.53014386",
"0.5243002",
"0.52425283",
"0.52145404",
"0.5190738",
"0.5171974",
"0.5171695",
"0.5151455",
"0.5143717",
"0.513548",
"0.5120061",
"0.5112023",
"0.5098293",
"0.5096073"
] |
0.69387525
|
0
|
Independence Day in the United States, celebrated on July 4th. It may be observed on the previous or following day if July 4th falls on a Saturday or Sunday, respectively.
|
def independence_day(year, observed=None):
day = 4
if observed:
weekday = calendar.weekday(year, JUL, 4)
if weekday == SAT:
day = 3
if weekday == SUN:
day = 5
return (year, JUL, day)
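A minimal observed-day check, assuming the module-level calendar import and the Monday-based weekday constants (JUL = 7, SAT = 5, SUN = 6) that the function references:

import calendar

JUL, SAT, SUN = 7, 5, 6   # assumed values, matching calendar.weekday()'s Monday = 0 convention

# July 4, 2020 fell on a Saturday and July 4, 2021 on a Sunday, so the
# observed holiday shifts to the adjacent Friday and Monday respectively.
assert independence_day(2020, observed=True) == (2020, JUL, 3)
assert independence_day(2021, observed=True) == (2021, JUL, 5)
assert independence_day(2019, observed=True) == (2019, JUL, 4)   # a Thursday, so no shift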
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_hebrew_independence_day(self, jewish_year):\n month = 2\n day = 5\n original_hebrew_independence_date = HebrewDate(jewish_year, month, day)\n if original_hebrew_independence_date.weekday() == 6:\n day = 4\n if original_hebrew_independence_date.weekday() == 7:\n day = 3\n if original_hebrew_independence_date.weekday() == 2:\n day = 6\n return [\n (HebrewDate(jewish_year, month, day - 1), \"Independence Day Eve\"),\n (HebrewDate(jewish_year, month, day), \"Independence Day\")\n ]",
"def get_holiday(self, date):\n if date.weekday() > 4 or [date.year, date.month, date.day] in self.bank_holidays:\n ok_value = 1\n else:\n ok_value = 0\n return ok_value",
"def test_holidays():\n\n assert not datetime.datetime(2003, 12, 25) in TRADING_DATES\n assert not datetime.datetime(2003, 5, 26) in TRADING_DATES # memorial day",
"def test_holidays(self):\n # New Year\n self.assertIn(date(2017, 1, 1), self.holidays)\n self.assertIn(date(2017, 1, 2), self.holidays)\n # Prešeren's day\n self.assertIn(date(2017, 2, 8), self.holidays)\n # Easter monday - 2016 and 2017\n self.assertIn(date(2016, 3, 28), self.holidays)\n self.assertIn(date(2017, 4, 17), self.holidays)\n # Day of uprising against occupation\n self.assertIn(date(2017, 4, 27), self.holidays)\n # Labour day\n self.assertIn(date(2017, 5, 1), self.holidays)\n # Labour day\n self.assertIn(date(2017, 5, 2), self.holidays)\n # Statehood day\n self.assertIn(date(2017, 6, 25), self.holidays)\n # Assumption day\n self.assertIn(date(2017, 8, 15), self.holidays)\n # Reformation day\n self.assertIn(date(2017, 10, 31), self.holidays)\n # Remembrance day\n self.assertIn(date(2017, 11, 1), self.holidays)\n # Christmas\n self.assertIn(date(2017, 12, 25), self.holidays)\n # Day of independence and unity\n self.assertIn(date(2017, 12, 26), self.holidays)",
"def dow(self):\n comparator = Date(11, 12, 2014) # known to be a 'Wednesday'\n DOW = ['Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday', 'Monday', 'Tuesday']\n diff = self.diff(comparator)\n return DOW[diff % 7]",
"def dia_constitucion(year, observed=True):\n if observed:\n return nth_day_of_month(1, MON, FEB, year)\n\n return (year, FEB, 5)",
"def checkdeplaid(incidence):\n if incidence >= 95 and incidence <= 180:\n return 'night'\n elif incidence >=90 and incidence < 95:\n return 'night'\n elif incidence >= 85 and incidence < 90:\n return 'day'\n elif incidence >= 0 and incidence < 85:\n return 'day'\n else:\n return False",
"def doomsday(y):",
"def test_non_holidays(self):\n # January 2nd was not public holiday between 2012 and 2017\n self.assertNotIn(date(2013, 1, 2), self.holidays)\n self.assertNotIn(date(2014, 1, 2), self.holidays)\n self.assertNotIn(date(2015, 1, 2), self.holidays)\n self.assertNotIn(date(2016, 1, 2), self.holidays)",
"def test_is_payday_positive4(self):\n date_to_check = date_class(2020,10,2)\n is_payday = self.pay_cycle.is_payday(date_to_check)\n assert is_payday == True\n\n date_to_check = date_class(2020,10,16)\n is_payday = self.pay_cycle.is_payday(date_to_check)\n assert is_payday == True\n\n date_to_check = date_class(2020,10,30)\n is_payday = self.pay_cycle.is_payday(date_to_check)\n assert is_payday == True",
"def test_wednesday(self):\n date = datetime.date(1988, 5, 4)\n self.assertEqual(date.isoweekday(), 3)\n start_date, end_date = get_weekspan(date)\n self.assertEqual(start_date.isoweekday(), 1)\n self.assertEqual(end_date.isoweekday(), 7)\n self.assertTrue(start_date.toordinal() <= date.toordinal() <= end_date.toordinal())",
"def dow(values, feature, parent): \r\n input_date = values[0]\r\n \r\n # Return dayOfWeek() % 7 so that values range from 0 (sun) to 6 (sat)\r\n # to match Postgresql behaviour\r\n if type(input_date) == QDateTime:\r\n return input_date.date().dayOfWeek() % 7\r\n elif type(input_date) == QDate:\r\n return input_date.dayOfWeek() % 7\r\n elif type(input_date) in (str, unicode): \r\n # Convert string to qdate\r\n input_qdate = QDate.fromString(input_date, 'yyyy-MM-dd')\r\n if input_qdate.isValid():\r\n return input_qdate.dayOfWeek() % 7 \r\n else:\r\n return None",
"def dow(values, feature, parent): \r\n input_date = values[0]\r\n \r\n # Return dayOfWeek() % 7 so that values range from 0 (sun) to 6 (sat)\r\n # to match Postgresql behaviour\r\n if type(input_date) == QDateTime:\r\n return input_date.date().dayOfWeek() % 7\r\n elif type(input_date) == QDate:\r\n return input_date.dayOfWeek() % 7\r\n elif type(input_date) in (str, unicode): \r\n # Convert string to qdate\r\n input_qdate = QDate.fromString(input_date, 'yyyy-MM-dd')\r\n if input_qdate.isValid():\r\n return input_qdate.dayOfWeek() % 7 \r\n else:\r\n return None",
"def test_output_day(self):\n input_ = [\n self.indicator_record(date=datetime.date(2011, 1, 1), value=0.83),\n self.indicator_record(date=datetime.date(2011, 2, 1), value=0.80),\n ]\n output = self.expander._ipca_from_15_expander(input_)\n\n self.assertEqual(output[-1].date.day, 1)",
"def four_day_weekends(*args,\n start_month: int = 8,\n paid_time_off: int = 200,\n year: int = 2020,\n show_workdays: bool = False\n ) -> None:\n\n\n if args:\n raise ValueError(ERROR_MSG)\n else:\n four_day_weekends = workdays = 0\n weekend_dates =[]\n workday_dates = []\n current = date(year,start_month,1)\n current += relativedelta(weekday=FR)\n first_monday = current + relativedelta(weekday=MO(-1))\n if first_monday.year == year and first_monday.month == start_month:\n workday_dates.append(first_monday)\n \n\n\n\n while current.year == year:\n monday = current + relativedelta(weekday=MO)\n thursday = current - relativedelta(days=1)\n if thursday.year == year and thursday not in FEDERAL_HOLIDAYS:\n workday_dates.append(thursday)\n\n\n dates = [current,monday]\n if monday.year == year:\n if all(date not in FEDERAL_HOLIDAYS for date in dates):\n weekend_dates.append((current,monday))\n four_day_weekends += 1\n else:\n if monday not in FEDERAL_HOLIDAYS:\n workday_dates.append(monday)\n if current not in FEDERAL_HOLIDAYS:\n workday_dates.append(current)\n else:\n if current not in FEDERAL_HOLIDAYS:\n workday_dates.append(current)\n\n\n\n\n\n\n current += relativedelta(weeks=1)\n \n\n\n last_thursday = current - relativedelta(days=1)\n if last_thursday.year == year and last_thursday not in FEDERAL_HOLIDAYS:\n workday_dates.append(last_thursday)\n workdays = len(workday_dates)\n\n\n \n if not show_workdays:\n length = len(str(paid_time_off))\n number = 24 \n before_days = paid_time_off//8 \n new_balance = paid_time_off - HOURS * four_day_weekends * 2\n new_days = abs(new_balance // 8)\n title = f'{four_day_weekends} Four-Day Weekend{\"s\" if four_day_weekends != 1 else \"\"}'\n print(f'{title:^{number}}')\n print('='* 24)\n\n labels = ['PTO:','BALANCE:']\n original = [paid_time_off,new_balance]\n new = [before_days,new_days]\n\n \n for label,value_1,value_2 in zip(labels,original,new):\n print(f'{label:>8} {value_1:>{length}} ({value_2} days)')\n\n print()\n \n\n\n start_losing = (four_day_weekends * 2 - before_days)//2\n date_start_losing = None\n if start_losing > 0:\n date_start_losing = weekend_dates[start_losing]\n for i,(weekend_start,weekend_end) in enumerate(weekend_dates):\n print(f\"{weekend_start} - {weekend_end}\",end='')\n if (weekend_start,weekend_end) == date_start_losing:\n print(' *')\n else:\n print()\n\n\n else:\n print(f'Remaining Work Days: {workdays * 8} ({workdays} days)')\n\n\n print('\\n'.join(map(str,workday_dates)))",
"def dow_1(self):\n return self._dayoffset + 1",
"def business_day(self): \n\n if self.time_stamp.weekday() not in (5, 6) and not holiday(self.time_stamp):\n return True \n return False",
"def day_07_a() -> int:\n return 0",
"def fed_holiday(df):\n\n if (df[\"Date\"].month == 1) & (df[\"Date\"].day == 1):\n return \"New Year's Day\"\n elif (df[\"Date\"].month == 1) & (15 <= df[\"Date\"].day <= 21) & (df[\"Date\"].dayofweek == 1):\n return \"Martin Luther King Day\"\n elif (df[\"Date\"].month == 2) & (df[\"Date\"].day == 18):\n return \"President's Day\"\n elif (df[\"Date\"].month == 5) & (25 <= df[\"Date\"].day <= 31) & (df[\"Date\"].dayofweek == 1):\n return \"Memorial Day\"\n elif (df[\"Date\"].month == 7) & (df[\"Date\"].day == 4):\n return \"Independence Day\"\n elif (df[\"Date\"].month == 9) & (1 <= df[\"Date\"].day <= 7) & (df[\"Date\"].dayofweek == 1):\n return \"Labor Day\"\n elif (df[\"Date\"].month == 10) & (8 <= df[\"Date\"].day <= 14) & (df[\"Date\"].dayofweek == 1):\n return \"Columbus Day\"\n elif (df[\"Date\"].month == 11) & (df[\"Date\"].day == 11):\n return \"Veterans Day\"\n elif (df[\"Date\"].month == 11) & (22 <= df[\"Date\"].day <= 28) & (df[\"Date\"].dayofweek == 4):\n return \"Thanksgiving Day\"\n elif (df[\"Date\"].month == 12) & (df[\"Date\"].day == 25):\n return \"Christmas Day\"\n else:\n return \"Non-holidays\"",
"def _labor_day(year):\n day = datetime(year, 9, 1)\n delta = timedelta(days=1)\n while day.weekday() != 0:\n day += delta\n return day",
"def day_06_a() -> int:\n return 0",
"def day_05_a() -> int:\n return 0",
"def is_workfree(date):\n \n return date.weekday() == 6 or is_holiday(date)",
"def indigenous_peoples_day(year, country='usa'):\n if country == 'usa':\n return nth_day_of_month(2, MON, OCT, year)\n\n return (year, OCT, 12)",
"def test_no_weekend_dates(self):\n input_ = [\n self.indicator_record(date=datetime.date(2014, 10, 14), value=0.035657),\n ]\n output = self.expander._daily_workday_indicator_expander(input_)\n no_weekend_dates = [record.date.weekday() < 5 for record in output]\n\n self.assertTrue(all(no_weekend_dates))",
"def day(self):\n return 0",
"def day(self):\n return 0",
"def relative_days(from_day, from_year):\n if from_day == 30:\n relative_days = 2\n return relative_days\n elif from_day == 31:\n relative_days = 1\n return relative_days\n else:\n if calendar.isleap(from_year) == 'false':\n relative_days = 2\n else:\n relative_days = 1\n return relative_days",
"def isoweekday(self):\n return 0",
"def isoweekday(self):\n return 0"
] |
[
"0.6554653",
"0.5694261",
"0.5676444",
"0.56217307",
"0.56001997",
"0.55737984",
"0.55195194",
"0.5491033",
"0.5477347",
"0.54401356",
"0.54395264",
"0.5433216",
"0.5433216",
"0.54014236",
"0.5388311",
"0.53674847",
"0.53637344",
"0.534773",
"0.5320098",
"0.5271103",
"0.5264338",
"0.5257806",
"0.52569497",
"0.52569294",
"0.5256308",
"0.5255406",
"0.5255406",
"0.52545315",
"0.5253808",
"0.5253808"
] |
0.73383605
|
0
|
In most jurisdictions in the United States, Election Day occurs on the first Tuesday in November.
|
def election_day(year):
return nth_day_of_month(1, TUE, NOV, year)
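nth_day_of_month itself is not shown in this row, so the following is only a sketch of how such a helper could work, assuming TUE = 1 and NOV = 11 under the Monday = 0 convention used by the calendar module:

import calendar

TUE, NOV = 1, 11   # assumed constants, Monday = 0 convention

def nth_day_of_month(n, weekday, month, year):
    # Return (year, month, day) for the nth occurrence of the given weekday.
    # Note: this sketch does not check that the nth occurrence actually exists.
    first_weekday, _ = calendar.monthrange(year, month)
    offset = (weekday - first_weekday) % 7
    return (year, month, 1 + offset + 7 * (n - 1))

# The first Tuesday of November 2020 was November 3.
assert nth_day_of_month(1, TUE, NOV, 2020) == (2020, NOV, 3)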
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def meetup_day(year, month, day_of_week, day_occurrence):\n \n cal = calendar.monthcalendar(year, month)\n day_of_week_index = days_of_week[day_of_week]\n \n not_teenth = day_occurrence != 'teenth'\n day_is_in_first_week = cal[0][day_of_week_index] != 0\n \n if not_teenth and day_is_in_first_week:\n week_index = week_indices[day_occurrence]\n \n elif not_teenth and not day_is_in_first_week:\n week_index = week_indices[day_occurrence] + 1\n \n else:\n for i in range(len(cal)):\n if cal[i][day_of_week_index] >= 10:\n week_index = i\n break\n\n date = cal[week_index][day_of_week_index]\n return datetime.date(year, month, date)",
"def day_06_a() -> int:\n return 0",
"def meetup_day(year, month, dow, wom):\n first_dow = monthrange(year, month)[0]\n days_in_month = monthrange(year, month)[1]\n possible_dates = []\n print str(year) + str(month) + dow + wom\n\n \"\"\"Build dictionary of possible dates based on dow\"\"\"\n for day in range(1, days_in_month+1):\n if datetime.date(year, month, day).strftime(\"%A\") == dow:\n print day\n possible_dates.extend([day])\n\n \"\"\"Perform logic on wom constraint\"\"\"\n if wom == \"teenth\":\n for day in possible_dates:\n if day > 12 and day < 20:\n return datetime.date(year, month, day)\n elif wom == \"last\":\n return datetime.date(year, month, possible_dates[-1])\n else:\n return datetime.date(year, month, possible_dates[ int(wom[:1]) - 1 ])",
"def day(self):\n return 0",
"def day(self):\n return 0",
"def day_06_b() -> int:\n return 0",
"def wkday_on_first(yr, mon): # returns day of week of first of month of the given year (1/1/2016)\r\n TotalDays = 0\r\n for x in range(1754, yr):\r\n YearNum = yeardays(x)\r\n TotalDays += YearNum\r\n for x in range(1, mon):\r\n MonNum = monthdays(yr, x)\r\n TotalDays += MonNum\r\n WhatDayNum = TotalDays % 7\r\n WhatDay = [\"Tues\", \"Wedn\", \"Thu\", \"Fri\", \"Sat\", \"Mon\"]\r\n return WhatDay[WhatDayNum]",
"def indigenous_peoples_day(year, country='usa'):\n if country == 'usa':\n return nth_day_of_month(2, MON, OCT, year)\n\n return (year, OCT, 12)",
"def test_monday(self):\n date = datetime.date(1981, 5, 4)\n self.assertEqual(date.isoweekday(), 1)\n start_date, end_date = get_weekspan(date)\n self.assertEqual(start_date.isoweekday(), 1)\n self.assertEqual(end_date.isoweekday(), 7)\n self.assertTrue(start_date.toordinal() <= date.toordinal() <= end_date.toordinal())",
"def day_of_the_week(arg):",
"def test_tuesday(self):\n date = datetime.date(1982, 5, 4)\n self.assertEqual(date.isoweekday(), 2)\n start_date, end_date = get_weekspan(date)\n self.assertEqual(start_date.isoweekday(), 1)\n self.assertEqual(end_date.isoweekday(), 7)\n self.assertTrue(start_date.toordinal() <= date.toordinal() <= end_date.toordinal())",
"def doomsday(y):",
"def day_07_a() -> int:\n return 0",
"def weekday(self):\n return 0",
"def weekday(self):\n return 0",
"def meetup_day(year, month, day, position):\n\n # Create a numerical mapping for weekday\n mapWeekday = { \n 'Monday': 0,\n 'Tuesday': 1,\n 'Wednesday': 2,\n 'Thursday': 3,\n 'Friday': 4,\n 'Saturday': 5,\n 'Sunday': 6\n }\n\n # Create a numerical mapping for weekday position within month\n mapPosition = {\n '1st': 0,\n '2nd': 1,\n '3rd': 2,\n '4th': 3,\n '5th': 4,\n 'last': -1,\n 'teenth': 1\n }\n\n # map day argument to number\n day = mapWeekday[day]\n\n # Map weekday position to number\n posIndex = mapPosition[position]\n\n # Get max days in month provided\n mRange = monthrange(year, month)\n\n # Calculate possible dates of weekday provided in that month and year\n possibleDates = [ p for p in range(1, mRange[1]+1) if date(year, month, p).weekday() == day ]\n\n # Adjust teenth (for flaw in calculation methodology)\n if position == 'teenth' and possibleDates[posIndex] < 13:\n posIndex += 1\n\n # Return date\n return date(year, month, possibleDates[posIndex])",
"def day_of_week(self):\n # 1 Jan 0001 was Monday according to the proleptic Gregorian calendar.\n # So, 1 Jan 0001 has ordinal 1, and the weekday is 0.\n return (self._ordinals - 1) % 7",
"def day_05_a() -> int:\n return 0",
"def day_07_b() -> int:\n return 0",
"def _labor_day(year):\n day = datetime(year, 9, 1)\n delta = timedelta(days=1)\n while day.weekday() != 0:\n day += delta\n return day",
"def main():\n print(day_of_week(datetime.now()))\n print(day_of_week(datetime(2019, 7, 4)))\n print(day_of_week(datetime(2013, 12, 25)))\n print(day_of_week(datetime(2000, 1, 1)))",
"def test_sunday(self):\n date = datetime.date(1980, 5, 4)\n self.assertEqual(date.isoweekday(), 7)\n start_date, end_date = get_weekspan(date)\n self.assertEqual(start_date.isoweekday(), 1)\n self.assertEqual(end_date.isoweekday(), 7)\n self.assertTrue(start_date.toordinal() <= date.toordinal() <= end_date.toordinal())",
"def day_of_week_for_start_day(self):\n import calendar\n\n day = self.idfobjects[\"RUNPERIOD\"][0][\"Day_of_Week_for_Start_Day\"]\n\n if day.lower() == \"sunday\":\n return calendar.SUNDAY\n elif day.lower() == \"monday\":\n return calendar.MONDAY\n elif day.lower() == \"tuesday\":\n return calendar.TUESDAY\n elif day.lower() == \"wednesday\":\n return calendar.WEDNESDAY\n elif day.lower() == \"thursday\":\n return calendar.THURSDAY\n elif day.lower() == \"friday\":\n return calendar.FRIDAY\n elif day.lower() == \"saturday\":\n return calendar.SATURDAY\n else:\n return 0",
"def test_wednesday(self):\n date = datetime.date(1988, 5, 4)\n self.assertEqual(date.isoweekday(), 3)\n start_date, end_date = get_weekspan(date)\n self.assertEqual(start_date.isoweekday(), 1)\n self.assertEqual(end_date.isoweekday(), 7)\n self.assertTrue(start_date.toordinal() <= date.toordinal() <= end_date.toordinal())",
"def test_one_date(self):\n result = beautiful_days_at_the_movies(10, 10, 6)\n self.assertEquals(result, 0)",
"def Day_of_week(day, month, year):\r\n if year % 4 == 0 and (year % 400 == 0 or year % 100 != 0):\r\n doomsday = [11, 29, 21, 4, 9, 6, 11, 8, 5, 10, 7, 12]\r\n else:\r\n doomsday = [10, 28, 21, 4, 9, 6, 11, 8, 5, 10, 7, 12]\r\n exact_day = ((day - doomsday[month-1]) + Dooms_day(year)) % 7\r\n character_day = [\"Sunday\", \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \r\n \"Friday\", \"Saturday\"]\r\n return character_day[exact_day]",
"def test_ordinal_first(self):\n with open(\"tests/data_files/labor_day_dates.txt\", \"r\") as dates_file:\n dates_strings_list = dates_file.read().splitlines()\n\n for date_string in dates_strings_list:\n test_date = date(*[int(p) for p in date_string.split(\"-\")])\n labor_day = get_by_values(Ordinal.first, Weekday.Monday, Month.September, test_date.year)\n\n self.assertEquals(test_date, labor_day)",
"def MayDay(year):\n\n day = datetime.date(year, 5, 1)\n count = 0\n while True:\n if day.weekday() == 0:\n count += 1\n if count == 1:\n return day\n day += datetime.timedelta(days=1)",
"def get_first_day_of_the_week(self):\n if SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=1\n ).exists():\n return 1\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=2\n ).exists():\n return 2\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=3\n ).exists():\n return 3\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=4\n ).exists():\n return 4\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=5\n ).exists():\n return 5\n else:\n return 6",
"def test_saturday(self):\n date = datetime.date(1985, 5, 4)\n self.assertEqual(date.isoweekday(), 6)\n start_date, end_date = get_weekspan(date)\n self.assertEqual(start_date.isoweekday(), 1)\n self.assertEqual(end_date.isoweekday(), 7)\n self.assertTrue(start_date.toordinal() <= date.toordinal() <= end_date.toordinal())"
] |
[
"0.66748834",
"0.5912463",
"0.5869916",
"0.5838318",
"0.5838318",
"0.5784695",
"0.57811135",
"0.5774078",
"0.5763169",
"0.5745868",
"0.5739217",
"0.5718777",
"0.5713595",
"0.56897986",
"0.56897986",
"0.5679064",
"0.564338",
"0.5631577",
"0.5598538",
"0.5595918",
"0.55901814",
"0.55698717",
"0.5562325",
"0.55391216",
"0.551752",
"0.54930526",
"0.5486497",
"0.5482329",
"0.5468863",
"0.54613453"
] |
0.6208247
|
1
|
Christmas is celebrated on the 25th of December. For the purposes of business closings, it may be observed on the previous or following day if the 25th falls on a Saturday or Sunday, respectively.
|
def christmas(year, observed=None):
day = 25
if observed:
weekday = calendar.weekday(year, DEC, 25)
if weekday == SAT:
day = 24
if weekday == SUN:
day = 26
return (year, DEC, day)
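A minimal observed-day check along the same lines as the Independence Day entry, assuming DEC = 12 and the same SAT = 5 / SUN = 6 constants:

import calendar

DEC, SAT, SUN = 12, 5, 6   # assumed values, matching calendar.weekday()

# December 25 fell on a Saturday in 2021 and on a Sunday in 2022.
assert christmas(2021, observed=True) == (2021, DEC, 24)
assert christmas(2022, observed=True) == (2022, DEC, 26)
assert christmas(2023) == (2023, DEC, 25)   # observed not requested, no shift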
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def days_until_christmas(d):\n import datetime\n\n if d == None:\n return None\n\n xmas_this = datetime.date(d.year, 12, 25)\n xmas_next = datetime.date(d.year + 1, 12, 25)\n\n if d == xmas_this:\n return 0\n elif d < xmas_this:\n return (xmas_this - d).days\n else:\n return (xmas_next - d).days",
"def test_after():\n date = datetime.datetime\n\n assert CALENDAR.nth_trading_day_after(3, date(2005, 6, 13)) == date(2005, 6, 16)\n assert CALENDAR.nth_trading_day_after(0, date(2005, 6, 13)) == date(2005, 6, 13)\n assert CALENDAR.nth_trading_day_after(0, date(2005, 6, 18)) == date(2005, 6, 20)",
"def doomsday(y):",
"def bussines_days(year_month):\n year, month = year_month\n date = datetime(\n year,\n month,\n calendar.monthrange(year, month)[1]\n )\n\n return sum([\n 1 for x in xrange(1, date.day)\n if datetime(year, month, x).weekday() < 5\n ]) * (60 * 60 * 8)",
"def day_05_b() -> int:\n return 0",
"def day_06_b() -> int:\n return 0",
"def day_07_b() -> int:\n return 0",
"def n_business_days(self, n=-2):\n\n business_days = 0\n calendar_days = 0 \n if n != 0:\n step = int(n/abs(n))\n while business_days != abs(n):\n calendar_days = calendar_days + step\n if business_day(self.time_stamp + timedelta(calendar_days)):\n business_days = business_days + 1\n return self.time_stamp + timedelta(calendar_days)\n return date",
"def wkday_on_first(yr, mon): # returns day of week of first of month of the given year (1/1/2016)\r\n TotalDays = 0\r\n for x in range(1754, yr):\r\n YearNum = yeardays(x)\r\n TotalDays += YearNum\r\n for x in range(1, mon):\r\n MonNum = monthdays(yr, x)\r\n TotalDays += MonNum\r\n WhatDayNum = TotalDays % 7\r\n WhatDay = [\"Tues\", \"Wedn\", \"Thu\", \"Fri\", \"Sat\", \"Mon\"]\r\n return WhatDay[WhatDayNum]",
"def get_next_day(self):\n pass",
"def test_wednesday(self):\n date = datetime.date(1988, 5, 4)\n self.assertEqual(date.isoweekday(), 3)\n start_date, end_date = get_weekspan(date)\n self.assertEqual(start_date.isoweekday(), 1)\n self.assertEqual(end_date.isoweekday(), 7)\n self.assertTrue(start_date.toordinal() <= date.toordinal() <= end_date.toordinal())",
"def Friday_occur_from_2000(day, month, year):\r\n Day_on_13 = [Day_of_week(13, s, t) \\\r\n for s in xrange(1, 13) for t in xrange(2000, year)]\r\n count_from_2000 = Day_on_13.count(\"Friday\")\r\n \r\n if day >= 13:\r\n count_from_2000 += \\\r\n [Day_of_week(13, s, year) for s in xrange(1, month+1)].count(\"Friday\")\r\n else:\r\n count_from_2000 += \\\r\n [Day_of_week(13, s, year) for s in xrange(1, month)].count(\"Friday\")\r\n return count_from_2000",
"def what_night_is_it():\n d = datetime.datetime.utcnow() - datetime.timedelta(7 / 24 + 0.5)\n tonight = int(d.strftime('%Y%m%d'))\n return tonight",
"def day_of_week(dt):\n cday = dt\n mday = 2\n uday = cday.isocalendar()[2] + mday\n try:\n if uday > 7:\n CURRDAY = uday - 7\n log.debug(\"1;EME;RUNNING;000;Scheduler.py;Setting customized day of week>7 : \", CURRDAY)\n else:\n CURRDAY = uday\n log.debug(\"1;EME;RUNNING;000;Scheduler.py;Setting customized day of week : \", CURRDAY)\n return CURRDAY\n except Exception as e:\n log.exception(\"1;EME;FAILURE;700;SCHEDULE ERROR \" + str(e), exc_info=False)\n sys.exit(0)",
"def day_05_a() -> int:\n return 0",
"def tomorrow(self):\n if self.isLeapYear():\n fdays = 29\n else:\n fdays = 28\n\n DIM = [0, 31, fdays, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n\n currentDay = self.day\n maxDay = DIM[self.month]\n\n if currentDay == maxDay and self.month == 12:\n self.year += 1\n self.month = 1\n self.day = 1\n elif currentDay == maxDay:\n self.month += 1\n self.day = 1\n else:\n self.day += 1",
"def weekly():",
"def test_suite():\r\n test(turn_clockwise(\"N\") == \"E\")\r\n test(turn_clockwise(\"W\") == \"N\")\r\n test(turn_clockwise(42) == None)\r\n test(turn_clockwise(\"rubbish\") == None)\r\n test(day_name(3) == \"Wednesday\")\r\n test(day_name(6) == \"Saturday\")\r\n test(day_name(42) == None)\r\n test(day_num(\"Friday\") == 5)\r\n test(day_num(\"Sunday\") == 0)\r\n test(day_num(day_name(3)) == 3)\r\n test(day_name(day_num(\"Thursday\")) == \"Thursday\")\r\n test(day_num(\"Halloween\") == None)\r\n test(day_add(\"Monday\", 4) == \"Friday\")\r\n test(day_add(\"Tuesday\", 0) == \"Tuesday\")\r\n test(day_add(\"Tuesday\", 14) == \"Tuesday\")\r\n test(day_add(\"Sunday\", 100) == \"Tuesday\")\r\n test(day_add(\"Sunday\", -1) == \"Saturday\")\r\n test(day_add(\"Sunday\", -7) == \"Sunday\")\r\n test(day_add(\"Tuesday\", -100) == \"Sunday\")\r\n test(days_in_month(\"February\") == 28)\r\n test(days_in_month(\"December\") == 31)\r\n test(to_secs(2, 30, 10) == 9010)\r\n test(to_secs(2, 0, 0) == 7200)\r\n test(to_secs(0, 2, 0) == 120)\r\n test(to_secs(0, 0, 42) == 42)\r\n test(to_secs(0, -10, 10) == -590)\r\n test(to_secs(2.5, 0, 10.71) == 9010)\r\n test(to_secs(2.433,0,0) == 8758)\r\n test(hours_in(9010) == 2)\r\n test(minutes_in(9010) == 30)\r\n test(seconds_in(9010) == 10)\r\n test(compare(5, 4) == 1)\r\n test(compare(7, 7) == 0)\r\n test(compare(2, 3) == -1)\r\n test(compare(42, 1) == 1)\r\n test(hypotenuse(3, 4) == 5.0)\r\n test(hypotenuse(12, 5) == 13.0)\r\n test(hypotenuse(24, 7) == 25.0)\r\n test(hypotenuse(9, 12) == 15.0)\r\n test(slope(5, 3, 4, 2) == 1.0)\r\n test(slope(1, 2, 3, 2) == 0.0)\r\n test(slope(1, 2, 3, 3) == 0.5)\r\n test(slope(2, 4, 1, 2) == 2.0)\r\n test(intercept(1, 6, 3, 12) == 3.0)\r\n test(intercept(6, 1, 1, 6) == 7.0)\r\n test(intercept(4, 6, 12, 8) == 5.0)\r\n test(is_even(42))\r\n test(not is_even(7))\r\n test(is_even(0))\r\n test(is_even(-6))\r\n test(not is_even(-13))\r\n test(not is_odd(18))\r\n test(is_odd(71))\r\n test(not is_odd(0))\r\n test(not is_odd(-14))\r\n test(is_odd(-21))\r\n test(is_factor(3, 12))\r\n test(not is_factor(5, 12))\r\n test(is_factor(7, 14))\r\n test(not is_factor(7, 15))\r\n test(is_factor(1, 15))\r\n test(is_factor(15, 15))\r\n test(not is_factor(25, 15))\r\n test(is_multiple(12, 3))\r\n test(is_multiple(12, 4))\r\n test(not is_multiple(12, 5))\r\n test(is_multiple(12, 6))\r\n test(not is_multiple(12, 7))\r\n test(f2c(212) == 100) # Boiling point of water\r\n test(f2c(32) == 0) # Freezing point of water\r\n test(f2c(-40) == -40) # Wow, what an interesting case!\r\n test(f2c(36) == 2)\r\n test(f2c(37) == 3)\r\n test(f2c(38) == 3)\r\n test(f2c(39) == 4)\r\n test(c2f(0) == 32)\r\n test(c2f(100) == 212)\r\n test(c2f(-40) == -40)\r\n test(c2f(12) == 54)\r\n test(c2f(18) == 64)\r\n test(c2f(-48) == -54)",
"def business_day(self): \n\n if self.time_stamp.weekday() not in (5, 6) and not holiday(self.time_stamp):\n return True \n return False",
"def test_is_payday_positive_25(self):\n expected_count = 3\n expected_paydays = [\n date_class(2020,10,2), \n date_class(2020,10,16), \n date_class(2020,10,30)\n ]\n\n curr_date = date_class(2020,10,1)\n end_date = date_class(2020,10,31)\n paydays = []\n\n while curr_date <= end_date:\n is_payday = pay_cycle_object().is_payday(curr_date)\n if is_payday: \n paydays.append(curr_date)\n curr_date += timedelta(days=1)\n\n assert len(paydays) == expected_count, \\\n f'Got {len(paydays)}, expected {expected_count}'\n assert paydays == expected_paydays, \\\n f'Got {paydays}, expected {expected_paydays}'",
"def is_in_advent() -> bool:\n # Run the code from the 1st to the 24th\n return datetime.now(EST).day in range(1, 25) and datetime.now(EST).month == 12",
"async def _night(self, ctx: Context):\n\n guild: discord.Guild = ctx.guild\n\n data = await self.config.guild(guild).all()\n\n cycle = data['cycle']\n\n number = cycle['number']\n\n msg = await ctx.send(\n _(\n \"Are you sure you want to start night {}? Make sure you\"\n \" have already posted the night starting text.\"\n ).format(number)\n )\n start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)\n\n pred = ReactionPredicate.yes_or_no(msg, ctx.author)\n await ctx.bot.wait_for(\"reaction_add\", check=pred)\n\n if not pred.result:\n return await ctx.send(_(\"Process aborted.\"))\n\n day = guild.get_channel(cycle['day'])\n vote = guild.get_channel(cycle['vote'])\n night = guild.get_channel(cycle['night'])\n\n night_overwrites = day.overwrites\n day_overwrites = {\n guild.default_role: discord.PermissionOverwrite(\n read_messages=True,\n send_messages=False,\n add_reactions=False,\n )\n }\n\n await day.edit(overwrites=day_overwrites)\n await vote.edit(overwrites=day_overwrites)\n await night.edit(overwrites=night_overwrites)\n\n na_channel = await self.check_na_channel(guild)\n if not na_channel:\n na_channel = await self.create_na_channel(guild)\n\n try:\n await na_channel.send(\n _(\"**Night {} begins!**\\n\\n\\n\\n\\u200b\").format(number)\n )\n except discord.Forbidden:\n pass\n\n await ctx.send(_(\"Night {} channel opened.\").format(number))",
"def test_tuesday(self):\n date = datetime.date(1982, 5, 4)\n self.assertEqual(date.isoweekday(), 2)\n start_date, end_date = get_weekspan(date)\n self.assertEqual(start_date.isoweekday(), 1)\n self.assertEqual(end_date.isoweekday(), 7)\n self.assertTrue(start_date.toordinal() <= date.toordinal() <= end_date.toordinal())",
"def test_before():\n date = datetime.datetime\n\n #july forth is this week\n assert CALENDAR.nth_trading_day_before(3, date(2006, 7, 7)) == date(2006, 7, 3)\n assert CALENDAR.nth_trading_day_before(0, date(2006, 7, 7)) == date(2006, 7, 7)\n assert CALENDAR.nth_trading_day_before(0, date(2006, 7, 8)) == date(2006, 7, 7)",
"def test_second_date_static_3(self):\n input_ = (datetime.date(1993, 1, 31), datetime.date(1993, 3, 1))\n expected = (datetime.date(1993, 2, 1), datetime.date(1993, 3, 1))\n actual = self.expander._get_next_days(*input_)\n\n self.assertEqual(expected, actual)",
"def test_next_workday_at_10():\n saturday_at_8am = datetime(2017, 4, 1, 8, tzinfo=timezone.utc)\n assert next_workday_at_10(saturday_at_8am) == datetime(2017, 4, 3, 10, tzinfo=timezone.utc)\n tuesday_at_4am = datetime(2017, 4, 4, 4, tzinfo=timezone.utc)\n assert next_workday_at_10(tuesday_at_4am) == datetime(2017, 4, 5, 10, tzinfo=timezone.utc)\n wednesday_at_3pm = datetime(2017, 4, 5, 15, tzinfo=timezone.utc)\n assert next_workday_at_10(wednesday_at_3pm) == datetime(2017, 4, 6, 10, tzinfo=timezone.utc)",
"def is_no_bell_day():\n today = time.localtime()[:3]\n for r in no_bell:\n if today >= r[0] and today <= r[1]:\n return True\n return False",
"def _get_next_monday(self):\n today = datetime.date.today()\n weekday_int = today.weekday()\n if weekday_int == 0:\n return today\n next_mon = today + timedelta(7 - weekday_int)\n return next_mon",
"def day_07_a() -> int:\n return 0",
"def main():\r\n day, mth = int(input()), int(input())\r\n mths = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\r\n wks = [\"Saturday\", \"Sunday\", \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\"]\r\n yrday = sum(mths[:mth-1])+day\r\n cur_idx = 0\r\n for _ in range(yrday):\r\n if cur_idx == 6:\r\n cur_idx = 0\r\n else:\r\n cur_idx += 1\r\n print(wks[cur_idx-1])"
] |
[
"0.5951745",
"0.58248746",
"0.5754146",
"0.5656661",
"0.5640452",
"0.55025434",
"0.5470026",
"0.5448984",
"0.5358766",
"0.5263799",
"0.5253447",
"0.52255183",
"0.5210289",
"0.52096266",
"0.5208944",
"0.5208575",
"0.5197862",
"0.51868194",
"0.51768965",
"0.5165653",
"0.5148891",
"0.5146154",
"0.5140644",
"0.5133763",
"0.51242447",
"0.51198345",
"0.511209",
"0.5098595",
"0.50908524",
"0.5080083"
] |
0.64039904
|
0
|
Tu BeShvat, the 'New Year of Trees', is celebrated on the 15th of Shevat.
|
def tu_beshvat(year, eve=None):
jd = hebrew.to_jd_gregorianyear(year, hebrew.SHEVAT, 15)
if eve:
jd = jd - 1
return gregorian.from_jd(jd)
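A minimal usage sketch, assuming the hebrew and gregorian helpers come from the convertdate package (which provides to_jd_gregorianyear, from_jd and the SHEVAT month constant):

from convertdate import gregorian, hebrew  # assumed source of the helpers

# Tu BeShvat in Gregorian year 2021 fell on January 28.
print(tu_beshvat(2021))             # expected (2021, 1, 28)
print(tu_beshvat(2021, eve=True))   # expected (2021, 1, 27), the eve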
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def twenty_seventeen():\n return 2017",
"def yearlyDepreciation():\n return .10",
"def increment_year(self):",
"def sukkot(year, eve=None):\n jd = hebrew.to_jd_gregorianyear(year, hebrew.TISHRI, 15)\n if eve:\n jd = jd - 1\n return gregorian.from_jd(jd)",
"def new_years_eve(year):\n return (year, DEC, 31)",
"def test_20th_century(self):\r\n season = \"1989-90\"\r\n res = get_end_year(season)\r\n assert res == 1990",
"def showNextYear(self):\n pass",
"def new_year(dacycle):\n\n this_year = dacycle['time.start'].year\n prev_year = (dacycle['time.start']-dacycle['cyclelength']).year\n\n return (this_year != prev_year)",
"def showPreviousYear(self):\n pass",
"def run_year(self, year):\n pass",
"def year_cost_rule(_m, y):\r\n\r\n return sum(m.RHO[y, s] * m.SCEN[y, s] for s in m.S)",
"def voto(ano):\n\tfrom datetime import date\n\t\n\t#idade recebe o ano atual menos o ano de nascimento\n\tidade = date.today().year - ano\n\t\n\tprint(f'Com {idade} anos: Voto', end=' ')\n\tif idade < 16:\n\t\treturn 'Negado'\n\telif 16 <= idade < 18 or idade > 65:\n\t\treturn 'Opcional'\n\telse:\n\t\treturn 'Obrigatorio'",
"def test_21st_century(self):\r\n season = \"2019-20\"\r\n res = get_end_year(season)\r\n assert res == 2020",
"def calendar_year(cls, tee):\n return iround(((tee - OldHindu.EPOCH) / cls.SIDEREAL_YEAR) - (cls.solar_longitude(tee) / 360))",
"def test_change_of_year(self):\n\n input_ = [\n self.indicator_record(date=datetime.date(2006, 11, 1), value=0.31),\n self.indicator_record(date=datetime.date(2006, 12, 1), value=0.48),\n ]\n output = self.expander._ipca_from_15_expander(input_)\n expected = self.indicator_record(date=datetime.date(2007, 1, 1), value=0.35)\n actual = output[-1]\n\n self.assertEqual(expected, actual)",
"def closeyear(year):\n\n # Return the specific year\n return int(year % 4)",
"def yr_fewest_movies():\n copy = movies.copy()\n year = copy.sort_values(['Number of Movies', 'Year']).reset_index(drop = True).Year.loc[0]\n return ('yr_fewest_movies', year)",
"def end_year(self) -> float:\n\n end_year = -np.inf\n for td_table in list(self.tdve.values()) + self.transfers + self.interpops:\n if len(td_table.tvec) and np.amax(td_table.tvec) > end_year:\n end_year = np.amax(td_table.tvec)\n return end_year",
"def getagefromyear(year=None):\n if year is None:\n print(\"Please enter the year to assign class to them\")\n try:\n t = datetime.datetime.today()\n b = datetime.datetime.strptime(str(year), '%Y')\n a = (t - b).days / 365\n a = int(a)\n if (a < 10) or (a > 80):\n a = None\n except:\n a = None\n return a",
"def calendar_year(cls, tee):\n return iround(((tee - OldHindu.EPOCH) / cls.MEAN_SIDEREAL_YEAR) - (sidereal_solar_longitude(tee) / 360))",
"def tisha_bav(year, eve=None):\n jd = hebrew.to_jd_gregorianyear(year, hebrew.AV, 9)\n if jwday(jd) == SAT:\n jd = jd + 1\n if eve:\n jd = jd - 1\n return gregorian.from_jd(jd)",
"def getCurrentYear(self):\n return math.ceil(self.wcount / 48)",
"def _year_of_graduation(self):\n return str((12 - int(self._grade)) + self._this_year)[2:]",
"def start_year(self) -> float:\n\n start_year = np.inf\n for td_table in list(self.tdve.values()) + self.transfers + self.interpops:\n if len(td_table.tvec) and np.amin(td_table.tvec) < start_year:\n start_year = np.amin(td_table.tvec)\n return start_year",
"def cumulative_CH4(year):\n f1 = 0.5 # Unitless\n f2 = 0.15 # Unitless\n alpha = 1.27e-13 # Radiative forcing (W/m2/kg)\n tau = 12.4 # Lifetime (years)\n return (1 + f1 + f2) * alpha * tau * (1 - np.exp(-year / tau))",
"def get_contribution_for_year(self, year: int):",
"def get_current_hockey_year():\n\n today = date.today()\n\n # if we are in the end of a hockey year (anytime from jan 1 until next season \"sept\")\n if today.month <= 8: \n return get_last_year() + get_current_year()\n\n\n else: # if month >= 9 (Sept)\n return get_current_year() + get_next_year()",
"def year(self):\n return self._years",
"def years_since_vernal_equinox(dt):\n year_start = type(dt)(dt.year, 3, 20, 12)\n year_end = type(dt)(dt.year+1, 3, 20, 12)\n return (dt - year_start).total_seconds() / (year_end - year_start).total_seconds()",
"def current_season():\n td = datetime.datetime.today()\n if td.month > 8:\n return td.year\n return td.year - 1"
] |
[
"0.6772096",
"0.64986193",
"0.6126908",
"0.6072159",
"0.606915",
"0.59305495",
"0.5918589",
"0.5857435",
"0.5834787",
"0.5815584",
"0.5772973",
"0.5769094",
"0.5739822",
"0.5701493",
"0.56905806",
"0.5651845",
"0.56207174",
"0.56139946",
"0.5611614",
"0.5566634",
"0.5548395",
"0.5533086",
"0.5529075",
"0.552359",
"0.5518318",
"0.54947245",
"0.5486686",
"0.54790425",
"0.5469189",
"0.54550606"
] |
0.6524505
|
1
|
Revolution Day, a public holiday in Mexico observed on the third Monday in November.
|
def dia_revolucion(year):
return nth_day_of_month(3, MON, NOV, year)
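Assuming the same hypothetical nth_day_of_month helper sketched under the Election Day entry, with MON = 0 and NOV = 11:

MON, NOV = 0, 11   # assumed constants, Monday = 0 convention

# November 1, 2021 was a Monday, so the Mondays fall on the 1st, 8th and 15th.
assert dia_revolucion(2021) == (2021, NOV, 15)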
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def election_day(year):\n return nth_day_of_month(1, TUE, NOV, year)",
"def wkday_on_first(yr, mon): # returns day of week of first of month of the given year (1/1/2016)\r\n TotalDays = 0\r\n for x in range(1754, yr):\r\n YearNum = yeardays(x)\r\n TotalDays += YearNum\r\n for x in range(1, mon):\r\n MonNum = monthdays(yr, x)\r\n TotalDays += MonNum\r\n WhatDayNum = TotalDays % 7\r\n WhatDay = [\"Tues\", \"Wedn\", \"Thu\", \"Fri\", \"Sat\", \"Mon\"]\r\n return WhatDay[WhatDayNum]",
"def nth_dow_to_day(tupel, y):\r\n m = tupel[0]\r\n dow = tupel[1]\r\n n = tupel[2]\r\n\r\n if dow == 7:\r\n dow = 0\r\n\r\n first_dow = date_to_dow(y, m, 1) # the dow of the first of the month\r\n shift = dow - first_dow\r\n if shift < 0:\r\n shift += 7\r\n\r\n return shift + (7 * n) - 6",
"def meetup_day(year, month, dow, wom):\n first_dow = monthrange(year, month)[0]\n days_in_month = monthrange(year, month)[1]\n possible_dates = []\n print str(year) + str(month) + dow + wom\n\n \"\"\"Build dictionary of possible dates based on dow\"\"\"\n for day in range(1, days_in_month+1):\n if datetime.date(year, month, day).strftime(\"%A\") == dow:\n print day\n possible_dates.extend([day])\n\n \"\"\"Perform logic on wom constraint\"\"\"\n if wom == \"teenth\":\n for day in possible_dates:\n if day > 12 and day < 20:\n return datetime.date(year, month, day)\n elif wom == \"last\":\n return datetime.date(year, month, possible_dates[-1])\n else:\n return datetime.date(year, month, possible_dates[ int(wom[:1]) - 1 ])",
"def _labor_day(year):\n day = datetime(year, 9, 1)\n delta = timedelta(days=1)\n while day.weekday() != 0:\n day += delta\n return day",
"def day_07_b() -> int:\n return 0",
"def get_mothers_day_date(year):\n day = date(year=year, month=5, day=1)\n while 1:\n if day.weekday() == 6:\n day += timedelta(days=7)\n break\n day += timedelta(days=1)\n return day",
"def doomsday(y):",
"def weekday(day):\n return (day % 7) - 1",
"def day_06_b() -> int:\n return 0",
"def day_07_a() -> int:\n return 0",
"def meetup_day(year, month, day_of_week, day_occurrence):\n \n cal = calendar.monthcalendar(year, month)\n day_of_week_index = days_of_week[day_of_week]\n \n not_teenth = day_occurrence != 'teenth'\n day_is_in_first_week = cal[0][day_of_week_index] != 0\n \n if not_teenth and day_is_in_first_week:\n week_index = week_indices[day_occurrence]\n \n elif not_teenth and not day_is_in_first_week:\n week_index = week_indices[day_occurrence] + 1\n \n else:\n for i in range(len(cal)):\n if cal[i][day_of_week_index] >= 10:\n week_index = i\n break\n\n date = cal[week_index][day_of_week_index]\n return datetime.date(year, month, date)",
"def dow(self):\n comparator = Date(11, 12, 2014) # known to be a 'Wednesday'\n DOW = ['Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday', 'Monday', 'Tuesday']\n diff = self.diff(comparator)\n return DOW[diff % 7]",
"def day_06_a() -> int:\n return 0",
"def weekday(self):\n return 0",
"def weekday(self):\n return 0",
"def indigenous_peoples_day(year, country='usa'):\n if country == 'usa':\n return nth_day_of_month(2, MON, OCT, year)\n\n return (year, OCT, 12)",
"def day_of_the_week(arg):",
"def DayOfWeek(year, month, day):\n num = year * 365\n num = num + year // 4 + 1\n num = num - (year // 100 + 1)\n num = num + year // 400 + 1\n if month < 3 and LeapYear(year):\n num = num - 1\n return (num + MONTH_OFFSETS[month - 1] + day + 4) % 7 + 1",
"def holiday_type() -> Holiday:\n return Holiday.CHRISTMAS",
"def dow_1(self):\n return self._dayoffset + 1",
"def weekday(self):\n return (self.toordinal() + 6) % 7",
"def day_of_week(day, month, year):\n bias = (14 - month) // 12\n m_year = year - bias\n mth = month + 12 * bias - 2\n return (day + m_year + m_year // 4 - m_year // 100 + m_year // 400 + (31 * mth) // 12) % 7",
"def add_holiday(date):\n\n holiday_dates = ['09/08/2019', '10/08/2019', '11/08/2019', '12/08/2019',\n '13/08/2019', '14/08/2019', '15/08/2019', '16/08/2019', '17/08/2019',\n '18/08/2019', '19/08/2019', '20/08/2019', '21/08/2019', '22/08/2019',\n '23/08/2019', '24/08/2019', '25/08/2019', '26/08/2019', '27/08/2019',\n '28/08/2019', '29/08/2019', '30/08/2019', '31/08/2019', '28/10/2019',\n '29/10/2019', '30/10/2019', '31/10/2019', '20/12/2019', '21/12/2019',\n '22/12/2019', '23/12/2019', '24/12/2019', '25/12/2019', '26/12/2019',\n '27/12/2019', '28/12/2019', '29/12/2019', '30/12/2019', '31/12/2019',\n '01/01/2020', '02/01/2020', '03/01/2020', '04/01/2020', '05/01/2020',\n '17/02/2020', '18/02/2020', '19/02/2020', '20/02/2020', '21/02/2020',\n '03/04/2020', '04/04/2020', '05/04/2020', '06/04/2020', '07/04/2020',\n '08/04/2020', '09/04/2020', '10/04/2020', '11/04/2020', '12/04/2020',\n '13/04/2020', '14/04/2020', '15/04/2020', '16/04/2020', '17/04/2020',\n '18/04/2020', '19/04/2020', '20/04/2020', '28/06/2020', '29/06/2020',\n '30/06/2020', '31/06/2020', '01/07/2020', '02/07/2020', '03/07/2020',\n '04/07/2020', '05/07/2020', '06/07/2020', '07/07/2020', '08/07/2020',\n '09/07/2020', '10/07/2020', '11/07/2020', '12/07/2020', '13/07/2020',\n '14/07/2020', '15/07/2020', '16/07/2020', '17/07/2020', '18/07/2020',\n '19/07/2020', '20/07/2020', '21/07/2020', '22/07/2020', '23/07/2020',\n '24/07/2020', '25/07/2020', '26/07/2020', '27/07/2020', '28/07/2020',\n '29/07/2020', '30/07/2020', '31/07/2020', '01/08/2020', '02/08/2020',\n '03/08/2020', '04/08/2020', '05/08/2020', '06/08/2020', '07/08/2020',\n '08/08/2020', '09/08/2020', '10/08/2020', '11/08/2020', '12/08/2020',\n '13/08/2020', '14/08/2020', '15/08/2020', '16/08/2020', '17/08/2020',\n '18/08/2020', '19/08/2020', '20/08/2020', '21/8/2020', '22/08/2020',\n '23/08/2020', '24/08/2020', '25/08/2020', '26/08/2020', '27/08/2020',\n '28/08/2020', '29/08/2020', '30/08/2020', '31/08/2020']\n\n if date in holiday_dates:\n return 1\n else:\n return 0",
"def to_fixed(self):\n begin = ifloor((self.year + self.SOLAR_ERA + ((self.month - 1)/12)) * self.SIDEREAL_YEAR + OldHindu.EPOCH)\n return self.day - 1 + next_int(begin - 3, lambda d: self.zodiac(self.sunrise(d + 1)) == self.month)",
"def get_hebrew_independence_day(self, jewish_year):\n month = 2\n day = 5\n original_hebrew_independence_date = HebrewDate(jewish_year, month, day)\n if original_hebrew_independence_date.weekday() == 6:\n day = 4\n if original_hebrew_independence_date.weekday() == 7:\n day = 3\n if original_hebrew_independence_date.weekday() == 2:\n day = 6\n return [\n (HebrewDate(jewish_year, month, day - 1), \"Independence Day Eve\"),\n (HebrewDate(jewish_year, month, day), \"Independence Day\")\n ]",
"def day_of_week(dt):\n cday = dt\n mday = 2\n uday = cday.isocalendar()[2] + mday\n try:\n if uday > 7:\n CURRDAY = uday - 7\n log.debug(\"1;EME;RUNNING;000;Scheduler.py;Setting customized day of week>7 : \", CURRDAY)\n else:\n CURRDAY = uday\n log.debug(\"1;EME;RUNNING;000;Scheduler.py;Setting customized day of week : \", CURRDAY)\n return CURRDAY\n except Exception as e:\n log.exception(\"1;EME;FAILURE;700;SCHEDULE ERROR \" + str(e), exc_info=False)\n sys.exit(0)",
"def calcSeasonModified( monthNum ):\r\n\r\n if monthNum == 12 or monthNum == 1 or monthNum == 2:\r\n return 0\r\n\r\n elif monthNum == 6 or monthNum == 7 or monthNum == 7:\r\n return 1\r\n\r\n else:\r\n return 3",
"def MayDay(year):\n\n day = datetime.date(year, 5, 1)\n count = 0\n while True:\n if day.weekday() == 0:\n count += 1\n if count == 1:\n return day\n day += datetime.timedelta(days=1)",
"def fed_holiday(df):\n\n if (df[\"Date\"].month == 1) & (df[\"Date\"].day == 1):\n return \"New Year's Day\"\n elif (df[\"Date\"].month == 1) & (15 <= df[\"Date\"].day <= 21) & (df[\"Date\"].dayofweek == 1):\n return \"Martin Luther King Day\"\n elif (df[\"Date\"].month == 2) & (df[\"Date\"].day == 18):\n return \"President's Day\"\n elif (df[\"Date\"].month == 5) & (25 <= df[\"Date\"].day <= 31) & (df[\"Date\"].dayofweek == 1):\n return \"Memorial Day\"\n elif (df[\"Date\"].month == 7) & (df[\"Date\"].day == 4):\n return \"Independence Day\"\n elif (df[\"Date\"].month == 9) & (1 <= df[\"Date\"].day <= 7) & (df[\"Date\"].dayofweek == 1):\n return \"Labor Day\"\n elif (df[\"Date\"].month == 10) & (8 <= df[\"Date\"].day <= 14) & (df[\"Date\"].dayofweek == 1):\n return \"Columbus Day\"\n elif (df[\"Date\"].month == 11) & (df[\"Date\"].day == 11):\n return \"Veterans Day\"\n elif (df[\"Date\"].month == 11) & (22 <= df[\"Date\"].day <= 28) & (df[\"Date\"].dayofweek == 4):\n return \"Thanksgiving Day\"\n elif (df[\"Date\"].month == 12) & (df[\"Date\"].day == 25):\n return \"Christmas Day\"\n else:\n return \"Non-holidays\""
] |
[
"0.6295589",
"0.6026804",
"0.60245025",
"0.59761655",
"0.5968329",
"0.5934792",
"0.5931478",
"0.5897197",
"0.5892322",
"0.5892249",
"0.58864856",
"0.5849594",
"0.5817999",
"0.58040303",
"0.5774847",
"0.5774847",
"0.5727384",
"0.5635808",
"0.5634328",
"0.5630868",
"0.5609257",
"0.56086355",
"0.5558913",
"0.5555674",
"0.5552749",
"0.5545067",
"0.5539385",
"0.5532725",
"0.5528069",
"0.54990596"
] |
0.6376459
|
0
|
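The dia_revolucion document above calls an nth_day_of_month helper (and MON/NOV constants) that live elsewhere in its source repository and are not part of this record. A hedged, self-contained sketch of what such a helper could look like using only the standard library follows; the helper signature and 1-based n are assumptions, not the repository's actual implementation.

import calendar
import datetime

MON, NOV = calendar.MONDAY, 11  # assumed weekday/month constants

def nth_day_of_month(n, weekday, month, year):
    # Return the date of the n-th occurrence of `weekday` in the given month/year.
    cal = calendar.Calendar()
    matches = [d for d in cal.itermonthdates(year, month)
               if d.month == month and d.weekday() == weekday]
    return matches[n - 1]

def dia_revolucion(year):
    # Revolution Day: third Monday in November.
    return nth_day_of_month(3, MON, NOV, year)

assert dia_revolucion(2023) == datetime.date(2023, 11, 20)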
Return the short time version, e.g. Apr15
|
def short_time(self):
return "%s%02d" % (util.SHORT_MONTH[self.month_num], self.year - 2000)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def shortDate(self, date):\n return u'%s %02i' % (date.pMonth(), date.day())",
"def twenty_seventeen():\n return 2017",
"def now_short(_format=\"%Y%m%d-%H%M%S\"):\n return time.strftime(_format, time.localtime()) + \"\\t\"",
"def time_hack(self):\n now = datetime.datetime.now()\n monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',\n 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']\n month = monthnames[now.month - 1].capitalize()\n return ('[%02d/%s/%04d:%02d:%02d:%02d.%06d]' %\n (now.day, month, now.year, now.hour, now.minute, now.second, now.microsecond))",
"def get_nightly_start_time():\n return 14 # 2PM local Tucson time",
"def get_date():\n return str(datetime.now()).split(' ')[0]",
"def now_short(_format=\"%Y%m%d-%H%M%S\"):\n timeString = time.strftime(_format, time.localtime()) + \"\\t\"\n return timeString",
"def get_video_publishing_date(self, response):\n return response.css(\".watch-time-text\").extract_first(default='')",
"def test_get_short_version(self):\n pass",
"def shortMonthName(self, p_int, QDate_MonthNameType=None): # real signature unknown; restored from __doc__ with multiple overloads\r\n return QString",
"def getApplicationBuildDate(self) -> unicode:\n ...",
"def get_time_string(self):\n return f\"{self.year} {self.month:02} \" \\\n f\"{self.start_day:02} {self.start_hour:02} 00 {self.get_duration():6}\"",
"def human_version(self):\n return _('Latest Stable') if self.version == 'latest_stable' else 'OpenRAVE %s'%self.version",
"def get_version(version=VERSION, date=DATE):\n return \"JoMRS v{} Modular Rigging System | last update {}\".format(\n \".\".join([i for i in version]), \"/\".join([x for x in date])\n )",
"def todaystr():\n today = datetime.datetime.today()\n return f\"{today.year}{today.month:02}{today.day:02}\"",
"def first_month_day():\r\n return datetime.now().replace(day=1).strftime('%d-%m-%Y')",
"def shortDayName(self, p_int, QDate_MonthNameType=None): # real signature unknown; restored from __doc__ with multiple overloads\r\n return QString",
"def get_date(self,yearlimits=[1500,2020]):\n\t\thead = self.raw_text()[:300] \t \t \n\t\tparser = Regexdate(head) \t \t\t\n\t\tyear = parser.find_year(yearlimits)\t\t\n\t\tmonth = parser.find_month()\n\t\tday = parser.find_day()\n\t\tif day and year != \"\":\n\t\t\treturn year + \"-\" + month + \"-\" + day\t\n\t\tif year:\n\t\t\treturn year\n\t\treturn \"\"",
"def time2shortstr(time):\n return time.strftime(MEM_SHORT_TIME_FORMAT)",
"def test_short_format_contains_year(self):\n locale = {\n 'timeformat': '%H:%M',\n 'dateformat': '%Y-%m-%d',\n 'longdateformat': '%Y-%m-%d',\n 'datetimeformat': '%Y-%m-%d %H:%M',\n 'longdatetimeformat': '%Y-%m-%d %H:%M',\n }\n assert (dt.datetime(2017, 1, 1), True) == guessdatetimefstr(\n '2017-1-1'.split(), locale=locale, default_day=dt.datetime.today())\n assert (dt.datetime(2017, 1, 1, 16, 30), False) == guessdatetimefstr(\n '2017-1-1 16:30'.split(), locale=locale, default_day=dt.datetime.today())",
"def GetCompactDateString():\r\n utc_time = pytz.UTC.localize(datetime.datetime.utcnow())\r\n pac_time = utc_time.astimezone(PACIFIC)\r\n is_dst = time.localtime().tm_isdst\r\n if is_dst:\r\n return pac_time.strftime(\"%Y%m%d-%Hd%M%S\")\r\n else:\r\n return pac_time.strftime(\"%Y%m%d-%Hs%M%S\")",
"def full_version(self) -> Optional[str]:\n full_version = None\n if self.version:\n full_version = self.version\n if self.release:\n full_version = \"{}-{}\".format(self.version, self.release)\n return full_version",
"def plastic_date():\n return 'Zun, 99 Zun 9999 99:61:61'",
"def version_min():\n return VERSION_MIN",
"def get_date():\n return (datetime.now() - TIMEDELTA).isoformat()",
"def __str__(self):\n return '{y}-{m:0>2}-{d:0>2}'.format(y=self.year, m=self.month, d=self.day)",
"def start1(self): \n return self.ddmmyyyy(self.rowTime.start)",
"def str_ax_version(self) -> str:\n days = f\"({tools.dt_days_ago(self.build_date)} days ago)\"\n return (\n f\"Axonius Version {self.version!r}, Build Date: {self.build_date!r} {days}\"\n )",
"def printVersionInfo():\n #pass\n pathname = sys.argv[0]\n myMtime = os.stat(pathname)[ST_MTIME]\n modDate = CONFIG['utils'].mktime(myMtime)\n logIt(\"Python Script: \" + pathname + \"\\n\")\n logIt(\"Version Date: \" + modDate + \"\\n\")",
"def yt_datetime(yt_date_time):\n time_obj = time.strptime(yt_date_time, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n locale_date = time.strftime(\"%x\", time_obj)\n # strip first two digits of four digit year\n short_date = re.sub(r\"(\\d\\d\\D\\d\\d\\D)20(\\d\\d)$\", r\"\\1\\2\", locale_date)\n return time_obj, short_date"
] |
[
"0.63859946",
"0.5892262",
"0.5783507",
"0.57542473",
"0.570186",
"0.56883734",
"0.56549394",
"0.56004983",
"0.559018",
"0.55878234",
"0.5565777",
"0.5543981",
"0.55357987",
"0.5523186",
"0.5511338",
"0.5483309",
"0.54828346",
"0.5475784",
"0.5471855",
"0.54618084",
"0.5425063",
"0.54223764",
"0.5413275",
"0.54094774",
"0.5374072",
"0.5369284",
"0.5366125",
"0.5363569",
"0.5362116",
"0.5348293"
] |
0.7720047
|
0
|
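The short_time document above depends on util.SHORT_MONTH, which is not shown in this dump; it is presumably a 1-indexed table of three-letter month abbreviations, with self.month_num and self.year coming from the surrounding class. A minimal standalone sketch under those assumptions (the Period class is a hypothetical stand-in):

SHORT_MONTH = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
               'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']  # 1-indexed, assumed layout

class Period:  # hypothetical stand-in for the original object
    def __init__(self, month_num, year):
        self.month_num = month_num
        self.year = year

    def short_time(self):
        # "%02d" % (year - 2000) gives the intended two digits only for years 2000-2099.
        return "%s%02d" % (SHORT_MONTH[self.month_num], self.year - 2000)

print(Period(4, 2015).short_time())  # -> Apr15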
Retrieve all of the triples linking phrases from the AMR object. We use the Framenet words that are found in the AMR Object as the linker.
|
def get_triples_linker():
triples_linkers = []
concepts = list(self.amr_obj.concepts())
# Retrieve all concept that has the word ARG in it
for concept in concepts:
triple = self.amr_obj.triples(head=concept[0])
items = [item for item in triple if 'ARG' in item[1]]
if len(items) > 0:
triples_linkers.append(triple)
return triples_linkers
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_triples(self, bundle_url=None):\n # metadata triples\n # transcription triples\n # translation triples\n # gloss triples\n # NER triples\n pass",
"def linked_words(self):\n return self[pair_data.PROPERTIES][pair_data.LINKED_WORDS]",
"def list(self):\n\t\treturn self.link_words",
"def generate_amr_string_from_triples(self):\n def get_alignment(f_concept_var):\n \"\"\"\n Get alignment for a single concept\n \"\"\"\n for triplet, a in self.amr_obj.alignments().items():\n if f_concept_var == triplet[0] and triplet[1] == ':instance-of':\n return int(a.split('.')[1].split(',')[0])\n\n def get_all_amr_string(f_concept_var):\n \"\"\"\n Get all amr string from the concept\n \"\"\"\n def get_triples(key):\n result_triples = []\n f_triples = self.amr_obj.triples(dep=key, rel=':ARG-of', normalize_inverses=True)\n if f_triples:\n result_triples.extend(f_triples)\n f_triples = self.amr_obj.triples(head=key)\n if f_triples:\n result_triples.extend(f_triples)\n return result_triples\n entry = defaultdict(int)\n q = []\n q.append((amr.Var('TOP'), ':top', f_concept_var))\n entry[f_concept_var] += 1\n reentrancies = self.amr_obj.reentrancies()\n all_triples = []\n while q:\n u = q.pop()\n all_triples.append(u)\n triples = get_triples(u[2])\n for triplet in triples[::-1]:\n if triplet[2] in reentrancies:\n if entry[triplet[2]] <= reentrancies[triplet[2]] + 1:\n q.append(triplet)\n entry[triplet[2]] += 1\n else:\n q.append(triplet)\n entry[triplet[2]] += 1\n s = ''\n stack = []\n instance_fulfilled = None\n align = role_align = {}\n concept_stack_depth = {\n None: 0} # size of the stack when the :instance-of triple was encountered for the variable\n for h, r, d in all_triples + [(None, None, None)]:\n align_key = align.get((h, r, d), '')\n role_align_key = role_align.get((h, r, d), '')\n if r == ':top':\n s += '(' + d()\n stack.append((h, r, d))\n instance_fulfilled = False\n elif r == ':instance-of':\n s += ' / ' + d(align_key)\n instance_fulfilled = True\n concept_stack_depth[h] = len(stack)\n elif r == ':wiki':\n continue\n elif h == stack[-1][2] and r == ':polarity': # polarity gets to be on the same line as the concept\n s += ' ' + r + role_align_key + ' ' + d(align_key)\n else:\n while len(stack) > concept_stack_depth[h]:\n h2, r2, d2 = stack.pop()\n if instance_fulfilled is False:\n # just a variable or constant with no concept hanging off of it\n # so we have an extra paren to get rid of\n align_key2 = align.get((h2, r2, d2), '')\n s = s[:-len(d2(align_key2)) - 1] + d2(align_key2, append=not instance_fulfilled)\n else:\n s += ')'\n instance_fulfilled = None\n if d is not None:\n s += ' \\n' + ' ' * 4 * len(stack) + r + role_align_key + ' (' + d(align_key)\n stack.append((h, r, d))\n instance_fulfilled = False\n return s\n\n # def get_all_alignments(concept_var, sep, left=True):\n # '''\n # Get all alignments from the concept\n # '''\n #\n # # def alignment_to_text(alignments):\n # # '''\n # # Convert all alignments to text\n # # '''\n # # def filter(idxs, tol):\n # # '''\n # # Resulting only the longest contiguous elements\n # # '''\n # # diffs = [idxs[i + 1] - idxs[i] for i in range(len(idxs) - 1)]\n # # start = False\n # # max_length = -1\n # # for i in range(len(diffs)):\n # # if diffs[i] <= tol:\n # # if not start:\n # # start = True\n # # length = 1\n # # start_idx = i\n # # else:\n # # length += 1\n # # else:\n # # if start:\n # # start = False\n # # end_idx = i\n # # if length >= max_length:\n # # max_length = length\n # # max_start_idx = start_idx\n # # max_end_idx = end_idx\n # # if start:\n # # end_idx = i + 1\n # # if length >= max_length:\n # # max_start_idx = start_idx\n # # max_end_idx = end_idx\n # # return [idxs[i] for i in range(max_start_idx, max_end_idx + 1)]\n # #\n # # doc = en_nlp(\" \".join(self.amr_obj.tokens()))\n # # alignments = 
sorted(list(set(alignments)))\n # # # We used noun chunks to prevent orphaned noun\n # # noun_chunks = list(doc.noun_chunks)\n # # new_alignments = set()\n # # for a in alignments:\n # # new_alignments.add(a)\n # # # Insert all noun chunks to new alignment\n # # for noun in noun_chunks:\n # # if noun.start <= a <= noun.end:\n # # new_alignments.update([i for i in range(noun.start, noun.end)])\n # # text = [self.amr_obj.tokens()[idx] for idx in filter(sorted(list(new_alignments)), 3)]\n # # text = \" \".join(text)\n # # return text\n #\n # def get_triplet(key):\n # result_triplets = []\n # triples = self.amr_obj.f_triples(dep=key, rel=':ARG-of', normalize_inverses=True)\n # if triples:\n # result_triplets.extend(triples)\n # triples = self.amr_obj.f_triples(head=key)\n # if triples:\n # result_triplets.extend(triples)\n # return result_triplets\n #\n # triplets_stor = {}\n # entry = defaultdict(int)\n # q = queue.Queue()\n # q.put(concept_var)\n # entry[concept_var] += 1\n # result_alignments = []\n # alignments = self.amr_obj.alignments()\n # role_alignments = self.amr_obj.role_alignments()\n # reentrancies = self.amr_obj.reentrancies()\n # while not q.empty():\n # u = q.get()\n # triples = get_triplet(u)\n # for triplet in triples:\n # if triplet not in triplets_stor:\n # triplets_stor[triplet] = 0\n # if type(triplet[2]) is amr.Var:\n # if entry[triplet[2]] <= reentrancies[triplet[2]] + 1:\n # q.put(triplet[2])\n # entry[triplet[2]] += 1\n #\n # def is_pos_correct(idx, sep, left=True):\n # if left:\n # return True if idx < sep else False\n # else:\n # return True if idx > sep else False\n #\n # if triplet in alignments:\n # idx = int(alignments[triplet].split('.')[1])\n # #if is_pos_correct(idx, sep, left):\n # result_alignments.append(idx)\n # if triplet in role_alignments:\n # idx = int(role_alignments[triplet].split('.')[1])\n # #if is_pos_correct(idx, sep, left):\n # result_alignments.append(idx)\n # return alignment_to_text(result_alignments)\n\n if self.triples == {}:\n return ''\n\n results = []\n for key, triple in self.triples.items():\n result_1 = get_alignment(triple[1])\n if result_1 is None:\n continue\n if triple[0] is not None:\n result_0 = get_all_amr_string(triple[0])\n else:\n result_0 = ''\n for concept_var in triple[2]:\n if concept_var:\n result_2 = get_all_amr_string(concept_var)\n if len(result_2.split(' ')) == 1:\n if not result_2.startswith('('):\n result_2 = '(' + result_2 + ')'\n results.append((result_0, self.amr_obj.var2concept()[triple[1]]._name, result_2))\n\n # f = open('amr_string.txt', 'w')\n # for l, m, r in results:\n # if l != '':\n # f.write(l+'\\n')\n # if r != '':\n # f.write(r+'\\n')\n # f.close()\n return results",
"def getPhrases(self, word, limit=None, wlmi=None, useCanonical=None, ):\n\n # Parse inputs\n resourcePath = '/word.{format}/{word}/phrases'\n resourcePath = resourcePath.replace('{format}', 'json')\n method = 'GET'\n\n queryParams = {}\n headerParams = {}\n\n queryParams['limit'] = self.apiClient.toPathValue(limit)\n queryParams['wlmi'] = self.apiClient.toPathValue(wlmi)\n queryParams['useCanonical'] = self.apiClient.toPathValue(useCanonical)\n\n\n if word != None:\n resourcePath = resourcePath.replace('{word}', word)\n\n\n # Make the API Call\n response = self.apiClient.callAPI(resourcePath, method, queryParams,\n None, headerParams)\n if not response:\n return None\n\n\n responseObjects = []\n for responseObject in response:\n responseObjects.append(self.apiClient.deserialize(responseObject,\n model.Bigram.Bigram))\n return responseObjects",
"def convert(self) -> dict:\n def get_triples_linker():\n \"\"\"\n Retrieve all of the triples linking phrases from the AMR object.\n We use the Framenet words that are found in the AMR Object as the linker.\n \"\"\"\n triples_linkers = []\n concepts = list(self.amr_obj.concepts())\n\n # Retrieve all concept that has the word ARG in it\n for concept in concepts:\n triple = self.amr_obj.triples(head=concept[0])\n items = [item for item in triple if 'ARG' in item[1]]\n if len(items) > 0:\n triples_linkers.append(triple)\n return triples_linkers\n\n def generate_triples():\n\n def fixing_annotation(key, n):\n \"\"\"\n Fixing some inconsistency in the annotation\n \"\"\"\n if key + '.' + n not in self.propbank:\n key = key.replace('-', '_')\n return key + '.' + n\n\n def is_agent(f_rel, rel_var):\n \"\"\"\n Checking whether the role is an agent (denoted by 'pag') or not\n \"\"\"\n # TODO: beside 'pag' is there any other role?\n m = re.match(r'(.*)-(\\d*)$', rel_var)\n key = m.group(1)\n n = m.group(2)\n\n # some annotation does not have the correspondence frameset, just put false if found\n if n == '00':\n return False\n\n concept = fixing_annotation(key, n)\n roleset = self.propbank[concept]\n\n m = re.match(r':ARG(.).*', f_rel[1])\n n = int(m.group(1))\n roles = roleset.getElementsByTagName('role')\n\n for role in roles:\n if dict(role.attributes)['n'].value == str(n) and dict(role.attributes)['f'].value.lower() == 'pag':\n return True\n return False\n\n # Case 1: ARG\n for triple_linker in self.triples_linkers:\n triple = [None, triple_linker[0][0], []]\n for rel in triple_linker:\n if 'ARG' in rel[1] and 'of' not in rel[1]:\n # check whether the propbank verb rel[0] and its argument rel[2] is an agent or not\n if is_agent(rel, self.var2c[rel[0]].__str__()):\n triple[0] = rel[2]\n else:\n triple[2].append(rel[2])\n if not (triple[0] is None and triple[2] == []):\n self.triples[triple[1]] = triple\n\n # Case 2: ARG-of\n for triple_linker in self.triples_linkers:\n for rel in triple_linker:\n if 'ARG' in rel[1] and 'of' in rel[1]:\n if rel[2] not in self.triples:\n self.triples[rel[2]] = [None, rel[2], []]\n if is_agent(rel, self.var2c[rel[2]].__str__()):\n self.triples[rel[2]][0] = rel[0]\n else:\n self.triples[rel[2]][2].append(rel[0])\n return self.triples\n\n self.triples_linkers = get_triples_linker()\n return generate_triples()",
"def lemmatized_phrases(self):\n phrases = [set(lower_words(TextBlob(p).words.lemmatize()))\n for p in self.blob.noun_phrases]\n return [' '.join(p) for p in phrases if not STOPWORDS.intersection(p)]",
"def get_triples(self):\n cursor = self.db.cursor()\n cursor.execute(\"SELECT page_url, link_type, link_url FROM triples ORDER BY page_url, link_type\")\n return cursor.fetchall()",
"def extract_triplets(self) -> Iterable[dict]:\n stg_corpus = [txt.strip()+\".\" if txt.strip()[-1]!=\".\" else txt.strip() for txt in self.__corpus__]\n stg_corpus = ' '.join(self.__corpus__)\n\n with StanfordOpenIE() as client:\n triples_corpus = client.annotate(stg_corpus)\n\n self.__triples_corpus__ = triples_corpus\n\n return triples_corpus",
"def triples(self):\n\t\tif len(self.words) < 3:\n\t\t\treturn\n\t\t\n\t\tfor i in range(len(self.words) - 2):\n\t\t\tyield (self.words[i], self.words[i+1], self.words[i+2])",
"def get_triplets_visualphrase(self):\n vocab = self.vocab['sro']\n triplets = torch.zeros(len(vocab), 3)\n for j in range(len(vocab)):\n subjname, relname, objname = vocab.idx2word[j].split('-')\n triplets[j, 0] = self.vocab['all'].wordpos2idx[subjname + '_noun']\n triplets[j, 1] = self.vocab['all'].wordpos2idx[objname + '_noun']\n triplets[j, 2] = self.vocab['all'].wordpos2idx[relname + '_verb']\n\n triplets = triplets.long()\n return triplets",
"def gen_links(text):\n return []",
"def extract_phrases(data,model):\n phrases = []\n alignment = model.alignment_idx\n for i in range(len(data)):\n sent_phrases = phrase_extraction(data[i][\"fr\"],data[i][\"en\"],alignment[i])\n phrases.append(sent_phrases)\n return phrases",
"def triples(self):\r\n\r\n if len(self.words) < 3:\r\n return\r\n\r\n for i in range(len(self.words) - 2):\r\n yield (self.words[i], self.words[i + 1], self.words[i + 2])",
"def triples():",
"def build_word_relations():\n song_urls = lyricsorter.get_song_url_list()\n viablewords = find_viable_words()\n word_list = []\n relation_dict = {}\n for i, link in enumerate(song_urls):\n response = song_table.get_item(\n Key={\n 'id': link\n }\n )\n lyrics = []\n print(\"Working on song# {}\".format(str(i)))\n try:\n lyrics = response['Item']['lyric_array']\n except KeyError:\n pass\n for index, line in enumerate(lyrics):\n for index2, w in enumerate(line):\n if w not in viablewords:\n lyrics[index][index2] = \"\"\n for index, line in enumerate(lyrics):\n for index2, w in enumerate(line):\n __line_parse(index2, line, relation_dict, word_list)\n\n for i, word in enumerate(word_list):\n print(\"Inserting #{} word in wordlist of size {}\".format(str(i), str(len(word_list))))\n Item1 = {\n 'id': str(word + \"_1\"),\n \"words\": relation_dict[word][str(word + \"_1\")]\n }\n Item2 = {\n 'id': str(word + \"_2\"),\n \"words\": relation_dict[word][str(word + \"_2\")]\n }\n Item3 = {\n 'id': str(word + \"_3\"),\n \"words\": relation_dict[word][str(word + \"_3\")]\n }\n word_relation_table.put_item(\n Item=Item1\n )\n word_relation_table.put_item(\n Item=Item2\n )\n word_relation_table.put_item(\n Item=Item3\n )",
"def triples(self):\n\n if len(self.words) < 3:\n return\n\n for i in range(len(self.words) - 2):\n yield (self.words[i], self.words[i+1], self.words[i+2])",
"def get_phrases(self) -> List[Phrase]:\n return list(self.phrase_index.values())",
"def rdfGetTriples(id):\n\ttargets = []\n\tfullId = id\n\n#\tlog.info(\"rdfgetTriples(%s)\" % fullId)\n\tif\t':' in id: #Includes full path or namespaces\n\t\tfullId = id\n\telse:\n\t\tfullId = VOCAB + \"/\" + id\n\tsource = URIRef(fullId)\n\t#log.info(\"rdfgetTriples(%s)\" % source)\n\t\n\tfirst = True\n\tunit = None\n\t\n\thomeSetTo = None\n\ttypeOfInLayers = []\n\n\ttry:\n\t\tRDFLIBLOCK.acquire()\n\t\tres = list(STORE.query(GETTRIPS, initBindings={'sub':source}))\n\tfinally:\n\t\tRDFLIBLOCK.release()\n\t\t\n\tfor row in res:\n#\t\tif source == \"http://meta.schema.org/\":\n#\t\tlog.info(\"Triple: %s %s %s %s\" % (source, row.p, row.o, row.g))\n\t\tlayer = str(getRevNss(str(row.g)))\n\t\tif first:\n\t\t\tfirst = False\n\t\t\tunit = api.Unit.GetUnitNoLoad(id,True)\n\t\ts = stripID(source)\n\t\tp = stripID(row.p)\n\t\tif p == \"rdf:type\": \n\t\t\ttypeOfInLayers.append(layer)\n\t\telif(p == \"isPartOf\"):\n\t\t\tif(unit.home != None and unit.home != layer):\n\t\t\t\tlog.info(\"WARNING Cannot set %s home to %s - already set to: %s\" % (s,layer,unit.home))\n\t\t\tunit.home = layer\n\t\t\thomeSetTo = layer\n\t\telif(p == \"category\"):\n\t\t\tunit.category = row.o\n\n\t\tprop = api.Unit.GetUnit(p,True)\n\n\t\tif isinstance(row.o,rdflib.Literal):\n\t\t\tapi.Triple.AddTripleText(unit, prop, row.o, layer)\n\t\telse: \n\t\t\tapi.Triple.AddTriple(unit, prop, api.Unit.GetUnit(stripID(row.o),True), layer)\n\t\t\t\n\t\"\"\" Default Unit.home to core if not specificly set with an 'isPartOf' triple \"\"\"\n\tif(unit and homeSetTo == None):\n\t\tif('core' in typeOfInLayers or len(typeOfInLayers) == 0):\n\t\t\tunit.home = 'core'\n\t\telse:\n\t\t\tlog.info(\"WARNING: %s defined in extensions %s but has no 'isPartOf' triple - cannot default home to core!\" % (id,typeOfInLayers))\n\treturn unit",
"def external_terminologies(self):\n terms = set()\n for node_record in self.graph.run(\"MATCH (n) RETURN (n)\"):\n node = node_record[\"n\"]\n if \"links_to\" in node:\n terms.add(node[\"links_to\"])\n return terms",
"def antonym(self, sense=0):\n s = self._synset(self.text, sense=sense)\n\n if not s:\n return []\n\n lemmas = s.lemmas()\n\n result = list()\n\n for lemma in lemmas:\n if lemma.antonyms():\n result.append(lemma.antonyms()[0].name())\n\n return result if result else []",
"def getmentioningobjs(idfindex, idfobject):\n idf, edges = eppystuff.an_idfedges(idfindex)\n mentioningobjs = idf_helpers.getanymentions(idf, idfobject)\n keys = [mentioningobj.key for mentioningobj in mentioningobjs] \n objnames = [mentioningobj.obj[1] for mentioningobj in mentioningobjs] \n idfkeys = idf_helpers.idfobjectkeys(idf)\n keysobjsindexes = [(idfkeys.index(mentioningobj.key.upper()), \n idf.idfobjects[mentioningobj.key.upper()].index(mentioningobj))\n for mentioningobj in mentioningobjs] \n urls = [\"../../%s/%s\" % (idfkey, objkey) \n for idfkey, objkey in keysobjsindexes]\n urllinks = ['<a href=%s>%s</a>' % (url, name) \n for url, name in zip(urls, objnames)]\n lines = [\"%s->%s\" % (mentioningobj.key, urllink) \n for mentioningobj, urllink in zip(mentioningobjs, urllinks)]\n return ', '.join(lines)",
"def get_links(query_terms):\n\n # the set of links all of which contains all the terms in the query string\n final_links = None\n for term in query_terms:\n # get all links containing the term and put in a set\n links = Set(index_data.get(term))\n #print(\"\\n\\nQuery Term: %s\" % term)\n #print(links)\n\n # special case for first iteration, because: empty & anything = empty\n if final_links == None:\n final_links = links\n\n # take intersection of links set\n final_links = final_links & links\n\n #print(final_links)\n\n # convert the Set to List and return\n return list(final_links)",
"def parse_triples(self, s: str) -> List[BasicTriple]:\n tokens = lex(s, pattern=TRIPLE_RE)\n return self._parse_triples(tokens)",
"def readLinking(goldStdFile):\n linking = dict()\n for line in open(goldStdFile):\n d = re.split(\"\\s+\", line.strip())\n mention = d[0].upper()\n kb_id = d[1].upper()\n\n if kb_id in linking.keys():\n linking[kb_id].add(mention)\n else:\n linking[kb_id] = set([mention])\n return linking",
"def get_noun_pairs_from_all_texts(self):\n singulars = []\n for text in self.available_texts():\n singulars += self.get_nouns_from_text(text)\n singulars = list(set(singulars))\n return [(singular, plural(singular)) for singular in singulars]",
"def __init__(self):\n\t\tself.link_words = []",
"def get_links(self):\n msg = self.get_message()\n return msg.split()",
"def lemmas(self):\n if 'lemma' not in self.annotators:\n return None\n return [t[self.LEMMA] for t in self.data]",
"def lemmas(self):\n if 'lemma' not in self.annotators:\n return None\n return [t[self.LEMMA] for t in self.data]"
] |
[
"0.6250458",
"0.6226203",
"0.6032588",
"0.5930905",
"0.58461785",
"0.5834479",
"0.5614722",
"0.5560175",
"0.5539191",
"0.55331725",
"0.5495651",
"0.54529977",
"0.5452302",
"0.54013497",
"0.5397558",
"0.53082573",
"0.5281321",
"0.52532434",
"0.5239895",
"0.5156461",
"0.51475745",
"0.5128843",
"0.5128702",
"0.51214767",
"0.5118754",
"0.5114833",
"0.50924456",
"0.5090791",
"0.5083125",
"0.5083125"
] |
0.7512734
|
0
|
Checking whether the role is an agent (denoted by 'pag') or not
|
def is_agent(f_rel, rel_var):
# TODO: beside 'pag' is there any other role?
m = re.match(r'(.*)-(\d*)$', rel_var)
key = m.group(1)
n = m.group(2)
# some annotation does not have the correspondence frameset, just put false if found
if n == '00':
return False
concept = fixing_annotation(key, n)
roleset = self.propbank[concept]
m = re.match(r':ARG(.).*', f_rel[1])
n = int(m.group(1))
roles = roleset.getElementsByTagName('role')
for role in roles:
if dict(role.attributes)['n'].value == str(n) and dict(role.attributes)['f'].value.lower() == 'pag':
return True
return False
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def check_agent_by_role(self, username, role):\n current_user = self.this_user\n new_user = self.verify_current_user(username)\n self.this_user = current_user\n if new_user and new_user[\"role\"] == role:\n return new_user\n return False",
"def test_show_agent(self):\n with self.override_role():\n self.agents_client.show_agent(self.agent['id'])",
"def has_role(self, role):\n return False",
"def check_role(role):\n return role in all_roles",
"async def is_launcher(ctx):\n member = ctx.message.author\n staff = await is_staff(ctx)\n lhRole = discord.utils.get(member.guild.roles, name=ROLE_LH)\n if staff or lhRole in member.roles: return True",
"def is_java_agent(self):\r\n return self.has_label('java_agent')",
"def isParticipantRole(*args):\n return _libsbml.SBO_isParticipantRole(*args)",
"def SBO_isParticipantRole(*args):\n return _libsbml.SBO_isParticipantRole(*args)",
"def test_list_agents(self):\n admin_resource_id = self.agent['id']\n with (self.override_role_and_validate_list(\n admin_resource_id=admin_resource_id)) as ctx:\n ctx.resources = self.agents_client.list_agents(\n id=admin_resource_id)[\"agents\"]",
"def is_permitted(self):\n\t\tfrom frappe.utils import has_common\n\n\t\tallowed = [\n\t\t\td.role for d in frappe.get_all(\"Has Role\", fields=[\"role\"], filters={\"parent\": self.name})\n\t\t]\n\n\t\tcustom_roles = get_custom_allowed_roles(\"page\", self.name)\n\t\tallowed.extend(custom_roles)\n\n\t\tif not allowed:\n\t\t\treturn True\n\n\t\troles = frappe.get_roles()\n\n\t\tif has_common(roles, allowed):\n\t\t\treturn True",
"def _is_granter_pvm( # pylint: disable=no-self-use\n self, pvm: PermissionView\n ) -> bool:\n\n return pvm.permission.name in {\"can_override_role_permissions\", \"can_approve\"}",
"def i_am(user_role):\n return user_role",
"async def is_admin(ctx):\n member = ctx.message.author\n aRole = discord.utils.get(member.guild.roles, name=ROLE_AD)\n if aRole in member.roles or member.id == 715048392408956950: return True",
"def agent_is_active(self):\n agents = self.board[self.agent_locs_idx]\n return agents & CellTypes.agent > 0",
"def agents_at_goal(self):\r\n return self.searchenv.conv.state_to_tile(self.searchstate.positions) == self.searchenv.goal_tile",
"def has_role(self, role):\n\n if self.roles and (role in self.roles):\n return True\n return False",
"def has_role(role, nodename=None):\n return role in get_list('roles', nodename)",
"def _has_keystone_role(self, role):\n return False",
"def _is_wiz(agent: Agent):\n return agent.agent_id == 'Wizard'",
"def is_mentor(self):\n return self.user_profile_status == self.MENTOR",
"def isRole(self, role):\n user = self.getSession()\n return self.pipe.auth.isRole(user, role)",
"def current_venue_allows_role_state_routing() -> bool:\n venue_instance = CommonLocationUtils.get_venue_of_current_lot()\n if venue_instance is None:\n return False\n # noinspection PyUnresolvedReferences\n return venue_instance.allow_rolestate_routing_on_navmesh",
"def test_func(self):\n taxonomy = self.get_object()\n return self.request.user == taxonomy.author",
"def test_func(self):\n taxonomy = self.get_object()\n return self.request.user == taxonomy.author",
"def policy(agent):",
"def test_ipam_roles_read(self):\n pass",
"def test_func(self):\n taxonomy = self.get_taxonomy()\n return self.request.user == taxonomy.author",
"async def is_staff(ctx):\n member = ctx.message.author\n vipRole = discord.utils.get(member.guild.roles, name=ROLE_VIP)\n staffRole = discord.utils.get(member.guild.roles, name=ROLE_STAFF)\n return vipRole in member.roles or staffRole in member.roles",
"def is_admin(self):\n return Role.query.get(2) in self.roles",
"def has_roles(ctx, roles: list):\n for r in ctx.author.roles:\n if r.name in roles:\n return True\n return False"
] |
[
"0.64683294",
"0.6237958",
"0.62285286",
"0.5953416",
"0.5951429",
"0.58070844",
"0.5790452",
"0.5789454",
"0.57131404",
"0.5685838",
"0.56456906",
"0.56294304",
"0.56220144",
"0.56133395",
"0.5613335",
"0.5591917",
"0.55668414",
"0.5560702",
"0.55188465",
"0.54573995",
"0.5455209",
"0.54236954",
"0.5396988",
"0.5396988",
"0.53885907",
"0.5379947",
"0.53796697",
"0.5353754",
"0.53537095",
"0.534587"
] |
0.7379751
|
0
|
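The is_agent document above assumes self.propbank maps a frame id (e.g. 'give.01') to a parsed PropBank roleset DOM node, via helpers not included in this record. A hedged standalone sketch of the same 'pag' (prototypical agent) test, using xml.dom.minidom against a hand-written roleset snippet; the snippet contents and the arg_is_agent name are illustrative assumptions:

from xml.dom import minidom

FRAMESET_SNIPPET = """
<roleset id="give.01">
  <role n="0" f="pag" descr="giver"/>
  <role n="1" f="ppt" descr="thing given"/>
  <role n="2" f="gol" descr="entity given to"/>
</roleset>
"""

def arg_is_agent(roleset, arg_number):
    # True if ARG<arg_number> carries the 'pag' (prototypical agent) function tag.
    for role in roleset.getElementsByTagName('role'):
        attrs = dict(role.attributes.items())
        if attrs['n'] == str(arg_number) and attrs['f'].lower() == 'pag':
            return True
    return False

roleset = minidom.parseString(FRAMESET_SNIPPET).documentElement
print(arg_is_agent(roleset, 0))  # True: ARG0 of give.01 is the agent
print(arg_is_agent(roleset, 1))  # False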
Given a triple, generate an AMR string from it
|
def generate_amr_string_from_triples(self):
def get_alignment(f_concept_var):
"""
Get alignment for a single concept
"""
for triplet, a in self.amr_obj.alignments().items():
if f_concept_var == triplet[0] and triplet[1] == ':instance-of':
return int(a.split('.')[1].split(',')[0])
def get_all_amr_string(f_concept_var):
"""
Get all amr string from the concept
"""
def get_triples(key):
result_triples = []
f_triples = self.amr_obj.triples(dep=key, rel=':ARG-of', normalize_inverses=True)
if f_triples:
result_triples.extend(f_triples)
f_triples = self.amr_obj.triples(head=key)
if f_triples:
result_triples.extend(f_triples)
return result_triples
entry = defaultdict(int)
q = []
q.append((amr.Var('TOP'), ':top', f_concept_var))
entry[f_concept_var] += 1
reentrancies = self.amr_obj.reentrancies()
all_triples = []
while q:
u = q.pop()
all_triples.append(u)
triples = get_triples(u[2])
for triplet in triples[::-1]:
if triplet[2] in reentrancies:
if entry[triplet[2]] <= reentrancies[triplet[2]] + 1:
q.append(triplet)
entry[triplet[2]] += 1
else:
q.append(triplet)
entry[triplet[2]] += 1
s = ''
stack = []
instance_fulfilled = None
align = role_align = {}
concept_stack_depth = {
None: 0} # size of the stack when the :instance-of triple was encountered for the variable
for h, r, d in all_triples + [(None, None, None)]:
align_key = align.get((h, r, d), '')
role_align_key = role_align.get((h, r, d), '')
if r == ':top':
s += '(' + d()
stack.append((h, r, d))
instance_fulfilled = False
elif r == ':instance-of':
s += ' / ' + d(align_key)
instance_fulfilled = True
concept_stack_depth[h] = len(stack)
elif r == ':wiki':
continue
elif h == stack[-1][2] and r == ':polarity': # polarity gets to be on the same line as the concept
s += ' ' + r + role_align_key + ' ' + d(align_key)
else:
while len(stack) > concept_stack_depth[h]:
h2, r2, d2 = stack.pop()
if instance_fulfilled is False:
# just a variable or constant with no concept hanging off of it
# so we have an extra paren to get rid of
align_key2 = align.get((h2, r2, d2), '')
s = s[:-len(d2(align_key2)) - 1] + d2(align_key2, append=not instance_fulfilled)
else:
s += ')'
instance_fulfilled = None
if d is not None:
s += ' \n' + ' ' * 4 * len(stack) + r + role_align_key + ' (' + d(align_key)
stack.append((h, r, d))
instance_fulfilled = False
return s
# def get_all_alignments(concept_var, sep, left=True):
# '''
# Get all alignments from the concept
# '''
#
# # def alignment_to_text(alignments):
# # '''
# # Convert all alignments to text
# # '''
# # def filter(idxs, tol):
# # '''
# # Resulting only the longest contiguous elements
# # '''
# # diffs = [idxs[i + 1] - idxs[i] for i in range(len(idxs) - 1)]
# # start = False
# # max_length = -1
# # for i in range(len(diffs)):
# # if diffs[i] <= tol:
# # if not start:
# # start = True
# # length = 1
# # start_idx = i
# # else:
# # length += 1
# # else:
# # if start:
# # start = False
# # end_idx = i
# # if length >= max_length:
# # max_length = length
# # max_start_idx = start_idx
# # max_end_idx = end_idx
# # if start:
# # end_idx = i + 1
# # if length >= max_length:
# # max_start_idx = start_idx
# # max_end_idx = end_idx
# # return [idxs[i] for i in range(max_start_idx, max_end_idx + 1)]
# #
# # doc = en_nlp(" ".join(self.amr_obj.tokens()))
# # alignments = sorted(list(set(alignments)))
# # # We used noun chunks to prevent orphaned noun
# # noun_chunks = list(doc.noun_chunks)
# # new_alignments = set()
# # for a in alignments:
# # new_alignments.add(a)
# # # Insert all noun chunks to new alignment
# # for noun in noun_chunks:
# # if noun.start <= a <= noun.end:
# # new_alignments.update([i for i in range(noun.start, noun.end)])
# # text = [self.amr_obj.tokens()[idx] for idx in filter(sorted(list(new_alignments)), 3)]
# # text = " ".join(text)
# # return text
#
# def get_triplet(key):
# result_triplets = []
# triples = self.amr_obj.f_triples(dep=key, rel=':ARG-of', normalize_inverses=True)
# if triples:
# result_triplets.extend(triples)
# triples = self.amr_obj.f_triples(head=key)
# if triples:
# result_triplets.extend(triples)
# return result_triplets
#
# triplets_stor = {}
# entry = defaultdict(int)
# q = queue.Queue()
# q.put(concept_var)
# entry[concept_var] += 1
# result_alignments = []
# alignments = self.amr_obj.alignments()
# role_alignments = self.amr_obj.role_alignments()
# reentrancies = self.amr_obj.reentrancies()
# while not q.empty():
# u = q.get()
# triples = get_triplet(u)
# for triplet in triples:
# if triplet not in triplets_stor:
# triplets_stor[triplet] = 0
# if type(triplet[2]) is amr.Var:
# if entry[triplet[2]] <= reentrancies[triplet[2]] + 1:
# q.put(triplet[2])
# entry[triplet[2]] += 1
#
# def is_pos_correct(idx, sep, left=True):
# if left:
# return True if idx < sep else False
# else:
# return True if idx > sep else False
#
# if triplet in alignments:
# idx = int(alignments[triplet].split('.')[1])
# #if is_pos_correct(idx, sep, left):
# result_alignments.append(idx)
# if triplet in role_alignments:
# idx = int(role_alignments[triplet].split('.')[1])
# #if is_pos_correct(idx, sep, left):
# result_alignments.append(idx)
# return alignment_to_text(result_alignments)
if self.triples == {}:
return ''
results = []
for key, triple in self.triples.items():
result_1 = get_alignment(triple[1])
if result_1 is None:
continue
if triple[0] is not None:
result_0 = get_all_amr_string(triple[0])
else:
result_0 = ''
for concept_var in triple[2]:
if concept_var:
result_2 = get_all_amr_string(concept_var)
if len(result_2.split(' ')) == 1:
if not result_2.startswith('('):
result_2 = '(' + result_2 + ')'
results.append((result_0, self.amr_obj.var2concept()[triple[1]]._name, result_2))
# f = open('amr_string.txt', 'w')
# for l, m, r in results:
# if l != '':
# f.write(l+'\n')
# if r != '':
# f.write(r+'\n')
# f.close()
return results
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def triple(str_to_triple):\n return ''.join([char * 3 for char in str_to_triple])",
"def __n3_to_str(triple):\n s, p, o = triple\n s = s.n3()\n p = p.n3()\n o = o.n3()\n if s.startswith('<') and s.endswith('>'):\n s = s[1:len(s) - 1]\n if p.startswith('<') and p.endswith('>'):\n p = p[1:len(p) - 1]\n if o.startswith('<') and o.endswith('>'):\n o = o[1:len(o) - 1]\n return (s, p, o)",
"def add_bert_identifier(triple: str) -> str:\n fields = triple.strip().split(', ')\n assert len(fields) == 12\n fields.insert(9, 'BERT')\n return ', '.join(fields)",
"def from_str(triple):\n def nodeIsBlank(element):\n return (element == '%p') or (element.startswith('?'))\n\n elements = triple.strip().split(' ')\n # check if current triple pattern is well formed\n if (len(elements) < 3) or (len(elements) > 3):\n raise SyntaxError('The pattern {} is not well formed : '\n 'it must contains exactly three nodes.'\n .format(triple.strip()))\n\n # seralize it\n subject = Node(elements[0], nodeIsBlank(elements[0]))\n predicate = Node(elements[1], nodeIsBlank(elements[1]))\n obj = Node(elements[2], nodeIsBlank(elements[2]))\n return TriplePattern(subject, predicate, obj)",
"def show_triple(self, tokens, text, triple):\n nsubj, verb, dobj = triple\n\n # Extract the text for each element of the triple.\n nsubj_text = self.phrase_text_for_head(tokens, text, nsubj)\n verb_text = tokens[verb].text_content\n \n if tokens[verb].edge_label == 'RCMOD':\n parent_rcmod = self.phrase_text_for_rcmod(tokens,verb,text)\n index = parent_rcmod.find(nsubj_text)\n if index > -1:\n nsubj_text = parent_rcmod\n else:\n nsubj_text = parent_rcmod + nsubj_text \n \n dobj_text = self.phrase_text_for_head(tokens, text, dobj)\n text = nsubj_text + ' ' +verb_text + ' '+ dobj_text\n #print (text)\n semanticRole = { 'subject': nsubj_text, 'action': verb_text, 'object': dobj_text }\n self.semanticRoleList.append(semanticRole)",
"def get_all_amr_string(f_concept_var):\n def get_triples(key):\n result_triples = []\n f_triples = self.amr_obj.triples(dep=key, rel=':ARG-of', normalize_inverses=True)\n if f_triples:\n result_triples.extend(f_triples)\n f_triples = self.amr_obj.triples(head=key)\n if f_triples:\n result_triples.extend(f_triples)\n return result_triples\n entry = defaultdict(int)\n q = []\n q.append((amr.Var('TOP'), ':top', f_concept_var))\n entry[f_concept_var] += 1\n reentrancies = self.amr_obj.reentrancies()\n all_triples = []\n while q:\n u = q.pop()\n all_triples.append(u)\n triples = get_triples(u[2])\n for triplet in triples[::-1]:\n if triplet[2] in reentrancies:\n if entry[triplet[2]] <= reentrancies[triplet[2]] + 1:\n q.append(triplet)\n entry[triplet[2]] += 1\n else:\n q.append(triplet)\n entry[triplet[2]] += 1\n s = ''\n stack = []\n instance_fulfilled = None\n align = role_align = {}\n concept_stack_depth = {\n None: 0} # size of the stack when the :instance-of triple was encountered for the variable\n for h, r, d in all_triples + [(None, None, None)]:\n align_key = align.get((h, r, d), '')\n role_align_key = role_align.get((h, r, d), '')\n if r == ':top':\n s += '(' + d()\n stack.append((h, r, d))\n instance_fulfilled = False\n elif r == ':instance-of':\n s += ' / ' + d(align_key)\n instance_fulfilled = True\n concept_stack_depth[h] = len(stack)\n elif r == ':wiki':\n continue\n elif h == stack[-1][2] and r == ':polarity': # polarity gets to be on the same line as the concept\n s += ' ' + r + role_align_key + ' ' + d(align_key)\n else:\n while len(stack) > concept_stack_depth[h]:\n h2, r2, d2 = stack.pop()\n if instance_fulfilled is False:\n # just a variable or constant with no concept hanging off of it\n # so we have an extra paren to get rid of\n align_key2 = align.get((h2, r2, d2), '')\n s = s[:-len(d2(align_key2)) - 1] + d2(align_key2, append=not instance_fulfilled)\n else:\n s += ')'\n instance_fulfilled = None\n if d is not None:\n s += ' \\n' + ' ' * 4 * len(stack) + r + role_align_key + ' (' + d(align_key)\n stack.append((h, r, d))\n instance_fulfilled = False\n return s",
"def output_aa_string(residues):\n # Dictionary of 3 letter to 1 letter AA conversion\n aa_dict = {'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K',\n 'ILE': 'I', 'PRO': 'P', 'THR': 'T', 'PHE': 'F', 'ASN': 'N',\n 'GLY': 'G', 'HIS': 'H', 'LEU': 'L', 'ARG': 'R', 'TRP': 'W',\n 'ALA': 'A', 'VAL':'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M'}\n\n s = ''\n for res in residues:\n s = s + aa_dict.get(res.type)\n return s",
"def migrate_into_triple(belstr: str) -> str:\n\n bo.parse(belstr)\n\n return migrate_ast(bo.ast).to_triple()",
"def tuple_to_string(letter_word_pair):\n letter, word = letter_word_pair\n return '{letter}: {word}'.format(letter=letter, word=word)",
"def atom_format(record) -> str:\n if len(record.name) == 4:\n return record.name\n if record.name[:2] == record.element[:2]:\n return f\"{record.name:>2} \"[:4]\n return f\" {record.name:<3}\"[:4]",
"def format_triples(self,\n triples: Iterable[BasicTriple],\n indent: bool = True):\n delim = ' ^\\n' if indent else ' ^ '\n # need to remove initial : on roles for triples\n conjunction = [f'{role.lstrip(\":\")}({source}, {target})'\n for source, role, target in triples]\n return delim.join(conjunction)",
"def to_rna(DNA):\r\n\r\n \r\n return \"\".join( ( {\"G\":\"C\", \"C\":\"G\", \"T\":\"A\", \"A\":\"U\"}[nuc] for nuc in DNA))",
"def triple(triple_string):\n triple_list = triple_string.split(\",\")\n if len(triple_list) != 3:\n print(\n \"The arguments to the warning values are triples, corresponding\"\n \"to the average value of 10, 60 and 300 seconds respectively\"\n )\n return tuple(triple_list)",
"def tuple_to_string(transcript_info):\n\n return \"\\t\".join(transcript_info.data_attributes())",
"def make_string(attr_dict, create=False):\n # attr_dict follows to to_uml_json_* structure\n if create:\n ops_in_value = \"name\"\n else:\n ops_in_value = \"value\"\n if isinstance(attr_dict[\"ops\"][0][ops_in_value], list):\n e_a_value = attr_dict[\"ops\"][0][ops_in_value][0]\n else:\n e_a_value = attr_dict[\"ops\"][0][ops_in_value]\n return (\n str(attr_dict[\"id\"])\n + str(e_a_value)\n + str(attr_dict[\"ops\"][0][\"path\"])\n )",
"def format_fasta_entry(otu_name, isolate_name, sequence_id, sequence):\n return f\">{otu_name}|{isolate_name}|{sequence_id}|{len(sequence)}\\n{sequence}\"",
"def make_angle_name(angle):\n\n return (angle.atom1.res_name +\n angle.atom1.res_num + ' ' + \n angle.atom1.chain + ' ' + \n angle.atom1.name + ' : ' + \n angle.atom2.res_name +\n angle.atom2.res_num + ' ' + \n angle.atom2.chain + ' ' + \n angle.atom2.name + ' : ' + \n angle.atom3.res_name +\n angle.atom3.res_num + ' ' + \n angle.atom3.chain + ' ' + \n angle.atom3.name)",
"def build_rule(rule, attributes):\n\t\n\tlines = [rule, \"{\"]\n\tfor attr in attributes:\n\t\tlines.append(\"\t%s\" % attr)\n\tlines.append(\"}\\n\")\n\n\treturn \"\\n\".join(lines)",
"def formulaToL3String(*args):\n return _libsbml.formulaToL3String(*args)",
"def coding_strand_to_AA(dna):\n #inital conditions\n protein = ''\n i = 0\n\n #for the length of DNA, translate each codon in an ORF to an amino acid\n while i < (len(dna)-2):\n codon = dna[i:i+3] \n amino_acid = aa_table[codon]\n protein= protein + amino_acid\n i += 3\n\n #return the string of amino acids\n return protein",
"def fname_tsk3(tup):\n t_size = str(len(tup))\n fname3 = \"The \" + t_size + \" numbers are: \"\n for t in tup:\n fname3 = fname3 + \"{:d}, \"\n return fname3[:-2].format(*tup)",
"def str_tuple(item):\n return \"{}:{}\".format(item[0], item[1])",
"def makeAntString(ants):\n st = \"Antenna\"\n antlist = makeAntList(ants)\n if len(antlist) > 1: st += \"s\"\n st += \" %s\"%helpers.formatAsRanges( antlist )\n return st",
"def elementary_weight_str(tree,style='python'):\n from nodepy.strmanip import mysimp\n from nodepy.rooted_trees import Dmap_str\n ewstr='dot(b,'+tree.Gprod_str(RKeta_str,Dmap_str)+')'\n ewstr=ewstr.replace('1*','')\n ewstr=mysimp(ewstr)\n if style=='matlab': raise Exception(\"Use elementary_weight_str_matlab() instead.\")\n if style=='fortran': ewstr=python_to_fortran(ewstr)\n return ewstr",
"def format_score(att, hts):\n return str(att) + \"/\" + str(hts)",
"def coding_strand_to_AA(dna):\n dna_codons = split_into_codons(dna)\n i = 0\n aa_string = ''\n while i < len(dna_codons):\n if len(dna_codons[i]) == 3:\n amino_acid = aa_table[dna_codons[i]]\n aa_string += amino_acid\n i += 1\n return aa_string",
"def strOutputIdtf(_session, _el):\n it = _session.create_iterator(_session.sc_constraint_new(sc_constants.CONSTR_3_f_a_a,\n _el,\n sc.SC_A_CONST,\n 0), True)\n res = \"\"\n while not it.is_over():\n res = res + cp1251ToUtf8(_session.get_idtf(it.value(2))) + \"\\n\"\n it.next()\n \n return res",
"def create_str(it):\n key = it[\"AND_KEY\"]\n val = it[language[0]]\n if val.startswith(\"<ubt_string-array>\"):\n rel = val.replace(\"<ubt_string-array>\", \"\").replace(\"</ubt_string-array>\", \"\")\n arr_str = rel.split(\"<ubt/>\")\n rel = f\"<string-array name=\\\"{key}\\\">\"\n for s in arr_str:\n s = s.replace(\"\\\"\", \"\\\\\\\"\")\n rel += f\"<item>{s}</item>\"\n rel += \"</string-array>\"\n return rel\n else:\n val = val.replace(\"\\\\\", \"\")\n val = val.replace(\"'\", \"\\\\'\")\n val = val.replace(\"\\\"\", \"\\\\\\\"\")\n return f\"<string name=\\\"{key}\\\">{val}</string>\"",
"def coding_strand_to_AA(dna):\n coding_strand = ''\n for i in range(len(dna)/3):\n aa = dna[3*i:(3*i)+3]\n coding_strand += aa_table[aa]\n return coding_strand",
"def triplet():\n ends = digraphs + list(consonants)\n spaces = [ends, list(vowels), ends]\n bit = \"\".join([random.sample(x, 1)[0] for x in spaces])\n if random.randint(0, 1) == 1:\n bit = bit.title()\n return bit"
] |
[
"0.69918025",
"0.6543586",
"0.60001856",
"0.599101",
"0.5819799",
"0.5699199",
"0.55604106",
"0.55587226",
"0.5380458",
"0.535493",
"0.5332849",
"0.5326733",
"0.52886426",
"0.52294433",
"0.52108353",
"0.52092993",
"0.52076393",
"0.5204793",
"0.52039593",
"0.5092317",
"0.5083517",
"0.5068515",
"0.5063843",
"0.50601125",
"0.5054545",
"0.5052319",
"0.50467616",
"0.50367755",
"0.50184256",
"0.5015209"
] |
0.67517376
|
1
|
Get alignment for a single concept
|
def get_alignment(f_concept_var):
for triplet, a in self.amr_obj.alignments().items():
if f_concept_var == triplet[0] and triplet[1] == ':instance-of':
return int(a.split('.')[1].split(',')[0])
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def GetAlignment(self):\r\n\r\n return self.alignment",
"def align(self):\n return self[\"align\"]",
"def align(self):\n return self[\"align\"]",
"def align(self):\n return self[\"align\"]",
"def GetAlignment(self):\r\n\r\n return self._flag",
"def get_segment_alignment(*args):\n return _ida_segment.get_segment_alignment(*args)",
"def aligned(self):\n return self.__aligned",
"def _get_alignment(self, width):\n # we know the alignment is appropriate\n # if we can divide the width by the\n # alignment cleanly\n # valid alignments are 1,2,4 and 8\n # put 4 first, since it's the default\n alignments = [4, 8, 2, 1]\n for alignment in alignments:\n if width % alignment == 0:\n return alignment",
"def check_alignment(name):\n translate_alignment = {\n 'middle': 'center',\n 'center-left': 'left',\n 'center-right': 'right',\n 'bottom-left': 'left',\n 'bottom-right': 'right',\n 'bottom-center': 'center',\n }\n\n if name in translate_alignment:\n return translate_alignment[name]\n else:\n return None",
"def get_alignments(self) -> list:",
"def get_alignment_data(self, section):\n subgroup = '{}/Aligned_{}'.format(self.group_name, section)\n sam = self.handle.get_analysis_dataset(subgroup, 'SAM')\n fasta = self.handle.get_analysis_dataset(subgroup, 'Fasta')\n if sam is None or fasta is None:\n return None\n sequence = fasta.split('\\n')[1]\n return sam, sequence",
"def get_horizontal_alignment ( self, object ):\n return self.horizontal_alignment",
"def get_horizontal_alignment ( self, object ):\n return self.horizontal_alignment",
"def format_alignment(self, alignment):\n raise NotImplementedError(\"This method should be implemented\")\n ###################################################\n # You MUST implement this method in the subclass. #\n ###################################################",
"def alignsrc(self):\n return self[\"alignsrc\"]",
"def alignsrc(self):\n return self[\"alignsrc\"]",
"def alignsrc(self):\n return self[\"alignsrc\"]",
"def get_alignment_offset(self):\n\n return 0",
"def value(self):\n return self.alignment.matching[self.idx]",
"def aligned_face(self):\n return self.aligned[\"face\"]",
"def get_alignment(self, names=None):\n names = names or self.experiments.keys()\n return dict([(e, self.experiments[e]['align']) \\\n for e in names if 'align' in self.experiments[e]])",
"def get_alignment(self, record_contents, field):\n all_value = set()\n all_categories = set()\n for content in record_contents:\n content = content.strip()\n (value, categories) = self.mapper[field].get(content, (\"\", []))\n if value:\n all_value.add(value)\n if categories:\n #TODO Handle several categories given in the alignment\n all_categories.add(categories)\n return (\" - \".join(all_value), all_categories)",
"def overlay_alignment(self):\n return self._overlay_alignment",
"def get_alignment_from(tree):\r\n msa = []\r\n for node in tree.get_terminals():\r\n alignment = self.msa_by_name[node.name.split(' ')[0]]\r\n if msa:\r\n msa.append(alignment)\r\n else:\r\n msa = MultipleSeqAlignment([alignment])\r\n\r\n return msa",
"def get_vertical_alignment ( self, object ):\n return self.vertical_alignment",
"def get_vertical_alignment ( self, object ):\n return self.vertical_alignment",
"def guess_align(aln):\n \n if \"pep\" in [guess_seq(seq) for seq in aln.itervalues()]:\n return \"pep\"\n else:\n return \"dna\"",
"def alignment(self, alignment):\n allowed_values = [\"Near\", \"Center\", \"Far\"] # noqa: E501\n if not alignment.isdigit():\t\n if alignment not in allowed_values:\n raise ValueError(\n \"Invalid value for `alignment` ({0}), must be one of {1}\" # noqa: E501\n .format(alignment, allowed_values))\n self._alignment = alignment\n else:\n self._alignment = allowed_values[int(alignment) if six.PY3 else long(alignment)]",
"def read_alignment(filepath, filetype):\n\n return AlignIO.read(filepath, filetype)",
"def align(self):\n ..."
] |
[
"0.68454945",
"0.6783263",
"0.6783263",
"0.6783263",
"0.6474947",
"0.63738024",
"0.60854924",
"0.60745746",
"0.6050638",
"0.60419714",
"0.60137933",
"0.5971896",
"0.5971896",
"0.58954287",
"0.5851324",
"0.5851324",
"0.5851324",
"0.5809287",
"0.58061635",
"0.57436806",
"0.5700636",
"0.56227165",
"0.5620684",
"0.5594733",
"0.55614287",
"0.55614287",
"0.555727",
"0.55359966",
"0.548872",
"0.54876226"
] |
0.78405607
|
0
|
Get all amr string from the concept
|
def get_all_amr_string(f_concept_var):
def get_triples(key):
result_triples = []
f_triples = self.amr_obj.triples(dep=key, rel=':ARG-of', normalize_inverses=True)
if f_triples:
result_triples.extend(f_triples)
f_triples = self.amr_obj.triples(head=key)
if f_triples:
result_triples.extend(f_triples)
return result_triples
entry = defaultdict(int)
q = []
q.append((amr.Var('TOP'), ':top', f_concept_var))
entry[f_concept_var] += 1
reentrancies = self.amr_obj.reentrancies()
all_triples = []
while q:
u = q.pop()
all_triples.append(u)
triples = get_triples(u[2])
for triplet in triples[::-1]:
if triplet[2] in reentrancies:
if entry[triplet[2]] <= reentrancies[triplet[2]] + 1:
q.append(triplet)
entry[triplet[2]] += 1
else:
q.append(triplet)
entry[triplet[2]] += 1
s = ''
stack = []
instance_fulfilled = None
align = role_align = {}
concept_stack_depth = {
None: 0} # size of the stack when the :instance-of triple was encountered for the variable
for h, r, d in all_triples + [(None, None, None)]:
align_key = align.get((h, r, d), '')
role_align_key = role_align.get((h, r, d), '')
if r == ':top':
s += '(' + d()
stack.append((h, r, d))
instance_fulfilled = False
elif r == ':instance-of':
s += ' / ' + d(align_key)
instance_fulfilled = True
concept_stack_depth[h] = len(stack)
elif r == ':wiki':
continue
elif h == stack[-1][2] and r == ':polarity': # polarity gets to be on the same line as the concept
s += ' ' + r + role_align_key + ' ' + d(align_key)
else:
while len(stack) > concept_stack_depth[h]:
h2, r2, d2 = stack.pop()
if instance_fulfilled is False:
# just a variable or constant with no concept hanging off of it
# so we have an extra paren to get rid of
align_key2 = align.get((h2, r2, d2), '')
s = s[:-len(d2(align_key2)) - 1] + d2(align_key2, append=not instance_fulfilled)
else:
s += ')'
instance_fulfilled = None
if d is not None:
s += ' \n' + ' ' * 4 * len(stack) + r + role_align_key + ' (' + d(align_key)
stack.append((h, r, d))
instance_fulfilled = False
return s
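# A simplified, self-contained sketch of the same serialization idea (an
# assumption-laden illustration, not the project's code): it uses plain string
# variables instead of the amr module's Var objects, ignores alignments and the
# :wiki / :polarity special cases, and only prevents re-entrant variables from
# being expanded twice. All names below are made up for the example.
def linearize(top, instances, edges):
    """Render variable->concept and variable->edges maps as a PENMAN-style string."""
    seen = set()

    def visit(var, depth):
        if var in seen:                       # re-entrant variable: print it bare
            return var
        seen.add(var)
        s = '(' + var + ' / ' + instances[var]
        for rel, dep in edges.get(var, []):
            child = visit(dep, depth + 1) if dep in instances else dep
            s += '\n' + ' ' * 4 * (depth + 1) + rel + ' ' + child
        return s + ')'

    return visit(top, 0)

# linearize('w', {'w': 'want-01', 'b': 'boy', 'g': 'go-01'},
#           {'w': [(':ARG0', 'b'), (':ARG1', 'g')], 'g': [(':ARG0', 'b')]})
# -> '(w / want-01\n    :ARG0 (b / boy)\n    :ARG1 (g / go-01\n        :ARG0 b))'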
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def generate_amr_string_from_triples(self):\n def get_alignment(f_concept_var):\n \"\"\"\n Get alignment for a single concept\n \"\"\"\n for triplet, a in self.amr_obj.alignments().items():\n if f_concept_var == triplet[0] and triplet[1] == ':instance-of':\n return int(a.split('.')[1].split(',')[0])\n\n def get_all_amr_string(f_concept_var):\n \"\"\"\n Get all amr string from the concept\n \"\"\"\n def get_triples(key):\n result_triples = []\n f_triples = self.amr_obj.triples(dep=key, rel=':ARG-of', normalize_inverses=True)\n if f_triples:\n result_triples.extend(f_triples)\n f_triples = self.amr_obj.triples(head=key)\n if f_triples:\n result_triples.extend(f_triples)\n return result_triples\n entry = defaultdict(int)\n q = []\n q.append((amr.Var('TOP'), ':top', f_concept_var))\n entry[f_concept_var] += 1\n reentrancies = self.amr_obj.reentrancies()\n all_triples = []\n while q:\n u = q.pop()\n all_triples.append(u)\n triples = get_triples(u[2])\n for triplet in triples[::-1]:\n if triplet[2] in reentrancies:\n if entry[triplet[2]] <= reentrancies[triplet[2]] + 1:\n q.append(triplet)\n entry[triplet[2]] += 1\n else:\n q.append(triplet)\n entry[triplet[2]] += 1\n s = ''\n stack = []\n instance_fulfilled = None\n align = role_align = {}\n concept_stack_depth = {\n None: 0} # size of the stack when the :instance-of triple was encountered for the variable\n for h, r, d in all_triples + [(None, None, None)]:\n align_key = align.get((h, r, d), '')\n role_align_key = role_align.get((h, r, d), '')\n if r == ':top':\n s += '(' + d()\n stack.append((h, r, d))\n instance_fulfilled = False\n elif r == ':instance-of':\n s += ' / ' + d(align_key)\n instance_fulfilled = True\n concept_stack_depth[h] = len(stack)\n elif r == ':wiki':\n continue\n elif h == stack[-1][2] and r == ':polarity': # polarity gets to be on the same line as the concept\n s += ' ' + r + role_align_key + ' ' + d(align_key)\n else:\n while len(stack) > concept_stack_depth[h]:\n h2, r2, d2 = stack.pop()\n if instance_fulfilled is False:\n # just a variable or constant with no concept hanging off of it\n # so we have an extra paren to get rid of\n align_key2 = align.get((h2, r2, d2), '')\n s = s[:-len(d2(align_key2)) - 1] + d2(align_key2, append=not instance_fulfilled)\n else:\n s += ')'\n instance_fulfilled = None\n if d is not None:\n s += ' \\n' + ' ' * 4 * len(stack) + r + role_align_key + ' (' + d(align_key)\n stack.append((h, r, d))\n instance_fulfilled = False\n return s\n\n # def get_all_alignments(concept_var, sep, left=True):\n # '''\n # Get all alignments from the concept\n # '''\n #\n # # def alignment_to_text(alignments):\n # # '''\n # # Convert all alignments to text\n # # '''\n # # def filter(idxs, tol):\n # # '''\n # # Resulting only the longest contiguous elements\n # # '''\n # # diffs = [idxs[i + 1] - idxs[i] for i in range(len(idxs) - 1)]\n # # start = False\n # # max_length = -1\n # # for i in range(len(diffs)):\n # # if diffs[i] <= tol:\n # # if not start:\n # # start = True\n # # length = 1\n # # start_idx = i\n # # else:\n # # length += 1\n # # else:\n # # if start:\n # # start = False\n # # end_idx = i\n # # if length >= max_length:\n # # max_length = length\n # # max_start_idx = start_idx\n # # max_end_idx = end_idx\n # # if start:\n # # end_idx = i + 1\n # # if length >= max_length:\n # # max_start_idx = start_idx\n # # max_end_idx = end_idx\n # # return [idxs[i] for i in range(max_start_idx, max_end_idx + 1)]\n # #\n # # doc = en_nlp(\" \".join(self.amr_obj.tokens()))\n # # alignments = 
sorted(list(set(alignments)))\n # # # We used noun chunks to prevent orphaned noun\n # # noun_chunks = list(doc.noun_chunks)\n # # new_alignments = set()\n # # for a in alignments:\n # # new_alignments.add(a)\n # # # Insert all noun chunks to new alignment\n # # for noun in noun_chunks:\n # # if noun.start <= a <= noun.end:\n # # new_alignments.update([i for i in range(noun.start, noun.end)])\n # # text = [self.amr_obj.tokens()[idx] for idx in filter(sorted(list(new_alignments)), 3)]\n # # text = \" \".join(text)\n # # return text\n #\n # def get_triplet(key):\n # result_triplets = []\n # triples = self.amr_obj.f_triples(dep=key, rel=':ARG-of', normalize_inverses=True)\n # if triples:\n # result_triplets.extend(triples)\n # triples = self.amr_obj.f_triples(head=key)\n # if triples:\n # result_triplets.extend(triples)\n # return result_triplets\n #\n # triplets_stor = {}\n # entry = defaultdict(int)\n # q = queue.Queue()\n # q.put(concept_var)\n # entry[concept_var] += 1\n # result_alignments = []\n # alignments = self.amr_obj.alignments()\n # role_alignments = self.amr_obj.role_alignments()\n # reentrancies = self.amr_obj.reentrancies()\n # while not q.empty():\n # u = q.get()\n # triples = get_triplet(u)\n # for triplet in triples:\n # if triplet not in triplets_stor:\n # triplets_stor[triplet] = 0\n # if type(triplet[2]) is amr.Var:\n # if entry[triplet[2]] <= reentrancies[triplet[2]] + 1:\n # q.put(triplet[2])\n # entry[triplet[2]] += 1\n #\n # def is_pos_correct(idx, sep, left=True):\n # if left:\n # return True if idx < sep else False\n # else:\n # return True if idx > sep else False\n #\n # if triplet in alignments:\n # idx = int(alignments[triplet].split('.')[1])\n # #if is_pos_correct(idx, sep, left):\n # result_alignments.append(idx)\n # if triplet in role_alignments:\n # idx = int(role_alignments[triplet].split('.')[1])\n # #if is_pos_correct(idx, sep, left):\n # result_alignments.append(idx)\n # return alignment_to_text(result_alignments)\n\n if self.triples == {}:\n return ''\n\n results = []\n for key, triple in self.triples.items():\n result_1 = get_alignment(triple[1])\n if result_1 is None:\n continue\n if triple[0] is not None:\n result_0 = get_all_amr_string(triple[0])\n else:\n result_0 = ''\n for concept_var in triple[2]:\n if concept_var:\n result_2 = get_all_amr_string(concept_var)\n if len(result_2.split(' ')) == 1:\n if not result_2.startswith('('):\n result_2 = '(' + result_2 + ')'\n results.append((result_0, self.amr_obj.var2concept()[triple[1]]._name, result_2))\n\n # f = open('amr_string.txt', 'w')\n # for l, m, r in results:\n # if l != '':\n # f.write(l+'\\n')\n # if r != '':\n # f.write(r+'\\n')\n # f.close()\n return results",
"def typedAntennaNames() :\n a=s.getAntennaAssignments()\n namelist = []\n for i in a:\n namelist.append( i.typedAntennaName )\n return namelist",
"def get_acid_name(seq):\n term_list = []\n for i in __kmers(seq,k=3):\n res = __get_key(i,full_amino_acid_name)\n term_list.append(res)\n return ''.join(term_list)",
"def tactics(self) -> Sequence[str]:\n return pulumi.get(self, \"tactics\")",
"def currentAntennaNames(carmaOnly=False) :\n a=s.getAntennaAssignments()\n namelist = []\n for i in a:\n cname = i.carmaAntennaName\n tname = i.typedAntennaName\n if (carmaOnly) :\n names = i.carmaAntennaName\n else :\n names = \"%s(%s)\" %(cname,tname)\n namelist.append(names)\n return namelist",
"def GenerateACLString(self):\n target_string = ''\n app_id = 100 # variable in ACL sentences.\n\n for terms in self.silverpeak_terms:\n for term in terms:\n for unit in term.GenerateUnitList():\n if term.term.precedence:\n for precedence in term.term.precedence:\n target_string += self._GenerateACLLine(app_id, term,\n unit, precedence)\n app_id += 100\n else:\n target_string += self._GenerateACLLine(app_id, term, unit)\n app_id += 100\n # finalize the target string\n target_string = self._FinalizeString(target_string, self.pre_string,\n self.fixed_content)\n return target_string",
"def AttributeString(self) -> str:",
"def AttributeString(self) -> str:",
"def getannotationstrings(cann):\n cdesc = ''\n if cann['description']:\n cdesc += cann['description'] + ' ('\n if cann['annotationtype'] == 'diffexp':\n chigh = []\n clow = []\n call = []\n for cdet in cann['details']:\n if cdet[0] == 'all':\n call.append(cdet[1])\n continue\n if cdet[0] == 'low':\n clow.append(cdet[1])\n continue\n if cdet[0] == 'high':\n chigh.append(cdet[1])\n continue\n cdesc += ' high in '\n for cval in chigh:\n cdesc += cval + ' '\n cdesc += ' compared to '\n for cval in clow:\n cdesc += cval + ' '\n cdesc += ' in '\n for cval in call:\n cdesc += cval + ' '\n elif cann['annotationtype'] == 'isa':\n cdesc += ' is a '\n for cdet in cann['details']:\n cdesc += 'cdet,'\n elif cann['annotationtype'] == 'contamination':\n cdesc += 'contamination'\n else:\n cdesc += cann['annotationtype'] + ' '\n for cdet in cann['details']:\n cdesc = cdesc + ' ' + cdet[1] + ','\n return cdesc",
"def __getConcepts(self, split):\n umls_cui = split[0].strip()\n language = split[1].strip()\n kb = split[11].strip()\n term_type = split[12].strip()\n kb_cui = split[13].strip()\n kb_name = split[14].strip()\n return umls_cui, language, kb, term_type, kb_cui, kb_name",
"def getannotationstrings2(cann):\n cdesc = ''\n if cann['description']:\n cdesc += cann['description'] + ' ('\n if cann['annotationtype'] == 'diffexp':\n chigh = []\n clow = []\n call = []\n for cdet in cann['details']:\n if cdet[0] == 'all':\n call.append(cdet[1])\n continue\n if cdet[0] == 'low':\n clow.append(cdet[1])\n continue\n if cdet[0] == 'high':\n chigh.append(cdet[1])\n continue\n cdesc += ' high in '\n for cval in chigh:\n cdesc += cval + ' '\n cdesc += ' compared to '\n for cval in clow:\n cdesc += cval + ' '\n cdesc += ' in '\n for cval in call:\n cdesc += cval + ' '\n elif cann['annotationtype'] == 'isa':\n cdesc += ' is a '\n for cdet in cann['details']:\n cdesc += 'cdet,'\n elif cann['annotationtype'] == 'contamination':\n cdesc += 'contamination'\n else:\n cdesc += cann['annotationtype'] + ' '\n for cdet in cann['details']:\n cdesc = cdesc + ' ' + cdet[1] + ','\n\n if len(cdesc) >= 1 and cdesc[-1] == ',':\n cdesc = cdesc[:-1]\n return cdesc",
"def lemma(self) -> str:",
"def get_assumption(assum):\n\tif not assum:\n\t\treturn\n\tret = []\n\tfor word in text:\n\t\tfor letter in word:\n\t\t\tret += assum[letter]\n\t\tret += \" \"\n\treturn ''.join(ret).strip()",
"def get_atril_string(self):\n return \", \".join(str(item) for item in self.atril)",
"def _get_subject_alt(self, name):\n\n if self._subject_alt_name is None:\n return []\n\n output = []\n for general_name in self._subject_alt_name:\n if general_name.name == name:\n output.append(general_name.native)\n return output",
"def concept_to_lemmas(concept):\n lemmas = []\n if concept.startswith('/c/'):\n parts = concept.split('/')\n lang = parts[2]\n if len(parts) > 3:\n # get the concept name\n lemmas.extend(parts[3].replace('_', ' ').split())\n if len(parts) > 5:\n uri = make_concept_uri(parts[5], lang)\n norm = uri.split('/')[3]\n lemmas.extend(norm.split('_'))\n return lemmas",
"def to_str(self) -> str:\n fields: t.Tuple[dataclasses.Field, ...] = dataclasses.fields(self)\n items: t.List[str] = [\"# Concepts for getting assets from the API\"]\n for field in fields:\n value: AssetsHelper = getattr(self, field.name)\n items.append(value.to_str().strip())\n value: str = \"\\n\\n\".join(items)\n return value",
"def test_get_result_string_1(self):\n attr_list = [\"type\", \"phage_id\", \"eval_mode\"]\n string = import_genome.get_result_string(self.tkt, attr_list)\n exp = \"type: replace, phage_id: Trixie, eval_mode: final\"\n self.assertEqual(string, exp)",
"def get_verbs(self) -> Set[str]:",
"def getattdocs(self, aname):\n dsc = self.schema.xpath(\"//tei:attDef[@ident=$name]/tei:desc/text()\", name=aname, namespaces=TEI_NS)\n if dsc:\n return re.sub('[\\s\\t]+', ' ', dsc[0]) # strip extraneous whitespace\n else:\n return \"\"",
"def get_aa (tRNA):\n\tpass",
"def get_atom_description(self, atom):\n return \"_\".join(\n [str(x) for x in [atom.GetAtomicNum(), atom.GetHybridization()]]\n )",
"def getConstantSentenceForms(self):",
"def _features_of(entry: _LexiconEntry) -> str:\n return entry[\"features\"]",
"def get_ability_scores(self):\n s = ''\n for i in range(6):\n s += ' ' + str(self.dna[i])\n return s",
"def do(s):\r\n return get_AA_subs(generate_mutString(s))",
"def test_str(self, concept_embedding: ConceptEmbedding):\n # printing should not rise error\n str(concept_embedding)",
"def __str__(self):\n st=\"\"\n for g in self:\n st+=g.fasta()\n st+=\"\\n\"\n return st",
"def getConcept(result_json):\n answers = {}\n answers[0] = result_json['outputs'][0][\"data\"]['concepts'][0]['name']\n answers[1] = result_json['outputs'][0][\"data\"]['concepts'][1]['name']\n answers[2] = result_json['outputs'][0][\"data\"]['concepts'][2]['name']\n answers[3] = result_json['outputs'][0][\"data\"]['concepts'][3]['name']\n answers[4] = result_json['outputs'][0][\"data\"]['concepts'][4]['name']\n return answers",
"def get_text(self):"
] |
[
"0.62805957",
"0.59305984",
"0.57337254",
"0.56466794",
"0.5593321",
"0.55783033",
"0.55317634",
"0.55317634",
"0.5517116",
"0.55127764",
"0.55023813",
"0.5495977",
"0.54629284",
"0.54532874",
"0.5445935",
"0.54427445",
"0.5428915",
"0.5425408",
"0.5423252",
"0.54044414",
"0.54007566",
"0.5398624",
"0.5341395",
"0.5319441",
"0.5310604",
"0.5292388",
"0.5278739",
"0.5269716",
"0.52635384",
"0.5247771"
] |
0.66864014
|
0
|
Write tok to file, for openIE relation extraction later
|
def write_tok_to_file(self):
dir_path = os.path.join(self.output_path, 'tokens')
if not os.path.exists(dir_path):
os.makedirs(dir_path)
for dataset_name, dataset in self.amr_corpus.items():
f = open(os.path.join(dir_path, dataset_name + '_tok.txt'), 'w')
for doc_name, doc in dataset.items():
for amr_id, amr_data in doc.items():
amr_strings = self.amr_corpus[dataset_name][doc_name][amr_id]['amr_string_triples']
if not amr_strings:
continue
tok = ' '.join(self.amr_corpus[dataset_name][doc_name][amr_id]['tok'])
f.write(tok + '\n')
f.close()
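# A stand-alone variant of the same routine, given purely as an illustration:
# it assumes a toy corpus dict shaped like {dataset: {doc: {amr_id: {'tok': [...]}}}}
# and uses a `with` block so each per-dataset file is closed even on errors
# (the amr_string_triples filter from the method above is omitted for brevity).
import os

def write_tokens(corpus, out_dir):
    os.makedirs(out_dir, exist_ok=True)
    for dataset_name, docs in corpus.items():
        with open(os.path.join(out_dir, dataset_name + '_tok.txt'), 'w') as f:
            for doc in docs.values():
                for amr_data in doc.values():
                    f.write(' '.join(amr_data['tok']) + '\n')  # one tokenized sentence per line

# write_tokens({'dev': {'d1': {'a1': {'tok': ['The', 'boy', 'wants', 'to', 'go']}}}}, 'tokens')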
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def write_tokens(self, tokenizer):\n output_file = '{}ktT.xml'.format(tokenizer.filename[:-5])\n with open(output_file, 'w') as f:\n print 'writing tokens to {}'.format(output_file)\n f.write(''.join(tokenizer.token_output))",
"def write_to_file(self, filename):\n self.octree.write(str.encode(filename))\n print(\"Save octomap to \"+filename)",
"def writeToTempXml(self):\n name = self.fileToProcess.name\n all_tokens = ET.Element(\"tokens\")\n for token in self.tokensTable:\n if token.getType() == KEYWORD:\n keyword = ET.SubElement(all_tokens, \"keyword\")\n keyword.text = ' '+token.getValue()+' '\n elif token.getType() == IDENTIFIER:\n identifier = ET.SubElement(all_tokens, \"identifier\")\n identifier.text = ' '+token.getValue()+' '\n elif token.getType() == SYMBOL:\n symbol = ET.SubElement(all_tokens, \"symbol\")\n symbol.text = ' '+token.getValue()+' '\n elif token.getType() == STRING_CONST:\n stringConstant = ET.SubElement(all_tokens, \"stringConstant\")\n stringConstant.text = ' '+token.getValue()+' '\n elif token.getType() == INT_CONST:\n integerConstant = ET.SubElement(all_tokens, \"integerConstant\")\n integerConstant.text = ' '+token.getValue()+' '\n tree = ET.ElementTree(all_tokens)\n tree.write(name + 'T' + '.xml')",
"def write(self, fname):\n pass",
"def to_file(self, f: str) -> None:\n with open(f, \"w\") as open_file:\n open_file.write(\"\\n\".join(self.itos) + \"\\n\")",
"def to_file(self, file_path, smirnoff_data):\n pass",
"def write_to_file(writer, data):\n feature = {\n \"text\": _int64_feature(data)\n }\n tf_example = tf.train.Example(features=tf.train.Features(feature=feature))\n writer.write(tf_example.SerializeToString())",
"def write_to_file(writer, data):\n feature = {\n \"text\": _int64_feature(data)\n }\n tf_example = tf.train.Example(features=tf.train.Features(feature=feature))\n writer.write(tf_example.SerializeToString())",
"def save_info_to_file(filepath, tokens):\n with open(filepath, 'w') as f:\n json.dump(tokens, f)",
"def write_to_file(self, filename: str) -> None:",
"def write(self, out):",
"def _toFile(self):\n pass",
"def _write_file(self, n, k, att, pol, selected, emb_dim, gold):\n target_file = self._get_target_name(n, k, att, pol, emb_dim)\n writer = csv.writer(open(os.path.join(target_file), \"w\"))\n header = [\"eid\", \"rids\", \"n\"]\n header = header + [\"gold_summary\"] if gold is not None else header\n header += [\"review_{}\".format(i) for i in range(n)]\n header += [\"extraction\", \"input_text\"]\n writer.writerow(header)\n for row in selected:\n row = row[:3] + [gold[row[0]] if row[0] in gold else \"\"] + row[3:] if gold is not None else row\n writer.writerow(row)",
"def write(self, filename): # real signature unknown; restored from __doc__\n pass",
"def write(self, filename):\n pass",
"def write(self, filename):\n pass",
"def write_link_token(token_idx, entity, token_tag, output_file, mapping):\n for subtoken_idx, subtoken in enumerate(word_tokenize(token_tag.text)):\n token_idx += 1\n if entity is not None:\n yago_classes, lkif_classes = get_entity_classes(entity, mapping)\n if subtoken_idx == 0:\n row = \"{}\\t{}\\tB-{}\\tB-{}\\tB-{}\"\n else:\n row = \"{}\\t{}\\tI-{}\\tI-{}\\tI-{}\"\n print(row.format(\n token_idx, subtoken, url_entity_to_string(entity),\n yago_classes, lkif_classes).encode('utf-8'), file=output_file)\n else:\n print(\"{}\\t{}\\tO\\tO\\tO\".format(token_idx, subtoken).encode(\"utf-8\"),\n file=output_file)\n return token_idx",
"def writeto(self, fileout):\n \n dump_pkl(self.data, fileout)",
"def write(self, file):\n\n # Initialize output buffer\n out = ''\n\n # Print specification\n for key, value in self.specification.items():\n out += f'{key} : {value}\\n'\n\n # Print the tour\n if self.tour:\n out += 'TOUR_SECTION\\n'\n for s in self.tour:\n out += str(s) + '\\n'\n out += '-1\\n'\n\n # Append EOF\n out += 'EOF\\n'\n\n # Write to file\n with open(file, 'w') as f:\n f.write(out)",
"def write_to_bin(tok_files, out_file, makevocab=False):\n\n num_stories = len(tok_files)\n\n if makevocab:\n vocab_counter = collections.Counter()\n\n with open(out_file, 'wb') as writer:\n for idx,s in enumerate(tok_files):\n if idx % 1000 == 0:\n print(\"Writing story %i of %i; %.2f percent done\" % (idx, num_stories, float(idx)*100.0/float(num_stories)))\n\n path = os.path.join(tok_dir, s)\n src_path = \"%s.src.tok\" % path\n tgt_path = \"%s.tgt.tok\" % path\n for _ in [src_path, tgt_path]:\n if not os.path.isfile(_):\n raise Exception(\"Error: Couldn't find tokenized file %s\" % _)\n\n # Get the strings to write to .bin file\n article, abstract = [to_bytes(_) for _ in get_art_abs(src_path, tgt_path)]\n\n # Write to tf.Example\n tf_example = example_pb2.Example()\n tf_example.features.feature['article'].bytes_list.value.extend([article])\n tf_example.features.feature['abstract'].bytes_list.value.extend([abstract])\n tf_example_str = tf_example.SerializeToString()\n str_len = len(tf_example_str)\n writer.write(struct.pack('q', str_len))\n writer.write(struct.pack('%ds' % str_len, tf_example_str))\n\n # Write the vocab to file, if applicable\n if makevocab:\n art_tokens = article.split(b' ')\n abs_tokens = abstract.split(b' ')\n art_tokens = [t for t in art_tokens if t not in [to_bytes(SENTENCE_START), to_bytes(SENTENCE_END)]] # remove these tags from vocab\n abs_tokens = [t for t in abs_tokens if t not in [to_bytes(SENTENCE_START), to_bytes(SENTENCE_END)]] # remove these tags from vocab\n tokens = art_tokens + abs_tokens\n tokens = [t.strip() for t in tokens] # strip\n tokens = [t for t in tokens if t!=\"\"] # remove empty\n vocab_counter.update(tokens)\n\n print(\"Finished writing file %s\\n\" % out_file)\n\n # write vocab to file\n if makevocab:\n print(\"Writing vocab file...\")\n with open(os.path.join(finished_files_dir, \"vocab\"), 'wb') as writer:\n for word, count in vocab_counter.most_common(VOCAB_SIZE):\n writer.write(word + b' ' + to_bytes(str(count)) + b'\\n')\n print(\"Finished writing vocab file\")",
"def write_file_simple(self,filename):\n\n output = open(filename,\"w\")\n # write header\n output.write(\"# %1s %3s %22s %6s %22s\\n\"%(\"l\",\"n\",\"nu_theo (muHz)\",\"unused\",\"Inertia\"))\n for i in range(self.modes.shape[0]):\n output.write(\" %1d %3d %22.15e 0.0 %22.15e\\n\"%( \\\n self.modes[\"l\"][i], \\\n self.modes[\"n\"][i], \\\n self.modes[\"freq\"][i]*self.glb[ifreq_ref], \\\n self.modes[\"inertia\"][i]))\n output.close()",
"def to_file(self, file_path, smirnoff_data):\n xml_string = self.to_string(smirnoff_data)\n with open(file_path, \"w\") as of:\n of.write(xml_string)",
"def to_file(self, fname, delimiter=\"\\t\", encoding=\"utf-8\"):\n with open(fname, \"wb\") as fh:\n for key, score in self.ranked_items():\n fh.write(self.to_record(key, score, delimiter).encode(encoding))",
"def write(self, cull=False):\n if cull:\n cull_prefixes(self).write()\n else:\n ser = self.g.serialize(format='nifttl', encoding='utf-8')\n with open(self.filename, 'wb') as f:\n f.write(ser)\n #print('yes we wrote the first version...', self.name)",
"def writeToFile(fil, aks, tid):\r\n\r\n f = open(\"processed_\"+fil, 'w')\r\n \r\n f.write(\"Aks Tid\")\r\n for i in range(len(aks)):\r\n f.write(f\"\\n{aks[i]} {tid[i]}\")\r\n f.close()",
"def write_to_file(self):\n\t\tfile = open(\"states.txt\", \"w\")\n\t\t\n\t\tpointer = self.head\n\t\twhile pointer != None:\n\t\t\tfile.write(pointer.state + \"\\t\" + pointer.info)\t\n\t\t\tpointer = pointer.next\n\n\t\tfile.close()",
"def write(self):",
"def write(self):",
"def write_tree_to_file(self, output_file_name):\n with open(output_file_name, \"w\") as output:\n tree_string = self.decisionTree.get_tree_string(self.decisionTree.root)\n output.write(tree_string[:len(tree_string) - 1])",
"def write_out(c2ptmk, ofn):\n print \"Writing out to [{}]\".format(ofn)\n with codecs.open(ofn, \"w\", \"utf8\") as ofd:\n for co, infos in sorted(c2ptmk.items()):\n ofd.write(u\"{}\\t{}\\t{}\\n\".format(\n co, infos[\"uri\"], \",\".join(\n [unicode(x) for x in infos[\"ptmks\"]])))"
] |
[
"0.68534285",
"0.665422",
"0.66518503",
"0.64659524",
"0.618629",
"0.6139454",
"0.61091554",
"0.61091554",
"0.6091548",
"0.60892123",
"0.60817105",
"0.6043151",
"0.6030217",
"0.6002952",
"0.59830815",
"0.59830815",
"0.5968159",
"0.5950414",
"0.59480244",
"0.59361947",
"0.59090865",
"0.5896104",
"0.58913386",
"0.58874744",
"0.5879462",
"0.5874955",
"0.5867954",
"0.5867954",
"0.586795",
"0.5858266"
] |
0.74253917
|
0
|
Write amr_string from each triple to file, for use by AMR generation
|
def write_amr_string_to_file(self):
dir_path = os.path.join(self.output_path, 'amr_string')
if not os.path.exists(dir_path):
os.makedirs(dir_path)
for dataset_name, dataset in self.amr_corpus.items():
f = open(os.path.join(dir_path, dataset_name + '_amr_string.txt'), 'w')
for doc_name, doc in dataset.items():
for amr_id, amr_data in doc.items():
amr_strings = self.amr_corpus[dataset_name][doc_name][amr_id]['amr_string_triples']
for left, middle, right in amr_strings:
if left != '':
f.write(left+'\n')
if right != '':
f.write(right+'\n')
f.close()
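# Illustration of the data being written (hedged: each entry of
# 'amr_string_triples' is assumed to be a (left_amr, concept_name, right_amr)
# tuple as built above, and the literal values here are invented):
triples = [('(b / boy)', 'want-01', '(g / go-01)'), ('', 'see-01', '(d / dog)')]
with open('dev_amr_string.txt', 'w') as f:
    for left, _concept, right in triples:
        if left:
            f.write(left + '\n')   # empty fragments are skipped, so lines need not pair up
        if right:
            f.write(right + '\n')
# dev_amr_string.txt now holds three lines: (b / boy), (g / go-01), (d / dog)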
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def generate_amr_string_from_triples(self):\n def get_alignment(f_concept_var):\n \"\"\"\n Get alignment for a single concept\n \"\"\"\n for triplet, a in self.amr_obj.alignments().items():\n if f_concept_var == triplet[0] and triplet[1] == ':instance-of':\n return int(a.split('.')[1].split(',')[0])\n\n def get_all_amr_string(f_concept_var):\n \"\"\"\n Get all amr string from the concept\n \"\"\"\n def get_triples(key):\n result_triples = []\n f_triples = self.amr_obj.triples(dep=key, rel=':ARG-of', normalize_inverses=True)\n if f_triples:\n result_triples.extend(f_triples)\n f_triples = self.amr_obj.triples(head=key)\n if f_triples:\n result_triples.extend(f_triples)\n return result_triples\n entry = defaultdict(int)\n q = []\n q.append((amr.Var('TOP'), ':top', f_concept_var))\n entry[f_concept_var] += 1\n reentrancies = self.amr_obj.reentrancies()\n all_triples = []\n while q:\n u = q.pop()\n all_triples.append(u)\n triples = get_triples(u[2])\n for triplet in triples[::-1]:\n if triplet[2] in reentrancies:\n if entry[triplet[2]] <= reentrancies[triplet[2]] + 1:\n q.append(triplet)\n entry[triplet[2]] += 1\n else:\n q.append(triplet)\n entry[triplet[2]] += 1\n s = ''\n stack = []\n instance_fulfilled = None\n align = role_align = {}\n concept_stack_depth = {\n None: 0} # size of the stack when the :instance-of triple was encountered for the variable\n for h, r, d in all_triples + [(None, None, None)]:\n align_key = align.get((h, r, d), '')\n role_align_key = role_align.get((h, r, d), '')\n if r == ':top':\n s += '(' + d()\n stack.append((h, r, d))\n instance_fulfilled = False\n elif r == ':instance-of':\n s += ' / ' + d(align_key)\n instance_fulfilled = True\n concept_stack_depth[h] = len(stack)\n elif r == ':wiki':\n continue\n elif h == stack[-1][2] and r == ':polarity': # polarity gets to be on the same line as the concept\n s += ' ' + r + role_align_key + ' ' + d(align_key)\n else:\n while len(stack) > concept_stack_depth[h]:\n h2, r2, d2 = stack.pop()\n if instance_fulfilled is False:\n # just a variable or constant with no concept hanging off of it\n # so we have an extra paren to get rid of\n align_key2 = align.get((h2, r2, d2), '')\n s = s[:-len(d2(align_key2)) - 1] + d2(align_key2, append=not instance_fulfilled)\n else:\n s += ')'\n instance_fulfilled = None\n if d is not None:\n s += ' \\n' + ' ' * 4 * len(stack) + r + role_align_key + ' (' + d(align_key)\n stack.append((h, r, d))\n instance_fulfilled = False\n return s\n\n # def get_all_alignments(concept_var, sep, left=True):\n # '''\n # Get all alignments from the concept\n # '''\n #\n # # def alignment_to_text(alignments):\n # # '''\n # # Convert all alignments to text\n # # '''\n # # def filter(idxs, tol):\n # # '''\n # # Resulting only the longest contiguous elements\n # # '''\n # # diffs = [idxs[i + 1] - idxs[i] for i in range(len(idxs) - 1)]\n # # start = False\n # # max_length = -1\n # # for i in range(len(diffs)):\n # # if diffs[i] <= tol:\n # # if not start:\n # # start = True\n # # length = 1\n # # start_idx = i\n # # else:\n # # length += 1\n # # else:\n # # if start:\n # # start = False\n # # end_idx = i\n # # if length >= max_length:\n # # max_length = length\n # # max_start_idx = start_idx\n # # max_end_idx = end_idx\n # # if start:\n # # end_idx = i + 1\n # # if length >= max_length:\n # # max_start_idx = start_idx\n # # max_end_idx = end_idx\n # # return [idxs[i] for i in range(max_start_idx, max_end_idx + 1)]\n # #\n # # doc = en_nlp(\" \".join(self.amr_obj.tokens()))\n # # alignments = 
sorted(list(set(alignments)))\n # # # We used noun chunks to prevent orphaned noun\n # # noun_chunks = list(doc.noun_chunks)\n # # new_alignments = set()\n # # for a in alignments:\n # # new_alignments.add(a)\n # # # Insert all noun chunks to new alignment\n # # for noun in noun_chunks:\n # # if noun.start <= a <= noun.end:\n # # new_alignments.update([i for i in range(noun.start, noun.end)])\n # # text = [self.amr_obj.tokens()[idx] for idx in filter(sorted(list(new_alignments)), 3)]\n # # text = \" \".join(text)\n # # return text\n #\n # def get_triplet(key):\n # result_triplets = []\n # triples = self.amr_obj.f_triples(dep=key, rel=':ARG-of', normalize_inverses=True)\n # if triples:\n # result_triplets.extend(triples)\n # triples = self.amr_obj.f_triples(head=key)\n # if triples:\n # result_triplets.extend(triples)\n # return result_triplets\n #\n # triplets_stor = {}\n # entry = defaultdict(int)\n # q = queue.Queue()\n # q.put(concept_var)\n # entry[concept_var] += 1\n # result_alignments = []\n # alignments = self.amr_obj.alignments()\n # role_alignments = self.amr_obj.role_alignments()\n # reentrancies = self.amr_obj.reentrancies()\n # while not q.empty():\n # u = q.get()\n # triples = get_triplet(u)\n # for triplet in triples:\n # if triplet not in triplets_stor:\n # triplets_stor[triplet] = 0\n # if type(triplet[2]) is amr.Var:\n # if entry[triplet[2]] <= reentrancies[triplet[2]] + 1:\n # q.put(triplet[2])\n # entry[triplet[2]] += 1\n #\n # def is_pos_correct(idx, sep, left=True):\n # if left:\n # return True if idx < sep else False\n # else:\n # return True if idx > sep else False\n #\n # if triplet in alignments:\n # idx = int(alignments[triplet].split('.')[1])\n # #if is_pos_correct(idx, sep, left):\n # result_alignments.append(idx)\n # if triplet in role_alignments:\n # idx = int(role_alignments[triplet].split('.')[1])\n # #if is_pos_correct(idx, sep, left):\n # result_alignments.append(idx)\n # return alignment_to_text(result_alignments)\n\n if self.triples == {}:\n return ''\n\n results = []\n for key, triple in self.triples.items():\n result_1 = get_alignment(triple[1])\n if result_1 is None:\n continue\n if triple[0] is not None:\n result_0 = get_all_amr_string(triple[0])\n else:\n result_0 = ''\n for concept_var in triple[2]:\n if concept_var:\n result_2 = get_all_amr_string(concept_var)\n if len(result_2.split(' ')) == 1:\n if not result_2.startswith('('):\n result_2 = '(' + result_2 + ')'\n results.append((result_0, self.amr_obj.var2concept()[triple[1]]._name, result_2))\n\n # f = open('amr_string.txt', 'w')\n # for l, m, r in results:\n # if l != '':\n # f.write(l+'\\n')\n # if r != '':\n # f.write(r+'\\n')\n # f.close()\n return results",
"def write_tok_to_file(self):\n dir_path = os.path.join(self.output_path, 'tokens')\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n for dataset_name, dataset in self.amr_corpus.items():\n f = open(os.path.join(dir_path, dataset_name + '_tok.txt'), 'w')\n for doc_name, doc in dataset.items():\n for amr_id, amr_data in doc.items():\n amr_strings = self.amr_corpus[dataset_name][doc_name][amr_id]['amr_string_triples']\n if not amr_strings:\n continue\n tok = ' '.join(self.amr_corpus[dataset_name][doc_name][amr_id]['tok'])\n f.write(tok + '\\n')\n f.close()",
"def save_matricies(matrix_str_arr):\n\tmatrix_cntr = 0\n\tfor curr_matrix_str in matrix_str_arr:\n\t\tmatrix_filename = \"Example Matrices/matrix_\" + str(matrix_cntr) + \".txt\"\n\t\twith open(matrix_filename, 'w') as writefile:\n\t\t\twritefile.write(curr_matrix_str)\n\t\tmatrix_cntr+=1",
"def write_results(gold, pred, ratings, text):\n\n f = open(\"results.txt\", \"w\")\n for g, p, r, t in zip(gold, pred, ratings, text):\n f.write(\"%d\\t%d\\t%d\\t%s\\n\" % (g,p,r,t))\n\n f.close()",
"def writeArff(file_name, relation, classes, attrs, data):\n\tprint 'writeArff:', file_name, len(data), len(data[0])\n\tf = file(file_name, 'w')\n\tf.write('%\\n')\n\tf.write('%% %s \\n' % os.path.basename(file_name))\n\tf.write('%\\n')\n\tf.write('% Created by ' + os.path.basename(sys.argv[0]) + ' on ' + datetime.date.today().strftime(\"%A, %d %B %Y\") + '\\n')\n\tf.write('% Code at http://bit.ly/b7Kkqt\\n')\n\tf.write('%\\n')\n\tf.write('% Constructed from raw data in http://archive.ics.uci.edu/ml/machine-learning-databases/soybean/\\n')\n\tf.write('%% %d instances\\n' % len(data))\n\tf.write('%% %d attributes + 1 class = %d columns\\n' % (len(data[0]) - 1, len(data[0])))\n\tf.write('\\n')\n\tf.write('@RELATION ' + relation + '\\n\\n')\n\tf.write('@ATTRIBUTE %-15s {%s}\\n' % ('class', ','.join([x for x in classes if not x == '?'])))\n\tfor a in attrs:\n\t\tf.write('@ATTRIBUTE %-15s {%s}\\n' % (a['attr'], ','.join([x for x in a['vals'] if not x == '?'])))\n\tf.write('\\n@DATA\\n\\n')\n\tfor instance in data:\n\t\tf.write(', '.join(instance) + '\\n')\n\tf.close()\n\n\t\"\"\" Copy .arff files to .arff.txt so they can be viewed from Google docs \"\"\"\n\tprint 'writeArff:', file_name + '.txt', '-- duplicate'\n\tshutil.copyfile(file_name, file_name + '.txt')",
"def write_out4fp(fname,specorder,nspcs,agr,nr,rmax,pairs,nperline=6):\n ndat = nr *len(pairs)\n data = np.zeros(ndat)\n n = 0\n for pair in pairs:\n isid,jsid = pair\n for i in range(nr):\n data[n] = agr[isid,jsid,i]\n n += 1\n\n with open(fname,'w') as f:\n f.write('# RDF for pairs: ')\n for pair in pairs:\n si = specorder[pair[0]-1]\n sj = specorder[pair[1]-1]\n f.write(' {0:s}-{1:s},'.format(si,sj))\n f.write('\\n')\n f.write('# rmax, nr = {0:.3f}, {1:d}\\n'.format(rmax,nr))\n f.write('#\\n')\n #...Num of data, weight for the data\n f.write(' {0:6d} {1:7.3f}\\n'.format(ndat, 1.0))\n j0 = 0\n while True:\n f.write(' '.join('{0:12.4e}'.format(data[j]) for j in range(j0,j0+nperline) if j < ndat))\n f.write('\\n')\n j0 += nperline\n if j0 >= ndat:\n break\n\n return None",
"def _write_outfile(A):\n def __remove_symmetry_A(A):\n A_triu = defaultdict(int)\n for (i, j, k), w in list(A.items()):\n if j > i:\n A_triu[(i, j, k)] = w\n return A_triu\n def __write_nodes(outfile):\n outfile += \"*Vertices %d\" % Nn\n for nid, label in enumerate(nodes):\n outfile += '\\n%d \"%s\" 1.0' % (nid + index_from, str(label))\n return outfile\n def __write_intra_edges(outfile):\n outfile += \"\\n*Intra\\n# layer node node [weight]\"\n for (i, j, k), w in list(__remove_symmetry_A(A).items()):\n outfile += '\\n%d %d %d %f' % (\n k + index_from, # layer\n nodemap[i] + index_from, # node\n nodemap[j] + index_from, # node\n w # weight\n )\n return outfile\n\n outfile = \"\"\n outfile = __write_nodes(outfile)\n outfile = __write_intra_edges(outfile)\n\n return outfile",
"def writeToFile(fil, aks, tid):\r\n\r\n f = open(\"processed_\"+fil, 'w')\r\n \r\n f.write(\"Aks Tid\")\r\n for i in range(len(aks)):\r\n f.write(f\"\\n{aks[i]} {tid[i]}\")\r\n f.close()",
"def write(afile, seqs): \n for s in seqs :\n writeseq(afile, s)",
"def put_str(file,tupla): \n if type(tupla)!=type((2,)):\n raise 'Need a tuple of variables'\n f=open(file,'w') \n for i in range(1,len(tupla)):\n if len(tupla[i])!=len(tupla[0]):\n raise 'Variable lists have different lenght'\n for i in range(len(tupla[0])):\n cosas=[]\n for j in range(len(tupla)):cosas.append(str(tupla[j][i]))\n f.write(join(cosas)+'\\n')\n f.close()",
"def _write_outfile(A):\n def __remove_symmetry_A(A):\n A_triu = defaultdict(int)\n for (i, j, k), w in list(A.items()):\n if j > i:\n A_triu[(i, j, k)] = w\n return A_triu\n def __write_nodes(outfile):\n outfile += \"*Vertices %d\" % Nn\n for nid, label in enumerate(nodes):\n outfile += '\\n%d \"%s\" 1.0' % (nid + index_from, labelmap[label])\n return outfile\n def __write_edges(outfile):\n outfile += \"\\n*Intra\\n# layer node node [weight]\"\n sorted_A_sparse = sorted(list(__remove_symmetry_A(A).items()), key=lambda ind__: ind__[0][2])\n for (i, j, k), w in sorted_A_sparse:\n outfile += '\\n%d %d %d %f' % (\n k + index_from, # layer\n nodemap[i] + index_from, # node\n nodemap[j] + index_from, # node\n w # weight\n )\n return outfile\n \n outfile = \"\"\n outfile = __write_nodes(outfile)\n outfile = __write_edges(outfile)\n \n return outfile",
"def write_file(self, lst_of_palidroms: list, result_file: str):\n with open(result_file, 'w', encoding='utf-8', errors='ignore') as result:\n for word in lst_of_palidroms:\n result.write(word + '\\n')",
"def write_file(l_dta, outputfile):\n l_dta2 = []\n for row in l_dta:\n s = '\\t'.join(row)\n l_dta2.append(s)\n s_dta = \"\\r\\n\".join(l_dta2)\n try:\n with open(outputfile, 'w') as fd:\n fd.write(s_dta)\n except (IOError,) as e:\n tracker()\n return None",
"def write_traj(name,r_eq):\r\n f = open(name, 'w') #eqilibration.dump'\r\n N =len(r_eq[0,:,0])\r\n steps = len(r_eq[0,0,:])\r\n types = np.linspace(0,N-1,N)\r\n types = np.ones(N)\r\n types[1::3] = 2\r\n for kk in tqdm(range(steps)):\r\n f.write('ITEM: TIMESTEP \\n')\r\n f.write('{} \\n'.format(dt*kk))\r\n f.write('ITEM: NUMBER OF ATOMS \\n')\r\n f.write('{} \\n'.format(N))\r\n f.write('ITEM: BOX BOUNDS pp pp pp\\n')\r\n f.write('{} {} \\n'.format(-0,L))\r\n f.write('{} {} \\n'.format(-0,L))\r\n f.write('{} {} \\n'.format(-0,L))\r\n f.write('ITEM: ATOMS id type x y z Radius \\n')\r\n for ii in range(N):\r\n f.write(' {} {} {} {} {} {}\\n'.format(ii+1,types[ii],r_eq[0,ii,kk],r_eq[1,ii,kk],r_eq[2,ii,kk], .2e-10, ))\r\n f.close() \r\n return",
"def writeFastaFile(filename,sequences):\n fhw=open(filename,\"w\")\n for id in sequences:\n fhw.write(\">\"+id+\"\\n\"+sequences[id]+\"\\n\")\n fhw.close()",
"def write_to_txt(data, filename, attr='w'):\n f = open(filename, attr, encoding='utf-8', errors='ignore')\n for item in data:\n f.write(item.__str__())\n f.close()",
"def create_output_file(arr):\r\n for i in arr:\r\n output_file.write(f'{i[0]}\\t{i[1]}\\n')",
"def outTxt(data, outPath, fileName):\n\n with open(outPath+fileName, \"wb\") as f:\n f.write(\"index,link,name,rating,review,price,category,neighborhood,address,phone,feedback\\n\")\n for record in data:\n f.write(\"%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\\n\" % \\\n (record[0],record[1],record[2],record[3],record[4],record[5],record[6],\\\n record[7],record[8],record[9],record[10]))",
"def write_text_file(dictionary, path):\n dict_to_extract = dictionary\n string_array = []\n for key in dict_to_extract:\n #string\n change_number = str(key)\n #string\n #[\"swapwithlimit\", \"adddummynode\", \"swapwithdummynode\", \"swapwithswitchnode\", \"swapwithcrossingnode\", \"deletenode\"]\n action = dict_to_extract[key]['action_completed']\n if action == \"swapwithlimit\":\n action = \"Swap with LimitOfNetwork\"\n elif action == \"adddummynode\":\n action = \"Add DummyNode\"\n elif action == \"swapwithdummynode\":\n action = \"Swap with DummyNode\"\n elif action == \"swapwithswitchnode\":\n action = \"Swap with Switch\"\n elif action == \"swapwithcrossingnode\":\n action = \"Swap with FlatCrossing\"\n elif action == \"deletenode\":\n action = \"Delete node\"\n \n #tuple\n node_acted_upon = str(dict_to_extract[key]['node_acted_upon'])\n #tuple\n node_swapped_or_added = str(dict_to_extract[key]['node_swapped_or_added'])\n #array of tuples\n nodes_created = dict_to_extract[key]['nodes_created']\n array_of_created_nodes_strings = []\n for item in nodes_created:\n array_of_created_nodes_strings.append(str(item))\n nodes_created_string = \" \"\n for item in array_of_created_nodes_strings:\n nodes_created_string+=str(item)\n #array of tuples\n nodes_deleted = dict_to_extract[key]['nodes_deleted']\n array_of_deleted_nodes_strings = []\n for item in nodes_deleted:\n array_of_deleted_nodes_strings.append(str(item))\n nodes_deleted_string = \" \"\n for item in array_of_deleted_nodes_strings:\n nodes_deleted_string+=str(item)\n\n # print(key, '->', dict_to_extract[key])\n string_to_add_to_array = (\"Change Number: \" + str(key) + \n \" -- Action Taken: \" + action + \n \" || Node Changed: \" + node_acted_upon + \n \" || Node Swapped With: \" + node_swapped_or_added + \n \" || Nodes Created: \" + nodes_created_string + \n \" || Nodes Deleted: \" + nodes_deleted_string + \"\\n\\n\" )\n \n string_array.append(string_to_add_to_array)\n # print(key+action+node_acted_upon+node_swapped_or_added+nodes_created_string+nodes_deleted_string)\n \n\n path_to_save = path + \"/changes.txt\"\n file = open(path_to_save, 'w+')\n for item in string_array:\n file.write(item)\n file.close()\n return",
"def writeToFile(rdd, parallelized, output, user, format, features):\n fileEnd = \".\" + format\n output_path = output + \"/ig/\" + user + \"/\" + user + fileEnd\n if parallelized:\n rdd.saveAsTextFile(output_path)\n else:\n arr = np.array(rdd.collect())\n if not os.path.exists(os.path.dirname(output_path)):\n os.makedirs(os.path.dirname(output_path))\n with open(output_path, 'w+') as tsvfile:\n for row in arr:\n if format == \"json\":\n tsvfile.write(row.encode(\"utf-8\", errors='ignore') + \"\\n\")\n else:\n tsvfile.write(row + \"\\n\")\n if not format == \"json\":\n output_path = output + \"/ig/\" + user + \"/\" + user + \".txt\"\n saveCorpusFile(output_path, arr, format, features)",
"def text_write_data(file, l):\r\n file = open(file, \"a+\")\r\n for name in l:\r\n file.write(str(name) + \"\\n\")\r\n file.close",
"def write(cls, vas):\n with open(Y, 'w') as f_i:\n for items in vas:\n f_i.write('%s ' % items)\n print(\"File written successfully. Check out \\\"output.txt\\\" file\")\n f_i.close()",
"def writeInput_for_LAMMPS(rd, listAtoms, filename):\n #f=open(\"geo.kirigami_d0.0_\"+str(rd),\"w+\")\n f=open(filename+str(rd),\"w+\")\n f.write(\"\\n\")\n f.write(\"%d atoms\\n\" %len(listAtoms))\n f.write(\"1 atom types\\n\")\n f.write(\"\\n\")\n f.write(\"%f\\t%f xlo xhi\\n\" %(xlo-1, xhi+1))\n f.write(\"%f\\t%f ylo yhi\\n\" %(ylo-1, yhi+1))\n f.write(\"%f\\t%f zlo zhi\\n\" %(zlo-1, zhi+1))\n f.write(\"\\n\")\n f.write(\"Atoms\\n\")\n f.write(\"\\n\")\n for i in range (len(listAtoms)):\n f.write(\"%d\\t1\\t%f\\t%f\\t%f\\n\" %(i+1, listAtoms[i][0], listAtoms[i][1], listAtoms[i][2]))\n f.close()",
"def write_textfiles(train, val, anno):\n \"\"\" used only for side effect \"\"\"\n # fn creates text file line in form '<filename> <int mapping of grip type>'\n # for each train/val file and writes each line in corresponding\n # train.txt/val.txt for training\n # to_line = lambda fname:'{} {}'.format(fname,labelmap[anno[fname]['grip']])\n to_line = lambda fname:'{} {}'.format(fname,labelmap[anno[fname]['grip']])\n train_str = '\\n'.join(map(to_line, train))\n val_str = '\\n'.join(map(to_line, val))\n\n with open('train.txt', 'w') as trainfile:\n trainfile.write(train_str)\n with open('val.txt', 'w') as valfile:\n valfile.write(val_str)",
"def writeQrels(qrelList, fileName):\n with open(fileName, 'w') as f:\n for e in qrelList:\n f.write(qrelEntry2Str(e))\n f.write('\\n')",
"def write_to_txt(self):\r\n file = open(self.output_path, 'w')\r\n for question_id in self.question_ids:\r\n file.write(self.questions[question_id].question_string+str(self.questions[question_id].answer)+'\\n')\r\n file.close()",
"def output(results):\n\n text_file = open(\"problem_1_B_output.txt\", \"w\")\n\n out = \"\"\n\n for i, line in enumerate(results):\n\n string = \"Sample {}: {}, posterior probability of {:.4f}\".format(i + 1,\n line[0],\n line[1])\n\n out += (string + \"\\n\")\n\n text_file.write(out)\n\n text_file.close()",
"def write_fasta(alignment, dest):\n file_obj = None\n if isinstance(dest, str):\n file_obj = open(dest, \"w\")\n else:\n file_obj = dest\n for name, seq in list(alignment.items()):\n file_obj.write('>%s\\n%s\\n' % (name, seq) )\n if isinstance(dest, str):\n file_obj.close()",
"def arff_file(data,attributes,relation,description,output_dir=\"./\",filename=\"tmp\"):\n x = []\n for k in attributes:\n x.append(k[0])\n data_write = {}\n data_write['data'] = manip.dic_to_list(data,order=x)[1:]\n data_write['attributes'] = [tuple(l) for l in attributes]\n data_write['relation'] = unicode(relation)\n data_write['description'] = unicode(description)\n data_final = arf.dumps(data_write)\n #print data_final\n fil = open(output_dir + filename + '.arff', \"w\")\n fil.write(data_final)\n fil.close()\n\n return None",
"def write_out_prediction(predictions_file, src_seqs,\n trg_seqs, pred_string, src_feat_bundles,\n trg_feat_bundles, val_id):\n\n output_lines = []\n if trg_seqs[val_id] != pred_string:\n output_lines.append('*ERROR*')\n output_lines.append('SRC: {}'.format(src_seqs[val_id]))\n if src_feat_bundles[val_id]:\n output_lines.append('SFT: {}'.format(src_feat_bundles[val_id]))\n if trg_feat_bundles[val_id]:\n output_lines.append('TFT: {}'.format(trg_feat_bundles[val_id]))\n output_lines.append('TRG: {}'.format(trg_seqs[val_id]))\n output_lines.append('PRD: {}\\n'.format(pred_string))\n predictions_file.write('{}\\n'.format('\\n'.join(output_lines)))"
] |
[
"0.6043795",
"0.5816451",
"0.5616476",
"0.5615036",
"0.55679107",
"0.5558254",
"0.55434495",
"0.55120385",
"0.5497597",
"0.5473476",
"0.5469896",
"0.5452916",
"0.5444766",
"0.5437203",
"0.54188925",
"0.5401833",
"0.5394112",
"0.53601915",
"0.53462917",
"0.5338236",
"0.5273775",
"0.52713174",
"0.5266709",
"0.52255857",
"0.5223085",
"0.52067596",
"0.5186962",
"0.5174273",
"0.51642364",
"0.515519"
] |
0.7671765
|
0
|
Dynamically pads the batch_data with pad_id
|
def _dynamic_padding(self, batch_data, pad_id):
pad_p_len = min(self.max_p_len, max(batch_data['passage_length']))
pad_q_len = min(self.max_q_len, max(batch_data['question_length']))
batch_data['passage_token_ids'] = [(ids + [pad_id] * (pad_p_len - len(ids)))[: pad_p_len]
for ids in batch_data['passage_token_ids']]
batch_data['question_token_ids'] = [(ids + [pad_id] * (pad_q_len - len(ids)))[: pad_q_len]
for ids in batch_data['question_token_ids']]
return batch_data, pad_p_len, pad_q_len
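# A toy, dependency-free restatement of the padding rule (hedged: the cap stands
# in for self.max_p_len / self.max_q_len, and pad_id 0 is only an example value).
def pad_to_cap(seqs, pad_id, cap):
    pad_len = min(cap, max(len(s) for s in seqs))
    return [(s + [pad_id] * (pad_len - len(s)))[:pad_len] for s in seqs]

assert pad_to_cap([[3, 4, 5], [6, 7]], 0, 10) == [[3, 4, 5], [6, 7, 0]]  # short sequences are padded
assert pad_to_cap([[1, 2, 3, 4], [5]], 0, 3) == [[1, 2, 3], [5, 0, 0]]   # long ones are truncated to the cap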
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _dynamic_padding(self, batch_data, pad_id = 0 ):\n #print 'dynamic _padding...'\n #print 'pad_id' + str(pad_id)\n max_p_len = 1000\n max_q_len =1000\n pad_p_len = min(max_p_len, max(batch_data['passage_length']))+1\n #print 'pad_p_len' + str(pad_p_len)\n pad_q_len = min(max_q_len, max(batch_data['question_length']))\n #print 'pad_q_len' + str(pad_q_len)\n #for ids in batch_data['passage_token_ids'] :\n #print 'padding: '\n #print (ids + [pad_id] * (pad_p_len - len(ids)))[: pad_p_len]\n batch_data['passage_token_ids'] = [(ids + [pad_id] * (pad_p_len - len(ids)))[: pad_p_len]\n for ids in batch_data['passage_token_ids']]\n batch_data['question_token_ids'] = [(ids + [pad_id] * (pad_q_len - len(ids)))[: pad_q_len]\n for ids in batch_data['question_token_ids']]\n return batch_data, pad_p_len, pad_q_len",
"def apply_padding(self, batch_list):\n max_len = max([len(idx_seq) for idx_seq in batch_list])\n padded = [idx_seq + [self.vocab.pad_id] * (max_len - len(idx_seq)) for idx_seq in batch_list]\n return padded",
"def pad_dataset(dataset, padding=0):\n max_l = max(len(x) for x in dataset[\"input_ids\"])\n for name in PADDED_INPUTS:\n dataset[name] = [x + [padding if name != \"lm_labels\" else -100] * (max_l - len(x)) for x in dataset[name]]\n return dataset",
"def pad_batch(self, ids):\r\n max_len = max([len(x) for x in ids])\r\n return [x + [0] * (max_len - len(x)) for x in ids]",
"def pad_ndarray_list(batch, pad_value):\n bs = len(batch)\n maxlen = max([b.shape[0] for b in batch])\n if len(batch[0].shape) >= 2:\n batch_pad = np.zeros((bs, maxlen) + batch[0].shape[1:])\n else:\n batch_pad = np.zeros((bs, maxlen))\n batch_pad.fill(pad_value)\n for i, b in enumerate(batch):\n batch_pad[i, :b.shape[0]] = b\n\n return batch_pad",
"def Batch_Padding(batch, batch_first, pad_token):\n padded_batch = torch.nn.utils.rnn.pad_sequence([torch.tensor(x) for x in batch], batch_first=True, padding_value=pad_token)\n return padded_batch",
"def _pad_data(data, pad_length, padding_type='same'):\n\n # get the sampling period (or distance between sampling points, for PLUX devices this is always 1)\n # it is assumed that the signals are equidistantly sampled therefore only the distance between to sampling points\n # is needed to calculate the sampling period\n T = data[:, 0][1] - data[:, 0][0]\n\n if padding_type == 'same':\n\n # create the 'same' padding array\n padding = np.tile(data[-1, 1:], (pad_length, 1))\n\n elif padding_type == 'zero':\n\n # get the number of columns for the zero padding\n num_cols = data.shape[1] - 1 # ignoring the time/sample column\n\n # create the zero padding array\n padding = np.zeros((pad_length, num_cols))\n\n else:\n\n IOError('The padding type you chose is not defined. Use either \\'same\\ or \\'zero\\'.')\n\n # create the time / sample axis that needs to be padded\n start = data[:, 0][-1] + T\n stop = start + (T * pad_length)\n time_pad = np.arange(start, stop, T)\n time_pad = time_pad[:pad_length] # crop the array if there are to many values\n\n # expand dimension for hstack operation\n time_pad = np.expand_dims(time_pad, axis=1)\n\n # hstack the time_pad and the zero_pad to get the final padding array\n pad_array = np.hstack((time_pad, padding))\n\n # vstack the pad_array and the new_array\n padded_data = np.vstack([data, pad_array])\n\n return padded_data",
"def pad(batch):\n batch_split = list(zip(*batch))\n seqs, num, targs, lengths, visits = batch_split[0], batch_split[1], batch_split[2], batch_split[3], batch_split[4]\n num = torch.vstack([torch.as_tensor(sample, dtype=torch.float32) for sample in zip(*num)]).T\n visits = [torch.as_tensor(s, dtype=torch.long) for s in visits]\n return [list(seqs), num, torch.as_tensor(lengths, dtype=torch.long), visits], \\\n torch.as_tensor(targs, dtype=torch.float32)",
"def pad_sequence(batch, padding_value=0.0):\n max_size = batch[0].size()\n trailing_dims = max_size[1:]\n max_len = max([s.size(0) for s in batch])\n \n if all(x.shape[0] == max_len for x in batch):\n # if all data sequences in batch have the same length, no need to pad\n return batch\n else:\n # we need to pad\n out_dims = (max_len, ) + trailing_dims\n \n output_batch = []\n for i, tensor in enumerate(batch):\n # check the rest of dimensions\n if tensor.size()[1:] != trailing_dims:\n print(\"Data in batch has different dimensions:\")\n for data in batch:\n print(str(data.size()))\n raise RuntimeError('Fail to create batch data')\n # save padded results\n out_tensor = tensor.new_full(out_dims, padding_value)\n out_tensor[:tensor.size(0), ...] = tensor\n output_batch.append(out_tensor)\n return output_batch",
"def pad_batches(self, ids):\n\n batches = []\n for batch_elem_len, batch_sent_ids in zip(self.batch_elem_lengths, self.batch_sent_ids):\n batch = self.tokenizer.pad_id * np.ones((len(batch_sent_ids), batch_elem_len), dtype=np.int)\n for i, sentence_idx in enumerate(batch_sent_ids):\n batch[i][: len(ids[sentence_idx])] = ids[sentence_idx]\n batches.append(batch)\n return batches",
"def pad_sentence_batch(sentence_batch):\r\n max_sentence = max([len(sentence) for sentence in sentence_batch])\r\n return [sentence + [vocab_to_int['<PAD>']] * (max_sentence - len(sentence)) for sentence in sentence_batch]",
"def pad_to_fixed_size(data, pad_value, output_shape):\n max_num_instances = output_shape[0]\n dimension = output_shape[1]\n\n data = tf.reshape(data, [-1, dimension])\n num_instances = tf.shape(input=data)[0]\n\n pad_length = max_num_instances - num_instances\n\n paddings = pad_value * tf.ones([pad_length, dimension])\n\n padded_data = tf.reshape(tf.concat([data, paddings], axis=0), output_shape)\n return padded_data",
"def pad_sentence_batch(sentence_batch, pad_int):\n max_sentence = max([len(sentence) for sentence in sentence_batch])\n return [sentence + [pad_int] * (max_sentence - len(sentence)) for sentence in sentence_batch]",
"def pad_batch_data(insts,\n pad_idx,\n n_head,\n is_target=False,\n is_label=False,\n return_attn_bias=True,\n return_max_len=True,\n return_num_token=False):\n return_list = []\n max_len = max(len(inst) for inst in insts)\n # Any token included in dict can be used to pad, since the paddings' loss\n # will be masked out by weights and make no effect on parameter gradients.\n inst_data = np.array(\n [inst + [pad_idx] * (max_len - len(inst)) for inst in insts])\n return_list += [inst_data.astype(\"int64\").reshape([-1, 1])]\n if is_label: # label weight\n inst_weight = np.array([[1.] * len(inst) + [0.] * (max_len - len(inst))\n for inst in insts])\n return_list += [inst_weight.astype(\"float32\").reshape([-1, 1])]\n else: # position data\n inst_pos = np.array([\n list(range(0, len(inst))) + [0] * (max_len - len(inst))\n for inst in insts\n ])\n return_list += [inst_pos.astype(\"int64\").reshape([-1, 1])]\n if return_attn_bias:\n if is_target:\n # This is used to avoid attention on paddings and subsequent\n # words.\n slf_attn_bias_data = np.ones(\n (inst_data.shape[0], max_len, max_len))\n slf_attn_bias_data = np.triu(slf_attn_bias_data,\n 1).reshape([-1, 1, max_len, max_len])\n slf_attn_bias_data = np.tile(slf_attn_bias_data,\n [1, n_head, 1, 1]) * [-1e9]\n else:\n # This is used to avoid attention on paddings.\n slf_attn_bias_data = np.array([[0] * len(inst) + [-1e9] *\n (max_len - len(inst))\n for inst in insts])\n slf_attn_bias_data = np.tile(\n slf_attn_bias_data.reshape([-1, 1, 1, max_len]),\n [1, n_head, max_len, 1])\n return_list += [slf_attn_bias_data.astype(\"float32\")]\n if return_max_len:\n return_list += [max_len]\n if return_num_token:\n num_token = 0\n for inst in insts:\n num_token += len(inst)\n return_list += [num_token]\n return return_list if len(return_list) > 1 else return_list[0]",
"def pad_arrays(arrays, batch_size):\n\n rows_needed = batch_size - len(arrays[\"y\"]) % batch_size\n return {k: np.pad(v, [(0,rows_needed)] + [(0,0)]*(len(v.shape)-1), \"edge\") for k,v in arrays.items()}",
"def __call__(self, batch, PAD=utils.IGNORE_ID):\n # sort a list by sequence length (descending order) to use pack_padded_sequence\n batch.sort(key=lambda x: len(x[0]), reverse=True)\n # seperate inputs and labels\n input_seqs, label_seqs = zip(*batch)\n # padding\n lengths = [len(seq) for seq in input_seqs]\n input_padded_seqs = torch.zeros(len(input_seqs), max(lengths)).long()\n label_padded_seqs = torch.zeros(len(input_seqs), max(lengths)).fill_(PAD).long()\n for i, (input, label) in enumerate(zip(input_seqs, label_seqs)):\n end = lengths[i]\n input_padded_seqs[i, :end] = input[:end]\n label_padded_seqs[i, :end] = label[:end]\n return input_padded_seqs, torch.IntTensor(lengths), label_padded_seqs",
"def trim_and_pad_batch(self, batch):\n\t\tmaxlength = min(self.MAXLENGTH, max([len(x) for x in batch]))\n\t\t\t\t\n\t\tbatch = [x[:maxlength] for x in batch]\n\t\tbatch = [np.concatenate([x, np.zeros(maxlength - x.shape[0])]) for x in batch]\n\n\t\treturn batch",
"def pad_examples(x, desired_batch_size):\n batch_pad = desired_batch_size - x.shape[0]\n tile_dims = [1] * len(x.shape)\n tile_dims[0] = batch_pad\n return np.concatenate([x, np.tile(x[-1], tile_dims)], axis=0)",
"def Batch_Size_Normalization(batch, batch_len, pad_token, batch_size):\n max_length = max(batch_len)\n current_batch_len = len(batch)\n need_more = batch_size-current_batch_len\n if need_more==0:\n return batch\n\n padding_array = np.ones(max_length)*pad_token\n for i in range(need_more):\n batch.append(padding_array)\n return batch",
"def pad_sentence_batch(sentence_batch):\n max_sentence = max([len(sentence) for sentence in sentence_batch])\n return [sentence + [CODES['<PAD>']] * (max_sentence - len(sentence))\n for sentence in sentence_batch]",
"def _one_mini_batch(self, data, indices, pad_id):\n batch_data = {'raw_data': [data[i] for i in indices],\n 'question_token_ids': [],\n 'question_length': [],\n 'passage_token_ids': [],\n 'passage_length': [],\n 'start_id': [],\n 'end_id': []}\n max_passage_num = max([len(sample['passages']) for sample in batch_data['raw_data']])\n max_passage_num = min(self.max_p_num, max_passage_num)\n for sidx, sample in enumerate(batch_data['raw_data']):\n for pidx in range(max_passage_num):\n if pidx < len(sample['passages']):\n batch_data['question_token_ids'].append(sample['question_token_ids'])\n batch_data['question_length'].append(len(sample['question_token_ids']))\n passage_token_ids = sample['passages'][pidx]['passage_token_ids']\n batch_data['passage_token_ids'].append(passage_token_ids)\n batch_data['passage_length'].append(min(len(passage_token_ids), self.max_p_len))\n else:\n batch_data['question_token_ids'].append([])\n batch_data['question_length'].append(0)\n batch_data['passage_token_ids'].append([])\n batch_data['passage_length'].append(0)\n batch_data, padded_p_len, padded_q_len = self._dynamic_padding(batch_data, pad_id)\n for sample in batch_data['raw_data']:\n if 'answer_passages' in sample and len(sample['answer_passages']):\n gold_passage_offset = padded_p_len * sample['answer_passages'][0]\n batch_data['start_id'].append(gold_passage_offset + sample['answer_spans'][0][0])\n batch_data['end_id'].append(gold_passage_offset + sample['answer_spans'][0][1])\n else:\n # fake span for some samples, only valid for testing\n batch_data['start_id'].append(0)\n batch_data['end_id'].append(0)\n return batch_data",
"def _Pad(self, data):\n pad = self.block_size - len(data) % self.block_size\n return data + util.RepeatByte(pad, pad)",
"def add_padding(*data, value, maxlen=250, padding=\"post\"):\n return [keras.preprocessing.sequence.pad_sequences(\n d, value=value, padding=padding,\n maxlen=maxlen) for d in data]",
"def pad_data(data: List[np.ndarray], padding_value) -> List[tf.Tensor]:\n paddings = np.array([0, np.max(list(datapoint.shape[0] for datapoint in data))]).reshape((1, -1))\n return list(\n tf.pad(datapoint, paddings=paddings - np.array((0, len(datapoint))), mode='CONSTANT',\n constant_values=padding_value) for datapoint in data\n )",
"def pad_collate(batch):\n \n data, labels, data_file = zip(*batch)\n \n number_of_modalities = len(data[0]) if isinstance(data[0], list) else 1\n if number_of_modalities == 1:\n data = [[x] for x in data]\n\n modality_tensors = []\n for i in range(number_of_modalities):\n modality_i = [torch.Tensor(x[i]) for x in data]\n padded_modality = pad_sequence(modality_i, batch_first=True)\n modality_tensors.append(padded_modality)\n \n num_seqs_per_sample = [len(x) for x in labels]\n labels = [torch.Tensor(x) for x in labels]\n labels = pad_sequence(labels, batch_first=True)\n \n if number_of_modalities == 1:\n modality_tensors = modality_tensors[0]\n \n return modality_tensors, labels, num_seqs_per_sample, data_file",
"def pad_packed_collate(batch):\n if isinstance(batch[0], np.ndarray):\n pass\n elif isinstance(batch[0], collections.Sequence):\n transposed = zip(*batch)\n return [pad_packed_collate(samples) for samples in transposed]\n\n # pad sequence as TxBx*\n # T is length[0] longest seq, B is batch, * is feature\n # length and padded is sorted in descending order\n if len(batch) == 1:\n sorted_batch = batch\n padded_batch = batch[0][:, None, :] # add batch dimension\n lengths = [padded_batch.shape[0]]\n else:\n # sort\n sorted_batch = sorted(batch, key=lambda x: x.shape[0], reverse=True)\n lengths = [s.shape[0] for s in sorted_batch]\n\n # pad\n max_len, n_feats = sorted_batch[0].shape\n padded_batch = \\\n [np.concatenate((s, np.zeros((max_len - s.shape[0], n_feats),\n dtype=np.float32)), axis=0)\n if s.shape[0] != max_len else s for s in sorted_batch]\n\n # stack\n padded_batch = np.stack(padded_batch, axis=1)\n\n # pack\n packed_batch = pack_padded_sequence(Variable(t.from_numpy(padded_batch)), lengths,\n batch_first=False)\n\n return packed_batch",
"def pad(data, *args, **kwargs): # pragma: no cover\n raise NotImplementedError()",
"def pad_and_onehot(data, pad_len=None, extra_padding=200):\n if pad_len is None:\n pad_len = max(len(x) for x in data) + extra_padding\n data = [\n onehot(np.pad(trace, (0, pad_len - len(trace)), mode=\"constant\"))\n for trace in data\n ]\n return pad_len, np.array(data)",
"def test_Pad3D23():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 2]).astype('int32')\n pad = [1, 2]\n mode = \"replicate\"\n res = [[[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6], [4, 5, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)",
"def test_Pad3D16():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"replicate\"\n res = [[[1, 1, 2, 3, 3, 3], [4, 4, 5, 6, 6, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")"
] |
[
"0.82784224",
"0.72639644",
"0.69665056",
"0.6958741",
"0.6956212",
"0.68224317",
"0.6813602",
"0.68095857",
"0.68010575",
"0.6763136",
"0.67258495",
"0.66987",
"0.667535",
"0.66577417",
"0.6612682",
"0.66086715",
"0.6608627",
"0.6553975",
"0.65246",
"0.64727557",
"0.6432696",
"0.6397146",
"0.63809294",
"0.63314486",
"0.6297586",
"0.629584",
"0.6286987",
"0.62681407",
"0.6255433",
"0.6249613"
] |
0.8182605
|
1
|
Convert the question and passage in the original dataset to ids
|
def convert_to_ids(self, vocab):
    for data_set in [self.train_set, self.dev_set, self.test_set]:
        if data_set is None:
            continue
        for sample in data_set:
            sample['question_token_ids'] = vocab.convert_to_ids(sample['question_tokens'])
            for passage in sample['passages']:
                passage['passage_token_ids'] = vocab.convert_to_ids(passage['passage_tokens'])
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def raw_to_ids(raw_data, word_to_id):\n docs = tokenize_keras(raw_data)\n uid = word_to_id[UNKNOWN_WORD]\n return [[word_to_id.get(w, uid) for w in doc] for doc in docs]",
"def _compute_question_ids(self):\n if self._origin.question_ids:\n # lines to keep: those with already sent emails or registrations\n questions_tokeep_ids = self.env['event.registration.answer'].search(\n [('question_id', 'in', self._origin.question_ids.ids)]\n ).question_id.ids\n else:\n questions_tokeep_ids = []\n for event in self:\n if not event.event_type_id and not event.question_ids:\n event.question_ids = False\n continue\n\n if questions_tokeep_ids:\n questions_toremove = event._origin.question_ids.filtered(lambda question: question.id not in questions_tokeep_ids)\n command = [(3, question.id) for question in questions_toremove]\n else:\n command = [(5, 0)]\n if event.event_type_id.use_mail_schedule:\n command += [\n (0, 0, {\n 'title': question.title,\n 'question_type': question.question_type,\n 'sequence': question.sequence,\n 'once_per_order': question.once_per_order,\n 'answer_ids': [(0, 0, {\n 'name': answer.name,\n 'sequence': answer.sequence\n }) for answer in question.answer_ids],\n }) for question in event.event_type_id.question_ids\n ]\n event.question_ids = command",
"def _transform_interaction_to_question_id(self, interaction):\n return interaction - self._question_num * (interaction > self._question_num).long()",
"def process_data(words,puncts,word_to_id):\n\tids = []\n\tp_ids = []\n\tfor i in range(len(words)):\n\t\tids.append(word_to_id[words[i]])\n\t\tp_ids.append(punct_to_id[puncts[i]])\n\treturn ids,p_ids",
"def words_to_word_ids(data, word_to_id):\n # if isinstance(data[0], six.string_types):\n # print(type(data[0]))\n # # exit()\n # print(data[0])\n # print(word_to_id)\n # return [word_to_id[str(word)] for word in data]\n # else:\n return [word_to_id[word] for word in data]\n\n # if isinstance(data[0], str):\n # # print('is a string object')\n # return [word_to_id[word] for word in data]\n # else:#if isinstance(s, bytes):\n # # print('is a unicode object')\n # # print(data[0])\n # return [word_to_id[str(word)] f",
"def _sentence_to_ids(sentence, vocab):\n ids = [vocab.get(w, special_words.UNK_ID) for w in sentence]\n if FLAGS.add_eos:\n ids.append(special_words.EOS_ID)\n return ids",
"def convert_to_ids(self, terms):\n vec = [self.get_id(label) for label in terms]\n return vec",
"def batches2IDs(batches):\n l = [ np.array( [ char2id(x) for x in characters(b) ] ) for b in batches ]\n return l",
"def __getEgoAndAlterQuestionIds2(self):\n egoQuestionIds = [(\"Q4\",0), (\"Q5X\",0), (\"Q48\",0)]\n alterQuestionIds = [(\"Q184$\",0), (\"Q185$X\",0), (\"Q186$\",0)]\n\n egoQuestionIds.extend([(\"Q51X\",0)])\n alterQuestionIds.extend([(\"Q191$X\",0)])\n\n for i in range(1, self.numProfessions+1):\n egoQuestionIds.append((\"Q7_\" + str(i), 1))\n alterQuestionIds.append((\"Q187$_\" + str(i) , 1))\n\n egoQuestionIds.extend([(\"Q44AX\", 0), (\"Q44BX\", 0), (\"Q44CX\",0), (\"Q44DX\",0)])\n alterQuestionIds.extend([(\"Q180A$X\", 0), (\"Q180B$X\", 0), (\"Q180C$X\",0), (\"Q180D$X\",0)])\n\n egoQuestionIds.extend([(\"Q46A\", 0), (\"Q46B\", 0), (\"Q46C\", 0), (\"Q46D\", 0)])\n alterQuestionIds.extend([(\"Q182A$\", 0), (\"Q182B$\", 0), (\"Q182C$\", 0), (\"Q182D$\", 0)])\n\n return (egoQuestionIds, alterQuestionIds)",
"def get_ids(self, sentence):\n return [self.get_id(word) for word in sentence.strip().split(' ')]",
"def token2id(data, mode):\n vocab_path = 'vocab.' + mode\n in_path = data + '.' + mode\n out_path = data + '_ids.' + mode\n _, vocab = load_vocab(os.path.join(config.PROCESSED_PATH, vocab_path))\n in_file = open(os.path.join(config.PROCESSED_PATH, in_path), 'rb')\n out_file = open(os.path.join(config.PROCESSED_PATH, out_path), 'wb')\n\n lines = in_file.read().splitlines()\n for line in lines:\n if mode == 'dec': # we only care about '<s>' and </s> in encoder\n ids = [vocab[b'<s>']]\n else:\n ids = []\n ids.extend(sentence2id(vocab, line))\n # ids.extend([vocab.get(token, vocab['<unk>']) for token in basic_tokenizer(line)])\n if mode == 'dec':\n ids.append(vocab[b'</s>'])\n out_file.write(b' '.join(str(id_).encode('ascii') for id_ in ids) + b'\\n')",
"def getIDs():",
"def token2id(data, mode, phase): \n #Outputs data in the form of tokens from vocab to processed/(train_or_test)/ids.(enc or dec)\n vocab_path = 'vocab.' + mode\n in_path = data + '.' + mode\n out_path = data + '_ids.' + mode\n\n _, vocab = load_vocab(os.path.join(config.PROCESSED_PATH, vocab_path))\n in_file = open(os.path.join(config.PROCESSED_PATH, in_path), 'rb')\n out_file = open(os.path.join(config.PROCESSED_PATH, out_path), 'wb')\n \n lines = in_file.read().splitlines()\n for line in lines:\n if mode == 'dec' and phase is not 2: # we only care about '<s>' and </s> in encoder\n ids = [vocab['<s>']]\n else:\n ids = []\n ids.extend(sentence2id(vocab, line))\n # ids.extend([vocab.get(token, vocab['<unk>']) for token in basic_tokenizer(line)])\n if mode == 'dec':\n ids.append(vocab['<\\s>'])\n out_file.write(' '.join(str(id_) for id_ in ids) + '\\n')",
"def sequence_to_list_ids(sequence, vocab):\n pass",
"def entity2id(self,q):\n ans = []\n url = \"https://www.wikidata.org/w/api.php?action=wbsearchentities&format=json&search=\" + q + \"&language=en\"\n # url = \"https://www.wikidata.org/w/api.php?action=wbsearchentities&format=json&search=\" + \"+\".join(\n # q.split(\" \")) + \"&language=en\"\n get = urllib.urlopen(url).read()\n # print get\n response = json.loads(get)\n # ans += response[\"search\"]\n for s in response[\"search\"]:\n ans.append({\n \"s_id\": s[\"id\"],\n \"subject_des\": s[\"description\"]\n })\n return ans",
"def sent2id(self, sentences):\n # [max_conversation_length, max_sentence_length]\n return [self.vocab.sent2id(sentence) for sentence in sentences]",
"def docs2ids(self):\n self.docs = [ [self.vocab[word] for word in doc] for doc in self.docs]",
"def get_req_ids(actual_pose, target, req_ids, person_ids):\n train_x = []\n train_y = []\n\n for i in req_ids:\n id_mask = (person_ids == i)\n train_x.append(actual_pose[id_mask])\n train_y.append(target[id_mask, 0])\n\n train_x = np.concatenate(train_x)\n train_y = np.concatenate(train_y)\n \n return train_x, train_y",
"def replace_ids_submission(ids):\n \n item = np.zeros((len(ids), ), dtype = 'int')\n user = np.zeros((len(ids), ), dtype = 'int')\n for i in range(len(ids)):\n row, col = ids[i].split(\"_\")\n item[i] = int(row.replace(\"r\", \"\"))\n user[i] = int(col.replace(\"c\", \"\"))\n \n return item, user",
"def texts2ids(self, texts: list, length: int):\n return [self.text2ids(text, length) for text in texts]",
"def get_ids(self, text):\n\n tokens = [token.orth for token in self.tokenizer(text)]\n ids = []\n for token in tokens:\n try:\n id = self.vocab.vectors.key2row[token]\n except KeyError:\n id = self.oov_id\n\n ids.append(id)\n\n return ids",
"def get_unique_survey_and_business_ids(enrolment_data):\n\n surveys_ids = set()\n business_ids = set()\n for enrolment in enrolment_data:\n surveys_ids.add(enrolment[\"survey_id\"])\n business_ids.add(enrolment[\"business_id\"])\n return surveys_ids, business_ids",
"def words_to_id(text, is_list=False, old_word_to_id=None):\n if is_list:\n x = \"\"\n for line in text:\n x += line + \" \"\n text = x\n \n uniq_words = set(text.split(\" \"))\n \n if old_word_to_id:\n word_to_id = old_word_to_id\n start = len(old_word_to_id)\n for word in uniq_words:\n if word not in word_to_id:\n word_to_id[word] = start\n start += 1\n else:\n word_to_id = {word:i for i, word in enumerate(uniq_words)}\n \n id_to_word = {str(v):k for k,v in word_to_id.items()}\n return word_to_id, id_to_word",
"def to_partid(self, id_tensor):\n ...",
"def data_to_word_ids(self, input_data, filter=False):\n\n _buffer = list()\n for word in input_data:\n word = word.lower()\n if self.unit == \"oracle\":\n if \"+\" in word:\n tokens = word.split('+')\n word_tag = tokens[0].split(':')\n word = word_tag[1]\n if self.unit == \"morpheme\":\n word = re.sub(\"@@\", \"\", word)\n\n # flag to randomize token with frequency one\n flag = 1\n if word in self.unk_word_list:\n flag = random.randint(0, 1)\n\n if word in self.word_to_id and flag == 1:\n # if filter is True, reduce output vocabulary for softmax\n # (map words not in top self.max_vocab_size to UNK)\n if filter:\n # index start from 0\n if self.word_to_id[word] < self.max_vocab_size:\n _buffer.append(self.word_to_id[word])\n else:\n _buffer.append(self.word_to_id['<unk>'])\n else:\n _buffer.append(self.word_to_id[word])\n else:\n _buffer.append(self.word_to_id['<unk>'])\n return _buffer",
"def get_ids(self, text):\n\n tokens = [token.orth for token in self.tokenizer(text)]\n ids = []\n for token in tokens:\n try:\n id = self._vocab.vectors.key2row[token]\n except KeyError:\n id = self.oov_id\n\n ids.append(id)\n\n return ids",
"def generate_question_inter(path):\n\n\t# Function to compute vocabulary intersection\n\tdef q1_q2_intersect(row):\n\t return(len(set(q_dict[row['question1']]).intersection(set(q_dict[row['question2']]))))\n\n\t# Load training and test set\n\ttrain_orig =pd.read_csv(os.path.join(path,'train.csv'), sep=',',names = [\"id\", \"qid1\", \"qid2\", \"question1\",\"question2\",\"is_duplicate\"])\n\ttest_orig = pd.read_csv(os.path.join(path,'test.csv'), sep=',',names = [\"id\", \"qid1\", \"qid2\", \"question1\",\"question2\"])\n\n\t# Concatenation\n\tques = pd.concat([train_orig[['question1', 'question2']], test_orig[['question1', 'question2']]], axis=0).reset_index(drop='index')\n\n\n\tq_dict = defaultdict(set)\n\tfor i in range(ques.shape[0]):\n\t q_dict[ques.question1[i]].add(ques.question2[i])\n\t q_dict[ques.question2[i]].add(ques.question1[i])\n\n\t# Compute vocabulary intersection\n\ttrain_orig['q1_q2_intersect'] = train_orig.apply(q1_q2_intersect, axis=1, raw=True)\n\ttest_orig['q1_q2_intersect'] = test_orig.apply(q1_q2_intersect, axis=1, raw=True)\n\n\ttrain_feat = train_orig[['q1_q2_intersect']]\n\ttest_feat = test_orig[['q1_q2_intersect']]\n\n\tprint('Writing train features...')\n\ttrain_feat.to_csv(os.path.join(path,'train_question_inter.csv'))\n\n\tprint('Writing test features...') \n\ttest_feat.to_csv(os.path.join(path,'test_question_inter.csv'))\n\n\tprint('CSV written ! see: ', path, \" | suffix: \", \"_question_inter.csv\")",
"def element2id(self):\n elements = self.contents['Element']\n unique_elements, indices = np.unique(elements, return_inverse=True)\n self.contents['Sub_ID'] = indices + 1\n self.contents['ID'] = np.arange(1, len(self.contents)+1)\n self.num_atom_types = len(unique_elements)",
"def _make_identifiers(self) -> Identifiers:\n identifier_list = []\n for meta_column in self._meta_columns:\n idf_string = meta_column.reflected_column_idf\n idf = Identifier.from_string(idf_string)\n identifier_list.append(idf)\n return identifier_list",
"def _convert_words_to_ids(self, words, vocab_id_dict):\n return [self._convert_token_to_id(w, vocab_id_dict) for w in words]"
] |
[
"0.69351757",
"0.64325917",
"0.6149097",
"0.61358285",
"0.6019004",
"0.59935755",
"0.59873396",
"0.59230286",
"0.5898484",
"0.5884933",
"0.5883382",
"0.5849986",
"0.58040035",
"0.57681507",
"0.57341343",
"0.572762",
"0.5713004",
"0.5608902",
"0.56045425",
"0.5583348",
"0.5575349",
"0.5552886",
"0.55524004",
"0.5541811",
"0.5515007",
"0.55089486",
"0.5505098",
"0.55004853",
"0.54987675",
"0.5493107"
] |
0.73733747
|
0
|