query stringlengths (9–9.05k) | document stringlengths (10–222k) | metadata dict | negatives listlengths (30–30) | negative_scores listlengths (30–30) | document_score stringlengths (4–10) | document_rank stringclasses (2 values) |
---|---|---|---|---|---|---|
Shows all the equipments from the database | def show_equipments(self):
        database = Database('data/database.db')
        equipments = database.read_equipments()
        view = Template(filename="view/template.html", lookup=lookup)

        return view.render(
            rows = [[item.number, item.name, item.installation_number] for item in equipments],
            pageTitle = "Équipements",
            tableTitle = "Liste de tous les équipements",
            ths = ["Numéro", "Nom", "Numéro d'installation"]
        ) | {
    "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] }
} | [
"def all_equipment(self):\n\t\tself.db = DB()\n\t\tactivity_all = self.db.select_all_from(\"equipment\")\n\t\ttmpl = lookup.get_template(\"equipment.html\")\n\t\treturn (tmpl.render(equipment=activity_all))",
"def show_equipment(self, number):\n database = Database('data/database.db')\n equip = database.read_equipment(number)\n view = Template(filename=\"view/template.html\", lookup=lookup)\n\n try:\n render = view.render(\n rows = [[equip.number, equip.name, equip.installation_number]],\n pageTitle = \"Équipement \" + number,\n tableTitle = \"Équipement \" + number,\n ths = [\"Numéro\", \"Nom\", \"Numéro d'installation\"]\n )\n except AttributeError:\n render = view.render(\n rows = [],\n pageTitle = \"Équipement \" + number,\n tableTitle = \"Équipement \" + number,\n ths = [\"Numéro\", \"Nom\", \"Numéro d'installation\"]\n )\n \n return render",
"def list_all(self):\n\n url = 'equipamento/list/'\n\n code, xml = self.submit(None, 'GET', url)\n\n return self.response(code, xml)",
"def get_all(self):\n\n url = 'equipment/all'\n\n code, xml = self.submit(None, 'GET', url)\n\n return self.response(code, xml)",
"def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")",
"def pets():\n \n pets_owned = db.execute(\"SELECT pets.id, pet_types.imgsrc, pet_types.pet_type, pets.created, pets.exp, pets.name, users.active_pet_id FROM owners JOIN pets ON pets.id = owners.pet_id JOIN pet_types ON pets.type = pet_types.id JOIN users ON users.id = owners.owner_id WHERE owner_id = ?\", (session_get_int(\"user_id\"), )).fetchall()\n return render_template(\"list.html\", pets_owned=pets_owned)",
"def equip_items(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Destiny2/Actions/Items/EquipItems/\"))",
"def all_products(request):\n products = Product.objects.all()\n return render(request, \"products.html\", {\"products\": products})",
"def equipments(self):\n selection = self.object.equipment_set\n return {\n 'selection': selection.all(),\n 'count': selection.count()\n }",
"def all_products(request):\n\n products = Product.objects.all()\n return render(request, 'products.html', {'products': products})",
"def show_inventory(self):\n\t\tclear_screen()\n\n\t\tprint(\"# INVENTORY #\\n\")\n\t\tprint(\"Weapon{:.>15} \".format(self.inventory['Weapon']))\n\t\tprint(\"Clothing{:.>13} \".format(self.inventory['Clothing']))\n\t\tprint(\"Items{:.>16} \".format(self.inventory['Items']))\n\n\t\tpress_enter()",
"def equipments(self):\n selection = Equipment.objects.filter(responsible__location_id=self.object.id)\n return {\n 'selection': selection,\n 'count': selection.count()\n }",
"def departments():\n # gather data from db about all departments\n return render_template(\"departments.html\")",
"def get(self, request, **kwargs):\n elementos_list = Elementos.objects.all()\n return render(request, 'alchemy/mezclar.html', {'elementos_list' : elementos_list})",
"def list_departments():\n \t check_admin()\n\n #check all the departments in the database and assign them to a variable.departments \n \t departments = Department.query.all()\n\n \t return render_template('admin/departments/departments.html',departments = departments,title = \"Departments\")",
"def get(self, request, **kwargs):\n elementos_list= Elementos.objects.all()\n return render(request, 'alchemy/index.html', {})",
"def show_all_information():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n all_certs = employee_certification.query.all()\n \n return render_template(\"admin.html\", employees = employees, all_certs = all_certs)",
"def all_frames(request):\n frames = Product.objects.filter(department=\"2\")\n\n context = {\n 'frames': frames,\n }\n\n return render(request, 'products/frames.html', context)",
"def all_prints(request):\n prints = Product.objects.filter(department=\"1\")\n\n context = {\n 'prints': prints,\n }\n\n return render(request, 'products/prints.html', context)",
"def show_all_employees():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n \n \n ## right now this is ALL users... \n \n return render_template(\"employee_display.html\", employees = employees)",
"def products(request):\n\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n products = Product.objects.all()\n template = \"auctionsmng/products.html\"\n\n context = {\n 'products': products\n }\n\n return render(request, template, context)",
"def products(request):\n\n return render(request, \"core/products.html\", {\n \"products\": Product.objects.all()\n })",
"def products():\n\n\treturn render_template(\"products.html\")",
"def all_products(request):\n products = Product.objects.all()\n departments = None\n\n if request.GET:\n if 'department' in request.GET:\n departments = request.GET['department'].split(',')\n products = products.filter(department__name__in=departments)\n departments = Department.objects.filter(name__in=departments)\n\n context = {\n 'products': products,\n 'current_departments': departments,\n }\n\n return render(request, 'products/products.html', context)",
"def rooms(request):\n rooms = Room.objects.all()\n categories = AmenityCategory.objects.all()\n amenities = Amenity.objects.all()\n template = 'rooms/rooms.html'\n context = {\n 'rooms': rooms,\n 'categories': categories,\n 'amenities': amenities,\n }\n return render(request, template, context)",
"def showSports():\n\n sports = session.query(Sport).order_by(asc(Sport.sportName))\n items = session.query(Item).order_by(desc(Item.id))\n return render_template('sports.html', sports=sports, items=items)",
"def index(request):\n rooms = Room.objects.order_by('name').all()\n return render(request, 'survey/index.html', context={'rooms': rooms})",
"def equip_item(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Destiny2/Actions/Items/EquipItem/\"))",
"def show():\n session = current_app.config['db']\n items = session\\\n .query(WineABV)\\\n .order_by(asc(func.lower(WineABV.name)))\n if is_json_request(request):\n return jsonify(items=[x.serialize for x in items])\n else:\n return render_template(template_prefix+'view.html', items=items)",
"def show_inventory(table):\r\n if (table):\r\n print('======= The Current Inventory: =======')\r\n print('ID\\tCD Title (by: Artist)\\n')\r\n for row in table:\r\n print('{}\\t{} (by:{})'.format(*row.values()))\r\n print('======================================')\r\n else:\r\n print ('Inventory is empty.\\n')\r\n # return None\r"
]
| [
"0.76633227",
"0.6947809",
"0.67456573",
"0.6615799",
"0.6177239",
"0.61736935",
"0.6138498",
"0.60801035",
"0.6078603",
"0.6067097",
"0.60606223",
"0.6054881",
"0.60485995",
"0.59715",
"0.59245",
"0.5917239",
"0.58929026",
"0.5869813",
"0.5869021",
"0.586012",
"0.584841",
"0.58469564",
"0.5814903",
"0.57929856",
"0.5785315",
"0.57636994",
"0.57536805",
"0.57089394",
"0.56994694",
"0.56964314"
]
| 0.82579195 | 0 |
Shows all the activities from the database | def show_activities(self):
        database = Database('data/database.db')
        activities = database.read_activities()
        view = Template(filename="view/template.html", lookup=lookup)

        return view.render(
            rows = [[item.number, item.name] for item in activities],
            pageTitle = "Activités",
            tableTitle = "Liste de toutes les activités",
            ths = ["Numéro", "Nom"]
        ) | {
    "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] }
} | [
"def all_activity(self):\n\t\tself.db = DB()\n\t\tactivity_all = self.db.select_all_from(\"activity\")\n\t\ttmpl = lookup.get_template(\"activity.html\")\n\t\treturn (tmpl.render(activity=activity_all))",
"def __ui_list_all_activities(self):\n activities_list = self.__activity_service.service_get_list_of_activities()\n if len(activities_list) == 0:\n print(\"The list of activities is empty!\\n\")\n else:\n for activity in activities_list:\n print(activity)\n print(\"\")",
"def index(self):\n\n\t\tself.db = DB()\n\t\tactivityTuple = self.db.select_all_from(\"activity\")[1]\n\t\ttmpl = lookup.get_template(\"index.html\")\n\t\treturn (tmpl.render(activity=activityTuple))",
"def dashboard_showall():\n tasks = Task.query.all()\n return render_template('home/taskshowall/dashboard_showall.html',\n tasks=tasks, title=\"Tasks\")",
"def get_activities():\n pass",
"def get_activities(cls):\n objs = cls.objects\n return objs",
"def activities(self):\r\n return resources.Activities(self)",
"def show_activity(self, number):\n database = Database('data/database.db')\n activ = database.read_activity(number)\n view = Template(filename=\"view/template.html\", lookup=lookup)\n\n try:\n render = view.render(\n rows = [[activ.number, activ.name]],\n pageTitle = \"Activité \" + number,\n tableTitle = \"Activité \" + number,\n ths = [\"Numéro\", \"Nom\"]\n )\n except AttributeError:\n render = view.render(\n rows = [],\n pageTitle = \"Activité \" + number,\n tableTitle = \"Activité \" + number,\n ths = [\"Numéro\", \"Nom\"]\n )\n \n return render",
"def get_activity_list(self):\n return self._request_activity_list(self.athlete)",
"def activities(self):\r\n return activities.Activities(self)",
"def show_activities(bucketlist_id):\n form = ActivityForm(request.form)\n if logged_in:\n\n # Check if buck has activities\n all_activities = Activity.activities\n buck_activities = {k:v for k, v in all_activities.items() if bucketlist_id==v['bucketlist_id']}\n if buck_activities:\n return render_template(\"show_activities.html\", form=form, bucketlist_id=bucketlist_id, data=buck_activities)\n\n # If buck ids do not match\n return render_template('show_activities.html', form=form, bucketlist_id=bucketlist_id)\n\n # If user is not logged in:\n return login_required()",
"def activities(self):\r\n return v3.Activities(self)",
"def index(request):\n home_user = request.user.profile\n \"\"\"num_activities = Activity.objects.count()\"\"\"\n Cactivity = CompletedActivity.objects.filter(user=home_user)\n UActivity = Cactivity.values('activity_id', 'activity__name', 'activity__value', 'activity__group').annotate \\\n (count=Count('activity__name'), earned=Sum('activity__value'))\n TimesCompelted = Cactivity.annotate(count=Count('activity__name'))\n # Generate counts of some of the main objects\n\n context = {\n 'huser': home_user,\n 'Lname' : home_user.user.last_name,\n 'Fname': home_user.user.first_name,\n 'num_activities': 1,\n 'activity_list' : UActivity,\n \"times_completed\" : TimesCompelted\n }\n\n # Render the HTML template index.html with the data in the context variable\n return render(request, 'index.html', context=context)",
"def show_tasks():\n\n task = Task(connection=connection, cursor=cursor)\n\n all_tasks = task.get_all_tasks()\n\n context = {\n 'all_tasks': all_tasks\n }\n\n return render_template('pages/tables/tasks.html', **context)",
"def activities(self, start=None, limit=None, done=None, exclude=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/activities'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json",
"def activities(self, start=None, limit=None, done=None, exclude=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/activities'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json",
"def activities(self, start=None, limit=None, done=None, exclude=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/activities'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json",
"def activities(self, start=None, limit=None, done=None, exclude=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/activities'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json",
"def dashboard(request):\n appointments = AppointmentRequests.objects.all().filter(completed=False)\n return render(request,\"dashboard.html\",{\"appointments\":appointments})",
"def activities(self):\n return self._activities",
"def full_list_of_movies():\n\n movie_list = Movie.query.order_by(Movie.title).all()\n return render_template('movie_list.html', movie_list=movie_list)",
"def getUserActivities(context, request):\n mmdb = MADMaxDB(context.db)\n query = {}\n query['actor.username'] = request.actor['username']\n query['verb'] = 'post'\n chash = request.params.get('context', None)\n if chash:\n query['contexts.hash'] = chash\n\n is_head = request.method == 'HEAD'\n activities = mmdb.activity.search(query, sort=\"_id\", keep_private_fields=False, flatten=1, count=is_head, **searchParams(request))\n\n handler = JSONResourceRoot(activities, stats=is_head)\n return handler.buildResponse()",
"def get_all_activities_list(self):\n self.__load_activities_from_file_into_memory()\n return super().get_all_activities_list()",
"def index(request):\n data = Information.objects.all()\n args = {'data': data}\n return render_to_response('tasks/index.html', args, context_instance=RequestContext(request))",
"def list_all(request):\n companies = Company.objects.order_by('-created')\n context = dict(companies=companies)\n return render(request, 'companies/all.html', context)",
"def template(self, request, activity, session):\n activity_model = apps.get_model(\"activity\", \"Activity\")\n user = request.user\n if user not in activity.student.all() and user not in activity.teacher.all():\n logger.warning(\n \"User '\" + user.username + \"' denied to access course'\" + activity.name + \"'.\")\n raise PermissionDenied(\"Vous n'êtes pas membre de cette classe.\")\n\n if request.method == 'GET':\n if request.GET.get(\"action\", None) == \"toggle_activity\":\n try:\n act = activity_model.objects.get(id=request.GET.get(\"id\", None))\n act.toggle_open(request)\n except activity_model.DoesNotExist:\n raise Http404(\n \"L'activité d'ID '\" + str(request.GET.get(\"id\", None)) + \"' introuvable.\")\n return redirect(reverse(\"activity:play\", args=[activity.id]))\n\n smalls = list()\n for item in activity.indexed_activities():\n if item.open or activity.is_teacher(user):\n smalls.append(item.small(request))\n\n return render(request, \"activity/activity_type/course/index.html\", {\n 'name': activity.name,\n 'smalls': smalls,\n 'teacher': activity.teacher.all(),\n 'instructor': activity.is_teacher(user),\n 'course_id': activity.id,\n })",
"def movie_list():\n\n movies = Movie.query.order_by(Movie.title).all()\n\n return render_template(\"movie_list.html\", movies=movies)",
"def movie_list():\n\n movies = Movie.query.order_by(Movie.movie_title).all()\n return render_template(\"movie_list.html\", movies=movies)",
"def getAll(request):\n return HttpResponse('getAll')",
"def touragenda(request):\n active_events = TourAgendaModel.objects.order_by('number')\n friday_events = TourAgendaModel.objects.all().filter(day='FRIDAY')\n saturday_events = TourAgendaModel.objects.all().filter(day='SATURDAY')\n sunday_events = TourAgendaModel.objects.all().filter(day='SUNDAY')\n\n context = {\n 'active_events': active_events,\n 'friday_events': friday_events,\n 'saturday_events': saturday_events,\n 'sunday_events': sunday_events,\n }\n\n return render(request, 'tourAgenda.html', context=context)"
]
| [
"0.7956943",
"0.7559513",
"0.71567297",
"0.6593943",
"0.65700984",
"0.65081066",
"0.64780426",
"0.6435594",
"0.6362298",
"0.62765324",
"0.62336224",
"0.621595",
"0.6190602",
"0.61696124",
"0.6159179",
"0.6159179",
"0.6159179",
"0.6159179",
"0.6157847",
"0.6083726",
"0.6079967",
"0.60479385",
"0.6010264",
"0.59942365",
"0.59884495",
"0.5966211",
"0.59565043",
"0.59536743",
"0.594413",
"0.5929318"
]
| 0.83865505 | 0 |
Shows the installation which has the given number from the database | def show_installation(self, number):
        database = Database('data/database.db')
        inst = database.read_installation(number)
        view = Template(filename="view/template.html", lookup=lookup)

        try:
            render = view.render(
                rows = [[inst.number, inst.name, inst.address, inst.zip_code, inst.city, inst.latitude, inst.longitude]],
                pageTitle = "Installation " + number,
                tableTitle = "Installation " + number,
                ths = ["Numéro", "Nom", "Adresse", "Code postal", "Ville", "Latitude", "Longitude"]
            )
        except AttributeError:
            render = view.render(
                rows = [],
                pageTitle = "Installation " + number,
                tableTitle = "Installation " + number,
                ths = ["Numéro", "Nom", "Adresse", "Code postal", "Ville", "Latitude", "Longitude"]
            )
        return render | {
    "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] }
} | [
"def show_installations(self):\n database = Database('data/database.db')\n installations = database.read_installations()\n view = Template(filename=\"view/template.html\", lookup=lookup)\n \n \n return view.render(\n rows = [[item.number, item.name, item.address, item.zip_code, item.city, item.latitude, item.longitude] for item in installations],\n pageTitle = \"Installations\",\n tableTitle = \"Liste de toutes les installations\",\n ths = [\"Numéro\", \"Nom\", \"Adresse\", \"Code postal\", \"Ville\", \"Latitude\", \"Longitude\"]\n )",
"def show_equipment(self, number):\n database = Database('data/database.db')\n equip = database.read_equipment(number)\n view = Template(filename=\"view/template.html\", lookup=lookup)\n\n try:\n render = view.render(\n rows = [[equip.number, equip.name, equip.installation_number]],\n pageTitle = \"Équipement \" + number,\n tableTitle = \"Équipement \" + number,\n ths = [\"Numéro\", \"Nom\", \"Numéro d'installation\"]\n )\n except AttributeError:\n render = view.render(\n rows = [],\n pageTitle = \"Équipement \" + number,\n tableTitle = \"Équipement \" + number,\n ths = [\"Numéro\", \"Nom\", \"Numéro d'installation\"]\n )\n \n return render",
"def show_equipments(self): \n database = Database('data/database.db')\n equipments = database.read_equipments()\n view = Template(filename=\"view/template.html\", lookup=lookup)\n \n \n return view.render(\n rows = [[item.number, item.name, item.installation_number] for item in equipments],\n pageTitle = \"Équipements\",\n tableTitle = \"Liste de tous les équipements\",\n ths = [\"Numéro\", \"Nom\", \"Numéro d'installation\"]\n )",
"def display_nbr(ctx, as_port=False):\n ctl = ctx.ctl\n\n jobs = ctl('list-avail', '--partition', 'main', flatten=False)\n\n if len(jobs) == 0:\n click.echo('No jobs running', err=True)\n sys.exit(1)\n\n for job in jobs:\n info = collect_vnc_info(ctl, job['id'], ctx.ssh_cfg)\n\n if as_port:\n click.echo('%d' % info['port'])\n else:\n click.echo(':%d' % info['display'])",
"def run(self):\n logging.debug('List Installed Programs')\n if self.short:\n print(' '.join([ent for ent in pakit.conf.IDB]))\n return\n\n nchars = 12\n fmt = str(nchars).join(['{prog:', '} {repo:',\n '} {hash:', '} {date}'])\n installed = ['Program Repo Hash Date']\n for prog in pakit.conf.IDB:\n entry = pakit.conf.IDB[prog]\n installed.append(fmt.format(prog=prog[0:nchars],\n repo=entry['repo'][0:nchars],\n date=entry['date'],\n hash=entry['hash'][0:nchars]))\n\n msg = 'Installed Programs:'\n msg += PREFIX + PREFIX.join(installed)\n print(msg)\n return msg",
"def show_version():\n terminal.echo(f\"{package_metadata['name']} {package_metadata['version']}\")",
"def all_installation(self):\n\t\tself.db = DB()\n\t\tinstallation_all = self.db.select_all_from(\"installations\")\n\t\ttmpl = lookup.get_template(\"installation.html\")\n\t\treturn (tmpl.render(installation=installation_all))",
"def show_versions():\n sys_info = _get_sys_info()\n versions = _get_autogluon_versions()\n sorted_keys = sorted(versions.keys(), key=lambda x: x.lower())\n\n maxlen = 0 if len(versions) == 0 else max(len(x) for x in versions)\n print(\"\\nINSTALLED VERSIONS\")\n print(\"------------------\")\n for k, v in sys_info.items():\n print(f\"{k:<{maxlen}}: {v}\")\n print(\"\")\n for k in sorted_keys:\n print(f\"{k:<{maxlen}}: {versions[k]}\")",
"def database_installed_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database_installed_version\")",
"def show(name):\n try:os.system(f'python -m pip show {name}') \n except Exception:\n user_choice=input(f\"Seems like {name} not found\")",
"def print_number():\n\tfavorite_number = fetch_number()\n\tprint(f\"Your favorite number is {favorite_number}!\")",
"def show_version():\n print(\"===============================================================\")\n print(f\"Productivity App v{__VERSION__}\")\n print(f\"Made by {__AUTHOR__} (with the advices of {__ADVISOR__})\")\n print(\"Source : https://github.com/Ilade-s/productivite-app-TkVer\")\n print(\"Server (optionnal) : https://github.com/Tifiloow/productivite-app\")\n print(\"Assets : https://feathericons.com/\")\n print(\"===============================================================\")",
"def show_catalogue(self):\n\n data = cur.execute(\"\"\"SELECT productid, productname, unitcost, stock, location \n FROM catalogue WHERE vendorname = ?\"\"\", (self.vendorname,)).fetchall()\n print(tabulate(data, headers=[\"Product ID\", \"Name\", \"Unit Cost\", \"Stock\", \"Location\"]))",
"def action_number(self, *args):\n # obj_ret = self.browse()\n if self.type == 'out_invoice':\n self._cr.execute(\n 'SELECT id, number '\n 'FROM account_wh_src '\n 'WHERE id IN (' + ','.join([str(item) for item in self.ids]) + ')')\n\n for (aws_id, number) in self._cr.fetchall():\n if not number:\n number = self.env['ir.sequence'].get(\n 'account.wh.src.%s' % self.type)\n self._cr.execute('UPDATE account_wh_src SET number=%s '\n 'WHERE id=%s', (number, aws_id))\n\n return True",
"def display_number(number):\n ret = _LIB.led_matrix_click_display_number(number)\n if ret < 0:\n raise Exception(\"led matrix click display number failed\")",
"def cli_show_version(ctx, _, value):\n if not value or ctx.resilient_parsing:\n return\n\n show_versions()\n\n ctx.exit()",
"def disp_found(num):\n from x84.bbs import getterminal, echo\n term = getterminal()\n echo(u''.join((u'\\r',\n term.bold_white(u'%d' % (num,)),\n term.yellow(u' lOCAtiON%s diSCOVEREd ' %\n (u's' if num > 1 else u'')),\n term.bold_black(u'...'),)))",
"def _getversion(self):\n\n import_module(self.packagename)\n version = pkg_resources.get_distribution(self.packagename).version\n return \"Version {} of {} is installed.\".format(version, self.packagename)",
"def cli(ctx, name, owner, changeset_revision):\n return ctx.gi.repositories.get_repository_revision_install_info(name, owner, changeset_revision)",
"def display_product_from_id(self, product_id):\n self.cur.execute(\"SELECT name, brand, nova, stores, id FROM Product WHERE id = %s\", (product_id, ))\n response = self.cur.fetchall()\n response = response[0]\n print (\"{} de la marque {} (indice nova : {}), disponible dans les magasins {}.\\n\"\n \"Lien vers une description complete https://fr.openfoodfacts.org/produit/{}\\n\".\n format(response[0], response[1], response[2], response[3], response[4]))",
"def info(k=None):\n global program\n if program is None:\n print \"no program is loaded\"\n return\n infos = program.info()\n if k is None:\n for k in infos.keys():\n val = infos[k]\n if isinstance(val, int):\n print \"{:20} : 0x{:x}({})\".format(k, val, val)\n else:\n print \"{:20} : {}\".format(k, val)\n elif k in infos:\n print \"{:20} : {}\".format(k, infos[k])\n else:\n print \"no such entry\"",
"def version(self):\n self.cursor.execute(\"SELECT VERSION()\")\n # Fetch a single row using fetchone() method.\n data = self.cursor.fetchone()\n print(\"Database version : %s \" % data)",
"def do_version(self, a):\n print(\"\\tversion: \" + (str(ise.getVersion())) +\n \".\" + (str(ise.getFirmware())))",
"def show(self, number_of_rows = None):\n result = quest.engine.show(self.statement, number_of_rows)\n return result",
"def installation(request):\n return render(request, 'ecosystem/installation.html',\n {'page': 'installation', 'category': 'publish'})",
"def database_installed_version(self) -> str:\n return pulumi.get(self, \"database_installed_version\")",
"def show_activity(self, number):\n database = Database('data/database.db')\n activ = database.read_activity(number)\n view = Template(filename=\"view/template.html\", lookup=lookup)\n\n try:\n render = view.render(\n rows = [[activ.number, activ.name]],\n pageTitle = \"Activité \" + number,\n tableTitle = \"Activité \" + number,\n ths = [\"Numéro\", \"Nom\"]\n )\n except AttributeError:\n render = view.render(\n rows = [],\n pageTitle = \"Activité \" + number,\n tableTitle = \"Activité \" + number,\n ths = [\"Numéro\", \"Nom\"]\n )\n \n return render",
"def installation(request):\n return jingo.render(request, 'ecosystem/installation.html',\n {'page': 'installation', 'category': 'publish'})",
"def piece_by_version(request, version):\n\n v = LPVersion.objects.get(version=version)\n paginator = Paginator(Piece.objects.filter(version=v), 25)\n p = request.GET.get('page')\n try:\n page = paginator.page(p)\n except PageNotAnInteger:\n page = paginator.page(1)\n except EmptyPage:\n page = paginator.page(paginator.num_pages)\n\n context = {\n 'keyform': KeySearchForm(auto_id=False),\n 'pager': page,\n 'version': version\n }\n return render(request, 'mutopia/piece_version.html', context)",
"def show():\n info(str(Project))"
]
| [
"0.679254",
"0.63224375",
"0.5637825",
"0.55374575",
"0.5517338",
"0.54618335",
"0.5394725",
"0.53656566",
"0.53460836",
"0.5300642",
"0.52793235",
"0.5266057",
"0.52598614",
"0.5193216",
"0.5169169",
"0.5164407",
"0.51391256",
"0.51271594",
"0.51092535",
"0.510022",
"0.5084253",
"0.5071465",
"0.50707966",
"0.50591385",
"0.5045901",
"0.50203305",
"0.50014",
"0.49937516",
"0.49862322",
"0.497926"
]
| 0.7730804 | 0 |
Shows the equipment which has the given number from the database | def show_equipment(self, number):
        database = Database('data/database.db')
        equip = database.read_equipment(number)
        view = Template(filename="view/template.html", lookup=lookup)

        try:
            render = view.render(
                rows = [[equip.number, equip.name, equip.installation_number]],
                pageTitle = "Équipement " + number,
                tableTitle = "Équipement " + number,
                ths = ["Numéro", "Nom", "Numéro d'installation"]
            )
        except AttributeError:
            render = view.render(
                rows = [],
                pageTitle = "Équipement " + number,
                tableTitle = "Équipement " + number,
                ths = ["Numéro", "Nom", "Numéro d'installation"]
            )
        return render | {
    "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] }
} | [
"def show_equipments(self): \n database = Database('data/database.db')\n equipments = database.read_equipments()\n view = Template(filename=\"view/template.html\", lookup=lookup)\n \n \n return view.render(\n rows = [[item.number, item.name, item.installation_number] for item in equipments],\n pageTitle = \"Équipements\",\n tableTitle = \"Liste de tous les équipements\",\n ths = [\"Numéro\", \"Nom\", \"Numéro d'installation\"]\n )",
"def info_equipment_get():\n equipment = _equipment_by_group()\n return equipment, 200",
"def getEquipmentByEquipmentId(equipment_id):\r\n db = db_helpers.getDbCon()\r\n cursor = db.cursor()\r\n userEquipmentInsertQuery = \"SELECT * FROM equipment WHERE equipment_id =%s\"\r\n try:\r\n cursor.execute(userEquipmentInsertQuery, (equipment_id,))\r\n equipment = cursor.fetchall()\r\n return equipment\r\n except Exception:\r\n print('Error: OOPs something went wrong while getting the equipment by equipment id!')\r\n finally:\r\n cursor.close()\r\n db.close()",
"def visit_equipment(self, equipment):",
"def getEquipmentByInstructionId(instruction_id):\r\n db = db_helpers.getDbCon()\r\n cursor = db.cursor()\r\n userEquipmentInsertQuery = \"SELECT equipment_id FROM recipe_equipment WHERE recipe_instruction_id =%s\"\r\n try:\r\n cursor.execute(userEquipmentInsertQuery, (instruction_id,))\r\n equipment_id = cursor.fetchall()\r\n return equipment_id\r\n except Exception:\r\n print('Error: OOPs something went wrong while getting the equipment by Instruction ID!')\r\n finally:\r\n cursor.close()\r\n db.close()",
"def show_activity(self, number):\n database = Database('data/database.db')\n activ = database.read_activity(number)\n view = Template(filename=\"view/template.html\", lookup=lookup)\n\n try:\n render = view.render(\n rows = [[activ.number, activ.name]],\n pageTitle = \"Activité \" + number,\n tableTitle = \"Activité \" + number,\n ths = [\"Numéro\", \"Nom\"]\n )\n except AttributeError:\n render = view.render(\n rows = [],\n pageTitle = \"Activité \" + number,\n tableTitle = \"Activité \" + number,\n ths = [\"Numéro\", \"Nom\"]\n )\n \n return render",
"def view_products():\n min_id = (Product.select().order_by(Product.product_id.asc()).get()).product_id\n max_id = (Product.select().order_by(Product.product_id.desc()).get()).product_id\n print(f\"\\nPlease select id between {min_id} & {max_id}\")\n id = int(input(\"Select product id: \"))\n while id not in range(min_id, max_id+1):\n print(\"Your selection must be between {} and {}\".format(min_id, max_id))\n id = int(input(\"Select product id: \"))\n print(f\"\"\"\\n-Product: {Product.get_by_id(id).product_name}\n-Quantity: {Product.get_by_id(id).product_quantity}\n-Price: {Product.get_by_id(id).product_price} cents\n-Date updated: {Product.get_by_id(id).date_updated}\\n\"\"\")\n input(\"\\nPress ENTER to continue\")\n clear()",
"def showSlot(self, number):\n if number <= 0:\n self.log.info('Showing slot \"Negative\" (%d) slot ignored' % number)\n else:\n slot = self.slots[self.number - number]\n if slot.isOccupied():\n slot.removeItem()\n if number == self.target:\n image = self.target_image if self.value < self.target else self.target_image_filled\n self.log.debug('Showing target row: %s, %s, %s' % (self.value, self.target, image))\n elif number <= self.value:\n image = self.fill_image\n elif number <= self.target:\n image = self.blank_image\n else:\n image = self.grey_image\n self.log.debug('Showing %s cell %d as %s (value=%s, target=%s)' % (\n self.name, number, image, self.value, self.target))\n\n slot.addItem(sprite.Sprite(image))",
"def show_from_database(self, table_model):\n arr = [4, 1]\n # TODO",
"def show_inventory(table):\r\n print('======= The Current Inventory: =======')\r\n print('ID\\tCD Title by: Artist\\n')\r\n for cd in table:\r\n print(cd)\r\n\r\n print('======================================')",
"def all_equipment(self):\n\t\tself.db = DB()\n\t\tactivity_all = self.db.select_all_from(\"equipment\")\n\t\ttmpl = lookup.get_template(\"equipment.html\")\n\t\treturn (tmpl.render(equipment=activity_all))",
"def show_installation(self, number): \n database = Database('data/database.db')\n inst = database.read_installation(number)\n view = Template(filename=\"view/template.html\", lookup=lookup)\n\n try:\n render = view.render(\n rows = [[inst.number, inst.name, inst.address, inst.zip_code, inst.city, inst.latitude, inst.longitude]],\n pageTitle = \"Installation \" + number,\n tableTitle = \"Installation \" + number,\n ths = [\"Numéro\", \"Nom\", \"Adresse\", \"Code postal\", \"Ville\", \"Latitude\", \"Longitude\"]\n )\n except AttributeError:\n render = view.render(\n rows = [],\n pageTitle = \"Installation \" + number,\n tableTitle = \"Installation \" + number,\n ths = [\"Numéro\", \"Nom\", \"Adresse\", \"Code postal\", \"Ville\", \"Latitude\", \"Longitude\"]\n )\n \n return render",
"def show(self, item_id):\n pass",
"def get():\n id_num = int(input('Enter the ID number of the item you wish to retrieve\\n'))\n db_actions.retrieve(id_num)",
"def show_inventory(table):\r\n if (table):\r\n print('======= The Current Inventory: =======')\r\n print('ID\\tCD Title (by: Artist)\\n')\r\n for row in table:\r\n print('{}\\t{} (by:{})'.format(*row.values()))\r\n print('======================================')\r\n else:\r\n print ('Inventory is empty.\\n')\r\n # return None\r",
"def show():\n\n quality_list = []\n\n conn = sqlite3.connect(\"person_database.bd\")\n c = conn.cursor()\n\n c.execute(\"SELECT *, oid FROM person_info\")\n records = c.fetchall()\n\n conn.commit()\n conn.close()\n\n for record in records:\n quality_list.append(str(record[2]) + \" \" + str(record[0]))\n\n return quality_list",
"def get_equipment(self):\n s = ''\n for i in range(12, 16):\n s += ' ' + str(self.dna[i])\n return s",
"def test_visualize_recipe_equipment_by_id(self):\n pass",
"def details(request, product_id):\n product_details = get_object_or_404(Products, pk=product_id)\n nutriments = Nutriments_for_100g.objects.filter(product__id=product_id).order_by('name')\n context = {\n 'product_details': product_details,\n 'nutriments': nutriments\n }\n return render(request, 'store/details.html', context)",
"def info_equipment_silos_get():\n equipment = _equipment_by_group(404) # 404 == Silo\n return equipment, 200",
"def detail(request, pk):\n mineral = get_object_or_404(Mineral, pk=pk)\n return render(request, 'detail.html', {'mineral': mineral})",
"def display_product_from_id(self, product_id):\n self.cur.execute(\"SELECT name, brand, nova, stores, id FROM Product WHERE id = %s\", (product_id, ))\n response = self.cur.fetchall()\n response = response[0]\n print (\"{} de la marque {} (indice nova : {}), disponible dans les magasins {}.\\n\"\n \"Lien vers une description complete https://fr.openfoodfacts.org/produit/{}\\n\".\n format(response[0], response[1], response[2], response[3], response[4]))",
"def info_equipment_reactors_get():\n equipment = _equipment_by_group(438) # 438 == Mobile Reactor\n return equipment, 200",
"def show_inventory(self):\n\t\tclear_screen()\n\n\t\tprint(\"# INVENTORY #\\n\")\n\t\tprint(\"Weapon{:.>15} \".format(self.inventory['Weapon']))\n\t\tprint(\"Clothing{:.>13} \".format(self.inventory['Clothing']))\n\t\tprint(\"Items{:.>16} \".format(self.inventory['Items']))\n\n\t\tpress_enter()",
"def get_details(self,p_id):\n workbook = load_workbook(\"products.xlsx\")\n products = workbook.active\n #loop for finding specified product\n for row in range(2,products.max_row+1):\n if products[row][0].value == p_id:\n self.id = p_id\n self.row = row\n self.quantity= products[row][2].value\n self.name = products[row][1].value\n self.price = products[row][3].value\n break\n else:\n self.id = 0\n print(\"no Such Id exits!_ \")",
"def show_catalogue(self):\n\n data = cur.execute(\"\"\"SELECT productid, productname, unitcost, stock, location \n FROM catalogue WHERE vendorname = ?\"\"\", (self.vendorname,)).fetchall()\n print(tabulate(data, headers=[\"Product ID\", \"Name\", \"Unit Cost\", \"Stock\", \"Location\"]))",
"def __str__(self):\n if self.number == 0:\n return 'info'\n elif self.number == 1:\n return '1 game left!'\n else:\n return f'{self.number} games left!'",
"def show_activities(self): \n database = Database('data/database.db')\n activities = database.read_activities()\n view = Template(filename=\"view/template.html\", lookup=lookup)\n \n \n return view.render(\n rows = [[item.number, item.name] for item in activities],\n pageTitle = \"Activités\",\n tableTitle = \"Liste de toutes les activités\",\n ths = [\"Numéro\", \"Nom\"]\n )",
"def __str__(self):\n return 'Classroom {} has a capacity of {} persons and ' \\\n 'has the following equipment: {}.'.format(\n self.number, str(self.capacity), ', '.join(self.equipment))",
"def display_number(com,count):\n print \"NUM: \", count\n try:\n if count > 999:\n count = 999\n safenum=str(int(count))\n #com = serial.Serial(config.devnum, 9600, timeout=3)\n #com.close()\n #com.open()\n comstr = config.num['display']+safenum+config.num['eot']\n com.write(comstr)\n #com.close()\n except serial.SerialException as e:\n logging.warning(\"Serial exception: \"+str(e))"
]
| [
"0.69405115",
"0.6025888",
"0.5856586",
"0.57539564",
"0.5740271",
"0.56885344",
"0.558453",
"0.55563587",
"0.55362034",
"0.5532173",
"0.5498387",
"0.548142",
"0.5451306",
"0.53929764",
"0.5377283",
"0.53668404",
"0.5342139",
"0.53247577",
"0.5303719",
"0.5300274",
"0.5288869",
"0.5246248",
"0.52450526",
"0.52309126",
"0.5213608",
"0.5206559",
"0.52040213",
"0.51957476",
"0.51840216",
"0.517585"
]
| 0.791672 | 0 |
Shows the activity which has the given number from the database | def show_activity(self, number):
        database = Database('data/database.db')
        activ = database.read_activity(number)
        view = Template(filename="view/template.html", lookup=lookup)

        try:
            render = view.render(
                rows = [[activ.number, activ.name]],
                pageTitle = "Activité " + number,
                tableTitle = "Activité " + number,
                ths = ["Numéro", "Nom"]
            )
        except AttributeError:
            render = view.render(
                rows = [],
                pageTitle = "Activité " + number,
                tableTitle = "Activité " + number,
                ths = ["Numéro", "Nom"]
            )
        return render | {
    "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] }
} | [
"def show_activities(self): \n database = Database('data/database.db')\n activities = database.read_activities()\n view = Template(filename=\"view/template.html\", lookup=lookup)\n \n \n return view.render(\n rows = [[item.number, item.name] for item in activities],\n pageTitle = \"Activités\",\n tableTitle = \"Liste de toutes les activités\",\n ths = [\"Numéro\", \"Nom\"]\n )",
"def open_view_bynum(action, num):\n srt = sorted(g.userpl)\n name = srt[int(num) - 1]\n open_save_view(action, name)",
"def display_number(number):\n ret = _LIB.led_matrix_click_display_number(number)\n if ret < 0:\n raise Exception(\"led matrix click display number failed\")",
"def __ui_find_activities_by_participant(self):\n existing_persons_ids = self.__person_service.get_existing_persons_ids()\n searched_participant_id = int(\n input(f\"Introduce the ID of one person (you can choose from the list: {existing_persons_ids})\\n > \"))\n\n searched_activities = self.__activity_service.find_activities_by_participant(searched_participant_id)\n print(\"\")\n\n if len(searched_activities) == 0:\n print(\"There is no such an activity!\\n\")\n else:\n for activity in searched_activities:\n print(activity)\n print(\"\")",
"def find_infos(self, activity_name, city):\n database = Database('data/database.db') \n\n infos = database.get_infos(activity_name, city)\n view = Template(filename=\"view/template.html\", lookup=lookup)\n\n try:\n render = view.render(\n rows = [[item[0].number, item[0].name, item[1].number, item[1].name, item[2].number, item[2].name] for item in infos],\n pageTitle = \"Informations pour \" + activity_name + \" à \" + city,\n tableTitle = \"Informations pour \" + activity_name + \" à \" + city,\n ths = [\"Numéro d'installation\", \"Nom d'installation\", \"Numéro d'équipement\", \"Nom d'équipement\", \"Numéro d'activité\", \"Nom d'activité\"]\n )\n except AttributeError:\n render = view.render(\n rows = [],\n pageTitle = \"Informations pour \" + activity_name + \" à \" + city,\n tableTitle = \"Informations pour \" + activity_name + \" à \" + city,\n ths = [\"Numéro d'installation\", \"Nom d'installation\", \"Numéro d'équipement\", \"Nom d'équipement\", \"Numéro d'activité\", \"Nom d'activité\"]\n )\n \n return render",
"def print_number():\n\tfavorite_number = fetch_number()\n\tprint(f\"Your favorite number is {favorite_number}!\")",
"async def viewcase(self, ctx, case_number):\r\n server = ctx.guild\r\n try:\r\n if self._logs[str(server.id)][\"case\"][case_number][\"reason\"] is None:\r\n reason = \"None (Update using `;modlog case {} <reason>`)\".format(case_number)\r\n else:\r\n reason = self._logs[str(server.id)][\"case\"][case_number][\"reason\"]\r\n if self._logs[str(server.id)][\"case\"][case_number][\"mod\"] is None:\r\n author = \"Unknown\"\r\n else:\r\n author = await self.bot.get_user_info(self._logs[str(server.id)][\"case\"][case_number][\"mod\"])\r\n user = await self.bot.get_user_info(int(self._logs[str(server.id)][\"case\"][case_number][\"user\"]))\r\n s = discord.Embed(\r\n title=\"Case {} | {}\".format(case_number, self._logs[str(server.id)][\"case\"][case_number][\"action\"]))\r\n s.add_field(name=\"User\", value=f'{user}(<@{user.id}>)')\r\n s.add_field(name=\"Moderator\", value=author, inline=False)\r\n s.add_field(name=\"Reason\", value=reason)\r\n await ctx.send(embed=s)\r\n except:\r\n await ctx.send(\"Invalid case number :no_entry:\")",
"def __str__(self):\n if self.number == 0:\n return 'info'\n elif self.number == 1:\n return '1 game left!'\n else:\n return f'{self.number} games left!'",
"def get_activity():\n try:\n activity = Activity.objects.filter(active=1).latest('id')\n except Activity.DoesNotExist:\n activity = None\n return activity",
"def showtask(id):\n\n tasks = Task.query.filter_by(id=id)\n return render_template('home/taskshowall/dashboard_showtask.html',tasks=tasks,title=\"tasks\")",
"def query_nine_b(self, table_name_activities):\n query = (\n \"SELECT user_id, COUNT(*) AS ActivityCount\"\n \", SUM(TIMESTAMPDIFF(HOUR, start_date_time, end_date_time)) as HoursActive \"\n \"FROM %s \"\n \"WHERE YEAR(start_date_time) = '2008' AND MONTH(start_date_time) = '11' \"\n \"GROUP BY user_id \"\n \"ORDER BY ActivityCount DESC \"\n \"LIMIT 10\"\n )\n self.cursor.execute(query % table_name_activities)\n rows = self.cursor.fetchall()\n print(tabulate(rows, headers=self.cursor.column_names))\n not_var = \"\"\n if rows[0][0][0] < rows[1][0][0]:\n not_var = \"NOT\"\n print(\n \"The user with the most activities does\",\n not_var,\n \"have more hours than the user with the second most activities.\",\n )\n return rows",
"def show_activities(bucketlist_id):\n form = ActivityForm(request.form)\n if logged_in:\n\n # Check if buck has activities\n all_activities = Activity.activities\n buck_activities = {k:v for k, v in all_activities.items() if bucketlist_id==v['bucketlist_id']}\n if buck_activities:\n return render_template(\"show_activities.html\", form=form, bucketlist_id=bucketlist_id, data=buck_activities)\n\n # If buck ids do not match\n return render_template('show_activities.html', form=form, bucketlist_id=bucketlist_id)\n\n # If user is not logged in:\n return login_required()",
"def index(self):\n\n\t\tself.db = DB()\n\t\tactivityTuple = self.db.select_all_from(\"activity\")[1]\n\t\ttmpl = lookup.get_template(\"index.html\")\n\t\treturn (tmpl.render(activity=activityTuple))",
"def click_display(self, row, column):\n\n print(\"Row %d and Column %d was clicked\" % (row, column))\n item = self.result_table.item(row, 4)\n self.ID = item.text()\n print self.ID",
"def template(self, request, activity, session):\n activity_model = apps.get_model(\"activity\", \"Activity\")\n user = request.user\n if user not in activity.student.all() and user not in activity.teacher.all():\n logger.warning(\n \"User '\" + user.username + \"' denied to access course'\" + activity.name + \"'.\")\n raise PermissionDenied(\"Vous n'êtes pas membre de cette classe.\")\n\n if request.method == 'GET':\n if request.GET.get(\"action\", None) == \"toggle_activity\":\n try:\n act = activity_model.objects.get(id=request.GET.get(\"id\", None))\n act.toggle_open(request)\n except activity_model.DoesNotExist:\n raise Http404(\n \"L'activité d'ID '\" + str(request.GET.get(\"id\", None)) + \"' introuvable.\")\n return redirect(reverse(\"activity:play\", args=[activity.id]))\n\n smalls = list()\n for item in activity.indexed_activities():\n if item.open or activity.is_teacher(user):\n smalls.append(item.small(request))\n\n return render(request, \"activity/activity_type/course/index.html\", {\n 'name': activity.name,\n 'smalls': smalls,\n 'teacher': activity.teacher.all(),\n 'instructor': activity.is_teacher(user),\n 'course_id': activity.id,\n })",
"def all_activity(self):\n\t\tself.db = DB()\n\t\tactivity_all = self.db.select_all_from(\"activity\")\n\t\ttmpl = lookup.get_template(\"activity.html\")\n\t\treturn (tmpl.render(activity=activity_all))",
"def action(self):\n return self.rowTime.activity",
"def show_equipment(self, number):\n database = Database('data/database.db')\n equip = database.read_equipment(number)\n view = Template(filename=\"view/template.html\", lookup=lookup)\n\n try:\n render = view.render(\n rows = [[equip.number, equip.name, equip.installation_number]],\n pageTitle = \"Équipement \" + number,\n tableTitle = \"Équipement \" + number,\n ths = [\"Numéro\", \"Nom\", \"Numéro d'installation\"]\n )\n except AttributeError:\n render = view.render(\n rows = [],\n pageTitle = \"Équipement \" + number,\n tableTitle = \"Équipement \" + number,\n ths = [\"Numéro\", \"Nom\", \"Numéro d'installation\"]\n )\n \n return render",
"def query_nine_a(self, table_name_activities):\n query = (\n \"SELECT YEAR(start_date_time) as Year, MONTH(start_date_time) as Month, COUNT(*) AS ActivityCount \"\n \"FROM %s \"\n \"GROUP BY YEAR(start_date_time), MONTH(start_date_time) \"\n \"ORDER BY ActivityCount DESC \"\n \"LIMIT 1 \"\n )\n\n self.cursor.execute(query % table_name_activities)\n rows = self.cursor.fetchall()\n print(tabulate(rows, headers=self.cursor.column_names))\n return rows",
"def do_show(self, line):\n\t\tif not(self.db is None):\n\t\t\tfor contact in self.db.contact.find():\n\t\t\t\tpprint.pprint(contact)\n\t\telse:\n\t\t\tprint(\"You must open the existing database or create new one.\")",
"def get_activity(variable):\n project = variable['project']\n try:\n exp = variable['exp']\n if isinstance(exp, list):\n return [CMOR_TABLES[project].activities[value][0] for value in exp]\n return CMOR_TABLES[project].activities[exp][0]\n except (KeyError, AttributeError):\n return None",
"def process_dot(request, id, template='goflow/process.dot'):\n process = Process.objects.get(id=int(id))\n context = {\n 'process': process,\n 'roles': ({'name':'role1', 'color':'red'},),\n 'activities': Activity.objects.filter(process=process)\n }\n return render_to_response(template, context)",
"def activity_from_database(self, table, render_function,\n timestamp_accessor=operator.attrgetter('timestamp')):\n # TODO optimize the heck outta this like spline.frontpage\n q = (\n session.query(table)\n .order_by(timestamp_accessor(table).desc())\n .limit(self.max_count)\n )\n self.add_activity(q, render_function, timestamp_accessor)",
"async def number(self, ctx, number: int) -> None:\n number_user = self.bot.numbers.search.num_to_user(number)\n nation_and_countries = self.bot.numbers.checks.nation_and_countries(number)\n\n countries = \"\\n\".join([f\"• {country[0]}\" for country in nation_and_countries[\"countries\"]])\n await ctx.send(\n \"\",\n embed=NumEmbed(\n title=\"NGB - Number Search\",\n colour=0x00C9CC,\n fields={\n \"Number\": f\"#{number}\",\n \"Number User\": f\"u/{number_user}\" if number_user is not None else \"No One\" if number != 404 else \"Error 404\\nNot Found\",\n \"Nation\": nation_and_countries[\"nation\"][0],\n \"Odd/Even\": self.bot.numbers.checks.parity(number),\n \"Eligible Countries\": countries,\n },\n user=ctx.author,\n ),\n )",
"async def get_your_puzzle_activity(self, limit: int = None) -> 'Response':\n headers = {\n 'Content-Type': 'application/x-ndjson',\n }\n parameters = {\n 'max': limit if limit is not None else 'null'\n }\n response = await self._client.request_stream(method=RequestMethods.GET,\n url=USERS_MY_PUZZLE_ACTIVITY_URL,\n headers=headers,\n params=parameters)\n return response",
"def get(self, request):\n activities = (\n activitystreams.streams[\"local\"]\n .get_activity_stream(request.user)\n .filter(\n Q(comment__isnull=False)\n | Q(review__isnull=False)\n | Q(quotation__isnull=False)\n | Q(mention_books__isnull=False)\n )\n )\n\n large_activities = Paginator(\n activities.filter(mention_books__isnull=True)\n .exclude(content=None, quotation__quote=None)\n .exclude(content=\"\"),\n 6,\n )\n small_activities = Paginator(\n activities.filter(\n Q(mention_books__isnull=False) | Q(content=None) | Q(content=\"\")\n ),\n 4,\n )\n\n page = request.GET.get(\"page\")\n data = {\n \"large_activities\": large_activities.get_page(page),\n \"small_activities\": small_activities.get_page(page),\n }\n return TemplateResponse(request, \"discover/discover.html\", data)",
"def show(self, number_of_rows = None):\n result = quest.engine.show(self.statement, number_of_rows)\n return result",
"def index(request):\n try:\n meeting = Meeting.objects.latest('when')\n num_rsvped = Person.objects.filter(ynm='Y').count()\n\n except (KeyError, Meeting.DoesNotExist, Person.DoesNotExist):\n raise Http404\n\n return render(request,'chipy/chipy.html',{'meeting':meeting,'num_rsvped':num_rsvped})",
"async def _view_note(self, ctx: Context, number: int):\n\n author = ctx.author\n\n embed_links = ctx.channel.permissions_for(ctx.guild.me).embed_links\n\n author_str = f\"{author.name}'\"\n\n if author.name[-1].lower() != \"s\":\n author_str += \"s\"\n\n async with self.config.member(author).notes() as notes:\n try:\n note = notes[number-1]\n except IndexError:\n return await ctx.send(\n _(\"Note number {} not found.\").format(number)\n )\n\n msg_info = \"\"\n if note[\"author\"]:\n msg_info += _(\"**Author:** {}\").format(note[\"author\"])\n if note[\"channel\"]:\n msg_info += _(\"\\n**Channel:** {}\").format(note[\"channel\"])\n if note[\"jump_url\"]:\n if embed_links:\n msg_info += _(\n \"\\n[Click here to jump to message]({})\"\n ).format(note[\"jump_url\"])\n else:\n msg_info += _(\n \"\\n**Jump To Message:** {}\"\n ).format(note[\"jump_url\"])\n\n note_info = _(\n \"{}\\n\\n**Note:**\\n```{}```\\n**Reason:**\\n```{}```\"\n ).format(\n msg_info,\n note[\"note\"],\n note[\"reason\"]\n ).strip()\n\n if embed_links:\n page = discord.Embed(\n colour=0xff0000,\n description=note_info,\n title=_(\"{} TvM Note #{}\").format(author_str, number),\n timestamp=ctx.message.created_at\n )\n await ctx.send(embed=page)\n else:\n page = _(\n \"**{author} TvM Note #{number}**\"\n \"\\n\\n{note}\"\n ).format(\n author=author_str,\n number=number,\n note=note_info\n )\n await ctx.send(page)",
"def get_by_number(number=1):\r\n conn_string = \"host='127.0.0.1' dbname='NAME' user='NAME' password='PASSED'\"\r\n conn = psycopg2cffi.connect(conn_string)\r\n cur = conn.cursor()\r\n query = \"select tid from trajectory.taxi group by tid limit \" + str(number) + \";\";\r\n \r\n \r\n logging.debug('query: '+query)\r\n \r\n try:\r\n cur.execute(query)\r\n except psycopg2cffi.Error as e:\r\n conn.rollback()\r\n cur.close()\r\n logging.error('query: '+query)\r\n return\r\n \r\n \r\n tid_list = [int(r[0]) for r in cur]\r\n \r\n\r\n trajectory_dataset = []\r\n for tid in tid_list:\r\n trajectory_dataset.append(get_from_id(tid))\r\n \r\n return trajectory_dataset"
]
| [
"0.703598",
"0.5651328",
"0.56446993",
"0.5596035",
"0.5421679",
"0.54168236",
"0.53886724",
"0.5314571",
"0.53015953",
"0.52724856",
"0.5244988",
"0.52392125",
"0.5209759",
"0.51946753",
"0.51631796",
"0.51054376",
"0.50989926",
"0.50710297",
"0.5067204",
"0.5057887",
"0.5005942",
"0.49649468",
"0.4936471",
"0.49359116",
"0.4926335",
"0.49087936",
"0.49068958",
"0.48839763",
"0.48839527",
"0.4880861"
]
| 0.76558006 | 0 |
Shows all the installations, equipments and activities which match the given activity and city | def find_infos(self, activity_name, city):
        database = Database('data/database.db')

        infos = database.get_infos(activity_name, city)
        view = Template(filename="view/template.html", lookup=lookup)

        try:
            render = view.render(
                rows = [[item[0].number, item[0].name, item[1].number, item[1].name, item[2].number, item[2].name] for item in infos],
                pageTitle = "Informations pour " + activity_name + " à " + city,
                tableTitle = "Informations pour " + activity_name + " à " + city,
                ths = ["Numéro d'installation", "Nom d'installation", "Numéro d'équipement", "Nom d'équipement", "Numéro d'activité", "Nom d'activité"]
            )
        except AttributeError:
            render = view.render(
                rows = [],
                pageTitle = "Informations pour " + activity_name + " à " + city,
                tableTitle = "Informations pour " + activity_name + " à " + city,
                ths = ["Numéro d'installation", "Nom d'installation", "Numéro d'équipement", "Nom d'équipement", "Numéro d'activité", "Nom d'activité"]
            )
        return render | {
    "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] }
} | [
"def get_search_result(self, user_input):\n\n conn = sqlite3.connect(self.db_name)\n\n request_field = '%' + user_input[0].strip() + '%'\n request_city = '%' + user_input[1].strip() + '%'\n\n activity_ids = []\n activities_dictionary = {}\n activities_array = self.search_activity(conn, request_field)\n for data in activities_array:\n # We use an array of id to easily iterate and a dictionary to find the names correspondence later\n activity_ids.append(data[0])\n activities_dictionary[data[0]] = data[1]\n\n equipment_activity_ids = {}\n equipment_ids = []\n for data in self.get_equipments_by_activity(conn, activity_ids):\n equipment_ids.append(data[0])\n # If the reference doesn't exist we create it, if it does we add the activityId to it\n if data[0] in equipment_activity_ids:\n equipment_activity_ids.get(data[0]).append(data[1])\n else:\n equipment_activity_ids[data[0]] = [data[1]]\n\n equipments_array = self.get_equipments_by_ids(conn, equipment_ids)\n\n installation_ids = []\n for data in equipments_array:\n if data[2] not in installation_ids:\n installation_ids.append(data[2])\n\n installations_list = []\n # At first we get all installations matching our previously gathered ids and the city name\n for data_installation in self.search_installation(conn, request_city, installation_ids):\n current_installation = Installation(data_installation[0], data_installation[1], data_installation[2],\n data_installation[3], data_installation[4], data_installation[5],\n data_installation[6])\n for data_equipment in equipments_array:\n if data_equipment[2] == current_installation.id:\n current_equipment = Equipment(data_equipment[0], data_equipment[1], data_equipment[2])\n # Then, we add the matching equipments to the installation object\n for key in equipment_activity_ids.keys():\n if key == current_equipment.id:\n for value in equipment_activity_ids.get(key):\n # And the matching activities to the equipment object\n current_equipment.add_activity(Activity(value, activities_dictionary.get(value)))\n\n current_installation.add_equipment(current_equipment)\n equipments_array.remove(data_equipment)\n\n installations_list.append(current_installation)\n\n conn.close()\n\n return installations_list",
"def find_by_city(city, cat, keywords):\n print \"--------------------------------------------------------\"\n city_items = CityItems(city, cat)\n r = requests.get(city_items.url)\n if r.status_code == 200:\n scraper = HtmlScraper(r.text)\n for path in scraper.item_paths:\n item = scraper.scrape_item(path, keywords)\n if item:\n print item\n city_items.add_item(item)\n else:\n print 'ERROR: Invalid city: {}'.format(city)\n return city_items",
"def search_installation(conn, city, ids):\n\n c = conn.cursor()\n params = [city] + ids\n search_query = \"SELECT * FROM Installation T1 WHERE T1.City LIKE ? AND T1.Id IN ({})\".format(\n \",\".join([\"?\"] * len(ids)))\n\n c.execute(search_query, params)\n result = c.fetchall()\n return result",
"def show_activities(self): \n database = Database('data/database.db')\n activities = database.read_activities()\n view = Template(filename=\"view/template.html\", lookup=lookup)\n \n \n return view.render(\n rows = [[item.number, item.name] for item in activities],\n pageTitle = \"Activités\",\n tableTitle = \"Liste de toutes les activités\",\n ths = [\"Numéro\", \"Nom\"]\n )",
"def get_equipments_by_activity(conn, request):\n\n c = conn.cursor()\n search_query = \"SELECT * FROM EquipmentActivity T1 WHERE T1.IdActivity IN ({})\".format(\n \",\".join([\"?\"] * len(request)))\n c.execute(search_query, tuple(request))\n result = c.fetchall()\n return result",
"def __ui_list_all_activities(self):\n activities_list = self.__activity_service.service_get_list_of_activities()\n if len(activities_list) == 0:\n print(\"The list of activities is empty!\\n\")\n else:\n for activity in activities_list:\n print(activity)\n print(\"\")",
"def visu_plugs(city):\r\n\r\n conn = psycopg2.connect(database=DATABASE,\r\n user=USER,\r\n host=HOST,\r\n password=PASSWORD)\r\n cursor = conn.cursor()\r\n\r\n sql = (\"\"\"SELECT BOUCHON, nombre_particule FROM bouchon\r\n WHERE nom_ville = %s;\"\"\")\r\n\r\n cursor.execute(sql, (city,))\r\n rows = cursor.fetchall()\r\n liste = [i for i in rows]\r\n\r\n return liste",
"def search_engine(city_name):\n\n API_Key = \"zIGuOeUd0aE4O621Gj1KGDc6JiZ3PAGb\"\n http_request = f\"http://dataservice.accuweather.com/locations/v1/cities/search?apikey={API_Key}&q={city_name}&language=pt-br\"\n\n search_request = requests.get(http_request)\n\n if search_request.status_code != 200:\n print(f\"It was not possible to retrive information about {city_name}\")\n\n else:\n search_response = search_request.json()\n print(f\"Obtaining information about the weather in {city_name}\")\n\n return search_response[0]",
"def get_city_info(g, city_name):\n flag = 0\n for key in g.city_dict:\n if(g.city_dict[key].get_name() == city_name):\n print g.city_dict[key].get_info()\n flag = 1\n \n if(flag == 0):\n print (\"Invalid Input\")",
"def cities_by_states():\n states = storage.all(State)\n cities = storage.all(City)\n return render_template('8-cities_by_states.html', states=states,\n cities=cities)",
"def test_search_city(self):\n self.tc_id = \"Ts_011\"\n self.tc_desc = \"Verify user is able to register into the application with existing email id\"\n self.tc_step = \"TC Start\"\n\n searchbycity = SearchCity(self.driver)\n\n self.tc_step = \"Launch the url\"\n searchbycity.launchUrl(\"https://massdtaiot.com/dtahip/\")\n\n self.tc_step =\"Search the city\"\n searchbycity.chooseActon()\n # searchbycity.chooseVendor()",
"def get_filters():\n\n cities = CITY_DATA.keys()\n months = ('all', 'january', 'february','march', 'april', 'may', 'june')\n days = ('all', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday')\n print('Hello! Let\\'s explore some US bikeshare data!')\n\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n city_name= \"chicago, new york city or washington\"\n while True:\n city = input('Which city would you like to view:{}?\\n'.format(city_name)).lower()\n if city not in cities:\n print('Invalid response, please try again.')\n continue\n else:\n break\n\n # TO DO: get user input for month (all, january, february, ... , june)\n month_list=\"all, january, february, march, april, may or june\"\n while True:\n month = input('Which month would you like to view: {}?\\n'.format(month_list)).lower()\n if month not in months:\n print('Invalid response, please try again.')\n continue\n else:\n break\n\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n week_days= \"all, monday, tuesday, wednesday, thursday, friday, saturday or sunday\"\n while True:\n day = input('Which day of the week would you like to view:{}?\\n'.format(week_days)).lower()\n if day not in days:\n print('Invalid response, please try again.')\n continue\n else:\n break\n\n print('-'*40)\n return city, month, day",
"def get_cities(self, city_name: str = \"\"):",
"def _get_information(self):\n grid = self._tab.find(\"div\", class_=\"details grid show\")\n cities = grid.find_all(\"li\", attrs={'data-type': 'city'})\n return [self._get_text(city) for city in cities]",
"def __ui_choose_search_criteria_for_activities(self):\n print(\"By which criteria do you want to search activities?\\n\"\n \" 1. By date\\n\"\n \" 2. By description\\n\")\n user_choice = input(\"Type your option: \").strip()\n if user_choice == \"1\":\n self.__ui_search_activities_by_date()\n elif user_choice == \"2\":\n self.__ui_search_activities_by_description()\n else:\n print(\"Invalid option!\\n\")\n return",
"def get_cities(self, city_name: str = None):",
"async def get_city_tour(self, flights_params, city_name, lang='en', limit=1) -> tuple:\n excursions = await self.excursion_by_city_search(city_name, limit=limit)\n flights = await flights_instance.get_flights(flights_params)\n hotels = await self.get_hotels(query=city_name, limit=limit, lang=lang)\n return excursions, flights, hotels",
"async def collect_info(request: web.Request, city=\"Kiev\"):\n async with ClientSession() as session:\n covid_19_data = await (covid_19(\"https://covid-19-data.p.rapidapi.com/totals\", session))\n weather_data = await (weather(\"https://community-open-weather-map.p.rapidapi.com/weather\", session, city=city))\n context = {\n \"weather\": weather_data,\n \"covid\": covid_19_data,\n \"current_date\": datetime.datetime.now().strftime(\"%d/%m/%Y - %H:%M:%S\")\n }\n response = aiohttp_jinja2.render_template(\"collect_info.html\", request,\n context=context)\n return response",
"def cities_by_states():\n states = storage.all(State).values()\n return render_template('8-cities_by_states.html', states=states)",
"def activitySearch (listAct,activity):\n \n for act in listAct:\n if (act.name == activity.name): \n return True",
"def describe_city(city, country='Germany'):\n\tprint(f'{city.title()} is in {country.title()}.')",
"def get_all_city_facts(request, city_id):\n try:\n city_facts = CityFact.objects.filter(city=city_id)\n except CityFact.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n serializer = CityFactSerializer(city_facts, many=True)\n return Response(serializer.data)",
"def get_activities():\n pass",
"def get_airports(cities):\n ## Database connection, db, collection\n conn = pymongo.Connection()\n db=conn.flight_db\n ap = db.airports\n\n airport_list = []\n for city in cities:\n c = ap.find({\n 'city':{'$regex':'^'+city, '$options':'i'}\n })\n for info in c:\n airport_list.append(info['city'] + ': ' + info['code'])\n print '%s - %s' % (info['city'], info['code'])\n conn.disconnect()\n\n return airport_list",
"def describe_city(city, country='New Zealand'):\n\tprint(f\"\\nThe city of {city} is in {country}.\")",
"def describe_city(city, country='Japan'):\n print(city.title() + \" is in \" + country.title() + \".\")",
"def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n\n while True:\n city = input('Are you from Washington, New York City or Chicago: ').lower()\n if city in cities:\n break\n print('You selected: ', city)\n#fixed the missing loop and case sensitivity in 'month' and 'day' input\n while True:\n month = input('Which month would you like to filter, choose \"all\" if you do not want to filter: ').lower()\n if month in months:\n break\n print('You selected')\n\n while True:\n day = input('Which day would you like to filter, choose \"all\" if you do not want to filter: ').lower()\n if day in days:\n break\n\n print('-'*40)\n return city, month, day",
"def get_city_visits(request):\n city_visits = CityVisitLog.objects.filter(user=request.user).values('city_id').annotate(\n visit_count=Count('city')).order_by('-visit_count')\n for visit in city_visits:\n visit['city'] = City.objects.get(pk=visit['city_id'])\n\n serializer = CityVisitSerializer(city_visits, many=True)\n return Response(serializer.data)",
"def by_activity(cls,site_id=0,activity=None):\n return meta.DBSession.query(Activity).filter_by(site_id=site_id,activity=activity).all()",
"def cities_by_states():\n new_dict = storage.all(State)\n return render_template('8-cities_by_states.html', states=new_dict)"
]
| [
"0.6081876",
"0.5634441",
"0.56299937",
"0.54854494",
"0.5451115",
"0.54429376",
"0.5318897",
"0.527957",
"0.52620614",
"0.5245779",
"0.5226336",
"0.5164053",
"0.5126602",
"0.51202446",
"0.5117655",
"0.51148504",
"0.510631",
"0.508857",
"0.50721574",
"0.5069758",
"0.5064015",
"0.5063353",
"0.50422925",
"0.5035061",
"0.5018784",
"0.49919024",
"0.49883395",
"0.49857813",
"0.49822062",
"0.49789065"
]
| 0.6213436 | 0 |
fill line edit with curve name | def fillCurveLE(self):
sel = mn.ls( sl = True, dag = True, ni = True, typ = 'nurbsCurve' )
self.curve_le.setText( sel[0].name ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_to_plot(self, line_name, points):\n points = [x * 100 for x in points]\n plt.plot(points, label=line_name)",
"def setCurve(self, index, curve) -> None:\n ...",
"def add_curve(self):\n pv_name = self._get_full_pv_name(self.pv_name_line_edt.text())\n color = random_color()\n for k, v in self.channel_map.items():\n if color == v.color:\n color = random_color()\n\n self.add_y_channel(pv_name=pv_name, curve_name=pv_name, color=color)",
"def line(self, arg, fill):\n pass",
"def create_line(uniform = True, *args):\n axis = cmds.radioButtonGrp(widgets[\"lineAxisRBG\"], q=True, sl=True)\n length = cmds.floatFieldGrp(widgets[\"lineLenFFG\"], q=True, v1=True)\n density = cmds.floatFieldGrp(widgets[\"lineDenFFG\"], q=True, v1=True)\n\n numCvs = length * density\n if numCvs < 3.0: # curve needs 3 cvs (for 3 dg curve)\n numCvs = 3.0\n\n cvDist = length/numCvs\n\n # make a list of pt dist along some axis\n axisList = []\n for x in range(0,int(numCvs)+1):\n axisList.append(x)\n\n pts = []\n\n if axis == 1:\n for y in range(0, int(numCvs)+1):\n pt = [axisList[y]*cvDist, 0, 0]\n pts.append(pt)\n\n if axis == 2:\n for y in range(0, int(numCvs)+1):\n pt = [0, axisList[y]*cvDist, 0]\n pts.append(pt)\n\n if axis == 3:\n for y in range(0, int(numCvs)+1):\n pt = [0, 0, axisList[y]*cvDist]\n pts.append(pt)\t\t\t\n \n line = cmds.curve(name = \"line_01\", d=3, p=pts)\n shp = cmds.listRelatives(line, s=True)[0]\n cmds.rename(shp, \"{0}Shape\".format(line))\n if uniform:\n line = cmds.rebuildCurve(line, rebuildType = 0, spans = 0, keepRange = 0, replaceOriginal=True, end=1, keepControlPoints=0)[0]\n\n cmds.select(line, r=True)",
"def curve(*args, append: bool=True, bezier: bool=True, degree: float=3, editPoint:\n Union[List[float, float, float], List[List[float, float, float]]]=None, knot:\n Union[float, List[float]]=0.0, name: AnyStr=\"\", objectSpace: bool=True, periodic:\n bool=True, point: Union[List[float, float, float], List[List[float, float,\n float]]]=None, pointWeight: Union[List[float, float, float, float], List[List[float,\n float, float, float]]]=None, replace: bool=True, worldSpace: bool=True,\n **kwargs)->AnyStr:\n pass",
"def line(value):\r\n return '({}, {}), ({}, {})'.format(value.x1(), value.y1(), value.x2(), value.y2())",
"def add_curve(self, name, curve_type, xy_tuples_list):\n self._curve_reg.add_curve(name, curve_type, xy_tuples_list)",
"def _createline(self):\n return self.cv.create_line(0, 0, 0, 0, fill=\"\", width=2,\n capstyle = TK.ROUND)",
"def add_curve(self, **args):\n if \"name\" not in args:\n raise KeyError(\"No curve name given.\")\n if \"coords\" not in args:\n raise KeyError(\"No coordinates given.\")\n if \"values\" not in args:\n raise KeyError(\"No values given.\")\n if len(args[\"coords\"]) != len(args[\"values\"]):\n raise ValueError(\"Number of time coordinate points differs from number of values\")\n entries = len(self.tree['curves']['children'])\n self.tree['curves']['children']['curve' + str(entries)] = self.populate_tree('curve',\n children={})\n parameter = self.tree['curves']['children']['curve' + str(entries)]\n parameter['children']['name'] = self.populate_tree('name', text=args['name'], children={})\n coord_str = \"\"\n value_str = \"\"\n for i, coord in enumerate(args[\"coords\"]):\n if i < (len(args[\"coords\"])-1):\n coord_str = coord_str + str(coord) + \" \"\n value_str = value_str + str(args[\"values\"][i]) + \" \"\n if i == (len(args[\"coords\"])-1):\n coord_str = coord_str + str(coord)\n value_str = value_str + str(args[\"values\"][i])\n parameter['children']['coords'] = self.populate_tree('coords', text=coord_str, children={})\n parameter['children']['values'] = self.populate_tree('values', text=value_str, children={})",
"def add_datum(self, x, fields):\n\t\n\t\tfor name, value in fields.iteritems():\n\t\t\tif name not in self.curves:\n\t\t\t\tcurve = QwtPlotCurve()\n\t\t\t\tcurve.attach(self)\n\t\t\t\tself.curves[name] = [curve, [], []]\n\t\t\t\n\t\t\tstuff = self.curves[name]\n\t\t\tstuff[1].append(x)\n\t\t\tstuff[2].append(value)",
"def line(points):\n return LineString(points)",
"def add_curve(self, name, curve_type, xy_tuples_list):\n assert isinstance(name, str) and len(name) < 32 and name.find(' ') == -1, \"name must be a string with less than 32 characters and contain no spaces\"\n assert isinstance(curve_type, (type(None), str)), \"curve_type must be a string\"\n assert isinstance(xy_tuples_list, (list, np.ndarray)), \"xy_tuples_list must be a list of (x,y) tuples\"\n \n curve = Curve(name, curve_type, xy_tuples_list)\n self[name] = curve",
"def setCurveSymbols(step=0,shape='circle',size=30):\n dislin.marker(symboldict[shape])\n dislin.hsymbl(size)\n dislin.incmrk(step)",
"def parse_curves_line(L):\n data = L.split()\n if len(data) != len(column_names['curves']):\n print(\"curves line {} does not have 12 fields, skipping\".format(L))\n return\n label, record = parse_line_label_cols(L)\n\n record['conductor_ideal'] = data[4]\n record['conductor_norm'] = N = ZZ(data[5])\n record['conductor_norm_factors'] = N.support()\n\n record['ainvs'] = data[6]\n record['jinv'] = data[7]\n record['disc'] = disc = data[8]\n if \".\" in disc:\n print(\"Old disc: {}\".format(disc))\n disc = \"({})\".format(ZZ(RR(disc[1:-1])))\n print(\"New disc: {}\".format(disc))\n record['disc'] = disc\n record['normdisc'] = ZZ(data[9])\n from sage.all import sqrt\n record['root_analytic_conductor'] = sqrt(0.00798504020212804*float(N)**(1.0/float(record['degree']))*float(record['abs_disc']))\n #print('root_analytic_conductor = {}'.format(record['root_analytic_conductor']))\n\n eqn = data[10]\n # the reason for doing the following is for the unique field\n # 2.2.5.1 where the field generator is not a single character such\n # as 'a' or 'i' but is '\\phi', and we don't want to have '\\phix'\n # in a latex string (and also do not want any whitespace).\n if \"{x}\" not in eqn:\n eqn = eqn.replace('x', '{x}').replace('y', '{y}')\n record['equation'] = eqn\n\n record['cm'] = cm = ZZ(data[11]) if data[11] != '?' else '?'\n # The 'cm_type' column holds +1 for a curve with rational, -1 for\n # potential, 0 if no CM\n if cm:\n if 'CM' in label:\n record['cm_type'] = +1\n else:\n record['cm_type'] = -1\n else:\n record['cm_type'] = 0\n bc = data[12][1:-1]\n record['base_change'] = [str(lab) for lab in bc.split(\",\")] if bc else []\n record['q_curve'] = (data[13] == '1')\n return label, record",
"def setOriginLines(val=\"xy\"):\n if val == \"x\":\n dislin.xaxgit()\n elif val == \"y\":\n dislin.yaxgit()\n elif val == \"cross\":\n dislin.cross()\n else:\n dislin.axgit()",
"def addLegendLine(line,n):\n dislin.leglin(' ',line,n)",
"def _draw_line(plot, hori, vert, color, text):\n plot.plot(hori, vert, '-o'+color)\n plot.text(hori[-1]-3, vert[-1]+2, text, color=color)",
"def addLineStyle(dist, focus, axis, pupil):\n r = 0 #focus / 2\n g = 0 #np.log10(dist) / (25 / 3)\n b = 0 #axis / 20\n a = 0.4\n rgb = [r, g, b, a]\n line = {'style': '-', 'color': rgb}\n return line",
"def _curveToOne(self, pt1, pt2, pt3):\n t = \"C\"\n t += pointToString(pt1) + \" \"\n t += pointToString(pt2) + \" \"\n t += pointToString(pt3)\n self._commands.append(t)\n self._lastCommand = \"C\"\n self._lastX, self._lastY = pt3",
"def _defLine(self):\n self._dline=GPath(points = [0,100,GAME_WIDTH,100], linewidth = 1.5,\n linecolor = 'cyan')",
"def plot_line(ax, d, t, nufnu, name, label, col, legend=False, zorder=1):\n lum = nufnu * 1e-23 * 1e-3 * 4 * np.pi * d**2\n fs = 11\n nsize = 10 # normal size for points\n if name=='AT2018cow':\n marker='*'\n fcol = col\n s=70\n else:\n if label=='SN':\n marker='o'\n s=nsize\n fcol = col # fill color\n label = 'SN'\n elif label=='GRB':\n marker='o'\n fcol = 'white' # unfilled\n s=nsize\n label = 'GRB'\n elif label=='Rel. SN':\n marker='s'\n fcol = col \n s=nsize\n label = 'Rel. SN'\n elif label=='TDE':\n marker='s'\n fcol = 'white' #unfilled\n s=nsize\n label='TDE'\n ax.scatter(\n t, lum, facecolor=fcol, edgecolor=col, \n marker=marker, s=s, zorder=zorder)\n if legend:\n ax.plot(t, lum, c=col, ls='-', label=label, zorder=zorder)\n else:\n ax.plot(t, lum, c=col, ls='-', label=None, zorder=zorder)\n return lum",
"def setCurve(self, *args):\n return _libsbml.ReferenceGlyph_setCurve(self, *args)",
"def setCurve(self, *args):\n return _libsbml.GeneralGlyph_setCurve(self, *args)",
"def create(self):\n self.parent.copyCurrentWinState(self.pltw)\n # add a new vector\n vname = self.pltw.curvelist[self.cpos].name + 'BL'\n (nvec, npt) = np.shape(self.pltw.blklst[self.blkno])\n if self.pltw.pasteVector(self.data[2], self.blkno, vname):\n xname = self.pltw.getVnam(self.blkno, self.xpos)\n xvinfo = vectInfo(self.blkno, self.xpos, xname)\n yvinfo = vectInfo(self.blkno, nvec, vname)\n self.pltw.curvelist.append(curveInfo(vname, xvinfo, yvinfo))\n self.pltw.updatePlot()\n self.pltw.dirty = True\n self.pltw.activecurv = self.cpos\n self.parent.updateUI()\n self.hide()",
"def addBL(self):\n self.parent.copyCurrentWinState(self.pltw)\n vname = self.pltw.curvelist[self.cpos].name + 'BL'\n (nvec, npt) = np.shape(self.pltw.blklst[self.blkno])\n if self.pltw.pasteVector(self.data[2], self.blkno, vname):\n xname = self.pltw.getVnam(self.blkno, self.xpos)\n xvinfo = vectInfo(self.blkno, self.xpos, xname)\n yvinfo = vectInfo(self.blkno, nvec, vname)\n self.pltw.curvelist.append(curveInfo(vname, xvinfo, yvinfo))\n self.pltw.updatePlot()\n self.pltw.dirty = True\n self.pltw.activecurv = self.cpos\n self.parent.updateUI()\n self.hide()",
"def next_line():\r\n set_point(point().next_line())",
"def setCurve(self, *args):\n return _libsbml.SpeciesReferenceGlyph_setCurve(self, *args)",
"def _append_line_color_update_expression(self) -> None:\r\n from apysc.expression import expression_file_util\r\n expression: str = (\r\n f'{self.variable_name}.stroke(\"{self.line_color}\");'\r\n )\r\n expression_file_util.append_js_expression(expression=expression)",
"def curve_number(self):"
]
| [
"0.65251184",
"0.63600713",
"0.6350415",
"0.6244321",
"0.6214081",
"0.6091867",
"0.60685146",
"0.60510254",
"0.5991166",
"0.5967503",
"0.596386",
"0.59406424",
"0.5938429",
"0.5893543",
"0.583466",
"0.5831425",
"0.5829138",
"0.5812785",
"0.5776291",
"0.5767876",
"0.5730263",
"0.57119566",
"0.56977475",
"0.56822926",
"0.5663658",
"0.56466734",
"0.5632915",
"0.56267464",
"0.56063515",
"0.56060445"
]
| 0.6569289 | 0 |
remove selected object from list | def removeObject(self):
for SelectedItem in self.objects_lw.selectedItems():
self.objects_lw.takeItem(self.objects_lw.row(SelectedItem) ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove(self):",
"def remove():",
"def removeObject(self,object):\n self.removeList.append(object)",
"def get_non_selected(self):\n\n obj_list = self.get_list()\n\n for sel in self.get_selected():\n obj_list.remove(sel)\n\n return obj_list",
"def remove(self, obj: Viewable) -> None:\n new_objects = list(self)\n new_objects.remove(obj)\n self.objects = new_objects",
"def __onRemoveClicked(self):\n\t\tresults = self.deleteSelectedListWidgetItems(self.ui.listWidget, \"Remove Items?\", \"Are you sure that you want to remove the selected items?\")\n\t\t# force the iteration... removal from the list is our only goal.\n\t\tfor item in results:\n\t\t\tpass",
"def remove(self):\n pass",
"def remove(self):\n pass",
"def remove(self):\n pass",
"def remove(self, item: Any) -> BaseList:\n super().remove(item)\n return self",
"def _remove(self):\n pass",
"def remove_object(self, obj):\n pass",
"def remove(self):\n raise NotImplementedError",
"def remove(self):\n raise NotImplementedError",
"def remove(self, *args):\n return _libsbml.ListOfObjectives_remove(self, *args)",
"def pop(self):\n self.list.pop()",
"def remove(self, value):\n list.remove(self, value)\n self.emit('removed', value)\n self.emit('modified')",
"def remove(self, value):\n\n list.remove(self, value)\n self.changed()",
"def buttonRemove_Clicked( self, event ):\n\t\tindex = -1\n\t\tindex = self.listCtrlTreasure.GetNextItem(index, wx.LIST_NEXT_ALL, wx.LIST_STATE_SELECTED)\n\t\tif index != -1:\n\t\t\tself.getTreasure(index, True)\n\t\t\tself.refreshTreasureList()",
"def removePlayer(self, index):\n\n self.eloList.pop(index)\n self.idList.pop(index)",
"def remove_selected(self):\n if not self.tree_widget.selectedItems():\n self.configuration_widgets.logger.warning('Nothing has been selected. Please select an item and try again.')\n return\n _selected_items = self.tree_widget.selectedItems()\n root = self.tree_widget.invisibleRootItem()\n [(item.parent() or root).removeChild(item) for item in _selected_items]",
"def remove (self, item):\n pass",
"def obj_delete_list(self, request=None, **kwargs):\n self.get_collection(request).remove()",
"def deleteSelected(self):\n self.scene().deleteSelected()",
"def _remove_texture(self):\n # Retrieve the item that was selected\n key = self._listbox.get(ACTIVE)\n # Post a delete notice to the manager\n self._remove(key)",
"def remove_item(self):\n\n self.todo_scroll_cell.remove_selected_item()",
"def remove(self,object):\n if object in self.cell.objects:\n self.cell.objects.remove(object)\n else:\n self.cell.tempObjects.remove(object)\n self.cell.setChanged()",
"def removeItem(*args):",
"def removeItem(*args):",
"def remove(self, *args):\n return _libsbml.ListOf_remove(self, *args)"
]
| [
"0.7310277",
"0.70695",
"0.70034885",
"0.6878407",
"0.6866645",
"0.6846475",
"0.6840973",
"0.6840973",
"0.6840973",
"0.675142",
"0.6743878",
"0.67410135",
"0.67245823",
"0.67245823",
"0.67107946",
"0.67068976",
"0.6683341",
"0.667483",
"0.667084",
"0.66617125",
"0.66386074",
"0.6625341",
"0.6610795",
"0.6603924",
"0.6598562",
"0.65941304",
"0.6554787",
"0.6554746",
"0.6554746",
"0.65508735"
]
| 0.80012274 | 0 |
create scatter based on UI | def createScatter(self):
curv = str( self.curve_le.text() )
objCount = self.controlCount_sbx.value()
random = self.random_chb.isChecked()
useTip = self.useTips_chb.isChecked()
keepConn = self.keepConnected_chb.isChecked()
tangent = self.tangent_chb.isChecked()
groupIt = self.groupIt_chb.isChecked()
animated = self.animated_chb.isChecked()
objs = []
for index in xrange(self.objects_lw.count()):
objs.append( mn.Node( str ( self.objects_lw.item(index).text() ) ) )
crvScat.CurveScatter(
curve = crv.Curve( curv ),
objects = objs,
pointsCount = objCount,
useTips = useTip,
keepConnected = keepConn,
tangent = tangent,
rand = random,
groupit = groupIt,
animated = animated) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def draw(self):\n duplets = list(self._chunker(self.attr, 2))\n colors = self._set_colors(duplets)\n\n for i, duplet in enumerate(duplets, start=1):\n self.chart.make_scatter(self.source, duplet[0], duplet[1], 'circle', colors[i - 1])\n\n if i < len(duplets):\n self.create_plot_if_facet()\n\n self.reset_legend()",
"def create_scatter(df_orig, met, xaxis, **kwargs): \n if 'cato' in kwargs:\n cato = kwargs['cato']\n else:\n cato = False\n if 'col' in kwargs:\n col = kwargs['colors']\n else:\n col = Set1[9]\n if 'metric_attrib' in kwargs:\n metric_attrib = kwargs['metric_attrib']\n else:\n metric_attrib = False\n\n df = df_orig\n \n p = scatter_template(df, met, xaxis, cato)\n \n #Finds the unique list of the category and counts them\n if cato:\n uniq_cat = df[cato].unique()\n cat_cnt = len(uniq_cat)\n \n #Groups by the category and assigns each group to a ColumnDataSource \n indv_cat = df.groupby(cato)\n \n source = []\n for item in indv_cat:\n sub_source = ColumnDataSource(ColumnDataSource.from_df(item[1]))\n source.append(sub_source)\n\n i = 0\n while i < cat_cnt:\n p.circle(xaxis, \n met,\n size=10, \n color=col[i % len(col)], \n source=source[i])\n i += 1\n\n else:\n source = ColumnDataSource(df)\n p.circle(xaxis, \n met,\n size=10,\n source=source)\n \n if metric_attrib:\n p = create_limits(p, df, xaxis, metric_attrib)\n\n\n return p",
"def scattergrid(self, scatter_data, initial_feature):\n # features\n features = self.features # taken from MainGrid\n\n # Scatter\n scatter_row_sources, scatter_rows = self._create_scatter_rows(scatter_data, features, initial_feature)\n\n # Dropdown\n dropdown = self._create_features_dropdown(self._scatterplot_grid_dropdown)\n callbacks = self._create_features_dropdown_callbacks(scatter_row_sources)\n for callback in callbacks:\n dropdown.js_on_change(\"value\", callback)\n\n # output Grid\n grid = column(\n dropdown,\n Div(text=initial_feature, css_classes=[self._chosen_feature_scatter_title]),\n *scatter_rows,\n )\n return grid",
"def scatter(args):\n prism.scatter.run(\n input_fp=args.input,\n output_fp=args.output,\n width=args.width,\n height=args.height,\n scale=args.scale,\n font_family=args.font_family,\n )",
"def scatter_list(self, l):\n pass",
"def plot_scatter_points(self):\n self.plot(1)",
"def generate_var_scatter(self):\n pass",
"def inter_scatter(xdata,ydata, xlabel, ylabel, colours, title, datalabels):\n fig = plt.figure()\n ax = plt.subplot()\n ax.scatter(xdata, ydata, c = colours, picker=True, s = 5 )\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n x_lines = [0, -1, 1]\n y_lines = [1.3]\n for a in range (0,len(x_lines)):\n plt.axvline(x_lines[a], color='gray', linestyle='dashed', linewidth=1)\n for b in range(0,len(y_lines)):\n plt.axhline(y_lines[b], color='gray', linestyle='dashed', linewidth=1) # p-value of 0.05 is considered significant\n plt.grid(True)\n datalabels=datalabels\n def onpick(event):\n # step 1: take the index of the dot which was picked\n ind = event.ind\n # step 2: save the actual coordinates of the click, so we can position the text label properly\n label_pos_x = event.mouseevent.xdata\n label_pos_y = event.mouseevent.ydata\n # just in case two dots are very close, this offset will help the labels not appear one on top of each other\n offset = 0\n # if the dots are to close one to another, a list of dots clicked is returned by the matplotlib library\n for i in ind:\n # step 3: take the label for the corresponding instance of the data\n label = datalabels[i]\n # step 4: log it for debugging purposes\n print (\"index\", i, label)\n # step 5: create and add the text annotation to the scatterplot\n annotate(\n ax,\n label,\n label_pos_x + offset,\n label_pos_y + offset\n )\n # step 6: force re-draw\n ax.figure.canvas.draw_idle()\n # alter the offset just in case there are more than one dots affected by the click\n offset += 0.1\n # connect the click handler function to the scatterplot\n fig.canvas.mpl_connect('pick_event', onpick)\n plt.show()\n return fig",
"def bokeh_scatter_maker(df, x_col, y_col, c_col, title, hover_list, to_svg=False):\n\n source = ColumnDataSource(df)\n TOOLS = \"hover,save,pan,box_zoom,reset,wheel_zoom\"\n\n colors = list(reversed(brewer['Reds'][9]))#brewer['RdYlBu'][25]#[\"#75968f\", \"#a5bab7\", \"#c9d9d3\", \"#e2e2e2\", \"#dfccce\", \"#ddb7b1\", \"#cc7878\", \"#933b41\", \"#550b1d\"]\n mapper = LinearColorMapper(palette=colors, low=df[c_col].min(), high=df[c_col].max())\n\n fig = figure(title=title, plot_width=500, plot_height=500, tools=TOOLS, toolbar_location='below')\n\n #creating objects to be added to the figure\n vline = Span(location=0, dimension='height', line_color='red', line_width=1, line_dash='dotted')\n hline = Span(location=0, dimension='width', line_color='grey', line_width=1, line_dash='dotted')\n color_bar = ColorBar(color_mapper=mapper, major_label_text_font_size=\"5pt\",\n ticker=BasicTicker(desired_num_ticks=len(colors)),\n label_standoff=6, border_line_color=None, location=(0, 0))\n\n #adding all elements to the figure plot\n fig.renderers.extend([vline, hline])\n fig.grid.grid_line_color = None\n fig.background_fill_color = None\n fig.xaxis.axis_label = str(x_col)\n fig.yaxis.axis_label = str(y_col)\n fig.scatter(x=x_col,\n y=y_col,\n marker='circle', size=15,\n source=source,\n fill_color={'field': c_col, 'transform': mapper},\n line_color=\"navy\", alpha=0.5)\n fig.add_layout(color_bar, 'right')\n fig.select_one(HoverTool).tooltips = hover_list\n #fig.select_one(HoverTool).formatters={'Gene name' : 'printf', 'Ontology' : 'printf',# use 'printf' formatter}\n if to_svg:\n fig.output_backend = \"svg\"\n\n return fig",
"def display(self):\n scatter_plot(self.points, self.hull_points, self.color, self.title)",
"def scattered():\r\n c = 'A'\r\n i = 'FLR '\r\n data = chart_data(i, '2018-09-01', 12*5, c).set_index('date').sort_index()\r\n # print(data)\r\n data.plot(kind='scatter', x='Perc.idv', y='Perc.ids') # ,c='Centre')\r\n # plt.xticks(range(len(data)),data.index.tolist(),rotation=20)\r\n # plt.axhline(y=100, color='r', linestyle='-', label='Individual target')\r\n # plt.axhline(y=75, color='b', linestyle='-', label='Industry target')\r\n plt.title(centres[c] + ' ' + indic)\r\n plt.savefig('pic/' + c + indic + '.jpg')",
"def scatter(self):\n self.ax.scatter(\n x=self.df.loc[self.df[\"highlight_all\"] != 1, f\"order_{self.x}\"],\n y=self.df.loc[self.df[\"highlight_all\"] != 1, f\"order_{self.y}\"],\n color=self.df.loc[self.df[\"highlight_all\"] != 1, self.obs].apply(\n lambda x: self.bgcolors[0] if x == 0 else self.bgcolors[1]\n ),\n alpha=self.bg_alpha,\n zorder=0,\n linewidths=2,\n s=self.markersize,\n )\n # Separate scatter for highlights (z-order can't be conditional).\n self.ax.scatter(\n x=self.df.loc[self.df[\"highlight_all\"] == 1, f\"order_{self.x}\"],\n y=self.df.loc[self.df[\"highlight_all\"] == 1, f\"order_{self.y}\"],\n color=self.df.loc[self.df[\"highlight_all\"] == 1, self.obs].apply(\n lambda x: self.fgcolors[0] if x == 0 else self.fgcolors[1]\n ),\n alpha=self.fg_alpha,\n zorder=2,\n linewidths=1,\n edgecolor=\"black\",\n s=1.25 * self.markersize, # A little bigger to create perspective.\n )\n\n return self",
"def _create_scatterplot_pane(self):\n panel = wx.Panel(self, -1)\n\t\t\n self.fig_scatter = Figure()\n self.ax_scatter = self.fig_scatter.add_subplot(111)\n\t\t\n families = ['serif', 'sans-serif', 'cursive', 'fantasy', 'monospace']\n\t\t\n self.ax_scatter.set_xlabel(\"Mean\") #fontsize = 14, fontweight = 'semibold', name = families[2]\n self.ax_scatter.set_ylabel(\"Standard Deviation\")\n #self.ax_scatter.grid(color='black', alpha=0.5, linestyle='-', linewidth=1.0)\n self.ax_scatter.set_axis_bgcolor((0.8,0.8,0.8))\n #self.ax_scatter.set_ylim(0, 35)\n #self.ax_scatter.set_ylim(0, 90)\n\t\t\n self.canvas_scatter = FigureCanvas(panel, -1, self.fig_scatter)\n self.toolbar_scatter = NavigationToolbar(self.canvas_scatter)\n\t\t\n vbox = wx.BoxSizer(wx.VERTICAL)\n vbox.Add(self.canvas_scatter, 1, wx.EXPAND|wx.BOTTOM, 7)\n vbox.Add(self.toolbar_scatter, 0, wx.EXPAND)\n\t\t\n panel.SetSizer(vbox)\n vbox.Fit(panel)\n\t\t\n return panel",
"def add_scatter(\r\n self,\r\n name: str,\r\n data: Union[Dict, DataFrame],\r\n mapping: Dict = {\r\n \"x\": \"x\",\r\n \"y\": \"y\",\r\n \"z\": \"z\",\r\n \"c\": \"c\",\r\n \"cs\": \"cs\",\r\n \"s\": \"s\",\r\n \"labels\": \"labels\",\r\n \"knn\": \"knn\",\r\n },\r\n colormap: Union[str, Colormap, List[str], List[Colormap]] = \"plasma\",\r\n shader: str = \"sphere\",\r\n point_scale: float = 1.0,\r\n max_point_size: float = 100.0,\r\n fog_intensity: float = 0.0,\r\n saturation_limit: Union[float, List[float]] = 0.2,\r\n categorical: Union[bool, List[bool]] = False,\r\n interactive: bool = True,\r\n has_legend: bool = False,\r\n legend_title: Union[str, List[str]] = None,\r\n legend_labels: Union[Dict, List[Dict]] = None,\r\n min_legend_label: Union[str, float, List[str], List[float]] = None,\r\n max_legend_label: Union[str, float, List[str], List[float]] = None,\r\n series_title: Union[str, List[str]] = None,\r\n ondblclick: Union[str, List[str]] = None,\r\n selected_labels: Union[List, List[List]] = None,\r\n label_index: Union[int, List[int]] = 0,\r\n title_index: Union[int, List[int]] = 0,\r\n knn: List[List[int]] = [],\r\n ):\r\n\r\n if mapping[\"z\"] not in data:\r\n data[mapping[\"z\"]] = [0] * len(data[mapping[\"x\"]])\r\n\r\n if \"pandas\" in type(data).__module__:\r\n data = data.to_dict(\"list\")\r\n\r\n data_c = data[mapping[\"c\"]]\r\n data_cs = data[mapping[\"c\"]] if mapping[\"cs\"] in data else None\r\n data_s = data[mapping[\"s\"]] if mapping[\"s\"] in data else None\r\n\r\n # Check whether the color (\"c\") are strings\r\n if type(data_c[0]) is str:\r\n raise ValueError('Strings are not valid values for \"c\".')\r\n\r\n # In case there are multiple series defined\r\n n_series = 1\r\n if isinstance(data_c[0], Iterable):\r\n n_series = len(data_c)\r\n else:\r\n data_c = [data_c]\r\n\r\n if data_cs is not None and not isinstance(data_cs[0], Iterable):\r\n data_cs = [data_cs]\r\n\r\n if data_s is not None and not isinstance(data_s[0], Iterable):\r\n data_s = [data_s]\r\n\r\n # Make everything a list that isn't one (or a tuple)\r\n colormap = Faerun.make_list(colormap)\r\n saturation_limit = Faerun.make_list(saturation_limit)\r\n categorical = Faerun.make_list(categorical)\r\n legend_title = Faerun.make_list(legend_title)\r\n legend_labels = Faerun.make_list(legend_labels, make_list_list=True)\r\n min_legend_label = Faerun.make_list(min_legend_label)\r\n max_legend_label = Faerun.make_list(max_legend_label)\r\n series_title = Faerun.make_list(series_title)\r\n ondblclick = Faerun.make_list(ondblclick)\r\n selected_labels = Faerun.make_list(selected_labels, make_list_list=True)\r\n label_index = Faerun.make_list(label_index)\r\n title_index = Faerun.make_list(title_index)\r\n\r\n # If any argument list is shorter than the number of series,\r\n # repeat the last element\r\n colormap = Faerun.expand_list(colormap, n_series)\r\n saturation_limit = Faerun.expand_list(saturation_limit, n_series)\r\n categorical = Faerun.expand_list(categorical, n_series)\r\n legend_title = Faerun.expand_list(legend_title, n_series, with_none=True)\r\n legend_labels = Faerun.expand_list(legend_labels, n_series, with_none=True)\r\n min_legend_label = Faerun.expand_list(\r\n min_legend_label, n_series, with_none=True\r\n )\r\n max_legend_label = Faerun.expand_list(\r\n max_legend_label, n_series, with_none=True\r\n )\r\n series_title = Faerun.expand_list(series_title, n_series, with_value=\"Series\")\r\n ondblclick = Faerun.expand_list(ondblclick, n_series, with_none=True)\r\n selected_labels = 
Faerun.expand_list(selected_labels, n_series)\r\n label_index = Faerun.expand_list(label_index, n_series)\r\n title_index = Faerun.expand_list(title_index, n_series)\r\n\r\n # # The c and cs values in the data are a special case, as they should\r\n # # never be expanded\r\n # if type(data[mapping[\"c\"]][0]) is not list and prop_len > 1:\r\n # prop_len = 1\r\n # elif:\r\n # prop_len = len(data[mapping[\"c\"]])\r\n\r\n legend = [None] * n_series\r\n is_range = [None] * n_series\r\n min_c = [None] * n_series\r\n max_c = [None] * n_series\r\n\r\n for s in range(n_series):\r\n min_c[s] = float(min(data_c[s]))\r\n max_c[s] = float(max(data_c[s]))\r\n len_c = len(data_c[s])\r\n\r\n if min_legend_label[s] is None:\r\n min_legend_label[s] = min_c[s]\r\n\r\n if max_legend_label[s] is None:\r\n max_legend_label[s] = max_c[s]\r\n\r\n is_range[s] = False\r\n\r\n if legend_title[s] is None:\r\n legend_title[s] = name\r\n\r\n # Prepare the legend\r\n legend[s] = []\r\n if has_legend:\r\n legend_values = []\r\n if categorical[s]:\r\n if legend_labels[s]:\r\n legend_values = legend_labels[s]\r\n else:\r\n legend_values = [(i, str(i)) for i in sorted(set(data_c[s]))]\r\n else:\r\n if legend_labels[s]:\r\n legend_labels[s].reverse()\r\n for value, label in legend_labels[s]:\r\n legend_values.append(\r\n [(value - min_c[s]) / (max_c[s] - min_c[s]), label]\r\n )\r\n else:\r\n is_range[s] = True\r\n for i, val in enumerate(np.linspace(1.0, 0.0, 99)):\r\n legend_values.append(\r\n [val, str(data_c[s][int(math.floor(len_c / 100 * i))])]\r\n )\r\n\r\n cmap = None\r\n if isinstance(colormap[s], str):\r\n cmap = plt.cm.get_cmap(colormap[s])\r\n else:\r\n cmap = colormap[s]\r\n\r\n for value, label in legend_values:\r\n legend[s].append([list(cmap(value)), label])\r\n\r\n # Normalize the data to later get the correct colour maps\r\n if not categorical[s]:\r\n data_c[s] = np.array(data_c[s])\r\n data_c[s] = (data_c[s] - min_c[s]) / (max_c[s] - min_c[s])\r\n\r\n if mapping[\"cs\"] in data and len(data_cs) > s:\r\n data_cs[s] = np.array(data_cs[s])\r\n min_cs = min(data_cs[s])\r\n max_cs = max(data_cs[s])\r\n # Avoid zero saturation by limiting the lower bound to 0.1\r\n\r\n data_cs[s] = 1.0 - np.maximum(\r\n saturation_limit[s],\r\n np.array((data_cs[s] - min_cs) / (max_cs - min_cs)),\r\n )\r\n\r\n # Format numbers if parameters are indeed numbers\r\n if isinstance(min_legend_label[s], (int, float)):\r\n min_legend_label[s] = self.legend_number_format.format(\r\n min_legend_label[s]\r\n )\r\n\r\n if isinstance(max_legend_label[s], (int, float)):\r\n max_legend_label[s] = self.legend_number_format.format(\r\n max_legend_label[s]\r\n )\r\n\r\n data[mapping[\"c\"]] = data_c\r\n if data_cs:\r\n data[mapping[\"cs\"]] = data_cs\r\n\r\n if data_s:\r\n data[mapping[\"s\"]] = data_s\r\n\r\n self.scatters[name] = {\r\n \"name\": name,\r\n \"shader\": shader,\r\n \"point_scale\": point_scale,\r\n \"max_point_size\": max_point_size,\r\n \"fog_intensity\": fog_intensity,\r\n \"interactive\": interactive,\r\n \"categorical\": categorical,\r\n \"mapping\": mapping,\r\n \"colormap\": colormap,\r\n \"has_legend\": has_legend,\r\n \"legend_title\": legend_title,\r\n \"legend\": legend,\r\n \"is_range\": is_range,\r\n \"min_c\": min_c,\r\n \"max_c\": max_c,\r\n \"min_legend_label\": min_legend_label,\r\n \"max_legend_label\": max_legend_label,\r\n \"series_title\": series_title,\r\n \"ondblclick\": ondblclick,\r\n \"selected_labels\": selected_labels,\r\n \"label_index\": label_index,\r\n \"title_index\": title_index,\r\n }\r\n\r\n 
self.scatters_data[name] = data",
"def generate_scatter(data, x, y, data_format=\"wide\", selected_points=[], **kwargs):\n if data_format == \"wide\":\n fig = go.Figure()\n\n # Plot only if 2 variables are chosen, show an empty plot otherwise\n if len(y) > 0 and len(x) > 0:\n \n # This is done to provide a fillvalue for the zip_longest funtion\n if len(x) > len(y):\n previous = y[0]\n else:\n previous = x[0]\n\n\n \"\"\"colors = data[\"value\"][data[\"variable\"] == \"SARS-Cov-2 exam result\"].to_numpy()\n colors = np.where(colors==\"positive\", \"red\", colors)\n colors = np.where(colors==\"negative\", \"blue\", colors)\"\"\"\n\n # Loop through the pairs of attributes and add traces to the graph\n # zip_longest makes sure the number of pairs correspond to the lenght of the lognest of two argumens\n # The shorter argument is paired with the previous argument\n for attribute_x, attribute_y in zip_longest(x, y, fillvalue=previous):\n fig.add_trace(go.Scatter(\n x=data[\"value\"][data[\"variable\"] == attribute_x],\n y=data[\"value\"][data[\"variable\"] == attribute_y],\n name=attribute_x + \"-\" + attribute_y,\n mode='markers',\n )\n )\n fig.update_layout(legend=dict(\n orientation=\"h\",\n yanchor=\"bottom\",\n y=1.02,\n xanchor=\"right\",\n x=1\n ))\n return fig\n\n elif data_format == \"long\":\n fig = px.scatter(data, x=x, y=y, **kwargs)\n # Highlight the selected point with yellow\n if selected_points:\n fig.update_traces(selectedpoints=selected_points, selected={'marker': { 'color': 'yellow' }})\n return fig",
"def plot_data(self):",
"def scatter_plot(self):\n\n X = self.reduce_dimension(n_components=2)\n\n plt.figure()\n plt.scatter(X[:,0], X[:,1])\n\n return plt",
"def scatter_template(df, met, xaxy, cato):\n scatter_args = {'plot_height' : 625,\n 'plot_width' : 950,\n 'tools' : ['pan, wheel_zoom', 'reset', 'save'],\n 'active_scroll' : 'wheel_zoom'}\n \n tooltips = [(met, '@' + met)]\n \n if 'SerialNumber' in df.columns:\n tooltips.insert(0, ('Serial Number', '@SerialNumber'))\n \n #Looks at the last entry in the xaxis column to see if it is a date\n #If it is a date then create a date column as a string to display in \n #the tooltip\n if isinstance(df[xaxy].iloc[-1], pd.tslib.Timestamp):\n filt = lambda x:str(x)[:-9]\n df['Date_str'] = df[xaxy].apply(filt)\n scatter_args['x_axis_type'] = 'datetime'\n tooltips.append((xaxy, '@Date_str'))\n else:\n tooltips.append((xaxy, '@' + xaxy))\n\n #Title and tooltips depending on if there are categories\n if cato:\n tooltips.insert(2, (cato, '@' + cato))\n scatter_args['title'] = \"{0} by {1}\".format(met, cato) \n else:\n scatter_args['title'] = \"{0}\".format(met)\n\n hover = HoverTool(tooltips = tooltips)\n scatter_args['tools'].append(hover)\n \n p = figure(**scatter_args)\n p.xaxis.axis_label = xaxy\n p.yaxis.axis_label = met \n \n return p",
"def _generate_scatter_plots(self):\n\n for xcolname, ycolname in itertools.product(self.xaxes, self.yaxes):\n _LOG.info(\"Generating scatter plot: %s vs %s.\", xcolname, ycolname)\n\n pinfo = self._add_pinfo(xcolname, ycolname, is_hist=False)\n markers = itertools.cycle(_SCATTERPLOT_MARKERS)\n gobjs = []\n\n for res in self.rsts:\n df = self._reduce_df_density(res, xcolname, ycolname)\n\n # How many datapoints were included into the scatter plot.\n pinfo[\"sp_datapoints_cnt\"] = len(df.index)\n\n text = self._create_hover_text(res, df, pinfo)\n marker = {\"size\" : 4, \"symbol\" : next(markers), \"opacity\" : self._opacity}\n try:\n gobj = plotly.graph_objs.Scattergl(x=self._base_unit(df, xcolname),\n y=self._base_unit(df, ycolname),\n opacity=self._opacity,\n text=text, mode=\"markers\",\n name=res.reportid, marker=marker)\n except Exception as err:\n raise Error(f\"failed to create scatter plot '{ycolname}-vs-{xcolname}':\\n{err}\")\n gobjs.append(gobj)\n\n self._create_diagram(gobjs, pinfo)",
"def fun_get(self):\n\n scat = self.xtl.Scatter\n scat._scattering_type = self.type.get()\n scat._energy_kev = self.energy_kev.get()\n scat._scattering_theta_offset = self.theta_offset.get()\n scat._scattering_min_theta = self.theta_min.get()\n scat._scattering_max_theta = self.theta_max.get()\n scat._scattering_min_twotheta = self.twotheta_min.get()\n scat._scattering_max_twotheta = self.twotheta_max.get()\n scat._powder_units = self.powder_units.get()\n\n if self.orientation.get() == 'Reflection':\n scat._scattering_specular_direction[0] = self.direction_h.get()\n scat._scattering_specular_direction[1] = self.direction_k.get()\n scat._scattering_specular_direction[2] = self.direction_l.get()\n elif self.orientation.get() == 'Transmission':\n scat._scattering_parallel_direction[0] = self.direction_h.get()\n scat._scattering_parallel_direction[1] = self.direction_k.get()\n scat._scattering_parallel_direction[2] = self.direction_l.get()",
"def setup_plot(self):\n self.scat = self.ax.scatter(self.centers[0,1], self.centers[0,2], c=self.colorList[0], s=self.radii[0,1])\n # For FuncAnimation's sake, we need to return the artist we'll be using\n # Note that it expects a sequence of artists, thus the trailing comma.\n return self.scat,",
"def setup_plot(self):\n self.scat = self.ax.scatter(self.centers[0,1], self.centers[0,2], c=self.colorList[0], s=self.radii[0,1])\n # For FuncAnimation's sake, we need to return the artist we'll be using\n # Note that it expects a sequence of artists, thus the trailing comma.\n return self.scat,",
"def bokeh_multi_scatter(df, x_col, y_cols, y_label, title, to_svg=False):\n\n source = ColumnDataSource(df)\n TOOLS = \"hover,save,pan,box_zoom,reset,wheel_zoom\"\n\n fig = figure(title=title, plot_width=500, plot_height=500, tools=TOOLS, toolbar_location='below')\n\n #adding all elements to the figure plot\n fig.grid.grid_line_color = None\n fig.background_fill_color = None\n fig.xaxis.axis_label = str(x_col)\n fig.yaxis.axis_label = str(y_label)\n for col in y_cols:\n fig.scatter(x=x_col,\n y=col,\n marker='circle', size=10,\n source=source,\n )\n\n if to_svg:\n fig.output_backend = \"svg\"\n\n return fig",
"def show_scatter(self):\n plt.scatter(self.a1[:, 0], self.a1[:, 1], c=\"red\", alpha=0.5, s=10)\n plt.scatter(self.a2[:, 0], self.a2[:, 1], c=\"blue\", alpha=0.5, s=10)\n plt.scatter(0, 0, marker=\"D\", c=\"black\", alpha=0.8)\n plt.scatter(2, 2, marker=\"D\", c=\"black\", alpha=0.8)\n plt.show()",
"def _create_scatter_source(self, scatter_data, x, y):\n source = ColumnDataSource(scatter_data)\n # additional 2 columns for x and y in plots\n source.data.update(\n {\n self._scatter_x_axis: source.data[x],\n self._scatter_y_axis: source.data[y]\n }\n )\n return source",
"def main_scatterplot(self) -> Component:\n logger.debug('Generating main scatterplot.')\n\n x_dropdown = dcc.Dropdown('overview_main_scatterplot_x_dropdown', options=[\n {'label': 'Distance', 'value': 'distance'},\n {'label': 'Average speed', 'value': 'mean_speed'},\n {'label': 'Average heart rate', 'value': 'mean_hr'},\n {'label': 'Duration', 'value': 'duration'}\n ], value='distance')\n y_dropdown = dcc.Dropdown('overview_main_scatterplot_y_dropdown', options=[\n {'label': 'Distance', 'value': 'distance'},\n {'label': 'Average speed', 'value': 'mean_speed'},\n {'label': 'Average heart rate', 'value': 'mean_hr'},\n {'label': 'Duration', 'value': 'duration'}\n ], value='mean_speed')\n\n button = dbc.Button('View selected activities', id='overview_main_scatterplot_button')\n link = html.A(button, href='/activities', id='overview_main_scatterplot_link', target='_blank')\n\n config_row = dbc.Row([\n dbc.Col(html.Div(['x axis:', x_dropdown])),\n dbc.Col(html.Div(['y axis:', y_dropdown])),\n dbc.Col(link, width='auto')\n ], justify='center')\n\n graph = dcc.Graph(\n id='overview_main_scatterplot',\n figure=self.main_scatter_fig('distance', 'mean_speed')\n )\n\n return html.Div([\n html.H2('Scatter plot'), config_row, graph\n ])",
"def check(self):\n self.initial_scatter = ax.scatter(self.skel_points[:, 0],\n self.skel_points[:, 1],\n self.skel_points[:, 2], s=10, c='r')\n self.cell_points = self.get_cell_xyz()\n ax.scatter(self.cell_points[::50, 0],\n self.cell_points[::50, 1],\n self.cell_points[::50, 2], s=3, c='b', alpha=.1)\n ax.set_xlabel('X (um)')\n ax.set_ylabel('Y (um)')\n ax.set_zlabel('Z (um)')",
"def scatter(xarr, yarr, xlbl=None, ylbl=None, pw=600, ph=400):\n p = figure(plot_width=pw, plot_height=ph)\n # Model\n p.circle(xarr, yarr, color='black')#, legend='data')\n # Label\n if xlbl is not None:\n p.xaxis.axis_label = xlbl\n if ylbl is not None:\n p.yaxis.axis_label = ylbl\n # Show\n show(p)",
"def plot(self):\n\t\tif (2 <= len(self.X) <= 3):\n\t\t\tvDataFrame(self.name, self.cursor).scatter(columns = self.X, catcol = \"dbscan_cluster\", max_cardinality = 100, max_nb_points = 10000)\n\t\telse:\n\t\t\traise ValueError(\"Clustering Plots are only available in 2D or 3D\")",
"def partial_visualize_in_2d(self, cluster_index=[5,12,35,44,64,75,81]):\n for i in cluster_index:\n list_x = []\n list_y = []\n for j in self.cluster[i]:\n list_x.append(self.code[0][j,0])\n list_y.append(self.code[0][j,1])\n plt.scatter(list_x,list_y, label=self.skill[self.present_skill[i]])\n plt.legend()\n plt.show()\n return"
]
| [
"0.70726407",
"0.67823887",
"0.65958464",
"0.6529031",
"0.6512134",
"0.6497885",
"0.6460971",
"0.64456946",
"0.6424747",
"0.6404196",
"0.64007133",
"0.63893604",
"0.6388845",
"0.6370495",
"0.6369845",
"0.63139355",
"0.6273838",
"0.6234437",
"0.622965",
"0.62135124",
"0.6179047",
"0.6179047",
"0.61642826",
"0.61423284",
"0.61402464",
"0.61379755",
"0.6098792",
"0.609674",
"0.6086206",
"0.60451007"
]
| 0.76700217 | 0 |
Sets the cluster_node_id of this StateSyncNode. | def cluster_node_id(self, cluster_node_id):
self._cluster_node_id = cluster_node_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cluster_id(self, cluster_id):\n self._cluster_id = cluster_id",
"def set_cluster(self, cluster_id=None):\n cluster = objects.Cluster.get_by_uid(\n cluster_id, fail_if_not_found=False\n )\n if cluster:\n self._cluster = cluster\n self._set_task(self.EXPECTED, None)\n self._set_task(\n self.CURRENT,\n objects.TransactionCollection.get_last_succeed_run(cluster)\n )\n return True\n return False",
"def cluster_uuid(self, cluster_uuid):\n\n self._cluster_uuid = cluster_uuid",
"def cluster_num(self, cluster_num):\n\n self._cluster_num = cluster_num",
"def set_node_id(self, node_id):\n self._node_id = node_id",
"def set_chassis_cluster_enable(self, cluster_id, node_id):\n return self.dev.rpc.set_chassis_cluster_enable(\n cluster_id=cluster_id, node=node_id,\n reboot=True, normalize=True)",
"def node_id(self, node_id):\n\n self._node_id = node_id",
"def node_id(self, node_id: int):\r\n self._node_id = node_id",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n if self.local_vars_configuration.client_side_validation and cluster is None: # noqa: E501\n raise ValueError(\"Invalid value for `cluster`, must not be `None`\") # noqa: E501\n\n self._cluster = cluster",
"def cluster_name(self, cluster_name):\n\n self._cluster_name = cluster_name",
"def cluster_num_in(self, cluster_num_in):\n\n self._cluster_num_in = cluster_num_in",
"def cluster_type(self, cluster_type):\n\n self._cluster_type = cluster_type",
"def cluster_id(self):\n return self._cluster_id",
"def storage_cluster_id(self, storage_cluster_id):\n if self.local_vars_configuration.client_side_validation and storage_cluster_id is None: # noqa: E501\n raise ValueError(\"Invalid value for `storage_cluster_id`, must not be `None`\") # noqa: E501\n\n self._storage_cluster_id = storage_cluster_id",
"def cluster_num_lte(self, cluster_num_lte):\n\n self._cluster_num_lte = cluster_num_lte",
"def node_id(self, node_id):\n if node_id is None:\n raise ValueError(\"Invalid value for `node_id`, must not be `None`\") # noqa: E501\n\n self._node_id = node_id",
"def ion_node_id(self, ion_node_id):\n\n self._ion_node_id = ion_node_id",
"def __init_cluster(self, cluster):\n self.___init_nodes(cluster)\n self.__clusterop.async_rebalance(\n cluster.get_nodes(),\n cluster.get_nodes()[1:],\n []).result()",
"def cluster_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"cluster_id\")",
"def cluster_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"cluster_id\")",
"def cluster_myid(self, target_node: \"TargetNodesT\") -> ResponseT:\n return self.execute_command(\"CLUSTER MYID\", target_nodes=target_node)",
"def set_node(self, node_id):\n info = self._get_info(self.EXPECTED)\n if node_id in info:\n self._node_id = node_id\n return True\n return False",
"def cluster_num_gt(self, cluster_num_gt):\n\n self._cluster_num_gt = cluster_num_gt",
"def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")"
]
| [
"0.76118255",
"0.68488646",
"0.6685833",
"0.6680353",
"0.665979",
"0.64597934",
"0.64168763",
"0.63185054",
"0.62717867",
"0.62717867",
"0.62717867",
"0.62717867",
"0.62717867",
"0.62717867",
"0.6237502",
"0.61332864",
"0.5897969",
"0.5849075",
"0.58098924",
"0.5744126",
"0.5724699",
"0.5715012",
"0.56845224",
"0.5587974",
"0.5582991",
"0.5582991",
"0.5573245",
"0.5567356",
"0.5537323",
"0.55212367"
]
| 0.8481871 | 0 |
Sets the api_listen_ip of this StateSyncNode. | def api_listen_ip(self, api_listen_ip):
self._api_listen_ip = api_listen_ip | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def async_set_slave_ip(self, slave_ip):\n self._slave_ip = slave_ip",
"def set_ip(self, ip: str, host_addr: str) -> None:\n self.config[\"linkIp\"] = ip\n self.config[\"ngapIp\"] = ip\n self.config[\"gtpIp\"] = ip",
"def api(self, api):\n if self._running:\n raise ValueError('API cannot be modified while the server is running')\n\n self._api = api",
"def setServerip(self):\n\t\tself.serverip = self.settings.getKeyValue('serverip')\n\t\tself.socket.send('setenv serverip ' + self.serverip+'\\r', 1)\n\t\treturn None",
"def ip(self, ip: str):\n\n self._ip = ip",
"def setIpaddr(self):\n\t\tself.ipaddr = self.settings.getKeyValue('ipaddr')\n\t\tself.socket.send('setenv ipaddr ' + self.ipaddr+'\\r', 1)\t\t\n\t\treturn None",
"def ip(self, ip):\n\n self._ip = ip",
"def ip(self, ip):\n\n self._ip = ip",
"def setIPAddress(self,listenIP,listenPort,broadcastIP,broadcastPort):\n # Listener\n if self.listener.close.isSet():\n if listenIP:\n self.listenerIP = listenIP\n else:\n self.listenerIP = _RobotCommunicator.LOCAL_IP\n if listenPort:\n self.listenerPort = listenPort\n else:\n self.listenerPort = _RobotCommunicator.DEFAULT_LISTEN_PORT\n self.listener = _RobotListener(self.listenerIP,self.listenerPort,\n self.buffer)\n\n # Broadcaster\n if broadcastIP:\n self.broadcasterIP = broadcastIP\n else:\n self.broadcasterIP = _RobotCommunicator.NETWORK_BROADCAST_IP\n if broadcastPort:\n self.broadcasterPort = broadcastPort\n else:\n self.broadcasterPort = _RobotCommunicator.DEFAULT_BROADCAST_PORT\n self.broadcaster = RobotBroadcaster(self.broadcasterIP,\n self.broadcasterPort,self.buffer)",
"def ip(self, ip):\n self._ip = ip\n return self",
"def set_ip(self, party_ip) -> None:\n\n self._ip = party_ip",
"def __init__(self, cluster_node_id=None, api_listen_ip=None): # noqa: E501 # noqa: E501\n\n self._cluster_node_id = None\n self._api_listen_ip = None\n self.discriminator = None\n\n if cluster_node_id is not None:\n self.cluster_node_id = cluster_node_id\n if api_listen_ip is not None:\n self.api_listen_ip = api_listen_ip",
"def add_http_server_listen(self, value):\n path = [u\"http\", u\"server\", u\"listen\"]\n self.add_config_item(self._nodeconfig, value, path)",
"def setIP(self, idx, ip):\n self.ip[int(idx)-1] = ip",
"def virtual_router_ip(self, virtual_router_ip):\n self._virtual_router_ip = virtual_router_ip",
"def remoteip(self, remoteip) :\n\t\ttry :\n\t\t\tself._remoteip = remoteip\n\t\texcept Exception as e:\n\t\t\traise e",
"def api_access(self, api_access):\n\n self._api_access = api_access",
"def listening(self, listening):\n\n self._listening = listening",
"def setIP( self, intf, ip, prefixLen=8 ):\n ipSub = '%s/%d' % ( ip, prefixLen )\n result = self.cmd( 'ifconfig', intf, ipSub, 'up' )\n self.ips[ intf ] = ip\n return result",
"def set_ip_adresses(self):\n # unfold a config tree for the current suffix, if any\n for interface, details in self.interfaces.items():\n for k, v in details.items():\n if k == 'address':\n ip, prefix = address_to_ip_prefix(v)\n self.interfaces[interface]['ip_address'] = ip\n self.interfaces[interface]['ip_prefix'] = prefix\n break\n if interface == 'wan':\n self.ip_address = ip\n if interface == 'ha_sync':\n self.ha_sync_ip_address = ip",
"async def put_listen_key(self, listen_key):\n params = {\n \"listenKey\": listen_key\n }\n success, error = await self.request(\"PUT\", \"/api/v1/userDataStream\", params=params)\n return success, error",
"def add_ip(self, inf, ip):\n self.interfaces[inf]['ip'] = ip",
"def SetAPIKey(self, api_key):\n self._api_key = api_key",
"def SetAPIKey(self, api_key):\n self._api_key = api_key",
"def ip_address(self, ip_address):\n\n self._ip_address = ip_address",
"def ip_address(self, ip_address):\n\n self._ip_address = ip_address",
"def ip_address(self, ip_address):\n\n self._ip_address = ip_address",
"async def put_listen_key(self, listen_key):\n uri = \"/fapi/v1/listenKey\"\n params = {\n \"listenKey\": listen_key,\n \"timestamp\": tools.get_cur_timestamp_ms()\n }\n success, error = await self.request(\"PUT\", uri, params=params, auth=True)\n return success, error",
"def fusion_api_set_default_api_version(self, api=None):\n return self.version.set(api=api)",
"def api_token(self, api_token):\n\n self._api_token = api_token"
]
| [
"0.5769698",
"0.55941546",
"0.55758727",
"0.5282456",
"0.52659065",
"0.5219188",
"0.5210068",
"0.5210068",
"0.5200352",
"0.49966982",
"0.49550515",
"0.49326894",
"0.492381",
"0.48899937",
"0.48827827",
"0.48702243",
"0.4863016",
"0.48214495",
"0.4695518",
"0.46883398",
"0.46840397",
"0.46835428",
"0.46542892",
"0.46542892",
"0.464966",
"0.464966",
"0.464966",
"0.46289027",
"0.46111065",
"0.45920956"
]
| 0.8451454 | 0 |
Suggest a tweak based on an encoding result. For fixed QP, suggest increasing minq when bitrate is too high, otherwise suggest decreasing it. If a parameter is already at the limit, go to the next one. | def SuggestTweak(self, encoding):
if not encoding.result:
return None
parameters = self._SuggestTweakToName(encoding, 'fixed-q')
if not parameters:
parameters = self._SuggestTweakToName(encoding, 'gold-q')
if not parameters:
parameters = self._SuggestTweakToName(encoding, 'key-q')
if not parameters:
return None
parameters = self.ConfigurationFixups(parameters)
return encoder.Encoding(encoder.Encoder(encoding.context, parameters),
encoding.bitrate, encoding.videofile) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _SuggestTweakToName(self, encoding, name):\n parameters = encoding.encoder.parameters\n value = int(parameters.GetValue(name))\n new_value = None\n if encoding.result['bitrate'] > encoding.bitrate:\n delta = 1\n new_value = 63\n candidates = range(value + 1, 64)\n else:\n delta = -1\n new_value = 0\n candidates = range(value - 1, -1, -1)\n # The range of Q values is from 0 to 63.\n if value + delta > 63:\n print name, 'maxed out at 63'\n return None # Already maxed out\n if value + delta < 0:\n print name, 'mined out at 0'\n return None # Already at bottom\n # If a previous result returned a score (which will be lower, since\n # the starting point is the highest score), try the middle value\n # between this and that. If none exists, go for the extreme values.\n for search_value in candidates:\n temp_params = parameters.ChangeValue(name, str(search_value))\n temp_params = self.ConfigurationFixups(temp_params)\n temp_encoder = encoder.Encoder(encoding.context, temp_params)\n temp_encoding = encoder.Encoding(temp_encoder, encoding.bitrate,\n encoding.videofile)\n temp_encoding.Recover()\n if temp_encoding.Result():\n print name, 'found scored value', search_value\n new_value = int((value + search_value) / 2)\n if new_value in (value, search_value):\n print name, 'already tried', value, '+1'\n return None # Already tried one-step-up\n break\n\n print name, \"suggesting value\", new_value\n parameters = parameters.ChangeValue(name, str(new_value))\n parameters = self.ConfigurationFixups(parameters)\n return parameters",
"def rechargeHint(self):\n if self.hints < 8:\n self.hints = self.hints + 1",
"def get_multiplier(quality):\n\n if quality == \"low\":\n return 5\n elif quality == \"medium\":\n return 6\n elif quality == \"good\":\n return 7\n elif quality == \"high\":\n return 8\n return 6",
"def increment_quality(self, increment_unit):\n if self.quality > self.min_quality and self.quality < self.max_quality:\n self.quality = self.quality + increment_unit\n return self.quality",
"def _tune(acc_rate, proposed, step):\n if step.tune_scaling:\n # a and b after Muto & Beck 2008.\n a = 1 / 9\n b = 8 / 9\n step.scaling = (a + b * acc_rate) ** 2\n if step.tune_steps:\n acc_rate = max(1.0 / proposed, acc_rate)\n step.n_steps = min(step.max_steps, 1 + int(np.log(step.p_acc_rate) / np.log(1 - acc_rate)))",
"def _choose_best_option(self):",
"def enforce_quality_limits(self):\n if self.orig_quality <= 50:\n if self.quality >= 50:\n self.quality = 50",
"def mostlikelycodeword(self):\n\n # Add your code here\n stoping_int = None # Replace\n best, first_index, last_index = None, None, None # Replace all three with an integer\n for i in range(stoping_int):\n for j in range(None, stoping_int): # Replace None. \n current = self.quality(None, None)\n if None > None # Replace both Nones\n best, first_index, last_index = current, i, j\n return self.preamble[None:None]",
"def update_quality():\n global items, converted_items\n if not converted_items:\n items = convert_items(items)\n converted_items = True\n for item in items:\n item.update_q()",
"def useHint(self):\n self.hints = self.hints - 1",
"def fit_bpm_in_window(bpm_suggestion):\n \n if bpm_suggestion is not None:\n while bpm_suggestion < (BPM_WINDOW_MIN):\n bpm_suggestion = bpm_suggestion * 2\n while bpm_suggestion > (BPM_WINDOW_MAX):\n bpm_suggestion = bpm_suggestion / 2\n return bpm_suggestion",
"def do_reduction_placzek_corrections(q,sqfg,bgd,rescale_bgd=1.0,plaz_type=None,\n gauss_damp=False,gw=20.0,qmax=None,qmin=None,\n rmin=0.0,rmax=20.0,delr=.02\n ,qminpla=10.0,qmaxpla=30.0,ndeg=2, return_correction = False,\n skip_bgd = False, return_final_sq = False, force_qmax_type='Off'):\n #first, make netsq if bgd and/or damping is present\n q = np.array(q)\n sqfg = np.array(sqfg)\n bgd = np.array(bgd)\n\n if skip_bgd:\n netsq = sqfg\n else:\n netsq = sqfg - bgd*rescale_bgd\n\n\n if gauss_damp:\n netsq = netsq*gauss(q,gw,0)\n\n\n if force_qmax_type == 'Force Data (PreCorrection)':\n qcut, sqcut = cut_data(q,netsq,qmax-.5,qmax)\n mean_sqmax = np.mean(sqcut)\n netsq -= mean_sqmax\n\n #now, apply a correction if requested\n if plaz_type != None:\n if plaz_type == 'Polynomial' or plaz_type == 'poly' or plaz_type == 'ndeg':\n sq_poly_fit = fit_ndeg_to_sq(q,netsq,ndeg=ndeg,qmin=qminpla,qmax=qmaxpla)\n this_fit = sq_poly_fit\n elif plaz_type == 'Pseudo-Voight' or plaz_type == 'pv' or plaz_type == 'hydro':\n pv_fit = fit_pv_to_sq(q,netsq,qmin=qminpla,qmax=qmaxpla)\n this_fit = pv_fit\n elif plaz_type == 'PVoight + n0' or plaz_type == 'pvndeg0':\n pv_n0_fit = fit_pv_n0_to_sq(q,netsq,qmin=qminpla,qmax=qmaxpla)\n this_fit = pv_n0_fit\n elif plaz_type == 'PVoight + n1' or plaz_type == 'pvndeg1':\n pv_n1_fit = fit_pv_n1_to_sq(q,netsq,qmin=qminpla,qmax=qmaxpla)\n this_fit = pv_n1_fit\n elif plaz_type == 'PVoight + n2' or plaz_type == 'pvndeg2':\n pv_n2_fit = fit_pv_n2_to_sq(q,netsq,qmin=qminpla,qmax=qmaxpla)\n this_fit = pv_n2_fit\n else:\n print (\"I don't know that correction type, sorry\")\n this_fit = np.zeros(len(q))\n else:\n this_fit = np.zeros(len(q))\n\n if force_qmax_type == 'Force Data' or force_qmax_type == 'Force Both (Independent)':\n qcut, sqcut = cut_data(q,netsq,qmax-.5,qmax)\n mean_sqmax = np.mean(sqcut)\n netsq -= mean_sqmax\n if force_qmax_type == 'Force Correction' or force_qmax_type == 'Force Both (Independent)':\n qcut, sqcut = cut_data(q,this_fit,qmax-.5,qmax)\n mean_sqmax = np.mean(sqcut)\n this_fit -= mean_sqmax\n if force_qmax_type == 'ReCorrection':\n qcut, sqcut = cut_data(q,netsq-this_fit,qmax-.5,qmax)\n mean_sqmax = np.mean(sqcut)\n this_fit += mean_sqmax\n\n netsq = netsq - this_fit\n\n if return_correction:\n return this_fit\n\n if return_final_sq:\n return netsq\n\n #finally, generate PDF\n r,gr = make_gr_from_sq(q,netsq,qmin=qmin,qmax=qmax,rmin=rmin,rmax=rmax,delr=delr)\n\n return r,gr",
"def fit(self):\n self._minuit_problem.migrad() # run optimizer\n self._status = 0 if self._minuit_problem.migrad_ok() else 1",
"def quasi_optimalityTV(f, lam_init = 2.0, q = 0.9):\n \n lam = lam_init\n max_iter = 50\n error = np.zeros(max_iter)\n #alt_error = np.zeros(max_iter)\n u_old = ChambollePock_denoise(f,lam, tau = 0.5, sig = 0.25, acc = True, tol = 1.0e-5)\n for i in range(1, max_iter):\n lam = lam_init * (q ** i)\n u_new = ChambollePock_denoise(f,lam, tau = 0.5, sig = 0.25, acc = True, tol = 1.0e-5)\n error[i] = np.linalg.norm(u_old - u_new)\n #alt_error[i] = np.linalg.norm(u_old - u_new) /abs(lam_init*(q ** i - q ** (i-1)))\n u_old = np.copy(u_new)\n\n #plt.plot(error)\n #plt.plot(alt_error)\n #plt.show()\n opt_idx = np.argmin(error[error != 0.0])\n t = 1.0 / (1.0 + lam_init * (q ** opt_idx))\n lam = lam_init * (q ** opt_idx)\n u= ChambollePock_denoise(f,lam, tau = 0.5, sig = 0.25, acc = True, tol = 1.0e-5)\n \n return u, t",
"def update (self):\n\t\tidx = self.idx\n\t\tC = self.C[idx]\t\t# choice\n\t\tPE = self.PE[idx]\t# choice PE\n\t\talpha = self.alpha\t# learning rate\n\n\t\t# don't need to update anything for UCB\n\t\tif self.UCB_samplemean:\n\t\t\treturn\n\n\t\tif not self.gamble:\n\t\t\t# carry over values for the unselected options\n\t\t\tself.Q[idx+1,:] = self.Q[idx,:]\n\t\t\t# check if two learning rates (pos/neg)\n\t\t\tif isinstance(alpha,float):\n\t\t\t\tself.Q[idx+1,C] = self.Q[idx,C] + alpha*PE\n\t\t\telse:\n\t\t\t\tif PE > 0:\n\t\t\t\t\tself.Q[idx+1,C] = self.Q[idx,C] + alpha[0]*PE\n\t\t\t\telse:\n\t\t\t\t\tself.Q[idx+1,C] = self.Q[idx,C] + alpha[1]*PE\n\n\t\telse:\n\t\t\t# check if two learning rates (pos/neg)\n\t\t\t# PE = 0 if gamble isn't chosen\n\t\t\tif isinstance(alpha,float):\n\t\t\t\tself.Q[idx+1] = self.Q[idx] + alpha*PE\n\t\t\telse:\n\t\t\t\tif PE > 0:\n\t\t\t\t\tself.Q[idx+1] = self.Q[idx] + alpha[0]*PE\n\t\t\t\telse:\n\t\t\t\t\tself.Q[idx+1] = self.Q[idx] + alpha[1]*PE",
"def rsrq_quality_rating(value, unit):\n\n if unit != \"dB\":\n raise ValueError(\"Unsupported unit '{:}'\".format(unit))\n\n rating = 0\n if value > -6:\n rating = 4\n elif -6 >= value > -9:\n rating = 3\n elif -9 >= value > -16:\n rating = 2\n elif value <= -16:\n rating = 1\n\n return rating",
"def setup(self, use_warm_start, settings={}):\n assert self.opt_type in QP_COST, \"OSQP cannot solve this type of problem\"\n self.use_warm_start = use_warm_start\n self._setup_input = settings\n if self.opt_type in CONSTRAINED_OPT:\n self._setup_input[\"u\"] = np.inf * np.ones(self.opt.nk + self.opt.na)\n self._reset_parameters()\n return self",
"def _update_status(self):\n if any([abs(v) > LIMITS[i] for i, v in enumerate(self.state)]):\n self.terminal = True\n elif abs(self.q[3]) < LIMITS[9]:\n self.terminal = True\n elif self.steps + 1 >= self.max_steps:\n self.terminal = True",
"def do_tune(self, cmd):\n self.params.set('tune', float(cmd) / 16.0, 'global')",
"def improve(update, close, guess = 1):\n\twhile not close(guess):\n\t\tguess = update(guess)\n\treturn guess",
"def tweak_q(self, q):\n self._q = q\n self.reset()",
"def get_small_hint():\n inp = option_text('Input \"small\" hint (leave blank for no hint)')\n add_to_collected('small hint', inp)\n OPTIONS['small-hint'] = inp\n return",
"def _sequential_minimal_optimization(self):\n qpmin, qpmin_tmp = 99999999, 99999999\n while qpmin != qpmin_tmp:\n qpmin = qpmin_tmp\n qpmin_tmp = 123",
"def compute_optimal_tuning(target,current):\n target = target.split(' ')\n current = current.split(' ')\n\n initial = [calculate_note_distance(current[i],target[i]) for i in range(min(len(current), len(target)))]\n total_modifications = reduce(lambda x,y: abs(x) + abs(y), initial)\n\n winner = []\n min_sum = total_modifications\n for i in initial:\n mods = abs(i)\n tmp = [abs(i - mods) if i > 0 else abs(i + mods) for i in initial]\n tsum = sum(tmp)\n if tsum < min_sum:\n min_sum = tsum\n winner = tmp\n #transpose the current tuning into the optimal one\n ret = [transpose(note=current[i],distance=winner[i]) for i in range(min(len(current), len(target)))]\n return \" \".join(ret)",
"def quality(self):\n return self.plays * self.number",
"def minimize( self, field = \"UFF\" ) :\n if self._verbose : sys.stdout.write( \"%s.minimize(%s)\\n\" % (self.__class__.__name__,field,) )\n\n if not str( field ).upper() in (\"UFF\",\"MMFF94\",\"MMFF94S\") :\n return\n\n rc = rdkit.Chem.AllChem.EmbedMolecule( self._mol )\n if rc < 0 :\n rc = rdkit.Chem.AllChem.EmbedMolecule( self._mol, useRandomCoords = True )\n try :\n\n# if at first it doesn't converge, try try try again\n#\n if str( field ).upper() == \"UFF\" :\n if rdkit.Chem.AllChem.UFFOptimizeMolecule( self._mol ) == 1 :\n rdkit.Chem.AllChem.UFFOptimizeMolecule( self._mol, maxIters = 1000 )\n\n if str( field ).upper() in (\"MMFF94\", \"MMFF94S\") :\n if rdkit.Chem.AllChem.MMFFOptimizeMolecule( self._mol, mmffVariant = field ) == 1 :\n rdkit.Chem.AllChem.MMFFOptimizeMolecule( self._mol, mmffVariant = field, maxIters = 1000 )\n\n except ValueError :\n pass",
"def ConfigurationFixups(self, config):\n fixed_q_value = config.GetValue('fixed-q')\n if int(config.GetValue('gold-q')) > int(fixed_q_value):\n config = config.ChangeValue('gold-q', fixed_q_value)\n if int(config.GetValue('key-q')) > int(fixed_q_value):\n config = config.ChangeValue('key-q', fixed_q_value)\n\n return config",
"def correct_barcode(query_seq, seq_possibilities):\r\n dists = [_edit_dist(query_seq, seq) for seq in seq_possibilities]\r\n min_dist = min(dists)\r\n number_mins = dists.count(min_dist)\r\n if number_mins > 1:\r\n return None, min_dist\r\n else:\r\n best_hit = seq_possibilities[dists.index(min_dist)]\r\n return best_hit, min_dist",
"def improve(update, close, guess=1, max_updates=100):\n k = 0\n while not close(guess) and k < max_updates:\n guess = update(guess)\n k = k + 1\n return guess",
"def fake_get_hint(_):\r\n return {'best_hint': 'This is the best hint.',\r\n 'rand_hint_1': 'A random hint',\r\n 'rand_hint_2': 'Another random hint',\r\n 'answer': '42.5'}"
]
| [
"0.78182817",
"0.5859518",
"0.5770075",
"0.5642969",
"0.5607727",
"0.5415742",
"0.52216774",
"0.5206427",
"0.51767814",
"0.5172263",
"0.5101419",
"0.505127",
"0.5038782",
"0.5033367",
"0.49953026",
"0.49904868",
"0.4973912",
"0.49698272",
"0.49614057",
"0.49476328",
"0.4937339",
"0.49241653",
"0.4914802",
"0.49095705",
"0.49068707",
"0.490252",
"0.48821616",
"0.48734447",
"0.48675323",
"0.4859961"
]
| 0.666621 | 1 |
Fetch the points for a given fish body | def _fish_body_points(cls, fish_num):
# y coordinate is from top of FISH_HEIGHT for a given fish, to bottom of FISH_HEIGHT for a given FISH
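        # The two returned points are the opposite corners bounding this fish's oval body within the tile.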
left_oval_side = FishTileView.SIZE_MULTIPLIER, \
(fish_num * cls.FISH_HEIGHT) + (cls.FISH_HEIGHT / FishTile.MAX_AMOUNT_FISH)
right_oval_side = cls.FISH_BODY_SIZE_CONSTANT, \
(fish_num + 1) * cls.FISH_HEIGHT - (cls.FISH_HEIGHT / FishTile.MAX_AMOUNT_FISH)
return [left_oval_side, right_oval_side] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_body_part(self, body_points, segmentation_image):\n print(\"start the measuring....\")\n body_parts = {}\n shoulders = self.find_shoulders_point(body_points, segmentation_image)\n abdomen = self.find_abdomen_point(body_points)\n chest = self.find_chest_point(body_points)\n knees = self.find_knee_point(body_points)\n arms = self.find_arms_point(body_points)\n elbows = self.find_elbow_point(body_points)\n ankles = self.find_ankle_point(body_points)\n head = self.find_head_point(body_points)\n\n if shoulders is not None:\n body_parts['shoulders'] = shoulders\n\n if abdomen is not None:\n body_parts['abdomen'] = abdomen\n\n if chest is not None:\n body_parts['chest'] = chest\n\n if knees is not None:\n body_parts['knees'] = knees\n\n if arms is not None:\n body_parts['arms'] = arms\n\n if elbows is not None:\n body_parts['elbows'] = elbows\n\n if ankles is not None:\n body_parts['ankles'] = ankles\n\n if head is not None:\n body_parts['head'] = head\n\n return body_parts",
"def query(self, points):\n return self.locate_points(points)",
"def get_points(self):\n\t\treturn self.points",
"def get_points(self):\r\n return self.points",
"def get_at_position(self, x=507, y=507, filter='F140W'):\n epsf = self.epsf[filter]\n \n rx = 1+(np.clip(x,1,1013)-0)/507.\n ry = 1+(np.clip(y,1,1013)-0)/507.\n \n # zero index\n rx -= 1\n ry -= 1 \n\n nx = np.clip(int(rx), 0, 2)\n ny = np.clip(int(ry), 0, 2)\n\n # print x, y, rx, ry, nx, ny\n\n fx = rx-nx\n fy = ry-ny\n\n psf_xy = (1-fx)*(1-fy)*epsf[:, :, nx+ny*3]\n psf_xy += fx*(1-fy)*epsf[:, :, (nx+1)+ny*3]\n psf_xy += (1-fx)*fy*epsf[:, :, nx+(ny+1)*3]\n psf_xy += fx*fy*epsf[:, :, (nx+1)+(ny+1)*3]\n self.eval_filter = filter\n \n return psf_xy",
"def get_points(self):\n\t\treturn self._points",
"def calc_points_harbor(self):\n points = 0\n if self.cnt_1 + self.cnt_2 + self.cnt_3 + self.cnt_4 + self.cnt_5 >= 2:\n hor = 0\n for i in range(4):\n j = 0\n while j < 5 and ord(self.b[i * 5 + j]) >= 54:\n j += 1\n if j < 4:\n start = j\n j += 1\n while j < 5 and ord(self.b[i * 5 + j]) < 54:\n j += 1\n length = j - start\n if length > hor:\n hor = length\n vptab_harbor = (0, 0, 3, 7, 12, 18)\n points += vptab_harbor[hor]\n ver = 0\n for j in range(5):\n i = 0\n while i < 4 and ord(self.b[i * 5 + j]) >= 54:\n i += 1\n if i < 3:\n start = i\n i += 1\n while i < 4 and ord(self.b[i * 5 + j]) < 54:\n i += 1\n length = i - start\n if length > ver:\n ver = length\n points += vptab_harbor[ver]\n if 'cust' in args.exp:\n if ver == 4 or hor == 5:\n points += 5\n points += 2 * self.cnt_2 + 3 * self.cnt_3\n return points",
"def extract_pts_feat(self, pts: Tensor) -> Tuple[Tensor]:\n x = self.pts_backbone(pts)\n if self.with_pts_neck:\n x = self.pts_neck(x)\n\n seed_points = x['fp_xyz'][-1]\n seed_features = x['fp_features'][-1]\n seed_indices = x['fp_indices'][-1]\n\n return (seed_points, seed_features, seed_indices)",
"def get_points(self):\n landmark_points = []\n image_points = None\n for elt in self.object_list:\n if elt.type.lower() in ['surfacemesh', 'polyline', 'pointcloud', 'landmark']:\n landmark_points.append(elt.get_points())\n elif elt.type.lower() == 'image':\n assert image_points is None, 'A deformable_multi_object cannot contain more than one image object.'\n image_points = elt.get_points()\n\n points = {}\n if len(landmark_points) > 0:\n points = {'landmark_points': np.concatenate(landmark_points)}\n if image_points is not None:\n points['image_points'] = image_points\n return points",
"def points(self, request, pk=None):\n shp = self.get_object()\n points = shp.multipointfeatures_set.all()\n '''\n pagination of the geojson to reduce loading time\n '''\n paginator = GeoJsonPagination()\n paginator.page_size = 100\n page = paginator.paginate_queryset(points, request)\n if page is not None:\n serializer = pointSerializer(page, many=True)\n return paginator.get_paginated_response(serializer.data)\n\n serializer = pointSerializer(data=points, many=True)\n serializer.is_valid()\n\n return Response(serializer.data)",
"def find(self, image, around=15):\n\t\tpoints = self.localizer.predict(image)\n\t\textracted_fish = []\n\t\tfor p, point in enumerate(points):\n\t\t\tgmm = GMM()\n\t\t\ttry:\n\t\t\t\t# use a GMM to find the fish positions\n\t\t\t\tgmm.fit(point)\n\t\t\t\tulp_x, ulp_y, w, h = convert_gmm_to_box(gmm)\n\t\t\t\tulp_x = int(ulp_x)-around if ulp_x > around else 0\n\t\t\t\tulp_y = int(ulp_y)-around if ulp_y > around else 0\n\t\t\t\tw, h = int(w), int(h)\n\t\t\t\tif w == 0 or h == 0:\n\t\t\t\t\tcontinue\n\n\t\t\t\t# extract the fish\n\t\t\t\textracted_fish.append(image[ulp_y: ulp_y+h+around, ulp_x: ulp_x+w+around, :])\n\t\t\texcept Exception as e:\n\t\t\t\t# the gmm could not be fit, possibly due to not enough points\n\t\t\t\t# @TODO: check if the points are the reason for skipping here\n\t\t\t\tprint str(e)\n\t\t\t#end try\n\t\t#endfor\n\n\t\treturn extracted_fish",
"def get_points(self) -> typing.Iterable[float]:\n raise NotImplementedError()",
"def get_body_kp(kp_name=\"Nose\", person_idx=0):\n\n try:\n kps = datum.poseKeypoints[person_idx]\n except:\n print(f\"get_body_kp: invalid person_idx '{person_idx}'\")\n return None\n\n try:\n x, y, conf = kps[body_kp_name_to_id[kp_name]]\n except:\n print(f\"get_body_kp: invalid kp_name '{kp_name}'\")\n return None\n\n if x or y:\n return int(x), int(y)\n else:\n return None",
"def _gather_points(self):\n # This is just a stub for now. We should really find the lines only\n # inside the screen range here.\n\n x = self.index.get_data()\n y = self.value.get_data()\n rad= min(self.width/2.0,self.height/2.0)\n sx = x*rad+ self.x + self.width/2.0\n sy = y*rad+ self.y + self.height/2.0\n\n points = transpose(array((sx,sy)))\n self._cached_data_pts = points\n self._cache_valid = True\n return",
"def get_hull_points(self, show_progress):\n if self.points and not self.hull_points:\n self.graham_scan(show_progress)\n print(\"Input: {} points\").format(len(self.points))\n print(\"Convex hull: {} points\").format(len(self.hull_points))\n return self.hull_points",
"def _get_halluc_points(_, halluc_pts):\n if len(halluc_pts) > 0:\n return halluc_pts\n else:\n return halluc_pts",
"def extract_footprint(polygons):\n heights = []\n prisms = []\n for poly in polygons:\n exterior, interior = extract_ring(poly)\n epoints = extract_points(exterior[0])\n #irings = []\n #for iring in inteior:\n # irings.append(gml_points(iring))\n height = 0\n for i in range(len(epoints)):\n height += epoints[i][2]\n\n heights.append(height/len(epoints))\n prisms.append(epoints)\n\n index = np.array(heights).argmin()\n list_points = [coordinates[0:2] for coordinates in prisms[index]]\n\n return list_points, heights[index]",
"def find_points(self):\n\n points = [\n (self.inner_radius, 0, \"straight\"),\n (self.inner_radius, self.height / 2, \"straight\"),\n (self.outer_radius, self.height / 2, \"straight\"),\n (self.outer_radius, self.arc_height / 2, \"circle\"),\n (self.mid_radius, 0, \"circle\"),\n (self.outer_radius, -self.arc_height / 2, \"straight\"),\n (self.outer_radius, -self.height / 2, \"straight\"),\n (self.inner_radius, -self.height / 2, \"straight\")\n ]\n\n self.points = points",
"def extract_points(fp: T.BinaryIO) -> T.Optional[T.List[geo.Point]]:\n\n points = None\n movie_timescale = None\n media_timescale = None\n elst_entries = None\n\n for h, s in parser.parse_path(fp, [b\"moov\", [b\"mvhd\", b\"trak\"]]):\n if h.type == b\"trak\":\n trak_start_offset = s.tell()\n\n descriptions = sample_parser.parse_descriptions_from_trak(\n s, maxsize=h.maxsize\n )\n camm_descriptions = [d for d in descriptions if d[\"format\"] == b\"camm\"]\n if camm_descriptions:\n s.seek(trak_start_offset, io.SEEK_SET)\n camm_samples = _extract_camm_samples(s, h.maxsize)\n\n points_with_nones = (\n _parse_point_from_sample(fp, sample)\n for sample in camm_samples\n if sample.description[\"format\"] == b\"camm\"\n )\n\n points = [p for p in points_with_nones if p is not None]\n if points:\n s.seek(trak_start_offset)\n elst_data = parser.parse_box_data_first(\n s, [b\"edts\", b\"elst\"], maxsize=h.maxsize\n )\n if elst_data is not None:\n elst_entries = cparser.EditBox.parse(elst_data)[\"entries\"]\n\n s.seek(trak_start_offset)\n mdhd_data = parser.parse_box_data_firstx(\n s, [b\"mdia\", b\"mdhd\"], maxsize=h.maxsize\n )\n mdhd = cparser.MediaHeaderBox.parse(mdhd_data)\n media_timescale = mdhd[\"timescale\"]\n else:\n assert h.type == b\"mvhd\"\n if not movie_timescale:\n mvhd = cparser.MovieHeaderBox.parse(s.read(h.maxsize))\n movie_timescale = mvhd[\"timescale\"]\n\n # exit when both found\n if movie_timescale is not None and points:\n break\n\n if points and movie_timescale and media_timescale and elst_entries:\n segments = [\n elst_entry_to_seconds(entry, movie_timescale, media_timescale)\n for entry in elst_entries\n ]\n points = list(filter_points_by_elst(points, segments))\n\n return points",
"def get_bodyparts(project_dir):\n print(f\"\\n\\n\\nLoading data\")\n df_paths = sorted(glob.glob(os.path.join(project_dir, '*.h5')))\n points_2d_df = utils.create_dlc_points_2d_file(df_paths)\n arr = points_2d_df[points_2d_df[\"frame\"]==0][[\"marker\"]][points_2d_df[\"camera\"]==0].values\n final_arr = arr.flatten().tolist()\n return(final_arr)",
"def GetFacePoints(self, p_int, int_tuple):\n ...",
"def get_points(self):\n return self._points",
"def get_points(self):\n return self._points",
"def extract_poses(self):\n if len(self.path) == 0:\n raise ValueError(\"Path Empty\")\n return\n \n path = np.array(self.readLeaderPath())\n # Path distance at target position\n target_dist = path[-1,3] - self.distance\n # Filter by distance\n far_pose_ids = np.where(path[:,3] < target_dist) #path[path[:,3] < target_dist]\n\n if len(far_pose_ids[0]) != 0:\n target_pose_id = far_pose_ids[0][-1]\n target_pose = path[target_pose_id,:3]\n\n # Delete Path history\n self.thread_lock.acquire()\n self.path = path[target_pose_id:,:].tolist()\n self.thread_lock.release()\n else:\n # No target\n raise ValueError(\"No target\")\n return\n\n current_pose = np.array(self.readPose())\n return current_pose, target_pose",
"def load_body_points(left_json, right_json):\n left_body = [2, 3, 4, 8, 9, 10]\n right_body = [5, 6, 7, 11, 12, 13]\n left_face = [14, 16]\n right_face = [15, 17]\n with open(left_json, 'r') as f:\n left_annotation = json.load(f)\n # find the largest confidence people\n peoples = left_annotation['people']\n confidence = 0\n left_points = np.array([])\n for people in peoples:\n keep = np.array(people['pose_keypoints_2d']).reshape((-1, 3))\n if np.mean(keep[:, 2]) > confidence:\n confidence = np.mean(keep[:, 2])\n left_points = np.copy(keep)\n # left_points = np.array(left_annotation['people'][0]['pose_keypoints_2d']).reshape((-1, 3))\n # body left right points accuracy take (left + right / 2)\n left_points[left_body, 2], left_points[right_body, 2] = [(left_points[left_body, 2] + left_points[right_body, 2])\n / 2 for i in [0, 1]]\n left_points[left_face, 2], left_points[right_face, 2] = [(left_points[left_face, 2] + left_points[right_face, 2])\n / 2 for i in [0, 1]]\n\n with open(right_json, 'r') as f:\n right_annotation = json.load(f)\n # find the largest confidence people\n peoples = right_annotation['people']\n confidence = 0\n right_points = np.array([])\n for people in peoples:\n keep = np.array(people['pose_keypoints_2d']).reshape((-1, 3))\n if np.mean(keep[:, 2]) >= confidence:\n confidence = np.mean(keep[:, 2])\n right_points = np.copy(keep)\n #right_points = np.array(right_annotation['people'][0]['pose_keypoints_2d']).reshape((-1, 3))\n # body left right points accuracy take (left + right / 2)\n right_points[left_body, 2], right_points[right_body, 2] = [(right_points[left_body, 2] + right_points[right_body, 2])\n / 2 for i in [0, 1]]\n right_points[left_face, 2], right_points[right_face, 2] = [(right_points[left_face, 2] + right_points[right_face, 2])\n / 2 for i in [0, 1]]\n\n index = np.argmax((left_points[:, 2] + right_points[:, 2]))\n # it is possible cause left right up site down\n if index in left_body:\n return (left_points[index]+left_points[index+3])/2, (right_points[index]+right_points[index+3])/2\n elif index in right_body:\n return (left_points[index]+left_points[index-3])/2, (right_points[index]+right_points[index-3])/2\n elif index in left_face:\n return (left_points[index]+left_points[index+1])/2, (right_points[index]+right_points[index+1])/2\n elif index in right_face:\n return (left_points[index]+left_points[index-1])/2, (right_points[index]+right_points[index-1])/2\n\n return left_points[index], right_points[index]",
"def get_city_points(city):\n for item in coordinate_list:\n if item[0] == city:\n return (item[1], item[2])",
"def point(self):\n bfsize = card(self.basefield)\n one = self.basefield.one\n t = self.basefield.zero\n if len(self) == 2 or (self.a1 == self.a2 == self.a3 == self.basefield.zero):\n while self.basefield.Legendre(t) != 1:\n s = self.basefield.createElement(bigrandom.randrange(bfsize))\n t = self.cubic(s)\n if not t:\n return [s, t]\n t = self.basefield.sqrt(t)\n r = bigrandom.randrange(2)\n if r:\n return [s, -t]\n return [s, t]\n elif self.ch != 2 and self.ch != 3:\n sform = self.simple()\n while sform.basefield.Legendre(t) != 1:\n s = sform.basefield.createElement(bigrandom.randrange(bfsize))\n t = (s**3+sform.a*s+sform.b)\n x = (s-3*self.b2) // (36*one)\n y = (sform.basefield.sqrt(t) // (108*one)-self.a1*x-self.a3)//(2*one)\n return [x, y]\n elif self.ch == 3:\n while sform.basefield.Legendre(t) != 1:\n s = self.basefield.createElement(bigrandom.randrange(bfsize))\n t = (s**3+self.a2*s**2+self.a4*s+self.a6)\n return [s, self.basefield.sqrt(t)]\n else:\n raise NotImplementedError(\"This is not implemented.\")",
"def extract_fixed_point_locations(fps):\n fixed_point_location = [fp['x'] for fp in fps]\n\n fixed_point_locations = np.vstack(fixed_point_location)\n\n return fixed_point_locations",
"def extract_pts_feat(self, pts, img_feats, img_metas):\n if not self.with_pts_bbox:\n return None\n voxels, num_points, coors = self.voxelize(pts)\n voxel_features = self.pts_voxel_encoder(voxels, num_points, coors,\n img_feats, img_metas)\n batch_size = coors[-1, 0] + 1\n x = self.pts_middle_encoder(voxel_features, coors, batch_size)\n x = self.pts_backbone(x)\n if self.with_pts_neck:\n x = self.pts_neck(x)\n return x",
"def calc_points_tower(self):\n points = 0\n cnt_tower = 0\n vptab_tower = (0, 1, 3, 6, 10, 15)\n for i in range(20):\n if self.b[i] == 'T':\n points += vptab_tower[self.f[i]]\n cnt_tower += 1\n if 'poli' in args.exp:\n points += max(self.f)\n if 'scho' in args.exp:\n points += cnt_tower\n return points"
]
| [
"0.6347192",
"0.57089156",
"0.5574567",
"0.5518729",
"0.54645497",
"0.5356999",
"0.5321854",
"0.5314515",
"0.53112376",
"0.530583",
"0.52986497",
"0.52113086",
"0.5188562",
"0.51864165",
"0.5180519",
"0.51597506",
"0.5130005",
"0.5108989",
"0.5103025",
"0.50794214",
"0.50657815",
"0.50625277",
"0.50625277",
"0.5049183",
"0.502823",
"0.4982111",
"0.4980286",
"0.4973284",
"0.4948575",
"0.49413213"
]
| 0.64592373 | 0 |
Extracts triples using the Stanford CoreNLP OpenIE library via the CoreNLPConnector Java REST API | def openie(text: str) -> List[Tuple[str, str, str]]:
client = env.resolve('servers.java')
verbose.info('Extracting triples using OpenIE at: ' + client['address'], caller=openie)
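    # Query the Java service's /openie/triples endpoint and return the parsed JSON list of triples.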
return requests.get('%s/openie/triples' % client['address'], params={'text': text}).json() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def extract_triplets(self) -> Iterable[dict]:\n stg_corpus = [txt.strip()+\".\" if txt.strip()[-1]!=\".\" else txt.strip() for txt in self.__corpus__]\n stg_corpus = ' '.join(self.__corpus__)\n\n with StanfordOpenIE() as client:\n triples_corpus = client.annotate(stg_corpus)\n\n self.__triples_corpus__ = triples_corpus\n\n return triples_corpus",
"def corenlp(limit):\n # limit should be the int of sentences to extract from database or 'all' to extract all\n modeller = Modeller(limit=limit)\n modeller.get_wiki_data()\n strings,y_vals = zip(*modeller.data)\n print('Total strings:',len(y_vals))\n print('People:',sum(y_vals))\n print('Other entities:',len(y_vals) - sum(y_vals))\n\n correct_person = 0\n correct_non_person = 0\n false_positive = 0\n false_negative = 0\n\n print('\\nPutting strings through CoreNLP')\n start_time_corenlp = time.time()\n for counter,(string,y) in enumerate(modeller.data):\n # pass string to stanford ner\n ner = stanford_ner(string)\n # check if ner indicates there is a person in the string\n person = False\n for word in ner:\n if word['ner'] == 'PERSON':\n person = True\n break\n if person:\n if y:\n correct_person += 1\n else:\n false_positive += 1\n else:\n if y:\n false_negative += 1\n else:\n correct_non_person += 1\n print('Strings put through CoreNLP in:',round(time.time()-start_time_corenlp),'seconds')\n\n\n precision = correct_person/(correct_person+false_positive)\n recall = correct_person/(correct_person+false_negative)\n f1_score = 2*(precision*recall)/(precision+recall)\n print('Precision:',round(precision,2),'\\nRecall:',round(recall,2),'\\nF1 score:',round(f1_score,2))\n # compute precision, recall, f1_score",
"def stanford_ner(string):\n corenlp_url = 'http://localhost:9000'\n string = string.encode(encoding='utf-8')\n params = {\n 'properties': '{\"annotators\": \"ner\", \"outputFormat\": \"json\"}'\n }\n try:\n r = requests.post(url=corenlp_url,params=params, data=string)\n json = r.json()\n # Store Name entity Recognition data in self.ner.\n ner = json['sentences'][0]['tokens']\n except:\n print('CoreNLP Name Entity Recognition failed.')\n traceback.print_exc()\n ner = []\n string = string.decode(encoding='utf-8')\n return ner",
"def extract_english_raw_texts():\n # conceptnet triples raw text\n cpnet_en_raw_text = []\n\n # conceptnet entity context\n cpnet_en_entity_context = []\n\n with open(conceptnet_path, encoding=\"utf8\") as f:\n for line in f.readlines():\n ls = line.split('\\t')\n if ls[2].startswith('/c/en/') and ls[3].startswith('/c/en/'):\n \"\"\"\n Some preprocessing:\n - Remove part-of-speech encoding.\n - Split(\"/\")[-1] to trim the \"/c/en/\" and just get the entity name, convert all to \n - Lowercase for uniformity.\n \"\"\"\n rel = ls[1].split(\"/\")[-1].lower()\n head = del_pos(ls[2]).split(\"/\")[-1].lower()\n tail = del_pos(ls[3]).split(\"/\")[-1].lower()\n\n if not head.replace(\"_\", \"\").replace(\"-\", \"\").isalpha():\n continue\n\n if not tail.replace(\"_\", \"\").replace(\"-\", \"\").isalpha():\n continue\n\n # transfer to raw text\n head_text = head.replace(\"_\", \" \")\n tail_text = tail.replace(\"_\", \" \")\n\n if rel not in relation_mapping:\n continue\n\n rel_text = relation_mapping[rel]\n raw_text = head_text + \" \" + rel_text + \" \" + tail_text\n cpnet_en_raw_text.append(raw_text)\n \n # split to train and test\n shuffle(cpnet_en_raw_text)\n train_size = int(len(cpnet_en_raw_text) * 0.9)\n cpnet_en_raw_train_text = cpnet_en_raw_text[:train_size]\n cpnet_en_raw_dev_text = cpnet_en_raw_text[train_size:]\n\n with open(conceptnet_en_raw_text_train_path, \"w\", encoding=\"utf8\") as f:\n f.write(\"\\n\".join(cpnet_en_raw_train_text))\n \n with open(conceptnet_en_raw_text_dev_path, \"w\", encoding=\"utf8\") as f:\n f.write(\"\\n\".join(cpnet_en_raw_dev_text))",
"def get_triples(self, bundle_url=None):\n # metadata triples\n # transcription triples\n # translation triples\n # gloss triples\n # NER triples\n pass",
"def parseSentences(jobidsentences):\n\n jobid, docs, Config = jobidsentences\n\n #start stanford server, we need to find an open port through guessing\n maxtries = 12\n tries=0\n err=[]\n while tries <maxtries:\n try:\n np.random.seed()\n jobid = np.random.randint(0, 2000)\n nlp = StanfordCoreNLP(Config.parserPath, port=8000+(jobid%2000), memory='8g', timeout=500000) #https://github.com/Lynten/stanford-corenlp\n maxtries = 0\n print(\"Starting DepParse\", jobid)\n except IOError as e:\n err=e\n tries += 1\n\n wmap = {}\n #wcou={} #word counts\n compounds = [] #of lemmatized words\n newdocs = []\n useNLTK = not \"nlp\" in locals() # check if StanfordCoreParser could be used, if not use NLTK lemmatizer\n if useNLTK:\n print(\"StanfordCoreNLP parser not found or ioport in use - We automatically try another;\", \"Message \",err, \" Jobid\",jobid)\n # from nltk.stem import WordNetLemmatizer\n # lemmatizer=WordNetLemmatizer()\n props = {'annotators': 'tokenize, ssplit, lemma, depparse', 'pipelineLanguage': 'en', 'outputFormat': 'json'} #options for parsing\n failed=0\n for i, (docid, d) in enumerate(docs):\n if i%10 == 9: print(docid, jobid)\n if useNLTK:\n words=tt.docSpacePunctuation(d).split(\" \")\n for w in words:\n lem=tt.changeWord(w) #lem = lemmatizer.lemmatize(w)\n if not len(lem): lem=w\n addWord(wmap, w, lem)\n newdocs.append((docid, words))\n else: #Use StanfordCoreParser\n docTokens = []\n parseRes = nlp.annotate(d, properties=props)\n try: var = json.loads(parseRes)\n except json.decoder.JSONDecodeError as e:\n print(\" Not parsed\", e, str(d)[:30].replace(\"\\n\", \"\"), str(parseRes)[:30].replace(\"\\n\", \"\"))\n failed += 1\n newdocs.append((docid, docTokens))\n continue\n\n for s in var[\"sentences\"]:\n csent = []\n currcomp = []\n mapTow = {}\n for i, b in enumerate(s[\"enhancedPlusPlusDependencies\"]):\n tok = s[\"tokens\"][b[\"dependent\"]-1][\"word\"]\n lem = s[\"tokens\"][b[\"dependent\"]-1][\"lemma\"]\n #print(\"t,l\",tok,lem,b[\"dep\"],b[\"dependent\"])\n if b[\"dep\"] == \"compound\": #if part of compound\n # compounds should be pure words, Stanford parser often creates clutter words like \"Section_1\" or so\n if len(tok) > 1 and tok.isalpha(): #note this skips non-alpha words!\n currcomp.append((tok, lem)) #tok also ok, but leads to some redundant words => communication skill, communication skills\n iEnd = b['governor']\n mapTow[b[\"dependent\"]] = \"\"\n elif len(currcomp) > 0 and b['dependent'] == iEnd: #last word of compound\n rawcomp = \" \".join([x[0] for x in currcomp]) #create compounds (except last word)\n comp = \" \".join([x[1] for x in currcomp])\n if len(tok) > 1 and tok.isalpha(): #last word is alpha => add it\n rawcomp += \" \" + tok\n comp += \" \" + lem\n else: addWord(wmap, tok, lem) #add last word as new word if non-alpha => not really needed\n if len(comp.split()) > 1: #if compound\n comp = comp.lower() #all lemmas are lower case\n compounds.append(comp)\n addWord(wmap, rawcomp, comp)\n # wcou[tok] = wcou.get(rawcomp, 0) + 1\n currcomp = []\n mapTow[b[\"dependent\"]] = rawcomp\n elif not (b[\"dep\"] == \"punct\" or (lem in tt.setStopWords and not tok == \"IT\" ) or (len(tok) == 1 and not tok in [\"R\", \"C\"])): #a single word / no compound\n #wcou[tok]=wcou.get(tok,0)+1\n addWord(wmap, tok, lem)\n\n for i, t in enumerate(s[\"tokens\"]): #add all tokens (single words/compounds)\n if i+1 in mapTow:\n if len(mapTow[i+1]) > 0: csent.append(mapTow[i+1])\n else:\n if \"-lrb-\" in t[\"word\"].lower(): csent.append(\"(\") #left bracket\n elif \"-rrb-\" 
in t[\"word\"].lower(): csent.append(\")\") #right brackt\n else: csent.append(t[\"word\"])\n #print(\"wmap\", wmap)\n docTokens.append(\" \".join(csent))\n newdocs.append((docid, docTokens))\n if not useNLTK: nlp.close()\n print(\" Parse errors\", failed, \"out of\", len(docs))\n\n return compounds, wmap, newdocs #,wcou",
"def extract(content, corenlp_endpoint, grobid_endpoint, dependency_patterns_file, output_file=None, show_graph=False,\n pretty=False, simplify=False):\n\n all_extractions = []\n\n out = None\n if output_file:\n out = codecs.open(output_file, \"a\", encoding=\"utf-8\")\n\n if len(content) < 5:\n return None\n\n nlp = StanfordCoreNLP(corenlp_endpoint)\n output = nlp.annotate(content, properties={'outputFormat': 'json', 'timeout': '9999'})\n\n if isinstance(output, str): # str supports both python 2 and 3\n output = json.loads(output.encode(\"latin-1\"), strict=False)\n\n if \"sentences\" in output and isinstance(output[\"sentences\"], list):\n for i in range(0, len(output[\"sentences\"])):\n s_str = _reconstruct_sent(output[\"sentences\"][i])\n\n # Enhanced dependencies have different key names in JSON depending on version of CoreNLP\n possible_keys = [\n \"enhanced-plus-plus-dependencies-annotation\",\n \"enhancedPlusPlusDependencies\"\n ]\n\n dep_key = \"collapsed-ccprocessed-dependencies\" # default key\n if \"collapsed-ccprocessed-dependencies\" not in output[\"sentences\"][i]:\n for k in possible_keys:\n if k in output[\"sentences\"][i]:\n dep_key = k\n\n global A\n A = Annotations(output[\"sentences\"][i][\"tokens\"], output[\"sentences\"][i][dep_key])\n\n if A.check_output(output[\"sentences\"][i], stats) is True:\n\n stats.total_sentences += 1\n G = _build_graph(show=show_graph)\n grobid_response = grobid_quantities(s_str, A, grobid_endpoint)\n\n if isinstance(grobid_response, dict) and \"measurements\" in grobid_response:\n for quantity in grobid_response[\"measurements\"]:\n A.augment_match(quantity)\n\n stats.total_measurements += len(A.matches)\n\n for idx, match in enumerate(A.matches):\n\n global Num\n Num = match[\"num\"]\n\n match[\"sentence\"] = i + 1\n match[\"grobid\"][\"related\"] = _get_related(stats, match, dependency_patterns_file)\n\n # Remove fields used for processing but not to be shown to user\n remove = [\"adverbs\", \"num\", \"unit\", \"connector\", \"form\", \"sentence\", \"num_idx\", \"unit_idx\",\n \"measurement_format\"]\n [match.pop(x, None) for x in remove]\n sort_order = ['adverbs', 'type', 'quantity', 'quantityLeast', 'quantityMost', 'quantified',\n 'related']\n\n match_ordered = _sorted_dictionary(match[\"grobid\"], sort_order)\n\n if simplify:\n simplified_sort_order = ['value', 'unit', 'quantified', 'related']\n simplified = _simplify_results(match_ordered)\n\n if simplified:\n match_ordered = _sorted_dictionary(match[\"grobid\"], simplified_sort_order)\n\n if pretty and not simplify:\n if out:\n out.write(json.dumps(match_ordered, ensure_ascii=False, indent=4))\n if idx != len(A.matches) - 1 and out:\n out.write(\",\\n\")\n\n elif out:\n out.write(json.dumps(match_ordered, ensure_ascii=False) + \"\\n\")\n\n all_extractions.extend(A.matches)\n\n else:\n logging.warning(\"CoreNLP parsing failed for sentence: %s\" % (s_str))\n else:\n logging.warning(\"CoreNLP parsing failed for content: %s\" % (content))\n\n if out:\n out.close()\n\n logging.info(\"Total sentences parsed: %s\" % (str(stats.total_sentences)))\n logging.info(\"Total measurements found: %s\" % (str(stats.total_measurements)))\n stats.print_summary()\n\n return all_extractions",
"def process_data_from_input_file(triplet):\n\n sentence = triplet.subject + ' ' + triplet.predicate + ' ' + triplet.object\n doc = nlp(unicode(sentence))\n root = doc[0]\n for t in doc:\n if t.pos_ == 'VERB' and t.head == t:\n root = t\n # elif t.pos_ == 'NOUN'\n\n # also, if only one sentence\n # root = doc[:].root\n\n\n \"\"\"\n CURRENT ASSUMPTIONS:\n - People's names are unique (i.e. there only exists one person with a certain name).\n - Pet's names are unique\n - The only pets are dogs and cats\n - Only one person can own a specific pet\n - A person can own only one pet\n \"\"\"\n\n\n # Process (PERSON, likes, PERSON) relations\n if root.lemma_ == 'like':\n if triplet.subject in [e.text for e in doc.ents if e.label_ == 'PERSON' or e.label_ == 'ORG'] and triplet.object in [e.text for e in doc.ents if e.label_ == 'PERSON' or e.label_ == 'ORG'] and \"n't\" not in triplet.predicate:\n s = add_person(triplet.subject)\n o = add_person(triplet.object)\n s.likes.append(o)\n\n if root.lemma_ == 'be' and triplet.object.startswith('friends with'):\n fw_doc = nlp(unicode(triplet.object))\n with_token = [t for t in fw_doc if t.text == 'with'][0]\n # get text after with\n after_with = fw_doc.text.split(with_token.text+ ' ')[1]\n people = []\n for p in after_with.split(' '):\n if nlp(p)[0].tag_ == 'NNP':\n people.append(nlp(p)[0].text)\n # fw_who = [t for t in with_token.children if t.dep_ == 'pobj'][0].text\n # fw_who = [e for e in fw_doc.ents if e.label_ == 'PERSON'][0].text\n for p in people:\n if triplet.subject in [e.text for e in doc.ents if e.label_ == 'PERSON']:\n s = add_person(triplet.subject)\n o = add_person(p)\n s.likes.append(o)\n o.likes.append(s)\n if root.lemma_ == 'be' and triplet.object == 'friends':\n fw_doc = nlp(unicode(triplet.subject))\n and_token = [t for t in fw_doc if t.text == 'and']\n if and_token:\n and_token = and_token[0].text\n if and_token == 'and' and fw_doc[0].text in [e.text for e in doc.ents if e.label_ == 'PERSON'] and fw_doc[2].text in [e.text for e in doc.ents if e.label_ == 'PERSON']:\n s = add_person(fw_doc[0].text)\n o = add_person(fw_doc[2].text)\n s.likes.append(o)\n o.likes.append(s)\n\n # Process (PET, has, NAME) Mary's dog's name is Rover\n if triplet.subject.endswith('name') and ('dog' in triplet.subject or 'cat' in triplet.subject):\n obj_span = doc.char_span(sentence.find(triplet.object), len(sentence))\n\n # handle single names, but what about compound names? 
Noun chunks might help.\n if (len(obj_span) == 1 or len(obj_span) == 2) and obj_span[-1].pos_ == 'PROPN':\n name = triplet.object\n subj_start = sentence.find(triplet.subject)\n subj_doc = doc.char_span(subj_start, subj_start + len(triplet.subject))\n\n s_people = [token.text for token in subj_doc if token.ent_type_ == 'PERSON']\n assert len(s_people) == 1\n s_person = select_person(s_people[0])\n\n pet = get_persons_pet(s_person.name)\n\n pet.name = name\n s_person.has.append(pet)\n\n # Process (Who has dog)\n if root.lemma_ == 'have'and ('dog' in triplet.object or 'cat' in triplet.object):\n # find pets name and instantiate name empty str\n obj_span = doc.char_span(sentence.find(triplet.object), len(sentence))\n name = ''\n\n if obj_span[-1].pos_ == 'PROPN':\n name = obj_span[-1].text\n s = add_person(triplet.subject)\n s_pet_type = 'dog' if 'dog' in triplet.object else 'cat'\n pet = add_pet(s_pet_type, name)\n s.has.append(pet)\n\n date = [e.text for e in doc.ents if e.label_ == 'DATE']\n gpe = [e.text for e in doc.ents if e.label_ == 'GPE']\n person = [e.text for e in doc.ents if e.label_ == 'PERSON' or e.label_ == 'ORG']\n # if person and GPE exists, we add it into trip(departs_on, departs_to)\n if person and (gpe or date):\n s = add_person(triplet.subject)\n o = add_trip(date, gpe)\n s.travels.append(o)",
"def try6():\n sample_file = '/Users/mayankkejriwal/datasets/eswc2017/triples_sample.ttl'\n with codecs.open(sample_file, 'r', 'utf-8') as f:\n for line in f:\n triple_dict = EmbeddingGenerator.EmbeddingGenerator.parse_line_into_triple(line)\n if not triple_dict:\n continue\n # print type(triple_dict['object'])\n # print triple_dict\n print triple_dict['subject'].n3()[1:-1]\n # print triple_dict['predicate']==URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type')",
"def corenlp_tokenizing(data_home, dataset='kp20k', data_type='validation'):\n suffix = ''\n if dataset == 'kp20k' and data_type == 'training':\n suffix = '_filtered'\n data_for_opennmt_home = os.path.join(data_home, 'data_for_opennmt')\n if not os.path.exists(data_for_opennmt_home):\n os.makedirs(data_for_opennmt_home)\n\n context_file = os.path.join(data_home, 'data_for_corenlp', '{}_{}_context_for_corenlp{}.txt'.format(dataset, data_type, suffix))\n context_file = open(context_file, encoding='utf-8')\n context_lines = context_file.readlines()\n # tokenized_context_lines = [' '.join(CoreNLP.word_tokenize(c.strip())) + '\\n' for c in context_lines]\n tokenized_context_lines = []\n for c_idx in tqdm(range(len(context_lines))):\n c = context_lines[c_idx]\n c = ' '.join(CoreNLP.word_tokenize(c.strip())) + '\\n'\n tokenized_context_lines.append(c)\n saved_context_file = os.path.join(data_for_opennmt_home, '{}_{}_context{}.txt'.format(dataset, data_type, suffix))\n saved_context_file = open(saved_context_file, 'w', encoding='utf-8')\n saved_context_file.writelines(tokenized_context_lines)\n\n key_file = os.path.join(data_home, 'data_for_corenlp', '{}_{}_keyword_for_corenlp{}.txt'.format(dataset, data_type, suffix))\n key_file = open(key_file, encoding='utf-8')\n key_lines = key_file.readlines()\n # tokenized_key_lines = [' '.join(CoreNLP.word_tokenize(c.strip())) + '\\n' for c in key_lines]\n tokenized_key_lines = []\n for c_idx in tqdm(range(len(key_lines))):\n c = key_lines[c_idx]\n c = ' '.join(CoreNLP.word_tokenize(c.strip())) + '\\n'\n tokenized_key_lines.append(c)\n saved_key_file = os.path.join(data_for_opennmt_home, '{}_{}_keyword{}.txt'.format(dataset, data_type, suffix))\n saved_key_file = open(saved_key_file, 'w', encoding='utf-8')\n saved_key_file.writelines(tokenized_key_lines)",
"def read_turte():\n file = request.form['upload-file']\n print(file)\n # no_of_rows = int(request.form['no_of_rows'])\n # g = rdflib.Graph()\n # g = ConjunctiveGraph()\n owlClass = rdflib.namespace.OWL.Class\n rdfType = rdflib.namespace.RDF.type\n\n result = g.parse(file, format=\"turtle\")\n final_list = []\n # Iterate over triples in store and print them out\n # for s, p, o in result:\n # # if type(o) == rdflib.term.Literal:\n # # sub.append(s),prop.append(p),obj.append(o)\n # # final_list.append((s, o))\n # final_list.append((s, g.label(s)))\n for s in result.subjects(predicate=rdfType, object=owlClass):\n class_labels.append(result.label(s).title())\n autocmplete_label_dict[result.label(s).title()] = {}\n final_list.append((s.title(), result.label(s).title()))\n class_labels_dict[result.label(s).title()] = s.title()\n labels = list(set([i for i in final_list if len(i[1]) > 0]))\n print(len(labels))\n # print(class_labels_dict)\n print(class_labels_dict.get('Computertomograph'))\n # print(class_labels_dict.get('DepthOfCut'))\n rdf_df = pd.DataFrame(labels, columns=['class(subject)', 'label(literals)'])\n alert_value = 1 # for alert.\n return render_template('turtle_list.html', tables=[rdf_df.to_html(classes='data')], titles=rdf_df.columns.values)\n # render_template('index_old.html', alert_value=alert_value)",
"def peticionesArcGISServer(serviceURl):\r\n params = {\"where\": \"1=1\", \"f\":\"pjson\"}\r\n data = requests.get(serviceURl + \"/query\", params)\r\n count = 0\r\n for feature in data.json()[\"features\"]:\r\n count += 1\r\n\r\n print data.json()\r\n print \"Numero total de entidades: {0}\".format(count)",
"def run_corenlp_openie(input_file, output, config=NLP_CONFIG, no_entity_filter=False, consider_sections=False):\n # Read config\n with open(config) as f:\n conf = json.load(f)\n core_nlp_dir = conf[\"corenlp\"]\n\n # Prepare files\n filelist_fn, out_fn, amount_files = openie_prepare_files(input_file, no_entity_filter=no_entity_filter,\n consider_sections=consider_sections)\n\n if amount_files == 0:\n print('no files to process - stopping')\n else:\n openie_run(core_nlp_dir, out_fn, filelist_fn)\n print(\"Processing output ...\", end=\"\")\n start = datetime.now()\n # Process output\n openie_process_output(out_fn, output)\n print(\" done in {}\".format(datetime.now() - start))",
"def stanford_core_nlp_seg_str(data):\n # set random time\n time.sleep(random.random()*2)\n url = \"https://corenlp.run/\"\n params = {\n \"properties\": {\n \"annotators\": \"tokenize,ssplit,pos,ner,regexner\",\n \"date\": util.format_timestamp(timestamp=time.time())\n },\n \"pipelineLanguage\": \"zh\"\n }\n r = session.post(url=url, params=params, data=data.encode(\"utf-8\"))\n\n seg_str = \"\"\n if r.status_code != 200:\n print(\"connect error!\")\n return seg_str\n\n words = []\n natures = []\n list_seg = json.loads(r.text)[\"sentences\"]\n for idx in range(len(list_seg)):\n seg = list_seg[idx][\"tokens\"]\n for item in seg:\n words.append(item[\"word\"])\n natures.append(item[\"pos\"])\n\n # concat\n seg_list = []\n for word, nature in zip(words, natures):\n seg_list.append(word+\"/\"+nature)\n\n seg_str = \"\\t\".join(seg_list)\n return seg_str",
"def buildFeatureList():\n with open('./feature_list.txt', 'w')as out:\n res = es.search(index=indexName, doc_type=document,\n body={\n 'query': {\n 'query_string': {\n \"default_field\": \"split\",\n \"query\": \"training\"\n }\n },\n \"size\": indexSize\n })\n ids = [d['_id'] for d in res['hits']['hits']]\n for id in ids:\n text = es.get(index=indexName, doc_type=document, id=id)['_source']['body']\n terms = text.split()\n for term in terms:\n features[term] = term\n count = 0\n for term in features:\n count += 1\n out.write(str(count)+ \" \" + term + '\\n')",
"def main ():\n\n\tfio = fileIo('input.txt')\n text = fio.getInput()\n\n\tp = re.compile(r'#?\\d[\\s\\.]?[\\s]?')\n\tout = filter(None, p.split(text))\n\ti = 0\n\tlistOfLists = []\n\t\n\n\tfor s in out:\n\t\ti += 1\n\t\ttext = nltk.word_tokenize(s)\n\t\tpos = nltk.pos_tag(text)\n\t\tpattern = \"NP: {<DT>?<JJ>*<NN>}\"\n\t\tNPChunker = nltk.RegexpParser(pattern)\n\t\tresult = NPChunker.parse(pos)\n\t\tlistOfLists.append( result )\n\n\tprint \"Noun Count:\\n\" + str(countNouns( listOfLists ))\n\tprint \"Verb Count:\\n\" + str(countVerbs( listOfLists ))\n\tprint \"Adjective Count:\\n\" + str(countAdjectives( listOfLists ))",
"def get_triplets_visualphrase(self):\n vocab = self.vocab['sro']\n triplets = torch.zeros(len(vocab), 3)\n for j in range(len(vocab)):\n subjname, relname, objname = vocab.idx2word[j].split('-')\n triplets[j, 0] = self.vocab['all'].wordpos2idx[subjname + '_noun']\n triplets[j, 1] = self.vocab['all'].wordpos2idx[objname + '_noun']\n triplets[j, 2] = self.vocab['all'].wordpos2idx[relname + '_verb']\n\n triplets = triplets.long()\n return triplets",
"def get_liwc_features(train_data, test_data):\n print(\"getting liwc features\")\n train_liwc_matrix = []\n test_liwc_matrix = []\n for phrase in train_data:\n liwc_scores = word_category_counter.score_text(phrase)\n feature_vector = []\n for key in liwc_categories:\n if key in liwc_scores.keys():\n # print(key)\n # print(liwc_scores[key])\n feature_vector.append(liwc_scores[key])\n else:\n feature_vector.append(0)\n # print(feature_vector)\n train_liwc_matrix.append(feature_vector)\n for phrase in test_data:\n liwc_scores = word_category_counter.score_text(phrase)\n feature_vector = []\n for key in liwc_categories:\n if key in liwc_scores.keys():\n # print(key)\n # print(liwc_scores[key])\n feature_vector.append(liwc_scores[key])\n else:\n feature_vector.append(0)\n test_liwc_matrix.append(feature_vector)\n # print(train_liwc_matrix)\n return sparse.csr_matrix(train_liwc_matrix), sparse.csr_matrix(test_liwc_matrix)",
"def test_model():\n test_text = \"what is the price of jug?\"\n model = spacy.load(\"../model/custom_ner_model\")\n doc = model(test_text)\n for ent in doc.ents:\n print(ent.text, ent.start_char, ent.end_char, ent.label_)",
"def parse_train_data(training_set, language):\n print \"Reading training set: \" + training_set\n xmldoc = minidom.parse(training_set)\n lex_list = xmldoc.getElementsByTagName('lexelt')\n training_output = {}\n\n print \"Processing training set and training models...\"\n for node in lex_list:\n lexelt = node.getAttribute('item')\n training_output[lexelt] = {}\n inst_list = node.getElementsByTagName(\"instance\")\n # setup the neighbor_word_list within k distance of the word\n neighbor_word_list = []\n senseid_set = set()\n for inst in inst_list:\n sentence = inst.getElementsByTagName('context')[0]\n senseid_set.add(inst.getElementsByTagName('answer')[0].getAttribute('senseid'))\n neighbor_word_list = list(set(neighbor_word_list + get_neighbor_words_list(sentence, language)))\n senseid_list = list(senseid_set)\n training_output[lexelt][\"neighbor_word_list\"] = neighbor_word_list\n _4c_4d_feature = extract_4c_4d_feature(neighbor_word_list, senseid_list, inst_list, language)\n training_output[lexelt][\"4c_4d_feature\"] = _4c_4d_feature\n x_list = []\n y_list = []\n for inst in inst_list:\n y = inst.getElementsByTagName('answer')[0].getAttribute('senseid')\n if ignore_U_activated and y.__eq__('U'):\n continue\n y_list.append(str(replace_accented(y)))\n x = extract_vector(inst, neighbor_word_list, _4c_4d_feature, language)\n x_list.append(x)\n # for each node, build a classifier\n if language.__eq__(\"English\"):\n #clf = RandomForestClassifier(n_estimators=10) 58.9\n #clf = SGDClassifier() 61.1\n #clf = MultinomialNB() 62.9\n #clf = BernoulliNB() 55.8\n #clf = Perceptron() 60.4\n #clf = PassiveAggressiveClassifier() 62.1\n #clf = RidgeClassifier() 62.7\n #clf = svm.LinearSVC() 62.5\n #clf = KNeighborsClassifier()\n #clf = GaussianNB()\n clf = MultinomialNB(alpha=0.95) #+ alpha=0.95 + k=13 + left_right_order + vector_0_1 off = 64.7\n elif language.__eq__(\"Spanish\"):\n #clf = svm.LinearSVC() 82.0\n #clf = MultinomialNB() 82.2\n #clf = RidgeClassifier() 81.5\n #clf = PassiveAggressiveClassifier() 81.9\n #clf = BernoulliNB() 72.4\n clf = MultinomialNB(alpha=0.50) #0.25:82.6 0.4:83.1 0.45:83.2 0.5: 83.2 0.55:83.2 0.6:82.8 0.75:82.7\n elif language.__eq__(\"Catalan\"):\n #clf = svm.LinearSVC() # 82.8\n #clf = MultinomialNB() # 80.8\n #clf = RidgeClassifier() 82.6\n #clf = svm.LinearSVC(C=1.5) 82.9\n clf = MultinomialNB(alpha=0.25) # 0.5:84.3 0.35:84.6 0.3:84.8 0.25:85.4 0.2:85.3\n else:\n clf = svm.LinearSVC()\n clf.fit(x_list, y_list)\n training_output[lexelt][\"Classifier\"] = clf\n print \"Models trained.\"\n return training_output",
"def extract_features(data, stopwords=STOPWORDS):\n tags = set()\n docs = []\n for document in data:\n doc_data = dict()\n doc_data['pmid'] = document['sourceid']\n text = document['text']\n\n # Insert PubTator annotations inside abstract\n denotations = document['denotations']\n sorted_denotations = []\n for denotation in denotations:\n begin = denotation['span']['begin']\n end = denotation['span']['end']\n obj = denotation['obj']\n for c in punctuation:\n obj = obj.replace(c, '')\n tags.add(obj)\n doc_data[obj] = doc_data.get(obj,0)+1\n sorted_denotations.append([begin,end,obj])\n sorted_denotations.sort()\n sorted_denotations.reverse()\n for begin, end, obj in sorted_denotations:\n text = text[:begin] + obj + ' ' + text[end:]\n\n doc_data['text'] = clean_text(text, stopwords)\n docs.append(doc_data)\n\n return docs",
"def nlp_parse(self, input):\n resp = {}\n resp['type'] = 'nomatch'\n VDB_set = {}\n WP_set = {}\n tagset = self.build_tagset(input)\n resp['words'] = self.build_keywords(tagset)\n w = resp['words']\n\n if not w:\n if constants.DEBUG:\n log.debug(\"No words: \" + str(resp))\n return resp\n\n # store nouns\n NN_set = set(w.get('NN', []))\n\n # matches a request for a list\n if 'list' in NN_set \\\n or 'List' in w.get('NNP', []):\n resp['count'] = w.get('CD', [constants.LIST_COUNT])[0]\n resp['type'] = 'show-list'\n if set(['serving', 'serve']) & set(w.get('VBG', [])):\n resp['meal'] = (NN_set & constants.MEALS_SET).pop()\n if 'in' in w.get('IN', []):\n resp['zone'] = w.get('NNP', [None])[0]\n if 'close' in w.get('VBD', []) \\\n or 'close' in w.get('JJ', []) \\\n or 'close' in NN_set:\n resp['distance'] = True\n return resp\n\n # finds neighborhood\n for word in tagset:\n if word[1] == 'VBD':\n VDB_set = word[0]\n for word in tagset:\n if word[1] == 'WP':\n WP_set = word[0]\n if 'neighborhood' in VDB_set and 'what' in WP_set:\n if w.get('NNP', [None])[0]: \n r_name = w.get('NNP', [None])[0]\n else :\n return resp\n\n r_name = w.get('NNP', [None])[0] \n resp['restaurant'] = r_name\n resp['type'] = 'name-zone'\n return resp\n\n # matches \"how expensive it is\" and \"is it expensive\"\n if 'expensive' in w.get('JJ', ()):\n if w.get('NNP', [None])[0]: \n r_name = w.get('NNP', [None])[0]\n else :\n return resp\n\n r_name = w.get('NNP', [None])[0] \n resp['restaurant'] = r_name\n resp['type'] = 'name-price'\n return resp\n\n if 'between' in w.get('IN', ()) \\\n or 'price' in NN_set:\n price_range = w.get('CD', ())\n\n # price between a and b\n # require at least 2 numerals\n if len(price_range) >= 2:\n resp['min'] = min(map(int, price_range))\n resp['max'] = max(map(int, price_range))\n resp['type'] = 'list-price-range'\n return resp\n\n # price of exactly a\n if len(price_range) > 0:\n price_range = w.get('CD', ())\n resp['price'] = min(price_range)\n resp['type'] = 'list-price-single'\n return resp\n\n\n # need to merge NN and JJ for this step\n w['NNJJ'] = NN_set | set(w.get('JJ', []))\n meal = constants.MEALS_SET & w['NNJJ']\n if meal:\n resp['type'] = 'list-meal-single'\n resp['meal'] = meal.copy().pop()\n return resp\n\n # matches a quality list\n if 'quality' in NN_set and \\\n (constants.QUALITIES & w['NNJJ']) and \\\n (set(['food', 'service']) & w['NNJJ']):\n resp['degree'] = (constants.QUALITIES \\\n & w['NNJJ']).pop()\n resp['type'] = 'list-quality-' + \\\n (set(['food', 'service']) & w['NNJJ']).pop()\n return resp\n\n # matches a phone number request\n if NN_set & constants.PHONE_KEYWORDS:\n r_name = w.get('NNP', [None])[0] or \\\n w['NN'][-1]\n\n resp['restaurant'] = r_name\n resp['type'] = 'name-phone'\n return resp\n\n # matches a single meal request\n if NN_set & constants.MEALS_SET:\n r_name = w.get('NNP', [None])[0] or \\\n w['NN'][-1]\n\n resp['restaurant'] = r_name\n resp['type'] = 'name-meal'\n resp['meal'] = word.lower()\n return resp\n\n # matches a request for an address\n if 'address' in NN_set:\n r_name = w.get('NNP', [None])[0] or \\\n w['NN'][-1]\n resp['restaurant'] = r_name\n resp['type'] = 'name-location'\n return resp\n\n # matches a restaurant in neighborhood\n if 'in' in w.get('IN', []) and \\\n NN_set & constants.NAME_KEYWORDS:\n r_name = w.get('NNP', [None])[0]\n if not r_name:\n for kw in reversed(w['NN']):\n if kw not in constants.NAME_KEYWORDS:\n r_name = kw\n break\n if r_name:\n resp['type'] = 'random-city'\n resp['city'] = string.capitalize(r_name)\n 
return resp\n\n # matches a request for a cuisine type\n if NN_set & constants.NAME_KEYWORDS:\n r_name = w.get('NNP', [None])[0]\n if not r_name:\n for kw in reversed(w['NN']):\n if kw not in constants.NAME_KEYWORDS:\n r_name = kw\n break\n if r_name:\n resp['type'] = 'random-cuisine'\n resp['cuisine'] = string.capitalize(r_name)\n return resp\n\n # merge all numerals together for list-mode\n w['CDLS'] = set(w.get('CD', []) + w.get('LS', []))\n if w['CDLS']:\n w_copy = w['CDLS'].copy()\n while w_copy:\n try:\n resp['listitem'] = int(w_copy.pop())\n resp['type'] = 'single-listitem'\n return resp\n except:\n pass\n\n # distance / how far\n if ('far' in w.get('RB', [])\n and 'how' in w.get('WRB', [])\n ) or ('distance' in NN_set):\n r = w.get('NNP', [None])[0]\n if r:\n resp['type'] = 'name-distance'\n resp['restaurant'] = string.capitalize(r)\n return resp\n\n if constants.DEBUG:\n log.debug(resp)\n return resp",
"def main(url):\n \n words = fetch_words(url)\n print_items(words)",
"def parse_and_analyse_corenlp_coref(input_dir = 'CoreNLP_coref_anno/dev', gold_annotations_folder = '../../../data/baseline/dev'):\n\tmentions = []\n\n\n\twith open('coref_analyse_output.txt', 'w') as out_file:\n\n\t\tfor file_name in os.listdir(input_dir):\n\t\t\tif re.match(r'(.+)\\.xml', file_name)!= None:\n\t\t\t\tokr_graph = load_graph_from_file(gold_annotations_folder + '/'+ re.match(r'(.+)\\.xml', file_name).group(1)[:-4]+'.xml')\n\n\t\t\t\ttree = ET.parse(input_dir + '/' + file_name)\n\t\t\t\tdocument = tree.getroot()[0]\n\t\t\t\tsentence_wise_predicted_mentions = defaultdict(list)\n\t\t\t\tsentence_wise_gold_mentions = defaultdict(list)\n\t\t\t\tpredicted_coref_dict = defaultdict(list)\n\t\t\t\tgold_coref_dict = defaultdict(list)\n\n\t\t\t\tcoref_node = document.find('coreference')\n\t\t\t\t\n\t\t\t\t\n\t\t\t\tfor coref_id, coref_chain in enumerate(coref_node):\n\t\t\t\t\tfor mention in coref_chain:\n\t\t\t\t\t\tsent_num = int(mention[0].text)\n\t\t\t\t\t\tstart = int(mention[1].text)-1\n\t\t\t\t\t\tend = int(mention[2].text)-1\n\t\t\t\t\t\ttext = mention[4].text\n\t\t\t\t\t\tsentence_wise_predicted_mentions[sent_num].append({\"indices\":range(start, end),\"coref\":coref_id+1, \"text\":text})\n\t\t\t\t\t\tpredicted_coref_dict[coref_id+1].append({\"indices\":range(start, end), \"s_num\":sent_num, \"text\":text })\n\n\n\t\t\t\t\n\t\t\t\t\t\t\t\t\n\t\t\t\tfor entity in okr_graph.entities.values():\n\t\t\t\t\tfor mention in entity.mentions.values():\n\t\t\t\t\t\tsentence_wise_gold_mentions[mention.sentence_id].append({\"indices\":mention.indices,\"coref\":entity.id, 'text':mention.terms})\n\n\t\t\t\tprint'###'+ file_name + '\\n'\t\n\t\t\t\tfor sentence_id, sentence in enumerate(okr_graph.sentences.values()):\n\t\t\t\t\tprint 'Sentence: ', ' '.join(sentence) \n\t\t\t\t\tprint 'Predicted entities: ', [element['text'] for element in sentence_wise_predicted_mentions[sentence_id+1]]\n\t\t\t\t\tprint 'Gold entities: ', [element['text'] for element in sentence_wise_gold_mentions[sentence_id+1]]\n\t\t\t\t\tprint ' '\n\t\t\t\n\t\t\t\tprint \"Not printing singletons\"\n\t\t\t\tprint('\\nThe predicted clusters: ')\n\t\t\t\tfor cluster_id, cluster in enumerate(predicted_coref_dict.values()):\n\t\t\t\t\tprint('Cluster id: ', cluster_id +1)\n\t\t\t\t\tprint([[okr_graph.sentences[mention['s_num']][index] for index in mention['indices']]for mention in predicted_coref_dict[cluster_id+1]] )\n\n\t\t\t\tprint('\\n The Gold clusters:')\t\n\t\t\t\tfor entity in okr_graph.entities.values():\n\t\t\t\t\tprint('cluster_id: ', entity.id )\n\t\t\t\t\tprint([mention.terms for mention in entity.mentions.values()])\n\n\t\t\t\tprint '**********'",
"def _get_nouns(self, review):\n review_features = []\n for sent in review:\n doc = self.nlp(sent)\n # noun_phrase = [np.text for np in doc.noun_chunks]\n nouns = [unicode(lemma(str(word).lower())) for word in doc if word.pos == NOUN]\n review_features.append(nouns)\n return review_features",
"def main(url, inputFile, directory, rss, opml, output, verbose, debug, relevanceAlgorithm):\n\n if (len(argv) < 2):\n print(\n \"Usage: python3 ctirt.py [options] [target files]\\n\\n Use --> ctirt.py --help for more details...\"\n )\n exit(1)\n\n if (verbose and url) or (url and debug):\n print(\"URL is mutually exclusive with verbose and debug\")\n exit(1)\n \n \n \n # INITIALIZE DOCUMENTS LIST\n documents = [] # list of document objects\n\n # OPML FILE INPUT\n\n if opml:\n printLogo()\n print(\"\\033[0;34m\" + \"Parsing provided opml file: \" + \"\\033[0m\" + \"\\033[1m\" + opml + \"\\033[0m\")\n\n rssList = parser.parseOpml(opml)\n\n for rss in rssList:\n print(\"Parsing RSS feed: \" + \"\\033[1m\" + rss + \"\\033[0m\")\n\n feed = parser.parseRss(rss)\n \n if not verbose:\n # progress bar\n progressBar = IncrementalBar('\\tParsing URLs in RSS feed:', max=len(feed.entries), suffix='%(index)d / %(max)d')\n\n for entry in feed.entries:\n document = Document()\n\n document.path = entry.link\n \n document.name, document.text = parser.parseUrl(document.path)\n \n document.wordCount = parser.countWords(document.text)\n \n # Add document object to list, add document wordcount to list\n documents.append(document)\n\n if not verbose:\n progressBar.next()\n else:\n print(\"Done.\")\n \n print(\"\\n\\t\" + \"\\033[0;32m\" + u'\\u2713' + \" Done parsing RSS feed: \" + \"\\033[0m\" + \"\\033[1m\" + rss + \"\\033[0m\")\n # RSS INPUT\n\n elif rss:\n printLogo()\n print(\"Parsing\", rss)\n\n feed = parser.parseRss(rss)\n if not verbose:\n # progress bar\n progressBar = IncrementalBar('Parsing URLs', max=len(feed.entries), suffix='%(index)d / %(max)d')\n\n for entry in feed.entries:\n document = Document()\n\n document.path = entry.link\n \n document.name, document.text = parser.parseUrl(document.path)\n \n document.wordCount = parser.countWords(document.text)\n \n # Add document object to list, add document wordcount to list\n documents.append(document)\n\n if not verbose:\n progressBar.next()\n else:\n print(\"Done.\")\n \n if not verbose:\n progressBar.finish()\n\n print(\"Done.\")\n \n # URL INPUT\n \n elif url:\n printLogo()\n print(\"Parsing...\")\n\n document = Document()\n\n document.path = url\n \n document.name, document.text = parser.parseUrl(url)\n \n document.wordCount = parser.countWords(document.text)\n \n # Add document object to list, add document wordcount to list\n documents.append(document)\n\n print(\"Done.\")\n\n \n # SINGLE FILE INPUT\n\n elif inputFile:\n printLogo()\n print(\"Parsing...\")\n\n document = Document()\n\n document.name = os.path.splitext(inputFile)[0]\n document.path = inputFile\n\n if inputFile.lower().endswith(\".pdf\"): # PDF Parsing\n document.text = parser.parsePdf(inputFile)\n elif inputFile.lower().endswith(\".html\"): # HTML Parsing\n document.text = parser.parseHtml(inputFile)\n\n document.wordCount = parser.countWords(document.text) # Document word count\n\n # Add document object to list, add document wordcount to list\n documents.append(document)\n\n print(\"Done.\")\n\n\n # DIRECTORY INPUT\n\n elif directory:\n printLogo()\n if not verbose:\n # progress bar\n progressBar = IncrementalBar('Parsing', max=len(\n os.listdir(directory)), suffix='%(index)d / %(max)d')\n\n # Loop through files in directory\n for inputFile in os.scandir(directory):\n beginningTime = time.time()\n\n if verbose:\n timeStamp = time.time()\n print(\"***[\" + inputFile.name[0:50] + \"]***\", \"is currently being parsed\",\n \"-->\", (timeStamp - beginningTime), \"seconds have 
elapsed...\")\n\n document = Document()\n\n document.name = os.path.splitext(inputFile.name)[0]\n document.path = inputFile.path\n\n if verbose:\n print(inputFile.name)\n\n if inputFile.name.lower().endswith(\".pdf\"): # PDF Parsing\n document.text = parser.parsePdf(inputFile.path)\n elif inputFile.name.lower().endswith(\".html\"): # HTML Parsing\n document.text = parser.parseHtml(inputFile.path)\n\n document.wordCount = parser.countWords(\n document.text) # Document word count\n\n # Add document object to list, add document wordcount to list\n documents.append(document)\n\n if not verbose:\n progressBar.next()\n else:\n print(\"Done.\")\n \n if not verbose:\n progressBar.finish()\n\n\n # BASIC RELEVANCE CALCULATION\n\n for document in documents:\n document.relevance = relevance.computeBasicRelevance(document.text)\n\n\n # TF-IDF RELEVANCE CALCULATION\n\n if directory and (verbose or debug or relevanceAlgorithm == \"tfidf\"):\n dirWordCount = parser.countDirectoryWords(documents)\n\n wordList = {}\n with open('./assets/wordlist.json') as f:\n jsonWordList = load(f)\n for pair in jsonWordList.items():\n wordList[pair[0]] = float(pair[1])\n\n for document in documents:\n # TODO Figure out how to run - fix arguments (ex. import wordlist), make debug work better by allowing it to work not in verbose\n idfs = relevance.computeIDF(documents, dirWordCount)\n print(\"**************** IDFS ****************\")\n print(idfs)\n tf = relevance.computeTF(wordList, document.wordCount)\n print(\"**************** TF DICT ****************\")\n print(tf)\n\n tfidf = relevance.computeTFIDF(tf, idfs)\n print(\"**************** TF-IDF Values ****************\")\n print(tfidf)\n\n relevanceScore = 0\n\n for word, val in tfidf.items():\n relevanceScore += val\n \n document.tfidf = relevanceScore * 100\n\n\n # OUTPUT SECTION\n\n documents.sort(key=lambda document: document.relevance, reverse=True)\n\n table = []\n tableHeaders = []\n outputData = []\n # print(\"**************** RELEVANCE SCORES ****************\")\n for document in documents:\n outputData.append({'name': document.name[0:30], 'relevance': document.relevance,'path': document.path, 'topTerms': list(document.wordCount.items())[:10]})\n if url or rss or opml: \n table.append([document.name[0:30], document.relevance, document.path])\n tableHeaders = [\"Document\",\"Relevance Score\",\"URL\"]\n elif not verbose:\n table.append([document.name[0:70], document.relevance])\n tableHeaders=[\"Document\",\"Relevance Score\"]\n elif verbose and directory:\n table.append([document.name[0:70], document.relevance, document.tfidf, list(document.wordCount.items())[:10]])\n tableHeaders=[\"Document\",\"Relevance Score\", \"TF-IDF Score\", \"Top Terms\"]\n else:\n table.append([document.name[0:70], document.relevance, list(document.wordCount.items())[:10]])\n tableHeaders=[\"Document\",\"Relevance Score\", \"Top Terms\"]\n\n print(tabulate(table, headers=tableHeaders, tablefmt=\"fancy_grid\"))\n\n # OUTPUT TO FILE\n\n with open(output, 'w', encoding='utf-8') as o:\n dump(outputData, o, indent=3)",
"def main(url):\n words = fetch_words(url)\n print_items(words)",
"def tokenize(doc):\n text = doc\n doc = doc.lower()\n doc = re.sub('[,;]', ' ', doc)\n doc = re.split('\\s+', doc)\n doc = sorted(list(filter(None, doc)))\n ent = le.stanfordTagger(text)\n print(ent)\n l = []\n for item in ent:\n if ent[item] in ['LOCATION', 'GPE','PERSON']:\n l.append(item)\n ent = l#ent = sorted(list(le.stanfordTagger(text).keys()))\n #print(ent)\n #ent = [e.lower() for e in ent]\n crime_type = fileCrimeClassify.extractCrimeWord(text, returnOnlyLabels=True)\n crime_type = [c.lower() for c in crime_type]\n #print(crime_type + ent)\n #print(doc)\n return doc, ent + crime_type",
"def main(url):\n words = fetch_words(url)\n\n print_items(words)",
"def getFeatures2(url, label):\r\n result = []\r\n url = str(url)\r\n \r\n #add the url to feature set\r\n result.append(url)\r\n \r\n #parse the URL and extract the domain information\r\n path = urlparse(url)\r\n ext = tldextract.extract(url)\r\n \r\n #counting number of dots in subdomain \r\n result.append(countdots(ext.subdomain))\r\n \r\n #checking hyphen in domain \r\n result.append(CountSoftHyphen(path.netloc))\r\n \r\n #length of URL \r\n result.append(length(url))\r\n \r\n #checking @ in the url \r\n result.append(CountAt(path.netloc))\r\n \r\n #checking presence of double slash \r\n result.append(CountDSlash(path.path))\r\n \r\n #Count number of subdir \r\n result.append(countSubDir(path.path))\r\n \r\n #number of sub domain \r\n result.append(countSubDomain(ext.subdomain))\r\n \r\n #length of domain name \r\n path2 = urlparse(url_format(url))\r\n result.append(len(path2.netloc)) \r\n \r\n #count number of queries \r\n result.append(len(path.query))\r\n \r\n #Adding domain information\r\n \r\n #if IP address is being used as a URL \r\n result.append(containsip(ext.domain))\r\n \r\n #presence of Suspicious_TLD\r\n result.append(1 if ext.suffix in Suspicious_TLD else 0)\r\n \r\n #append default for create_age(months)country\r\n result.append(-1)\r\n \r\n #append default for expiry_age(months)\r\n result.append(-1)\r\n \r\n #append default for update_age(days)\r\n result.append(-1)\r\n \r\n #append default for country\r\n result.append('None')\r\n \r\n #append extension\r\n path = urlparse(url)\r\n \r\n if get_ext(path.path) == '':\r\n result.append('None')\r\n else:\r\n result.append(get_ext(path.path))\r\n \r\n #append label\r\n result.append(str(label))\r\n \r\n return result"
]
| [
"0.61961323",
"0.6138691",
"0.55915344",
"0.55132866",
"0.540919",
"0.5384903",
"0.5370869",
"0.5334106",
"0.5277659",
"0.5261717",
"0.5182233",
"0.51706254",
"0.5137889",
"0.50874555",
"0.50837165",
"0.50661874",
"0.5056941",
"0.50455534",
"0.4996862",
"0.4972436",
"0.49688175",
"0.49559432",
"0.4950781",
"0.49396268",
"0.49269733",
"0.4925733",
"0.49223816",
"0.4894507",
"0.48925436",
"0.48896813"
]
| 0.6626662 | 0 |
readPDF opens a PDF as an array to be handled by OpenCV. {{{ When the PDF file has multiple pages, the first page is picked automatically. | def readPDF(infile, width, grayscale=True):
#To open a pdf file.
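    #convert_from_path (pdf2image) rasterizes every PDF page into a PIL image at the given dpi.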
imgAllPages = convert_from_path(infile, dpi=100)
    img = imgAllPages[0] #pick up the first page
    img = np.asarray(img)
    img = img.take([2,1,0], axis=2) #reorder color channels (PIL gives RGB; OpenCV expects BGR)
#To scale image to designated width.
if img.shape[1] != width:
height = int(round(img.shape[0] / img.shape[1] * width))
img = cv2.resize(img, (width, height),
interpolation = cv2.INTER_CUBIC)
    #To convert image to grayscale.
if grayscale:
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
return img
#}}} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_pdf(exp_type='c'):\n # shape file path\n file_name = pdffile_exptype[exp_type]\n file_path_name = os.path.join('data', 'pdf', file_name)\n pdf_file = resource_filename(__name__, file_path_name)\n\n return h5py.File(pdf_file, 'r')",
"def parse_pdf(url):\n pdf_data = urllib2.urlopen(Request(url)).read()\n # Cast to StringIO object\n from StringIO import StringIO\n memory_file = StringIO(pdf_data)\n\n # Create a PDF parser object associated with the StringIO object\n parser = PDFParser(memory_file)\n\n # Create a PDF document object that stores the document structure\n document = PDFDocument(parser)\n\n # Define parameters to the PDF device object\n rsrcmgr = PDFResourceManager()\n retstr = StringIO()\n laparams = LAParams()\n pageno = 1\n codec = 'utf-8'\n\n # Create a PDF device object\n device = TextConverter(rsrcmgr, retstr, codec=codec, pageno=pageno,\n laparams=laparams)\n\n # Create a PDF interpreter object\n interpreter = PDFPageInterpreter(rsrcmgr, device)\n\n # Process each page contained in the document\n text = ''\n for page in PDFPage.create_pages(document):\n interpreter.process_page(page)\n text = retstr.getvalue()\n\n vol = get_vol(text)\n no = get_no(text)\n return vol, no",
"def read_pdf_file(file):\n return pdftotext.PDF(file)",
"def read_pdf(\n pdf_file: UploadFile = File(...),\n settings: config.Settings = Depends(get_settings),\n db: Session = Depends(get_db),\n authorization: str = Header(None),\n):\n if authorization != settings.upload_secret:\n raise HTTPException(401, \"Operação inválida!\")\n\n file = pdf_file.file\n content = file.read()\n\n # Builds the path\n target_path = Path(settings.pdf_storage_path)\n filename = target_path.joinpath(pdf_file.filename)\n save_pdf(content, filename)\n\n db_results = read_results(db, PDF_Filename=pdf_file.filename)\n\n if db_results:\n db_result = db_results[0]\n user, password = create_patient_user(\n db,\n cpf=db_result.CPF,\n name=f\"{db_result.prMotherFirstname} {db_result.prMotherSurname}\",\n )\n\n sms_message = f\"{user.name}, o resultado do exame do pézinho está pronto. \"\n\n if password:\n sms_message += f\"Faça login com seu cpf e a senha {password}\"\n\n number = db_result.ptnPhone1 or db_result.ptnPhone2\n\n if number:\n sms_utils.send_sms(number, sms_message)\n else:\n log(\n f\"[PDF] Arquivo {pdf_file.filename} importado mas sem \"\n \"celulares associados. SMS não será enviado.\"\n )\n else:\n log(\n f\"[PDF] Arquivo {pdf_file.filename} importado mas sem \"\n \"resultado associado. SMS não será enviado.\"\n )\n\n log(\"[PDF] PDF foi importado.\", db)\n\n return PDFProcessed(\n length=len(content), filename=pdf_file.filename, sha256=sha256(filename)\n )",
"def fetch_pdf(url, browser):\n\tpass\n\n\t# grab link page\n\n\t# search soup for pdf file\n\n\t# grab pdf file and return it",
"def load_pdf(self, env=\"default\", debug=()):\n os.makedirs(\"txt\", exist_ok=True)\n if env is \"default\": # default python path\n call([executable,\n os.path.join(f\"{exec_prefix}\", \"Scripts\", \"pdf2txt.py\"),\n os.path.join(\"pdf\", f\"{self.pdf_filename}\"),\n os.path.join(f\"-otxt\", f\"{self.txt_filename}\")])\n if env is \"venv\": # virtual environment\n call([os.path.join(\"venv\", \"Scripts\", \"python.exe\"),\n os.path.join(\"venv\", \"Scripts\", \"pdf2txt.py\"),\n os.path.join(\"pdf\", f\"{self.pdf_filename}\"),\n os.path.join(f\"-otxt\", f\"{self.txt_filename}\")])\n with open(os.path.join(\"txt\", f\"{self.txt_filename}\"), \"r\", encoding=\"utf-8\") as file:\n self.paragraphs = [paragraph.rstrip('\\n') for paragraph in file]\n os.remove(os.path.join(\"txt\", f\"{self.txt_filename}\"))\n if debug:\n for counter, paragraph in enumerate(self.paragraphs):\n try:\n if int(debug[0]) < counter < int(debug[1]):\n print(counter, paragraph)\n except TypeError:\n print(\"Debug must be a (x,y) touple.\")",
"def parsepdf(intext): # type: (str) -> str\n\n pdfbinarydata = base64.b64decode(intext.strip())\n pdfFileObj = io.BytesIO()\n pdfFileObj.write(pdfbinarydata)\n pdfReader = PyPDF2.PdfFileReader(pdfFileObj)\n extractedText = ''\n for i in range(0, pdfReader.numPages):\n pageObj = pdfReader.getPage(i)\n extractedText = extractedText + pageObj.extractText()\n\n return extractedText.strip()",
"def process_pdf(path):\r\n str = \"\"\r\n try:\r\n pages = layout_scanner.get_pages(path) \r\n i = 0\r\n l = len(pages)\r\n while i < l: \r\n str += pages[i]\r\n i += 1\r\n except Exception, e:\r\n return g_error_template % e, \"\" \r\n \r\n return \"\", str",
"def get_pdf_pages(pdf: Path) -> int:\n res = run(('qpdf', '--show-npages', pdf), check=True, stdout=subprocess.PIPE)\n npages = int(res.stdout.strip())\n return npages",
"def pdf_miner_extract(pdf_file, password='', pages=0):\n pdf_resource_manager = PDFResourceManager()\n output_stream = StringIO()\n device = TextConverter(pdf_resource_manager, output_stream,\n laparams=LAParams(char_margin=0.8, detect_vertical=False))\n file_stream = open(pdf_file, 'rb')\n interpreter = PDFPageInterpreter(pdf_resource_manager, device)\n pages_set = []\n for page in PDFPage.get_pages(file_stream, set(), pages, password):\n interpreter.process_page(page)\n pages_set.append(output_stream.getvalue())\n output_stream.truncate(0)\n file_stream.close()\n device.close()\n output_stream.close()\n return pages_set",
"def test_read_text_of_non_indexed_pdf_without_ocr(pdf_path):\n pdf = PdfReader(path=pdf_path)\n assert pdf.read_text(allow_ocr=False) is None",
"def process_pdf(filename, qualies_only=False):\n if filename.endswith('.txt'):\n f = open(filename)\n text = f.read()\n f.close()\n else:\n text = subprocess.check_output([\"pdftotext\", \"-layout\",\n filename, \"-\"]).decode('utf-8')\n\n print(\"Processing {}...\".format(filename))\n\n pages = text.split(chr(12))\n print (\"{} Pages\".format(len(pages)))\n md = []\n qd = []\n for p in pages:\n if ('MAIN DRAW SINGLES' in p or 'Singles Championship' in p\n or 'Ladies\\' Singles' in p):\n md += [p]\n elif ('QUALIFYING SINGLES' in p or 'Qualifying Singles' in p\n or 'Qualifying Ladies\\' Singles' in p):\n qd += [p]\n elif ('Qualifiers' in p and not 'Doubles' in p):\n qd += [p]\n\n md_result = None\n qd_result = None\n\n meta = None\n if md and not qualies_only:\n md_result = drawsheet_process(chr(12).join(md))\n meta = md_result[2]\n\n # copy the metadata to the quaily draw if possible\n if qd:\n qd_result = drawsheet_process(chr(12).join(qd), meta, True)\n\n return (md_result, qd_result)",
"def extract_pages(pdf):\n parser = PDFParser(pdf)\n document = PDFDocument(parser)\n\n if not document.is_extractable:\n return\n\n resource_manager = PDFResourceManager()\n device = PDFPageAggregator(resource_manager)\n interpreter = PDFPageInterpreter(resource_manager, device)\n\n for page in PDFPage.create_pages(document):\n interpreter.process_page(page)\n yield device.get_result()",
"def processPdf(self, pdf_path: str) -> (list, list):\n hocr_list = []\n images = []\n numPages = self.getNumberPages(pdf_path)\n for initalpage in range(1, numPages+self.batch, self.batch):\n pages = pdf2image.convert_from_path(pdf_path,\n first_page=initalpage,\n last_page=min(\n initalpage+self.batch-1, numPages),\n output_folder=self.images_path,\n grayscale='true',\n fmt='tif')\n for page in pages:\n hocr_bytes = pytesseract.image_to_pdf_or_hocr(page, \n lang='por',\n extension='hocr',\n config='--psm 1')\n hocr_list.append(hocr_bytes)\n images.append(page.filename)\n page.close()\n return hocr_list, images",
"def from_pdf(path):\n raw_regexes = [\n r\"\"\"<prism:doi>(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)</prism:doi>\"\"\",\n r\"\"\"[\"'](?:doi|DOI):(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)[\"']\"\"\",\n r\"\"\"URI\\s*\\(https?://doi.org/(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)\\)\\s*>\"\"\",\n r\"\"\"URI\\s*\\((?:https?://)?www.nature.com/doifinder/(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)\\)\\s*>\"\"\",\n # This one works for some ACIE papers, but is too risky. It matches\n # against DOIs of cited papers too. Better to use WPS-ARTICLEDOI.\n # r\"\"\"/URI\\(https?://(?:dx)?.doi.org/(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)\\)\"\"\",\n r\"\"\"/WPS-ARTICLEDOI\\s*\\((10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)\\)\"\"\",\n r\"\"\"\\((?:doi|DOI):\\s*(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)\\)\"\"\",\n r\"\"\"<rdf:li.+>(?:doi|DOI):(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)</rdf:li>\"\"\",\n ]\n regexes = [re.compile(regex) for regex in raw_regexes]\n class _DOIFound(Exception):\n pass\n\n p = Path(path)\n if not (p.exists() or p.is_file()):\n return _error(f\"from_pdf: invalid path '{p}' given\")\n\n strings = subprocess.Popen([\"strings\", p], stdout=subprocess.PIPE)\n grep = subprocess.Popen([\"grep\", \"-i\", \"doi\"], stdin=strings.stdout, stdout=subprocess.PIPE)\n try:\n for line in grep.stdout:\n line = line.decode(_g.gpe).strip()\n for regex in regexes:\n match = regex.search(line)\n if match:\n raise _DOIFound(match.group(1))\n except _DOIFound as e:\n doi = e.args[0]\n # Prune away any extra parentheses at the end.\n nopen = doi.count('(')\n nclose = doi.count(')')\n if nopen != nclose:\n doi = doi.rsplit(')', maxsplit=(nclose - nopen))[0]\n # Report success.\n return DOI(doi)\n else:\n return _error(f\"from_pdf: could not find DOI from '{p}'\")",
"def readFile(self):\n with pdfplumber.open(self.path) as pdf:\n first_page = pdf.pages[0]\n text = first_page.extract_text()\n text = text.split('\\n')\n return processText(text)",
"def pdf_to_test(file_name):\n #Opening, reading and parsing a pdf file to string\n pdfFileObj = open(file_name, 'rb')\n pdfReader = PyPDF2.PdfFileReader(pdfFileObj)\n pdf_string = pdfReader.getPage(0).extractText()\n \n #Find the RechnungsNr.\n start_of_RN = pdf_string.find(\"No.Invoice Date\") + len(\"No.Invoice Date\")\n rechnungs_nr = pdf_string[start_of_RN:start_of_RN+7]\n \n #Find the address\n start_of_address = pdf_string.find(\"Invoice Address\") + len(\"Invoice Address\")\n end_of_address = pdf_string.find(\"Payment Terms:\")\n address = pdf_string[start_of_address:end_of_address]\n \n #Liefermonat commenrs\n start_of_contract = pdf_string.find(\"Company Name / Line of business\") + len(\"Company Name / Line of business\")\n end_of_contract = pdf_string.find(\"Summary of Charges\")\n contract = pdf_string[start_of_contract:end_of_contract]\n \n #Nettobetrag - read base charge\n start_of_netto = pdf_string.find(\"Base Charges\") + len(\"Base Charges\")\n end_of_netto = pdf_string.find(\"Click Charges - Color\")\n nettobetrag = pdf_string[start_of_netto:end_of_netto]\n \n pdfFileObj.close()\n \n return pdfFileObj.name, rechnungs_nr, address, contract, nettobetrag",
"def first_min_pdf(pdf):\n\t# to be continued in the future if necessary for exact accuracy\n\treturn None",
"def pdf_file(filename):\n return fnmatch(filename, '*.pdf')",
"def pdf(self, identifier):\n return self.client.request_with_method(Methods.PDF % (self.name, identifier,))",
"def test_pdf_with_many_pages_requires_ocr(ocr_many_pages):\n pdf = PdfReader(path=ocr_many_pages)\n assert pdf.read_text(allow_ocr=False) is None",
"def using_multiprocess(pdfname):\n\n # logs.info('worker {} started with pdf file {}'.format(__name__, pdfname))\n\n with Pool(PROCESS_POOL_SIZE) as pool:\n iterable = yield_text_from_pdf(pdfname)\n results = pool.map(process_worker, iterable)\n\n return results",
"def open_pdf(directory):\n for sub_folder in os.listdir(directory):\n sub_directory = os.path.join(directory,sub_folder)\n for pdf_file in os.listdir(sub_directory):\n full_path = os.path.join(sub_directory,pdf_file)\n try:\n pdf_content = pdf_to_txt(full_path)\n if isinstance(pdf_content, str) and len(pdf_content) > 1000:\n yield full_path, pdf_content\n else:\n print('No text found, skipping \"{}\"..'.format(pdf_file))\n continue\n except Exception as e:\n print(e)\n print('Failed to parse \"%s\"' % pdf_file)",
"def get_pdf(self, language='en-US'):\r\n self.require_item()\r\n\r\n headers = {'Accept': 'application/pdf', 'Accept-Language': language}\r\n request = http.Request('GET', self.get_url(), {}, headers)\r\n\r\n return request, parse_passthrough",
"def convert_pdf_to_text(pdf_path):\n process_id = os.getpid()\n resource_manager = PDFResourceManager()\n output = StringIO.StringIO()\n laparams = LAParams(detect_vertical=True)\n device = TextConverter(\n resource_manager,\n output,\n codec='utf-8',\n laparams=laparams\n )\n interpreter = PDFPageInterpreter(resource_manager, device)\n file_handler = file(pdf_path, 'rb')\n pages = PDFPage.get_pages(file_handler)\n\n for idx, page in enumerate(pages):\n print(\"Page \" + str(idx + 1), end='\\r')\n sys.stdout.flush()\n interpreter.process_page(page)\n print()\n\n data = output.getvalue()\n data = data.replace('\\n', ' ')\n data = data.replace('\\t', ' ')\n data = data.replace('\\r', ' ')\n data = data.replace('\\x0c', ' ')\n\n return data",
"def parse_pdf(\n pdf_path: str,\n fulltext: bool = True,\n soup: bool = False,\n return_coordinates: bool = True,\n grobid_url: str = GROBID_URL,\n):\n # GROBID URL\n if fulltext:\n url = \"%s/api/processFulltextDocument\" % grobid_url\n else:\n url = \"%s/api/processHeaderDocument\" % grobid_url\n\n files = []\n if return_coordinates:\n files += [\n (\"teiCoordinates\", (None, \"persName\")),\n (\"teiCoordinates\", (None, \"figure\")),\n (\"teiCoordinates\", (None, \"ref\")),\n (\"teiCoordinates\", (None, \"formula\")),\n (\"teiCoordinates\", (None, \"biblStruct\")),\n ]\n\n if isinstance(pdf_path, str):\n if validate_url(pdf_path) and op.splitext(pdf_path)[-1].lower() != \".pdf\":\n print(\"The input URL has to end with ``.pdf``\")\n parsed_article = None\n elif validate_url(pdf_path) and op.splitext(pdf_path)[-1] == \".pdf\":\n page = urllib.request.urlopen(pdf_path).read()\n parsed_article = requests.post(url, files={\"input\": page}).text\n elif op.exists(pdf_path):\n parsed_article = requests.post(\n url, files={\"input\": open(pdf_path, \"rb\")}\n ).text\n else:\n parsed_article = None\n elif isinstance(pdf_path, bytes):\n # assume that incoming is byte string\n parsed_article = requests.post(url, files={\"input\": pdf_path}).text\n else:\n parsed_article = None\n\n if soup and parsed_article is not None:\n parsed_article = BeautifulSoup(parsed_article, \"lxml\")\n return parsed_article",
"def pdf_open(self, arg=None):\n nums = self.selector.select_by_index(arg)\n if not nums or nums is None:\n self.visual.print(\"Need a selection to open.\")\n # arg has to be a single string\n if utils.has_none(nums):\n self.visual.print(\"Need a valid entry index.\")\n for num in nums:\n entry_id = self.reference_entry_id_list[num]\n entry = self.entry_collection.entries[entry_id]\n pdf_in_entry = self.get_editor().open_pdf(entry)\n if not pdf_in_entry and len(nums) == 1:\n if self.visual.yes_no(\"Search for pdf on the web?\"):\n self.search_web_pdf()",
"def choose_pdf(self, args, numpdf):\n filetypes = ((\"Portable Document Format (PDF)\", \"*.pdf\"), (\"All Files\", \"*\"))\n filename = fd.askopenfilename(title=\"Choose the PDF file\", initialdir=os.path.abspath(os.sep),\n filetypes=filetypes)\n if numpdf == 1:\n self.entry_firstPDF.delete(0, tk.END)\n self.entry_firstPDF.insert(0, filename)\n\n else:\n self.entry_secondPDF.delete(0, tk.END)\n self.entry_secondPDF.insert(0, filename)",
"def getPDFBuffer():\n buf, n = dislin.pdfbuf(0)\n buf, n = dislin.pdfbuf(n)\n return buf",
"def isPdf(page):\n return page['data'][:4] == '%PDF'"
]
| [
"0.6721945",
"0.6396926",
"0.63947225",
"0.62901264",
"0.6155976",
"0.6124074",
"0.61084336",
"0.608346",
"0.59989536",
"0.5985878",
"0.5940284",
"0.59238076",
"0.5911455",
"0.58851826",
"0.58835346",
"0.5843061",
"0.58381116",
"0.5762712",
"0.5747169",
"0.5674462",
"0.56550306",
"0.56419057",
"0.563572",
"0.56355417",
"0.5633008",
"0.56136334",
"0.5601792",
"0.55914086",
"0.5580653",
"0.55504155"
]
| 0.6609024 | 1 |
correctMisalign corrects misalignment/mis-scaling of an image {{{ by using two markers on the image. | def correctMisalign(img, marker, center, compus, scope=100):
markerCenter = np.asarray(marker.shape)//2
guide = np.asarray([center, compus])
landmark = np.zeros(guide.shape)
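    #guide holds the expected (x, y) positions of the two markers; landmark will receive their detected positions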
    #To run template matching to find the markers
result = cv2.matchTemplate(img, marker, 0)
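    #method 0 is TM_SQDIFF (lower = better match); the next line inverts and rescales the map so the best matches become maxima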
result = (1-result/np.max(result))*255
M = np.float32([
[1, 0, markerCenter[1]] ,
[0, 1, markerCenter[0]] ])
resultPadded = cv2.warpAffine(result, M, (width, height))
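    #the match map was shifted by half the marker size so peaks line up with marker centers; width/height come from the enclosing module scope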
mask = np.zeros(resultPadded.shape)
for i in range(0, len(guide)):
mask[:] = 0
mask_xfr = max(0, guide[i,1]-(scope+markerCenter[0]))
        mask_xto = min(height, guide[i,1]+(scope+markerCenter[0]))
mask_yfr = max(0, guide[i,0]-(scope+markerCenter[1]))
mask_yto = min(width, guide[i,0]+(scope+markerCenter[1]))
mask[mask_xfr:mask_xto, mask_yfr:mask_yto] = 255
min_val, max_val, min_loc, landmark[i,:] = \
cv2.minMaxLoc(np.multiply(resultPadded, mask))
#To shift image
shift = guide[0] - landmark[0]
M = np.float32([
[1, 0, shift[0]] ,
[0, 1, shift[1]] ])
imgShifted = cv2.warpAffine(img, M, (width, height))
#To rescale & rotate image
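    #scale = expected/detected marker distance; theta = angle of the detected center->compass vector from the x-axis (arccos drops the sign of the y component)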
radius = np.linalg.norm(landmark[1,:] - landmark[0,:])
scale = np.linalg.norm(guide[1,:] - guide[0,:])/radius
cos = (landmark[1,0]-landmark[0,0])/radius
theta = np.arccos(cos) / (2 * np.pi) * 360
M = cv2.getRotationMatrix2D((guide[0,0],guide[0,1]),-theta,scale)
imgModified = cv2.warpAffine(imgShifted,M,(width,height))
return imgModified
#}}} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def align_preprocessed(self, img):\n aligner = FaceAligner(self.args.wing_path, self.args.lm_path, self.args.img_size)\n return aligner.align(img)",
"def resetAlignmentCenter(self):\n cent = self.TiltSeries_._TiltAlignmentParas.cent\n imdimX = self.TiltSeries_._imdimX\n imdimY = self.TiltSeries_._imdimY\n print(imdimX, imdimY)\n if cent[0] != imdimX//2+1 or cent[1] != imdimY//2+1:\n #rint \"Centers do not match: cent=\"+str(cent)+\", imdim=\"+str(imdim)\n self.TiltSeries_._TiltAlignmentParas.cent = [imdimX//2+1, imdimY//2+1]",
"def align(self, *, skip_corners=False, return_on_invalid_result=False, warpwarnings=False, **kwargs):\n #load the images for all HPFs and keep them in memory as long as\n #the AlignSample is active\n self.getDAPI()\n self.logger.info(\"starting alignment\")\n\n weighted_sum_mse = 0.\n sum_weights = 0.\n done = set()\n\n for i, overlap in enumerate(self.overlaps, start=1):\n if skip_corners and overlap.tag in [1,3,7,9] :\n continue\n self.logger.debug(f\"aligning overlap {overlap.n} ({i}/{len(self.overlaps)})\")\n result = None\n #check if the inverse overlap has already been aligned\n #(e.g. if the current overlap is between (1, 2), check the overlap between (2, 1))\n #if so, we don't have to align again\n if self.inverseoverlapsdictkey(overlap) in done:\n inverseoverlap = self.overlapsdict[self.inverseoverlapsdictkey(overlap)]\n if hasattr(inverseoverlap, \"result\"):\n result = overlap.getinversealignment(inverseoverlap)\n #do the alignment\n if result is None:\n result = overlap.align(gputhread=self.gputhread, gpufftdict=self.gpufftdict, **kwargs)\n done.add(self.overlapsdictkey(overlap))\n\n #contribution of the mean squared difference after alignment\n #to the weighted sum\n if result is not None and result.exit == 0: \n w = (overlap.cutimages[0].shape[0]*overlap.cutimages[0].shape[1])\n weighted_sum_mse+=w*result.mse[2]\n sum_weights+=w\n else :\n if result is None:\n reason = \"is None\"\n else:\n reason = f\"has exit status {result.exit}\"\n if return_on_invalid_result :\n if warpwarnings: self.logger.warningglobal(f'Overlap number {i} alignment result {reason}: returning 1e10!!')\n return 1e10\n else :\n if warpwarnings: self.logger.warningglobal(f'Overlap number {i} alignment result {reason}: adding 1e10 to sum_mse!!')\n w = (overlap.cutimages[0].shape[0]*overlap.cutimages[0].shape[1])\n weighted_sum_mse+=w*1e10\n sum_weights+=w\n\n self.logger.info(\"finished align loop for \"+self.SlideID)\n return weighted_sum_mse/sum_weights",
"def cleanAlign(align, badaa=None):\n return align.loc[[isvalidpeptide(s, badaa) for s in align]]",
"def check_alignment(image, r1, r2):\n \n distance = dist_between_spheres(r1, r2, image.shape[0] / 2. + 10, image.shape[0] / 2.)\n gap_signal = []\n denoised = median_filter(image.copy(), 3)\n \n for j in np.arange(0., image.shape[1]): \n # Take the region around the gap, which later on will be used\n # to define the intensity at the gap between the spheres.\n # The width of the gap is not exact\n if image.shape[1] / 2. + distance + 5 > j > image.shape[1] / 2. - distance - 5:\n gap_signal.append(denoised[image.shape[0] / 2. + 10, j])\n \n centre = np.mean(np.argwhere(np.min(gap_signal) == gap_signal))\n print centre\n print len(gap_signal) / 2.\n print\n \n if abs(centre - len(gap_signal) / 2.) <= 1.5:\n return True\n else:\n return False",
"def revtranslate_align(aaseqs, dnaseqs, check=False, trim=False):\n\n align = new_align(aaseqs)\n\n for name, seq in aaseqs.iteritems():\n try:\n dna = dnaseqs[name].upper()\n dnalen = len(dna)\n aalen = sum(int(a != \"-\") for a in seq)\n\n if len(dna) != aalen * 3:\n if trim:\n # make dna a multiple of three\n dna = dna[:(len(dna) // 3) * 3]\n\n if len(dna) > aalen * 3:\n # trim dna\n dna = dna[:aalen*3]\n else:\n # trim peptide to match nucleotide\n j = 0\n for i in xrange(len(seq)):\n if seq[i] != '-':\n j += 1\n if j > len(dna) // 3:\n seq = seq[:i] + \"-\" * (len(seq) - i)\n break\n\n aalen2 = sum(int(a != \"-\") for a in seq)\n assert len(dna) == aalen2 * 3, (\n len(dna), aalen2 * 3)\n\n util.logger(\"trim dna (%d) and pep (%d)\" %\n (dnalen - len(dna), aalen - aalen2))\n\n else:\n # is last residue X?\n for i in xrange(len(seq)-1, -1, -1):\n if seq[i] == \"-\":\n continue\n if seq[i] == \"X\":\n # repair\n seq = seq[:i] + \"-\" * (len(seq)-i)\n dna = dna[:-3]\n break\n\n align[name] = seqlib.revtranslate(seq, dna, check=check)\n except seqlib.TranslateError:\n raise\n\n return align",
"def test_align_invert(self):\n al = align(self.amp1, self.amp2, inverse=False)\n\n al_inv = align(self.amp2, self.amp1, inverse=True)\n\n print(al.R)\n print(al_inv.R)\n\n print(al.T)\n print(al_inv.T)",
"def precheck_align(a_mat, b_mat, a_cast, b_cast):\n\n # cast to DataFrame in case either is a Series\n a_mat = pd.DataFrame(a_mat)\n b_mat = pd.DataFrame(b_mat)\n\n # drop samples with all missing values\n a_mat = a_mat.dropna(how=\"all\", axis=0)\n b_mat = b_mat.dropna(how=\"all\", axis=0)\n\n # align samples\n a_mat, b_mat = a_mat.align(b_mat, axis=0, join=\"inner\")\n\n # check sample sizes\n num_samples = a_mat.shape[0] # number of samples for each variable\n if num_samples < 2:\n raise ValueError(\"x and y must have length at least 2.\")\n\n return a_mat, b_mat",
"def test_align():\n target = ('TAAATAAATATCTGGTGTTTGAGGCAAAAAGGCAGACTTAAATTCTAAATCACACCTGTGCTT'\n 'CCAGCACTACCTTCAAGCGCAGGTTCGAGCCAGTCAGGCAGGGTACATAAGAGTCCATTGTGC'\n 'CTGTATTATTTTGAGCAATGGCTAAAGTACCTTCACCCTTGCTCACTGCTCCCCCACTTCCTC'\n 'AAGTCTCATCGTGTTTTTTTTAGAGCTAGTTTCTTAGTCTCATTAGGCTTCAGTCACCAT')\n query = ('TCTGGTGTTTGAGGCAAAAAGGCAGACTTAAATTCTAAATCACACCTGTGCTTCCAGCACTACC'\n 'TTCAAGCGCAGGTTCGAGCCAGTCAGGACTGCTCCCCCACTTCCTCAAGTCTCATCGTGTTTTT'\n 'TTTAGAGCTAGTTTCTTAGTCTCATTAGGCTTCAGTCACCATCATTTCTTATAGGAATACCA')\n assert kevlar.align(target, query) == ('10D91M69D79M20I', 155)",
"def CorrectMotion(self):\n if self.verbose:\n print \"Correct for motion\"\n for entry in self.entry_map['epi']:\n info = self.info[entry]\n\n if os.path.exists(info['imgfile_m'] + info['suffix']):\n return\n# Always use brik for 3dDeconvolve.\n suffix = '+orig'\n epifile = '%s%s' % (info['imgfile'], suffix)\n prefix = info['imgfile_m']\n base_entry = info['base_entry']\n if info['base'] == 'start':\n# Use the first frame specified in template file. Defaults\n# to zero.\n base = info['motion_ref_frame']\n else:\n# Use the last frame.\n base = self.info[base_entry]['tdim'] - info['skip']-1\n base = ('%d' % base).replace(' ','')\n\n# Correct for slice-timing.\n self.SliceTimeCorrect(info, epifile)\n\n plane = info['plane']\n anat_tgt = info['anat_tgt']\n# anat_entry = self.anat_entry[plane]\n\n if info['catmats']:\n# Include additonal transformation in motion correction such\n# that final image is in register with the fieldmap, which has\n# been registered to the structural image that will be used for\n# spatial normalization.\n self.MotcorCatenate(info, base, anat_tgt)\n else:\n# Assume fieldmap is in register with the structural.\n self.Motcor(info, base)\n\n if info.get('fmapname', None) is None:\n# No fieldmap correction.\n if self.fsl_flip:\n# Flip the way fslview likes it.\n self.FSLFlip(info['imgfile_m'], info['imgfile_final'])\n elif info['suffix'] == '.nii':\n# Copy motion-corrected images from /tmp to output directory\n outfile = info['imgfile_final'] + info['suffix']\n cmd = '3dcopy %s+orig %s' % (info['imgfile_m'], outfile)\n self.CheckExec(cmd, [outfile], force=True)\n cmd = '/bin/rm %s+orig*' % info['imgfile_m']\n self.CheckExec(cmd, [], force=True)",
"def computeCoarseAlignmentOld(self, TiltSeries_, mute=True, outfile=''):\n #print('ref index: ', numpy.argwhere( self._projIndices.astype(int) == TiltSeries_._TiltAlignmentParas.ireftilt)[0][0], TiltSeries_._TiltAlignmentParas.ireftilt )\n (psiindeg, shiftX, shiftY, x, y, z, distLine, diffX, diffY,\n shiftVarX, shiftVarY) = alignmentFixMagRot(\n Markers_=self._Markers, cTilt=self._cTilt, sTilt=self._sTilt,\n ireftilt=numpy.argwhere( self._projIndices.astype(int) == TiltSeries_._TiltAlignmentParas.ireftilt)[0][0],\n irefmark=TiltSeries_._TiltAlignmentParas.irefmark,\n r=TiltSeries_._TiltAlignmentParas.r, imdim=TiltSeries_._imdim,\n handflip=TiltSeries_._TiltAlignmentParas.handflip, mute=mute, writeResults=outfile)\n if not mute:\n print((\"Tilt Axis: %.2f\" % psiindeg))\n # copy parameters to TiltSeries\n self._alignmentRotations = numpy.array(self._ntilt * [psiindeg])\n self.setRotationsInTiltSeries(TiltSeries_)\n self._alignmentTransX = shiftX\n self._alignmentTransY = shiftY\n self.set_TranslationsInTiltSeries(TiltSeries_)\n self.Psi = psiindeg\n\n for (imark, Marker) in enumerate(self._Markers):\n Marker.set_r(numpy.array([x[imark], y[imark], z[imark]]))",
"def test_align_without_gaps(self):\n aln = ArrayAlignment(\n {\"seq1\": \"ACGG\", \"seq2\": \"CGCA\", \"seq3\": \"CCG-\"}, moltype=\"dna\"\n )\n aln_plot = aln.dotplot(\"seq1\")\n self.assertNotEqual(aln_plot._aligned_coords, None)",
"def affine_align(x, y, p1, p2, g, s):\n #Create M, Ix, and Iy as Y x X matrices of 0's\n M = [[0]*(len(x)+1) for i in range(len(y)+1)]\n Ix = [[0]*(len(x)+1) for i in range(len(y)+1)]\n Iy = [[0]*(len(x)+1) for i in range(len(y)+1)]\n #Set up initial values for Ix and Iy\n #M infs along both axes\n for i in range(1, len(y)+1):\n M[i][0] = -math.inf\n for j in range(1, len(x)+1):\n M[0][j] = -math.inf\n #Ix: Aligning X with gap, horizontal move, infs along top row\n for i in range(0, len(y)+1):\n Ix[i][0] = -math.inf\n #Gap penalties along left column\n for j in range(1, len(x)+1):\n Ix[0][j] = -g if Ix[0][j-1] == -math.inf else Ix[0][j-1] - s\n #Iy: Aligning Y with gap, vertical move, infs along left column\n for j in range(0, len(x)+1):\n Iy[0][j] = -math.inf\n #Gap penalties along top row\n for i in range(1, len(y)+1):\n Iy[i][0] = -g if Iy[i-1][0] == -math.inf else Iy[i-1][0] - s\n #Populate remaining cells\n for i in range(1, len(y)+1):\n for j in range(1, len(x)+1):\n M[i][j] = max(M[i-1][j-1] + delta(x[j-1], y[i-1], p1, p2),\n Ix[i-1][j-1] + delta(x[j-1], y[i-1], p1, p2),\n Iy[i-1][j-1] + delta(x[j-1], y[i-1], p1, p2))\n Ix[i][j] = max(M[i][j-1] - g,\n Ix[i][j-1] - s)\n Iy[i][j] = max(M[i-1][j] - g,\n Iy[i-1][j] - s)\n #TRACEBACK\n x_ret=\"\"; y_ret=\"\"\n i = len(y); j = len(x)\n #Determine start matrix\n align_scores = (M[i][j], Iy[i][j], Ix[i][j])\n matrix_idx = align_scores.index(max(align_scores))\n #matrix_key will track the current matrix through the traceback\n matrix_key = [\"M\", \"Iy\", \"Ix\"][matrix_idx]\n while i > 0 and j > 0:\n #From M: Check diagonal moves back to all three matrices, align characters\n if matrix_key == \"M\":\n if M[i][j] == M[i-1][j-1] + p1 or M[i][j] == M[i-1][j-1] - p2:\n x_ret = x[j-1] + x_ret\n y_ret = y[i-1] + y_ret\n i -= 1; j -= 1\n matrix_key = \"M\"\n elif M[i][j] == Iy[i-1][j-1] + p1 or M[i][j] == Iy[i-1][j-1] - p2:\n x_ret = x[j-1] + x_ret\n y_ret = y[i-1] + y_ret\n i -= 1; j -= 1\n matrix_key = \"Iy\"\n elif M[i][j] == Ix[i-1][j-1] + p1 or M[i][j] == Ix[i-1][j-1] - p2:\n x_ret = x[j-1] + x_ret\n y_ret = y[i-1] + y_ret\n i -= 1; j -= 1\n matrix_key = \"Ix\"\n #From Iy: Check vertical move to Iy and M, align y character with x gap\n elif matrix_key == \"Iy\":\n if Iy[i][j] == M[i-1][j] - g:\n x_ret = \"_\" + x_ret\n y_ret = y[i-1] + y_ret\n i -= 1\n matrix_key = \"M\"\n elif Iy[i][j] == Iy[i-1][j] - s:\n x_ret = \"_\" + x_ret\n y_ret = y[i-1] + y_ret\n i -= 1\n matrix_key = \"Iy\"\n #From Ix: Check horizontal move to Ix and M, align x character with y gap\n elif matrix_key == \"Ix\":\n if Ix[i][j] == M[i][j-1] - g:\n x_ret = x[j-1] + x_ret\n y_ret = \"_\" + y_ret\n j -= 1\n matrix_key = \"M\"\n elif Ix[i][j] == Ix[i][j-1] - s:\n x_ret = x[j-1] + x_ret\n y_ret = \"_\" + y_ret\n j -= 1\n matrix_key = \"Ix\"\n #Finish sequence if edge was reached\n #i>0 means mach remaining characters in y with gaps in x\n if i > 0:\n x_ret = (\"_\"*i) + x_ret\n y_ret = y[0:i] + y_ret\n #j>0 means mach remaining characters in x with gaps in y\n if j > 0:\n x_ret = x[0:j] + x_ret\n y_ret = (\"_\"*j) + y_ret\n #Return alinged strings\n return (x_ret, y_ret)",
"def align_crop(img, landmarks, standard_landmarks, crop_size=572, face_factor=0.45, align_type='similarity', order=3,\r\n mode='edge'):\r\n interpolation = {0: cv2.INTER_NEAREST,\r\n 1: cv2.INTER_LINEAR,\r\n 2: cv2.INTER_AREA,\r\n 3: cv2.INTER_CUBIC,\r\n 4: cv2.INTER_LANCZOS4,\r\n 5: cv2.INTER_LANCZOS4}\r\n border = {'constant': cv2.BORDER_CONSTANT,\r\n 'edge': cv2.BORDER_REPLICATE,\r\n 'symmetric': cv2.BORDER_REFLECT,\r\n 'reflect': cv2.BORDER_REFLECT101,\r\n 'wrap': cv2.BORDER_WRAP}\r\n\r\n # check\r\n assert align_type in ['affine', 'similarity'], \\\r\n \" [!] Invalid 'align_type'! The {} is not included in ['affine' and 'similarity']!\".format(align_type)\r\n assert order in [0, 1, 2, 3, 4, 5], \\\r\n \" [!] Invalid 'order'! The {} is not included in [0, 1, 2, 3, 4, 5]!\".format(order)\r\n assert mode in ['constant', 'edge', 'symmetric', 'reflect', 'wrap'], \\\r\n \" [!] Invalid 'mode'! the {} is not included in ['constant', 'edge', 'symmetric', 'reflect', and 'wrap']\".format(mode)\r\n\r\n # crop size\r\n if isinstance(crop_size, (list, tuple)) and len(crop_size) == 2:\r\n crop_size_h = crop_size[0]\r\n crop_size_w = crop_size[1]\r\n elif isinstance(crop_size, int):\r\n crop_size_h = crop_size_w = crop_size\r\n else:\r\n raise Exception(\" [!] Invalid 'crop_size'! The 'crop_size' should be (1) one integer for (crop_size, crop_size) ar (2) (int, int) for (crop_size_h, crop_size_w)!\")\r\n\r\n # estimate transform matrix\r\n target_landmarks = standard_landmarks * max(crop_size_h, crop_size_w) * face_factor + np.array([crop_size_w // 2, crop_size_h // 2])\r\n if align_type == 'affine': # 6 degree of freedom\r\n transform_matrix, _ = cv2.estimateAffine2D(target_landmarks, landmarks, ransacReprojThreshold=np.Inf)\r\n else: # 4 degree of freedom: using the combinations of translation, rotation, and uniform scaling\r\n transform_matrix, _ = cv2.estimateAffinePartial2D(target_landmarks, landmarks, ransacReprojThreshold=np.Inf)\r\n\r\n # warp image by given transform\r\n img_crop = cv2.warpAffine(img, transform_matrix, dsize=(crop_size_w, crop_size_h),\r\n flags=cv2.WARP_INVERSE_MAP + interpolation[order], borderMode=border[mode])\r\n\r\n # get transformed landmarks\r\n transformed_landmarks = cv2.transform(np.expand_dims(landmarks, axis=0), m=cv2.invertAffineTransform(transform_matrix))\r\n\r\n return img_crop, transformed_landmarks",
"def imagesAlign(I,Iref,fillval=np.nan,trfm_type='similarity',\n vCells=1,hCells=1,rszFac=1,verbose=False,\n minArea = np.power(2, 11), applyWarp=True):\n if len(I.shape)==3:\n I1=sh.rgb2gray(I)\n else:\n I1=I\n \n if len(Iref.shape)==3:\n Iref1=sh.rgb2gray(Iref)\n else:\n Iref1=Iref\n\n WARN_USER, ORIG_DTYPE = False, None\n if I1.dtype != 'float32':\n WARN_USER, ORIG_DTYPE = True, I1.dtype\n I1 = I1.astype('float32')\n if Iref1.dtype != 'float32':\n WARN_USER, ORIG_DTYPE = True, Iref1.dtype\n Iref1 = Iref1.astype('float32')\n if WARN_USER:\n print \"(Info) imagesAlign was called with input image dtype={0}. \\\nimagesAlign expects image dtype='float32' (Also, intensity vals in range \\\n[0.0,1.0]. The image dtype conversion was \\\nautomatically done, but this slows down the computation a little. Consider \\\ntrying to work in 'float32' in the first place if convenient for a little \\\nspeed boost.\".format(ORIG_DTYPE)\n\n t1 = time.clock()\n # check if more than one vertical and horizontal cell\n if (vCells>1) and (hCells>1):\n I2=imagesAlign(I1,Iref1,trfm_type=trfm_type, minArea=minArea)[1];\n Iout=np.copy(Iref1);\n pFac=.25;\n vStep=math.ceil(I1.shape[0]/vCells); vPad=pFac*vStep;\n hStep=math.ceil(I1.shape[1]/hCells); hPad=pFac*vStep;\n for i in range(vCells):\n for j in range(hCells):\n # 2. chop + pad each cell then align\n # 3. stitch back together\n i1=i*vStep; i1=max(i1,0);\n i2=(i+1)*vStep; i2=min(i2,I1.shape[0]-1);\n j1=j*hStep; j1=max(j1,0);\n j2=(j+1)*hStep; j2=min(j2,I1.shape[1]-1);\n\n i1p=i1-vPad; i1p=max(i1p,0);\n i2p=i2+vPad; i2p=min(i2p,I1.shape[0]-1);\n j1p=j1-hPad; j1p=max(j1p,0);\n j2p=j2+hPad; j2p=min(j2p,I1.shape[1]-1);\n \n Ic=I2[i1p:i2p,j1p:j2p]\n Irefc=Iref1[i1p:i2p,j1p:j2p]\n (H,err)=imagesAlign1(Ic,Irefc,trfm_type=trfm_type,verbose=verbose, minArea=minArea)\n IcT=sh.imtransform(Ic, H)\n Iout[i1:i2,j1:j2]=IcT[i1-i1p:(i1-i1p)+(i2-i1),j1-j1p:(j1-j1p)+(j2-j1)]\n\n return (np.eye(3),Iout,-1)\n\n if rszFac==1:\n t0 = time.clock()\n (H,err)=imagesAlign1(I1,Iref1,trfm_type=trfm_type,verbose=verbose, minArea=minArea)\n if verbose:\n print 'alignment time:',time.clock()-t0,'(s)'\n\n #print 'alignment time:',time.clock()-t0,'(s)' \n else:\n I1=sh.fastResize(I1,rszFac)\n Iref1=sh.fastResize(Iref1,rszFac)\n S=np.eye(3, dtype=np.float32);\n S[0,0]=1/rszFac; S[1,1]=1/rszFac;\n H0=np.eye(3, dtype=np.float32)\n H0=np.dot(np.dot(np.linalg.inv(S),H0),S)\n t0 = time.clock()\n (H,err)=imagesAlign1(I1,Iref1,H0=H0,trfm_type=trfm_type,verbose=verbose, minArea=minArea)\n if verbose:\n print 'alignment time:',time.clock()-t0,'(s)'\n\n #print 'alignment time:',time.clock()-t0,'(s)'\n H=np.dot(S,np.dot(H,np.linalg.inv(S)))\n\n #print \"overall time: \", time.clock() - t1\n if applyWarp:\n return (H,sh.imtransform(I,H,fillval=fillval),err)\n else:\n return (H,err)",
"def test_correct_barcode_no_error_correction(self):\r\n barcode = \"GGAGACAAGGGT\"\r\n barcode_to_sample_id = {\r\n \"GGAGACAAGGGA\": \"s1\",\r\n \"ACACCTGGTGAT\": \"s2\"}\r\n correction_fn = None\r\n\r\n actual = correct_barcode(barcode, barcode_to_sample_id, correction_fn)\r\n expected = (0, barcode, False, None)\r\n self.assertEqual(actual, expected)\r\n\r\n # barcode contains N\r\n barcode = \"CCAGTGTANGCA\"\r\n actual = correct_barcode(barcode, barcode_to_sample_id, correction_fn)\r\n expected = (0, \"CCAGTGTANGCA\", False, None)\r\n self.assertEqual(actual, expected)",
"def test_align(self):\n al = align(self.amp1, self.amp2).m\n\n # Both objects are already centered, so should be close to origin (allowing for some inaccuracy)\n self.assertAlmostEqual(al.vert.mean(axis=0)[0], 0, delta=TestAlign.DELTA)\n self.assertAlmostEqual(al.vert.mean(axis=0)[1], 0, delta=TestAlign.DELTA)\n self.assertAlmostEqual(al.vert.mean(axis=0)[2], 0, delta=TestAlign.DELTA)",
"def image_align(first_image, second_image):\r\n\r\n high_diff = (second_image.shape[0] - first_image.shape[0]) // 2\r\n width_diff = (second_image.shape[1] - first_image.shape[1]) // 2\r\n\r\n align_image = second_image[high_diff: high_diff + first_image.shape[0],\r\n width_diff: width_diff + first_image.shape[1],\r\n :]\r\n\r\n\r\n assert align_image.shape == first_image.shape\r\n\r\n return align_image",
"def alignment_uncertainty(w, I, d=0):\n return sqrt(w**2/I + d**2/12.)",
"def _correct_indel_coords(chrom, pos, ref, alt, pysamtxeff):\n lref = len(ref)\n lalt = len(alt)\n if lref == 1 and lalt == 1:\n # Substitution case\n change = '>'.join([ref, alt])\n new_pos = str(pos) + change\n return new_pos\n elif lalt == 1 and lref > lalt:\n dels = RptHandler(pysamtxeff, chrom, pos, ref)\n # Deletion case\n if dels.check_rpt_status():\n new_start, new_end = dels.find_rpt_coords()\n if len(dels.allele) == 1:\n new_pos = '_'.join([str(new_start)]) + 'del'\n else:\n new_pos = '_'.join([str(new_start), str(new_end)]) + 'del'\n else:\n shift = lref - lalt\n if shift == 1:\n new_pos = str(int(pos) + 1) + 'del'\n else:\n new_start = str(int(pos) + 1)\n new_end = str(int(pos) + shift)\n new_pos = '_'.join([new_start, new_end]) + 'del'\n return new_pos\n elif lref == 1 and lalt > lref:\n dups = RptHandler(pysamtxeff, chrom, pos, alt)\n # Duplication case\n if dups.check_rpt_status():\n new_start, new_end = dups.find_rpt_coords()\n # Check if there is a padding base, then adjust accordingly.\n if alt[0] == ref[0]:\n new_start += 1\n new_end += 1\n if len(dups.allele) == 1:\n new_pos = '_'.join([str(new_start)]) + 'dup'\n else:\n new_pos = '_'.join([str(new_start), str(new_end)]) + 'dup'\n # Insertion case\n else:\n new_start = str(pos)\n new_end = str(int(pos) + 1)\n new_pos = '_'.join([new_start, new_end]) + 'ins' + alt[1:]\n return new_pos\n elif lref > 1 and lalt > 1:\n # Multi-nucleotide substitution case\n # NG_012232.1: g.12_13delinsTG\n new_start = pos\n new_end = str(int(pos) + lref - 1)\n # If there is a common padding base, like ATG>ACC, make sure we are taking this in to account.\n if alt[0] == ref[0]:\n new_start += 1\n alt = alt[1:]\n new_pos = '_'.join([str(new_start), new_end]) + 'delins' + alt\n return new_pos\n else:\n raise Exception(\"Unknown change type: \" + pos + ':' + ref + '>' + alt)",
"def align(img, left_eye, right_eye):\n left_eye_x, left_eye_y = left_eye\n right_eye_x, right_eye_y = right_eye\n point_3rd, direction = (left_eye, -1) if left_eye_y > right_eye_y else (right_eye, 1)\n\n # np.linalg.norm is being used for euclidean distance\n a = np.linalg.norm(np.array(left_eye) - np.array(point_3rd))\n b = np.linalg.norm(np.array(right_eye) - np.array(point_3rd))\n c = np.linalg.norm(np.array(right_eye) - np.array(left_eye))\n\n if b != 0 and c != 0:\n angle = np.arccos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c))\n angle = (angle * 180) / math.pi\n if direction == -1:\n angle = 90 - angle\n img = Image.fromarray(img)\n img = np.array(img.rotate(direction * angle))\n\n return img",
"def test_align_unaligned_seqs(self):\n res = align_unaligned_seqs(self.seqs1_fp, RNA)\n self.assertEqual(res.toFasta(), self.seqs1_aln)",
"def init_basic_aligner(allow_mismatches=False):\n a = Align.PairwiseAligner()\n if allow_mismatches:\n a.mismatch_score = -1\n a.gap_score = -3\n a.target_gap_score = -np.inf\n if not allow_mismatches:\n a.mismatch = -np.inf\n a.mismatch_score = -np.inf\n return a",
"def computeCoarseAlignment(self, TiltSeries_, mute=True, outfile='', optimizeShift=True, logfile_residual=''):\n #print('ref index: ', numpy.argwhere( self._projIndices.astype(int) == TiltSeries_._TiltAlignmentParas.ireftilt)[0][0], TiltSeries_._TiltAlignmentParas.ireftilt )\n (psiindeg, shiftX, shiftY, x, y, z, distLine, diffX, diffY,\n shiftVarX, shiftVarY) = alignmentFixMagRot(\n Markers_=self._Markers, cTilt=self._cTilt, sTilt=self._sTilt,\n ireftilt=numpy.argwhere( self._projIndices.astype(int) == TiltSeries_._TiltAlignmentParas.ireftilt)[0][0],\n irefmark=TiltSeries_._TiltAlignmentParas.irefmark,\n r=TiltSeries_._TiltAlignmentParas.r, imdim=TiltSeries_._imdim,imdimX=TiltSeries_._imdimX, imdimY=TiltSeries_._imdimY,\n handflip=TiltSeries_._TiltAlignmentParas.handflip, mute=mute, writeResults=outfile,\n optimizeShift=optimizeShift, logfile_residual=logfile_residual)\n if not mute:\n print((\"Tilt Axis: %.2f\" % psiindeg))\n # copy parameters to TiltSeries\n ireftilt = numpy.argwhere( self._projIndices.astype(int) == TiltSeries_._TiltAlignmentParas.ireftilt)[0][0]\n self._alignmentRotations = numpy.array(self._ntilt * [psiindeg])\n self.setRotationsInTiltSeries(TiltSeries_)\n self._alignmentTransX = shiftX\n self._alignmentTransY = shiftY\n self.set_TranslationsInTiltSeries(TiltSeries_)\n self.Psi = psiindeg\n\n for (imark, Marker) in enumerate(self._Markers):\n Marker.set_r(numpy.array([x[imark], y[imark], z[imark]]))\n # if not optimizeShift:\n # Marker.set_r(numpy.array([x[imark] + 6.326546124766944 , y[imark] + 5.187672225662868, z[imark]]))",
"def test_alignments(self):\n # test against the correct input file\n parser = Lav(self.__correct_file)\n for alignment in parser.alignments():\n self.assertEqual(len(alignment), 7)\n for alignment in parser.alignments(gapped=False):\n self.assertEqual(len(alignment), 8)\n # test againts incorrect input files\n for lav_file in self.__incorrect_files:\n parser = Lav(os.path.join(self.__incorrect_file_dir,\n lav_file))\n with self.assertRaises(LavError):\n for alignment in parser.alignments():\n self.assertIsInstance(alignment,\n Lav.GapFreeAlignment)",
"def imalign(src_file, dst_file, face_landmarks, output_size=1024, transform_size=1024, enable_padding=True, x_scale=1, y_scale=1, em_scale=0.1, alpha=False):\n lm = np.array(face_landmarks)\n lm_chin = lm[0 : 17] # left-right\n lm_eyebrow_left = lm[17 : 22] # left-right\n lm_eyebrow_right = lm[22 : 27] # left-right\n lm_nose = lm[27 : 31] # top-down\n lm_nostrils = lm[31 : 36] # top-down\n lm_eye_left = lm[36 : 42] # left-clockwise\n lm_eye_right = lm[42 : 48] # left-clockwise\n lm_mouth_outer = lm[48 : 60] # left-clockwise\n lm_mouth_inner = lm[60 : 68] # left-clockwise\n\n # Calculate auxiliary vectors.\n eye_left = np.mean(lm_eye_left, axis=0)\n eye_right = np.mean(lm_eye_right, axis=0)\n eye_avg = (eye_left + eye_right) * 0.5\n eye_to_eye = eye_right - eye_left\n mouth_left = lm_mouth_outer[0]\n mouth_right = lm_mouth_outer[6]\n mouth_avg = (mouth_left + mouth_right) * 0.5\n eye_to_mouth = mouth_avg - eye_avg\n\n # Choose oriented crop rectangle.\n x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]\n x /= np.hypot(*x)\n x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)\n x *= x_scale\n y = np.flipud(x) * [-y_scale, y_scale]\n c = eye_avg + eye_to_mouth * em_scale\n quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])\n qsize = np.hypot(*x) * 2\n\n # Load in-the-wild image.\n if not os.path.isfile(src_file):\n print('\\nCannot find source image. Please run \"--wilds\" before \"--align\".')\n return\n img = Image.open(src_file)\n\n # Shrink.\n shrink = int(np.floor(qsize / output_size * 0.5))\n if shrink > 1:\n rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))\n img = img.resize(rsize, Image.ANTIALIAS)\n quad /= shrink\n qsize /= shrink\n\n # Crop.\n border = max(int(np.rint(qsize * 0.1)), 3)\n crop = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))\n crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1]))\n if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:\n img = img.crop(crop)\n quad -= crop[0:2]\n\n # Pad.\n pad = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))\n pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), max(pad[3] - img.size[1] + border, 0))\n if enable_padding and max(pad) > border - 4:\n pad = np.maximum(pad, int(np.rint(qsize * 0.3)))\n img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')\n h, w, _ = img.shape\n y, x, _ = np.ogrid[:h, :w, :1]\n mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w-1-x) / pad[2]), 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h-1-y) / pad[3]))\n blur = qsize * 0.02\n img += (ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)\n img += (np.median(img, axis=(0,1)) - img) * np.clip(mask, 0.0, 1.0)\n img = np.uint8(np.clip(np.rint(img), 0, 255))\n if alpha:\n mask = 1-np.clip(3.0 * mask, 0.0, 1.0)\n mask = np.uint8(np.clip(np.rint(mask*255), 0, 255))\n img = np.concatenate((img, mask), axis=2)\n img = Image.fromarray(img, 'RGBA')\n else:\n img = Image.fromarray(img, 'RGB')\n quad += pad[:2]\n\n # Transform.\n img = img.transform((transform_size, transform_size), Image.QUAD, (quad + 0.5).flatten(), Image.BILINEAR)\n print(transform_size)\n if output_size < transform_size:\n img = 
img.resize((output_size, output_size), Image.ANTIALIAS)\n\n # Save aligned image.\n img.save(dst_file, 'PNG')",
"def initial_alignment(in_file, name, outdir, errors, config):\n align_out = os.path.join(outdir, \"%s-match.fastq\" % name)\n noalign_out = os.path.join(outdir, \"%s-nomatch.fastq\" % name)\n if not os.path.exists(align_out) or not os.path.exists(noalign_out):\n out_params = [\"--al\", align_out, \"--un\", noalign_out]\n out_params += [\"--solexa1.3-quals\"]\n run_bowtie(in_file, config[\"reference\"][0][\"file\"], None, errors,\n extra_params=out_params)\n return align_out, noalign_out",
"def cleanBadPix(redux_science, bad_pixel_map, method = 'median', replacement_box = 5, replace_constant = -99):\n #add negative pixels to the bad pixel map\n bad_pixel_map = np.logical_or(bad_pixel_map, redux_science <= 0)\n # im = np.copy(redux_science)\n # im[np.where(bad_pixel_map)[1]] = 0.\n if method == 'median':\n med_fil = median_filter(redux_science, size = replacement_box)\n\n cleaned = redux_science*~bad_pixel_map + med_fil*bad_pixel_map\n\n #elif method == 'interpolate':\n\n # print('so clean')\n\n return cleaned",
"def remove_gapped_columns(aln):\n cols = zip(* aln.values())\n ind = util.find(lambda col: \"-\" not in col, cols)\n return subalign(aln, ind)",
"def align(self):\n ..."
]
| [
"0.5492849",
"0.5472743",
"0.5366377",
"0.53595704",
"0.53412116",
"0.53201115",
"0.5234939",
"0.5230496",
"0.519625",
"0.5172563",
"0.51371306",
"0.5108179",
"0.5102144",
"0.50905186",
"0.5074283",
"0.50719476",
"0.5055099",
"0.49876216",
"0.4970037",
"0.49581602",
"0.4957396",
"0.49467072",
"0.49397787",
"0.4887296",
"0.4877813",
"0.4871632",
"0.4859425",
"0.48593876",
"0.48565975",
"0.48534498"
]
| 0.63359916 | 0 |
tv_loss. Deprecated. Please use tensorflow total_variation loss implementation. | def tv_loss(x, name='tv_loss'):
raise NotImplementedError("Please use tensorflow total_variation loss.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tv_loss(input: th.Tensor):\n input = tf.pad(input, (0, 1, 0, 1), \"replicate\")\n x_diff = input[..., :-1, 1:] - input[..., :-1, :-1]\n y_diff = input[..., 1:, :-1] - input[..., :-1, :-1]\n return (x_diff ** 2 + y_diff ** 2).mean([1, 2, 3])",
"def tv_loss(img, tv_weight):\n # Your implementation should be vectorized and not require any loops!\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****",
"def vae_loss(x, t_decoded):\r\n return K.mean(reconstruction_loss(x, t_decoded))",
"def tvd(pred: Tensor, label: Tensor) -> Tensor:\n return (Tensor([0.5]) * torch.abs(pred - label)).sum()",
"def loss(loss_name):\n \n def contrastive_loss(y_true, y_pred, margin = 1):\n \"\"\"Implementation of the triplet loss function\n\n\n Contrastive loss = 0.5 * mean( (1-true_value) * square(distance) + true_value * square( max(margin-distance, 0) ))\n\n Args:\n y_true (int): true label, positive pair (same class) -> 0, \n negative pair (different class) -> 1\n \n y_pred (list): python list containing two objects in a pair of tensors:\n left : the encodings for one image data in a pair\n right : the encodings for the other image data in a pair\n margin (float, optional): m > 0 determines how far the embeddings of \n a negative pair should be pushed apart. Defaults to 1.\n\n\n Returns:\n loss (float): real number, value of the loss\n \"\"\"\n\n left = y_pred[0]\n right = y_pred[1]\n\n distance = tf.math.sqrt(tf.math.reduce_sum(tf.math.square(left - right), axis=-1))\n\n loss_positive = tf.math.square(distance)\n loss_negative = tf.math.square(tf.maximum(0., margin - distance))\n \n loss = y_true * loss_negative + (1 - y_true) * loss_positive\n loss = 0.5 * tf.math.reduce_mean(loss)\n\n return loss\n\n def triplet_loss(y_true, y_pred, margin = 1):\n \"\"\"Implementation of the triplet loss function\n\n Arguments:\n y_true : true labels, required when you define a loss in Keras, \n not applied in this function.\n\n y_pred (list): python list containing three objects:\n anchor : the encodings for the anchor data\n positive : the encodings for the positive data (similar to anchor)\n negative : the encodings for the negative data (different from anchor)\n \n margin (float, optional): m > 0 determines how far the embeddings of \n a negative data should be pushed apart. Defaults to 1.\n\n Returns:\n loss (float): real number, value of the loss\n \"\"\"\n\n anchor = y_pred[0]\n positive = y_pred[1]\n negative = y_pred[2]\n\n # squared distance between the anchor and the positive\n pos_dist = tf.math.reduce_sum(tf.math.square(anchor - positive), axis=-1)\n\n # squared distance between the anchor and the negative\n neg_dist = tf.math.reduce_sum(tf.math.square(anchor - negative), axis=-1)\n\n # compute loss\n basic_loss = margin + pos_dist - neg_dist\n loss = tf.math.maximum(basic_loss,0.0)\n loss = tf.math.reduce_mean(loss)\n return loss\n\n \n if loss_name == 'contrastive_loss':\n return contrastive_loss\n \n if loss_name == 'triplet_loss':\n return triplet_loss",
"def calculate_loss(model, t, logits, labels):\n model_para = model.get_paramaters_list_reshape()\n myTF.calculate_para_dependence_loss(model_para,t)\n\n myTF.calculate_cross_entropy_loss(logits, labels)\n\n return tf.add_n(tf.get_collection('losses'), name='loss_total')",
"def compute_tv_loss(generate_tensor):\n\n # xi,j - xi+1,j\n noise_along_h = torch.abs(generate_tensor[:, :, 1:, :] -\n generate_tensor[:, :, :-1, :]).mean()\n # xi,j - xi,j+1\n noise_along_w = torch.abs(generate_tensor[:, :, :, 1:] -\n generate_tensor[:, :, :, :-1]).mean()\n return 0.5 * (noise_along_h + noise_along_w)",
"def tversky_loss(yhat, ytrue):\n return torch.mean(1 - tversky_index(yhat, ytrue))",
"def compute_loss(self):",
"def compute_loss(self):\n self.prototypes = self.compute_prototypes()\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.episode.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss",
"def tf_l2_loss(Gt, pred,_axis):\n l2diff = tf.subtract(Gt, pred)\n l2loss = tf.reduce_sum(tf.square(l2diff), axis=_axis)\n l2loss = tf.maximum(l2loss, 1e-10)\n l2loss = tf.sqrt(l2loss) # (n_batch, n_class) -> (n_batch, 1)\n\n return l2loss",
"def verbose_loss(self, feedback: _Feedback, extra_info) -> Dict[str, _Array]:\n hint_preds, diff_logits, gt_diffs = extra_info\n\n for inp in feedback.features.inputs:\n if inp.location in [_Location.NODE, _Location.EDGE]:\n nb_nodes = inp.data.shape[1]\n break\n\n total_loss = 0.0\n lengths = feedback.features.lengths\n\n losses = {}\n if self.decode_diffs:\n for loc in _Location:\n for i in range(len(gt_diffs)):\n is_not_done = _is_not_done_broadcast(lengths, i, gt_diffs[i][loc])\n diff_loss = (\n jnp.maximum(diff_logits[i][loc], 0) -\n diff_logits[i][loc] * gt_diffs[i][loc] +\n jnp.log1p(jnp.exp(-jnp.abs(diff_logits[i][loc]))) * is_not_done)\n losses[loc.name + '_diff_%d' % i] = jnp.mean(diff_loss)\n\n if self.decode_hints:\n for truth in feedback.features.hints:\n for i in range(truth.data.shape[0] - 1):\n assert truth.name in hint_preds[i]\n pred = hint_preds[i][truth.name]\n is_not_done = _is_not_done_broadcast(lengths, i, truth.data[i + 1])\n if truth.type_ == _Type.SCALAR:\n if self.decode_diffs:\n total_loss = jnp.mean((pred - truth.data[i + 1])**2 *\n gt_diffs[i][truth.location] * is_not_done)\n else:\n total_loss = jnp.mean((pred - truth.data[i + 1])**2 * is_not_done)\n elif truth.type_ == _Type.MASK:\n if self.decode_diffs:\n total_loss = jnp.mean(\n jnp.maximum(pred, 0) - pred * truth.data[i + 1] +\n jnp.log1p(jnp.exp(-jnp.abs(pred))) *\n gt_diffs[i][truth.location] * is_not_done)\n else:\n total_loss = jnp.mean(\n jnp.maximum(pred, 0) - pred * truth.data[i + 1] +\n jnp.log1p(jnp.exp(-jnp.abs(pred))) * is_not_done)\n elif truth.type_ == _Type.MASK_ONE:\n if self.decode_diffs:\n total_loss = jnp.mean(\n -jnp.sum(\n truth.data[i + 1] * jax.nn.log_softmax(\n pred) * is_not_done, axis=-1, keepdims=True) *\n gt_diffs[i][truth.location])\n else:\n total_loss = jnp.mean(-jnp.sum(\n truth.data[i + 1] * jax.nn.log_softmax(\n pred) * is_not_done, axis=-1))\n elif truth.type_ == _Type.CATEGORICAL:\n if self.decode_diffs:\n total_loss = jnp.mean(\n -jnp.sum(\n truth.data[i + 1] * jax.nn.log_softmax(\n pred), axis=-1, keepdims=True) *\n jnp.expand_dims(gt_diffs[i][truth.location], -1) *\n is_not_done)\n else:\n total_loss = jnp.mean(-jnp.sum(\n truth.data[i + 1] * jax.nn.log_softmax(pred), axis=-1) *\n is_not_done)\n elif truth.type_ == _Type.POINTER:\n if self.decode_diffs:\n total_loss = jnp.mean(-jnp.sum(\n hk.one_hot(truth.data[i + 1], nb_nodes) *\n jax.nn.log_softmax(pred),\n axis=-1) * gt_diffs[i][truth.location] * is_not_done)\n else:\n total_loss = jnp.mean(-jnp.sum(\n hk.one_hot(truth.data[i + 1], nb_nodes) *\n jax.nn.log_softmax(pred),\n axis=-1) * is_not_done)\n else:\n raise ValueError('Incorrect type')\n losses[truth.name + '_%d' % i] = total_loss\n return losses",
"def build_loss(self, n_loss, t_loss):\n loss = tf.add(n_loss, t_loss)\n return loss",
"def loss_step3(self, y, u, v):\n uv = -0.5 * (y.float().t() @ (u + v))\n return uv",
"def build_loss(self):\n if self.mode != \"encode\":\n total_loss = tf.losses.get_total_loss()\n tf.summary.scalar(\"losses/total\", total_loss)\n\n self.total_loss = total_loss",
"def tLoss(self, dt, qt, tl, ta, qa):\n\t return 1./(10.**6)*(qt+(self.evap(self.LAI, tl, ta, qa)))",
"def loss_op(self):\n return self.loss",
"def get_loss(self):\r\n\r\n if F.loss_type==\"cosine\":\r\n self.losscos = r2d*tf.acos(1-tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1))\r\n self.loss = tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1)\r\n elif F.loss_type==\"mse2d\":\r\n xl, yl, zl = tf.split(self.labels, 3, axis=1)\r\n xo, yo, zo = tf.split(self.out, 3, axis=1)\r\n thetal, thetao = tf.asin(-yl), tf.asin(-yo)\r\n phil, phio = tf.atan2(-zl, -xl), tf.atan2(-zo, -xo)\r\n self.lb = tf.concat([thetal, phil], axis=1)\r\n self.ob = tf.concat([thetao, phio], axis=1)\r\n self.loss = tf.scalar_mul(tf.constant(r2d), tf.losses.mean_squared_error(self.lb, self.ob, 2))\r\n elif F.loss_type==\"mse3d\":\r\n self.loss = tf.losses.mean_squared_error(tf.nn.l2_normalize(self.labels, 0), tf.nn.l2_normalize(self.out, 0))",
"def _get_total_variational_loss(self, content):\n return tf.reduce_sum(tf.image.total_variation(content))",
"def _get_loss(self):\n raise NotImplementedError",
"def VAE_losses(t_logits, t_truncate, mu_logvar0, mu_logvar1, tiny=1e-8):\n # NEW ONE! with different strategy of calculating loss for censoring, adding \\sum p_b, not \\sum w_b*p_b\n # Reconstruction loss\n t_dist = tf.nn.softmax(t_logits)\n reconstruction = -tf.log(tf.reduce_sum(t_dist*t_truncate, axis=1))\n\n # KL divergence\n mu0, logvar0 = tf.split(mu_logvar0, num_or_size_splits=2, axis=1)\n mu1, logvar1 = tf.split(mu_logvar1, num_or_size_splits=2, axis=1)\n\n kl_d = 0.5 * tf.reduce_sum(tf.exp(logvar1-logvar0)\\\n + tf.divide(tf.square(mu0-mu1),tf.exp(logvar0)+tiny) \\\n + logvar0 - logvar1 -1.0, \\\n 1)\n\n # Total loss for event\n loss = tf.reduce_mean(reconstruction + kl_d) \n \n return reconstruction, kl_d, loss",
"def build_tt_loss(self, t_logits, t_target):\n t_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=t_logits, labels=t_target)\n t_loss = tf.reduce_mean(t_loss)\n return t_loss",
"def get_loss(self):\n raise NotImplementedError",
"def loss(self, **kwargs):\n pass",
"def loss_fun(model: GPModel, params: dict) -> float:\n py = model.module.call(params, train_ds['index_points'])\n return -py.log_prob(train_ds['y'])",
"def setup_loss(self):\n with vs.variable_scope(\"loss\"):\n self.loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.label_placeholder, logits=self.label_predictions))",
"def loss_fn(y_true,y_pred): \n loss = tf.nn.softmax_cross_entropy_with_logits_v2(y_true,\n y_pred,\n axis=-1,\n )\n loss = tf.reduce_mean(loss,name=\"loss\")\n return loss",
"def vertex_loss(self, pred_vertices, gt_vertices, has_smpl):\n conf = has_smpl.float()\n loss_vertex = self.criterion_vertex(pred_vertices, gt_vertices)\n loss_vertex = (conf[:, None, None] * loss_vertex).mean()\n return loss_vertex",
"def _create_loss(self):\n with tf.device('/cpu:0'):\n with tf.name_scope('loss'):\n self.loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(\n labels=self.labels_placeholder, \n logits=self.logits, name='loss'))",
"def loss_perceptual(self, vgg_out, vgg_gt, vgg_comp): \n loss = 0\n for o, c, g in zip(vgg_out, vgg_comp, vgg_gt):\n loss += self.l1(o, g) + self.l1(c, g)\n return loss"
]
| [
"0.71811527",
"0.7058403",
"0.63139117",
"0.62708676",
"0.6116712",
"0.60516834",
"0.6036121",
"0.5978986",
"0.5977184",
"0.59507954",
"0.5913045",
"0.591241",
"0.58793354",
"0.5870999",
"0.5853943",
"0.580963",
"0.5757218",
"0.5752254",
"0.5751082",
"0.57493937",
"0.5742276",
"0.57300854",
"0.5711516",
"0.56981725",
"0.56931156",
"0.56846166",
"0.56686693",
"0.5658347",
"0.5647846",
"0.56315935"
]
| 0.8830987 | 0 |
Runs loadData from LoadDataModel. Runs also previewData from this class. Shows error warning in GUI if data load does not work. | def loadPreviewData(self):
# parameters for data load from GUI
self.loadDataModel.pathToDataSet = self.entryPath.get()
self.loadDataModel.firstRowIsTitle = bool(self.checkVarRow.get())
self.loadDataModel.firstColIsRowNbr = bool(self.checkVarCol.get())
# if entry field is empty, set nbrOfCategories to 0
if len(self.entrytrainRowNbr.get()) == 0: # Code for this line from: https://stackoverflow.com/questions/15455113/tkinter-check-if-entry-box-is-empty
self.loadDataModel.trainRowNumber = 0
else:
self.loadDataModel.trainRowNumber = int(self.entrytrainRowNbr.get())
# if entry field is empty, set nbrOfCategories to 0
if len(
self.entryNbrCategories.get()) == 0: # Code for this line from: https://stackoverflow.com/questions/15455113/tkinter-check-if-entry-box-is-empty
self.loadDataModel.nbrOfCategories = 0
else:
self.loadDataModel.nbrOfCategories = int(self.entryNbrCategories.get())
self.loadDataModel.dataIsForTraining = True
# Load data
try:
self.loadDataModel.loadData()
print("LoadDataView: self.loadDataModel.data: ", self.loadDataModel.data)
except FileNotFoundError:
tk.messagebox.showerror("Error", " File not found.")
except ValueError:
tk.messagebox.showerror("Error", "The number of categories entered is incorrect. Enter number > 0 and smaller"
" the number of columns in the dataset.")
except:
print("Load data failed because of something different than nbrOfCategories entered or file not found.")
else: # if data load worked do the following
self.loadDataInformation.config(text="Data has been successfully loaded and stored.", fg="green")
self.previewData() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def loadPreviewDataforClassification(self):\n # parameters for data load from GUI\n self.loadDataModel.pathToDataSet = self.entryPath.get()\n self.loadDataModel.firstRowIsTitle = bool(self.checkVarRow.get())\n self.loadDataModel.firstColIsRowNbr = bool(self.checkVarCol.get())\n # if entry field is empty, set nbrOfCategories to 0\n self.loadDataModel.dataIsForTraining = False\n\n # Load data\n try:\n self.loadDataModel.loadData()\n print(\"LoadDataView: self.loadDataModel.data: \", self.loadDataModel.data)\n except FileNotFoundError:\n tk.messagebox.showerror(\"Error\", \" File not found.\")\n except ValueError:\n tk.messagebox.showerror(\"Error\", \"The number of categories entered is incorrect. Enter number > 0 and smaller\"\n \" the number of columns in the dataset.\")\n except:\n print(\"Load data failed because of something different than nbrOfCategories entered or file not found.\")\n else: # if data load worked do the following\n self.loadDataInformation.config(text=\"Data has been successfully loaded and stored.\", fg=\"green\")\n self.previewData()",
"def load_data(self):\n if self.debug:\n print(\"Loading data\")",
"def _LoadDataModel( self, reason ):\n if not self.isLoading:\n update_args = self._LoadDataModelValues( reason )\n if 'replot' in update_args:\n wx.CallAfter( self.UpdateState, replot = True )",
"def load_data(self) -> None:",
"def run(self):\r\n # Close any open models\r\n self.cleanUp()\r\n # Dynamically select file to preview\r\n self.selectTrial() \r\n # Add adjusted COM (RRA/CMC) model\r\n self.loadAdjustedModel()\r\n # Hide the markers from view\r\n self.hideModelMarkers()\r\n # Load CMC motion to model\r\n self.loadCMCMotion()",
"def view_data(self):\r\n if self.population.data != []:\r\n try:\r\n self.process_view_data()\r\n except InputError as ex:\r\n print(ex)\r\n self.view_data()\r\n\r\n else:\r\n print(\"\\nThere is no imported data to view. Please import in some data before trying to view data\")\r\n self.menu_page()",
"def load_data(self):",
"def load_data(self):\n raise NotImplementedError()",
"def _loadData(self, data):\n Movie._loadData(self, data)\n PlexHistory._loadData(self, data)",
"def load_and_predict(self):\n if DataLoader.data is None:\n messagebox.showerror(\"Information\", \"Data file is empty, please load the data first.\")\n return\n\n path = filedialog.askopenfilename()\n with open(path, 'rb') as file:\n Trainer.model = pickle.load(file)\n\n scale = DataLoader.data['out'].max() - DataLoader.data['out'].min()\n scaler = MinMaxScaler(feature_range=(0, 1))\n scaler.fit(DataLoader.data)\n data_scaled = pd.DataFrame(scaler.transform(DataLoader.data), columns=DataLoader.data.columns)\n\n Trainer.y_pred = batch_predict(Trainer.model, data_scaled.drop(columns=['out']))\n Trainer.y_true = data_scaled['out']\n\n self.test_rmse = scale * math.sqrt(mean_squared_error(Trainer.y_pred, Trainer.y_true))\n print(self.test_rmse)\n self.r_squared = np.corrcoef(Trainer.y_pred * scale, data_scaled['out'] * scale)[0, 1] ** 2\n print(self.r_squared)\n\n models = Trainer.model.get_models()\n param_string = f'Component Function Trained Parameters:\\n'\n for i in range(len(models)):\n param_string += \"length scale: {:.4f}\".format(models[i].kernel_.k1.length_scale) + ' ' + \\\n \"noise level: {:.4e}\".format(models[i].kernel_.k2.noise_level) + '\\n'\n param_string += f'\\nRMSE on the test set: {self.test_rmse}\\n'\n param_string += f'R^2 value on the test set: {self.r_squared}'\n display_params = ttk.Label(self, text=param_string, width=40)\n display_params.grid(row=24 + 7, column=0, columnspan=2, sticky=tk.W + tk.E)",
"def load_data(self):\n try:\n self.manager.load()\n except error:\n show_error_message(title='Initialization error!',\n message='File lords.sdb was not found!')\n else:\n self.update_widgets_values()",
"def run_model_slot(self):\n if self.Data is None:\n self.label_current_message.setText('尚未有資料以執行!請確認是否已載入資料。')\n else:\n self.stopped = False\n self.train_model1()\n self.btn_run_model.setEnabled(False)",
"def load_run(self, reload=False, *args, **kwargs):\n if 'data_source' in kwargs:\n data_source = kwargs['data_source']\n else:\n data_source = self.get_data_source(*args, **kwargs)\n \n if data_source:\n# try:\n if True:\n self.data_source = data_source\n if self.psana_cfg_dict:\n self.setOptions()\n elif self.cfg:\n # if a cfg file is specified it will be loaded\n # however, the cfg_setOptions takes precidence\n # in future may try combind the two.\n psana.setConfigFile(self.cfg)\n\n calibDir = '/reg/d/psdm/cxi/{:}/calib'.format(self.exp)\n print 'setting calibDir', self.exp, calibDir\n psana.setOption('psana.calib-dir', calibDir)\n\n print 'Loading data from ',data_source\n if self.ds and self.live:\n print 'WARNING: Currently Cannot reload live shared memory'\n print ' Need to exit python to reload'\n else:\n self.ds = psana.DataSource(data_source)\n self._no_evtData = False\n\n self._ds_run = self.ds.runs().next()\n\n _source_attrs = ['ds','events','evt']\n if self.indexed:\n self.times = self._ds_run.times()\n\n self.events = self._ds_run.events()\n self.configStore = PsanaDictify(self._ds_run.env().configStore())\n self.evrConfig = EvrDictify(self.configStore)\n self.load_epicsStore()\n\n# self.daqEventCodes = [ec.code() for ec in self.configStore.evr0.eventcodes] \n self.ievent = 0\n if not reload and self._kwargs.get('nstart'):\n for i in range(self._kwargs.get('nstart')-1),:\n self.next_event()\n \n# except:\n# print 'Failed to load data source \"{:}\"'.format(data_source)\n else:\n if len(self.runs) > 0:\n print 'WARNING: No xtc files for {:} available in {:}'.format(\n self.exp,self.xtc_dir)\n print 'Either set xtc_dir to a valid directory or restore files' \n print ' through the Data Manager:'\n pswww_portal = 'https://pswww.slac.stanford.edu/apps/portal/index'\n print pswww_portal+'.php?exper_id={:}'.format(self.exper_id)\n else:\n print 'No runs taken for this experiment'\n\n if self._reloadOnLoadRun:\n self._reloadOnLoadRun = False\n self.load_run(reload=True)",
"def _load_data(self, event):\n if self.parent is not None:\n wx.PostEvent(self.parent, NewLoadDataEvent())",
"def _load(self, dataset):\n raise NotImplementedError('Loader {} does not support loading datasets.'.format(self.type()))",
"def load_data(self):\n filename = filedialog.askopenfilename(title=\"Select A File\",\n file=((\"csv files\", \"*.csv\"),\n (\"dat files\", \"*.dat\"),\n (\"excel files\", \"*.xlsx\"),\n (\"All Files\", \"*.*\")))\n file_path = filename\n try:\n filename = f\"{file_path}\"\n name = os.path.splitext(os.path.basename(filename))[0]\n if name in ['h2o', 'KED', 'financial']:\n DataLoader.data = load_data(name)\n else:\n DataLoader.data = pd.read_csv(filename)\n except ValueError:\n messagebox.showerror(\"Information\", \"The file you have chosen is invalid.\")\n except FileNotFoundError:\n messagebox.showerror(\"Information\", f\"No such file as {file_path}\")\n self.clear_tree()\n\n self.treeview['columns'] = list(DataLoader.data.columns)\n for i in self.treeview['columns']:\n self.treeview.column(i, anchor=\"w\")\n self.treeview.heading(i, text=i, anchor='w')\n\n for index, row in DataLoader.data.iterrows():\n self.treeview.insert(\"\", 0, text=self.data.shape[0] - 1 - index, values=list(row))\n self.treeview.column('#0', width=100)\n\n self.summary_label = ttk.Label(self, text=f'Data shape: {DataLoader.data.shape}', width=40)\n self.summary_label.grid(row=2, column=0, columnspan=2, sticky=tk.S + tk.N)",
"def _loadData(self, data):\n Movie._loadData(self, data)\n PlexSession._loadData(self, data)",
"def run_data (arguments):\n if arguments.define_labels:\n data.define_labels()\n elif arguments.preprocess:\n # Preprocess from data_raw --> data_preprocessed\n data.preprocess()\n elif arguments.annotate:\n # Annotate from data_preprocessed --> data_annotated\n reverse = False # DEBUG\n annotator.annotate(reverse)\n elif arguments.split:\n # Split from data_annotated --> train.txt/valid.txt\n restrict = 100 # Default: Keep 100% of all files\n splitter.train_valid(restrict_to=restrict)",
"def run(self):\r\n self.collect_data()",
"def load_data(self):\n\n self._load_train_data()\n self._load_test_data()",
"def _loadData(self, data):\n Clip._loadData(self, data)\n PlexHistory._loadData(self, data)",
"def loadData(self):\n\n\n path = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', os.getcwd(), 'CSV, XLSX(*.csv *.xlsx)')\n\n # If a file was specified, load it up. If not, tell the user to pick a valid file\n if path[0] != '':\n\n if os.path.exists(path[0]) and os.path.getsize(path[0]):\n\n filepath, filename = os.path.split(path[0])\n pandaData = procedures.load(filename, filepath)\n\n while self.tabWidget.count() != 0:\n self.closeTab()\n self.createTab(pandaData)\n\n else:\n self.notifyUser(\"Please pick a valid file.\")",
"def load_data(self, data):\n self.data = data\n self.validate()",
"def prompt_load_data(self):\n\n self.status.config(\n text=\"Selections modified! Click Load Data to update statistics when ready...\"\n )",
"def run(self):\n self.load_template()\n self.load_data()\n self.load_files()\n self.render_content()\n self.process()\n # pprint(self.data)",
"def run(self):\n\t\tself.print_header_information()\n\n\t\t#self.get_number_of_instances_from_user()\n\n\t\t#self.compile_dataframe(self.number_of_instances)\n\n\t\tprint \"\\n{}\".format(self.data)\n\n\t\t# Uncomment these lines for debugging\n\t\tself.compile_dataframe_default()\n\t\t# print \"\\n{}\".format(self.data)\n\n\t\tself.analysis_of_dataframe(self.data)",
"def load_data(self):\n self.data = self.read_var(self.datavar)\n self.test_shape(self.datavar, self.data.shape, 2)",
"def load_model(self):\n Thread(target=self.__load_model).start()",
"def load(self):\n if self.verbosity:\n self.header(\"Loading data files\")\n\n model_list = [\n x for x in get_model_list() if os.path.exists(x.objects.get_csv_path())\n ]\n\n if self.resume_mode:\n # get finished load command logs of last update\n prev_loaded = [\n x.file_name\n for x in self.log_record.called.filter(\n command='loadcalaccessrawfile',\n finish_datetime__isnull=False\n )\n ]\n self.log(\"{} models already loaded.\".format(len(prev_loaded)))\n # remove these from model_list\n model_list = [x for x in model_list if x._meta.db_table not in prev_loaded]\n\n if self.verbosity:\n model_list = progress.bar(model_list)\n for model in model_list:\n call_command(\n \"loadcalaccessrawfile\",\n model.__name__,\n verbosity=self.verbosity,\n keep_files=self.keep_files,\n app_name=self.app_name,\n )",
"def execute(self):\n # Put your execute step code here before calling the '_doneExecution' method.\n self._data.load_data()\n self._data.update_from_config()\n print('LL estimation configs:')\n print(self._data.config)\n if self._config['GUI'] == 'True':\n # Start gui\n widget = LowerLimbGenerationDialog(self._data, self._doneExecution)\n self._setCurrentWidget(widget)\n else:\n self._data.register()\n self._doneExecution()"
]
| [
"0.7363575",
"0.6815484",
"0.6728596",
"0.65464854",
"0.64301383",
"0.6418219",
"0.6348947",
"0.6315146",
"0.62988985",
"0.6275391",
"0.6239956",
"0.616186",
"0.6159784",
"0.61524343",
"0.6125845",
"0.6116381",
"0.6072893",
"0.6040148",
"0.603637",
"0.6027977",
"0.5955419",
"0.5947545",
"0.59383684",
"0.591387",
"0.5903573",
"0.58901125",
"0.5889762",
"0.5882455",
"0.58678",
"0.58626074"
]
| 0.7560283 | 0 |
Runs loadData from LoadDataModel. Runs also previewData from this class. Shows error warning in GUI if data load does not work. | def loadPreviewDataforClassification(self):
# parameters for data load from GUI
self.loadDataModel.pathToDataSet = self.entryPath.get()
self.loadDataModel.firstRowIsTitle = bool(self.checkVarRow.get())
self.loadDataModel.firstColIsRowNbr = bool(self.checkVarCol.get())
# if entry field is empty, set nbrOfCategories to 0
self.loadDataModel.dataIsForTraining = False
# Load data
try:
self.loadDataModel.loadData()
print("LoadDataView: self.loadDataModel.data: ", self.loadDataModel.data)
except FileNotFoundError:
tk.messagebox.showerror("Error", " File not found.")
except ValueError:
tk.messagebox.showerror("Error", "The number of categories entered is incorrect. Enter number > 0 and smaller"
" the number of columns in the dataset.")
except:
print("Load data failed because of something different than nbrOfCategories entered or file not found.")
else: # if data load worked do the following
self.loadDataInformation.config(text="Data has been successfully loaded and stored.", fg="green")
self.previewData() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def loadPreviewData(self):\n # parameters for data load from GUI\n self.loadDataModel.pathToDataSet = self.entryPath.get()\n self.loadDataModel.firstRowIsTitle = bool(self.checkVarRow.get())\n self.loadDataModel.firstColIsRowNbr = bool(self.checkVarCol.get())\n # if entry field is empty, set nbrOfCategories to 0\n if len(self.entrytrainRowNbr.get()) == 0: # Code for this line from: https://stackoverflow.com/questions/15455113/tkinter-check-if-entry-box-is-empty\n self.loadDataModel.trainRowNumber = 0\n else:\n self.loadDataModel.trainRowNumber = int(self.entrytrainRowNbr.get())\n # if entry field is empty, set nbrOfCategories to 0\n if len(\n self.entryNbrCategories.get()) == 0: # Code for this line from: https://stackoverflow.com/questions/15455113/tkinter-check-if-entry-box-is-empty\n self.loadDataModel.nbrOfCategories = 0\n else:\n self.loadDataModel.nbrOfCategories = int(self.entryNbrCategories.get())\n self.loadDataModel.dataIsForTraining = True\n\n # Load data\n try:\n self.loadDataModel.loadData()\n print(\"LoadDataView: self.loadDataModel.data: \", self.loadDataModel.data)\n except FileNotFoundError:\n tk.messagebox.showerror(\"Error\", \" File not found.\")\n except ValueError:\n tk.messagebox.showerror(\"Error\", \"The number of categories entered is incorrect. Enter number > 0 and smaller\"\n \" the number of columns in the dataset.\")\n except:\n print(\"Load data failed because of something different than nbrOfCategories entered or file not found.\")\n else: # if data load worked do the following\n self.loadDataInformation.config(text=\"Data has been successfully loaded and stored.\", fg=\"green\")\n self.previewData()",
"def load_data(self):\n if self.debug:\n print(\"Loading data\")",
"def _LoadDataModel( self, reason ):\n if not self.isLoading:\n update_args = self._LoadDataModelValues( reason )\n if 'replot' in update_args:\n wx.CallAfter( self.UpdateState, replot = True )",
"def load_data(self) -> None:",
"def run(self):\r\n # Close any open models\r\n self.cleanUp()\r\n # Dynamically select file to preview\r\n self.selectTrial() \r\n # Add adjusted COM (RRA/CMC) model\r\n self.loadAdjustedModel()\r\n # Hide the markers from view\r\n self.hideModelMarkers()\r\n # Load CMC motion to model\r\n self.loadCMCMotion()",
"def view_data(self):\r\n if self.population.data != []:\r\n try:\r\n self.process_view_data()\r\n except InputError as ex:\r\n print(ex)\r\n self.view_data()\r\n\r\n else:\r\n print(\"\\nThere is no imported data to view. Please import in some data before trying to view data\")\r\n self.menu_page()",
"def load_data(self):",
"def load_data(self):\n raise NotImplementedError()",
"def _loadData(self, data):\n Movie._loadData(self, data)\n PlexHistory._loadData(self, data)",
"def load_and_predict(self):\n if DataLoader.data is None:\n messagebox.showerror(\"Information\", \"Data file is empty, please load the data first.\")\n return\n\n path = filedialog.askopenfilename()\n with open(path, 'rb') as file:\n Trainer.model = pickle.load(file)\n\n scale = DataLoader.data['out'].max() - DataLoader.data['out'].min()\n scaler = MinMaxScaler(feature_range=(0, 1))\n scaler.fit(DataLoader.data)\n data_scaled = pd.DataFrame(scaler.transform(DataLoader.data), columns=DataLoader.data.columns)\n\n Trainer.y_pred = batch_predict(Trainer.model, data_scaled.drop(columns=['out']))\n Trainer.y_true = data_scaled['out']\n\n self.test_rmse = scale * math.sqrt(mean_squared_error(Trainer.y_pred, Trainer.y_true))\n print(self.test_rmse)\n self.r_squared = np.corrcoef(Trainer.y_pred * scale, data_scaled['out'] * scale)[0, 1] ** 2\n print(self.r_squared)\n\n models = Trainer.model.get_models()\n param_string = f'Component Function Trained Parameters:\\n'\n for i in range(len(models)):\n param_string += \"length scale: {:.4f}\".format(models[i].kernel_.k1.length_scale) + ' ' + \\\n \"noise level: {:.4e}\".format(models[i].kernel_.k2.noise_level) + '\\n'\n param_string += f'\\nRMSE on the test set: {self.test_rmse}\\n'\n param_string += f'R^2 value on the test set: {self.r_squared}'\n display_params = ttk.Label(self, text=param_string, width=40)\n display_params.grid(row=24 + 7, column=0, columnspan=2, sticky=tk.W + tk.E)",
"def load_data(self):\n try:\n self.manager.load()\n except error:\n show_error_message(title='Initialization error!',\n message='File lords.sdb was not found!')\n else:\n self.update_widgets_values()",
"def run_model_slot(self):\n if self.Data is None:\n self.label_current_message.setText('尚未有資料以執行!請確認是否已載入資料。')\n else:\n self.stopped = False\n self.train_model1()\n self.btn_run_model.setEnabled(False)",
"def load_run(self, reload=False, *args, **kwargs):\n if 'data_source' in kwargs:\n data_source = kwargs['data_source']\n else:\n data_source = self.get_data_source(*args, **kwargs)\n \n if data_source:\n# try:\n if True:\n self.data_source = data_source\n if self.psana_cfg_dict:\n self.setOptions()\n elif self.cfg:\n # if a cfg file is specified it will be loaded\n # however, the cfg_setOptions takes precidence\n # in future may try combind the two.\n psana.setConfigFile(self.cfg)\n\n calibDir = '/reg/d/psdm/cxi/{:}/calib'.format(self.exp)\n print 'setting calibDir', self.exp, calibDir\n psana.setOption('psana.calib-dir', calibDir)\n\n print 'Loading data from ',data_source\n if self.ds and self.live:\n print 'WARNING: Currently Cannot reload live shared memory'\n print ' Need to exit python to reload'\n else:\n self.ds = psana.DataSource(data_source)\n self._no_evtData = False\n\n self._ds_run = self.ds.runs().next()\n\n _source_attrs = ['ds','events','evt']\n if self.indexed:\n self.times = self._ds_run.times()\n\n self.events = self._ds_run.events()\n self.configStore = PsanaDictify(self._ds_run.env().configStore())\n self.evrConfig = EvrDictify(self.configStore)\n self.load_epicsStore()\n\n# self.daqEventCodes = [ec.code() for ec in self.configStore.evr0.eventcodes] \n self.ievent = 0\n if not reload and self._kwargs.get('nstart'):\n for i in range(self._kwargs.get('nstart')-1),:\n self.next_event()\n \n# except:\n# print 'Failed to load data source \"{:}\"'.format(data_source)\n else:\n if len(self.runs) > 0:\n print 'WARNING: No xtc files for {:} available in {:}'.format(\n self.exp,self.xtc_dir)\n print 'Either set xtc_dir to a valid directory or restore files' \n print ' through the Data Manager:'\n pswww_portal = 'https://pswww.slac.stanford.edu/apps/portal/index'\n print pswww_portal+'.php?exper_id={:}'.format(self.exper_id)\n else:\n print 'No runs taken for this experiment'\n\n if self._reloadOnLoadRun:\n self._reloadOnLoadRun = False\n self.load_run(reload=True)",
"def _load_data(self, event):\n if self.parent is not None:\n wx.PostEvent(self.parent, NewLoadDataEvent())",
"def _load(self, dataset):\n raise NotImplementedError('Loader {} does not support loading datasets.'.format(self.type()))",
"def load_data(self):\n filename = filedialog.askopenfilename(title=\"Select A File\",\n file=((\"csv files\", \"*.csv\"),\n (\"dat files\", \"*.dat\"),\n (\"excel files\", \"*.xlsx\"),\n (\"All Files\", \"*.*\")))\n file_path = filename\n try:\n filename = f\"{file_path}\"\n name = os.path.splitext(os.path.basename(filename))[0]\n if name in ['h2o', 'KED', 'financial']:\n DataLoader.data = load_data(name)\n else:\n DataLoader.data = pd.read_csv(filename)\n except ValueError:\n messagebox.showerror(\"Information\", \"The file you have chosen is invalid.\")\n except FileNotFoundError:\n messagebox.showerror(\"Information\", f\"No such file as {file_path}\")\n self.clear_tree()\n\n self.treeview['columns'] = list(DataLoader.data.columns)\n for i in self.treeview['columns']:\n self.treeview.column(i, anchor=\"w\")\n self.treeview.heading(i, text=i, anchor='w')\n\n for index, row in DataLoader.data.iterrows():\n self.treeview.insert(\"\", 0, text=self.data.shape[0] - 1 - index, values=list(row))\n self.treeview.column('#0', width=100)\n\n self.summary_label = ttk.Label(self, text=f'Data shape: {DataLoader.data.shape}', width=40)\n self.summary_label.grid(row=2, column=0, columnspan=2, sticky=tk.S + tk.N)",
"def _loadData(self, data):\n Movie._loadData(self, data)\n PlexSession._loadData(self, data)",
"def run_data (arguments):\n if arguments.define_labels:\n data.define_labels()\n elif arguments.preprocess:\n # Preprocess from data_raw --> data_preprocessed\n data.preprocess()\n elif arguments.annotate:\n # Annotate from data_preprocessed --> data_annotated\n reverse = False # DEBUG\n annotator.annotate(reverse)\n elif arguments.split:\n # Split from data_annotated --> train.txt/valid.txt\n restrict = 100 # Default: Keep 100% of all files\n splitter.train_valid(restrict_to=restrict)",
"def run(self):\r\n self.collect_data()",
"def load_data(self):\n\n self._load_train_data()\n self._load_test_data()",
"def _loadData(self, data):\n Clip._loadData(self, data)\n PlexHistory._loadData(self, data)",
"def loadData(self):\n\n\n path = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', os.getcwd(), 'CSV, XLSX(*.csv *.xlsx)')\n\n # If a file was specified, load it up. If not, tell the user to pick a valid file\n if path[0] != '':\n\n if os.path.exists(path[0]) and os.path.getsize(path[0]):\n\n filepath, filename = os.path.split(path[0])\n pandaData = procedures.load(filename, filepath)\n\n while self.tabWidget.count() != 0:\n self.closeTab()\n self.createTab(pandaData)\n\n else:\n self.notifyUser(\"Please pick a valid file.\")",
"def load_data(self, data):\n self.data = data\n self.validate()",
"def prompt_load_data(self):\n\n self.status.config(\n text=\"Selections modified! Click Load Data to update statistics when ready...\"\n )",
"def run(self):\n self.load_template()\n self.load_data()\n self.load_files()\n self.render_content()\n self.process()\n # pprint(self.data)",
"def run(self):\n\t\tself.print_header_information()\n\n\t\t#self.get_number_of_instances_from_user()\n\n\t\t#self.compile_dataframe(self.number_of_instances)\n\n\t\tprint \"\\n{}\".format(self.data)\n\n\t\t# Uncomment these lines for debugging\n\t\tself.compile_dataframe_default()\n\t\t# print \"\\n{}\".format(self.data)\n\n\t\tself.analysis_of_dataframe(self.data)",
"def load_data(self):\n self.data = self.read_var(self.datavar)\n self.test_shape(self.datavar, self.data.shape, 2)",
"def load_model(self):\n Thread(target=self.__load_model).start()",
"def load(self):\n if self.verbosity:\n self.header(\"Loading data files\")\n\n model_list = [\n x for x in get_model_list() if os.path.exists(x.objects.get_csv_path())\n ]\n\n if self.resume_mode:\n # get finished load command logs of last update\n prev_loaded = [\n x.file_name\n for x in self.log_record.called.filter(\n command='loadcalaccessrawfile',\n finish_datetime__isnull=False\n )\n ]\n self.log(\"{} models already loaded.\".format(len(prev_loaded)))\n # remove these from model_list\n model_list = [x for x in model_list if x._meta.db_table not in prev_loaded]\n\n if self.verbosity:\n model_list = progress.bar(model_list)\n for model in model_list:\n call_command(\n \"loadcalaccessrawfile\",\n model.__name__,\n verbosity=self.verbosity,\n keep_files=self.keep_files,\n app_name=self.app_name,\n )",
"def execute(self):\n # Put your execute step code here before calling the '_doneExecution' method.\n self._data.load_data()\n self._data.update_from_config()\n print('LL estimation configs:')\n print(self._data.config)\n if self._config['GUI'] == 'True':\n # Start gui\n widget = LowerLimbGenerationDialog(self._data, self._doneExecution)\n self._setCurrentWidget(widget)\n else:\n self._data.register()\n self._doneExecution()"
]
| [
"0.75603956",
"0.68159103",
"0.6730189",
"0.6547364",
"0.6430838",
"0.64175487",
"0.63499826",
"0.6315992",
"0.6298599",
"0.6275572",
"0.62402666",
"0.61616594",
"0.61607134",
"0.61529624",
"0.61271083",
"0.61156225",
"0.60730803",
"0.60393345",
"0.60365355",
"0.6028048",
"0.59549415",
"0.5947337",
"0.59380335",
"0.59145844",
"0.5904249",
"0.58892226",
"0.5888902",
"0.58842325",
"0.5869316",
"0.5862851"
]
| 0.7363604 | 1 |
Set initials and try to set django user before saving | def save(self, *args, **kwargs):
self._set_first_initial()
self._set_user()
super(AbstractHuman, self).save(*args, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _set_user(self):\n\n if '' in (self.last_name, self.first_name):\n return\n\n self._set_first_initial()\n\n User = get_user_model()\n try:\n self.user = User.objects.get(\n models.Q(last_name__iexact=self.last_name),\n models.Q(first_name__iexact=self.first_name) |\n models.Q(first_name__istartswith=self.first_initial[0])\n )\n except User.DoesNotExist:\n pass\n except User.MultipleObjectsReturned:\n pass",
"def save(self, *args, **kwargs):\n self.username = self.username or self.email\n super().save(*args, **kwargs)",
"def setUp(self):\n self.user = User.objects.get(username='Aslan')\n self.user.save()\n self.setUpFormData()\n self.form = CompoundForm(self.user, self.formData)",
"def prefill(self, user):\n print('prefilling')\n self.username.data = user.username\n self.full_name.data = user.full_name\n self.email.data = user.email",
"def set_initial_data(self):\n if self.require_user_stockrecord:\n try:\n user_partner = self.user.partners.get()\n except (exceptions.ObjectDoesNotExist,\n exceptions.MultipleObjectsReturned):\n pass\n else:\n partner_field = self.forms[0].fields.get('partner', None)\n if partner_field and partner_field.initial is None:\n partner_field.initial = user_partner",
"def load_initial_data(apps, schema_editor):\n\n\n #\n # get the model by name\n User = apps.get_model('auth', 'User')\n password = User.objects.make_random_password()\n\n\n draftboard = User()\n draftboard.username= settings.USERNAME_DRAFTBOARD\n draftboard.password = make_password(password)\n draftboard.is_superuser = False\n draftboard.is_staff = True\n draftboard.save()\n\n escrow = User()\n escrow.username = settings.USERNAME_ESCROW\n escrow.password= make_password(password)\n escrow.is_superuser = False\n escrow.is_staff = True\n escrow.save()",
"def signup(self, request, user):\n user.first_name = self.cleaned_data['first_name']\n user.last_name = self.cleaned_data['last_name']\n user.save()\n\n return user",
"def save(self, commit=True):\n model = super(UserCreationForm, self).save(commit=False)\n model.username = self.cleaned_data['username']\n\n if commit:\n model.save()\n\n return model",
"def save(self):\n # First save the parent form and get the user.\n new_user = super(SignupFormExtra, self).save()\n\n new_user.first_name = self.cleaned_data['first_name']\n new_user.last_name = self.cleaned_data['last_name']\n new_user.save()\n\n # Userena expects to get the new user from this form, so return the new\n # user.\n return new_user",
"def accounts_setup(request):\n if request.method == 'POST':\n form = UsernameSetupForm(request.POST, instance=request.user)\n if form.is_valid():\n username = form.clean_username()\n request.user.username = username\n request.user.save()\n request.user.userprofile.first_time = False\n request.user.userprofile.save()\n return HttpResponseRedirect('/p/')\n else:\n form = UsernameSetupForm()\n return render(request, \"accounts_setup.html\", {\"form\":form})",
"def save(self, commit=True):\n user = super(SignupForm, self).save(commit=False)\n user.email = self.cleaned_data.get('email')\n user.username = self.cleaned_data.get('email')\n user.set_password(self.cleaned_data['password'])\n if commit:\n user.save()\n return user",
"def test_set_user_field(self):\n pass",
"def initial(self, request, *args, **kwargs):\n try:\n request.data[\"user\"] = request.auth.user\n except:\n pass\n return super(BoundToUserMixin, self).initial(request, *args, **kwargs)",
"def setUp(self):\n self.user = User.objects.create_user(username='Marry', email='[email protected]', password='secret')\n self.user.first_name = 'Marry'\n self.user.last_name = 'Tomson'\n self.user.save()",
"def __init__(self, *args, **kwargs):\n user = None\n if 'user' in kwargs:\n user = kwargs.pop('user')\n super(PersonForm, self).__init__(*args, **kwargs)\n if user:\n self.fields['username'].initial = user.username\n self.fields['first_name'].initial = user.first_name\n self.fields['last_name'].initial = user.last_name\n self.fields['email_address'].initial = user.email\n self.fields.keyOrder = [\n 'id', 'username', 'first_name', 'middle_name', 'last_name',\n 'email_address', 'gender',\n 'new_password', 'confirm_new_password', 'signature',\n 'signature_html', 'time_zone', 'language', 'show_signatures',\n 'avatar', 'autosubscribe', 'comment'\n ]",
"def get_initial(self):\n return {'user': self.request.user}",
"def get_initial(self):\n initial = self.initial.copy()\n initial['contact_email'] = self.request.user.email\n return initial",
"def setUp(self):\n self.new_user = User(username=\"Hey\")\n self.new_user.save()",
"def user_post_save(sender, instance, created, **kwargs):\n\t\tif created == True:\n\t\t\tup = UserProfile()\n\t\t\tup.user = instance\n\t\t\tup.save()",
"def pre_save(self, obj):\n obj.owner = self.request.user",
"def save(self, commit=True):\n\n email_local_part = self.cleaned_data['email'].split('@')[0]\n username_start = email_local_part[:5] if len(email_local_part) >= 5 else email_local_part\n self.instance.username = username_start + ''.join(\n [choice(ascii_letters) for _ in range(30 - len(username_start))])\n\n return super(RegisterForm, self).save(commit=commit)",
"def save(self, commit=True):\n instance = super(AbstractUserChangeForm, self).save(commit=False)\n \n # Il faut obligatoirement mettre un username pour que le modèle de base\n # de Django fonctionne alors on copie simplement l'adresse courriel.\n instance.username = self.cleaned_data['email']\n if commit:\n instance.save()\n return instance",
"def save_model(self, request, obj, form, change):\n if not change:\n if form.is_valid():\n user = form.save()\n user.identity = Users.SUPERVISOR\n user.set_password(form.data.get('password'))\n user.iCode = InviteCls.encode_invite_code(user.id)\n user.save()\n UserExtra.objects.create(uid=user)\n UserBase.objects.create(\n uid=user,\n phone=user.username\n )\n UserBusiness.objects.create(uid=user)\n else:\n super().save_model(request, obj, form, change)",
"def save(self, commit=True):\n instance = super(AbstractUserCreationForm, self).save(commit=False)\n \n # Il faut obligatoirement mettre un username pour que le modèle de base\n # de Django fonctionne alors on copie simplement l'adresse courriel.\n instance.username = self.cleaned_data['email']\n if commit:\n instance.save()\n return instance",
"def setUp(self):\n self.user1 = User.objects.create_user(username='jack', email='[email protected]', password='secret')\n self.user1.first_name = \"Jack\"\n self.user1.last_name = \"Smith\"\n self.user1.save()",
"def setUp(self):\n a, b, c = (\n User.objects.create_user(guy, email=\"%[email protected]\" % guy, password=guy)\n for guy in \"abc\"\n )\n a.is_superuser = True\n a.save()",
"def form_valid(self, form):\n self.object = form.save(commit=False)\n self.object.user = self.request.user\n self.object.customer_of = self.request.user.setting_set.get().employer\n messages.success(self.request, 'Changes Saved!')\n return super().form_valid(form)",
"def form_valid(self, form):\n obj = form.save(commit=False)\n obj.user = self.request.user\n obj.save()\n return super().form_valid(form)",
"def test_first_name_is_optional(self):\n self.updated_data['first_name'] = ''\n self.update_user()\n self.assertEqual(self.user.first_name, self.updated_data['first_name'])",
"def setUp(self):\n self.new_user = User('JosphatOtieno','jose@otis45')"
]
| [
"0.7399037",
"0.6818464",
"0.67360806",
"0.6703886",
"0.6615366",
"0.65040535",
"0.64916104",
"0.6349639",
"0.6300928",
"0.6296971",
"0.6286593",
"0.6262981",
"0.62472683",
"0.62465847",
"0.6244291",
"0.62277496",
"0.6220768",
"0.62102824",
"0.62016064",
"0.61989367",
"0.6194715",
"0.61195683",
"0.61002654",
"0.60846555",
"0.6073301",
"0.60656375",
"0.60296214",
"0.6023131",
"0.59397185",
"0.5919138"
]
| 0.7171682 | 1 |
Get this entry first author | def _get_first_author(self):
if not len(self.get_authors()):
return ''
return self.get_authors()[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_author(self):\n return self.author",
"def get_author(self):\n return self.author",
"def author(self):\n return self._data.get('author', None)",
"def author(self):\n return self._author",
"def author(self):\n return self._author",
"def author(self):\n return self._author",
"def author(self) -> str:\n return pulumi.get(self, \"author\")",
"def author(self):\n return self._changeset.get('author', None)",
"def author(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"author\")",
"def author(self):\n return User(None, self.get_data(\"author\"))",
"def author(self) -> str:\n return self._author",
"def author(self) -> str:\n return self._author",
"def getAuthor(self):\n return self.bookAuthor",
"def author(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"author\")",
"def author(self):\r\n return self.user",
"def author(self) -> 'User': # stub\n return self._author",
"def _get_last_author(self):\n if not len(self.get_authors()):\n return ''\n return self.get_authors()[-1]",
"def first_author(self) -> Tuple[str, str]:\n first_author = self.author[0]\n *name, last_name = first_author.split()\n name = ' '.join(name)\n return name, last_name",
"def get_author_full_name(self, obj):\n return obj.author.get_full_name()",
"def get_author(self):\n return self._get_property(core.SVN_PROP_REVISION_AUTHOR)",
"def author(self):\n\n for item in self.metadata:\n if item.tag.localname == \"creator\":\n if 'file-as' in item.tag:\n return item.tag['file-as']\n else:\n return item.tag.text",
"def getAuthor(self):\n\t\tself.authorList = [submission.author for submission in self.subreddit.top(time_filter = 'day', limit = self.limits)]\n\t\treturn self.authorList",
"def extract_author(bs_soup):\n sub_item = bs_soup.find(\"div\", class_=AUTHOR_CLASS)\n if sub_item:\n return sub_item.text\n return None",
"def get_article_author(self, article_webpage):\n pass",
"def get_author(self, attribute_name, default=None):\n return getattr(self, '%s__author' % attribute_name, default)",
"def author(self) -> GitLabUser:\n return GitLabUser.from_data(self.data['author'],\n self._token,\n self.data['author']['id'])",
"def author(self):\n\t\tauthor = re.search(r\"([Ff]rom\\s)(.+) ([tT]\\s*o)([^,]+),\",self.raw_text()[:150])\n\t\treport = re.search(r\".*[Rr]eport of ([^,]+),\",self.raw_text()[:150])\n\t\torder = re.search(r\".*[Oo]rder of ([^,]+)[,.]* to\",self.raw_text()[:250])\n\t\torder2 = re.search(r\".*[Oo]rder of ([^,]+),\",self.raw_text()[:250])\n\t\tlogbook = re.search(r\".*[lL]og of ([^,]+)[,.]\",self.raw_text()[:250])\n\t\tif order:\n\t\t\torder = order.group(1)\n\t\t\treturn order\n\t\tif order2:\n\t\t\torder2 = order2.group(1)\n\t\t\treturn order2\n\t\tif logbook:\n\t\t\tlogbook = logbook.group(1)\n\t\t\treturn logbook\n\t\tif report:\n\t\t\treport = report.group(1)\n\t\t\treturn report\n\t\tif author: \t\n\t\t\tauthor = author.group(2) \n\t\t\tauthor = re.sub(r\"([^,]*),.*\",r\"\\1\",author)\t \n\t\t\treturn author\n\t\t\n\t\treturn \"Unknown\"",
"def get_author(mods):\n name_part = mods.find(\"{{{0}}}name/{{{0}}}namePart\".format(common.MODS_NS))\n return name_part.text",
"def svn_client_commit_info_t_author_get(svn_client_commit_info_t_self): # real signature unknown; restored from __doc__\n return \"\"",
"def owner(self):\n \n if not self.logMessage is None:\n return self.logMessage[\"author\"]"
]
| [
"0.8229262",
"0.8229262",
"0.8032657",
"0.7940489",
"0.7940489",
"0.7940489",
"0.78599817",
"0.7840174",
"0.7684184",
"0.7680722",
"0.7656797",
"0.7656797",
"0.75249034",
"0.7519448",
"0.748793",
"0.7446496",
"0.73990786",
"0.7372625",
"0.7222093",
"0.7180607",
"0.7167988",
"0.71214586",
"0.69437104",
"0.6878948",
"0.67723966",
"0.6746661",
"0.6743087",
"0.6661065",
"0.6571779",
"0.6543618"
]
| 0.88237596 | 0 |
Get this entry last author | def _get_last_author(self):
if not len(self.get_authors()):
return ''
return self.get_authors()[-1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_author(self):\n return self.author",
"def get_author(self):\n return self.author",
"def author(self):\n return self._author",
"def author(self):\n return self._author",
"def author(self):\n return self._author",
"def _get_first_author(self):\n if not len(self.get_authors()):\n return ''\n return self.get_authors()[0]",
"def author(self):\n return self._data.get('author', None)",
"def author(self):\n return self._changeset.get('author', None)",
"def author(self) -> str:\n return pulumi.get(self, \"author\")",
"def author(self) -> str:\n return self._author",
"def author(self) -> str:\n return self._author",
"def svn_info_t_last_changed_author_get(svn_info_t_self): # real signature unknown; restored from __doc__\n return \"\"",
"def get_author(self):\n return self._get_property(core.SVN_PROP_REVISION_AUTHOR)",
"def author(self):\r\n return self.user",
"def author(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"author\")",
"def getAuthor(self):\n\t\tself.authorList = [submission.author for submission in self.subreddit.top(time_filter = 'day', limit = self.limits)]\n\t\treturn self.authorList",
"def author(self) -> 'User': # stub\n return self._author",
"def author(self):\n return User(None, self.get_data(\"author\"))",
"def getAuthor(self):\n return self.bookAuthor",
"def owner(self):\n \n if not self.logMessage is None:\n return self.logMessage[\"author\"]",
"def svn_client_commit_info_t_author_get(svn_client_commit_info_t_self): # real signature unknown; restored from __doc__\n return \"\"",
"def author(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"author\")",
"def get_author_full_name(self, obj):\n return obj.author.get_full_name()",
"def author(self):\n\n for item in self.metadata:\n if item.tag.localname == \"creator\":\n if 'file-as' in item.tag:\n return item.tag['file-as']\n else:\n return item.tag.text",
"def title_authors_fingerprint(self):\n if None in (self.title, self.authors_lastnames):\n return None\n\n lastnames = list(map(normalize_text_value, self.authors_lastnames))\n lastnames = normalize_list_direction(lastnames)\n lastnames = '.'.join(lastnames)\n\n title = normalize_text_value(self.title)\n\n return '$'.join((lastnames, title))",
"def get_ir(self, author):\n return self.divided[author][-1]",
"def extract_author(bs_soup):\n sub_item = bs_soup.find(\"div\", class_=AUTHOR_CLASS)\n if sub_item:\n return sub_item.text\n return None",
"def get_author(mods):\n name_part = mods.find(\"{{{0}}}name/{{{0}}}namePart\".format(common.MODS_NS))\n return name_part.text",
"def get_PI(soup):\n last_author = soup.find_all(attrs={\"name\":\"LastAuthor\"})[0].get_text()\n return(last_author)",
"def get_comment_author(self, author_id):\n response = self.http_call(\"{0}/users/{1}.json\".format(self.uri, author_id))\n return json.loads(response.content.decode(sys.stdout.encoding, \"replace\"))[\"user\"][\"name\"]"
]
| [
"0.77854884",
"0.77854884",
"0.76113564",
"0.76113564",
"0.76113564",
"0.7610408",
"0.75873405",
"0.7484349",
"0.7426977",
"0.73916334",
"0.73916334",
"0.73881865",
"0.72666234",
"0.7250214",
"0.7246593",
"0.71829695",
"0.7149919",
"0.70887244",
"0.70449716",
"0.7027704",
"0.69428915",
"0.683131",
"0.675527",
"0.6732246",
"0.64808154",
"0.6433944",
"0.639822",
"0.6354691",
"0.62843484",
"0.62770396"
]
| 0.876842 | 0 |
Get ordered authors list Note that authorentryrank_set is ordered as expected while the authors queryset is not (M2M with a through case). | def get_authors(self):
return [aer.author for aer in self.authorentryrank_set.all()] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def author_articles(self):\n return ArticlePage.objects.live().filter(author=self).order_by('-date')",
"def authors(self):\n authors = [\n n.people for n in self.pymbake_person_relationship.all()\n ]\n\n return authors",
"def authors(self):\n user_ids = set(r.author.id for r in self.history())\n return User.query.find({'_id': {'$in': list(user_ids)}}).all()",
"def query_authors(cls):\n authors = from_cache('AuthorsList')\n if not authors:\n authors = SuiAuthor.all().order('name').fetch(400)\n to_cache('AuthorsList', authors)\n return authors",
"def getAuthors(self):\n authors = []\n for each in self.context.getAuthors():\n title = each['title']\n firstname = each['firstname']\n middlename = each['middlename']\n lastname = each['lastname']\n author = Author(title, firstname, middlename, lastname)\n authors.append(author)\n return authors",
"def citing_authors(self, entities):\n result = self.db.execute(u'''SELECT DISTINCT(author_id)\n FROM \"entity_occurrences\"\n WHERE entity IN ({}) AND rho > ?'''.format(join_entities_sql(entities)), (DEFAULT_MIN_SCORE,)).fetchall()\n return [t[0] for t in result]",
"def Authors(self, default=[{}]):\n tmp = self.data.get('authors', default)\n return [HEP.AuthorReducedObject(i) for i in tmp]",
"def display_authors(self, *args):\n return ', '.join(author.name for author in args[0].authors.all()[:3])",
"def authors(self):\n authors = self.context.Authors(sep=' and ',\n lastsep=' and ',\n format=\"%L, %F %M\",\n abbrev=0,\n lastnamefirst=0)\n if not isinstance(authors, unicode):\n authors = unicode(authors, 'utf-8')\n return authors",
"def get_authors(self, instance):\n\n # Get Authors in the specified order\n author_order = Author.objects \\\n .filter(dataset_id=instance.id) \\\n .order_by('order')\n\n # Put in a list\n authors = [a.author for a in author_order]\n\n # Return a list of person urls\n serializers = PersonSerializer(authors, many=True, context={'request': self.context['request']}).data\n return [p[\"url\"] for p in serializers]",
"def all_authors( data ) :\n return list(set( chain.from_iterable( [ authors(x) for x in data ] ) ))",
"def popAuthors(self):\r\n# cur = self.dbConn.execute(\"SELECT * FROM People WHERE PersonID>0 ORDER BY Lastname\")\r\n# res = cur.fetchall()\r\n res = self.dbConn.execute(\"SELECT * FROM People WHERE PersonID>0 ORDER BY Lastname\").fetchall()\r\n\r\n self.authorList = [formatNameSQL(ln) for ln in res]\r\n self.quickAuthors = [ln[\"Lastname\"].lower() for ln in res]\r\n vals = [ln[\"PersonID\"] for ln in res]\r\n \r\n self.authorLookup = dict(zip(self.authorList,vals))",
"def get_popular_authors():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n query_popular_authors = \"\"\"\n SELECT aut.name, COUNT(lg.id) AS views\n FROM articles AS art\n JOIN log AS lg ON art.slug = SUBSTRING(lg.path,10)\n AND lg.status = '200 OK'\n JOIN authors AS aut ON aut.id = art.author\n GROUP BY aut.name\n ORDER BY views desc; \"\"\"\n c.execute(query_popular_authors)\n authors = from_db_cursor(c)\n db.close()\n return authors",
"def get_authors_from_papers(papers):\n auth_set = set()\n for p in papers:\n auth_set.update(p['authors'])\n return list(auth_set)",
"def getAuthor(self):\n\t\tself.authorList = [submission.author for submission in self.subreddit.top(time_filter = 'day', limit = self.limits)]\n\t\treturn self.authorList",
"def Authors(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('authors', default)\n return [HEP.AuthorObject(i) for i in tmp]",
"def test_list_all_authors(self):\n response = self.client.get(reverse('authors') + '?page=2')\n self.assertEqual(response.status_code, 200)\n self.assertTrue('is_paginated' in response.context)\n self.assertTrue(response.context['is_paginated'] is True)\n self.assertTrue(len(response.context['author_list']) == 3)",
"def get_authors(self):\n\n names = []\n rows = []\n\n try:\n rows = self.find_elements_in_owner(self.locators['author_row'])\n except NoSuchElementException:\n # there are no authors\n rows = []\n\n for rowEle in rows:\n authorname = self._get_author(rowEle)\n names.append(authorname)\n return names",
"def test_author_sorted_articles(self):\n\n self.make_test('articles', ArticleListSerializer, 'author:articles')",
"def getAuthors(self): #$NON-NLS-1$\r",
"def authors(author_ids):\n if author_ids is None:\n return ''\n else:\n ids = []\n for author_id in author_ids.split(','):\n ids.append(User.id == int(author_id))\n authors = User.query.filter(or_(*ids)).all()\n if authors is None:\n return ''\n else:\n return 'by ' + ', '.join([author.name for author in authors])",
"def get_authors(draft):\n authors = []\n for a in draft.authors.all():\n initial = ''\n prefix, first, middle, last, suffix = a.person.name_parts()\n if first:\n initial = first + '. '\n entry = '%s%s <%s>' % (initial,last,a.address)\n authors.append(entry)\n return authors",
"def get_authors(self, blogid=1):\n return self.execute('wp.getAuthors', blogid, self.username, self.password)",
"def get_top_authors():\n query2 = \"\"\"select name, count(*) as views\n from authors, articles, log\n where authors.id = articles.author\n and log.path like '%' || articles.slug\n group by name\n order by views desc;\"\"\"\n results = execute_query(query2)\n for result in results:\n print(\"- %s — %s views\" % (result[0], result[1]))",
"def authors(self):\n return self.properties.get('Authors', ClientValueCollection(SharedWithMeDocumentUser))",
"def top_authors():\n\n cur.execute(\"\"\"\n SELECT author, count(*) AS article_author\n FROM article_summary\n GROUP BY author\n ORDER BY article_author DESC;\n \"\"\")\n result = cur.fetchall()\n return result",
"def get_all_authors():\n try:\n authors = g.projects.distinct('authors')\n all_authors = sorted(authors, key=lambda k: str(k).lower()) if authors else []\n return jsonify(all_authors)\n except Exception as err:\n raise ApiException(str(err), 500)",
"def load_authors(self):\n authors = self.session.query(Author).join(AuthorStatus) \\\n .filter(Author.status_id == AuthorStatus.id) \\\n .filter(AuthorStatus.status == 'active') \\\n .all()\n return authors",
"def _get_authors_list():\n\n articles = os.listdir(\"../data/\")\n authors = []\n for article in articles:\n with open(\"../data/\" + article, 'r') as file:\n lines = file.readlines()\n author = tuple(\n line.replace(\"\\n\", \"\").split()[1] for line in lines\n if \"Автор:\" in line\n )[0]\n authors.append(author)\n\n return authors",
"def print_popular_authors():\n print(\"\\nAuthors listed by article views:\\n\")\n views_data = get_query_results(AUTHORS_VIEWS_QUERY)\n author_row_format = '{} - {} views'\n for author, views in views_data:\n print(author_row_format.format(author, views))"
]
| [
"0.67668843",
"0.67430836",
"0.6685066",
"0.6684926",
"0.6559511",
"0.65458703",
"0.65159404",
"0.64794177",
"0.64563096",
"0.64382637",
"0.63679224",
"0.63222766",
"0.6306415",
"0.63006264",
"0.6292419",
"0.6286021",
"0.6283298",
"0.6283292",
"0.6231312",
"0.62176764",
"0.62137",
"0.62034065",
"0.61938035",
"0.6091305",
"0.6090266",
"0.60900277",
"0.607416",
"0.60440975",
"0.6014041",
"0.59799176"
]
| 0.8204957 | 0 |
This method determines the highest HTTP version a server can support. This is done over HTTP or HTTPS, depending on the parameter. HTTP/2 is checked but never used to exchange messages. | def get_highest_http(uri, https, upgrade=True):
    highest_http = '1.0'
    response_status = ""
    redirect = False
    location = ""
    port = 443 if https else 80
    use_https = https
    use_upgrade = upgrade
    host, path = get_host(uri)
    i_p = check_host_name(host)
    request_line = "GET "+ path +" HTTP/1.1\r\n"
    headers_line = "Host: "+ host+ "\r\n"
    upgrade_line = "Connection: close\r\nUpgrade: h2c\r\n\r\n" if not https \
        else "Connection: Close\r\nuser-agent: Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en-US)"+ \
        "AppleWebKit/533.4 (KHTML, like Gecko) Chrome/5.0.375.86 Safari/533.4\r\n\r\n" #[3]
    h11_request = (request_line+headers_line+upgrade_line).encode()
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    if https:
        ctx = ssl.create_default_context()
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        ctx.set_alpn_protocols(['h2', 'http/1.1', 'http/1.0'])
        ssl_sock = ctx.wrap_socket(sock, server_hostname=host)
        sock = ssl_sock
    try:
        sock.settimeout(5)
        sock.connect((i_p, port))
        sock.settimeout(None)
    except socket.error:
        print("The socket can't seem to connect, "+
              "even though host name was resolved for the provided URI")
        sys.exit()
    except socket.timeout:
        print("A timeout occurred because the host failed to connect for 5 seconds")
    if https:
        proto = sock.selected_alpn_protocol()
        if proto == 'h2':
            highest_http = '2.0'
            sock.close()
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            ctx = ssl.create_default_context()
            ctx.set_alpn_protocols(['http/1.1', 'http/1.0'])
            ssl_sock = ctx.wrap_socket(sock, server_hostname=host)
            sock = ssl_sock
            sock.connect((i_p, port))
    rec = send_and_recieve(sock, h11_request)
    sock.close()
    status_line = rec[0]
    response_headers = rec[1:]
    if highest_http != '2.0':
        highest_http = "1.0" if 'HTTP/1.0' in status_line else "1.1"
    if not https and '101' in status_line:
        highest_http = "2.0"
    if '200' not in status_line and '204' not in status_line and '205' not in status_line:
        if '302' in status_line or '301' in status_line:
            redirect = True
            for header in response_headers:
                if 'Location' in header:
                    if 'https' in header:
                        use_https = True
                    redirect = True
                    location = (header.split(" ")[1])
                    if location == uri:
                        print("This site keeps redirecting to itself and returning 302's. Something is wrong")
                        redirect = False
                    break
        elif '101' in status_line:
            use_upgrade = False
            location = uri
            redirect = True
        elif '500' in status_line or '505' in status_line:
            print("Received a 5xx response from the server at location: " + uri +" exiting now...")
            sys.exit()
        elif '404' in status_line:
            print("The specified host exists but the path " + path + " was not found")
            sys.exit()
        else:
            print('An unexpected response status of ' +status_line.split(" ")[1] +' was received from site "' + uri +'"')
            sys.exit()
    response_status = status_line.split(" ")[1]
    tup = (
        response_status,
        response_headers,
        highest_http,
        redirect,
        location, use_https,
        use_upgrade
    )
    return tup | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_http_protocol(self):\n if self.cfg.ssl:\n return \"https\"\n else:\n return \"http\"",
"def get_protocol():\n if https():\n protocol = 'https'\n else:\n protocol = 'http'\n return protocol",
"def supports_http_1_1():",
"def get_protocol(self):\n if self.ssl:\n return \"https\"\n else:\n return \"http\"",
"def http(self) -> Optional[pulumi.Input['HttpScaleRuleArgs']]:\n return pulumi.get(self, \"http\")",
"def protocol(self):\n return 'https' if self.allow_https and self.is_secure else 'http'",
"def is_http2(listener):\n return (hasattr(listener, 'alpn_protocols') and listener.alpn_protocols and\n lib_consts.ALPN_PROTOCOL_HTTP_2 in listener.alpn_protocols)",
"def protocol(ver):\r\n if ver == 1:\r\n return 1\r\n\r\n if ver == 2:\r\n return 2\r\n\r\n\r\n raise ValueError",
"def use_http(self):\r\n use_http = self.config.get_bool(\"gox\", \"use_http_api\")\r\n if FORCE_HTTP_API:\r\n use_http = True\r\n if FORCE_NO_HTTP_API:\r\n use_http = False\r\n return use_http",
"def protocol(self) -> Optional[pulumi.Input['TargetServerProtocol']]:\n return pulumi.get(self, \"protocol\")",
"def __get_http2_ssl_context(self):\n # Get the basic context from the standard library.\n if self.client_side == False:\n #self.ctx = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)\n self.ctx = ssl._create_unverified_context()\n else:\n #self.ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=self.server_cert)\n #self.ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)\n self.ctx = ssl._create_unverified_context()\n\n # RFC 7540 Section 9.2: Implementations of HTTP/2 MUST use TLS version 1.2\n # or higher. Disable TLS 1.1 and lower.\n self.ctx.options |= (\n ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1\n )\n\n # RFC 7540 Section 9.2.1: A deployment of HTTP/2 over TLS 1.2 MUST disable\n # compression.\n self.ctx.options |= ssl.OP_NO_COMPRESSION\n\n # RFC 7540 Section 9.2.2: \"deployments of HTTP/2 that use TLS 1.2 MUST\n # support TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\". In practice, the\n # blacklist defined in this section allows only the AES GCM and ChaCha20\n # cipher suites with ephemeral key negotiation.\n\n\n if self.client_side == False:\n self.ctx.load_cert_chain(certfile=self.server_cert, keyfile=self.server_key)\n self.ctx.load_verify_locations(cafile=self.client_certs) \n else:\n self.ctx.load_cert_chain(certfile=self.client_certs, keyfile=self.client_key)\n self.ctx.load_verify_locations(cafile=self.client_certs) \n pass\n\n\n\n # We want to negotiate using NPN and ALPN. ALPN is mandatory, but NPN may\n # be absent, so allow that. This setup allows for negotiation of HTTP/1.1.\n self.ctx.set_alpn_protocols([\"h2\", \"http/1.1\"])\n\n try:\n self.ctx.set_npn_protocols([\"h2\", \"http/1.1\"])\n except NotImplementedError as e:\n print(\"TLS Error: NotImplementedError=%s\" % (e))\n pass\n\n #self.ctx = ctx\n\n return True",
"def negotiate_tls(self, tcp_sock, peer_ipaddr):\n try:\n # Note that SNI is mandatory for HTTP/2, so you *must* pass the\n # server_hostname argument.\n if self.client_side == False:\n self.tls_conn = self.ctx.wrap_socket(tcp_sock, server_side=True)\n else:\n self.tls_conn = self.ctx.wrap_socket(tcp_sock, server_hostname=peer_ipaddr)\n except Exception as e:\n #print(\"Fail to create tls connection1!! : client_side=%s, Err=%s\" % (self.client_side, e))\n return None\n\t\n # Always prefer the result from ALPN to that from NPN.\n # You can only check what protocol was negotiated once the handshake is\n # complete.\n try:\n negotiated_protocol = self.tls_conn.selected_alpn_protocol()\n if negotiated_protocol is None:\n negotiated_protocol = self.tls_conn.selected_npn_protocol()\n\n if negotiated_protocol != \"h2\":\n print(\"Err. negotiated_protocol=%s\" % (negotiated_protocol))\n raise RuntimeError(\"Didn't negotiate HTTP/2!\")\n except Exception as e:\n print(\"Fail to create tls connection2!! : %s\" % (e))\n return None\n \n #print(\"%s\" % self.ctx.client_random())\n #print(\"%s\" % ssl.Connection.client_random())\n #print(self.tls_conn._sslobj.SSL_get_client_random())\n #print(self.ctx.SSL_get_client_random())\n\n return self.tls_conn",
"def test_http_get_kind(self):\n assert_equal(self.test_http.get_kind(), 'mphttp')",
"def scheme(self):\n return self.use_ssl and \"https\" or \"http\"",
"def trafficProtocol(self):\n #\n # TODO: Reimplement this if possible.\n #\n return client.trafficProtocol(self)",
"def _http(self):\n raise NotImplementedError(\"HTTP transport is not supported.\")",
"def app_protocol(self):\n if settings.INAPP_REQUIRE_HTTPS:\n return 'https'\n else:\n return 'https' if self.is_https else 'http'",
"def protocol(self):\n\n if '://' in self.host:\n scheme, host = self.host.split('://', 1)\n return scheme\n elif self.port == 21:\n return 'ftp'\n elif self.port == 22:\n return 'sftp'\n elif self.port == 990:\n return 'ftps'\n else:\n # Uncertain, assume FTP.\n return 'ftp'",
"def verify_http_https_connection_and_fw_version(self, task):\n error_msg_https = ('Access to REST API returns unexpected '\n 'status code. Check driver_info parameter '\n 'related to iRMC driver')\n error_msg_http = ('Access to REST API returns unexpected '\n 'status code. Check driver_info parameter '\n 'or version of iRMC because iRMC does not '\n 'support HTTP connection to iRMC REST API '\n 'since iRMC S6 2.00.')\n try:\n # Check connection to iRMC\n elcm_license = irmc_common.check_elcm_license(task.node)\n\n # On iRMC S6 2.00, access to REST API through HTTP returns 404\n if elcm_license.get('status_code') not in (200, 500):\n port = task.node.driver_info.get(\n 'irmc_port', CONF.irmc.get('port'))\n if port == 80:\n e_msg = error_msg_http\n else:\n e_msg = error_msg_https\n raise exception.IRMCOperationError(\n operation='establishing connection to REST API',\n error=e_msg)\n\n irmc_common.set_irmc_version(task)\n except (exception.InvalidParameterValue,\n exception.MissingParameterValue) as irmc_exception:\n raise exception.IRMCOperationError(\n operation='configuration validation',\n error=irmc_exception)",
"def tls_max_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tls_max_version\")",
"def tls_max_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tls_max_version\")",
"def _choice_protocol(self):\n # space to add more complex choice algorithms, if desired\n return 0",
"def server(secure=False, idx=0):\n if secure:\n try:\n url = 'https://' + wwl.secure_wwl_servers[idx]\n except:\n url = ''\n else:\n try:\n url = 'http://' + wwl.wwl_servers[idx]\n except:\n url = ''\n return url",
"def _get_http_service_port(self):\n tcp_port = 0\n service_name = \"http_port\"\n if _is_https_enabled(self.dbapi):\n service_name = \"https_port\"\n\n try:\n web_port = self.dbapi.service_parameter_get_one(service=\"http\",\n section=\"config\",\n name=service_name)\n tcp_port = int(web_port.value)\n except exception.NotFound:\n LOG.info(\"cannot retrieve web service port\")\n\n return tcp_port",
"def protocol(self, code: str) -> str:\n return 'https'",
"def buildProtocol(self, addr):\n return _SSLServerProtocol(self)",
"def _init_http(self, http: t.Optional[Http] = None) -> Http:\n if not isinstance(http, Http):\n http: Http = Http(**self.ARGS_HTTP)\n return self._check_binding(http)",
"def protocol(self):\n return self._host[CONF_PROTOCOL]",
"def tls_max_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"tls_max_version\")",
"def getProtocol(self) -> str:\n ..."
]
| [
"0.6752688",
"0.6329223",
"0.62902206",
"0.6241211",
"0.6171372",
"0.6168764",
"0.61381495",
"0.5726957",
"0.5725487",
"0.57185143",
"0.5554565",
"0.5525395",
"0.55057085",
"0.5468806",
"0.5437173",
"0.543616",
"0.5421312",
"0.5418585",
"0.5397975",
"0.5352422",
"0.5352422",
"0.53116274",
"0.5310956",
"0.5252264",
"0.5208044",
"0.51815987",
"0.5171244",
"0.5167845",
"0.51506597",
"0.5150082"
]
| 0.69072604 | 0 |
Passes CL args to smart_client() | def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("URI")
    args = parser.parse_args()
    smart_client(args.URI) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self):\n super(BaseCLIClient, self).__init__()\n self.client_type = \"cli\"\n self.set_content_type('raw')\n self.set_accept_type('raw')\n self.execution_type = \"sync\"",
"def client():",
"def __init__(self, args):\n ClientPlugin.__init__(self)\n self.args = args",
"def _run(self, client: OpenrCtrl.Client, *args, **kwargs) -> None:\n\n raise NotImplementedError",
"def __init__(self, **kwargs):\n self.config = kwargs[\"config\"]\n self.cli = client.DefaultClient(app_key=self.config[\"app_key\"], app_secret=self.config[\"app_secret\"])\n self.req = None",
"def process_cl_args():\n\n parser = argparse.ArgumentParser(add_help=False)\n parser.add_argument('commands', nargs='*')\n parser.add_argument('--help', '-h', action='store_true')\n parser.add_argument('--version', '-v', action='store_true')\n parser.add_argument('--debug', '-d', action='store_true')\n parser.add_argument('--logging', '-l', action='store_true')\n parser.add_argument('--no-autosize', action='store_true')\n parser.add_argument('--no-preload', action='store_true')\n args = parser.parse_args()\n\n if args.version:\n xprint(get_version_info())\n xprint(\"\")\n sys.exit()\n\n elif args.help:\n for x in helptext():\n xprint(x[2])\n sys.exit()\n\n if args.debug or os.environ.get(\"mpsytdebug\") == \"1\":\n xprint(get_version_info())\n g.debug_mode = True\n g.no_clear_screen = True\n logfile = os.path.join(tempfile.gettempdir(), \"mpsyt.log\")\n logging.basicConfig(level=logging.DEBUG, filename=logfile)\n logging.getLogger(\"pafy\").setLevel(logging.DEBUG)\n\n elif args.logging or os.environ.get(\"mpsytlog\") == \"1\":\n logfile = os.path.join(tempfile.gettempdir(), \"mpsyt.log\")\n logging.basicConfig(level=logging.DEBUG, filename=logfile)\n logging.getLogger(\"pafy\").setLevel(logging.DEBUG)\n\n if args.no_autosize:\n g.detectable_size = False\n\n g.command_line = \"playurl\" in args.commands or \"dlurl\" in args.commands\n if g.command_line:\n g.no_clear_screen = True\n\n if args.no_preload:\n g.preload_disabled = True\n\n g.argument_commands = args.commands",
"def __init__(self, client):\n self.client = client\n self.call_params = {\n }",
"def call(self) -> global___Snippet.ClientCall:",
"def call(self) -> global___Snippet.ClientCall:",
"def __add_client_server_args(self, other_cs_list):\n self.__client_server_args.extend(other_cs_list)",
"def __init__(self, auth_args, name, desc,\n srv_chain, flow_conf):\n logger = logging.getLogger(__name__)\n self.conn = connection.Connection(**auth_args)\n self.pc_client = netsfc_clt.SFCClient(auth_args, logger)\n\n self.name = name\n self.desc = desc\n self.srv_chain = srv_chain\n self.flow_conf = flow_conf",
"def get_args(cls, client, args) :\n\t\ttry :\n\t\t\tobj = bfdsession()\n\t\t\toption_ = options()\n\t\t\toption_.args = nitro_util.object_to_string_withoutquotes(args)\n\t\t\tresponse = obj.get_resources(client, option_)\n\t\t\treturn response\n\t\texcept Exception as e :\n\t\t\traise e",
"def main() -> None:\n params = demisto.params()\n # if your Client class inherits from BaseClient, SSL verification is\n # handled out of the box by it, just pass ``verify_certificate`` to\n # the Client constructor\n verify_certificate = not params.get('insecure', False)\n\n # if your Client class inherits from BaseClient, system proxy is handled\n # out of the box by it, just pass ``proxy`` to the Client constructor\n proxy = params.get('proxy', False)\n app_id = params.get('creds_client_id', {}).get('password', '') or params.get('app_id') or params.get('_app_id')\n base_url = params.get('base_url')\n\n tenant_id = params.get('creds_tenant_id', {}).get('password', '') or params.get('tenant_id') or params.get('_tenant_id')\n client_credentials = params.get('client_credentials', False)\n enc_key = params.get('enc_key') or (params.get('credentials') or {}).get('password')\n certificate_thumbprint = params.get('creds_certificate', {}).get('identifier', '') or \\\n params.get('certificate_thumbprint', '')\n\n private_key = (replace_spaces_in_credential(params.get('creds_certificate', {}).get('password', ''))\n or params.get('private_key', ''))\n managed_identities_client_id = get_azure_managed_identities_client_id(params)\n\n first_fetch_time = params.get('first_fetch', '3 days').strip()\n fetch_limit = arg_to_number(params.get('max_fetch', 10))\n fetch_timeout = arg_to_number(params.get('fetch_timeout', TIMEOUT))\n demisto.debug(f'Command being called is {demisto.command()}')\n\n command = demisto.command()\n args = demisto.args()\n\n try:\n if not managed_identities_client_id and not app_id:\n raise Exception('Application ID must be provided.')\n\n client = Client(\n app_id=app_id,\n verify=verify_certificate,\n base_url=base_url,\n proxy=proxy,\n tenant_id=tenant_id,\n enc_key=enc_key,\n client_credentials=client_credentials,\n certificate_thumbprint=certificate_thumbprint,\n private_key=private_key,\n managed_identities_client_id=managed_identities_client_id\n )\n if demisto.command() == 'test-module':\n # This is the call made when pressing the integration Test button.\n return_results(test_module(client))\n\n elif command == 'microsoft-365-defender-auth-start':\n return_results(start_auth(client))\n\n elif command == 'microsoft-365-defender-auth-complete':\n return_results(complete_auth(client))\n\n elif command == 'microsoft-365-defender-auth-reset':\n return_results(reset_auth())\n\n elif command == 'microsoft-365-defender-auth-test':\n return_results(test_connection(client))\n\n elif command == 'microsoft-365-defender-incidents-list':\n test_context_for_token(client)\n return_results(microsoft_365_defender_incidents_list_command(client, args))\n\n elif command == 'microsoft-365-defender-incident-update':\n test_context_for_token(client)\n return_results(microsoft_365_defender_incident_update_command(client, args))\n\n elif command == 'microsoft-365-defender-advanced-hunting':\n test_context_for_token(client)\n return_results(microsoft_365_defender_advanced_hunting_command(client, args))\n\n elif command == 'microsoft-365-defender-incident-get':\n test_context_for_token(client)\n return_results(microsoft_365_defender_incident_get_command(client, args))\n\n elif command == 'fetch-incidents':\n fetch_limit = arg_to_number(fetch_limit)\n fetch_timeout = arg_to_number(fetch_timeout) if fetch_timeout else None\n incidents = fetch_incidents(client, first_fetch_time, fetch_limit, fetch_timeout)\n demisto.incidents(incidents)\n else:\n raise NotImplementedError\n # Log exceptions and return 
errors\n except Exception as e:\n return_error(f'Failed to execute {demisto.command()} command.\\nError:\\n{str(e)}')",
"def __init__(self, *args, **kwargs):\n super(Client, self).__init__(role='c', *args, **kwargs)\n\n # Internal variables\n self._bulksize = None\n self._server_hostname = None\n self._port = None\n self._num_streams = None\n self._zerocopy = False",
"def test_constructor_all_args(self):\n test_utils.generate_test_config_file()\n expected_auth = (\"hello\", \"world\")\n expected_url = \"http://wat.com/testing.json\"\n client = PowerTrackClient(_dummy_callback, auth=expected_auth, url=expected_url, config_file_path=config_file)\n\n self.assertEqual(expected_auth[0], client.auth[0])\n self.assertEqual(expected_auth[1], client.auth[1])\n self.assertEqual(expected_url, client.url)",
"def init_cloud_api(self, args=None):\n pass",
"def cmd_stru(args):",
"def cli(ctx):",
"def cli(ctx):",
"def cli(**_) -> None:\n pass",
"def doCall(self, *args, **kw):\n args = list(args)\n\n for param in self.params[len(args):]:\n args.append(kw.pop(param.name, []))\n\n if not set(kw) <= {'_client'}:\n raise TypeError('Invalid keyword arguments: %s' % kw)\n\n if len(args) > len(self.params):\n err = cTypeError('%(func)s() takes exactly %(needed)d arguments '\n '(%(given)d given)',\n nt={'func': self.name,\n 'needed': len(self.params),\n 'given': len(args)})\n\n if kw['_client']:\n raise ClientError(err)\n else:\n raise err\n\n elist = []\n for i in range(len(self.params)):\n attr = self.params[i]\n try:\n v = attr.coerceValueList(args[i], str(i))\n attr.validateValues(False, v)\n except LocalisedError as e:\n if not hasattr(attr, '_toc') and hasattr(attr, '_xlatKey'):\n e.t['name'] = attr._xlatKey\n elist.append(e)\n continue\n args[i] = v\n if elist:\n if kw['_client']:\n raise cAttrErrorList(*elist)\n else:\n raise AttrErrorList(*elist)\n\n # Exceptions in the implementation won't be wrapped in ClientError\n if self.toi:\n aList = [self.toi] + args\n return self.method(*aList)\n else:\n return self.method(*args)",
"def cmd_stor(args):",
"def main_CL():\r\n version=1.0\r\n st = time.time()\r\n parser = OptionParser(usage=usage(), version='%s'%version)\r\n parser.add_option(\"-n\", \"--days\", dest=\"days\", default=\"30\", help=\"Days ago, defaults to 30 days\")\r\n parser.add_option(\"-s\", \"--stream\", dest=\"stream\", default=\"all\", help=\"Code Stream, defaults to all\")\r\n parser.add_option(\"-u\", \"--usage\", dest=\"usage\", default=\"\", help=\"Show usage information\")\r\n parser.add_option(\"-d\", \"--debug\", dest='debug', action=\"count\", help=\"The debug level, use multiple to get more.\")\r\n (options, args) = parser.parse_args()\r\n\r\n if options.debug > 1:\r\n print ' days %s' %(options.days)\r\n print ' args: %s' %args\r\n else:\r\n options.debug = 0\r\n \r\n if options.usage:\r\n print usage()\r\n else:\r\n obj=ListCRs()\r\n obj.setUp()\r\n since = options.days \r\n \r\n #stream = str(stream).strip() \r\n obj.listCRsCL(since, options, st) \r\n \r\n print '\\nTook a total of %3.2f secs -^' %(time.time()-st)",
"def __init__(self, client):\n self._client = client\n self._argument_converter = ArgumentConverter()",
"def twcloud_cli(**kwargs):\n fire.Fire(gen_twcloud)",
"def execute(self, args):",
"def service_client_initialization(self) -> global___Snippet.ClientInitialization:",
"async def run():\n # Get the arguments from the parser\n args = client.arguments\n\n # If the help argument was used, return\n if hasattr(args, \"help\"):\n return\n # Otherwise, check the correct command and invoke the respective function\n # BUILD\n if args.command == \"build\":\n if args.action == \"delete\":\n await client.delete_build(args.build)\n elif args.action == \"download\":\n await client.download_build(args.build, args.force)\n elif args.action == \"info\":\n await client.show_build(args.build)\n # BUILDS\n elif args.command == \"builds\":\n if args.refresh:\n await client.update_builds()\n await client.show_builds(args.ready_only)\n # FOLDER\n elif args.command == \"folder\":\n if args.action == \"create\":\n await client.create_folder(args.folder, args.no_resources)\n elif args.action == \"info\":\n await client.get_folder(args.folder)\n elif args.action == \"resources\":\n await client.get_resources(args.folder)\n elif args.action == \"delete\":\n await client.delete_folder(args.folder)\n # FOLDERS\n elif args.command == \"folders\":\n if args.refresh:\n await client.post(\"/folders\")\n await client.show_folders()\n # SERVER\n elif args.command == \"server\":\n if args.action == \"start\":\n await client.start_server(args.server, args.build)\n elif args.action == \"info\":\n await client.get_server(args.server)\n elif args.action == \"stop\":\n await client.stop_server(args.server)\n # SERVERS\n elif args.command == \"servers\":\n await client.print_servers()\n # INFO\n else:\n await client.show_info()",
"def _client_cmd(self, cmd):\n logging.info('Client cmd: [%s]', cmd)\n return self._client.run(cmd)",
"async def execute(self, client, message, arg):\n\t\treturn"
]
| [
"0.58076566",
"0.5776437",
"0.56957334",
"0.5592278",
"0.5579138",
"0.55543005",
"0.5536592",
"0.55259174",
"0.55259174",
"0.55040467",
"0.55021024",
"0.549863",
"0.5486501",
"0.5452488",
"0.5451785",
"0.54476345",
"0.54291",
"0.53953785",
"0.53953785",
"0.53887117",
"0.5365382",
"0.53493726",
"0.5336452",
"0.53283167",
"0.53097796",
"0.53085816",
"0.5302902",
"0.5271597",
"0.52644527",
"0.5261855"
]
| 0.6573514 | 0 |
Set names using a semicolon (;) separated string or a list of strings | def SetNames(self, names):
    # parse the names (a semicolon separated list of names)
    if isinstance(names, str):
        names = names.split(';')
    if self.__names != names:
        self.__names = names
        self.Modified() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def names(self, names):\n\n self._names = names",
"def setName(self,value):\n assert value == None or type(value) == str, repr(value)+' is not a valid name'\n self._name = value",
"def setnames(self, *args, **kwargs):\n return _coordsys.coordsys_setnames(self, *args, **kwargs)",
"def setName(self, name: str, /) -> Any:\n ...",
"def add_names(self, *sNames):\n self.names += list(sNames)",
"def set_pinnames(self, names):\n self.pnames = names",
"def _set_list(name, value, context):\n\n if name in os.environ:\n context[name] = os.environ.get(name).lower().split(\",\")\n\n _set_default(name, value, context)",
"def set_markets(self, markets=None):\n if markets and isinstance(markets, str):\n if markets.find(',') != -1:\n market_list = markets.split(',')\n for item in market_list:\n self.markets.append(item.strip())\n else:\n self.markets.append(markets)\n else:\n self.markets = [\"Nasdaq\", \"Dow Jones & Company\",\n \"Standard & Poor's\", \"EURO STOXX 50\",\n \"OMX Vilnius\", \"MICEX\"]",
"def setName(self, name):\n self.name = str(name)",
"def names(self, *names):\n assert len(names) == len(self._preds)\n self._names = names\n return self",
"def set_Names(self, value):\n super(GetTokenDetailsInputSet, self)._set_input('Names', value)",
"def name(self, name: List[NameAndValue]):\n\n self._name = name",
"def name_list(string):\n names = []\n for name in string.split('; '):\n if ', ' in name:\n last_comma_first = name.split(', ', 2)\n first = last_comma_first[1].strip()\n last = last_comma_first[0].strip()\n names.append(first + \" \" + last)\n else:\n names.append(name.strip())\n return names",
"def set_name(self, newname=\"\"):\n self.name = newname",
"def setName(self, *args):\n return _libsbml.ListOfMembers_setName(self, *args)",
"def set_attr(self, name: str, values: Union[list, tuple, object]):",
"def put_side_set_name(self, id, name):\n # Find the side set.\n _idx = self._f.variables[\"ss_prop1\"][:]\n assert id in _idx, \"Could not find side set with id %i.\" % id\n # 1-based indexing!\n idx = np.argwhere(_idx == id)[0][0] + 1\n\n self._f.variables[\"ss_names\"][idx - 1] = b\"\"\n self._f.variables[\"ss_names\"][idx - 1, :len(name)] = \\\n [_i.encode() if hasattr(_i, \"encode\") else _i for _i in name]",
"def set_name(self):\n if self.first_name and self.last_name:\n name_string = \"%s\" % self.first_name\n name_string += \" %s\" % self.last_name\n self.name = name_string\n\n if self.name:\n if not self.first_name and not self.last_name:\n n = HumanName(self.name)\n self.first_name = n.first\n if n.middle:\n self.first_name = n.first + \" \" + n.middle\n self.last_name = n.last\n if n.suffix:\n self.last_name = n.last + \" \" + n.suffix",
"def set_name(self, name):\n self.name = name # overwrite the existing name with the input name",
"def set_name(self, name):\n self.name = name # overwrite the existing name with the input name",
"def set_name(self, n, line_number=0):\n self.name = n\n self._name_line = line_number",
"def set_name(self, item_name):\r\n self.name = item_name",
"def set_name(self,name):\n if not isinstance(name,(str)):\n raise TypeError('name must be string')\n else:\n self._name = name",
"def setName(self, name):\n self.name = name",
"def setName(self, name):\n self.name = name",
"def setName(self, name):\n self.name = name",
"def setName(self, name):\n self.name = name",
"def set_name(self, name):\n # XXX: convert name to unicode, if it's a plain string?\n d = analyze_name(name, canonical=0)\n self.data.update(d)",
"def set_blockname(self, names: Iterable):\n\n if len(names) != self.n_blocks_:\n raise TypeError(f'length mismatch [self.n_blocks_: {self.n_blocks_}, names(given): {len(names)}]')\n\n self.block_names_ = names",
"def _set_names(self, names, *, level=None, validate: bool = True):\n # GH 15110\n # Don't allow a single string for names in a MultiIndex\n if names is not None and not is_list_like(names):\n raise ValueError(\"Names should be list-like for a MultiIndex\")\n names = list(names)\n\n if validate:\n if level is not None and len(names) != len(level):\n raise ValueError(\"Length of names must match length of level.\")\n if level is None and len(names) != self.nlevels:\n raise ValueError(\n \"Length of names must match number of levels in MultiIndex.\"\n )\n\n if level is None:\n level = range(self.nlevels)\n else:\n level = [self._get_level_number(lev) for lev in level]\n\n # set the name\n for lev, name in zip(level, names):\n if name is not None:\n # GH 20527\n # All items in 'names' need to be hashable:\n if not is_hashable(name):\n raise TypeError(\n f\"{type(self).__name__}.name must be a hashable type\"\n )\n self._names[lev] = name\n\n # If .levels has been accessed, the names in our cache will be stale.\n self._reset_cache()"
]
| [
"0.58341724",
"0.5792466",
"0.57665694",
"0.57385945",
"0.57164675",
"0.5659365",
"0.56351304",
"0.5599404",
"0.5597629",
"0.55682",
"0.55471784",
"0.5483656",
"0.53453225",
"0.5342401",
"0.53067094",
"0.52203083",
"0.5213452",
"0.5210753",
"0.52076817",
"0.52076817",
"0.5205807",
"0.5205177",
"0.5204841",
"0.51973915",
"0.51973915",
"0.51973915",
"0.51973915",
"0.51665086",
"0.514489",
"0.5141075"
]
| 0.74215704 | 0 |
Set the number of columns for the output ``vtkTable`` | def SetNumberOfColumns(self, ncols):
    if isinstance(ncols, float):
        ncols = int(ncols)
    if self.__ncols != ncols:
        self.__ncols = ncols
        self.Modified() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setNumColumns(self, num):\n ExportDialog.numColumns = num",
"def setNumCols(serDisplay, cols):\n cmd = array.array('B', (124,0))\n if (cols == 20):\n cmd[1] = 3\n else:\n if (cols != 16):\n print(\"WARNING: num columns of %d not valid - must be 16 or 20. Defaulting to 16\", cols)\n cmd[1] = 6 \n writeToDisplay(serDisplay, cmd.tostring())",
"def setoutputsize(self, size, column=None):\n pass",
"def getColumnCount(self) -> int:\n ...",
"def test_num_columns(self):\n pass",
"def n_cols(self):\n\n return len(self.plaincolumns)",
"def columnCount(self, index):\n return 4",
"def columnCount(self, parent): # pylint: disable=unused-argument\n return 5",
"def no_of_columns(self): \n return len(self.columns) + (1 if self.serialize else 0)",
"def columnCount(self, index=QModelIndex()):\n\t\treturn 5",
"def ncolumns(self):\n return self.__ncols",
"def columnCount(self, parent: QtModelIndex = qtc.QModelIndex()):\n return 3",
"def ncolumns(self):\n return len(self.__column_list)",
"def __set_column_width(self):\n for i in range(0, len(self.header_width)):\n self.view.setColumnWidth(i, self.header_width[i])",
"def columnCount(self, index=QModelIndex()):\n\t\treturn 2",
"def getNoOfCols(self):\n return _patchExtractor.patchExtractor_getNoOfCols(self)",
"def set_headers(self,executer, tree, cursor, table, columns_size):\n\n # Getting headers\n headers = executer.get_columns(table, cursor)\n tree[\"columns\"] = headers\n\n # Setting width to all column headers basing on columns amount.\n set_width = int(self.column_length_configurator/len(headers))\n\n\n # Setting columns width and headers\n for column in headers:\n tree.column(column, width=set_width,minwidth=self.min_width)\n tree.heading(column, text=column)",
"def __store_column_width(self):\n self.header_width = []\n for i in range(0, self.view.header().count()):\n self.header_width.append(self.view.columnWidth(i))",
"def setNumRows(serDisplay, rows):\n cmd = array.array('B', (124,0))\n if (rows == 4):\n cmd[1] = 5\n else:\n if (rows != 2):\n print(\"WARNING: num rows of %d not valid - must be 2 or 4. Defaulting to 2\", rows)\n cmd[1] = 6\n writeToDisplay(serDisplay, cmd.tostring())",
"def get_table_total_cols(table_name):\n return table_spec[table_name]['number_of_columns']",
"def columns(self):\n \n pass",
"def number_of_columns(self):\n return len(self._columns)",
"def set_size(self, new_size: int):\n self.__tab_size = new_size\n self.__check_interpreter()\n self.__vals = [0 for _ in range(self.__tab_size)]",
"def columnCount(self,\n parent=QtCore.QModelIndex()) -> int:\n return len(self.Column);",
"def SetTableColumns(this, a_cols):\n this.cols = a_cols",
"def get_num_columns(table):\n\n\treturn max((len(row) for row in table))",
"def columns(self):\n return int(ShellCommandOutput('tput cols'))",
"def start_table(self):\n self.col_widths = []\n self.result = \"\"",
"def num_cols(self):\n return len(self.column_names())",
"def columnCount(self, parent:typing.Optional[QtCore.QModelIndex]=QtCore.QModelIndex()) -> int:"
]
| [
"0.7280288",
"0.6517732",
"0.634845",
"0.62785757",
"0.6208003",
"0.61394155",
"0.61232775",
"0.61086184",
"0.6093149",
"0.60815024",
"0.6042228",
"0.59893984",
"0.597266",
"0.5961764",
"0.5887591",
"0.58124113",
"0.5785969",
"0.5768652",
"0.57523006",
"0.5723587",
"0.56808984",
"0.56654197",
"0.5664459",
"0.566378",
"0.5658305",
"0.5657645",
"0.5624688",
"0.56086504",
"0.559614",
"0.5580278"
]
| 0.6582668 | 1 |
Test case for networking_project_network_create | def test_networking_project_network_create(self):
    pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_03_network_create(self):\n # Validate the following\n # 1. Create a project.\n # 2. Add virtual/direct network resource to the project. User shared\n # network resource for the project\n # 3. Verify any number of Project level Virtual/Direct networks can be\n # created and used for vm deployment within the project.\n # 4. Verify shared networks (zone and domain wide) from outside the\n # project can also be used in a project.\n\n # Create project as a domain admin\n project = Project.create(\n self.apiclient,\n self.services[\"project\"],\n account=self.account.name,\n domainid=self.account.domainid\n )\n # Cleanup created project at end of test\n self.cleanup.append(project)\n self.debug(\"Created project with domain admin with ID: %s\" %\n project.id)\n\n network_offerings = list_network_offerings(\n self.apiclient,\n projectid=project.id,\n supportedServices='SourceNat',\n type='isolated',\n state='Enabled'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a network with network offering ID: %s\" %\n network_offering.id)\n self.services[\"network\"][\"zoneid\"] = self.zone.id\n network = Network.create(\n self.apiclient,\n self.services[\"network\"],\n networkofferingid=network_offering.id,\n projectid=project.id\n )\n self.debug(\"Created network with ID: %s\" % network.id)\n networks = Network.list(\n self.apiclient,\n projectid=project.id,\n listall=True\n )\n self.assertEqual(\n isinstance(networks, list),\n True,\n \"Check for the valid network list response\"\n )\n\n self.debug(\"Deploying VM with network: %s\" % network.id)\n\n virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n network_offerings = list_network_offerings(\n self.apiclient,\n state='Enabled',\n guestiptype='Shared',\n name='DefaultSharedNetworkOffering',\n displaytext='Offering for Shared networks'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a shared network in domain: %s\" %\n self.domain.id)\n\n # Getting physical network and free vlan in it\n physical_network, vlan = get_free_vlan(self.apiclient, self.zone.id)\n\n self.services[\"domain_network\"][\"vlan\"] = vlan\n self.services[\"domain_network\"][\"physicalnetworkid\"] = physical_network.id\n\n # Generating random subnet number for shared network creation\n shared_network_subnet_number = random.randrange(1,254)\n\n self.services[\"domain_network\"][\"gateway\"] = \"172.16.\"+str(shared_network_subnet_number)+\".1\"\n self.services[\"domain_network\"][\"startip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".2\"\n self.services[\"domain_network\"][\"endip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".20\"\n\n domain_network = Network.create(\n self.apiclient,\n self.services[\"domain_network\"],\n domainid=self.domain.id,\n networkofferingid=network_offering.id,\n zoneid=self.zone.id\n )\n self.cleanup.append(domain_network)\n self.debug(\"Created network with ID: %s\" % domain_network.id)\n\n 
virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(domain_network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n # Delete VM before network gets deleted in cleanup\n virtual_machine.delete(self.apiclient, expunge=True)\n return",
"def test_networking_project_network_tag_create(self):\n pass",
"def test_create_network():\n _network = Network()",
"def test_networking_project_network_get(self):\n pass",
"def test_networking_project_network_list(self):\n pass",
"def test_add_network(self):\n pass",
"def test_create_network_and_subnet(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 254\n self.__create_network_and_subnet_test_helper__(network_name, network_cidr)",
"def test_get_network(self):\n pass",
"def test_networking_project_network_delete(self):\n pass",
"def test_networking_project_network_update(self):\n pass",
"def test_networking_project_network_service_get(self):\n pass",
"def test_create_cluster_network(self):\n pass",
"def test_register_network(self):\n pass",
"def network_create(request, **kwargs):\n LOG.debug(\"network_create(): kwargs = %s\", kwargs)\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body = {'network': kwargs}\n network = neutronclient(request).create_network(body=body).get('network')\n return Network(network)",
"def test_networking_project_network_service_list(self):\n pass",
"def create(self):\n logging.debug(\"%s create called\" % self)\n # networks = self.infra.get(\"networks\")\n notify(\"Creating network %s\" % self.name)\n self.cloudnet = cn.create(self.name, cidr=self.cidr)\n return True",
"def test_get_networks(self):\n pass",
"def test_create_network(self):\n network = vertigo.create_network(\"test\")\n self.assert_equals(\"test\", network.address)\n network.address = \"foo\"\n self.assert_equals(\"foo\", network.address)\n network.enable_acking()\n self.assert_true(network.acking_enabled())\n network.disable_acking()\n self.assert_false(network.acking_enabled())\n network.num_ackers = 10\n self.assert_equals(10, network.num_ackers)\n network.ack_expire = 50000\n self.assert_equals(50000, network.ack_expire)\n component = network.from_verticle('test_feeder_verticle', main='test_feeder_verticle.py')\n self.assert_equals('test_feeder_verticle', component.name)\n self.assert_equals('test_feeder_verticle.py', component.main)\n component.workers = 4\n self.assert_equals(4, component.workers)\n component2 = component.to_verticle('test_worker_verticle')\n component2.main = 'test_worker_verticle.py'\n self.assert_equals('test_worker_verticle.py', component2.main)\n self.complete()",
"def test_networking_project_network_tag_list(self):\n pass",
"def test_networking_project_network_event_list(self):\n pass",
"def network_create(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(keep_name=True, **kwargs)\n return cloud.create_network(**kwargs)",
"def test_networking_project_network_tag_put(self):\n pass",
"def test_api_use_web_network_post(self):\n body = Network()\n response = self.client.open(\n '/api/use/web-network/',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def create_network(options, vsm_obj):\n edge_id = get_edge(vsm_obj)\n if not edge_id:\n if not add_edge(options):\n print(\"Failed to create edge\")\n return False\n edge_id = get_edge(vsm_obj)\n\n vdn_scope = get_transport_zone(options)\n virtual_wire = VirtualWire(vdn_scope)\n name = get_network_name(options)\n response = virtual_wire.read_by_name(name)\n if response != \"FAILURE\":\n print(\"Found network %s already exists\" % options.name)\n return True\n\n virtual_wire_create = VirtualWireCreateSpecSchema()\n virtual_wire_create.name = name\n virtual_wire_create.tenantId = name\n virtual_wire_create.description = 'NSX network %s' % name\n\n # check if user needs to enable guest vlan tagging,\n # this is require if one needs to run vlan tests in nested\n # environment.\n if hasattr(options, 'guest_vlan'):\n if options.guest_vlan is True:\n print(\"network %s has guest vlan tagging enabled\"\\\n % options.name)\n virtual_wire_create.guestVlanAllowed = True\n\n print(\"Creating network %s\" % options.name)\n result = virtual_wire.create(virtual_wire_create)\n if (result[0].response.status != 201):\n print \"response: %s\" % result[0].response.status\n print \"response: %s\" % result[0].response.reason\n return False\n print(\"Changing security settings on the network\")\n set_network_security_policy(options)\n return add_edge_interface(options, edge_id)",
"def test_api_use_royal_network_post(self):\n body = Network()\n response = self.client.open(\n '/api/use/royal-network/',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def test_create_net_namespace(self):\n pass",
"def test_networking_project_network_event_get(self):\n pass",
"def _build_network(self):\n pass",
"def run(self, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.get_network(network[\"id\"])",
"def test_api_use_virtual_network_post(self):\n body = Network()\n response = self.client.open(\n '/api/use/virtual-network/',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))"
]
| [
"0.88057435",
"0.83907145",
"0.8311333",
"0.82338715",
"0.8144531",
"0.8073008",
"0.766816",
"0.7591707",
"0.75781333",
"0.75702184",
"0.7456846",
"0.7370858",
"0.7303496",
"0.72163486",
"0.71597195",
"0.7158633",
"0.71257806",
"0.7089581",
"0.69936407",
"0.69620895",
"0.6961304",
"0.6935521",
"0.69048536",
"0.686473",
"0.68621343",
"0.68589395",
"0.68548095",
"0.6853115",
"0.68322134",
"0.6826048"
]
| 0.95674586 | 0 |
Test case for networking_project_network_delete | def test_networking_project_network_delete(self):
    pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_delete_network(self):\n pass",
"def test_networking_project_network_tag_delete(self):\n pass",
"def test_delete__network(self):\n arglist = [\n '--network',\n self.projects[0].id,\n ]\n verifylist = [\n ('service', 'network'),\n ('project', self.projects[0].id),\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n result = self.cmd.take_action(parsed_args)\n\n self.assertIsNone(result)\n self.projects_mock.get.assert_called_once_with(self.projects[0].id)\n self.compute_quotas_mock.delete.assert_not_called()\n self.volume_quotas_mock.delete.assert_not_called()\n self.network_mock.delete_quota.assert_called_once_with(\n self.projects[0].id,\n )",
"def test_delete_cluster_network(self):\n pass",
"def delete_network(self, network):\r\n return self.delete(self.network_path % (network))",
"def test_delete_collection_cluster_network(self):\n pass",
"def test_networking_project_network_create(self):\n pass",
"def testDeleteNetworkAuth(self):\n response = self._delete('inventory/networks/1/')\n self.assertEquals(response.status_code, 401)\n\n response = self._delete('inventory/networks/1/',\n username=\"testuser\", password=\"password\")\n self.assertEquals(response.status_code, 403)",
"def network_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_network(**kwargs)",
"def test_delete_project(self):\n pass",
"def test_delete_project(self):\n pass",
"def test_delete_net_namespace(self):\n pass",
"def test_networking_project_network_list(self):\n pass",
"def delete_network(name, host, network_type):\n logging.info(\"Deleting %s '%s' from host '%s'\", network_type, name, host.name)\n\n try:\n if network_type.lower() == \"vswitch\":\n host.configManager.networkSystem.RemoveVirtualSwitch(name)\n elif network_type.lower() == \"portgroup\":\n host.configManager.networkSystem.RemovePortGroup(name)\n except vim.fault.NotFound:\n logging.error(\"Tried to remove %s '%s' that does not exist from host '%s'\",\n network_type, name, host.name)\n except vim.fault.ResourceInUse:\n logging.error(\"%s '%s' can't be removed because there are vNICs associated with it\",\n network_type, name)",
"def test_networking_project_network_get(self):\n pass",
"def delete(self):\n \n logging.info(\"Deleting network %s\" % self.cloudnet)\n # res = cn.delete(self.cloudnet)\n res = self.cloudnet.delete()\n return res",
"def test_delete_host_subnet(self):\n pass",
"def network_delete_event(self, network_info):\n\n net_id = network_info['network_id']\n if net_id not in self.network:\n LOG.error(_LE('network_delete_event: net_id %s does not exist.'),\n net_id)\n return\n\n segid = self.network[net_id].get('segmentation_id')\n tenant_id = self.network[net_id].get('tenant_id')\n tenant_name = self.get_project_name(tenant_id)\n net = utils.Dict2Obj(self.network[net_id])\n if not tenant_name:\n LOG.error(_LE('Project %(tenant_id)s does not exist.'),\n {'tenant_id': tenant_id})\n self.update_network_db(net.id, constants.DELETE_FAIL)\n return\n\n try:\n self.dcnm_client.delete_network(tenant_name, net)\n # Put back the segmentation id into the pool.\n self.seg_drvr.release_segmentation_id(segid)\n\n # Remove entry from database and cache.\n self.delete_network_db(net_id)\n del self.network[net_id]\n snets = [k for k in self.subnet if (\n self.subnet[k].get('network_id') == net_id)]\n [self.subnet.pop(s) for s in snets]\n except dexc.DfaClientRequestFailed:\n LOG.error(_LE('Failed to create network %(net)s.'),\n {'net': net.name})\n self.update_network_db(net_id, constants.DELETE_FAIL)\n # deleting all related VMs\n instances = self.get_vms()\n instances_related = [k for k in instances if k.network_id == net_id]\n for vm in instances_related:\n LOG.debug(\"deleting vm %s because network is deleted\", vm.name)\n self.delete_vm_function(vm.port_id, vm)\n self.network_del_notif(tenant_id, tenant_name, net_id)",
"def test_networking_project_network_update(self):\n pass",
"def test_remove_project(self):\n pass",
"def delete(self): \n params = {'command':'deleteNetwork',\n 'id':self.id}\n \n self.logger.debug('Remove network %s' % self.name)\n \n try:\n response = self.send_request(params)\n res = json.loads(response)\n clsk_job_id = res['deletenetworkresponse']['jobid']\n self.logger.debug('Start job over %s.%s - %s: %s' % (\n self._obj_type, self.name, \n 'deleteNetwork', res))\n return clsk_job_id\n except KeyError as ex :\n self.logger.error('Error parsing json data: %s' % ex)\n raise ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n self.logger.error(ex)\n raise ClskError(ex)",
"def test_delete_hyperflex_cluster_network_policy(self):\n pass",
"def test_03_network_create(self):\n # Validate the following\n # 1. Create a project.\n # 2. Add virtual/direct network resource to the project. User shared\n # network resource for the project\n # 3. Verify any number of Project level Virtual/Direct networks can be\n # created and used for vm deployment within the project.\n # 4. Verify shared networks (zone and domain wide) from outside the\n # project can also be used in a project.\n\n # Create project as a domain admin\n project = Project.create(\n self.apiclient,\n self.services[\"project\"],\n account=self.account.name,\n domainid=self.account.domainid\n )\n # Cleanup created project at end of test\n self.cleanup.append(project)\n self.debug(\"Created project with domain admin with ID: %s\" %\n project.id)\n\n network_offerings = list_network_offerings(\n self.apiclient,\n projectid=project.id,\n supportedServices='SourceNat',\n type='isolated',\n state='Enabled'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a network with network offering ID: %s\" %\n network_offering.id)\n self.services[\"network\"][\"zoneid\"] = self.zone.id\n network = Network.create(\n self.apiclient,\n self.services[\"network\"],\n networkofferingid=network_offering.id,\n projectid=project.id\n )\n self.debug(\"Created network with ID: %s\" % network.id)\n networks = Network.list(\n self.apiclient,\n projectid=project.id,\n listall=True\n )\n self.assertEqual(\n isinstance(networks, list),\n True,\n \"Check for the valid network list response\"\n )\n\n self.debug(\"Deploying VM with network: %s\" % network.id)\n\n virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n network_offerings = list_network_offerings(\n self.apiclient,\n state='Enabled',\n guestiptype='Shared',\n name='DefaultSharedNetworkOffering',\n displaytext='Offering for Shared networks'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a shared network in domain: %s\" %\n self.domain.id)\n\n # Getting physical network and free vlan in it\n physical_network, vlan = get_free_vlan(self.apiclient, self.zone.id)\n\n self.services[\"domain_network\"][\"vlan\"] = vlan\n self.services[\"domain_network\"][\"physicalnetworkid\"] = physical_network.id\n\n # Generating random subnet number for shared network creation\n shared_network_subnet_number = random.randrange(1,254)\n\n self.services[\"domain_network\"][\"gateway\"] = \"172.16.\"+str(shared_network_subnet_number)+\".1\"\n self.services[\"domain_network\"][\"startip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".2\"\n self.services[\"domain_network\"][\"endip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".20\"\n\n domain_network = Network.create(\n self.apiclient,\n self.services[\"domain_network\"],\n domainid=self.domain.id,\n networkofferingid=network_offering.id,\n zoneid=self.zone.id\n )\n self.cleanup.append(domain_network)\n self.debug(\"Created network with ID: %s\" % domain_network.id)\n\n 
virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(domain_network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n # Delete VM before network gets deleted in cleanup\n virtual_machine.delete(self.apiclient, expunge=True)\n return",
"def test_delete_collection_host_subnet(self):\n pass",
"def _delete_network_vm(args):\n libvirtConn = libvirt.openReadOnly(None)\n if libvirtConn is None:\n print('Cannot contact hypervisor', file=sys.stderr)\n return 1\n net = None\n try:\n net = libvirtConn.networkLookupByName(args.network_name)\n except libvirt.libvirtError:\n print('Cannot find network named [%s]' % args.network_name, file=sys.stderr)\n return 1\n print('Network found:\\n')\n print(xml.dom.minidom.parseString(net.XMLDesc()).toprettyxml(indent=\" \", newl=''))\n print('')\n\n if not args.yes:\n if not input('Really destroy this network ?').strip().lower() in ('y', 'yes'):\n return 1\n return oci_utils.kvm.virt.delete_virtual_network(network_name=args.network_name)",
"def delete_net(self, net_id):\n LOG_OBJ.debug(\"Deleting network %s\" % net_id)\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks/\" + \\\n net_id + \".json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"DELETE\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while deleting net:%s\" %\n net_id)\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Deletion of Network Failed with status %s \" %\n response.status)\n return response.status\n\n LOG_OBJ.info(\"Deleted the network : %s \" % net_id)\n return True",
"def delete_network_postcommit(self, context):\n if self.rpc_handler is None:\n return\n network = self._get_network_info(context._network)\n for _, _network in network.items():\n network_type = _network.get('network_type', '')\n if network_type not in CentecConstant.SUPPORTED_NETWORK_TYPES and len(CentecConstant.SUPPORTED_NETWORK_TYPES) > 0:\n return\n if network is not None:\n try:\n self.rpc_handler.delete_network(network)\n except:\n pass",
"def delete_network(options, vsm_obj):\n print(\"Disconnecting edge interface attached to this network\")\n edge_id = get_edge(vsm_obj)\n edge = Edge(vsm_obj, '4.0')\n edge.id = edge_id\n vnics = Vnics(edge)\n vnics_schema = vnics.query()\n network = get_network_id(options, get_network_name_on_vc(options))\n for vnic in vnics_schema.vnics:\n if network and vnic.portgroupId == network:\n print(\"Found a matching vnic %s %s\" % (options.name, vnic.index))\n vnic.isConnected = \"False\"\n vnic.portgroupId = None\n vnic.name = \"vnic%s\" % vnic.index\n vnics_schema = VnicsSchema()\n vnics_schema.vnics = [vnic]\n result = vnics.create(vnics_schema)\n if (result[0].response.status != 204):\n print \"update vnic error: %s %s\" \\\n % (result[0].response.status, result[0].response.reason)\n return False\n else:\n break\n else:\n print (\"No matching vnic found\")\n\n vdn_scope = get_transport_zone(options)\n virtual_wire = VirtualWire(vdn_scope)\n vwire = virtual_wire.read_by_name(get_network_name(options))\n name = get_network_name(options)\n if vwire != \"FAILURE\":\n print(\"Found a matching network %s\" % (options.name))\n virtual_wire.id = vwire.objectId\n result = virtual_wire.delete()\n if (result.response.status != 200):\n print (\"Delete vwire error: %s\" % result.response.reason)\n return False\n else:\n print (\"No matching network found\")\n print(\"Network %s deleted\" % (options.name))\n\n return True",
"def delete(self, oid):\n path = '%s/networks/%s' % (self.ver, oid)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack network: %s' % truncate(res))\n return res[0]",
"def delete_network(session, name):\n # type: (Session, Text) -> None\n url_tail = f\"/{CoordConstsV2.RSC_NETWORKS}/{name}\"\n return _delete(session, url_tail)"
]
| [
"0.8734149",
"0.84788615",
"0.8282214",
"0.7911755",
"0.74620235",
"0.74408305",
"0.7362307",
"0.7338039",
"0.72818667",
"0.7161988",
"0.7161988",
"0.71514744",
"0.7089159",
"0.7023079",
"0.70029515",
"0.6991699",
"0.6976583",
"0.68999213",
"0.687359",
"0.6855459",
"0.68387324",
"0.67980844",
"0.67877823",
"0.6777019",
"0.67699194",
"0.6762373",
"0.66326165",
"0.6622852",
"0.657865",
"0.65704"
]
| 0.9514106 | 0 |
Test case for networking_project_network_event_get | def test_networking_project_network_event_get(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_networking_project_network_event_list(self):\n pass",
"def test_networking_project_network_get(self):\n pass",
"def test_networking_project_network_service_get(self):\n pass",
"def test_get_network(self):\n pass",
"def test_networking_project_network_tag_get(self):\n pass",
"def test_networking_project_network_list(self):\n pass",
"def test_networking_project_network_update(self):\n pass",
"def test_networking_project_network_service_list(self):\n pass",
"def test_networking_project_network_create(self):\n pass",
"def test_aws_service_api_networks_get(self):\n pass",
"def test_get_networks(self):\n pass",
"def test_networking_project_network_tag_list(self):\n pass",
"def test_networking_project_network_tag_create(self):\n pass",
"def test_api_predictor_events_get(self):\n pass",
"def get_network_events(self,\r\n options=dict()):\r\n\r\n # Validate required parameters\r\n self.validate_parameters(network_id=options.get(\"network_id\"))\r\n\r\n # Prepare query URL\r\n _url_path = '/networks/{networkId}/events'\r\n _url_path = APIHelper.append_url_with_template_parameters(_url_path, { \r\n 'networkId': options.get('network_id', None)\r\n })\r\n _query_builder = Configuration.base_uri\r\n _query_builder += _url_path\r\n _query_parameters = {\r\n 'productType': options.get('product_type', None),\r\n 'includedEventTypes': options.get('included_event_types', None),\r\n 'excludedEventTypes': options.get('excluded_event_types', None),\r\n 'deviceMac': options.get('device_mac', None),\r\n 'deviceSerial': options.get('device_serial', None),\r\n 'deviceName': options.get('device_name', None),\r\n 'clientIp': options.get('client_ip', None),\r\n 'clientMac': options.get('client_mac', None),\r\n 'clientName': options.get('client_name', None),\r\n 'smDeviceMac': options.get('sm_device_mac', None),\r\n 'smDeviceName': options.get('sm_device_name', None),\r\n 'perPage': options.get('per_page', None),\r\n 'startingAfter': options.get('starting_after', None),\r\n 'endingBefore': options.get('ending_before', None)\r\n }\r\n _query_builder = APIHelper.append_url_with_query_parameters(_query_builder,\r\n _query_parameters, Configuration.array_serialization)\r\n _query_url = APIHelper.clean_url(_query_builder)\r\n\r\n # Prepare headers\r\n _headers = {\r\n 'accept': 'application/json'\r\n }\r\n\r\n # Prepare and execute request\r\n _request = self.http_client.get(_query_url, headers=_headers)\r\n CustomHeaderAuth.apply(_request)\r\n _context = self.execute_request(_request)\r\n self.validate_response(_context)\r\n\r\n # Return appropriate type\r\n return APIHelper.json_deserialize(_context.response.raw_body)",
"def test_add_network(self):\n pass",
"def test_networking_project_network_tag_put(self):\n pass",
"def test_networking_project_network_delete(self):\n pass",
"def test_read_cluster_network(self):\n pass",
"def test_get_default_network(self):\n pass",
"def test_get_source_ip(self):\n pass",
"def test_get(self):\n\n # Grab the server's addresses...\n addrs = self.server.addresses\n\n # Make sure the public and private lists are present\n dtutil.assert_true('public' in addrs)\n dtutil.assert_true('private' in addrs)\n\n # Are IP addresses actually returned?",
"def test_create_network():\n _network = Network()",
"def test_03_network_create(self):\n # Validate the following\n # 1. Create a project.\n # 2. Add virtual/direct network resource to the project. User shared\n # network resource for the project\n # 3. Verify any number of Project level Virtual/Direct networks can be\n # created and used for vm deployment within the project.\n # 4. Verify shared networks (zone and domain wide) from outside the\n # project can also be used in a project.\n\n # Create project as a domain admin\n project = Project.create(\n self.apiclient,\n self.services[\"project\"],\n account=self.account.name,\n domainid=self.account.domainid\n )\n # Cleanup created project at end of test\n self.cleanup.append(project)\n self.debug(\"Created project with domain admin with ID: %s\" %\n project.id)\n\n network_offerings = list_network_offerings(\n self.apiclient,\n projectid=project.id,\n supportedServices='SourceNat',\n type='isolated',\n state='Enabled'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a network with network offering ID: %s\" %\n network_offering.id)\n self.services[\"network\"][\"zoneid\"] = self.zone.id\n network = Network.create(\n self.apiclient,\n self.services[\"network\"],\n networkofferingid=network_offering.id,\n projectid=project.id\n )\n self.debug(\"Created network with ID: %s\" % network.id)\n networks = Network.list(\n self.apiclient,\n projectid=project.id,\n listall=True\n )\n self.assertEqual(\n isinstance(networks, list),\n True,\n \"Check for the valid network list response\"\n )\n\n self.debug(\"Deploying VM with network: %s\" % network.id)\n\n virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n network_offerings = list_network_offerings(\n self.apiclient,\n state='Enabled',\n guestiptype='Shared',\n name='DefaultSharedNetworkOffering',\n displaytext='Offering for Shared networks'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a shared network in domain: %s\" %\n self.domain.id)\n\n # Getting physical network and free vlan in it\n physical_network, vlan = get_free_vlan(self.apiclient, self.zone.id)\n\n self.services[\"domain_network\"][\"vlan\"] = vlan\n self.services[\"domain_network\"][\"physicalnetworkid\"] = physical_network.id\n\n # Generating random subnet number for shared network creation\n shared_network_subnet_number = random.randrange(1,254)\n\n self.services[\"domain_network\"][\"gateway\"] = \"172.16.\"+str(shared_network_subnet_number)+\".1\"\n self.services[\"domain_network\"][\"startip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".2\"\n self.services[\"domain_network\"][\"endip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".20\"\n\n domain_network = Network.create(\n self.apiclient,\n self.services[\"domain_network\"],\n domainid=self.domain.id,\n networkofferingid=network_offering.id,\n zoneid=self.zone.id\n )\n self.cleanup.append(domain_network)\n self.debug(\"Created network with ID: %s\" % domain_network.id)\n\n 
virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(domain_network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n # Delete VM before network gets deleted in cleanup\n virtual_machine.delete(self.apiclient, expunge=True)\n return",
"def test_get_lab_network_by_name(\n self, authenticated_client, lab_path, test_network, test_network_data\n ):\n resp = authenticated_client.api.get_lab_network(lab_path, test_network)\n assert resp[\"data\"][\"name\"] == test_network_data[\"name\"]",
"def test_read_net_namespace(self):\n pass",
"def test_get_node_internal_ip_address(self):\n pass",
"def getEvent(number):",
"def test_read_host_subnet(self):\n pass",
"def wait_for_network(container, timeout=30):\n starttime = time.time()\n while(time.time() < starttime + timeout):\n time.sleep(1)\n if 'eth0' in container.state().network:\n addresses = container.state().network['eth0']['addresses']\n if len(addresses) > 0:\n if addresses[0]['family'] == 'inet':\n return addresses[0]\n return None"
]
| [
"0.82577205",
"0.79113317",
"0.76856494",
"0.7209927",
"0.7185073",
"0.6887082",
"0.66650337",
"0.66104114",
"0.6588003",
"0.6416295",
"0.64085007",
"0.6167186",
"0.60606116",
"0.595175",
"0.59372723",
"0.58573383",
"0.5803691",
"0.57594985",
"0.5739596",
"0.56881183",
"0.5636515",
"0.55695814",
"0.55661225",
"0.55488306",
"0.5503089",
"0.54732484",
"0.54595155",
"0.5391938",
"0.5390148",
"0.53708017"
]
| 0.9450455 | 0 |
Test case for networking_project_network_event_list | def test_networking_project_network_event_list(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_networking_project_network_event_get(self):\n pass",
"def test_networking_project_network_list(self):\n pass",
"def test_networking_project_network_service_list(self):\n pass",
"def test_networking_project_network_tag_list(self):\n pass",
"def test_networking_project_network_get(self):\n pass",
"def test_networking_project_network_create(self):\n pass",
"def test_networking_project_network_update(self):\n pass",
"def test_networking_project_network_service_get(self):\n pass",
"def test_get_networks(self):\n pass",
"def test_networking_project_network_tag_get(self):\n pass",
"def test_networking_project_network_tag_create(self):\n pass",
"def test_list_cluster_network(self):\n pass",
"def test_get_network(self):\n pass",
"def test_list_project_request(self):\n pass",
"def test_list_project(self):\n pass",
"def test_add_network(self):\n pass",
"def test_aws_service_api_networks_get(self):\n pass",
"def test_networking_project_network_delete(self):\n pass",
"def test_events_list(self):\n response = self.client.get(url_for(\n 'issues.eventsresourse',\n issue_number=self.TARGET_ISSUE_NUMBER))\n self.assertEqual(response.status_code, 200)\n self.assertTrue(response.json)",
"def test_list_projects(self):\n pass",
"def test_list_projects(self):\n pass",
"def test_verify_list_of_devices_in_my_network():",
"def test_03_network_create(self):\n # Validate the following\n # 1. Create a project.\n # 2. Add virtual/direct network resource to the project. User shared\n # network resource for the project\n # 3. Verify any number of Project level Virtual/Direct networks can be\n # created and used for vm deployment within the project.\n # 4. Verify shared networks (zone and domain wide) from outside the\n # project can also be used in a project.\n\n # Create project as a domain admin\n project = Project.create(\n self.apiclient,\n self.services[\"project\"],\n account=self.account.name,\n domainid=self.account.domainid\n )\n # Cleanup created project at end of test\n self.cleanup.append(project)\n self.debug(\"Created project with domain admin with ID: %s\" %\n project.id)\n\n network_offerings = list_network_offerings(\n self.apiclient,\n projectid=project.id,\n supportedServices='SourceNat',\n type='isolated',\n state='Enabled'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a network with network offering ID: %s\" %\n network_offering.id)\n self.services[\"network\"][\"zoneid\"] = self.zone.id\n network = Network.create(\n self.apiclient,\n self.services[\"network\"],\n networkofferingid=network_offering.id,\n projectid=project.id\n )\n self.debug(\"Created network with ID: %s\" % network.id)\n networks = Network.list(\n self.apiclient,\n projectid=project.id,\n listall=True\n )\n self.assertEqual(\n isinstance(networks, list),\n True,\n \"Check for the valid network list response\"\n )\n\n self.debug(\"Deploying VM with network: %s\" % network.id)\n\n virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n network_offerings = list_network_offerings(\n self.apiclient,\n state='Enabled',\n guestiptype='Shared',\n name='DefaultSharedNetworkOffering',\n displaytext='Offering for Shared networks'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a shared network in domain: %s\" %\n self.domain.id)\n\n # Getting physical network and free vlan in it\n physical_network, vlan = get_free_vlan(self.apiclient, self.zone.id)\n\n self.services[\"domain_network\"][\"vlan\"] = vlan\n self.services[\"domain_network\"][\"physicalnetworkid\"] = physical_network.id\n\n # Generating random subnet number for shared network creation\n shared_network_subnet_number = random.randrange(1,254)\n\n self.services[\"domain_network\"][\"gateway\"] = \"172.16.\"+str(shared_network_subnet_number)+\".1\"\n self.services[\"domain_network\"][\"startip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".2\"\n self.services[\"domain_network\"][\"endip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".20\"\n\n domain_network = Network.create(\n self.apiclient,\n self.services[\"domain_network\"],\n domainid=self.domain.id,\n networkofferingid=network_offering.id,\n zoneid=self.zone.id\n )\n self.cleanup.append(domain_network)\n self.debug(\"Created network with ID: %s\" % domain_network.id)\n\n 
virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(domain_network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n # Delete VM before network gets deleted in cleanup\n virtual_machine.delete(self.apiclient, expunge=True)\n return",
"def testRun(self):\n stub = NetworkObjectStub()\n\n e1 = Event(5, stub, 'message')\n e2 = Event(0, stub, 'message')\n e3 = Event(7, stub, 'message')\n e4 = PacketEvent(1, 'sender2', stub, 4, 'message5')\n eventList = [e1, e2, e3, e4]\n\n eventHandler = EventHandler('network', eventList)\n eventHandler.run(0, 4)\n with self.assertRaises(Empty) as e:\n eventHandler.run(0, 1)",
"def _test_network_list_paged(\n self, filter_params, expected_networks, page_data,\n source_networks=None, **extra_kwargs):\n filter_params = filter_params or {}\n sort_dir = page_data['sort_dir']\n # invert sort_dir for calls\n sort_dir = 'asc' if sort_dir == 'desc' else 'desc'\n call_args = {'single_page': True, 'limit': 21, 'sort_key': 'id',\n 'sort_dir': sort_dir}\n\n return_values = []\n all_networks = (self.networks.list() if source_networks is None\n else source_networks)\n\n expected_calls = []\n\n params = filter_params.copy()\n params.update(call_args)\n if page_data.get('marker_id'):\n params.update({'marker': page_data.get('marker_id')})\n extra_kwargs.update({'marker': page_data.get('marker_id')})\n return_values.append(all_networks[0:21])\n expected_calls.append(\n mock.call(test.IsHttpRequest(), **params))\n\n self.mock_network_list.side_effect = return_values\n\n extra_kwargs.update(filter_params)\n ret_val, has_more_data, has_prev_data = api.neutron.network_list_paged(\n self.request, page_data, **extra_kwargs)\n self.mock_network_list.assert_has_calls(expected_calls)\n self.assertEqual(set(n.id for n in expected_networks),\n set(n.id for n in ret_val))\n self.assertNotIn(api.neutron.AUTO_ALLOCATE_ID,\n [n.id for n in ret_val])\n return ret_val, has_more_data, has_prev_data",
"def test_networking_project_network_tag_put(self):\n pass",
"def test_list_group(self):\n pass",
"def test_get_port_group_list(self):\n pass",
"def _test_network_list_for_tenant(\n self, include_external, filter_params, should_called,\n expected_networks, source_networks=None, **extra_kwargs):\n has_more_data = None\n has_prev_data = None\n marker_calls = []\n filter_params = filter_params or {}\n if 'page_data' not in extra_kwargs:\n call_args = {'single_page': False}\n else:\n sort_dir = extra_kwargs['page_data']['sort_dir']\n # invert sort_dir for calls\n sort_dir = 'asc' if sort_dir == 'desc' else 'desc'\n call_args = {'single_page': True, 'limit': 21, 'sort_key': 'id',\n 'sort_dir': sort_dir}\n marker_id = extra_kwargs['page_data'].get('marker_id')\n if extra_kwargs.get('marker_calls') is not None:\n marker_calls = extra_kwargs.pop('marker_calls')\n\n tenant_id = '1'\n return_values = []\n all_networks = (self.networks.list() if source_networks is None\n else source_networks)\n\n expected_calls = []\n call_order = ['shared', 'non_shared', 'external']\n if call_args.get('sort_dir') == 'desc':\n call_order.reverse()\n\n for call in call_order:\n if call in should_called:\n params = filter_params.copy()\n params.update(call_args)\n if call in marker_calls:\n params.update({'marker': marker_id})\n if call == 'external':\n params['router:external'] = True\n params['shared'] = False\n return_values.append(\n [n for n in all_networks\n if n['router:external'] is True and\n n['shared'] is False])\n expected_calls.append(\n mock.call(test.IsHttpRequest(), **params))\n elif call == 'shared':\n params['shared'] = True\n external = params.get('router:external')\n return_values.append(\n [n for n in all_networks\n if (n['shared'] is True and\n n['router:external'] == (\n external if external is not None\n else n['router:external']))])\n expected_calls.append(\n mock.call(test.IsHttpRequest(), **params))\n elif call == 'non_shared':\n params['shared'] = False\n external = params.get('router:external')\n return_values.append(\n [n for n in all_networks\n if (n['tenant_id'] == '1' and\n n['shared'] is False and\n n['router:external'] == (\n external if external is not None\n else n['router:external']))])\n expected_calls.append(\n mock.call(test.IsHttpRequest(),\n tenant_id=tenant_id, **params))\n self.mock_network_list.side_effect = return_values\n\n extra_kwargs.update(filter_params)\n ret_val = api.neutron.network_list_for_tenant(\n self.request, tenant_id,\n include_external=include_external,\n **extra_kwargs)\n if 'page_data' in extra_kwargs:\n has_more_data = ret_val[1]\n has_prev_data = ret_val[2]\n ret_val = ret_val[0]\n self.mock_network_list.assert_has_calls(expected_calls)\n self.assertEqual(set(n.id for n in expected_networks),\n set(n.id for n in ret_val))\n self.assertNotIn(api.neutron.AUTO_ALLOCATE_ID,\n [n.id for n in ret_val])\n return ret_val, has_more_data, has_prev_data",
"def project_list_networks(project):\n q = client.project.networks_in(project)\n sys.stdout.write(\n \"Networks allocated to %s\\t: %s\\n\" % (project, \" \".join(q))\n )"
]
| [
"0.8299006",
"0.81361306",
"0.7848326",
"0.76107645",
"0.70611805",
"0.68074656",
"0.67305243",
"0.6706901",
"0.64687085",
"0.6259645",
"0.62270015",
"0.6207792",
"0.619327",
"0.6122696",
"0.6068232",
"0.6055929",
"0.60044557",
"0.6001083",
"0.59810936",
"0.5941543",
"0.5941543",
"0.59374875",
"0.59311044",
"0.5715381",
"0.569081",
"0.56701875",
"0.5597867",
"0.5589427",
"0.55712134",
"0.55671376"
]
| 0.95086074 | 0 |
Test case for networking_project_network_get | def test_networking_project_network_get(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_networking_project_network_service_get(self):\n pass",
"def test_get_network(self):\n pass",
"def test_networking_project_network_list(self):\n pass",
"def test_networking_project_network_create(self):\n pass",
"def test_networking_project_network_event_get(self):\n pass",
"def test_networking_project_network_tag_get(self):\n pass",
"def test_get_networks(self):\n pass",
"def test_networking_project_network_update(self):\n pass",
"def test_aws_service_api_networks_get(self):\n pass",
"def test_networking_project_network_service_list(self):\n pass",
"def test_get_default_network(self):\n pass",
"def test_03_network_create(self):\n # Validate the following\n # 1. Create a project.\n # 2. Add virtual/direct network resource to the project. User shared\n # network resource for the project\n # 3. Verify any number of Project level Virtual/Direct networks can be\n # created and used for vm deployment within the project.\n # 4. Verify shared networks (zone and domain wide) from outside the\n # project can also be used in a project.\n\n # Create project as a domain admin\n project = Project.create(\n self.apiclient,\n self.services[\"project\"],\n account=self.account.name,\n domainid=self.account.domainid\n )\n # Cleanup created project at end of test\n self.cleanup.append(project)\n self.debug(\"Created project with domain admin with ID: %s\" %\n project.id)\n\n network_offerings = list_network_offerings(\n self.apiclient,\n projectid=project.id,\n supportedServices='SourceNat',\n type='isolated',\n state='Enabled'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a network with network offering ID: %s\" %\n network_offering.id)\n self.services[\"network\"][\"zoneid\"] = self.zone.id\n network = Network.create(\n self.apiclient,\n self.services[\"network\"],\n networkofferingid=network_offering.id,\n projectid=project.id\n )\n self.debug(\"Created network with ID: %s\" % network.id)\n networks = Network.list(\n self.apiclient,\n projectid=project.id,\n listall=True\n )\n self.assertEqual(\n isinstance(networks, list),\n True,\n \"Check for the valid network list response\"\n )\n\n self.debug(\"Deploying VM with network: %s\" % network.id)\n\n virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n network_offerings = list_network_offerings(\n self.apiclient,\n state='Enabled',\n guestiptype='Shared',\n name='DefaultSharedNetworkOffering',\n displaytext='Offering for Shared networks'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a shared network in domain: %s\" %\n self.domain.id)\n\n # Getting physical network and free vlan in it\n physical_network, vlan = get_free_vlan(self.apiclient, self.zone.id)\n\n self.services[\"domain_network\"][\"vlan\"] = vlan\n self.services[\"domain_network\"][\"physicalnetworkid\"] = physical_network.id\n\n # Generating random subnet number for shared network creation\n shared_network_subnet_number = random.randrange(1,254)\n\n self.services[\"domain_network\"][\"gateway\"] = \"172.16.\"+str(shared_network_subnet_number)+\".1\"\n self.services[\"domain_network\"][\"startip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".2\"\n self.services[\"domain_network\"][\"endip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".20\"\n\n domain_network = Network.create(\n self.apiclient,\n self.services[\"domain_network\"],\n domainid=self.domain.id,\n networkofferingid=network_offering.id,\n zoneid=self.zone.id\n )\n self.cleanup.append(domain_network)\n self.debug(\"Created network with ID: %s\" % domain_network.id)\n\n 
virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(domain_network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n # Delete VM before network gets deleted in cleanup\n virtual_machine.delete(self.apiclient, expunge=True)\n return",
"def test_networking_project_network_delete(self):\n pass",
"def test_networking_project_network_event_list(self):\n pass",
"def test_networking_project_network_tag_list(self):\n pass",
"def test_get_lab_network_by_name(\n self, authenticated_client, lab_path, test_network, test_network_data\n ):\n resp = authenticated_client.api.get_lab_network(lab_path, test_network)\n assert resp[\"data\"][\"name\"] == test_network_data[\"name\"]",
"def test_add_network(self):\n pass",
"def test_networking_project_network_tag_create(self):\n pass",
"def test_create_network():\n _network = Network()",
"def network_get(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.get_network(**kwargs)",
"def test_read_cluster_network(self):\n pass",
"def test_get_valid_networks_for_virtualization_realm(self):\n pass",
"def test_list_cluster_network(self):\n pass",
"def get_network_on_vc(options):\n datacenter = get_datacenter(options)\n networks = datacenter.network\n\n name = get_network_name(options)\n for network in networks:\n if re.search(name, network.name):\n return network",
"def test_networking_project_network_tag_put(self):\n pass",
"def test_support_NETWORK(self):\n self.assertEqual(self._parseFeature(\"NETWORK\", \"IRCNet\"), \"IRCNet\")",
"def get_network_url(project_id, network):\n assert is_valid_project_id(project_id), project_id\n assert is_valid_network(network), network\n return (\n 'https://www.googleapis.com/compute/v1/projects/%s/global/networks/%s' % (\n project_id, network))",
"def _build_network(self):\n pass",
"def test_retrieve_networks(site):\n models.Attribute.objects.create(\n site=site, resource_name='Network', name='test'\n )\n\n net_8 = models.Network.objects.create(\n site=site, cidr=u'10.0.0.0/8', attributes={'test': 'foo'}\n )\n net_24 = models.Network.objects.create(\n site=site, cidr=u'10.0.0.0/24', attributes={'test': 'bar'}\n )\n net_25 = models.Network.objects.create(\n site=site, cidr=u'10.0.0.0/25', attributes={'test': 'baz'}\n )\n ip = models.Network.objects.create(\n site=site, cidr=u'10.0.0.1/32'\n )\n\n # root=True\n assert list(site.networks.filter(parent_id=None)) == [net_8]\n\n # include_networks=True, include_ips=Fals\n assert list(site.networks.filter(is_ip=False)) == [net_8, net_24, net_25]\n\n # include_networks=False, include_ips=False\n assert list(site.networks.none()) == []\n\n # include_networks=True, include_ips=True\n assert list(site.networks.all()) == [net_8, net_24, net_25, ip]\n\n # include_networks=False, include_ips=True\n assert list(site.networks.filter(is_ip=True)) == [ip]\n\n # Filter by attribute\n assert list(site.networks.by_attribute(None, 'foo')) == []\n assert list(site.networks.by_attribute('test', 'foo')) == [net_8]\n\n # Get by address\n assert site.networks.get_by_address(u'10.0.0.0/8') == net_8\n\n #\n # .get_closest_parent()\n #\n # Closest parent for non-existent 10.0.0.128/32 network should be /24\n assert site.networks.get_closest_parent(u'10.0.0.128/32') == net_24\n\n # Closest parent for non-existent 10.0.0.2/32 network should be /25\n assert site.networks.get_closest_parent(u'10.0.0.2/32') == net_25\n\n # Matching ip with shorter prefix_length should not match\n with pytest.raises(models.Network.DoesNotExist):\n site.networks.get_closest_parent(u'10.0.0.2/32', prefix_length=27)\n\n # Non-existent closest parent should error\n with pytest.raises(models.Network.DoesNotExist):\n site.networks.get_closest_parent(u'1.0.0.2/32')\n\n # Invalid prefix_length\n with pytest.raises(exc.ValidationError):\n site.networks.get_closest_parent(u'10.0.0.2/32', prefix_length='shoe')\n\n # Invalid CIDR\n with pytest.raises(exc.ValidationError):\n site.networks.get_closest_parent(u'1')",
"def test_register_network(self):\n pass"
]
| [
"0.8632432",
"0.8508769",
"0.8449543",
"0.80295455",
"0.79398155",
"0.78251946",
"0.7792246",
"0.7602196",
"0.7560636",
"0.7488109",
"0.7394283",
"0.71187145",
"0.7039374",
"0.70385844",
"0.70047534",
"0.68530726",
"0.6823287",
"0.681199",
"0.6767339",
"0.67336947",
"0.65397274",
"0.64418703",
"0.63718665",
"0.6315302",
"0.63151515",
"0.6312304",
"0.6272332",
"0.6226264",
"0.61680126",
"0.61394304"
]
| 0.9436339 | 0 |
Test case for networking_project_network_list | def test_networking_project_network_list(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_networking_project_network_service_list(self):\n pass",
"def test_networking_project_network_get(self):\n pass",
"def test_networking_project_network_event_list(self):\n pass",
"def test_networking_project_network_tag_list(self):\n pass",
"def test_networking_project_network_create(self):\n pass",
"def test_get_networks(self):\n pass",
"def test_networking_project_network_service_get(self):\n pass",
"def test_networking_project_network_update(self):\n pass",
"def test_get_network(self):\n pass",
"def test_list_cluster_network(self):\n pass",
"def test_03_network_create(self):\n # Validate the following\n # 1. Create a project.\n # 2. Add virtual/direct network resource to the project. User shared\n # network resource for the project\n # 3. Verify any number of Project level Virtual/Direct networks can be\n # created and used for vm deployment within the project.\n # 4. Verify shared networks (zone and domain wide) from outside the\n # project can also be used in a project.\n\n # Create project as a domain admin\n project = Project.create(\n self.apiclient,\n self.services[\"project\"],\n account=self.account.name,\n domainid=self.account.domainid\n )\n # Cleanup created project at end of test\n self.cleanup.append(project)\n self.debug(\"Created project with domain admin with ID: %s\" %\n project.id)\n\n network_offerings = list_network_offerings(\n self.apiclient,\n projectid=project.id,\n supportedServices='SourceNat',\n type='isolated',\n state='Enabled'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a network with network offering ID: %s\" %\n network_offering.id)\n self.services[\"network\"][\"zoneid\"] = self.zone.id\n network = Network.create(\n self.apiclient,\n self.services[\"network\"],\n networkofferingid=network_offering.id,\n projectid=project.id\n )\n self.debug(\"Created network with ID: %s\" % network.id)\n networks = Network.list(\n self.apiclient,\n projectid=project.id,\n listall=True\n )\n self.assertEqual(\n isinstance(networks, list),\n True,\n \"Check for the valid network list response\"\n )\n\n self.debug(\"Deploying VM with network: %s\" % network.id)\n\n virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n network_offerings = list_network_offerings(\n self.apiclient,\n state='Enabled',\n guestiptype='Shared',\n name='DefaultSharedNetworkOffering',\n displaytext='Offering for Shared networks'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a shared network in domain: %s\" %\n self.domain.id)\n\n # Getting physical network and free vlan in it\n physical_network, vlan = get_free_vlan(self.apiclient, self.zone.id)\n\n self.services[\"domain_network\"][\"vlan\"] = vlan\n self.services[\"domain_network\"][\"physicalnetworkid\"] = physical_network.id\n\n # Generating random subnet number for shared network creation\n shared_network_subnet_number = random.randrange(1,254)\n\n self.services[\"domain_network\"][\"gateway\"] = \"172.16.\"+str(shared_network_subnet_number)+\".1\"\n self.services[\"domain_network\"][\"startip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".2\"\n self.services[\"domain_network\"][\"endip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".20\"\n\n domain_network = Network.create(\n self.apiclient,\n self.services[\"domain_network\"],\n domainid=self.domain.id,\n networkofferingid=network_offering.id,\n zoneid=self.zone.id\n )\n self.cleanup.append(domain_network)\n self.debug(\"Created network with ID: %s\" % domain_network.id)\n\n 
virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(domain_network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n # Delete VM before network gets deleted in cleanup\n virtual_machine.delete(self.apiclient, expunge=True)\n return",
"def project_list_networks(project):\n q = client.project.networks_in(project)\n sys.stdout.write(\n \"Networks allocated to %s\\t: %s\\n\" % (project, \" \".join(q))\n )",
"def test_list_lab_networks(self, authenticated_client, lab_path):\n r = authenticated_client.api.list_lab_networks(lab_path)\n assert r[\"data\"] is not None",
"def test_networking_project_network_delete(self):\n pass",
"def test_networking_project_network_event_get(self):\n pass",
"def test_aws_service_api_networks_get(self):\n pass",
"def test_networking_project_network_tag_get(self):\n pass",
"def test_list_project(self):\n pass",
"def test_networking_project_network_tag_create(self):\n pass",
"def test_get_valid_networks_for_virtualization_realm(self):\n pass",
"def test_list_project_request(self):\n pass",
"def test_add_network(self):\n pass",
"def test_list_projects(self):\n pass",
"def test_list_projects(self):\n pass",
"def test_list_net_namespace(self):\n pass",
"def network_list(self, kwargs=None):\n try:\n scode, networks = Rest.get('Network')\n except docker.errors.APIError as e:\n Console.error(e.explanation)\n return\n\n if len(networks) == 0:\n Console.info(\"No network exist\")\n return\n\n n = 1\n e = {}\n data = []\n for network in networks:\n d = {}\n d['Ip'] = network['Ip']\n d['Id'] = network['Id']\n d['Name'] = network['Name']\n d['Containers'] = network['Containers']\n e[n] = d\n n = n + 1\n Console.ok(str(Printer.dict_table(e, order=['Ip', 'Id', 'Name', 'Containers'])))",
"def list_net(self):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while listing the networks\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get network list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n\n LOG_OBJ.info(\"Network List : %s \" % output)\n return output['networks']",
"def test_create_network():\n _network = Network()",
"def test_get_default_network(self):\n pass",
"def _test_network_list_paged(\n self, filter_params, expected_networks, page_data,\n source_networks=None, **extra_kwargs):\n filter_params = filter_params or {}\n sort_dir = page_data['sort_dir']\n # invert sort_dir for calls\n sort_dir = 'asc' if sort_dir == 'desc' else 'desc'\n call_args = {'single_page': True, 'limit': 21, 'sort_key': 'id',\n 'sort_dir': sort_dir}\n\n return_values = []\n all_networks = (self.networks.list() if source_networks is None\n else source_networks)\n\n expected_calls = []\n\n params = filter_params.copy()\n params.update(call_args)\n if page_data.get('marker_id'):\n params.update({'marker': page_data.get('marker_id')})\n extra_kwargs.update({'marker': page_data.get('marker_id')})\n return_values.append(all_networks[0:21])\n expected_calls.append(\n mock.call(test.IsHttpRequest(), **params))\n\n self.mock_network_list.side_effect = return_values\n\n extra_kwargs.update(filter_params)\n ret_val, has_more_data, has_prev_data = api.neutron.network_list_paged(\n self.request, page_data, **extra_kwargs)\n self.mock_network_list.assert_has_calls(expected_calls)\n self.assertEqual(set(n.id for n in expected_networks),\n set(n.id for n in ret_val))\n self.assertNotIn(api.neutron.AUTO_ALLOCATE_ID,\n [n.id for n in ret_val])\n return ret_val, has_more_data, has_prev_data"
]
| [
"0.8585402",
"0.8372369",
"0.82831913",
"0.8275119",
"0.7899982",
"0.76960224",
"0.755038",
"0.74486506",
"0.7311336",
"0.7203733",
"0.7135034",
"0.708045",
"0.70350575",
"0.70243955",
"0.6986017",
"0.69722104",
"0.6854558",
"0.6749359",
"0.67465365",
"0.6731198",
"0.67179805",
"0.6714301",
"0.67025405",
"0.67025405",
"0.63936096",
"0.6380907",
"0.6331122",
"0.6324433",
"0.6323356",
"0.63167214"
]
| 0.94664603 | 0 |
Test case for networking_project_network_service_get | def test_networking_project_network_service_get(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_networking_project_network_get(self):\n pass",
"def test_networking_project_network_service_list(self):\n pass",
"def test_networking_project_network_event_get(self):\n pass",
"def test_get_network(self):\n pass",
"def test_networking_project_network_list(self):\n pass",
"def test_networking_project_network_tag_get(self):\n pass",
"def test_aws_service_api_networks_get(self):\n pass",
"def test_networking_project_network_create(self):\n pass",
"def test_get_networks(self):\n pass",
"def test_networking_project_network_update(self):\n pass",
"def test_networking_project_network_event_list(self):\n pass",
"def test_03_network_create(self):\n # Validate the following\n # 1. Create a project.\n # 2. Add virtual/direct network resource to the project. User shared\n # network resource for the project\n # 3. Verify any number of Project level Virtual/Direct networks can be\n # created and used for vm deployment within the project.\n # 4. Verify shared networks (zone and domain wide) from outside the\n # project can also be used in a project.\n\n # Create project as a domain admin\n project = Project.create(\n self.apiclient,\n self.services[\"project\"],\n account=self.account.name,\n domainid=self.account.domainid\n )\n # Cleanup created project at end of test\n self.cleanup.append(project)\n self.debug(\"Created project with domain admin with ID: %s\" %\n project.id)\n\n network_offerings = list_network_offerings(\n self.apiclient,\n projectid=project.id,\n supportedServices='SourceNat',\n type='isolated',\n state='Enabled'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a network with network offering ID: %s\" %\n network_offering.id)\n self.services[\"network\"][\"zoneid\"] = self.zone.id\n network = Network.create(\n self.apiclient,\n self.services[\"network\"],\n networkofferingid=network_offering.id,\n projectid=project.id\n )\n self.debug(\"Created network with ID: %s\" % network.id)\n networks = Network.list(\n self.apiclient,\n projectid=project.id,\n listall=True\n )\n self.assertEqual(\n isinstance(networks, list),\n True,\n \"Check for the valid network list response\"\n )\n\n self.debug(\"Deploying VM with network: %s\" % network.id)\n\n virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n network_offerings = list_network_offerings(\n self.apiclient,\n state='Enabled',\n guestiptype='Shared',\n name='DefaultSharedNetworkOffering',\n displaytext='Offering for Shared networks'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a shared network in domain: %s\" %\n self.domain.id)\n\n # Getting physical network and free vlan in it\n physical_network, vlan = get_free_vlan(self.apiclient, self.zone.id)\n\n self.services[\"domain_network\"][\"vlan\"] = vlan\n self.services[\"domain_network\"][\"physicalnetworkid\"] = physical_network.id\n\n # Generating random subnet number for shared network creation\n shared_network_subnet_number = random.randrange(1,254)\n\n self.services[\"domain_network\"][\"gateway\"] = \"172.16.\"+str(shared_network_subnet_number)+\".1\"\n self.services[\"domain_network\"][\"startip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".2\"\n self.services[\"domain_network\"][\"endip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".20\"\n\n domain_network = Network.create(\n self.apiclient,\n self.services[\"domain_network\"],\n domainid=self.domain.id,\n networkofferingid=network_offering.id,\n zoneid=self.zone.id\n )\n self.cleanup.append(domain_network)\n self.debug(\"Created network with ID: %s\" % domain_network.id)\n\n 
virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(domain_network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n # Delete VM before network gets deleted in cleanup\n virtual_machine.delete(self.apiclient, expunge=True)\n return",
"def test_get_default_network(self):\n pass",
"def test_networking_project_network_tag_list(self):\n pass",
"def test_networking_project_network_delete(self):\n pass",
"def test_networking_project_network_tag_create(self):\n pass",
"def test_get_virtual_service(self):\n pass",
"def test_virtualservice_get(self):\n pass",
"def test_get_service_string(self):\n pass",
"def network_get(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.get_network(**kwargs)",
"def test_get_lab_network_by_name(\n self, authenticated_client, lab_path, test_network, test_network_data\n ):\n resp = authenticated_client.api.get_lab_network(lab_path, test_network)\n assert resp[\"data\"][\"name\"] == test_network_data[\"name\"]",
"def test_add_network(self):\n pass",
"def get_service(self):",
"def test_create_network():\n _network = Network()",
"def test_read_net_namespace(self):\n pass",
"def test_get_project(self):\n pass",
"def get_network_on_vc(options):\n datacenter = get_datacenter(options)\n networks = datacenter.network\n\n name = get_network_name(options)\n for network in networks:\n if re.search(name, network.name):\n return network",
"def test_ipam_services_read(self):\n pass",
"def test_networking_project_network_tag_put(self):\n pass",
"def test_read_cluster_network(self):\n pass"
]
| [
"0.8669004",
"0.83204156",
"0.77490056",
"0.76448596",
"0.7637649",
"0.7419113",
"0.7296355",
"0.72419596",
"0.69356287",
"0.68189347",
"0.672783",
"0.65830845",
"0.6519098",
"0.651618",
"0.6364173",
"0.6340621",
"0.62232184",
"0.62168264",
"0.6199433",
"0.6182356",
"0.616484",
"0.6137994",
"0.61023206",
"0.603629",
"0.59801245",
"0.5969051",
"0.5948905",
"0.589687",
"0.5868223",
"0.58317757"
]
| 0.9413343 | 0 |
Test case for networking_project_network_service_list | def test_networking_project_network_service_list(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_networking_project_network_list(self):\n pass",
"def test_networking_project_network_service_get(self):\n pass",
"def test_networking_project_network_event_list(self):\n pass",
"def test_networking_project_network_tag_list(self):\n pass",
"def test_networking_project_network_get(self):\n pass",
"def test_ipam_services_list(self):\n pass",
"def test_networking_project_network_create(self):\n pass",
"def test_get_networks(self):\n pass",
"def test_aws_service_api_networks_get(self):\n pass",
"def test_list_cluster_network(self):\n pass",
"def test_networking_project_network_event_get(self):\n pass",
"def test_list_project(self):\n pass",
"def test_list_project_request(self):\n pass",
"def test_list_projects(self):\n pass",
"def test_list_projects(self):\n pass",
"def test_list_net_namespace(self):\n pass",
"def test_networking_project_network_update(self):\n pass",
"def test_get_network(self):\n pass",
"def test_03_network_create(self):\n # Validate the following\n # 1. Create a project.\n # 2. Add virtual/direct network resource to the project. User shared\n # network resource for the project\n # 3. Verify any number of Project level Virtual/Direct networks can be\n # created and used for vm deployment within the project.\n # 4. Verify shared networks (zone and domain wide) from outside the\n # project can also be used in a project.\n\n # Create project as a domain admin\n project = Project.create(\n self.apiclient,\n self.services[\"project\"],\n account=self.account.name,\n domainid=self.account.domainid\n )\n # Cleanup created project at end of test\n self.cleanup.append(project)\n self.debug(\"Created project with domain admin with ID: %s\" %\n project.id)\n\n network_offerings = list_network_offerings(\n self.apiclient,\n projectid=project.id,\n supportedServices='SourceNat',\n type='isolated',\n state='Enabled'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a network with network offering ID: %s\" %\n network_offering.id)\n self.services[\"network\"][\"zoneid\"] = self.zone.id\n network = Network.create(\n self.apiclient,\n self.services[\"network\"],\n networkofferingid=network_offering.id,\n projectid=project.id\n )\n self.debug(\"Created network with ID: %s\" % network.id)\n networks = Network.list(\n self.apiclient,\n projectid=project.id,\n listall=True\n )\n self.assertEqual(\n isinstance(networks, list),\n True,\n \"Check for the valid network list response\"\n )\n\n self.debug(\"Deploying VM with network: %s\" % network.id)\n\n virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n network_offerings = list_network_offerings(\n self.apiclient,\n state='Enabled',\n guestiptype='Shared',\n name='DefaultSharedNetworkOffering',\n displaytext='Offering for Shared networks'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a shared network in domain: %s\" %\n self.domain.id)\n\n # Getting physical network and free vlan in it\n physical_network, vlan = get_free_vlan(self.apiclient, self.zone.id)\n\n self.services[\"domain_network\"][\"vlan\"] = vlan\n self.services[\"domain_network\"][\"physicalnetworkid\"] = physical_network.id\n\n # Generating random subnet number for shared network creation\n shared_network_subnet_number = random.randrange(1,254)\n\n self.services[\"domain_network\"][\"gateway\"] = \"172.16.\"+str(shared_network_subnet_number)+\".1\"\n self.services[\"domain_network\"][\"startip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".2\"\n self.services[\"domain_network\"][\"endip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".20\"\n\n domain_network = Network.create(\n self.apiclient,\n self.services[\"domain_network\"],\n domainid=self.domain.id,\n networkofferingid=network_offering.id,\n zoneid=self.zone.id\n )\n self.cleanup.append(domain_network)\n self.debug(\"Created network with ID: %s\" % domain_network.id)\n\n 
virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(domain_network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n # Delete VM before network gets deleted in cleanup\n virtual_machine.delete(self.apiclient, expunge=True)\n return",
"def test_networking_project_network_tag_get(self):\n pass",
"def project_list_networks(project):\n q = client.project.networks_in(project)\n sys.stdout.write(\n \"Networks allocated to %s\\t: %s\\n\" % (project, \" \".join(q))\n )",
"def test_get_port_group_list(self):\n pass",
"def test_list_clients(self):\n pass",
"def test_client_list(self):\n pass",
"def test_networking_project_network_delete(self):\n pass",
"def test_list_lab_networks(self, authenticated_client, lab_path):\n r = authenticated_client.api.list_lab_networks(lab_path)\n assert r[\"data\"] is not None",
"def test_list_namespaced_build(self):\n pass",
"def test_networking_project_network_tag_create(self):\n pass",
"def test_get_projects(self):\n pass",
"def test_ipam_services_read(self):\n pass"
]
| [
"0.8442342",
"0.8200452",
"0.79131794",
"0.765145",
"0.7384875",
"0.7134209",
"0.68193364",
"0.67868644",
"0.6717133",
"0.6681138",
"0.6677454",
"0.65660775",
"0.6522769",
"0.64892286",
"0.64892286",
"0.6452811",
"0.6409245",
"0.6376114",
"0.6276882",
"0.62717396",
"0.61926925",
"0.61406124",
"0.61138976",
"0.61095995",
"0.61080146",
"0.6075496",
"0.60622567",
"0.6012513",
"0.5985454",
"0.59279394"
]
| 0.94697046 | 0 |
Test case for networking_project_network_tag_create | def test_networking_project_network_tag_create(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_networking_project_network_tag_put(self):\n pass",
"def test_networking_project_network_tag_get(self):\n pass",
"def test_networking_project_network_create(self):\n pass",
"def test_networking_project_network_tag_list(self):\n pass",
"def test_networking_project_network_tag_delete(self):\n pass",
"def test_03_network_create(self):\n # Validate the following\n # 1. Create a project.\n # 2. Add virtual/direct network resource to the project. User shared\n # network resource for the project\n # 3. Verify any number of Project level Virtual/Direct networks can be\n # created and used for vm deployment within the project.\n # 4. Verify shared networks (zone and domain wide) from outside the\n # project can also be used in a project.\n\n # Create project as a domain admin\n project = Project.create(\n self.apiclient,\n self.services[\"project\"],\n account=self.account.name,\n domainid=self.account.domainid\n )\n # Cleanup created project at end of test\n self.cleanup.append(project)\n self.debug(\"Created project with domain admin with ID: %s\" %\n project.id)\n\n network_offerings = list_network_offerings(\n self.apiclient,\n projectid=project.id,\n supportedServices='SourceNat',\n type='isolated',\n state='Enabled'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a network with network offering ID: %s\" %\n network_offering.id)\n self.services[\"network\"][\"zoneid\"] = self.zone.id\n network = Network.create(\n self.apiclient,\n self.services[\"network\"],\n networkofferingid=network_offering.id,\n projectid=project.id\n )\n self.debug(\"Created network with ID: %s\" % network.id)\n networks = Network.list(\n self.apiclient,\n projectid=project.id,\n listall=True\n )\n self.assertEqual(\n isinstance(networks, list),\n True,\n \"Check for the valid network list response\"\n )\n\n self.debug(\"Deploying VM with network: %s\" % network.id)\n\n virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n network_offerings = list_network_offerings(\n self.apiclient,\n state='Enabled',\n guestiptype='Shared',\n name='DefaultSharedNetworkOffering',\n displaytext='Offering for Shared networks'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a shared network in domain: %s\" %\n self.domain.id)\n\n # Getting physical network and free vlan in it\n physical_network, vlan = get_free_vlan(self.apiclient, self.zone.id)\n\n self.services[\"domain_network\"][\"vlan\"] = vlan\n self.services[\"domain_network\"][\"physicalnetworkid\"] = physical_network.id\n\n # Generating random subnet number for shared network creation\n shared_network_subnet_number = random.randrange(1,254)\n\n self.services[\"domain_network\"][\"gateway\"] = \"172.16.\"+str(shared_network_subnet_number)+\".1\"\n self.services[\"domain_network\"][\"startip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".2\"\n self.services[\"domain_network\"][\"endip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".20\"\n\n domain_network = Network.create(\n self.apiclient,\n self.services[\"domain_network\"],\n domainid=self.domain.id,\n networkofferingid=network_offering.id,\n zoneid=self.zone.id\n )\n self.cleanup.append(domain_network)\n self.debug(\"Created network with ID: %s\" % domain_network.id)\n\n 
virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(domain_network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n # Delete VM before network gets deleted in cleanup\n virtual_machine.delete(self.apiclient, expunge=True)\n return",
"def test_create_network():\n _network = Network()",
"def test_add_network(self):\n pass",
"def test_networking_project_network_get(self):\n pass",
"def test_create_network_and_subnet(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 254\n self.__create_network_and_subnet_test_helper__(network_name, network_cidr)",
"def test_networking_project_network_list(self):\n pass",
"def test_register_network(self):\n pass",
"def test_networking_project_network_delete(self):\n pass",
"def network_create(request, **kwargs):\n LOG.debug(\"network_create(): kwargs = %s\", kwargs)\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body = {'network': kwargs}\n network = neutronclient(request).create_network(body=body).get('network')\n return Network(network)",
"def test_networking_project_network_service_get(self):\n pass",
"def test_get_network(self):\n pass",
"def test_networking_project_network_update(self):\n pass",
"def network_create(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(keep_name=True, **kwargs)\n return cloud.create_network(**kwargs)",
"def test_api_use_virtual_network_post(self):\n body = Network()\n response = self.client.open(\n '/api/use/virtual-network/',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def test_networking_project_network_event_get(self):\n pass",
"def test_get_networks(self):\n pass",
"def test_networking_project_network_event_list(self):\n pass",
"def create(self):\n logging.debug(\"%s create called\" % self)\n # networks = self.infra.get(\"networks\")\n notify(\"Creating network %s\" % self.name)\n self.cloudnet = cn.create(self.name, cidr=self.cidr)\n return True",
"def test_networking_project_network_service_list(self):\n pass",
"def test_create_cluster_network(self):\n pass",
"def __create_network_and_subnet_test_helper__(self, network_name, network_cidr):\n network = self.neutron_operations.create_network_and_subnet(network_name, cidr=network_cidr)\n self.assertIsNotNone(network, \"Problems creating network\")\n self.assertEqual(network['status'], 'ACTIVE', \"Network status is not ACTIVE\")\n self.test_world['networks'].append(network['id'])\n self.logger.debug(\"%s\", network)\n\n return network['id'], network['subnet']['id']",
"def _create_network_vm(args):\n #\n # maximum length of network name is 14 chars, longer names will result in\n # a failure 'numerical result out of range' when creating the bridge.\n if len(args.network_name) > 14:\n _logger.error('Network name %s to long, max is 14 characters.', args.network_name)\n return 1\n # check network name unicity\n conn = libvirt.openReadOnly(None)\n _vnets = []\n if conn:\n _vnets = [n.name() for n in conn.listAllNetworks() if n.name() == args.network_name]\n conn.close()\n else:\n print('Cannot contact hypervisor', file=sys.stderr)\n return 1\n if len(_vnets) != 0:\n print(\"Network with name [%s] already exists\" % args.network_name, file=sys.stderr)\n return 1\n\n return oci_utils.kvm.virt.create_virtual_network(network=args.net,\n network_name=args.network_name,\n ip_bridge=args.ip_bridge,\n ip_prefix=args.ip_prefix,\n ip_start=args.ip_start,\n ip_end=args.ip_end)",
"def test_create_net_namespace(self):\n pass",
"def test_create_network(self):\n network = vertigo.create_network(\"test\")\n self.assert_equals(\"test\", network.address)\n network.address = \"foo\"\n self.assert_equals(\"foo\", network.address)\n network.enable_acking()\n self.assert_true(network.acking_enabled())\n network.disable_acking()\n self.assert_false(network.acking_enabled())\n network.num_ackers = 10\n self.assert_equals(10, network.num_ackers)\n network.ack_expire = 50000\n self.assert_equals(50000, network.ack_expire)\n component = network.from_verticle('test_feeder_verticle', main='test_feeder_verticle.py')\n self.assert_equals('test_feeder_verticle', component.name)\n self.assert_equals('test_feeder_verticle.py', component.main)\n component.workers = 4\n self.assert_equals(4, component.workers)\n component2 = component.to_verticle('test_worker_verticle')\n component2.main = 'test_worker_verticle.py'\n self.assert_equals('test_worker_verticle.py', component2.main)\n self.complete()",
"def test_create_tag(self):\n\n tag_payload = {'name': 'Test Tag'}\n self.client.post(URL_TAGS, tag_payload)\n\n is_tag_created = Tag.objects.filter(\n user=self.user,\n name=tag_payload['name']\n ).exists()\n\n self.assertTrue(is_tag_created)"
]
| [
"0.8415681",
"0.8278862",
"0.827278",
"0.8097047",
"0.7693032",
"0.71981835",
"0.716799",
"0.7052762",
"0.7006196",
"0.68853045",
"0.6870068",
"0.677154",
"0.6724007",
"0.65254825",
"0.6511818",
"0.6463387",
"0.6381934",
"0.6359911",
"0.63259894",
"0.6287146",
"0.62654346",
"0.6262999",
"0.6249459",
"0.62063426",
"0.6190127",
"0.6184046",
"0.61692715",
"0.6161571",
"0.6129593",
"0.612166"
]
| 0.95820755 | 0 |
Test case for networking_project_network_tag_delete | def test_networking_project_network_tag_delete(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_networking_project_network_delete(self):\n pass",
"def test_delete_network(self):\n pass",
"def test_networking_project_network_tag_create(self):\n pass",
"def test_delete__network(self):\n arglist = [\n '--network',\n self.projects[0].id,\n ]\n verifylist = [\n ('service', 'network'),\n ('project', self.projects[0].id),\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n result = self.cmd.take_action(parsed_args)\n\n self.assertIsNone(result)\n self.projects_mock.get.assert_called_once_with(self.projects[0].id)\n self.compute_quotas_mock.delete.assert_not_called()\n self.volume_quotas_mock.delete.assert_not_called()\n self.network_mock.delete_quota.assert_called_once_with(\n self.projects[0].id,\n )",
"def test_networking_project_network_tag_put(self):\n pass",
"def test_networking_project_network_tag_get(self):\n pass",
"def test_delete_cluster_network(self):\n pass",
"def test_networking_project_network_tag_list(self):\n pass",
"def test_delete_tag(self):\r\n\r\n with app.test_client() as client:\r\n resp = client.post(f\"/tags/{self.tag.id}/delete\", follow_redirects=True)\r\n html = resp.get_data(as_text=True)\r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertNotIn(\"Marvel\", html)",
"def testDeleteNetworkAuth(self):\n response = self._delete('inventory/networks/1/')\n self.assertEquals(response.status_code, 401)\n\n response = self._delete('inventory/networks/1/',\n username=\"testuser\", password=\"password\")\n self.assertEquals(response.status_code, 403)",
"def network_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_network(**kwargs)",
"def test_delete_net_namespace(self):\n pass",
"def delete_network(self, network):\r\n return self.delete(self.network_path % (network))",
"def test_delete_project(self):\n pass",
"def test_delete_project(self):\n pass",
"def test_delete_host_subnet(self):\n pass",
"def test_remove_project(self):\n pass",
"def test_delete_collection_cluster_network(self):\n pass",
"def network_delete_event(self, network_info):\n\n net_id = network_info['network_id']\n if net_id not in self.network:\n LOG.error(_LE('network_delete_event: net_id %s does not exist.'),\n net_id)\n return\n\n segid = self.network[net_id].get('segmentation_id')\n tenant_id = self.network[net_id].get('tenant_id')\n tenant_name = self.get_project_name(tenant_id)\n net = utils.Dict2Obj(self.network[net_id])\n if not tenant_name:\n LOG.error(_LE('Project %(tenant_id)s does not exist.'),\n {'tenant_id': tenant_id})\n self.update_network_db(net.id, constants.DELETE_FAIL)\n return\n\n try:\n self.dcnm_client.delete_network(tenant_name, net)\n # Put back the segmentation id into the pool.\n self.seg_drvr.release_segmentation_id(segid)\n\n # Remove entry from database and cache.\n self.delete_network_db(net_id)\n del self.network[net_id]\n snets = [k for k in self.subnet if (\n self.subnet[k].get('network_id') == net_id)]\n [self.subnet.pop(s) for s in snets]\n except dexc.DfaClientRequestFailed:\n LOG.error(_LE('Failed to create network %(net)s.'),\n {'net': net.name})\n self.update_network_db(net_id, constants.DELETE_FAIL)\n # deleting all related VMs\n instances = self.get_vms()\n instances_related = [k for k in instances if k.network_id == net_id]\n for vm in instances_related:\n LOG.debug(\"deleting vm %s because network is deleted\", vm.name)\n self.delete_vm_function(vm.port_id, vm)\n self.network_del_notif(tenant_id, tenant_name, net_id)",
"def test_delete_cloud(self):\n pass",
"def delete_network(name, host, network_type):\n logging.info(\"Deleting %s '%s' from host '%s'\", network_type, name, host.name)\n\n try:\n if network_type.lower() == \"vswitch\":\n host.configManager.networkSystem.RemoveVirtualSwitch(name)\n elif network_type.lower() == \"portgroup\":\n host.configManager.networkSystem.RemovePortGroup(name)\n except vim.fault.NotFound:\n logging.error(\"Tried to remove %s '%s' that does not exist from host '%s'\",\n network_type, name, host.name)\n except vim.fault.ResourceInUse:\n logging.error(\"%s '%s' can't be removed because there are vNICs associated with it\",\n network_type, name)",
"def test_delete_collection_host_subnet(self):\n pass",
"def _delete_network_vm(args):\n libvirtConn = libvirt.openReadOnly(None)\n if libvirtConn is None:\n print('Cannot contact hypervisor', file=sys.stderr)\n return 1\n net = None\n try:\n net = libvirtConn.networkLookupByName(args.network_name)\n except libvirt.libvirtError:\n print('Cannot find network named [%s]' % args.network_name, file=sys.stderr)\n return 1\n print('Network found:\\n')\n print(xml.dom.minidom.parseString(net.XMLDesc()).toprettyxml(indent=\" \", newl=''))\n print('')\n\n if not args.yes:\n if not input('Really destroy this network ?').strip().lower() in ('y', 'yes'):\n return 1\n return oci_utils.kvm.virt.delete_virtual_network(network_name=args.network_name)",
"def delete(self, oid):\n path = '%s/networks/%s' % (self.ver, oid)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack network: %s' % truncate(res))\n return res[0]",
"def post_virtual_network_delete(self, resource_id, resource_dict):\n pass",
"def delete_network_postcommit(self, context):\n if self.rpc_handler is None:\n return\n network = self._get_network_info(context._network)\n for _, _network in network.items():\n network_type = _network.get('network_type', '')\n if network_type not in CentecConstant.SUPPORTED_NETWORK_TYPES and len(CentecConstant.SUPPORTED_NETWORK_TYPES) > 0:\n return\n if network is not None:\n try:\n self.rpc_handler.delete_network(network)\n except:\n pass",
"def delete(self):\n \n logging.info(\"Deleting network %s\" % self.cloudnet)\n # res = cn.delete(self.cloudnet)\n res = self.cloudnet.delete()\n return res",
"def test_networking_project_network_create(self):\n pass",
"def test_delete_namespaced_build(self):\n pass",
"def removeConnection(tagA, tagB): #@NoSelf"
]
| [
"0.85864913",
"0.79074013",
"0.73118925",
"0.72815436",
"0.726035",
"0.70726025",
"0.7015217",
"0.69911855",
"0.6827487",
"0.6809548",
"0.6754534",
"0.6743542",
"0.67119163",
"0.66424894",
"0.66424894",
"0.65924495",
"0.65480644",
"0.6546871",
"0.65056103",
"0.645967",
"0.6393288",
"0.63471776",
"0.6291367",
"0.62840396",
"0.6267636",
"0.6245787",
"0.62192225",
"0.61711925",
"0.6151862",
"0.61470467"
]
| 0.9531553 | 0 |
Test case for networking_project_network_tag_get | def test_networking_project_network_tag_get(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_networking_project_network_tag_list(self):\n pass",
"def test_networking_project_network_tag_create(self):\n pass",
"def test_networking_project_network_get(self):\n pass",
"def test_networking_project_network_tag_put(self):\n pass",
"def test_networking_project_network_service_get(self):\n pass",
"def test_networking_project_network_tag_delete(self):\n pass",
"def test_networking_project_network_event_get(self):\n pass",
"def test_get_network(self):\n pass",
"def test_networking_project_network_list(self):\n pass",
"def test_aws_service_api_networks_get(self):\n pass",
"def test_get_networks(self):\n pass",
"def test_networking_project_network_create(self):\n pass",
"def test_networking_project_network_service_list(self):\n pass",
"def test_networking_project_network_event_list(self):\n pass",
"def test_get_lab_network_by_name(\n self, authenticated_client, lab_path, test_network, test_network_data\n ):\n resp = authenticated_client.api.get_lab_network(lab_path, test_network)\n assert resp[\"data\"][\"name\"] == test_network_data[\"name\"]",
"def test_networking_project_network_update(self):\n pass",
"def network_tags(self) -> Optional[pulumi.Input['NetworkTagsArgs']]:\n return pulumi.get(self, \"network_tags\")",
"def test_get_default_network(self):\n pass",
"def test_networking_project_network_delete(self):\n pass",
"def network_get(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.get_network(**kwargs)",
"def test_get_tag_name(self):\r\n name = self.combinedoe.get_tag_name(\"<t>Tag</t>\")\r\n self.assertEqual(name, \"t\")",
"def test_add_ip(self):\n ip = '1.1.1.1'\n info = self.api.add_ipadress(ip, tags=['asd'])\n self.assertEqual(info['value'], ip)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])",
"def test_get_ip_tags(client, database, sample_data, url, expected_data):\n\n response = client.get(url)\n response_data = response.get_json()\n\n assert response.status_code == 200\n assert response.headers[\"Content-Type\"] == \"application/json\"\n assert response_data == expected_data",
"def test_support_NETWORK(self):\n self.assertEqual(self._parseFeature(\"NETWORK\", \"IRCNet\"), \"IRCNet\")",
"def test_add_network(self):\n pass",
"def test_read_net_namespace(self):\n pass",
"def test_get_unregistered_networks(self):\n pass",
"def test_get_tag(self):\n self.seed_static_data()\n params = {'id': 1, 'event_id': 1}\n response = self.app.get('/api/v1/tag', headers=self.user1_headers, data=params)\n data = json.loads(response.data)\n self.assertEqual(data['id'], 1)\n self.assertEqual(data['event_id'], 1)\n self.assertEqual(data['tag_type'], 'RESPONSE')\n self.assertDictEqual(data['name'], {\n 'en': 'English Tag 1 Event 1',\n 'fr': 'French Tag 1 Event 1'\n })\n self.assertDictEqual(data['description'], {\n 'en': 'English Tag 1 Event 1 Description',\n 'fr': 'French Tag 1 Event 1 Description'\n })",
"def GetNetworkTags(self, network, reason=None):\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_GET,\n (\"/%s/networks/%s/tags\" %\n (GANETI_RAPI_VERSION, network)), query, None)",
"def test_get_host_with_invalid_tag_no_key(mq_create_three_specific_hosts, api_get):\n url = build_hosts_url(query=\"?tags=namespace/=Value\")\n response_status, response_data = api_get(url)\n\n assert response_status == 400"
]
| [
"0.81374985",
"0.7996989",
"0.7775014",
"0.7694906",
"0.7312093",
"0.71594137",
"0.7070286",
"0.69871694",
"0.68383783",
"0.67855865",
"0.6660819",
"0.64519733",
"0.62692016",
"0.6119776",
"0.60387516",
"0.59296036",
"0.59206605",
"0.5916272",
"0.59021133",
"0.5760429",
"0.5586733",
"0.5583526",
"0.55713445",
"0.55480516",
"0.5530759",
"0.55190736",
"0.551232",
"0.5509802",
"0.55088264",
"0.5499449"
]
| 0.9510173 | 0 |
Test case for networking_project_network_tag_list | def test_networking_project_network_tag_list(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_networking_project_network_tag_get(self):\n pass",
"def test_networking_project_network_list(self):\n pass",
"def test_networking_project_network_tag_create(self):\n pass",
"def test_networking_project_network_service_list(self):\n pass",
"def test_networking_project_network_event_list(self):\n pass",
"def test_networking_project_network_tag_put(self):\n pass",
"def test_networking_project_network_tag_delete(self):\n pass",
"def test_networking_project_network_get(self):\n pass",
"def test_networking_project_network_create(self):\n pass",
"def test_get_networks(self):\n pass",
"def test_project_list_tags(self):\n # Add test project with tags.\n tags = ['tag1', 'tag2', 'tag3']\n add_project(title='1', description='1', tags=tags)\n\n # Check that project list contains each tag.\n response = self.client.get(reverse('portfolio:project_list'))\n self.assertEqual(response.status_code, 200)\n for tag in tags:\n self.assertContains(response, tag)",
"def test_networking_project_network_service_get(self):\n pass",
"def test_aws_service_api_networks_get(self):\n pass",
"def test_list_lab_networks(self, authenticated_client, lab_path):\n r = authenticated_client.api.list_lab_networks(lab_path)\n assert r[\"data\"] is not None",
"def test_networking_project_network_update(self):\n pass",
"def test_networking_project_network_delete(self):\n pass",
"def test_get_network(self):\n pass",
"def test_networking_project_network_event_get(self):\n pass",
"def test_add_network(self):\n pass",
"def test_list_cluster_network(self):\n pass",
"def project_list_networks(project):\n q = client.project.networks_in(project)\n sys.stdout.write(\n \"Networks allocated to %s\\t: %s\\n\" % (project, \" \".join(q))\n )",
"def test_get_valid_networks_for_virtualization_realm(self):\n pass",
"def test_get_project_list_with_tag_filter(self):\n # Add test projects.\n tag = 'tag1'\n projects_with_tag = [\n add_project(title='1', description='1', tags=[tag]),\n add_project(title='2', description='2', tags=[tag]),\n ]\n project_without_tag = add_project(title='3', description='3', tags=[])\n\n result = get_project_list(tag=tag)\n result_projects = result['projects'].object_list\n\n # Make sure only projects with tag are retrieved.\n for project_with_tag in projects_with_tag:\n self.assertTrue(project_with_tag in result_projects)\n self.assertFalse(project_without_tag in result_projects)\n self.assertEqual(len(result_projects), len(projects_with_tag))\n self.assertTrue(result['filtered'])\n self.assertEqual(result['tag'], tag)",
"def test_03_network_create(self):\n # Validate the following\n # 1. Create a project.\n # 2. Add virtual/direct network resource to the project. User shared\n # network resource for the project\n # 3. Verify any number of Project level Virtual/Direct networks can be\n # created and used for vm deployment within the project.\n # 4. Verify shared networks (zone and domain wide) from outside the\n # project can also be used in a project.\n\n # Create project as a domain admin\n project = Project.create(\n self.apiclient,\n self.services[\"project\"],\n account=self.account.name,\n domainid=self.account.domainid\n )\n # Cleanup created project at end of test\n self.cleanup.append(project)\n self.debug(\"Created project with domain admin with ID: %s\" %\n project.id)\n\n network_offerings = list_network_offerings(\n self.apiclient,\n projectid=project.id,\n supportedServices='SourceNat',\n type='isolated',\n state='Enabled'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a network with network offering ID: %s\" %\n network_offering.id)\n self.services[\"network\"][\"zoneid\"] = self.zone.id\n network = Network.create(\n self.apiclient,\n self.services[\"network\"],\n networkofferingid=network_offering.id,\n projectid=project.id\n )\n self.debug(\"Created network with ID: %s\" % network.id)\n networks = Network.list(\n self.apiclient,\n projectid=project.id,\n listall=True\n )\n self.assertEqual(\n isinstance(networks, list),\n True,\n \"Check for the valid network list response\"\n )\n\n self.debug(\"Deploying VM with network: %s\" % network.id)\n\n virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n network_offerings = list_network_offerings(\n self.apiclient,\n state='Enabled',\n guestiptype='Shared',\n name='DefaultSharedNetworkOffering',\n displaytext='Offering for Shared networks'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a shared network in domain: %s\" %\n self.domain.id)\n\n # Getting physical network and free vlan in it\n physical_network, vlan = get_free_vlan(self.apiclient, self.zone.id)\n\n self.services[\"domain_network\"][\"vlan\"] = vlan\n self.services[\"domain_network\"][\"physicalnetworkid\"] = physical_network.id\n\n # Generating random subnet number for shared network creation\n shared_network_subnet_number = random.randrange(1,254)\n\n self.services[\"domain_network\"][\"gateway\"] = \"172.16.\"+str(shared_network_subnet_number)+\".1\"\n self.services[\"domain_network\"][\"startip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".2\"\n self.services[\"domain_network\"][\"endip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".20\"\n\n domain_network = Network.create(\n self.apiclient,\n self.services[\"domain_network\"],\n domainid=self.domain.id,\n networkofferingid=network_offering.id,\n zoneid=self.zone.id\n )\n self.cleanup.append(domain_network)\n self.debug(\"Created network with ID: %s\" % domain_network.id)\n\n 
virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(domain_network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n # Delete VM before network gets deleted in cleanup\n virtual_machine.delete(self.apiclient, expunge=True)\n return",
"def test_verify_list_of_devices_in_my_network():",
"def test_add_ip(self):\n ip = '1.1.1.1'\n info = self.api.add_ipadress(ip, tags=['asd'])\n self.assertEqual(info['value'], ip)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])",
"def test_list_projects(self):\n pass",
"def test_list_projects(self):\n pass",
"def test_support_NETWORK(self):\n self.assertEqual(self._parseFeature(\"NETWORK\", \"IRCNet\"), \"IRCNet\")",
"def test_list_net_namespace(self):\n pass"
]
| [
"0.8407284",
"0.82673967",
"0.8170092",
"0.7694319",
"0.7642346",
"0.7619595",
"0.73755604",
"0.7235602",
"0.7030399",
"0.6982695",
"0.6671765",
"0.66642684",
"0.65270686",
"0.64974034",
"0.6470683",
"0.64488524",
"0.6414432",
"0.6403719",
"0.6192123",
"0.611372",
"0.6030102",
"0.6014697",
"0.5978699",
"0.59567016",
"0.58669657",
"0.58192945",
"0.5794497",
"0.5794497",
"0.576683",
"0.57585657"
]
| 0.94968116 | 0 |
Test case for networking_project_network_tag_put | def test_networking_project_network_tag_put(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_networking_project_network_tag_create(self):\n pass",
"def test_networking_project_network_tag_get(self):\n pass",
"def test_networking_project_network_tag_delete(self):\n pass",
"def test_networking_project_network_tag_list(self):\n pass",
"def test_aws_service_api_vm_tag_put(self):\n pass",
"def test_networking_project_network_update(self):\n pass",
"def test_networking_project_network_create(self):\n pass",
"def test_add_ip(self):\n ip = '1.1.1.1'\n info = self.api.add_ipadress(ip, tags=['asd'])\n self.assertEqual(info['value'], ip)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])",
"def test_add_network(self):\n pass",
"def test_networking_project_network_delete(self):\n pass",
"def test_networking_project_network_get(self):\n pass",
"def testPutNetworkAuth(self):\n response = self._put('inventory/networks/1/',\n data= testsxml.network_put_xml)\n self.assertEquals(response.status_code, 401)\n\n response = self._put('inventory/networks/1/',\n data=testsxml.network_put_xml,\n username=\"testuser\", password=\"password\")\n self.assertEquals(response.status_code, 403)",
"def test_register_network(self):\n pass",
"def testPutNetworkNotFound(self):\n try:\n response = self._put('inventory/networks/1zcvxzvzgvsdzfewrew4t4tga34/',\n data=testsxml.network_put_xml,\n username=\"testuser\", password=\"password\")\n self.assertEquals(response.status_code, 404)\n except TemplateDoesNotExist, e:\n # might not have template, so check for 404 in error\n self.assertTrue(\"404\" in str(e))",
"def test_add_hostname(self):\n hostname = 'test123.com'\n info = self.api.add_hostname(hostname, tags=['asd'])\n self.assertEqual(info['value'], hostname)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])",
"def test_networking_project_network_list(self):\n pass",
"def testPutNetworkLocalIp(self):\n models.System.objects.all().delete()\n self._saveSystem()\n old_count = models.Network.objects.count()\n self._put('inventory/networks/1/',\n data=testsxml.network_put_xml_opt_ip_addr % \"169.254.4.4\",\n username=\"admin\", password=\"password\")\n self.assertEquals(old_count, models.Network.objects.count())\n\n self._put('inventory/networks/1/',\n data=testsxml.network_put_xml_opt_ip_addr % \"4.4.4.4\",\n username=\"admin\", password=\"password\")\n self.assertEquals(old_count + 1, models.Network.objects.count())",
"def test_networking_project_network_service_get(self):\n pass",
"def test_networking_project_network_event_get(self):\n pass",
"def test_aws_service_api_vm_command_put(self):\n pass",
"def test_03_network_create(self):\n # Validate the following\n # 1. Create a project.\n # 2. Add virtual/direct network resource to the project. User shared\n # network resource for the project\n # 3. Verify any number of Project level Virtual/Direct networks can be\n # created and used for vm deployment within the project.\n # 4. Verify shared networks (zone and domain wide) from outside the\n # project can also be used in a project.\n\n # Create project as a domain admin\n project = Project.create(\n self.apiclient,\n self.services[\"project\"],\n account=self.account.name,\n domainid=self.account.domainid\n )\n # Cleanup created project at end of test\n self.cleanup.append(project)\n self.debug(\"Created project with domain admin with ID: %s\" %\n project.id)\n\n network_offerings = list_network_offerings(\n self.apiclient,\n projectid=project.id,\n supportedServices='SourceNat',\n type='isolated',\n state='Enabled'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a network with network offering ID: %s\" %\n network_offering.id)\n self.services[\"network\"][\"zoneid\"] = self.zone.id\n network = Network.create(\n self.apiclient,\n self.services[\"network\"],\n networkofferingid=network_offering.id,\n projectid=project.id\n )\n self.debug(\"Created network with ID: %s\" % network.id)\n networks = Network.list(\n self.apiclient,\n projectid=project.id,\n listall=True\n )\n self.assertEqual(\n isinstance(networks, list),\n True,\n \"Check for the valid network list response\"\n )\n\n self.debug(\"Deploying VM with network: %s\" % network.id)\n\n virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n network_offerings = list_network_offerings(\n self.apiclient,\n state='Enabled',\n guestiptype='Shared',\n name='DefaultSharedNetworkOffering',\n displaytext='Offering for Shared networks'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a shared network in domain: %s\" %\n self.domain.id)\n\n # Getting physical network and free vlan in it\n physical_network, vlan = get_free_vlan(self.apiclient, self.zone.id)\n\n self.services[\"domain_network\"][\"vlan\"] = vlan\n self.services[\"domain_network\"][\"physicalnetworkid\"] = physical_network.id\n\n # Generating random subnet number for shared network creation\n shared_network_subnet_number = random.randrange(1,254)\n\n self.services[\"domain_network\"][\"gateway\"] = \"172.16.\"+str(shared_network_subnet_number)+\".1\"\n self.services[\"domain_network\"][\"startip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".2\"\n self.services[\"domain_network\"][\"endip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".20\"\n\n domain_network = Network.create(\n self.apiclient,\n self.services[\"domain_network\"],\n domainid=self.domain.id,\n networkofferingid=network_offering.id,\n zoneid=self.zone.id\n )\n self.cleanup.append(domain_network)\n self.debug(\"Created network with ID: %s\" % domain_network.id)\n\n 
virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(domain_network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n # Delete VM before network gets deleted in cleanup\n virtual_machine.delete(self.apiclient, expunge=True)\n return",
"def test_create_network():\n _network = Network()",
"def test_add_url(self):\n url = 'http://test.com/'\n info = self.api.add_url(url, tags=['asd'])\n self.assertEqual(info['value'], url)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])",
"def test_0_put(self):\n self.assertIsNotNone(save_node_info(self.node.name, self.node))",
"def test_deploy_instance_with_new_network_and_metadata(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_metadata_\" + suffix\n instance_meta = {\"test_item\": \"test_value\"}\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 251\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr,\n metadata=instance_meta)",
"def test_add_macaddress(self):\n mac = '00:00:00:00:00:00'\n info = self.api.add_macaddress(mac, tags=['asd'])\n self.assertEqual(info['value'], mac)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])",
"def test_delete_network(self):\n pass",
"def test_aws_service_api_networks_get(self):\n pass",
"def test_put(self):\n self.seed_static_data()\n\n params = {\n 'id': 2,\n 'event_id': 1,\n 'tag_type': 'REGISTRATION',\n 'name': {\n 'en': 'Renamed English Name', # Rename\n 'zu': 'Zulu Name'\n },\n 'description': {\n 'en': 'Renamed English Description',\n 'zu': 'Zulu Description'\n },\n 'active': True\n }\n\n response = self.app.put(\n '/api/v1/tag', \n headers=self.user1_headers, \n data=json.dumps(params),\n content_type='application/json')\n self.assertEqual(response.status_code, 200)\n\n response = self.app.get('/api/v1/tag', headers=self.user1_headers, data={'id': 2, 'event_id': 1, 'language': 'en'})\n data = json.loads(response.data)\n\n self.assertEqual(data['id'], 2)\n self.assertEqual(data['event_id'], 1)\n self.assertEqual(data['tag_type'], 'REGISTRATION')\n self.assertDictEqual(data['name'], {\n 'en': 'Renamed English Name',\n 'zu': 'Zulu Name'\n })\n self.assertDictEqual(data['description'], {\n 'en': 'Renamed English Description',\n 'zu': 'Zulu Description'\n })",
"def test_api_use_virtual_network_post(self):\n body = Network()\n response = self.client.open(\n '/api/use/virtual-network/',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))"
]
| [
"0.8162548",
"0.7632924",
"0.7529343",
"0.73087597",
"0.69838846",
"0.67863727",
"0.67096",
"0.6460837",
"0.62958515",
"0.6271951",
"0.6260058",
"0.6142768",
"0.6081592",
"0.60224813",
"0.6007177",
"0.59064764",
"0.5894923",
"0.58403486",
"0.5809004",
"0.5711",
"0.5707141",
"0.5697741",
"0.5657273",
"0.5639932",
"0.56397176",
"0.5611062",
"0.55656135",
"0.55597",
"0.5552797",
"0.554389"
]
| 0.9519759 | 0 |
Test case for networking_project_network_update | def test_networking_project_network_update(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_networking_project_network_get(self):\n pass",
"def test_networking_project_network_create(self):\n pass",
"def test_networking_project_network_list(self):\n pass",
"def test_networking_project_network_tag_put(self):\n pass",
"def test_networking_project_network_delete(self):\n pass",
"def test_networking_project_network_service_get(self):\n pass",
"def test_networking_project_network_event_get(self):\n pass",
"def test_add_network(self):\n pass",
"def test_networking_project_network_event_list(self):\n pass",
"def test_update_network(self):\n policies_ports = [\n (None, {self.ports[0].id}),\n (self.qos_policies[1].id, {self.ports[0].id})]\n\n self.ports[1].qos_policy_id = self.qos_policies[0].id\n self.ports[1].update()\n self.ports[2].qos_policy_id = self.qos_policies[1].id\n self.ports[2].update()\n for qos_policy_id, reference_ports in policies_ports:\n self.networks[0].qos_policy_id = qos_policy_id\n self.networks[0].update()\n original_network = {'qos_policy_id': self.qos_policies[0]}\n reviewed_port_ids, _, _ = self.qos_driver.update_network(\n mock.ANY, self.networks[0], original_network)\n self.assertEqual(reference_ports, reviewed_port_ids)\n calls = [mock.call(mock.ANY, self.ports[0].id,\n self.ports[0].network_id, qos_policy_id,\n None)]\n self.mock_rules.assert_has_calls(calls)\n self.mock_rules.reset_mock()",
"def test_get_network(self):\n pass",
"def test_update_external_network(self):\n network_policies = [(self.qos_policies[1].id,\n {self.fips[1].id},\n {self.router_fips.id}),\n (None,\n {self.fips[1].id},\n {self.router_fips.id})]\n\n self.fips[0].qos_policy_id = self.qos_policies[0].id\n self.fips[0].update()\n for qos_policy_id, ref_fips, ref_routers in network_policies:\n self.fips_network.qos_policy_id = qos_policy_id\n self.fips_network.update()\n original_network = {'qos_policy_id': self.qos_policies[0]}\n _, reviewed_fips_ids, reviewed_router_ids = (\n self.qos_driver.update_network(\n mock.Mock(), self.fips_network, original_network))\n self.assertEqual(ref_fips, reviewed_fips_ids)\n self.assertEqual(ref_routers, reviewed_router_ids)",
"def test_update_project(self):\n pass",
"def test_update_project(self):\n pass",
"def test_update_network_no_policy_change(self):\n for qos_policy_id in (self.qos_policies[0].id, None):\n self.networks[0].qos_policy_id = qos_policy_id\n self.networks[0].update()\n original_network = {'qos_policy_id': qos_policy_id}\n port_ids, fip_ids, router_ids = self.qos_driver.update_network(\n mock.ANY, self.networks[0], original_network)\n self.assertEqual(set([]), port_ids)\n self.assertEqual(set([]), fip_ids)\n self.assertEqual(set([]), router_ids)\n self.mock_rules.assert_not_called()",
"def test_03_network_create(self):\n # Validate the following\n # 1. Create a project.\n # 2. Add virtual/direct network resource to the project. User shared\n # network resource for the project\n # 3. Verify any number of Project level Virtual/Direct networks can be\n # created and used for vm deployment within the project.\n # 4. Verify shared networks (zone and domain wide) from outside the\n # project can also be used in a project.\n\n # Create project as a domain admin\n project = Project.create(\n self.apiclient,\n self.services[\"project\"],\n account=self.account.name,\n domainid=self.account.domainid\n )\n # Cleanup created project at end of test\n self.cleanup.append(project)\n self.debug(\"Created project with domain admin with ID: %s\" %\n project.id)\n\n network_offerings = list_network_offerings(\n self.apiclient,\n projectid=project.id,\n supportedServices='SourceNat',\n type='isolated',\n state='Enabled'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a network with network offering ID: %s\" %\n network_offering.id)\n self.services[\"network\"][\"zoneid\"] = self.zone.id\n network = Network.create(\n self.apiclient,\n self.services[\"network\"],\n networkofferingid=network_offering.id,\n projectid=project.id\n )\n self.debug(\"Created network with ID: %s\" % network.id)\n networks = Network.list(\n self.apiclient,\n projectid=project.id,\n listall=True\n )\n self.assertEqual(\n isinstance(networks, list),\n True,\n \"Check for the valid network list response\"\n )\n\n self.debug(\"Deploying VM with network: %s\" % network.id)\n\n virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n network_offerings = list_network_offerings(\n self.apiclient,\n state='Enabled',\n guestiptype='Shared',\n name='DefaultSharedNetworkOffering',\n displaytext='Offering for Shared networks'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a shared network in domain: %s\" %\n self.domain.id)\n\n # Getting physical network and free vlan in it\n physical_network, vlan = get_free_vlan(self.apiclient, self.zone.id)\n\n self.services[\"domain_network\"][\"vlan\"] = vlan\n self.services[\"domain_network\"][\"physicalnetworkid\"] = physical_network.id\n\n # Generating random subnet number for shared network creation\n shared_network_subnet_number = random.randrange(1,254)\n\n self.services[\"domain_network\"][\"gateway\"] = \"172.16.\"+str(shared_network_subnet_number)+\".1\"\n self.services[\"domain_network\"][\"startip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".2\"\n self.services[\"domain_network\"][\"endip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".20\"\n\n domain_network = Network.create(\n self.apiclient,\n self.services[\"domain_network\"],\n domainid=self.domain.id,\n networkofferingid=network_offering.id,\n zoneid=self.zone.id\n )\n self.cleanup.append(domain_network)\n self.debug(\"Created network with ID: %s\" % domain_network.id)\n\n 
virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(domain_network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n # Delete VM before network gets deleted in cleanup\n virtual_machine.delete(self.apiclient, expunge=True)\n return",
"def test_networking_project_network_service_list(self):\n pass",
"def test_patch_cluster_network(self):\n pass",
"async def update_from_workbench(\n projects_networks_repository: ProjectsNetworksRepository,\n projects_repository: ProjectsRepository,\n scheduler: DynamicSidecarsScheduler,\n director_v0_client: DirectorV0Client,\n rabbitmq_client: RabbitMQClient,\n project_id: ProjectID,\n) -> None:\n\n try:\n existing_projects_networks = (\n await projects_networks_repository.get_projects_networks(\n project_id=project_id\n )\n )\n except ProjectNotFoundError:\n existing_projects_networks = ProjectsNetworks.parse_obj(\n dict(project_uuid=project_id, networks_with_aliases={})\n )\n\n existing_networks_with_aliases = existing_projects_networks.networks_with_aliases\n\n # NOTE: when UI is in place this is no longer required\n # for now all services are placed on the same default network\n project: ProjectAtDB = await projects_repository.get_project(project_id)\n assert project.prj_owner # nosec\n new_networks_with_aliases = await _get_networks_with_aliases_for_default_network(\n project_id=project_id,\n user_id=project.prj_owner,\n new_workbench=project.workbench,\n director_v0_client=director_v0_client,\n rabbitmq_client=rabbitmq_client,\n )\n logger.debug(\"%s\", f\"{existing_networks_with_aliases=}\")\n await projects_networks_repository.upsert_projects_networks(\n project_id=project_id, networks_with_aliases=new_networks_with_aliases\n )\n\n await _send_network_configuration_to_dynamic_sidecar(\n scheduler=scheduler,\n project_id=project_id,\n new_networks_with_aliases=new_networks_with_aliases,\n existing_networks_with_aliases=existing_networks_with_aliases,\n )",
"def testAutomodeNetwork(self):\n ### create test resources\n instance_name = \"end-to-end-test-instance-1\"\n instance_selfLink = \\\n self.test_resource_creator.create_instance_using_template(\n instance_name,\n self.test_resource_creator.legacy_instance_template_selfLink)[\n 'targetLink']\n original_config = self.google_api_interface.get_instance_configs(\n instance_name)\n auto_subnetwork_name = 'end-to-end-test-auto-subnetwork'\n try:\n network_selfLink = self.google_api_interface.get_network(auto_subnetwork_name)['selfLink']\n except:\n network_selfLink = self.google_api_interface.create_auto_subnetwork(auto_subnetwork_name)['targetLink']\n\n ### start migration\n selfLink_executor = SelfLinkExecutor(self.compute, instance_selfLink,\n auto_subnetwork_name,\n None,\n True)\n\n migration_handler = selfLink_executor.build_migration_handler()\n migration_handler.network_migration()\n\n ### check result\n new_config = self.google_api_interface.get_instance_configs(\n instance_name)\n self.assertTrue(\n resource_config_is_unchanged_except_for_network(new_config,\n original_config))\n self.assertTrue(\n compare_instance_external_ip(new_config, original_config))\n # network changed\n self.assertTrue(check_instance_network(new_config,\n network_selfLink,\n ))\n print('Pass the current test')",
"def modify_network(self, username, machine_name, new_network, txn_id):\n logger = get_task_logger(txn_id=txn_id, task_id=self.request.id, loglevel=const.VLAB_ONEFS_LOG_LEVEL.upper())\n resp = {'content' : {}, 'error': None, 'params': {}}\n logger.info('Task starting')\n try:\n vmware.update_network(username, machine_name, new_network)\n except ValueError as doh:\n logger.error('Task failed: {}'.format(doh))\n resp['error'] = '{}'.format(doh)\n logger.info('Task complete')\n return resp",
"def test_get_networks(self):\n pass",
"def dvs_update_network(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n self.show_step(2)\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n net_1 = os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n self.show_step(3)\n os_conn.neutron.update_network(net_1[\"id\"],\n {\"network\": {\"name\": 'net_2'}})\n\n assert_true(os_conn.get_network('net_2')['id'] == net_1['id'])\n\n self.show_step(4)\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n os_conn.neutron.update_network(\n default_net.id, {\"network\": {\"name\": 'spring'}})\n\n assert_true(os_conn.get_network('spring')['id'] == default_net.id)",
"def test_networking_project_network_tag_create(self):\n pass",
"def test_update_port_group(self):\n pass",
"def testPutNetworkLocalIp(self):\n models.System.objects.all().delete()\n self._saveSystem()\n old_count = models.Network.objects.count()\n self._put('inventory/networks/1/',\n data=testsxml.network_put_xml_opt_ip_addr % \"169.254.4.4\",\n username=\"admin\", password=\"password\")\n self.assertEquals(old_count, models.Network.objects.count())\n\n self._put('inventory/networks/1/',\n data=testsxml.network_put_xml_opt_ip_addr % \"4.4.4.4\",\n username=\"admin\", password=\"password\")\n self.assertEquals(old_count + 1, models.Network.objects.count())",
"def test_networking_project_network_tag_list(self):\n pass",
"def test_replace_cluster_network(self):\n pass",
"def test_patch_net_namespace(self):\n pass",
"def update_network(self, dbnetwork, qipinfo):\n\n # We don't want to add the plenary to self.plenaries if we aren't going\n # to change anything\n plenary = Plenary.get_plenary(dbnetwork)\n updated = False\n\n if dbnetwork.name != qipinfo.name:\n self.logger.client_info(\"Setting network {0!s} name to {1}\"\n .format(dbnetwork, qipinfo.name))\n dbnetwork.name = qipinfo.name\n if dbnetwork.network_type != qipinfo.network_type:\n self.logger.client_info(\"Setting network {0!s} type to {1}\"\n .format(dbnetwork, qipinfo.network_type))\n dbnetwork.network_type = qipinfo.network_type\n if dbnetwork.location != qipinfo.location:\n self.logger.client_info(\"Setting network {0!s} location to {1:l}\"\n .format(dbnetwork, qipinfo.location))\n dbnetwork.location = qipinfo.location\n if dbnetwork.side != qipinfo.side:\n self.logger.client_info(\"Setting network {0!s} side to {1}\"\n .format(dbnetwork, qipinfo.side))\n dbnetwork.side = qipinfo.side\n if dbnetwork.network_compartment != qipinfo.compartment:\n self.logger.client_info(\"Setting network {0!s} compartment to {1!s}\"\n .format(dbnetwork, qipinfo.compartment))\n dbnetwork.network_compartment = qipinfo.compartment\n\n if dbnetwork in self.session.dirty:\n updated = True\n\n old_rtrs = set(dbnetwork.router_ips)\n new_rtrs = set(qipinfo.routers)\n\n del_routers = []\n for router in dbnetwork.routers:\n if router.ip in old_rtrs - new_rtrs:\n del_routers.append(router)\n\n for router in del_routers:\n self.logger.client_info(\"Removing router {0:s} from \"\n \"{1:l}\".format(router.ip, dbnetwork))\n for dns_rec in router.dns_records:\n if dns_rec.is_unused:\n delete_dns_record(dns_rec)\n dbnetwork.routers.remove(router)\n updated = True\n\n for ip in new_rtrs - old_rtrs:\n self.add_router(dbnetwork, ip)\n updated = True\n\n if updated:\n self.plenaries.append(plenary)\n\n # TODO: add support for updating router locations\n\n return dbnetwork.netmask == qipinfo.address.netmask"
]
| [
"0.76064444",
"0.74541",
"0.7369837",
"0.69817394",
"0.6963334",
"0.6901382",
"0.69006497",
"0.682339",
"0.6764553",
"0.67619634",
"0.6689635",
"0.6650327",
"0.658505",
"0.658505",
"0.65725785",
"0.65587854",
"0.651825",
"0.6489945",
"0.6476724",
"0.6430147",
"0.6420457",
"0.64091825",
"0.6382478",
"0.6357081",
"0.6342277",
"0.62963635",
"0.62246186",
"0.6190512",
"0.61896956",
"0.61749774"
]
| 0.94251615 | 0 |
Validate that required_cols are in self.frame | def validate(self):
super().validate()
frame = getattr(self, 'frame', None)
if frame is None:
raise ValueError('Missing columns %s since no frame' % ', '.join(
self.required_cols))
cols = set(list(self.frame))
missing = sorted(self.required_cols - cols)
if missing:
raise ValueError('Missing columns: [%s]' % ', '.join(missing)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cols_valid(self,\n df: pd.DataFrame,\n req_cols: set) -> bool:\n missing_cols = req_cols.difference(df.columns)\n\n if len(missing_cols) > 0:\n logging.error(f\"{missing_cols} columns required but missing\")\n return False\n\n return True",
"def _validate_cols(cols):\n\n\tif cols is not None and len(cols) < 2:\n\t\traise ValueError('too few features')",
"def validate_required_fields(dataframe):\n\n if dataframe is None:\n raise ValueError(\"It was not provided a valid Dataframe.\")",
"def validate(self):\n super().validate()\n frame = getattr(self, 'frame', None)\n if frame is None:\n raise ValueError('Missing columns %s since no frame' % ', '.join(\n [c[0] for c in self.col_regexps]))\n for col_name, c_re in self.col_regexps:\n if col_name not in self.frame:\n raise ValueError('Missing required column %s' % col_name)\n if c_re is None:\n continue # Just verified that column is present\n c_re_c = re.compile(c_re)\n for date, item in self.frame[col_name].iteritems():\n if not c_re_c.match(item):\n raise ValueError(\n 'In column %s, index %s, item %s fails regexp %s' % (\n col_name, date, item, c_re))",
"def _check_missing_columns(self, df: pd.DataFrame) -> None:\n if any([c not in df.columns for c in REQUIRED_COLUMNS]):\n raise ValueError(\"Missing columns in dataset.\"\n f\"Columns: {df.columns}\"\n f\"Required: {REQUIRED_COLUMNS}\")",
"def _check_required_columns(self, param_df, var_name='param_mean'):\n\n if param_df is None:\n return pd.DataFrame()\n\n try:\n if param_df.shape[0] == 0:\n return pd.DataFrame(columns=list(set(self.required_columns[var_name])|set(param_df.columns)))\n\n if self.required_columns[var_name] - set(param_df.columns) == set([]): # df has required cols.\n return param_df\n else:\n note = \"'{}' must be a pd.DataFrame with the following column names: \".format(var_name) + \\\n _list_the_errors(self.required_columns[var_name] - set(param_df.columns)) + \".\"\n raise ValueError(note)\n except KeyError:\n raise KeyError(\"'{}' is not supported\".format(var_name))",
"def _dataframe_column_check(df: DataFrame, compulsory_columns: Sequence) -> None:\n if not set(compulsory_columns).issubset(df.columns):\n diff = set(compulsory_columns).difference(df.columns)\n msg = (\n \"The following compulsory column(s) are missing from the \"\n f\"DataFrame: {diff}\"\n )\n raise ValueError(msg)",
"def validate_column_names(self, cols):\n self.stdout.write('Verifying CSV header')\n csv_cols = set(cols)\n if self.required_csv_columns <= csv_cols:\n return True\n else:\n missing_cols = set(self.required_csv_columns).difference(csv_cols)\n raise ValidationError(\n \"These columns '{0}' are required, but missing in the CSV \"\n \"file.\".format(\n ', '.join(missing_cols)\n )\n )",
"def _validate_data(df):\n if constants.IMAGE_URI_KEY not in df.columns:\n # or label_col not in df.columns:\n raise AttributeError(\n 'DataFrame must contain image_uri column {}.')\n if constants.LABEL_KEY not in df.columns:\n raise AttributeError(\n 'DataFrame must contain label column.')\n if constants.SPLIT_KEY not in df.columns:\n raise AttributeError(\n 'DataFrame must contain split column.')\n if list(df.columns) != constants.IMAGE_CSV_COLUMNS:\n raise AttributeError(\n 'DataFrame column order must be {}'.format(\n constants.IMAGE_CSV_COLUMNS))",
"def validate_col_lst(self, df, columns_lst):\n if columns_lst == []:\n raise ValueError(\"column_lst is empty\")\n col_set = set(columns_lst)\n df_col_set = set(list(df))\n if col_set - df_col_set != set():\n msg = \"col_lst has columns name that does not exists in the DataFrame columns:{}\".format(\n str(col_set - df_col_set))\n print(msg)\n raise ValueError(msg)\n return True",
"def is_cols_valid(bd):\n for col in cols:\n seen = []\n for num in nums:\n if bd[col[num]] == \" \":\n continue\n elif bd[col[num]] not in seen:\n seen += [bd[col[num]]]\n else:\n return False\n else:\n continue\n return True",
"def check_ingress_required_columns(self, col_names):\n if not set(col_names).issuperset(REQUIRED_COLUMNS):\n if not set(col_names).issuperset(REQUIRED_ALT_COLUMNS):\n missing_columns = [x for x in REQUIRED_ALT_COLUMNS if x not in col_names]\n return missing_columns\n return None",
"def _validate_columns(self, names):\n if not is_list_like(names):\n raise ValueError(\"Columns should be list-like\")\n\n if len(set(names)) != len(names):\n raise ValueError(\"Duplicate column names\")\n\n if self._data and len(names) != len(self._data[0]):\n raise ValueError(\"Invalid columns length\")",
"def _validate(self, obj):\n assert (self._confidence in obj.columns and self._predicted in obj.columns\n and self._groundtruth in obj.columns), \\\n \"Must at least have '%s', '%s' and '%s' columns.\" \\\n % (self._confidence, self._predicted, self._groundtruth)\n assert len(obj['groundtruth']) == len(obj['predicted']) == len(obj['confidence']), \\\n \"Dataframe columns are inconsistent \"\n\n if len(obj.index) < 2:\n self._logger.fatal(\"Stored procedure returned empty dataframe\")\n raise RuntimeError(\"Stored procedure returned empty dataframe\")\n\n self._logger.debug(obj.head)",
"def _these_columns_cannot_annotate_exp_cons(self):\n _cols = set([]) #\n for param_name, req_cols in self.required_columns.items():\n _cols |= req_cols\n\n return _cols | self.other_useful_columns",
"def validate_input(self):\n self._validate_limits_cols_prefixed()\n self._validate_fillna_cols_prefixed()\n self._validate_ratio_input()",
"def test_frame_invalid_column(self):\n with self.assertRaises(ValueError):\n self.frame.take(100, columns=['not_in'])",
"def _check_columns(df: pd.DataFrame, names: typing.Sequence[str]) -> None:\n for expected in names:\n if expected not in df.columns:\n raise ValueError(f\"'{expected}' column not found in input\")\n return",
"def _validate_columns(data, ip_column, lat_column, long_column, other_columns):\n if not ip_column and not (lat_column and long_column):\n raise ValueError(\n \"Data must have either an IpAddress ('ip_column')\",\n \"or latitude ('lat_column') and longitude ('long_column')\",\n )\n param_cols: List[str] = []\n for param in other_columns:\n if not param:\n continue\n if isinstance(param, list):\n param_cols.extend(param)\n else:\n param_cols.append(param)\n missing_columns = {col for col in param_cols if col not in data.columns}\n if missing_columns:\n raise LookupError(\n \"The following columns are not in the supplied DataFrame\",\n \",\".join(f\"'{col}'\" for col in missing_columns),\n )",
"def _validate_self(self):\n self._validate_columns(self._columns)\n\n if self._data:\n head = self._data[0]\n if len(head) != len(self._columns):\n raise ValueError(\"Columns length does not match data\")",
"def _validate_plaincolumns(self):\n\n # assert tuples for plaincolumns and plaincolumns to be PlainColumn\n if not isinstance(self.plaincolumns, tuple):\n raise ValueError(\"PlainFrame was instantiated incorrectly. \"\n \"`plaincolumns` needs to be of type `tuple`. \"\n \"However, {} was encountered. Please use \"\n \"`PlainFrame.from_plain` instead for convenient \"\n \"instantiation and proper type casts.\"\n .format(type(self.plaincolumns)))\n\n not_plaincolumn = [type(column)\n for column in self.plaincolumns\n if not isinstance(column, PlainColumn)]\n\n if not_plaincolumn:\n raise ValueError(\"PlainFrame was instantiated incorrectly. \"\n \"Elements of `plaincolumns` needs to be of type \"\n \"`PlainColumn`. However, {} was encountered. \"\n \"Please use `PlainFrame.from_plain` instead for \"\n \"convenient instantiation and proper type casts.\"\n .format(not_plaincolumn))\n\n # assert equal number of values per column\n row_lenghts = {len(column.values) for column in self.plaincolumns}\n if len(row_lenghts) > 1:\n raise ValueError(\"Input data has varying number of values per \"\n \"column. Please check provided input data.\")\n\n # assert unique column names\n duplicates = {x for x in self.columns if self.columns.count(x) > 1}\n if duplicates:\n raise ValueError(\"Duplicated column names encountered: {}. \"\n \"Please use unique column names.\"\n .format(duplicates))",
"def _validate_guidelines(self):\n target_column = G.Env.target_column\n id_column = G.Env.id_column\n train_dataset = G.Env.train_dataset.copy()\n\n self.feature_selector = self.feature_selector or train_dataset.columns.values\n restricted_cols = [_ for _ in target_column + [id_column] if _ is not None]\n self.feature_selector = [_ for _ in self.feature_selector if _ not in restricted_cols]",
"def assert_check_bounds_column(self):\n value = logic.check_bounds_column(config.NR_COLS-1)\n self.assertTrue(value)\n value = logic.check_bounds_column(config.NR_COLS)\n self.assertFalse(value)\n value = logic.check_bounds_column(config.NR_COLS+1)\n self.assertFalse(value)",
"def checkcolumnstest(chosen_columns, chosen_df):\n if not all([item in chosen_columns for item in chosen_df.columns]):\n raise ValueError('Columns do not match')",
"def validate_columns(self, fieldnames, dao):\n unstored_columns = ['blank']\n expected_columns = dao.model_type.__table__.columns.keys() + unstored_columns\n for column_name in fieldnames:\n if column_name not in expected_columns:\n raise AttributeError(f\"{self.file_path}: {column_name} column mismatch for \"\n f\"expected file type: {self.file_type.name}\")",
"def __validate_inputs(self):\n if self.train_df is None:\n raise ValueError(\"Dataframe cannot be null\")\n\n if (\n self.test_df is not None\n and self.train_df.shape[1] != self.test_df.shape[1]\n ):\n raise KeyError(\n \"Target variable in still present in one of the datasets or\"\n \" the number of columns in both test and train are not equal.\"\n )\n\n # target_label should not be in list of columns\n if self.target_label is None:\n warnings.warn(\n \"Parameter 'target_label' is empty. If not provided and is present in dataframe, it may get encoded. \"\n \"To mitigate, provide the target_label from dataframe or provide explicit list of columns for encoding \"\n \"via the 'cat_cols' parameter\",\n UserWarning,\n )\n if (\n self.target_label is not None\n and self.cat_cols is not None\n and (self.target_label in self.cat_cols)\n ):\n raise ValueError(\n f\"Target column: {self.target_label} will be encoded. Remove it from cat_cols if in there.\"\n )\n\n if self.ord_dict is not None:\n for key, mapping in self.ord_dict.items():\n if mapping is None or mapping == {}:\n raise ValueError(\n f\"Expected a weight mapping for ordinal column {key}.\"\n f\" Received {self.ord_dict[key]}\"\n )",
"def verify_columns_in_dataset(self, columns):\n all_cols = self.dataset.columns\n for col in columns:\n if not col in all_cols:\n raise KeyError(\"column '%s' not in dataset\" % col)",
"def _validateRowCol(self, rows, cols, numRow, numCol, dvName):\n if rows is not None:\n rowArr = np.array(rows)\n if np.max(rowArr) > numRow:\n raise Error(\n \"Design variable \"\n + dvName\n + \" slice out of bounds. \"\n + \"Design var has \"\n + str(numRow)\n + \" rows and index up to \"\n + str(np.max(rowArr))\n + \" was specified: \"\n + str(rows)\n )\n if np.min(rowArr) < 1:\n raise Error(\n \"Design variable \"\n + dvName\n + \" slice out of bounds. \"\n + \"Row index less than 1 specified: \"\n + str(rows)\n )\n if len(rows) != len(set(rows)):\n # duplicates\n raise Error(\"Duplicate indices specified in the rows of design variable \" + dvName + \": \" + str(rows))\n\n if cols is not None:\n colArr = np.array(cols)\n if np.max(colArr) > numCol:\n raise Error(\n \"Design variable \"\n + dvName\n + \" slice out of bounds. \"\n + \"Design var has \"\n + str(numCol)\n + \" cols and index up to \"\n + str(np.max(colArr))\n + \" was specified: \"\n + str(cols)\n )\n if np.min(colArr) < 1:\n raise Error(\n \"Design variable \"\n + dvName\n + \" slice out of bounds. \"\n + \"col index less than 1 specified: \"\n + str(cols)\n )\n if len(cols) != len(set(cols)):\n # duplicates\n raise Error(\"Duplicate indices specified in the cols of design variable \" + dvName + \": \" + str(cols))",
"def check_cols(self):\n if self.ad_tab is not None and 'date' not in self.ad_cols:\n raise DataException(\"\"\"date column not found in adServer table.\"\"\")\n if self.ad_tab is not None and 'impressions' not in self.ad_cols:\n raise DataException(\"\"\"impressions column not found in adServer table.\"\"\")\n if 'timestamp' not in self.log_cols and 'date' not in self.log_cols:\n raise DataException(\"\"\"Both timestamp and date column missing from {t}\nCannot do dailyQA\"\"\".format(t=self.log_tab))\n if self.configs['hourshift'] != 0 or 'date' not in self.log_cols:\n if 'timestamp' not in self.log_cols:\n raise DataException(\"\"\"Time shift requested \\\nbut no timestamp column in {t}.\"\"\".format(t=self.log_tab))\n else:\n check_timestamp(self.configs['schema'], self.log_tab)",
"def _validate_ratio_cols_exist(self):\n\n for col in self._ratio_cols:\n no_prefix_name = \"_\".join(col.split('_')[1:])\n if not self.data.contains_col(no_prefix_name):\n msg = (\"Input ratios column {!r} not found in either meta \"\n \"data! Please check the input files {!r} and {!r}\")\n e = msg.format(no_prefix_name, self.data.solar_fpath,\n self.data.wind_fpath)\n logger.error(e)\n raise FileInputError(e)"
]
| [
"0.7897676",
"0.7564741",
"0.7366434",
"0.7349003",
"0.7240517",
"0.7036314",
"0.70306194",
"0.69790363",
"0.68797225",
"0.6794021",
"0.66460437",
"0.6629376",
"0.66237783",
"0.65535456",
"0.65107137",
"0.6479833",
"0.64632124",
"0.646052",
"0.6442498",
"0.64324087",
"0.6413576",
"0.64062953",
"0.6377319",
"0.63746154",
"0.63484746",
"0.62892675",
"0.6246445",
"0.6243278",
"0.62198925",
"0.62179184"
]
| 0.8550042 | 0 |
read calibration file, returns -> dict: calibration matrices as 4x4 numpy arrays | def read_calib_file(filename):
calib = {}
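    # The triple-quoted block below is a disabled, hard-coded set of example
    # extrinsics kept for reference; the poses are parsed from the file instead.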
"""calib1 = np.eye(4,4)
calib1[0:3, 3] = [0.27, 0.0, -0.08]
print(calib1)
calib.append(calib1)
calib2 = np.eye(4,4)
calib2[0:3, 3] = [0.27, -0.51, -0.08]
print(calib2)
calib.append(calib2)
calib3 = np.eye(4,4)
calib3[0:3, 3] = [0.27, 0.06, -0.08]
print(calib3)
calib.append(calib3)
calib4 = np.eye(4,4)
calib4[0:3, 3] = [0.27, -0.45, -0.08]
print(calib4)
calib.append(calib4)"""
    with open(filename) as calib_file:
        for line in calib_file:
            key, content = line.strip().split(":")
            values = [float(v) for v in content.strip().split()]
            # each line holds a 3x4 row-major matrix; embed it in a homogeneous 4x4 pose
            pose = np.zeros((4, 4))
            pose[0, 0:4] = values[0:4]
            pose[1, 0:4] = values[4:8]
            pose[2, 0:4] = values[8:12]
            pose[3, 3] = 1.0
            calib[key] = pose
return calib | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_calib_file(self, filepath):\n data = {}\n with open(filepath, 'r') as f:\n for line in f.readlines():\n values = line.split()\n # The only non-float values in these files are dates, which\n # we don't care about anyway\n try:\n data[values[0]] = np.array(\n [float(x) for x in values[1:]]).reshape(3, 4)\n except ValueError:\n pass\n return data",
"def read_calib_file(calib_path):\n data = {}\n with open(calib_path, 'r') as f:\n for line in f.readlines():\n if not line or line == \"\\n\":\n continue\n key, value = line.split(':', 1)\n try:\n data[key] = np.array([float(x) for x in value.split()])\n except ValueError:\n pass\n return data",
"def read_calib_file(calib_path):\n data = {}\n with open(calib_path, 'r') as f:\n for line in f.readlines():\n if not line or line == \"\\n\":\n continue\n key, value = line.split(':', 1)\n try:\n data[key] = np.array([float(x) for x in value.split()])\n except ValueError:\n pass\n return data",
"def load_calib(self):\n # We'll build the calibration parameters as a dictionary, then\n # convert it to a namedtuple to prevent it from being modified later\n data = {}\n\n # Load the calibration file\n calib_filepath = os.path.join(self.sequence_path, 'calib.txt')\n filedata = utils.read_calib_file(calib_filepath)\n\n # Create 3x4 projection matrices\n P_rect_00 = np.reshape(filedata['P0'], (3, 4))\n P_rect_10 = np.reshape(filedata['P1'], (3, 4))\n P_rect_20 = np.reshape(filedata['P2'], (3, 4))\n P_rect_30 = np.reshape(filedata['P3'], (3, 4))\n\n # Compute the rectified extrinsics from cam0 to camN\n T1 = np.eye(4)\n T1[0, 3] = P_rect_10[0, 3] / P_rect_10[0, 0]\n T2 = np.eye(4)\n T2[0, 3] = P_rect_20[0, 3] / P_rect_20[0, 0]\n T3 = np.eye(4)\n T3[0, 3] = P_rect_30[0, 3] / P_rect_30[0, 0]\n\n # Compute the velodyne to rectified camera coordinate transforms\n data['T_cam0_velo'] = np.reshape(filedata['Tr'], (3, 4))\n data['T_cam0_velo'] = np.vstack([data['T_cam0_velo'], [0, 0, 0, 1]])\n data['T_cam1_velo'] = T1.dot(data['T_cam0_velo'])\n data['T_cam2_velo'] = T2.dot(data['T_cam0_velo'])\n data['T_cam3_velo'] = T3.dot(data['T_cam0_velo'])\n\n # Compute the camera intrinsics\n data['K_cam0'] = P_rect_00[0:3, 0:3]\n data['K_cam1'] = P_rect_10[0:3, 0:3]\n data['K_cam2'] = P_rect_20[0:3, 0:3]\n data['K_cam3'] = P_rect_30[0:3, 0:3]\n\n # Compute the stereo baselines in meters by projecting the origin of\n # each camera frame into the velodyne frame and computing the distances\n # between them\n p_cam = np.array([0, 0, 0, 1])\n p_velo0 = np.linalg.inv(data['T_cam0_velo']).dot(p_cam)\n p_velo1 = np.linalg.inv(data['T_cam1_velo']).dot(p_cam)\n p_velo2 = np.linalg.inv(data['T_cam2_velo']).dot(p_cam)\n p_velo3 = np.linalg.inv(data['T_cam3_velo']).dot(p_cam)\n\n data['b_gray'] = np.linalg.norm(p_velo1 - p_velo0) # gray baseline\n data['b_rgb'] = np.linalg.norm(p_velo3 - p_velo2) # rgb baseline\n\n self.calib = namedtuple('CalibData', data.keys())(*data.values())",
"def read_calib_file(self, filepath):\n data = {}\n with open(filepath, 'r') as f:\n for line in f.readlines():\n line = line.rstrip()\n if len(line) == 0: continue\n key, value = line.split(':', 1)\n # The only non-float values in these files are dates, which\n # we don't care about anyway\n try:\n data[key] = np.array([float(x) for x in value.split()])\n except ValueError:\n pass\n return data",
"def read_calib_file(filepath):\n data = {}\n\n with open(filepath, 'r') as f:\n for line in f.readlines():\n key, value = line.split(':', 1)\n # The only non-float values in these files are dates, which\n # we don't care about anyway\n try:\n data[key] = np.array([float(x) for x in value.split()])\n except ValueError:\n pass\n\n return data",
"def LoadCalibration(self, fname):\n\n output = [{}] * N_ChanUIDS # for i in range(N_ChanUIDS)]\n\n # Load file\n with open(fname, \"r\") as f:\n pass",
"def _read_calibration_params(self) -> np.ndarray:\n print('Loading calibration parameters...')\n cameras_data = []\n\n for c in range(self.num_cameras):\n camera = 'camera' + str(c).zfill(2) + '.json'\n print(' ', camera+'...')\n with open(os.path.join(self.cameras_dir, camera)) as f:\n data = json.load(f)\n\n # # Store data for each frame in numpy array\n # camera_params = np.empty(0)\n # for d in data:\n # frames = d['end_frame'] - d['start_frame']\n # del d['start_frame']\n # del d['end_frame']\n # cam = np.full(frames, d)\n # camera_params = np.append(camera_params, cam, axis=0)\n #\n cameras_data.append(data)\n return np.array(cameras_data, dtype=object)",
"def read_calib_file(path):\n float_chars = set(\"0123456789.e+- \")\n data = {}\n with open(path, 'r') as f:\n for line in f.readlines():\n key, value = line.split(':', 1)\n value = value.strip()\n data[key] = value\n if float_chars.issuperset(value):\n # try to cast to float array\n try:\n data[key] = np.array(list(map(float, value.split(' '))))\n except ValueError:\n # casting error: data[key] already eq. value, so pass\n pass\n return data",
"def read_calib_file(path):\n float_chars = set(\"0123456789.e+- \")\n data = {}\n with open(path, 'r') as f:\n for line in f.readlines():\n key, value = line.split(':', 1)\n value = value.strip()\n data[key] = value\n if float_chars.issuperset(value):\n # try to cast to float array\n try:\n data[key] = np.array(list(map(float, value.split(' '))))\n except ValueError:\n # casting error: data[key] already eq. value, so pass\n pass\n\n return data",
"def v2calib2sections(filename):\n\n from xfel.cftbx.detector.cspad_cbf_tbx import read_slac_metrology\n from scitbx.matrix import sqr\n from xfel.cxi.cspad_ana.cspad_tbx import pixel_size\n\n # metro is a dictionary where the keys are levels in the detector\n # hierarchy and the values are 'basis' objects\n metro = read_slac_metrology(filename)\n\n # 90 degree rotation to get into same frame\n reference_frame = sqr((0,-1, 0, 0,\n 1, 0, 0, 0,\n 0, 0, 1, 0,\n 0, 0, 0, 1))\n\n d = 0\n d_basis = metro[(d,)]\n\n sections = []\n for q in range(4):\n sections.append([])\n q_basis = metro[(d,q)]\n for s in range(8):\n if not (d,q,s) in metro:\n continue\n\n s_basis = metro[(d,q,s)]\n\n # collapse the transformations from the detector center to the quadrant center\n # to the sensor center\n transform = reference_frame * \\\n d_basis.as_homogenous_transformation() * \\\n q_basis.as_homogenous_transformation() * \\\n s_basis.as_homogenous_transformation()\n\n # an homologous transformation is a 4x4 matrix, with a 3x3 rotation in the\n # upper left corner and the translation in the right-most column. The last\n # row is 0,0,0,1\n ori = sqr((transform[0],transform[1],transform[2],\n transform[4],transform[5],transform[6],\n transform[8],transform[9],transform[10]))\n angle = ori.r3_rotation_matrix_as_x_y_z_angles(deg=True)[2]\n\n # move the reference of the sensor so its relative to the upper left of the\n # detector instead of the center of the detector\n center = (1765/2)+(transform[3]/pixel_size),(1765/2)+(transform[7]/pixel_size)\n\n sections[q].append(Section(angle, center))\n\n return sections",
"def read_kitti_calib(filename):\n\n with open(filename) as f:\n for line in f:\n data = line.split(' ')\n if data[0] == 'P2:':\n calib_P2 = np.array([float(x) for x in data[1:13]])\n calib_P2 = calib_P2.reshape(3, 4)\n return _extend_matrix(calib_P2)\n\n raise Exception(\n 'Could not find entry for P2 in calib file {}'.format(filename))",
"def loadCameraCalibration(self, file_name=None):\n\n mat_str = []\n if file_name == None:\n file_str = \"/home/student/armlab-w20/util/calibration.cfg\"\n else:\n file_str = file_name\n with open(file_str, 'r') as f:\n for line in f:\n line = line.replace('[', '')\n line = line.replace(']', '')\n line = line.replace('\\n', '')\n mat_str.append(line)\n cam_mat_str = mat_str[1:4]\n dist_coeffs = mat_str[-2:]\n dist_coeffs = \"\".join(dist_coeffs)\n dist_coeffs = dist_coeffs.split()\n dist_coeffs = [float(coeff) for coeff in dist_coeffs]\n self.cam_int_mat = []\n for row in cam_mat_str:\n mat_row = []\n mat_row = row.split()\n mat_row = [float(i) for i in mat_row]\n self.cam_int_mat.append(mat_row)\n self.cam_int_mat = np.asarray(self.cam_int_mat)\n self.dist_coeffs = np.asarray(dist_coeffs)",
"def read_dict(path):\n\n # Open the dataset\n miriad_data = aipy.miriad.UV(path)\n\n # Construct the set of frequency channels (in GHz)\n nfreq = miriad_data['nchan']\n delta_freq = miriad_data['sdf'] # GHz\n sfreq = miriad_data['sfreq'] # GHz\n freq = np.arange(nfreq) * delta_freq + sfreq\n\n # TODO: should generalise this to select other polarisation types\n miriad_data.select('polarization', -8, -5, include=True)\n miriad_data.select('polarization', -7, -5, include=True)\n miriad_data.select('polarization', -6, -5, include=True)\n miriad_data.select('polarization', -5, -5, include=True)\n\n miriad_data.rewind()\n\n data, mask, times, lengths, uvw, ant, pol = [], [], [], [], [], [], []\n\n # Iterate over all entries in MIRIAD dataset and pull out their useful\n # quantities\n for pream, data_row, mask_row in miriad_data.all(raw=True):\n\n # Ensure that data arrays are of the correct type\n data_row = data_row.astype(np.complex64)\n mask_row = mask_row.astype(np.bool)\n\n # Unpack co-ordinates\n uvw_row, t, ant_row = pream\n pp = aipy.miriad.pol2str[miriad_data['pol']]\n\n # Append this rows data to the global set\n lengths.append(len(data))\n times.append(t)\n ant.append(ant_row)\n uvw.append(uvw_row)\n data.append(data_row)\n mask.append(mask_row)\n pol.append(pp)\n\n data_dict = {\n 'data': np.array(data),\n 'mask': np.array(mask),\n 'time': np.array(times),\n 'length': np.array(lengths),\n 'uvw': np.array(uvw),\n 'ant': np.array(ant),\n 'pol': np.array(pol),\n 'freq': freq\n }\n\n return data_dict",
"def loadCameraCalibration(self):\n\n # Read calibration.csv\n with open(\"util/calibration.csv\", 'rb') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=\",\", quotechar=\"|\")\n tmp = []\n intrinsic_matrix = []\n distort_coef = []\n i = 0\n for row in csvreader:\n for col in row:\n try:\n tmp.append(float(col))\n except:\n print(\"ERROR in calibration.csv intrinsic matrix\")\n if(i!=3):\n intrinsic_matrix.append(tmp)\n i += 1\n tmp = []\n if(i==3):\n distort_coef = tmp\n tmp = []\n \n return intrinsic_matrix, distort_coef",
"def load_calibration(file_path=\"calibration_info\" ):\n info_file = Path(file_path)\n if info_file.is_file():\n with open(file_path, 'rb') as infile:\n payload = pickle.load(infile)\n mtx = payload['matrix']\n dist_coeff = payload['dist_coeff']\n return mtx, dist_coeff\n else:\n warnings.warn(\"File {} do not exist! Try to recalibrate\".format(file_path), UserWarning)\n mtx, dist_coeff = CalibrationUtils.calibrate()\n CalibrationUtils.save_calibration(mtx, dist_coeff, file_path)\n return mtx, dist_coeff",
"def extract_calibration(self):\n #TODO add function to check if the folder exists because opencv points to other error rather than saying it doesnt exist\n cv_file = cv2.FileStorage(\"calib_images/calibration.yaml\", cv2.FILE_STORAGE_READ)\n camera_matrix = cv_file.getNode(\"camera_matrix\").mat()\n dist_matrix = cv_file.getNode(\"dist_coeff\").mat()\n print(\"[INFO]: Extracted camera parameters.\")\n cv_file.release()\n return camera_matrix, dist_matrix",
"def read_calib_file(self, velo_to_cam, cam_to_cam):\n data = {}\n data2 = {}\n\n data_new = {}\n\n \n with open(cam_to_cam, 'r') as f:\n for line in f.readlines():\n line = line.rstrip()\n if len(line) == 0: continue\n key, value = line.split(':', 1)\n # The only non-float values in these files are dates, which\n # we don't care about anyway\n try:\n data[key] = np.array([float(x) for x in value.split()])\n except ValueError:\n pass\n \n with open(velo_to_cam, 'r') as f:\n for line in f.readlines():\n line = line.rstrip()\n if len(line) == 0: continue\n key, value = line.split(':', 1)\n # The only non-float values in these files are dates, which\n # we don't care about anyway\n try:\n data2[key] = np.array([float(x) for x in value.split()])\n except ValueError:\n pass\n \"\"\"\n data3 = {}\n with open(imu_to_velo, 'r') as f:\n for line in f.readlines():\n line = line.rstrip()\n if len(line) == 0: continue\n key, value = line.split(':', 1)\n # The only non-float values in these files are dates, which\n # we don't care about anyway\n try:\n data3[key] = np.array([float(x) for x in value.split()])\n except ValueError:\n pass\n \"\"\"\n insert = np.insert(data2['R'], [3], data2['T'][0]) \n insert = np.insert(insert, [7], data2['T'][1]) \n Tr_velo_to_cam = np.insert(insert, [11], data2['T'][2]) \n # insert = np.insert(data3['R'], [3], data3['T'][0]) \n # insert = np.insert(insert, [7], data3['T'][1]) \n # Tr_imu_to_velo = np.insert(insert, [11], data3['T'][2]) \n # data_new['Tr_imu_to_velo'] = Tr_imu_to_velo\n data_new['Tr_velo_to_cam'] = Tr_velo_to_cam\n data_new['P0'] = data['P_rect_00']\n data_new['P1'] = data['P_rect_01']\n data_new['P2'] = data['P_rect_02']\n data_new['P3'] = data['P_rect_03']\n data_new['R0_rect'] = data['R_rect_00']\n return data_new",
"def file2dic(result_file):\n \n #### Read file\n \n fic=open(result_file,'rt')\n lines=fic.readlines()\n fic.close()\n \n #### Feed structure\n \n dic_result={key_val:[] for key_val in lines[0].split()}\n keys=[key_val for key_val in lines[0].split()]\n param_keys=keys[4:]\n \n logging.info('Parameters in file %s'%(str(param_keys)))\n \n ### Start loop\n for line in lines[1:]:\n kk=0\n for key in dic_result:\n value=line.split()[kk]\n if key!='MODEL_KEY':\n value=float(value)\n \n dic_result[key]=np.append(dic_result[key],value)\n \n kk=kk+1\n \n return dic_result",
"def _read(self):\n # initializng data dictionary\n self.data={}\n\n f = FortranFile(self.filename)\n # Default omnivor binary header\n self.data['MK'] = f.readInts('i')\n self.data['itime'] = f.readInts('i')\n self.data['version'] = f.readString()\n self.data['file_id'] = f.readInts('i')\n self.data['sversion'] = f.readString()\n # Velocity field\n self.data['stype'] = f.readString()\n self.data['is_grid'] = f.readInts('i')\n nCPs = f.readInts('i')\n self.data['nCPs'] = nCPs\n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n #print('File is a velocity grid file')\n n1 = f.readInts('i')\n n2 = f.readInts('i')\n n3 = f.readInts('i')\n self.data['n1'] = n1\n self.data['n2'] = n2\n self.data['n3'] = n3\n self.data['is_straight'] = f.readInts('i')\n self.data['v1'] = f.readReals(real_char)\n self.data['v2'] = f.readReals(real_char)\n self.data['v3'] = f.readReals(real_char)\n\n CPs_raw = f.readReals(real_char)\n Utot_raw = f.readReals(real_char)\n CPs = np.reshape(CPs_raw,(3,nCPs),order = 'F')\n Utot = np.reshape(Utot_raw,(3,nCPs),order = 'F')\n\n acc=-1\n CPsTab = np.zeros((3, n1,n2,n3))\n UtotTab = np.zeros((3, n1,n2,n3))\n # Reshaping the nasty way (this is natural order). \n for i in range(0,n1):\n for j in range(0,n2):\n for k in range(0,n3):\n acc=acc+1\n CPsTab[0:3,i,j,k] = CPs[0:3,acc]\n UtotTab[0:3,i,j,k] = Utot[0:3,acc]\n\n self.data['CPs'] = CPs\n self.data['CPsTab'] = CPsTab\n self.data['Utot'] = Utot\n self.data['UtotTab'] = UtotTab",
"def _read_calibration_data(self):\n #Declare global variables.\n global calT1\n global calT2\n global calT3\n global calP1\n global calP2\n global calP3\n global calP4\n global calP5\n global calP6\n global calP7\n global calP8\n global calP9\n global calP10\n global calH1\n global calH2\n global calH3\n global calH4\n global calH5\n global calH6\n global calH7\n global calGH1\n global calGH2\n global calGH3\n global calResHeatRange\n global calResHeatVal\n global calRangeSwErr\n\n #Temperature calibration.\n calT1 = self._read_2bytes_as_ushort_lsbfirst(self.BME680_T1_LSB_REG)\n calT2 = self._read_2bytes_as_short_lsbfirst(self.BME680_T2_LSB_REG)\n calT3 = self._read_register_1sbyte(self.BME680_T3_REG)\n\n #Pressure calibration.\n calP1 = self._read_2bytes_as_ushort_lsbfirst(self.BME680_P1_LSB_REG)\n calP2 = self._read_2bytes_as_short_lsbfirst(self.BME680_P2_LSB_REG)\n calP3 = self._read_register_1sbyte(self.BME680_P3_REG)\n calP4 = self._read_2bytes_as_short_lsbfirst(self.BME680_P4_LSB_REG)\n calP5 = self._read_2bytes_as_short_lsbfirst(self.BME680_P5_LSB_REG)\n calP6 = self._read_register_1sbyte(self.BME680_P6_REG)\n calP7 = self._read_register_1sbyte(self.BME680_P7_REG)\n calP8 = self._read_2bytes_as_short_lsbfirst(self.BME680_P8_LSB_REG)\n calP9 = self._read_2bytes_as_short_lsbfirst(self.BME680_P9_LSB_REG)\n calP10 = self._read_register_1ubyte(self.BME680_P10_REG)\n\n #Humidity calibration.\n calH1 = self._read_register_1ubyte(self.BME680_H1_MSB_REG) << 4 | (self._read_register_1ubyte(self.BME680_H1_LSB_REG) & 0x0F)\n calH2 = self._read_register_1ubyte(self.BME680_H2_MSB_REG) << 4 | ((self._read_register_1ubyte(self.BME680_H2_LSB_REG)) >> 4)\n calH3 = self._read_register_1sbyte(self.BME680_H3_REG)\n calH4 = self._read_register_1sbyte(self.BME680_H4_REG)\n calH5 = self._read_register_1sbyte(self.BME680_H5_REG)\n calH6 = self._read_register_1ubyte(self.BME680_H6_REG)\n calH7 = self._read_register_1sbyte(self.BME680_H7_REG)\n\n #Gas calibration.\n calGH1 = self._read_register_1sbyte(self.BME680_GH1_REG)\n calGH2 = self._read_2bytes_as_short_lsbfirst(self.BME680_GH2_LSB_REG)\n calGH3 = self._read_register_1sbyte(self.BME680_GH3_REG)\n\n #Heat calibration.\n calResHeatRange = (self._read_register_1ubyte(self.BME680_RES_HEAT_RANGE) & 0x30) / 16\n calResHeatVal = self._read_register_1sbyte(self.BME680_RES_HEAT_VAL)\n calRangeSwErr = (self._read_register_1sbyte(self.BME680_RANGE_SW_ERR) & 0xF0) / 16",
"def extract_calib_info(fname):\n\n # read in the text file\n f = open(fname, 'r')\n lines = f.readlines()\n\n # reading criteria\n k1 = 'fitting method'\n k2 = 'function evals'\n k3 = 'data points'\n k4 = 'Bayesian info crit'\n k5 = ' (' # calibrated parameters\n k6 = '(init' # calibrated parameters\n k7 = '+/-' # calibrated parameters\n k8 = ':' # calibrated parameters\n k9 = '(fixed' # calibrated parameters\n k10 = '==' # calibrated parameters\n\n # info to keep\n info = [e.split('=') if (k1 in e) else [e.split('=')[1]] if ((k2 in e) or\n (k3 in e) or (k4 in e)) else\n [(e.split(k6)[0].split(k5)[0].split(k7)[0].split(k8)[0]),\n (e.split(k6)[0].split(k5)[0].split(k7)[0].split(k8)[1]),\n e.split(k6)[0].split(k5)[0].split(k7)[1]] if (k7 in e) else\n [e.split(k6)[0].split(':')[0], e.split(k6)[0].split(':')[1], 'nan']\n if (k6 in e) else [e.split(k9)[0].split(':')[0],\n e.split(k9)[0].split(':')[1], 'nan']\n if (k9 in e) else [e.split(k10)[0].split(':')[0],\n e.split(k10)[0].split(':')[1], 'nan']\n if (k10 in e) else [''] for e in lines]\n\n # remove end lines and formatting issues\n info = [e.strip('\\n') for sub in info for e in sub if e != '']\n info = [e.replace(' ', '') if (':' in e) else e.strip() for e in info]\n\n # split into sublists containing each solver's info\n info = [list(sub) for e, sub in groupby(info, lambda x: k1 not in x) if e]\n\n return info",
"def _read_calibration_data(self):\n #Declare global variables.\n global calDig_T1\n global calDig_T2\n global calDig_T3\n global calDig_P1\n global calDig_P2\n global calDig_P3\n global calDig_P4\n global calDig_P5\n global calDig_P6\n global calDig_P7\n global calDig_P8\n global calDig_P9\n global calDig_H1\n global calDig_H2\n global calDig_H3\n global calDig_H4\n global calDig_H5\n global calDig_H6\n\n #Temperature calibration\n calDig_T1 = self._read_2bytes_as_ushort_lsbfirst(self.BME280_DIG_T1)\n calDig_T2 = self._read_2bytes_as_short_lsbfirst(self.BME280_DIG_T2)\n calDig_T3 = self._read_2bytes_as_short_lsbfirst(self.BME280_DIG_T3)\n\n #Pressure calibration\n calDig_P1 = self._read_2bytes_as_ushort_lsbfirst(self.BME280_DIG_P1)\n calDig_P2 = self._read_2bytes_as_short_lsbfirst(self.BME280_DIG_P2)\n calDig_P3 = self._read_2bytes_as_short_lsbfirst(self.BME280_DIG_P3)\n calDig_P4 = self._read_2bytes_as_short_lsbfirst(self.BME280_DIG_P4)\n calDig_P5 = self._read_2bytes_as_short_lsbfirst(self.BME280_DIG_P5)\n calDig_P6 = self._read_2bytes_as_short_lsbfirst(self.BME280_DIG_P6)\n calDig_P7 = self._read_2bytes_as_short_lsbfirst(self.BME280_DIG_P7)\n calDig_P8 = self._read_2bytes_as_short_lsbfirst(self.BME280_DIG_P8)\n calDig_P9 = self._read_2bytes_as_short_lsbfirst(self.BME280_DIG_P9)\n\n #Humidity calibration\n calDig_H1 = self._read_register_1sbyte(self.BME280_DIG_H1)\n calDig_H2 = self._read_2bytes_as_ushort_lsbfirst(self.BME280_DIG_H2)\n calDig_H3 = self._read_register_1sbyte(self.BME280_DIG_H3)\n calDig_H4 = (self._read_register_1sbyte(self.BME280_DIG_H4) << 4) | (self._read_register_1sbyte(self.BME280_DIG_H4 + 1) & 0xF)\n calDig_H5 = self._read_register_1sbyte((self.BME280_DIG_H5 + 1) << 4) | (self._read_register_1sbyte(self.BME280_DIG_H5) >> 4)\n calDig_H6 = self._read_register_1sbyte(self.BME280_DIG_H6)",
"def getCalibFromHeader(self):\n calibs = ['DARK', 'FLAT', 'PHAS', 'VISI', 'WAVE']\n fsus = ['FSUA', 'FSUB']\n channels = ['W', '1', '2', '3', '4', '5']\n try:\n self.fsu_calib = {}\n for fsu in fsus:\n for calib in calibs:\n self.fsu_calib[(fsu, calib)] = np.zeros((6,4))\n for k, chan in enumerate(channels):\n self.fsu_calib[(fsu, calib)][k,:] =\\\n self.read4num('OCS '+fsu+' K'+\\\n chan+calib)\n\n return True\n except:\n if self.verbose:\n print '*WARNING* there do not seem to be calibrations'\n return False",
"def load_sparse_matrix(self, filename):\n with open(filename) as data_file:\n data = json.load(data_file)\n values = data[\"values\"]\n print(\"JSON\")\n print(values)",
"def load_info():\n data = np.loadtxt(\"u_sol_meta.txt\", dtype=int)\n return data",
"def get_calibration_info():\n mjpeg_info_dict = redis_tools.get_dict(db,'mjpeg_info_dict')\n calibration_info = mct_introspection.get_homography_calibration_info()\n for camera in mjpeg_info_dict:\n if not camera in calibration_info:\n calibration_info[camera] = {'modified': ''}\n return calibration_info",
"def header(fpath):\n # If you want to change something, instead of overwriting a bug, add a new\n # key with the desired functionallity. This way, prior code doesn't break.\n # One can be very waste full with this function as it is fast anyways.\n\n\n ret = {}\n with open(fpath) as f:\n for line in f:\n if line[0] is not \"#\":\n break\n # Strip comment marker\n line = line[2:]\n name, value = line.split(\"=\")\n # Strip newline\n ret[name] = value[:-1]\n\n # To have some compatibility between spe veronica and viktor files,\n # we further unify some of the namings\n ret['gain'] = ret.get('Gain')\n\n exp_time = ret.get('ExposureTime [s]')\n if exp_time:\n ret['exposure_time'] = datetime.timedelta(seconds=float(exp_time))\n\n hbin = ret.get('HBin')\n if hbin:\n ret['hbin'] = {'ON': True}.get(value, False)\n\n cw = ret.get('Central-Wavelength')\n if cw:\n ret['central_wl'] = float(cw)\n\n vis_wl = ret.get('vis-Wavelength')\n if vis_wl:\n ret['vis_wl'] = float(vis_wl)\n\n syringe_pos = ret.get('Syringe Pos')\n if syringe_pos:\n ret['syringe_pos'] = int(syringe_pos)\n\n cursor = ret.get(\"Cursor\")\n if cursor:\n ret['cursor'] = tuple([int(elm) for elm in cursor.split('\\t')])\n\n x_mirror = ret.get('x-mirror')\n if x_mirror:\n ret['x_mirror'] = {'ON': True}.get(x_mirror, False)\n\n calib_coeff = ret.get('calib Coeff')\n if calib_coeff:\n ret['calib Coeff'] = tuple([float(elm) for elm in calib_coeff.split('\\t')])\n # Index 0 is actually central_wl during calibration,\n ret['calib_central_wl'] = ret['calib Coeff'][0]\n\n\n # For np.poly1d the calibration coefficents need to be in decreasing\n # order and no zero values are not allowed\n _cc = np.array(ret['calib Coeff'][1:])\n ret['calib_coeff'] = _cc[np.nonzero(_cc)][::-1]\n\n scan_start_time = ret.get('Scan Start time')\n if scan_start_time:\n ret['date'] = datetime.datetime.strptime(scan_start_time, '%d.%m.%Y %H:%M:%S')\n\n scan_stop_time = ret.get('Scan Stop time')\n if scan_stop_time:\n ret['date_stop'] = datetime.datetime.strptime(scan_stop_time, '%d.%m.%Y %H:%M:%S')\n\n timedelay = ret.get('Timedelay')\n if timedelay:\n ret['timedelay'] = np.array([int(elm) for elm in timedelay.split('\\t')])\n\n timedelay_pos= ret.get('Timedelay Pos')\n if timedelay_pos:\n ret['timedel_pos'] = np.array([int(elm) for elm in timedelay_pos.split('\\t')])\n\n return ret",
"def readSoft2Dict(softFileName,index=11):\n import gzip\n probe2Entrez = {}\n Flag = False\n if softFileName[-2:] == \"gz\":\n softHandle = gzip.open(softFileName,\"rt\")\n else:\n softHandle = open(softFileName,\"r\")\n softMatrix = softHandle.readlines()\n for line in softMatrix:\n line = line.split(\"\\t\")\n #if len(line[0]) <5 :\n # print(line[0].lower())\n if len(line) <= index:\n continue\n if Flag:\n #print(line)\n if line[0] in probe2Entrez.keys():\n probe2Entrez[line[0]].append(line)\n else:\n probe2Entrez[line[0]] = [line]\n if line[0].lower() == 'id':\n Flag = True\n multipleKeyList = []\n for key in probe2Entrez: #discard probs refer to multiple genes\n if len(probe2Entrez[key]) > 1:\n multipleKeyList.append(key)\n for key in multipleKeyList: #can't del keys of dictionary when iterating it\n del probe2Entrez[key]\n return probe2Entrez",
"def read_data():\n ADV_MAT = np.load('ADV.npy');\n ADJ_MAT = np.load('ADJ.npy');\n PR_MAT = np.load('PR.npy'); \n NN_MAT = np.load('NN.npy');\n for i in range(ADV_MAT.shape[0]):RUNNING_DATA['ADV___'+str(i)] = ADV_MAT[i];\n for i in range(ADJ_MAT.shape[0]):RUNNING_DATA['ADJ___'+str(i)] = ADJ_MAT[i];\n for i in range(PR_MAT.shape[0]):RUNNING_DATA['PR___'+str(i)] = PR_MAT[i];\n for i in range(NN_MAT.shape[0]):RUNNING_DATA['NN___'+str(i)] = NN_MAT[i];"
]
| [
"0.72249043",
"0.7208568",
"0.7208568",
"0.70669484",
"0.70456314",
"0.7041981",
"0.69114566",
"0.68411756",
"0.6807544",
"0.6807203",
"0.6558858",
"0.6471239",
"0.6442276",
"0.63747907",
"0.6323744",
"0.62730527",
"0.61834854",
"0.61613894",
"0.61245453",
"0.6097307",
"0.6078166",
"0.60493267",
"0.6040077",
"0.60049915",
"0.59834975",
"0.5942004",
"0.592525",
"0.590102",
"0.5897579",
"0.588349"
]
| 0.792746 | 0 |
Parse the model description string to a keras model builder. | def _parse_model(model: str, num_classes: int) -> Callable[[], tf.keras.Model]:
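  # Return a zero-argument constructor so the caller can build fresh Keras models on demand.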
if model == 'cnn':
keras_model_builder = functools.partial(
create_conv_dropout_model, num_classes=num_classes)
elif model in ['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']:
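    # Resolve the matching ResNet factory (e.g. resnet_models.create_resnet50) by name.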
keras_model_builder = functools.partial(
getattr(resnet_models, f'create_{model}'),
input_shape=(28, 28, 1),
num_classes=num_classes)
else:
raise ValueError(
'Cannot handle model flag [{!s}], must be one of {!s}.'.format(
model, _EMNIST_MODELS))
return keras_model_builder | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_model_description(model_description: str) -> ModelDescription:\n root = ET.fromstring(model_description)\n\n defaults = _get_attribute_default_values()\n\n # mandatory p.32\n fmi_version = root.get(\"fmiVersion\")\n model_name = root.get(\"modelName\")\n guid = root.get(\"guid\")\n # optional\n description = root.get(\"description\", default=\"\")\n author = root.get(\"author\", default=\"\")\n copyright = root.get(\"copyright\", default=\"\")\n version = root.get(\"version\", default=\"\")\n license = root.get(\"license\", default=\"\")\n generation_tool = root.get(\"generationTool\", default=\"\")\n generation_date_and_time = root.get(\"generationDateAndTime\", default=\"\")\n variable_naming_convention = root.get(\"variableNamingConvention\", default=\"flat\")\n numberOfEventIndicators = root.get(\"numberOfEventIndicators\", default=0)\n\n model_variables = []\n\n \"\"\" Iterate over model variables:\n <ScalarVariable name=\"real_a\" valueReference=\"0\" variability=\"continuous\" causality=\"input\">\n <Real start=\"0.0\" />\n </ScalarVariable>\n \"\"\"\n for scalarVariable in root.iter(\"ScalarVariable\"):\n\n causality = scalarVariable.get(\"causality\", default=\"local\")\n variability = scalarVariable.get(\"variability\", default=\"continuous\")\n\n initial = scalarVariable.get(\"initial\", default=None)\n # defaults of initial depend on causality and variablilty\n # the combinations lead to 5 different cases denoted A-E on p.50\n if initial is None:\n initial, _ = get_intitial_choices_and_default(causality, variability)\n\n var = list(scalarVariable)[0]\n start = var.get(\"start\", default=None)\n dataType = var.tag\n\n model_variables.append(\n ScalarVariable(\n name=scalarVariable.get(\"name\"),\n valueReference=scalarVariable.get(\"valueReference\"),\n variability=variability,\n causality=causality,\n description=scalarVariable.get(\"description\", default=\"\"),\n initial=initial,\n start=start,\n dataType=dataType,\n )\n )\n\n log_categories = []\n for category in root.iter(\"Category\"):\n log_categories.append(category.get(\"name\"))\n\n model_structure = []\n\n # cosimulation\n cosim_element = root.find(\"CoSimulation\")\n\n modelIdentifier = cosim_element.get(\"modelIdentifier\")\n needsExecutionTool = cosim_element.get(\n \"needsExecutionTool\", default=defaults[\"needsExecutionTool\"]\n )\n canHandleVariableCommunicationStepSize = cosim_element.get(\n \"canHandleVariableCommunicationStepSize\",\n default=defaults[\"canHandleVariableCommunicationStepSize\"],\n )\n canInterpolateInputs = cosim_element.get(\n \"canInterpolateInputs\", default=defaults[\"canInterpolateInputs\"]\n )\n maxOutputDerivativeOrder = cosim_element.get(\n \"maxOutputDerivativeOrder\", default=defaults[\"maxOutputDerivativeOrder\"]\n )\n canRunAsynchronuously = cosim_element.get(\n \"canRunAsynchronuously\", default=defaults[\"canRunAsynchronuously\"]\n )\n canBeInstantiatedOnlyOncePerProcess = cosim_element.get(\n \"canBeInstantiatedOnlyOncePerProcess\",\n default=defaults[\"canBeInstantiatedOnlyOncePerProcess\"],\n )\n canNotUseMemoryManagementFunctions = cosim_element.get(\n \"canNotUseMemoryManagementFunctions\",\n default=defaults[\"canNotUseMemoryManagementFunctions\"],\n )\n canGetAndSetFMUstate = cosim_element.get(\n \"canGetAndSetFMUstate\", default=defaults[\"canGetAndSetFMUstate\"]\n )\n canSerializeFMUstate = cosim_element.get(\n \"canSerializeFMUstate\", default=defaults[\"canSerializeFMUstate\"]\n )\n providesDirectionalDerivative = cosim_element.get(\n 
\"providesDirectionalDerivative\",\n default=defaults[\"providesDirectionalDerivative\"],\n )\n\n def xs_boolean(s):\n if s is None:\n return None\n if s in {\"false\", \"0\"}:\n return False\n elif s in {\"true\", \"1\"}:\n return True\n else:\n raise ValueError(f\"Unable to convert {s} to xsd boolean\")\n\n def xs_normalized_string(s: str):\n if s is None:\n return None\n if not s.isprintable():\n raise ValueError(r\"normalized string can not contain: \\n, \\t or \\r\")\n return s\n\n def xs_unsigned_int(s: str):\n if s is None:\n return None\n value = int(s)\n if value > 4294967295:\n raise ValueError(\"xs:unsingedInt cannot exceed the value 4294967295\")\n return value\n\n cosimulation = CoSimulation(\n modelIdentifier=modelIdentifier,\n needsExecutionTool=xs_boolean(needsExecutionTool),\n canHandleVariableCommunicationStepSize=xs_boolean(\n canHandleVariableCommunicationStepSize\n ),\n canInterpolateInputs=xs_boolean(canInterpolateInputs),\n maxOutputDerivativeOrder=xs_unsigned_int(maxOutputDerivativeOrder),\n canRunAsynchronuously=xs_boolean(canRunAsynchronuously),\n canBeInstantiatedOnlyOncePerProcess=xs_boolean(\n canBeInstantiatedOnlyOncePerProcess\n ),\n canNotUseMemoryManagementFunctions=xs_boolean(\n canNotUseMemoryManagementFunctions\n ),\n canGetAndSetFMUstate=xs_boolean(canGetAndSetFMUstate),\n canSerializeFMUstate=xs_boolean(canSerializeFMUstate),\n providesDirectionalDerivative=xs_boolean(providesDirectionalDerivative),\n )\n\n return ModelDescription(\n fmiVersion=fmi_version,\n modelName=model_name,\n guid=guid,\n author=author,\n description=description,\n version=version,\n copyright=copyright,\n logCategories=log_categories,\n license=license,\n generationTool=generation_tool,\n generationDateAndTime=generation_date_and_time,\n variableNamingConvention=variable_naming_convention,\n numberOfEventIndicators=numberOfEventIndicators,\n CoSimulation=cosimulation,\n modelVariables=model_variables,\n modelStructure=model_structure,\n )",
"def buildModel(model_name):\n if model_name == \"resnet50\":\n model = kapp.resnet50.ResNet50(weights=\"imagenet\", include_top=False)\n return model, kapp.resnet50.preprocess_input\n elif model_name == \"vgg16\":\n model = kapp.vgg16.VGG16(weights=\"imagenet\", include_top=False)\n return model, kapp.vgg16.preprocess_input\n elif model_name == 'xception':\n model = kapp.xception.Xception(weights=\"imagenet\", include_top=False)\n return model, kapp.xception.preprocess_input\n elif model_name == 'vgg19':\n model = kapp.vgg19.VGG19(weights=\"imagenet\", include_top=False)\n return model, kapp.vgg19.preprocess_input\n elif model_name == 'inceptionv3':\n model = kapp.inception_v3.InceptionV3(weights=\"imagenet\", include_top=False)\n return model, kapp.inception_v3.preprocess_input\n elif model_name == 'mobilenet':\n model = kapp.mobilenet.MobileNet(weights=\"imagenet\", include_top=False)\n return model, kapp.mobilenet.preprocess_input\n else:\n raise Exception(\"Unsupported model error\")",
"def build_model(\n model_purpose: str,\n name: str,\n init_w: str,\n input_shape: np.ndarray,\n classes: int,\n dropout_rate: np.float32,\n) -> keras.Model:\n\n if model_purpose.startswith(\"segmentation\"):\n seg_builder = sm.Seg_model_builder(name, input_shape, classes, dropout_rate)\n model = seg_builder.get_model()\n\n elif model_purpose == \"inversion\":\n reg_builder = rm.Reg_model_builder(name, input_shape, classes, init_w)\n model = reg_builder.get_model()\n\n elif model_purpose == \"pixel_concentration_retrieval\":\n model = pwrm.Unet_2(input_shape, classes)\n\n return model",
"def build_model(self):\n # model type\n self.model = Sequential()\n \n # Add embedding layer for first layer\n self.model.add(Embedding(self.embeding_matrix.shape[0], self.embeding_matrix.shape[1], input_length=self.tweet_len,\n weights=[self.embeding_matrix], name='emb'))\n # Add one dimensional convolution layer\n self.model.add(Conv1D(filters=self.params[\"filters\"] , kernel_regularizer=regularizers.l2(0.01), \n kernel_size=self.params[\"kernel_size\"], activation=self.params[\"activation\"]))\n # Add one dimensional max pooling layer\n self.model.add(MaxPooling1D(pool_size=self.params[\"MP_pool_size\"]))\n # Add flatten layer\n self.model.add(Flatten())\n # Add dense layer to predict label\n self.model.add(Dense(1, activation=self.params[\"dense_activation\"]))\n # Compile\n self.model.compile(loss=self.params[\"loss\"] , metrics=['accuracy'] , optimizer='adam')",
"def create_model_from_dict(config_dict, input_shape):\n model_dict = config_dict['model']\n if model_dict['type'].lower() == 'mlp':\n input_layer = None\n output_layer = None\n for idx in range(model_dict['num_hidden_layers']):\n if output_layer is None:\n dense = layers.Dense(\n num_units=model_dict['num_hidden_units'][idx],\n weight_decay=config_dict['weight_decay'],\n input_shape=input_shape,\n name=\"dense\" + str(idx + 1)\n )\n input_layer = dense\n else:\n dense = layers.Dense(\n num_units=model_dict['num_hidden_units'][idx],\n weight_decay=config_dict['weight_decay'],\n input_layer=output_layer,\n name=\"dense\" + str(idx + 1)\n )\n act_class = getattr(layers, str(model_dict['activation']))\n act = act_class(\n input_layer=dense,\n name=str(model_dict['activation']) + str(idx + 1)\n )\n output_layer = act\n if output_layer is None:\n dense_final = layers.Dense(\n num_units=10,\n weight_decay=config_dict['weight_decay'],\n input_shape=input_shape,\n name=\"dense_final\"\n )\n input_layer = dense_final\n else:\n dense_final = layers.Dense(\n num_units=10,\n weight_decay=config_dict['weight_decay'],\n input_layer=output_layer,\n name=\"dense_final\"\n )\n act_final = layers.Softmax(\n input_layer=dense_final,\n name=\"softmax\"\n )\n output_layer = act_final\n model = models.MLP(\n input_layer=input_layer,\n output_layer=output_layer,\n loss=str(model_dict['loss']),\n log_file=config_dict['log_file'],\n name=model_dict['name']\n )\n elif model_dict['type'].lower() == 'rmlr':\n model = models.RMLR(\n num_classes=10,\n weight_decay=config_dict['weight_decay'],\n name=model_dict['name'],\n log_file=config_dict['log_file']\n )\n else:\n print(\"No model of type \" + model_dict['type'] + \"found!\")\n model = None\n return model",
"def build_model():",
"def build_model(training_info, model, db_url=None):\n r2dt.build_model(training_info, db_url, Path(model))",
"def build_model(model_name: Text,\n model_config: archs.MobileNetConfig,\n dataset_config: Optional[dataset_factory.DatasetConfig] = None,\n ) -> tf.keras.models.Model:\n\n model_build_function = _get_model_builder().get(model_name)\n if model_build_function:\n if dataset_config:\n image_size = dataset_config.image_size\n channels = dataset_config.num_channels\n model_config.input_shape = (image_size, image_size, channels)\n model_config.num_classes = dataset_config.num_classes\n return model_build_function(config=model_config)\n else:\n raise ValueError('The model {} is not supported.'.format(model_name))",
"def build_parser():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-m','--model', type=str,\n dest='model_dir', required=True, \n help='path to model file', default=AUTO_CNN)\n \n parser.add_argument('-t', '--train', action='store_true',\n dest='is_train', help='Activate training flag',\n default=False)\n \n parser.add_argument('-p', '--predict', action='store_true',\n dest='is_test', help='Activate predict flag',\n default=False)\n \n parser.add_argument('-e', '--evaluate', action='store_true',\n dest='is_eval', help='Activate evaluation flag',\n default=False)\n\n parser.add_argument('-d','--data', type=str,\n dest='data', help='path to images folder (it can be train data or test data)',\n required=True)\n\n parser.add_argument('--checkpoint', type=str,\n dest='checkpoint', help='path to save the trained model',\n default=\"trained_model.h5\")\n\n parser.add_argument('--epochs', type=int,\n dest='epochs', help='num epochs',\n default=NUM_EPOCHS)\n\n parser.add_argument('--batch-size', type=int,\n dest='batch_size', help='batch size',\n default=BATCH_SIZE)\n \n parser.add_argument('--learning-rate', type=float,\n dest='learning_rate',\n help='learning rate (default %(default)s)',\n default=LEARNING_RATE)\n \n parser.add_argument('--steps-per-epoch', type=float,\n dest='steps_per_epoch',\n help='steps per epoch (default %(default)s)',\n default=SPE)\n\n parser.add_argument('--version', action='version',\n\t version='%(prog)s 1.0')\n\n return parser",
"def create_model(model_name, random_state, epoch, device, log_path, **hparams):\n model = eval(f'{model_name}')(\n **hparams, epoch=int(epoch), random_state=random_state, device=device,\n log_path=log_path\n )\n\n return model",
"def build_model():\n model = Sequential()\n model.add(Dense(beer_emb.EMB_DIM, activation=\"relu\",\n input_dim=beer_emb.EMB_DIM))\n model.add(Dropout(0.5))\n model.add(Dense(64, activation=\"relu\"))\n model.add(Dropout(0.5))\n model.add(Dense(32, activation=\"relu\"))\n model.add(Dense(1, activation='sigmoid'))\n model.compile(loss='binary_crossentropy',\n metrics=['accuracy'], optimizer='adam')\n\n return model",
"def make_model():\n m = model_class(*argv[2:-1])\n modelobj[\"model\"] = m",
"def deserialize(self, str):\n try:\n if self.model is None:\n self.model = articulation_msgs.msg.ModelMsg()\n if self.data is None:\n self.data = articulation_msgs.msg.ModelMsg()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.model.header.seq, _x.model.header.stamp.secs, _x.model.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.model.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.model.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model.name = str[start:end].decode('utf-8')\n else:\n self.model.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.params = []\n for i in range(0, length):\n val1 = articulation_msgs.msg.ParamMsg()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n _x = val1\n start = end\n end += 9\n (_x.value, _x.type,) = _struct_dB.unpack(str[start:end])\n self.model.params.append(val1)\n _x = self\n start = end\n end += 12\n (_x.model.track.header.seq, _x.model.track.header.stamp.secs, _x.model.track.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model.track.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.model.track.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.model.track.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.pose = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v15 = val1.position\n _x = _v15\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v16 = val1.orientation\n _x = _v16\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model.track.pose.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.pose_headers = []\n for i in range(0, length):\n val1 = std_msgs.msg.Header()\n start = end\n end += 4\n (val1.seq,) = _struct_I.unpack(str[start:end])\n _v17 = val1.stamp\n _x = _v17\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.frame_id = str[start:end].decode('utf-8')\n else:\n val1.frame_id = str[start:end]\n self.model.track.pose_headers.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.pose_projected = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v18 = val1.position\n _x = _v18\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v19 = val1.orientation\n _x = _v19\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model.track.pose_projected.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.pose_resampled = []\n for i in range(0, length):\n val1 = 
geometry_msgs.msg.Pose()\n _v20 = val1.position\n _x = _v20\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v21 = val1.orientation\n _x = _v21\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model.track.pose_resampled.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sI'%length\n start = end\n end += struct.calcsize(pattern)\n self.model.track.pose_flags = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.channels = []\n for i in range(0, length):\n val1 = sensor_msgs.msg.ChannelFloat32()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n val1.values = struct.unpack(pattern, str[start:end])\n self.model.track.channels.append(val1)\n _x = self\n start = end\n end += 12\n (_x.data.header.seq, _x.data.header.stamp.secs, _x.data.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.data.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.data.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data.name = str[start:end].decode('utf-8')\n else:\n self.data.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.params = []\n for i in range(0, length):\n val1 = articulation_msgs.msg.ParamMsg()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n _x = val1\n start = end\n end += 9\n (_x.value, _x.type,) = _struct_dB.unpack(str[start:end])\n self.data.params.append(val1)\n _x = self\n start = end\n end += 12\n (_x.data.track.header.seq, _x.data.track.header.stamp.secs, _x.data.track.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data.track.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.data.track.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.data.track.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.pose = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v22 = val1.position\n _x = _v22\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v23 = val1.orientation\n _x = _v23\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data.track.pose.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.pose_headers = []\n for i in range(0, length):\n val1 = std_msgs.msg.Header()\n start = end\n end += 4\n (val1.seq,) = _struct_I.unpack(str[start:end])\n _v24 = val1.stamp\n _x = _v24\n start = end\n end += 
8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.frame_id = str[start:end].decode('utf-8')\n else:\n val1.frame_id = str[start:end]\n self.data.track.pose_headers.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.pose_projected = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v25 = val1.position\n _x = _v25\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v26 = val1.orientation\n _x = _v26\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data.track.pose_projected.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.pose_resampled = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v27 = val1.position\n _x = _v27\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v28 = val1.orientation\n _x = _v28\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data.track.pose_resampled.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sI'%length\n start = end\n end += struct.calcsize(pattern)\n self.data.track.pose_flags = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.channels = []\n for i in range(0, length):\n val1 = sensor_msgs.msg.ChannelFloat32()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n val1.values = struct.unpack(pattern, str[start:end])\n self.data.track.channels.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill",
"def __build_model(self) -> Sequential:\n self.__name = 'Training model'\n input_dim, *hidden_dims, output_dim = parameters.ANET_DIMENSIONS\n\n model = Sequential()\n model.add(Input(shape=(input_dim,)))\n\n for dimension in hidden_dims:\n model.add(Dense(dimension, activation=self.__activation_function))\n\n model.add(Dense(output_dim, activation=softmax))\n\n model.compile(\n optimizer=(self.__optimizer(learning_rate=self.__learning_rate) if self.__learning_rate is not None else self.__optimizer()),\n loss=self.__loss_function\n )\n model.summary()\n return model",
"def build(config, is_training=False):\n if not isinstance(config, model_pb2.Model):\n raise ValueError('The config has to be an instance of model_pb2.Model.')\n\n model = config.WhichOneof('model')\n\n if 'vse_model' == model:\n return vse_model.Model(config.vse_model, is_training)\n\n if 'advise_model' == model:\n return advise_model.Model(config.advise_model, is_training)\n\n raise ValueError('Unknown model: {}'.format(model))",
"def from_string(\n string: str, *, formatter: Optional[ModelFormatter] = None\n ) -> \"Model\":\n formatter = formatter if formatter is not None else ModelJSONFormatter()\n return formatter.parse(string)",
"def build(model_name):\n return pretrain.factory.create(model_name)",
"def build_model_from_inputs(self):\n if self.term_list is None:\n # no supplied token list -- use vocabulary of the training dataset\n # self.term_list = self.vocabulary\n # info(\"Setting bag dimension to {} from input vocabulary.\".format(len(self.term_list)))\n # will generate the vocabulary from the input\n pass\n info(f\"Building {self.name} model\")\n bagger = None\n if self.config.max_terms is not None:\n bagger = Bag(vocabulary=self.term_list, weighting=self.base_name, ngram_range=self.ngram_range, max_terms=self.config.max_terms)\n else:\n bagger = Bag(vocabulary=self.term_list, weighting=self.base_name, ngram_range=self.ngram_range)\n\n train_idx = self.indices.get_train_instances()\n texts = Text.get_strings(self.text.data.get_slice(train_idx))\n bagger.map_collection(texts, fit=True, transform=False)\n self.term_list = bagger.get_vocabulary()\n\n self.dimension = len(self.term_list)\n self.config.dimension = self.dimension",
"def build_model(input_shape, X_train, arch=\"VGG16\", loss=\"sparse_categorical_crossentropy\", learning_rate=[0.0005, 0.0001, 0.00002]):\n # select model architecture\n if arch == \"VGG16\":\n model = models.VGG16(input_shape, num_layers=num_labels)\n elif arch = \"VGG16_twist\":\n model = models.VGG16_twst(input_shape, num_layers=num_labels)\n elif arch = \"VGG11\":\n model = VGG11(input_shape, X_train, num_layers=num_labels)\n\n # learning rate constant decay\n learning_rate_fn = keras.optimizers.schedules.PiecewiseConstantDecay(\n BOUNDARIES, learning_rate)\n\n model.summary()\n # compile model\n optimiser = tf.optimizers.Adam(learning_rate=learning_rate_fn)\n model.compile(optimizer=optimiser,\n # loss=loss,\n loss=tf.keras.losses.SparseCategoricalCrossentropy(),\n metrics=[\"accuracy\"])\n return model",
"def create_model():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--DISC_LR', type=float, default=1e-4)\r\n parser.add_argument('--GEN_LR', type=float, default=1e-3)\r\n parser.add_argument('--GEN_BETA1', type=float, default=0.9)\r\n parser.add_argument('--GEN_BETA2', type=float, default=0.999)\r\n parser.add_argument('--IMAGE_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_EMBED_SIZE', type=int, default=2048)\r\n parser.add_argument('--WORD_EMBED_SIZE', type=int, default=512)\r\n parser.add_argument('--VOCAB_SIZE', type=int, default=1004)\r\n args, task_args = parser.parse_known_args()\r\n override_if_not_in_args('--max_steps', '1000', task_args)\r\n override_if_not_in_args('--batch_size', '64', task_args)\r\n override_if_not_in_args('--eval_set_size', '370', task_args)\r\n override_if_not_in_args('--eval_interval_secs', '2', task_args)\r\n override_if_not_in_args('--log_interval_secs', '2', task_args)\r\n override_if_not_in_args('--min_train_eval_rate', '2', task_args)\r\n\r\n return Model(args.DISC_LR, args.GEN_LR, args.GEN_BETA1, args.GEN_BETA2,\r\n args.IMAGE_SIZE, args.QUES_EMBED_SIZE, args.WORD_EMBED_SIZE,\r\n args.QUES_SIZE, args.VOCAB_SIZE), task_args",
"def model(self):\n i = self.keras.Input(self.s)\n\n return keras.Model(inputs=[i], outputs=self.call(i))",
"def build_eval_model_from_args(args, saved_model_data, device):\n # NOTE - this may be changed to instantiate a kind of model depending on parameters\n # FOR legacy models that did not have these attributes.\n normalize_embedding = args.normalize_embedding\n out_gaussian = args.out_gaussian\n use_history = args.feed_history\n discard_zeros = args.discard_zeros\n activation_on_output = get_output_activation(args)\n # pick the type of lstm model\n model_interaction_module_label, pooling_shape = get_interaction_module_label(args)\n if model_interaction_module_label is not None:\n # model incorporates social interactions\n interaction_module = build_interaction_module(args, model_interaction_module_label, pooling_shape)\n shape_config = build_shape_config(args, interaction_module, pooling_shape)\n if 'fields' in saved_model_data and saved_model_data['fields'] is not None:\n # uses interactions and motion fields - interaction and scene-aware\n model_data = saved_model_data['fields']\n fields = SparseMotionFields(model_data['Te'], model_data['Qe'], model_data['Bc'],\n [model_data['min'], model_data['max']], model_data['parameters'])\n model = FieldsWithInteractionModuleAndLSTM(fields, interaction_module, shape_config,\n embedding_dim=args.embedding_dim, h_dim=args.lstm_h_dim,\n activation_on_input_embedding=get_input_activation(args),\n output_gaussian=out_gaussian,\n activation_on_output=activation_on_output,\n feed_all=args.feed_all_fields,\n use_probs=args.feed_with_probabilities)\n else:\n model = LSTMWithInteractionModule(interaction_module, shape_config, embedding_dim=args.embedding_dim,\n h_dim=args.lstm_h_dim,\n activation_on_input_embedding=get_input_activation(args),\n output_gaussian=out_gaussian, use_enc_dec=args.use_enc_dec,\n activation_on_output=activation_on_output)\n elif 'fields' in saved_model_data and saved_model_data['fields'] is not None:\n model_data = saved_model_data['fields']\n fields_model = SparseMotionFields(model_data['Te'], model_data['Qe'], model_data['Bc'],\n [model_data['min'], model_data['max']], model_data['parameters'])\n if args.simple_fields:\n model = SimpleFieldsWithLSTM(fields=fields_model, embedding_dim=args.embedding_dim,\n h_dim=args.lstm_h_dim, num_layers=args.num_layers,\n activation_on_input_embedding=get_input_activation(args),\n activation_on_output=activation_on_output,\n normalize_embedding=normalize_embedding, output_gaussian=out_gaussian,\n discard_zeros=discard_zeros)\n else:\n feed_all = args.feed_all_fields\n use_probabilities = args.feed_with_probabilities\n model = FieldsWithLSTM(fields=fields_model, feed_all=feed_all or use_probabilities,\n use_probs=use_probabilities, embedding_dim=args.embedding_dim, h_dim=args.lstm_h_dim,\n num_layers=args.num_layers, activation_on_input_embedding=get_input_activation(args),\n activation_on_output=activation_on_output, normalize_embedding=normalize_embedding,\n output_gaussian=out_gaussian, discard_zeros=discard_zeros)\n # Vanilla LSTM models - no scene compliance nor social interactions\n elif hasattr(args, 'use_enc_dec') and args.use_enc_dec:\n model = VanillaLstmEncDec(args.embedding_dim, args.lstm_h_dim, num_layers=args.num_layers,\n activation_on_input_embedding=get_input_activation(args),\n activation_on_output=activation_on_output, extra_info=use_history,\n normalize_embedding=normalize_embedding, output_gaussian=out_gaussian,\n discard_zeros=discard_zeros)\n else:\n model = VanillaLSTM(args.embedding_dim, args.lstm_h_dim,\n activation_on_input_embedding=get_input_activation(args),\n 
activation_on_output=activation_on_output, history_on_pred=use_history,\n normalize_embedding=normalize_embedding, output_gaussian=out_gaussian,\n discard_zeros=discard_zeros)\n model.load_state_dict(saved_model_data['model_state_dict'])\n model.to(device)\n # model.eval() used to set dropout and batch normalization layers to evaluation mode before running inference.\n # Failing to do this can yield inconsistent inference results.\n model.eval()\n return model",
"def build_model(self, model_def_path: Optional[str] = None) -> 'nn.Module':\n cfg = self.cfg\n model = cfg.model.build(\n num_classes=cfg.data.num_classes,\n in_channels=cfg.data.img_channels,\n save_dir=self.modules_dir,\n hubconf_dir=model_def_path,\n img_sz=cfg.data.img_sz)\n return model",
"def get_model(name, dataset):\n field_dims = dataset.field_dims\n if name == 'lr':\n return LogisticRegressionModel(field_dims)\n elif name == 'fm':\n return FactorizationMachineModel(field_dims, embed_dim=16)\n elif name == 'hofm':\n return HighOrderFactorizationMachineModel(\n field_dims, order=3, embed_dim=16)\n elif name == 'ffm':\n return FieldAwareFactorizationMachineModel(field_dims, embed_dim=4)\n elif name == 'fnn':\n return FactorizationSupportedNeuralNetworkModel(\n field_dims, embed_dim=16, mlp_dims=(16, 16), dropout=0.2)\n elif name == 'wd':\n return WideAndDeepModel(\n field_dims, embed_dim=16, mlp_dims=(16, 16), dropout=0.2)\n elif name == 'ipnn':\n return ProductNeuralNetworkModel(\n field_dims,\n embed_dim=16,\n mlp_dims=(16, ),\n method='inner',\n dropout=0.2)\n elif name == 'opnn':\n return ProductNeuralNetworkModel(\n field_dims,\n embed_dim=16,\n mlp_dims=(16, ),\n method='outer',\n dropout=0.2)\n elif name == 'dcn':\n return DeepCrossNetworkModel(\n field_dims,\n embed_dim=16,\n num_layers=3,\n mlp_dims=(16, 16),\n dropout=0.2)\n elif name == 'nfm':\n return NeuralFactorizationMachineModel(\n field_dims, embed_dim=64, mlp_dims=(64, ), dropouts=(0.2, 0.2))\n elif name == 'ncf':\n # only supports MovieLens dataset because for other datasets user/item colums are indistinguishable\n assert isinstance(dataset, MovieLens20MDataset) or isinstance(\n dataset, MovieLens1MDataset)\n return NeuralCollaborativeFiltering(\n field_dims,\n embed_dim=16,\n mlp_dims=(16, 16),\n dropout=0.2,\n user_field_idx=dataset.user_field_idx,\n item_field_idx=dataset.item_field_idx)\n elif name == 'fnfm':\n return FieldAwareNeuralFactorizationMachineModel(\n field_dims, embed_dim=4, mlp_dims=(64, ), dropouts=(0.2, 0.2))\n elif name == 'dfm':\n return DeepFactorizationMachineModel(\n field_dims, embed_dim=16, mlp_dims=(16, 16), dropout=0.2)\n elif name == 'xdfm':\n return ExtremeDeepFactorizationMachineModel(\n field_dims,\n embed_dim=16,\n cross_layer_sizes=(16, 16),\n split_half=False,\n mlp_dims=(16, 16),\n dropout=0.2)\n elif name == 'afm':\n return AttentionalFactorizationMachineModel(\n field_dims, embed_dim=16, attn_size=16, dropouts=(0.2, 0.2))\n elif name == 'afi':\n return AutomaticFeatureInteractionModel(\n field_dims,\n embed_dim=16,\n atten_embed_dim=64,\n num_heads=2,\n num_layers=3,\n mlp_dims=(400, 400),\n dropouts=(0, 0, 0))\n elif name == 'afn':\n print('Model:AFN')\n return AdaptiveFactorizationNetwork(\n field_dims,\n embed_dim=16,\n LNN_dim=1500,\n mlp_dims=(400, 400, 400),\n dropouts=(0, 0, 0))\n else:\n raise ValueError('unknown model name: ' + name)",
"def build_model(language_id, model_type):\n\n # getting the language code from it's id\n language_code = get_language_code(language_id)\n\n # getting the model name from it's type\n model_name = get_model_name(model_type)\n\n # getting the training data\n training_data_path = \"%s/%s/\" % (training_base_path, language_code)\n training_data_handles = get_training_data_handles(training_data_path)\n\n # computing the model data depending on the selected model type\n model_data = compute_model_data(training_data_handles, model_type)\n\n # building the model's full path\n model_path = \"%s/%s/%s.txt\" % (models_base_path, language_code, model_name)\n\n # writing the model data to file\n with codecs.open(model_path, 'w') as f:\n f.write(model_data)",
"def _create_string_input_trainable_model():\n\n class BlockWithStringInputs(onnxblock.ForwardBlock):\n def __init__(self):\n super().__init__()\n self.cast = onnxblock.blocks.Cast(to=onnx.TensorProto.FLOAT)\n self.linear = onnxblock.blocks.Linear(4, 2)\n\n def build(self, string_input):\n return self.linear(self.cast(string_input))\n\n string_block = BlockWithStringInputs()\n with onnxblock.empty_base() as model_accessor:\n model_accessor.model.graph.input.extend(\n [\n onnx.helper.make_tensor_value_info(\"input\", onnx.TensorProto.STRING, [1, 4]),\n ]\n )\n _ = string_block(\"input\")\n\n return string_block.to_model_proto()",
"def create_model(model_type: str, input_size: Tuple[int, ...], output='binary', summary=True) -> keras.Model:\n if model_type in MODELS:\n inputs, outputs = MODELS[model_type](input_size)\n else:\n raise ValueError(f'Model type \"{model_type}\" not supported')\n\n if output == 'binary':\n activation = layers.Dense(1, activation='sigmoid')(outputs)\n loss = 'binary_crossentropy'\n elif output == 'multi':\n activation = layers.Dense(5, activation='softmax')(outputs)\n loss = 'categorical_crossentropy'\n else:\n raise ValueError(f'Method \"{output}\" not supported')\n\n model = keras.Model(inputs=inputs, outputs=activation)\n model.compile(optimizer='adam', loss=loss, metrics=['acc', keras.metrics.AUC(name='auc')])\n\n if summary:\n model.summary()\n\n return model",
"def read_model(self):\n \n # words dictionary\n f = open(self.name + \"_words\", 'r') \n d_str = f.read()\n f.close()\n \n d = dict(eval(d_str))\n self.words = d\n\n # word_lengths dictionary\n f = open(self.name + \"_word_lengths\", 'r') \n d_str = f.read()\n f.close()\n \n d = dict(eval(d_str))\n self.word_lengths = d\n\n # stems dictionary\n f = open(self.name + \"_stems\", 'r') \n d_str = f.read()\n f.close()\n \n d = dict(eval(d_str))\n self.stems = d\n\n # sentence_lengths dictionary\n f = open(self.name + \"_sentence_lengths\", 'r') \n d_str = f.read()\n f.close()\n \n d = dict(eval(d_str))\n self.sentence_lengths = d\n\n # ten most common words\n f = open(self.name + \"_common_word\", 'r') \n d_str = f.read()\n f.close()\n \n d = list(eval(d_str))\n self.common_word = d",
"def build_model(self):\n self.model = Sequential()\n # print self.layers[0].identifier\n # print self.layers[0].parameters\n for layer in self.layers:\n # print layer.identifier\n # print layer.parameters\n self.model.add(layer.toKerasFn())\n\n\n # super(SequentialModelWrapper, self).compile(optimizer=self.optimizer.toKerasFn(),\n # loss=self.loss,\n # metrics=self.metrics)\n self.model.compile(optimizer=self.optimizer.toKerasFn(),\n loss=self.loss,\n metrics=self.metrics)",
"def _create_model(self):\n config = {\n \"input_features\": self.input_features,\n \"output_features\": self.output_features,\n \"combiner\": {\"type\": \"concat\", \"output_size\": 14},\n TRAINER: {\"epochs\": 2, BATCH_SIZE: 128},\n }\n return LudwigModel(config, logging_level=logging.WARN)"
]
| [
"0.6326366",
"0.6091344",
"0.5876037",
"0.56606436",
"0.5658026",
"0.5654233",
"0.562259",
"0.5619373",
"0.56061447",
"0.55622476",
"0.55564666",
"0.5539365",
"0.55305934",
"0.5526856",
"0.5487436",
"0.5468888",
"0.5461886",
"0.5455671",
"0.54494077",
"0.54338133",
"0.5423484",
"0.5415493",
"0.5397152",
"0.53927183",
"0.5390631",
"0.5381706",
"0.5375373",
"0.5367795",
"0.535878",
"0.5349477"
]
| 0.6771992 | 0 |
Load (unsplit) EMNIST-like client data from a SQL database. | def load_custom_emnist_client_data(sql_database: str) -> ClientData:
if sql_database is None:
raise ValueError('sql_database cannot be None.')
return sql_client_data_utils.load_parsed_sql_client_data(
sql_database, element_spec=_ELEMENT_SPEC) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_custom_cifar_client_data(sql_database: str) -> ClientData:\n\n if sql_database is None:\n raise ValueError('sql_database cannot be None.')\n\n return sql_client_data_utils.load_parsed_sql_client_data(\n sql_database, element_spec=_ELEMENT_SPEC)",
"def load_data(client):\n codes = [\"DUB\", \"LHR\", \"ETC\", \"XXX\"]\n q = generateMultiInsertQuery(codes, \"Airport\")\n #print(json.dumps(q.json(), indent=4))\n q.execute(client)",
"def load_from_mysql(self):\n\n self._logger.info('Reading data from MySQL database')\n\n # open the database connection\n data = mysql_data.database(self.dataConfig['user'],\n self.dataConfig['password'],\n self.dataConfig['host'],\n self.dataConfig['database'],\n self.dataConfig['port'])\n\n # ---------------------------------------------------\n # determine if it's stations or client\n sta = self.stations\n\n c = None\n stable = None\n if 'client' in self.dataConfig.keys():\n c = self.dataConfig['client']\n stable = self.dataConfig['station_table']\n\n # Determine what table for the metadata\n mtable = self.dataConfig['metadata']\n\n # Raise an error if neither stations or client provided\n if (sta is None) & (c is None):\n raise Exception('Error in configuration file for [mysql],'\n ' must specify either \"stations\" or \"client\"')\n\n self._logger.debug('Loading metadata from table %s' % mtable)\n\n # ---------------------------------------------------\n # load the metadata\n self.metadata = data.metadata(mtable, station_ids=sta,\n client=c, station_table=stable)\n\n self._logger.debug('%i stations loaded' % self.metadata.shape[0])\n\n # ---------------------------------------------------\n # get a list of the stations\n station_ids = self.metadata.index.tolist()\n\n # get the correct column names if specified, along with variable names\n db_var_names = [val for key, val in self.dataConfig.items()\n if key not in self.db_config_vars]\n variables = [x for x in self.dataConfig.keys()\n if x not in self.db_config_vars]\n\n # get the data\n\n dp = data.get_data(self.dataConfig['data_table'], station_ids,\n self.start_date, self.end_date, db_var_names)\n\n # go through and extract the data\n for v in variables:\n setattr(self, v, dp[self.dataConfig[v]])",
"def load_partitioned_custom_cifar_client_data(\n sql_database: str,\n *, # Caller passes below args by name.\n unpart_clients_proportion: float,\n train_val_ratio_intra_client: int,\n part_clients_subsampling_rate: float,\n include_unpart_train_for_val: bool,\n max_elements_per_client: int,\n seed: Optional[int] = None,\n) -> Tuple[ClientData, ClientData, ClientData]:\n\n total_cd = load_custom_cifar_client_data(sql_database)\n\n (part_train_cd, part_val_cd,\n unpart_cd) = client_data_utils.canonical_three_way_partition_client_data(\n total_cd,\n unpart_clients_proportion=unpart_clients_proportion,\n train_val_ratio_intra_client=train_val_ratio_intra_client,\n part_clients_subsampling_rate=part_clients_subsampling_rate,\n include_unpart_train_for_val=include_unpart_train_for_val,\n max_elements_per_client=max_elements_per_client,\n shuffle_buffer_size=100,\n seed=seed)\n\n return part_train_cd, part_val_cd, unpart_cd",
"def read_database():\n file = tables.open_file(glob.datafile)\n table_d = file.root.VelibData.dynamic\n table_s = file.root.VelibData.static\n n_rows = len(table_d)\n print \"Nrows in dynamic table:\", n_rows\n print \"N stations:\", len(table_d[0][\"last_update\"])\n print \"Time of most recent sampling:\", \\\n time.asctime(time.localtime(recover_time(table_d[-1][\"sample_time\"])))\n print \"Nbikes available at most recent sampling:\", \\\n table_d[n_rows-1][\"available_bikes\"]\n print \"Time of last_update at most recent sampling:\", \\\n time.asctime(\n time.localtime(recover_time(table_d[n_rows-1][\"last_update\"][0])))\n print \"Number arr\", table_s[0][\"number\"]\n file.close()",
"def load_data(database_filepath):\n \n engine = create_engine('sqlite:///'+database_filepath)\n \n #reading table created in ETL part into pandas dataframe\n df = pd.read_sql_table('DisasterResponse',engine)\n \n #setting target variables X and Y\n X = df['message'].astype(str)\n Y = df.drop(['id', 'message', 'original','genre'],axis=1)\n \n return X,Y,list(Y.columns)",
"def load_data(database_filepath):\n engine = create_engine('sqlite:///' + database_filepath)\n df = pd.read_sql_table('Disasters', engine)\n X = df['message']\n Y = df.drop(['id', 'message', 'original', 'genre'], axis=1)\n category_names = Y.columns\n return X, Y, category_names",
"def load_data_sql(): \r\n conn = mysql.connect(**st.secrets[\"mysql\"])\r\n\r\n data = pd.read_sql('SELECT * FROM song_data', conn)\r\n lookup_table = pd.read_sql('SELECT * FROM lookup_table', conn)\r\n \r\n return data, lookup_table",
"def get_training_data(db_conn):\n return pd.read_sql('''select * from churn_model.churn_data;''', db_conn)",
"def load_data(database_path):\n\n try:\n engine = create_engine(f\"sqlite:///{database_path}\")\n df = pd.read_sql_table(table_name=\"disaster_data\".lower(), con=engine)\n\n # seperate features and targets\n X = pd.DataFrame(df.loc[:,'message'])\n y = df.iloc[:,3:]\n category_names = y.columns.tolist()\n\n return X, y, category_names\n except:\n raise Exception(\"Could not load data.\")",
"def _init_dataset():\n global _residues\n if _residues is not None:\n # Database is already initialized\n return\n\n # Residuue data is taken from\n # ftp://ftp.wwpdb.org/pub/pdb/data/monomers/components.cif\n # (2019/01/27)\n _info_dir = dirname(realpath(__file__))\n with open(join(_info_dir, \"residues.msgpack\"), \"rb\") as file:\n _residues = msgpack.unpack(\n file, use_list=False, raw=False\n )",
"def load_data(database_filepath):\n conn = create_engine(f\"sqlite:///{database_filepath}\")\n df = pd.read_sql('DisasterResponse', con=conn)\n X = df['message']\n Y = df.iloc[:, 4:]\n category_names = Y.columns.tolist()\n return X, Y, category_names",
"def load_data(database_filepath):\n\n # load data from database\n engine = sqlalchemy.create_engine('sqlite:///' + database_filepath)\n df = pd.read_sql_table('cleandata', engine)\n # Ignoring 'original' text because translation is available in message.\n X = df[['message', 'genre']]\n # child_alone is always 0 in our data set. Thus remove the column.\n y = df.drop(columns=['id', 'message', 'original', 'genre', 'child_alone'])\n\n return X, y, y.columns",
"def load_renter_data():\n return pd.read_sql_query(_sql_query, _con)",
"def load_data(database_filepath):\n # load data from database\n engine = sqlalchemy.create_engine('sqlite:///' + database_filepath)\n df = pd.read_sql_table(\"disaster_clean_data\", con=engine)\n X = df['message']\n Y = df.drop(columns=['id', 'message', 'original', 'genre'])\n category_names = list(df.columns[4:])\n return X, Y, category_names",
"def load_data(db_filepath):\n # load data from database\n engine = create_engine('sqlite:///' + db_filepath)\n\n inspector = inspect(engine)\n # Get table information\n print('tables in the db {}'.format(inspector.get_table_names()))\n\n df = pd.read_sql(\"SELECT * FROM Messages \", engine)\n\n # create X and Y datasets\n X = df['message']\n Y = df.drop(['id','message','original','genre'],axis=1)\n\n # create a list of cat names\n category_names = list(Y.columns.values)\n\n return X, Y, category_names",
"def load_data(database_filepath):\n\n engine = create_engine('sqlite:///{}'.format(database_filepath))\n df = pd.read_sql_table('messages_cleaned', engine)\n X = df.message \n Y = df.drop(columns=['message', 'original', 'genre'])\n category_names = list(Y.columns.values)\n return X, Y, category_names",
"def load_data(database_filepath):\n engine_name = 'sqlite:///' + database_filepath\n engine = create_engine(engine_name)\n df =pd.read_sql(\"SELECT * FROM messages_table\", engine)\n X = df['message']\n Y = df.drop(['id','message','original','genre'], axis=1)\n return X, Y",
"def load_data(database_filepath):\n\n engine = create_engine('sqlite:///{}'.format(database_filepath))\n df = pd.read_sql_table('messages', con= engine)\n\n X = df.message\n y = df.iloc[:, 4:]\n col_names = y.columns\n return X, y, col_names",
"def loadDB(self,dbfilename):\n \n db=[]\n with open(dbfilename,'r',encoding='ISO-8859-1') as dbfilename:\n dbreader= csv.reader(dbfilename,delimiter=self.sDelimiter )\n for lFields in dbreader:\n db.append(lFields)\n\n return db",
"def loadbatch():\n s=\"select * from tblbatch where status='1'\"\n c.execute(s)\n data=c.fetchall()\n return data",
"def load_data(database_filepath):\n engine = create_engine('sqlite:///' + database_filepath)\n df = pd.read_sql_table('Messages', engine)\n X = df['message']\n Y = df.drop(['id', 'message', 'original', 'genre'], axis=1)\n category_names = Y.columns\n return X, Y, category_names",
"def load_data(database_filepath):\n engine = create_engine('sqlite:///'+ database_filepath)\n df = pd.read_sql(\"SELECT * FROM DisasterResponse\", engine)\n #exclude colums that are not needed in model\n col=[i for i in df.columns if i not in ['id','original', 'genre']]\n X = df[\"message\"]\n Y = df.iloc[:,4:]\n #global category_names\n category_names = Y.columns\n return X, Y, category_names",
"def load_data(connection, insert_sql, data):\n cur = connection.cursor()\n for d in data:\n cur.execute(insert_sql, d)\n connection.commit()",
"def load_data(database_filepath):\n\n logging.info(\"run load_data\")\n\n # create engine and connect to file based-database\n engine = create_engine(f\"sqlite:///{database_filepath}\")\n\n # load data in dataframe\n df = pd.read_sql_table('data', engine)\n logging.info(f\"data retrieved from db file: {database_filepath}\")\n\n # shuffle data\n df = df.sample(frac=1, random_state=42)\n\n # split data between input and outputs\n X = df['message']\n Y = df.drop(['id', 'message', 'original', 'genre'], axis=1)\n\n return X, Y, Y.columns",
"def load_data(database_filepath):\n\n # create engine to get connection\n engine = create_engine(\"sqlite:///{}\".format(database_filepath))\n with engine.connect() as connection:\n df = pd.read_sql_table(\"appen\", connection)\n # define the features and output of the model\n X = df[\"message\"]\n Y = df.drop([\"message\", \"id\", \"original\", \"genre\"], axis=1)\n category_names = list(Y.columns)\n\n return X, Y, category_names",
"def load_data(database_filepath):\n \n engine = create_engine('sqlite:///' + database_filepath)\n df = pd.read_sql_table('df',engine)\n X = df['message']\n y = df.drop(columns=['id','message','original','genre'], axis=1)\n category_names = y.columns\n return X, y, category_names",
"def load_data(database_filepath):\n \n engine = create_engine('sqlite:///data/DisasterResponse.db')\n df = pd.read_sql_query('select * from cleanDF', engine)\n X = df['message'].values\n Y = df.iloc[:,5:]\n category_names = Y.columns\n\n return X, Y, category_names",
"def _feed_field_blobs_from_db_file(self, net):\n assert os.path.exists(self.db_path), \\\n 'db_path [{db_path}] does not exist'.format(db_path=self.db_path)\n net.Load(\n [],\n self.ds.get_blobs(),\n db=self.db_path,\n db_type=self.db_type,\n absolute_path=True,\n source_blob_names=self.ds.field_names(),\n )",
"def load_datasets_from_database(test_run_id):\n logging.info('Loading datasets from database for: {}'.format(test_run_id))\n mbtest_run = MBTestDatabaseService().get_run(test_run_id)\n datasets = mbtest_run.get('datasets')\n logging.info('type(Documents): %r', type(datasets))\n logging.info('Documents: %s', str(datasets)[:200])\n\n if isinstance(datasets, basestring):\n documents = yaml.load(datasets)\n # documents should be seq of dicts -- if str then legacy format\n if isinstance(documents, basestring):\n logging.info('Loading datasets from legacy CSV format')\n reader = csv.reader(StringIO(datasets), delimiter=';')\n documents = mbtest_data_csv_to_yaml_format(reader)\n else:\n pass # datasets is seq of dict\n\n return documents"
]
| [
"0.62082094",
"0.60316384",
"0.5761554",
"0.57444966",
"0.5691772",
"0.5639299",
"0.5636918",
"0.5616825",
"0.5611436",
"0.5602569",
"0.55277586",
"0.5519456",
"0.5478773",
"0.543038",
"0.5419184",
"0.54132307",
"0.5410894",
"0.5410576",
"0.5405717",
"0.5401371",
"0.5387977",
"0.5375727",
"0.53459567",
"0.5339261",
"0.53305656",
"0.53297514",
"0.5318445",
"0.5314617",
"0.5305259",
"0.5303695"
]
| 0.702729 | 0 |
Create a preprocessing function for EMNIST client datasets. | def _create_preprocess_fn(
num_epochs: int,
batch_size: int,
merge_case: bool,
shuffle_buffer_size: int = emnist_dataset.MAX_CLIENT_DATASET_SIZE,
use_cache: bool = True,
use_prefetch: bool = True,
) -> Callable[[tf.data.Dataset], tf.data.Dataset]:
@tf.function
def merge_mapping(elem):
original_label_to_merged_label = tf.constant([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
12, 38, 39, 40, 41, 42, 18, 19, 20, 21, 22, 43, 24, 25, 44, 45, 28, 46,
30, 31, 32, 33, 34, 35
])
return collections.OrderedDict(
label=original_label_to_merged_label[elem['label']],
pixels=elem['pixels'])
base_preprocess_fn = emnist_dataset.create_preprocess_fn(
num_epochs=num_epochs,
batch_size=batch_size,
shuffle_buffer_size=shuffle_buffer_size)
def preprocess_fn(dataset: tf.data.Dataset):
if merge_case:
dataset = dataset.map(merge_mapping)
if use_cache:
dataset = dataset.cache()
dataset = base_preprocess_fn(dataset)
if use_prefetch:
dataset = dataset.prefetch(tf.data.AUTOTUNE)
return dataset
return preprocess_fn # pytype: disable=bad-return-type | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_preprocess_fn(**preprocessing_kwargs):\n\n def _preprocess_fn(data):\n \"\"\"The preprocessing function that is returned.\"\"\"\n\n # Validate input\n if not isinstance(data, dict) or 'image' not in data:\n raise ValueError('Argument `data` must be a dictionary, '\n 'not %s' % str(type(data)))\n\n # Apply all the individual steps in sequence.\n image = data['image']\n image = decode_image(image)\n image = normalize_value_range(image)\n image = get_multiscale_patches(image, **preprocessing_kwargs)\n\n data['image'] = image\n return data\n\n return _preprocess_fn",
"def _create_preprocess_fn(\n num_epochs: int,\n batch_size: int,\n shuffle_buffer_size: int = _NUM_EXAMPLES_PER_CLIENT,\n use_cache: bool = True,\n use_prefetch: bool = True,\n) -> Callable[[tf.data.Dataset], tf.data.Dataset]:\n base_preprocess_fn = cifar100_dataset.create_preprocess_fn(\n num_epochs=num_epochs,\n batch_size=batch_size,\n crop_shape=_CROP_SHAPE,\n distort_image=_DISTORT_TRAIN_IMAGES,\n # Set buffer to 1 to disable shuffling since is not necessary for eval.\n shuffle_buffer_size=shuffle_buffer_size)\n\n def preprocess_fn(dataset: tf.data.Dataset):\n if use_cache:\n dataset = dataset.cache()\n dataset = base_preprocess_fn(dataset)\n if use_prefetch:\n dataset = dataset.prefetch(tf.data.AUTOTUNE)\n\n return dataset\n\n return preprocess_fn",
"def preprocess_train_dataset(dataset):\n return (dataset\n # Shuffle according to the largest client dataset\n .shuffle(buffer_size=MAX_CLIENT_DATASET_SIZE)\n # Repeat to do multiple local epochs\n .repeat(CLIENT_EPOCHS_PER_ROUND)\n # Batch to a fixed client batch size\n .batch(CLIENT_BATCH_SIZE, drop_remainder=False)\n # Preprocessing step\n .map(reshape_emnist_element))",
"def _get_data_preprocessing_fns(self):\n # Create new functions with partial positional arguments assigned\n process_path_fn = \\\n partial(data_preprocessing.process_path,\n one_hot=self.ONE_HOT,\n num_classes=self._NUM_CLASSES,\n class_names=self._CLASS_NAMES)\n process_img_path_fn = data_preprocessing.process_img_path\n convert_format_fn = \\\n partial(data_preprocessing.convert_format,\n grayscale_in=self._GRAYSCALE_IN,\n grayscale_out=self._GRAYSCALE_OUT)\n random_rotate_fn = \\\n partial(data_preprocessing.random_rotate,\n stddev=self._ROTATE_STDDEV)\n random_zoom_fn = \\\n partial(data_preprocessing.random_zoom,\n max_percent=self._ZOOM_MAX_PERCENT,\n stddev=self._ZOOM_STDDEV,\n img_height=self._HEIGHT,\n img_width=self._WIDTH)\n resize_fn = \\\n partial(data_preprocessing.resize,\n height=self._HEIGHT,\n width=self._WIDTH)\n\n funcs = edict({'process_path': process_path_fn,\n 'process_img_path': process_img_path_fn,\n 'convert_format': convert_format_fn,\n 'random_rotate': random_rotate_fn,\n 'random_zoom': random_zoom_fn,\n 'resize': resize_fn})\n\n return funcs",
"def run_preprocessing(self, serie):\n pass",
"def preprocess(data):\n raise NotImplementedError",
"def get_preprocess_fn(is_training, is_pretrain):\n # Disable test cropping for small images (e.g. CIFAR)\n if FLAGS.image_size <= 32:\n test_crop = False\n else:\n test_crop = True\n color_jitter_strength = FLAGS.color_jitter_strength if is_pretrain else 0.\n return functools.partial(\n data_util.preprocess_image,\n height=FLAGS.image_size,\n width=FLAGS.image_size,\n is_training=is_training,\n color_jitter_strength=color_jitter_strength,\n test_crop=test_crop)",
"def client_dataset_preprocess_fn(\n batch_size: int = 1,\n num_local_epochs: int = 1,\n use_example_weight: bool = True\n) -> Callable[[tf.data.Dataset], tf.data.Dataset]:\n if num_local_epochs < 1:\n raise ValueError(\"num_epochs must be a positive integer.\")\n\n def preprocess_fn(dataset):\n d = dataset.shuffle(_SHUFFLE_BUFFER_SIZE)\n d = d.map(\n functools.partial(\n decode_example, use_example_weight=use_example_weight),\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n d = d.repeat(num_local_epochs)\n d = d.batch(batch_size, drop_remainder=True)\n d = d.prefetch(_PREFETCH_BUFFER_SIZE)\n return d\n\n return preprocess_fn",
"def _build_preprocessing(self):\n\n # For now, do nothing\n pass",
"def _build_tracking_preprocessing(input_shape):\n\n def preprocessing(input_img, **kwargs):\n\n to_normalize = False if np.percentile(input_img, 98) > 1.0 else True\n\n if len(input_img.shape) == 4:\n print(\n \"Only preprocessing single image, we will consider the first one of the batch\"\n )\n image = input_img[0] * 255.0 if to_normalize else input_img[0] * 1.0\n else:\n image = input_img * 255.0 if to_normalize else input_img * 1.0\n\n image = cv2.resize(image, input_shape)\n x, _ = transform_test(mx.nd.array(image), min(input_shape))\n return x\n\n return preprocessing",
"def preprocess():\n # Load the data\n random.seed(77)\n X,y = make_classification(n_samples=500, n_features=30, n_informative=8, n_redundant=2, \n n_repeated=0, n_classes=3, n_clusters_per_class=2, weights=None, \n flip_y=0.01, class_sep=1.0, hypercube=True, shift=0.0, scale=1.0, \n shuffle=True, random_state=None)\n\n x_train, x_val, y_train, y_val = train_test_split(X, y, random_state=0, test_size=0.25)\n\n # Standardize the data\n scaler = StandardScaler()\n X_train = scaler.fit_transform(x_train)\n X_val = scaler.transform(x_val)\n\n \n return X_train,y_train,X_val,y_val",
"def _preprocess_fn(data):\n\n # Validate input\n if not isinstance(data, dict) or 'image' not in data:\n raise ValueError('Argument `data` must be a dictionary, '\n 'not %s' % str(type(data)))\n\n # Apply all the individual steps in sequence.\n image = data['image']\n image = decode_image(image)\n image = normalize_value_range(image)\n image = get_multiscale_patches(image, **preprocessing_kwargs)\n\n data['image'] = image\n return data",
"def preprocess(self):",
"def preprocess(function, **kwargs):\n\n print kwargs, \"preprocess\"\n return function(**kwargs)",
"def preprocess(config: Config) -> None:\n print(colored(\"preprocessing:\", attrs=[\"bold\"]))\n factory = PreprocessingFactory()\n factory.process(config)",
"def preprocess_func(cls, func):\n pass",
"def preprocessing(train_raw_, test_raw_):\n \n undef = np.float64(-999.0)\n pred_dict = {'s':'1','b':'0', '?':'-1'}\n # drop 1st column (Id) and also 1st row with column names (\"[1:,\") \n train_raw = train_raw_[1:, :]\n test_raw = test_raw_[1:, :] \n \n # Change s(signal) and b(background) for s:1 and b:0, and change '?' for -1\n train_raw[:,1] = np.vectorize(pred_dict.get)(train_raw[:,1].astype(str))\n test_raw[:,1] = np.vectorize(pred_dict.get)(test_raw[:,1].astype(str))\n \n # Divide the dataset in four according to PRI_jet_num feature and cast to float\n train_data_jets = divide_dataset_by_jet(train_raw)\n test_data_jets = divide_dataset_by_jet(test_raw)\n \n # Remove columns with nan values or with standard deviation of 0\n test_data_jets, train_data_jets = clean_features(test_data_jets, train_data_jets, undef)\n \n # Standardize train and test sets to have mean=0 and std=1\n train_data_jets, test_data_jets = standardize(train_data_jets, test_data_jets)\n \n # Replace remaining undefined values by mean, median or zero\n train_data_mean, train_data_median, train_data_null = replace_nan(train_data_jets)\n test_data_mean, test_data_median, test_data_null = replace_nan(test_data_jets)\n \n return train_data_mean, train_data_median, train_data_null, test_data_mean, test_data_median, test_data_null",
"def pre_processing_single(dataset_file_list, pre_process_paras, type='csv'):\r\n # parameters\r\n take_log = pre_process_paras['take_log']\r\n scaling = pre_process_paras['scaling']\r\n dataset_list = []\r\n data_file = dataset_file_list\r\n\r\n if type == 'csv':\r\n dataset = read_csv(data_file, take_log)\r\n elif type == 'txt':\r\n dataset = read_txt(data_file, take_log)\r\n\r\n dataset['gene_exp'] = dataset['gene_exp'].astype(np.float)\r\n\r\n if scaling: # scale to [0,1]\r\n minmax_scale(dataset['gene_exp'], feature_range=(0, 1), axis=1, copy=False)\r\n\r\n\r\n dataset_list.append(dataset)\r\n return dataset_list",
"def preprocess_func(cls, func):\n return func",
"def preprocess(dataset):\n preped_dataset = {}\n preped_dataset['c'] = preprocess_set(dataset['c'])\n preped_dataset['r'] = preprocess_set(dataset['r'])\n preped_dataset['y'] = dataset['y']\n return preped_dataset",
"def preprocessing_input(self, df, prepro_kwargs=None):\n if prepro_kwargs is None:\n assert hasattr(self,\n 'experiment'), 'Set up the prepro_kwargs argument'\n if 'feature_config' in self.experiment.keys():\n prepro_kwargs = self.experiment.feature_config.validation_augmentation\n elif self.experiment['data_config'] is not None:\n prepro_kwargs = self.experiment.data_config.validation_augmentation\n\n prepro = MetaDataGenerator(**prepro_kwargs)\n return prepro.standardize(df)",
"def preprocessing_fn(inputs):\n outputs = {}\n\n # This function is the entry point for your feature engineering with\n # TensorFlow Transform, using the TFX Transform component. In this example\n # the feature engineering is very simple, only applying z-score scaling.\n for key in Features.FEATURE_KEYS:\n outputs[transformed_name(key)] = tft.scale_to_z_score(inputs[key])\n\n # inputs[key]\n\n # tft.scale_to_z_score(inputs[key])\n\n # Do not apply label transformation as it will result in wrong evaluation.\n outputs[transformed_name(\n Features.LABEL_KEY)] = inputs[Features.LABEL_KEY]\n\n return outputs",
"def pre_train(self, dataset, **kwargs):\n\n pass",
"def call_preprocessing(self, train_mains, train_appliances):\n return train_mains, train_appliances",
"def _make_train_input_fn(is_classification):\n\n def _input_fn():\n features = dict(FEATURES_DICT)\n if is_classification:\n labels = CLASSIFICATION_LABELS\n else:\n labels = REGRESSION_LABELS\n return features, labels\n\n return _input_fn",
"def data_preproc(\n image: str,\n entrypoint: str,\n output_path: str,\n input_path: str = \"http://cs231n.stanford.edu/tiny-imagenet-200.zip\",\n input_md5: str = \"90528d7ca1a48142e341f4ef8d21d0de\",\n env: Optional[Dict[str, str]] = None,\n resource: Optional[str] = None,\n name: str = \"datapreproc\",\n) -> specs.AppDef:\n\n env = env or {}\n args = [\n \"--input_path\",\n input_path,\n \"--input_md5\",\n input_md5,\n \"--output_path\",\n output_path,\n ]\n if resource:\n resource_def = specs.named_resources[resource]\n else:\n resource_def = specs.Resource(cpu=1, gpu=0, memMB=1024)\n\n return binary_component(\n name=\"datapreproc_role\",\n entrypoint=entrypoint,\n args=args,\n env=env,\n image=image,\n resource=resource_def,\n )",
"def funcPreprocessing(fncptrain=clsTrain(), fncptest=clsTest(clsTrain), fncpTopFeatures=10):\r\n\r\n UtilityFunc = clsDataFrameUtilityFunctions()\r\n\r\n # fncptrain.COLUMNSTODROP = COLUMNSTODROP\r\n print('Columns to Drop')\r\n print(fncptrain.COLUMNSTODROP)\r\n\r\n # Feature Selection\r\n # addColumnsToDrop\r\n\r\n # addColumnsToDrop = ['f_QuantityBins', 'f_InvoiceDatePartOfDay', 'f_InvoiceDateMonthStart']\r\n # fncptrain.COLUMNSTODROP.extend(addColumnsToDrop)\r\n\r\n dictFuncTrainPreprocessing = {'BasicCheck': BasicCheck.funcTrainBasicChecksAndFilters,\r\n 'FeatureQuantity': FeatureQuantity.funcTrainQuantityFeatureProcessing,\r\n 'FeatureInvoiceDate': FeatureInvoiceDate.funcInvoiceDateDateFeatureExtraction,\r\n 'FeatureStockCode': FeatureStockCode.funcStockCodeFeatureProcessing,\r\n 'FeatureStockCodeLength': FeatureStockCode.funcStockCodeLength,\r\n 'FeatureStockCodeProductReturned': FeatureStockCode.funcTrainStockCodeProductReturned,\r\n 'FeatureCustomerID': FeatureCustomerID.funcCustomerIDFeatureProcessing,\r\n 'FeatureInvoiceNo': FeatureInvoiceNo.funcInvoiceNoFeatureProcessing,\r\n 'FeatureMultiColumnAggregation': CategoryPreprocessing.funcMultiColumnAggregation,\r\n # 'CountryIDAggregation': CountryID.funcCountryIDFeatureProcessing,\r\n 'CountryID': CountryID.CountryIDBucket,\r\n 'DroppingColumns': fncptrain.COLUMNSTODROP\r\n }\r\n # Train Set\r\n fncptrain = UtilityFunc.funcCustomPipeLine(fncptrain, dictFuncTrainPreprocessing)\r\n\r\n # Test Set\r\n dictFuncTestPreprocessing = dictFuncTrainPreprocessing.copy()\r\n if 'BasicCheck' in dictFuncTestPreprocessing.keys():\r\n dictFuncTestPreprocessing.pop('BasicCheck')\r\n if 'FeatureStockCodeProductReturned' in dictFuncTestPreprocessing.keys():\r\n dictFuncTestPreprocessing['FeatureStockCodeProductReturned'] = FeatureStockCode.funcTestStockCodeProductReturned\r\n if 'FeatureQuantity' in dictFuncTestPreprocessing.keys():\r\n dictFuncTestPreprocessing['FeatureQuantity'] = FeatureQuantity.funcTestQuantityFeatureProcessing\r\n if 'DroppingColumns' in dictFuncTrainPreprocessing.keys():\r\n dictFuncTestPreprocessing['DroppingColumns'] = fncptest.clsTrainData.COLUMNSTODROP\r\n fncptest = UtilityFunc.funcCustomPipeLine(fncptest, dictFuncTestPreprocessing)\r\n\r\n print('\\nPerforming Top X category OHE on Description column')\r\n fncptrain = CategoryPreprocessing.funcTopFeatures(fncptrain, 'Description', fncpTopXFeatures=5)\r\n fncptest = CategoryPreprocessing.funcTopFeatures(fncptest, 'Description', fncpTopXFeatures=5)\r\n\r\n print('\\nPerforming One Hot Encoding on Train DataFrame and Test DataFrame')\r\n fncptrain, fncptest = CategoryPreprocessing.funcOHE(fncptrain, fncptest)\r\n\r\n print('\\nPerforming Min Max Normalization on Train DataFrame and Test DataFrame')\r\n fncptrain, fncptest = NumericalPreprocessing.MinMaxNormalizing(fncptrain, fncptest)\r\n\r\n # print('\\nPerforming Polynomial Transformation on Train DataFrame and Test DataFrame')\r\n # fncptrain, fncptest = NumericalPreprocessing.PolynomialTransformation(fncptrain, fncptest, fncpPolynomialInteger=2)\r\n\r\n print('\\nPerforming Feature Selection on Train DataFrame and Test DataFrame')\r\n featureSelection = clsRegressionFeatureSelection(fncptrain, fncptest, fncpTopFeatures)\r\n featureSelection.funcFeatureSelectionUsingFRegression()\r\n\r\n # print('\\nPerforming Power Transformation on the Target Variable')\r\n # fncptrain = TargetPreprocessing.TargetTransformation(fncptrain)\r\n\r\n return fncptrain, fncptest, dictFuncTrainPreprocessing, dictFuncTestPreprocessing",
"def preprocess_main():",
"def preprocess_dataset(self, dataset, params=None):\n if params is None:\n assert self.params_loaded, (\n \"You must either provide parameters or load the model params before preprocessing.\")\n params = self.params\n for key in dataset.keys():\n if dataset[key] is None:\n continue\n if hasattr(params, \"whiten_data\") and params.whiten_data:\n if hasattr(params, \"whiten_method\"):\n if params.whiten_method == \"FT\": # other methods require patching first\n if hasattr(params, \"whiten_batch_size\"):\n batch_size = params.whiten_batch_size\n else:\n batch_size = None\n dataset[key].images, dataset[key].data_mean, dataset[key].w_filter = \\\n dp.whiten_data_batch(dataset[key].images, method=params.whiten_method,\n batch_size=batch_size)\n print(\"INFO:preprocessing:FT Whitened \"+key+\" data\")\n if hasattr(params, \"lpf_data\") and params.lpf_data:\n dataset[key].images, dataset[key].data_mean, dataset[key].lpf_filter = \\\n dp.lpf_data(dataset[key].images, cutoff=params.lpf_cutoff)\n print(\"INFO:preprocessing:Low pass filtered \"+key+\" data\")\n if hasattr(params, \"contrast_normalize\") and params.contrast_normalize:\n if hasattr(params, \"gauss_patch_size\"):\n dataset[key].images = dp.contrast_normalize(dataset[key].images,\n params.gauss_patch_size)\n else:\n dataset[key].images = dp.contrast_normalize(dataset[key].images)\n print(\"INFO:preprocessing:Contrast normalized \"+key+\" data\")\n if hasattr(params, \"standardize_data\") and params.standardize_data:\n if params.data_type == \"mnist\":\n eps = 1e-5\n else:\n eps = None\n dataset[key].images, dataset[key].data_mean, dataset[key].data_std = \\\n dp.standardize_data(dataset[key].images, eps)\n self.data_mean = dataset[key].data_mean\n self.data_std = dataset[key].data_std\n print(\"INFO:preprocessing:Standardized \"+key+\" data\")\n if hasattr(params, \"extract_patches\") and params.extract_patches:\n assert all(key in params.__dict__.keys()\n for key in [\"num_patches\", \"patch_edge_size\", \"overlapping_patches\",\n \"randomize_patches\"]), (\"Insufficient params for patches.\")\n out_shape = (int(params.num_patches), int(params.patch_edge_size),\n int(params.patch_edge_size), dataset[key].num_channels)\n dataset[key].num_examples = out_shape[0]\n dataset[key].reset_counters()\n if hasattr(params, \"patch_variance_threshold\"):\n dataset[key].images = dp.extract_patches(dataset[key].images, out_shape,\n params.overlapping_patches, params.randomize_patches,\n params.patch_variance_threshold, dataset[key].rand_state)\n else:\n dataset[key].images = dp.extract_patches(dataset[key].images, out_shape,\n params.overlapping_patches, params.randomize_patches,\n var_thresh=0, rand_state=dataset[key].rand_state)\n dataset[key].shape = dataset[key].images.shape\n dataset[key].num_rows = dataset[key].shape[1]\n dataset[key].num_cols = dataset[key].shape[2]\n dataset[key].num_channels = dataset[key].shape[3]\n dataset[key].num_pixels = np.prod(dataset[key].shape[1:])\n print(\"INFO:preprocessing:Extracted patches from \"+key+\" data\")\n if hasattr(params, \"whiten_data\") and params.whiten_data:\n if hasattr(params, \"whiten_method\") and params.whiten_method != \"FT\":\n dataset[key].images, dataset[key].data_mean, dataset[key].w_filter = \\\n dp.whiten_data(dataset[key].images, method=params.whiten_method)\n print(\"INFO:preprocessing:Whitened \"+key+\" data\")\n if hasattr(params, \"norm_data\") and params.norm_data:\n dataset[key].images, dataset[key].data_max = dp.normalize_data_with_max(dataset[key].images)\n self.data_max = 
dataset[key].data_max\n print(\"INFO:preprocessing:Normalized \"+key+\" data with maximum\")\n if hasattr(params, \"rescale_data\") and params.rescale_data:\n dataset[key].images, dataset[key].data_min, dataset[key].data_max = dp.rescale_data_to_one(dataset[key].images)\n self.data_max = dataset[key].data_max\n self.data_min = dataset[key].data_min\n print(\"INFO:preprocessing:Rescaled each \"+key+\" datapoint to one\")\n if hasattr(params, \"center_data\") and params.center_data:\n dataset[key].images, dataset[key].data_mean = dp.center_data(dataset[key].images,\n use_dataset_mean=True)\n self.data_mean = dataset[key].data_mean\n print(\"INFO:preprocessing:Centered \"+key+\" data\")\n return dataset",
"def build_preprocess_fn(vocab,\n so_nwp_sequence_length=20,\n batch_size=128,\n so_nwp_num_oov_buckets=1,\n debug=False):\n\n def preprocess_fn(org_dataset):\n to_ids = build_to_ids_fn(\n vocab=vocab,\n max_sequence_length=so_nwp_sequence_length,\n num_oov_buckets=so_nwp_num_oov_buckets)\n dataset = org_dataset.map(\n to_ids, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n id_dataset = org_dataset.map(\n lambda x: x['client_id'],\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n dataset = dataset.padded_batch(\n batch_size, padded_shapes=[so_nwp_sequence_length + 1])\n dataset = dataset.map(\n split_input_target, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n dataset = tf.data.Dataset.zip((dataset, id_dataset.batch(batch_size)))\n\n def _reorder_id(x, idx):\n return (x[0], idx), x[1]\n\n dataset = dataset.map(_reorder_id)\n\n if debug:\n logging.info('Test run ...')\n return dataset.take(100)\n else:\n return dataset\n\n return preprocess_fn"
]
| [
"0.6911369",
"0.6907225",
"0.67146105",
"0.64246845",
"0.63995093",
"0.638671",
"0.63772017",
"0.63574916",
"0.62886465",
"0.6257666",
"0.62195",
"0.60264665",
"0.60262877",
"0.60111964",
"0.60054433",
"0.5993835",
"0.59816",
"0.59801525",
"0.59755504",
"0.5967095",
"0.5960033",
"0.59577596",
"0.5938251",
"0.5922912",
"0.592131",
"0.592024",
"0.5884401",
"0.5874425",
"0.5845579",
"0.5833333"
]
| 0.7066413 | 0 |
Configures the federated runner spec. | def build_federated_runner_spec(self) -> training_specs.RunnerSpecFederated:
task_spec = self._task_spec
train_preprocess_fn = _create_preprocess_fn(
num_epochs=task_spec.client_epochs_per_round,
batch_size=task_spec.client_batch_size,
merge_case=self._merge_case,
use_cache=True,
use_prefetch=True)
part_train_cd = self._part_train_cd_raw.preprocess(train_preprocess_fn)
iterative_process = task_spec.iterative_process_builder(
self._tff_model_builder)
training_process = tff.simulation.compose_dataset_computation_with_iterative_process(
part_train_cd.dataset_computation, iterative_process)
client_ids_fn = functools.partial(
tff.simulation.build_uniform_sampling_fn(
part_train_cd.client_ids,
replace=False,
random_seed=task_spec.shared_random_seed),
size=task_spec.train_clients_per_round)
# We convert the output to a list (instead of an np.ndarray) so that it can
# be used as input to the iterative process.
client_sampling_fn = lambda x: list(client_ids_fn(x))
training_process.get_model_weights = iterative_process.get_model_weights
(part_train_eval_fn, part_val_fn, unpart_fn,
_) = trainer_utils.create_federated_eval_fns(
tff_model_builder=self._tff_model_builder,
metrics_builder=functools.partial(
_metrics_builder_generic, tff_training=False),
part_train_eval_cd=self._part_train_eval_cd,
part_val_cd=self._part_val_cd,
unpart_cd=self._unpart_cd,
test_cd=None,
stat_fns=eval_metric_distribution.ALL_STAT_FNS,
rounds_per_eval=task_spec.rounds_per_eval,
part_clients_per_eval=task_spec.part_clients_per_eval,
unpart_clients_per_eval=task_spec.unpart_clients_per_eval,
test_clients_for_eval=task_spec.test_clients_for_eval,
resample_eval_clients=task_spec.resample_eval_clients,
eval_clients_random_seed=task_spec.shared_random_seed)
return training_specs.RunnerSpecFederated(
iterative_process=training_process,
client_datasets_fn=client_sampling_fn,
part_train_eval_fn=part_train_eval_fn,
part_val_fn=part_val_fn,
unpart_fn=unpart_fn,
test_fn=None) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_federated_runner_spec(self) -> training_specs.RunnerSpecFederated:\n task_spec = self._task_spec\n\n train_preprocess_fn = _create_preprocess_fn(\n num_epochs=task_spec.client_epochs_per_round,\n batch_size=task_spec.client_batch_size,\n use_cache=True,\n use_prefetch=True)\n part_train_cd = self._part_train_cd_raw.preprocess(train_preprocess_fn)\n\n iterative_process = task_spec.iterative_process_builder(\n self._tff_model_builder)\n training_process = tff.simulation.compose_dataset_computation_with_iterative_process(\n part_train_cd.dataset_computation, iterative_process)\n\n client_ids_fn = functools.partial(\n tff.simulation.build_uniform_sampling_fn(\n part_train_cd.client_ids,\n replace=False,\n random_seed=task_spec.shared_random_seed),\n size=task_spec.train_clients_per_round)\n\n # We convert the output to a list (instead of an np.ndarray) so that it can\n # be used as input to the iterative process.\n\n client_sampling_fn = lambda x: list(client_ids_fn(x))\n training_process.get_model_weights = iterative_process.get_model_weights\n\n (part_train_eval_fn, part_val_fn, unpart_fn,\n _) = trainer_utils.create_federated_eval_fns(\n tff_model_builder=self._tff_model_builder,\n metrics_builder=functools.partial(\n _metrics_builder_generic, tff_training=False),\n part_train_eval_cd=self._part_train_eval_cd,\n part_val_cd=self._part_val_cd,\n unpart_cd=self._unpart_cd,\n test_cd=None,\n stat_fns=eval_metric_distribution.ALL_STAT_FNS,\n rounds_per_eval=task_spec.rounds_per_eval,\n part_clients_per_eval=task_spec.part_clients_per_eval,\n unpart_clients_per_eval=task_spec.unpart_clients_per_eval,\n test_clients_for_eval=task_spec.test_clients_for_eval,\n resample_eval_clients=task_spec.resample_eval_clients,\n eval_clients_random_seed=task_spec.shared_random_seed)\n\n return training_specs.RunnerSpecFederated(\n iterative_process=training_process,\n client_datasets_fn=client_sampling_fn,\n part_train_eval_fn=part_train_eval_fn,\n part_val_fn=part_val_fn,\n unpart_fn=unpart_fn,\n test_fn=None)",
"def configure_training_federated(\n task_spec: training_specs.TaskSpecFederated,\n *, # Caller passes below args by name.\n model: str = 'resnet18',\n only_digits: bool = False,\n merge_case: bool = False,\n) -> training_specs.RunnerSpecFederated:\n return _EmnistCharacterTask(\n task_spec,\n model=model,\n only_digits=only_digits,\n merge_case=merge_case,\n ).build_federated_runner_spec()",
"def configure_test(self, test, config_json):\n pass",
"def setup(self, run, run_id):\n\n raise NotImplementedError",
"def test_configurator(self):\n runner = Runner(YamlManifest(manifest))\n run1 = runner.run(JobOptions(resource=\"test1\"))\n assert not run1.unexpectedAbort, run1.unexpectedAbort.getStackTrace()\n assert len(run1.workDone) == 1, run1.workDone\n result = list(run1.workDone.values())[0].result\n self.assertEqual(result.outputs, {\"fact1\": \"test1\", \"fact2\": \"test\"})\n self.assertEqual(result.result.get(\"stdout\"), sys.executable)\n assert run1.status == Status.ok, run1.summary()",
"def setUp(self):\n os.environ[\"PADDLE_TRAINERS_NUM\"] = \"2\"\n os.environ[\n \"PADDLE_PSERVERS_IP_PORT_LIST\"\n ] = \"127.0.0.1:36001,127.0.0.2:36001\"",
"def configure(self):\n if Config().is_edge_server():\n logging.info(\"Configuring edge server #%d as a %s server.\",\n Config().args.id,\n Config().algorithm.type)\n logging.info(\"Training with %s local aggregation rounds.\",\n Config().algorithm.local_rounds)\n\n if hasattr(Config().server, 'do_test'):\n if not Config().clients.do_test or Config().server.do_test:\n datasource = datasources_registry.get()\n self.testset = datasource.get_test_set()\n\n self.load_trainer()\n\n if hasattr(Config(), 'results'):\n result_dir = Config().result_dir\n result_csv_file = f'{result_dir}/result_{Config().args.id}.csv'\n csv_processor.initialize_csv(result_csv_file,\n self.recorded_items, result_dir)\n\n else:\n super().configure()\n\n if hasattr(Config().server, 'do_test'):\n if Config().clients.do_test and Config().server.do_test:\n datasource = datasources_registry.get()\n self.testset = datasource.get_test_set()",
"def setUpConfig(self):\n pass",
"def _configureActorFactory(self):\n raise NotImplementedError",
"def runner_setup():\n runner = ClassicRunner()\n yield runner",
"def setUp(self):\n self.cfg_path = \"acloud_unittest.config\"\n file_write = open(self.cfg_path, 'w')\n file_write.write(_CreateCfgFile().strip())\n file_write.close()\n self.gcp_env_runner = gcp_setup_runner.GcpTaskRunner(self.cfg_path)\n self.gcloud_runner = gcp_setup_runner.GoogleSDKBins(\"\")",
"def fixture_runner():\n return CliRunner()",
"def initialize(self, config, runner_service):\n self.config = config\n self.runner_service = runner_service",
"def override_config(self):\n super(AuthedConfigFixture, self).override_config()\n self.conf.register_opts(auth_token._OPTS, group='keystone_authtoken')\n self.conf.set_override('auth_uri', 'http://127.0.0.1:35357',\n group='keystone_authtoken')",
"def setup_with_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg1'))\n\n time.sleep(2)\n self.add_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg1')\n return mac, ip",
"def setUp(self):\n super().setUp()\n self.runner = CliRunner()",
"def pytest_configure(config):\n config.addinivalue_line(\n \"markers\",\n \"serial: Tests that will not execute with more than 1 MPI process\")\n config.addinivalue_line(\"markers\",\n \"gpu: Tests that should only run on the gpu.\")\n config.addinivalue_line(\n \"markers\",\n \"cupy_optional: tests that should pass with and without CuPy.\")\n config.addinivalue_line(\"markers\", \"cpu: Tests that only run on the CPU.\")\n config.addinivalue_line(\"markers\", \"gpu: Tests that only run on the GPU.\")",
"def configure_training_federated(\n task_spec: training_specs.TaskSpecFederated,\n *, # Caller passes below args by name.\n resnet_layers: int = 18,\n num_classes: int = 100,\n l2_weight_decay: float = 1e-4,\n) -> training_specs.RunnerSpecFederated:\n\n return _Cifar100ImageTask(\n task_spec,\n resnet_layers=resnet_layers,\n num_classes=num_classes,\n l2_weight_decay=l2_weight_decay).build_federated_runner_spec()",
"def runner_setup():\n concurrent_sessions = 5\n runner = VisualGridRunner(concurrent_sessions)\n yield runner",
"def setup_method(self):\n self.hass = get_test_home_assistant()\n\n self.config = {\n ip.DOMAIN: {\n \"platform\": \"microsoft_face_identify\",\n \"source\": {\"entity_id\": \"camera.demo_camera\", \"name\": \"test local\"},\n \"group\": \"Test Group1\",\n },\n \"camera\": {\"platform\": \"demo\"},\n mf.DOMAIN: {\"api_key\": \"12345678abcdef6\"},\n }\n\n self.endpoint_url = f\"https://westus.{mf.FACE_API_URL}\"",
"def config(self, **kw):\n self.cfg_fixture.config(**kw)",
"def _config_set(self):\n p = self._params\n self._config = tf.estimator.RunConfig(save_checkpoints_steps = p.save_checkpoints_steps,\n keep_checkpoint_max = p.keep_checkpoint_max,\n save_summary_steps = p.save_summary_steps\n )",
"def pytest_configure_node(node: Node):\n node.workerinput[\"options\"] = { # type: ignore\n \"dist\": node.config.option.dist, # type: ignore\n \"numprocesses\": node.config.option.numprocesses, # type: ignore\n }",
"def configure(self, args):\n pass",
"def configure(args):\n print('Configures HPC fleet with given name \"{}\"'.format(args))",
"def configure_specie(self, specie):\r\n pass",
"def setUp(self):\n self.server_address = \"http://localhost:3030/$/\"\n self.request_address = \"http://localhost:3030/ds\"\n self.api = \"http://localhost:4032/\"\n self.version = \"0.2\"",
"def setUp(self):\n _, instance_path, shared_inputs = sys.argv\n app = lnt.server.ui.app.App.create_standalone(instance_path)\n app.testing = True\n self.client = app.test_client()\n self.shared_inputs = shared_inputs",
"def launch(config):\n \n launch_with_configs([config])",
"def test_mnist():\n env = os.environ.copy()\n if not \"CUDA_VISIBLE_DEVICES\" in env:\n env[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n subprocess.run(\n \"edflow -b template_tfe/config.yaml -t --max_batcher_per_epoch --num_epochs 1\",\n shell=True,\n check=True,\n env=env,\n )"
]
| [
"0.66514647",
"0.65894425",
"0.60085166",
"0.59400094",
"0.5837415",
"0.5833026",
"0.5818086",
"0.57228833",
"0.57128286",
"0.56866115",
"0.56454456",
"0.56344074",
"0.5603153",
"0.5552686",
"0.5537055",
"0.55300355",
"0.55255353",
"0.5486335",
"0.5477406",
"0.5473784",
"0.54659367",
"0.5458545",
"0.5450977",
"0.5448006",
"0.5447769",
"0.5438111",
"0.54367614",
"0.5427238",
"0.5426032",
"0.5416082"
]
| 0.66257405 | 1 |
Configures federated training for the EMNIST character recognition task. This method will load and preprocess datasets and construct a model used for the task. It then uses `iterative_process_builder` to create an iterative process compatible with `tff.simulation.run_training_process`. | def configure_training_federated(
task_spec: training_specs.TaskSpecFederated,
*, # Caller passes below args by name.
model: str = 'resnet18',
only_digits: bool = False,
merge_case: bool = False,
) -> training_specs.RunnerSpecFederated:
return _EmnistCharacterTask(
task_spec,
model=model,
only_digits=only_digits,
merge_case=merge_case,
).build_federated_runner_spec() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _train_model(\n self,\n dataset: DatasetEntity,\n ):\n logger.info(\"init data cfg.\")\n self._data_cfg = ConfigDict(data=ConfigDict())\n\n for cfg_key, subset in zip(\n [\"train\", \"val\", \"unlabeled\"],\n [Subset.TRAINING, Subset.VALIDATION, Subset.UNLABELED],\n ):\n subset = get_dataset(dataset, subset)\n if subset and self._data_cfg is not None:\n self._data_cfg.data[cfg_key] = ConfigDict(\n otx_dataset=subset,\n labels=self._labels,\n )\n\n self._is_training = True\n\n self._init_task()\n\n cfg = self.configure(True, None)\n logger.info(\"train!\")\n\n timestamp = time.strftime(\"%Y%m%d_%H%M%S\", time.localtime())\n\n # Environment\n logger.info(f\"cfg.gpu_ids = {cfg.gpu_ids}, distributed = {cfg.distributed}\")\n env_info_dict = collect_env()\n env_info = \"\\n\".join([(f\"{k}: {v}\") for k, v in env_info_dict.items()])\n dash_line = \"-\" * 60 + \"\\n\"\n logger.info(f\"Environment info:\\n{dash_line}{env_info}\\n{dash_line}\")\n\n # Data\n datasets = [build_dataset(cfg.data.train)]\n\n if self._train_type == TrainType.Semisupervised:\n # forward the knowledge of num iters per epoch to model for filter loss\n bs_per_gpu = cfg.data.train_dataloader[\"samples_per_gpu\"]\n actual_bs = bs_per_gpu * torch.distributed.get_world_size() if cfg.distributed else bs_per_gpu\n cfg.model.num_iters_per_epoch = math.ceil(len(datasets[0]) / actual_bs)\n\n # FIXME: Currently segmentor does not support multi batch evaluation.\n # For the Self-SL case, there is no val data. So, need to check the\n\n if \"val\" in cfg.data and \"val_dataloader\" in cfg.data:\n cfg.data.val_dataloader[\"samples_per_gpu\"] = 1\n\n # Target classes\n if \"task_adapt\" in cfg:\n target_classes = cfg.task_adapt.final\n else:\n target_classes = datasets[0].CLASSES\n\n # Metadata\n meta = dict()\n meta[\"env_info\"] = env_info\n meta[\"seed\"] = cfg.seed\n meta[\"exp_name\"] = cfg.work_dir\n if cfg.checkpoint_config is not None:\n cfg.checkpoint_config.meta = dict(\n mmseg_version=__version__ + get_git_hash()[:7],\n CLASSES=target_classes,\n )\n\n # Model\n model = self.build_model(cfg, fp16=cfg.get(\"fp16\", False), is_training=self._is_training)\n model.train()\n model.CLASSES = target_classes\n\n if cfg.distributed:\n convert_sync_batchnorm(model)\n\n validate = bool(cfg.data.get(\"val\", None))\n\n if self._hyperparams.learning_parameters.auto_adapt_batch_size != BatchSizeAdaptType.NONE:\n train_func = partial(train_segmentor, meta=deepcopy(meta), model=deepcopy(model), distributed=False)\n adapt_batch_size(\n train_func,\n cfg,\n datasets,\n isinstance(self, NNCFBaseTask), # nncf needs eval hooks\n not_increase=(self._hyperparams.learning_parameters.auto_adapt_batch_size == BatchSizeAdaptType.SAFE),\n )\n\n train_segmentor(\n model,\n datasets,\n cfg,\n distributed=cfg.distributed,\n validate=validate,\n timestamp=timestamp,\n meta=meta,\n )\n\n # Save outputs\n output_ckpt_path = os.path.join(cfg.work_dir, \"latest.pth\")\n best_ckpt_path = glob.glob(os.path.join(cfg.work_dir, \"best_mDice_*.pth\"))\n if len(best_ckpt_path) > 0:\n output_ckpt_path = best_ckpt_path[0]\n best_ckpt_path = glob.glob(os.path.join(cfg.work_dir, \"best_mIoU_*.pth\"))\n if len(best_ckpt_path) > 0:\n output_ckpt_path = best_ckpt_path[0]\n return dict(\n final_ckpt=output_ckpt_path,\n )",
"def _train_model(self):\n self.experiment = EpisodicExperiment(self.task, self.agent)\n n_epochs = int(self.rl_params.n_training_episodes / self.rl_params.n_episodes_per_epoch)\n logger.debug(\"Fitting user model over {} epochs, each {} episodes, total {} episodes.\"\n .format(n_epochs, self.rl_params.n_episodes_per_epoch, n_epochs*self.rl_params.n_episodes_per_epoch))\n for i in range(n_epochs):\n logger.debug(\"RL epoch {}\".format(i))\n self.experiment.doEpisodes(self.rl_params.n_episodes_per_epoch)\n self.agent.learn()\n self.agent.reset() # reset buffers",
"def build_federated_runner_spec(self) -> training_specs.RunnerSpecFederated:\n task_spec = self._task_spec\n\n train_preprocess_fn = _create_preprocess_fn(\n num_epochs=task_spec.client_epochs_per_round,\n batch_size=task_spec.client_batch_size,\n use_cache=True,\n use_prefetch=True)\n part_train_cd = self._part_train_cd_raw.preprocess(train_preprocess_fn)\n\n iterative_process = task_spec.iterative_process_builder(\n self._tff_model_builder)\n training_process = tff.simulation.compose_dataset_computation_with_iterative_process(\n part_train_cd.dataset_computation, iterative_process)\n\n client_ids_fn = functools.partial(\n tff.simulation.build_uniform_sampling_fn(\n part_train_cd.client_ids,\n replace=False,\n random_seed=task_spec.shared_random_seed),\n size=task_spec.train_clients_per_round)\n\n # We convert the output to a list (instead of an np.ndarray) so that it can\n # be used as input to the iterative process.\n\n client_sampling_fn = lambda x: list(client_ids_fn(x))\n training_process.get_model_weights = iterative_process.get_model_weights\n\n (part_train_eval_fn, part_val_fn, unpart_fn,\n _) = trainer_utils.create_federated_eval_fns(\n tff_model_builder=self._tff_model_builder,\n metrics_builder=functools.partial(\n _metrics_builder_generic, tff_training=False),\n part_train_eval_cd=self._part_train_eval_cd,\n part_val_cd=self._part_val_cd,\n unpart_cd=self._unpart_cd,\n test_cd=None,\n stat_fns=eval_metric_distribution.ALL_STAT_FNS,\n rounds_per_eval=task_spec.rounds_per_eval,\n part_clients_per_eval=task_spec.part_clients_per_eval,\n unpart_clients_per_eval=task_spec.unpart_clients_per_eval,\n test_clients_for_eval=task_spec.test_clients_for_eval,\n resample_eval_clients=task_spec.resample_eval_clients,\n eval_clients_random_seed=task_spec.shared_random_seed)\n\n return training_specs.RunnerSpecFederated(\n iterative_process=training_process,\n client_datasets_fn=client_sampling_fn,\n part_train_eval_fn=part_train_eval_fn,\n part_val_fn=part_val_fn,\n unpart_fn=unpart_fn,\n test_fn=None)",
"def init_processes(cfg, local_rank, dataset, fn, backend='nccl'):\n addr = \"localhost\"\n port = cfg.training.master_port\n os.environ['MASTER_ADDR'] = addr\n os.environ['MASTER_PORT'] = str(port)\n dist.init_process_group(backend, rank=0 + local_rank,\n world_size=cfg.training.gpus)\n\n device = torch.device(\"cuda:{}\".format(local_rank))\n\n fn(cfg, local_rank, device, corpus_path=dataset)",
"def build_federated_runner_spec(self) -> training_specs.RunnerSpecFederated:\n\n task_spec = self._task_spec\n\n train_preprocess_fn = _create_preprocess_fn(\n num_epochs=task_spec.client_epochs_per_round,\n batch_size=task_spec.client_batch_size,\n merge_case=self._merge_case,\n use_cache=True,\n use_prefetch=True)\n part_train_cd = self._part_train_cd_raw.preprocess(train_preprocess_fn)\n iterative_process = task_spec.iterative_process_builder(\n self._tff_model_builder)\n training_process = tff.simulation.compose_dataset_computation_with_iterative_process(\n part_train_cd.dataset_computation, iterative_process)\n client_ids_fn = functools.partial(\n tff.simulation.build_uniform_sampling_fn(\n part_train_cd.client_ids,\n replace=False,\n random_seed=task_spec.shared_random_seed),\n size=task_spec.train_clients_per_round)\n\n # We convert the output to a list (instead of an np.ndarray) so that it can\n # be used as input to the iterative process.\n client_sampling_fn = lambda x: list(client_ids_fn(x))\n training_process.get_model_weights = iterative_process.get_model_weights\n\n (part_train_eval_fn, part_val_fn, unpart_fn,\n _) = trainer_utils.create_federated_eval_fns(\n tff_model_builder=self._tff_model_builder,\n metrics_builder=functools.partial(\n _metrics_builder_generic, tff_training=False),\n part_train_eval_cd=self._part_train_eval_cd,\n part_val_cd=self._part_val_cd,\n unpart_cd=self._unpart_cd,\n test_cd=None,\n stat_fns=eval_metric_distribution.ALL_STAT_FNS,\n rounds_per_eval=task_spec.rounds_per_eval,\n part_clients_per_eval=task_spec.part_clients_per_eval,\n unpart_clients_per_eval=task_spec.unpart_clients_per_eval,\n test_clients_for_eval=task_spec.test_clients_for_eval,\n resample_eval_clients=task_spec.resample_eval_clients,\n eval_clients_random_seed=task_spec.shared_random_seed)\n\n return training_specs.RunnerSpecFederated(\n iterative_process=training_process,\n client_datasets_fn=client_sampling_fn,\n part_train_eval_fn=part_train_eval_fn,\n part_val_fn=part_val_fn,\n unpart_fn=unpart_fn,\n test_fn=None)",
"def train_setup(additional_arg_parser=None, args=None):\n if args is None:\n args = parse_input_arguments(additional_arg_parser)\n if args.do_eval or args.do_test:\n args.load_pretrained = True\n if args.load_pretrained and args.pretrained_checkpoint == '':\n raise ValueError('Must provide --pretrained_checkpoint when using --load_pretrained')\n if args.eval_batch_size == 0:\n args.eval_batch_size = args.train_batch_size\n if args.load_pretrained:\n args.save_dir = \"/\".join(args.pretrained_checkpoint.split('/')[:-1])\n else:\n args.save_dir = get_save_dir(args.save_dir, args.run_name)\n if not os.path.exists(args.save_dir):\n os.makedirs(args.save_dir)\n args.start_epoch = 0\n args.start_step = 0\n\n split_name = 'train' if args.do_train else 'validation' if args.do_eval else 'test'\n logger = get_logger(args.save_dir, 'log_train')\n\n logger.info(\"local_rank: %d, node_index: %d, gpu_per_node: %d\"%(args.local_rank, args.node_index, args.gpu_per_node))\n # Setup CUDA, GPU & distributed training\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = torch.cuda.device_count()\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend='nccl')\n args.local_rank += args.node_index * args.gpu_per_node\n args.n_gpu = 1\n args.device = device\n\n logger.info(\"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s, world size: %s\",\n args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16,\n torch.distributed.get_world_size() if args.local_rank != -1 else 1)\n\n set_seed(args)\n\n return args, logger",
"def train_parallel(config):\n _setup_parallel_env()\n print(f\" | Starting training on {os.getenv('RANK_SIZE', None)} devices.\")\n\n pre_train_dataset = load_dataset(\n data_files=config.pre_train_dataset,\n batch_size=config.batch_size,\n sink_mode=config.dataset_sink_mode,\n rank_size=MultiDevice.get_group_size(),\n rank_id=MultiDevice.get_rank()\n ) if config.pre_train_dataset else None\n fine_tune_dataset = load_dataset(\n data_files=config.fine_tune_dataset,\n batch_size=config.batch_size,\n sink_mode=config.dataset_sink_mode,\n rank_size=MultiDevice.get_group_size(),\n rank_id=MultiDevice.get_rank()\n ) if config.fine_tune_dataset else None\n test_dataset = load_dataset(\n data_files=config.test_dataset,\n batch_size=config.batch_size,\n sink_mode=config.dataset_sink_mode,\n rank_size=MultiDevice.get_group_size(),\n rank_id=MultiDevice.get_rank()\n ) if config.test_dataset else None\n\n _build_training_pipeline(config=config,\n pre_training_dataset=pre_train_dataset,\n fine_tune_dataset=fine_tune_dataset,\n test_dataset=test_dataset)",
"def init_training(self):\n\n if not os.path.exists(self._model_root_path):\n os.makedirs(self._model_root_path)\n\n # Only initialize once!\n if self._model is None:\n self._model = TrainableAimbotModel(self._config, self._fov,\n os.path.join(self._model_root_path, 'aimbot_model.tf'))\n\n if not os.path.isfile(self._train_data_tfrecord_path) and not os.path.isfile(self._test_data_tfrecord_path):\n # Only create if not existing\n images_labels = _get_annotations_and_images(self._image_path)\n images_labels_train, images_labels_test = train_test_split(images_labels, shuffle=True, test_size=0.20)\n\n self._model.create_tfrecords(self._train_data_tfrecord_path, images_labels_train)\n self._model.create_tfrecords(self._test_data_tfrecord_path, images_labels_test)\n\n self._train_data_set = self._model.create_dataset(self._train_data_tfrecord_path, augment=True, shuffle=True)\n self._test_data_set = self._model.create_dataset(self._train_data_tfrecord_path)",
"def train(self, batch_training=False):\n raise NotImplementedError",
"def initialize(self, training=True, force_load_plans=False, num_epochs=500, prev_trainer=None):\n # -- The Trainer embodies the actual model that will be used as foundation to continue training on -- #\n # -- It should be already initialized since the output_folder will be used. If it is None, the model will be initialized and trained. -- #\n # -- Further the trainer needs to be of class nnUNetTrainerV2 or nnUNetTrainerMultiHead for this method, nothing else. -- #\n # -- Set prev_trainer correctly as class instance and not a string -- #\n self.trainer = prev_trainer\n\n # -- Set nr_epochs to provided number -- #\n self.max_num_epochs = num_epochs\n\n # -- Initialize the trained_on_tasks and load trained_on_folds -- #\n trained_on_tasks = list()\n trained_on_folds = self.already_trained_on.get(str(self.fold), list())\n \n # -- Reset the trained_on_tasks if the trained_on_folds exist for the current fold -- #\n if isinstance(trained_on_folds, dict):\n trained_on_tasks = trained_on_folds.get('finished_training_on', list())\n\n # -- The new_trainer indicates if the model is a new multi head model, -- #\n # -- ie. if it has been trained on only one task so far (True) or on more than one (False) -- #\n if len(trained_on_tasks) > 1:\n self.new_trainer = False\n else:\n self.new_trainer = True\n \n super().initialize(training, force_load_plans) # --> This updates the corresponding variables automatically since we inherit this class",
"def start_training(self):\n self.training = True",
"def main(config_file):\n \n # Load the configuration from json file\n assert os.path.isfile(\n config_file), \"No json configuration file found at {}\".format(config_file)\n config = utils.LoadConfig(config_file)\n\n # use GPU if available\n config.cuda = torch.cuda.is_available()\n\n # Set the random seed for reproducible experiments\n torch.manual_seed(config.general['seed'])\n if config.cuda:\n torch.cuda.manual_seed(config.general['seed'])\n \n #Generate output path if it does not exist\n out_dir = config.general['out_dir']\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n \n #Save config file\n config.save(os.path.join(out_dir, 'experiment_config.json'))\n\n # Set the logger\n utils.set_logger(os.path.join(out_dir, 'train.log'))\n\n # Create the input data pipeline\n logging.info(\"Loading the datasets...\")\n\n # Load data\n train, test = read_and_format_full_dataset()\n train_kaggle, test_kaggle = read_and_format_kaggle_dataset()\n \n #Using kaggle's training data for training\n train, val = split_train_val_partition(train_kaggle, config.data['split_train_percentage'],config.general['seed'])\n \n #Adding data augmentation to training\n # train = MNISTDatasetLabels(train,\n # transform=transforms.Compose([\n # Normalization(),\n # transforms.RandomHorizontalFlip(0.5),\n # transforms.RandomVerticalFlip(0.5),\n # transforms.RandomPerspective(),\n # transforms.RandomRotation(30)])) \n \n train = MNISTDatasetLabels(train,\n transform=transforms.Compose([\n Normalization(),\n transforms.RandomRotation(15)])) \n \n val = MNISTDatasetLabels(val,\n transform=transforms.Compose([Normalization()])) \n \n test = MNISTDatasetLabels(test,\n transform=transforms.Compose([Normalization()])) \n \n test_kaggle = MNISTDatasetNoLabels(test_kaggle,\n transform=transforms.Compose([Normalization()])) \n \n train_dataloader = DataLoader(train, batch_size=config.CNN_train['batch_size'], shuffle=True, num_workers=config.CNN_train['num_workers'])\n val_dataloader = DataLoader(val, batch_size=config.CNN_train['batch_size'], shuffle=True, num_workers=config.CNN_train['num_workers'])\n test_dataloader = DataLoader(test, batch_size=config.CNN_train['batch_size'], shuffle=False, num_workers=config.CNN_train['num_workers'])\n test_kaggle_dataloader = DataLoader(test_kaggle, batch_size=config.CNN_train['batch_size'], shuffle=False, num_workers=config.CNN_train['num_workers'])\n\n logging.info(\"- done.\")\n \n # Train the model\n logging.info(\"Starting training for {} epoch(s)\".format(config.CNN_train['num_epochs']))\n train_wraper(train_dataloader, val_dataloader, config)\n logging.info(\"- done.\")\n \n #Evaluate the model test set \n # Using Kaggle's test set unknown labels (can have true labels or not (Kaggle's case))\n logging.info(\"Starting the model evaluation on Kaggle's test data\")\n eval_out_kaggle = evaluate_return_labels(test_kaggle_dataloader, config)\n #Save the results\n eval_out_kaggle.to_csv(os.path.join(out_dir, 'test_result_kaggle.csv'),index=False)\n logging.info(\"- done.\")\n \n # Using test set with known labels\n logging.info(\"Starting the model evaluation on test data\")\n eval_out = evaluate_return_labels(test_dataloader, config)\n #Save the results\n eval_out.to_csv(os.path.join(out_dir, 'test_result.csv'),index=False)\n logging.info(\"- done.\")\n \n # Compute metrics\n if 'TrueLabel' in eval_out:\n #Evaluate the model with test set (known labels)\n logging.info(\"Calculating final metrics\")\n # Get unique true labels in dataset\n classes = eval_out.TrueLabel.unique()\n # 
Sort them\n classes.sort()\n # Calculate accuracy\n accuracy_total = accuracy(eval_out)\n # Calculate error rate\n error_rate_total = error_rate(eval_out)\n # Confussion matrix\n c_matrix = confusion_matrix(eval_out, classes)\n plot_confusion_matrix(c_matrix, classes, 'CNN', out_dir)\n # Overall metrics\n metrics_per_class, metrics_overall = confusion_matrix_metrics(c_matrix)\n metrics_overall['accuracy_percent'] = accuracy_total\n metrics_overall['error_rate_percent'] = error_rate_total\n \n metrics_per_class.to_csv(os.path.join(out_dir, 'CNN_results_per_class.csv'))\n metrics_overall.to_csv(os.path.join(out_dir, 'CNN_results_overall.csv'))\n \n logging.info(\"- done.\")",
"def training(self) -> None:\n self.compile_model()\n self.train_epoch()\n self.agent.save()",
"def train(cfg):\n # Set up environment.\n distributed.init_distributed_training(cfg)\n\n # Set random seed from configs.\n np.random.seed(cfg.RNG_SEED)\n torch.manual_seed(cfg.RNG_SEED)\n\n # Print config.\n if distributed.is_master_proc():\n print(\"Train with config:\")\n print(pprint.pformat(cfg))\n\n # Create train and val loaders.\n train_dataset = PanopticNarrativeGroundingDataset(cfg, cfg.DATA.TRAIN_SPLIT, train=True)\n train_loader = DataLoader(\n train_dataset,\n batch_size=int(cfg.TRAIN.BATCH_SIZE / max(1, cfg.NUM_GPUS)),\n shuffle=(False if cfg.NUM_GPUS > 1 else True),\n sampler=(DistributedSampler(train_dataset) if cfg.NUM_GPUS > 1 else None),\n num_workers=cfg.DATA_LOADER.NUM_WORKERS,\n pin_memory=cfg.DATA_LOADER.PIN_MEMORY\n )\n if cfg.DATA.VAL_SPLIT is not None:\n val_dataset = PanopticNarrativeGroundingDataset(cfg, cfg.DATA.VAL_SPLIT, train=False)\n val_loader = DataLoader(\n val_dataset,\n batch_size=(1 if cfg.NUM_GPUS > 1 else cfg.TRAIN.BATCH_SIZE),\n shuffle=False,\n sampler=(DistributedSampler(val_dataset) if cfg.NUM_GPUS > 1 else None),\n num_workers=cfg.DATA_LOADER.NUM_WORKERS,\n pin_memory=cfg.DATA_LOADER.PIN_MEMORY\n )\n\n # Build the model and print model statistics.\n # Use cuda if available\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n # Construct the model\n model = PanopticNarrativeGroundingBaseline(cfg, device=device)\n # Determine the GPU used by the current process\n cur_device = torch.cuda.current_device()\n # Transfer the model to the current GPU device\n model = model.cuda(device=cur_device)\n # Use multi-process data parallel model in the multi-gpu setting\n if cfg.NUM_GPUS > 1:\n # Make model replica operate on the current device\n model = torch.nn.parallel.DistributedDataParallel(\n module=model, device_ids=[cur_device], output_device=cur_device,\n find_unused_parameters=True\n )\n if cfg.LOG_MODEL_INFO and distributed.is_master_proc():\n print(\"Model:\\n{}\".format(model))\n print(\"Params: {:,}\".format(np.sum([p.numel() for p in model.parameters()]).item()))\n print(\"Mem: {:,} MB\".format(torch.cuda.max_memory_allocated() / 1024 ** 3))\n print(\"nvidia-smi\")\n os.system(\"nvidia-smi\")\n\n if cfg.MODEL.BERT_FREEZE:\n if cfg.NUM_GPUS > 1:\n for param in model.module.bert_encoder.model.bert.encoder.layer.parameters():\n param.requires_grad = False\n else:\n for param in model.bert_encoder.model.bert.encoder.layer.parameters():\n param.requires_grad = False\n\n # Construct the optimizer.\n def optimizer_wrapper(Optim, **kwargs):\n def init_func(model):\n return Optim(model.parameters(), **kwargs)\n return init_func\n\n optimizers = {\n \"adamax\": (\n optimizer_wrapper(optim.Adamax, lr=cfg.SOLVER.BASE_LR),\n lambda optim: optim.param_groups[0][\"lr\"],\n ),\n \"adam\": (\n optimizer_wrapper(optim.Adam, lr=cfg.SOLVER.BASE_LR),\n lambda optim: optim.param_groups[0][\"lr\"],\n ),\n \"sgd\": (\n optimizer_wrapper(optim.SGD, lr=cfg.SOLVER.BASE_LR, momentum=0.9),\n lambda optim: optim.param_groups[0][\"lr\"],\n ),\n }\n\n if cfg.SOLVER.OPTIMIZING_METHOD not in optimizers:\n cfg.SOLVER.OPTIMIZING_METHOD = 'adam'\n if distributed.is_master_proc():\n print(\"{0} not defined in available optimizer list, fallback to Adam\")\n\n optimizer, _ = optimizers[cfg.SOLVER.OPTIMIZING_METHOD]\n optimizer = optimizer(model)\n if distributed.is_master_proc():\n print('optimizer: {}'.format(optimizer))\n\n # Load a checkpoint to resume training if applicable.\n checkpoint_path = osp.join(cfg.OUTPUT_DIR, 'checkpoint.pth')\n if 
osp.exists(checkpoint_path):\n if distributed.is_master_proc():\n print('Resuming training: loading model from: {0}'.format(checkpoint_path))\n checkpoint = torch.load(checkpoint_path, map_location=\"cpu\")\n if cfg.NUM_GPUS > 1:\n model.module.load_state_dict(checkpoint['model_state'])\n else:\n model.load_state_dict(checkpoint['model_state'])\n optimizer.load_state_dict(checkpoint['optimizer_state'])\n start_epoch = checkpoint['epoch'] + 1\n model_final_path = osp.join(cfg.OUTPUT_DIR, 'model_final.pth')\n if osp.exists(model_final_path):\n model_final = torch.load(model_final_path)\n best_val_score = model_final['accuracy']\n else:\n best_val_score = None\n elif osp.exists(cfg.TRAIN.CHECKPOINT_FILE_PATH):\n if distributed.is_master_proc():\n print('Loading model from: {0}'.format(cfg.TRAIN.CHECKPOINT_FILE_PATH))\n checkpoint = torch.load(cfg.TRAIN.CHECKPOINT_FILE_PATH, map_location=\"cpu\")\n if cfg.NUM_GPUS > 1:\n model.module.load_state_dict(checkpoint['model_state'])\n else:\n model.load_state_dict(checkpoint['model_state'])\n start_epoch, best_val_score = 0, None\n else: \n start_epoch, best_val_score = 0, None\n\n # Define loss function\n loss_function = nn.BCEWithLogitsLoss()\n\n if distributed.is_master_proc():\n print('Train begins...')\n if cfg.TRAIN.EVAL_FIRST:\n accuracy = evaluate(val_loader, model, -1, cfg)\n if best_val_score is None or accuracy > best_val_score:\n best_val_score = accuracy\n try:\n # Perform the training loop\n for epoch in range(start_epoch, cfg.SOLVER.MAX_EPOCH):\n epoch_start_time = time.time()\n # Shuffle the dataset\n if cfg.NUM_GPUS > 1:\n train_loader.sampler.set_epoch(epoch)\n # Train for one epoch\n train_loss = train_epoch(train_loader, model, optimizer, loss_function, epoch, cfg)\n accuracy = evaluate(val_loader, model, epoch, cfg) \n\n if distributed.is_master_proc():\n # Save best model in the validation set\n if best_val_score is None or accuracy > best_val_score:\n best_val_score = accuracy\n model_final_path = osp.join(cfg.OUTPUT_DIR, 'model_final.pth')\n model_final = {\n \"epoch\": epoch,\n \"model_state\": model.state_dict(),\n \"optimizer_state\": optimizer.state_dict(),\n \"accuracy\": accuracy\n }\n torch.save(model_final, model_final_path)\n print('-' * 89)\n print('| end of epoch {:3d} | time: {:5.2f}s '\n '| epoch loss {:.6f} |'.format(\n epoch, time.time() - epoch_start_time, train_loss))\n print('-' * 89)\n except KeyboardInterrupt:\n if distributed.is_master_proc():\n print('-' * 89)\n print('Exiting from training early')",
"def use(self, partition_id, config, mpii_annotation_handle):\n\n image_scale_factor_range = (float(config.neural_network.train.data_augmentation.image_scale_factor.min), float(config.neural_network.train.data_augmentation.image_scale_factor.max))\n input_resolution = int(config.neural_network.train.input_resolution)\n output_resolution = int(config.neural_network.train.output_resolution)\n num_parts = int(config.data.MPII.parts.max_count)\n reference_image_size = int(config.data.MPII.reference_image_size)\n max_rotation_angle = float(config.neural_network.train.data_augmentation.rotation_angle_max)\n image_color_jitter_probability = float(config.neural_network.train.data_augmentation.image_color_jitter_probability)\n image_horizontal_flip_probability = float(config.neural_network.train.data_augmentation.image_horizontal_flip_probability)\n hue_max_delta = float(config.neural_network.train.data_augmentation.hue_max_delta)\n saturation_min_delta = float(config.neural_network.train.data_augmentation.saturation_min_delta)\n brightness_max_delta = float(config.neural_network.train.data_augmentation.brightness_max_delta)\n contrast_min_delta = float(config.neural_network.train.data_augmentation.contrast_min_delta)\n\n training_data_partition = MPIIDataset(\n indices=self.training_partitions[partition_id],\n mpii_annotation_handle=mpii_annotation_handle,\n horizontally_flipped_keypoint_ids=config.data.MPII.parts.flipped_ids,\n input_resolution=input_resolution,\n output_resolution=output_resolution,\n num_parts=num_parts,\n reference_image_size=reference_image_size,\n max_rotation_angle=max_rotation_angle,\n image_scale_factor_range=image_scale_factor_range,\n image_color_jitter_probability=image_color_jitter_probability,\n image_horizontal_flip_probability=image_horizontal_flip_probability,\n hue_max_delta=hue_max_delta,\n saturation_min_delta=saturation_min_delta,\n brightness_max_delta=brightness_max_delta,\n contrast_min_delta=contrast_min_delta\n )\n\n validation_data_partition = MPIIDataset(\n indices=self.validation_partitions[partition_id],\n mpii_annotation_handle=mpii_annotation_handle,\n horizontally_flipped_keypoint_ids=config.data.MPII.parts.flipped_ids,\n input_resolution=input_resolution,\n output_resolution=output_resolution,\n num_parts=num_parts,\n reference_image_size=reference_image_size,\n max_rotation_angle=max_rotation_angle,\n image_scale_factor_range=image_scale_factor_range,\n image_color_jitter_probability=image_color_jitter_probability,\n image_horizontal_flip_probability=image_horizontal_flip_probability,\n hue_max_delta=hue_max_delta,\n saturation_min_delta=saturation_min_delta,\n brightness_max_delta=brightness_max_delta,\n contrast_min_delta=contrast_min_delta\n )\n return training_data_partition, validation_data_partition",
"def _start_distributed_training(self, saving_listeners=None):\n config = self._estimator.config\n\n # Start in-process TensorFlow server if needed. It's important to start the\n # server before we (optionally) sleep. Otherwise, the servers will wait to\n # connect to each other before starting to train.\n if not _is_google_env():\n self._start_std_server(config)\n\n # Delay worker to start. For asynchronous training, this usually helps model\n # to converge faster. Chief starts the training immediately, so, worker\n # with task id x (0-based) should wait (x+1) * _DELAY_SECS_PER_WORKER.\n delay_secs = 0\n if config.task_type == run_config_lib.TaskType.WORKER:\n # TODO(xiejw): Replace the hard code logic (task_id + 1) with unique id in\n # training cluster.\n delay_secs = min(_MAX_DELAY_SECS,\n (config.task_id + 1) * _DELAY_SECS_PER_WORKER)\n if delay_secs > 0:\n logging.info('Waiting %d secs before starting training.', delay_secs)\n time.sleep(delay_secs)\n\n self._estimator.train(input_fn=self._train_spec.input_fn,\n max_steps=self._train_spec.max_steps,\n hooks=self._train_spec.hooks,\n saving_listeners=saving_listeners)",
"def _init_feature_processer(self):\n try:\n model_config = self._conf.get(PredictConstance.BASE_CONFIG,\n PredictConstance.FEATURE_ENGINEERING_CONFIG)\n conf = configparser.ConfigParser()\n conf.read(model_config)\n self._feature_processor = data_processor.DataProcessor(conf=conf,log_path = self.xeasy_log_path)\n if self._feature_processor.init() == runstatus.RunStatus.SUCC:\n return True\n else:\n return False\n except Exception as err:\n self.managerlogger.logger.error(\"init model error: %s\" % err)\n self.errorlogger.logger.error(\"init model error:\\n %s\" % traceback.format_exc())\n return False",
"def train(train_dataset: torch.utils.data.Dataset, test_dataset: torch.utils.data.Dataset,\n training_config: dict = train_config, global_config: dict = global_config):\n\n for path in global_config.values():\n create_dirs(path)\n\n # wrap datasets with Dataloader classes\n train_loader = torch.utils.data.DataLoader(train_dataset,\n **training_config[\"DATA_LOADER_CONFIG\"])\n test_loader = torch.utils.data.DataLoader(test_dataset,\n **training_config[\"DATA_LOADER_CONFIG\"])\n\n # model name & paths\n name = \"_\".join([train_config[\"DATE\"], train_config[\"SESSION_NAME\"]])\n modelpath = os.path.join(global_config[\"WEIGHT_DIR\"], name)\n\n # instantiate model\n model = training_config[\"MODEL\"](**training_config[\"MODEL_CONFIG\"])\n\n optimizer = training_config[\"OPTIMIZER\"](model.parameters(),\n **training_config[\"OPTIMIZER_CONFIG\"])\n\n # set up ignite engine\n training_config[\"METRICS\"].update({\"loss\" : Loss(training_config[\"LOSS\"])})\n trainer = create_supervised_trainer(model=model, optimizer=optimizer,\n loss_fn=training_config[\"LOSS\"],\n device=training_config[\"DEVICE\"])\n evaluator = create_supervised_evaluator(model,\n metrics=training_config[\"METRICS\"],\n device=training_config[\"DEVICE\"])\n\n\n # tensorboardX setup\n log_dir = os.path.join(global_config[\"LOG_DIR\"], \"tensorboardx\", name)\n create_dirs(log_dir)\n writer = SummaryWriter(logdir=log_dir)\n\n # log using the logging tool\n logger = log.Log(training_config, run_name=train_config['SESSION_NAME'])\n\n @trainer.on(Events.ITERATION_COMPLETED)\n def log_training(engine):\n iteration = (engine.state.iteration - 1) % len(train_loader) + 1\n writer.add_scalar(\"training/loss\", engine.state.output, engine.state.iteration)\n if iteration % 4 == 0:\n print(\"\\repoch[{}] iteration[{}/{}] loss: {:.2f} \".format(engine.state.epoch,\n iteration, len(train_loader),\n engine.state.output), end=\"\")\n\n # generic evaluation function\n def evaluate(engine, loader):\n evaluator.run(loader)\n metrics = evaluator.state.metrics\n return metrics\n\n # training data metrics\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_training_results(engine):\n print(\"\\ntraining results - epoch {}\".format(engine.state.epoch))\n metrics = evaluate(engine, train_loader)\n print(metrics)\n for key, value in metrics.items():\n logger.log_metric(key, value)\n writer.add_scalar(\"training/avg_{}\".format(key), value, engine.state.epoch)\n\n # test data metrics\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_validation_results(engine):\n print(\"test results - epoch {}\".format(engine.state.epoch))\n metrics = evaluate(engine, test_loader)\n print(metrics)\n for key, value in metrics.items():\n writer.add_scalar(\"validation/avg_{}\".format(key), value, engine.state.epoch)\n\n # model checkpointing\n @trainer.on(Events.EPOCH_COMPLETED)\n def model_checkpoint(engine):\n torch.save(model.state_dict(), modelpath + \".pth\")\n print(\"Checkpoint saved to {}\".format(modelpath + \".pth\"))\n\n # training iteration\n try:\n trainer.run(train_loader, max_epochs=training_config[\"EPOCHS\"])\n except KeyboardInterrupt:\n torch.save(model.state_dict(), modelpath + \".pth\")\n print(\"Model saved to {}\".format(modelpath + \".pth\"))\n raise KeyboardInterrupt\n\n # write weights\n torch.save(model.state_dict(), modelpath + \".pth\")\n\n # write csv log file\n log_content = training_config.copy()\n evaluator.run(test_loader)\n log_content[\"VAL_METRICS\"] = evaluator.state.metrics\n log_path = os.path.join(global_config[\"LOG_DIR\"], 
training_config[\"LOGFILE\"])\n write_log(log_path, log_content)\n\n logger.end_run()\n \n return evaluator.state.metrics[\"training/avg_loss\"]",
"def setup(self, num_collaborators, **kwargs):\n return [\n FederatedModel(\n self.build_model,\n optimizer=self.lambda_opt,\n loss_fn=self.loss_fn,\n data_loader=data_slice,\n **kwargs\n )\n for data_slice in self.data_loader.split(\n num_collaborators, equally=True\n )]",
"def prepare_model_optimizer_and_scheduler(self):\n\n ###################################################################\n # MODEL PREPARATION\n # -----------------\n # - step 1: Initialize a random model from config\n # - step 2: Load model weights from checkpoint if any\n # - step 3: Move model to device (GPU)\n ###################################################################\n\n # Initialize a random model according to a specific config:\n # NOTE: here we load from a physical path instead of using a keyword\n # as compute nodes may not allow downloading from online hubs\n if self.is_character_bert:\n model_config = CharacterBertConfig.from_pretrained(\n os.path.join(WORKDIR, 'data', 'character-bert'))\n model = CharacterBertForPreTraining(model_config)\n else:\n model_config = BertConfig.from_pretrained(\n os.path.join(WORKDIR, 'data', 'bert-base-uncased'))\n model = BertForPreTraining(model_config)\n if self.is_main_process:\n logging.info(\n \"Initialized %s using Config:\\n%s\",\n \"CharacterBERT\" if self.is_character_bert else \"BERT\",\n model_config\n )\n\n # Load checkpoint if any:\n if not self.resume_pretraining:\n # CASE: no checkpoint -> training from scratch\n self.global_step = 0\n if self.is_main_process:\n logging.info(\"Pre-training from scratch (good luck!)\")\n else:\n if self.init_checkpoint:\n # CASE: load checkpoint from direct path\n self.global_step = 0\n init_checkpoint = self.init_checkpoint\n if self.is_main_process:\n logging.info(\n \"Resuming pre-training from specific checkpoint `%s`\",\n init_checkpoint\n )\n else:\n # CASE: load checkpoint from resume_step\n if self.is_main_process:\n logging.info(\n \"Resuming pre-training from step `%s`. \"\n \"Looking inside `output_directory` for checkpoints...\",\n self.resume_step\n )\n\n if self.resume_step == -1:\n # CASE: resume_step == -1, load latest checkpoint\n model_names = [\n fname\n for fname in os.listdir(self.output_directory)\n if fname.endswith(\".pt\")]\n assert model_names, \"Could not find any checkpoints to resume from.\"\n self.resume_step = max([\n int(x.split('.pt')[0].split('_')[1].strip())\n for x in model_names]) # TODO: find a better way for this\n if self.is_main_process:\n logging.info(\n \"Resuming from latest checkpoint: ckpt_%s.pt\",\n self.resume_step\n )\n else:\n # CASE: resume_step == X, load checkpoint: `ckpt_X.pt`\n if self.is_main_process:\n logging.info(\n \"Resuming from checkpoint: ckpt_%s.pt\",\n self.resume_step\n )\n self.global_step = self.resume_step\n init_checkpoint = os.path.join(\n self.output_directory, f\"ckpt_{self.resume_step}.pt\")\n\n # Load the actual checkpoint file\n self.checkpoint = torch.load(\n init_checkpoint, map_location=\"cpu\"\n )\n\n # NOTE: Keeping these lines below as a reminder that re-training on\n # a different domain with CharacterBERT requires changing the\n # output layer with a topK tokens matrix from the new domain.\n\n # # Case where we would retrain a general_domain CharacterBERT\n # # on the medical domain. 
Don't use the general domain output layer:\n # if self.is_medical_domain and self.is_character_bert and (not self.phase2):\n # model.load_state_dict(\n # {\n # k: v for (k, v) in self.checkpoint['model'].items()\n # # Don't load output matrix from general domain model\n # if not k.startswith('cls.predictions') # ignoring the old output layer\n # },\n # strict=False)\n # if self.is_main_process:\n # logging.warning(\n # \"Loaded model weights from `%s`, \"\n # \"but ignored the `cls.predictions` module.\",\n # init_checkpoint)\n\n # # General case: load weights from checkpoint\n # else:\n # model.load_state_dict(self.checkpoint['model'], strict=True)\n # if self.is_main_process:\n # logging.info('Loaded model weights from `%s`',\n # init_checkpoint)\n\n # General case: load weights from checkpoint\n model.load_state_dict(self.checkpoint['model'], strict=True)\n if self.is_main_process:\n logging.info('Loaded model weights from `%s`', init_checkpoint)\n\n # Deduce previous steps from phase1 when in phase2\n if self.phase2 and not self.init_checkpoint:\n self.global_step -= self.phase1_end_step\n\n if self.is_main_process:\n logging.info(\"Training will start at global_step=%s\", self.global_step)\n\n # Move model to GPU:\n model.to(self.device)\n if self.is_main_process:\n logging.info(\"Model was moved to device: %s\", self.device)\n\n ###################################################################\n # OPTIMIZER / SCHEDULER PREPARATION\n # ---------------------------------\n # - step 1: Define the optimizer (FusedLAMB w/ some weight decay)\n # - step 2: Define the learning rate scheduler (PolyWarmUpScheduler)\n ###################################################################\n\n # Initialize an optimizer:\n no_decay = ['bias', 'gamma', 'beta', 'LayerNorm'] # no weight decay\n optimizer_grouped_parameters = [\n {\n 'params': [\n param for name, param in model.named_parameters()\n if not any((nd in name) for nd in no_decay)],\n 'weight_decay': 0.01\n },\n {\n 'params': [\n param for name, param in model.named_parameters()\n if any((nd in name) for nd in no_decay)],\n 'weight_decay': 0.0\n }\n ]\n optimizer = FusedLAMB(\n optimizer_grouped_parameters, lr=self.learning_rate)\n if self.is_main_process:\n logging.info(\"Using optimizer: %s\", optimizer)\n\n # Initialize a learning rate scheduler:\n self.lr_scheduler = PolyWarmUpScheduler(\n optimizer,\n warmup=self.warmup_proportion,\n total_steps=self.total_steps\n )\n if self.is_main_process:\n logging.info(\"Using scheduler: %s\", self.lr_scheduler)\n\n ###################################################################\n # OTHER PREPARATION STEPS\n # -----------------------\n # - step 1: Set up Mixed Precision training (fp16) if required\n # - step 2: Load optimizer stat from checkpoint if any\n # - step 2: Set up DataParallel\n ###################################################################\n\n # Set up fp16:\n if self.fp16:\n if self.is_main_process:\n logging.info(\"Setting up `Almost FP16` Mixed Precision...\")\n if self.loss_scale == 0:\n model, optimizer = amp.initialize(\n model, optimizer, opt_level=\"O2\", loss_scale=\"dynamic\")\n else:\n model, optimizer = amp.initialize(\n model, optimizer, opt_level=\"O2\", loss_scale=self.loss_scale)\n amp._amp_state.loss_scalers[0]._loss_scale = 2**20\n\n # Load optimizer state from checkpoint\n if self.resume_pretraining:\n if self.is_main_process:\n logging.info(\"Loading optimizer state from checkpoint...\")\n if self.phase2 or self.init_checkpoint:\n keys = 
list(self.checkpoint['optimizer']['state'].keys())\n # Override hyperparameters from previous self.checkpoint\n for key in keys:\n self.checkpoint['optimizer']['state'][key]['step'] = self.global_step\n for i, _ in enumerate(self.checkpoint['optimizer']['param_groups']):\n self.checkpoint['optimizer']['param_groups'][i]['step'] = self.global_step\n self.checkpoint['optimizer']['param_groups'][i]['t_total'] = self.total_steps\n self.checkpoint['optimizer']['param_groups'][i]['warmup'] = self.warmup_proportion\n self.checkpoint['optimizer']['param_groups'][i]['lr'] = self.learning_rate\n if self.is_main_process:\n logging.info(\"Overwrote the following parameters with new values:\")\n logging.info(\"* step: %s\", self.global_step)\n logging.info(\"* t_total: %s\", self.total_steps)\n logging.info(\"* warmup: %s\", self.warmup_proportion)\n logging.info(\"* lr: %s\", self.learning_rate)\n optimizer.load_state_dict(self.checkpoint['optimizer'])\n # Restore AMP master parameters\n if self.fp16:\n if self.is_main_process:\n logging.info(\"Restoring AMP master parameters (optimizer)...\")\n optimizer._lazy_init_maybe_master_weights()\n optimizer._amp_stash.lazy_init_called = True\n optimizer.load_state_dict(self.checkpoint['optimizer'])\n for param, saved_param in zip(amp.master_params(optimizer), self.checkpoint['master params']):\n param.data.copy_(saved_param.data)\n\n # Distribute model\n if self.training_is_distributed:\n if not self.allreduce_post_accumulation:\n model = DistributedDataParallel(\n model,\n message_size=250000000,\n gradient_predivide_factor=\\\n torch.distributed.get_world_size()\n )\n else:\n flat_dist_call(\n [param.data for param in model.parameters()],\n torch.distributed.broadcast,\n (0,)\n )\n elif self.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Set the values of self.model and self.optimizer\n self.model = model\n self.optimizer = optimizer",
"def main(argv: Sequence[str] | None = None) -> int:\n args = parse_args(argv)\n\n torch.distributed.init_process_group(\n backend=args.backend,\n init_method='env://',\n )\n\n logging.basicConfig(\n format='[%(asctime)s] %(levelname)-5s (%(name)s): %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n level=logging.INFO\n if torch.distributed.get_rank() == 0\n else logging.ERROR,\n stream=sys.stdout,\n )\n\n if args.cuda:\n torch.cuda.set_device(args.local_rank)\n torch.cuda.manual_seed(args.seed)\n\n if torch.distributed.get_rank() == 0:\n logger.info('Collecting env info...')\n logger.info(collect_env.get_pretty_env_info())\n logger.info(f'Training arguments:\\n{args}')\n\n datasets, vocab = get_dataset(\n args.dataset,\n args.download_dir,\n seq_len=args.seq_len,\n batch_size=args.batch_size,\n cuda=args.cuda,\n rank=torch.distributed.get_rank(),\n world_size=torch.distributed.get_world_size(),\n )\n\n model: torch.nn.Module = TransformerModel(\n ntoken=len(vocab),\n d_model=args.embedding_dim,\n nhead=args.attention_heads,\n d_hid=args.hidden_dim,\n nlayers=args.layers,\n dropout=args.dropout,\n )\n model.to(args.device)\n model = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[args.local_rank] if args.cuda else None,\n )\n\n criterion = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n optimizer,\n factor=0.1,\n patience=2,\n min_lr=1e-4,\n )\n\n logger.info(f'Transformer model:\\n{model}')\n preconditioner: kfac.preconditioner.KFACPreconditioner | None = None\n if args.kfac:\n strategy = kfac.enums.DistributedStrategy[args.strategy.upper()]\n preconditioner = kfac.preconditioner.KFACPreconditioner(\n model,\n factor_update_steps=args.factor_update_steps,\n inv_update_steps=args.inv_update_steps,\n damping=args.damping,\n factor_decay=args.factor_decay,\n kl_clip=args.kl_clip,\n lr=lambda x: optimizer.param_groups[0]['lr'],\n grad_worker_fraction=strategy,\n skip_layers=args.skip_layers,\n loglevel=logging.INFO,\n )\n if torch.distributed.get_rank() == 0:\n logger.info(f'Preconditioner config:\\n{preconditioner}')\n\n start = time.perf_counter()\n for epoch in range(args.epochs):\n datasets.train.sampler.set_epoch(epoch)\n train(\n model,\n criterion=criterion,\n optimizer=optimizer,\n preconditioner=preconditioner,\n dataloader=datasets.train.loader,\n epoch=epoch + 1,\n epochs=args.epochs,\n )\n eval_loss = evaluate(\n model,\n criterion=criterion,\n dataloader=datasets.val.loader,\n prefix='Validation',\n )\n scheduler.step(eval_loss)\n end = time.perf_counter()\n logger.info(f'Training completed in {end-start:.2f} seconds.')\n\n evaluate(\n model,\n criterion=criterion,\n dataloader=datasets.test.loader,\n prefix='Test',\n )\n\n return 0",
"def train_start(self):\n self.module.img_enc.train()\n self.module.txt_enc.train()",
"def _setup_training(self, params, **kwargs):\n model_params = params.permute_training_on_top().model\n\n model_kwargs = {**model_params.fixed, **model_params.variable}\n\n model = self.model_cls(**model_kwargs)\n\n training_params = params.permute_training_on_top().training\n losses = training_params.nested_get(\"losses\")\n optimizer_cls = training_params.nested_get(\"optimizer_cls\")\n optimizer_params = training_params.nested_get(\"optimizer_params\")\n train_metrics = training_params.nested_get(\"train_metrics\", {})\n lr_scheduler_cls = training_params.nested_get(\"lr_sched_cls\", None)\n lr_scheduler_params = training_params.nested_get(\"lr_sched_params\",\n {})\n val_metrics = training_params.nested_get(\"val_metrics\", {})\n\n # necessary for resuming training from a given path\n save_path = kwargs.pop(\"save_path\", os.path.join(\n self.save_path,\n \"checkpoints\",\n \"run_%02d\" % self._run))\n\n return self.trainer_cls(\n network=model,\n save_path=save_path,\n losses=losses,\n key_mapping=self.key_mapping,\n optimizer_cls=optimizer_cls,\n optimizer_params=optimizer_params,\n train_metrics=train_metrics,\n val_metrics=val_metrics,\n lr_scheduler_cls=lr_scheduler_cls,\n lr_scheduler_params=lr_scheduler_params,\n optim_fn=self._optim_builder,\n save_freq=self.checkpoint_freq,\n **kwargs\n )",
"def train(self):\n self.dataGenerator.printDataStatistics()\n sE = len(self.dataGenerator.ids[\"train\"])// 32\n sV = len(self.dataGenerator.ids[\"validation\"])// 32\n self.model.fit_generator(\n generator=self.dataGenerator.trainingGenerator,\n steps_per_epoch= sE,\n epochs=2,\n validation_data=self.dataGenerator.validationGenerator,\n validation_steps=sV,\n # use_multiprocessing=True,\n # workers=2,\n )",
"def initialize_training(args, device):\n # Create tokenizer, datasets and loaders\n tokenizer = EpisodeSummaryTokenizer.from_pretrained(\n args.gpt2_size, max_num_words=args.max_num_words, size_variance_handling=args.size_var_handling\n )\n train_dataset, val_dataset = create_datasets_from_jsons(args.json_paths, tokenizer, args.val_split)\n\n dataloaders = {\n 'train': DataLoader(train_dataset,\n shuffle=True,\n batch_size=args.batch_size,\n collate_fn=tokenizer.pad_batch_to_same_size),\n 'val': DataLoader(val_dataset,\n shuffle=False,\n batch_size=args.batch_size,\n collate_fn=tokenizer.pad_batch_to_same_size)\n }\n\n # Load pre-trained network weights\n model = GPT2LMHeadModel.from_pretrained(args.gpt2_size)\n model = model.to(device)\n\n # Prepare optimizer and scheduler\n no_decay = ['bias', 'LayerNorm.weight'] # no decay for biases and layer norm\n optimizer_grouped_parameters = [\n {\n 'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n 'weight_decay': args.weight_decay\n },\n {\n 'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n 'weight_decay': 0.0\n }\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = WarmupLinearSchedule(optimizer, warmup_steps=0, t_total=args.max_steps)\n model.zero_grad()\n\n train_state = make_train_state(save_path=args.model_save_path, early_stopping_patience=args.early_stopping_patience)\n\n return tokenizer, dataloaders, model, optimizer, scheduler, train_state",
"def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)",
"def train():\n import training\n\n # Ensure output directories exist\n os.makedirs(os.path.dirname(cfg.scaler_path), exist_ok=True)\n os.makedirs(cfg.model_path, exist_ok=True)\n os.makedirs(cfg.log_path, exist_ok=True)\n\n # Load (standardized) input data and target values\n tr_x, tr_y, _ = _load_data(cfg.training_set, is_training=True)\n val_x, val_y, _ = _load_data(cfg.validation_set)\n\n # Try to create reproducible results\n np.random.seed(cfg.initial_seed)\n\n # Save free parameters to disk\n utils.log_parameters(cfg.training, os.path.join(cfg.model_path,\n 'parameters.json'))\n\n training.train(tr_x, tr_y, val_x, val_y)",
"def train(self):\n backend = self.config.backend.build(self.config, self.tmp_dir)\n backend.train(source_bundle_uri=self.config.source_bundle_uri)",
"def _set_train(self):\n\n if not self.model.__dict__['training']:\n self.model.train()",
"def launch(self, force_prep=False):\n #TODO process upploaded csv\n assert self.ready()\n self.launch_progress = 0\n self.set_status(\"Gathering data\")\n if 'O' not in set(self.labels.dict.values()):\n self.add_labels({max(list(self.labels.dict.keys()))+1:'O'})\n\n processed_file_path = os.path.join(DATASETS_PATH, self.dataset_uuid, 'processed.csv')\n bert_file_path = os.path.join(DATASETS_PATH, self.dataset_uuid, 'processed.bert')\n elmo_file_path = os.path.join(DATASETS_PATH, self.dataset_uuid, 'processed.elmo')\n nlp_file_path = os.path.join(DATASETS_PATH, self.dataset_uuid, 'processed.nlp')\n sbert_file_path = os.path.join(DATASETS_PATH, self.dataset_uuid, 'processed.sbert')\n if os.path.exists(processed_file_path) and not force_prep:\n df = pd.read_csv(processed_file_path)\n if 'span_label' in df.columns:\n df['span_label']=df['span_label'].apply(eval)\n # Let's say loading the file is ~half the launch time\n # (if the file already exists)\n self.total = 2\n self.update(1)\n else:\n datafiles = [os.path.join(DATASETS_PATH, self.dataset_uuid, d) \\\n for d in os.listdir(os.path.join(DATASETS_PATH, self.dataset_uuid))]\n df = concat_dataset(datafiles)\n # expand total to account for time it takes to initialize the model\n self.total = len(df)*(1.1) \n self.set_status(\"Preprocessing data\")\n df = self.process_data(df, processed_file_path)\n\n # load list of 'allennlp.data.instance's. allennlp.data.instance can store true labels and tag info internally.\n if os.path.exists(nlp_file_path) and not force_prep:\n with open(nlp_file_path, 'rb') as f:\n sentences = pickle.load(f)\n else:\n #TODO define a universal reader for certain format\n # reader = RestaurantsDatasetReader()\n # data = reader.read(processed_file_path)\n #TODO handle when aux files do not exist\n pass\n bert_emb = np.load(bert_file_path, allow_pickle=True)\n elmo_emb = np.load(elmo_file_path, allow_pickle=True)\n sbert_emb = np.load(sbert_file_path, allow_pickle=True)\n for s, b, e, sb in zip(sentences, bert_emb, elmo_emb, sbert_emb):\n s.fields['bert'] = b\n s.fields['sbert'] = sb\n s.fields['elmo'] = e\n\n df['bert'] = bert_emb\n df['sbert'] = [sb for sb in sbert_emb]\n df['elmo'] = elmo_emb\n df['text_nlp'] = sentences\n\n columns_to_drop = list(\n set(df.columns).intersection(set(['span_label','file','label'])))\n df = df.drop(columns=columns_to_drop).reset_index()\n # since df['text_nlp'] contains true label info, drop 'labels' column.\n columns_to_drop = list(set(df.columns).difference(set(['index', 'Unnamed: 0', 'text', 'labels', 'split', 'bert', 'sbert',\n 'elmo', 'text_nlp'])))\n if len(columns_to_drop) > 0:\n df = df.drop(columns=columns_to_drop)\n df_train = df[df['split']=='train']\n df_dev = df[df['split'] == 'dev']\n df_valid = df[df['split'] == 'valid']\n df_test = df[df['split'] == 'test']\n\n self.text_inv_dict = dict(\n zip(list(df['text']),list(df.index))\n )\n\n # TODO split heldout set if necessary\n # for now, passing empty df as heldout set\n df_heldout = df_test\n\n self.emb_dict = Embeddings(df)\n\n self.set_status(\"Initializing modeler\")\n self.modeler = Modeler(df_train, df_dev, df_valid, df_test, df_heldout, self.labels, emb_dict=self.emb_dict)\n\n self.launch_progress = 1.0\n self.set_status(\"Finished\")\n return self.modeler"
]
| [
"0.61181545",
"0.6045724",
"0.6024364",
"0.6009437",
"0.5953014",
"0.59526104",
"0.5929882",
"0.5899533",
"0.58728266",
"0.58654255",
"0.57695735",
"0.5762787",
"0.5755617",
"0.5746565",
"0.5687636",
"0.5658742",
"0.5657223",
"0.56548303",
"0.56238604",
"0.56198394",
"0.56055254",
"0.55994385",
"0.5592554",
"0.55827445",
"0.5576264",
"0.5553501",
"0.5545922",
"0.5531972",
"0.55168927",
"0.55155414"
]
| 0.61471665 | 0 |
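The positive document in the row above only defines `configure_training_federated`; its docstring states that the returned `training_specs.RunnerSpecFederated` wraps an iterative process compatible with `tff.simulation.run_training_process`. Below is a minimal usage sketch, not part of the dataset row: `task_spec` is assumed to be an already-constructed `training_specs.TaskSpecFederated` (its fields are not shown in this excerpt), and the keyword names follow the `tff.simulation` API referenced by the docstring.

```python
# Illustrative sketch only, assuming `task_spec` and the TFF simulation runtime
# are available in the surrounding codebase.
import tensorflow_federated as tff

# Build the federated runner spec for the EMNIST character task.
spec = configure_training_federated(task_spec, model='resnet18', only_digits=False)

# Let TFF drive the round loop: `client_datasets_fn` samples a list of client
# IDs each round, and the composed iterative process maps IDs to client data.
tff.simulation.run_training_process(
    training_process=spec.iterative_process,
    training_selection_fn=spec.client_datasets_fn,
    total_rounds=100,
)
```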
Configures centralized training for the EMNIST character recognition task. | def configure_training_centralized(
task_spec: training_specs.TaskSpecCentralized,
*, # Caller passes below args by name.
model: str = 'resnet18',
only_digits: bool = False,
merge_case: bool = False,
) -> training_specs.RunnerSpecCentralized:
return _EmnistCharacterTask(
task_spec,
model=model,
only_digits=only_digits,
merge_case=merge_case,
).build_centralized_runner_spec() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_training_parameters(\n self,\n config: ConfigDict,\n len_train: int,\n len_test: int,\n ):\n self.configure_steps(config, len_train, len_test)\n self.configure_reporting(config)\n self.configure_training_functions(config)",
"def train_start(self):\n self.module.img_enc.train()\n self.module.txt_enc.train()",
"def configure(self):\n self.data_batch_file = self.get_value_from_config('data_batch_file')\n self.batch_meta_file = self.get_value_from_config('batch_meta_file')\n self.has_background = self.get_value_from_config('has_background')\n self.num_classes = self.get_value_from_config('num_classes')\n self.converted_images_dir = self.get_value_from_config('converted_images_dir')\n if not self.converted_images_dir:\n self.converted_images_dir = self.data_batch_file.parent / 'converted_images'\n self.convert_images = self.get_value_from_config('convert_images')\n # create directory for storing images if it is necessary\n if self.convert_images and not self.converted_images_dir.exists():\n self.converted_images_dir.mkdir(parents=True)\n self.dataset_meta = self.get_value_from_config('dataset_meta_file')",
"def prepare_training(self, config: TreeConfigParser) -> None:\n callbacks.initiate_wb(config)\n generator = generators.Generator(\n config.get(\"data.output.label.choice\"),\n config.get_int(\"model.batch_size\"),\n config.get_int(\"data.input.aug.rot.range\"),\n config.get_float(\"data.input.aug.shift.range\"),\n config.get_bool(\"data.input.aug.flip.bool\"),\n config.get_float(\"data.input.aug.shear.range\"),\n config.get_float(\"data.input.aug.zoom.range\"),\n )\n list_callbacks = callbacks.create_list_callbacks(\n config.get(\"orga.save.directory\"),\n config.get(\"orga.save.folder\"),\n config.get_bool(\"model.callback.modelcheckpoint\"),\n config.get_bool(\"model.callback.reducelronplateau\"),\n config.get_bool(\"model.callback.earlystopping\"),\n config.get_bool(\"model.callback.wandb\"),\n )\n self.trainer = Trainer(\n generator,\n list_callbacks,\n config.get_int(\"model.batch_size\"),\n config.get_int(\"model.epochs.number\"),\n )",
"def setUp(self):\n # The short NSC used in this example\n self.net_nsc = [\n (1, 4, 0, 0, 0), # Layer 1: Identity(input)\n (2, 1, 1, 1, 0), # Layer 2: Convolution(Layer1)\n (3, 1, 3, 2, 0), # Layer 3: Convolution(Layer2)\n (4, 1, 1, 1, 0), # Layer 4: Convolution(Layer1)\n (5, 1, 5, 4, 0), # Layer 5: Convolution(Layer4)\n (6, 6, 0, 3, 5), # Layer 6: Concat(Layer3, Layer5)\n (7, 2, 3, 1, 0), # Layer 7: MaxPooling(Layer1)\n (8, 1, 1, 7, 0), # Layer 8: Convolution(Layer7)\n (9, 6, 0, 6, 8), # Layer 9: Concat(Layer6, Layer8)\n (10, 7, 0, 0, 0), # Layer 10: Terminal\n ]\n\n # Load training and eval data\n (train_data, train_labels), (eval_data, eval_labels) = \\\n tf.keras.datasets.mnist.load_data()\n\n # Fix the dataset\n self.train_data = normalize_dataset(dataset=train_data, baseline=255)\n self.train_labels = train_labels.astype(np.int32)\n\n self.eval_data = normalize_dataset(dataset=eval_data, baseline=255)\n self.eval_labels = eval_labels.astype(np.int32)\n\n # The batch size\n self.batch_size = 256\n\n # Workspace directory\n workspace_dir = \"./workspace\"\n self.training_dir = \"{workspace}/trainer_test_earlystop\".format(\n workspace=workspace_dir\n )",
"def setUp(self):\n # The short NSC used in this example\n self.net_nsc = [\n (1, 4, 0, 0, 0), # Layer 1: Identity(input)\n (2, 1, 1, 1, 0), # Layer 2: Convolution(Layer1)\n (3, 1, 3, 2, 0), # Layer 3: Convolution(Layer2)\n (4, 1, 1, 1, 0), # Layer 4: Convolution(Layer1)\n (5, 1, 5, 4, 0), # Layer 5: Convolution(Layer4)\n (6, 6, 0, 3, 5), # Layer 6: Concat(Layer3, Layer5)\n (7, 2, 3, 1, 0), # Layer 7: MaxPooling(Layer1)\n (8, 1, 1, 7, 0), # Layer 8: Convolution(Layer7)\n (9, 6, 0, 6, 8), # Layer 9: Concat(Layer6, Layer8)\n (10, 7, 0, 0, 0), # Layer 10: Terminal\n ]\n\n # Load training and eval data\n (train_data, train_labels), (eval_data, eval_labels) = \\\n tf.keras.datasets.mnist.load_data()\n\n # Fix the dataset\n self.train_data = normalize_dataset(dataset=train_data, baseline=255)\n self.train_labels = train_labels.astype(np.int32)\n\n self.eval_data = normalize_dataset(dataset=eval_data, baseline=255)\n self.eval_labels = eval_labels.astype(np.int32)\n\n # The batch size\n self.batch_size = 256\n\n # Workspace directory\n workspace_dir = \"./workspace\"\n self.training_dir = \"{workspace}/trainer_test\".format(\n workspace=workspace_dir\n )",
"def train(self, training_data, cfg, **kwargs):\n pass",
"def setUp(self):\n self._vocab = np.array([\"one\", \"two\", \"three\", \"four\",\n \"five\", \"six\", \"seven\", \"eight\", \"nine\"])\n self._embedding_dim = 2\n\n self._default_config = {\n \"vocab\": self._vocab,\n \"embedding_dim\": self._embedding_dim,\n \"position_encoding\": None\n }",
"def setUp(self):\n self._default_call_inputs = (\n np.array([[\"one\", \"two\", \"three\"],\n [\"four\", \"five\", \"six\"]]),\n None\n )\n\n self._hash_bins = 10\n self._hash_embedding_dim = 4\n self._embedding_dim = 2\n self._attention_heads = 4\n self._attention_key_dim = 128\n self._attention_concat = False\n self._attention_mask = False\n self._masking = False\n\n self._default_config = {\n \"hash_bins\": self._hash_bins,\n \"hash_embedding_dim\": self._hash_embedding_dim,\n \"embedding_dim\": self._embedding_dim,\n \"attention_heads\": self._attention_heads,\n \"attention_key_dim\": self._attention_key_dim,\n \"attention_concat\": self._attention_concat,\n \"attention_causal_mask\": self._attention_mask,\n \"masking\": self._masking\n }",
"def _initial_setup(self, **train_kwargs):\n super(NetworkValidationBase, self)._initial_setup(**train_kwargs)",
"def config_and_train(self, sys_args):\n \n self.run_config_function(sys_args)\n self.set_model_name('vgg_16')\n self.set_trainable_and_exclude_scopes(constants.checkpoint_exclude_scopes,\n constants.trainable_scopes)\n self.set_optimizer('sgd')\n self.set_max_number_of_steps(6000)\n self.train_or_eval_net(sys_args)",
"def setUp(self):\n self._default_call_inputs = (\n np.array([[\"one\", \"two\", \"three\"],\n [\"four\", \"five\", \"six\"]]),\n None\n )\n\n self._hash_embedding_dim = 4\n self._embedding_dim = 2\n\n self._default_config = {\n \"hash_embedding_dim\": self._hash_embedding_dim,\n \"embedding_dim\": self._embedding_dim\n }",
"def experiment(**config):\n from ..training.train import training\n \n training(config)",
"def initialize(self, training=True, force_load_plans=False, num_epochs=500, prev_trainer=None):\n # -- The Trainer embodies the actual model that will be used as foundation to continue training on -- #\n # -- It should be already initialized since the output_folder will be used. If it is None, the model will be initialized and trained. -- #\n # -- Further the trainer needs to be of class nnUNetTrainerV2 or nnUNetTrainerMultiHead for this method, nothing else. -- #\n # -- Set prev_trainer correctly as class instance and not a string -- #\n self.trainer = prev_trainer\n\n # -- Set nr_epochs to provided number -- #\n self.max_num_epochs = num_epochs\n\n # -- Initialize the trained_on_tasks and load trained_on_folds -- #\n trained_on_tasks = list()\n trained_on_folds = self.already_trained_on.get(str(self.fold), list())\n \n # -- Reset the trained_on_tasks if the trained_on_folds exist for the current fold -- #\n if isinstance(trained_on_folds, dict):\n trained_on_tasks = trained_on_folds.get('finished_training_on', list())\n\n # -- The new_trainer indicates if the model is a new multi head model, -- #\n # -- ie. if it has been trained on only one task so far (True) or on more than one (False) -- #\n if len(trained_on_tasks) > 1:\n self.new_trainer = False\n else:\n self.new_trainer = True\n \n super().initialize(training, force_load_plans) # --> This updates the corresponding variables automatically since we inherit this class",
"def train(self, training_data: TrainingData, config: RasaNLUModelConfig, **kwargs: Any) -> None:\n pass",
"def start_training(self):\n self.training = True",
"def config(self):\n\n train_dataset = RandomClassificationDataset()\n eval_dataset = RandomClassificationDataset()\n\n return {\n 'model':\n SimpleModel(),\n 'train_dataloader':\n DataLoader(\n dataset=train_dataset,\n batch_size=4,\n sampler=dist.get_sampler(train_dataset),\n ),\n 'eval_dataloader':\n DataLoader(\n dataset=eval_dataset,\n sampler=dist.get_sampler(eval_dataset),\n ),\n 'max_duration':\n '2ep',\n 'autoresume':\n True,\n 'loggers': [],\n }",
"def _training_before_hook(self):\n pass",
"def input_config():\n run_dir = 'runs/ODEMnistClassification/8'\n epoch = 'latest'\n device = 'cpu'\n min_end_time = 10\n max_end_time = 100\n tol = 1e-3",
"def setup(args):\n cfg = get_cfg()\n\n cfg.merge_from_file(model_zoo.get_config_file(\"COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml\"))\n cfg.merge_from_list(args.opts)\n\n # configs for training\n if args.small_vidor: # cfg.DATASETS.VIDOR.SIZE == 'small':\n cfg.DATASETS.TRAIN = (\"vidor_small_train\",)\n elif args.small_vidor_10imgs: # cfg.DATASETS.VIDOR.SIZE == 'small-10imgs':\n cfg.DATASETS.TRAIN = (\"vidor_small_10imgs_train\",)\n else:\n cfg.DATASETS.TRAIN = (\"vidor_large_train\",)\n # cfg.DATALOADER.NUM_WORKERS = 2\n if not args.eval_only:\n cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(\"COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml\") # Let training initialize from model zoo\n factor = 4\n cfg.SOLVER.IMS_PER_BATCH = 16 * factor\n cfg.SOLVER.BASE_LR = 0.0001 * factor # finetune using 10x smaller base_lr\n cfg.SOLVER.MAX_ITER = 270000 // factor \n cfg.SOLVER.STEPS = [210000 // factor, 250000 // factor]\n # cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128 # default: 512\n cfg.MODEL.ROI_HEADS.NUM_CLASSES = 78\n\n # configs for testing\n # cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, \"model_final.pth\")\n if args.small_vidor: # cfg.DATASETS.VIDOR.SIZE == 'small':\n cfg.DATASETS.TEST = (\"vidor_small_val\",)\n elif args.small_vidor_10imgs: # cfg.DATASETS.VIDOR.SIZE == 'small-10imgs':\n cfg.DATASETS.TEST = (\"vidor_small_10imgs_val\",)\n else:\n cfg.DATASETS.TEST = (\"vidor_large_val\",)\n # cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5\n\n # cfg.OUTPUT_DIR = './output/train_vidor_with_pseudo_labels'\n \n \n if not args.eval_only:\n os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)\n cfg.freeze()\n default_setup(cfg, args)\n return cfg",
"def start_training(self, logdir: str, **info):\n pass",
"def config_task(self) -> None:\n if self.hparams[\"model\"] == \"resnet18\":\n self.model = models.resnet18(pretrained=True)\n in_features = self.model.fc.in_features\n self.model.fc = nn.Linear( # type: ignore[attr-defined]\n in_features, out_features=1\n )\n else:\n raise ValueError(f\"Model type '{self.hparams['model']}' is not valid.\")",
"def add_train_arguments(self):\n parser = self.parser\n parser.add_argument(\"source_dir\", help=\"Directory containing test source images.\")\n parser.add_argument(\"target_dir\", help=\"Directory containing test target images.\")\n parser.add_argument(\"--batch_size\", \"-bs\", default=1, type=int, help=\"Batch size.\")\n parser.add_argument(\"--cycle_loss_weight\", \"-clw\", default=0, type=int, help=\"Cycle loss weight.\")\n parser.add_argument(\n \"--discriminator_architecture\", \"-d\", default=\"basic\",\n help=\"architecture of the discriminator ('basic' | 'N_layers')\"\n )\n parser.add_argument(\n \"--discriminator_filters\", \"-df\", type=int, default=64,\n help=\"Number of filters in the last conv layer of the discriminator.\"\n )\n parser.add_argument(\n \"--n_frames_discriminator\", \"-dn\", type=int, default=0,\n help=\"Number of frames the sequence discriminators discriminate.\"\n )\n parser.add_argument(\n \"--discriminator_temporal_scales\", \"-dts\", type=int, default=1,\n help=\"Number of temporal scales in framerate sampling (= number of sequence discriminators).\"\n )\n parser.add_argument(\n \"--feature_matching_loss_weight\", \"-fmlw\", default=0, type=int, help=\"Loss weight of feature matching.\"\n )\n parser.add_argument(\n \"--flow_loss_weight\", \"-flw\", default=0, type=int, help=\"Loss weight of flow loss in vid2vid.\"\n )\n parser.add_argument(\n \"--gan_mode\", \"-gan\", default=\"lsgan\", help=\"type of the gan loss ('vanilla' | 'lsgan' | 'wgangp').\"\n )\n parser.add_argument(\n \"--init_epoch\", \"-ie\", default=0, type=int, help=\"If set, load models saved at a specific epoch.\"\n )\n parser.add_argument(\n \"--init_checkpoint_dir\", \"-i\",\n help=\"If set, initialize models from saved checkpoints in init_checkpoint_dir.\"\n )\n parser.add_argument(\n \"--log_every\", \"-le\", default=100, type=int, help=\"Log losses and images every log_every iterations.\"\n )\n parser.add_argument(\n \"--log_images_every\", \"-lie\", default=0, type=int,\n help=\"If specified, log images every log_images_every iterations, instead of every log_every iterations.\"\n )\n parser.add_argument(\"--load_height\", \"-lh\", type=int, default=0, help=\"image load height (before cropping).\")\n parser.add_argument(\"--l1_loss_weight\", \"-llw\", default=0, type=int, help=\"L1 loss weight.\")\n parser.add_argument(\"--learning_rate\", \"-lr\", default=0.0002, type=float, help=\"Learning rate.\")\n parser.add_argument(\"--load_width\", \"-lw\", type=int, default=0, help=\"Image load width (before cropping).\")\n parser.add_argument(\n \"--mask_loss_weight\", \"-mlw\", default=0, type=int, help=\"Loss weight of mask loss (weight loss) in vid2vid.\"\n )\n parser.add_argument(\"--num_epochs\", \"-ne\", default=10, type=int, help=\"Number of training epochs.\")\n parser.add_argument(\n \"--perceptual_loss_weight\", \"-plw\", default=0, type=int, help=\"Loss weight of perceptual (VGG19) loss.\"\n )\n parser.add_argument(\n \"--recycle_loss_weight\", \"-rclw\", default=0, type=int, help=\"Loss weight of recycle in RecycleGAN.\"\n )\n parser.add_argument(\n \"--recycle_predictor_architecture\", \"-rcp\", default=\"resnet_6blocks\",\n help=\"Architecture of RecycleGAN predictor. 
See generator_architecture for options.\"\n )\n parser.add_argument(\n \"--recycle_predictor_filters\", \"-rcpf\", type=int, default=64,\n help=\"Number of filters in the last conv layer of the RecycleGAN predictor.\"\n )\n parser.add_argument(\n \"--save_every\", \"-se\", default=1, type=int, help=\"Save model checkpoints every save_every epochs.\"\n )\n parser.add_argument(\n \"--spatial_scaling\", \"-ss\", default=[1], type=float, nargs='+',\n help=\"Set steps for spatial scaling.\\n\"\n \"I.e. [0.25, 0.5, 1] to train a model with width and height 256 on 64 > 128 > 256 images.\"\n )\n parser.add_argument(\"--timecycle_loss\", \"-tcl\", default=\"l1\", help=\"Timecycle loss ('l1' | 'l2')\")\n parser.add_argument(\"--timecycle_loss_weight\", \"-tclw\", default=0, type=int, help=\"Timecycle loss weight.\")\n parser.add_argument(\n \"--timecycle_motion_model_architecture\", \"-tcmm\", default=\"resnet_1blocks\",\n help=\"Architecture of Timecycle motion model. See generator_architecture for options.\"\n )\n parser.add_argument(\n \"--timecycle_motion_model_filters\", \"-tcmmf\", type=int, default=64,\n help=\"Number of filters in the last conv layer of the Timecycle motion model.\"\n )\n parser.add_argument(\n \"--timecycle_separate_motion_models\", \"-tcsmm\", action=\"store_true\",\n help=\"Set to use separate motion models for forward/backward predictions.\"\n )\n parser.add_argument(\n \"--timecycle_type\", \"-tct\", default=\"conditional\",\n help=\"Type of Timecycle ('conditional' | 'pingpong').\"\n )\n parser.add_argument(\n \"--timecycle_warp_loss_weight\", \"-tcwlw\", default=0, type=int, help=\"Timecycle warp loss weight.\"\n )\n parser.add_argument(\n \"--temporal_scaling\", \"-ts\", default=[1], type=float, nargs='+',\n help=\"Set steps for temporal scaling.\\n\"\n \"I.e. [0.2, 0.6, 1] to train a model with block_size 5 on 1 -> 3 -> 5 frames.\"\n )\n parser.add_argument(\n \"--warp_loss_weight\", \"-wlw\", default=0, type=int, help=\"Loss weight of warp loss in vid2vid.\"\n )",
"def init(self, train):\n return",
"def init(self, train):\n return",
"def init(self, train):\n return",
"def init(self, train):\n return",
"def init(self, train):\n return",
"def train(self):\n self.training = True",
"def train_setup(additional_arg_parser=None, args=None):\n if args is None:\n args = parse_input_arguments(additional_arg_parser)\n if args.do_eval or args.do_test:\n args.load_pretrained = True\n if args.load_pretrained and args.pretrained_checkpoint == '':\n raise ValueError('Must provide --pretrained_checkpoint when using --load_pretrained')\n if args.eval_batch_size == 0:\n args.eval_batch_size = args.train_batch_size\n if args.load_pretrained:\n args.save_dir = \"/\".join(args.pretrained_checkpoint.split('/')[:-1])\n else:\n args.save_dir = get_save_dir(args.save_dir, args.run_name)\n if not os.path.exists(args.save_dir):\n os.makedirs(args.save_dir)\n args.start_epoch = 0\n args.start_step = 0\n\n split_name = 'train' if args.do_train else 'validation' if args.do_eval else 'test'\n logger = get_logger(args.save_dir, 'log_train')\n\n logger.info(\"local_rank: %d, node_index: %d, gpu_per_node: %d\"%(args.local_rank, args.node_index, args.gpu_per_node))\n # Setup CUDA, GPU & distributed training\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = torch.cuda.device_count()\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend='nccl')\n args.local_rank += args.node_index * args.gpu_per_node\n args.n_gpu = 1\n args.device = device\n\n logger.info(\"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s, world size: %s\",\n args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16,\n torch.distributed.get_world_size() if args.local_rank != -1 else 1)\n\n set_seed(args)\n\n return args, logger"
]
| [
"0.63909596",
"0.6319854",
"0.6294421",
"0.62634546",
"0.62572944",
"0.62403774",
"0.6204174",
"0.6202169",
"0.620016",
"0.6191305",
"0.6153793",
"0.6125302",
"0.6112304",
"0.61037904",
"0.6086183",
"0.60679245",
"0.6048397",
"0.60145044",
"0.6010601",
"0.5994686",
"0.5976073",
"0.5973724",
"0.59656745",
"0.59622025",
"0.59622025",
"0.59622025",
"0.59622025",
"0.59622025",
"0.5921763",
"0.59188515"
]
| 0.65846765 | 0 |
Fit model that predicts return of credit | def fit_model():
global _HOME_OWNERSHIP
_HOME_OWNERSHIP = {x: i for i, x in enumerate(["rent", "own", "mortgage", "other"])}
df = pd.read_csv(os.path.join(settings.BASE_DIR, "LoanStats3a.csv"), skiprows=1).head(5000)
df = df[df.apply(is_poor_coverage, axis=1)]
df['year_issued'] = df.issue_d.apply(lambda x: int(x.split("-")[0]))
df_term = df[df.year_issued < 2012]
bad_indicators = [
"Late (16-30 days)",
"Late (31-120 days)",
"Default",
"Charged Off"
]
df_term['is_rent'] = df_term.home_ownership == "RENT"
df_term = df_term[df_term.home_ownership.apply(lambda x: x is not None and x != 'NONE')]
df_term['is_bad'] = df_term.loan_status.apply(lambda x: x in bad_indicators)
df_term['term'] = df_term.term.apply(lambda x: x.split()[0])
df_term['home_ownership'] = df_term.home_ownership.apply(lambda x: _HOME_OWNERSHIP[x.lower()])
global _LENDING_PREDICT_MODEL
_LENDING_PREDICT_MODEL = LogisticRegression()
_LENDING_PREDICT_MODEL.fit(df_term[_FEATURES], df_term.is_bad) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fit(self, X):",
"def predict():\n model = LinearRegression().fit(input_data[['x']], input_data['y'])\n future_vals = [[20], [21], [22]]\n return None",
"def fit(self, X_raw, y_made_claim, y_claims_amount):\n\n # YOUR CODE HERE\n\n # Remember to include a line similar to the one below\n # X_clean = self._preprocessor(X_raw)\n \n # made_metrics = [tf.keras.metrics.AUC(name=\"auc\")]\n # def made_nn_model(metrics, input_shape, lr=0.001):\n # model = tf.keras.Sequential([\n # tf.keras.layers.Dense(256,activation=\"relu\",input_shape=(input_shape,),kernel_regularizer=l2(l=0.05)),\n # tf.keras.layers.Dropout(0.5),\n # tf.keras.layers.Dense(64,activation=\"relu\",kernel_regularizer=l2(l=0.01)),\n # tf.keras.layers.Dropout(0.5),\n # tf.keras.layers.Dense(8,activation=\"relu\",kernel_regularizer=l2(l=0.001)),\n # tf.keras.layers.Dropout(0.5),\n # tf.keras.layers.Dense(1,activation=\"sigmoid\")\n # ])\n\n # model.compile(\n # optimizer=tf.keras.optimizers.Adam(lr=lr),\n # loss=tf.keras.losses.BinaryCrossentropy(),\n # metrics=metrics)\n\n # return model\n\n # claim_metrics = [tf.keras.metrics.MeanSquaredError(name=\"mse\")]\n # def claim_nn_model(metrics, input_shape, lr=0.001):\n # model = tf.keras.Sequential([\n # tf.keras.layers.Dense(256,activation=\"relu\",input_shape=(input_shape,)),\n # tf.keras.layers.Dropout(0.5),\n # tf.keras.layers.Dense(16,activation=\"relu\",input_shape=(input_shape,)),\n # tf.keras.layers.Dropout(0.5),\n # tf.keras.layers.Dense(8,activation=\"relu\",input_shape=(input_shape,)),\n # tf.keras.layers.Dropout(0.5),\n # tf.keras.layers.Dense(1)\n # ])\n \n # model.compile(\n # optimizer=tf.keras.optimizers.Adam(lr=lr),\n # loss=tf.keras.losses.MeanSquaredError(),\n # metrics=metrics)\n # return model\n\n \n # X_1, X_1val, y_1, y_1val, y_2, y_2val = train_test_split(X_raw,y_made_claim,y_claims_amount,test_size=0.05)\n # X_1, drop_index = self._preprocessor(X_1, train=True)\n # y_1 = y_1.drop(drop_index).values\n # y_2 = y_2.drop(drop_index).values\n \n # X_1val, drop_index = self._preprocessor(X_1val, train=False)\n # y_1val = y_1val.drop(drop_index).values\n # y_2val = y_2val.drop(drop_index).values\n \n # self.scaler = StandardScaler()\n # X_1 = self.scaler.fit_transform(X_1)\n # X_1val = self.scaler.transform(X_1val)\n \n # #prepare for claim amount\n # X_2 = X_1[y_1==1]\n # y_2 = y_2[y_1==1]\n # X_2val = X_1val[y_1val==1]\n # y_2val = y_1val[y_1val==1]\n \n # self.y_mean = np.mean(y_2)\n # self.y_std = np.std(y_2)\n # y_2 = (y_2 - self.y_mean)/self.y_std\n # y_2val = (y_2val - self.y_mean)/self.y_std\n\n # #fit made claim\n # logdir = \"log\" + datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n # tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)\n # early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10, mode=\"min\", restore_best_weights=True)\n \n # self.Model_made = made_nn_model(made_metrics, X_1.shape[1], lr=0.0003)\n # History_made = self.Model_made.fit(X_1,y_1,\n # class_weight={0:1,1:10},\n # callbacks=[tensorboard_callback, early_stopping],\n # validation_data = (X_1val, y_1val),\n # epochs=200,\n # batch_size=512)\n\n # #fit claim amount\n # early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=20, mode=\"min\", restore_best_weights=True)\n # logdir = \"log\" + datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n # tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)\n \n # self.Model_claim = claim_nn_model(claim_metrics, X_2.shape[1], lr=0.0005)\n # History = self.Model_claim.fit(X_2,y_2,\n # callbacks=[tensorboard_callback, early_stopping],\n # validation_data=(X_2, y_2),\n # epochs=5000,\n # batch_size=512)\n \n \n X_1, drop_index = 
self._preprocessor(X_raw, train=True)\n y_1 = y_made_claim.drop(drop_index).values\n y_2 = y_claims_amount.drop(drop_index).values\n \n scaler = StandardScaler()\n clf_made = RandomForestClassifier(n_estimators=500,class_weight={0:1,1:10},n_jobs=-1,max_depth=10,max_features=33,min_samples_leaf=30)\n self.Model_made = Pipeline([(\"scale\",scaler),(\"clf\",clf_made)])\n self.Model_made.fit(X_1,y_1)\n #self.Model_made = fit_and_calibrate_classifier(self.Model_made, X_1, y_1)\n \n # #prepare for claim amount\n X_2 = X_1[y_1==1]\n y_2 = y_2[y_1==1]\n \n self.y_mean = np.mean(y_2)\n self.y_std = np.std(y_2)\n y_2 = (y_2 - self.y_mean)/self.y_std\n\n clf_claim = RandomForestRegressor(n_estimators=500,n_jobs=-1,max_depth=10,max_features=30,min_samples_leaf=70)\n self.Model_claim = Pipeline([(\"scale\",scaler),(\"clf\",clf_claim)])\n self.Model_claim.fit(X_2,y_2)\n \n\n return None",
"def fit_model(self):\n logger.info('Fitting model')\n if self.traj_dict is None:\n self.traj_dict = self.get_traj_dict()\n self.model.fit(self.traj_dict.values())",
"def fit():\n pass",
"def predict(self, fit_result, x):\n raise NotImplementedError()",
"def fit_and_predict_LR(X_train, Y_train, X_test):\n\n # Import the package\n from sklearn.linear_model import LogisticRegression\n\n #referenced to sklearn documentation \n \n # fit the model... \n clf = LogisticRegression().fit(X_train, Y_train) \n # make predictions \n predicted_LR = clf.predict(X_test)\n return predicted_LR\n ### END SOLUTION ### ",
"def __fit_model(self):\n\n labels = self.labeled_labels\n features = self.labeled_features\n\n pred = np.array(cross_val_predict(self.clf,\n features,\n labels,\n cv=self.cv))\n\n stats = self.__get_statistics(labels, pred)\n self.statistics.append(stats)\n\n self.clf.fit(features, labels)\n\n return self",
"def nnRegression(data):",
"def train_model(self):\r\n alpha, accuracy_rate = self.select_model()\r\n # Initialize logistic regression with alpha(learning rate)\r\n lg = logisticregression(C=alpha)\r\n # Train the model.\r\n lg.fit(self.training_data, self.target_data)\r\n # Save the trained model as .pkl file.\r\n joblib.dump(value=lg, filename=self.intention_id+'.pkl', compress=1)\r\n print \"Estimated Parameters of Logistic Regression\"\r\n # Estimated parameters of logistic regression.\r\n print lg.get_params()",
"def predict(self, model, x_test):\n pass",
"def predict(model, x):\n y = model.predict(x)\n print(\"y\")\n print(y)\n return y[0]",
"def predict_price(area) -> float:\n response = requests.get(TRAIN_DATA_URL)\n # YOUR IMPLEMENTATION HERE\n #print(response.content)\n d = pd.read_csv(TRAIN_DATA_URL, header = None)\n d_T = d.T\n #d_T = d_T[:].values()\n d_T.drop(d_T.index[1])\n #print(d_T)\n '''x_a = [row[0] for row in d]\n y_a = [row[1] for row in d]\n x_s = np.array(x_a[1:])\n y_s = np.array(y_a[1:])'''\n x_1 = d_T[0][1:]\n y_1 = d_T[1][1:]\n x_min = x_1.min()\n x_max = x_1.max()\n y_min = y_1.min()\n y_max = y_1.max()\n x = np.array((x_1-x_min)/(x_max-x_min))\n y = np.array((y_1-y_min)/(y_max-y_min))\n x_mean, y_mean = mean(x), mean(y)\n b1 = covariance(x, x_mean, y, y_mean/variance(x, x_mean))\n b0 = y_mean - b1*x_mean\n print(b0, b1)\n return np.array(b0+b1*area)",
"def trainRegressionModel(X,y):\n # # instantiate a logistic regression model, and fit with X and y\n # model = LogisticRegression()\n # model = model.fit(X, y)\n # # check the accuracy on the training set\n # print(model.score(X, y))\n #X['intercept'] = 1.0\n #del X['isCapitalized']\n #del X['isNN']\n #del X['isNNP']\n #del X['isJJ']\n #del X['isUpper']\n #del X['isPrecedingIN']\n logit = sm.Logit(y, X)\n result = logit.fit()\n print(result.summary())\n print(result.conf_int())\n model = LogisticRegression()\n model = model.fit(X, y)\n print(model.score(X, y))\n print(y.mean())\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)\n model2 = LogisticRegression()\n model2.fit(X_train, y_train)\n # predict class labels for the test set\n predicted = model.predict(X_test)\n print(predicted)\n for i in predicted:\n if i==1:\n print(\"Test:\"+str(i))\n print(max(predicted))\n #generate class probabilities\n probs = model2.predict_proba(X_test)\n print(probs)\n # generate evaluation metrics\n print(\"Accuracy: \"+str(metrics.accuracy_score(y_test, predicted)))\n print(\"AUC: \"+str(metrics.roc_auc_score(y_test, probs[:, 1])))\n print(metrics.confusion_matrix(y_test, predicted))\n print(metrics.classification_report(y_test, predicted))\n\n from sklearn.cross_validation import cross_val_score\n # evaluate the model using 10-fold cross-validation\n scores = cross_val_score(LogisticRegression(), X, y, scoring='accuracy', cv=10)\n print(scores)\n print(scores.mean())",
"def predict(x_tst, model):\n\n predictions = model.predict(x_tst)\n return predictions",
"def fit_predict(self):\n self.classifier = self.model\n self.classifier.fit(self.X_sample, self.y_sample)\n self.y_pred = self.classifier.predict(self.X_test)",
"def fit(self, X, y):",
"def fit(self, X, y):",
"def fit(self, X, y):",
"def predict(self, X):",
"def predict(self, X):",
"def fit(model, input: str, output: str):\n print(f\"Fitting model of type: {model}\")\n\n # Define the model. Use a randomized search to efficiently explore the\n # hyperparameter space in a limited time.\n if model == \"logistic\":\n # Primal logistic regression\n param_dist = {\n 'C': loguniform(0.1, 100), 'max_iter': [250], 'fit_intercept': [True],\n 'intercept_scaling': [1], 'penalty': ['l2'], 'tol': loguniform(1e-6, 1e-4)\n }\n mdl_cv = RandomizedSearchCV(LogisticRegression(solver='lbfgs'), param_dist, cv=3, refit=True, verbose=2, n_iter=10)\n elif model == \"rand_forest\":\n # Random Forest classifier\n param_dist = {'max_depth': randint(3,8), 'max_features': randint(2,9), 'n_estimators': randint(50, 100),\n 'min_samples_split': randint(3, 7)}\n mdl_cv = RandomizedSearchCV(RandomForestClassifier(), param_dist, cv=3, refit=True, verbose=2, n_iter=10)\n elif model == \"gradient_boost\":\n # Extreme Gradient Boost classifier\n param_dist = {'max_depth': [3, 4], 'gamma': loguniform(1e-3, 1e-2), 'min_child_weight': randint(1, 10),\n 'learning_rate': loguniform(0.05, 0.3), 'n_estimators': randint(10, 40)}\n mdl_cv = RandomizedSearchCV(XGBClassifier(), param_dist, cv=3, refit=True, verbose=2, n_iter=10)\n else:\n raise NotImplementedError(f\"Don't know how to train model of type: {model}.\\nValid options are: logistic, rand_forest, gradient_boost.\")\n\n # Define x (input data) and y (target data)\n df = pd.read_csv(input)\n x = df.loc[:, df.columns != 'Exited']\n y = df.Exited\n print(f\"Data has x.shape = {x.shape} and y.shape = {y.shape}\")\n\n # Fit the model with randomized search\n mdl_cv.fit(x, y)\n\n # Print some results\n print(\"Best score:\", mdl_cv.best_score_)\n print(\"Best params:\", pprint.pformat(mdl_cv.best_params_))\n\n # Save to data store\n os.makedirs(os.path.dirname(output), exist_ok=True)\n with open(output, \"wb\") as f:\n joblib.dump(mdl_cv.best_estimator_, f)",
"def fit(self):\n self.lr = LRHMC( self.X_train, self.X_test, self.y_train, self.y_test )\n self.lr.fit()",
"def predict_price(area) -> float:\n response = requests.get(TRAIN_DATA_URL)\n # YOUR IMPLEMENTATION HERE\n ...\n # print(response.content)\n x = str(response.content)\n # print(x)\n x = x.split('\\\\n')\n # print(len(x))\n # print(x[2])\n a = x[0].split(',')\n p = x[1].split(',')\n a = a[1:]\n p = p[1:]\n a = np.array([float(i) for i in a])\n p = np.array([float(i) for i in p])\n print(a.shape)\n clf = LR()\n clf.fit(a, p)\n\n ans = []\n for i in area:\n \tans.append(i)\n return ans\n # print(a)\n\n return 0",
"def predict(self, X): \n return self.f(X, self.coefficients)",
"def fit_training_data(self):\n self.model.fit(self.X_train)",
"def mlr(df, exp_vars, resp_var, \n method='ols', \n fit_intercept=True,\n kcv=3,\n normalize=False):\n from sklearn import cross_validation\n from sklearn.linear_model import LinearRegression, RidgeCV\n from sklearn.linear_model import LassoCV, ElasticNetCV\n from sklearn.metrics import r2_score\n from sklearn.utils import resample\n import matplotlib.pyplot as plt\n import seaborn as sn\n import pandas as pd\n import numpy as np\n \n # Separate data\n X = df[exp_vars]\n y = df[resp_var]\n \n # Setup model\n if method == 'ols':\n model = LinearRegression(fit_intercept=fit_intercept, \n normalize=normalize)\n elif method == 'lasso':\n model = LassoCV(fit_intercept=fit_intercept, \n normalize=normalize, \n max_iter=10000,\n cv=kcv)\n elif method == 'ridge':\n model = RidgeCV(fit_intercept=fit_intercept, \n normalize=normalize, \n alphas=np.logspace(-10, 10, 21))\n elif method == 'el-net':\n model = ElasticNetCV(l1_ratio=[.1, .5, .7, .9, .95, .99, 1],\n fit_intercept=fit_intercept, \n normalize=normalize,\n cv=kcv)\n else:\n raise ValueError('\"method\" parameter must be in [\"ols\", \"lasso\", \"ridge\", \"el-net\"]')\n \n # k-fold cross validation\n #cv_scores = cross_validation.cross_val_score(model, X, y, cv=kcv, scoring='r2')\n #print 'Mean r2 from %s-fold CV: %.3f\\n' % (kcv, cv_scores.mean())\n \n # Train model on full dataset\n model.fit(X, y)\n \n # Get y-hat\n y_pred = model.predict(X)\n \n # r2 based on calibration data\n r2 = r2_score(y, y_pred)\n print 'r2:', r2\n print ''\n \n # Summary of model\n print model\n print ''\n \n if method == 'lasso':\n print 'Lasso alpha:', model.alpha_\n print ''\n elif method == 'ridge':\n print 'Ridge alpha:', model.alpha_\n print ''\n elif method == 'el-net':\n print 'Elastic net alpha:', model.alpha_ \n print 'Elastic net L1 ratio:', model.l1_ratio_ \n print ''\n else: # OLS\n pass\n \n # Plot\n fig = plt.figure(figsize=(15,15))\n \n # Paired points for each site\n ax1 = plt.subplot2grid((2,2), (0,0), colspan=2)\n ax1.plot(range(0, len(X.index)), y, 'ro', label='Observed')\n ax1.plot(range(0, len(X.index)), y_pred, 'b^', label='Modelled')\n \n ax1.set_xticks(range(0, len(X.index)))\n ax1.set_xticklabels(X.index, rotation=90, fontsize=12)\n ax1.set_xlim(0, len(X.index)-1)\n \n ax1.set_xlabel('Site code', fontsize=16)\n ax1.set_ylabel(resp_var)\n ax1.set_title('Points paired for each location', fontsize=20)\n ax1.legend(loc='best', fontsize=16)\n \n # Modelled versus observed\n ax2 = plt.subplot2grid((2,2), (1,0), colspan=1)\n ax2.plot(y, y_pred, 'ro')\n ax2.set_xlabel('Observed', fontsize=16)\n ax2.set_ylabel('Modelled', fontsize=16)\n ax2.set_title('Modelled versus observed', fontsize=20)\n \n # Hist of residuals\n ax3 = plt.subplot2grid((2,2), (1,1), colspan=1)\n sn.distplot(y - y_pred, kde=True, ax=ax3)\n ax3.set_title('Histogram of residuals', fontsize=20)\n \n plt.tight_layout()\n \n # Get param estimates\n params = pd.Series(model.coef_, index=X.columns)\n\n # Estimate confidence using bootstrap\n # i.e. what is the std. dev. of the estimates for each parameter\n # based on 1000 resamplings\n err = np.std([model.fit(*resample(X, y)).coef_ for i in range(1000)], \n axis=0)\n\n # Build df\n res = pd.DataFrame({'effect':params,\n 'error':2*err})\n\n # Rough indicator of significance: are the estimated values more than\n # 2 std. devs. from 0 (~95% CI?). NB: this assumnes the \"marginal posterior\" \n # is normal, which I haven't tested for and which quite possibly isn't true\n # - use with care! 
\n res['signif'] = np.abs(res['effect']) > res['error']\n \n return res",
"def fit(self, x):\n pass",
"def model_predict(self, X):\n return self.cmodel.predict(X=X)",
"def fit_predict_single_fold(\n self, train: TabularDataset, valid: TabularDataset\n ) -> Tuple[LinearEstimator, np.ndarray]:\n if type(train) is PandasDataset:\n train = train.to_numpy()\n valid = valid.to_numpy()\n\n _model, cs, l1_ratios, early_stopping = self._infer_params()\n\n train_target, train_weight = self.task.losses[\"sklearn\"].fw_func(train.target, train.weights)\n valid_target, valid_weight = self.task.losses[\"sklearn\"].fw_func(valid.target, valid.weights)\n\n model = deepcopy(_model)\n\n best_score = -np.inf\n best_pred = None\n best_model = None\n\n metric = self.task.losses[\"sklearn\"].metric_func\n\n for l1_ratio in sorted(l1_ratios, reverse=True):\n\n try:\n model.set_params(**{\"l1_ratio\": l1_ratio})\n except ValueError:\n pass\n\n model = deepcopy(_model)\n\n c_best_score = -np.inf\n c_best_pred = None\n c_best_model = None\n es = 0\n\n for n, c in enumerate(cs):\n\n try:\n model.set_params(**{\"C\": c})\n except ValueError:\n model.set_params(**{\"alpha\": c})\n\n model.fit(train.data, train_target, train_weight)\n\n if np.allclose(model.coef_, 0):\n if n == (len(cs) - 1):\n logger.info2(\n \"All model coefs are 0. Model with l1_ratio {0} is dummy\".format(l1_ratio),\n UserWarning,\n )\n else:\n logger.debug(\"C = {0} all model coefs are 0\".format(c))\n continue\n\n pred = self._predict_w_model_type(model, valid.data)\n score = metric(valid_target, pred, valid_weight)\n\n logger.debug(\"C = {0}, l1_ratio = {1}, score = {2}\".format(c, 1, score))\n\n # TODO: check about greater and equal\n if score >= c_best_score:\n c_best_score = score\n c_best_pred = deepcopy(pred)\n es = 0\n c_best_model = deepcopy(model)\n else:\n es += 1\n\n if es >= early_stopping:\n logger.debug(\"Early stopping..\")\n break\n\n if self.timer.time_limit_exceeded():\n logger.info3(\"Time limit exceeded\")\n break\n\n # TODO: Think about is it ok to check time inside train loop?\n if (model.coef_ != 0).all():\n logger.debug(\"All coefs are nonzero\")\n break\n\n if c_best_score >= best_score:\n best_score = c_best_score\n best_pred = deepcopy(c_best_pred)\n best_model = deepcopy(c_best_model)\n\n if self.timer.time_limit_exceeded():\n logger.info3(\"Time limit exceeded\")\n break\n\n val_pred = self.task.losses[\"sklearn\"].bw_func(best_pred)\n\n return best_model, val_pred"
]
| [
"0.6555756",
"0.6489909",
"0.64893115",
"0.6393043",
"0.6281668",
"0.6277158",
"0.62597495",
"0.6257928",
"0.62528986",
"0.6236069",
"0.62204945",
"0.6190761",
"0.6151779",
"0.6123758",
"0.61215025",
"0.61070174",
"0.60761875",
"0.60761875",
"0.60761875",
"0.6063148",
"0.6063148",
"0.6062561",
"0.6059941",
"0.6038616",
"0.6035422",
"0.6035213",
"0.6012619",
"0.60111123",
"0.5976396",
"0.59502167"
]
| 0.66583115 | 0 |
Random rhythm and notes based on time | def randMelody(self, value, chord):
self.randRhythm()
self.randNotes(value, chord) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def randNotes(self, value, chord):\n aChord = mgChord(value, chord)\n self.data = []\n for i in range(len(self.rhythm)):\n note = random.choice(aChord).copy()\n note.setDuration(self.rhythm[i])\n self.data.append(note)",
"def create_melody(inst, chord_progression):\r\n \r\n for chord in chord_progression:\r\n \r\n meas = inst.NewMeasure()\r\n last_note = random.choice(CHORDS[chord])\r\n pos = 0\r\n \r\n while pos < 64:\r\n dur = random.choice([8, 8, 16, 16]) \r\n last_note = get_rdm_note(chord, last_note)\r\n if random.randint(1,10) <= 8: # 20% silence\r\n meas.NewNote(pos, dur, last_note, 5)\r\n pos += dur",
"def make_music_rand():\n pass",
"def gen_melody(scale, timings, note_deviance):\n melody = [random.sample(scale, 1)[0] for i in range(len(timings))]\n\n px = np.random.rand(len(timings))\n idx = np.argwhere(px <= note_deviance)\n direction = [-1, 1]\n for i in idx:\n melody[i[0]] = melody[i[0]] + random.sample(direction, 1)[0]\n return melody",
"def get_rdm_note(chord, last_note):\r\n \r\n if random.randint(1,10) <= 6:\r\n # chord note\r\n return random.choice(CHORDS[chord]) \r\n else:\r\n # scale note \r\n n = C_SCALE.index(last_note)\r\n return C_SCALE[n+random.choice(range(-n, 6-n)[2:5])]",
"def sample_times(times, num_examples):\n sample = random.sample(times, num_examples)\n make_hist(sample, 10, 'Sample of Size ' + str(num_examples),\n 'Minutes to Complete Race', 'Number of Runners')",
"def get_random_time_between_tts():\n return random.randint(TIME_BETWEEN_TTS_LOWER, TIME_BETWEEN_TTS_UPPER)",
"def generate_line_durations(\n n_measures: int,\n duration_weights: Dict[float, float],\n valid_rhythmic_patterns: List[List[float]],\n end_with_whole_note: bool = True\n) -> List[float]:\n current_time = 0\n line_durations = []\n current_measure_durations = []\n total_time = n_measures - int(end_with_whole_note)\n\n while current_time < total_time:\n appropriate_durations = select_appropriate_durations(\n current_time, total_time, current_measure_durations,\n valid_rhythmic_patterns\n )\n duration = random.choices(\n appropriate_durations,\n [duration_weights[x] for x in appropriate_durations]\n )[0]\n current_time += duration\n current_measure_durations = update_current_measure_durations(\n current_measure_durations, duration\n )\n line_durations.append(duration)\n\n if end_with_whole_note:\n line_durations.append(1.0)\n return line_durations",
"def _rhythm_obs_proc(pattern):\n # We asign the endpoint of the hypothesis.\n pattern.hypothesis.end.value = pattern.evidence[o.QRS][-1].time.value",
"def time_per_part():\r\n return random.normalvariate(PT_MEAN, PT_SIGMA)",
"def getStatusMessage():\n\n now = datetime.datetime.now()\n hour = now.hour\n mood = Sentience.getPrimaryMood()\n exp_mood = Sentience.getExposedPositivity()\n\n random.seed((time.time()//86400*86400))\n\n #sleeping\n if not (9 <= hour < 21) and (mood <= 0.5 or not 7 <= hour < 23):\n if exp_mood < -0.1:\n return random.choice([\n \"bleh\",\n \"not sleeping well\",\n \"why's chat so noisy\",\n \"can't sleep\",\n \"do not disturb pls thx\",\n ])\n\n if mood < 0:\n return random.choice([\n \"crying myself to sleep rn\",\n \":(\",\n \"had a nightmare\",\n \"can't sleep\",\n \"._.\"\n ])\n\n return random.choice([\n \"zzz...\",\n \"sweet dreams\",\n \"good night\",\n \"sleeping...\",\n \"having some rest\"\n ])\n\n if Sentience.isExposedPositivityOverloaded():\n return random.choice([\n \"i'm done\",\n \"too much\"\n \"goodbye\",\n \"tired\",\n \"need rest\",\n ])\n\n #happy\n if mood >= 0.7:\n return random.choice([\n \":D\",\n \"great day\",\n \"happy happy\",\n \"hehe\",\n \"good times\",\n \"yay\",\n \"what's up\",\n \"happiness\",\n \"nice day\",\n ])\n #moody-ish\n if mood >= 0.4:\n return random.choice([\n \"hmm\",\n \"yeet\",\n \"bleh\",\n \"oh\",\n \"moody rn\",\n \"nothing\"\n ])\n #more moody\n if mood >= -0.3:\n return random.choice([\n \"moody rn\",\n \"not happy\",\n \"i'm fine.\",\n \"bleh\",\n \"._.\",\n \":(\",\n ])\n #very unhappy\n return random.choice([\n \"sad\",\n \"cries\",\n \"roar\",\n \":_(\",\n \">:(\",\n \"mad\",\n \"angry\",\n \"I'M FINE.\",\n \"bleh\",\n \"no\",\n ])",
"def tempo_r(mid, beats, rs):\n bt = mid.ticks_per_beat\n trk = MidiTrack()\n trk.name = \"Tempo variation\"\n trk.append(MetaMessage(\"set_tempo\",\n tempo=beats[0],\n time=0))\n\n for i, beat in enumerate(beats):\n r = rs[i]\n if r == 0: # For the deterministic case\n tempo_r = beat\n else:\n tempo_r = rd.randint(beat-int(beat*r), beat + int(beat*r)) + 1\n trk.append(MetaMessage(\"set_tempo\",\n time=bt,\n tempo=tempo_r))\n\n mid.tracks.append(trk)\n return mid",
"def time(is_train):\n context = composition.Context()\n start_minutes = random.randint(1, 24*60 - 1)\n while True:\n duration_minutes = random.randint(1, 12*60 - 1)\n if train_test_split.is_train(duration_minutes) == is_train:\n break\n end_minutes = start_minutes + duration_minutes\n\n def format_12hr(minutes):\n \"\"\"Format minutes from midnight in 12 hr format.\"\"\"\n hours = (minutes // 60) % 24\n minutes %= 60\n #am_pm = 'AM' if hours < 12 else 'PM'\n #hours = (hours - 1) % 12 + 1\n return '{}:{:02}'.format(hours, minutes)\n #return '{}:{:02} {}'.format(hours, minutes, am_pm)\n\n start = format_12hr(start_minutes)\n end = format_12hr(end_minutes)\n\n which_question = random.randint(0, 3)\n if which_question == 0:\n # Question: What is start = end - duration?\n template = random.choice([\n 'Che ore sono se mancano {duration} minuti alle {end}?',\n ])\n return example.Problem(\n question=example.question(\n context, template, duration=duration_minutes, end=end),\n answer=start)\n elif which_question == 1:\n # Question: What is end = start + duration?\n template = random.choice([\n 'Che ore sono se sono passati {duration} minuti dalle {start}?',\n ])\n return example.Problem(\n question=example.question(\n context, template, duration=duration_minutes, start=start),\n answer=end)\n else:\n # Question: What is duration = end - start?\n template = random.choice([\n 'Quanti minuti ci sono tra le {start} e le {end}?',\n ])\n return example.Problem(\n question=example.question(context, template, start=start, end=end),\n answer=duration_minutes)",
"def generate_horror_title():\n d666 = random.randint(1, 666)\n if d666 <= 111:\n #the adj noun\n return \"The \" + horror_adj[random.randint(0, len(horror_adj) - 1)] + \" \" + horror_noun[random.randint(0, len(horror_noun) - 1)]\n elif d666 > 111 and d666 <= 222: \n #noun of noun\n return horror_noun[random.randint(0, len(horror_noun) - 1)] + \" of \" + horror_noun[random.randint(0, len(horror_noun) - 1)]\n elif d666 > 222 and d666 < 444: \n #the adj noun of verb \n return \"The \" + horror_adj[random.randint(0, len(horror_adj) - 1)] + \" \" + horror_noun[random.randint(0, len(horror_noun) - 1)] + \" of \" + horror_verb[random.randint(0, len(horror_verb) - 1)]\n elif d666 >= 444 and d666 < 555: \n #noun of noun\n return horror_noun[random.randint(0, len(horror_noun) - 1)] + \" of \" + horror_noun[random.randint(0, len(horror_noun) - 1)]\n elif d666 >= 555:\n #verb of the adj noun\n return horror_verb[random.randint(0, len(horror_verb) - 1)] + \" of the \" + horror_adj[random.randint(0, len(horror_adj) - 1)] + \" \" + horror_noun[random.randint(0, len(horror_noun) - 1)]",
"def __init__(self, length):\r\n # Receive how many notes should be in the melody\r\n self.length = length\r\n # Initialize size of melody's note list\r\n self.notes = [0] * self.length\r\n \r\n noteList = [0, 2, 4, 5, 7, 9, 11, 12, 14, 16, 17, 19, 21, 23, 24]\r\n \r\n # randomly assign notes to each index in the melody's\r\n # note list\r\n i = 0\r\n while i < self.length:\r\n self.notes[i] = random.choice(noteList)\r\n i += 1",
"def rhythm_data_generator(patient_generator, frame_size=2048, samples_per_patient=1):\n for _, (signal, labels) in patient_generator:\n num_segments, segment_size = signal.shape\n patient_rhythm_labels = labels['rtype'] # note: variables in a .npz file are only loaded when accessed\n for _ in range(samples_per_patient):\n # randomly choose a frame that lies within the segment i.e. no zero-padding is necessary\n segment_index = np.random.randint(num_segments)\n frame_start = np.random.randint(segment_size - frame_size)\n frame_end = frame_start + frame_size\n x = signal[segment_index, frame_start:frame_end]\n x = np.expand_dims(x, axis=1) # add channel dimension\n # calculate the durations of each rhythm in the frame and determine the final label\n rhythm_ends, rhythm_labels = patient_rhythm_labels[segment_index]\n frame_rhythm_durations, frame_rhythm_labels = get_rhythm_durations(\n rhythm_ends, rhythm_labels, frame_start, frame_end)\n y = get_rhythm_label(frame_rhythm_durations, frame_rhythm_labels)\n yield x, y",
"def generate_random_line(\n line_durations: List[float],\n pitches: List[ScaleElement]\n) -> List[PieceElement]:\n melodic_line = []\n current_time = 0\n for duration in line_durations:\n scale_element = random.choice(pitches)\n piece_element = PieceElement(\n note=scale_element.note,\n position_in_semitones=scale_element.position_in_semitones,\n position_in_degrees=scale_element.position_in_degrees,\n degree=scale_element.degree,\n start_time=current_time,\n duration=duration\n )\n melodic_line.append(piece_element)\n current_time += duration\n return melodic_line",
"def randomize_trajectory(self):\n self.angle = randint(-360, 360)\n self.speed = randint(1, 5)/2.5",
"def get_timings(beat, max_length, rand_beat):\n BEATS = [int(beat/4),\n int(beat/2),\n int(beat),\n int(beat*2),\n int(beat*4)]\n\n total_steps = beat * max_length\n timings = [beat] * max_length\n\n if rand_beat == True:\n timings = []\n while total_steps > 0:\n duration = random.sample(BEATS, 1)[0]\n total_steps -= duration\n timings.append(duration)\n\n return timings",
"def mon_maker():\r\n random_mon = {'eyeratio':0.2, 'eyeL':30,\r\n 'mouthratio':0.8, 'mouthL':30,\r\n 'headL':40, 'headA':15,\r\n 'cheekL':25, 'cheekA':45,\r\n 'chinL': 30, 'chinA':90\r\n }\r\n return random_mon",
"def generateNotes():\r\n fs = 44100 # hertz\r\n seconds = 3 # Note duration of 3 seconds\r\n noteNames = [\"C4\", \"D4\", \"E4\", \"F4\", \"G4\", \"A4\", \"B4\"]\r\n for noteName in noteNames:\r\n myNote = music21.note.Note(noteName)\r\n noteFrequency = myNote.pitch.frequency\r\n # Generate array with seconds*sample_rate steps, ranging between 0 and seconds\r\n t = np.linspace(0, seconds, seconds * fs, False)\r\n\r\n # Generate a 440 Hz sine wave\r\n sound = np.sin(noteFrequency * t * 2 * np.pi)\r\n\r\n # Ensure that highest value is in 16-bit range\r\n audio = sound * (2**15 - 1) / np.max(np.abs(sound))\r\n # Convert to 16-bit data\r\n audio = audio.astype(np.int16)\r\n\r\n # Start playback\r\n play_obj = sa.play_buffer(audio, 1, 2, fs)\r\n\r\n # Wait for playback to finish before exiting\r\n play_obj.wait_done()\r\n\r\n #Write sound to file\r\n sf.write('assets/patterns/'+noteName+'.wav', audio, fs)",
"def make_note_sound(self, freq, time, amplitude=1):\n note = sin(2 * pi * freq * amplitude *\n linspace(0, time, time * self.RATE))\n freq = utils.reverse_dictionary(self.FREQUENCY_MAP)[freq]\n print freq, time, amplitude, len(note)\n return note / np.max(np.abs(note), axis=0) # Normalize the note",
"def time(self):\n\n self.timing = True\n self.scramble()\n\n self.disp = False",
"def buildMelodyNotes(m, bars):\n\n # Get the notes part of the scale specified in Melody object\n s = generate_scale(m.getKeySignature(), m.getScale(), m.getOctave())\n\n if bars < MIN_BARS or bars > MAX_BARS:\n bars = random.randint(MIN_BARS, 4)\n\n # Get a random number of notes value between the number of bars (so\n # that there is roughly one note per bar) and SMALLEST_NOTE //\n # OTHER_NOTE (so that each note would be roughly OTHER_NOTE long)\n numNotes = random.randint(4, max(1, bars * (SMALLEST_NOTE //\n DOTTED_EIGHTH_NOTE)))\n seq = []\n # randomize notes in scale by choosing {numNotes} random notes\n for i in range(numNotes):\n seq.append(random.choice(s))\n\n # randomize length of each notes using getRandomStructure function\n lengths, barSpace = getEqualStructure(bars, numNotes)\n\n # add the random notes to generate the melody\n for i in range(numNotes):\n if seq[i] == -1:\n m.addNote(Note(0, lengths[i], vel=0))\n else:\n m.addNote(Note(seq[i], lengths[i]))\n\n if barSpace > 0:\n # Add rest just in case last note does not extend to end of bar\n m.addNote(Note(0, barSpace, vel=0))",
"def single_introduction(end):\n return [random.randint(0,end)]",
"def totem_random():\n random_head()\n random_head()\n random_head()",
"def randomize(data, length=0):\r\n longitud = 0 if length == 0 else 12 - length\r\n return data + \"-\" + str(time.time()).replace(\".\", \"\")[longitud:]",
"def seconds (self):\n if self.counter == self.times:\n return None\n\n self.counter += 1\n return random.uniform(self.lower, self.upper)",
"def migration_time(rand):\n ta=np.random.uniform(4,8)*1e4*yr\n K=np.power(10,rand)\n te=ta/K\n return ta, te",
"def generate_round():\n prog_len = random.randint(_MIN_LEN, _MAX_LEN)\n diff = random.randint(_MIN_DIFF, _MAX_DIFF)\n start = random.randint(0, _MAX_START)\n prog = _generate_progression(prog_len, diff, start)\n missing_position = random.randint(0, len(prog) - 1)\n missing_element = prog[missing_position]\n prog[missing_position] = '..'\n prog = list(map(str, prog))\n string_question = ' '.join(prog)\n return string_question, missing_element"
]
| [
"0.70728934",
"0.6706153",
"0.6540487",
"0.6391081",
"0.6068149",
"0.60672534",
"0.60280246",
"0.60105956",
"0.59780055",
"0.5953066",
"0.59486765",
"0.5922868",
"0.57505494",
"0.5716557",
"0.5710636",
"0.57026637",
"0.5684732",
"0.56817174",
"0.5633969",
"0.5596766",
"0.556031",
"0.5557936",
"0.5541022",
"0.5512192",
"0.54990596",
"0.54879117",
"0.54747665",
"0.5471065",
"0.5451147",
"0.5436061"
]
| 0.72257495 | 0 |
Generate random rhythm, based on chord It must have rhythm before | def randNotes(self, value, chord):
aChord = mgChord(value, chord)
self.data = []
for i in range(len(self.rhythm)):
note = random.choice(aChord).copy()
note.setDuration(self.rhythm[i])
self.data.append(note) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def randMelody(self, value, chord):\n self.randRhythm()\n self.randNotes(value, chord)",
"def get_rdm_note(chord, last_note):\r\n \r\n if random.randint(1,10) <= 6:\r\n # chord note\r\n return random.choice(CHORDS[chord]) \r\n else:\r\n # scale note \r\n n = C_SCALE.index(last_note)\r\n return C_SCALE[n+random.choice(range(-n, 6-n)[2:5])]",
"def create_melody(inst, chord_progression):\r\n \r\n for chord in chord_progression:\r\n \r\n meas = inst.NewMeasure()\r\n last_note = random.choice(CHORDS[chord])\r\n pos = 0\r\n \r\n while pos < 64:\r\n dur = random.choice([8, 8, 16, 16]) \r\n last_note = get_rdm_note(chord, last_note)\r\n if random.randint(1,10) <= 8: # 20% silence\r\n meas.NewNote(pos, dur, last_note, 5)\r\n pos += dur",
"def make_music_rand():\n pass",
"def generate_horror_title():\n d666 = random.randint(1, 666)\n if d666 <= 111:\n #the adj noun\n return \"The \" + horror_adj[random.randint(0, len(horror_adj) - 1)] + \" \" + horror_noun[random.randint(0, len(horror_noun) - 1)]\n elif d666 > 111 and d666 <= 222: \n #noun of noun\n return horror_noun[random.randint(0, len(horror_noun) - 1)] + \" of \" + horror_noun[random.randint(0, len(horror_noun) - 1)]\n elif d666 > 222 and d666 < 444: \n #the adj noun of verb \n return \"The \" + horror_adj[random.randint(0, len(horror_adj) - 1)] + \" \" + horror_noun[random.randint(0, len(horror_noun) - 1)] + \" of \" + horror_verb[random.randint(0, len(horror_verb) - 1)]\n elif d666 >= 444 and d666 < 555: \n #noun of noun\n return horror_noun[random.randint(0, len(horror_noun) - 1)] + \" of \" + horror_noun[random.randint(0, len(horror_noun) - 1)]\n elif d666 >= 555:\n #verb of the adj noun\n return horror_verb[random.randint(0, len(horror_verb) - 1)] + \" of the \" + horror_adj[random.randint(0, len(horror_adj) - 1)] + \" \" + horror_noun[random.randint(0, len(horror_noun) - 1)]",
"def get_random_chord_type(self) -> str:\n i = random.randint(0, len(self._chord_types) - 1)\n return self._chord_types[i]",
"def generateHarmonyPitchMU(self, harmonicRhythm, harmonicComplexity,\n minMajRatio, structureLevelMU):\n # decide whether to have cadence\n cadenceProbDict = self.chordProfile.getCadenceProb()\n cadenceProb = cadenceProbDict[structureLevelMU]\n random = RandomManager.getActive()\n r = random.random()\n\n cadenceChordProgression = []\n if r <= cadenceProb:\n cadenceChordProgression = self._createCadence(harmonicRhythm)\n if len(cadenceChordProgression) == len(harmonicRhythm):\n return cadenceChordProgression\n\n # remove as many durations from harmonicRhythm as the\n # number of chords used for the cadence\n harmonicRhythm = harmonicRhythm[:-len(cadenceChordProgression)]\n\n chordProgression = []\n previousTriadCode = None\n chordIndex = None\n\n # step through all durations forming the harmonic rhythm to assign\n # chord\n for durationObj in harmonicRhythm:\n duration = durationObj.getDuration()\n scale = self.chordProfile.getScale().getName()\n metricalAccentLevel = durationObj.getMetricalAccent()\n\n # calculate scores\n scores = self._calcMetrics(previousTriadCode, chordIndex, metricalAccentLevel,\n harmonicComplexity, minMajRatio)\n\n # choose triad\n chord, chordIndex = self._decideTriad(scores, durationObj)\n code = chord.getCode()\n\n # get probability of adding dissonant thirds\n dissonanceProb = self._calcDissonanceProb(harmonicComplexity,\n metricalAccentLevel)\n r = random.random()\n\n # decide whether to apply dissonance\n if r <= dissonanceProb:\n\n # add dissonance(s)\n code = self._decideDissonance(chord)\n\n # create new chord\n newChord = Chord(code, duration=duration, scale=scale,\n octave=4)\n\n # append chord to progression\n chordProgression.append(newChord)\n\n # update previous code and triad code\n previousCode = newChord.getCode()\n previousTriadCode = previousCode[:3]\n\n # add up chord progression and chords for cadence\n chordProgression += cadenceChordProgression\n\n s = self._realizeM21Sequence(chordProgression)\n s.show(\"midi\")\n\n return chordProgression",
"def _prev_rhythm_tconst(pattern, rhythm):\n BASIC_TCONST(pattern, rhythm)\n tnet = pattern.last_tnet\n tnet.set_equal(pattern.hypothesis.start, rhythm.end)",
"async def chord(ctx, root:str,length=3,):\n if length < 2:\n length = 2\n elif length > 5:\n length = 5\n chord = Tempo.generateChordProgression(root)\n await ctx.send(chord)",
"def rhythm_data_generator(patient_generator, frame_size=2048, samples_per_patient=1):\n for _, (signal, labels) in patient_generator:\n num_segments, segment_size = signal.shape\n patient_rhythm_labels = labels['rtype'] # note: variables in a .npz file are only loaded when accessed\n for _ in range(samples_per_patient):\n # randomly choose a frame that lies within the segment i.e. no zero-padding is necessary\n segment_index = np.random.randint(num_segments)\n frame_start = np.random.randint(segment_size - frame_size)\n frame_end = frame_start + frame_size\n x = signal[segment_index, frame_start:frame_end]\n x = np.expand_dims(x, axis=1) # add channel dimension\n # calculate the durations of each rhythm in the frame and determine the final label\n rhythm_ends, rhythm_labels = patient_rhythm_labels[segment_index]\n frame_rhythm_durations, frame_rhythm_labels = get_rhythm_durations(\n rhythm_ends, rhythm_labels, frame_start, frame_end)\n y = get_rhythm_label(frame_rhythm_durations, frame_rhythm_labels)\n yield x, y",
"def tempo_r(mid, beats, rs):\n bt = mid.ticks_per_beat\n trk = MidiTrack()\n trk.name = \"Tempo variation\"\n trk.append(MetaMessage(\"set_tempo\",\n tempo=beats[0],\n time=0))\n\n for i, beat in enumerate(beats):\n r = rs[i]\n if r == 0: # For the deterministic case\n tempo_r = beat\n else:\n tempo_r = rd.randint(beat-int(beat*r), beat + int(beat*r)) + 1\n trk.append(MetaMessage(\"set_tempo\",\n time=bt,\n tempo=tempo_r))\n\n mid.tracks.append(trk)\n return mid",
"def _prev_rhythm_gconst(_, rhythm):\n # A bigeminy cannot be preceded by another bigeminy or an extrasystole.\n verify(not isinstance(rhythm, (o.Bigeminy, o.Extrasystole)))",
"def _rhythm_obs_proc(pattern):\n # We asign the endpoint of the hypothesis.\n pattern.hypothesis.end.value = pattern.evidence[o.QRS][-1].time.value",
"def generate_round():\n prog_len = random.randint(_MIN_LEN, _MAX_LEN)\n diff = random.randint(_MIN_DIFF, _MAX_DIFF)\n start = random.randint(0, _MAX_START)\n prog = _generate_progression(prog_len, diff, start)\n missing_position = random.randint(0, len(prog) - 1)\n missing_element = prog[missing_position]\n prog[missing_position] = '..'\n prog = list(map(str, prog))\n string_question = ' '.join(prog)\n return string_question, missing_element",
"def mgChordAugmented(value):\n chord = [MgNote(value), MgNote(value) + 4, MgNote(value) + 8]\n return chord",
"def random_note_in_chord_and_vocal_range(relPitchList, key, vocal_range, prev_note):\n\n\t# for rests, can only be a rest\n\tif relPitchList == -1:\n\t\treturn [note.Rest()]\n\n\t# extract_range\n\tlowest_note, highest_note = vocal_range\n\n\tif (not isinstance(prev_note, note.Rest)) and (prev_note is not None):\n\n\t\t# restrict intervals to a sixth\n\t\tif prev_note.pitch.transpose(-7) > lowest_note:\n\t\t\tlowest_note = prev_note.pitch.transpose(-7)\n\n\t\tif prev_note.pitch.transpose(7) < highest_note:\n\t\t\thighest_note = prev_note.pitch.transpose(7)\n\n\t# turn relative Pitch into a note.Note\n\tnotes = []\n\n\n\tfor relPitch in relPitchList:\n\t\tif relPitch != 0:\n\t\t\tinterval_ = interval.ChromaticInterval(relPitch)\n\t\t\tnotes.append(note.Note(interval_.transposePitch(key.getTonic()), quarterLength=1))\n\t\telse:\n\t\t\t# needed as no transposition by 0 possible\n\t\t\tnotes.append(note.Note(key.getTonic(), quarterLength=1))\n\t \n\t# get all notes in right octave\n\toctave_corrected_notes = []\n\tfor n in notes:\n\n\t\t# possible octaves\n\t\tlow_octave = lowest_note.octave\n\t\thigh_octave = highest_note.octave\n \n\t\t# select random octave\n\t\t# if only one choice available\n\t\toctave_choice = np.arange(low_octave, high_octave + 1)\n\n\t\t# loop throughpossible octaves\n\t\tfor o in octave_choice:\n\t\t\tnote_ = copy.deepcopy(n)\n\t\t\tnote_.octave = o\n\t\t\tif (note_.pitch >= lowest_note) and (note_.pitch <= highest_note):\n\t\t\t\toctave_corrected_notes.append(note_)\n\n\treturn octave_corrected_notes",
"def get_random_chord(self) -> []:\n return self.get_random_chords(1)[0]",
"def get_random_phrase():\n return random.choices(PHRASES, WEIGHTS, k=1)[0]",
"def gen_melody(scale, timings, note_deviance):\n melody = [random.sample(scale, 1)[0] for i in range(len(timings))]\n\n px = np.random.rand(len(timings))\n idx = np.argwhere(px <= note_deviance)\n direction = [-1, 1]\n for i in idx:\n melody[i[0]] = melody[i[0]] + random.sample(direction, 1)[0]\n return melody",
"def generate_line_durations(\n n_measures: int,\n duration_weights: Dict[float, float],\n valid_rhythmic_patterns: List[List[float]],\n end_with_whole_note: bool = True\n) -> List[float]:\n current_time = 0\n line_durations = []\n current_measure_durations = []\n total_time = n_measures - int(end_with_whole_note)\n\n while current_time < total_time:\n appropriate_durations = select_appropriate_durations(\n current_time, total_time, current_measure_durations,\n valid_rhythmic_patterns\n )\n duration = random.choices(\n appropriate_durations,\n [duration_weights[x] for x in appropriate_durations]\n )[0]\n current_time += duration\n current_measure_durations = update_current_measure_durations(\n current_measure_durations, duration\n )\n line_durations.append(duration)\n\n if end_with_whole_note:\n line_durations.append(1.0)\n return line_durations",
"def mgChordDiminished(value):\n chord = [MgNote(value), MgNote(value) + 3, MgNote(value) + 6]\n return chord",
"def generate_fantasy_title():\n d20 = random.randint(1, 20)\n if d20 <= 4:\n #genetive noun\n return fantasy_genetive[random.randint(0, len(fantasy_genetive) - 1)] + \" \" + fantasy_noun[random.randint(0, len(fantasy_noun) - 1)]\n elif d20 > 4 and d20 < 13: \n #The adj noun\n return \"The \" + fantasy_adj[random.randint(0, len(fantasy_adj) - 1)] + \" \" + fantasy_noun[random.randint(0, len(fantasy_noun) - 1)]\n elif d20 >= 13:\n #something of something\n return fantasy_noun[random.randint(0, len(fantasy_noun) - 1)] + \" of \" + fantasy_what_is_this[random.randint(0, len(fantasy_what_is_this) - 1)]",
"def sample(self):\n seq = []\n for i in range(self._pwm.shape[1]):\n p = numpy.array(self._pwm[:, i], dtype=numpy.float64)\n p /= p.sum()\n seq.extend(numpy.random.choice(self.alphabet, p=p))\n return \"\".join(seq)",
"def generate_sentence(self, t=20):\n result = [\"START\", \"START\"]\n\n for i in range(t-3):\n if result[-1] == \"STOP\":\n break\n\n match = {}\n for k,v in self.trigramcounts.items():\n if k[0] == result[-2] and k[1] == result[-1]:\n match[k[-1]] = v\n r = np.random.choice(list(match.keys()), p=np.array(list(match.values())) / np.sum(np.array(list(match.values()))))\n result.append(r)\n\n return result",
"def mgChord(value, chord):\n ret = None\n if chord == 'M':\n ret = mgChordMajor(value)\n elif chord == 'm':\n ret = mgChordMinor(value)\n elif chord == 'dim':\n ret = mgChordDiminished(value)\n elif chord == 'aug':\n ret = mgChordAugmented(value)\n\n return ret",
"def randomHelmet():\n return random.choice(HELMETS)",
"def rand_gen(below, baseline):\n\treturn secrets.randbelow(below)/ baseline",
"def getRandomRarity():\n r = random.randint(1,100)\n if r <= Rarities.IMPOSIBIL:\n return \"IMPOSIBIL\"\n elif r <= Rarities.LEGENDAR:\n return \"LEGENDAR\"\n elif r <= Rarities.EPIC:\n return \"EPIC\"\n else:\n return \"COMUN\"",
"def _createCadence(self, harmonicRhythm):\n random = RandomManager.getActive()\n\n # choose cadence to apply\n cadences = self.chordProfile.getCadences()\n\n #TODO: Choose cadence in more intelligent way\n cadence = random.choice(cadences)\n\n scale = self.chordProfile.getScale().getName()\n cadenceChordProgression = []\n\n reversedHarmonicRhythm = reversed(harmonicRhythm[:])\n # create as many cadence\n for count, durationObj in enumerate(reversedHarmonicRhythm):\n duration = durationObj.getDuration()\n code = cadence[-count+1]\n chord = Chord(code, duration=duration, scale=scale, octave=4)\n\n if count >= len(cadence):\n return cadenceChordProgression\n\n # prepend chord\n cadenceChordProgression.insert(0, chord)\n return cadenceChordProgression",
"def random():\n length = 10**np.random.uniform(2, 6)\n radius = 10**np.random.uniform(1, 3)\n axis_ratio = 10**np.random.uniform(-1, 1)\n kuhn_length = 10**np.random.uniform(-2, -0.7)*length # at least 10 segments\n pars = dict(\n length=length,\n radius=radius,\n axis_ratio=axis_ratio,\n kuhn_length=kuhn_length,\n )\n return pars"
]
| [
"0.7600951",
"0.6710161",
"0.6659425",
"0.6366649",
"0.6244833",
"0.59357905",
"0.59273636",
"0.5922246",
"0.58458304",
"0.5795843",
"0.5778329",
"0.5752907",
"0.57455",
"0.5733294",
"0.5724642",
"0.57119113",
"0.57089704",
"0.5679657",
"0.5665693",
"0.5636343",
"0.55583066",
"0.5549534",
"0.5535191",
"0.55293566",
"0.5519769",
"0.5503923",
"0.5491074",
"0.5450109",
"0.54429084",
"0.5441284"
]
| 0.70778203 | 1 |
Get the remaining duration within this bar, given a list of rhythm int values | def durationRemain(self, l=None):
if l is None:
l = self.rhythm
full = float(self.time.upper)/self.time.lower
s = 0
for i in range(len(l)):
s += 1.0 / l[i]
return full - s | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getDuration(self):\n return (self._get_int('duration'), self._attributes.getDivisions())",
"def get_dur(self):\n return [char.get_dur() for char in self.string]",
"def duration(self):\r\n return self.t2 - self.t1",
"def duration(self) -> float:\n return self.delta_t * len(self)",
"def duration(self) -> float:\n return self.delta_t * len(self)",
"def getDuration(self):\n if self.getDot():\n return self.duration*1.5\n else:\n return self.duration",
"def getDuration(self):\n #return np.sum(self.subintinfo['TSUBINT']) #This is constant.\n return np.sum(self.getSubintinfo('TSUBINT')) #This is constant.",
"def _get_dur(inst):\n for fil, sig in inst['localization'].items():\n ke = sorted([int(i) for i in sig.keys()], key=int)\n if (len(ke) != 2):\n log(0, \"Error: Instance has two ranges\\n%s\" % (str(inst)))\n exit(1)\n dur = ke[1] - ke[0]\n assert dur > 0, \"Duration <= 0\"\n return(dur)",
"def get_metrical_duration_ticks(self):\n return int(sum([d[0]/d[1] for d in self.metrical_durations.flattened()]) * self.rhythm_denominator)",
"def bar_to_lilypond_duration(durations):\n size = len(durations)\n durations = [d for d in durations if d is not None]\n # Convert to lilypond note durations\n def is_pow2(n):\n return ((n & (n-1)) == 0)\n def compute_lp_duration(d,s):\n # If it's a power of 2\n if is_pow2(d):\n return str(int(size/d))\n # If it's a multiple of 3\n if d%3 == 0:\n if is_pow2(int(d/3)):\n return str(int(size/(d/3*2)))+\".\"\n # Otherwise, it's a tied note. Split into factors.\n # Test all possible splittings\n for i in range(1,int(d/2)+1):\n d1 = compute_lp_duration(d-i,s)\n d2 = compute_lp_duration(i,s)\n if d1 is None or d2 is None:\n continue\n if type(d1) is not list:\n d1 = [d1]\n if type(d2) is not list:\n d2 = [d2]\n return d1+d2\n return None\n lp_durations = [compute_lp_duration(d,size) for d in durations]\n return lp_durations",
"def get_duration(self):\n frame_dur = self.get_frame_duration()\n num_frames = self.get_num_frames()\n motion_dur = frame_dur * (num_frames - 1)\n return motion_dur",
"def _get_duration(self):\n try:\n dur = self.im.info[\"duration\"] / 1000.0\n except KeyError:\n dur = DEFAULT_DURATION / 1000.0 \n\n return dur",
"def get_duration(self):\n duration = 0\n\n for entry in self.entries:\n duration += entry.get_duration()\n return duration",
"def Duration(self):\r\n\t\treturn self._get_attribute('duration')",
"def duration(self):\n pass",
"def duration(self):\n pass",
"def get_duration(self):\n return float(self.time.iloc[-1] - self.time.iloc[0])",
"def getDurations(self):\n return self.durations",
"def Duration(self):\n\t\treturn self._get_attribute('duration')",
"def find_duration(data):\n t = [i[0] for i in data]\n duration = t[len(t) - 1] - t[0]\n logging.info('Calculated duration: %s', duration)\n return duration",
"def get_rhythm_durations(indices, labels=None, start=0, end=None):\n if end is None:\n end = indices[-1]\n if start >= end:\n raise ValueError('`end` must be greater than `start`')\n # find the first rhythm label after the beginning of the frame\n start_index = np.searchsorted(indices, start, side='right')\n # find the first rhythm label after or exactly at the end of the frame\n end_index = np.searchsorted(indices, end, side='left') + 1\n frame_indices = indices[start_index:end_index]\n # compute the duration of each rhythm adjusted for the beginning and end of the frame\n frame_rhythm_durations = np.diff(frame_indices[:-1], prepend=start, append=end)\n if labels is None:\n return frame_rhythm_durations\n else:\n frame_labels = labels[start_index:end_index]\n return frame_rhythm_durations, frame_labels",
"def duration(self):\n return self._end - self._begin",
"def duration(self) -> int:\n return 0",
"def getDuration(self):\n return _osgAnimation.Motion_getDuration(self)",
"def durations_per_type(self):\n pass",
"def get_duration(self):\n seconds = self.duration.total_seconds()\n mins, secs = divmod(seconds, 60)\n return int(mins), int(secs)",
"def create_event_dur_score(self):\n for inst in self.instruments:\n #[rest/midipitch, dur, vel]\n inst_score=[]\n running_clock = 0\n for n, note in enumerate(inst.notes):\n freq = mp_to_adjusted_freq(note[0], self.ratios)\n if type(freq) != int: freq = np.asscalar(freq)\n if type(note[0]) != int: inst.notes[n][0] = np.asscalar(note[0])\n if type(note[1]) != int: inst.notes[n][1] = np.asscalar(note[1])\n if type(note[2]) != int: inst.notes[n][2] = np.asscalar(note[2])\n # if type(note[3]) != int: inst.notes[n][3] = np.asscalar(note[3])\n if note[1] != running_clock:\n inst_score.append(['Rest()', note[1] - running_clock, 0])\n inst_score.append([freq, note[2], note[3]])\n running_clock = note[1] + note[2]\n inst.event_dur_score = inst_score",
"def increment_duration(syllables, note_attrs):\n syllables[-1][-1][-1]['duration'] += note_attrs['duration']\n return syllables",
"def getDuration(self):\n return _osgAnimation.Animation_getDuration(self)",
"def duration(self) -> str:\n return pulumi.get(self, \"duration\")"
]
| [
"0.65641594",
"0.65380937",
"0.6458796",
"0.64535576",
"0.64535576",
"0.6451906",
"0.6380517",
"0.6314581",
"0.62721646",
"0.625763",
"0.6241156",
"0.6210288",
"0.6162527",
"0.61441207",
"0.6135919",
"0.6135919",
"0.6128094",
"0.61246526",
"0.6111688",
"0.6027541",
"0.6024031",
"0.600552",
"0.59731317",
"0.5967308",
"0.5964228",
"0.5941234",
"0.5936502",
"0.58956796",
"0.58623606",
"0.5854704"
]
| 0.6915621 | 0 |
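A quick sanity check of the remaining-duration logic in the row above: in 4/4 time a bar holds 4/4 = 1.0 of a whole note, and a rhythm list of note denominators [4, 4, 8] consumes 1/4 + 1/4 + 1/8 = 0.625, leaving 0.375. A minimal standalone sketch of the same arithmetic (the `time.upper`/`time.lower` attributes of the original class are replaced by plain parameters here, so this is an illustration rather than the original method):

```python
from fractions import Fraction

def duration_remain(rhythm, upper=4, lower=4):
    """Remaining duration in a bar of `upper`/`lower` time,
    given note denominators (4 = quarter note, 8 = eighth note, ...)."""
    full = Fraction(upper, lower)
    used = sum(Fraction(1, d) for d in rhythm)
    return full - used

print(duration_remain([4, 4, 8]))  # Fraction(3, 8), i.e. 0.375 of a whole note
```

Using `Fraction` instead of the float arithmetic of the original avoids rounding error when many small durations are summed.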
Compute MD5 hash of the data_path (dir or file) for data versioning. | def hash_data(data_path: Union[str, Path], chunk_size: int = 65536) -> str:
if Path(data_path).is_dir():
hash = _hash_dir(data_path, chunk_size)
elif Path(data_path).is_file():
hash = _hash_file(data_path, chunk_size)
else:
raise ValueError(f"{data_path} is neither directory nor file.")
return hash.hexdigest() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def python_repo_hash_md5(root_dir: str, *, verbose: bool = False):\n m = hashlib.md5()\n for e in _collect_entries(root_dir, '.'):\n if verbose:\n log_info('Processing e', e)\n m.update(\n f\"path={e['path']}\\tisdir={e['isdir']}\\tsize={e['size']}\\tmode={e['mode']:03o}\\tmtime={e['mtime']}\\n\"\n .encode('UTF-8'))\n\n return m.hexdigest()",
"def calculate_md5_of_dir(self, verbose=0):\n directory = self.cfg['sharing_path']\n if verbose:\n start = time.time()\n md5Hash = hashlib.md5()\n if not os.path.exists(directory):\n self.stop(1, 'Error during calculate md5! Impossible to find \"{}\" in user folder'.format(directory))\n\n for root, dirs, files in os.walk(directory, followlinks=False):\n for names in files:\n filepath = os.path.join(root, names)\n rel_path = self.relativize_path(filepath)\n if rel_path in self.client_snapshot:\n md5Hash.update(self.client_snapshot[rel_path][1])\n md5Hash.update(hashlib.md5(filepath).hexdigest())\n else:\n hashed_file = self.hash_file(filepath)\n if hashed_file:\n md5Hash.update(hashed_file)\n md5Hash.update(hashlib.md5(filepath).hexdigest())\n else:\n print \"can't hash file: \", filepath\n\n if verbose:\n stop = time.time()\n print stop - start\n return md5Hash.hexdigest()",
"def _md5sum(data):\n hash = hashlib.md5()\n hash.update(six.b(data))\n hash_hex = hash.hexdigest()\n return hash_hex",
"def svn_fs_file_md5_checksum(*args):\r\n return _fs.svn_fs_file_md5_checksum(*args)",
"def md5(self):\n return md5file(self.abspath)",
"def md5_hash(self) -> str:\n\n ordered_model_data = sort_dictionary(self.data, recursive=True)\n\n return md5(json.dumps(ordered_model_data).encode(\"utf-8\")).hexdigest()",
"def data_checksum(self, node):\n cmd = f\"find {RedpandaService.DATA_DIR} -type f -exec md5sum '{{}}' \\; -exec stat -c %s '{{}}' \\;\"\n lines = node.account.ssh_output(cmd)\n tokens = lines.split()\n return {\n tokens[ix + 1].decode(): (tokens[ix].decode(), int(tokens[ix + 2]))\n for ix in range(0, len(tokens), 3)\n }",
"def checksum(path):\n with open(path, 'r') as f:\n return md5(f.read()).digest()",
"def calc_file_md5(file_path):\n hash_md5 = str()\n method = hashlib.md5()\n if not os.path.exists(file_path):\n logger.error(\"File(%s) don not exist, can not calculation file hash\" % file_path)\n return hash_md5\n\n with open(file_path, 'rb') as f:\n for chunk in read_chunks(f, 1024 * 1024):\n method.update(chunk)\n return method.hexdigest()",
"def calculate(d):\r\n\r\n # Set correct slashes for the OS\r\n if sys.platform == 'windows':\r\n slash = '\\\\'\r\n elif sys.platform == 'linux':\r\n slash = '/'\r\n else:\r\n print('#Error. Unknown platform.')\r\n return\r\n\r\n print('Files in the current directory and their md5-hashes:\\n')\r\n i = 0\r\n assert i == 0, '#Error. Variable i != 0.'\r\n\r\n for i in range(len(d[2])): # Go through the list of files\r\n full_path = d[0]+slash+d[2][i]\r\n print(full_path) # Get the list of files with full paths\r\n print(md5(full_path))\r\n size(full_path)",
"def md5Hash(pathAndFilename, blockSize=8192):\n hashcode = hashlib.md5()\n with open(pathAndFilename, \"rb\" ) as f:\n block = f.read(blockSize)\n while len(block)>0:\n hashcode.update(block)\n block = f.read(blockSize)\n return hashcode.hexdigest()",
"def md5_hexdigest(data):\n\n if not (data and isinstance(data, six.text_type)):\n raise Exception(\"invalid data to be hashed: %s\", repr(data))\n\n encoded_data = data.encode(\"utf-8\")\n\n if not new_md5:\n m = md5.new() # nosec\n else:\n m = md5()\n m.update(encoded_data)\n\n return m.hexdigest()",
"def get_md5(data):\n if hasattr(data, \"read\") and hasattr(data, 'seek'):\n data.seek(0)\n m = md5()\n chunk = data.read(1024*1024) # 1Mb\n f_size = 0\n while(chunk):\n f_size += len(chunk)\n m.update(chunk)\n chunk = data.read(1024*1024)\n data.seek(0)\n return m.hexdigest(), f_size\n else: # normal str\n m = md5()\n f_size = len(data)\n m.update(data)\n return m.hexdigest(), f_size",
"def md5_hash(file_path):\n with open(file_path, 'rb') as fp:\n return md5(fp.read()).hexdigest()",
"def data_hash(self):\n md5 = hashlib.md5()\n # text header row\n for column in self.columns:\n md5.update(hxl.datatypes.normalise_space(column.header).encode('utf-8'))\n # hashtag row\n for column in self.columns:\n md5.update(hxl.datatypes.normalise_space(column.display_tag).encode('utf-8'))\n # data rows\n for row in self:\n for value in row:\n md5.update(hxl.datatypes.normalise_space(value).encode('utf-8'))\n return md5.hexdigest()",
"def md5(dir):\n\n # ugly way to avoid circular imports\n from . import settings\n\n files = [ \n settings.DATA['nation']['file_name'],\n settings.DATA['regions']['file_name'],\n settings.DATA['provinces']['file_name'],\n ]\n\n hash_md5 = hashlib.md5()\n for f in files:\n with open(dir+'/'+f, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n \n return hash_md5.hexdigest()",
"def md5_checksum(file_path):\n with open(file_path, 'rb') as fh:\n m = hashlib.md5()\n while True:\n data = fh.read(8192)\n if not data:\n break\n m.update(data)\n return m.hexdigest()",
"def MD5(self) -> _n_0_t_3[_n_0_t_9]:",
"def aws_md5(data):\n hasher = hashlib.new(\"md5\")\n if hasattr(data, \"read\"):\n data.seek(0)\n while True:\n chunk = data.read(8192)\n if not chunk:\n break\n hasher.update(chunk)\n data.seek(0)\n else:\n hasher.update(data)\n return b64encode(hasher.digest()).decode(\"ascii\")",
"def _md5_of_file(path):\n md5 = hashlib.md5()\n\n with open(path, 'rb') as f:\n for chunk in iter(lambda: f.read(4096), b''):\n md5.update(chunk)\n\n return md5.hexdigest()",
"def md5_sum_file(path):\n with open(path, 'rb') as f:\n m = hashlib.md5()\n while True:\n data = f.read(8192)\n if not data:\n break\n m.update(data)\n return m.hexdigest()",
"def _get_local_md5(self, blocksize=2**20):\n m = hashlib.md5()\n with open(self.dst, \"rb\") as f:\n buf = f.read(blocksize)\n while buf:\n m.update(buf)\n buf = f.read(blocksize)\n return m.hexdigest()",
"def hash_file(path: str) -> str:\n return _hash_file(path, hashlib.md5()).hexdigest()",
"def sha256Sum(self, data):\n data = str(data)\n m = hashlib.sha256()\n if os.path.isfile(data):\n try:\n f = file(data, 'rb')\n except:\n return 'ERROR: unable to open %s' % data\n while True:\n d = f.read(8096)\n if not d:\n break\n m.update(d)\n f.close()\n # Otherwise it could be either 1) a directory 2) miscellaneous data (like json)\n else:\n m.update(data)\n return m.hexdigest()",
"def static_file_hash(filepath):\n hasher = hashlib.md5() # nosec: B303\n\n with contextlib.closing(open(filepath, 'rb')) as file:\n hasher.update(file.read())\n return hasher.hexdigest()",
"def hash_directory(path):\n if not os.path.isdir(path):\n raise ValueError(\n \"The given path `{}` is not a directory.\".format(path))\n\n md5 = hashlib.md5()\n\n for root, _, files in os.walk(path):\n for fil in files:\n md5.update(hash_file(os.path.join(root, fil)).encode(\"utf-8\"))\n\n return \"{}\".format(md5.hexdigest())",
"def local_md5(filepath, blocksize=65536):\n hasher = hashlib.md5()\n with open(filepath, 'rb') as source:\n buf = source.read(blocksize)\n while len(buf) > 0:\n hasher.update(buf)\n buf = source.read(blocksize)\n return hasher.hexdigest()",
"def GetFileMd5(file_path):\n return binascii.hexlify(GetFileHashes(file_path, do_md5=True)['md5'])",
"def md5hash(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"md5hash\")",
"def md5(path):\n with open(path, 'rb') as f:\n md5hash = hashlib.md5()\n for chunk in iter(lambda: f.read(4096), b''):\n md5hash.update(chunk)\n return md5hash.hexdigest()"
]
| [
"0.6850441",
"0.6710063",
"0.6619043",
"0.654432",
"0.6535877",
"0.6533921",
"0.644331",
"0.6342902",
"0.6323048",
"0.6295428",
"0.6286576",
"0.62413317",
"0.6216765",
"0.6169943",
"0.61495596",
"0.6140576",
"0.61181694",
"0.6096366",
"0.60879916",
"0.6078742",
"0.60649794",
"0.6051122",
"0.60456747",
"0.6030513",
"0.60283333",
"0.6022939",
"0.60052115",
"0.599215",
"0.5989495",
"0.5983932"
]
| 0.6991578 | 0 |
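The `hash_data` entry above delegates to `_hash_dir` and `_hash_file` helpers that are not shown in the row. A plausible sketch of what such helpers could look like — the names, the chunked reads, and the idea of mixing relative paths into the directory hash are assumptions for illustration, not taken from the source:

```python
import hashlib
from pathlib import Path

def _hash_file(path, chunk_size=65536, hash_=None):
    """Feed one file into an MD5 hash object, reading in chunks."""
    hash_ = hash_ if hash_ is not None else hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            hash_.update(chunk)
    return hash_

def _hash_dir(dir_path, chunk_size=65536):
    """Hash every file under dir_path in a deterministic order."""
    hash_ = hashlib.md5()
    for entry in sorted(Path(dir_path).rglob("*")):
        if entry.is_file():
            # Include the relative path so renames change the hash too.
            hash_.update(str(entry.relative_to(dir_path)).encode("utf-8"))
            _hash_file(entry, chunk_size, hash_)
    return hash_
```

Both helpers return the hash object rather than a digest string, matching the `.hexdigest()` call made by `hash_data` itself.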
Returns normalized complex data. If the data is a numpy array with a complex dtype, returns its absolute value. Otherwise returns the input data unchanged. | def _normalizeComplex(data):
if hasattr(data, "dtype"):
isComplex = numpy.issubdtype(data.dtype, numpy.complexfloating)
else:
isComplex = isinstance(data, numbers.Complex)
if isComplex:
data = numpy.absolute(data)
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def complex_abs(data):\n assert data.size(-1) == 2\n return (data ** 2).sum(dim=-1).sqrt()",
"def complex_abs(data):\n assert data.size(-1) == 2\n return (data ** 2).sum(dim=-1).sqrt()",
"def complex_abs(data):\n assert data.size(-1) == 2\n return (data ** 2).sum(dim=-1).sqrt()",
"def complex_abs_sq(data):\n assert data.size(-1) == 2\n return (data ** 2).sum(dim=-1)",
"def abs(data):\n return _make.abs(data)",
"def complex_normalize(c):\n mag = complex_magnitude(c)\n mag = mag if mag > 0 else 1\n return c / mag",
"def __complex__(self) -> complex:\n return self._translate_in_type(complex, self.integer, self.float_num)",
"def complex_normalize(X, axis=-1):\n mags = np.linalg.norm(np.abs(X), axis=axis, keepdims=True)\n return X / mags",
"def _norm_data(data):\n if data is None:\n return data\n data_min = np.min(data)\n c_norm = np.max(data) - data_min\n return (data - data_min) / c_norm if (c_norm != 0) else (data - data_min)",
"def test_op_abs_offload_array_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=complex)\n\n old_a = numpy.empty_like(a)\n old_a[:] = a[:]\n expect = abs(a)\n\n offl_a = stream.bind(a)\n offl_r = abs(offl_a)\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))",
"def __abs__(self):\r\n return (self._real.fma(self._real, self._imag*self._imag)).sqrt()",
"def get_value(self):\n return complex(*self.points[0, :2])",
"def real_of_complex(z):\n return np.vstack((np.real(z[:,0]),np.imag(z[:,0]),np.real(z[:,1]),np.imag(z[:,1]))).T",
"def __complex__(self):\n return complex(self._reNum, self._imNum)",
"def test_op_isub_scalar_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=complex)\n s = complex(1.2, -1.3)\n\n old = numpy.empty_like(a)\n old[:] = a[:]\n expect = a - s\n\n offl_a = stream.bind(a)\n offl_a -= s\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old).all(),\n \"Input array operand must be modified: \"\n \"{0} should be {1}\".format(r, old))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))",
"def dcomplex(self):\n return self._dcomplex",
"def to_float_complex(self):\r\n return eval(str(self))",
"def copy_abs(self):\r\n return (self._real.fma(self._real, self._imag*self._imag)).sqrt()",
"def complex_value(self) -> global___Expression.ComplexValue:",
"def __complex__(self):\n return complex(self.q[0], self.q[1])",
"def test_op_reverse_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=complex)\n old_a = numpy.empty_like(a)\n old_a[:] = a[:]\n expect = numpy.array(a[::-1])\n\n offl_a = stream.bind(a)\n offl_r = offl_a.reverse()\n offl_r.update_host()\n stream.sync()\n r = offl_r.array\n\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(a, r))",
"def complex(real, imag):",
"def test_op_isub_array_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=complex)\n o = a + complex(1.2, -1.3)\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a - o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_a -= o\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old_a).all(),\n \"Array operand must be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))",
"def _get_complex_null_value(complex):\n return _COMPLEX_NULL_VALUE",
"def complex(self, real=0.0, imag=0.0):\n if imag == 0.0 and real == 0.0:\n return complex_zero",
"def test_op_sub_scalar_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=complex)\n s = complex(1.3, 1.3)\n\n old = numpy.empty_like(a)\n old[:] = a[:]\n expect = a - s\n\n offl_a = stream.bind(a)\n offl_r = offl_a - s\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old).all(),\n \"Input array operand must not be modified: \"\n \"{0} should be {1}\".format(a, old))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))",
"def _cmplx_to_complex_ ( s ) :\n return complex ( s.real , s.imag )",
"def normalize(complex_vec, axis=None):\n cv_mag_vector = complex_magnitude(complex_vec)\n return vector_normalize(cv_mag_vector, axis=axis)",
"def test_parserval_complex_2d():\n\ta, b, c = np.meshgrid([1j, 1, 1, 1j], [0, 1j, 1j], [0, 1, 1, 1])\n\tdummy_array = xr.DataArray(a * b * c, dims=['x', 'y', 'z'])\n\tchunked_array = dummy_array.chunk(chunks={'x': 2, 'y': 2, 'z': 2})\n\tchunked_array_zeromean = chunked_array - chunked_array.mean(dim=['y', 'z'])\n\tspec = xfft.fft(chunked_array_zeromean, dim=['y', 'z'], sym=True)\n\tassert np.allclose(np.var(a * b * c, axis=(1, 2)),\n\t xfft.ps(spec).sum(dim=['f_y','f_z']))",
"def test_op_fillfrom_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=complex)\n offl_r = stream.empty_like(a)\n offl_r.fillfrom(a)\n offl_r.update_host()\n stream.sync()\n r = offl_r.array\n self.assertTrue((a == r).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(a, r))"
]
| [
"0.766307",
"0.766307",
"0.766307",
"0.661879",
"0.6422132",
"0.6198529",
"0.6017067",
"0.5993175",
"0.5902083",
"0.58823895",
"0.5855812",
"0.57926065",
"0.57813406",
"0.5773507",
"0.5769934",
"0.57515544",
"0.5712033",
"0.56957585",
"0.568994",
"0.5641625",
"0.5637767",
"0.5625738",
"0.5615207",
"0.5614494",
"0.5612818",
"0.5542472",
"0.55367124",
"0.55245197",
"0.55121094",
"0.5500521"
]
| 0.87394947 | 0 |
Called when the widget of the view was created | def viewWidgetCreated(self, view, plot):
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init_widget(self):",
"def create_widgets(self):",
"def on_show_view(self):\n self.setup()",
"def on_show_view(self):\n self.setup()",
"def on_show_view(self):\n self.setup()",
"def create_widgets( self ):",
"def create_widget(self):\n pass",
"def onShow(self):\n pass",
"def init_widget(self):\n super(UiKitTextView, self).init_widget()\n self.init_text()",
"def getWidget(self):",
"def on_widget_constructed(callback):\n Widget._widget_construction_callback = callback",
"def on_draw(self):\n\t\tself.render()",
"def on_draw(self):\n\t\tself.render()",
"def on_create(self):",
"def create_widget(self):\n self.widget = ListView(self.get_context())",
"def updateWidget(self):\n pass",
"def prepare_UI(self):",
"def create(self, parent):\n self.widget = QtCore.QObject(parent)",
"def onInit(self):\n pass",
"def update_view(self): \n raise NotImplementedError(\"Widget descendents MUST implement the update_view() method!\")",
"def __init__(self):\r\n super().__init__()\r\n self.init_ui()",
"def __init__(self):\n self.view = GuiView(self)\n return",
"def initializeUI(self):\n self.setStyleSheet(abstyle)\n self.setGeometry(140, 100, 860, 484)\n self.setWindowTitle('Emotions Data View')\n self.setupModelView()",
"def init_ui(self):\n raise NotImplementedError",
"def init_ui(self):\n raise NotImplementedError",
"def create_widget(self):\n self.widget = UILabel()",
"def on_show_view(self):\n\n # Makes the background darker\n arcade.set_background_color([rgb - 50 for rgb in arcade.color.DARK_BLUE_GRAY])\n\n # Enable the UIManager when the view is showm.\n self.manager.enable()",
"def on_show_view(self):\n arcade.set_background_color(arcade.color.DARK_BLUE_GRAY)\n\n # Enable the UIManager when the view is showm.\n self.manager.enable()",
"def init_widget(self):\n super(AndroidListView, self).init_widget()\n d = self.declaration\n\n if d.divider_height >= 0:\n self.set_divider_height(d.divider_height)\n if d.header_dividers:\n self.set_header_dividers(d.header_dividers)\n if d.footer_dividers:\n self.set_footer_dividers(d.footer_dividers)\n if d.items_can_focus:\n self.set_items_can_focus(d.items_can_focus)\n\n w = self.widget\n w.setOnItemClickListener(w.getId())\n w.setOnItemLongClickListener(w.getId())\n w.onItemClick.connect(self.on_item_click)\n w.onItemLongClick.connect(self.on_item_long_click)\n #self.widget.setOnScrollListener(self.widget.getId())\n #self.widget.onScroll.connect(self.on_scroll)\n\n #: Selection listener\n #self.widget.setOnItemSelectedListener(self.widget.getId())\n #self.widget.onItemSelected.connect(self.on_item_selected)\n #self.widget.onNothingSelected.connect(self.on_nothing_selected)",
"def fromControls(self,widget):"
]
| [
"0.7902635",
"0.7300569",
"0.7272871",
"0.7272871",
"0.7272871",
"0.71997607",
"0.7128017",
"0.7008906",
"0.6765284",
"0.66968477",
"0.6603362",
"0.6543564",
"0.6543564",
"0.65416574",
"0.651932",
"0.64497674",
"0.6448184",
"0.64066774",
"0.63004065",
"0.62705046",
"0.62472427",
"0.62237954",
"0.6223471",
"0.6220702",
"0.6220702",
"0.6216364",
"0.61887383",
"0.61881894",
"0.6173309",
"0.616524"
]
| 0.79256094 | 0 |
Returns the data viewer hooks used by this view. | def getHooks(self):
return self.__hooks | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hooks(self):\n return tuple(self.__hooks.keys())",
"def get_extra_lvs_hooks(self) -> List[HammerToolHookAction]:\n return list()",
"def get_extra_hierarchical_lvs_hooks(self) -> Dict[str, List[HammerToolHookAction]]:\n return dict()",
"def custom_hooks(self):\n return self.config.options(self.CUSTOM_HOOKS_SECTION, [])",
"def get_extra_drc_hooks(self) -> List[HammerToolHookAction]:\n return list()",
"def extension_hooks(self, global_step):\n return []",
"def extension_hooks(self, global_step):\n return []",
"def available_hooks(self):\n return [RoutesDevHandlerHook]",
"def get_extra_hierarchical_drc_hooks(self) -> Dict[str, List[HammerToolHookAction]]:\n return dict()",
"def getSyscallHooks(self):\n return None",
"def get_user_hook_info ( self ):\n return [\n ( s.name, [ ( s.event or \"undef\", s.priority ) ] )\n for event, s in self.user_hooks.iter_scripts()\n ]",
"def get_hooks(name):\n register_all_hooks()\n return _hooks.get(name, [])",
"def get_application_hooks(cls) -> Optional[AppExtensionHooks]:\n return cls._app_exts",
"def get_hooks(self, hook: str) -> List[Callable]:\n return [getattr(self, name) for name in self.__plugin_data.hooks.get(hook, ())]",
"def available_hooks(self):\n return [HtmlMinPostRenderHook]",
"def dataOverrides(self):\n\t\treturn self._overrides",
"def list(self):\n return self._post(\n request=ApiActions.LIST.value,\n uri=ApiUri.HOOKS.value,\n ).get('hooks')",
"def builtin_hooks(self):\n return [k for k, v in self.config.items(self.BUILTIN_HOOKS_SECTION, ())\n if rh.shell.boolean_shell_value(v, None)]",
"def getRawSyscallHooks(self):\n return None",
"def process_hooks(self, hooks):\n try:\n enabled_hooks = self.project.HOOKS\n except AttributeError:\n return hooks",
"def get_extra_hierarchical_synthesis_hooks(self) -> Dict[str, List[HammerToolHookAction]]:\n return dict()",
"def setHooks(self, hooks):\n super(SelectOneDataView, self).setHooks(hooks)\n if hooks is not None:\n for v in self.__views:\n v.setHooks(hooks)",
"def functions(self):\n return self.__functions",
"def hooks(self) -> Sequence['outputs.GetGroupHooksHookResult']:\n return pulumi.get(self, \"hooks\")",
"def set_hooks(self):\n for layer_idx, layer in enumerate(self.vgg_features):\n if layer_idx in self.layers_to_watch:\n layer.register_forward_hook(self.layer_watch_hooks(layer_idx))",
"def getViews(self):\n return list(self.__views.keys())",
"def get_custom_hook_names():\n return list(_CUSTOM_HOOKS)",
"def game_functions(self):\n\t\treturn self._game_functions",
"def get_hooks(self, type_: str) -> typing.Iterable:\n hooks = []\n if self._parent:\n hooks.extend(self._parent.get_hooks(type_))\n\n hooks.extend(self._request_hooks.get(type_, []))\n\n return hooks",
"def setHooks(self, hooks):\n super(SelectManyDataView, self).setHooks(hooks)\n if hooks is not None:\n for v in self.__views:\n v.setHooks(hooks)"
]
| [
"0.69417256",
"0.6475532",
"0.6276654",
"0.624319",
"0.61839193",
"0.60875654",
"0.60875654",
"0.6062711",
"0.6000305",
"0.59981173",
"0.59789795",
"0.5936823",
"0.5913618",
"0.58525425",
"0.5787659",
"0.57738584",
"0.577255",
"0.576733",
"0.5754267",
"0.56217355",
"0.5590773",
"0.5569066",
"0.55612904",
"0.5559603",
"0.5544965",
"0.55279243",
"0.55103725",
"0.54937005",
"0.54753315",
"0.5458202"
]
| 0.7191292 | 0 |
Returns a default colormap. | def defaultColormap(self):
colormap = None
if self.__hooks is not None:
colormap = self.__hooks.getColormap(self)
if colormap is None:
colormap = Colormap(name="viridis")
return colormap | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_colormap(self):\n return colors.colormaps[self.name]",
"def get_colormap(self):\n return colors.colormaps[self.name]",
"def get_colormap(self):\n return file_io.load_viscm_colormap(self.path)",
"def get_colormap(self):\n return file_io.load_viscm_colormap(self.path)",
"def colormap(self):\n palette = [(0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0),\n (111, 74, 0), (81, 0, 81), (128, 64, 128), (244, 35, 232),\n (250, 170, 160), (230, 150, 140), (70, 70, 70),\n (102, 102, 156), (190, 153, 153), (180, 165, 180),\n (150, 100, 100), (150, 120, 90), (153, 153, 153),\n (153, 153, 153), (250, 170, 30), (220, 220, 0),\n (107, 142, 35), (152, 251, 152), (70, 130, 180),\n (220, 20, 60), (255, 0, 0), (0, 0, 142), (0, 0, 70),\n (0, 60, 100), (0, 0, 90), (0, 0, 110), (0, 80, 100),\n (0, 0, 230), (119, 11, 32), (0, 0, 142)]\n\n num_colors = self[0][1].shape[-1]\n colormap = np.zeros((num_colors, 3), dtype=int)\n for i in range(num_colors):\n colormap[i, ...] = palette[self._update_labels_dict[i]]\n return colormap",
"def get_mpl_colormap(self):\n return mpl.colors.ListedColormap(self.get_colors().astype(float) / 255.0)",
"def default_colors():\n # default_colors = [\n # # r, g, b, a\n # [92, 192, 98, 0.5],\n # [90, 155, 212, 0.5],\n # [246, 236, 86, 0.6],\n # [241, 90, 96, 0.4],\n # [255, 117, 0, 0.3],\n # [82, 82, 190, 0.2],\n # ]\n\n default_colors = [\n # r, g, b, a\n [188, 114, 3, 0.5],\n [3, 133, 188, 0.5],\n [155, 9, 118, 0.6],\n [155, 53, 9, 0.4],\n [4, 140, 128, 0.3],\n [140, 8, 8, 0.2],\n ]\n\n default_colors = [\n [i[0] / 255.0, i[1] / 255.0, i[2] / 255.0, i[3]]\n for i in default_colors\n ]\n\n return default_colors",
"def get_density_cmap():\n # Add completely white color to Reds colormap in Matplotlib\n list_colors = plt.cm.datad['Reds']\n list_colors = list(list_colors)\n list_colors.insert(0, (1, 1, 1))\n list_colors.insert(0, (1, 1, 1))\n lscm = matplotlib.colors.LinearSegmentedColormap.from_list(\"my_Reds\", list_colors)\n return lscm",
"def get_colormap(self):\n return colors.ev_colormaps[self.name]",
"def get_cmap(cmap=None):\n if cmap:\n if isinstance(cmap, (mpl.colors.Colormap)):\n colormap = cmap\n elif cmap in cmo.cmapnames:\n colormap = cmo.cmap_d[cmap]\n elif cmap in plt.colormaps():\n colormap = plt.get_cmap(cmap)\n else:\n raise ValueError(\n \"Get unrecognised name for the colormap `{}`. Colormaps should be from standard matplotlib set of from cmocean package.\".format(\n cmap\n )\n )\n else:\n colormap = plt.get_cmap(\"Spectral_r\")\n\n return colormap",
"def cmap(self):\n return self._cmap",
"def cmap(self):\n return self._palette",
"def get_cmap(n, name='Paired'):\n return plt.cm.get_cmap(name, n)",
"def cmap(self):\n return self.pixels.get_cmap()",
"def get_cmap(n, name='jet'):\n return plt.cm.get_cmap(name, n)",
"def cmap(self, background_color='#000000'):\n return get_cmap(self.img.data.max() + 1,\n background_color=background_color)",
"def get_cmap(n, name='hsv'):\n return plt.cm.get_cmap(name, n)",
"def get_cmap(n, name='hsv'):\n return plt.cm.get_cmap(name, n)",
"def create_label_colormap():\n colormap = np.array([\n [128, 64, 128],\n [244, 35, 232],\n [ 70, 70, 70],\n [102, 102, 156],\n [190, 153, 153],\n [153, 153, 153],\n [250, 170, 30],\n [220, 220, 0],\n [107, 142, 35],\n [152, 251, 152],\n [ 70, 130, 180],\n [220, 20, 60],\n [255, 0, 0],\n [ 0, 0, 142],\n [ 0, 0, 70],\n [ 0, 60, 100],\n [ 0, 80, 100],\n [ 0, 0, 230],\n [119, 11, 32],\n [ 0, 0, 0]], dtype=np.uint8)\n return colormap",
"def get_cmap(n, name=\"hsv\"):\n return plt.cm.get_cmap(name, n)",
"def get_cmap(n, name=\"hsv\"):\n return plt.cm.get_cmap(name, n)",
"def get_cmap(n, name=\"hsv\"):\n return plt.cm.get_cmap(name, n)",
"def cmap(num,cmap = plt.cm.gist_earth_r):\n return cmap(np.linspace(0, 1, num))",
"def GetDefaultColor(self):\n return wx.BLACK",
"def default_colour(self):\n colour = self.DEFAULT_COLOUR\n return colour",
"def getColormapImage(self):\n if self._colormapImage is None and self._colormap is not None:\n image = numpy.zeros((16, 130, 3), dtype=numpy.uint8)\n image[1:-1, 1:-1] = self._colormap.getNColors(image.shape[1] - 2)[:, :3]\n self._colormapImage = convertArrayToQImage(image)\n return self._colormapImage",
"def getColorMap(colors):\n # Normalise RGBs\n norm_colors = []\n for color in colors:\n norm_colors.append([val / 255. for val in color])\n # create color map\n cmap = cols.ListedColormap(norm_colors)\n\n return cmap",
"def linkcolormap(self, linkcolors=\"viridis\"):\n\n if isinstance(linkcolors, list) and len(linkcolors) == self.n:\n # provided a list of color names\n return colors.ListedColormap(linkcolors)\n else:\n # assume it is a colormap name\n return cm.get_cmap(linkcolors, 6)",
"def get_colors(nlevels, colormap=None):\n if colormap is None:\n from matplotlib.pyplot import cm\n colormap = cm.rainbow\n return colormap(np.linspace(0, 1, nlevels))",
"def test_density_colormap(self):\n cmap = matplotlib.cm.get_cmap('density')\n np.testing.assert_allclose(cmap(0.0), [0.214, 0.152, 0.535, 1], atol=0.001)\n np.testing.assert_allclose(cmap(1.0), [0.988, 0.978, 0.042, 1], atol=0.001)"
]
| [
"0.72189206",
"0.72189206",
"0.7079339",
"0.7079339",
"0.7058597",
"0.7002624",
"0.6966337",
"0.6905248",
"0.68922603",
"0.67234963",
"0.6715274",
"0.6709201",
"0.66109717",
"0.66100955",
"0.6520509",
"0.64768016",
"0.6442025",
"0.6442025",
"0.6424049",
"0.63886",
"0.63886",
"0.63886",
"0.63851047",
"0.6250673",
"0.6247811",
"0.6201924",
"0.61323",
"0.60997665",
"0.6053372",
"0.60439646"
]
| 0.83686036 | 0 |
Returns a default color dialog. | def defaultColorDialog(self):
dialog = None
if self.__hooks is not None:
dialog = self.__hooks.getColormapDialog(self)
if dialog is None:
dialog = ColormapDialog()
dialog.setModal(False)
return dialog | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def colorPickerDialog(self, current_color=None):\n\t\tcolor_dialog = QtWidgets.QColorDialog()\n\t\t#color_dialog.setOption(QtWidgets.QColorDialog.DontUseNativeDialog)\n\n\t\t# Set current colour\n\t\tif current_color is not None:\n\t\t\tcolor_dialog.setCurrentColor(current_color)\n\n\t\t# Only return a color if valid / dialog accepted\n\t\tif color_dialog.exec_() == color_dialog.Accepted:\n\t\t\tcolor = color_dialog.selectedColor()\n\t\t\treturn color",
"def _color_change_mode(self):\r\n self.dlg.exec_()\r\n self.color = self.dlg.currentColor().name()\r\n self.colorPlate.setStyleSheet(\"background-color: %s;\" % self.color)\r\n self.input_scene.get_stk_color(self.color)\r\n return",
"def GetDefaultColor(self):\n return wx.BLACK",
"def createColorWindow():\n colorWindow = g.GraphWin(\"Color\", 150, 500) #Window to show colors to pick\n return colorWindow",
"def defaultColor(self, p_int=None): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass",
"def onCustomColorMenu(self, item):\n self.dialog = pyui.dialogs.ColorDialog(self.customColorChosen)\n self.dialog.doModal()\n return 1",
"def pickColour(self):\n colour = QColorDialog.getColor()\n if colour.isValid():\n self.user[\"Colour\"] = colour.name()\n self.ui.l_colour.setText(self.user[\"Colour\"])",
"def default_colour(self):\n colour = self.DEFAULT_COLOUR\n return colour",
"def pickAColor():\n color = _tkCall(tkColorChooser.askcolor)\n if color[0] != None:\n return Color(color[0][0], color[0][1], color[0][2])",
"def choose_colour(self) -> None:\n self.chosen_colour = QtWidgets.QColorDialog.getColor(Qt.white, self, \"Выберите цвет\")\n self.pen_colour = self.chosen_colour\n self.palette.setColor(QPalette.Background, self.chosen_colour)\n self.ui.frame.setPalette(self.palette)",
"def addDialogBox(self, color):\n\t\tself._getSize()\n\t\tobj = nceDialogBox(self.hcenter - 11, 10)\n\t\tobj.rtc = False\n\t\tobj.frame = [\t['╭' + ('─' * 19) + '╮', 3],\n\t\t\t\t\t\t['│' + (' ' * 19) + '│', 3],\n\t\t\t\t\t\t['│' + (' ' * 19) + '│', 3],\n\t\t\t\t\t\t['└' + ('─' * 19) + '╯', 3]]\n\t\tobj.visible = True\n\t\tobj.color = color\n\t\tself.menus.append(obj)\n\t\treturn obj",
"def _confirm_color(self, event = None):\n color = self._entry.get().strip()\n if color != \"\":\n self._color = color\n self._window.destroy()",
"def open_win(e) -> None:\n widget_sfx()\n r, g, b = parse_color(var.get())\n new_color, tk_color = askcolor(\n color=f\"#{r:02x}{g:02x}{b:02x}\",\n parent=parent.winfo_toplevel(),\n title=str(TRANS_SELECT_TITLE),\n )\n if new_color is not None:\n # On 3.8, these are floats.\n rf, gf, bf = new_color\n var.set(f'{int(rf)} {int(gf)} {int(bf)}')",
"def select_color(self):\n\t\tresult = tkinter.colorchooser.askcolor(self.center['bg'])\n\t\tif result:\n\t\t\t# 2nd part of result is the color object\n\t\t\tself.center['bg'] = result[1]",
"def comdlg32_ChooseColor(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"lpcc\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def set_window_colour(self, event):\n rgb_triplet, rgb_string = tkColorChooser.askcolor()\n self.canvas.config(bg = rgb_string)",
"def showColors(self):\n\t\tcolors = ['white', 'red', 'green', 'orange', 'blue', 'purple', 'cyan', 'lightgrey',\n\t\t\t\t 'darkgrey', 'light red', 'light green', 'yellow', 'light blue', 'purple', 'cyan', 'dark white']\n\t\tmax = curses.COLORS if curses.COLORS <= 16 else 16\n\t\tself.screen.clear()\n\t\tfor c in range(0, max):\n\t\t\tself.wts(c + 2, 1, \"color \" + str(c) + ' : ' + colors[c], c)\n\t\tself.wts(18, 1, \"color 16 : red on white\", 16)\n\t\tself.wts(20, 1, 'Color demo, displaying ' + str(max) + ' colors + 1 special')\n\t\tself.screen.refresh()\n\t\tch = False\n\t\twhile not ch:\n\t\t\tch = self.screen.getch()\n\t\tself.exit('Color demo complete')",
"def get_color(self):\n self.view.present(\n \"sheet\",\n orientations=ORIENTATIONS,\n )\n self.view.wait_modal()\n return self.rgb",
"def get_color(self):\n return \"yellow\"",
"def color(self):\n return self.settings['color']",
"def color(self):\n global bordercolor\n bordercolor = QColorDialog.getColor(Qt.green, self)\n self.b_color.setPalette(QPalette(bordercolor))",
"def default_style_sheet(self) -> str:\n # Valid color names: http://www.w3.org/TR/SVG/types.html#ColorKeywords\n g.trace('===== using default style sheet =====')\n return '''\\\n\n /* A QWidget: supports only background attributes.*/\n QSplitter::handle {\n background-color: #CAE1FF; /* Leo's traditional lightSteelBlue1 */\n }\n QSplitter {\n border-color: white;\n background-color: white;\n border-width: 3px;\n border-style: solid;\n }\n QTreeWidget {\n background-color: #ffffec; /* Leo's traditional tree color */\n }\n QsciScintilla {\n background-color: pink;\n }\n '''",
"def set_colors(self, ):\n try:\n odd = self._parent.settings.get_key('interface.odd_color')\n even = self._parent.settings.get_key('interface.even_color')\n self.dialog.instruments.set_odd_color(odd)\n self.dialog.accounts.set_odd_color(odd)\n self.dialog.instruments.set_even_color(even)\n self.dialog.accounts.set_even_color(even)\n except od_exception_config_key_error:\n pass",
"def set_color(self):\n new_color = QColorDialog.getColor(QColor(self.config['color']))\n if not new_color.isValid():\n return\n self.config['color'] = new_color.rgb()\n self.paint()",
"def __init__(self, parent):\n self.parent = parent\n myStyle = wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER|wx.TAB_TRAVERSAL\n wx.Dialog.__init__(self, None, wx.ID_ANY, '%s - Choose your colors' % globs.myName, style=myStyle)\n\n self._initialize()\n\n self.panel1.SetSizerAndFit(self.topBoxSizer)\n self.SetClientSize(self.topBoxSizer.GetSize())\n self.Centre()",
"def abrirDialogoDeConfiguracion(self):\n ventanaEmergente = Toplevel()\n ventanaEmergente.geometry(\"200x320\")\n # Se crea un canvas\n tela = Canvas(ventanaEmergente, height=320, width=200, bg=\"snow\")\n tela.place(x=0, y=0)\n btnColorEspacioDisponible = Button(tela, text=\"Espacio Disponible\", command = lambda :self.cambiarColor(0))\n btnColorEspacioDisponible.place(x=50, y=20)\n btnColorPared = Button(tela, text=\"Paredes\", command = lambda :self.cambiarColor(2))\n btnColorPared.place(x=66, y=50)\n btnColorNodo = Button(tela, text=\"Nodo\", command = lambda :self.cambiarColor(3))\n btnColorNodo.place(x=70, y=80)",
"def getColor(col=None,caption=None):\n if type(col) == tuple:\n col = QtGui.QColor.fromRgb(*col)\n else:\n col = QtGui.QColor(col)\n dia = QtGui.QColorDialog\n #myButton = QtGui.QPushButton('MY')\n #dia.layout()\n col = dia.getColor(col)\n if col.isValid():\n return str(col.name())\n else:\n return None",
"def getCenterColor():\n return input(\"What color do you want the center color to be?\")",
"def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")",
"def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")"
]
| [
"0.7305316",
"0.6629004",
"0.66000414",
"0.65310407",
"0.6420928",
"0.63252723",
"0.6239439",
"0.62127674",
"0.60073066",
"0.59559417",
"0.5938414",
"0.5874301",
"0.582238",
"0.58178926",
"0.5801812",
"0.57167405",
"0.5696698",
"0.567963",
"0.56683296",
"0.5642302",
"0.56238717",
"0.56186587",
"0.558961",
"0.55782926",
"0.55780405",
"0.55506265",
"0.55038905",
"0.5491939",
"0.5489528",
"0.5489528"
]
| 0.8498397 | 0 |
Returns the mode id | def modeId(self):
return self.__modeId | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mode(self) -> str:\r\n return self._mode",
"def mode(self) -> str:\n return pulumi.get(self, \"mode\")",
"def mode(self) -> str:\n return pulumi.get(self, \"mode\")",
"def getmode(self):\n return self.mode",
"def get_mode(self):\r\n return self.mode",
"def mode(self):\n return self._data.get('mode', None)",
"def mode(self):\n return self.__mode",
"def mode(self):\n return self._lift(\"mode\")",
"def mode(self):\r\n return self._mode",
"def mode(self):\r\n return self._mode",
"def mode(self):\r\n return self._mode",
"def get_mode(self, ):\n return self.get_parameter('mode')",
"def mode(self):\n return self._mode",
"def mode(self):\n return self._mode",
"def mode(self):\n return self._mode",
"def __mode_modesetid(self, mode):\n\t\tfor key,val in self.ms_all.iteritems():\n\t\t\tix = val.index(mode)\n\t\t\tif ix is not None:\n\t\t\t\treturn key, ix",
"def mode(self) -> int:\n return self._mode",
"def mode(self) -> str:\r\n ...",
"def mode(self):\n return self._mode_func",
"def _mode_key(guild_id: int) -> str:\n return f\"mode/{guild_id}\"",
"def get_mode(self):\r\n return self._api.get_mode()",
"def getMode(self):\n return self._mode",
"def get_mode(self) -> str:\n\n return self.send(self.cmd.GET_MODE)",
"def mode(self) -> int:",
"def mode(self) -> Optional[str]:\n return pulumi.get(self, \"mode\")",
"def mode(self) -> Optional[str]:\n return pulumi.get(self, \"mode\")",
"def mode(self):\n\n return self._mode",
"def getMode(self):\n with self.lock:\n mode = self.mode\n return mode",
"def mode(self, mode: Optional[int] = None) -> Optional[int]:\n ...",
"def get_mode_name(self, i):\n for mode in self.modes:\n if mode['id'] == i:\n return mode['name']\n return 'Unknown Game Mode'"
]
| [
"0.7811208",
"0.7805747",
"0.7805747",
"0.77869534",
"0.771352",
"0.76968026",
"0.76935",
"0.7689646",
"0.7655183",
"0.7655183",
"0.7655183",
"0.76503944",
"0.7631035",
"0.7631035",
"0.7631035",
"0.7579294",
"0.75691974",
"0.7563148",
"0.75571734",
"0.7544744",
"0.7537633",
"0.75248075",
"0.7524057",
"0.7479501",
"0.7476838",
"0.7476838",
"0.745721",
"0.741831",
"0.7384315",
"0.73825043"
]
| 0.9080869 | 0 |
Set the value of a custom axis | def setCustomAxisValue(self, name, value):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def value_axis(self, value_axis):\n\n self.container['value_axis'] = value_axis",
"def set_point(self, axis: int, value: Union[int, float]):\n if axis < 0:\n axis += self.ndim\n if axis < 0:\n raise ValueError(\n f'axis is negative, expected positive, got {axis}'\n )\n if self.point[axis] != value:\n self._point[axis] = value\n self.events.axis(axis=axis, value=value)",
"def _scale_setter(self, value: float) -> None:\n self.uaxis.scale = value\n self.vaxis.scale = value",
"def _offset_setter(self, value: float) -> None:\n self.uaxis.offset = value\n self.vaxis.offset = value",
"def setValues(\n self,\n axis: \"Axis\" = None,\n labelFrequency: int = 1,\n labelPlacement: SymbolicConstant = INSIDE,\n labelStyle: TextStyle = TextStyle(),\n lineStyle: LineStyle = LineStyle(),\n placement: SymbolicConstant = MIN_MAX_EDGE,\n tickLength: float = 2,\n tickPlacement: SymbolicConstant = INSIDE,\n tickStyle: LineStyle = LineStyle(),\n titleStyle: TextStyle = TextStyle(),\n ):\n pass",
"def series_axis(self, series_axis):\n\n self.container['series_axis'] = series_axis",
"def setX(self, value):\n self.components[0] = value",
"def setX(self, value):\n self.components[0] = value",
"def _set_axis(axis):\n\n def axis_setter(self, labels):\n new_qc = DataFrameDefault.register(pandas.DataFrame.set_axis)(\n self, axis=axis, labels=labels\n )\n self.__dict__.update(new_qc.__dict__)\n\n return axis_setter",
"def setX(self, value):\n self.position[0] = value",
"def set_value(self, value: float):\n self.points[0, 0] = value\n return self",
"def set_point(\n self,\n axis: Union[int, Sequence[int]],\n value: Union[Union[int, float], Sequence[Union[int, float]]],\n ):\n if isinstance(axis, Integral):\n axis = assert_axis_in_bounds(axis, self.ndim) # type: ignore\n (min_val, max_val, step_size) = self.range[axis]\n raw_step = (value - min_val) / step_size\n self.set_current_step(axis, raw_step)\n else:\n value = tuple(value) # type: ignore\n axis = tuple(axis) # type: ignore\n if len(axis) != len(value):\n raise ValueError(\n trans._(\"axis and value sequences must have equal length\")\n )\n raw_steps = []\n for ax, val in zip(axis, value):\n ax = assert_axis_in_bounds(int(ax), self.ndim)\n min_val, _, step_size = self.range[ax]\n raw_steps.append((val - min_val) / step_size)\n self.set_current_step(axis, raw_steps)",
"def set_value(self,x):\n self._value = x",
"def set_value(self,x):\n self._value = x",
"def set_custom_value(self, value):\n self.logger.info(\"Set custom value : %s\" % value)\n\n try:\n self._answer_payload['custom_value'] = value\n except Exception as e:\n self.logger.error(\"Error on set custom variables : %s\" % e)",
"def set_axes(self, a):\r\n self.axes = a",
"def set_data(self, x = None, y = None):\n self.x_axis = x\n self.y_axis = y",
"def setAxisLabel(self, dim, label): \n try:\n self.__axis_labels__[dim] = label\n except IndexError:\n self.__axis_labels__.append(label)",
"def set_axis_x(self, new_axis_point):\r\n self.__x_axis = new_axis_point",
"def set_value(self, device_name, val):\n epics.caput(device_name, val)\n\n\t\t#mu = mu\n\t\t#sig = math.sqrt(abs(mu))\n\t\t#y = (float(x)-mu)/(sig)",
"def set_axis_y(self, new_axis_point):\r\n self.__y_axis = new_axis_point",
"def _update_ax(self):\n raise NotImplementedError(\"Implement _update_ax(self) in subclass\")",
"def setvalue(self,num,name,val):\n self.M.reconfigure(num,{name:float(val)})",
"def getAxisValue(self, vf, tag, value):\n if not tag in vf.axes:\n return None\n minValue, defaultValue, maxValue = vf.axes[tag]\n if not value:\n return defaultValue\n if value < 0: # Realative scale between minValue and default\n return defaultValue + (defaultValue - minValue)*value\n # else wdth > 0: Relative scale between default and maxValue\n return defaultValue + (maxValue - defaultValue)*value",
"def setY(self, value):\n self.components[1] = value",
"def setY(self, value):\n self.components[1] = value",
"def set_val(self, val):\n xy = self.poly.xy\n if self.orientation == 'vertical':\n xy[1] = .25, val\n xy[2] = .75, val\n self._handle.set_ydata([val])\n else:\n xy[2] = val, .75\n xy[3] = val, .25\n self._handle.set_xdata([val])\n self.poly.xy = xy\n self.valtext.set_text(self._format(val))\n if self.drawon:\n self.ax.figure.canvas.draw_idle()\n self.val = val\n if self.eventson:\n self._observers.process('changed', val)",
"def value_axis(self):\n return self.container['value_axis']",
"def setPoint(self, x, y, value):\n self._c[x*self.__height + y] = value",
"def set_value(self,x):\n self._value = float(x)"
]
| [
"0.7426107",
"0.6823979",
"0.6566052",
"0.6564019",
"0.64065546",
"0.6389571",
"0.6220259",
"0.6220259",
"0.6219633",
"0.61972004",
"0.6181993",
"0.61811894",
"0.6174139",
"0.6174139",
"0.6139899",
"0.6137108",
"0.6131543",
"0.60985774",
"0.60769933",
"0.6062639",
"0.60532314",
"0.6037908",
"0.60195154",
"0.5999198",
"0.5977438",
"0.5977438",
"0.5954252",
"0.5935508",
"0.5926782",
"0.591135"
]
| 0.8554661 | 0 |
Returns true if the widget is already initialized. | def isWidgetInitialized(self):
return self.__widget is not None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _is_initialized(self) -> bool:\n return len(self) > 0",
"def is_editor_ready(self):\r\n if self.editor_widget:\r\n window = self.editor_widget.window()\r\n if hasattr(window, 'is_starting_up') and not window.is_starting_up:\r\n return True",
"def _isinit(self):\n return self.dp.state()==PyTango.DevState.INIT",
"def init(self):\n return True",
"def isInitialized(self):\n\t\tif self.isTypeSet and self.isCfgSet:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False",
"def needs_init(self, view):\n # TODO: test this approach. Call it in main file\n if not self.flags_manager:\n log.debug(\" flags handler not initialized. Do it.\")\n return True\n if self.flags_manager.any_file_modified():\n log.debug(\" .clang_complete or CMakeLists.txt were modified. \"\n \"Need to reinit.\")\n return True\n if self.exists_for_view(view.buffer_id()):\n log.debug(\" view %s, already has a completer\", view.buffer_id())\n return False\n log.debug(\" need to init view '%s'\", view.buffer_id())\n return True",
"def is_initialized(self) -> bool:\n return (\n self._amount_by_currency_id is not None\n and self._quantities_by_good_id is not None\n )",
"def is_initialized(self) -> bool:\n return self._ledger_apis.has_default_ledger",
"def ready(self):\n return self.settings is not None",
"def is_initialized(self) -> bool:\n return (\n (self._exchange_params_by_currency_id is not None)\n and (self._utility_params_by_good_id is not None)\n and (self._transaction_fees is not None)\n )",
"def initialized(self):\n # verbose = CMAKE_BUILD_VERBOSE\n # if verbose:\n # self.diagnose_initialized_problems()\n return self.project_build_dir.exists() and self.has_stored_config_file()",
"def is_ready(self) -> bool:\n pass",
"def initialized(self):\n return self.workspace == find_enclosing_workspace(self.workspace)",
"def is_ready(cls):\n\n return False",
"def initialized(self) -> bool:\n return self._env.repo_is_initialized",
"def is_ready(self):\n return len(self.unresolved_placeholders) == 0",
"def is_initialised(self) -> bool:\n return _schema.has_schema(self._engine)",
"def if_ready(self, **kwargs):\n return True",
"def is_init(self):\n cur = self.connection.cursor()\n masterdata = list(cur.execute(\n 'SELECT * FROM sqlite_master WHERE name=\"metadata\"'\n ))\n if len(masterdata) == 0:\n return False\n schema_version = list(cur.execute(\n 'SELECT value FROM metadata ' +\n 'WHERE key=\"schema_version\"'\n ))\n if len(schema_version) > 0 and \\\n schema_version[0][0] == self.schema_version:\n return True\n return False",
"def isInit(this):\n\t\treturn not not this._CAP\n\t\t# Who's here ?\n\t\t# - Me, I kill you.",
"def has_initial_states(self):\n return len(self.initial_states()) > 0",
"def has_initial_state(self, state):\n try:\n return self.state(state).is_initial\n except LookupError:\n return False",
"def ready(self):\n return self.counter > 0",
"def is_ready() -> bool:\n return True",
"def is_ready(self):\n if self.id is None:\n return False\n\n return True",
"def initialized(self):\n return len(self.ops) > 0",
"def hasBeenStarted(self):\n return (not self._slave_dhcp_client_pid is None) and (not self._slave_dhcp_client_proc is None)",
"def at_start(self) -> bool:\n return not self.open",
"def init_place(self):\n if self._f == None:\n return False\n else:\n return True",
"def isConfigured(self):\n if self.__install and not self.__saved:\n return False\n else:\n return True"
]
| [
"0.7549473",
"0.7125627",
"0.7058051",
"0.6861954",
"0.68026435",
"0.67455906",
"0.67142135",
"0.6673255",
"0.6608615",
"0.6608254",
"0.65874594",
"0.65844923",
"0.6568072",
"0.6543216",
"0.6534076",
"0.65151435",
"0.64635277",
"0.6377944",
"0.63721126",
"0.63586754",
"0.6357148",
"0.63498735",
"0.63223344",
"0.62896603",
"0.6288529",
"0.6251735",
"0.6251244",
"0.6234133",
"0.6201521",
"0.6199005"
]
| 0.86975914 | 0 |
Format an iterable of slice objects | def __formatSlices(self, indices):
if indices is None:
return ''
def formatSlice(slice_):
start, stop, step = slice_.start, slice_.stop, slice_.step
string = ('' if start is None else str(start)) + ':'
if stop is not None:
string += str(stop)
if step not in (None, 1):
                string += ':' + str(step)
return string
return '[' + ', '.join(
formatSlice(index) if isinstance(index, slice) else str(index)
for index in indices) + ']' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_slice_strings(cls, slice_key):\n start = slice_key.start\n size = slice_key.stop - start\n return (str(start), str(size))",
"def __getslice__(self, i, j):\n return OutputGroup(list.__getslice__(self, i, j))",
"def print_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)",
"def normalize_slice(s):\n start, stop, step = s.start, s.stop, s.step\n if start is None:\n start = 0\n if step is None:\n step = 1\n if start < 0 or step < 0 or stop is not None and stop < 0:\n raise NotImplementedError()\n return slice(start, stop, step)",
"def _get_span_slices(self, entities: List[Dict]) -> str:\n span_slices = []\n for entity in entities:\n # rather than iterate over multiples of offset_step, we use entity['render_slot']\n # to determine the vertical position, since that tells where\n # the span starts vertically so we can extend it horizontally,\n # past other spans that might have already ended\n color = self.colors.get(entity[\"label\"].upper(), self.default_color)\n top_offset = self.top_offset + (\n self.offset_step * (entity[\"render_slot\"] - 1)\n )\n span_slice = self.span_slice_template.format(\n bg=color,\n top_offset=top_offset,\n )\n span_slices.append(span_slice)\n return \"\".join(span_slices)",
"def slice(self) -> Tuple[slice, ...]:\n\n total_slice = tuple(slice(None) for _ in self.collection_shape)\n for obj in self.objects.flat:\n for i, current_slice in enumerate(obj.slices):\n if total_slice[i].start is None:\n total_slice = total_slice[:i] + (current_slice,) + total_slice[i + 1:]\n else:\n if current_slice.start < total_slice[i].start:\n total_slice = total_slice[:i] + (\n slice(current_slice.start, total_slice[i].stop, total_slice[i].step),) + total_slice[i + 1:]\n if current_slice.stop > total_slice[i].stop:\n total_slice = total_slice[:i] + (\n slice(total_slice[i].start, current_slice.stop, total_slice[i].step),) + total_slice[i + 1:]\n return total_slice",
"def _conv_slice_to_list(slice_obj, start_def=0, stop_def=100, step_def=1):\n if slice_obj.start is None:\n start = start_def\n else:\n start = slice_obj.start\n if slice_obj.stop is None:\n stop = stop_def\n else:\n stop = slice_obj.stop\n if slice_obj.step is None:\n step = step_def\n else:\n step = slice_obj.step\n return list(range(start, stop, step))",
"def __getslice__( self, *args):\n return array.array.__getslice__(self, *args).tostring()",
"def __call__(self, start: int = 0, end: int = 5):\n pprint(self.data[start:end])",
"def simplices_to_lines(simplices: np.ndarray):\n i = 0\n formatted = np.zeros((simplices.shape[0] * 3 * 2, 3))\n for vertex_group in simplices:\n v1 = vertex_group[0]\n v2 = vertex_group[1]\n v3 = vertex_group[2]\n formatted[i:i + 6] = np.array([v1, v2, v2, v3, v3, v1])\n i += 6\n\n return formatted",
"def to_slice(self):\n return np.index_exp[self.start[2]:self.end[2], #\n self.start[1]:self.end[1], #\n self.start[0]:self.end[0]]",
"def __repr__(self):\n outlist = []\n for idx in range(len(self)):\n outlist.append(repr(self[idx]))\n return f\"({', '.join(outlist)})\"",
"def print_image_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny-1,-1,-1):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)",
"def iter_format(nitems, testobj='ndarray'):\n for t in iter_mode(nitems, testobj):\n yield t\n if testobj != 'ndarray':\n return\n yield struct_items(nitems, testobj)",
"def iter_format(nitems, testobj='ndarray'):\n for t in iter_mode(nitems, testobj):\n yield t\n if testobj != 'ndarray':\n return\n yield struct_items(nitems, testobj)",
"def SliceView(sequence, start=None, stop=None, step=1):\n start, stop, step = slice(start, stop, step).indices(len(sequence))\n for i in range(start, stop, step):\n yield sequence[i]",
"def _slice(self, start, stop, step=None):\n\n slices = [slice(None)] * self.data.ndim\n slices[self.axis] = slice(start, stop, step)\n return tuple(slices)",
"def process_slice(slc, shape, n):\n if not isinstance(slc, tuple):\n slc = (slc,)\n slc = list(slc)\n ndim = len(shape) - n\n assert ndim >= 0\n shape_idx = 0\n for slice_idx, s in enumerate(slc):\n if s == nax:\n continue\n if shape[shape_idx] == 1:\n if type(s) == int:\n slc[slice_idx] = 0\n else:\n slc[slice_idx] = slice(None)\n shape_idx += 1\n if shape_idx != ndim:\n raise IndexError('Must have %d terms in the slice object' % ndim)\n return extend_slice(tuple(slc), n)",
"def seqlists2slicestr(seqlists):\n def seqlist2slice(seqlist):\n seqlistcanon = []\n for seqel in seqlist.split(','):\n seqel = [int(el) for el in seqel.split(':')]\n seq = range(seqel[0], seqel[-1]+1)\n seqlistcanon.extend(seq)\n seqsteps = set(numpy.diff(seqlistcanon))\n if len(seqsteps) > 1:\n raise ValueError('Subband spec {} too complicated.'.format(seqlist))\n elif len(seqsteps) == 0:\n slicestr = \"{}\".format(seqlistcanon[0])\n else:\n seqstep = seqsteps.pop()\n seqstepstr = str(seqstep) + ':' if seqstep > 1 else ''\n slicestr = \"{}:{}{}\".format(seqlistcanon[0], seqstepstr,\n seqlistcanon[-1])\n return slicestr\n\n if type(seqlists) is list:\n slicestrlist = []\n for seqlist in seqlists:\n seqstr = seqlist2slice(seqlist)\n slicestrlist.append(seqstr)\n slicestr = _RCU_SB_SEP.join(slicestrlist)\n else:\n slicestr = seqlist2slice(seqlists)\n return slicestr",
"def _build_iterable(self):",
"def format_subrange(start, end, step):\n \n if start == end:\n return str(start)\n elif step == 1:\n return \"%d-%d\" % (start, end)\n else:\n return \"%d-%dx%d\" % (start, end, step)",
"def pretty_print(iterable):\n for elem in iterable:\n print(elem)",
"def _create_slice(arr, id, reference_name, slice_start, slice_end):\n url = f\"http://{request.host}{BASE_PATH}/data?id={id}&reference_name={reference_name}&start={slice_start}&end={slice_end}\"\n arr.append({ 'url': url, })",
"def slice(iterable, *args):\n return iter(it.islice(iterable, *args))",
"def iterslices(iterable, n, pad_last=False, pad_value=None):\n current = []\n for a in iterable:\n current.append(a)\n if len(current) == n:\n yield current\n current = []\n if current:\n if pad_last:\n current += [pad_value] * (n-len(current))\n yield current",
"def test_enforce_iterable():\n formatter = TabularOutputFormatter()\n loremipsum = (\n \"lorem ipsum dolor sit amet consectetur adipiscing elit sed do eiusmod\".split(\n \" \"\n )\n )\n\n for format_name in formatter.supported_formats:\n formatter.format_name = format_name\n try:\n formatted = next(formatter.format_output(zip(loremipsum), [\"lorem\"]))\n except TypeError:\n assert False, \"{0} doesn't return iterable\".format(format_name)",
"def _slice_template(cls, in_str: str) -> Iterator[RawFileSlice]:\n fmt = Formatter()\n in_idx = 0\n for literal_text, field_name, format_spec, conversion in fmt.parse(in_str):\n if literal_text:\n escape_chars = cls._sorted_occurrence_tuples(\n cls._substring_occurrences(literal_text, [\"}\", \"{\"])\n )\n idx = 0\n while escape_chars:\n first_char = escape_chars.pop()\n # Is there a literal first?\n if first_char[1] > idx:\n yield RawFileSlice(\n literal_text[idx : first_char[1]], \"literal\", in_idx\n )\n in_idx += first_char[1] - idx\n # Add the escaped\n idx = first_char[1] + len(first_char[0])\n # We double them here to make the raw\n yield RawFileSlice(\n literal_text[first_char[1] : idx] * 2, \"escaped\", in_idx\n )\n # Will always be 2 in this case.\n # This is because ALL escape sequences in the python formatter\n # are two characters which reduce to one.\n in_idx += 2\n # Deal with last one (if present)\n if literal_text[idx:]:\n yield RawFileSlice(literal_text[idx:], \"literal\", in_idx)\n in_idx += len(literal_text) - idx\n # Deal with fields\n if field_name:\n constructed_token = \"{{{field_name}{conv}{spec}}}\".format(\n field_name=field_name,\n conv=f\"!{conversion}\" if conversion else \"\",\n spec=f\":{format_spec}\" if format_spec else \"\",\n )\n yield RawFileSlice(constructed_token, \"templated\", in_idx)\n in_idx += len(constructed_token)",
"def slicer(seq, start=None, stop=None, step=None):\n return seq[start:stop:step]",
"def show_slices(slices):\n fig, axes = plt.subplots(1, len(slices))\n for i, slice in enumerate(slices):\n axes[i].imshow(slice.T, cmap=\"gray\", origin=\"lower\")",
"def __str__(self) -> str:\n result = \"[\"\n for i in range(len(self)):\n if i > 0:\n result += ', '\n result += str(self[i])\n result += ']'\n return result"
]
| [
"0.5865665",
"0.57684493",
"0.5599457",
"0.5525063",
"0.5512937",
"0.55068475",
"0.5437363",
"0.54111654",
"0.5382067",
"0.5355884",
"0.5338547",
"0.53211254",
"0.53099644",
"0.5304407",
"0.5304407",
"0.52531093",
"0.5224272",
"0.5191909",
"0.51722807",
"0.5160424",
"0.51307696",
"0.51250887",
"0.5114102",
"0.5104544",
"0.5102945",
"0.51013994",
"0.50876623",
"0.50768554",
"0.5074549",
"0.5074144"
]
| 0.6723752 | 0 |
Build title from given selection information. | def titleForSelection(self, selection):
if selection is None or selection.filename is None:
return None
else:
directory, filename = os.path.split(selection.filename)
try:
slicing = self.__formatSlices(selection.slice)
except Exception:
_logger.debug("Error while formatting slices", exc_info=True)
slicing = '[sliced]'
permuted = '(permuted)' if selection.permutation is not None else ''
try:
title = self.TITLE_PATTERN.format(
directory=directory,
filename=filename,
datapath=selection.datapath,
slicing=slicing,
permuted=permuted)
except Exception:
_logger.debug("Error while formatting title", exc_info=True)
title = selection.datapath + slicing
return title | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def updateTitle(self):\n \n if len(self.selParams) == 0:\n self.title = 'Measure (Nothing)'\n elif len(self.selParams) == 1:\n self.title = 'Measure ' + self.selParams[0]\n elif len(self.selParams) == 2:\n self.title = 'Measure ' + self.selParams[0] + ', ' + self.selParams[1]\n else:\n self.title = 'Measure ' + self.selParams[0] + ', ' + self.selParams[1] + ', ...'\n self.title = hf.enumSequence(self.pos, self.title)",
"def selected_title(self):\r\n return self.title",
"def showSelectionInTitle(*args, **kwargs)->None:\n pass",
"def make_title(words):",
"def get_title():",
"def get_title(rating):\n\ttitle = \"\"\n\tif rating < 1200:\n\t\ttitle = [\"Newbie\", \"grey-text\"]\n\telif rating < 1400:\n\t\ttitle = [\"Pupil\", \"light-green-text\"]\n\telif rating < 1600:\n\t\ttitle = [\"Specialist\", \"cyan-text\"]\n\telif rating < 1900:\n\t\ttitle = [\"Expert\", \"indigo-text\"]\n\telif rating < 2100:\n\t\ttitle = [\"Candidate Master\", \"purple-text\"]\n\telif rating < 2300:\n\t\ttitle = [\"Master\", \"amber-text\"]\n\telif rating < 2400:\n\t\ttitle = [\"International Master\", \"orange-text\"]\n\telif rating < 2600:\n\t\ttitle = [\"Grandmaster\", \"red-text\"]\n\telif rating < 3000:\n\t\ttitle = [\"International Grandmaster\", \"red-text\"]\n\telse:\n\t\ttitle = [\"Legendary Grandmaster\", \"red-text\"]\n\treturn title",
"def generate_title(self, title=None):\n if title is None:\n title = self.header.get('title', self.title)\n\n title = self.generate(title)\n title = title.replace('<p>', '').replace('</p>', '')\n # no trailing newlines\n title = re.sub(r'\\n+', ' ', title).rstrip()\n return title",
"def select_title(parent, mission):\n\n\tclass Provider(DialogProvider):\n\t\tdef create_body(self, body):\n\t\t\tentry = Entry(body)\n\t\t\tentry.insert(0, safe_tk(mission.title))\n\t\t\tentry.selection_range(0, \"end\")\n\t\t\tentry.pack()\n\t\t\tentry.focus_set()\n\t\t\tself.entry = entry\n\n\t\tdef apply(self):\n\t\t\ttitle = self.entry.get()\n\t\t\tmission.set(\"title\", title)\n\n\treturn Dialog(parent, title=\"重命名\", cls=Provider).wait()",
"def get_title(rating):\n title = \"\"\n if rating < 1200:\n title = [\"Newbie\", \"grey-text\"]\n elif rating < 1400:\n title = [\"Pupil\", \"light-green-text\"]\n elif rating < 1600:\n title = [\"Specialist\", \"cyan-text\"]\n elif rating < 1900:\n title = [\"Expert\", \"indigo-text\"]\n elif rating < 2100:\n title = [\"Candidate Master\", \"purple-text\"]\n elif rating < 2300:\n title = [\"Master\", \"amber-text\"]\n elif rating < 2400:\n title = [\"International Master\", \"orange-text\"]\n elif rating < 2600:\n title = [\"Grandmaster\", \"red-text\"]\n elif rating < 3000:\n title = [\"International Grandmaster\", \"red-text\"]\n else:\n title = [\"Legendary Grandmaster\", \"red-text\"]\n return title",
"def create_title(title, year=None, time_step=None, base=0, interval=None,\n gage=None, m=None, h=None):\n if type(gage) is list or type(gage) is tuple:\n title = title + ' at listed gages'\n elif gage is not None:\n title = title + ' at '+ gage\n \n if m is not None:\n title = title + ' for Month {mo} of'.format(mo=m)\n elif h is not None:\n title = title + ' for Hour {ho} of'.format(ho=h) \n elif interval is 'seasonal':\n title = title + ' for Months of'\n elif interval is 'diurnal':\n title = title + ' for Hours of'\n if time_step is not None:\n ts = time_step.replace('min', ' minute').replace('T', ' minute').replace('H', ' hour').replace('D', ' day')\n title = title.format(ts=ts)\n if year is not None:\n title = title +' '+ year\n return title",
"def _build_title(db, place):\n descr = place.get_title()\n location = get_main_location(db, place)\n parish = location.get(PlaceType.PARISH)\n city = location.get(PlaceType.CITY)\n state = location.get(PlaceType.STATE)\n title_descr = \"\"\n if descr:\n title_descr += descr.strip()\n if parish:\n title_descr += ', ' + parish.strip() + _(\" parish\")\n if city:\n title_descr += ', ' + city.strip()\n if state:\n title_descr += ', ' + state.strip() + _(\" state\")\n return _strip_leading_comma(title_descr)",
"def get_title(self):\n\n if self.title: return self.title\n path = self.get_path()\n if str(path) == \"\": \n Settings.err_print(\"missing file title\")\n return \"\"\n title, ext = os.path.splitext(path)\n self.ext = ext\n self.title = \"{}{}\".format(os.path.basename(title), ext)\n return self.title",
"def make_main_title(self, end, end_center=False):\n main_title = r\"\\begin{center}\"\n if self.detector is not None:\n main_title += \"%s \"%self.detector\n if self.selection is not None:\n main_title += \"%s Event Selection \"%self.selection\n main_title += end\n if end_center:\n main_title += r\"\\end{center}\"\n return main_title",
"def inclusive_title(self):\n return self.title + (\" %s\" % (self.episode_to_string(self.latest_season, self.latest_episode),) if self.is_series() else \"\")",
"def _get_full_title(self):\n return \"%s - %s %d\" % (self.title, _('Season'), self.season)",
"def _make_title(self):\n ret = self.properties['reason'].capitalize()\n ret += ' has been reported near ' + self.properties['address'].split(',')[0]\n time = datetime.strptime(self.properties['when'], '%Y-%m-%dT%H:%M:%S')\n times = [time.strftime(i).lstrip('0') for i in ('%m', '%d', '%I:%M%p')]\n ret += ' on {}/{} at {}'.format(times[0], times[1], times[2])\n return ret",
"def get_title(text, uuid=None):\n if uuid is not None:\n text += get_provenance_link(uuid)\n title = pn.Row(pn.pane.HTML('<h2>{}</h2>'.format(text)), align='start')\n\n return title",
"def get_short_name(self):\n split = self.name.split(' - ')\n # author, year, and first couple of words of paper title\n return \"{} ({}), {}\".format(split[0], split[1], \" \".join(split[2].split(' ')[:3]))",
"def dc_title(self):\n return u\"{0} ({1}): {2} {3}\".format(\n self.label, self.in_assessment[0].timepoint,\n self.subjects[0].code_in_study,\n \"...\" if len(self.subjects) > 1 else \"\")",
"def selection_text(case) -> str:\n if hasattr(case, 'client'):\n if isinstance(case.client, DAList) and case.client.number() > 0:\n if hasattr(case.client[0].name, 'last'):\n lname = case.client[0].name.last\n fname = case.client[0].name.first\n client = f\"{lname}, {fname}\"\n else:\n client = case.client[0].name\n else:\n client = \"*{}-{}\".format(case.client.number(), str(case.client))\n else:\n client = \"(NO CLIENT)\"\n\n if hasattr(case, 'description'):\n description = case.description\n else:\n description = case.footer\n\n if hasattr(case, 'case_id') and case.case_id:\n case_id = \"{}: {}\".format(case.county, case.case_id)\n else:\n case_id = \"(NOT FILED)\"\n\n return f\"{client} - {description} - ({case.county})\"",
"def Title(self, **kwargs):\n full_name = ''\n if self.getFirstname() == '' or self.getLastname() == '':\n if not self.getOrganization():\n return '...'\n else:\n return self.getOrganization()\n format = kwargs.get('format', None)\n if format == 'natural':\n full_name = '%s %s' % (self.getFirstname(), self.getLastname())\n else:\n full_name = '%s %s' % (self.getLastname(), self.getFirstname())\n return '%s' % full_name",
"def name_with_title(self):\n return \"%s %s\" % (self.title, self.name)",
"def get_title(self):\n title = (None, 7)\n for text, level in self._headers:\n if level < title[1]:\n title = (text, level)\n return title[0]",
"def _title(hit: DD) -> str:\n return hit[\"_source\"][\"title\"]",
"def title(text, level=0):\n return '\\n' + text + '\\n' + '=-~_#%^' [level] * len(text) + '\\n\\n'",
"def makeTitle(self):\n l1=Label(self.app, text=\"Asset Allocation Combinations\")\n l1.grid(row=0, column=0)",
"def get_title(self) -> str:\n pass",
"def _set_title(self, key, item, group_size=2):\n if self.ndims == 1 and self.dim_dict.get('Default', False):\n return None\n dimension_labels = [dim.pprint_value(k) for dim, k in zip(self._dimensions, key)]\n groups = [', '.join(dimension_labels[i*group_size:(i+1)*group_size])\n for i in range(len(dimension_labels))]\n dims = '\\n '.join(g for g in groups if g)\n if isinstance(item, Overlay):\n for layer in item:\n format_dict = dict(dims=dims, label=layer.label, type=layer.__class__.__name__)\n layer.title = self.title.format(**format_dict)\n else:\n format_dict = dict(dims=dims, label=item.label, type=item.__class__.__name__)\n item.title = self.title.format(**format_dict)",
"def name_title(self, val: str) -> None:\n\n # Make sure they don't pass underscores; title versions are just\n # words and spaces.\n if '_' in val:\n raise CleanError(\n f\"Custom FeatureSet name_title '{val}' contains\"\n ' underscores; it must contain only spaces.'\n )\n\n # Make sure the value they're providing still matches their base\n # name. It could be easy to let this fall out of sync\n # accidentally.\n if val.lower().replace(' ', '_') != self._name:\n raise CleanError(\n f\"Custom FeatureSet name_title '{val}' letters/spacing\"\n f\" does not match base name '{self._name}'.\"\n )\n\n # Ok val; we will accept you.\n self._name_title = val",
"def make_title(dawn: str | None, dusk: str | None, /) -> str:\n logger.debug('Making title')\n if not dawn or not dusk:\n logger.error('Cannot find start/end date\\n')\n sys.exit(1)\n api_dfm, msg_dfm = '%Y-%m-%dT%H:%M:%SZ', '%d %B %Y'\n try:\n start_date = datetime.strptime(dawn, api_dfm).strftime(msg_dfm)\n end_date = datetime.strptime(dusk, api_dfm).strftime(msg_dfm)\n except ValueError as err:\n logger.error(f'{err}\\n')\n sys.exit(1)\n\n logger.debug('Title was made\\n')\n return f'From: {start_date} - To: {end_date}'"
]
| [
"0.6353829",
"0.613545",
"0.6039983",
"0.6028894",
"0.58950776",
"0.5741492",
"0.5696391",
"0.56829673",
"0.5664377",
"0.556874",
"0.5443336",
"0.5429673",
"0.54182184",
"0.54098636",
"0.54039633",
"0.53925604",
"0.53877467",
"0.5369228",
"0.5365641",
"0.53589183",
"0.5351014",
"0.5337689",
"0.53270334",
"0.53094614",
"0.5300629",
"0.52746904",
"0.5269801",
"0.52579516",
"0.52522576",
"0.5250022"
]
| 0.75292933 | 0 |
Set the data selection displayed by the view. If called, it has to be called directly after `setData`. | def setDataSelection(self, selection):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_selection(self, selection):\n self._selection = selection",
"def update_selection(self):\n raise NotImplementedError",
"def setSelectedDate(self, data):\n # print('setSelectedDate ', data)\n self.currentDate = data",
"def update_view(self, selected):\n pass",
"def apply_selection(self, rv, index, is_selected):\r\n self.selected = is_selected",
"def apply_selection(self, rv, index, is_selected):\n self.selected = is_selected",
"def apply_selection(self, rv, index, is_selected):\n self.selected = is_selected\n if is_selected:\n SelectableLabel.selected_hotel = rv.data[index]['text']",
"def _update_data(self, selected):\n if selected.row() != self.datasets.index:\n self.datasets.index = selected.row()\n self.datasets.update_current()\n self._update_main()",
"def _set_selected(self):\n if 'selected' in self._report_data:\n match_size: int = len(self._report_data['selected'])\n has_historical: bool = False\n for index, result in enumerate(self._report_data['selected'], start=0):\n result['createDateTime'] = Report._to_report_datetime(result['createDateTime'], False)\n result['index'] = (index + 1)\n if result.get('extraMatches'):\n match_size += len(result.get('extraMatches'))\n if result.get('historicalCount', 0) > 0:\n has_historical = True\n self._report_data['totalResultsSize'] = len(self._report_data['selected'])\n self._report_data['matchResultsSize'] = match_size\n self._report_data['hasHistorical'] = has_historical",
"def handleTableSelectionChange(self):\n self.selectEntireRow()\n self.showSelectedDataset()",
"def set_data(self, data):\n self._model.set_data(data)\n self.__refresh()",
"def set_data(self, data):\n self.closeContext()\n self.clear()\n self.clear_messages()\n\n self.data = data\n if data is not None:\n n_instances = len(data)\n n_attrs = len(data.domain.attributes)\n self.infoLabel.setText(\"%i instances on input\\n%i attributes\" % (\n n_instances, n_attrs))\n\n self.graph_variables = [var for var in data.domain.attributes\n if var.is_continuous]\n if len(self.graph_variables) < 1:\n self.Information.not_enough_attrs()\n else:\n groupvars = [var for var in data.domain.variables +\n data.domain.metas if var.is_discrete]\n\n if len(groupvars) > 0:\n self.cb_attr.addItems([str(var) for var in groupvars])\n self.group_var = str(groupvars[0])\n self.group_variables = groupvars\n self.update_group_var()\n else:\n self._setup_plot()\n\n self.selection = []\n self.openContext(data)\n self.select_data_instances()\n self.commit()",
"def user_selection(self, selected):\n\n source_index = self.proxy_model.mapToSource(selected)\n self.row = self.table_model.selectedRow(source_index.row())\n\n self.curr_selection()\n self.upd_preview()",
"def set_selected(self, selected):\n self.selected = selected",
"def datas(self, newDatas):\n self.__datas = newDatas\n\n self.dropDown.clear()\n self.dropDown.addItems(self.__datas)\n self.update()",
"def set_data(self, value):\n self._set_data(value)\n self.data_changed = True\n return",
"def select_data_item(self, data_items):\n raise NotImplementedError",
"def set_state( self ):\n\n # ensure the selection view has focus so user can immediately page\n # through images via arrow keys.\n self.selectionView.setFocus()\n\n # select the first entry so we can use the keyboard for navigation.\n #\n # NOTE: since the first column of our view is hidden, we need to\n # select the first visible column instead.\n #\n self.selectionView.setCurrentIndex( self.proxyPhotosModel.index( 0, self.PATH_COLUMN ) )",
"def set_data(self, d):\n self._data = d\n self.is_data_set = True",
"def setSelectModeData(self):\n self._nodeSelectMode = False\n self._dataSelectMode = True\n self._elemSelectMode = False",
"def setSelected(*args):",
"def setSelected(*args):",
"def setData(self, data):\n self.data = data",
"def setData(self, data):\n self.data = data",
"def selection_changed(self):\n self.emit('selection_changed')",
"def setData(self,newData):\r\n pass",
"def _menu_select_all(self, uiinfo, selection):\n print selection, uiinfo\n self.model.selected_rows = self.model.data_list[:]\n print \"selection: {}\".format(len(self.model.selected_rows))",
"def setData(self, data):\n return None",
"def setValue(self,selected):\n self['input'].setValue(selected)",
"def set_data(self, data):\n\n pass"
]
| [
"0.6738302",
"0.66910154",
"0.66355103",
"0.6523672",
"0.64766914",
"0.6413622",
"0.63925743",
"0.6344838",
"0.6215796",
"0.62130725",
"0.6122954",
"0.6097352",
"0.608399",
"0.6022608",
"0.6015785",
"0.6015256",
"0.6015057",
"0.6011311",
"0.5971476",
"0.5964737",
"0.59606576",
"0.59606576",
"0.5949183",
"0.5949183",
"0.5946989",
"0.5935866",
"0.5935768",
"0.59334207",
"0.59304893",
"0.5895137"
]
| 0.83170843 | 0 |
Returns names of the expected axes of the view, according to the input data. A None value will disable the default axes selector. | def axesNames(self, data, info):
return [] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def axesnames(self):\n return self._axesnames",
"def allAxes( mv ):\n if mv is None: return None\n return mv.getAxisList()",
"def _default_axis_names(n_dims):\n _DEFAULT_NAMES = (\"z\", \"y\", \"x\")\n return _DEFAULT_NAMES[-n_dims:]",
"def _find_axes(cls, input_data, explicit_x=None):\n\n if isinstance(input_data, pd.Series):\n if explicit_x is not None:\n raise ArgumentError(\"You cannot pass an explicit x axis with a pandas Series\")\n\n return input_data.index, input_data.values\n elif isinstance(input_data, pd.DataFrame):\n if explicit_x is not None:\n raise ArgumentError(\"You cannot pass an explicit x axis with a pandas DataFrame\")\n\n return input_data.index, input_data.values[:, 0]\n elif isinstance(input_data, np.ndarray):\n if len(input_data.shape) == 2 and input_data.shape[0] == 2:\n if explicit_x is not None:\n raise ArgumentError(\"You cannot pass an explicit x axis with a 2D array of input data\")\n\n return input_data[:, 0], input_data[:, 1]\n elif len(input_data.shape) == 1:\n if explicit_x is not None:\n if len(explicit_x) != len(input_data):\n raise ArgumentError(\"Your explicit x data has a different length that your y data\", x_length=len(explicit_x), y_length=len(input_data))\n\n return explicit_x, input_data\n else:\n return np.linspace(0, len(input_data) - 1, len(input_data)), input_data\n elif explicit_x is not None:\n return np.array(explicit_x), np.array(explicit_x)\n\n return np.linspace(0, len(input_data) - 1, len(input_data)), np.array(input_data)",
"def _get_axes_numbers(self, axes):\n if axes is None:\n return [0, 1]\n\n if isinstance(axes, str):\n return [self._get_axis_number(axes)]\n elif hasattr(axes, '__len__'):\n return [self._get_axis_number(ax) for ax in axes]\n return [axes]",
"def test_get_axes():\n fig, axs = plt.subplots()\n assert all(\n isinstance(ax, mpl.axes.Axes)\n for ax in prettypyplot.tools.get_axes(axs)\n )\n assert all(\n isinstance(ax, mpl.axes.Axes)\n for ax in prettypyplot.tools.get_axes(None)\n )\n with pytest.raises(TypeError):\n prettypyplot.tools.get_axes(fig)",
"def clear_axes_selection(self):\n self.x_axis = ''\n self.y_axis = ''\n self.non_numeric_x_axis = False\n self.count_desired = False\n self.header_choices('x')",
"def axes(self):\n return self._axes",
"def axes(self):\n return self._axes",
"def get_axes(self) -> VGroup:\n return self.axes",
"def get_axes(self) -> VGroup:\n return self.axes",
"def axes_of_symmetry(self):\n if self.number_axes_of_symmetry is None: # distinguish from Falsy 0\n raise NotImplementedError(self.message_unknown)\n return self.number_axes_of_symmetry",
"def axes(self) -> np.ndarray: # array[Axes]\n return self._axes",
"def findaxisbyname(self, *args, **kwargs):\n return _coordsys.coordsys_findaxisbyname(self, *args, **kwargs)",
"def get_data(self):\n return [self.axes]",
"def axesnames(self, axesnames):\n if axesnames is None:\n self._axesnames = None\n else:\n assert isinstance(axesnames, list), 'axesnames must be list'\n self._axesnames = axesnames\n debug('ControllerStartup.axesnames = %s', itemstostr(self._axesnames))",
"def setAxesNames(self):\n \n labels = ['T', 'Z', 'Y', 'X'] + [chr(ord('S')-i) for i in xrange(18)]\n if (len(self.axisList) >= 4):\n i = 0\n else:\n i = 4 - len(self.axisList)\n \n for axis in self.axisList:\n self.axesNames.append(labels[i] + ' - ' + axis.id)\n i += 1",
"def listInputDeviceAxes(*args, **kwargs)->List[AnyStr]:\n pass",
"def customAxisNames(self):\n return []",
"def process_custom_axes(axis_names):\n return axis_names.strip().strip(\"'\").strip('\"').split(',')",
"def test_default_axis_nxdata(self, nexus_base):\n assert isinstance(nexus_base.default_axis, np.ndarray)",
"def feature_axes(self):\n raise NotImplementedError()",
"def axes(*x: Iterable[int]):\n return [_ti_core.Axis(i) for i in x]",
"def test_parse_axes():\n fig, ax = plt.subplots()\n\n # parse correct ax in ax\n argsref = (1, 'a', np.arange(2))\n argsax = prettypyplot.tools.parse_axes(*argsref, ax=ax)\n assert all(\n isinstance(ref, type(parse))\n for ref, parse in zip(argsref, argsax[0])\n )\n assert ax is argsax[1]\n\n # multiple axes\n with pytest.raises(ValueError):\n prettypyplot.tools.parse_axes(ax, ax=ax)\n with pytest.raises(ValueError):\n prettypyplot.tools.parse_axes(1, ax, ax=ax)\n with pytest.raises(ValueError):\n prettypyplot.tools.parse_axes(ax, ax, 1, ax=None)\n\n argsax = prettypyplot.tools.parse_axes(ax, ax=None)\n assert ax is argsax[1]\n\n argsax = prettypyplot.tools.parse_axes(ax=ax)\n assert ax is argsax[1]",
"def showaxes(axl: Union[object, List], whichaxes: str = \"xy\"):\n\n axl = _ax_tolist(axl)\n for ax in axl:\n if ax is None:\n continue\n if \"x\" in whichaxes:\n ax.spines[\"bottom\"].set_visible(True)\n ax.tick_params(bottom=True, labelbottom=True)\n if \"y\" in whichaxes:\n ax.spines[\"left\"].set_visible(True)\n ax.tick_params(left=True, labelleft=True)",
"def exog_names(self):\n return self.data.xnames",
"def _declare_auto_axes_idx(self):\n if not self.axes_idx:\n self.axes_idx = BiMapping(to_first=range(len(self.name_elements)), to_second=range(len(self.name_elements)))",
"def ExpectAxes(self, labels, positions):\n self.assertEqual(self.Param('chxl'), labels)\n self.assertEqual(self.Param('chxp'), positions)",
"def axes_inactive(self) -> np.ndarray:\n return self.axes.flat[self.n_plots:]",
"def _axes(self, X):\n \n return np.arange(len(X.shape) - 1) + 1"
]
| [
"0.697044",
"0.68636376",
"0.6316774",
"0.61642134",
"0.60828024",
"0.60513145",
"0.59091306",
"0.58836395",
"0.58836395",
"0.58783954",
"0.58783954",
"0.58372355",
"0.57716405",
"0.57354414",
"0.57308847",
"0.5713211",
"0.5692361",
"0.5676367",
"0.56661934",
"0.56484467",
"0.5630249",
"0.55856395",
"0.55715257",
"0.5482071",
"0.5480709",
"0.5469916",
"0.54558605",
"0.54538226",
"0.539313",
"0.53841084"
]
| 0.7396134 | 0 |
Returns the views that can be returned by `getMatchingViews`. | def getReachableViews(self):
return [self] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getMatchingViews(self, data, info):\n raise NotImplementedError()",
"def getMatchingViews(self, data, info):\n if not self.isSupportedData(data, info):\n return []\n views = [v for v in self.__views if v.getCachedDataPriority(data, info) != DataView.UNSUPPORTED]\n return views",
"def getViews(self):\n return list(self.__views)",
"def views(self):\n return self._views",
"def getReachableViews(self):\n raise NotImplementedError()",
"def getViews(self):\n raise NotImplementedError()",
"def getViews(self):\n return list(self.__views.keys())",
"def get_views(self):\n return self._get_types_from_default_ns(View)",
"def getMatchingViews(self, data, info):\n priority = self.getCachedDataPriority(data, info)\n if priority == DataView.UNSUPPORTED:\n return []\n return [self]",
"def get_views(self):\n query = mssqlqueries.get_views()\n logger.info(u'Views query: %s', query)\n for tabular_result in self.execute_query(query):\n for row in tabular_result[0]:\n yield (row[0], row[1])",
"def trait_views ( self, klass = None ):\n return self.__class__.__dict__[ ViewTraits ].filter_by( klass )",
"def getViews(read):\n ...",
"def get_requested_views(self,request,returnformat):\n relview_key = getattr(self,'relview_key','relview')\n reqviews = request.query_params.get(relview_key)\n if reqviews is None:\n # if no relview is passed in query_params then fetch\n # from its attribute relview which is set in urls.py\n if self.relview is not None:\n reqviews = self.relview\n # if related views are not fetched either through query_params\n # or in urls.py and is not jsonrequest then fetch all views\n if reqviews is None: #and returnformat != 'json':\n reqviews = 'all'\n if self.jointrel is not None:\n reqviews = reqviews+','+self.jointrel\n #import pdb; pdb.set_trace()\n if reqviews:\n reqviews = reqviews.split(',')\n relviews = self.related_views.keys()\n include = []\n exclude = []\n for reqview in reqviews:\n if reqview == 'all':\n include += relviews\n elif reqview[0] == '-':\n exclude.append(reqview[1:])\n elif reqview not in include:\n include.append(reqview)\n reqviews = subtractlists(include,exclude)\n return reqviews",
"def page_views(self, *args, **kwargs):\r\n return self._get('PageViews', *args, **kwargs)",
"def get_active_views_paths(window):\n visible_views = []\n\n # Priority for the active view\n active_view = window.active_view()\n visible_views.append(active_view.file_name())\n\n num_groups = window.num_groups()\n for group_id in range(num_groups):\n view = window.active_view_in_group(group_id)\n if view != active_view and view.file_name():\n visible_views.append(view.file_name())\n\n return visible_views",
"def get_views(cohesity_client):\n views = cohesity_client.views.get_views().views\n views_list = views if views else []\n for view in views_list:\n exported_res_dict[\"Protection Views\"].append(view.name)\n return views_list",
"def get_views(view_args, model_type):\n # XXX Why pop?\n metadata = view_args.pop('metadata')\n all_view = metadata.get('views', {}).get('all')\n if not all_view:\n all_view= '%s/all'%model_type\n all_count_view = metadata.get('views', {}).get('all_count')\n if not all_count_view:\n all_count_view= '%s/all_count'%model_type\n return all_view, all_count_view",
"def viewVocab(self): \n mapping = []\n views = registration.getViews(IBrowserRequest)\n for view in views:\n if view.name and self.getRenderableView(view.name):\n mapping.append((view.name, view.name))\n return atapi.DisplayList(mapping)",
"def get_view(self, path):\n _view, params = None, {}\n for route, view in self._routes.items():\n match = route.match(path)\n if not match:\n continue\n if match and isinstance(match, dict): # Means route have parameters\n _view, params = view, match\n else:\n _view = view\n break\n return _view, params",
"def other_views(cls):\n return cls.__other_views",
"def get_requested_views(self,request,returnformat):\n if isinstance(request,Request):\n relview_key = getattr(self,'relview_key','relview')\n reqviews = request.query_params.get(relview_key)\n if reqviews is None:\n # if no relview is passed in query_params then fetch\n # from its attribute relview which is set in urls.py\n if self.relview is not None:\n reqviews = self.relview\n # if related views are not fetched either through query_params\n # or in urls.py and is not jsonrequest then fetch all views\n if reqviews is None: #and returnformat != 'json':\n tabmap = getattr(self,'tabmap',None)\n tabkey = getattr(self,'tab_key','tab')\n if tabmap:\n currenttab = request.query_params.get(tabkey)\n if not currenttab:\n if self.defaulttab is not None:\n currenttab = self.defaulttab\n\n requestedtab = tabmap.get(currenttab,self.defaulttab)\n if currenttab and requestedtab:\n reqviews = map(lambda x: x.strip(),requestedtab.strip(',').split(','))\n self._currenttab = currenttab\n if reqviews:\n return reqviews\n return super(TabAPIView,self).get_requested_views(request,returnformat)",
"def views(self):\r\n return resources.Views(self)",
"def sort_views_by_relevance(self):\n window = sublime.active_window()\n\n # add the current view is the most relevant\n views = [self.view]\n try:\n # the second most relevant suggestions are from the indexed panels\n for panel_name in panel_state:\n panel = window.find_output_panel(panel_name)\n panel.file_name = lambda v=panel_name: v \n views.append(panel)\n except Exception as e:\n print('No panel', e)\n\n # the last but not least are the open views\n for view in window.views():\n if view is not self.view:\n views.append(view)\n\n return views",
"def global_matches(self, visualise=False):\n kp = self._common_keypoints(*self.views)\n if visualise:\n self.show()\n logger.debug(f'{len(kp)} common keypoints found')\n return kp",
"def get_view(self, request) -> Optional[View]:\n\n # Grab ViewAction and use sorted_actions to find first match\n sorted_actions = ViewAction.sorted_actions(self.registry)\n\n # Find the first action which matches the args\n for action, view_class in sorted_actions:\n if action.all_predicates_match(request):\n # Use dependency injection to make an instance of\n # that view class\n view_instance = inject(\n dict(), # props\n self.get_injectables(request),\n view_class,\n request=request\n )\n return view_instance\n\n # No matches, return None\n return None",
"def get_views(self, path, year=None, month=None, day=None, hour=None):\n return self._telegraph.method('getViews', path=path, values={\n 'year': year,\n 'month': month,\n 'day': day,\n 'hour': hour\n })",
"def get_view_endpoints(self):\n return []",
"def views(self):\r\n return Views(self)",
"def extract_channel_views(show_views_channel):\n channel,views,=show_views_channel[1]\n return (channel, views)",
"def static_view_finder(viewname, **other):\n return viewname"
]
| [
"0.73828393",
"0.69941866",
"0.6910991",
"0.6866864",
"0.68596447",
"0.6800824",
"0.67730147",
"0.6732255",
"0.6717297",
"0.6404748",
"0.63528705",
"0.6350586",
"0.62178314",
"0.613579",
"0.6117917",
"0.611534",
"0.6032207",
"0.59530085",
"0.59079045",
"0.5888702",
"0.5878553",
"0.5871562",
"0.58551294",
"0.558645",
"0.5571113",
"0.55264425",
"0.55137056",
"0.5510775",
"0.54915375",
"0.5444633"
]
| 0.70170206 | 1 |
Returns the priority of using this view according to the data. `UNSUPPORTED` means this view can't display this data; `1` means this view can display the data; `100` means this view should be used for this data; `1000` is the max value used by the views provided by silx; ... | def getDataPriority(self, data, info):
return DataView.UNSUPPORTED | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __getBestView(self, data, info):\n if not self.isSupportedData(data, info):\n return None\n views = [(v.getCachedDataPriority(data, info), v) for v in self.__views.keys()]\n views = filter(lambda t: t[0] > DataView.UNSUPPORTED, views)\n views = sorted(views, key=lambda t: t[0], reverse=True)\n\n if len(views) == 0:\n return None\n elif views[0][0] == DataView.UNSUPPORTED:\n return None\n else:\n return views[0][1]",
"def getPriority(self):",
"def get_io_priority(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetIoPriority', self.handle)",
"def get_priority(self):\n priorities = dict(PRIORITY_CHOICES)\n return priorities.get(self.priority, \"N/A\")",
"def getMatchingViews(self, data, info):\n if not self.isSupportedData(data, info):\n return []\n views = [v for v in self.__views if v.getCachedDataPriority(data, info) != DataView.UNSUPPORTED]\n return views",
"def priority(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"priority\")",
"def priority(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"priority\")",
"def priority(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"priority\")",
"def priority(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"priority\")",
"def effective_priority (self):\n return self.priority if self.match.is_wildcarded else (1<<16) + 1",
"def priority(self) -> int:\n return pulumi.get(self, \"priority\")",
"def view_human_priority(unused1, unused2, model, unused3):\n del unused1, unused2, unused3\n return Markup(u\"%s\" % (model.priority_human)) if model else u\"\"",
"def get_priority(self):\n return self.options[\"priority\"]",
"def priority(self):\n return self._pri",
"def priority(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"priority\")",
"def priority(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"priority\")",
"def get_priority(self):\n return self.options['priority']",
"def _get_fabric_priority(self):\n return self.__fabric_priority",
"def _get_fabric_priority(self):\n return self.__fabric_priority",
"def _get_fabric_priority(self):\n return self.__fabric_priority",
"def _get_fabric_priority(self):\n return self.__fabric_priority",
"def _get_fabric_priority(self):\n return self.__fabric_priority",
"def _get_fabric_priority(self):\n return self.__fabric_priority",
"def get_priority(self) -> str:\n if self.health >= 75 and self.food >= 75 and self.water >= 75:\n if min(self.food, self.water) == self.food:\n return 'food'\n else:\n return 'water'\n else:\n if self.food >= 75 and self.water >= 75:\n return 'monster'\n else:\n return 'food'",
"def vm_priority(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"vm_priority\")",
"def getPriority(self):\n return self.priority",
"def _get_priority(self):\n return self.__priority",
"def _get_priority(self):\n return self.__priority",
"def _get_priority(self):\n return self.__priority",
"def _get_priority(self):\n return self.__priority"
]
| [
"0.73111045",
"0.5799851",
"0.56085545",
"0.5550155",
"0.5519779",
"0.54540956",
"0.54540956",
"0.54540956",
"0.54540956",
"0.5422215",
"0.53575385",
"0.53053236",
"0.5287806",
"0.52869827",
"0.5281557",
"0.5281557",
"0.52641714",
"0.525869",
"0.525869",
"0.525869",
"0.525869",
"0.525869",
"0.525869",
"0.52505094",
"0.5249749",
"0.52232295",
"0.5178616",
"0.5178616",
"0.5178616",
"0.5178616"
]
| 0.7323587 | 0 |
Returns all views that can be reached at one point. This method returns any sub view provided (recursively). | def getReachableViews(self):
raise NotImplementedError() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getReachableViews(self):\n return [self]",
"def views(self):\n return self._views",
"def getViews(self):\n raise NotImplementedError()",
"def getViews(self):\n return list(self.__views)",
"def child_views(self):\n return self.children",
"def getViews(self):\n return list(self.__views.keys())",
"def get_view_endpoints(self):\n return []",
"def get_active_views_paths(window):\n visible_views = []\n\n # Priority for the active view\n active_view = window.active_view()\n visible_views.append(active_view.file_name())\n\n num_groups = window.num_groups()\n for group_id in range(num_groups):\n view = window.active_view_in_group(group_id)\n if view != active_view and view.file_name():\n visible_views.append(view.file_name())\n\n return visible_views",
"def page_views(self, *args, **kwargs):\r\n return self._get('PageViews', *args, **kwargs)",
"def getViews(read):\n ...",
"def get_requested_views(self,request,returnformat):\n relview_key = getattr(self,'relview_key','relview')\n reqviews = request.query_params.get(relview_key)\n if reqviews is None:\n # if no relview is passed in query_params then fetch\n # from its attribute relview which is set in urls.py\n if self.relview is not None:\n reqviews = self.relview\n # if related views are not fetched either through query_params\n # or in urls.py and is not jsonrequest then fetch all views\n if reqviews is None: #and returnformat != 'json':\n reqviews = 'all'\n if self.jointrel is not None:\n reqviews = reqviews+','+self.jointrel\n #import pdb; pdb.set_trace()\n if reqviews:\n reqviews = reqviews.split(',')\n relviews = self.related_views.keys()\n include = []\n exclude = []\n for reqview in reqviews:\n if reqview == 'all':\n include += relviews\n elif reqview[0] == '-':\n exclude.append(reqview[1:])\n elif reqview not in include:\n include.append(reqview)\n reqviews = subtractlists(include,exclude)\n return reqviews",
"def other_views(cls):\n return cls.__other_views",
"def get_all_ancestors(self, view_dict):\n result = []\n parent_id = self.__safe_dict_get(view_dict, 'parent', -1)\n if 0 <= parent_id < len(self.views):\n result.append(parent_id)\n result += self.get_all_ancestors(self.views[parent_id])\n return result",
"def getMatchingViews(self, data, info):\n raise NotImplementedError()",
"def get_views(cohesity_client):\n views = cohesity_client.views.get_views().views\n views_list = views if views else []\n for view in views_list:\n exported_res_dict[\"Protection Views\"].append(view.name)\n return views_list",
"def get_views(self):\n return self._get_types_from_default_ns(View)",
"def get_requested_views(self,request,returnformat):\n if isinstance(request,Request):\n relview_key = getattr(self,'relview_key','relview')\n reqviews = request.query_params.get(relview_key)\n if reqviews is None:\n # if no relview is passed in query_params then fetch\n # from its attribute relview which is set in urls.py\n if self.relview is not None:\n reqviews = self.relview\n # if related views are not fetched either through query_params\n # or in urls.py and is not jsonrequest then fetch all views\n if reqviews is None: #and returnformat != 'json':\n tabmap = getattr(self,'tabmap',None)\n tabkey = getattr(self,'tab_key','tab')\n if tabmap:\n currenttab = request.query_params.get(tabkey)\n if not currenttab:\n if self.defaulttab is not None:\n currenttab = self.defaulttab\n\n requestedtab = tabmap.get(currenttab,self.defaulttab)\n if currenttab and requestedtab:\n reqviews = map(lambda x: x.strip(),requestedtab.strip(',').split(','))\n self._currenttab = currenttab\n if reqviews:\n return reqviews\n return super(TabAPIView,self).get_requested_views(request,returnformat)",
"def view_roots(r):\r\n owner = r.owner\r\n if owner is not None:\r\n try:\r\n view_map = owner.op.view_map\r\n view_map = dict([(owner.outputs[o], i) for o, i in view_map.items()])\r\n except AttributeError:\r\n return [r]\r\n if r in view_map:\r\n answer = []\r\n for i in view_map[r]:\r\n answer += view_roots(owner.inputs[i])\r\n return answer\r\n else:\r\n return [r]\r\n else:\r\n return [r]",
"def getMatchingViews(self, data, info):\n if not self.isSupportedData(data, info):\n return []\n views = [v for v in self.__views if v.getCachedDataPriority(data, info) != DataView.UNSUPPORTED]\n return views",
"def children(self):\r\n return self.location_set.filter(hidden=False)",
"def get_views(self):\n query = mssqlqueries.get_views()\n logger.info(u'Views query: %s', query)\n for tabular_result in self.execute_query(query):\n for row in tabular_result[0]:\n yield (row[0], row[1])",
"def getSnippits(self):\n snippits = []\n for viewName in self.view:\n view = self.getRenderableView(viewName)\n if view:\n view = view.__of__(self)\n snippit = view.render()\n \n root = etree.HTML(snippit)\n contentRoot = root.xpath(\"//*[@id='content']\")\n if len(contentRoot):\n root = contentRoot[0]\n snippits.append(etree.tostring(root, method=\"html\"))\n \n return snippits",
"def views(self):\r\n return resources.Views(self)",
"def reachable_province(self, ctx):\n return self.reachable_tiles(ctx)",
"def related_view_filter():\n pass",
"def __view(self, top, view):\n\t\tresult = []\n\t\tdepth = -1\n\t\tfor seeking in reversed(view):\n\t\t\twhile depth > seeking:\n\t\t\t\tdepth -= 1\n\t\t\t\ttop = top[self.NODE_PRIOR]\n\t\t\tresult.append(top[self.NODE_SEMANTIC])\n\t\tresult.reverse()\n\t\treturn result",
"def trait_views ( self, klass = None ):\n return self.__class__.__dict__[ ViewTraits ].filter_by( klass )",
"def getMatchingViews(self, data, info):\n priority = self.getCachedDataPriority(data, info)\n if priority == DataView.UNSUPPORTED:\n return []\n return [self]",
"def get_views(view_args, model_type):\n # XXX Why pop?\n metadata = view_args.pop('metadata')\n all_view = metadata.get('views', {}).get('all')\n if not all_view:\n all_view= '%s/all'%model_type\n all_count_view = metadata.get('views', {}).get('all_count')\n if not all_count_view:\n all_count_view= '%s/all_count'%model_type\n return all_view, all_count_view",
"def get_all_children(self, view_dict):\n children = self.__safe_dict_get(view_dict, 'children')\n if not children:\n return set()\n children = set(children)\n for child in children:\n children_of_child = self.get_all_children(self.views[child])\n children.union(children_of_child)\n return children"
]
| [
"0.7994752",
"0.65861917",
"0.6485654",
"0.6417944",
"0.6358205",
"0.6335218",
"0.6190338",
"0.59789574",
"0.58850986",
"0.5878531",
"0.58777565",
"0.58640367",
"0.585607",
"0.5808636",
"0.57850796",
"0.57698834",
"0.56750935",
"0.55629796",
"0.54616225",
"0.54540443",
"0.5427827",
"0.54025817",
"0.5398855",
"0.53790003",
"0.53765094",
"0.5365123",
"0.5340861",
"0.5307898",
"0.5276642",
"0.52236485"
]
| 0.7903768 | 1 |
Returns sub views matching this data and info. This method returns any sub view provided (recursively). | def getMatchingViews(self, data, info):
raise NotImplementedError() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getMatchingViews(self, data, info):\n priority = self.getCachedDataPriority(data, info)\n if priority == DataView.UNSUPPORTED:\n return []\n return [self]",
"def child_views(self):\n return self.children",
"def getViews(self):\n raise NotImplementedError()",
"def getViews(read):\n ...",
"def getMatchingViews(self, data, info):\n if not self.isSupportedData(data, info):\n return []\n views = [v for v in self.__views if v.getCachedDataPriority(data, info) != DataView.UNSUPPORTED]\n return views",
"def getReachableViews(self):\n return [self]",
"def views(self):\n return self._views",
"def get_view(self):\n for w in self.child_widgets():\n return w",
"def getReachableViews(self):\n raise NotImplementedError()",
"def activeView(self):\r\n subWin = self.parentWidget().activeSubWindow()\r\n \r\n if subWin:\r\n for child in subWin.children(): \r\n if 'view' in child.objectName(): # Grab the View from the active tab/sub-window\r\n return child",
"def other_views(cls):\n return cls.__other_views",
"def subsections(self):\n return self.children()",
"def get_views(self):\n query = mssqlqueries.get_views()\n logger.info(u'Views query: %s', query)\n for tabular_result in self.execute_query(query):\n for row in tabular_result[0]:\n yield (row[0], row[1])",
"def getViews(self):\n return list(self.__views)",
"def views(self):\r\n return Views(self)",
"def page_views(self, *args, **kwargs):\r\n return self._get('PageViews', *args, **kwargs)",
"def trait_views ( self, klass = None ):\n return self.__class__.__dict__[ ViewTraits ].filter_by( klass )",
"def get_views(view_args, model_type):\n # XXX Why pop?\n metadata = view_args.pop('metadata')\n all_view = metadata.get('views', {}).get('all')\n if not all_view:\n all_view= '%s/all'%model_type\n all_count_view = metadata.get('views', {}).get('all_count')\n if not all_count_view:\n all_count_view= '%s/all_count'%model_type\n return all_view, all_count_view",
"def views(self):\r\n return resources.Views(self)",
"def get_requested_views(self,request,returnformat):\n if isinstance(request,Request):\n relview_key = getattr(self,'relview_key','relview')\n reqviews = request.query_params.get(relview_key)\n if reqviews is None:\n # if no relview is passed in query_params then fetch\n # from its attribute relview which is set in urls.py\n if self.relview is not None:\n reqviews = self.relview\n # if related views are not fetched either through query_params\n # or in urls.py and is not jsonrequest then fetch all views\n if reqviews is None: #and returnformat != 'json':\n tabmap = getattr(self,'tabmap',None)\n tabkey = getattr(self,'tab_key','tab')\n if tabmap:\n currenttab = request.query_params.get(tabkey)\n if not currenttab:\n if self.defaulttab is not None:\n currenttab = self.defaulttab\n\n requestedtab = tabmap.get(currenttab,self.defaulttab)\n if currenttab and requestedtab:\n reqviews = map(lambda x: x.strip(),requestedtab.strip(',').split(','))\n self._currenttab = currenttab\n if reqviews:\n return reqviews\n return super(TabAPIView,self).get_requested_views(request,returnformat)",
"def __getView(self, raw_view_id):\n if iDevice.dump_view:\n self.__dumpview()\n id_RE = re.compile(\"^(id/\\D+)\\((\\S+)\\)$\")\n if DEBUG:\n printLog(self.threadName + \"[__getView] raw view id:%s\" % raw_view_id)\n if id_RE.match(raw_view_id):\n # search the child by sequence path\n viewId, seq_string = id_RE.search(raw_view_id).groups()\n if DEBUG:\n printLog(self.threadName + \"[__getView] view id:%s, seq:%s\" % (viewId, seq_string))\n seqs = seq_string.split(',')\n tv = self.__getChildView(viewId, seqs)\n else:\n # search with the given id directly\n if DEBUG:\n printLog(self.threadName + \"finding view by id %s ...\" % raw_view_id, logging.DEBUG)\n tv = self.vc.findViewById(raw_view_id)\n # if tv:\n # printLog('Found view %s.' % raw_view_id, logging.DEBUG)\n # self.resultFlag = True\n # else:\n # printLog('Target view %s not found.' % raw_view_id, logging.ERROR)\n # self.resultFlag = False\n\n return tv",
"def getViews(self):\n return list(self.__views.keys())",
"def get_view(self, path):\n _view, params = None, {}\n for route, view in self._routes.items():\n match = route.match(path)\n if not match:\n continue\n if match and isinstance(match, dict): # Means route have parameters\n _view, params = view, match\n else:\n _view = view\n break\n return _view, params",
"def get_views(self, path, year=None, month=None, day=None, hour=None):\n return self._telegraph.method('getViews', path=path, values={\n 'year': year,\n 'month': month,\n 'day': day,\n 'hour': hour\n })",
"def viewVocab(self): \n mapping = []\n views = registration.getViews(IBrowserRequest)\n for view in views:\n if view.name and self.getRenderableView(view.name):\n mapping.append((view.name, view.name))\n return atapi.DisplayList(mapping)",
"def get_requested_views(self,request,returnformat):\n relview_key = getattr(self,'relview_key','relview')\n reqviews = request.query_params.get(relview_key)\n if reqviews is None:\n # if no relview is passed in query_params then fetch\n # from its attribute relview which is set in urls.py\n if self.relview is not None:\n reqviews = self.relview\n # if related views are not fetched either through query_params\n # or in urls.py and is not jsonrequest then fetch all views\n if reqviews is None: #and returnformat != 'json':\n reqviews = 'all'\n if self.jointrel is not None:\n reqviews = reqviews+','+self.jointrel\n #import pdb; pdb.set_trace()\n if reqviews:\n reqviews = reqviews.split(',')\n relviews = self.related_views.keys()\n include = []\n exclude = []\n for reqview in reqviews:\n if reqview == 'all':\n include += relviews\n elif reqview[0] == '-':\n exclude.append(reqview[1:])\n elif reqview not in include:\n include.append(reqview)\n reqviews = subtractlists(include,exclude)\n return reqviews",
"def data():\n return app_views",
"def subresources(self):\n return self._get_related_resources(True)",
"def _get_view(self, cursor):\n raise NotImplementedError",
"def __getChildView(self, parentId, childSeq):\n # child_view = None\n # str_getChildView = \"self.vc.findViewById('\" + parentId + \"')\"\n # for index in childSeq:\n # str_getChildView += ('.children[' + str(index) + ']')\n # printLog(self.threadName + \"executing child_view=%s\" % str_getChildView)\n # exec 'child_view=' + str_getChildView\n # return child_view\n pv = self.vc.findViewById(parentId)\n if not pv:\n # printLog(self.threadName + '[__getChildView] could not find parent view %s' % parentId, logging.DEBUG)\n return None\n for index in childSeq:\n if DEBUG:\n printLog(self.threadName + '[__getChildView] searching child view: %s[%s]' % (pv.getId(), index),\n logging.DEBUG)\n cv = pv.children[int(index)]\n if cv:\n if DEBUG:\n printLog(self.threadName + '[__getChildView] found child view: %s' % cv.getId(), logging.DEBUG)\n pv = cv\n else:\n # printLog(self.threadName + '[__getChildView] could not find child of %s' % pv.getId(), logging.DEBUG)\n return None\n return pv"
]
| [
"0.66623676",
"0.6509976",
"0.62610114",
"0.60390395",
"0.60192496",
"0.58090764",
"0.58028334",
"0.5798752",
"0.56958425",
"0.56136817",
"0.55903375",
"0.5563396",
"0.5545881",
"0.55402297",
"0.5515465",
"0.5467731",
"0.54673463",
"0.54271245",
"0.5401829",
"0.53947186",
"0.5365444",
"0.52900463",
"0.52760786",
"0.52379996",
"0.5227296",
"0.5202974",
"0.51645195",
"0.51401776",
"0.5098328",
"0.50890964"
]
| 0.7676684 | 0 |
Add a new dataview to the available list. | def addView(self, dataView):
hooks = self.getHooks()
if hooks is not None:
dataView.setHooks(hooks)
self.__views[dataView] = None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def addView(self, dataView):\n hooks = self.getHooks()\n if hooks is not None:\n dataView.setHooks(hooks)\n self.__views.append(dataView)",
"def add_view(self, *args, **kwargs):\n return self._resources_manager.add_view(*args, **kwargs)",
"def add_view_step(self, view_step):\n self._data_dict[self.KEY_VIEW_STEPS].append(view_step)",
"def addViewToDb(self,name):\n\t\tsql = \"INSERT INTO hudson_views(viewname) VALUES (%s)\"\n\t\tcsr = self.db.cursor()\n\t\tcsr.execute(sql,[name])",
"def DoAdd(self,event):\r\n newItem = self.data.add()\r\n if newItem and newItem not in self.items:\r\n self.items = self.data.getItemList()\r\n index = self.items.index(newItem)\r\n self.list.InsertItems([newItem],index)",
"def add_item(self) -> None:\n item = self.get_selected_item(self.tree_cache)\n if item is None:\n return\n\n text, ok = QInputDialog.getText(self, \"Appending new data\", \"Data:\")\n if ok:\n parent_id = item.data().get_id()\n data = Data(text, parent_id)\n data_node = DataNode(instance=data)\n self.data_cache.append(data_node)\n self._data_controller.update_node_hierarchy(self.data_cache, remove_from_list=True)\n self.sync_tree_cache()",
"def add_view(self, view):\n # Add to views\n self._views.append(view)\n\n # If app was provided in constructor, register view with Flask app\n if self.app is not None:\n self.app.register_blueprint(view.create_blueprint(self))\n if view.is_menu:\n self._add_view_to_menu(view)",
"def add_new_item(self):\n self.recs += 1\n self.add_rec_fields(self.recs)\n vbar = self.scrl.verticalScrollBar()\n vbar.setMaximum(vbar.maximum() + 36)\n vbar.setValue(vbar.maximum())",
"def add_view( *args, **kwargs ):",
"def _recreate_dvl_data(self):\n if self._dvl_data is not None: #destroy the old control object if it had been created\n self._dvl_data.Destroy()\n self._dvl_columns = []\n \n self._dvl_data = wx.dataview.DataViewListCtrl(self, wx.ID_ANY) #make a new control object\n self._gbs_main.Add(self._dvl_data, wx.GBPosition(0, 0), wx.GBSpan(3, 1), wx.ALL | wx.EXPAND) #add the new control to the sizer\n self.Layout() #update the layout of the window to account for the new control",
"def view_add(\n request: HttpRequest,\n workflow: Optional[Workflow] = None,\n) -> JsonResponse:\n # Get the workflow element\n if workflow.nrows == 0:\n messages.error(\n request,\n _('Cannot add a view to a workflow without data'))\n return JsonResponse({'html_redirect': ''})\n\n # Form to read/process data\n form = ViewAddForm(request.POST or None, workflow=workflow)\n\n return save_view_form(\n request,\n form,\n 'table/includes/partial_view_add.html',\n )",
"def AddSlice(self, data_slice):\n self.slices.append(data_slice)",
"def view_list(self, view_list):\n\n self._view_list = view_list",
"def addDataPoints(self):\n pass",
"def append(self, data):\n self.data_list.append(data)",
"def _add_view_to_menu(self, view):\n self._add_menu_item(MenuView(view.name, view), view.category)",
"def _create_or_alter_view(self, survey_data):\n self.log.info(\"Creating or altering view vw_AllSurveyData \")\n edit_view = self._get_query('edit_view') + \"( \" + survey_data + \" )\"\n self.db.execute_query(edit_view)\n self.log.info(\"View was edited successfully\")",
"def on_add_clicked(self):\n selected_indexes = self.ui.availListView.selectedIndexes()\n for index in selected_indexes:\n row = self.availModel.itemFromIndex(index).row()\n #rowList = self.availModel.takeRow(row)\n student = self.availModel.item(row, 0).text()\n sid = self.availModel.item(row, 1).text()\n try:\n # Actually add the student for the date into the database\n self.db.student_attend(sid, self.date_string)\n except KeyError:\n # Display error window if student missing\n err_msg = QtGui.QErrorMessage()\n err_msg.showMessage(\"Sid not found for student %s\" % student)\n\n self.update_views()",
"def _add_view(self, window, view):\r\n\r\n # If no 'relative_to' is specified then the view is positioned\r\n # relative to the editor area.\r\n if len(view.relative_to) > 0:\r\n relative_to = window.get_view_by_id(view.relative_to)\r\n \r\n else:\r\n relative_to = None\r\n\r\n # Add the view to the window.\r\n window.add_view(\r\n view, view.position, relative_to, (view.width, view.height)\r\n )\r\n\r\n return",
"def add_data(self, new_data, *args):\n raise NotImplementedError",
"def add_tree_view(self):\n self.data_view = QTreeView()\n self.data_view.setRootIsDecorated(False)\n self.data_view.setAlternatingRowColors(True)\n self.mbox.addWidget(self.data_view)\n\n self.data_layout = QHBoxLayout()\n self.data_layout.addWidget(self.data_view)\n\n self.model = self.create_track_model(self)\n self.data_view.setModel(self.model)",
"def add(self, value: object) -> None:\n self.da.append(value)",
"def add(self, value: object) -> None:\n self.da.append(value)",
"def add_data(self, data, data_label):\n old_data_len = len(self.data_collection)\n\n # Include the data in the data collection\n data_label = data_label or \"New Data\"\n self.data_collection[data_label] = data\n\n # Send out a toast message\n snackbar_message = SnackbarMessage(\n f\"Data '{data_label}' successfully added.\", sender=self)\n self.hub.broadcast(snackbar_message)",
"def add_views(self, *args):\n for view in args:\n self.add_view(view)",
"def add_data(self, df):\n # TODO: improve merging code\n self.data = self.data.append(df, ignore_index=False)\n self.data = self.data[~self.data.index.duplicated(keep='first')]",
"def add_data(self, data: List[dict]):\n raise NotImplementedError()",
"def add_data_to_viewer(self, viewer_reference, data_label,\n clear_other_data=False):\n viewer_item = self._viewer_item_by_reference(viewer_reference)\n data_id = self._data_id_from_label(data_label)\n\n data_ids = viewer_item['selected_data_items'] \\\n if not clear_other_data else []\n\n if data_id is not None:\n data_ids.append(data_id)\n self._update_selected_data_items(viewer_item['id'], data_ids)\n else:\n raise ValueError(\n f\"No data item found with label '{data_label}'. Label must be one \"\n f\"of:\\n\\t\" + f\"\\n\\t\".join([\n data_item['name'] for data_item in self.state.data_items]))",
"def _on_data_added(self, msg):\n self._link_new_data()\n data_item = self._create_data_item(msg.data.label)\n self.state.data_items.append(data_item)",
"def _add_level_to_view(self, level):\n key = Level.key(self.sorting)(level)\n index = bisect.bisect(self.view_keys, key)\n self.view_keys[index:index] = [key]\n\n # If sorting is reversed, the key list and view are in different orders\n if(self.sorting & Sorting.Reversed):\n index = len(self.view_list) - index\n\n\n self.list_lock.acquire()\n\n self.beginInsertRows(QModelIndex(), index, index)\n self.view_list[index:index] = [level]\n\n self.endInsertRows()\n\n self.list_lock.release()"
]
| [
"0.7770298",
"0.62751746",
"0.6245295",
"0.61709106",
"0.6146605",
"0.6143366",
"0.6009386",
"0.59872663",
"0.5966884",
"0.5899624",
"0.5888188",
"0.5862685",
"0.58360523",
"0.574251",
"0.5739594",
"0.5723594",
"0.5711219",
"0.5705367",
"0.5701599",
"0.567094",
"0.5650077",
"0.56216997",
"0.56216997",
"0.55986136",
"0.55810106",
"0.55407023",
"0.55243397",
"0.5508145",
"0.5485765",
"0.54666734"
]
| 0.6986231 | 1 |
Returns the list of registered views | def getViews(self):
return list(self.__views.keys()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getViews(self):\n return list(self.__views)",
"def views(self):\n return self._views",
"def get_views(self):\n return self._get_types_from_default_ns(View)",
"def getViews(self):\n raise NotImplementedError()",
"def get_views(cohesity_client):\n views = cohesity_client.views.get_views().views\n views_list = views if views else []\n for view in views_list:\n exported_res_dict[\"Protection Views\"].append(view.name)\n return views_list",
"def views(self):\r\n return resources.Views(self)",
"def get_view_endpoints(self):\n return []",
"def getViewListFromDB(self):\n\t\tsql = \"SELECT viewname from hudson_views\"\n\t\tcsr = self.db.cursor()\n\t\tcsr.execute(sql)\n\t\tdata = [ x[0] for x in csr.fetchall() ]\n\t\treturn data",
"def list_views(self, repo):\n return sorted(self.user_con.list_views(repo=repo))",
"def getViews(read):\n ...",
"def getReachableViews(self):\n return [self]",
"def viewVocab(self): \n mapping = []\n views = registration.getViews(IBrowserRequest)\n for view in views:\n if view.name and self.getRenderableView(view.name):\n mapping.append((view.name, view.name))\n return atapi.DisplayList(mapping)",
"def page_views(self, *args, **kwargs):\r\n return self._get('PageViews', *args, **kwargs)",
"def get_view_plugins():\n from pyjen.view import View\n retval = []\n for plugin in get_plugins():\n if issubclass(plugin, View):\n retval.append(plugin)\n\n return retval",
"def get_views(self):\n query = mssqlqueries.get_views()\n logger.info(u'Views query: %s', query)\n for tabular_result in self.execute_query(query):\n for row in tabular_result[0]:\n yield (row[0], row[1])",
"def views(self):\r\n return Views(self)",
"def getReachableViews(self):\n raise NotImplementedError()",
"def list(self, request):\n\n viewset_list = [\n 'User\\'s action (list,create,retrieve ,update , partial_update)',\n 'Automatically maps to the urls using Routers.',\n 'Provides more functionality with less code.',\n ]\n\n return Response({'message':'Hello From ViewSet' , 'viewset':viewset_list})",
"def data():\n return app_views",
"def trait_views ( self, klass = None ):\n return self.__class__.__dict__[ ViewTraits ].filter_by( klass )",
"def list(request):\n return EntryView.__index(request)",
"def get_active_views_paths(window):\n visible_views = []\n\n # Priority for the active view\n active_view = window.active_view()\n visible_views.append(active_view.file_name())\n\n num_groups = window.num_groups()\n for group_id in range(num_groups):\n view = window.active_view_in_group(group_id)\n if view != active_view and view.file_name():\n visible_views.append(view.file_name())\n\n return visible_views",
"def views(self, views):\n\n self._views = views",
"def local_views():\n\tpass",
"def reload_blueprints():\n\t\tmod = lambda view: importlib.import_module('%s.%s.views' % (root, view))\n\t\treturn [getattr(mod(view), view) for view in app.config['LIVE']]",
"def list(default_view):\n ListCommandExecutor(default_view).list()",
"def createViews(views):\n ...",
"def get_views(view_args, model_type):\n # XXX Why pop?\n metadata = view_args.pop('metadata')\n all_view = metadata.get('views', {}).get('all')\n if not all_view:\n all_view= '%s/all'%model_type\n all_count_view = metadata.get('views', {}).get('all_count')\n if not all_count_view:\n all_count_view= '%s/all_count'%model_type\n return all_view, all_count_view",
"def register_views(app: Application, base: str):\n cors = aiohttp_cors.setup(app)\n\n for view in views:\n logger.info(\"Registered %s at %s\", view.__name__, base + view.url)\n view.register_route(app, base)\n view.enable_cors(cors)",
"def get_authenticators_for_view(self, view_name):\n pass"
]
| [
"0.80495256",
"0.7825242",
"0.7322528",
"0.71818465",
"0.70918703",
"0.7040627",
"0.7036368",
"0.70254064",
"0.6989426",
"0.6974881",
"0.6926255",
"0.69062054",
"0.6688032",
"0.6612111",
"0.64889175",
"0.63672256",
"0.63298315",
"0.6300513",
"0.62238866",
"0.61698407",
"0.6074536",
"0.60400707",
"0.6014029",
"0.6011854",
"0.5980789",
"0.5975432",
"0.59713465",
"0.59683883",
"0.595023",
"0.5933417"
]
| 0.8089164 | 0 |
Returns the best view according to priorities. | def __getBestView(self, data, info):
if not self.isSupportedData(data, info):
return None
views = [(v.getCachedDataPriority(data, info), v) for v in self.__views.keys()]
views = filter(lambda t: t[0] > DataView.UNSUPPORTED, views)
views = sorted(views, key=lambda t: t[0], reverse=True)
if len(views) == 0:
return None
elif views[0][0] == DataView.UNSUPPORTED:
return None
else:
return views[0][1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_best_candidate(self):\n if not self.scores:\n return None\n return self.te_list[self.scores.index(max(self.scores))]",
"def get_best_known_model(self) -> Tuple[Optional[Path], int]:\n return self._get_first_model(sort='total_score', desc=False)",
"def get_best_solution(self):\n if not self.tours:\n raise Exception('No solution has been computed yet')\n scores = {s:get_cost(self.tours[s],self) for s in self.tours}\n best = min(scores,key=scores.get)\n print('The best solution is given by {} with score {}'.format(best,scores[best]))\n return self.tours[best]",
"def _choose_best_option(self):",
"def get_top_view():\n topView = RedisHelper.get_cache(KEY_TOP_VIEW)\n if RedisHelper.is_cache_exist(KEY_TOP_VIEW) is False:\n topView = list(Comment.objects.raw(SQL_VIEW_TOP))\n RedisHelper.create_cache(KEY_TOP_VIEW, topView, RedisTimeOut.REDIS_TIMEOUT_5_MIN)\n return topView",
"def get_view(self, request) -> Optional[View]:\n\n # Grab ViewAction and use sorted_actions to find first match\n sorted_actions = ViewAction.sorted_actions(self.registry)\n\n # Find the first action which matches the args\n for action, view_class in sorted_actions:\n if action.all_predicates_match(request):\n # Use dependency injection to make an instance of\n # that view class\n view_instance = inject(\n dict(), # props\n self.get_injectables(request),\n view_class,\n request=request\n )\n return view_instance\n\n # No matches, return None\n return None",
"def sort_views_by_relevance(self):\n window = sublime.active_window()\n\n # add the current view is the most relevant\n views = [self.view]\n try:\n # the second most relevant suggestions are from the indexed panels\n for panel_name in panel_state:\n panel = window.find_output_panel(panel_name)\n panel.file_name = lambda v=panel_name: v \n views.append(panel)\n except Exception as e:\n print('No panel', e)\n\n # the last but not least are the open views\n for view in window.views():\n if view is not self.view:\n views.append(view)\n\n return views",
"def get_best(self):\n scores, ids = self.sort_best()\n return scores[1], ids[1]",
"def getBestOption(self):\n if len(self.Data) < 1:\n return None\n else:\n bestR = max(self.Data.items(), key=lambda x: x[1]['SPat'].I)\n return bestR[1]",
"def prioritized_viewers():\n\n viewers = [ep.load() for ep in iter_entry_points(entry_point_group['viewers'])]\n return sorted(viewers, key=operator.attrgetter('priority'), reverse=True)",
"def get_best_model(self):\n return self.best_model",
"def get_best(self, number=None):\n\t\tranking = sorted(self.pictures.items(), key=lambda x: x[1], reverse=True)\n\t\treturn ranking[slice(number)]",
"def find_best_match(organ: Organ, wait_list: WaitList,\n weights: Dict[int, float]) -> Optional[Patient]:\n # ANSI codes to emphasize output\n bold_red, red, reset = '\\033[31;1m', '\\033[31m', '\\033[0m'\n matches = wait_list.get_prioritized_patients(organ)\n\n # returns the patient with the highest priority within acceptable proximity\n while len(matches) != 0:\n patient = heapq._heappop_max(matches) # type: ignore\n if organ.viability >= weights[patient.location] - 10:\n return patient\n\n # in the event there are no matches\n print(f'\\n{bold_red}The following organ has no suitable matches:'\n f'\\n{red}{organ.__str__()}{reset}')\n return None",
"def get_personal_best(self):\n return self._personal_best",
"def _choose_best_trip(self):\n times = [(key, self._trips_dict[key].get_duration()) for key in self._trips_dict.keys()\n if self._trips_dict[key] is not None]\n self._primary_mode = min(times, key=lambda tup: tup[1])[0]",
"def get_highest_priority(self):\n for i in self.query.index.values.tolist():\n if not int(self.query.loc[i,'in_%s'%self.program]):\n pick = self.query.loc[i]\n break\n return pick",
"def best_action(self, observation, sess, weighted=False):\n if weighted:\n return self.weighted_choice(observation, sess)[0]\n else:\n return self.best_choice(observation, sess)[0]",
"def personal_best(scores):\n# return sorted(scores, reverse=True)[0]\n return max(scores)",
"def best_node(self):\n nodes = self._all_nodes()\n sorted_nodes, _ = self.scorer.sort(nodes)\n return sorted_nodes[0]",
"def get_reranker_best(self):\n if len(self.parses):\n return min(self, key=lambda parse: parse.reranker_rank)\n else:\n return None",
"def get_best( self ):\n if len(self.listScore) < 1:\n if self.bMinimumIsBest: return 9999,\"Unknown\"\n else: return -1,\"Unknown\"\n return self.listScore[0]",
"def personal_best(scores):\n return max(scores)",
"def strategy_best(cookies, cps, history, time_left, build_info):\n info = build_info.clone()\n best_choice = None\n best_ratio = 0.0\n choices = info.build_items()\n for item in choices:\n ratio = max_return(cookies, cps, time_left, info.get_cost(item), info.get_cps(item))\n\n if ratio >= best_ratio:\n best_choice = item\n best_ratio = ratio\n print best_ratio\n\n if (time_left * cps + cookies) < info.get_cost(best_choice):\n return None\n\n return best_choice",
"def get_parser_best(self):\n if len(self.parses):\n return min(self, key=lambda parse: parse.parser_rank)\n else:\n return None",
"def _get_lip_best(self) -> float:\n pass",
"def best(self, side):\n return Library.functions.best(self._book, side)",
"def get_best_match(self, list):\n raise NotImplementedError",
"def get_best_known_model(cls, model_dir) -> Tuple[Optional[Path], int]:\n return cls._get_first_model(model_dir, sort='total_score', desc=False)",
"def get_best_worst():\n return make_response(jsonify(storage.ranking), 200)",
"def get_player_best_score(self, player):\n return self.get_highscores().filter(player=player).first()"
]
| [
"0.5956185",
"0.5946167",
"0.5866012",
"0.5723517",
"0.571503",
"0.5705968",
"0.5679877",
"0.5675663",
"0.5662041",
"0.5633913",
"0.55750704",
"0.55725336",
"0.557066",
"0.5537898",
"0.551114",
"0.54992044",
"0.54922616",
"0.54763204",
"0.5463753",
"0.54381096",
"0.54253614",
"0.5403584",
"0.53981024",
"0.538523",
"0.5375283",
"0.53282064",
"0.53230083",
"0.53071576",
"0.52671576",
"0.5254082"
]
| 0.6901257 | 0 |
Replace a data view with a custom view. Return True in case of success, False in case of failure. | def replaceView(self, modeId, newView):
oldView = None
for view in self.__views:
if view.modeId() == modeId:
oldView = view
break
elif isinstance(view, _CompositeDataView):
# recurse
hooks = self.getHooks()
if hooks is not None:
newView.setHooks(hooks)
if view.replaceView(modeId, newView):
return True
if oldView is None:
return False
# replace oldView with new view in dict
self.__views = dict(
(newView, None) if view is oldView else (view, idx) for
view, idx in self.__views.items())
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def replaceView(self, modeId, newView):\n oldView = None\n for iview, view in enumerate(self.__views):\n if view.modeId() == modeId:\n oldView = view\n break\n elif isinstance(view, CompositeDataView):\n # recurse\n hooks = self.getHooks()\n if hooks is not None:\n newView.setHooks(hooks)\n if view.replaceView(modeId, newView):\n return True\n\n if oldView is None:\n return False\n\n # replace oldView with new view in dict\n self.__views[iview] = newView\n return True",
"def _create_or_alter_view(self, survey_data):\n self.log.info(\"Creating or altering view vw_AllSurveyData \")\n edit_view = self._get_query('edit_view') + \"( \" + survey_data + \" )\"\n self.db.execute_query(edit_view)\n self.log.info(\"View was edited successfully\")",
"def addView(self, dataView):\n hooks = self.getHooks()\n if hooks is not None:\n dataView.setHooks(hooks)\n self.__views[dataView] = None",
"def UpdateView(self):\n self.View._viewData = self.Model.ModelViewData",
"def test_migrate_view_fields(self):\n self.test_view = RecordView.create(\n self.testcoll, test_view_id, test_view_create_values\n )\n migrate_coll_data(self.testcoll)\n # Read field definition and check for inline field list\n view_data = self.check_entity_values(\n \"_view\", test_view_id, check_values=test_view_migrated_values\n )\n return",
"def MigrateV2View(view, log):\n newView, warnFlag = {\n View.Administrator: ([qtiv2.core.View.proctor], True),\n View.AdminAuthority: ([qtiv2.core.View.proctor], True),\n View.Assessor: ([qtiv2.core.View.scorer], True),\n View.Author: ([qtiv2.core.View.author], False),\n View.Candidate: ([qtiv2.core.View.candidate], False),\n View.Invigilator: ([qtiv2.core.View.proctor], False),\n View.Proctor: ([qtiv2.core.View.proctor], False),\n View.InvigilatorProctor: ([qtiv2.core.View.proctor], False),\n View.Psychometrician: ([qtiv2.core.View.testConstructor], True),\n View.Scorer: ([qtiv2.core.View.scorer], False),\n View.Tutor: ([qtiv2.core.View.tutor], False),\n View.All: ([\n qtiv2.core.View.author,\n qtiv2.core.View.candidate,\n qtiv2.core.View.proctor,\n qtiv2.core.View.scorer,\n qtiv2.core.View.testConstructor,\n qtiv2.core.View.tutor], False)\n }[view]\n if warnFlag:\n log.append(\"Warning: changing view %s to %s\" % (\n View.to_str(view), qtiv2.core.View.list_to_str(newView)))\n return newView",
"def on_action_set_view(self, content):\n self._view = content['view']\n self.refresh_traits_widget()",
"def update(self, view, show_errors):\n raise NotImplementedError(\"calling abstract method\")",
"def run_view(self, expanded, unexpanded) :\n\t\treturn self.manage_view_properties(expanded, unexpanded, \"\", perms = \"View\")",
"def save(self, *args, **kwargs):\n ret = super(ViewFeatureSerializer, self).save(*args, **kwargs)\n if hasattr(ret, '_view_extra'):\n ret._view_extra.save(*args, **kwargs)\n return ret",
"def addView(self, dataView):\n hooks = self.getHooks()\n if hooks is not None:\n dataView.setHooks(hooks)\n self.__views.append(dataView)",
"def view(self, viewname, **data):\n view = self.mylookup.get_template(viewname + '.mako').render(**data)\n \n self.res.status = 202\n self.res.content_type = 'text/html'\n self.res.content_length = len(view)\n \n self.start_response(self.res.status, self.res.headerlist)\n return view",
"def save_view(self, subject, name, is_overwrite=False):\n db.save_view(self, subject, name, is_overwrite)",
"def convert_view(old_view):\n # Make these field to avoid checking if they exist.\n if not 'Children' in old_view:\n old_view['Children'] = []\n if not 'ViewerRecords' in old_view:\n old_view['ViewerRecords'] = []\n\n # The new list of children\n children = []\n \n # First convert viewer records into the first child.\n # If there is only one viewer record, make it a leaf view.\n if len(old_view['ViewerRecords']) == 1:\n leaf, code = convert_viewer_record(old_view['ViewerRecords'][0])\n if code != 200:\n error_message = \"%s, view:%s\"%(leaf,old_view['_id'])\n return error_message, code\n children.append(leaf)\n if len(old_view['ViewerRecords']) > 1:\n node = {'children': [],\n 'type': 'multi'}\n for vr in old_view['ViewerRecords']:\n leaf, code = convert_viewer_record(vr)\n if code != 200:\n error_message = \"%s, view:%s\"%(leaf,old_view['_id'])\n return leaf, code\n node['children'].append(leaf)\n children.append(node)\n\n # Now add the old children.\n for child in old_view['Children']:\n view, code = convert_view(child)\n if code != 200:\n error_message = \"%s, view:%s\"%(leaf,old_view['_id'])\n return view, code\n children.append(view)\n\n # Flatten views with only one child\n if len(children) == 0:\n error_message = \"No image, view:%s\"%(old_view['_id'])\n return error_message, 404\n if len(children) == 1:\n view = children[0]\n else:\n view = {'children': children}\n \n view['_id'] = str(old_view['_id'])\n if 'Title' in old_view:\n view['title'] = old_view['Title']\n return view, 200",
"def create_or_replace_view(self, relation) -> None:\n database = self.quoted(self._correct_case(relation.database))\n schema = self.quoted(self._correct_case(relation.schema))\n ddl_statement = f\"\"\"CREATE OR REPLACE VIEW\n{self.quoted_dot_notation(relation)}\nAS\n{relation.view_ddl}\n\"\"\"\n engine = self.get_connection(database_override=database,\n schema_override=schema)\n try:\n engine.execute(ddl_statement)\n except Exception as exc:\n logger.info(\"Failed to create %s %s:%s\", relation.materialization.name,\n self.quoted_dot_notation(relation),\n exc)\n raise exc\n logger.info('Created relation %s', self.quoted_dot_notation(relation))",
"def exists_for_view(self, view_id):\n raise NotImplementedError(\"calling abstract method\")",
"def on_query_context(self, view, key, operator, operand, match_all):\n\n okay = False\n if key == 'reg_replace_panel_save' and view.settings().get('reg_replace.edit_view', False):\n okay = True\n elif key == 'reg_replace_panel_test' and view.settings().get('reg_replace.edit_view', False):\n okay = True\n return okay",
"def refresh_view():\n pass",
"def view_edit(\n request: HttpRequest,\n pk: Optional[int] = None,\n workflow: Optional[Workflow] = None,\n view: Optional[View] = None,\n) -> JsonResponse:\n # Form to read/process data\n form = ViewAddForm(request.POST or None, instance=view, workflow=workflow)\n\n return save_view_form(\n request,\n form,\n 'table/includes/partial_view_edit.html')",
"def add_view( *args, **kwargs ):",
"def _set_containable_view(self, session):\n for obj_name in self._containable_views:\n if self._containable_views[obj_name] == SEQUESTERED:\n try:\n getattr(session, 'use_sequestered_' + obj_name + '_view')()\n except AttributeError:\n pass\n else:\n try:\n getattr(session, 'use_unsequestered_' + obj_name + '_view')()\n except AttributeError:\n pass",
"def public(self, view):\n view.public = True\n return view",
"def getMatchingViews(self, data, info):\n raise NotImplementedError()",
"def __call__(self, data: bytes, **metadata) -> TViewResult:\n raise NotImplementedError() # pragma: no cover",
"def is_view(self):\n return self._base is not None",
"def get_custom_view(self):\n view = None\n\n # Try to load a custom view.\n if self.view:\n try:\n view = get_callable(self.view)\n except (ImportError, ViewDoesNotExist):\n pass\n\n return view",
"def set_view(self, view, view_xml_filename):\n if self.view_exists(view):\n command = PlatformJenkinsJavaCLI.UPDATE_VIEW\n else:\n command = PlatformJenkinsJavaCLI.CREATE_VIEW\n\n with open(view_xml_filename) as view_xml_file:\n view_xml = view_xml_file.read()\n\n call = subprocess.Popen(self.cli + [command, view], stdin=subprocess.PIPE)\n call.communicate(view_xml)\n call.wait()",
"def test_url_resolves_view(self):\n view = resolve('/add/')\n self.assertEqual(view.func, views.books_add_view)",
"def replaceWithView(self, fieldname, templateId, macro, uid=None, target=None, edit=False):\n\n ksscore = self.getCommandSet('core')\n\n instance = self._getFieldContext(uid)\n locking = ILockable(instance, None)\n if locking and locking.can_safely_unlock():\n locking.unlock()\n\n html = self.renderViewField(fieldname, templateId, macro, uid)\n html = html.strip()\n\n field_id = target or \"parent-fieldname-%s\" % fieldname\n ksscore.replaceHTML(ksscore.getHtmlIdSelector(field_id), html)\n\n return self.render()",
"def is_db_view(db_table):\n if db_table in postgresql_views:\n return True\n return False"
]
| [
"0.6608374",
"0.63342625",
"0.5802944",
"0.5615293",
"0.55753934",
"0.5501989",
"0.5438444",
"0.5432784",
"0.54150265",
"0.5374852",
"0.53694946",
"0.53612304",
"0.53092",
"0.5178626",
"0.5124789",
"0.50912267",
"0.5088075",
"0.5062455",
"0.504078",
"0.50203586",
"0.50010663",
"0.4989605",
"0.4985734",
"0.4970574",
"0.4960853",
"0.49469328",
"0.49398604",
"0.4937575",
"0.49309927",
"0.49069825"
]
| 0.6661702 | 0 |
Add a new dataview to the available list. | def addView(self, dataView):
hooks = self.getHooks()
if hooks is not None:
dataView.setHooks(hooks)
self.__views.append(dataView) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def addView(self, dataView):\n hooks = self.getHooks()\n if hooks is not None:\n dataView.setHooks(hooks)\n self.__views[dataView] = None",
"def add_view(self, *args, **kwargs):\n return self._resources_manager.add_view(*args, **kwargs)",
"def add_view_step(self, view_step):\n self._data_dict[self.KEY_VIEW_STEPS].append(view_step)",
"def addViewToDb(self,name):\n\t\tsql = \"INSERT INTO hudson_views(viewname) VALUES (%s)\"\n\t\tcsr = self.db.cursor()\n\t\tcsr.execute(sql,[name])",
"def DoAdd(self,event):\r\n newItem = self.data.add()\r\n if newItem and newItem not in self.items:\r\n self.items = self.data.getItemList()\r\n index = self.items.index(newItem)\r\n self.list.InsertItems([newItem],index)",
"def add_item(self) -> None:\n item = self.get_selected_item(self.tree_cache)\n if item is None:\n return\n\n text, ok = QInputDialog.getText(self, \"Appending new data\", \"Data:\")\n if ok:\n parent_id = item.data().get_id()\n data = Data(text, parent_id)\n data_node = DataNode(instance=data)\n self.data_cache.append(data_node)\n self._data_controller.update_node_hierarchy(self.data_cache, remove_from_list=True)\n self.sync_tree_cache()",
"def add_view(self, view):\n # Add to views\n self._views.append(view)\n\n # If app was provided in constructor, register view with Flask app\n if self.app is not None:\n self.app.register_blueprint(view.create_blueprint(self))\n if view.is_menu:\n self._add_view_to_menu(view)",
"def add_new_item(self):\n self.recs += 1\n self.add_rec_fields(self.recs)\n vbar = self.scrl.verticalScrollBar()\n vbar.setMaximum(vbar.maximum() + 36)\n vbar.setValue(vbar.maximum())",
"def add_view( *args, **kwargs ):",
"def _recreate_dvl_data(self):\n if self._dvl_data is not None: #destroy the old control object if it had been created\n self._dvl_data.Destroy()\n self._dvl_columns = []\n \n self._dvl_data = wx.dataview.DataViewListCtrl(self, wx.ID_ANY) #make a new control object\n self._gbs_main.Add(self._dvl_data, wx.GBPosition(0, 0), wx.GBSpan(3, 1), wx.ALL | wx.EXPAND) #add the new control to the sizer\n self.Layout() #update the layout of the window to account for the new control",
"def view_add(\n request: HttpRequest,\n workflow: Optional[Workflow] = None,\n) -> JsonResponse:\n # Get the workflow element\n if workflow.nrows == 0:\n messages.error(\n request,\n _('Cannot add a view to a workflow without data'))\n return JsonResponse({'html_redirect': ''})\n\n # Form to read/process data\n form = ViewAddForm(request.POST or None, workflow=workflow)\n\n return save_view_form(\n request,\n form,\n 'table/includes/partial_view_add.html',\n )",
"def AddSlice(self, data_slice):\n self.slices.append(data_slice)",
"def view_list(self, view_list):\n\n self._view_list = view_list",
"def addDataPoints(self):\n pass",
"def append(self, data):\n self.data_list.append(data)",
"def _add_view_to_menu(self, view):\n self._add_menu_item(MenuView(view.name, view), view.category)",
"def _create_or_alter_view(self, survey_data):\n self.log.info(\"Creating or altering view vw_AllSurveyData \")\n edit_view = self._get_query('edit_view') + \"( \" + survey_data + \" )\"\n self.db.execute_query(edit_view)\n self.log.info(\"View was edited successfully\")",
"def on_add_clicked(self):\n selected_indexes = self.ui.availListView.selectedIndexes()\n for index in selected_indexes:\n row = self.availModel.itemFromIndex(index).row()\n #rowList = self.availModel.takeRow(row)\n student = self.availModel.item(row, 0).text()\n sid = self.availModel.item(row, 1).text()\n try:\n # Actually add the student for the date into the database\n self.db.student_attend(sid, self.date_string)\n except KeyError:\n # Display error window if student missing\n err_msg = QtGui.QErrorMessage()\n err_msg.showMessage(\"Sid not found for student %s\" % student)\n\n self.update_views()",
"def _add_view(self, window, view):\r\n\r\n # If no 'relative_to' is specified then the view is positioned\r\n # relative to the editor area.\r\n if len(view.relative_to) > 0:\r\n relative_to = window.get_view_by_id(view.relative_to)\r\n \r\n else:\r\n relative_to = None\r\n\r\n # Add the view to the window.\r\n window.add_view(\r\n view, view.position, relative_to, (view.width, view.height)\r\n )\r\n\r\n return",
"def add_data(self, new_data, *args):\n raise NotImplementedError",
"def add_tree_view(self):\n self.data_view = QTreeView()\n self.data_view.setRootIsDecorated(False)\n self.data_view.setAlternatingRowColors(True)\n self.mbox.addWidget(self.data_view)\n\n self.data_layout = QHBoxLayout()\n self.data_layout.addWidget(self.data_view)\n\n self.model = self.create_track_model(self)\n self.data_view.setModel(self.model)",
"def add(self, value: object) -> None:\n self.da.append(value)",
"def add(self, value: object) -> None:\n self.da.append(value)",
"def add_data(self, data, data_label):\n old_data_len = len(self.data_collection)\n\n # Include the data in the data collection\n data_label = data_label or \"New Data\"\n self.data_collection[data_label] = data\n\n # Send out a toast message\n snackbar_message = SnackbarMessage(\n f\"Data '{data_label}' successfully added.\", sender=self)\n self.hub.broadcast(snackbar_message)",
"def add_views(self, *args):\n for view in args:\n self.add_view(view)",
"def add_data(self, df):\n # TODO: improve merging code\n self.data = self.data.append(df, ignore_index=False)\n self.data = self.data[~self.data.index.duplicated(keep='first')]",
"def add_data(self, data: List[dict]):\n raise NotImplementedError()",
"def add_data_to_viewer(self, viewer_reference, data_label,\n clear_other_data=False):\n viewer_item = self._viewer_item_by_reference(viewer_reference)\n data_id = self._data_id_from_label(data_label)\n\n data_ids = viewer_item['selected_data_items'] \\\n if not clear_other_data else []\n\n if data_id is not None:\n data_ids.append(data_id)\n self._update_selected_data_items(viewer_item['id'], data_ids)\n else:\n raise ValueError(\n f\"No data item found with label '{data_label}'. Label must be one \"\n f\"of:\\n\\t\" + f\"\\n\\t\".join([\n data_item['name'] for data_item in self.state.data_items]))",
"def _on_data_added(self, msg):\n self._link_new_data()\n data_item = self._create_data_item(msg.data.label)\n self.state.data_items.append(data_item)",
"def _add_level_to_view(self, level):\n key = Level.key(self.sorting)(level)\n index = bisect.bisect(self.view_keys, key)\n self.view_keys[index:index] = [key]\n\n # If sorting is reversed, the key list and view are in different orders\n if(self.sorting & Sorting.Reversed):\n index = len(self.view_list) - index\n\n\n self.list_lock.acquire()\n\n self.beginInsertRows(QModelIndex(), index, index)\n self.view_list[index:index] = [level]\n\n self.endInsertRows()\n\n self.list_lock.release()"
]
| [
"0.6986125",
"0.6275191",
"0.6244819",
"0.6171132",
"0.6145084",
"0.6141402",
"0.6009027",
"0.59855765",
"0.59667253",
"0.59002584",
"0.58872473",
"0.58618987",
"0.5837559",
"0.5743151",
"0.5737284",
"0.5722587",
"0.5710407",
"0.57034796",
"0.5701506",
"0.56690824",
"0.5649129",
"0.56208616",
"0.56208616",
"0.55967623",
"0.5581187",
"0.5539313",
"0.55231756",
"0.55068535",
"0.54845715",
"0.546511"
]
| 0.7770105 | 0 |
Returns the list of registered views | def getViews(self):
return list(self.__views) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getViews(self):\n return list(self.__views.keys())",
"def views(self):\n return self._views",
"def get_views(self):\n return self._get_types_from_default_ns(View)",
"def getViews(self):\n raise NotImplementedError()",
"def get_views(cohesity_client):\n views = cohesity_client.views.get_views().views\n views_list = views if views else []\n for view in views_list:\n exported_res_dict[\"Protection Views\"].append(view.name)\n return views_list",
"def views(self):\r\n return resources.Views(self)",
"def get_view_endpoints(self):\n return []",
"def getViewListFromDB(self):\n\t\tsql = \"SELECT viewname from hudson_views\"\n\t\tcsr = self.db.cursor()\n\t\tcsr.execute(sql)\n\t\tdata = [ x[0] for x in csr.fetchall() ]\n\t\treturn data",
"def list_views(self, repo):\n return sorted(self.user_con.list_views(repo=repo))",
"def getViews(read):\n ...",
"def getReachableViews(self):\n return [self]",
"def viewVocab(self): \n mapping = []\n views = registration.getViews(IBrowserRequest)\n for view in views:\n if view.name and self.getRenderableView(view.name):\n mapping.append((view.name, view.name))\n return atapi.DisplayList(mapping)",
"def page_views(self, *args, **kwargs):\r\n return self._get('PageViews', *args, **kwargs)",
"def get_view_plugins():\n from pyjen.view import View\n retval = []\n for plugin in get_plugins():\n if issubclass(plugin, View):\n retval.append(plugin)\n\n return retval",
"def get_views(self):\n query = mssqlqueries.get_views()\n logger.info(u'Views query: %s', query)\n for tabular_result in self.execute_query(query):\n for row in tabular_result[0]:\n yield (row[0], row[1])",
"def views(self):\r\n return Views(self)",
"def getReachableViews(self):\n raise NotImplementedError()",
"def list(self, request):\n\n viewset_list = [\n 'User\\'s action (list,create,retrieve ,update , partial_update)',\n 'Automatically maps to the urls using Routers.',\n 'Provides more functionality with less code.',\n ]\n\n return Response({'message':'Hello From ViewSet' , 'viewset':viewset_list})",
"def data():\n return app_views",
"def trait_views ( self, klass = None ):\n return self.__class__.__dict__[ ViewTraits ].filter_by( klass )",
"def list(request):\n return EntryView.__index(request)",
"def get_active_views_paths(window):\n visible_views = []\n\n # Priority for the active view\n active_view = window.active_view()\n visible_views.append(active_view.file_name())\n\n num_groups = window.num_groups()\n for group_id in range(num_groups):\n view = window.active_view_in_group(group_id)\n if view != active_view and view.file_name():\n visible_views.append(view.file_name())\n\n return visible_views",
"def views(self, views):\n\n self._views = views",
"def local_views():\n\tpass",
"def reload_blueprints():\n\t\tmod = lambda view: importlib.import_module('%s.%s.views' % (root, view))\n\t\treturn [getattr(mod(view), view) for view in app.config['LIVE']]",
"def list(default_view):\n ListCommandExecutor(default_view).list()",
"def createViews(views):\n ...",
"def get_views(view_args, model_type):\n # XXX Why pop?\n metadata = view_args.pop('metadata')\n all_view = metadata.get('views', {}).get('all')\n if not all_view:\n all_view= '%s/all'%model_type\n all_count_view = metadata.get('views', {}).get('all_count')\n if not all_count_view:\n all_count_view= '%s/all_count'%model_type\n return all_view, all_count_view",
"def register_views(app: Application, base: str):\n cors = aiohttp_cors.setup(app)\n\n for view in views:\n logger.info(\"Registered %s at %s\", view.__name__, base + view.url)\n view.register_route(app, base)\n view.enable_cors(cors)",
"def get_authenticators_for_view(self, view_name):\n pass"
]
| [
"0.8089164",
"0.7825242",
"0.7322528",
"0.71818465",
"0.70918703",
"0.7040627",
"0.7036368",
"0.70254064",
"0.6989426",
"0.6974881",
"0.6926255",
"0.69062054",
"0.6688032",
"0.6612111",
"0.64889175",
"0.63672256",
"0.63298315",
"0.6300513",
"0.62238866",
"0.61698407",
"0.6074536",
"0.60400707",
"0.6014029",
"0.6011854",
"0.5980789",
"0.5975432",
"0.59713465",
"0.59683883",
"0.595023",
"0.5933417"
]
| 0.80495256 | 1 |
Replace a data view with a custom view. Return True in case of success, False in case of failure. | def replaceView(self, modeId, newView):
oldView = None
for iview, view in enumerate(self.__views):
if view.modeId() == modeId:
oldView = view
break
elif isinstance(view, CompositeDataView):
# recurse
hooks = self.getHooks()
if hooks is not None:
newView.setHooks(hooks)
if view.replaceView(modeId, newView):
return True
if oldView is None:
return False
# replace oldView with new view in dict
self.__views[iview] = newView
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def replaceView(self, modeId, newView):\n oldView = None\n for view in self.__views:\n if view.modeId() == modeId:\n oldView = view\n break\n elif isinstance(view, _CompositeDataView):\n # recurse\n hooks = self.getHooks()\n if hooks is not None:\n newView.setHooks(hooks)\n if view.replaceView(modeId, newView):\n return True\n if oldView is None:\n return False\n\n # replace oldView with new view in dict\n self.__views = dict(\n (newView, None) if view is oldView else (view, idx) for\n view, idx in self.__views.items())\n return True",
"def _create_or_alter_view(self, survey_data):\n self.log.info(\"Creating or altering view vw_AllSurveyData \")\n edit_view = self._get_query('edit_view') + \"( \" + survey_data + \" )\"\n self.db.execute_query(edit_view)\n self.log.info(\"View was edited successfully\")",
"def addView(self, dataView):\n hooks = self.getHooks()\n if hooks is not None:\n dataView.setHooks(hooks)\n self.__views[dataView] = None",
"def UpdateView(self):\n self.View._viewData = self.Model.ModelViewData",
"def test_migrate_view_fields(self):\n self.test_view = RecordView.create(\n self.testcoll, test_view_id, test_view_create_values\n )\n migrate_coll_data(self.testcoll)\n # Read field definition and check for inline field list\n view_data = self.check_entity_values(\n \"_view\", test_view_id, check_values=test_view_migrated_values\n )\n return",
"def MigrateV2View(view, log):\n newView, warnFlag = {\n View.Administrator: ([qtiv2.core.View.proctor], True),\n View.AdminAuthority: ([qtiv2.core.View.proctor], True),\n View.Assessor: ([qtiv2.core.View.scorer], True),\n View.Author: ([qtiv2.core.View.author], False),\n View.Candidate: ([qtiv2.core.View.candidate], False),\n View.Invigilator: ([qtiv2.core.View.proctor], False),\n View.Proctor: ([qtiv2.core.View.proctor], False),\n View.InvigilatorProctor: ([qtiv2.core.View.proctor], False),\n View.Psychometrician: ([qtiv2.core.View.testConstructor], True),\n View.Scorer: ([qtiv2.core.View.scorer], False),\n View.Tutor: ([qtiv2.core.View.tutor], False),\n View.All: ([\n qtiv2.core.View.author,\n qtiv2.core.View.candidate,\n qtiv2.core.View.proctor,\n qtiv2.core.View.scorer,\n qtiv2.core.View.testConstructor,\n qtiv2.core.View.tutor], False)\n }[view]\n if warnFlag:\n log.append(\"Warning: changing view %s to %s\" % (\n View.to_str(view), qtiv2.core.View.list_to_str(newView)))\n return newView",
"def on_action_set_view(self, content):\n self._view = content['view']\n self.refresh_traits_widget()",
"def update(self, view, show_errors):\n raise NotImplementedError(\"calling abstract method\")",
"def run_view(self, expanded, unexpanded) :\n\t\treturn self.manage_view_properties(expanded, unexpanded, \"\", perms = \"View\")",
"def save(self, *args, **kwargs):\n ret = super(ViewFeatureSerializer, self).save(*args, **kwargs)\n if hasattr(ret, '_view_extra'):\n ret._view_extra.save(*args, **kwargs)\n return ret",
"def addView(self, dataView):\n hooks = self.getHooks()\n if hooks is not None:\n dataView.setHooks(hooks)\n self.__views.append(dataView)",
"def view(self, viewname, **data):\n view = self.mylookup.get_template(viewname + '.mako').render(**data)\n \n self.res.status = 202\n self.res.content_type = 'text/html'\n self.res.content_length = len(view)\n \n self.start_response(self.res.status, self.res.headerlist)\n return view",
"def save_view(self, subject, name, is_overwrite=False):\n db.save_view(self, subject, name, is_overwrite)",
"def convert_view(old_view):\n # Make these field to avoid checking if they exist.\n if not 'Children' in old_view:\n old_view['Children'] = []\n if not 'ViewerRecords' in old_view:\n old_view['ViewerRecords'] = []\n\n # The new list of children\n children = []\n \n # First convert viewer records into the first child.\n # If there is only one viewer record, make it a leaf view.\n if len(old_view['ViewerRecords']) == 1:\n leaf, code = convert_viewer_record(old_view['ViewerRecords'][0])\n if code != 200:\n error_message = \"%s, view:%s\"%(leaf,old_view['_id'])\n return error_message, code\n children.append(leaf)\n if len(old_view['ViewerRecords']) > 1:\n node = {'children': [],\n 'type': 'multi'}\n for vr in old_view['ViewerRecords']:\n leaf, code = convert_viewer_record(vr)\n if code != 200:\n error_message = \"%s, view:%s\"%(leaf,old_view['_id'])\n return leaf, code\n node['children'].append(leaf)\n children.append(node)\n\n # Now add the old children.\n for child in old_view['Children']:\n view, code = convert_view(child)\n if code != 200:\n error_message = \"%s, view:%s\"%(leaf,old_view['_id'])\n return view, code\n children.append(view)\n\n # Flatten views with only one child\n if len(children) == 0:\n error_message = \"No image, view:%s\"%(old_view['_id'])\n return error_message, 404\n if len(children) == 1:\n view = children[0]\n else:\n view = {'children': children}\n \n view['_id'] = str(old_view['_id'])\n if 'Title' in old_view:\n view['title'] = old_view['Title']\n return view, 200",
"def create_or_replace_view(self, relation) -> None:\n database = self.quoted(self._correct_case(relation.database))\n schema = self.quoted(self._correct_case(relation.schema))\n ddl_statement = f\"\"\"CREATE OR REPLACE VIEW\n{self.quoted_dot_notation(relation)}\nAS\n{relation.view_ddl}\n\"\"\"\n engine = self.get_connection(database_override=database,\n schema_override=schema)\n try:\n engine.execute(ddl_statement)\n except Exception as exc:\n logger.info(\"Failed to create %s %s:%s\", relation.materialization.name,\n self.quoted_dot_notation(relation),\n exc)\n raise exc\n logger.info('Created relation %s', self.quoted_dot_notation(relation))",
"def exists_for_view(self, view_id):\n raise NotImplementedError(\"calling abstract method\")",
"def on_query_context(self, view, key, operator, operand, match_all):\n\n okay = False\n if key == 'reg_replace_panel_save' and view.settings().get('reg_replace.edit_view', False):\n okay = True\n elif key == 'reg_replace_panel_test' and view.settings().get('reg_replace.edit_view', False):\n okay = True\n return okay",
"def refresh_view():\n pass",
"def view_edit(\n request: HttpRequest,\n pk: Optional[int] = None,\n workflow: Optional[Workflow] = None,\n view: Optional[View] = None,\n) -> JsonResponse:\n # Form to read/process data\n form = ViewAddForm(request.POST or None, instance=view, workflow=workflow)\n\n return save_view_form(\n request,\n form,\n 'table/includes/partial_view_edit.html')",
"def add_view( *args, **kwargs ):",
"def _set_containable_view(self, session):\n for obj_name in self._containable_views:\n if self._containable_views[obj_name] == SEQUESTERED:\n try:\n getattr(session, 'use_sequestered_' + obj_name + '_view')()\n except AttributeError:\n pass\n else:\n try:\n getattr(session, 'use_unsequestered_' + obj_name + '_view')()\n except AttributeError:\n pass",
"def public(self, view):\n view.public = True\n return view",
"def getMatchingViews(self, data, info):\n raise NotImplementedError()",
"def __call__(self, data: bytes, **metadata) -> TViewResult:\n raise NotImplementedError() # pragma: no cover",
"def is_view(self):\n return self._base is not None",
"def get_custom_view(self):\n view = None\n\n # Try to load a custom view.\n if self.view:\n try:\n view = get_callable(self.view)\n except (ImportError, ViewDoesNotExist):\n pass\n\n return view",
"def test_url_resolves_view(self):\n view = resolve('/add/')\n self.assertEqual(view.func, views.books_add_view)",
"def set_view(self, view, view_xml_filename):\n if self.view_exists(view):\n command = PlatformJenkinsJavaCLI.UPDATE_VIEW\n else:\n command = PlatformJenkinsJavaCLI.CREATE_VIEW\n\n with open(view_xml_filename) as view_xml_file:\n view_xml = view_xml_file.read()\n\n call = subprocess.Popen(self.cli + [command, view], stdin=subprocess.PIPE)\n call.communicate(view_xml)\n call.wait()",
"def replaceWithView(self, fieldname, templateId, macro, uid=None, target=None, edit=False):\n\n ksscore = self.getCommandSet('core')\n\n instance = self._getFieldContext(uid)\n locking = ILockable(instance, None)\n if locking and locking.can_safely_unlock():\n locking.unlock()\n\n html = self.renderViewField(fieldname, templateId, macro, uid)\n html = html.strip()\n\n field_id = target or \"parent-fieldname-%s\" % fieldname\n ksscore.replaceHTML(ksscore.getHtmlIdSelector(field_id), html)\n\n return self.render()",
"def is_db_view(db_table):\n if db_table in postgresql_views:\n return True\n return False"
]
| [
"0.66609526",
"0.633346",
"0.58019185",
"0.5614997",
"0.557388",
"0.5499673",
"0.5438074",
"0.5433056",
"0.54160523",
"0.5373652",
"0.5368686",
"0.5363025",
"0.5309105",
"0.517758",
"0.51247215",
"0.5092328",
"0.5089086",
"0.50640225",
"0.5039589",
"0.5020905",
"0.50027037",
"0.4990245",
"0.4988321",
"0.49737793",
"0.49642897",
"0.49460977",
"0.49391928",
"0.4938132",
"0.49305582",
"0.4910077"
]
| 0.66075784 | 1 |
Update used colormap according to nxdata's SILX_style | def _updateColormap(self, nxdata):
cmap_norm = nxdata.plot_style.signal_scale_type
if cmap_norm is not None:
self.defaultColormap().setNormalization(
'log' if cmap_norm == 'log' else 'linear') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def changeColor(self):\n self.layer.new_colormap()",
"def color(self, sids=None, sat=1):\n if sids == None: # init/overwrite self.colors\n nids = self.nids\n # uint8, single unit nids are 1-based:\n self.colors = CLUSTERCLRSRGB[nids % len(CLUSTERCLRSRGB) - 1] * sat\n # overwrite unclustered/multiunit points with GREYRGB\n self.colors[nids < 1] = GREYRGB * sat\n else: # assume self.colors exists\n sidis = self.sids.searchsorted(sids)\n nids = self.nids[sidis]\n self.colors[sidis] = CLUSTERCLRSRGB[nids % len(CLUSTERCLRSRGB) - 1] * sat\n self.colors[sidis[nids < 1]] = GREYRGB * sat",
"def color(self, sids=None, sat=1):\n if sids is None: # init/overwrite self.colors\n nids = self.nids\n # uint8, single unit nids are 1-based:\n self.colors = CLUSTERCLRSRGB[nids % len(CLUSTERCLRSRGB) - 1] * sat\n # overwrite unclustered/multiunit points with GREYRGB\n self.colors[nids < 1] = GREYRGB * sat\n else: # assume self.colors exists\n sidis = self.sids.searchsorted(sids)\n nids = self.nids[sidis]\n self.colors[sidis] = CLUSTERCLRSRGB[nids % len(CLUSTERCLRSRGB) - 1] * sat\n self.colors[sidis[nids < 1]] = GREYRGB * sat",
"def update_color(self):\n self.plot(update_traces=False, update_waveforms=True)",
"def uniqueish_color(color_data):\n # return plt.cm.gist_ncar(color_data)\n # return plt.cm.binary(color_data)\n return plt.cm.bwr(color_data)",
"def setColorIndex(idx):\n dislin.setclr(idx)",
"def setColourMap(self):\n cmap = self.config['cmap']\n\n pos, colour, mode = colourMaps.colourMaps(cmap)\n\n cmap = pg.ColorMap(pos, colour,mode)\n self.lut = cmap.getLookupTable(0.0, 1.0, 256)\n minsg = np.min(self.sg)\n maxsg = np.max(self.sg)\n self.colourStart = (self.config['brightness'] / 100.0 * self.config['contrast'] / 100.0) * (maxsg - minsg) + minsg\n self.colourEnd = (maxsg - minsg) * (1.0 - self.config['contrast'] / 100.0) + self.colourStart",
"def _updateColormapImage(self, *args, **kwargs):\n if self._colormapImage is not None:\n self._colormapImage = None\n model = self.model()\n if model is not None:\n index = self.index(column=1)\n model.dataChanged.emit(index, index)",
"def get_density_cmap():\n # Add completely white color to Reds colormap in Matplotlib\n list_colors = plt.cm.datad['Reds']\n list_colors = list(list_colors)\n list_colors.insert(0, (1, 1, 1))\n list_colors.insert(0, (1, 1, 1))\n lscm = matplotlib.colors.LinearSegmentedColormap.from_list(\"my_Reds\", list_colors)\n return lscm",
"def setAxisLabelColor(idx=-1, axes='XYZ'):\n dislin.axclrs(idx, 'LABELS', axes)",
"def test_colormap_discrete_nu():\n with TestingCanvas(size=size, bgcolor='w') as c:\n idata = np.linspace(255, 0, size[0]*size[1]).astype(np.ubyte)\n data = idata.reshape((size[0], size[1]))\n image = Image(cmap=Colormap(np.array([[0, .75, 0], [.75, .25, .5]]),\n [0., .25, 1.], interpolation='zero'),\n clim='auto', parent=c.scene)\n image.set_data(data)\n assert_image_approved(c.render(), \"visuals/colormap_nu.png\")",
"def setColourMap(self):\n cmap = self.config['cmap']\n\n pos, colour, mode = colourMaps.colourMaps(cmap)\n\n cmap = pg.ColorMap(pos, colour, mode)\n self.lut = cmap.getLookupTable(0.0, 1.0, 256)\n minsg = np.min(self.sg)\n maxsg = np.max(self.sg)\n self.colourStart = (self.config['brightness'] / 100.0 * self.config['contrast'] / 100.0) * (\n maxsg - minsg) + minsg\n self.colourEnd = (maxsg - minsg) * (1.0 - self.config['contrast'] / 100.0) + self.colourStart",
"def rescale(self):\n low = self.datasource.data[\"values\"].min()\n high = self.datasource.data[\"values\"].max()\n\n # force color to be at lower end of the colormap if\n # data is all equal\n if low == high:\n high += 1\n\n self.set_limits_minmax(low, high)",
"def set_cmap_cb(self, w, index):\n old_cmap_name = self._cmap_name\n name = cmap.get_names()[index]\n self.cmap_name = name\n self.pipeline.push(StageAction(self,\n dict(cmap_name=old_cmap_name),\n dict(cmap_name=self._cmap_name),\n descr=\"rgbmap / change cmap\"))\n\n self.pipeline.run_from(self)",
"def update_colormap(self, to_overlay=None, **kwargs):\n if self._n_overlay >= 1:\n overlay = self._n_overlay - 1 if to_overlay is None else to_overlay\n # Define the colormap data :\n data_lim = self._data_lim[overlay]\n col = np.linspace(data_lim[0], data_lim[1], LUT_LEN)\n self._text2d_data[overlay, ...] = Colormap(**kwargs).to_rgba(col)\n self._text2d.set_data(self._text2d_data)\n self.update()",
"def uniqueish_color():\n return plt.cm.gist_ncar(np.random.random())",
"def colormap_masked(ncolors=256, knee_index=None, cmap='inferno', alpha=0.3):\n cm = plt.cm.get_cmap(cmap)(np.linspace(0, 1, ncolors))\n if knee_index is None:\n # Then map to pvals, as -log(p) between 0 and 3.5, and threshold at 0.05\n knee_index = np.argmin(abs(np.linspace(0., 3.5, ncolors)+np.log10(0.05)))\n \n cm[:knee_index, :] = np.c_[cm[:knee_index, 0], cm[:knee_index, 1], cm[:knee_index, 2], alpha*np.ones((len(cm[:knee_index, 1])))]\n return LinearSegmentedColormap.from_list('my_colormap', cm)",
"def smecv_sm(N: int = 256, set_over_under: bool = True) \\\n -> colors.LinearSegmentedColormap:\n steps = np.array([[134, 80, 16],\n [164, 117, 13],\n [219, 190, 24],\n [250, 249, 156],\n [144, 202, 240],\n [4, 145, 251],\n [8, 83, 211],\n [13, 37, 161]]) / 255.\n cmap = colors.LinearSegmentedColormap.from_list('smecv_sm', steps, N=N)\n\n if set_over_under:\n cmap.set_under(np.array([112, 65, 12]) / 255)\n cmap.set_over(np.array([7, 25, 106]) / 255)\n\n return cmap",
"def smecv_nobs(N: int = 256, set_over_under: bool = True) \\\n -> colors.LinearSegmentedColormap:\n steps = np.array([[209, 56, 76],\n [255, 239, 161],\n [50, 133, 187]]) / 255.\n cmap = colors.LinearSegmentedColormap.from_list('smecv_nobs', steps, N=N)\n\n if set_over_under:\n cmap.set_under(np.array([172, 46, 62]) / 255.)\n cmap.set_over(np.array([45, 122, 170]) / 255)\n\n return cmap",
"def __init__(self):\n self.primary = '#9CC537' # slightly darker than YellowGreen #9acd32, rgb[156/255, 197/255, 55/255]\n self.secondary = '#2E3743' # asphalt, rgb[46/255, 55/255, 67/255]\n self.tertiary = '#9B2B2C' # red'ish, rgb(155, 43, 44)\n self.fourth = '#E57925' # orange'ish, rgb(229, 121, 37)\n self.fifth = '#F2D869' # yellow'ish, rgb(242, 216, 105)\n self.sixth = '#AB8D60'\n self.seventh = '#A4D29F'\n self.eighth = '#6E807B'\n self.ninth = '#3D636F' # blue grey\n self.tenth = '#A49E9D'\n self.eleventh = '#DA9BA6'\n self.primary_10 = '#1F290A' # darkest green, 10% of primary\n self.primary_35 = '#6C9023' # dark green, 35% of primary\n self.primary_80 = '#D7EBAD' # light green, 80% of primary\n self.primary_90 = '#ebf5d6' # light green, 90% of primary\n self.primary_95 = '#F5FAEA' # lightest green, 95% of primary\n self.secondary_70 = '#6d737b' # light asphalt\n\n _col_map_colors = [self.primary_95, # lightest primary\n self.primary, # primary\n self.primary_10] # darkest primary\n self._color_map = self._set_col_map(_col_map_colors)\n\n self.color_list = [self.primary, self.secondary, self.tertiary, self.fourth, self.fifth, self.sixth,\n self.seventh, self.eighth, self.ninth, self.tenth, self.eleventh, self.primary_35]\n\n # set the mpl color cycler to our colors. It has 10 colors\n # mpl.rcParams['axes.prop_cycle']",
"def update_color(self):\r\n \r\n \r\n colorset = self.colorset\r\n \r\n self.grfx[0].colorset = colorset\r\n pass",
"def setAxisAllColor(idx=-1, axes='XYZ'):\n dislin.axclrs(idx, 'ALL', axes)",
"def cmap_discretize(N):\n \n cmap = matplotlib.cm.jet;\n colors_i = np.concatenate((np.linspace(0, 1., N), (0.,0.,0.,0.)))\n colors_rgba = cmap(colors_i)\n indices = np.linspace(0, 1., N+1)\n cdict = {}\n for ki, key in enumerate(('red','green','blue')):\n cdict[key] = [(indices[i], colors_rgba[i-1,ki], colors_rgba[i,ki]) for i in range(N+1)]\n \n return matplotlib.colors.LinearSegmentedColormap(cmap.name + \"_%d\"%N, cdict, 1024);",
"def test_nan_color_copy():\n\n data = np.zeros((16, 16))\n\n f1 = FITSFigure(data)\n f1.show_grayscale()\n f1.set_nan_color('blue')\n\n f2 = FITSFigure(data)\n f2.show_grayscale()\n f2.set_nan_color('red')\n\n assert f1.image.get_cmap()._rgba_bad == (0.0, 0.0, 1.0, 1.0)\n assert f2.image.get_cmap()._rgba_bad == (1.0, 0.0, 0.0, 1.0)",
"def _cmap_discretize(cmap, N):\n\n if type(cmap) == str:\n cmap = plt.get_cmap(cmap)\n colors_i = np.concatenate((np.linspace(0, 1., N), (0.,0.,0.,0.)))\n colors_rgba = cmap(colors_i)\n indices = np.linspace(0, 1., N+1)\n cdict = {}\n for ki, key in enumerate(('red','green','blue')):\n cdict[key] = [(indices[i], colors_rgba[i-1,ki], colors_rgba[i,ki])\n for i in range(N+1)]\n # Return colormap object.\n return mcolors.LinearSegmentedColormap(cmap.name + \"_%d\"%N, cdict, 1024)",
"def cmap(num,cmap = plt.cm.gist_earth_r):\n return cmap(np.linspace(0, 1, num))",
"def setAxisBackground(idx=-1):\n dislin.axsbgd(idx)",
"def _on_colormap_change(self, event=None):\n with self.layer.events.colormap.blocker():\n self.colormap_combobox.setCurrentIndex(\n self.colormap_combobox.findData(self.layer.colormap)\n )",
"def setAxisNameColor(idx=-1, axes='XYZ'):\n dislin.axclrs(idx, 'Name', axes)",
"def update_ptable(self):\n from bokeh.sampledata.periodic_table import elements\n romans = [\"I\", \"II\", \"III\", \"IV\", \"V\", \"VI\", \"VII\"]\n\n elements[\"atomic mass\"] = elements[\"atomic mass\"].astype(str)\n\n elements[\"period\"] = [x for x in elements.period]\n elements = elements[elements.group != \"-\"]\n\n group_range = [str(x) for x in range(1, 19)]\n print ('reaches colormap def')\n colormap = {\n \"c\" : \"#ffa07a\",\n \"nc\" : \"#A9A9A9\"\n }\n elems_colorpair = {}\n\n fcc_B_extrapol_props = {}\n fcc_dB_extrapol_props = {}\n fcc_V0_extrapol_props = {}\n fcc_E0_extrapol_props = {}\n\n bcc_B_extrapol_props = {}\n bcc_dB_extrapol_props = {}\n bcc_V0_extrapol_props = {}\n bcc_E0_extrapol_props = {}\n\n hcp_B_extrapol_props = {}\n hcp_dB_extrapol_props = {}\n hcp_V0_extrapol_props = {}\n hcp_E0_extrapol_props = {}\n\n available_elems = []\n\n for e in elements[\"symbol\"]:\n if e in np.unique(list(self.plot_data['element'])):\n available_elems.append(e)\n for s in np.unique(list(self.plot_data['structure'])):\n plot_struct = self.plot_data[self.plot_data['structure']==s]\n plot_struct_elem = plot_struct[plot_struct['element']==e]\n if s=='fcc':\n try:\n fcc_B_extrapol_props.update({e:list(plot_struct_elem['B'])[0]})\n\n fcc_dB_extrapol_props.update({e:list(plot_struct_elem['BP'])[0]})\n\n fcc_V0_extrapol_props.update({e:list(plot_struct_elem['V0'])[0]})\n\n fcc_E0_extrapol_props.update({e:list(plot_struct_elem['E0'])[0]})\n except:\n pass\n elif s=='bcc':\n try:\n bcc_B_extrapol_props.update({e:list(plot_struct_elem['B'])[0]})\n\n bcc_dB_extrapol_props.update({e:list(plot_struct_elem['BP'])[0]})\n\n bcc_V0_extrapol_props.update({e:list(plot_struct_elem['V0'])[0]})\n\n bcc_E0_extrapol_props.update({e:list(plot_struct_elem['E0'])[0]})\n except:\n pass\n elif s=='hcp':\n try:\n hcp_B_extrapol_props.update({e:list(plot_struct_elem['B'])[0]})\n\n hcp_dB_extrapol_props.update({e:list(plot_struct_elem['BP'])[0]})\n\n hcp_V0_extrapol_props.update({e:list(plot_struct_elem['V0'])[0]})\n\n hcp_E0_extrapol_props.update({e:list(plot_struct_elem['E0'])[0]})\n except:\n pass\n fcc_E0_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in fcc_E0_extrapol_props})\n fcc_V0_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in fcc_V0_extrapol_props})\n fcc_B_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in fcc_B_extrapol_props})\n fcc_dB_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in fcc_dB_extrapol_props})\n\n bcc_E0_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in bcc_E0_extrapol_props})\n bcc_V0_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in bcc_V0_extrapol_props})\n bcc_B_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in bcc_B_extrapol_props})\n bcc_dB_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in bcc_dB_extrapol_props})\n\n hcp_E0_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in hcp_E0_extrapol_props})\n hcp_V0_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in hcp_V0_extrapol_props})\n hcp_B_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in hcp_B_extrapol_props})\n hcp_dB_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in hcp_dB_extrapol_props})\n\n elems_colorpair.update( { key:'c' for key in np.unique(available_elems) } )\n elems_colorpair.update( { key:'nc' for key in list(elements['symbol']) if key not in 
list(elems_colorpair.keys()) } )\n\n\n print ([ colormap[elems_colorpair[x]] for x in elements['symbol'] ])\n\n source = ColumnDataSource(\n data=dict(\n group=[str(x) for x in elements[\"group\"]],\n period=[str(y) for y in elements[\"period\"]],\n symx=[str(x)+\":0.1\" for x in elements[\"group\"]],\n numbery=[str(x)+\":0.8\" for x in elements[\"period\"]],\n massy=[str(x)+\":0.15\" for x in elements[\"period\"]],\n namey=[str(x)+\":0.3\" for x in elements[\"period\"]],\n sym=elements[\"symbol\"],\n name=elements[\"name\"],\n# cpk=elements[\"CPK\"],\n atomic_number=elements[\"atomic number\"],\n# electronic=elements[\"electronic configuration\"],\n fcc_B=[fcc_B_extrapol_props[x] for x in elements[\"symbol\"]],\n fcc_dB=[fcc_dB_extrapol_props[x] for x in elements[\"symbol\"]],\n fcc_V0=[fcc_V0_extrapol_props[x] for x in elements[\"symbol\"]],\n fcc_E0=[fcc_E0_extrapol_props[x] for x in elements[\"symbol\"]],\n bcc_B=[bcc_B_extrapol_props[x] for x in elements[\"symbol\"]],\n bcc_dB=[bcc_dB_extrapol_props[x] for x in elements[\"symbol\"]],\n bcc_V0=[bcc_V0_extrapol_props[x] for x in elements[\"symbol\"]],\n bcc_E0=[bcc_E0_extrapol_props[x] for x in elements[\"symbol\"]],\n hcp_B=[hcp_B_extrapol_props[x] for x in elements[\"symbol\"]],\n hcp_dB=[hcp_dB_extrapol_props[x] for x in elements[\"symbol\"]],\n hcp_V0=[hcp_V0_extrapol_props[x] for x in elements[\"symbol\"]],\n hcp_E0=[hcp_E0_extrapol_props[x] for x in elements[\"symbol\"]],\n type=elements[\"metal\"],\n type_color=[ colormap[elems_colorpair[x]] for x in elements['symbol'] ],\n )\n )\n\n # plot the periodic layout\n #name = source.data[\"name\"]\n #B = source.data[\"B\"]\n\n ptable = figure(title=\"Periodic Table\", tools=\"hover\",\n x_range=group_range, y_range=list(reversed(romans)))\n ptable.background_fill_color='white'\n ptable.plot_width = 1500\n ptable.toolbar_location = None\n ptable.outline_line_color = None\n\n ptable.rect(\"group\", \"period\", 0.9, 0.9, source=source,\n fill_alpha=0.3, color='type_color')\n\n text_props = {\n \"source\": source,\n \"angle\": 0,\n \"color\": \"black\",\n \"text_align\": \"left\",\n \"text_baseline\": \"middle\"\n }\n\n ptable.text(x=\"symx\", y=\"period\", text=\"sym\",\n text_font_style=\"bold\", text_font_size=\"22pt\", **text_props)\n\n ptable.text(x=\"symx\", y=\"numbery\", text=\"atomic_number\",\n text_font_size=\"9pt\", **text_props)\n\n# ptable.text(x=\"symx\", y=\"namey\", text=\"name\",\n# text_font_size=\"6pt\", **text_props)\n\n# ptable.text(x=\"symx\", y=\"massy\", text=\"mass\",\n# text_font_size=\"5pt\", **text_props)\n\n ptable.grid.grid_line_color = None\n\n\n ptable.select_one(HoverTool).tooltips = [\n (\"name\", \"@name\"),\n (\"fcc, V0 (A^3 per atom)\", \"@fcc_V0\"),\n (\"fcc, B (GPa)\", \"@fcc_B\"),\n (\"fcc, dB/dP\", \"@fcc_dB\"),\n (\"bcc, V0 (A^3 per atom)\", \"@bcc_V0\"),\n (\"bcc, B (GPa)\", \"@bcc_B\"),\n (\"bcc, dB/dP\", \"@bcc_dB\"),\n (\"hcp, V0 (A^3 per atom)\", \"@hcp_V0\"),\n (\"hcp, B (GPa)\", \"@hcp_B\"),\n (\"hcp, dB/dP\", \"@hcp_dB\")]\n return ptable"
]
| [
"0.6052805",
"0.5751103",
"0.5729292",
"0.56816226",
"0.5550979",
"0.5397923",
"0.5361876",
"0.5355042",
"0.5344387",
"0.5323888",
"0.5323239",
"0.53095704",
"0.52873427",
"0.5242239",
"0.52291805",
"0.52267003",
"0.5224373",
"0.5188541",
"0.5188279",
"0.51769954",
"0.5140223",
"0.51363975",
"0.5135687",
"0.5115405",
"0.50946516",
"0.50805914",
"0.50721014",
"0.5071499",
"0.5054976",
"0.5046666"
]
| 0.7386802 | 0 |
Adds a new LogImg object to the logger. | def add_log_img(self, log_img_type):
self.log_img_map[log_img_type] = LogImg(self.log_path, log_img_type) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def log_image(self, log_name: str, image: Union[str, Any], step: Optional[int] = None) -> None:\n for key, logger in self._loggers.items():\n log_fn = getattr(logger, \"log_image\", None)\n if callable(log_fn):\n log_fn(log_name, image, step)",
"def log_image(self, log_name: str, image: Union[str, Any], step: Optional[int] = None) -> None:\n for logger in self._loggers:\n log_fn = getattr(logger, \"log_image\", None)\n if callable(log_fn):\n log_fn(log_name, image, step)",
"def get_log_img_obj(self, log_img_type):\n if log_img_type in self.log_img_map:\n return self.log_img_map[log_img_type]\n else:\n msg = \"error: log_img_type '{}' does not exist in the Logger object.\\n\".format(log_img_type)\n msg += \"There are currently {} objects saved in the Logger object\".format(len(self.log_img_map))\n exit(msg)",
"def add_log(self, log):\n log = str(datetime.datetime.now()) + \": \"+log+\"\\n\"\n print(log)\n self.logs.append(log)\n if len(self.logs) > 10:\n self.append_to_logfile()",
"def log_image(data_category, image_name, path=None, plot=None, **kwargs):\n # always, just print in logs\n log(logging.INFO, data_category, \"AML MetricImage({})\".format(image_name))\n if data_category == DataCategory.ONLY_PUBLIC_DATA:\n # if public, ask azureml to record (if azureml attached)\n run = AmlRunWrapper()\n run.setup(attach=True)\n run.log_image(image_name, path, plot, **kwargs)\n run.flush()",
"def Add(self, *args):\n return _BRepAlgo.BRepAlgo_Image_Add(self, *args)",
"def log_image(tag: str,\n data: str,\n global_step: int,\n walltime: Optional[float] = None,\n logger: Optional[logging.Logger] = None) -> None:\n logger = logger or _get_context_logger()\n logger.info(ImageT(tag=tag, img_tensor=data, global_step=global_step,\n walltime=walltime or time.time()))",
"def add_log(self,txt):\n try:\n now=datetime.datetime.now()\n new_item=QtWidgets.QListWidgetItem(now.strftime('%Y/%m/%d %H:%M:%S')+\": \"+txt)\n self.ui.logger_list.addItem(new_item)\n if self.h5saver.h5_file.isopen:\n self.h5saver.append(self.h5saver.logger_array, now.strftime('%Y/%m/%d %H:%M:%S')+\": \"+txt)\n\n except:\n pass",
"def add_image(jid, img):\n jrd.hset(_generate_job_key(jid), 'image_status', 'created')\n image_rd.hset(jid, 'image', img)",
"def add_log(self, id='default-log'):\n log_element = ET.SubElement(self.root, 'ipython-log', id=id)",
"def __add_log(self, logType: int, message: str) -> None:\n\n if isinstance(message, BaseException):\n ex: BaseException = message\n if hasattr(ex, 'message'):\n message = ex.message\n else:\n message = ex.__str__()\n\n message += f'\\n{traceback.format_exc().__str__()}'\n\n if message is None:\n return\n\n if isinstance(message, str) and message.strip().__len__() == 0:\n return\n\n st = stack()\n caller: Traceback = getframeinfo(st[2][0])\n log = LogModel()\n log.log_level = logType\n log.filename = caller.filename\n log.function = caller.function\n log.line_number = caller.lineno\n log.message = message\n log.creation_date = datetime.now()\n\n self.__logs.append(log)",
"def add_log(self):\n self.stack = []\n diff = self.diff(self.original, self.doc)\n entry = {\"_id\": utils.get_iuid(),\n \"doctype\": constants.DOCTYPE_LOG,\n \"docid\": self.doc[\"_id\"],\n \"diff\": diff,\n \"timestamp\": utils.get_time()}\n self.modify_log_entry(entry)\n if hasattr(flask.g, \"current_user\") and flask.g.current_user:\n entry[\"username\"] = flask.g.current_user[\"username\"]\n else:\n entry[\"username\"] = None\n if flask.has_request_context():\n entry[\"remote_addr\"] = str(flask.request.remote_addr)\n entry[\"user_agent\"] = str(flask.request.user_agent)\n else:\n entry[\"remote_addr\"] = None\n entry[\"user_agent\"] = os.path.basename(sys.argv[0])\n flask.g.db.put(entry)",
"def add_logger(log, request):\n request.cls.log = log",
"def add_image(self, in_image):\n image = in_image\n if not isinstance(image, Image):\n image = Image()\n image.parse_record(in_image)\n self.img_lst.append(image)",
"def imageinfo(self, *args, **kwargs):\n return self.logger.log(logging.INFO-1, *args, **kwargs)",
"def register_log(self, log):\n self._log = log",
"def add_log_entry(self, log_entry):\n self.log_entries.append(log_entry)",
"def set_up_logging(pics_out_path):\n\n logging.basicConfig(filename=(pics_out_path + \"archive_screenshot_log.txt\"), filemode='a',\n format='%(asctime)s %(name)s %(levelname)s %(message)s', datefmt='%H:%M:%S',\n level=logging.INFO)",
"def add_log(self, logType: int, message: str) -> None:\n\n if logType not in self.__log_levels:\n logType = self.NOTSET\n\n self.__add_log(logType, message)",
"def add_image(self, image):\n if self.temp_dir is None:\n self.temp_dir = tempfile.mkdtemp()\n if self.img_shape is None:\n self.img_shape = image.shape\n assert self.img_shape == image.shape\n filename = self.get_filename(self.current_index)\n plt.imsave(fname=filename, arr=image)\n self.current_index += 1\n return filename",
"def __init__(self, log_path):\n # create a map for storing LogImg objects\n self.log_img_map = OrderedDict()\n\n # set the path to the log directory\n self.log_path = log_path\n\n # check if log directory already exists or create it\n if not os.path.exists(self.log_path):\n os.makedirs(self.log_path)\n\n # set current training step\n self.train_step = 0",
"def add_image(self, image, mode='normal', state='on'):\n raise NotImplementedError",
"def add_game_log(self, game_log: list) -> None:\n self.game_logs.append(game_log)",
"def addImg(in_dict):\n img = Image(name=in_dict[\"name\"],\n b64str=in_dict[\"b64str\"],\n imgsize=in_dict[\"imgsize\"],\n processed=in_dict[\"processed\"],\n timestamp=in_dict[\"timestamp\"])\n ans = img.save()\n return ans.name",
"def putLog(self, log):\n \n moduleCoordinator.ModuleCoordinator().addEvent(moduleCoordinator.LOG_EVENT, log, self.hash, self.config)",
"def add_image(self, tag, img_tensor, global_step=None, caption=None):\n img_tensor = make_np(img_tensor)\n self.vis.image(img_tensor, opts={'title': tag, 'caption': caption})",
"def __add_logger(self):\n #FIXME: adapt to the settings that are proper for you\n self.__logger = logging.getLogger('lib-autopilot')\n self.__logger.setLevel(logging.INFO)\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n ch.setFormatter(formatter)\n self.__logger.addHandler(ch) \n # TODO: CHANGE from Console to file handler\n # fh = logging.FileHandler('lib-autopilot.log')\n # fh.setLevel(logging.DEBUG)\n #fh.setFormatter(formatter)\n #self.__logger.addHandler(fh)",
"def add_image(self, img, input_or_output, filename, title, description=\"\"):\n if input_or_output.lower() != \"input\" and input_or_output.lower() != \"output\":\n raise Exception(\"input_or_output parameter can only contain 'input' or 'output'\")\n\n image_object = Image(self._name, img, input_or_output, self._img_folder + \"/\" + filename, title, description=\"\")\n image_object.commit()\n s = image_object.to_series()\n self._commit(s)",
"def add_log(self, log, name=None, unit=None):\n log_name = log.descr.replace(' ', '_')\n log_unit = log.units\n if name is not None:\n log_name = name\n if unit is not None:\n log_unit = unit\n if log_name not in self.logs:\n temp_dataframe = pd.DataFrame(\n data={\n 'Depth(m)':log.depth,\n '{}({})'.format(\n log_name, log_unit): log.data})\n self.data_frame = self.data_frame.join(\n temp_dataframe.set_index(\"Depth(m)\"), on=\"Depth(m)\")\n else:\n raise Warning(\"{} already exists in well {}\".format(\n log_name, self.well_name))",
"def add_image(self, image_name, version, image_hash):\n raise NotImplementedError()"
]
| [
"0.69299495",
"0.6862203",
"0.6212648",
"0.619855",
"0.61236614",
"0.6035992",
"0.6000903",
"0.59973085",
"0.59945935",
"0.5994575",
"0.59917325",
"0.5952818",
"0.59483373",
"0.593813",
"0.59179693",
"0.5897211",
"0.5896683",
"0.58311635",
"0.5816518",
"0.58037096",
"0.5770389",
"0.5761629",
"0.5741981",
"0.5710211",
"0.56980443",
"0.5696411",
"0.5672927",
"0.56667566",
"0.5620531",
"0.56028384"
]
| 0.80627126 | 0 |
Get an existing log img object reference | def get_log_img_obj(self, log_img_type):
if log_img_type in self.log_img_map:
return self.log_img_map[log_img_type]
else:
msg = "error: log_img_type '{}' does not exist in the Logger object.\n".format(log_img_type)
msg += "There are currently {} objects saved in the Logger object".format(len(self.log_img_map))
exit(msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getimage(self):",
"def image(self):\n return self._image",
"def get_image ( self, object ):\n return self.image",
"def getImage(cam):\n\n return cam.getImage()",
"def image_reference(self, image_id):\n info = self.image_info[image_id]\n if info[\"source\"] == \"balloon\":\n return info[\"path\"]\n else:\n super(self.__class__, self).image_reference(image_id)",
"def image_reference(self, image_id):\n info = self.image_info[image_id]\n if info[\"source\"] == \"balloon\":\n return info[\"path\"]\n else:\n super(self.__class__, self).image_reference(image_id)",
"def get_image(self):\n return self.image",
"def get_image(self):\n return self.image",
"def get_image(self):\n return self.image",
"def show_image_ref():\n return get_image_ref()",
"def getImage( self ):\n return self.__image;",
"def get_current_image(self):\n raise NotImplementedError",
"def image_reference(self, image_id):\n return self.image_info[image_id]",
"def image_reference(self, image_id):\n info = self.image_info[image_id]\n if info[\"source\"] == \"dsb\":\n return info[\"path\"]\n else:\n super(self.__class__, self).image_reference(image_id)",
"def image_reference(self, image_id):\n info = self.image_info[image_id]\n if info[\"source\"] == \"vesicle\":\n return info[\"path\"]\n else:\n super(self.__class__, self).image_reference(image_id)",
"def get_image(self):\n self.drawer.flush()\n return self.img",
"def get_new_image(self):\n return self.vid_mem_reader.get_latest_image()[0]",
"def imageinfo(self, *args, **kwargs):\n return self.logger.log(logging.INFO-1, *args, **kwargs)",
"def image_reference(self, image_id):\n info = self.image_info[image_id]\n if info[\"source\"] == \"pcb\":\n return info[\"path\"]\n else:\n super(self.__class__, self).image_reference(image_id)",
"def image(self) -> object:\n return self._image",
"def get_image(self):\n return self.camera.getImage()",
"def image(self):\n return self.__getattr__(\"image\", _returnType=\"value\", _location=\"remote\")",
"def get_image_url():",
"def get_image():\n return models.Image.objects.all()[0]",
"def get_img(self, img=None):\n\n if self.img is None: #No image specified to the ROI object\n\n # If no image is saved, check if an image was passed. If so, return the ROI of that image.\n if img is None:\n print('no image provided')\n else:\n return img[self.coords[0]:self.coords[1], self.coords[2]:self.coords[3]]\n else:\n return self.img",
"def add_log_img(self, log_img_type):\n self.log_img_map[log_img_type] = LogImg(self.log_path, log_img_type)",
"def get_image():\n\n # Access the global variable and activate the saving for the last camera's\n # frame\n global _save_image\n _save_image = True",
"def image_reference(self, image_id):\n info = self.image_info[image_id]\n if info[\"source\"].strip().lower() == 'homeobject':\n return info[\"path\"]\n else:\n super(self.__class__, self).image_reference(image_id)",
"def get_image(self, record_id):\n \n for img in self.img_lst:\n if img.get_recordId() == str(record_id):\n return img",
"def image_reference(self, image_id):\n info = self.image_info[image_id]\n return info['path']"
]
| [
"0.65449005",
"0.64309585",
"0.63550127",
"0.62919086",
"0.62624776",
"0.62624776",
"0.62375873",
"0.62375873",
"0.62375873",
"0.6186268",
"0.6169566",
"0.6159949",
"0.61112523",
"0.60648036",
"0.60528",
"0.6019813",
"0.6012564",
"0.5965432",
"0.5955746",
"0.59413415",
"0.59406936",
"0.5926217",
"0.59241563",
"0.59041625",
"0.589351",
"0.5882603",
"0.5867065",
"0.5862002",
"0.5848123",
"0.5823783"
]
| 0.7790943 | 0 |
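The two logger records above (add_log_img and get_log_img_obj) show isolated methods of the same Logger class; the following minimal sketch puts them into a runnable context. The LogImg body, the example type name "reconstruction", and the use of KeyError instead of exit() are assumptions added for illustration, not taken from the original snippets.

import os
from collections import OrderedDict


class LogImg:
    def __init__(self, log_path, log_img_type):
        # Hypothetical stand-in: only the constructor signature
        # (log_path, log_img_type) is taken from the records above.
        self.log_path = log_path
        self.log_img_type = log_img_type


class Logger:
    def __init__(self, log_path):
        self.log_img_map = OrderedDict()
        self.log_path = log_path
        os.makedirs(self.log_path, exist_ok=True)

    def add_log_img(self, log_img_type):
        # Same body as the first record: register a LogImg under its type key.
        self.log_img_map[log_img_type] = LogImg(self.log_path, log_img_type)

    def get_log_img_obj(self, log_img_type):
        # Same lookup as the second record, but raising instead of exit()
        # so it can be reused inside a larger program.
        if log_img_type in self.log_img_map:
            return self.log_img_map[log_img_type]
        raise KeyError("log_img_type '{}' does not exist in the Logger object".format(log_img_type))


logger = Logger("logs")
logger.add_log_img("reconstruction")
assert logger.get_log_img_obj("reconstruction").log_img_type == "reconstruction"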
Check the validity of an AFM number (Greek VAT code). Check if input is a valid AFM number via its check digit (not if it is actually used). Return either True or False. Input should be given as a string. An integer, under certain conditions, could throw an exception. | def check_afm(afm):
if not isinstance(afm, str):
raise TypeError( "check_afm()", "You should feed to this function only strings to avoid exceptions and errors! Aborting." )
if len(afm) == 11 and afm[:2].upper() == "EL":
afm=afm[2:]
if afm.isdigit() == True and len(afm) == 9:
i, sums = 256, 0
for digit in afm[:-1]:
sums += int(digit) * i
i /= 2
checksum = sums % 11
if int(afm[-1]) == int(checksum) or (checksum==10 and afm[-1]=="0"):
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def CheckNumber(userInput):\n try:\n float(userInput)\n return True\n except(ValueError):\n return False",
"def input_validation(input_: str) -> bool:\n return fullmatch('[1-9]', input_) is not None",
"def valid(f):\r\n try:\r\n return not re.search(r'\\b0[0-9]', f) and eval(f) is True\r\n except ArithmeticError:\r\n return False",
"def valid(f):\n try:\n return not re.search(r'\\b0[0-9]', f) and eval(f) is True\n except ArithmeticError:\n return False",
"def valid(f):\n try: \n return not re.search(r'\\b0[0-9]', f) and eval(f) is True\n except ArithmeticError:\n return False",
"def valid(f):\n try: \n return not re.search(r'\\b0[0-9]', f) and eval(f) is True\n except ArithmeticError:\n return False",
"def validate(input):\n regex = re.compile(r'(UL)?\\d{1,' + re.escape(str(barcode_digit_length)) + '}$', flags=re.IGNORECASE)\n if regex.match(input):\n is_valid = True\n else:\n is_valid = False\n return is_valid",
"def validate_account_number(num, should_exist=True):\n if len(num) != 8:\n return False\n elif num[0] == '0':\n return False\n else:\n if should_exist:\n return account_number_exists(num)\n else:\n return not account_number_exists(num)",
"def checkifnumber(self, test_string):\r\n try:\r\n float(test_string)\r\n return(True)\r\n except ValueError:\r\n return(False)",
"def value_error(number):\n try:\n nbr = int(number)\n except ValueError:\n print(\"You can't sum letters, please write a number\")\n verification = False\n else:\n verification = True\n return verification",
"def validate(number):\n number = compact(number)\n if len(number) != 9:\n raise InvalidLength()\n if not isdigits(number[2:]):\n raise InvalidFormat()\n if not isdigits(number[:2]) and not all(x in 'ABCEHKMOPT' for x in number[:2]):\n raise InvalidFormat()\n if number[0] not in '1234567ABCEHKM':\n raise InvalidComponent()\n if number[-1] != calc_check_digit(number):\n raise InvalidChecksum()\n return number",
"def valid(f):\n try:\n return not re.search(r'\\b0[0-9]', f) and eval(f) is True\n # \"\\b\" is a word boundary\n \"\"\"We need to exclude numbers starting with zero,\n as these are interpretted as base8 (octal). This in\n turn could cause interpretation errors, and exceptions\n (for example 09 is not octal and will throw and exception)\"\"\"\n except (ArithmeticError, SyntaxError):\n return False",
"def validate_integer(self, p_str):\n # p_str is str\n if re.search(r\"^[1-9]\\d*$\", p_str) or p_str == \"\":\n return True\n self.frame.bell() # alert wrong input\n return False",
"def check_if_armstrong_number(number):\n sum = 0\n number_as_string = str(number)\n digits_number = len(number_as_string)\n for character in number_as_string:\n sum += int(character) ** digits_number\n\n return sum == number",
"def check_ean(eancode):\n if not eancode:\n return True\n if len(eancode) <> 13:\n return False\n try:\n int(eancode)\n except:\n return False\n return ean_checksum(eancode) == int(eancode[-1])",
"def validate_number(input_data):\n if input_data.startswith('-'):\n return input_data.i\n else:\n return False",
"def is_number(c):\n return '0' <= c <= '9'",
"def verify_valid_num(self, user_num):\r\n if not self.range_between_0_and_9(user_num):\r\n print(\"\\033[1;31mJust what do you think you're doing, Dave? Choose a number between 0 and 8\\033[0m\")\r\n return False\r\n\r\n return True",
"def is_valid_number(self, text, widget):\n if len(text) > 2:\n return False\n for char in text:\n if not char.isdigit():\n return False\n if text != '' and int(text) == 0:\n return False\n return True",
"def __checkInput(self, var):\n try:\n int(var)\n\n except:\n return False\n\n else:\n return True",
"def is_valid_integer(input_string):\n\n assert input_string is not None\n try:\n input_string = int(input_string)\n return True\n except ValueError:\n return False",
"def check_for_integer(number):\r\n \r\n try:\r\n int(number) \r\n return True\r\n except ValueError:\r\n return False",
"def valid(formula):\r\n\r\n try:\r\n return not re.search(r'\\b0[0-9]', formula) and eval((formula) is True\r\n #except ArithmeticError:\r\n #return False\r\n except:\r\n return False",
"def is_valid_gender_number(gender_number: int) -> bool:\n if gender_number in range(1, 7):\n return True\n else:\n return False",
"def CheckZipCode(zipcode):\n # see if there are enough digits\n if (len(zipcode) >= 5):\n # check if numerical\n try:\n int(zipcode)\n return True\n except:\n return False\n else:\n return False",
"def is_armstrong_number(number: int) -> bool:\n\n str_number = f\"{number}\"\n return sum(pow(int(x), len(str_number)) for x in str_number) == number",
"def check_number(number):\n digits = str(number)\n if len(digits) != 6:\n return False\n\n double = False\n last = '0'\n for digit in digits:\n if digit < last:\n return False\n\n if digit == last:\n double = True\n\n last = digit\n\n return double",
"def phone_number_validator(phone_number):\n if len(phone_number) != 10:\n return False\n if phone_number[0] == '0':\n return False\n try:\n int(phone_number)\n except ValueError:\n return False\n return True",
"def is_valid_gender_number(gender_number: int) -> bool:\n if gender_number in range(1, 7):\n return True\n return False",
"def verify_format(isbn):\n\n return len(isbn) == 10 and (isbn[-1] == \"X\" or isbn[-1].isdigit()) \\\n and all(digit.isdigit() for digit in isbn[:-1])"
]
| [
"0.6608858",
"0.6412995",
"0.64045024",
"0.6399352",
"0.63957435",
"0.63957435",
"0.632859",
"0.62783635",
"0.6233358",
"0.6208753",
"0.6205611",
"0.62022656",
"0.61651295",
"0.60816836",
"0.6072849",
"0.60379183",
"0.59668356",
"0.5966686",
"0.5965664",
"0.5954444",
"0.5940889",
"0.59292024",
"0.59232366",
"0.59102875",
"0.5905255",
"0.5898552",
"0.58975154",
"0.5888558",
"0.58738005",
"0.5872405"
]
| 0.7674553 | 0 |
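The AFM record above implements the standard Greek check-digit rule: the first eight digits are weighted 256, 128, ..., 2, the weighted sum is taken modulo 11, and a remainder of 10 maps to check digit 0. A small self-contained sketch of that arithmetic is given below; the sample prefix is made up for illustration and is not a real VAT number.

def afm_check_digit(first_eight: str) -> int:
    # Expected ninth digit for an eight-digit AFM prefix.
    total = sum(int(d) * 2 ** (8 - i) for i, d in enumerate(first_eight))
    remainder = total % 11
    # A remainder of 10 maps to check digit 0, matching the special case
    # (checksum == 10 and afm[-1] == "0") handled in check_afm above.
    return remainder % 10


prefix = "12345678"
print(prefix + str(afm_check_digit(prefix)))  # prints a syntactically valid 9-digit AFM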
This method trains the clustering network from scratch if there is no pretrained autoencoder; otherwise it loads the existing pretrained autoencoder to retrieve the latent representation of the images and trains the final clustering layer of the convolutional neural network. | def train(args):
dataset = args.dataset
ae_mode = args.mode
train_input, train_labels = load_data(dataset, mode=ae_mode)
num_clusters = len(np.unique(train_labels))
data_initialization = dataset_parameters[dataset]['data_initialization']
with_attention = args.attention
interval_updation = dataset_parameters[dataset][
'interval_updation'] if args.interval_updation is None else args.interval_updation
temperature = 1.
auto_encoder_optimizer = SGD(lr=args.learning_rate, momentum=0.9)
if ae_mode == "ae":
if train_input.shape[-1] > 1024:
print("Shape of training data before transformation: {}".format(train_input.shape))
train_input = PCA(n_components=728).fit_transform(train_input)
print("Shape of training data after transformation: {}".format(train_input.shape))
dimensions = [train_input.shape[-1], 500, 500, 2000,
len(np.unique(train_labels))] if args.include_layer is None else [train_input.shape[-1], 500, 500,
2000, args.include_layer,
len(np.unique(train_labels))]
else:
dimensions = [32, 64]
model = ClusteringNetwork(dimensions=dimensions, temperature=temperature, data_initialization=data_initialization,
num_clusters=num_clusters, output_directory=args.output_directory, dataset=dataset,
ae_mode=ae_mode, with_attention=with_attention)
if args.ae_weights:
model.auto_encoder.load_weights(args.ae_weights)
else:
model.train_auto_encoder(data=train_input, labels=train_labels, train_steps=args.ae_iterations,
batch_size=args.batch_size, output_directory=args.output_directory,
optimizer=auto_encoder_optimizer)
model.model.summary()
start_time = time.time()
model.compile(optimizer=SGD(0.01, 0.9), loss='kld')
p_labels = model.train_cluster_network(data=train_input, labels=train_labels,
tolerance_threshold=args.tolerance_threshold,
iterations=args.cluster_iterations, batch_size=args.batch_size,
interval_updation=interval_updation)
stop_time = time.time()
print("Accuracy: {}".format(EvaluatePerformance.accuracy(train_labels, p_labels)))
print("Time taken to finish the training: {}s".format((stop_time - start_time))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def train():\n init_distributed_mode(args)\n save_dir = TRAIN_CFG['save_dir']\n if not os.path.exists(save_dir) and torch.distributed.get_rank() == 0:\n os.mkdir(save_dir)\n kwargs = {}\n # If augmenting data, disable Pytorch's own augmentataion\n # This has to be done manually as augmentation is embedded\n # refer : https://github.com/pytorch/vision/issues/2263\n base_path = DATASET_CFG['base_path']\n train_set = DATASET_CFG['train']\n valid_set = DATASET_CFG['valid']\n dset_mean_std = DATASET_CFG['mean_std']\n if dset_mean_std is not None:\n dataset_mean = [i/255. for i in dset_mean_std[0]]\n dataset_std = [i/255. for i in dset_mean_std[1]]\n else:\n dataset_mean, dataset_std = compute_mean_std(base_path, train_set)\n kwargs['image_mean'] = dataset_mean\n kwargs['image_std'] = dataset_std\n kwargs['min_size'] = DATASET_CFG['min_size']\n kwargs['max_size'] = DATASET_CFG['max_size']\n kwargs['box_detections_per_img'] = 300 # increase max det to max val in our benchmark\n\n # Set benchmark related parameters\n if benchmark == 'ScutHead':\n combined_cfg = {**cfg, **sh_anchors}\n elif benchmark == 'CrowdHuman':\n combined_cfg = {**cfg, **ch_anchors}\n elif benchmark == 'Combined':\n combined_cfg = {**cfg, **combined_anchors}\n else:\n raise ValueError(\"New dataset has to be registered\")\n\n # Create Model\n default_filter = False\n model = customRCNN(cfg=combined_cfg,\n use_deform=NET_CFG['use_deform'],\n ohem=NET_CFG['ohem'],\n context=NET_CFG['context'],\n custom_sampling=NET_CFG['custom_sampling'],\n default_filter=default_filter,\n soft_nms=NET_CFG['soft_nms'],\n upscale_rpn=NET_CFG['upscale_rpn'],\n median_anchors=NET_CFG['median_anchors'],\n **kwargs).cuda() \n model_without_ddp = model\n if args.distributed:\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu],\n find_unused_parameters=True)\n model_without_ddp = model.module\n\n # Create Optimizer\n params = [p for p in model.parameters() if p.requires_grad]\n optimizer = torch.optim.SGD(params, lr=HYP_CFG['learning_rate'],\n momentum=HYP_CFG['learning_rate'],\n weight_decay=HYP_CFG['weight_decay'])\n\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,\n milestones=TRAIN_CFG['milestones'],\n gamma=HYP_CFG['gamma'])\n # Restore from checkpoint\n pt_model = TRAIN_CFG['pretrained_model']\n if pt_model:\n model_without_ddp = restore_network(model_without_ddp, pt_model,\n only_backbone=TRAIN_CFG['only_backbone'])\n \n # Create training and vaid dataset\n dataset_param = {'mean': dataset_mean, 'std':dataset_std,\n 'shape':(kwargs['min_size'], kwargs['max_size'])}\n batch_size = HYP_CFG['batch_size']\n train_dataset = HeadDataset(train_set,\n base_path,\n dataset_param,\n train=True)\n val_dataset = HeadDataset(valid_set,\n base_path,\n dataset_param,\n train=False)\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n train_batch_sampler = torch.utils.data.BatchSampler(train_sampler,\n batch_size,\n drop_last=True)\n train_data_loader = torch.utils.data.DataLoader(train_dataset,\n batch_sampler=train_batch_sampler,\n num_workers=args.num_workers,\n collate_fn=collate_fn)\n\n val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)\n val_batch_sampler = torch.utils.data.BatchSampler(val_sampler,\n batch_size,\n drop_last=True)\n val_data_loader = torch.utils.data.DataLoader(val_dataset,\n batch_sampler=val_batch_sampler,\n num_workers=args.num_workers,\n collate_fn=collate_fn)\n # Fastforward the LR decayer\n start_epoch = TRAIN_CFG['start_epoch']\n 
max_epoch = TRAIN_CFG['max_epoch']\n for _ in range(0, -1):\n scheduler.step()\n\n # Start training\n print(\"======= Training for \" + str(max_epoch) + \"===========\")\n for epoch in range(start_epoch, int(max_epoch) + 1):\n if epoch % TRAIN_CFG['eval_every'] == 0:\n print(\"========= Evaluating Model ==========\")\n result_dict = evaluate(model, val_data_loader, benchmark=benchmark)\n if torch.distributed.get_rank() == 0:\n logging.info('Eval score at {0} epoch is {1}'.format(str(epoch),\n result_dict))\n \n train_one_epoch(model, optimizer, train_data_loader,\n device, epoch, print_freq=1000)\n scheduler.step()\n if torch.distributed.get_rank() == 0:\n print(\"Saving model\")\n torch.save(model.state_dict(), osp.join(save_dir,\n TRAIN_CFG['exp_name'] + '_epoch_' + str(epoch) + '.pth'))",
"def __init__(self, embed_size=256, finetune=False, cnn_type='resnet50',\n use_abs=False, no_imgnorm=False):\n super(EncoderImageFull, self).__init__()\n self.embed_size = embed_size\n self.no_imgnorm = no_imgnorm\n self.use_abs = use_abs\n\n # Load a pre-trained model\n model = get_model(name=cnn_type, num_classes=5607)\n model = torch.nn.DataParallel(model)\n model.to(\"cuda\")\n checkpoint = torch.load(\"/mnt/data2/betty/webvision_train/results/resnet50/5000classes_onemonth/model_best.tar\")\n model.load_state_dict(checkpoint['state_dict'])\n \n print(\"Successfully load the saved model at model_best.tar\") \n\n self.cnn = model\n\n\n # For efficient memory usage.\n for param in self.cnn.parameters():\n param.requires_grad = False\n\n # Replace the last fully connected layer of CNN with a new one\n \n if cnn_type.startswith('resnet'):\n self.fc = nn.Linear(self.cnn.module.fc.in_features, embed_size)\n self.cnn.module.fc = nn.Sequential()\n else:\n print(\"error in chosing the architecture\")\n return\n\n self.init_weights()",
"def init():\n ########################\n # OPTIONS\n ########################\n # Debugging tools\n global TIMER # displays time of every major step\n TIMER = True\n global MONITOR # displays monitoring infos\n MONITOR = False\n \n global directories\n directories = {'1Face': 'data/1Face/',\n '2Faces': 'data/2Faces/',\n '3Faces': 'data/3Faces/',\n 'test': 'data/test/'}\n \n # Opt. swicthes\n global maxfinder # to find the max dim. amongst the pictures\n maxfinder = False\n global ML_mode\n ML_mode = {'CNN_Train': False,\n 'CNN_Pred' : True,\n 'Sampler': True}\n \n # Global variables\n global num_pics\n num_pics = {'1Face': 0,\n '2Faces': 0,\n '3Faces': 0}\n global labels\n labels = {'1Face': 0,\n '2Faces': 1,\n '3Faces': 2}\n global num_data\n num_data = 0\n global splitsize # Fraction of data to build the training set\n splitsize = 0.7 \n global maxheight # Resize the pictures to a power of 2 for CNN (2^8 here)\n maxheight = 128\n global maxwidth\n maxwidth = 128\n global TreshEdge # Number of consecutive black pixels to define an edge\n TreshEdge = 2\n global TreshFace # Number of white pixels to define a face (or large edge)\n TreshFace = maxheight/16",
"def train_one_epoch(self):\n print('Training......')\n\n # set mode train\n self.network.train()\n\n # prepare data\n train_loss = 0\n transform = transforms.Compose([Rescale(params.rescale_size),\n RandomCrop(params.image_size),\n RandomHorizontalFlip(),\n ToTensor()\n ])\n\n\n\n dataset = Cityscapes(params.dataset_root, mode='train', transforms = transform)\n\n train_loader = DataLoader(dataset,\n batch_size=params.train_batch,\n shuffle=params.shuffle,\n num_workers=params.dataloader_workers)\n \n train_size = 1896\n if train_size % self.params.train_batch != 0:\n total_batch = train_size // self.params.train_batch + 1\n else:\n total_batch = train_size // self.params.train_batch\n recal = 0\n precision = 0\n F_one = 0\n IOU = 0\n accuracy_new = 0 \n # train through dataset\n for batch_idx, batch in enumerate(train_loader):\n self.pb.click(batch_idx, total_batch)\n image, label = batch['image'], batch['label']\n image_cuda, label_cuda = image.cuda(), label.cuda()\n\n # checkpoint split\n if self.params.should_split:\n image_cuda.requires_grad_()\n out = checkpoint_sequential(self.network, self.params.split, image_cuda)\n else:\n out = self.network(image_cuda)\n\n\n loss = self.loss_fn(out, label_cuda)\n \n #display_image(out, label_cuda)\n TP, FP, TN, FN = confusion(out, label_cuda)\n recal = recal+TP\n precision = precision+FP\n F_one = F_one + TN\n IOU = IOU+ FN \n accuracy_final = accuracy(out, label_cuda)\n accuracy_new = accuracy_new + accuracy_final\n\n # optimize\n self.opt.zero_grad()\n loss.backward()\n self.opt.step()\n\n # accumulate\n train_loss += loss.item()\n\n # record first loss\n if self.train_loss == []:\n self.train_loss.append(train_loss)\n self.summary_writer.add_scalar('loss/train_loss', train_loss, 0)\n \n print(\"\\t\")\n print(recal/total_batch, precision/ total_batch, F_one/ total_batch, IOU/ total_batch)\n print(accuracy_new/total_batch)\n \n self.pb.close()\n train_loss /= total_batch\n self.train_loss.append(train_loss)\n\n # add to summary\n self.summary_writer.add_scalar('loss/train_loss', train_loss, self.epoch)",
"def train_one_epoch_Image_display(self):\n print('Training......')\n\n # set mode train\n self.network.train()\n\n # prepare data\n train_loss = 0\n transform = transforms.Compose([Rescale(params.rescale_size),\n RandomCrop(params.image_size),\n RandomHorizontalFlip(),\n ToTensor()\n ])\n\n\n\n dataset = Cityscapes(params.dataset_root, mode='train', transforms = transform)\n\n train_loader = DataLoader(dataset,\n batch_size=params.train_batch,\n shuffle=params.shuffle,\n num_workers=params.dataloader_workers)\n \n train_size = 1896\n if train_size % self.params.train_batch != 0:\n total_batch = train_size // self.params.train_batch + 1\n else:\n total_batch = train_size // self.params.train_batch\n recal = 0\n precision = 0\n F_one = 0\n IOU = 0\n accuracy_new = 0 \n # train through dataset\n for batch_idx, batch in enumerate(train_loader):\n self.pb.click(batch_idx, total_batch)\n image, label = batch['image'], batch['label']\n image_cuda, label_cuda = image.cuda(), label.cuda()\n\n # checkpoint split\n if self.params.should_split:\n image_cuda.requires_grad_()\n out = checkpoint_sequential(self.network, self.params.split, image_cuda)\n else:\n out = self.network(image_cuda)\n \n pred = image_cuda\n pred = pred.to(torch.device(\"cpu\"))\n pred = pred.detach()\n img_grid = pred[0]\n #img_grid = torchvision.utils.make_grid(out) \n img_grid = img_grid.numpy().transpose(1, 2, 0)*255\n cv2.imwrite(\"/content/drive/My Drive/Train_images/original%d.jpg\" % batch_idx, img_grid)\n\n \n loss = self.loss_fn(out, label_cuda)\n \n #display_image(out, label_cuda)\n TP, FP, TN, FN = confusion(out, label_cuda)\n recal = recal+TP\n precision = precision+FP\n F_one = F_one + TN\n IOU = IOU+ FN \n _,predict = torch.max(out.data,1)\n predict = predict.to(torch.device(\"cpu\"))\n predict = predict.detach()\n img = predict[0]\n img = img.numpy()*255\n #img_grid = torchvision.utils.make_grid(out) \n cv2.imwrite(\"/content/drive/My Drive/Train_images/predict_label%d.png\" % batch_idx, img)\n label = label_cuda.to(torch.device(\"cpu\"))\n label = label.detach()\n label = label[0].numpy()*255\n cv2.imwrite(\"/content/drive/My Drive/Train_images/original_label%d.png\" % batch_idx, label)\n\n \n \n\n accuracy_final = accuracy(out, label_cuda)\n accuracy_new = accuracy_new + accuracy_final\n\n \n print(\"\\t\")\n print(recal/total_batch, precision/ total_batch, F_one/ total_batch, IOU/ total_batch)\n print(accuracy_new/total_batch)",
"def train_start(self):\n self.module.img_enc.train()\n self.module.txt_enc.train()",
"def ae_train(self, net, ae_optimizer, train_loader, val_loader, name='Net'):\n print(f'Start {name} Auto Encoder Training')\n ae_iter = 0\n for epoch in range(self.ae_epoch):\n running_loss = 0.0\n for i, data in enumerate(train_loader, 0):\n inputs, _ = data\n inputs = inputs.to(self.device)\n decoded = net(inputs)\n ae_loss = self.ae_criterion(decoded, inputs)\n ae_optimizer.zero_grad()\n ae_loss.backward()\n ae_optimizer.step()\n running_loss += ae_loss.item()\n\n ae_iter += 1\n\n if i % 100 == 99:\n print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 100))\n if self.writer is not None:\n self.writer.add_scalar(f'Loss/AutoEncoder-{name}', running_loss, ae_iter)\n running_loss = 0.0\n # Reconstruct Image\n dataiter = iter(val_loader)\n images, _ = dataiter.next()\n self.reconstruct_image(net, images, name)",
"def train(self):\n self.ae_train(self.net0, self.ae0_optimizer, self.train0_loader, self.val_loader, name='Net0')\n self.ae_train(self.net1, self.ae1_optimizer, self.train1_loader, self.val_loader, name='Net1')\n self.ae_train(self.net2, self.ae2_optimizer, self.train2_loader, self.val_loader, name='Net2')\n\n self.classifier_train(self.net0, self.optimizer0, self.train0_loader, self.val_loader, name='Net0')\n self.classifier_train(self.net1, self.optimizer1, self.train1_loader, self.val_loader, name='Net1')\n self.classifier_train(self.net2, self.optimizer2, self.train2_loader, self.val_loader, name='Net2')",
"def get_cluster_centers(args=None, autoencoder=None, cluster_number=2, dataloader_list=None,\n file_path=None, save_name=None, device='cpu'):\n\n if file_path: # Load centers from file and return them on device\n print(\"Loading pretrained KMeans centroids\")\n centers = np.loadtxt(file_path)\n cluster_centers = torch.tensor(\n centers, dtype=torch.float, requires_grad=True).to(device)\n else: # Train Kmeans and generate centers\n # https://github.com/vlukiyanov/pt-dec/blob/11b30553858c1c146a5ee0b696c768ab5244f0ff/ptdec/model.py#L74-L92\n print(\"Training KMeans for centroids\")\n kmeans = KMeans(n_clusters=cluster_number,\n n_init=args.cluster_n_init, random_state=args.seed, max_iter=args.cluster_max_step)\n autoencoder.eval()\n features = []\n actual = []\n\n # merge dataloaders\n concat_dataset = torch.utils.data.ConcatDataset([x.dataset for x in dataloader_list])\n\n dataloader = torch.utils.data.DataLoader(\n dataset=concat_dataset,\n batch_size=args.encoder_bs\n )\n\n # form initial cluster centres\n data_iterator = tqdm(dataloader,\n leave=True,\n unit=\"batch\",\n disable=False,\n )\n print(\"Generating features for kmeans\")\n\n with torch.no_grad():\n # Loop through data and generate features from the encoder. \n for index, batch in enumerate(data_iterator):\n if (isinstance(batch, tuple) or isinstance(batch, list)) and len(batch) == 2:\n # if we have a prediction label, separate it to actual\n batch, value = batch\n actual.append(value)\n # Assuming we use the encoder from module.py\n if args.encoder_type == 'vae':\n feature = autoencoder(batch.to(device))\n elif args.encoder_type == 'resnet50':\n feature = list()\n z = autoencoder(batch.to(device)) # [:,:args.dfc_hidden_dim]\n\n feature.append(z)\n\n features.append(feature[0].detach().cpu())\n print(\"Training samples:\", len(features))\n\n actual = torch.cat(actual).long() # Save labels as long in torch tensor.\n samples = torch.cat(features)\n print(f\"Data shape {samples.shape}\")\n print(f\"Labels shape {actual.shape}\")\n print(\"Training...\")\n predicted = kmeans.fit_predict(samples.numpy(), actual) # predict centers from features.\n _, accuracy = cluster_accuracy(predicted, actual.cpu().numpy()) # Compute accuracy of predictions\n cluster_centers = kmeans.cluster_centers_ # define centers\n\n if save_name: # If param. save_name then save the centers.\n filepath = args.log_dir + save_name + \".txt\"\n if not os.path.exists(args.log_dir):\n os.mkdir(args.log_dir)\n print(\"Saving clusters to:\", filepath)\n np.savetxt(filepath, cluster_centers)\n if not (wandb.run is None): # check if wandb is running\n wandb.run.summary[f\"{save_name}_accuracy\"] = accuracy\n\n cluster_centers = torch.tensor( # Convert centers to tensor and send to device.\n cluster_centers, dtype=torch.float, requires_grad=True\n ).to(device)\n print(f\"Training KMeans completed, accuracy: {accuracy:.2f}\")\n return cluster_centers",
"def pretrain(args, encoder, classifier, data_loader, optimizer, scheduler):\n\n # setup criterion and optimizer\n # optimizer = optim.Adam(list(encoder.parameters()) + list(classifier.parameters()),\n # lr=param.c_learning_rate)\n\n # set train state for Dropout and BN layers\n encoder.to(DEVICE)\n classifier.to(DEVICE)\n encoder.train()\n classifier.train()\n for epoch in range(args.pre_epochs):\n tr_loss = 0\n nb_tr_examples, nb_tr_steps = 0, 0\n for step, batch in enumerate(tqdm(data_loader, desc=\"Iteration\")):\n batch = tuple(t.to(DEVICE) for t in batch)\n input_ids, visual, acoustic, input_mask, segment_ids, label_ids = batch\n visual = torch.squeeze(visual, 1)\n acoustic = torch.squeeze(acoustic, 1)\n feat = encoder(\n input_ids,\n visual,\n acoustic,\n token_type_ids=segment_ids,\n attention_mask=input_mask,\n labels=None,\n )\n feat.to(DEVICE)\n outputs = classifier(feat)\n outputs.to(DEVICE)\n # logits = outputs[0]\n logits = outputs\n # print(\"mine\", logits.shape, label_ids.shape)\n #print(logits, label_ids)\n\n #loss_fct = MSELoss()\n #loss = loss_fct(logits.view(-1), label_ids.view(-1))\n #print('logits.view(-1) : ' + str(logits.view(-1)) + \"label_ids.view(-1) : \" + str(label_ids.view(-1)) + \"\\n\")\n\n CELoss = nn.CrossEntropyLoss()\n loss = CELoss(logits, label_ids.view(-1).long())\n\n if args.gradient_accumulation_step > 1:\n loss = loss / args.gradient_accumulation_step\n\n loss.backward()\n\n tr_loss += loss.item()\n nb_tr_steps += 1\n if (step + 1) % args.gradient_accumulation_step == 0:\n optimizer.step()\n scheduler.step()\n optimizer.zero_grad()\n print(\"Loss so far: \" +str(tr_loss/nb_tr_steps)+\"\\n\")\n\n # save final model\n # save_model(args, encoder, param.src_encoder_path)\n # save_model(args, classifier, param.src_classifier_path)\n\n return encoder, classifier",
"def train_miniautoencoder(self, train_X, depth):\n # Initialize mininet\n network = ann.ANN(train_X,\n hidden_depths=[depth],\n eta=self.eta,\n lamb=self.lamb,\n batch_size=self.batch_size,\n activation_type=self.activation_type)\n # Fit net\n network.fit(self.epochs)\n # Feedforward data array and obtain encoded data array\n data_encoded = encode(train_X, network)\n # Store in class container\n self.data_container.append(data_encoded)\n self.weights_container.append(network.weights)\n self.bias_container.append(network.biases)",
"def run_net(self,\n pre_trained_chckpnt_dir ='' #for resuming training, load the model from this directory\n ):\n\n _rd = _read_data(data=self.data)\n\n self.alpha_coeff=1\n\n #read path of the images for train, test, and validation\n train_CTs, train_GTVs, train_Torso, train_penalize, train_surface,\\\n validation_CTs, validation_GTVs, validation_Torso, validation_penalize, validation_surface,\\\n test_CTs, test_GTVs, test_Torso, test_penalize,test_surface=_rd.read_data_path(fold=self.fold)\n self.img_width = self.img_width\n self.img_height = self.img_height\n # ======================================\n #validation instances\n bunch_of_images_no=20\n _image_class_vl = image_class(validation_CTs, validation_GTVs, validation_Torso,validation_penalize,validation_surface\n , bunch_of_images_no=bunch_of_images_no, is_training=0,\n patch_window=self.patch_window)\n _patch_extractor_thread_vl = _patch_extractor_thread(_image_class=_image_class_vl,\n sample_no=self.sample_no, patch_window=self.patch_window,\n GTV_patchs_size=self.GTV_patchs_size,\n tumor_percent=self.tumor_percent,\n img_no=bunch_of_images_no,\n mutex=settings.mutex,is_training=0,vl_sample_no=self.validation_samples\n )\n _fill_thread_vl = fill_thread(validation_CTs,\n validation_GTVs,\n validation_Torso,\n validation_penalize,\n validation_surface,\n _image_class_vl,\n sample_no=self.sample_no,\n total_sample_no=self.validation_samples,\n patch_window=self.patch_window,\n GTV_patchs_size=self.GTV_patchs_size,\n img_width=self.img_width, img_height=self.img_height,\n mutex=settings.mutex,\n tumor_percent=self.tumor_percent,\n is_training=0,\n patch_extractor=_patch_extractor_thread_vl,\n fold=self.fold)\n\n\n _fill_thread_vl.start()\n _patch_extractor_thread_vl.start()\n _read_thread_vl = read_thread(_fill_thread_vl, mutex=settings.mutex,\n validation_sample_no=self.validation_samples, is_training=0)\n _read_thread_vl.start()\n # ======================================\n #training instances\n bunch_of_images_no = 24\n _image_class = image_class(train_CTs, train_GTVs, train_Torso,train_penalize,train_surface\n , bunch_of_images_no=bunch_of_images_no,is_training=1,patch_window=self.patch_window\n )\n patch_extractor_thread = _patch_extractor_thread(_image_class=_image_class,\n sample_no=240, patch_window=self.patch_window,\n GTV_patchs_size=self.GTV_patchs_size,\n tumor_percent=self.tumor_percent,\n img_no=bunch_of_images_no,\n mutex=settings.mutex,is_training=1)\n _fill_thread = fill_thread(train_CTs, train_GTVs, train_Torso,train_penalize,train_surface,\n _image_class,\n sample_no=self.sample_no,total_sample_no=self.sample_no,\n patch_window=self.patch_window,\n GTV_patchs_size=self.GTV_patchs_size,\n img_width=self.img_width,\n img_height=self.img_height,mutex=settings.mutex,\n tumor_percent=self.tumor_percent,\n is_training=1,\n patch_extractor=patch_extractor_thread,\n fold=self.fold)\n\n _fill_thread.start()\n patch_extractor_thread.start()\n\n _read_thread = read_thread(_fill_thread,mutex=settings.mutex,is_training=1)\n _read_thread.start()\n # ======================================\n\n image = tf.placeholder(tf.float32, shape=[None, None, None, None, 1])\n label = tf.placeholder(tf.float32, shape=[None, None, None, None, 2])\n penalize = tf.placeholder(tf.float32, shape=[None, None, None, None,1])\n surf_map = tf.placeholder(tf.float32, shape=[None, None, None, None,1])\n loss_coef = tf.placeholder(tf.float32, shape=[None, 2]) # shape: batchno * 2 values for each class\n alpha = tf.placeholder(tf.float32, name='alpha') # 
background coeff\n beta = tf.placeholder(tf.float32, name='beta') # tumor coeff\n\n ave_vali_acc=tf.placeholder(tf.float32)\n ave_loss_vali=tf.placeholder(tf.float32)\n ave_dsc_vali=tf.placeholder(tf.float32)\n\n dropout=tf.placeholder(tf.float32,name='dropout')\n is_training = tf.placeholder(tf.bool, name='is_training')\n is_training_bn = tf.placeholder(tf.bool, name='is_training_bn')\n dense_net_dim = tf.placeholder(tf.int32, name='dense_net_dim')\n\n _dn = _densenet_unet(self.densnet_unet_config,self.compression_coefficient,self.growth_rate) #create object\n y=_dn.dens_net(image=image,is_training=is_training,dropout_rate1=0,dropout_rate2=0,dim=dense_net_dim,is_training_bn=is_training_bn)\n # y = _dn.vgg(image)\n\n y_dirX = ((y[:, int(self.GTV_patchs_size / 2), :, :, 0, np.newaxis]))\n label_dirX = (label[:, int(self.GTV_patchs_size / 2), :, :, 0, np.newaxis])\n penalize_dirX = (penalize[:,16,:,:,0,np.newaxis])\n surf_map_dirX = (surf_map[:,16,:,:,0,np.newaxis])\n image_dirX = ((image[:, int(self.patch_window / 2), :, :, 0, np.newaxis]))\n\n show_img=tf.nn.softmax(y)[:, int(self.GTV_patchs_size / 2) , :, :, 0, np.newaxis]\n tf.summary.image('outprunut',show_img , 3)\n tf.summary.image('output without softmax',y_dirX ,3)\n tf.summary.image('groundtruth', label_dirX,3)\n tf.summary.image('penalize', penalize_dirX,3)\n tf.summary.image('surf_map', surf_map_dirX,3)\n tf.summary.image('image',image_dirX ,3)\n\n print('*****************************************')\n print('*****************************************')\n print('*****************************************')\n sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n devices = sess.list_devices()\n print(devices)\n\n print(device_lib.list_local_devices())\n print('*****************************************')\n print('*****************************************')\n print('*****************************************')\n\n train_writer = tf.summary.FileWriter(self.LOGDIR + '/train' ,graph=tf.get_default_graph())\n validation_writer = tf.summary.FileWriter(self.LOGDIR + '/validation' , graph=sess.graph)\n\n extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n saver=tf.train.Saver(tf.global_variables(), max_to_keep=1000)\n\n\n\n #define the loss function\n with tf.name_scope('cost'):\n penalize_weight=0\n [ penalized_loss,\n soft_dice_coef,logt,lbl]=self.loss_instance.dice_plus_distance_penalize(logits=y, labels=label,penalize=penalize)\n surface_loss= self.loss_instance.surface_loss(logits=y, labels=label, surf_map=surf_map)\n cost = tf.reduce_mean((1.0 - soft_dice_coef[1])+penalize_weight*penalized_loss+surface_loss, name=\"cost\")\n\n #Setup the Tensorboard plots\n tf.summary.scalar(\"cost\", cost)\n f1_measure = self.loss_instance.f1_measure(logits=y, labels=label)\n tf.summary.scalar(\"dice_bakground\", f1_measure[0])\n tf.summary.scalar(\"dice_tumor\", f1_measure[1])\n\n pwc = self.loss_instance.PWC(y, label)\n tf.summary.scalar(\"pwc_bakground\", pwc[0])\n tf.summary.scalar(\"pwc_tumor\", pwc[1])\n\n recall = self.loss_instance.Recall(y, label)\n tf.summary.scalar(\"recall_bakground\", recall[0])\n tf.summary.scalar(\"recall_tumor\", recall[1])\n\n precision = self.loss_instance.Precision(y, label)\n tf.summary.scalar(\"precision_bakground\", precision[0])\n tf.summary.scalar(\"precision_tumor\", precision[1])\n\n fpr = self.loss_instance.FPR(y, label)\n tf.summary.scalar(\"FPR_bakground\", fpr[0])\n tf.summary.scalar(\"FPR_tumor\", fpr[1])\n\n fnr = self.loss_instance.FNR(y, label)\n 
tf.summary.scalar(\"FNR_bakground\", fnr[0])\n tf.summary.scalar(\"FNR_tumor\", fnr[1])\n\n extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(extra_update_ops):\n optimizer_tmp = tf.train.AdamOptimizer(self.learning_rate,epsilon=0.001)\n optimizer = optimizer_tmp.minimize(cost)\n\n with tf.name_scope('validation'):\n average_validation_accuracy=ave_vali_acc\n average_validation_loss=ave_loss_vali\n average_dsc_loss=ave_dsc_vali\n tf.summary.scalar(\"average_validation_accuracy\",average_validation_accuracy)\n tf.summary.scalar(\"average_validation_loss\",average_validation_loss)\n tf.summary.scalar(\"average_dsc_loss\",average_dsc_loss)\n\n with tf.name_scope('accuracy'):\n accuracy=self.loss_instance.accuracy_fn(y, label)\n\n tf.summary.scalar(\"accuracy\", accuracy)\n\n sess.run(tf.global_variables_initializer())\n logging.debug('total number of variables %s' % (\n np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])))\n summ=tf.summary.merge_all()\n\n point = 0 # starting point, starts from a value > 0 if training is resumed\n itr1 = 0 # number of iterations\n if len(pre_trained_chckpnt_dir):\n ckpt = tf.train.get_checkpoint_state(pre_trained_chckpnt_dir)\n saver.restore(sess, ckpt.model_checkpoint_path)\n point=int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])\n itr1=point\n\n\n # patch_radius = 49\n '''loop for epochs'''\n\n for epoch in range(self.total_epochs):\n while self.no_sample_per_each_itr*int(point/self.no_sample_per_each_itr)<self.sample_no:\n print('0')\n print(\"epoch #: %d\" %(epoch))\n startTime = time.time()\n step = 0\n self.beta_coeff=1+1 * np.exp(-point/2000)\n # =============start validation================\n if itr1 % self.display_validation_step ==0:\n '''Validation: '''\n loss_validation = 0\n acc_validation = 0\n validation_step = 0\n dsc_validation=0\n while (validation_step * self.batch_no_validation <settings.validation_totalimg_patch):\n [validation_CT_image, validation_GTV_image,validation_Penalize_patch,validation_Surface_patch] = _image_class_vl.return_patches_validation( validation_step * self.batch_no_validation, (validation_step + 1) *self.batch_no_validation)\n if (len(validation_CT_image)<self.batch_no_validation) | (len(validation_GTV_image)<self.batch_no_validation) | (len(validation_Penalize_patch)<self.batch_no_validation) | (len(validation_Surface_patch)<self.batch_no_validation) :\n _read_thread_vl.resume()\n time.sleep(0.5)\n continue\n\n validation_CT_image_patchs = validation_CT_image\n validation_GTV_label = validation_GTV_image\n tic=time.time()\n\n [acc_vali, loss_vali,dsc_vali,surface_loss1] = sess.run([accuracy, cost,f1_measure,surface_loss],\n feed_dict={image: validation_CT_image_patchs,\n label: validation_GTV_label,\n penalize: validation_Penalize_patch,\n dropout: 1,\n is_training: False,\n ave_vali_acc: -1,\n ave_loss_vali: -1,\n ave_dsc_vali:-1,\n dense_net_dim: self.patch_window,\n is_training_bn:False,\n alpha:1,\n beta:1,\n surf_map:validation_Surface_patch,\n })\n elapsed=time.time()-tic\n\n acc_validation += acc_vali\n loss_validation += loss_vali\n dsc_validation+=dsc_vali[1]\n validation_step += 1\n if np.isnan(dsc_validation) or np.isnan(loss_validation) or np.isnan(acc_validation):\n print('nan problem')\n process = psutil.Process(os.getpid())\n\n print(\n '%d - > %d: elapsed_time:%d acc_validation: %f, loss_validation: %f, memory_percent: %4s' % (\n validation_step,validation_step * self.batch_no_validation\n , elapsed, acc_vali, loss_vali, 
str(process.memory_percent()),\n ))\n\n settings.queue_isready_vl = False\n acc_validation = acc_validation / (validation_step)\n loss_validation = loss_validation / (validation_step)\n dsc_validation = dsc_validation / (validation_step)\n if np.isnan(dsc_validation) or np.isnan(loss_validation) or np.isnan(acc_validation):\n print('nan problem')\n _fill_thread_vl.kill_thread()\n print('******Validation, step: %d , accuracy: %.4f, loss: %f*******' % (\n itr1, acc_validation, loss_validation))\n\n [sum_validation] = sess.run([summ],\n feed_dict={image: validation_CT_image_patchs,\n label: validation_GTV_label,\n penalize: validation_Penalize_patch,\n dropout: 1,\n is_training: False,\n ave_vali_acc: acc_validation,\n ave_loss_vali: loss_validation,\n ave_dsc_vali:dsc_validation,\n dense_net_dim: self.patch_window,\n is_training_bn: False,\n alpha: 1,\n beta: 1,\n surf_map: validation_Surface_patch,\n\n })\n validation_writer.add_summary(sum_validation, point)\n print('end of validation---------%d' % (point))\n\n #loop for training batches\n while(step*self.batch_no<self.no_sample_per_each_itr):\n [train_CT_image_patchs, train_GTV_label, train_Penalize_patch,loss_coef_weights,train_Surface_patch] = _image_class.return_patches( self.batch_no)\n\n if (len(train_CT_image_patchs)<self.batch_no)|(len(train_GTV_label)<self.batch_no)\\\n |(len(train_Penalize_patch)<self.batch_no)|(len(train_Surface_patch)<self.batch_no):\n time.sleep(0.5)\n _read_thread.resume()\n continue\n\n tic=time.time()\n [acc_train1, loss_train1, optimizing,out,dsc_train11] = sess.run([accuracy, cost, optimizer,y,f1_measure],\n feed_dict={image: train_CT_image_patchs,\n label: train_GTV_label,\n penalize: train_Penalize_patch,\n # loss_coef: loss_coef_weights,\n dropout: self.dropout_keep,\n is_training: True,\n ave_vali_acc: -1,\n ave_loss_vali: -1,\n ave_dsc_vali: -1,\n dense_net_dim: self.patch_window,\n is_training_bn: True,\n alpha: self.alpha_coeff,\n beta: self.beta_coeff,\n surf_map: train_Surface_patch,\n\n })\n elapsed=time.time()-tic\n dsc_train1=dsc_train11[1]\n\n self.x_hist=self.x_hist+1\n # np.hstack((self.x_hist, [np.ceil(\n\n [sum_train] = sess.run([summ],\n feed_dict={image: train_CT_image_patchs,\n label: train_GTV_label,\n penalize: train_Penalize_patch,\n dropout: self.dropout_keep, is_training: True,\n ave_vali_acc: acc_train1,\n ave_loss_vali: loss_train1,\n ave_dsc_vali: dsc_train1,\n dense_net_dim: self.patch_window,\n is_training_bn: True,\n alpha: self.alpha_coeff,\n beta: self.beta_coeff,\n surf_map: train_Surface_patch,\n\n })\n train_writer.add_summary(sum_train,point)\n step = step + 1\n\n process = psutil.Process(os.getpid())\n\n print(\n 'point: %d, elapsed_time:%d step*self.batch_no:%f , LR: %.15f, acc_train1:%f, loss_train1:%f,memory_percent: %4s' % (\n int((point)),elapsed,\n step * self.batch_no, self.learning_rate, acc_train1, loss_train1,\n str(process.memory_percent())))\n\n\n point=int((point))\n if point%100==0:\n '''saveing model inter epoch'''\n chckpnt_path = os.path.join(self.chckpnt_dir,\n ('densenet_unet_inter_epoch%d_point%d.ckpt' % (epoch, point)))\n saver.save(sess, chckpnt_path, global_step=point)\n itr1 = itr1 + 1\n point=point+1\n endTime = time.time()\n\n #==============\n '''saveing model after each epoch'''\n chckpnt_path = os.path.join(self.chckpnt_dir, 'densenet_unet.ckpt')\n saver.save(sess, chckpnt_path, global_step=epoch)\n print(\"End of epoch----> %d, elapsed time: %d\" % (epoch, endTime - startTime))",
"def train():\n if os.path.isfile(load_model):\n all_weights = np.load(load_model) \n else:\n print(\"Model file does not exist. Exiting....\")\n return\n\n print(\"Build up the network\")\n\n\n # Two different types of input\n image_input_var = T.tensor4('original_inputs')\n rotated_image_input_var = T.tensor4('rotated_image_input')\n target_var = T.ivector('targets')\n\n # Build teacher network\n cnn_model, cnn_mid_output, weight_decay_penalty = cifar10_merge.build_cnn(image_input_var)\n\n # Get the intermediate layer of the teacher network\n original_model_mid_output = lasagne.layers.get_output(cnn_mid_output, image_input_var, deterministic = True)\n\n # Get the softmax output of the teacher network.\n\n original_model_output_val = lasagne.layers.get_output(cnn_model, image_input_var, deterministic = True)\n \n # Build the student network\n \n rotated_cnn_model, rotated_model_mid, rotated_weight_penalty = \\\n cifar10_merge.build_cnn(rotated_image_input_var)\n \n # Get the softmax output of the student network. Since it need to be trained on, deterministic = False\n rotated_model_mid_output = lasagne.layers.get_output(rotated_model_mid, rotated_image_input_var, deterministic = False)\n\n # Get the model output of the studenet network.\n rotated_model_output = lasagne.layers.get_output(rotated_cnn_model, rotated_image_input_var, deterministic = True)\n\n # Set the weights for the teacher network\n lasagne.layers.set_all_param_values(cnn_model, all_weights)\n\n # Get the initialized weights below the intermediate layer\n rotated_net_weights_below_mid = lasagne.layers.get_all_param_values(rotated_model_mid)\n\n # Get the parameter of the student network that needs to be trained.\n rotated_net_training_param = lasagne.layers.get_all_params(rotated_model_mid, trainable=True)\n\n # Set the weights for the student network\n lasagne.layers.set_all_param_values(rotated_cnn_model, all_weights)\n\n lasagne.layers.set_all_param_values(rotated_model_mid,\n rotated_net_weights_below_mid)\n \n # cross_entropy_loss = lasagne.objectives.categorical_crossentropy(rotated_model_mid_output, target_var)\n\n # cross_entropy_loss_mean = cross_entropy_loss.mean()\n\n # L = T.mean(lasagne.objectives.squared_error(original_model_mid_output, rotated_model_mid_output), axis = 1)\n L = lasagne.objectives.squared_error(original_model_mid_output, rotated_model_mid_output).mean()\n # cost = T.mean(L)\n\n # cost = cross_entropy_loss_mean\n cost = L\n\n # updates = lasagne.updates.adagrad(cost, rotated_net_training_param, learning_rate=0.1)\n updates = lasagne.updates.adam(cost, rotated_net_training_param, learning_rate=0.001)\n\n # cross_entropy_loss = lasagne.objectives.categorical_crossentropy(model_output, target_var)\n\n # cross_entropy_loss_mean = cross_entropy_loss.mean()\n\n # loss = cross_entropy_loss_mean + weight_decay_penalty\n\n\n train_acc = T.mean(T.eq(T.argmax(rotated_model_output, axis = 1), target_var),\n dtype=theano.config.floatX)\n\n original_model_acc = T.mean(T.eq(T.argmax(original_model_output_val, axis = 1), target_var),\n dtype=theano.config.floatX)\n\n train_fn = theano.function(inputs = [image_input_var, rotated_image_input_var, target_var],\n outputs = [original_model_mid_output, rotated_model_mid_output, train_acc], updates = updates)\n\n # Return the accuracy for teacher network and student network, respectively\n val_fn = theano.function(inputs = [image_input_var, rotated_image_input_var, target_var],\n outputs = [original_model_acc, train_acc])\n\n if 
os.path.isfile(os.path.join(train_dir, 'latest_model.txt')):\n weight_file = \"\"\n with open(os.path.join(train_dir, 'latest_model.txt'), 'r') as checkpoint_file:\n weight_file = checkpoint_file.read().replace('\\n', '')\n print(\"Loading from: \", weight_file)\n model_weights = np.load(weight_file)\n lasagne.layers.set_all_param_values(rotated_cnn_model, model_weights)\n\n # Get images and labels for CIFAR-10.\n\n cifar10_data = cifar10_merge_input.load_cifar10()\n\n bkgimg = np.array([np.mean(cifar10_data.train.images[cifar10_data.train.labels==i], axis = 0) for i in range(10)])\n for epoch in xrange(max_steps):\n start_time = time.time()\n\n original_test_image, rotated_test_image, test_label = cifar10_data.test.next_eval_batch(batch_size)\n total_t_net_for_original = 0\n total_s_net_for_original = 0\n total_t_net_for_rotation = 0\n total_s_net_for_rotation = 0\n total_count = 0\n\n print(\"Start Evaluating\")\n\n while(rotated_test_image is not None):\n t_net_for_original, s_net_for_original = val_fn(original_test_image, original_test_image, test_label)\n total_t_net_for_original += t_net_for_original * original_test_image.shape[0]\n total_s_net_for_original += s_net_for_original * original_test_image.shape[0]\n\n t_net_for_rotated, s_net_for_rotated = val_fn(rotated_test_image, rotated_test_image, test_label)\n total_t_net_for_rotation += t_net_for_rotated * rotated_test_image.shape[0]\n total_s_net_for_rotation += s_net_for_rotated * rotated_test_image.shape[0]\n\n total_count += rotated_test_image.shape[0]\n original_test_image, rotated_test_image, test_label = cifar10_data.test.next_eval_batch(batch_size)\n \n print(\"Student Network Accuracy on Original Image: %.4f\" % (float(total_s_net_for_original / total_count)))\n print(\"Teacher Network Accuracy on Original Image: %.4f\" % (float(total_t_net_for_original / total_count)))\n\n print(\"Student Network Accuracy on Rotated Image: %.4f\" % (float(total_s_net_for_rotation / total_count)))\n print(\"Teacher Network Accuracy on Rotated Image: %.4f\" % (float(total_t_net_for_rotation / total_count)))\n\n\n print(\"Start Training...\")\n original_train_image, rotated_train_image, train_label, start = cifar10_data.train.next_batch(batch_size)\n original_train_image = generate(original_train_image, train_label, bkgimg, 16, 32, None, batch_size)\n # rotated_train_image = random_rotated_image(original_train_image[::-1])\n rotated_train_image = random_rotated_image(original_train_image)\n\n end_time_1 = time.time() - start_time\n step = 1\n loss_total = 0\n original_start = start\n\n while(start != 0):\n #loss_value, train_acc = train_fn(original_train_image, rotated_train_image, train_label)\n \n ori_mid, rot_mid, train_acc = train_fn(original_train_image, rotated_train_image, train_label)\n # ori_mid, rot_mid, train_acc = train_fn(original_train_image, np.array(np.random.rand(batch_size, 3, 32, 32), dtype = np.float32), train_label)\n step += 1\n if start == original_start:\n print(ori_mid[0])\n print(rot_mid[0])\n print(train_label)\n \n original_train_image, rotated_train_image, train_label, start = cifar10_data.train.next_batch(batch_size)\n original_train_image = generate(original_train_image, train_label, bkgimg, 16, 32, None, batch_size)\n rotated_train_image = random_rotated_image(original_train_image)\n # assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n # loss_total += loss_value\n if 1:\n if epoch % 100 == 0 or (step + 1) == max_steps:\n checkpoint_path = os.path.join(train_dir, 'model_step%d.npy' % epoch)\n 
weightsOfParams = lasagne.layers.get_all_param_values(rotated_cnn_model)\n np.save(checkpoint_path, weightsOfParams)\n latest_model_path = os.path.join(train_dir, 'latest_model.txt')\n try:\n os.remove(latest_model_path)\n except OSError:\n pass\n latest_model_file = open(latest_model_path, \"w\")\n latest_model_file.write(checkpoint_path)\n latest_model_file.close()\n\n # print(\"Epoch Stop, loss_averge\", float(loss_total) / float(step))\n duration = time.time() - start_time\n print(\"Duration is\", duration)",
"def _train(args): \n\n #device = 'cuda' if torch.cuda.is_available() else 'cpu'\n device = 'cpu'\n logger.info(\"Device Type: {}\".format(device))\n\n logger.info(\"Loading SUN360 dataset\")\n transform = transforms.Compose(\n [transforms.Resize((224,224)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n target_transform = transforms.Compose([transforms.Resize((224,224)),\n transforms.ToTensor()]) \n\n trainset = SUN360Dataset(\"imagedata.json\",transform = transform, target_transform = target_transform)\n train_loader = DataLoader(trainset, batch_size=args.batch_size,\n shuffle=True, num_workers=args.workers)\n \"\"\"\n testset = torchvision.datasets.CIFAR10(root=args.data_dir, train=False,\n download=False, transform=transform)\n test_loader = DataLoader(testset, batch_size=args.batch_size,\n shuffle=False, num_workers=args.workers)\n \"\"\" \n\n logger.info(\"Model loaded\")\n model = EfficientNet.from_name('efficientnet-b0',conv_type='Equi')\n\n if torch.cuda.device_count() > 1:\n logger.info(\"Gpu count: {}\".format(torch.cuda.device_count()))\n model = nn.DataParallel(model)\n\n model = model.to(device)\n\n criterion = CELoss().to(device)\n optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\n\n for epoch in range(0, args.epochs):\n running_loss = 0.0\n for i, data in enumerate(train_loader):\n # get the inputs\n inputs, EM , CM = data\n inputs, EM, CM = inputs.to(device), EM.to(device), CM.to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = model(inputs)\n EMLoss, CMLoss = map_loss(outputs,EM,CM,criterion)\n loss = EMLoss + CMLoss\n loss.backward()\n optimizer.step()\n\n # print statistics\n running_loss += loss.item()\n if i % 2000 == 1999: # print every 2000 mini-batches\n print('[%d, %5d] loss: %.3f' %\n (epoch + 1, i + 1, running_loss / 2000))\n running_loss = 0.0\n print('Finished Training')\n return _save_model(model, args.model_dir)",
"def image_network_train(learn_data_path):\n\n # data selector ----------\n use_da_data = False\n increase_val = False\n print( \"\\nmode: Use Augmented data: {} | increase validation data: {}\".format(use_da_data, increase_val) )\n\n # First define original train_data only as train_dir\n train_dir = os.path.join(data_dir, \"train\")\n if (use_da_data == True) and (increase_val == False):\n # with_augmented data (no validation increase)\n train_dir = os.path.join(data_dir, \"train_with_aug\")\n validation_dir = os.path.join(data_dir, \"val\") # original validation data\n\n # pair of decreaced train_data and increased validation data\n if (increase_val == True):\n train_dir = os.path.join(data_dir, \"red_train\")\n if (use_da_data == True):\n train_dir = os.path.join(data_dir, \"red_train_with_aug\")\n validation_dir = os.path.join(data_dir, \"validation\")\n\n test_dir = os.path.join(data_dir, \"test\")\n\n print(\"\\ntrain_dir: \", train_dir)\n print(\"validation_dir: \", validation_dir)\n\n\n # calcucate the num of category\n num_category = 0\n for dirpath, dirnames, filenames in os.walk(train_dir):\n for dirname in dirnames:\n num_category += 1\n\n # All images will be resized to 299x299\n image_size = 299\n batch_size = 16\n\n # Rescale all images by 1./255 and apply image augmentation\n train_datagen = keras.preprocessing.image.ImageDataGenerator(rescale=1./255)\n validation_datagen = keras.preprocessing.image.ImageDataGenerator(rescale=1./255)\n test_datagen = keras.preprocessing.image.ImageDataGenerator(rescale=1./255)\n\n # Flow training images in batches of using train_datagen generator\n train_generator = train_datagen.flow_from_directory(\n train_dir, # Source directory for the training images\n target_size=(image_size, image_size),\n batch_size=batch_size,\n class_mode='categorical')\n\n # Flow validation images in batches of 20 using validation_datagen generator\n validation_generator = validation_datagen.flow_from_directory(\n validation_dir, # Source directory for the validation images\n target_size=(image_size, image_size),\n batch_size=batch_size,\n class_mode='categorical')\n\n # Flow validation images in batches of 20 using test_datagen generator\n test_generator = test_datagen.flow_from_directory(\n test_dir, # Source directory for the test images\n target_size=(image_size, image_size),\n batch_size=batch_size,\n class_mode='categorical')\n\n # Create the base model from the pre-trained convnets\n IMG_SHAPE = (image_size, image_size, 3)\n\n # Create the base model from the pre-trained model MobileNet V2\n base_model = keras.applications.xception.Xception(input_shape=IMG_SHAPE, include_top=False, weights='imagenet')\n\n # Freeze the convolutional base\n base_model.trainable = False\n\n # モデル\n model = keras.Sequential([\n base_model,\n keras.layers.GlobalAveragePooling2D(),\n keras.layers.Dense(num_category, activation='softmax')\n ])\n\n # Compile the model\n model.compile(optimizer=keras.optimizers.Adam(lr=0.0001),\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n # early stopping\n es = keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)\n\n model.summary()\n\n # 更新される重みの数\n print('after', len(model.trainable_weights))\n\n # Train the model\n epochs = 30\n steps_per_epoch = train_generator.n // batch_size\n validation_steps = validation_generator.n // batch_size\n test_steps = test_generator.n // batch_size\n\n history = model.fit_generator(train_generator,\n steps_per_epoch = steps_per_epoch,\n epochs=epochs,\n 
workers=4,\n validation_data=validation_generator,\n validation_steps=validation_steps,\n callbacks=[es],\n class_weight={0:1.0, 1:0.4})\n\n loss, acc = model.evaluate_generator(validation_generator, steps=validation_steps)\n print('val loss: {}, val acc: {}'.format(loss, acc))\n\n # Fine tuning\n # Un-freeze the top layers of the model\n base_model.trainable = True\n\n # The nums of layers are in the base model\n print(\"Number of layers in the base model: \", len(base_model.layers))\n\n # Fine tune from this layer onwards\n fine_tune_at = 108\n\n # Freeze all the layers before the `fine_tune_at` layer\n for layer in base_model.layers[:fine_tune_at]:\n layer.trainable = False\n\n # Compile the model using a much-lower training rate\n model.compile(optimizer = keras.optimizers.Adam(lr=2e-5),\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n model.summary()\n\n # 更新される重みの数\n print('after Fine tune', len(model.trainable_weights))\n\n # Continue Train the model\n history_fine = model.fit_generator(train_generator,\n steps_per_epoch = steps_per_epoch,\n epochs=epochs,\n workers=4,\n validation_data=validation_generator,\n validation_steps=validation_steps,\n callbacks=[es],\n class_weight={0:1.0, 1:0.4})\n\n\n # print(history_fine.history)\n model_val_acc = history_fine.history['val_accuracy'][-1]\n print('val_acc: ', model_val_acc)\n\n # save model into hdf5 file ----------\n model.save(learn_data_path + '/shen_model.h5')\n\n loss, acc = model.evaluate_generator(validation_generator, steps=validation_steps)\n print('val loss: {}, val acc: {}'.format(loss, acc))\n\n loss, acc = model.evaluate_generator(test_generator, steps=test_steps)\n print('Test loss: {}, Test acc: {}'.format(loss, acc))",
"def init_train(self):\n data = self.loader.load_labelled_data(self.conf.split, 'training')\n\n # Initialise unlabelled data iterator\n num_ul = 0\n if self.conf.ul_mix > 0:\n ul_data = self.loader.load_unlabelled_data(self.conf.split, 'all')\n\n # calculate number of unlabelled images as a proportion of the labelled images\n num_ul = int(data.size() * self.conf.ul_mix)\n num_ul = num_ul if num_ul <= ul_data.size() else ul_data.size()\n log.info('Sampling %d unlabelled images out of total %d.' % (num_ul, ul_data.size()))\n ul_data.sample(num_ul)\n self.gen_X_U = data_utils.generator(self.conf.batch_size, 'overflow', ul_data.images)\n\n # Initialise labelled data iterator\n assert self.conf.l_mix >= 0\n\n # calculate number of labelled images\n num_l = int(data.size() * self.conf.l_mix)\n num_l = num_l if num_l <= data.size() else data.size()\n log.info('Using %d labelled images out of total %d.' % (num_l, data.size()))\n train_images = data.images[:num_l]\n train_masks = data.masks[:num_l]\n\n self.conf.unlabelled_image_num = num_ul\n self.conf.labelled_image_num = num_l\n self.conf.data_len = num_ul if num_ul > num_l else num_l\n self.conf.batches = int(np.ceil(self.conf.data_len / self.conf.batch_size))\n self.conf.save()\n\n self.gen_X_L = data_utils.generator(self.conf.batch_size, 'overflow', train_images, train_masks)\n\n # Initialise real masks iterator for discriminator training, using the real masks from the data CV split.\n self.other_masks = data_utils.generator(self.conf.batch_size, 'overflow', data.masks + 0)",
"def train():\n\n # Load camera parameters\n rcams = cameras.load_cameras()\n\n # Load 3d data and 2d projections\n full_train_set_3d, full_test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d =\\\n data_utils.read_3d_data( FLAGS.camera_frame, rcams, FLAGS.origin_bc, FLAGS.augment_data,\n FLAGS.procrustes, FLAGS.lowpass )\n \n # Read stacked hourglass 2D predictions\n full_train_set_2d, full_test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = \\\n data_utils.read_2d_predictions( FLAGS.origin_bc, FLAGS.augment_data )\n \n print(\"\\n[+] done reading and normalizing data\")\n # Getting the number of training and test subjects\n tr_subj = 0\n for v in full_train_set_3d.values():\n tr_subj += v.shape[0]\n te_subj = 0\n for v in full_test_set_3d.values():\n te_subj += v.shape[0]\n print(\"{0} training subjects, {1} test subjects\".format(tr_subj, te_subj))\n print(dim_to_use_2d)\n print(dim_to_use_3d)\n # Un-normalizing data for visualizations\n unNorm_ftrs2d = data_utils.unNormalize_dic(full_train_set_2d, data_mean_2d, data_std_2d, dim_to_use_2d)\n unNorm_ftrs3d = data_utils.unNormalize_dic(full_train_set_3d, data_mean_3d, data_std_3d, dim_to_use_3d)\n unNorm_ftes3d = data_utils.unNormalize_dic(full_test_set_3d, data_mean_3d, data_std_3d, dim_to_use_3d)\n # Visualize the data\n viz.visualize_train_sample(unNorm_ftrs2d, unNorm_ftrs3d, FLAGS.camera_frame)\n viz.visualize_files_oneatatime(unNorm_ftrs3d, unNorm_ftes3d)\n\n # Getting only the dimensions to use (get rid of body coxas, other limb, antennas, abdomen\n train_set_3d, train_set_2d, test_set_3d, test_set_2d = {}, {}, {}, {}\n for k in full_train_set_3d:\n (f, c) = k\n train_set_3d[k] = full_train_set_3d[k][:, dim_to_use_3d]\n train_set_2d[(f, data_utils.CAMERA_TO_USE)] =\\\n full_train_set_2d[(f, data_utils.CAMERA_TO_USE)][:, dim_to_use_2d]\n for k in full_test_set_3d:\n (f, c) = k\n test_set_3d[k] = full_test_set_3d[k][:, dim_to_use_3d]\n test_set_2d[(f, data_utils.CAMERA_TO_USE)] =\\\n full_test_set_2d[(f, data_utils.CAMERA_TO_USE)][:, dim_to_use_2d]\n \n print(\"3D data mean:\")\n print(data_mean_3d)\n print(\"3D data std:\")\n print(data_std_3d)\n\n print(\"2D data mean:\")\n print(data_mean_2d)\n print(\"2D data std:\")\n print(data_std_2d)\n \n input(\"Press Enter to continue...\")\n\n # Avoid using the GPU if requested\n device_count = {\"GPU\": 0} if FLAGS.use_cpu else {\"GPU\": 1}\n with tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(\n device_count=device_count,\n allow_soft_placement=True )) as sess:\n\n # === Create the model ===\n print(\"[*] creating %d bi-layers of %d units.\" % (FLAGS.num_layers, FLAGS.linear_size))\n model = create_model( sess, FLAGS.batch_size )\n model.train_writer.add_graph( sess.graph )\n print(\"[+] model created\")\n \n #=== This is the training loop ===\n step_time, loss, val_loss = 0.0, 0.0, 0.0\n current_step = 0 if FLAGS.load <= 0 else FLAGS.load + 1\n previous_losses = []\n\n step_time, loss = 0, 0\n current_epoch = 0\n log_every_n_batches = 100\n losses, errors, joint_errors = [], [], []\n for _ in range( FLAGS.epochs ):\n current_epoch = current_epoch + 1\n\n # === Load training batches for one epoch ===\n encoder_inputs, decoder_outputs =\\\n model.get_all_batches( train_set_2d, train_set_3d, FLAGS.camera_frame, training=True )\n nbatches = len( encoder_inputs )\n print(\"[*] there are {0} train batches\".format( nbatches ))\n start_time, loss = time.time(), 0.\n # === Loop through all the training batches ===\n for i in range( nbatches ):\n\n 
if (i+1) % log_every_n_batches == 0:\n # Print progress every log_every_n_batches batches\n print(\"Working on epoch {0}, batch {1} / {2}...\".format( current_epoch, i+1, nbatches),end=\"\" )\n\n enc_in, dec_out = encoder_inputs[i], decoder_outputs[i]\n step_loss, loss_summary, lr_summary, _ =\\\n model.step( sess, enc_in, dec_out, FLAGS.dropout, isTraining=True )\n\n if (i+1) % log_every_n_batches == 0:\n # Log and print progress every log_every_n_batches batchespixels = pixels / pixels[2,:]\n model.train_writer.add_summary( loss_summary, current_step )\n model.train_writer.add_summary( lr_summary, current_step )\n step_time = (time.time() - start_time)\n start_time = time.time()\n print(\"done in {0:.2f} ms\".format( 1000*step_time / log_every_n_batches ) )\n\n loss += step_loss\n current_step += 1\n # === end looping through training batches ===\n\n loss = loss / nbatches\n losses.append(loss)\n print(\"=============================\\n\"\n \"Global step: %d\\n\"\n \"Learning rate: %.2e\\n\"\n \"Train loss avg: %.4f\\n\"\n \"=============================\" % (model.global_step.eval(),\n model.learning_rate.eval(), loss) )\n # === End training for an epoch ===\n\n # === Testing after this epoch ===\n isTraining = False\n \n n_joints = len(data_utils.DIMENSIONS_TO_USE)\n if FLAGS.origin_bc:\n n_joints -= len(data_utils.ROOT_POSITIONS)\n\n encoder_inputs, decoder_outputs =\\\n model.get_all_batches( test_set_2d, test_set_3d, FLAGS.camera_frame, training=False)\n\n total_err, coordwise_err, joint_err, step_time, loss = evaluate_batches( sess, model,\n data_mean_3d, data_std_3d, dim_to_use_3d, dim_to_ignore_3d,\n data_mean_2d, data_std_2d, dim_to_use_2d, dim_to_ignore_2d,\n current_step, encoder_inputs, decoder_outputs, current_epoch )\n\n print(\"=============================\\n\"\n \"Step-time (ms): %.4f\\n\"\n \"Val loss avg: %.4f\\n\"\n \"Val error avg (mm): %.2f (%.2f, %.2f, %.2f)\\n\"\n \"=============================\" % ( 1000*step_time, loss, total_err,\n coordwise_err[0], coordwise_err[1], coordwise_err[2] ))\n\n for i in range(n_joints):\n # 6 spaces, right-aligned, 5 decimal places\n print(\"Error in joint {0:02d} (mm): {1:>5.2f}\".format(i+1, joint_err[i]))\n print(\"=============================\")\n errors.append(coordwise_err)\n joint_errors.append(joint_err)\n # Log the error to tensorboard\n summaries = sess.run( model.err_mm_summary, {model.err_mm: total_err} )\n model.test_writer.add_summary( summaries, current_step )\n\n # Save the model\n print( \"Saving the model... \", end=\"\" )\n start_time = time.time()\n model.saver.save(sess, os.path.join(train_dir, 'checkpoint'), global_step=current_step )\n print( \"done in {0:.2f} ms\".format(1000*(time.time() - start_time)) )\n\n # Reset global time and loss\n step_time, loss = 0, 0\n\n sys.stdout.flush()\n # Save losses for future plots\n def print_list_tofile(l, filename):\n with open(filename, 'wb') as f:\n pickle.dump(l, f)\n print_list_tofile(losses, train_dir+\"/losses.pkl\")\n print_list_tofile(errors, train_dir+\"/errors.pkl\")\n print_list_tofile(joint_errors, train_dir+\"/joint_errors.pkl\")",
"def __init__(self, embed_size, dropout=0.5, image_model='resnet101', simple=False, pretrained=True):\n super(EncoderCNN, self).__init__()\n resnet = globals()[image_model](pretrained=pretrained)\n modules = list(resnet.children())[:-2] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules)\n \n self.linear = nn.Sequential(nn.Conv2d(resnet.fc.in_features, embed_size, kernel_size=1, padding=0),\n nn.Dropout2d(dropout))\n\n self.simple = simple\n if simple:\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))",
"def train(args):\n # Create the data loader\n loader = sunnerData.DataLoader(\n dataset = sunnerData.ImageDataset(\n root = [[args.train]],\n transforms = transforms.Compose([\n \n# transforms.RandomCrop(720,720)\n# transforms.RandomRotation(45)\n# transforms.RandomHorizontalFlip(), \n# transforms.ColorJitter(brightness=0.5, contrast=0.5),\n \n\n sunnerTransforms.Resize(output_size = (args.H, args.W)),\n #transforms.RandomCrop(512,512)\n sunnerTransforms.ToTensor(),\n sunnerTransforms.ToFloat(),\n # sunnerTransforms.Transpose(),\n sunnerTransforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),\n ])\n ), batch_size = args.batch_size, shuffle = True, num_workers = 2\n )\n loader = sunnerData.IterationLoader(loader, max_iter = args.n_iter)\n\n # Create the model\n model = GANomaly2D(r = args.r, device = args.device)\n model.IO(args.resume, direction = 'load')\n model.train()\n \n # Train!\n bar = tqdm(loader)\n for i, (normal_img,) in enumerate(bar):\n model.forward(normal_img)\n model.backward()\n loss_G, loss_D = model.getLoss()\n bar.set_description(\"Loss_G: \" + str(loss_G) + \" loss_D: \" + str(loss_D))\n bar.refresh()\n if i % args.record_iter == 0:\n model.eval()\n with torch.no_grad():\n z, z_ = model.forward(normal_img)\n img, img_ = model.getImg()\n visualizeEncoderDecoder(img, img_, z, z_,i)\n model.train()\n model.IO(args.det, direction = 'save')\n model.IO(args.det, direction = 'save')",
"def trainNet():",
"def train_src_encoder(encoder, classifier, data_loader):\n ####################\n # 1. setup network #\n ####################\n\n # set train state for Dropout and BN layers\n encoder.train()\n classifier.train()\n\n # setup criterion and optimizer\n optimizer = optim.Adam(\n list(encoder.parameters()) + list(classifier.parameters()),\n lr=params.c_learning_rate,\n betas=(params.beta1, params.beta2))\n criterion = nn.CrossEntropyLoss()\n\n ####################\n # 2. train network #\n ####################\n\n for epoch in range(params.num_epochs_pre):\n for step, (images, labels) in enumerate(data_loader):\n # make images and labels variable\n images = make_variable(images)\n labels = make_variable(labels.squeeze_())\n\n # zero gradients for optimizer\n optimizer.zero_grad()\n\n # compute loss for critic\n preds = classifier(encoder(images))\n loss = criterion(preds, labels)\n\n # optimize source classifier\n loss.backward()\n optimizer.step()\n\n # print step info\n if ((step + 1) % params.log_step_pre == 0):\n print(\"Epoch [{}/{}] Step [{}/{}]: loss={}\"\n .format(epoch + 1,\n params.num_epochs_pre,\n step + 1,\n len(data_loader),\n loss.data))\n\n # eval model on test set\n if ((epoch + 1) % params.eval_step_pre == 0):\n eval_src(encoder, classifier, data_loader)\n\n # save model parameters\n if ((epoch + 1) % params.save_step_pre == 0):\n save_model(encoder, \"ADDA-source-encoder-{}.pt\".format(epoch + 1))\n save_model(\n classifier, \"ADDA-source-classifier-{}.pt\".format(epoch + 1))\n\n # # save final model\n save_model(encoder, \"ADDA-source-encoder-final.pt\")\n save_model(classifier, \"ADDA-source-classifier-final.pt\")\n\n return encoder, classifier",
"def train(self):\n\n # Ensure everything is sent to GPU if being trained on the cloud\n if self.local == False:\n torch.set_default_tensor_type(torch.cuda.FloatTensor)\n print(\"\\n \\n EVERYTHING TO CUDA \\n \\n\")\n\n # Load weights if applicable\n if self.load_weights == True:\n start_epoch, loss = self.load_checkpoint(\n self.model, self.optimizer, self.model_name\n )\n start_epoch += 1\n print(\"\\n \\n [WEIGHTS LOADED]\")\n else:\n start_epoch = 0\n\n # Start Training Loop\n for epoch in range(start_epoch, self.epochs + 1):\n\n # TRAIN\n if epoch > 0:\n\n # Set model to training mode\n self.model.train()\n\n # Initialize loss and counter that will allow model weights to be saved and overwritten every 10 minibatches\n train_loss = 0\n counter = 0\n\n # Iterate through train set\n for (\n image1,\n image2,\n annotation1,\n annotation2,\n landmark1,\n landmark2,\n ) in tqdm(self.train_loader, desc=\"Train Epoch \" + str(epoch)):\n\n # image tensors and bounding box and label tensors to device\n image1 = image1.to(self.device)\n image2 = image2.to(self.device)\n\n # Forward pass of model\n x1_hat, x2_hat, z1, z2 = self.model(image1, image2)\n\n # Calculate loss from one pass and append to training loss\n loss = self.loss_fcn(\n image1, image2, x1_hat, x2_hat, z1.detach(), z2.detach()\n )\n train_loss += loss.item()\n\n # Clear optimizer gradient\n self.optimizer.zero_grad()\n\n # Backprop\n loss.backward()\n\n # Take a step with optimizer\n self.optimizer.step()\n\n # Save/overwrite model weights every 10 minibatches\n if counter % 10 == 0:\n self.save_checkpoint(\n self.model,\n self.optimizer,\n self.model_name,\n epoch,\n train_loss,\n )\n\n print(\n f\"====> Epoch: {epoch} Average train loss: {train_loss / len(self.train_loader.dataset):.4f}\\n\"\n )\n\n # Save entire model as .pt after every epoch of training\n if self.local == True:\n torch.save(\n self.model,\n os.path.join(self.save_path, self.model_name + \".pt\"),\n )\n else:\n torch.save(\n self.model, self.model_name + \"_epoch\" + str(epoch) + \".pt\"\n )\n print(\"SAVED MODEL EPOCH \" + str(epoch))\n\n # Evaluate on Validation Set after each epoch\n with torch.no_grad():\n\n # Set model to evaluation mode\n self.model.eval()\n\n # Iterate through validation set\n for image1, image2, annotation1, annotation2 in tqdm(\n self.val_loader, desc=\"Validation Epoch \" + str(epoch)\n ):\n\n # Initialize validation loss\n val_loss = 0\n\n # Send images to device\n image1 = image1.to(self.device)\n image2 = image2.to(self.device)\n\n # Forward pass of model\n x1_hat, x2_hat, z1, z2 = self.model(image1, image2)\n\n # Calculate loss and append to validation loss\n loss = self.loss_fcn(\n image1, image2, x1_hat, x2_hat, z1.detach(), z2.detach()\n )\n val_loss += loss\n\n print(\n f\"====> Epoch: {epoch} Average test loss: {val_loss / len(self.val_loader.dataset):.4f}\\n\"\n )\n\n print(\"[DONE EPOCH{}]\".format(epoch))\n\n print(\"[DONE TRAINING]\")\n\n # Save model after all epochs are finished\n if self.local == True:\n torch.save(\n self.model, os.path.join(self.save_path, self.model_name + \".pt\")\n )\n else:\n torch.save(self.model, self.model_name + \".pt\")",
"def inception_network():\n X = K.Input(shape=(224, 224, 3))\n initializer = K.initializers.he_normal(seed=None)\n conv_1 = K.layers.Conv2D(filters=64, kernel_size=7,\n padding='same', strides=2,\n kernel_initializer=initializer,\n activation='relu')(X)\n max_pool_1 = K.layers.MaxPooling2D(pool_size=3, strides=2,\n padding='same')(conv_1)\n\n conv_2 = K.layers.Conv2D(filters=64, padding='same',\n kernel_size=1, activation='relu',\n kernel_initializer=initializer)(max_pool_1)\n conv2_1 = K.layers.Conv2D(filters=192, padding='same',\n kernel_size=3, activation='relu',\n kernel_initializer=initializer)(conv_2)\n max_pool_2 = K.layers.MaxPooling2D(pool_size=3, strides=2,\n padding='same')(conv2_1)\n\n incep_3a = inception_block(max_pool_2, [64, 96, 128, 16, 32, 32])\n incep_3b = inception_block(incep_3a, [128, 128, 192, 32, 96, 64])\n max_pool_3 = K.layers.MaxPooling2D(pool_size=3, strides=2,\n padding='same')(incep_3b)\n\n incep_4a = inception_block(max_pool_3, [192, 96, 208, 16, 48, 64])\n incep_4b = inception_block(incep_4a, [160, 112, 224, 24, 64, 64])\n incep_4c = inception_block(incep_4b, [128, 128, 256, 24, 64, 64])\n incep_4d = inception_block(incep_4c, [112, 144, 288, 32, 64, 64])\n incep_4e = inception_block(incep_4d, [256, 160, 320, 32, 128, 128])\n max_pool_4 = K.layers.MaxPooling2D(pool_size=3, strides=2,\n padding='same')(incep_4e)\n\n incep_5a = inception_block(max_pool_4, [256, 160, 320, 32, 128, 128])\n incep_5b = inception_block(incep_5a, [384, 192, 384, 48, 128, 128])\n avg_pool = K.layers.AveragePooling2D(pool_size=7, strides=None)(incep_5b)\n\n drop_out = K.layers.Dropout(0.4)(avg_pool)\n dense = K.layers.Dense(units=1000, activation='softmax',\n kernel_initializer=initializer)(drop_out)\n return K.models.Model(inputs=X, outputs=dense)",
"def train(self):\n self.mode = \"train\"\n self.online_net.train()",
"def train(self):\n self.mode = \"train\"\n self.online_net.train()",
"def train(self):\n # self.recognizer.train()\n self.detector.train()\n self.shared_conv.train()",
"def initialize_network(self):\n # intermediate layer size\n ils = int((self.specbinnum + self.numfilters) / 2)\n\n network = lasagne.layers.InputLayer((None, 1, self.specbinnum, self.numtimebins), self.input_var)\n\n network = NormalisationLayer(network, self.specbinnum)\n self.normlayer = network\n\n network, _ = custom_convlayer_2(network, in_num_chans=self.specbinnum, out_num_chans=ils)\n network = batch_norm(network)\n network, _ = custom_convlayer_2(network, in_num_chans=ils, out_num_chans=self.numfilters)\n network = batch_norm(network)\n\n network = lasagne.layers.NonlinearityLayer(network, nonlinearity=elu)\n self.latents = network\n network = ZeroOutBackgroundLatentsLayer(self.latents,\n mp_down_factor=self.mp_down_factor,\n numfilters=self.numfilters,\n numtimebins=self.numtimebins,\n background_latents_factor=self.background_latents_factor,\n use_maxpool=self.use_maxpool)\n network, _ = custom_convlayer_2(network, in_num_chans=self.numfilters, out_num_chans=ils)\n network = batch_norm(network)\n network, _ = custom_convlayer_2(network, in_num_chans=ils, out_num_chans=self.specbinnum)\n network = batch_norm(network)\n\n # output_size\n num_time_samples = int(audioframe_len/2 * (self.numtimebins + 1))\n # network = batch_norm(DenseLayer(network, num_time_samples)) # MemoryError\n network, _ = custom_convlayer_2(network, in_num_chans=self.specbinnum, out_num_chans=num_time_samples)\n network, _ = batch_norm(network)\n network, _ = custom_convlayer_2(network, in_num_chans=num_time_samples, out_num_chans=1)\n network, _ = batch_norm(network)\n\n self.network = network",
"def kmeans_002():\n train_mmap_path = 'data/train_cropped_150_scale_15.memmap'\n test_mmap_path = 'data/test_cropped_150_scale_15.memmap'\n\n if not os.path.exists('data/train_cropped_150.memmap'):\n classes.crop_to_memmap(150, training=True)\n if not os.path.exists('data/test_cropped_150.memmap'):\n classes.crop_to_memmap(150, training=False)\n\n if not os.path.exists(train_mmap_path):\n logger.info(\"Prepping training images\")\n pre_scale = np.memmap('data/train_cropped_150.memmap', mode='r', shape=(N_TRAIN, 150, 150, 3))\n trainX = classes.rescale_memmap(15, pre_scale, train_mmap_path)\n del pre_scale\n else:\n trainX = np.memmap(train_mmap_path, mode='r', shape=(N_TRAIN, 15, 15, 3))\n\n if not os.path.exists(test_mmap_path):\n logger.info(\"Prepping testing images\")\n pre_scale = np.memmap('data/test_cropped_150.memmap', mode='r', shape=(N_TEST, 150, 150, 3))\n testX = classes.rescale_memmap(15, pre_scale, test_mmap_path)\n del pre_scale\n else:\n testX = np.memmap(test_mmap_path, mode='r', shape=(N_TEST, 15, 15, 3))\n\n\n n_jobs = multiprocessing.cpu_count()\n\n if not os.path.exists('data/mdl_kmeans_002_centroids.npy'):\n logger.info(\"Pretraining KMeans feature encoder\")\n km = models.KMeansFeatures.KMeansFeatures(rf_size=5, num_centroids=1600, num_patches=400000)\n km.fit(trainX)\n km.save_to_file('mdl_kmeans_002')\n else:\n logger.info(\"Loading KMeans feature encoder from file\")\n km = models.KMeansFeatures.KMeansFeatures.load_from_file('mdl_kmeans_002', rf_size=5)\n\n # Takes waaaay too long to finish. At least an hour per tree. Clearly too\n # many dimensions\n\n # Instead ran with ridge rf manually\n mdl = models.RandomForest.KMeansRandomForest(km, trainX, testX, n_jobs=n_jobs, cv_sample=0.5)\n # mdl.run('cv')\n mdl.run('train')\n res = mdl.run('predict')\n np.save('submissions/sub_kmeans_rf_002.npy', res)\n output = classes.Submission(res)\n output.to_file('sub_kmeans_rf_002.csv')",
"def RunAutoEncoder_unsupervised(net, criterion, optimizer, lr_scheduler, train_dl, train_len, N_EPOCHS, outputPath, SAVE_FILE,\\\n DO_PROJ_middle, run_model, criterion_classification, LOSS_LAMBDA , feature_name, TYPE_PROJ, ETA, ETA_STAR=100, AXIS=0 ):\n \n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n epoch_loss, epoch_acc, epoch_reconstruction, epoch_classification, train_time = [], [], [], [], []\n epoch_val_loss, epoch_val_acc, epoch_val_reconstruction, epoch_val_classification = [], [], [], [] \n for e in range(N_EPOCHS):\n t1 = time.perf_counter()\n print('EPOCH:',e)\n running_loss, running_accuracy = 0, 0 \n running_classification , running_reconstruction = 0,0\n net.train()\n \n for i,batch in enumerate(tqdm(train_dl)):\n x = batch[0]\n labels = batch[1]\n \n if torch.cuda.is_available():\n x = x.cuda()\n labels = labels.cuda() \n \n encoder_out, decoder_out = net(x)\n \n # Compute the loss \n loss_classification = criterion_classification(encoder_out,labels.long())\n \n if type(criterion) == torch.nn.modules.loss.KLDivLoss:\n loss_reconstruction = LOSS_LAMBDA * criterion(x.log(), decoder_out)\n \n else:\n loss_reconstruction = LOSS_LAMBDA * criterion(decoder_out, x)\n loss = loss_classification + loss_reconstruction\n \n optimizer.zero_grad()\n loss.backward()\n \n # Set the gradient as 0\n if run_model =='MaskGrad':\n for index,param in enumerate(list(net.parameters())):\n if index<len(list(net.parameters()))/2-2 and index%2==0:\n param.grad[ DO_PROJ_middle[int(index/2)] ] =0 \n optimizer.step() \n \n with torch.no_grad():\n running_loss += loss.item()\n running_reconstruction += loss_reconstruction.item()\n running_classification += loss_classification.item()\n running_accuracy += (encoder_out.max(1)[1] == labels).sum().item() \n \n if e == N_EPOCHS-1 :\n\n if i == 0:\n data_decoded = torch.cat((decoder_out,labels.view(-1,1)), dim = 1)\n data_encoder = torch.cat((encoder_out,labels.view(-1,1)), dim = 1)\n else:\n tmp1 = torch.cat((decoder_out,labels.view(-1,1)), dim = 1)\n data_decoded = torch.cat((data_decoded,tmp1),dim= 0)\n \n tmp2 = torch.cat((encoder_out,labels.view(-1,1)), dim = 1)\n data_encoder = torch.cat((data_encoder,tmp2 ),dim= 0)\n\n t2 = time.perf_counter()\n train_time.append(t2-t1)\n print(\"Total loss:\", running_loss / float(train_len ),'loss_reconstruction: ', running_reconstruction/ train_len ,\\\n 'loss_classification: ',running_classification/ train_len ) \n epoch_loss.append(running_loss / train_len )\n epoch_reconstruction.append( running_reconstruction / train_len )\n epoch_classification.append( running_classification / train_len )\n epoch_acc.append(running_accuracy / train_len)\n \n \n # Do projection at last epoch (GRADIENT_MASK)\n if run_model=='ProjectionLastEpoch' and e==(N_EPOCHS-1):\n net_parameters = list(net.parameters())\n for index,param in enumerate(net_parameters):\n if DO_PROJ_middle == False and \\\n index!= len(net_parameters)/2-2: # Do no projection at middle layer\n param.data = Projection(param.data).to(device)\n \n\n \n\n if SAVE_FILE and str(run_model)!= 'ProjectionLastEpoch':\n # Save encoder data\n Lung_encoder = data_encoder.cpu().detach().numpy()\n colunms = [x for x in range(Lung_encoder.shape[1]-1)] +['label']\n res =pd.DataFrame(Lung_encoder,columns= colunms)\n res.to_csv('{}encoder_tiro_{}.csv'.format(outputPath, str(run_model)),sep=';')\n # Save decoder data\n Lung_decoded = data_decoded.cpu().detach().numpy()\n Label = ['Label']+list(Lung_decoded[:,-1].astype(int)+1)\n Name = ['Name'] + 
[x+2 for x in range(train_len)]\n Label = np.vstack( (np.array(Name),np.array(Label)) )\n Lung = np.delete(Lung_decoded, -1, axis =1 )\n Lung = np.hstack( (feature_name.reshape(-1,1), Lung.T) )\n Lung = np.vstack((Label, Lung))\n res = pd.DataFrame(Lung)\n res.to_csv('{}decoded_{}.csv'.format(outputPath, str(run_model)),sep=';',index=0, header=0) \n print(\"-----------------------\")\n print(\"Saved file to \",str(outputPath))\n print(\"-----------------------\")\n #Plot \n if str(run_model)!= 'ProjectionLastEpoch':\n plt.figure()\n plt.plot( epoch_loss )\n plt.plot( epoch_val_loss )\n plt.title('Total Loss ')\n plt.figure()\n plt.plot( epoch_reconstruction, label ='λ*reconstruction' )\n plt.plot( epoch_classification, label = 'classification' )\n plt.plot( epoch_val_reconstruction, label ='λ*reconstruction test' )\n plt.plot( epoch_val_classification, label = 'classification test' )\n plt.legend()\n plt.title('Loss ')\n plt.figure()\n plt.plot( epoch_acc )\n plt.plot( epoch_val_acc )\n plt.title('Total accuracy classification')\n print('{} epochs trained for {}s , {} s/epoch'.format(N_EPOCHS, sum(train_time), np.mean(train_time)))\n return data_encoder, data_decoded, epoch_loss , net, sum(epoch_acc)/N_EPOCHS",
"def main():\r\n # Prepare the data and the pretrained embedding matrix\r\n if FRESH_START:\r\n print(\"Preprocessing all data from scratch....\")\r\n train, dev, test = utils.get_data(DATA_FN)\r\n # train_data includes .word2idx and .label_enc as fields if you would like to use them at any time\r\n train_generator, dev_generator, test_generator, embeddings, train_data = utils.vectorize_data(train, dev, test, BATCH_SIZE, EMBEDDING_DIM)\r\n print(\"Saving DataLoaders and embeddings so you don't need to create them again; you can set FRESH_START to \"\r\n \"False to load them from file....\")\r\n with open(TEMP_FILE, \"wb+\") as f:\r\n pickle.dump((train_generator, dev_generator, test_generator, embeddings, train_data), f)\r\n else:\r\n try:\r\n with open(TEMP_FILE, \"rb\") as f:\r\n print(\"Loading DataLoaders and embeddings from file....\")\r\n train_generator, dev_generator, test_generator, embeddings, train_data = pickle.load(f)\r\n except FileNotFoundError:\r\n raise FileNotFoundError(\"You need to have saved your data with FRESH_START=True once in order to load it!\")\r\n \r\n\r\n # Use this loss function in your train_model() and test_model()\r\n loss_fn = nn.CrossEntropyLoss()\r\n\r\n ########## YOUR CODE HERE ##########\r\n HIDDEN_DIM = 64\r\n ########## Base DNN ################\r\n # # TODO: for each of the two models, you should 1) create it,\r\n print(\"train and test on DNN!\")\r\n dnn = models.DenseNetwork(EMBEDDING_DIM, NUM_CLASSES, HIDDEN_DIM, embeddings)\r\n optimizer = optim.Adam(dnn.parameters())\r\n # TODO 2) run train_model() to train it, and\r\n #trained_dnn = train_model(dnn, loss_fn, optimizer, train_generator, dev_generator)\r\n DNN_PATH = 'dense.pth'\r\n #torch.save(trained_dnn, DNN_PATH)\r\n # TODO: 3) run test_model() on the result\r\n print(\"Test on the saved Dense Network\")\r\n dnn_test = torch.load(DNN_PATH)\r\n test_model(dnn_test, loss_fn, test_generator)\r\n \"\"\"\r\n Output:\r\n Test loss: tensor([25.7230])\r\n F-score: 0.4399188910197242\r\n \"\"\"\r\n\r\n ########## Base RNN ################\r\n # TODO: for each of the two models, you should 1) create it,\r\n print(\"train and test on RNN!\")\r\n SENTENCE_LEN = 91\r\n rnn = models.RecurrentNetwork(SENTENCE_LEN, NUM_CLASSES, HIDDEN_DIM, embeddings)\r\n optimizer = optim.Adam(rnn.parameters())\r\n # TODO 2) run train_model() to train it, and\r\n #trained_rnn = train_model(rnn, loss_fn, optimizer, train_generator, dev_generator)\r\n RNN_PATH = 'recurrent.pth'\r\n #torch.save(trained_rnn, RNN_PATH)\r\n # TODO: 3) run test_model() on the result\r\n print(\"Test on the saved Recurrent Network\")\r\n rnn_test = torch.load(RNN_PATH)\r\n test_model(rnn_test, loss_fn, test_generator)\r\n \"\"\"\r\n Output:\r\n Test loss: tensor([25.7136])\r\n F-score: 0.42172967869116373\r\n \"\"\"\r\n\r\n # extension-grading: Extension 1, changes to the preprocessing of the data - Tweets tokenizers.\r\n # Major changes are in the utils.py labeled by \"extension-grading\"\r\n Extension1 = False\r\n if Extension1:\r\n print(\"Train and test dnn with Extension 1: Tweets tokenizers\")\r\n train, dev, test = utils.get_data(DATA_FN)\r\n train_generator, dev_generator, test_generator, embeddings,train_data = utils.vectorize_data(train, dev, test, BATCH_SIZE, EMBEDDING_DIM, extension=True)\r\n # try on DNN\r\n dnn = models.DenseNetwork(EMBEDDING_DIM, NUM_CLASSES, HIDDEN_DIM, embeddings)\r\n optimizer = optim.Adam(dnn.parameters())\r\n trained_dnn = train_model(dnn, loss_fn, optimizer, train_generator, dev_generator)\r\n 
test_model(trained_dnn, loss_fn, test_generator)\r\n \"\"\"\r\n Output:\r\n Test loss: tensor([25.5987])\r\n F-score: 0.4465511728425936\r\n # Compared with original tokenizer, F-score increased by 1.6%.\r\n \"\"\"\r\n\r\n # extension-grading: Extension 2, architecture changes - flattening embeddings using the average of unpadded sentence words other than sum. \r\n # Major changes are in the models.py labeled by \"extension-grading\"\r\n Extension2 = False\r\n if Extension2:\r\n print(\"Train and test dnn with Extension 2: Architecture changes - flattening embeddings\")\r\n # initialize the experimental model\r\n exp = models.ExperimentalNetwork(EMBEDDING_DIM, NUM_CLASSES, HIDDEN_DIM, embeddings)\r\n optimizer = optim.Adam(exp.parameters())\r\n # run train_model() to train it\r\n trained_exp = train_model(exp, loss_fn, optimizer, train_generator, dev_generator)\r\n # run test_model() on the result\r\n test_model(trained_exp, loss_fn, test_generator)\r\n \"\"\"\r\n Output:\r\n Test loss: tensor([29.4298])\r\n F-score: 0.22199231332724553\r\n # Compared with original architecture, F-score decreased by half.\r\n \"\"\""
]
| [
"0.6275105",
"0.61198753",
"0.60603017",
"0.60560143",
"0.60117126",
"0.5932849",
"0.5908202",
"0.58721745",
"0.58566064",
"0.5848631",
"0.58267033",
"0.5818534",
"0.57857215",
"0.5772631",
"0.5761682",
"0.5756426",
"0.5731873",
"0.5724584",
"0.571866",
"0.5717423",
"0.5712037",
"0.5701962",
"0.5690031",
"0.56819946",
"0.56819946",
"0.56771606",
"0.5659316",
"0.56305206",
"0.5603852",
"0.5591961"
]
| 0.7010814 | 0 |
Picks the first connection based on the best three connections possible. | def pick_first_connection(self):
self.best_connection = []
stations = list(self.grid.stations.values())
# try each station as the first station of a new track
for station in stations:
self.track = Track(f"greedy_track_{self.count}", self.grid)
self.track.add_station(self.grid, station.name)
lookahead_1 = station.connections
# evaluate the quality of each two-step lookahead and save the best connection
for la1 in lookahead_1:
next_station = stations[int(la1)].name
self.track.add_station(self.grid, next_station)
lookahead_2 = stations[int(la1)].get_connections()
for la2 in lookahead_2:
# if adding the connection exceeds the track's max time length
if self.track.add_station(self.grid, la2[0].name) is False:
break
quality = self.grid.get_quality()
self.track.remove_last_station()
# check whether this track's quality is the best so far and remember the connection
if quality > self.best_score:
self.best_score = quality
self.best_connection = [station.name, stations[int(la1)].name, la2[0].name]
self.track.remove_last_station()
# if no connection improves the quality, stop the algorithm
if self.best_connection == []:
return False
# start a new track at the first station of the best connection
self.track = Track(f"greedy_track_{self.count}", self.grid)
self.track.add_station(self.grid, self.best_connection[0])
self.count += 1
return station | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def choose_serial_connection(potential_connections):\n for connection in potential_connections:\n if os.path.exists(connection):\n return connection\n return None",
"def _pick_server(self, key, inport): #key = ipp.srcip, ipp.dstip, tcpp.srcport, tcpp.dstport\n\n if len(self.total_connection) == 0: # {server_IP : total connection}\n return self.live_servers.keys()[0] #{IP : MAC,port}\n ipserver = self.total_connection.keys()[0]\n totalconns = self.total_connection[ipserver]\n \"\"\"\n Select server with least connections\n \"\"\"\n if len(self.total_connection) == 0:\n return self.live_servers.keys()[0]\n ipserver = self.total_connection.keys()[0]\n totalconns = self.total_connection[ipserver]\n \n for x in self.total_connection: #finding the server IP having least no. of connections\n if self.total_connection[x] < totalconns:\n ipserver = x\n totalconns = self.total_connection[x]\n self.log.debug(\"Best available server: %s\" % ipserver)\n return ipserver",
"def _chooseConnection(ctype, name, results):\n # At this point we have a list of result tuples containing (url, token, PlexServer, runtime)\n # or (url, token, None, runtime) in the case a connection could not be established.\n for url, token, result, runtime in results:\n okerr = 'OK' if result else 'ERR'\n log.debug('%s connection %s (%ss): %s?X-Plex-Token=%s', ctype, okerr, runtime, url, token)\n results = [r[2] for r in results if r and r[2] is not None]\n if results:\n log.debug('Connecting to %s: %s?X-Plex-Token=%s', ctype, results[0]._baseurl, results[0]._token)\n return results[0]\n raise NotFound(f'Unable to connect to {ctype.lower()}: {name}')",
"def getconnection(self):\n\n # If we were able to create the affix_tcpsocket, then we attempt to call\n # getconnection() on the affix tcp server socket first. If we were unable \n # to create it or get a SocketWouldBlockError, we default to the basic\n # repy getconnection() call. The reason for this is to ensure that even\n # if the affixstack breaks down, we are able to revert to the default repy\n # connection.\n if self.tcpserversocket_dict['affix_tcpsocket']:\n try:\n rip, rport, sockobj = self.tcpserversocket_dict['affix_tcpsocket'].getconnection()\n return (rip, rport, AffixSocket(sockobj, self.affix_object)) \n except SocketWouldBlockError:\n pass\n\n return self.tcpserversocket_dict['repy_tcpsocket'].getconnection()",
"def _connection_maker(\n self,\n first_device,\n first_port,\n second_device,\n second_port):\n if first_port is None:\n return self.network.make_connection(\n first_device.id, None,\n second_device.id, second_port.id)\n else:\n return self.network.make_connection(\n first_device.id, first_port.id,\n second_device.id, second_port.id)",
"def learn_connectome(self):\n episode_nodes = [node for node in self.container.nodes if node.is_episode]\n if len(episode_nodes) < 2:\n return\n connections_counter = {}\n for node in episode_nodes:\n self._collect_episode_callout_stats(node, connections_counter)\n\n pair_list = [(key, connections_counter[key]) for key in connections_counter]\n pair_list.sort(key=lambda item: item[1], reverse=True)\n top_count = pair_list[0][1]\n if top_count < 4:\n return\n # make connections for the top half of pairs\n for pair, cnt in pair_list:\n if cnt > top_count // 2:\n self._make_connection_for_pair(pair)",
"def weighted_random_choice(self, cands):\n # Only consider the entries with the highest priority (lowest service priority value)\n lowest_priority = min(conn.srv_priority for conn in cands)\n result = [conn for conn in cands if conn.srv_priority == lowest_priority]\n result.sort(key=lambda a: a.weight)\n if not result:\n raise LVPError(\"No connection available for cloning\")\n\n total_weight = sum(e.weight for e in result)\n random_pos = random.random() * total_weight\n weight_pos = 0.0\n for conn in result:\n weight_pos += conn.weight\n if weight_pos >= random_pos:\n return conn\n\n assert False, \"execution should never end up here\"",
"def _choose_best_trip(self):\n times = [(key, self._trips_dict[key].get_duration()) for key in self._trips_dict.keys()\n if self._trips_dict[key] is not None]\n self._primary_mode = min(times, key=lambda tup: tup[1])[0]",
"async def get_best_node(\n self, cache: Union[int, str, 'BaseCache'], key: Any = None, key_hint: 'IgniteDataType' = None\n ) -> 'AioConnection':\n conn = await self.random_node()\n\n if self.partition_aware and key is not None:\n caches = self._caches_to_update_affinity()\n if caches:\n async with self._affinity_query_mux:\n while True:\n caches = self._caches_to_update_affinity()\n if not caches:\n break\n\n try:\n full_affinity = await self._get_affinity(conn, caches)\n self._update_affinity(full_affinity)\n\n asyncio.ensure_future(\n asyncio.gather(\n *[node.reconnect() for node in self._nodes if not node.alive],\n return_exceptions=True\n )\n )\n\n break\n except connection_errors:\n # retry if connection failed\n conn = await self.random_node()\n pass\n except CacheError:\n # server did not create mapping in time\n return conn\n\n c_id = cache.cache_id if isinstance(cache, BaseCache) else cache_id(cache)\n parts = self._cache_partition_mapping(c_id).get('number_of_partitions')\n\n if not parts:\n return conn\n\n key, key_hint = self._get_affinity_key(c_id, key, key_hint)\n\n hashcode = await key_hint.hashcode_async(key, client=self)\n\n best_node = self._get_node_by_hashcode(c_id, hashcode, parts)\n if best_node:\n return best_node\n\n return conn",
"def get_flow_name_connection_optimized(connection, connections, fast_conns=None):\n if not fast_conns:\n return get_flow_name_connection(connection, connections)\n\n if (connection.flow.attr[co.SADDR], connection.flow.attr[co.DADDR], connection.flow.attr[co.SPORT], connection.flow.attr[co.DPORT]) in fast_conns:\n potential_list = fast_conns[(connection.flow.attr[co.SADDR], connection.flow.attr[co.DADDR], connection.flow.attr[co.SPORT],\n connection.flow.attr[co.DPORT])]\n\n if len(potential_list) == 1:\n return potential_list[0][2], potential_list[0][3]\n\n # Search on list\n potential_match_index = 0\n match_indexes = []\n # Check with an error window of 8 seconds for both sides\n while (potential_match_index < len(potential_list)\n and abs((potential_list[potential_match_index][0] - connection.flow.attr[co.START]).total_seconds()) <= 8.0):\n if connection.flow.attr[co.START].total_seconds() <= potential_list[potential_match_index][0].total_seconds() + potential_list[potential_match_index][1]:\n match_indexes += [potential_match_index]\n\n potential_match_index += 1\n\n if len(match_indexes) == 1:\n return potential_list[match_indexes[0]][2], potential_list[match_indexes[0]][3]\n elif len(match_indexes) > 1:\n print(\"More than one possible match...\")\n # By default, return the first match\n return potential_list[match_indexes[0]][2], potential_list[match_indexes[0]][3]\n else:\n print(\"No match found for MPTCP subflow...\")\n\n return None, None",
"def pick_next_station(self, station):\n self.best_score = 0\n\n stations = self.grid.stations\n # all connections of the last added added station \n lookahead_1 = self.grid.get_station(self.best_connection[1]).connections\n\n for la1 in lookahead_1.values():\n next_station = la1[0].name\n # if adding the connection exceeds the tracks max time length \n if self.track.add_station(self.grid, next_station) is False:\n break\n\n lookahead_2 = self.grid.get_station(la1[0].name).connections\n\n # keeps adding stations untill the time limit is reached\n for la2 in lookahead_2:\n la2 = stations.get(la2)\n if self.track.add_station(self.grid, la2.name) is False:\n break\n \n quality = self.grid.get_quality()\n \n self.track.remove_last_station()\n\n # if quality improves, add first station to the track\n if quality > self.best_score:\n self.best_score = quality \n self.best_connection = [la2.name, la1[0].name]\n \n self.track.remove_last_station()",
"def get_flow_name_connection(connection, connections):\n for conn_id, conn in connections.iteritems():\n # Let a little margin, but don't think it's needed\n if conn.attr.get(co.START, None) and (abs((connection.flow.attr[co.START] - conn.attr[co.START]).total_seconds()) <= 8.0 and\n connection.flow.attr[co.START].total_seconds() <=\n conn.attr[co.START].total_seconds() + float(conn.attr[co.DURATION])):\n for flow_id, flow in conn.flows.iteritems():\n if (connection.flow.attr[co.SADDR] == flow.attr[co.SADDR] and\n connection.flow.attr[co.DADDR] == flow.attr[co.DADDR] and\n connection.flow.attr[co.SPORT] == flow.attr[co.SPORT] and\n connection.flow.attr[co.DPORT] == flow.attr[co.DPORT]):\n return conn_id, flow_id\n\n return None, None",
"def get_conn(self, *args, **kwargs):\n connections = self.__connections_for('get_conn', args=args, kwargs=kwargs)\n\n if len(connections) == 1:\n return connections[0]\n else:\n return connections",
"def get_null_connection():\n nc = no_connection_designator\n return Connections(\n upstream_part=nc,\n up_part_rev=nc,\n upstream_output_port=nc,\n downstream_part=nc,\n down_part_rev=nc,\n downstream_input_port=nc,\n start_gpstime=None,\n stop_gpstime=None,\n )",
"def _pick_network(self, network_type, count):\n possible_networks = self.config[\"networks\"][network_type]\n networks = [self._provider.get_network(net) for net in possible_networks]\n usable = []\n for network in networks:\n ips = self._provider.get_ips(ref=network.get(\"id\"))\n available = ips[\"total_ips\"] - ips[\"used_ips\"]\n if available > count:\n usable.append((network[\"name\"], available))\n\n if not usable:\n logger.error(\n f\"{self.dsp_name}: Error: no usable network\"\n f\" for {count} hosts with {network_type}\"\n )\n return None\n\n # sort networks by number of available IPs\n usable = sorted(usable, key=lambda u: u[1])\n logger.debug(f\"{self.dsp_name}: Listing usable networks: {usable}\")\n res_network = usable[-1][0]\n logger.debug(\n f\"{self.dsp_name}: Picking network \"\n f\"with the most available adresses: {res_network}\"\n )\n return res_network # Pick the one with most IPs",
"def getconnection(self):\n # If we were able to create the shim_tcpsocket, then we attempt to call\n # getconnection() on the shim tcp server socket first. If we were unable \n # to create it or get a SocketWouldBlockError, we default to the basic\n # repy getconnection() call. The reason for this is to ensure that even\n # if the shimstack breaks down, we are able to revert to the default repy\n # connection.\n if self.tcpserversocket_dict['shim_tcpsocket']:\n try:\n rip, rport, sockobj = self.tcpserversocket_dict['shim_tcpsocket'].getconnection()\n return (rip, rport, ShimSocket(sockobj, self.shim_object)) \n except SocketWouldBlockError:\n pass\n\n return self.tcpserversocket_dict['repy_tcpsocket'].getconnection()",
"def get_next_match_pick_first_available(population):\n p1 = None\n for player in population:\n if player.available:\n if p1 is not None:\n return p1, player\n else:\n p1 = player",
"def get_connections(network, user):\n if user not in network or network[user][0] == []:\n return None\n return network[user][0]",
"def _get_random_connection_params(self, priority):\n router_list = self._get_available_routers(priority)\n if not router_list:\n return None\n if len(router_list) == 1:\n return router_list[0]\n\n last = len(router_list) - 1\n index = random.randint(0, last)\n return router_list[index]",
"def get_client_by_socket(self, socket):\n candidate_connection_objects = [connection for connection in self if connection.socket() is socket]\n assert len(candidate_connection_objects) != 0, \"?? socket %s not found in list of client objects\" % socket\n assert len(\n candidate_connection_objects) == 1, \"?? socket %s appears in list of client objects multiple times\" % socket\n return candidate_connection_objects[0]",
"def randomConnect(self):\n if self.Nc == 0:\n return\n else:\n possible_pairs = np.vstack(np.triu_indices(self.numMonomers,k=2)).T\n Nl = len(possible_pairs)\n selected = possible_pairs[np.random.choice(Nl,size=self.Nc,replace=False)].T\n self.connect(selected)",
"def get_connection(self, command, args=()):\n # TODO: find a better way to determine if connection is free\n # and not havily used.\n command = command.upper().strip()\n is_pubsub = command in _PUBSUB_COMMANDS\n if is_pubsub and self._pubsub_conn:\n if not self._pubsub_conn.closed:\n return self._pubsub_conn, self._pubsub_conn.address\n self._pubsub_conn = None\n for i in range(self.freesize):\n conn = self._pool[0]\n self._pool.rotate(1)\n if conn.closed: # or conn._waiters: (eg: busy connection)\n continue\n if conn.in_pubsub:\n continue\n if is_pubsub:\n self._pubsub_conn = conn\n self._pool.remove(conn)\n self._used.add(conn)\n return conn, conn.address\n return None, self._address # figure out",
"def _create_common_connections(self):\n\t\tfor muscle,muscAfferentDelay in self._infoMuscles:\n\t\t\tfor connection in self._infoCommonMuscleConnections:\n\t\t\t\t# List of source cells ids\n\t\t\t\tsourcesId = self.cellsId[muscle][connection[0]]\n\t\t\t\t# gather the sources all together\n\t\t\t\tsourcesId = comm.gather(sourcesId,root=0)\n\t\t\t\tif rank==0: sourcesId = sum(sourcesId,[])\n\t\t\t\tsourcesId = comm.bcast(sourcesId,root=0)\n\t\t\t\t# List of taget cells ids\n\t\t\t\ttargetsId = self.cellsId[muscle][connection[1]]\n\t\t\t\t# Ratio of connection\n\t\t\t\tconRatio = connection[2]\n\t\t\t\t# Number of connections\n\t\t\t\tconNum = int(connection[3])\n\t\t\t\t# Weight of connections\n\t\t\t\tconWeight = float(connection[4])\n\t\t\t\t# Type of synapse\n\t\t\t\tsynType = connection[5]\n\t\t\t\t# connect sources to targets\n\t\t\t\tself._connect(sourcesId,targetsId,conRatio,conNum,conWeight,synType)",
"def _find_connection_element(self, var1, var2):\n cn1, cn2 = var1.component.name, var2.component.name\n cnames = set([cn1, cn2])\n for conn in getattr(self.model, u'connection', []):\n mc = conn.map_components\n if set([mc.component_1, mc.component_2]) == cnames:\n break\n else:\n conn = None\n if conn:\n swap = conn.map_components.component_1 == cn2\n else:\n swap = False\n return conn, swap",
"def _create_special_connections(self):\n\t\tfor connection in self._infoSpecialConnections:\n\t\t\t# List of source cells ids\n\t\t\tsourcesId = self.cellsId[connection[0]][connection[1]]\n\t\t\t# gather the sources all together\n\t\t\tsourcesId = comm.gather(sourcesId,root=0)\n\t\t\tif rank==0: sourcesId = sum(sourcesId,[])\n\t\t\tsourcesId = comm.bcast(sourcesId,root=0)\n\t\t\t# List of taget cells ids\n\t\t\ttargetsId = self.cellsId[connection[2]][connection[3]]\n\t\t\t# Ratio of connection\n\t\t\tconRatio = connection[4]\n\t\t\t# Number of connections\n\t\t\tconNum = int(connection[5])\n\t\t\t# Weight of connections\n\t\t\tconWeight = float(connection[6])\n\t\t\t# Type of synapse\n\t\t\tsynType = connection[7]\n\t\t\t# connect sources to targets\n\t\t\tself._connect(sourcesId,targetsId,conRatio,conNum,conWeight,synType)",
"async def random_node(self) -> AioConnection:\n if self.partition_aware:\n # if partition awareness is used just pick a random connected node\n return await self._get_random_node()\n else:\n # if partition awareness is not used then just return the current\n # node if it's alive or the next usable node if connection with the\n # current is broken\n node = self._nodes[self._current_node]\n if node.alive:\n return node\n\n # close current (supposedly failed) node\n await self._nodes[self._current_node].close()\n\n # advance the node index\n self._current_node += 1\n if self._current_node >= len(self._nodes):\n self._current_node = 0\n\n # prepare the list of node indexes to try to connect to\n for i in chain(range(self._current_node, len(self._nodes)), range(self._current_node)):\n node = self._nodes[i]\n try:\n await node.connect()\n except connection_errors:\n pass\n else:\n return node\n\n # no nodes left\n raise ReconnectError('Can not reconnect: out of nodes.')",
"def is_smaller(connect1, connect2):\n if connect1[3] < connect2[3]:\n return connect2\n if connect1[3] > connect2[3]:\n return connect1\n else:\n return connect1",
"def add_random_connection(self, genome, max_attempts=50):\n\n \"\"\"\n TODO:\n If all attempts failed, the channel is most likely dense, therefore use the dense selection process.\n \"\"\"\n\n def _connect(n0, n1, channel):\n new_connection_spec, new_connection_params = self.connection_factory()\n self.add_connection(\n genome,\n new_connection_spec,\n new_connection_params,\n n0.historical_mark,\n n1.historical_mark,\n channel)\n\n channel_weights = []\n acc_weight = 0\n for channel in self._channels:\n acc_weight += genome.calc_channel_capacity(channel)\n channel_weights.append(acc_weight)\n\n if acc_weight == 0:\n return False\n\n # TODO: see if can implement weighted random choice more efficiently using bisect\n channel, = random.choices(self._channels, k=1, cum_weights=channel_weights)\n src_type, dst_type = channel\n src_neurons = genome.layers()[src_type]\n dst_neurons = genome.layers()[dst_type]\n\n n_attempt = 0\n while n_attempt < max_attempts:\n n_attempt += 1\n\n n0 = random.choice(src_neurons)\n while n0 is None:\n n0 = random.choice(src_neurons)\n\n n1 = random.choice(dst_neurons)\n while n1 is None:\n n1 = random.choice(dst_neurons)\n\n if genome.has_connection(n0.historical_mark, n1.historical_mark):\n continue\n\n _connect(n0, n1, channel)\n return True\n return False",
"def get_current_connection_num():\r\n\treturn (len(_clients), MAX_CONNECTION)",
"def get_conn(self):\n if self.game_request:\n # Prima cerca di ricavare la connessione ciclando sulle connessioni\n for connection in connections.values():\n if connection.player == self:\n return connection\n\n # Nell'eventualità remota che non ve la faccia prova nell'altro modo\n try:\n session = self.game_request.getSession()\n except error.AlreadyCalled:\n return None\n if session in connections:\n return connections[session]\n\n return None"
]
| [
"0.6265498",
"0.62159556",
"0.5885226",
"0.58790165",
"0.5841181",
"0.5781805",
"0.5763767",
"0.5667359",
"0.56560063",
"0.55838865",
"0.5580121",
"0.5565697",
"0.554023",
"0.551508",
"0.5456506",
"0.5409392",
"0.54023296",
"0.53600425",
"0.5354556",
"0.5351314",
"0.53210104",
"0.5315882",
"0.52933496",
"0.52897596",
"0.5286234",
"0.5261601",
"0.5218779",
"0.52183235",
"0.5212548",
"0.520876"
]
| 0.7276076 | 0 |
Picks the next station based on the three connections that produce the best score. | def pick_next_station(self, station):
self.best_score = 0
stations = self.grid.stations
        # all connections of the last added station
lookahead_1 = self.grid.get_station(self.best_connection[1]).connections
for la1 in lookahead_1.values():
next_station = la1[0].name
            # if adding the connection exceeds the track's max time length
if self.track.add_station(self.grid, next_station) is False:
break
lookahead_2 = self.grid.get_station(la1[0].name).connections
            # keeps adding stations until the time limit is reached
for la2 in lookahead_2:
la2 = stations.get(la2)
if self.track.add_station(self.grid, la2.name) is False:
break
quality = self.grid.get_quality()
self.track.remove_last_station()
# if quality improves, add first station to the track
if quality > self.best_score:
self.best_score = quality
self.best_connection = [la2.name, la1[0].name]
self.track.remove_last_station() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pick_first_connection(self):\n self.best_connection = []\n stations = list(self.grid.stations.values())\n\n # add a first station to the track \n for station in stations:\n self.track = Track(f\"greedy_track_{self.count}\", self.grid)\n self.track.add_station(self.grid, station.name)\n\n lookahead_1 = station.connections\n\n # calculate quality of all connections and save the best connection\n for la1 in lookahead_1: \n next_station = stations[int(la1)].name\n self.track.add_station(self.grid, next_station)\n lookahead_2 = stations[int(la1)].get_connections()\n \n for la2 in lookahead_2:\n # if adding the connection exceeds the track's max time length \n if self.track.add_station(self.grid, la2[0].name) is False:\n break\n \n quality = self.grid.get_quality()\n self.track.remove_last_station()\n\n # checks if the quality of the track is the best one yet and remembers it\n if quality > self.best_score:\n self.best_score = quality \n self.best_connection = [station.name, stations[int(la1)].name, la2[0].name]\n self.track.remove_last_station()\n \n # if adding another track does not lead to a better quality, stop algorithm\n if self.best_connection == []:\n return False\n \n # add best connection to the track\n self.track = Track(f\"greedy_track_{self.count}\", self.grid)\n self.track.add_station(self.grid, self.best_connection[0])\n\n self.count += 1\n\n return station",
"def _choose_best_trip(self):\n times = [(key, self._trips_dict[key].get_duration()) for key in self._trips_dict.keys()\n if self._trips_dict[key] is not None]\n self._primary_mode = min(times, key=lambda tup: tup[1])[0]",
"def _select_destination(self):\n # Ideally this should do something clever based on the start location\n # ie known trips. But for now, it will pick randomly!\n station_dict = self.network.station_dict\n\n stations = list(station_dict.keys())\n #stations = [x for x in stations if isinstance(x, int) or x.startswith(\"801\")]\n #stations = [x for x in stations if isinstance(x, int) or x.startswith(\"80139\")]\n weights = [station_dict[x].in_popularity for x in stations]\n\n # pick using the given weight distributions\n self.dest = random.choices(stations, weights=weights)[0]\n\n return",
"def choose_next_target(self, old_sInd, sInds, slewTimes, intTimes):\n\n Comp = self.Completeness\n TL = self.TargetList\n TK = self.TimeKeeping\n OS = self.OpticalSystem\n Obs = self.Observatory\n allModes = OS.observingModes\n\n # cast sInds to array\n sInds = np.array(sInds, ndmin=1, copy=False)\n\n if OS.haveOcculter:\n # current star has to be in the adjmat\n if (old_sInd is not None) and (old_sInd not in sInds):\n sInds = np.append(sInds, old_sInd)\n \n # calculate dt since previous observation\n dt = TK.currentTimeNorm.copy() + slewTimes[sInds] - self.lastObsTimes[sInds]\n # get dynamic completeness values\n comps = Comp.completeness_update(TL, sInds, self.starVisits[sInds], dt)\n \n # if first target, or if only 1 available target, \n # choose highest available completeness\n nStars = len(sInds)\n if (old_sInd is None) or (nStars == 1):\n sInd = np.random.choice(sInds[comps == max(comps)])\n return sInd, None\n \n # define adjacency matrix\n A = np.zeros((nStars,nStars))\n \n # only consider slew distance when there's an occulter\n if OS.haveOcculter:\n r_ts = TL.starprop(sInds, TK.currentTimeAbs)\n u_ts = (r_ts.value.T/np.linalg.norm(r_ts, axis=1)).T\n angdists = np.arccos(np.clip(np.dot(u_ts, u_ts.T), -1, 1))\n A[np.ones((nStars), dtype=bool)] = angdists\n A = self.coeffs[0]*(A)/np.pi\n \n # add factor due to completeness\n A = A + self.coeffs[1]*(1 - comps)\n \n # add factor due to unvisited ramp\n f_uv = np.zeros(nStars)\n unvisited = self.starVisits[sInds]==0\n f_uv[unvisited] = float(TK.currentTimeNorm.copy()/TK.missionLife.copy())**2\n A = A - self.coeffs[2]*f_uv\n\n # add factor due to revisited ramp\n # f2_uv = np.where(self.starVisits[sInds] > 0, 1, 0) *\\\n # (1 - (np.in1d(sInds, self.starRevisit[:,0],invert=True)))\n f2_uv = 1 - (np.in1d(sInds, self.starRevisit[:,0]))\n A = A + self.coeffs[3]*f2_uv\n \n # kill diagonal\n A = A + np.diag(np.ones(nStars)*np.Inf)\n \n # take two traversal steps\n step1 = np.tile(A[sInds==old_sInd,:], (nStars, 1)).flatten('F')\n step2 = A[np.array(np.ones((nStars, nStars)), dtype=bool)]\n tmp = np.argmin(step1 + step2)\n sInd = sInds[int(np.floor(tmp/float(nStars)))]\n\n else:\n nStars = len(sInds)\n\n # 1/ Choose next telescope target\n comps = Comp.completeness_update(TL, sInds, self.starVisits[sInds], TK.currentTimeNorm.copy())\n\n # add weight for star revisits\n ind_rev = []\n if self.starRevisit.size != 0:\n dt_rev = self.starRevisit[:,1]*u.day - TK.currentTimeNorm.copy()\n ind_rev = [int(x) for x in self.starRevisit[dt_rev < 0 , 0] if x in sInds]\n\n f2_uv = np.where((self.starVisits[sInds] > 0) & (self.starVisits[sInds] < self.nVisitsMax), \n self.starVisits[sInds], 0) * (1 - (np.in1d(sInds, ind_rev, invert=True)))\n\n weights = (comps + self.revisit_weight*f2_uv/float(self.nVisitsMax))/intTimes\n\n sInd = np.random.choice(sInds[weights == max(weights)])\n\n waitTime = slewTimes[sInd]\n #Check if exoplanetObsTime would be exceeded\n mode = list(filter(lambda mode: mode['detectionMode'] == True, allModes))[0]\n maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife = TK.get_ObsDetectionMaxIntTime(Obs, mode)\n maxIntTime = min(maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife)#Maximum intTime allowed\n intTimes2 = self.calc_targ_intTime(sInd, TK.currentTimeAbs.copy(), mode)\n if intTimes2 > maxIntTime: # check if max allowed integration time would be exceeded\n self.vprint('max allowed integration time would be exceeded')\n sInd = None\n waitTime = 1.*u.d\n \n return sInd, waitTime",
"def decide_next_query(self):\n for gp in self.gps:\n build_gp_posterior(gp)\n # Find the best mean values for each gp.\n best_f, best_pt, best_gain = None, None, float('-inf')\n queries = self._get_queried_pts()\n for f_idx, f_name in enumerate(self.f_names):\n gp = self.gps[f_idx]\n f_qs = queries[f_name]\n # Assemble points to draw sample from.\n low, high = zip(*self.domains[f_idx])\n rand_pts = np.random.uniform(low, high,\n (self.options.max_opt_evals, len(low)))\n samp_pts = np.vstack([f_qs, rand_pts])\n samp_vals = gp.draw_sample(samp_pts=samp_pts).ravel()\n max_prev = np.max(samp_vals[:len(f_qs)])\n best_new_idx = np.argmax(samp_vals[len(f_qs):]) + len(f_qs)\n gain = samp_vals[best_new_idx] - max_prev\n if gain > best_gain:\n best_f = f_idx\n best_pt = samp_pts[best_new_idx]\n best_gain = gain\n return best_f, best_pt",
"def _choose_best_option(self):",
"def choose_next_player(self):\n player_index = self.players.index(self.current_player)\n if self.direction_clock_wise:\n if player_index >= len(self.players) - 1:\n self.current_player = self.players[0]\n else:\n self.current_player = self.players[player_index + 1]\n else:\n if player_index <= 0:\n self.current_player = self.players[len(self.players) - 1]\n else:\n self.current_player = self.players[player_index - 1]",
"def visit_all_possibilities(self, first_station, track, grid):\n # loops over connections of station\n for connection in first_station.connections:\n # keeps adding untill the max length of a track is reached\n if track.add_station(grid, self.stations[connection].name):\n # calculates the quality of adding the station and remembers it if it is the best score yet\n if grid.get_quality() > self.best_score:\n self.best_score = grid.get_quality()\n self.grid = copy.deepcopy(grid)\n print(f\"new best score: {self.best_score}:\\n{self.grid}\\n\\n\")\n\n # repeat untill there are no more configurations left\n self.visit_all_possibilities(self.stations[connection], track, grid)\n track.remove_last_station()",
"def get_best_move(self):\n moves1 = self.get_next_moves1() # moves1 represents all legal moves.\n moves2 = self.get_next_moves2() # moves2 represents the moves that allow the AI to score a box.\n moves3 = self.get_next_moves3() # moves3 represents the moves that will allow the player to score a box.\n\n\n if len(moves1) == 0: # the siuation that there is no legal move\n return self\n if len(moves2) != 0:\n return moves2[len(moves2) // 2] # the siuation that there is(are) move(s) to score\n\n elif len(moves3) != 0:\n return moves3[len(moves3) // 2] # the siuation that there is(are) moves(s) to allow the player to score\n\n else:\n return moves1[len(moves1) // 2] # if there is no better moves, the AI will play sequentially, starting from the top left.",
"def _pick_server(self, key, inport): #key = ipp.srcip, ipp.dstip, tcpp.srcport, tcpp.dstport\n\n if len(self.total_connection) == 0: # {server_IP : total connection}\n return self.live_servers.keys()[0] #{IP : MAC,port}\n ipserver = self.total_connection.keys()[0]\n totalconns = self.total_connection[ipserver]\n \"\"\"\n Select server with least connections\n \"\"\"\n if len(self.total_connection) == 0:\n return self.live_servers.keys()[0]\n ipserver = self.total_connection.keys()[0]\n totalconns = self.total_connection[ipserver]\n \n for x in self.total_connection: #finding the server IP having least no. of connections\n if self.total_connection[x] < totalconns:\n ipserver = x\n totalconns = self.total_connection[x]\n self.log.debug(\"Best available server: %s\" % ipserver)\n return ipserver",
"def get_best_link_station_with_power(self, link_stations):\n best_power = 0\n best_station = link_stations[0]\n for station in link_stations:\n power = station.get_power(self)\n if power > best_power:\n best_station = station\n best_power = power\n return self._str_best_link_station_with_power(best_station, best_power)",
"def choose_next(self, round):\n return random.choice(self.possible_coords)",
"def choose_bestnext(self, round):\n board_percentage = []\n \n for i in self.possible_coords:\n iSq = round.getSq(i[0], i[1])\n \n if round.pr_hook(iSq) == ' X ':\n sq_percentage = []\n surroundings = iSq.point_neighbors()\n \n for j in surroundings:\n jSq = round.getSq(j[0], j[1])\n\n if round.as_int(jSq) != None:\n count_X = 0\n count_F = 0\n check = jSq.point_neighbors()\n\n for k in check:\n kSq = round.getSq(k[0], k[1])\n if round.pr_hook(kSq) == ' X ':\n count_X += 1\n elif round.pr_hook(kSq) == ' f ':\n count_F += 1 \n if count_X != 0:\n sq_percentage.append((jSq.mine_neighbors() - count_F)/ count_X)\n\n avg_percent = 0\n if len(sq_percentage) == 0:\n avg_percent = 0.8\n elif sq_percentage.count(1) != 0:\n avg_percent = 1\n round.flagSq(i[0], i[1])\n else:\n sum_so_far = 0\n for p in sq_percentage:\n sum_so_far += p\n avg_percent = sum_so_far / len(sq_percentage)\n \n board_percentage.append(avg_percent)\n\n else:\n board_percentage.append(100)\n\n sorted_percentages = board_percentage.copy()\n sorted_percentages.sort()\n\n best_choice = board_percentage.index(sorted_percentages[0])\n\n return self.possible_coords[best_choice]",
"def _choose_best_option(self) -> None:\r\n pawn = choice(list(self._state.game.engine.get_movable_pawns()))\r\n move = choice(self._state.game.engine.get_moves_for_pawn(pawn))\r\n self._selected_pawn = pawn\r\n self._selected_move = move",
"def select_leader(self):\n\n if self.leaders.size() == 1:\n return self.leaders.rand_choice()\n\n candidates = self.leaders.rand_sample(2)\n\n # randomly favourize one of them\n # best_global = choice(candidates)\n\n # should select those which has bigger fitness\n # # if one of them dominates, it will be selected as global best\n # dom = self.dominance.compare(candidates[0].costs_signed, candidates[1].costs_signed)\n #\n # if dom == 1:\n # best_global = candidates[0]\n #\n # if dom == 2:\n # best_global = candidates[1]\n\n if candidates[1].features['crowding_distance'] > candidates[0].features['crowding_distance']:\n best_global = candidates[1]\n else:\n best_global = candidates[0]\n return best_global",
"def select_leader(self):\n\n if self.leaders.size() == 1:\n return self.leaders.rand_choice()\n\n candidates = self.leaders.rand_sample(2)\n\n # randomly favourize one of them\n # best_global = choice(candidates)\n\n # should select those which has bigger fitness\n # # if one of them dominates, it will be selected as global best\n # dom = self.dominance.compare(candidates[0].costs_signed, candidates[1].costs_signed)\n #\n # if dom == 1:\n # best_global = candidates[0]\n #\n # if dom == 2:\n # best_global = candidates[1]\n\n if candidates[1].features['crowding_distance'] > candidates[0].features['crowding_distance']:\n best_global = candidates[1]\n else:\n best_global = candidates[0]\n return best_global",
"def test_get_best_candidate(self):\n optimizer = \"RandomSearch\"\n name = \"test_init_experiment\"\n param_defs = {\n \"x\": MinMaxNumericParamDef(0, 1),\n \"name\": NominalParamDef([\"A\", \"B\", \"C\"])\n }\n minimization = True\n\n LAss = PrettyLabAssistant()\n LAss.init_experiment(name, optimizer, param_defs, minimization=minimization)\n cand_one = LAss.get_next_candidate(name)\n cand_one.result = 1\n LAss.update(name, cand_one)\n\n cand_two = LAss.get_next_candidate(name)\n cand_two.result = 0\n LAss.update(name, cand_two)\n\n assert_equal(cand_two, LAss.get_best_candidate(name))",
"def __get_next_greedy_move(self, game_state): \n best_move = None\n best_score = None\n for free_seat in self.__get_free_seats(game_state):\n next_game_state_score = self.__get_score(game_state, free_seat)\n if best_score is None:\n best_score = next_game_state_score\n best_move = free_seat\n continue\n if next_game_state_score > best_score:\n best_score = next_game_state_score\n best_move = free_seat\n return best_move",
"def getNextOptimal(self):\n\t\tnodes=self.optNodes\n\t\texceeds=self.m.exceedsAngleLim\n\t\tif self.optNode is len(nodes)-1: #last node\n\t\t\tself.noMoreSpots=True\n\t\t\treturn self.pos\n\t\telif len(nodes) is 0 or (self.otherDevice is not None and exceeds(self,nodes[self.optNode+1],self.otherDevice)):\n\t\t\treturn self.pos #could not go to next ideal, other arm is blocking.\n\t\telse:\n\t\t\t#get the next optimal in list and iterate until it is \"forward\" angularly.\n\t\t\tself.optNode+=1\n\t\t\tif '2a' in self.m.type:\n\t\t\t\twhile self.m.getCylindrical(nodes[self.optNode])[1] > self.posCyl[1] and self.optNode<len(nodes)-1 and not exceeds(self,nodes[self.optNode+1],self.otherDevice): \n\t\t\t\t\tself.optNode+=1\n\t\treturn nodes[self.optNode]",
"def next_candidate():\r\n candidate_bidder = -1\r\n candidate_value = -1\r\n for n in range(len(bidders)):\r\n if (is_active[n] == 0 and cur_value(n) is not None\r\n and cur_value(n) > max(candidate_value, cur_bid)):\r\n candidate_value = bidders[n].values[cur_value_idx[n]]\r\n candidate_bidder = n\r\n return candidate_value, candidate_bidder",
"def get_best_solution(self):\n if not self.tours:\n raise Exception('No solution has been computed yet')\n scores = {s:get_cost(self.tours[s],self) for s in self.tours}\n best = min(scores,key=scores.get)\n print('The best solution is given by {} with score {}'.format(best,scores[best]))\n return self.tours[best]",
"def get_best_candidate(self):\n if not self.scores:\n return None\n return self.te_list[self.scores.index(max(self.scores))]",
"def greedy_selector(self):\n r_k = 0 \n best_route = []\n cities_to_visit = [i for i in range(1, self.city_count)]\n for _ in range(1, self.city_count):\n s_ind = np.argmax([self.tau[(r_k, u)] for u in cities_to_visit])\n s_k = cities_to_visit.pop(s_ind)\n best_route.append((r_k, s_k))\n r_k = s_k\n best_route.append((r_k, 0))\n \n shortest_path = np.sum([self.phi[(p)] for p in best_route])\n return best_route, shortest_path",
"def find_best_cycle(road_map):\n #Assume the best_cycle is the initial road map and calculate total distance \n best_cycle = road_map\n best_cycle_dist = compute_total_distance(road_map)\n best_attempts = [best_cycle_dist]\n # For each city in the road map\n for i in range(len(road_map)):\n for swaps in range(10000):\n #A random number between 0 and total number of cities is generated.\n number = int(len(road_map) * random.random())\n # Create a test tuple where the first field in the tuple is the road map,\n # with two cities swapped, and second field is total distance.\n # The type of swap depends on whether the random number is odd or even.\n # If even or if i is equal to number, the cities at index i and i+1 is swapped.\n # If odd and i is not equal to number, the cities at index i and number are swapped.\n # As a result, on each swap, there is\n # 50% chance of either type of swap being selected.\n if number % 2 == 1 and i != number:\n test = swap_cities(best_cycle,i,number)\n else:\n test = swap_adjacent_cities(best_cycle,i)\n # Compare the second field with current best cycle distance\n # If current best cycle distance is greater, then set best cycle\n # to the road map after swapping \n if best_cycle_dist > test[1]:\n best_cycle = test[0]\n best_cycle_dist = test[1]\n if best_attempts[len(best_attempts)-1] > best_cycle_dist:\n best_attempts.append(best_cycle_dist)\n return best_cycle, best_cycle_dist, best_attempts",
"def calc_nearest_state(self): # TODO: Check if we need here state, instead of self.state\n self.stateC = self.toConceptual(self.state)\n CTP, winners = self.find_winner()\n\n state_name = self.find_TPname(filleridx=winners)\n binding = self.find_symBinding(filleridx=winners)\n state_num = self.find_TPnum(stateName=state_name)\n TP_state = self.TP.matmul(fortran_reshape(CTP, (torch.numel(CTP), 1)))\n Cdist = torch.norm(CTP - self.stateC, p='fro') # Frobenius Norm\n Sdist = self.L2norm(TP_state - self.state)\n TP_h = self.calc_harmony(state=TP_state)\n\n return TP_state, winners, state_name, binding, Cdist, Sdist, state_num, TP_h",
"def greedy_initial(self):\r\n sol = [] # [[0;2;5;0;4;6;0],[],...]\r\n sol_veh_type = [] # corresponding vehicle type for the solution\r\n route_way_time = []\r\n\r\n to_vist = [i+1 for i in range(store_num - 1)] # [1,5,8,...]\r\n itr = 0\r\n\r\n while len(to_vist) > 0 and itr < 500:\r\n itr += 1\r\n\r\n if itr <= small_veh_cnt:\r\n vehicle_type0 = 2\r\n elif itr <= small_veh_cnt + medium_veh_cnt:\r\n vehicle_type0 = 3\r\n else:\r\n vehicle_type0 = 5\r\n\r\n sol_veh_type.append(vehicle_type0)\r\n\r\n used_res = [0, 0, 0, 0] # used volume, and travel time of the vehicle, leave time, travel distance\r\n veh_rout = [0]\r\n\r\n # print '\\nA new vehicle will be used.'\r\n way_time = 0 # travel time of coming to the store + wait time at the store + operation time at this store\r\n while True:\r\n curr_cust = veh_rout[-1]\r\n\r\n next_one, way_time = self.time_nn(way_time, curr_cust, to_vist, used_res, len(veh_rout), vehicle_type0)\r\n next_cust, next_start = next_one[0], next_one[1]\r\n # print('next start', next_cust, next_start)\r\n if next_cust == 0: # next visiting customer is depot\r\n # print 'Get back to the depot, and ready for a new round.'\r\n veh_rout.append(next_cust)\r\n break\r\n\r\n else: # next visiting customer is a store\r\n used_res[0] += (num_demd[next_cust][0] * bskt_vol + num_demd[next_cust][1] * trsf_vol + (num_demd[next_cust][2] + \\\r\n num_demd[next_cust][3]) * milk_vol + num_demd[next_cust][4] * paper_bskt)\r\n used_res[2] = (next_start + oprt_t)\r\n used_res[3] += dist_mat[curr_cust, next_cust]\r\n\r\n\r\n veh_rout.append(next_cust)\r\n # print 'Vehicle used resource: ', used_res\r\n to_vist.remove(next_cust)\r\n\r\n sol.append(veh_rout)\r\n route_way_time.append(way_time)\r\n\r\n # print 'Last point 0 earliest leave time: ', int(used_res[-1]) / 60, ':', int(used_res[-1]) % 60\r\n # print 'Route %s is: ' % itr, veh_rout\r\n print('*'*10, 'Iteration:', itr, '*'*10)\r\n\r\n\r\n if len(to_vist) > 0:\r\n print('number of stores remained: ', len(to_vist))\r\n\r\n return sol, sol_veh_type, route_way_time",
"def _get_nearest_slot(self):\n available_slots = [pslot for pslot in self.slots.values() if pslot.available]\n if not available_slots:\n return None\n\n return sorted(available_slots, key=lambda x: x.slot_no)[0]",
"def next(self):\n self.current_state = self.next_state\n self.next_state = self.clear_screen() # set values to 0\n for x in range(1, 101):\n for y in range(1, 101):\n # calculate the number of alive neighbours at given coordinates\n self.neighbours_alive = self.check_neighbours_alive(x, y)\n\n # assign the result value from rule sets\n self.next_state[x][y] = self.rule_sets[self.selected_rule][ # selected rule name\n str(self.current_state[x][y])][ # 0 or 1 (dead or alive)\n self.neighbours_alive] # number between 0 to 8\n return self.next_state",
"def shortest_path(self):\n\t\t#dict that will hold the cost of traveling to each station\n\t\t#add the initial cost of the starting station, which is 0\n\t\tD = {0:0}\n\n\t\t#add all of our dict keys (stations) to our queue\n\t\tstation_queue = self.station_graph.keys()\n\n\t\t#sort the keys! since the graph is directed and acyclic, the stations\n\t\t#can be explored one at a time, in order, without having to adjust\n\t\t#for the lowest distance value via priority queue.\n\t\t#\n\t\t#sort them with reverse=True so that they can be popped from the\n\t\t#end of the list instead of from the beginning. This should save\n\t\t#some cpu time.\n\t\tstation_queue.sort(reverse=True)\n\t\twhile len(station_queue) > 0:\n\n\t\t\tstation = station_queue.pop() #grab the next node in the queue\n\n\t\t\tfor next_st, next_cost in self.station_graph[station].iteritems():\n\t\t\t\t#loops through the current station's neighbors, and calculates\n\t\t\t\t#their costs from the starting node, making sure to store\n\t\t\t\t#the lowest cost in our D dict\n\t\t\t\talt = D[station] + next_cost #sum the costs\n\t\t\t\tif not D.has_key(next_st) or alt < D[next_st]:\n\t\t\t\t\t#if there is no cost on record, or if the newly calculated\n\t\t\t\t\t#cost is lower than the currently recorded one, then\n\t\t\t\t\t#record the newly calculated cost as the lowest\n\t\t\t\t\tD[next_st] = alt #set the cost to get to next_st\n\n\t\treturn D[self.final_stop]",
"def choose(self):\n\t\test = []\n\t\tfor i in self.S:\n\t\t\tem1 = self.getD(self.SJ(i)[0]) + self.gett(self.SJ(i)[0])\n\t\t\ttry:\n\t\t\t\tem2 = max([ self.getD(j) + self.gett(j) for j in self.SM(i)])\n\t\t\texcept:\n\t\t\t\tem2 = 0\n\t\t\te = self.getr(i) + self.getD(i) + max(em1,em2)\n\t\t\test.append(e)\n\t\tind = random.choice(sorted(est)[:self.c])\n\t\treturn self.S[est.index(ind)]"
]
| [
"0.7436752",
"0.6377942",
"0.6055019",
"0.56210613",
"0.56195927",
"0.55932504",
"0.5592113",
"0.5579121",
"0.55658734",
"0.553037",
"0.5499738",
"0.54976356",
"0.5447143",
"0.54371697",
"0.54167473",
"0.54167473",
"0.54080623",
"0.5369538",
"0.53499717",
"0.5339296",
"0.5330009",
"0.5316332",
"0.52908653",
"0.5275893",
"0.5272174",
"0.526543",
"0.52546895",
"0.5251181",
"0.52426964",
"0.5228548"
]
| 0.83878165 | 0 |
Predicts the sound class (0 -> Kick, 1 -> Snare) for a single sound using an XGBoost model | def predictSoundClass(sound, boostModel, sampleRate=44100, nCoeffs=32):
sound = util.normalize(sound)
mfcc = extractFeatures(sound, sampleRate, nCoeffs)
mfcc = mfcc.reshape(1, len(mfcc))
dTest = xgb.DMatrix(mfcc)
return boostModel.predict(dTest) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def predict(x):\n\n scores = np.zeros(shape=(len(classes_def), len(x)))\n\n for idx, c in enumerate(classes_def):\n\n model_name = model_name_pre + c + model_name_post\n print('Loading model', model_name, 'and making predictions..')\n model = load_model(model_name)\n\n scores[idx] = model.predict(x).reshape(len(x))\n\n out = []\n\n for predictions in scores.T:\n # Majority vote\n max_idx_for_sample = 0\n max_prob_for_sample = 0\n for i, prediction in enumerate(predictions):\n if prediction > max_prob_for_sample:\n max_idx_for_sample = i\n\n out.append(classes_def[max_idx_for_sample])\n\n return out",
"def xgboost_predict(self, x) -> np.array:\r\n if self.xgboostModel is None:\r\n print(\"xgboost not trained, please run xgboost_fit first!\")\r\n return None\r\n else:\r\n return self.xgboostModel.predict(x)",
"def makepredictions(self):\n data, sampling_rate = librosa.load(self.file)\n mfccs = np.mean(librosa.feature.mfcc(y=data, sr=sampling_rate, n_mfcc=40).T, axis=0)\n x = np.expand_dims(mfccs, axis=1)\n x = np.expand_dims(x, axis=0)\n predictions = self.loaded_model.predict_classes(x)\n predict = self.convertclasstoemotion(predictions)\n print(\"Prediction is\", \" \", self.convertclasstoemotion(predictions))\n return predict",
"def predict(self, x):\n sleep(10)\n preds = [choice(['happy', 'sad', 'angry']) for i in range(len(x))]\n out = [{'text': t.text, 'sentiment': s} for t, s in zip(x, preds)]\n return out",
"def predict(self, x, **kwargs):\n return self.tuner.get_best_models(1)[0].predict(x, **kwargs)",
"def predict(self, model, x_test):\n pass",
"def predict(self, X):",
"def predict(self, X):",
"def predict(self, x):\n # *** START CODE HERE ***\n return self.clf.predict_classes(x.reshape(x.shape[0], 28, 28, 1))\n # *** END CODE HERE ***",
"def predict_XGB():\n #read future games\n future_mstr = pd.read_csv(FG_filename)\n #extract features and prepare X for predictions\n features = future_mstr.columns[6:]\n X_new = future_mstr[features]\n #load saved model\n xgb = joblib.load(XG_filename)\n #make predictions\n XG_preds = xgb.predict(X_new)\n future_mstr['Predictions'] = XG_preds\n #return the relevant prediction information\n cols = ['Gamecode', 'Date', 'Team', 'Opp', 'Predictions']\n prediction_mstr_all = future_mstr[cols]\n \n return prediction_mstr_all",
"def predict(data_gen, index, partition, model, verbose=True):\n audio_path,data_point,transcr,prediction = predict_raw(data_gen, index, partition, model)\n output_length = [model.output_length(data_point.shape[0])]\n pred_ints = (K.eval(K.ctc_decode(\n prediction, output_length, greedy=False)[0][0])+1).flatten().tolist()\n predicted = ''.join(int_sequence_to_text(pred_ints)).replace(\"<SPACE>\", \" \")\n wer_val = wer(transcr, predicted)\n if verbose:\n # display(Audio(audio_path, embed=True))\n print('Truth: ' + transcr)\n print('Predicted: ' + predicted)\n print(\"wer: %d\" % wer_val)\n return wer_val",
"def MultiClassif(synth_sample, real_sample, label, n_cores=1):\n synth_sample, real_sample = MultiClassLabelCheck(synth_sample, real_sample, label)\n\n train_col = list(set(synth_sample.columns) - set([label]))\n \n X_test = real_sample[train_col]\n y_test = real_sample[label]\n \n X_train = synth_sample[train_col]\n y_train = synth_sample[label]\n \n model = XGBClassifier(n_estimators=512,\n use_label_encoder=False,\n max_depth=64,\n verbosity=0,\n objective = 'multi:softmax',\n num_class = np.unique(y_train).size,\n eval_metric = 'merror',\n maximize=False,\n n_jobs=n_cores,\n )\n y_pred = model.fit(X_train, y_train).predict(X_test)\n \n return matthews_corrcoef(y_test, y_pred)",
"def recognize(models: dict, test_set: SinglesData):\n\n # TODO implement the recognizer\n # return probabilities, guesses\n\n # Create blank lists for probabilities and X's\n probabilities = []\n\n # Needs to be guesses as per utils, this confused me for a bit.\n guesses = []\n\n # So what we're going to do is get the list of x lengths.\n # We need to use the asl_utils file for this.\n x_lengths = test_set.get_all_Xlengths()\n\n # Then we iterate over the x values and their corresponding lengths:\n for x, length in x_lengths.values():\n\n # save a blank dictionary of the likelihood score:\n ld = {}\n\n # store minimum possible score to save max score later:\n current_best_score = float('-inf')\n\n # store the highest probability word, keep n/a for start.\n current_best_word = None\n\n # Then we iterate over the word and the model that we developed for each word.\n # The information is stored in models within items.\n for word, model in models.items():\n\n # Try was added because some words don't have a return.\n try:\n\n # We label the score\n score = model.score(x, length)\n\n # we label the blank dictionary\n ld[word] = score\n\n # if the word's score is greater than our current max score\n if score > current_best_score:\n\n # The word's score is now the max score\n current_best_score = score\n current_best_word = word\n\n # Exception for when we have no response.\n except:\n\n # When we have no response the likelihood is infinitely low.\n # I also had word as 'word' need to remember to not do that.\n ld[word] = float('-inf')\n\n # I messed this up very badly before I got it.\n guesses.append(current_best_word)\n probabilities.append(ld)\n\n\n return probabilities, guesses",
"def predict(self, X):\n ...",
"def predict(self, X):\n ...",
"def predict(self, X):\n ...",
"def predict(self, X):\n words = X.split()\n chance = []\n for cur_label in self.model[\"labels\"]:\n probability = self.model[\"labels\"][cur_label][\"probability\"]\n total_grade = math.log(probability, math.e)\n for word in words:\n word_dict = self.model[\"words\"].get(word, None)\n if word_dict:\n total_grade += math.log(word_dict[cur_label], math.e)\n chance.append((total_grade, cur_label))\n _, prediction = max(chance)\n return prediction",
"def predict(wav, labels, graph, input_name, output_name, how_many_labels):\n pred_lab, pred_prob=label_wav(wav, labels, graph, input_name, output_name, how_many_labels)\n return pred_lab, pred_prob",
"def predict(self, infile, model_path=None, eval_gold=False, as_text=False):\n\n\t\tif model_path is None: # Try default model location\n\t\t\tmodel_path = script_dir + os.sep + \"models\" + os.sep + self.corpus + \"_ensemble_sent.pkl\"\n\n\t\tclf, num_labels, cat_labels, multicol_dict, vocab, firsts, lasts = joblib.load(model_path)\n\n\t\tif as_text:\n\t\t\tconllu = infile\n\t\telse:\n\t\t\tconllu = io.open(infile,encoding=\"utf8\").read()\n\n\t\t#tagged = udpipe_tag(conllu,self.udpipe_model)\n\t\ttagged = tt_tag(conllu,self.lang)\n\n\t\ttrain_feats, _, toks, _, _ = read_conll(tagged,genre_pat=self.genre_pat,mode=\"sent\",as_text=True,char_bytes=self.lang==\"zho\")\n\t\theaders = sorted(list(train_feats[0].keys()))\n\n\t\tdata = []\n\n\t\tpreds = {}\n\t\tfor e in self.estimators:\n\t\t\tpred = e.predict(tagged)\n\t\t\t_, preds[e.name + \"_prob\"] = [list(x) for x in zip(*pred)]\n\t\t\theaders.append(e.name + \"_prob\")\n\n\t\tgenre_warning = False\n\t\tfor i, item in enumerate(train_feats):\n\t\t\titem[\"first\"] = item[\"word\"][0] if item[\"word\"][0] in firsts else \"_\"\n\t\t\titem[\"last\"] = item[\"word\"][-1] if item[\"word\"][-1] in lasts else \"_\"\n\t\t\tif \"genre\" in cat_labels:\n\t\t\t\tif item[\"genre\"] not in multicol_dict[\"encoder_dict\"][\"genre\"].classes_: # New genre not in training data\n\t\t\t\t\tif not genre_warning:\n\t\t\t\t\t\tsys.stderr.write(\"! WARN: Genre not in training data: \" + item[\"genre\"] + \"; suppressing further warnings\\n\")\n\t\t\t\t\t\tgenre_warning = True\n\t\t\t\t\titem[\"genre\"] = \"_\"\n\t\t\tif \"pos\" in cat_labels:\n\t\t\t\tif item[\"pos\"] not in multicol_dict[\"encoder_dict\"][\"pos\"].classes_:\n\t\t\t\t\titem[\"pos\"] = \"_\"\n\t\t\tif \"cpos\" in cat_labels:\n\t\t\t\tif item[\"cpos\"] not in multicol_dict[\"encoder_dict\"][\"cpos\"].classes_:\n\t\t\t\t\titem[\"cpos\"] = \"_\"\n\t\t\tif item[\"word\"] not in vocab and \"word\" in multicol_dict[\"encoder_dict\"]:\n\t\t\t\tif item[\"pos\"] in multicol_dict[\"encoder_dict\"][\"word\"].classes_:\n\t\t\t\t\titem[\"word\"] = item[\"pos\"]\n\t\t\t\telse:\n\t\t\t\t\titem[\"word\"] = \"_\"\n\t\t\tfor e in self.estimators:\n\t\t\t\titem[e.name + \"_prob\"] = preds[e.name + \"_prob\"][i]\n\n\t\t\tfeats = []\n\t\t\tfor k in headers:\n\t\t\t\tfeats.append(item[k])\n\n\t\t\tdata.append(feats)\n\n\t\tdata, headers, _, _ = self.n_gram(data,headers,[],[])\n\n\t\tdata = pd.DataFrame(data, columns=headers)\n\t\tdata_encoded = self.multicol_transform(data,columns=multicol_dict[\"columns\"],all_encoders_=multicol_dict[\"all_encoders_\"])\n\n\t\tdata_x = data_encoded[cat_labels+num_labels].values\n\t\tpred = clf.predict(data_x)\n\n\t\t# Ensure first token in document is always a sentence break\n\t\tfor i, x in enumerate(data_encoded[\"tok_id\"].values):\n\t\t\tif x == 1:\n\t\t\t\tpred[i] = 1\n\n\t\tif eval_gold:\n\t\t\tgold_feats, _,_,_,_ = read_conll(conllu,genre_pat=self.genre_pat,mode=\"sent\",as_text=True)\n\t\t\tgold = [int(t['wid'] == 1) for t in gold_feats]\n\t\t\tconf_mat = confusion_matrix(gold, pred)\n\t\t\tsys.stderr.write(str(conf_mat) + \"\\n\")\n\t\t\ttrue_positive = conf_mat[1][1]\n\t\t\tfalse_positive = conf_mat[0][1]\n\t\t\tfalse_negative = conf_mat[1][0]\n\t\t\tprec = true_positive / (true_positive + false_positive)\n\t\t\trec = true_positive / (true_positive + false_negative)\n\t\t\tf1 = 2*prec*rec/(prec+rec)\n\t\t\tsys.stderr.write(\"P: \" + str(prec) + \"\\n\")\n\t\t\tsys.stderr.write(\"R: \" + str(rec) + \"\\n\")\n\t\t\tsys.stderr.write(\"F1: \" + str(f1) + \"\\n\")\n\t\t\twith 
io.open(\"diff.tab\",'w',encoding=\"utf8\") as f:\n\t\t\t\tfor i in range(len(gold)):\n\t\t\t\t\tf.write(\"\\t\".join([toks[i],str(gold[i]),str(pred[i])])+\"\\n\")\n\t\t\treturn conf_mat, prec, rec, f1\n\t\telse:\n\t\t\treturn pred",
"def predict(model, X):\n\tmodel.eval()\n\t# make the predictions\n\tscores = model.forward(X)\n\n\t# scores contains, for each example, two scores that can be interpreted as the\n\t# probability of each example belonging to each of the classes. To select the\n\t# final predicted label, we will select the class with higher probability.\n\tpredicted_labels = scores.argmax(dim=-1) # predicted_labels shape: (n_examples)\n\n\treturn predicted_labels",
"def predict(self, x, **kwargs):\n kwargs = self.filter_sk_params(Sequential.predict_classes, kwargs)\n classes = self.model.predict_classes(x, **kwargs)\n return self.classes_[classes]",
"def predict(self, X):\n X = self.move_data_device(X)\n #Special case to compare with Sebastiani\n if X.shape[0]==self.bag_size:\n with torch.no_grad():\n self.model.eval()\n return self.model.forward(X).cpu().detach().numpy()\n else:\n predictions=torch.zeros((self.n_bags*self.test_epochs,self.n_classes),device=self.device)\n for epoch in range(self.test_epochs):\n start_epoch = time.time()\n if self.verbose>10:\n print(\"[{}] Starting testing epoch {}... \".format(self.dataset_name,epoch),end='')\n samples_indexes= self.bag_generator.compute_prediction_bags(dataset_size=X.shape[0],n_bags=self.n_bags,bag_size=self.bag_size)\n with torch.no_grad():\n self.model.eval()\n for i,sample_indexes in enumerate(samples_indexes):\n predictions[(epoch*self.n_bags)+i,:] = self.model.forward(X[sample_indexes,:])\n end_epoch = time.time()\n elapsed = end_epoch - start_epoch\n print(\"[Time:{:.2f}s]\".format(elapsed),end='')\n print(\"done.\")\n\n return torch.mean(predictions,axis=0).cpu().detach().numpy()",
"def nonlearning():\n\taT.featureAndTrain(['../../AudioData/chunked_data_sorted/pos', '../../AudioData/chunked_data_sorted/neg'], \n\t\t\t\t\t\t1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, \n \"svm\", \"emotion_classifier\", True)",
"def algo(self):\n audio = np.array([self.audio.popleft() for _ in range(self.count)])\n # Run Classifier\n wav_data = np.abs(np.fft.rfft(audio.flatten()))\n if len(wav_data) > 0:\n pred = self.clf.predict(np.expand_dims(wav_data, 0))\n if self.verbose > 1:\n print('The prediction is : ' + str(pred))\n self.finished.emit(int(pred[-1]))\n else:\n self.finished.emit(0)",
"def predict(self, text, threshold=.0):\n if not self.k_model or not self.w2v_model:\n raise RuntimeError(\"Model not in memory, please load it train new model\")\n start_at = time.time()\n x_test = keras.preprocessing.sequence.pad_sequences(\n self.tokenizer.texts_to_sequences(gensim.utils.simple_preprocess(text)),\n maxlen=self.k_max_sequence_len)\n # Predict\n confidences = self.k_model.predict(x_test)[0]\n # Get mex prediction\n idx = np.argmax(confidences)\n elapsed_time = time.time() - start_at\n if float(confidences[idx]) > threshold:\n return {\"label\": self.label_encoder.classes_[idx], \"confidence\": float(confidences[idx]),\n \"elapsed_time\": elapsed_time}\n return {\"label\": \"__OTHER__\", \"confidence\": float(confidences[idx]), \"elapsed_time\": elapsed_time}",
"def test():\r\n le = preprocessing.LabelEncoder()\r\n le.fit([\"Door Knocking\",\"Shower Running\",\"Toilet Flushing\",\"Vacuum Cleaning\",\"Keyboard Typing\", # encode class labels as numeric id values\r\n \"Coughing\",\"Neutral\"])\r\n \r\n if torch.cuda.is_available():\r\n device = \"cuda:0\"\r\n use_cuda = True\r\n else:\r\n device = \"cpu\"\r\n use_cuda = False\r\n \r\n myModel, start_epoch, train_hist = loadCheckpoint(31, use_cuda)\r\n \r\n #myModel = myModel.double()\r\n myModel = myModel.to(device, dtype=torch.double)\r\n next(myModel.parameters()).device # Check that it is on Cuda\r\n \r\n file_names = []\r\n class_ids = []\r\n max_s = 1\r\n sr = 44100 \r\n for entry in os.scandir(\"test wavs/\"): # for each folder corresponding to a class in dataset\r\n class_id = entry.name # get class numeric id according to label encoder\r\n relative_path = \"test wavs/\"+entry.name # get path location of data sample for loading audio\r\n file_names.append(relative_path) # append to list\r\n class_ids.append(class_id)\r\n\r\n max_s = 1\r\n sr = 44100\r\n X_test = [] \r\n for i in range(len(file_names)):\r\n audio = LoadAudio.load(file_names[i]) # load audio file\r\n audio = LoadAudio.resample(audio, sr) # resample audio\r\n audio = LoadAudio.mono(audio) # make audio stereo\r\n audio = LoadAudio.resize(audio, max_s) # resize audio \r\n sgram = LoadAudio.spectrogram(audio, n_mels=128, n_fft=1024, hop_len=None) # create spectrogram \r\n sgram = LoadAudio.hpssSpectrograms(audio,sgram)\r\n sgram_tensor = torch.tensor(sgram)\r\n X_test.append(sgram_tensor)\r\n\r\n pred = np.array([])\r\n for i in range(len(X_test)):\r\n inputs = X_test[i]\r\n # Normalize the inputs\r\n inputs_m, inputs_s = inputs.mean(), inputs.std()\r\n inputs = (inputs - inputs_m) / inputs_s\r\n inputs = inputs.unsqueeze(0)\r\n inputs = inputs.double()\r\n \r\n # Get predictions\r\n outputs = myModel(inputs)\r\n\r\n # Get the predicted class with the highest score\r\n _, predicted = torch.max(outputs.data, 1)\r\n \r\n pred = np.append(pred, le.inverse_transform(predicted.detach().cpu().numpy()))\r\n \r\n\r\n df = pd.DataFrame(pred, columns=[\"Predicted\"]) # save predictions as a datafram column\r\n df['True'] = class_ids # save true class as a datafram column\r\n print(\"\\nPredicted:\", df)",
"def recognition_model(self, x, c):\n x = self.encoder(x)\n c = self.conditional(c)\n temp = self.recog_head(x + c)\n mu = self.mu(temp)\n logvar = self.logvar(temp)\n return mu, logvar",
"def recognition_model(self, x, c):\n x = self.encoder(x)\n c = self.conditional(c)\n temp = self.recog_head(x + c)\n mu = self.mu(temp)\n logvar = self.logvar(temp)\n return mu, logvar",
"def predict_raw(data_gen, index, partition, model):\n\n if partition == 'validation':\n transcr = data_gen.texts_valid[index]\n audio_path = \"\"\n data_point=data_gen.features_valid[index].T\n elif partition == 'train':\n transcr = data_gen.texts[index]\n # audio_path = data_gen.train_audio_paths[index]\n # data_point = data_gen.normalize(data_gen.featurize(audio_path))\n audio_path=\"\"\n data_point=data_gen.features[index].T\n else:\n raise Exception('Invalid partition! Must be \"train\" or \"validation\"')\n \n prediction = model.predict(np.expand_dims(data_point, axis=0))\n return (audio_path,data_point,transcr,prediction)",
"def predict(self, X, dropout = False):\n a1, z2, a2, z3, a3 = self.forward(X, self.w1, self.w2, do_dropout = False)\n #z3 is of dimension output units x num_samples. each row is an array representing the likelihood that the sample belongs to the class label given by the index...\n #ex: first row of z3 = [0.98, 0.78, 0.36]. This means our network has 3 output units = 3 class labels. And this instance most likely belongs to the class given by the label 0.\n y_pred = np.argmax(a3, axis = 0)\n return y_pred"
]
| [
"0.6329133",
"0.6319796",
"0.601781",
"0.6012411",
"0.59950215",
"0.59683543",
"0.5959344",
"0.5959344",
"0.5952556",
"0.59423214",
"0.59318",
"0.59194446",
"0.5901314",
"0.5898736",
"0.5898736",
"0.5898736",
"0.5892773",
"0.5889295",
"0.58853424",
"0.5880142",
"0.5864906",
"0.58544296",
"0.5846509",
"0.58374685",
"0.58325475",
"0.58081794",
"0.5805392",
"0.5805392",
"0.57928264",
"0.5791579"
]
| 0.70426357 | 0 |
Get an amenity with id as amenity_id | def get_amenity(amenity_id):
try:
amenity = Amenity.get(Amenity.id == amenity_id)
except Exception:
return {'code': 404, 'msg': 'Amenity not found'}, 404
return amenity.to_dict(), 200 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def amenity_get_by_id(amenity_id):\n obj = storage.get(\"Amenity\", amenity_id)\n if obj is None:\n abort(404)\n else:\n return jsonify(obj.to_dict())",
"def amenities_id(amenity_id):\r\n for val in storage.all(\"Amenity\").values():\r\n if val.id == amenity_id:\r\n return jsonify(val.to_dict())\r\n abort(404)",
"def amenity_by_id(amenity_id):\n\n fetched_obj = storage.get(\"Amenity\", str(amenity_id))\n\n if fetched_obj is None:\n abort(404)\n\n return jsonify(fetched_obj.to_json())",
"def amenities_id(amenity_id):\n my_amenity = storage.get('Amenity', amenity_id)\n try:\n return jsonify(my_amenity.to_dict())\n except:\n abort(404)",
"def get_specific_amenity(amenity_id):\n data = storage.all('Amenity')\n name = 'Amenity.' + amenity_id\n amenity = [v.to_dict() for k, v in data.items() if k == name]\n if len(amenity) != 1:\n abort(404)\n return jsonify(amenity[0])",
"def amenities_no_id(amenity_id=None):",
"def get_amenity(amenity_id):\n amenity = storage.get(Amenity, amenity_id)\n if amenity is None:\n abort(404)\n return jsonify(amenity.to_dict())",
"def get_amenity(amenity_id):\n amenity = storage.get(Amenity, amenity_id)\n if amenity is None:\n abort(404)\n return jsonify(amenity.to_dict())",
"def get_amenity_id(amenity_id):\n amenity = storage.get(\"Amenity\", amenity_id)\n if not amenity:\n abort(404)\n return jsonify(amenity.to_dict())",
"def amenity_one(amenity_id):\n amenity_one = models.storage.get(\"Amenity\", amenity_id)\n if amenity_one is None:\n abort(404)\n return_holder = jsonify(amenity_one.to_dict())\n return return_holder",
"def show_amenity_with_id(amenity_id):\n\n data = storage.get(Amenity, amenity_id)\n if data is None:\n abort(404)\n return jsonify(data.to_dict())",
"def amenities_id(amenity_id):\n if request.method == \"GET\":\n amenity_info = storage.get(Amenity, amenity_id)\n if amenity_info is not None:\n return amenity_info.to_dict()\n abort(404)\n\n if request.method == \"PUT\":\n amenity_info = storage.get(Amenity, amenity_id)\n if amenity_info is not None:\n if not request.is_json:\n return \"Not a JSON\", 400\n\n for k, v in request.get_json().items():\n setattr(amenity_info, k, v)\n storage.save()\n return amenity_info.to_dict()\n abort(404)\n\n if request.method == \"DELETE\":\n amenity_info = storage.get(Amenity, amenity_id)\n if amenity_info:\n amenity_info.delete()\n storage.save()\n return {}, 200\n abort(404)",
"def amenity_obj(amenity_id):\n amenity = storage.get(Amenity, amenity_id)\n if amenity is None:\n abort(404)\n return jsonify(amenity.to_dict())",
"def get_amenity_obj(amenity_id):\n amenity = storage.get(Amenity, amenity_id)\n if amenity:\n return jsonify(amenity.to_dict())\n else:\n abort(404)",
"def one_amenity(a_id):\n the_amenity = storage.get(Amenity, a_id)\n if the_amenity is not None:\n return jsonify(the_amenity.to_dict())\n abort(404)",
"def amenities(amenity_id=None):\n if request.method == 'GET' and amenity_id is None:\n return all_amenities()\n elif request.method == 'GET' and amenity_id:\n return get_amenity(amenity_id)\n elif request.method == 'DELETE':\n return delete_amenity(amenity_id)\n elif request.method == 'POST':\n return create_amenity()\n elif request.method == 'PUT':\n return update_amenity(amenity_id)",
"def getArmy(self, id):\n return self.__armies[id];",
"def amenities(self, amn):\n if type(amn) is Amenity:\n self.amenity_ids.append(str(amn.id))",
"def amenities(self):\n list_amenities = []\n for amenity_obj in amenity_ids:\n if amenity_obj.id == self.id:\n list_amenities.append(amenity_obj)\n\n return list_amenities",
"def amenities(self, amenity_obj):\n if amenity_obj.__class__.name == \"Amenity\":\n self.amenity_ids.append(amenity_obj)",
"def amenities(self):\n ''' for row in place_amenity: row.place_id and amenity.id\n == row.amenity_id:'''\n amenList = []\n for amenity in storage.all(Amenity).value():\n if self.id == amenity.place_id:\n amenList.append(amenity)\n return(amenList)",
"def dev_get_amenity_id(amenity_id=None, place_id=None):\n amenity = storage.get(Amenity, amenity_id)\n place = storage.get(Place, place_id)\n if amenity is None or place is None:\n abort(404)\n\n if amenity not in place.amenities:\n abort(404)\n\n if request.method == \"DELETE\":\n if os.getenv(\"HBNB_TYPE_STORAGE\") == \"db\":\n place.amenities.remove(amenity)\n storage.save()\n return jsonify({})\n\n if request.method == \"POST\":\n if amenity in place.amenities:\n return jsonify(amenity.to_dict())\n else:\n place.amenities.append(amenity)\n place.save()\n return make_response(jsonify(amenity.to_dict()), 201)",
"def amenities(amenity_id=None):\n if request.method == 'GET':\n all_amenities = storage.all('Amenity')\n all_amenities = [obj.to_json() for obj in all_amenities.values()]\n return jsonify(all_amenities)\n\n if request.method == 'POST':\n req_json = request.get_json()\n if req_json is None:\n abort(400, 'Not a JSON')\n if req_json.get('name') is None:\n abort(400, 'Missing name')\n new_object = Amenity(**req_json)\n new_object.save()\n return jsonify(new_object.to_json()), 201",
"def amenities(self, obj):\n if type(obj) == Amenity:\n self.amenity_ids.append(obj.id)",
"def list_amenities(amenity_id=None):\n amenity_list = []\n try:\n if amenity_id is None:\n for value in storage.all('Amenity').values():\n amenity_list.append(value.to_dict())\n else:\n amenity_list = storage.get('Amenity', amenity_id).to_dict()\n return jsonify(amenity_list)\n except Exception:\n abort(404)",
"def get_examen(self, id_examen):\n\n self.logger.info(\"\\t[+] get_examen [+]\")\n self.logger.info(f\"\\t[+] id_examen {id_examen} [+]\")\n try:\n return self.examens.select().where(self.examens.columns.id_examen == id_examen).execute()\n except Exception as e:\n self.logger.critical(\"\\t[-] Exception occured [-]\")\n self.logger.critical(\"\\t\" + str(e))\n self.logger.critical(\"\\t[-] Exception occured [-]\")",
"def delete_amenity(amenity_id):\n try:\n amenity = Amenity.get(Amenity.id == amenity_id)\n except Exception:\n return {'code': 404, 'msg': 'Amenity not found'}, 404\n amenity = Amenity.delete().where(Amenity.id == amenity_id)\n amenity.execute()\n res = {}\n res['code'] = 201\n res['msg'] = \"Amenity was deleted successfully\"\n return res, 201",
"def amenities(self):\n ats = storage.all(Amenity)\n ltats = []\n for objects in ats.values():\n if self.amenity_ids == objects.id:\n ltats.append(objects)\n return ltats",
"def amenity_delete_by_id(amenity_id):\n\n fetched_obj = storage.get(\"Amenity\", str(amenity_id))\n\n if fetched_obj is None:\n abort(404)\n\n storage.delete(fetched_obj)\n storage.save()\n\n return jsonify({})",
"def amenities(self):\n all_amenities = models.storage.all(Amenity)\n places = []\n for k, v in all_amenities.items():\n if v.id in self.amenity_ids:\n places.append(v)\n return places"
]
| [
"0.77794546",
"0.770642",
"0.76352286",
"0.7437263",
"0.7428258",
"0.7405687",
"0.7220406",
"0.7220406",
"0.71572345",
"0.71449107",
"0.7110059",
"0.7098071",
"0.70148665",
"0.69844514",
"0.69547874",
"0.6646272",
"0.6347817",
"0.6306251",
"0.62925506",
"0.6254703",
"0.6244441",
"0.6241999",
"0.6176637",
"0.6120414",
"0.607797",
"0.5993558",
"0.5948358",
"0.5936301",
"0.5879034",
"0.57819396"
]
| 0.77215296 | 1 |
Delete amenity with id as amenity_id | def delete_amenity(amenity_id):
try:
amenity = Amenity.get(Amenity.id == amenity_id)
except Exception:
return {'code': 404, 'msg': 'Amenity not found'}, 404
amenity = Amenity.delete().where(Amenity.id == amenity_id)
amenity.execute()
res = {}
res['code'] = 201
res['msg'] = "Amenity was deleted successfully"
return res, 201 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def amenity_delete(amenity_id=None):\n obj = storage.get(\"Amenity\", amenity_id)\n if obj is None:\n abort(404)\n storage.delete(obj)\n storage.save()\n return jsonify({}), 200",
"def amenities_delete(amenity_id):\r\n amenities = storage.get(\"Amenity\", amenity_id)\r\n if amenities is None:\r\n abort(404)\r\n storage.delete(amenities)\r\n storage.save()\r\n storage.close()\r\n return jsonify({}), 200",
"def delete_amenity(amenity_id):\n amenities = storage.all(Amenity)\n for key, value in amenities.items():\n if \"Amenity.{}\".format(amenity_id) == key:\n storage.delete(value)\n storage.save()\n return {}\n abort(404)",
"def delete_amenity(amenity_id):\n amenity = storage.get(Amenity, amenity_id)\n if amenity is None:\n abort(404)\n empty_dict = {}\n amenity.delete()\n storage.save()\n return jsonify(empty_dict), 200",
"def delete_amenity(amenity_id):\n amenity = storage.get(Amenity, amenity_id)\n if amenity is None:\n abort(404)\n storage.delete(amenity)\n storage.save()\n return jsonify({}), 200",
"def amenity_delete_by_id(amenity_id):\n\n fetched_obj = storage.get(\"Amenity\", str(amenity_id))\n\n if fetched_obj is None:\n abort(404)\n\n storage.delete(fetched_obj)\n storage.save()\n\n return jsonify({})",
"def amenity_delete(amenity_id):\n remove_help = models.storage.get(\"Amenity\", amenity_id)\n if remove_help is None:\n abort(404)\n remove_help.delete()\n models.storage.save()\n return_holder = jsonify({})\n return return_holder",
"def delete_amenity(amenity_id):\n amenity = storage.get(\"Amenity\", amenity_id)\n if not amenity:\n abort(404)\n storage.delete(amenity)\n storage.save()\n return jsonify({})",
"def delete_amenity_obj(amenity_id):\n amenity = storage.get(Amenity, amenity_id)\n if amenity:\n amenity.delete()\n storage.save()\n return jsonify({}), 200\n else:\n abort(404)",
"def delete_amenity_with_id(amenity_id):\n\n data = storage.get(Amenity, amenity_id)\n if data is None:\n abort(404)\n storage.delete(data)\n storage.save()\n return jsonify({}), 200",
"def delete_amenities_id(amenity_id):\n my_object = storage.get('Amenity', amenity_id)\n if my_object is not None:\n storage.delete(my_object)\n storage.save()\n else:\n abort(404)\n return jsonify({}), 200",
"def del_amenity(a_id):\n the_amenity = storage.get(Amenity, a_id)\n if the_amenity is not None:\n storage.delete(the_amenity)\n storage.save()\n return jsonify({}), 200\n abort(404)",
"def delete_specific_amenity(amenity_id):\n amenity = storage.get('Amenity', amenity_id)\n if not amenity:\n abort(404)\n storage.delete(amenity)\n storage.save()\n return make_response(jsonify({}), 200)",
"def delete_amenity(place_id, amenity_id):\n place = storage.get(Place, place_id)\n if place is None:\n abort(404)\n amenity = storage.get(Amenity, amenity_id)\n if amenity is None:\n abort(404)\n if amenity not in place.amenities:\n abort(404)\n place.amenities.remove(amenity)\n storage.save()\n return jsonify({})",
"def delete_amenity_place(place_id, amenity_id):\n place = storage.get(Place, place_id)\n if place is None:\n abort(404, description=\"Not Found\")\n amenity = storage.get(Amenity, amenity_id)\n if amenity is None:\n abort(404, description=\"Not Found\")\n if amenity in place.amenities:\n amenity.delete()\n storage.save()\n return {}\n abort(404, description=\"Not Found\")",
"def delete_place_amenities(place_id, amenity_id):\n try:\n delete = PlaceAmenities.delete().where(\n PlaceAmenities.amenity == amenity_id,\n PlaceAmenities.place == place_id\n )\n delete.execute()\n res = {}\n res['code'] = 200\n res['msg'] = 'Amenity deleted successfully'\n return res, 200\n except Exception as error:\n response = {}\n response['code'] = 409\n response['msg'] = str(error)\n return response, 409",
"def del_place_amen(place_id, amenity_id):\n place = storage.get('Place', place_id)\n if not place:\n abort(404)\n\n amenity = storage.get('Amenity', amenity_id)\n if not amenity:\n abort(404)\n\n if amenity not in place.amenities:\n abort(404)\n\n storage.delete(amenity)\n storage.save()\n return jsonify({}), 200",
"def amenities_no_id(amenity_id=None):",
"def delete(self, _id):",
"def delete(self, id=None):\n\n if not id:\n return {'msg':'Missing achievement id.'}, 400\n\n try:\n ach = AcademicAchievement.query.get(id)\n\n if not ach:\n return {'msg':'Academic achievement not found'}, 404\n\n ach.remove()\n return {'msg':'Academic achievement deleted.'}, 200\n\n except Exception as e:\n print(e)\n return {'msg':'Could not delete academic achievement.'}, 500",
"def delete(self, id):\n r = validate_get(id)\n tareaID = r.tarea.id\n r.destroySelf()\n flash(_(u'El %s fue eliminado permanentemente.') % name)\n raise redirect('../list/%d' % tareaID)",
"def post_delete(self, *args, **kw):\n id_atributo = int(args[0])\n transaction.begin()\n attr = AtributosPorTipoItem.por_id(id_atributo)\n DBSession.delete(attr)\n transaction.commit()\n flash(\"Atributo Eliminado\")\n redirect(\"./\")",
"def delete_meal():",
"def delete(self):\n self.model.remove_agents(self)",
"def delete_anime(utoa):\n db.session.query(UserToAnime)\\\n .filter(UserToAnime.userId == utoa.userId, UserToAnime.malId == utoa.malId)\\\n .delete()\n db.session.commit()",
"def delete_menu_section(id: int):\n menu_section = MenuSection.query.get(id)\n if not menu_section:\n return jsonify(success=False, MenuSection=\"No MenuSection with id={}\".format(id))\n\n db.session.delete(menu_section)\n db.session.commit()\n\n return jsonify(success=True)",
"async def rolemenu_delete(self, interaction: discord.Interaction,\n name: str):\n doc = await self.db.find_one({\n \"guild_id\": interaction.guild.id,\n \"name\": name\n })\n if not doc:\n return await interaction.response.send_message(\n \"Role menu with that name does not exist.\", ephemeral=True)\n await interaction.response.defer(ephemeral=True)\n await self.db.delete_one({\"_id\": doc[\"_id\"]})\n await interaction.followup.send(\"Role menu removed.\", ephemeral=True)",
"def delete():",
"def delete_incident(self, id):\n sql = f\"DELETE FROM incidences WHERE incidences.id ={id}\"\n conn = Db().con\n curr = conn.cursor()\n curr.execute(sql)\n conn.commit()",
"def delete_location(self, location_id):"
]
| [
"0.828099",
"0.81136984",
"0.80685514",
"0.79795176",
"0.7977136",
"0.7969661",
"0.79693115",
"0.7922059",
"0.78571624",
"0.78356045",
"0.77734894",
"0.7765752",
"0.7712711",
"0.7606925",
"0.7024841",
"0.6947486",
"0.6862664",
"0.6641325",
"0.65328556",
"0.6338358",
"0.6331964",
"0.6268165",
"0.62606335",
"0.6123022",
"0.6085305",
"0.60642964",
"0.60445917",
"0.6008089",
"0.5996647",
"0.59433943"
]
| 0.82829297 | 0 |
Delete amenities with id as amenity_id and place with id as place_id | def delete_place_amenities(place_id, amenity_id):
try:
delete = PlaceAmenities.delete().where(
PlaceAmenities.amenity == amenity_id,
PlaceAmenities.place == place_id
)
delete.execute()
res = {}
res['code'] = 200
res['msg'] = 'Amenity deleted successfully'
return res, 200
except Exception as error:
response = {}
response['code'] = 409
response['msg'] = str(error)
return response, 409 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_amenity_place(place_id, amenity_id):\n place = storage.get(Place, place_id)\n if place is None:\n abort(404, description=\"Not Found\")\n amenity = storage.get(Amenity, amenity_id)\n if amenity is None:\n abort(404, description=\"Not Found\")\n if amenity in place.amenities:\n amenity.delete()\n storage.save()\n return {}\n abort(404, description=\"Not Found\")",
"def delete_amenity(place_id, amenity_id):\n place = storage.get(Place, place_id)\n if place is None:\n abort(404)\n amenity = storage.get(Amenity, amenity_id)\n if amenity is None:\n abort(404)\n if amenity not in place.amenities:\n abort(404)\n place.amenities.remove(amenity)\n storage.save()\n return jsonify({})",
"def del_place_amen(place_id, amenity_id):\n place = storage.get('Place', place_id)\n if not place:\n abort(404)\n\n amenity = storage.get('Amenity', amenity_id)\n if not amenity:\n abort(404)\n\n if amenity not in place.amenities:\n abort(404)\n\n storage.delete(amenity)\n storage.save()\n return jsonify({}), 200",
"def amenities_delete(amenity_id):\r\n amenities = storage.get(\"Amenity\", amenity_id)\r\n if amenities is None:\r\n abort(404)\r\n storage.delete(amenities)\r\n storage.save()\r\n storage.close()\r\n return jsonify({}), 200",
"def delete_amenity(amenity_id):\n try:\n amenity = Amenity.get(Amenity.id == amenity_id)\n except Exception:\n return {'code': 404, 'msg': 'Amenity not found'}, 404\n amenity = Amenity.delete().where(Amenity.id == amenity_id)\n amenity.execute()\n res = {}\n res['code'] = 201\n res['msg'] = \"Amenity was deleted successfully\"\n return res, 201",
"def delete_amenity(amenity_id):\n amenities = storage.all(Amenity)\n for key, value in amenities.items():\n if \"Amenity.{}\".format(amenity_id) == key:\n storage.delete(value)\n storage.save()\n return {}\n abort(404)",
"def amenity_delete(amenity_id=None):\n obj = storage.get(\"Amenity\", amenity_id)\n if obj is None:\n abort(404)\n storage.delete(obj)\n storage.save()\n return jsonify({}), 200",
"def delete_amenities_id(amenity_id):\n my_object = storage.get('Amenity', amenity_id)\n if my_object is not None:\n storage.delete(my_object)\n storage.save()\n else:\n abort(404)\n return jsonify({}), 200",
"def delete_amenity(amenity_id):\n amenity = storage.get(Amenity, amenity_id)\n if amenity is None:\n abort(404)\n empty_dict = {}\n amenity.delete()\n storage.save()\n return jsonify(empty_dict), 200",
"def delete_amenity(amenity_id):\n amenity = storage.get(Amenity, amenity_id)\n if amenity is None:\n abort(404)\n storage.delete(amenity)\n storage.save()\n return jsonify({}), 200",
"def amenity_delete(amenity_id):\n remove_help = models.storage.get(\"Amenity\", amenity_id)\n if remove_help is None:\n abort(404)\n remove_help.delete()\n models.storage.save()\n return_holder = jsonify({})\n return return_holder",
"def delete_specific_amenity(amenity_id):\n amenity = storage.get('Amenity', amenity_id)\n if not amenity:\n abort(404)\n storage.delete(amenity)\n storage.save()\n return make_response(jsonify({}), 200)",
"def amenity_delete_by_id(amenity_id):\n\n fetched_obj = storage.get(\"Amenity\", str(amenity_id))\n\n if fetched_obj is None:\n abort(404)\n\n storage.delete(fetched_obj)\n storage.save()\n\n return jsonify({})",
"def del_amenity(a_id):\n the_amenity = storage.get(Amenity, a_id)\n if the_amenity is not None:\n storage.delete(the_amenity)\n storage.save()\n return jsonify({}), 200\n abort(404)",
"def delete_amenity_obj(amenity_id):\n amenity = storage.get(Amenity, amenity_id)\n if amenity:\n amenity.delete()\n storage.save()\n return jsonify({}), 200\n else:\n abort(404)",
"def delete_amenity(amenity_id):\n amenity = storage.get(\"Amenity\", amenity_id)\n if not amenity:\n abort(404)\n storage.delete(amenity)\n storage.save()\n return jsonify({})",
"def place_amenities(place_id=\"\", amenity_id=\"\"):\n place_list = [p for p in storage.all(\"Place\").values()]\n if place_id not in [p.id for p in place_list]:\n abort(404)\n if request.method == \"GET\" and not amenity_id:\n for p in place_list:\n if p.id == place_id:\n return jsonify([a.to_dict() for a in p.amenities])\n abort(404)\n amenity_list = [a for a in storage.all(\"Amenity\").values()]\n if amenity_id not in [a.id for a in amenity_list]:\n abort(404)\n p = storage.get(\"Place\", place_id)\n a = storage.get(\"Amenity\", amenity_id)\n if request.method == \"DELETE\":\n if a in p.amenities:\n p.amenities.remove(a)\n p.save()\n return jsonify({}), 200\n abort(404)\n if request.method == \"POST\":\n if a in p.amenities:\n return jsonify(a.to_dict()), 200\n p.amenities.append(a)\n p.save()\n return jsonify(a.to_dict()), 201",
"def delete_amenity_with_id(amenity_id):\n\n data = storage.get(Amenity, amenity_id)\n if data is None:\n abort(404)\n storage.delete(data)\n storage.save()\n return jsonify({}), 200",
"def post_amenity2(place_id, amenity_id):\n place = storage.get(Place, place_id)\n if place is None:\n abort(404)\n amenity = storage.get(Amenity, amenity_id)\n if amenity is None:\n abort(404)\n if amenity in place.amenities:\n return (jsonify(amenity.to_dict()), 200)\n place.amenities.append(obj)\n storage.save()\n return (jsonify(amenity.to_dict(), 201))",
"def delete_place(place_id):\n place = storage.get(Place, place_id)\n if place is None:\n abort(404)\n storage.delete(place)\n storage.save()\n return jsonify({}), 200",
"def delete_place(place_id):\n place = storage.get('Place', place_id)\n if place is None:\n abort(404)\n storage.delete(place)\n storage.save()\n return (jsonify({}), 200)",
"def delete_place(place_id):\n place = storage.get(\"Place\", place_id)\n if not place:\n abort(404)\n storage.delete(place)\n storage.save()\n return jsonify({})",
"def delete_place(place_id):\n place = storage.get(Place, place_id)\n if place is None:\n abort(404)\n storage.delete(place)\n storage.save()\n storage.reload()\n return jsonify({}), 200",
"def manipulate_amenties_place(place_id=None):\n if place_id not in storage.all('Place'):\n abort(404)\n\n if amenity_id not in storage.all('Amenity'):\n abort(404)\n\n if request.method == 'DELETE':\n storage.delete(storage.get('Place', place_id))\n storage.save()\n return(jsonify({}))\n\n if request.method == 'POST':\n post_obj = request.get_json()\n if post_obj is None:\n return(\"Not a JSON\", 400)\n if 'name' not in post_obj:\n return(\"Missing name\", 400)\n new_obj = City(**post_obj)\n new_obj.state_id = state_id\n new_obj.save()\n return(jsonify(new_obj.to_json()), 201)",
"def delete_location(self, location_id):",
"def dev_get_amenity_id(amenity_id=None, place_id=None):\n amenity = storage.get(Amenity, amenity_id)\n place = storage.get(Place, place_id)\n if amenity is None or place is None:\n abort(404)\n\n if amenity not in place.amenities:\n abort(404)\n\n if request.method == \"DELETE\":\n if os.getenv(\"HBNB_TYPE_STORAGE\") == \"db\":\n place.amenities.remove(amenity)\n storage.save()\n return jsonify({})\n\n if request.method == \"POST\":\n if amenity in place.amenities:\n return jsonify(amenity.to_dict())\n else:\n place.amenities.append(amenity)\n place.save()\n return make_response(jsonify(amenity.to_dict()), 201)",
"def delete_anime(utoa):\n db.session.query(UserToAnime)\\\n .filter(UserToAnime.userId == utoa.userId, UserToAnime.malId == utoa.malId)\\\n .delete()\n db.session.commit()",
"def amenities_no_id(amenity_id=None):",
"def create_place_amenity(place_id, amenity_id):\n place = storage.get('Place', place_id)\n if not place:\n abort(404)\n\n amenity = storage.get('Amenity', amenity_id)\n if not amenity:\n abort(404)\n\n if amenity not in place.amenities:\n place.amenities.append(amenity)\n place.save()\n return jsonify(amenity.to_dict()), 201\n else:\n return jsonify(amenity.to_dict()), 200",
"def test_ipam_roles_delete(self):\n pass"
]
| [
"0.82304513",
"0.8189802",
"0.7973082",
"0.7140258",
"0.6936414",
"0.68594134",
"0.66304946",
"0.6570661",
"0.6530816",
"0.64950216",
"0.6485565",
"0.646877",
"0.6443163",
"0.64373386",
"0.6418685",
"0.64175266",
"0.6345153",
"0.62653947",
"0.5971674",
"0.58708566",
"0.5855382",
"0.5852429",
"0.58080626",
"0.5726335",
"0.5719549",
"0.5711823",
"0.5709195",
"0.56803894",
"0.5576216",
"0.55620444"
]
| 0.8434596 | 0 |
Return a rate to convert the metric data to a new unit, as below: value in old unit / rate = value in new unit | def get_conversion_rate(self, old_unit, new_unit):
for i in [old_unit, new_unit]:
if i not in self.units:
raise Exception("Can't find unit %s in unitgroup '%s'" %
(i, self.name))
return float(self.units[new_unit]) / float(self.units[old_unit]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_conversion_rate(self, newunit):\n if not self.unit or not self.unitgroup:\n logging.error(\"Metric %s can't be converted into %s unit. \"\n \"Please correct your config file.\" % (self.name,\n newunit))\n sys.exit(1)\n\n try:\n return self.unitgroup.get_conversion_rate(self.unit, newunit)\n except Exception as e:\n logging.error(\"Failed to convert metric %s into unit %s. \"\n \"Reason: %s. Please correct your config file.\"\n % (self.name, newunit, e.message))\n sys.exit(1)",
"def _set_rate(self):\r\n interval = self.data.iloc[2, 0] - self.data.iloc[1, 0]\r\n self.rate = int(1 / interval)",
"def conversion_rate(self, init, new_currency):\r\n\r\n curr = CurrencyRates()\r\n curr_conv_rate = curr.get_rate(init, new_currency)\r\n return curr_conv_rate",
"def convert_rate_constant( # noqa: C901, PLR0912, PLR0913\n val,\n new_scale,\n old_scale=\"l mol-1 s-1\",\n molecularity=1,\n temperature=298.15,\n pressure=constants.atm,\n):\n for alt, ref in [(\"M-1\", \"l mol-1\"), (\"ml\", \"cm3\"), (\"torr-1\", \"mmHg-1\")]:\n new_scale, old_scale = new_scale.replace(alt, ref), old_scale.replace(alt, ref)\n\n # no need to convert if same units or if molecularity is one\n if old_scale == new_scale or np.all(molecularity == 1):\n return val\n\n # we first convert to l mol-1 s-1\n if old_scale == \"cm3 mol-1 s-1\":\n factor = 1.0 / constants.kilo\n elif old_scale == \"l mol-1 s-1\":\n factor = 1.0\n elif old_scale == \"m3 mol-1 s-1\":\n factor = constants.kilo\n elif old_scale == \"cm3 particle-1 s-1\":\n factor = constants.N_A / constants.kilo\n elif old_scale == \"mmHg-1 s-1\":\n factor = (\n rx.thermo.molar_volume(temperature, pressure)\n * pressure\n * constants.kilo\n / constants.torr\n )\n elif old_scale == \"atm-1 s-1\":\n factor = rx.thermo.molar_volume(temperature, pressure) * constants.kilo\n else:\n raise ValueError(f\"old unit not recognized: {old_scale}\") # noqa: EM102, TRY003\n\n # now we convert l mol-1 s-1 to what we need\n if new_scale == \"cm3 mol-1 s-1\":\n factor *= constants.kilo\n elif new_scale == \"l mol-1 s-1\":\n factor *= 1.0\n elif new_scale == \"m3 mol-1 s-1\":\n factor *= 1.0 / constants.kilo\n elif new_scale == \"cm3 particle-1 s-1\":\n factor *= constants.kilo / constants.N_A\n elif new_scale == \"mmHg-1 s-1\":\n factor *= constants.torr / (\n rx.thermo.molar_volume(temperature, pressure) * pressure * constants.kilo\n )\n elif new_scale == \"atm-1 s-1\":\n factor *= 1.0 / (rx.thermo.molar_volume(temperature, pressure) * constants.kilo)\n else:\n raise ValueError(f\"new unit not recognized: {new_scale}\") # noqa: EM102, TRY003\n\n factor **= molecularity - 1\n logger.info(\n f\"conversion factor ({old_scale} to {new_scale}) = {factor}\", # noqa: G004\n )\n return val * factor",
"def rate_per_unit(self, rate_per_unit):\n\n self._rate_per_unit = rate_per_unit",
"def calculateDataRate(self):\n pass",
"def rate(self, newrate):\n command = 'rate ' + str(newrate)\n self.run_command(command)",
"def compute_rate(self):\n bg_rate = self.counts.data / self.livetime.data\n\n bg_rate /= self.counts.bin_volume\n\n bg_rate = bg_rate.to('MeV-1 sr-1 s-1')\n\n self.bg_rate.data = bg_rate\n self.bg_rate.data_err = (np.sqrt(self.counts.data) / (self.counts.bin_volume * self.livetime.data)).to(\n 'MeV-1 sr-1 s-1')",
"def get_rate(self, t):\n return self.rates[bisect.bisect(self.change_times, t) - 1]",
"def convert(self, value, units, newunits):\n return value * self._units[units] / self._units[newunits]",
"def changeTo(self, new_unit):\n self.value = (self.value / Ccy.currencies[self.unit] * Ccy.currencies[new_unit])\n self.unit = new_unit",
"def update_rate(self):\n self._rate = (\n (self._received - self._samples[0]) / float(self.sample_size)\n )\n self._samples.append(self._received)",
"def getRate(self, context):\n try:\n return VTypeHelper.toDouble(context.getDevice(\"rate\").read())\n except:\n return 60.0",
"def get_current_rate(self):\n pass",
"def convert(rates, value, from_string, to_string):\n tuple_list = (\n [x for x in rates if x[0] == from_string and x[1] == to_string])\n if tuple_list:\n return round(tuple_list[0][2] * value, 2)\n else:\n tuple_list2 = (\n [x for x in rates if x[1] == from_string and x[0] == to_string])\n return round((1/tuple_list2[0][2]) * value, 2)",
"def convertRate(row):\n if pd.isnull(row):\n return 1.0\n elif ':' in str(row):\n rows = row.split(':')\n return 1.0 - float(rows[1]) / float(rows[0])\n else:\n return float(row)",
"def convertRate(row):\n if row == 'null':\n return 1.0\n elif ':' in row:\n rows = row.split(':')\n return 1.0 - float(rows[1])/float(rows[0])\n else:\n return float(row)",
"def reverse_rate(rate_tuple):\n return 1 / rate_tuple[2]",
"def rate(self):\n return self.brate / FAC",
"def data_rate(self):\n return self._data_rate",
"def convert_frequency_2_current_u(self,val):\n return val/conversion_facs_frequency[self.current_units[\"frequency\"]]",
"def to(self, new_unit, **kwargs):\n new_unit = u.Unit(new_unit)\n return self * (self.unit.to(new_unit, **kwargs) * new_unit / self.unit)",
"def update(self) -> None:\n self.data.update()\n self._state = round(self.data.rate[\"rates\"][self._target], 3)",
"def rate(self, t, i):\n return np.real(self._rates[i](t))",
"def conversion_rate(self, price):\n\n price = ( price - 20 ) / 2\n\n a = self.a_conversion_rate\n b = self.b_conversion_rate\n c = self.c_conversion_rate\n d = self.d_conversion_rate\n e = self.e_conversion_rate\n # price_min = self.price_min\n # Probabilities of conversion given a price\n return c * np.exp ( a * ( price - e) ** (1/ (2 * b) ) ) * (d - 2*price) ** (3/2)",
"def update_rate_hz(self) -> float:\n return self._update_rate_hz",
"def exchange_rate(self):\n res = r.get(self.url + self.current_rate)\n return self.execute(res)",
"def getRate(self) -> int:\n if (self._total_stake.get() + self._daily_reward.get()) == 0:\n rate = DENOMINATOR\n else:\n rate = (self._total_stake.get() + self._daily_reward.get()) * DENOMINATOR // self.sICX_score.totalSupply()\n return rate",
"def rates(self):\n raise NotImplementedError(\"Must be implemented by subclass.\")",
"def rate(self):\n return self.__rate"
]
| [
"0.70476955",
"0.67164433",
"0.6425538",
"0.6421992",
"0.63206935",
"0.6272851",
"0.6172931",
"0.61639756",
"0.6147183",
"0.60993135",
"0.6065548",
"0.60554105",
"0.59908634",
"0.59607494",
"0.5956799",
"0.594856",
"0.5939248",
"0.59317255",
"0.59281486",
"0.5904123",
"0.590133",
"0.5889678",
"0.5882855",
"0.5846505",
"0.584077",
"0.58179724",
"0.58136",
"0.58077353",
"0.57711256",
"0.57538867"
]
| 0.74816525 | 0 |
Return the fields of a host data record in a list. | def get_host_data_fields(self):
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_fields(fc):\n return [f.name for f in arcpy.ListFields(fc)]",
"def extractFields(deerfootRDDRecord):\n fieldsList = deerfootRDDRecord.split(\",\")\n return (fieldsList[0], [fieldsList[1], fieldsList[15], fieldsList[46]])",
"def listFields(self):\n return self.get_json('/field')",
"def list_fields(fixture_file=settings.FIXTURE_PATH, list_to_shell=True):\n fields = []\n with open(fixture_file, 'r') as posts:\n posts = json.load(posts, encoding='utf8')\n i = 0\n for post in posts:\n for field in post['fields']:\n fields.append(field)\n i += 1\n if list_to_shell:\n print yellow(\"All available BlogPost fields:\")\n print fields\n print yellow(\"%i fields total\" % i)\n return fields",
"def get_fields_list(self):\n return self.description[\"fields\"][\"values\"].keys()",
"def get_drupal_field_list(db_obj, db_cur, entity_type, bundle):\n\n # query string and arguments\n query_str = (\n'''\nSELECT fci.field_name\nFROM field_config_instance as fci\nLEFT JOIN field_config as fc\nON fc.id = fci.field_id\nWHERE fci.entity_type = %s\nAND fci.bundle = %s\nAND fc.deleted = 0\n'''\n )\n query_args = [entity_type, bundle]\n\n # execute the query\n if not db_obj.execute(db_cur, query_str.strip(), query_args,\n has_results=True):\n return None\n ret = db_obj.fetchall(db_cur)\n if not ret[0]:\n return None\n if not ret[1]:\n return []\n\n return [x[0][6:] for x in ret[1] if x[0].startswith('field_')]",
"def fields(self) -> List[Field]: # pragma: no cover\n pass",
"def fields(self):\n return [f[1] for f in sorted(self.dd.fields.items())]",
"def get_fields(self, pager=None):\n return Field.deserialize_list(self._get_multiple('fields', {}, pager))",
"def fields(proto):\n return [x[0].name for x in proto.ListFields()]",
"def dig_fields(json_data):\n data = json.loads(json_data)\n fields = [f for f in data]\n return fields",
"def get_fieldlist(cls):\n return cls.fieldlist",
"def parse(self):\n result = []\n for field in self.get_fields():\n result.append(field.get_field())\n return result",
"def get_record_meta(record_list):\n acc_code = record_list[0]\n organism = record_list[1]\n EC_code = record_list[2].replace(\"__\", \" \")\n species = record_list[3].replace(\"__\", \" \")\n note = record_list[4]\n return acc_code, organism, EC_code, species, note",
"async def get_fields(self) -> List[Field]:\n schema = await self.get_schema()\n fields = []\n if schema:\n # The faust-avro parser expects a json-parsed avro schema\n # https://github.com/masterysystems/faust-avro/blob/master/faust_avro/parsers/avro.py#L20\n parsed_schema = self._parse(json.loads(schema))\n for field in parsed_schema.fields:\n fields.append(Field(field.name, field.type.python_type))\n\n return fields",
"def get_fields(self):\n return list(self.metadata.keys())",
"def getvaluelist(doclist, fieldname):\n\tl = []\n\tfor d in doclist:\n\t\tl.append(d.fields[fieldname])\n\treturn l",
"def fields(self):\r\n return self._by_name.iteritems()",
"def get_fields(self):\n \n fields = []\n for img in self.img_lst:\n fields += img.get_fields()\n \n fields = list(set(fields))\n \n return fields",
"def _datastore_fields(fs):\n return [{\n 'id': f['datastore_id'],\n 'type': _column_type(f['datastore_type'])}\n for f in fs]",
"def extract(self):\n self.field_list = []\n \n try:\n self.mfields = self.getModel()._meta.fields\n if(self.mfields):\n try:\n for model_fields in self.mfields:\n if(model_fields.name == \"id\"):\n pass \n \n elif(model_fields.name == \"pci\"):\n pass \n elif(model_fields.name == \"sci\"):\n pass \n elif(model_fields.name == \"validated\"):\n pass \n else:\n self.field_list.append(model_fields.name)\n return self.field_list\n except:\n raise \n else:\n return None \n except:\n raise",
"def read_data(self) -> List[BaseRecord]:\n pass",
"def _fieldList(self, key, year, month=None, day=None, hour=None, status=1, metaData=None):\n fields = [StatusLog.FIELD_TIMESTAMP]\n if (key is not None):\n fields.append(StatusLog.FIELD_KEY)\n if (year is not None):\n fields.append(StatusLog.FIELD_YEAR)\n if (month is not None):\n fields.append(StatusLog.FIELD_MONTH)\n if (day is not None):\n fields.append(StatusLog.FIELD_DAY)\n if (hour is not None):\n fields.append(StatusLog.FIELD_HOUR)\n if (status is not None):\n fields.append(StatusLog.FIELD_STATUS)\n if (metaData is not None):\n fields.append(StatusLog.FIELD_METADATA)\n \n # Make a string\n return '(' + ', '.join(fields) + ')'",
"def fields(self) -> List[SingleField]:\n return self._fields",
"def _get_fields(self):\n if not self._cursor.description:\n return {}\n\n results = {}\n column = 0\n\n for des in self._cursor.description:\n fieldname = des[0]\n results[column] = fieldname\n column = column + 1\n\n return results",
"def _list_fields(self):\n return list(self._state.keys())",
"def data(self, *keys: _TResultKey) -> t.List[t.Dict[str, t.Any]]:\n return [record.data(*keys) for record in self]",
"def _create_field_list(entities: List[Entity], features: List[Feature]) -> List[Field]:\n fields: List[Field] = []\n\n for entity in entities:\n if isinstance(entity, Field):\n fields.append(entity)\n\n for feature in features:\n if isinstance(feature, Field):\n fields.append(feature)\n\n return fields",
"def make_field_list(field_desc_list, names):\n for index, field_desc in enumerate(field_desc_list):\n yield RecordSchema._make_field(index, field_desc, names)",
"def extract_data_props(vl_spec):\n\tfield_props = []\n\tvspec = vl2obj(vl_spec)\n\tdata = vl_spec[\"data\"][\"values\"]\n\tfor enc in vspec[\"encoding\"]:\n\t\tfield_prop = {}\n\t\tif enc[\"field\"] is not None:\n\t\t\tfield_prop[\"field\"] = enc[\"field\"]\n\t\t\tfield_prop[\"enc_type\"] = enc[\"type\"]\n\t\t\tcolumn_values = [d[field_prop[\"field\"]] for d in data]\n\t\t\tdtype = pd.api.types.infer_dtype(column_values)\n\t\t\tfield_prop[\"dtype\"] = dtype\n\t\t\tif dtype in [\"integer\", \"float\", \"mixed-integer-float\"]:\n\t\t\t\tfield_prop[\"min\"] = min(column_values)\n\t\t\t\tfield_prop[\"max\"] = max(column_values)\n\t\t\tfield_prop[\"cardinality\"] = len(set(column_values))\n\t\t\tfield_props.append(field_prop)\n\treturn field_props"
]
| [
"0.6213986",
"0.60264015",
"0.5972971",
"0.5965215",
"0.59143746",
"0.58616215",
"0.5858335",
"0.5854301",
"0.58492804",
"0.58246523",
"0.58062214",
"0.5797772",
"0.5766629",
"0.57091284",
"0.5703255",
"0.5699028",
"0.56777984",
"0.5672483",
"0.566576",
"0.56613034",
"0.56385344",
"0.5609195",
"0.5593115",
"0.5586352",
"0.55827343",
"0.555857",
"0.5540665",
"0.5539605",
"0.5531021",
"0.5513019"
]
| 0.66660905 | 0 |
Return the fields of a VM data record in a list. | def get_vm_data_fields(self):
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_fields(self, pager=None):\n return Field.deserialize_list(self._get_multiple('fields', {}, pager))",
"def list_fields(fc):\n return [f.name for f in arcpy.ListFields(fc)]",
"def listFields(self):\n return self.get_json('/field')",
"def get_fieldlist(cls):\n return cls.fieldlist",
"def fields(self):\n return [f[1] for f in sorted(self.dd.fields.items())]",
"def extract(self):\n self.field_list = []\n \n try:\n self.mfields = self.getModel()._meta.fields\n if(self.mfields):\n try:\n for model_fields in self.mfields:\n if(model_fields.name == \"id\"):\n pass \n \n elif(model_fields.name == \"pci\"):\n pass \n elif(model_fields.name == \"sci\"):\n pass \n elif(model_fields.name == \"validated\"):\n pass \n else:\n self.field_list.append(model_fields.name)\n return self.field_list\n except:\n raise \n else:\n return None \n except:\n raise",
"def get_fields_list(self):\n return self.description[\"fields\"][\"values\"].keys()",
"def fields(self) -> List[Field]: # pragma: no cover\n pass",
"def extractFields(deerfootRDDRecord):\n fieldsList = deerfootRDDRecord.split(\",\")\n return (fieldsList[0], [fieldsList[1], fieldsList[15], fieldsList[46]])",
"def get_fields(self):\n \n fields = []\n for img in self.img_lst:\n fields += img.get_fields()\n \n fields = list(set(fields))\n \n return fields",
"def list_fields(fixture_file=settings.FIXTURE_PATH, list_to_shell=True):\n fields = []\n with open(fixture_file, 'r') as posts:\n posts = json.load(posts, encoding='utf8')\n i = 0\n for post in posts:\n for field in post['fields']:\n fields.append(field)\n i += 1\n if list_to_shell:\n print yellow(\"All available BlogPost fields:\")\n print fields\n print yellow(\"%i fields total\" % i)\n return fields",
"def get_fields(self):\n fields = []\n for items in self.order_items:\n fields += items.get_fields()\n \n fields = list(set(fields))\n \n field_order = ['recordId', 'orderId', 'itemId', 'collectionId']\n \n out_fields = field_order\n \n for f in fields:\n if f not in field_order:\n out_fields.append(f)\n \n return out_fields",
"def get_fields(self):\n return list(self.metadata.keys())",
"def parse(self):\n result = []\n for field in self.get_fields():\n result.append(field.get_field())\n return result",
"async def get_fields(self) -> List[Field]:\n schema = await self.get_schema()\n fields = []\n if schema:\n # The faust-avro parser expects a json-parsed avro schema\n # https://github.com/masterysystems/faust-avro/blob/master/faust_avro/parsers/avro.py#L20\n parsed_schema = self._parse(json.loads(schema))\n for field in parsed_schema.fields:\n fields.append(Field(field.name, field.type.python_type))\n\n return fields",
"def fields(self):\r\n return self._by_name.iteritems()",
"def fields(self) -> List[SingleField]:\n return self._fields",
"def dig_fields(json_data):\n data = json.loads(json_data)\n fields = [f for f in data]\n return fields",
"def getvaluelist(doclist, fieldname):\n\tl = []\n\tfor d in doclist:\n\t\tl.append(d.fields[fieldname])\n\treturn l",
"def abc_get_model_fields(self, record):\n if record._name == 'stock.picking':\n return [\n 'name',\n ('product_id', ['is_offer']),\n 'state',\n ('partner_id', ['display_name']),\n ]\n if record._name == 'stock.transfer_details_items':\n return [\n ('product_id', ['display_name']),\n ('product_uom_id', ['display_name', 'factor']),\n 'quantity',\n ('package_id', []),\n ('packop_id', []),\n ('result_package_id', ['display_name']),\n ('sourceloc_id', ['display_name']),\n ('destinationloc_id', ['display_name']),\n ('lot_id', ['display_name']),\n ]\n if record._name == 'product.product':\n return [\n 'display_name',\n 'default_code',\n 'ean13',\n 'is_offer',\n 'weight',\n ('uom_id', ['display_name', 'factor']),\n ]\n if record._name == 'stock.location':\n return [\n 'display_name',\n ]\n if record._name == 'product.uom':\n return [\n 'display_name',\n 'factor',\n ]\n return ['id']",
"def fields(proto):\n return [x[0].name for x in proto.ListFields()]",
"def get_data_fields(self, fields=None, skip=set(), override=dict()):\n fields = fields or self.fields\n result = []\n for field in fields:\n if field.name in skip: continue\n result.append(field.get_data(override=override.get(field.name,\n dict())))\n return result",
"def _get_fields(self):\n table = self.ui.tableFields\n rows = table.rowCount()\n cols = table.columnCount()\n fields = []\n for i in range(rows):\n fields.append(\n tuple(map(lambda x: table.item(i, x).text(), range(cols)))\n )\n return fields",
"def getFields(iface):\n return getFieldsInOrder(iface)",
"def _list_fields(self):\n return list(self._state.keys())",
"def _datastore_fields(fs):\n return [{\n 'id': f['datastore_id'],\n 'type': _column_type(f['datastore_type'])}\n for f in fs]",
"def get_returnable_fields(result, verbose=False):\n check_result(result)\n result_info = get_result(result)\n returnable_fields = result_info[\"returnable_fields\"]\n if verbose:\n pprint(returnable_fields)\n return returnable_fields",
"def dict_to_fm_field_list(\n self, data: Dict[str, Any], language_code: str, line: int = 0\n ) -> nodes.field_list:\n field_list = nodes.field_list()\n\n bibliofields = get_language(language_code).bibliographic_fields\n state_machine = MockStateMachine(self, line)\n state = MockState(self, state_machine, line)\n\n for key, value in data.items():\n if not isinstance(value, (str, int, float, date, datetime)):\n value = json.dumps(value)\n value = str(value)\n if key in bibliofields:\n para_nodes, _ = state.inline_text(value, line)\n body_children = [nodes.paragraph(\"\", \"\", *para_nodes)]\n else:\n body_children = [nodes.Text(value, value)]\n\n field_node = nodes.field()\n field_node.source = value\n field_node += nodes.field_name(key, \"\", nodes.Text(key, key))\n field_node += nodes.field_body(value, *body_children)\n field_list += field_node\n\n return field_list",
"def all_fields(item):\n return scom.all_fields(item)",
"def umm_fields(item):\n return scom.umm_fields(item)"
]
| [
"0.6668574",
"0.64598054",
"0.6448825",
"0.63467366",
"0.63245344",
"0.6320131",
"0.6319845",
"0.631197",
"0.62458086",
"0.6242731",
"0.6185522",
"0.61597586",
"0.6136137",
"0.61244106",
"0.60978943",
"0.60761255",
"0.6071319",
"0.6056425",
"0.60514027",
"0.6050539",
"0.5990737",
"0.5959495",
"0.59539175",
"0.59489036",
"0.5933992",
"0.59299743",
"0.59269667",
"0.59251773",
"0.59211403",
"0.59125155"
]
| 0.656898 | 1 |
Return data time information in a tuple. | def get_time_info(self):
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_time_info(self, keys: list[str]):\n if self.is_info_v2:\n if not self.is_on:\n return 0\n return self.int_or_none(self._data.get(keys[1]))\n return self._data.get(keys[0])",
"def CopyToStatTimeTuple(self):\n if self._number_of_seconds is None:\n return None, None\n\n if self._microseconds is not None:\n return self._number_of_seconds, self._microseconds * 10\n\n return self._number_of_seconds, None",
"def utctimetuple(self):\n offset = self.utcoffset()\n if offset:\n self -= offset\n y, m, d = self.year, self.month, self.day\n hh, mm, ss = self.hour, self.minute, self.second\n return _build_struct_time(y, m, d, hh, mm, ss, 0)",
"def __get_times(self):\n data = self.simulate_file.readlines()\n data = list(map(str.strip, data))\n data = list(map(float, data))\n start = data[0]\n times = data[1:]\n return (start, times)",
"def timetuple(self):\n dst = self.dst()\n if dst is None:\n dst = -1\n elif dst:\n dst = 1\n else:\n dst = 0\n return _build_struct_time(\n self.year, self.month, self.day, self.hour, self.minute, self.second, dst\n )",
"def timetuple(self):\n return _build_struct_time(self._year, self._month, self._day, 0, 0, 0, -1)",
"def GetTime(self):\n return self.hour, self.minute, self.second",
"def time(self):\n return self.time_array",
"def time(self):\n return self[self.time_columns]",
"def time(self):\n return self[self.time_columns]",
"def time(self):\n return parse_time(self['timestamp'])",
"def getTimestamp(self):\r\n\t\treturn self.pair.data['timestamp']",
"def parse_time(self):\n\n # parse time\n year = int(self.start[:4])\n month = int(self.start[5:7])\n day = int(self.start[8:10])\n hours = int(self.start[11:13])\n minutes = int(self.start[14:16])\n seconds = int(self.start[17:19])\n time = datetime.datetime(year, month, day, hours, minutes, seconds)\n\n # advance time\n time = time + datetime.timedelta(minutes=self.rain_interval)\n time = time.isoformat(\" \")\n\n # timestamp\n # elevation (m)\n evolved_elevation = (\n 'elevation_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n # water depth (m)\n depth = (\n 'depth_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n # sediment flux (kg/ms)\n sediment_flux = (\n 'flux_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n # erosion-deposition (kg/m2s)\n erosion_deposition = (\n 'erosion_deposition_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n # elevation difference (m)\n difference = (\n 'difference_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n\n return (evolved_elevation, time, depth, sediment_flux,\n erosion_deposition, difference)",
"def get_time_start():\n sql = sqlite3.connect('data.db')\n cursor = sql.cursor()\n\n get_ip = \"\"\"SELECT ip FROM Status\"\"\"\n\n ip = cursor.execute(get_ip).fetchall()\n\n get_time = \"\"\"SELECT t_start FROM Status\"\"\"\n\n time = cursor.execute(get_time).fetchall()\n\n get_protocol = \"\"\"SELECT protocol FROM Status\"\"\"\n\n protocol = cursor.execute(get_protocol).fetchall()\n\n cursor.close()\n\n return zip(ip, time, protocol)",
"def _get_time(self) -> None:\n self.data[\"time\"] = np.zeros(len(self.data[\"yyyymmdd\"]), dtype=object)\n \n for idx, (yyyymmdd, hhmmss) in enumerate(zip(self.data[\"yyyymmdd\"], self.data[\"hhmmss\"])):\n year, month, day = yyyymmdd.split(\"/\")\n hour, minute, second = hhmmss.split(\":\")\n self.data[\"time\"][idx] = datetime(int(year), int(month), int(day), int(hour), int(minute), int(second))\n \n del self.data[\"yyyymmdd\"]\n del self.data[\"hhmmss\"]",
"def timetuple(self, *args, **kwargs): # real signature unknown\r\n pass",
"def get_time_points(self):\n return self._time",
"def _parse_time_metadata(self, data, kwargs):\n try:\n time = self._get_time_range(data)\n except KeyError:\n time = []\n try:\n time_steps = data.coords[self.time_field].size\n except KeyError:\n time_steps = kwargs.get('limit')\n return time, time_steps",
"def gettime(self):\n return self.t",
"def _get_time(self):\n # get the current time in UTC (make sure we are timezone aware)\n now_utc = datetime.datetime.now(pytz.UTC)\n \n # convert to our local timezone\n timenow = now_utc.astimezone(self.timezone)\n \n # save the data to our data\n self.data['year'][0] = timenow.year\n self.data['month'][0] = timenow.month\n self.data['day'][0] = timenow.day\n self.data['hour'][0] = timenow.hour\n self.data['minute'][0] = timenow.minute\n self.data['second'][0] = timenow.second\n \n return",
"def time(self):\n return self.raw[\"logTime\"]",
"def struct_time(self):\n _, month, day, hour, minute, second, weekday, _, _ = self.current_time\n # Bluetooth weekdays count from 1. struct_time counts from 0.\n return time.struct_time((month, day, hour, minute, second, weekday - 1, -1))",
"def get_timestamp(self, data):\n timestamp = data['timestamp']\n return timestamp",
"def getTimes():",
"def getTimes():",
"def getTimes():",
"def tt(obs_time, *whatevers):\n n = whatevers[0].size\n return tuple(\n [obs_time[:n], ] +\n list(whatevers)\n )",
"def time(self) -> int:\n return self.raw[\"time\"]",
"def load_time(self) -> Tuple[np.ndarray, np.ndarray]:\n # filename = self.casedir / Path(\"times.txt\")\n filename = self.casedir\n assert filename.exists(), \"Cannot find {filename}\".format(filename=filename)\n return load_times(filename)",
"def getTimeStamps():\n\n # Initialize\n results = dict()\n\n # UT time\n ut = utils.getUT(pointing=True).split()\n results['utday'] = ut[0]\n results['ut'] = float(ut[1])\n\n # year/month/day/second\n utStamp = time.gmtime()\n utHour = maybeAddAZero(utStamp[3])\n utMin = maybeAddAZero(utStamp[4])\n utSec = maybeAddAZero(utStamp[5])\n results['timeLab'] = ''.join([commands.yearMonthDay(),'_',utHour,utMin,utSec])\n\n # Done\n return results"
]
| [
"0.6896615",
"0.6830696",
"0.67479175",
"0.6706535",
"0.6693293",
"0.6687859",
"0.6653777",
"0.65909964",
"0.6582156",
"0.6582156",
"0.6352521",
"0.6329912",
"0.63027865",
"0.6289983",
"0.6285771",
"0.6283323",
"0.62765485",
"0.62752587",
"0.62750447",
"0.6242026",
"0.6214285",
"0.62081075",
"0.61647403",
"0.6151805",
"0.6151805",
"0.6151805",
"0.61393803",
"0.61381763",
"0.60962796",
"0.60935503"
]
| 0.7061362 | 0 |
Return host output files in a tuple. | def get_host_outfiles(self):
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_output_files(self):\r\n fname = self.__get_output_filename()\r\n return [fname] if fname else []",
"def output_files(self):\n return [self.input_files()[0].replace(\".lhe.gz\", \".stdhep\").replace(\".lhe\", \".stdhep\")]",
"def output_files(self):\n output_files = []\n for split in self.split_files:\n output_files.extend(split.filepaths)\n return output_files",
"def get_output_data(filenames):\n output = []\n for filename in filenames:\n file_info = get_file_info(filename)\n output.append(file_info)\n return output",
"def get_all_output_files():\n files = []\n for fmt in RunInfo.FORMATS:\n files.append(RunInfo.get_runinfo_basename() + \".\" + fmt)\n return files",
"def output_files(self):\n o = []\n if 'unweighted' in self.event_types:\n o.append(self.name + \"_unweighted_events.lhe.gz\")\n if 'weighted' in self.event_types:\n o.append(self.name + \"_events.lhe.gz\")\n return o",
"def _get_results_from_all_files(self, output_ojt):\n return [output[output_ojt] for output in self.output_objects]",
"def _list_outputs(self):\n outputs = self._outputs().get()\n\n out_dir = os.path.abspath(os.path.join(os.getcwd(), \"slicesdir\"))\n outputs[\"out_dir\"] = out_dir\n outputs[\"out_files\"] = [\n self._gen_fname(\n basename=f.replace(os.sep, \"_\"),\n cwd=out_dir,\n ext=self.inputs.out_extension,\n )\n for f in self.inputs.in_files\n ]\n return outputs",
"def output_files(self):\n # Output file for Moller generation\n if 'moller' in self.name:\n return ['moller.stdhep']\n # Output file for beam generation\n return ['beam.stdhep']",
"def get_output_files(self, action):\n assert action == \"run\", \"Unsupported action\"\n return expand(self.base_pattern_out, ext=self.extensions)",
"def get_outputs(self):\n return [x[1] for x in self.io_mapping]",
"def read_outputs(self):\n out_dir = 'example/output/pm_abcd_mrtm_watch_1971_2001/'\n out_file_names = glob.glob('{}*.csv'.format(out_dir))\n\n out_files = {}\n for f in out_file_names:\n df = pd.read_csv(f)\n out_files[f] = df\n\n return(out_files)",
"def _fetch_output_files(self, retrieved):\n # pylint: disable=protected-access,no-member\n import os\n\n # check in order not to overwrite anything\n # state = self._calc.get_state()\n # if state != calc_states.PARSING:\n # raise InvalidOperation(\"Calculation not in {} state\"\n # .format(calc_states.PARSING) )\n # Check that the retrieved folder is there\n try:\n out_folder = retrieved[self._calc._get_linkname_retrieved()]\n except KeyError:\n raise IOError('No retrieved folder found')\n\n list_of_files = out_folder.get_folder_list()\n\n output_path = None\n error_path = None\n\n if self._calc._DEFAULT_OUTPUT_FILE in list_of_files:\n output_path = os.path.join(out_folder.get_abs_path('.'), self._calc._DEFAULT_OUTPUT_FILE)\n if self._calc._DEFAULT_ERROR_FILE in list_of_files:\n error_path = os.path.join(out_folder.get_abs_path('.'), self._calc._DEFAULT_ERROR_FILE)\n\n return output_path, error_path",
"def get_output_files(self, action):\n assert action == \"run\"\n for ext in self.extensions:\n yield ext[1:].replace(\".\", \"_\"), self.base_path_out.format(mapper=self.name, ext=ext)\n for ext in (\".bamstats.html\", \".bamstats.txt\", \".flagstats.txt\", \".idxstats.txt\"):\n path = (\n \"work/{mapper}.{{library_name}}/report/bam_qc/\" \"{mapper}.{{library_name}}.bam{ext}\"\n ).format(mapper=self.name, ext=ext)\n yield \"report_\" + \".\".join(ext.split(\".\")[1:3]).replace(\".\", \"_\"), path\n for ext in (\n \".bamstats.html.md5\",\n \".bamstats.txt.md5\",\n \".flagstats.txt.md5\",\n \".idxstats.txt.md5\",\n ):\n path = (\n \"work/{mapper}.{{library_name}}/report/bam_qc/\" \"{mapper}.{{library_name}}.bam{ext}\"\n ).format(mapper=self.name, ext=ext)\n yield \"report_\" + \".\".join(ext.split(\".\")[1:3]).replace(\".\", \"_\") + \"_md5\", path",
"def get_output_names(hf):\n return sorted(map(str, hf['/output/data'].keys()))",
"def get_outputs(self):\n outputs = []\n missing = []\n for i, name in enumerate(self.output_names[:]):\n try:\n value = self.proto.output_env.look_up(name).unwrapped\n except Exception:\n if self.optional_flags[i]:\n value = None\n missing.append((i, name))\n else:\n raise\n outputs.append(value)\n for i, name in reversed(missing):\n del outputs[i]\n del self.output_names[i]\n del self.optional_flags[i]\n if missing:\n return outputs, reversed(missing)\n return outputs",
"def get_output(self, output_dir=\"tools_output\"):\n\n output_dir = self.project_dir / output_dir / self.name\n # create output directory if didn't exist\n if not output_dir.exists():\n os.makedirs(output_dir)\n logger.info(f\"Created {output_dir}\")\n\n for outfile in self.output:\n outfile = self.project_dir / outfile\n if outfile.exists():\n src = os.fspath(outfile)\n dst = os.fspath(output_dir / outfile.name)\n shutil.move(src, dst)\n logger.info(f\"Moved {outfile.name} to {output_dir}\")\n else:\n msg = f\"File not found: {outfile} - did you execute run() before?\"\n logger.error(msg)\n raise FileNotFoundError(msg)",
"def getOutputNames(self):\n return self.dirs",
"def _get_output_filenames(output_path, dpp=None):\n ret = []\n for fname in os.listdir(output_path):\n ext = _ext(dpp)\n if re.match(r\"get[^_]+[_free\\d?]?\" + ext, fname):\n ret.append(fname)\n return ret",
"def get_outputs(self):\n outputs = []\n for pkg in self._po_packages():\n outputs.append(pkg['build_dir'])\n for item in self._po_package_contents(pkg):\n outputs.append(item['lang_dir'])\n outputs.append(item['msg_dir'])\n outputs.append(item['mo_file'])\n return outputs",
"def outputs(self):\n if not self.output_dir:\n raise Exception('No output directory is configured.')\n\n outputs = [os.path.join(root, name)\n for root, dirs, files in os.walk(self.output_dir)\n for name in files]\n\n for f in outputs:\n print(f.replace(self.output_dir + '/', ''))",
"def outputs(self):\n return {\"path_to_result_folder\": File_IO(\n self.node.outputs[0])}",
"def get_outputs(self, inputs):\n \n # Paths\n input_path = self.input_path\n output_path = self.output_path\n\n # Filename changes\n output_extension = stringify(self.executor.output_extension)\n output_prefix = stringify(self.executor.output_prefix) or ''\n\n if self.output:\n # Combine all inputs into one output\n output = output_prefix + change_extension(self.output, output_extension)\n output = join_path(output_path, output)\n \n if self.output_transform:\n output = self.output_transform(output)\n \n if self.run_output:\n if self.run_command:\n verify_type(self.run_command, list)\n run_command = [stringify(v).format(output=output) for v in self.run_command]\n else:\n run_command = [output]\n with current_context() as ctx:\n ctx.current.project.run[self.run_output] = run_command\n \n return True, [Output(output_path, output)]\n elif inputs:\n # Each input matches an output\n \n # Strip prefix\n if self.output_strip_prefix_from:\n with current_context() as ctx:\n _, p = ctx.current.project.get_phase_for(self.output_strip_prefix_from,\n 'output_strip_prefix_from')\n if p:\n output_strip_prefix = p.output_path\n else:\n output_strip_prefix = None\n else:\n output_strip_prefix = stringify(self.output_strip_prefix)\n if output_strip_prefix is None:\n output_strip_prefix = input_path\n if not output_strip_prefix.endswith(os.sep):\n output_strip_prefix += os.sep\n output_strip_prefix_length = len(output_strip_prefix)\n \n outputs = [] \n for the_input in inputs:\n output = the_input\n \n # Strip prefix\n if output.startswith(output_strip_prefix):\n output = output[output_strip_prefix_length:]\n\n # Filename changes\n if output_prefix:\n p, f = os.path.split(output)\n output = join_path(p, output_prefix + f)\n output = change_extension(output, output_extension)\n \n output = join_path(output_path, output)\n\n if self.output_transform:\n output = self.output_transform(output)\n\n outputs.append(Output(output_path, output))\n \n return False, outputs\n else:\n return False, []",
"def extract_files(self) -> list:\n pass",
"def get_vm_outfiles(self):\n\n raise NotImplementedError",
"def outputs(folderName):\n for i in itertools.count(1):\n yield io.open('%s/Video_%s.h264' %\n (folderName,\n datetime.now().strftime('%Y_%m_%d_%H_%M_%S')),\n 'wb')",
"def collect_output(workdir_path, outputs):\n output_dict = {}\n for output_parameter in outputs:\n if 'id' not in output_parameter:\n exit_validation(\"Error: output without id member\")\n if 'type' not in output_parameter:\n exit_validation(\"Error: output without type member\")\n if output_parameter['type'] != 'File':\n exit_system_error(\"Sorry, I only know about File outputs\")\n if 'outputBinding' in output_parameter:\n binding = output_parameter['outputBinding']\n paths = []\n if 'glob' in binding:\n paths = glob.glob(os.path.join(workdir_path, binding['glob']))\n log(\"Paths after globbing: \" + str(paths))\n if paths != []:\n output_dict[output_parameter['id']] = {\n 'class': 'File',\n 'location': 'file:///' + paths[0]\n }\n return output_dict",
"def output_files(filepath):\n\n infile = open(filepath, 'r')\n lines = infile.readlines()\n\n rel_path = './'\n rel_path += lines[6][lines[6].find(':')+1:].strip()\n rel_path += lines[7][lines[7].find(':')+1:].strip()\n\n filename_I1 = lines[9][lines[9].find(':')+1:].strip()\n filename_I2 = lines[10][lines[10].find(':')+1:].strip()\n filename_IW = lines[12][lines[12].find(':')+1:].strip()\n filename_WE = lines[13][lines[13].find(':')+1:].strip()\n filename_CFLx = lines[15][lines[15].find(':')+1:].strip()\n filename_CFLv = lines[16][lines[16].find(':')+1:].strip()\n filename_S = lines[18][lines[18].find(':')+1:].strip()\n\n filepath_I1 = rel_path + filename_I1\n filepath_I2 = rel_path + filename_I2\n filepath_IW = rel_path + filename_IW\n filepath_WE = rel_path + filename_WE\n # filepath_CFLx = rel_path + filename_CFLx\n # filepath_CFLv = rel_path + filename_CFLv\n filepath_S = rel_path + filename_S\n\n outfile_I1 = open(filepath_I1, 'w')\n outfile_I2 = open(filepath_I2, 'w')\n outfile_IW = open(filepath_IW, 'w')\n outfile_WE = open(filepath_WE, 'w')\n # outfile_CFLx = open(filepath_CFLx, 'w')\n # outfile_CFLv = open(filepath_CFLv, 'w')\n outfile_S = open(filepath_S, 'w')\n\n outfiles = dict(I1 = outfile_I1,\n I2 = outfile_I2,\n IW = outfile_IW,\n WE = outfile_WE,\n # CFLx = outfile_CFLx,\n # CFLv = outfile_CFLv,\n S = outfile_S)\n\n return outfiles",
"def output_files(filepath):\n\n infile = open(filepath, 'r')\n lines = infile.readlines()\n\n rel_path = './'\n rel_path += lines[6][lines[6].find(':')+1:].strip()\n rel_path += lines[7][lines[7].find(':')+1:].strip()\n\n filename_I1 = lines[9][lines[9].find(':')+1:].strip()\n filename_I2 = lines[10][lines[10].find(':')+1:].strip()\n filename_IW = lines[12][lines[12].find(':')+1:].strip()\n filename_WE = lines[13][lines[13].find(':')+1:].strip()\n filename_CFLx = lines[15][lines[15].find(':')+1:].strip()\n filename_CFLv = lines[16][lines[16].find(':')+1:].strip()\n filename_S = lines[18][lines[18].find(':')+1:].strip()\n\n filepath_I1 = rel_path + filename_I1\n filepath_I2 = rel_path + filename_I2\n filepath_IW = rel_path + filename_IW\n filepath_WE = rel_path + filename_WE\n # filepath_CFLx = rel_path + filename_CFLx\n # filepath_CFLv = rel_path + filename_CFLv\n filepath_S = rel_path + filename_S\n\n outfile_I1 = open(filepath_I1, 'w')\n outfile_I2 = open(filepath_I2, 'w')\n outfile_IW = open(filepath_IW, 'w')\n outfile_WE = open(filepath_WE, 'w')\n # outfile_CFLx = open(filepath_CFLx, 'w')\n # outfile_CFLv = open(filepath_CFLv, 'w')\n outfile_S = open(filepath_S, 'w')\n\n outfiles = dict(I1 = outfile_I1,\n I2 = outfile_I2,\n IW = outfile_IW,\n WE = outfile_WE,\n # CFLx = outfile_CFLx,\n # CFLv = outfile_CFLv,\n S = outfile_S)\n\n return outfiles",
"def get_output_files(self, action):\n assert action == \"run\", \"Unsupported action\"\n realigned_infix = \".\" + self.config[\"gatk_post_bam\"][\"realigned_infix\"]\n recalibrated_infix = \".\" + self.config[\"gatk_post_bam\"][\"recalibrated_infix\"]\n if self.config[\"gatk_post_bam\"][\"do_realignment\"]:\n for ext_name, ext in zip(EXT_NAMES, EXT_VALUES):\n yield ext_name + \"_realigned\", self.path_tpl.format(infix=realigned_infix, ext=ext)\n recalibrated_infix = realigned_infix + recalibrated_infix\n if self.config[\"gatk_post_bam\"][\"do_recalibration\"]:\n for ext_name, ext in zip(EXT_NAMES, EXT_VALUES):\n yield ext_name + \"_recalibrated\", self.path_tpl.format(\n infix=recalibrated_infix, ext=ext\n )"
]
| [
"0.6888449",
"0.66737944",
"0.6598473",
"0.6436748",
"0.6293716",
"0.6286534",
"0.628374",
"0.6179701",
"0.60801107",
"0.60670453",
"0.5928418",
"0.5906354",
"0.5890646",
"0.5887146",
"0.5846622",
"0.5820629",
"0.58062935",
"0.58001685",
"0.57891005",
"0.578708",
"0.5781865",
"0.5756345",
"0.5738085",
"0.57202995",
"0.5691152",
"0.5686457",
"0.56845546",
"0.56567436",
"0.56567436",
"0.5655975"
]
| 0.7225652 | 0 |
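The record that ends here (its query and positive document precede this excerpt) and its negatives all deal with enumerating output files: globbing a working directory and collecting per-pattern paths. A hedged, self-contained sketch of glob-based output collection; the function name and patterns are illustrative and not taken from any snippet above.

```python
import glob
import os


def collect_outputs(workdir, patterns):
    """Return {pattern: [absolute matching paths]} for every glob pattern that matches in workdir."""
    found = {}
    for pattern in patterns:
        matches = sorted(glob.glob(os.path.join(workdir, pattern)))
        if matches:
            found[pattern] = [os.path.abspath(p) for p in matches]
    return found


# Example: look for CSV and PNG outputs in the current directory.
print(collect_outputs(".", ["*.csv", "*.png"]))
```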
Return VM output files in a tuple. | def get_vm_outfiles(self):
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_output_files(self):\r\n fname = self.__get_output_filename()\r\n return [fname] if fname else []",
"def _list_outputs(self):\n outputs = self._outputs().get()\n\n out_dir = os.path.abspath(os.path.join(os.getcwd(), \"slicesdir\"))\n outputs[\"out_dir\"] = out_dir\n outputs[\"out_files\"] = [\n self._gen_fname(\n basename=f.replace(os.sep, \"_\"),\n cwd=out_dir,\n ext=self.inputs.out_extension,\n )\n for f in self.inputs.in_files\n ]\n return outputs",
"def get_all_output_files():\n files = []\n for fmt in RunInfo.FORMATS:\n files.append(RunInfo.get_runinfo_basename() + \".\" + fmt)\n return files",
"def output_files(self):\n output_files = []\n for split in self.split_files:\n output_files.extend(split.filepaths)\n return output_files",
"def output_files(self):\n o = []\n if 'unweighted' in self.event_types:\n o.append(self.name + \"_unweighted_events.lhe.gz\")\n if 'weighted' in self.event_types:\n o.append(self.name + \"_events.lhe.gz\")\n return o",
"def output_files(self):\n return [self.input_files()[0].replace(\".lhe.gz\", \".stdhep\").replace(\".lhe\", \".stdhep\")]",
"def get_host_outfiles(self):\n\n raise NotImplementedError",
"def output_files(self):\n # Output file for Moller generation\n if 'moller' in self.name:\n return ['moller.stdhep']\n # Output file for beam generation\n return ['beam.stdhep']",
"def getOutputNames(self):\n return self.dirs",
"def get_outputs(self):\n outputs = []\n for pkg in self._po_packages():\n outputs.append(pkg['build_dir'])\n for item in self._po_package_contents(pkg):\n outputs.append(item['lang_dir'])\n outputs.append(item['msg_dir'])\n outputs.append(item['mo_file'])\n return outputs",
"def get_output_files(self, action):\n assert action == \"run\"\n for ext in self.extensions:\n yield ext[1:].replace(\".\", \"_\"), self.base_path_out.format(mapper=self.name, ext=ext)\n for ext in (\".bamstats.html\", \".bamstats.txt\", \".flagstats.txt\", \".idxstats.txt\"):\n path = (\n \"work/{mapper}.{{library_name}}/report/bam_qc/\" \"{mapper}.{{library_name}}.bam{ext}\"\n ).format(mapper=self.name, ext=ext)\n yield \"report_\" + \".\".join(ext.split(\".\")[1:3]).replace(\".\", \"_\"), path\n for ext in (\n \".bamstats.html.md5\",\n \".bamstats.txt.md5\",\n \".flagstats.txt.md5\",\n \".idxstats.txt.md5\",\n ):\n path = (\n \"work/{mapper}.{{library_name}}/report/bam_qc/\" \"{mapper}.{{library_name}}.bam{ext}\"\n ).format(mapper=self.name, ext=ext)\n yield \"report_\" + \".\".join(ext.split(\".\")[1:3]).replace(\".\", \"_\") + \"_md5\", path",
"def _get_results_from_all_files(self, output_ojt):\n return [output[output_ojt] for output in self.output_objects]",
"def get_output_data(filenames):\n output = []\n for filename in filenames:\n file_info = get_file_info(filename)\n output.append(file_info)\n return output",
"def getResult(self):\n return (self.__output, self.__errors, self.__fileSeparators)",
"def get_output_files(self, action):\n assert action == \"run\", \"Unsupported action\"\n return expand(self.base_pattern_out, ext=self.extensions)",
"def outputs(self):\n return {\"path_to_result_folder\": File_IO(\n self.node.outputs[0])}",
"def get_allinone_outfiles(self):\n\n raise NotImplementedError",
"def read_outputs(self):\n out_dir = 'example/output/pm_abcd_mrtm_watch_1971_2001/'\n out_file_names = glob.glob('{}*.csv'.format(out_dir))\n\n out_files = {}\n for f in out_file_names:\n df = pd.read_csv(f)\n out_files[f] = df\n\n return(out_files)",
"def get_output_files(self, action):\n assert action == \"run\", \"Unsupported action\"\n realigned_infix = \".\" + self.config[\"gatk_post_bam\"][\"realigned_infix\"]\n recalibrated_infix = \".\" + self.config[\"gatk_post_bam\"][\"recalibrated_infix\"]\n if self.config[\"gatk_post_bam\"][\"do_realignment\"]:\n for ext_name, ext in zip(EXT_NAMES, EXT_VALUES):\n yield ext_name + \"_realigned\", self.path_tpl.format(infix=realigned_infix, ext=ext)\n recalibrated_infix = realigned_infix + recalibrated_infix\n if self.config[\"gatk_post_bam\"][\"do_recalibration\"]:\n for ext_name, ext in zip(EXT_NAMES, EXT_VALUES):\n yield ext_name + \"_recalibrated\", self.path_tpl.format(\n infix=recalibrated_infix, ext=ext\n )",
"def get_outputs(self):\n return [x[1] for x in self.io_mapping]",
"def outputs(folderName):\n for i in itertools.count(1):\n yield io.open('%s/Video_%s.h264' %\n (folderName,\n datetime.now().strftime('%Y_%m_%d_%H_%M_%S')),\n 'wb')",
"def outputs(self):\n if not self.output_dir:\n raise Exception('No output directory is configured.')\n\n outputs = [os.path.join(root, name)\n for root, dirs, files in os.walk(self.output_dir)\n for name in files]\n\n for f in outputs:\n print(f.replace(self.output_dir + '/', ''))",
"def _get_output_filenames(output_path, dpp=None):\n ret = []\n for fname in os.listdir(output_path):\n ext = _ext(dpp)\n if re.match(r\"get[^_]+[_free\\d?]?\" + ext, fname):\n ret.append(fname)\n return ret",
"def outList(self,list=False):\n txt = ''\n txt += 'echo \">>> list of expected files on output sandbox\"\\n'\n listOutFiles = []\n stdout = 'CMSSW_$NJob.stdout'\n stderr = 'CMSSW_$NJob.stderr'\n if len(self.output_file) <= 0:\n msg =\"WARNING: no output files name have been defined!!\\n\"\n msg+=\"\\tno output files will be reported back/staged\\n\"\n common.logger.info(msg)\n\n if (self.return_data == 1):\n for file in (self.output_file):\n listOutFiles.append(numberFile(file, '$OutUniqueID'))\n for file in (self.output_file_sandbox):\n listOutFiles.append(numberFile(file, '$NJob'))\n listOutFiles.append(stdout)\n listOutFiles.append(stderr)\n listOutFiles.append('Watchdog_$NJob.log.gz')\n\n txt += 'echo \"output files: '+string.join(listOutFiles,' ')+'\"\\n'\n txt += 'filesToCheck=\"'+string.join(listOutFiles,' ')+'\"\\n'\n txt += 'export filesToCheck\\n'\n taskinfo={}\n taskinfo['outfileBasename'] = self.output_file\n common._db.updateTask_(taskinfo)\n\n if list : return self.output_file\n return txt",
"def extract_files(self) -> list:\n pass",
"def getResults(self, cleanup=True):\n self.wait_on_job()\n stdout_str = self.ofile_string()\n stderr_str = self.efile_string()\n if cleanup:\n self.erase_files()\n return (stdout_str, stderr_str)",
"def _fetch_output_files(self, retrieved):\n # pylint: disable=protected-access,no-member\n import os\n\n # check in order not to overwrite anything\n # state = self._calc.get_state()\n # if state != calc_states.PARSING:\n # raise InvalidOperation(\"Calculation not in {} state\"\n # .format(calc_states.PARSING) )\n # Check that the retrieved folder is there\n try:\n out_folder = retrieved[self._calc._get_linkname_retrieved()]\n except KeyError:\n raise IOError('No retrieved folder found')\n\n list_of_files = out_folder.get_folder_list()\n\n output_path = None\n error_path = None\n\n if self._calc._DEFAULT_OUTPUT_FILE in list_of_files:\n output_path = os.path.join(out_folder.get_abs_path('.'), self._calc._DEFAULT_OUTPUT_FILE)\n if self._calc._DEFAULT_ERROR_FILE in list_of_files:\n error_path = os.path.join(out_folder.get_abs_path('.'), self._calc._DEFAULT_ERROR_FILE)\n\n return output_path, error_path",
"def get_outputs(self):\n outputs = []\n missing = []\n for i, name in enumerate(self.output_names[:]):\n try:\n value = self.proto.output_env.look_up(name).unwrapped\n except Exception:\n if self.optional_flags[i]:\n value = None\n missing.append((i, name))\n else:\n raise\n outputs.append(value)\n for i, name in reversed(missing):\n del outputs[i]\n del self.output_names[i]\n del self.optional_flags[i]\n if missing:\n return outputs, reversed(missing)\n return outputs",
"def get_result_files(self):\n name_pattern = \"{mapper}.{ngs_library.name}\"\n yield from self._yield_result_files(\n os.path.join(\"output\", name_pattern, \"out\", name_pattern + \"{ext}\"), ext=EXT_VALUES\n )\n yield from self._yield_result_files(\n os.path.join(\"output\", name_pattern, \"log\", \"{mapper}.{ngs_library.name}.{ext}\"),\n ext=(\n \"log\",\n \"conda_info.txt\",\n \"conda_list.txt\",\n \"log.md5\",\n \"conda_info.txt.md5\",\n \"conda_list.txt.md5\",\n ),\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.{report}.txt\"\n ),\n report=(\"bamstats\", \"flagstats\", \"idxstats\"),\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.{report}.txt.md5\"\n ),\n report=(\"bamstats\", \"flagstats\", \"idxstats\"),\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.bamstats.html\"\n )\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.bamstats.html.md5\"\n )\n )\n\n for sheet in self.shortcut_sheets:\n for ngs_library in sheet.all_ngs_libraries:\n if ngs_library.name in self.ngs_library_to_kit:\n extraction_type = ngs_library.test_sample.extra_infos[\"extractionType\"]\n suffix = (\n \"_long\"\n if ngs_library.extra_infos[\"seqPlatform\"] in (\"PacBio\", \"ONP\")\n else \"\"\n )\n # Per-sample target coverage report.\n yield from expand(\n os.path.join(\n \"output\", name_pattern, \"report\", \"cov_qc\", name_pattern + \".{ext}\"\n ),\n mapper=self.config[\"tools\"][extraction_type.lower() + suffix],\n ngs_library=[ngs_library],\n ext=[\"txt\", \"txt.md5\"],\n )\n yield \"output/target_cov_report/out/target_cov_report.txt\"\n yield \"output/target_cov_report/out/target_cov_report.txt.md5\"\n if (\n self.config[\"picard_hs_metrics\"][\"path_targets_interval_list\"]\n and self.config[\"picard_hs_metrics\"][\"path_baits_interval_list\"]\n ):\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"picard_hs_metrics\", name_pattern + \".txt\"\n )\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"picard_hs_metrics\", name_pattern + \".txt.md5\"\n )\n )\n if self.config[\"compute_coverage_bed\"]:\n yield from self._yield_result_files(\n os.path.join(\"output\", name_pattern, \"report\", \"coverage\", name_pattern + \"{ext}\"),\n ext=(\".bed.gz\", \".bed.gz.tbi\"),\n )\n else:\n print(\n \"Genome-wide coverage BED generation disabled\", file=sys.stderr\n ) # pragma: no cover",
"def get_files_from_directory(self, folder):\n return ['{}/{}'.format(folder, each) for each in os.listdir(folder) if each.endswith('.vm')]"
]
| [
"0.7118414",
"0.6668537",
"0.66680706",
"0.66119856",
"0.6590606",
"0.6571721",
"0.6446606",
"0.64158994",
"0.6320247",
"0.6251037",
"0.6235383",
"0.619133",
"0.61899394",
"0.60789007",
"0.60338473",
"0.59975857",
"0.5989016",
"0.5984177",
"0.5969433",
"0.59565455",
"0.5945257",
"0.5941953",
"0.5903788",
"0.5882377",
"0.58511394",
"0.5802856",
"0.5795576",
"0.57768404",
"0.57486206",
"0.57458705"
]
| 0.74231887 | 0 |
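The positive document above is only an abstract stub that raises NotImplementedError. A hedged sketch of what one concrete implementation might look like; the vm_outfiles attribute is borrowed from the CSVFile constructor in a later record and is an assumption here, not something this record defines.

```python
class CSVFileOutfiles:
    """Minimal sketch: a data source that tracks one output file per VM."""

    def __init__(self):
        # Hypothetical mapping, e.g. {"vm_1": "/tmp/vm_1.png"}; a real subclass would fill this in.
        self.vm_outfiles = {}

    def get_vm_outfiles(self):
        # Return the VM output files in a tuple, in a stable (sorted-by-VM-name) order.
        return tuple(self.vm_outfiles[vm] for vm in sorted(self.vm_outfiles))


files = CSVFileOutfiles()
files.vm_outfiles = {"vm_2": "/tmp/vm_2.png", "vm_1": "/tmp/vm_1.png"}
print(files.get_vm_outfiles())  # ('/tmp/vm_1.png', '/tmp/vm_2.png')
```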
Initialize a CSVFile object with a directory. The directory is a CBTOOL experiment result directory generated by the monextract command. It contains a few CSV files; among them, one contains host OS metric data and another contains VM OS metric data. | def __init__(self, expdir):
self.expdir = expdir
self.expid = basename(expdir)
self.host_csvfile = "%s/%s_%s.csv" % (expdir,
self.HOST_FILE_PREFIX, self.expid)
self.host_data = {}
self.host_data_fields = []
self.host_outfiles = {}
self.vm_csvfile = "%s/%s_%s.csv" % (expdir,
self.VM_FILE_PREFIX, self.expid)
self.vm_data = {}
self.vm_data_fields = []
self.vm_outfiles = {}
self.allinone_outfiles = {}
self.start = None
self.end = None
self.step = None
# Check if the CSV files exist
for f in [self.host_csvfile, self.vm_csvfile]:
if not isfile(f):
logging.error("%s doesn't exist. Aborted." % f)
sys.exit(1)
logging.info("Processing files in %s directory" % basename(expdir))
self.get_host_data()
self.get_vm_data()
self.get_time_info() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __openAndInitCSVFile(self, modelInfo):\n # Get the base path and figure out the path of the report file.\n basePath = self.__outputDirAbsPath\n\n # Form the name of the output csv file that will contain all the results\n reportCSVName = \"%s_Report.csv\" % (self.__outputLabel,)\n reportCSVPath = self.__reportCSVPath = os.path.join(basePath, reportCSVName)\n\n # If a report CSV file already exists, back it up\n backupCSVPath = None\n if os.path.exists(reportCSVPath):\n backupCSVPath = self.__backupCSVPath = _backupFile(reportCSVPath)\n\n\n # Open report file\n if self.__replaceReport:\n mode = \"w\"\n else:\n mode = \"a\"\n csv = self.__csvFileObj = open(reportCSVPath, mode)\n\n # If we are appending, add some blank line separators\n if not self.__replaceReport and backupCSVPath:\n print >> csv\n print >> csv\n\n # Print the column names\n print >> csv, \"jobID, \",\n print >> csv, \"modelID, \",\n print >> csv, \"status, \" ,\n print >> csv, \"completionReason, \",\n print >> csv, \"startTime, \",\n print >> csv, \"endTime, \",\n print >> csv, \"runtime(s), \" ,\n print >> csv, \"expDesc, \",\n print >> csv, \"numRecords, \",\n\n for key in self.__sortedVariableNames:\n print >> csv, \"%s, \" % key,\n for key in self.__sortedMetricsKeys:\n print >> csv, \"%s, \" % key,\n print >> csv",
"def __init__(self, trace, directory):\n #Public attributes\n self._trace = trace\n self._file_name = directory + '/trace.csv'\n \n #Private attributes\n self._writer = None",
"def init_csv_file(self):\n folder = \"/home/pi/data/\" + datetime.now().strftime(\"%Y_%m_%d\") + \"/\"\n if not os.path.isdir(folder):\n # append 'a' to the folder name until we find a name that does not exist\n while os.path.exists(folder):\n folder = folder[:-1] + \"a\" + \"/\"\n os.mkdir(folder)\n filename = folder + 'particledata_' + datetime.now().strftime (\"%H-%M-%S\") \n while os.path.exists(filename):\n filename = filename + '_a'\n filename += '.csv'\n log.info('Writing data to: ' + filename)\n self.file = open(filename, \"w\")\n self.file.write('Unix Time;Human Readable Time;pm 2.5;pm 10;Has Fix;Longitude;Latitude;Altitude;GPS Unix Time\\n')\n self.file.flush()\n self.synced_time = False",
"def main():\n\n # Ensure the output directory exists\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n\n process_csv()",
"def __init__(self, root_dir):\n self.paths = glob.glob(root_dir + \"/*.csv\")\n self.target = 'Default'\n # Grouping variable names",
"def __init__(self, *, csv_file_path: str = ''):\n self.__csv_file_path = csv_file_path\n self._parse_csv()",
"def init_csv_file(csv_path):\n with open(csv_path, 'w', newline='') as csv_file:\n writer = csv.writer(csv_file)\n header = ['file_name', 'chart_in_file',\n 'year', 'month', 'row_no', 'bird_species']\n header += list(range(1, 32))\n writer.writerow(header)",
"def csv_dir(self):\n return op.join(self.root_dir, 'csv')",
"def __init__(self, directory):\n self._path = os.path.join(\"../../datasets\", directory)\n self.airlines = pd.read_csv(os.path.join(self._path, 'airlines.csv'))\n self.airports = pd.read_csv(os.path.join(self._path, 'airports.csv'))\n self.planes = pd.read_csv(os.path.join(self._path, 'planes.csv'))\n self.countries = pd.read_csv(os.path.join(self._path, 'countries.csv'))\n self.routes = pd.read_csv(os.path.join(self._path, 'routes.csv'))\n self._CreateGraph()",
"def __init__(\n self, figures_dir: os.PathLike = None, csv_path: os.PathLike = CSV_PATH\n ):\n if not figures_dir:\n self.figures_dir = Path(csv_path).parent / \"responses\"\n else:\n figures_dir = Path(figures_dir)\n self.figures_dir = figures_dir\n if not Path(self.figures_dir).is_dir():\n self.figures_dir.mkdir()\n print(f\"Generated directory for reponses figures in {self.figures_dir}\")\n self.df = pd.read_csv(csv_path).drop(\"Timestamp\", axis=1)\n self.stim_dict = self.gen_dict()\n self.transform_answers(self.stim_dict)",
"def write_csv(self, directory = None):\n if ((directory is None) and\n (self._session.config.folder_basename is not None)):\n directory = self._session.results._full_path\n else:\n return\n \n file = CSV_file(self, directory)\n file.write()\n return file",
"def __init__(\n self,\n output_dir=None,\n output_fname='statistics.tsv',\n exp_name=None,\n log_freq=1,\n ):\n # Set up output file\n self.output_dir = output_dir or '/tmp/experiments/%i' % int(time.time())\n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)\n output_filepath = os.path.join(output_dir, output_fname)\n self.output_file = open(output_filepath, 'w')\n self.file_writer = csv.writer(self.output_file, delimiter='\\t')\n atexit.register(self.output_file.close)\n\n self.exp_name = exp_name\n self.log_freq = log_freq\n self.first_row = True\n self.log_headers = None\n self.counter = 0 # keeps track of how often log_stats is called",
"def __init__(self):\r\n self.parent_directory = \"..\\csv\\\\\"\r\n self.file_parse_error_msg = \"An error occurred while paring the file\"",
"def __init__(self, csv_file: str = None) -> None:\n super().__init__(csv_file)",
"def __init__(self, csv_file: str = None) -> None:\n super().__init__(csv_file)",
"def generate_Struct(csv_file, pathToDir):\n\n df = extract_structure_from_csv(csv_file)\n\n df = df[ESSENTIAL_CSV_COLUMNS]\n\n for session_kwargs in df.to_dict('index').values():\n session = AnDOData(**session_kwargs)\n session.basedir = pathToDir\n session.generate_structure()",
"def __init__(self, path=None):\n super().__init__(path=path)\n self.path += '{}.csv'",
"def __init__(self, path):\n self.csv_path = path\n # check if csv format is valid or not\n self.check_valid_csvformat(self.csv_path)\n \"\"\" empty dict to store all company names\n prepare initial company data in dictionary format \"\"\"\n self.company_data = dict()",
"def __init__(self, file_name: str):\n self.case_metrics = []\n self.cluster_metrics = []\n self.file_name = file_name\n\n self.path_to_pmg_metrics = f'metrics/{file_name}_process_model_graphs'\n self.path_to_pmg_vis = f'visualization/{file_name}_process_model_graphs'\n self.path_to_drifts = 'visualization/drifts'\n self.path_to_case_metrics = 'metrics/case_metrics'\n self.path_to_cluster_metrics = 'metrics/cluster_metrics'\n try:\n makedirs(self.path_to_pmg_metrics, exist_ok=True)\n makedirs(self.path_to_pmg_vis, exist_ok=True)\n makedirs(self.path_to_drifts, exist_ok=True)\n makedirs(self.path_to_case_metrics, exist_ok=True)\n makedirs(self.path_to_cluster_metrics, exist_ok=True)\n\n pd.DataFrame(columns=['stream_index', 'timestamp', 'check point', 'case',\n 'graph distance', 'time distance', 'label']) \\\n .to_csv(f'{self.path_to_case_metrics}/{file_name}.csv', index=False)\n pd.DataFrame(columns=['stream_index', 'timestamp', 'check point', 'cluster id',\n 'x', 'y', 'radius', 'weight', 'cluster type']) \\\n .to_csv(f'{self.path_to_cluster_metrics}/{file_name}.csv', index=False)\n except Exception as e:\n print(e)",
"def __init__(self, filename='/var/humtemp/file.csv'):\n self.filename = filename",
"def test_create_csv(self):\n\n # absolute path to xml file to parse\n xml_file = os.path.join(self.xmlfilepath, \"DLTINS_20210117_01of01.xml\")\n\n # absolute path to the csv file to create\n csv_file = os.path.join(self.csvfile, \"DLTINS_20210117_01of01.csv\")\n\n # Test for correct data\n self.assertEqual(create_csv(xml_file, self.csvfile), csv_file)\n\n # Test for incorrect input xml file\n self.assertEqual(create_csv(\"somerandomfile\", self.csvfile), None)\n\n # Test for incorrect path to write csv to\n self.assertEqual(create_csv(xml_file, r\"D:\\kqcA CK j \"), None)",
"def __init__(self, datadir):\n\n print('\\rStatus: Verifying Data', end = \"\\t\\t\\t\\r\")\n\n self.datadir = datadir\n\n # Load file info and verify data validity\n info = pd.read_csv(os.path.join(self.datadir, 'info.csv'))\n nt = len(info)\n\n self.t = [dt.datetime.strptime(str(info.ix[i,'DATE'])\n + str(info.ix[i,'TIME']),\n '%Y%m%d%H%M')\n for i in range(nt)]\n\n self.xyzdir = os.path.join(datadir, 'xyz')\n self.xyznames = info.ix[:,'FILENAME']\n\n self.xyzpaths = [os.path.join(self.xyzdir, self.xyznames[i])\n for i in range(nt)]\n\n xyzglobpath = os.path.join(self.datadir, 'xyz', '*')\n assert (set(self.xyzpaths).issubset(set(glob.glob(xyzglobpath)))), \\\n \"FILENAME listed in 'info.csv' not found in datadir\"",
"def create_initial_csv():\n\tif os.path.exists(args.train):\n\t\tprint(\"--Training data input found: \", args.train)\n\t\t#quick and dirty create csv file\n\t\theaders = os.system(\"echo idorigh,idresph,origbytes,respbytes,origpkts,resppkts,duration > log.csv\")\n\t\tbrocut = os.system(\"cat \"+str(args.train)+\"| bro-cut id.orig_h id.resp_h orig_bytes resp_bytes orig_pkts resp_pkts duration | sed 's/\t/\\,/g' | sed '/-/d'>> log.csv\")\n\t\t\n\telse:\n\t\tprint(\"Bro training data input \"+str(args.train)+\" not found - needs to be in working directory\")\n\t\texit()",
"def __init__(self, directory: str):\n if not os.path.exists(directory):\n raise FileNotFoundError(\n f\"The specified directory ‘{directory}’ is not found\")\n\n self.directory: str = directory # NOT mandatory!\n self.files_summary: Dict[str, Dict[str, int]] = dict()\n\n self.analyze_files() # summarize the python files data",
"def __init__(self, data):\n\n self.produce_csv = data['produce_csv']\n self.produce_graphics = data['produce_graphics']\n self.report_name = data['report_name']\n self.file_name = self.report_name + '.csv'\n self.annual_file_name = self.report_name + '_annual.csv'\n self.csv_dir = ''\n self.diagnostic_dir = ''\n\n self.daily_variables = {\n 'year': ['time.cal_year', '', []],\n 'j_day': ['time.day', '', []]\n }\n\n self.annual_variables = {\n 'year': ['time.cal_year', '', 0]\n }",
"def load_log(dir_):\n df = pandas.read_csv(os.path.join(dir_, 'log.csv'),\n error_bad_lines=False,\n warn_bad_lines=True)\n if not len(df):\n print(\"empty df at {}\".format(dir_))\n return\n df['model'] = dir_\n return df",
"def initialize(self, cwd: str, option: str = \"\", **kwargs):\n # TODO: make the times when the courses start prettier\n path = os.path.join(cwd, option)\n\n def recursive_dictionary_clear(d):\n \"\"\"Recursively clear dictionary keys with empty values.\"\"\"\n for key in list(d):\n if isinstance(d[key], dict):\n recursive_dictionary_clear(d[key])\n\n if d[key] == \"\" or d[key] == {}:\n del d[key]\n\n def format_teacher(teacher):\n \"\"\"An ungly, hard-coded way to format the names of the teachers. Couldn't\n find something more solid, so this will have to do for now.\"\"\"\n l = split(\n \"|\".join(\n [\n \"doc\\.\",\n \"Ing\\.\",\n \"Ph.D\\.\",\n \"CSc\\.\",\n \"PhDr\\.\",\n \"DrSc\\.\",\n \"Mgr\\.\",\n \"RNDr\\.\",\n \"M\\.Sc\\.\",\n \"Bc\\.\",\n \"Dr\\.\",\n \"D\\.Phil\\.\",\n \"Ph\\.\",\n \"r\\.\",\n ]\n ),\n teacher,\n )\n l = [i.strip().strip(\",\").strip() for i in l]\n l = [i for i in l if i not in (\",\", \"\")]\n return \" / \".join([\" \".join(list(reversed(i.split()))) for i in l])\n\n if option == \"\":\n exit_with_error(\"No CSV to initialize from specified.\")\n\n if not os.path.exists(path):\n exit_with_error(\"Specified file doesn't exist.\")\n\n with open(path, \"rb\") as f:\n # SIS uses cp1250 :(\n contents = f.read().decode(\"cp1250\")\n\n course_count = 0\n for l in list(csv.reader(contents.splitlines(), delimiter=\";\"))[1:]:\n uid, _, code, name, day, start, self, dur, _, _, _, weeks, teacher = l\n\n teacher = format_teacher(teacher)\n\n # ATTENTION: watch out for 'and's here\n # in order for the code not to crash, they do the following:\n # '' and x -> ''\n # 'something' and x -> x\n out = {\n \"teacher\": {\"name\": teacher},\n \"classroom\": {\"number\": self},\n \"time\": {\n \"day\": day and WD_EN[int(day) - 1].capitalize(),\n \"start\": start and int(start), # TODO HH:MM formatting\n \"end\": start and int(start) + int(dur), # TODO HH:MM formatting\n \"weeks\": \"even\"\n if weeks == \"sude\"\n else \"odd\"\n if weeks == \"liche\"\n else \"\",\n },\n \"code\": code,\n }\n\n # don't print empty dictionary parts\n recursive_dictionary_clear(out)\n\n # create a basic abbreviation from taking first letters of each word\n abbreviation = \"\".join(\n [\n word[0].upper()\n if word[0].isalpha() or word[0].isdigit()\n else \"\"\n for word in name.split()\n ]\n )\n\n # create the directory with the name of the course\n course_dir = os.path.join(cwd, f\"{name} ({abbreviation})\")\n if not os.path.exists(course_dir):\n os.mkdir(course_dir)\n\n # lecture / lab\n # based on the ID of the SIS ticket - labs end with x** and lectures with p*\n course_type = \"přednáška\" if uid[:-1].endswith(\"p\") else \"cvičení\"\n\n if not os.path.exists(os.path.join(course_dir, course_type)):\n os.mkdir(os.path.join(course_dir, course_type))\n\n with open(os.path.join(course_dir, course_type, \"info.yaml\"), \"w\") as f:\n yaml.dump(out, stream=f, allow_unicode=True)\n\n course_count += 1\n\n exit_with_success(f\"New semester with {course_count} courses initialized.\")",
"def __init__(self, directory):\n self.directory = directory\n self.files_summary = self.analyze_files()",
"def __init__(self, log_file, csv_file, data_type):\n self.logfile = log_file\n self.csvfile = csv_file\n self.type = data_type",
"def process_csv(csv_file=None):\n if csv_file:\n _process_csv_data(csv_file, USER_DATA_MAP)\n else:\n csv_files_list = [os.path.join(DATA_DIR, f) for f in os.listdir(DATA_DIR) if f.endswith('.csv')]\n for fl in csv_files_list:\n _process_csv_data(fl, USER_DATA_MAP)\n return USER_DATA_MAP"
]
| [
"0.6246445",
"0.6127223",
"0.6095094",
"0.60938084",
"0.6007543",
"0.59912115",
"0.58670706",
"0.5842792",
"0.5808693",
"0.57927597",
"0.5757253",
"0.57462543",
"0.57432365",
"0.57250065",
"0.57250065",
"0.56792647",
"0.56714016",
"0.5650973",
"0.5606621",
"0.5605827",
"0.5578337",
"0.5544328",
"0.5538428",
"0.5533052",
"0.5530666",
"0.55140454",
"0.55109286",
"0.55030197",
"0.5501575",
"0.54867834"
]
| 0.61461604 | 1 |
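A hedged, standalone distillation of the existence check the constructor above performs before parsing. The prefix values are placeholders, since the record never shows what HOST_FILE_PREFIX and VM_FILE_PREFIX actually are.

```python
import logging
import sys
from os.path import basename, isfile


def check_experiment_dir(expdir, prefixes=("HOST", "VM")):
    """Build and verify the per-prefix CSV paths of a monextract-style result directory."""
    expid = basename(expdir.rstrip("/"))
    paths = {p: "%s/%s_%s.csv" % (expdir, p, expid) for p in prefixes}
    for path in paths.values():
        if not isfile(path):
            logging.error("%s doesn't exist. Aborted." % path)
            sys.exit(1)
    return paths
```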
Implement the DataSource.get_hosts() method. The returned host names are sorted in alphabetical order. | def get_hosts(self):
return sorted(self.host_data.keys()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_hosts(self):\n\n raise NotImplementedError",
"def getHosts(self):\n raise \"not implemented\"",
"def hosts(self) -> t.List[str]:\n if not self._hosts:\n self._hosts = self._get_db_hosts()\n return self._hosts",
"def host_names(self):\n resp = self._cmd(uri = '/jenkins_hosts')\n names = []\n for item in resp.get('hosts'):\n names.append(item.get('host_name'))\n return sorted(names)",
"def hosts(self):\n return self._hosts",
"def hosts(self):\n return self._hosts",
"def get_hosts(self):\n\n hosts = self.client.service.getHosts()\n return hosts",
"def get_allhosts():\n connection, tablename = HomeNetwork.get_connection_info()\n query = 'SELECT hostname from {}'.format(tablename)\n output = pandas.read_sql_query(query, connection).to_json(orient='records')\n\n for host in json.loads(output):\n yield host[\"hostname\"]",
"def hosts(self):\n return tuple(self.hosts_)",
"def hostnames(self) -> Sequence[str]:\n return pulumi.get(self, \"hostnames\")",
"def all_hosts(self):\n ...",
"def get_hosts(self):\n if self._scanned:\n return self._scanner.all_hosts()\n else:\n raise ScannerError(\"ERROR: A scan has not yet been conducted!\")",
"def hosts(self):\n return HostsTable(self.rpc, self.name)",
"def all_hosts(self):\n if not 'scan' in list(self._scan_result.keys()):\n return []\n listh = list(self._scan_result['scan'].keys())\n listh.sort()\n return listh",
"def hosts(self):\n\n return self._get_list_field(\"hosts\", lambda x: HostSettingContext(x))",
"def hosts(self):\n\n return self._get_list_field(\"hosts\", lambda x: HostSettingContext(x))",
"def hosts(self) -> List[str]:\n if self.head_host:\n return [self.head_host]\n else:\n return [replica.host for replica in self.pod_args['pods'][0]]",
"def hostnames(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"hostnames\")",
"def hosts(self) -> dict:\n return self._hosts",
"def hostgroup_list(self):\n return self.ezx.get_hostgroup_list()",
"def hosts(self, hosts):\n return self._set_list_field(\"hosts\", hosts)",
"def mon_hosts(self, mon_ips):\n hosts = []\n for ceph_addrs in mon_ips:\n # NOTE(jamespage): This looks odd but deals with\n # use with ceph-proxy which\n # presents all monitors in\n # a single space delimited field.\n for addr in ceph_addrs.split(' '):\n hosts.append(ch_ip.format_ipv6_addr(addr) or addr)\n hosts.sort()\n return hosts",
"def hostnames(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"hostnames\")",
"def get_hosts(self, filename):\n\n data = parse_inventory(filename)\n\n for host in data['routers']['hosts']:\n self.router_names.append(str(host))\n for host in data['brokers']['hosts']:\n self.broker_names.append(str(host))",
"def list(self, **kwargs):\n\n return self.getResourceManager() \\\n .getSdk() \\\n .hosts \\\n .list(**kwargs)",
"def get_etc_hostnames():\n with open(\"/etc/hosts\", \"r\") as f:\n hostlines = f.readlines()\n hostlines = [\n line.strip()\n for line in hostlines\n if not line.startswith(\"#\") and line.strip() != \"\"\n ]\n hosts = []\n for line in hostlines:\n hostnames = line.split(\"#\")[0].split()[1:]\n hosts.extend(hostnames)\n return hosts",
"def hosts(self):\n hosts = set()\n for p, c in self.configs_:\n hosts.update(c.hosts())\n return tuple(hosts)",
"def hosts(self):\n loc = self.caller.location\n if loc.ndb.event_line_hosts is None:\n loc.ndb.event_line_hosts = []\n loc.ndb.event_line_hosts = [\n ob for ob in loc.ndb.event_line_hosts if ob.location == loc\n ]\n return loc.ndb.event_line_hosts",
"def Hosts(self):\n if not self._hosts:\n hs = self._get_objects(vim.HostSystem)\n for h in hs:\n self._hosts[h.name] = h\n return self._hosts",
"def get_hostkey_list(self):\n return self.hostkey"
]
| [
"0.81791395",
"0.785863",
"0.77301955",
"0.74106723",
"0.7362429",
"0.7362429",
"0.7345172",
"0.7278661",
"0.7235248",
"0.72335577",
"0.7223325",
"0.72067803",
"0.71669626",
"0.7105632",
"0.7082958",
"0.7082958",
"0.69979745",
"0.69384414",
"0.6874702",
"0.68602747",
"0.6834984",
"0.680843",
"0.68062663",
"0.6727007",
"0.67251116",
"0.6721806",
"0.67202497",
"0.66135854",
"0.6584363",
"0.6545288"
]
| 0.82562816 | 0 |
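The one-line implementation above is easy to exercise. A hedged usage sketch with invented host names, showing the alphabetical ordering it guarantees:

```python
class HostSource:
    """Toy stand-in for the data-source class in the record above."""

    def __init__(self, host_data):
        self.host_data = host_data

    def get_hosts(self):
        # Plain lexicographic (alphabetical) ordering of the host names.
        return sorted(self.host_data.keys())


src = HostSource({"hostB": {}, "hostA": {}, "hostC": {}})
print(src.get_hosts())  # ['hostA', 'hostB', 'hostC']
```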
Implement the DataSource.get_vms() method. VM names in the CSV files follow the "vm_" naming format. The returned VM names are sorted in integer id order. | def get_vms(self):
vms = [v for v in self.vm_data.keys()]
vms.sort(key=lambda x: int(x[3:]))  # numeric sort on the id after the "vm_" prefix (key-based; Python 2's cmp-style comparator removed)
return vms | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_vms(self):\n\n raise NotImplementedError",
"def get_vms(self, user=None, count=None):\n crit = dict()\n if count is not None:\n crit['count'] = count\n s = self._NDL_API('getvms', crit, user)\n if len(s) == 0:\n return []\n ips = s.split(',')\n # if the localhost's IP is in the list, move it to the front\n localips = getmyips()\n for i in range(len(ips)):\n if ips[i] in localips:\n x = ips[i]\n del ips[i]\n return [ x, ] + ips\n # otherwise order does not matter?\n return ips",
"def pre_process_file(filename):\n\n num_lines = 0\n vm_ids = set()\n with open(filename) as trace:\n for item in csv.reader(trace, delimiter=','):\n num_lines += 1\n disk_id = int(item[2])\n vm_ids.add(disk_id) # can make it more efficient\n no_of_vms = len(vm_ids)\n return (num_lines, no_of_vms, vm_ids)",
"def getListOfVMs(name = None,selector = None):\n if name:\n vms = rhevGet(\"/api/vms?search=name%3D*\" + name + \"*\")\n else:\n vms = rhevGet(\"/api/vms\")\n doc = libxml2.parseDoc(vms)\n ctxt = doc.xpathNewContext()\n res = ctxt.xpathEval(\"/vms/vm[cluster/@id='\"+ getClusterData(rhev_settings.CLUSTER ,\"id\") + \"']\")\n if not selector:\n for vm in res:\n print \"Name %s\\t\\t\\t\\t\\tID: %s\"%(vm.firstElementChild().get_content(),vm.prop(\"id\"))\n return None\n vms = []\n for v in res:\n vm = {}\n vm[\"name\"] = v.firstElementChild().get_content()\n vm[\"id\"] = v.prop(\"id\")\n vms.append(vm)\n return vms",
"def get_all_vms(self):\n available_servers = self.connection.compute.servers()\n if available_servers:\n vm_names = [server.name for server in available_servers]\n return vm_names\n else:\n return []",
"def list_vm_impl(**kwargs: Any) -> None:\n try:\n config = configuration.create_transient_list_vm_config(kwargs)\n except configuration.CLIArgumentError as e:\n print(e, file=sys.stderr)\n sys.exit(1)\n\n instances = scan.find_transient_instances(\n name=config.name, with_ssh=config.with_ssh, timeout=None\n )\n if len(instances) == 0:\n print(\"No running VMs found matching criteria\", file=sys.stderr)\n sys.exit(1)\n\n print(scan.format_instance_table(instances))\n sys.exit(0)",
"def get_all_vms_import_info(provider, vms_to_import):\n vms_import_info = []\n hosts_up = filter(\n lambda host: ll_hosts.is_host_up(positive=True, host=host),\n config.HOSTS\n )\n schedule_hosts = itertools.cycle(hosts_up)\n for vms in vms_to_import:\n vms_import_info.append(\n set_import_data(\n vm_name_on_provider=vms[0],\n new_vm_name=vms[1],\n windows_drivers=vms[2],\n host=schedule_hosts.next(),\n provider=provider\n )\n )\n logger.info(\"VMs import data: %s\", vms_import_info)\n return vms_import_info",
"def find_vms(self, name):\n script = (\n 'Get-SCVirtualMachine -Name \\\"{}\\\" -VMMServer $scvmm_server')\n data = self.get_json(script.format(name))\n # Check if the data returned to us was a list or 1 dict. Always return a list\n if not data:\n return []\n elif isinstance(data, list):\n return [SCVirtualMachine(system=self, raw=vm_data) for vm_data in data]\n return [SCVirtualMachine(system=self, raw=data)]",
"def list(self):\n return self._list('/os-psvm', 'psvms')",
"def get_videos(self):\n\n videos = []\n with open(self.filename, newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=' ', quotechar='|')\n for row in reader:\n for col in row:\n videos.append(col)\n videos = list(filter(None, list(set(videos))))\n return videos",
"def _vm_templates(self, vm, log=None):\n vm_kwargs = self._vm_kwargs(vm)\n tids = self._get_templates(vm_kwargs, django_settings._MON_ZABBIX_TEMPLATES_VM, log=log)\n tids.update(self._get_vm_nic_templates(vm, vm_kwargs, django_settings._MON_ZABBIX_TEMPLATES_VM_NIC, log=log))\n tids.update(self._get_vm_disk_templates(vm, vm_kwargs, django_settings._MON_ZABBIX_TEMPLATES_VM_DISK, log=log))\n\n return tids",
"def _get_vmware_source_ids(name, vm_list):\n cohesity_client = _get_client()\n source_id_list = []\n parent_id = -1\n log.info(\"Fetching Vcenter and Vm ids\")\n try:\n result = cohesity_client.protection_sources.list_protection_sources_root_nodes(\n environments=env_enum.K_VMWARE)\n for each_source in result:\n endpoint = each_source.registration_info.access_info.endpoint\n v_name = each_source.protection_source.name\n\n # Check for both endpoint and source name.\n if name in [endpoint, v_name]:\n parent_id = each_source.protection_source.id\n if parent_id == -1:\n log.error(\"Vcenter %s not available in the cluster\", name)\n vms = cohesity_client.protection_sources.list_virtual_machines(\n v_center_id=parent_id, names=vm_list)\n vm_names = copy.deepcopy(vm_list)\n for vm in vms:\n vm_names.remove(vm.name)\n source_id_list.append(vm.id)\n if vm_names:\n log.error(\n \"Following list of vms '%s' are not available in vcenter, \"\n \"please make sure the virtual machine names are correct\",\n \",\".join(vm_names),\n )\n return parent_id, source_id_list\n except APIException as err:\n log.error(err)\n return -1, []",
"def getVMs(**kwargs):\n proxy_url = kwargs[\"proxy\"]\n session_token = kwargs[\"sessiontoken\"]\n\n json_response = get_vms_json(proxy_url, session_token)\n\n if json_response == None:\n sys.exit(1)\n\n extracted_dictionary = json_response['results']\n table = PrettyTable(['Display_Name', 'Status', 'External_ID'])\n for i in extracted_dictionary:\n table.add_row([i['display_name'], i['power_state'], i['external_id']])\n print(\"Virtual Machine List:\")\n print(table)",
"def get_all_vulnerable_vm_id(self, vm_id: str) -> List[str]:\n if not self._validated:\n raise VmAttackServiceSetUpException('First you should call \"set_cloud_environment\" successfully')\n\n # Find this vm index\n attacker_vm_index = self._get_vm_index(vm_id)\n\n # Go through the graph\n indexes = self._get_all_accessible_vertex_from_index(attacker_vm_index)\n\n # Return only vm ids\n return [self._vms[index]['vm_id'] for index in indexes]",
"def get_vm_list(self):\n\t\treturn Job(SDK.PrlSrv_GetVmList(self.handle)[0])",
"def guest_list(file):\n with open(file) as f:\n reader = csv.reader(f)\n next(reader) # skip header\n names = []\n emails = []\n for row in reader:\n names.append(row[0])\n emails.append(row[1])\n return names, emails",
"async def get_vms(self) -> List[CachingVirtualMachine]:\n return await self._vm_fetcher.get_vms()",
"def get_all_vms_in_pool(self, name, dc, cluster):\n rp = self.get_pool(name, dc, cluster)\n return [vm for vm in rp.vm]",
"def _list_vlans_by_name(self, name):\r\n results = self.list_vlans(name=name, mask='id')\r\n return [result['id'] for result in results]",
"def get_running_vms():\n output = subprocess.Popen(['VBoxManage', 'list', 'runningvms'], stdout=subprocess.PIPE).communicate()[0]\n vms = []\n if output is not None:\n lines = output.split('\\n')\n for line in lines:\n pattern = re.compile(r'.*{(.*)}')\n match = pattern.match(line)\n print(match)\n if match:\n\t\t#print(\"got a match: \" + match.group(0))\n vms.append(line)\n return vms",
"def test_list_vips_sort(self):\r\n resources = \"vips\"\r\n cmd = vip.ListVip(test_cli20.MyApp(sys.stdout), None)\r\n self._test_list_resources(resources, cmd,\r\n sort_key=[\"name\", \"id\"],\r\n sort_dir=[\"asc\", \"desc\"])",
"def test_list_vips_sort(self):\n resources = \"vips\"\n cmd = vip.ListVip(test_cli20.MyApp(sys.stdout), None)\n self._test_list_resources(resources, cmd,\n sort_key=[\"name\", \"id\"],\n sort_dir=[\"asc\", \"desc\"])",
"def vm_by_datastore(cls, container, cluster, datastore_name):\n obj = Query.get_obj(container, cluster)\n vms = []\n if hasattr(obj, 'datastore'):\n for datastore in obj.datastore:\n if datastore.name == datastore_name:\n for virtual_machine in datastore.vm:\n vms.append(virtual_machine.name)\n return sorted(vms)",
"def get_ids_from_web_table(self, path_file: str = None) -> List[str]:\n if path_file is None:\n path_file = \"_Materials Project.csv\"\n ids_data = pd.read_csv(path_file)\n ids = ids_data[\"Materials Id\"]\n self.ids = ids\n return ids",
"def get_all_vms(self):\n for folder in self.get_first_level_of_vm_folders():\n for vm in get_all_vms_in_folder(folder):\n yield vm",
"def _list_pool_vm(args):\n _logger.debug('_list_pool_vm')\n #\n #\n _data_struct = {'name': {'head': 'Name', 'func': 'name', 'type': 'str'},\n 'uuid': {'head': 'UUID', 'func': 'UUIDString', 'type': 'str'},\n 'autostart': {'head': 'Autostart', 'func': 'autostart', 'type': 'yesno', 'convert': get_yesno},\n 'active': {'head': 'Active', 'func': 'isActive', 'type': 'yesno', 'convert': get_yesno},\n 'persistent': {'head': 'Persistent', 'func': 'isPersistent', 'type': 'yesno', 'convert': get_yesno},\n 'volumes': {'head': 'Volumes', 'func': 'numOfVolumes', 'type': 'int'},\n 'state': {'head': 'State', 'func': 'info', 'type': 'list', 'index': 0, 'convert': get_pool_state},\n 'capacity': {'head': 'Capacity', 'func': 'info', 'type': 'list', 'index': 1, 'convert': format_size},\n 'allocation': {'head': 'Allocation', 'func': 'info', 'type': 'list', 'index': 2, 'convert': format_size},\n 'available': {'head': 'Available', 'func': 'info', 'type': 'list', 'index': 3, 'convert': format_size},\n 'type': {'head': 'Type', 'func': None, 'type': 'str'}\n }\n #\n # get the pools\n _sps_fs, _sps_netfs = _get_pools()\n _sps = _sps_fs + _sps_netfs\n if len(_sps) == 0:\n _logger.info('No pools found.')\n return\n #\n # initialise the column widths\n _data_struct = initalise_column_lengths(_data_struct)\n #\n # column cantains only 'fs' or 'net fs'\n _data_struct['type']['len'] = 6\n #\n # format data and determine optimal length of fields.\n pool_data = list()\n for _sp in _sps:\n _sp_data = dict()\n for key, value in _data_struct.items():\n value_data = get_value_data(_sp, _data_struct[key])\n _sp_data[key] = value_data[0]\n val_length = value_data[1]\n _data_struct[key]['collen'] = max(val_length, _data_struct[key]['collen'])\n _sp_data['type'] = 'fs' if _sp in _sps_fs else 'net fs'\n pool_data.append(_sp_data)\n #\n # compose data\n _title = 'VM pool Information:'\n _columns = list()\n for key, value in _data_struct.items():\n _columns.append([value['head'], value['collen']+2, key])\n #\n printerKlass = get_row_printer_impl(args.output_mode)\n printer = printerKlass(title=_title, columns=_columns)\n printer.printHeader()\n #\n # print\n for _sp in pool_data:\n printer.rowBreak()\n printer.printRow(_sp)\n printer.printFooter()\n printer.finish()\n return",
"def loadCSVSeeds(self, csvFilePath):\n labels = []\n with open(csvFilePath) as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n for row in reader:\n labels.append([row[0], row[1], [float(row[2]), float(row[3]), float(row[4]) ]])\n print(csvFilePath + \": labels loaded\")\n return labels",
"def _get_vm_ids_and_names_dict(self):\r\n vm_ids = {}\r\n vm_names = {}\r\n\r\n for content in self.content:\r\n if content['type'].lower() in ('vm', 'virtual machine'):\r\n vm_ids[content['id']] = content['display_name']\r\n vm_names[content['display_name']] = content['id']\r\n\r\n return vm_ids, vm_names",
"def ListVms(content):\n\n listofvms = []\n children = content.rootFolder.childEntity\n for child in children: # Iterate though DataCenters\n dc = child\n #data[dc.name] = {} # Add data Centers to data dict\n clusters = dc.hostFolder.childEntity\n for cluster in clusters: # Iterate through the clusters in the DC\n # Add Clusters to data dict\n #data[dc.name][cluster.name] = {}\n hosts = cluster.host # Variable to make pep8 compliance\n for host in hosts: # Iterate through Hosts in the Cluster\n hostname = host.summary.config.name\n # Add VMs to data dict by config name\n #data[dc.name][cluster.name][hostname] = {}\n vms = host.vm\n for vm in vms: # Iterate through each VM on the host\n listofvms.append(vm.summary.config.name)\n return listofvms",
"def get_source_vectors(testsmells):\n\n for testsmell in testsmells:\n df = pd.read_csv('data/' + testsmell + '_data.csv')\n df['Vector'] = ''\n\n repnames = df['App'].unique().tolist()\n for repname in repnames:\n print('Processing project \\'' + repname + '\\' for ' + testsmell + '...')\n currdf = df[df['App'] == repname]\n repo = Repo('repositories/' + repname)\n vectors = []\n \n # Get the vectors for each Java file in the dataframe\n for _, row in tqdm(list(currdf.iterrows())): \n try:\n repo.git.checkout(row['CommitSHA'], force=True)\n file_path = 'repositories/' + repname + '/' + row['RelativeTestFilePath']\n vectors.append(get_vector(file_path))\n except GitCommandError as err:\n print('Failed for ' + row['App'] + ':' + row['CommitSHA'])\n print(err)\n vectors.append('')\n \n df.loc[df['App'] == repname, 'Vector'] = vectors # Set the vectors on the dataframe\n \n filename = 'data/' + testsmell + '_vectors.csv'\n df.to_csv(filename, index=False)"
]
| [
"0.5869199",
"0.5651854",
"0.55376905",
"0.551105",
"0.5461269",
"0.54513437",
"0.544437",
"0.5337419",
"0.5264466",
"0.52439404",
"0.5195252",
"0.5182719",
"0.51786023",
"0.51264375",
"0.5096237",
"0.5057847",
"0.5049899",
"0.503902",
"0.5005499",
"0.49837598",
"0.49673596",
"0.49368492",
"0.49269098",
"0.4910862",
"0.48971912",
"0.48591104",
"0.48068538",
"0.47750646",
"0.47705436",
"0.47570977"
]
| 0.62985337 | 0 |
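A hedged usage sketch for the ordering above, with made-up VM names. The point of the integer-id sort is that plain lexicographic sorting would misplace names such as "vm_10":

```python
vm_data = {"vm_10": {}, "vm_2": {}, "vm_1": {}}


def get_vms(vm_data):
    # Numeric ordering by the id that follows the three-character "vm_" prefix.
    return sorted(vm_data, key=lambda name: int(name[3:]))


print(get_vms(vm_data))        # ['vm_1', 'vm_2', 'vm_10']
print(sorted(vm_data.keys()))  # ['vm_1', 'vm_10', 'vm_2'] (lexicographic, the ordering to avoid)
```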
Parse a datetime string and return seconds since the epoch. | def parse_timestr(self, timestr):
epoch = datetime.datetime(1970, 1, 1, 0, 0, 0, 0, tzutc())
return int((parsedate(timestr) - epoch).total_seconds()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_time(s):\n\n dt = dateutil.parser.parse(s)\n# epoch_time = int((dt - datetime(1970, 1, 1, tzinfo=timezone.utc)).total_seconds())\n epoch_time = int(dt.replace(tzinfo=timezone.utc).timestamp())\n\n return epoch_time",
"def datumToSeconds(timestr):\n return (datetime.datetime(int(timestr.split(\"-\")[0]), \n int(timestr.split(\"-\")[1]), \n int(timestr.split(\"-\")[2])) - datetime.datetime(1970,1,1)).total_seconds()",
"def date_to_seconds(self, date_str):\r\n # get epoch value in UTC\r\n epoch = datetime.datetime.utcfromtimestamp(0).replace(tzinfo=pytz.utc)\r\n # parse our date string\r\n d = dateparser.parse(date_str)\r\n # if the date is not timezone aware apply UTC timezone\r\n if d.tzinfo is None or d.tzinfo.utcoffset(d) is None:\r\n d = d.replace(tzinfo=pytz.utc)\r\n\r\n # return the difference in time\r\n return int((d - epoch).total_seconds())",
"def parse_timestamp(input_time: str) -> int:\n ts = int(float(input_time)) # either integer or real UNIX epoch time\n # milliseconds\n if 1e12 < ts < 1e13:\n return round(ts / 1000) * 1000 # round to second\n # seconds\n if 1e9 < ts < 1e10:\n return ts * 1000\n print(f'ERROR: {input_time} is neither in epoch milliseconds or seconds.')\n raise ValueError(\"{input_time} is not a timestamp\")",
"def date_to_seconds(date_str):\n # get epoch value in UTC\n epoch = datetime.utcfromtimestamp(0).replace(tzinfo=pytz.utc)\n\n # parse our date string\n d = dateparser.parse(date_str)\n # if the date is not timezone aware apply UTC timezone\n if d.tzinfo is None or d.tzinfo.utcoffset(d) is None:\n d = d.replace(tzinfo=pytz.utc)\n\n # return the difference in time\n return int((d - epoch).total_seconds())",
"def parse_time(value):\n # allow an empty value\n if value == '':\n return 0\n\n # seconds should be always 0\n # minutes should be always 1 ecc.\n parts = value.split(':')\n if len(parts) > 4:\n raise ValueError('The provided time does not respect the supported formats: SS, MM:SS, HH:MM:SS, DD:HH:MM:SS.')\n\n parts.reverse()\n seconds = float(parts[0])\n\n # minutes (mm:ss)\n if len(parts) > 1:\n seconds += int(parts[1]) * 60\n\n # hours (hh:mm:ss)\n if len(parts) > 2:\n seconds += float(parts[2]) * 3600\n\n # days (dd:hh:mm:ss)\n if len(parts) > 3:\n seconds += float(parts[3]) * 86400\n\n return seconds",
"def getSecondsFromStringDateTime(date_time):\n split_date_time = date_time.split(Constants.record_name_sep)\n if len(split_date_time) == 5:\n for value in split_date_time:\n if not isNumber(value):\n return Constants.return_value_invalid_datetime_value\n result = int(split_date_time[0]) * 31622400 # Year in seconds\n result += int(split_date_time[1]) * 2592000 # Month in seconds\n result += int(split_date_time[2]) * 86400 # Day in seconds\n result += int(split_date_time[3]) * 3600 # Hour in seconds\n result += int(split_date_time[4]) * 60 # Minute in seconds\n return result\n else:\n return Constants.return_value_invalid_datetime_value",
"def parse_date(input_time: str) -> int:\n return int(dateutil.parser.parse(input_time).timestamp() * 1000)",
"def parse_time(s):\n return time.gmtime(float(s))",
"def convert_time_to_seconds(self, time_value):\n time_epoch = []\n mylog.debug('Converting %s to epoch time' % time_value)\n for value in time_value:\n try:\n pattern = ' %I:%M:%S%p'\n time_epoch_mini = int(time.mktime(time.strptime(value, pattern))) \n time_epoch.append(time_epoch_mini)\n except:\n mylog.debug('%s Does not seem to be in format with leading space' % value)\n try:\n pattern = '%I:%M:%S%p'\n time_epoch_mini = int(time.mktime(time.strptime(value, pattern))) \n time_epoch.append(time_epoch_mini)\n except:\n mylog.debug('%s Does not appear to be in format without leading space' % value)\n return time_epoch",
"def str_to_epoch(dt: Union[str, datetime]) -> int:\n return int(str_to_datetime(dt).timestamp())",
"def parse_to_timestamp(s):\n return __date_to_millisecond_ts(parse_input(s))",
"def get_sec(time_str):\n h, m, s = time_str.split(':')\n return int(h) * 3600 + int(m) * 60 + int(float(s))",
"def epoch_seconds(date):\r\n td = date - epoch\r\n return td.days * 86400 + td.seconds + (float(td.microseconds) / 1000000)",
"def epoch_seconds(date):\n td = date - epoch\n return td.days * 86400 + td.seconds + (float(td.microseconds) / 1000000)",
"def epoch_seconds(date):\n td = date - epoch\n return td.days * 86400 + td.seconds + (float(td.microseconds) / 1000000)",
"def time2secs( s ):\n t = s.split( ':' )\n nf = len( t )\n if nf == 1:\n # Seconds only!\n secs = int( t[0] )\n elif nf == 2:\n # Minutes & seconds!\n secs = int( t[1] ) + int( t[0] ) * 60\n elif nf == 3:\n # Hours, minutes & seconds!\n secs = int( t[2] ) + int( t[1] ) * 60 + int( t[0] ) * 60 * 60 \n elif nf == 4:\n # Days, hours, minutes, & seconds!\n secs = int( t[3] ) + int( t[2] ) * 60 + int( t[1] ) * 60 * 60\n secs += int( t[0] ) * 60 * 60 * 24\n\n return secs",
"def get_sec(time_str):\n h, m, s = time_str.split(':')\n return int(h) * 3600 + int(m) * 60 + int(s)",
"def get_sec(time_str):\n h, m, s = time_str.split(':')\n return int(h) * 3600 + int(m) * 60 + int(s)",
"def time_to_int(str_time):\n dt = time.mktime(\n datetime.datetime.strptime(str_time, \"%Y-%m-%dT%H:%M:%S\").timetuple()\n )\n return dt",
"def str_to_seconds(tstring):\n if tstring.endswith('m'):\n secs = 60 * int(tstring.replace('m', ''))\n elif tstring.endswith('h'):\n secs = 60 * 60 * int(tstring.replace('h', ''))\n elif tstring.endswith('d'):\n secs = 24 * 60 * 60 * int(tstring.replace('d', ''))\n elif tstring.endswith('y'):\n secs = 365 * 24 * 60 * 60 * int(tstring.replace('y', ''))\n else:\n secs = 60 * int(tstring)\n if secs < 0:\n secs = -1\n\n return secs",
"def _hx_time_to_epoch(self, timestr: str) -> int: # pragma: no cover\n\n time_obj = datetime.datetime.strptime(timestr, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n\n return int(time_obj.strftime(\"%s\"))",
"def parse_timestamp(ts_str):\n dt = dateutil.parser.parse(ts_str)\n return (time.mktime(dt.timetuple()) + dt.microsecond/1000000.0)",
"def parse_time(time_string):\n minutes = re.search(r\"(\\d+) minutes\", time_string)\n if minutes:\n minutes = int(minutes.groups()[0])\n else:\n minutes = 0\n\n seconds = re.search(r\"(\\d+\\.\\d+) seconds\", time_string)\n if seconds:\n seconds = float(seconds.groups()[0])\n else:\n seconds = 0.0\n\n seconds += minutes * 60\n\n return seconds",
"def _dateToSecs(date_str):\n if date_str not in [\"\", None]:\n timestamp = datetime.datetime.strptime(date_str,\n \"%Y-%m-%dT%H:%M:%SZ\")\n delta = timestamp - datetime.datetime(1970, 1, 1)\n return (delta.days*24*3600) + delta.seconds\n else:\n return None",
"def epoch2datetime(t):\n return datetime.fromtimestamp(t/1000.0)",
"def parse_time_interval_seconds(time_str):\n cal = parsedatetime.Calendar()\n parse_result = cal.parseDT(time_str, sourceTime=datetime.min)\n if parse_result[1] == 0:\n raise ValueError(\"Could not understand time {time}\".format(time=time_str))\n return (parse_result[0] - datetime.min).total_seconds()",
"def ParseDatetimeIntoSecs(dom, tag):\n el = dom.getElementsByTagName(tag)\n if not el:\n return None\n assert el[0].getAttribute('type') == 'datetime'\n data = el[0].firstChild.data\n\n # Tracker emits datetime strings in UTC or GMT.\n # The [:-4] strips the timezone indicator\n parsable_date=(\"{}\".format(data[:-4])).strip()\n when = time.strptime(parsable_date, '%Y/%m/%d %H:%M:%S')\n # calendar.timegm treats the tuple as GMT\n return calendar.timegm(when)",
"def seconds_since_epoch(date_time, epoch=None):\n return microseconds_since_epoch(date_time) / 10.0**6",
"def parse_timestamp(ts):\n m = re.match('(?P<Y>\\d\\d\\d\\d)(?P<M>\\d\\d)?(?P<D>\\d\\d)?' +\n '(?P<HM>\\d\\d\\d\\d)?(?P<S>\\d\\d)?(?P<MS>\\.\\d+)?' +\n '(?P<Z>[\\+\\-]\\d\\d\\d\\d)?(?P<P>\\^\\d+)?', ts)\n if m:\n year = int(m.group('Y'))\n month = int((m.group('M') or 0))\n day = int((m.group('D') or 0))\n if m.group('HM'):\n hour = int(m.group('HM')[0:2])\n minute = int(m.group('HM')[2:])\n else:\n hour = minute = 0\n seconds = int((m.group('S') or 0))\n if m.group('MS'):\n millis = int(m.group('MS')[1:]) * 100000\n else:\n millis = 0\n # This raises ValueError on bad input\n return datetime.datetime(year, month, day, hour,\n minute, seconds, millis,\n tzinfo=datetime.timezone.utc)\n else:\n raise ValueError('invalid format (%s) for timestamp' % ts)"
]
| [
"0.76526344",
"0.7463628",
"0.682279",
"0.6693088",
"0.6685308",
"0.65293956",
"0.6505844",
"0.6494147",
"0.6421463",
"0.64137083",
"0.63933265",
"0.63811636",
"0.6282634",
"0.62804013",
"0.62677103",
"0.62677103",
"0.6256727",
"0.6202523",
"0.6202523",
"0.6192253",
"0.6180091",
"0.6156771",
"0.61490846",
"0.60518444",
"0.6040334",
"0.6032942",
"0.60087055",
"0.5978873",
"0.5975129",
"0.5927121"
]
| 0.76556337 | 0 |
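A hedged, self-contained version of the conversion above with its imports written out. dateutil is assumed because tzutc() and parsedate() in the record match dateutil's API; on Python 3.3 and later, parsedate(timestr).timestamp() gives the same value more directly.

```python
import datetime

from dateutil.parser import parse as parsedate
from dateutil.tz import tzutc


def parse_timestr(timestr):
    """Seconds since the Unix epoch for a timezone-aware timestamp string."""
    epoch = datetime.datetime(1970, 1, 1, tzinfo=tzutc())
    return int((parsedate(timestr) - epoch).total_seconds())


print(parse_timestr("1970-01-01T00:01:40+00:00"))  # 100
```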
Plot a graph using the information in a graphinfo object. | def plot_graph(self, graphinfo):
WIDTH = 450
HEIGHT = WIDTH * 0.55
opts = []
# Generate outfile name
if not self.rrdfile:
self.outfiles[graphinfo.name] = self.SKIPPED
return
logging.info("Plotting %s graph for %s" % (graphinfo.name, self.node))
self.outfiles[graphinfo.name] = "%s/%s_%s_%s.png" % (self.topdir,
self.file_prefix,
self.node,
graphinfo.name)
opts = opts + [self.outfiles[graphinfo.name]]
# Generate general image options
opts = opts + ["--width", str(WIDTH),
"--height", str(HEIGHT),
"--slope-mode"]
# Generate title
if graphinfo.title:
opts = opts + ["--title", "%s (%s)" % (graphinfo.title, node)]
# Generate X-axis options
start, end, step = ds.get_time_info()
duration = end - start
mg_step = duration / 10
bg_step = mg_step / 5
label_step = mg_step
if mg_step == 0 or bg_step == 0:
# This is unlikely to happen, but just to be on the safe side.
x_grid = "SECOND:1:SECOND:10:SECOND:10:0:%R"
else:
x_grid = "SECOND:%s:SECOND:%s:SECOND:%s:0:%%R" % \
(bg_step, mg_step, label_step)
opts = opts + ["--start", str(self.start),
"--end", str(self.end),
"--step", str(self.rrdtool_step),
"--x-grid", x_grid]
# Generate Y-axis options
if graphinfo.y_axis_label:
opts = opts + ["--vertical-label", graphinfo.y_axis_label]
if graphinfo.y_axis_min_value == 0 or graphinfo.y_axis_min_value:
opts = opts + ["--lower-limit", str(graphinfo.y_axis_min_value)]
if graphinfo.y_axis_max_value == 0 or graphinfo.y_axis_max_value:
opts = opts + ["--upper-limit", str(graphinfo.y_axis_max_value)]
if graphinfo.y_axis_rigid:
opts = opts + ["--rigid"]
# Generate metric parameters
stack_opt = ""
if graphinfo.stack:
stack_opt = ":STACK"
deflist = []
cdeflist = []
arealist = []
for i in graphinfo.metrics:
name, name_in_graph, unit_in_graph, color = i
if unit_in_graph:
new_unit, rate = unit_in_graph
newname = "%s_%s" % (name, new_unit)
deflist.append("DEF:%s=%s:%s:AVERAGE" %
(name, self.rrdfile, name))
cdeflist.append("CDEF:%s=%s,%s,/" %
(newname, name, rate))
arealist.append("AREA:%s%s:%s%s" %
(newname, color, name_in_graph, stack_opt))
else:
deflist.append("DEF:%s=%s:%s:AVERAGE" %
(name, self.rrdfile, name))
arealist.append("AREA:%s%s:%s%s" %
(name, color, name_in_graph, stack_opt))
opts = opts + deflist + cdeflist + arealist
self.rrdtool_cmd("graph", opts, log_level=logging.DEBUG) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_graph(self) -> None:",
"def _PlotGraph(self, event):\n self._rcvLock.acquire()\n for j in event.data[0].keys():\n data = event.data[0][j]\n #print data\n line = []\n for k in data.keys():\n if k in COLORS.keys():\n c = COLORS[k]\n else:\n c = 'black'\n line.append(plot.PolyLine(data[k], colour=c, width=1,\n legend=\"Node %d\"%(k,)))\n # To draw markers: default colour = black, size = 2\n # shapes = 'circle', 'cross', 'square', 'dot', 'plus'\n #marker = plot.PolyMarker(event.data[1], marker='triangle')\n\n # set up text, axis and draw\n if j == ERRORPLOT:\n t = \"Synchronization Error\"\n xa = \"Time [s]\"\n ya = \"Error [ms]\"\n elif j == TEMPPLOT:\n t = \"Temperature Index\"\n xa = \"Time [s]\"\n ya = \"Index\"\n elif j == SKEWPLOT:\n t = \"Frequency Error\"\n xa = \"Time [s]\"\n ya = \"Frequency Error [ppm]\"\n gc = plot.PlotGraphics(line, t, xa, ya)\n # Draw graphs for each plot\n self.plotter[j].Draw(gc, xAxis=(self._x_lower,\n self._x_upper), yAxis=(float(self._y_lower[j]),\n float(self._y_upper[j])))\n self._rcvLock.release()",
"def plot_graph(self) -> None:\n\n nodes_on_graph = self.dw_graph.get_all_v()\n for k, v in nodes_on_graph.items():\n if v.position is None:\n x_rand = random.uniform(0.5, self.dw_graph.v_size())\n y_rand = random.uniform(0.5, self.dw_graph.v_size())\n v.position = (x_rand, y_rand)\n x_vals = []\n y_vals = []\n n = list(nodes_on_graph.keys())\n for k, v in nodes_on_graph.items(): # draw nodes\n x_vals.append(v.position[0])\n y_vals.append(v.position[1])\n\n fig, ax = plt.subplots()\n plt.plot(x_vals, y_vals, 'ro', markersize=5, data=\"d\")\n\n for p, txt in enumerate(n):\n ax.annotate(n[p], (x_vals[p]+0.00003, y_vals[p]), color='g')\n\n for n in nodes_on_graph:\n n1 = self.dw_graph.get_nodes(n)\n x = n1.position[0]\n y = n1.position[1]\n for r in self.dw_graph.all_out_edges_of_node(n):\n dx = self.dw_graph.get_nodes(r).position[0]\n dy = self.dw_graph.get_nodes(r).position[1]\n ax.quiver(x, y, dx-x, dy-y, angles='xy', scale_units='xy', scale=1)\n #plt.arrow(x, y, dx - x, dy - y, head_width=0.0009, width=0.00005, length_includes_head=True)\n\n\n plt.xlabel(\"x axis \")\n plt.ylabel(\"y axis \")\n plt.title(\"The title of the graph\")\n plt.show()",
"def plot(self, *args, **kwargs):\n pass",
"def plot_graph(self):\n g = self.get_graph()\n plt.title(\"Our graph:\" + g.__str__())\n plt.xlabel(\"X\")\n plt.ylabel(\"-<\") # I should flip 'Y' letter so I decided to write it by a tricky way. :)\n for src, node in g.get_all_v().items():\n # Print the node point\n if node.location is None:\n pos = self.get_random_location() # get a elegant location\n node.location = GeoLocation(pos)\n plt.plot(node.location.x, node.location.y, marker='o', markerfacecolor='red', markersize=3, color='yellow')\n plt.text(node.location.x, node.location.y, str(node.key))\n # Print the edge line\n for dest in g.all_out_edges_of_node(src).keys():\n x1 = g.get_all_v()[src].location.x\n y1 = g.get_all_v()[src].location.y\n if g.get_all_v()[dest].location is None:\n pos = self.get_random_location()\n g.get_all_v()[dest].location = GeoLocation(pos)\n g.get_all_v()[dest].location = GeoLocation(pos)\n x2 = g.get_all_v()[dest].location.x\n y2 = g.get_all_v()[dest].location.y\n plt.arrow(x1, y1, x2 - x1, y2 - y1, width=0.00001, linewidth=0.05)\n plt.show()",
"def plot(self):\n pass",
"def plot(self, ax=..., *, name=..., **kwargs):\n ...",
"def plot_graph():\n name = request.args.get('instance')\n name = str(name)\n distance = request.args.get('distance')\n path = request.args.get('path')\n if name == 'Custom':\n coords = request.args.get('coords')\n coords = str(coords)\n nodes = custom_nodes(coords)\n else:\n nodes = create_nodes(name)\n fig = Figure()\n axis = fig.add_subplot(1, 1, 1)\n\n axis.set_title(name + \" - Distance: \"+ str(distance))\n path = str(path).split(',')\n path = [int(i) for i in path]\n for i in range(len(path) - 1):\n\n start_node = nodes[path[i]]\n x1, y1 = start_node.x, start_node.y\n axis.scatter(x1, y1, c = 'b', label = str(path[i]))\n axis.text(x1,y1, str(path[i]))\n end_node = nodes[path[i+1]]\n x2, y2 = end_node.x, end_node.y\n axis.plot([x1,x2], [y1, y2])\n\n last_node = nodes[path[len(path)-1]]\n x1, y1 = last_node.x, last_node.y\n axis.text(x1,y1, str(path[len(path)-1]))\n\n begin_node = nodes[path[0]]\n x2, y2 = begin_node.x, begin_node.y\n axis.scatter(x1, y1, c = 'b', label = str(path[len(path)-1]))\n axis.plot([x1,x2], [y1, y2])\n\n output = io.BytesIO()\n FigureCanvas(fig).print_png(output)\n return Response(output.getvalue(), mimetype=\"image/png\")",
"def plot(self):\n self.plotsite()\n self.plotbond()\n plt.show()",
"def plot(self, *args, **kwargs):\n raise NotImplementedError",
"def plot():\n pass",
"def show_plot(self):\r\n\t\tself.generate_plot()\r\n\t\tplt.show()",
"def plot(self):\n layout = self.graph.layout(\"kk\")\n bbox = igraph.BoundingBox(600, 600)\n figure = igraph.Plot(bbox=bbox, background=\"white\")\n bbox = bbox.contract(100)\n figure.add(self.graph, layout = layout, bbox=bbox)\n figure.show()",
"def __plot(name, x, y):\n import matplotlib.pyplot as plt\n\n plt.plot(x, y)\n plt.xlabel('elements')\n plt.ylabel('time (seconds)')\n plt.savefig(\"{}\".format(name))",
"def show_graph(g):\r\n net.draw(g,with_labels= True,font_size=16)\r\n plt.show()",
"def show_custom_graph(self):\n pass",
"def plot_data(self):",
"def plot_graph(self, p, graph_type='initial'):\n n = self.n\n self.handles = [] # Reinitialize legend handles\n self.sample = self._get_sample(p)\n self.ax = self.create_figure()\n self.ax.set_title(f'{self.n} x {self.n - 1}-grid, $p = {p}$',\n fontsize=10)\n\n if graph_type == 'initial':\n InitialGraph(self).plot()\n elif graph_type == 'dual':\n DualGraph(self).plot()\n elif graph_type == 'both':\n InitialGraph(self).plot()\n DualGraph(self).plot()\n\n self.ax.legend(handles=self.handles)",
"def plot(self):\n\t\tself.plotOfTF().plot()",
"def plot_graph(self, graph, subplot=False, axes=None):\n if subplot:\n plt.sca(axes[1, 1])\n axes[1, 1].axis('off')\n else:\n plt.figure(figsize=(5, 5))\n if len(graph.nodes) == 4:\n pos = {(0, 0): [0, 1], (0, 1): [1, 1], (1, 0): [0, 0], (1, 1): [1, 0]}\n else:\n pos = nx.circular_layout(graph)\n nx.draw_networkx_nodes(\n graph, pos, node_size=1800, node_color='w', edgecolors='k')\n nx.draw_networkx_edges(\n graph,\n pos,\n node_size=1800,\n edge_color='k',\n arrowstyle='->',\n arrowsize=10,\n width=3)\n nx.draw_networkx_labels(self.G, pos, {x: x for x in self.V}, font_size=14)",
"def plot_graph(self, input_graph, NX_GRAPHS):\n self.dgl_graph = input_graph\n self.NX_GRAPHS = NX_GRAPHS\n \n self.get_nodes()\n color_monomer = self.get_colors()\n \n print(dict(zip(range(len(self.nodes_list)), self.nodes_list)))\n print('Key Monomer is', self.nodes_list[np.argmax(self.node_weights)])\n \n fig, ax = plt.subplots()\n nx.draw_networkx(\n dgl.to_networkx(self.dgl_graph),\n arrows=False,\n node_size = 300*10**self.node_weights,\n node_color = [color_monomer[node] for node in self.nodes_list],\n font_size = 18,\n font_color = 'w',\n font_weight = 'bold',)\n\n plt.axis('off')\n ax.set_xlim([1.2*x for x in ax.get_xlim()])\n ax.set_ylim([1.2*y for y in ax.get_ylim()])\n plt.show()",
"def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()",
"def plotGraph(self, title = \"Multi Layer Perceptron (MLP)\"):\n graph, pos, colorMap = self.getGraph()\n\n fig = plt.figure()\n fig.canvas.set_window_title(\"Neural Network\")\n plt.plot()\n nx.draw_networkx_nodes(graph,pos, node_color = colorMap)\n nx.draw_networkx_edges(graph,pos)\n plt.axis('off')\n plt.title(title)\n #plt.savefig(\"autoencoder.svg\", transparent = True)\n plt.show()",
"def graph(x, y, xlabel = \"\", ylabel = \"\", legend = \"\", color = \"\"):\n plt.plot(x, y, color, label = legend)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.legend(loc = 'best')\n plt.grid()",
"def builtin_plot(self, **kwargs):\n self.gp.plot(**kwargs)\n return",
"def plot_graph(x,y,plot_title = None,plot_props=None,x_props=None,y_props=None,figsize=None):\n import matplotlib.pyplot as plt\n if figsize is not None and ((figsize and type(figsize)) is list or (figsize and type(figsize)) is tuple):\n if len(figsize)==2:\n x_size,y_size = figsize\n fig,ax = plt.subplots(figsize=(x_size,y_size))\n else:\n fig,ax = plt.subplots()\n else:\n fig,ax = plt.subplots()\n if plot_props is not None:\n ax.plot(x,y,plot_props)\n else:\n ax.plot(x,y)\n if plot_title is not None and ((plot_title and type(plot_title)) is dict):\n if 'title' in plot_title.keys():\n if 'fontsize' in plot_title.keys():\n ax.set_title(plot_title['title'],fontsize=plot_title['fontsize'])\n else:\n ax.set_title(plot_title['title'])\n if x_props is not None and ((x_props and type(x_props)) is dict):\n if 'title' in x_props.keys():\n if 'fontsize_title' in x_props.keys():\n ax.set_xlabel(x_props['title'],fontsize=x_props['fontsize_title'])\n else:\n ax.set_xlabel(x_props['title'])\n if 'fontsize_ticks' in x_props.keys():\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(x_props['fontsize_ticks']) \n # specify integer or one of preset strings, e.g.\n #tick.label.set_fontsize('x-small') \n if 'ticks_orientation' in x_props.keys():\n tick.label.set_rotation(x_props['ticks_orientation'])\n if y_props is not None and ((y_props and type(y_props)) is dict):\n if 'title' in y_props.keys():\n if 'fontsize_title' in y_props.keys():\n ax.set_ylabel(y_props['title'],fontsize=y_props['fontsize_title'])\n else:\n ax.set_ylabel(y_props['title'])\n if 'fontsize_ticks' in y_props.keys():\n for tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(y_props['fontsize_ticks']) \n # specify integer or one of preset strings, e.g.\n #tick.label.set_fontsize('x-small') \n if 'ticks_orientation' in y_props.keys():\n tick.label.set_rotation(y_props['ticks_orientation'])\n return fig,ax",
"def _plot_graph(self) -> None:\n ghg_data, bird_data = self._datasets\n model = self._selection.get_model(ghg_data, bird_data)\n model.plot_data('Percent Change in Bird population (from 1970) vs '\n 'Amount of Greenhouse gas produced in a year',\n 'Amount of Greenhouse gas produced in a year (kt)',\n 'Percent Change in Bird population (from 1970)')",
"def visualize_dependency_graph(self):\n if self.graph is None:\n self.logger.error(\"Graph value none cannot be plotted\")\n return\n\n nx.draw(self.graph, cmap=plt.get_cmap('jet'), with_labels=True)\n plt.show()",
"def plotGraph(self, dayArray, commentsArray, upvotesArray, retweetsArray, likesArray):\n self.canvas.get_tk_widget().place(relx=0.219, rely=0.519, relheight=0.389, relwidth=0.352)\n\n # Clears graph before plotting to prevent appending two graphs at once\n self.figure.clear()\n # self.figure.\n plt = self.figure.add_subplot(1, 1, 1)\n x = []\n max_log_size = 5000\n for i in dayArray:\n i = ''.join(i.split())\n i = i[:-5]\n x.append(i)\n\n # now there's 3 sets of points\n yCO = commentsArray\n yUV = upvotesArray\n yRT = retweetsArray\n yLK = likesArray\n\n if max(yCO)>=max_log_size or max(yUV)>=max_log_size or max(yRT)>=max_log_size or max(yLK)>=max_log_size:\n plt.set(yscale=\"log\")\n plt.plot(x, yCO, label='Comments', marker='o', color='red')\n plt.plot(x, yUV, label='Upvotes', marker='o', color='#fa93b0')\n plt.plot(x, yRT, label='Retweets', marker='o', color='#2374f7')\n plt.plot(x, yLK, label='Likes', marker='o', color='#accafa')\n\n plt.legend()\n self.figure.canvas.draw()",
"def graph(df):\n df.plot()\n plt.show()"
]
| [
"0.77446437",
"0.6901284",
"0.6651679",
"0.66496754",
"0.65762436",
"0.6498491",
"0.64938074",
"0.64504176",
"0.64067835",
"0.6402517",
"0.63337934",
"0.6324918",
"0.6258363",
"0.6218747",
"0.620347",
"0.6173851",
"0.6162113",
"0.61547357",
"0.6136123",
"0.6123041",
"0.61205095",
"0.61115545",
"0.60969704",
"0.6088798",
"0.60882926",
"0.60860276",
"0.60820585",
"0.6032299",
"0.60231864",
"0.60080904"
]
| 0.74431366 | 1 |
A wrapper of rrdtool functions, with additional logging function. | def rrdtool_cmd(self, cmd, *args, **kwargs):
fn_table = {"create": rrdtool.create,
"update": rrdtool.update,
"graph": rrdtool.graph}
fn = fn_table[cmd]
cmdline = "rrdtool %s %s" % (cmd,
" ".join([i if isinstance(i, str) else " ".join(i) for i in args]))
log_level = kwargs.get("log_level", None)
if log_level:
# rrdtool command arguments are either string or list of strings.
logging.log(log_level, cmdline)
try:
fn(*args)
except Exception as e:
logging.exception("RRDTool command failed. See stack trace below:")
logging.error("Failed command: %s" % cmdline)
sys.exit(1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def logger():\n return RPLogger('pytest_reportportal.test')",
"def rrd(*args):\n # We have one rrdtool instance per thread\n global _rrdtool\n thisThread = threading.currentThread()\n if not thisThread in _rrdtool:\n _rrdtool[thisThread] = popen2.Popen3(\"rrdtool -\")\n rrdtool = _rrdtool[thisThread]\n\n command = \" \".join(['\"%s\"' % str(arg).replace('\"','') for arg in args])\n rrdtool.tochild.write(command + \"\\n\")\n rrdtool.tochild.flush()\n\n # Collect results until the \"OK\" line\n results = []\n while 1:\n line = rrdtool.fromchild.readline()\n if not line:\n _rrdtool[thisThread] = None\n raise RRDException(\"Unexpected EOF from rrdtool\")\n line = line.strip()\n results.append(line)\n if line.startswith(\"OK \"):\n break\n\n # Look for errors only after receiving everything, so our rrdtool\n # instance stays in sync.\n for line in results:\n if line.startswith(\"ERROR:\"):\n raise RRDException(\"Error %r in command %r\" % (line, command))\n\n return results",
"def util():\n pass",
"def util():\n pass",
"def main(ctx, verbose):\n ctx.ensure_object(dict)\n if verbose:\n log_level = 'DEBUG'\n else:\n log_level = 'INFO'\n\n init_logger('reVX', log_level=log_level)",
"def main():\n custom_logger=Custom_log(logger_name=\"custom_name\",logger_level=logging.DEBUG,console_log=True,console_stream_level=logging.DEBUG,console_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',file_log=False)\n custom_logger.logger.info(\"log this\")\n custom_logger.logger.debug(\"this is debbuging message\")\n custom_logger.logger.error(\"oops something bad happened\")\n custom_logger.logger.critical(\"this will break\")\n custom_logger2=Custom_log(logger_name=\"custom_logger2\",logger_level=logging.DEBUG,console_log=True,console_stream_level=logging.DEBUG,console_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',file_log=True,file_path=\"logs.log\",file_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',file_stream_level=logging.INFO)\n custom_logger2.logger.info(\"first log\")\n #custom_logger.print_all(logger_name=\"custom_name\",logger_level=logging.DEBUG,console_log=True,console_stream_level=logging.INFO,console_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',file_log=False)",
"def _log(fn):\n @wraps(fn)\n def wrapper(self, *args, **kwargs):\n a = fn(self, *args, **kwargs)\n self.log.append(fn.__name__ + ' :: args={} kwargs={}'.format(args, kwargs))\n return a\n return wrapper",
"def logtool(self, action, **options):\n pass",
"def __call__(self, func):\n\n # set logger if it was not set earlier\n if not self.logger:\n logging.basicConfig()\n self.logger = logging.getLogger(func.__module__)\n\n @functools.wraps(func)\n def wrapper(*args, **kwds):\n self.logger.debug(\n self.ENTRY_MESSAGE.format(func.__name__)) # logging level .info(). Set to .debug() if you want to\n f_result = func(*args, **kwds)\n self.logger.debug(\n self.EXIT_MESSAGE.format(func.__name__)) # logging level .info(). Set to .debug() if you want to\n return f_result\n\n return wrapper",
"def some(func):\n def wrapper(* args,** kwargs):\n logging.basicConfig(filename='error.log',level=logging.DEBUG)\n logging.info(request.url + \" : \" + str(request.remote_addr)+\" using function \"+func.__name__ )\n return func(* args,** kwargs)\n\n wrapper.__name__ = func.__name__ \n return wrapper",
"def fetch_data_logger(fn):\n @functools.wraps(fn)\n def wrapper():\n\n data = fn()\n\n create_or_update_log(data, LOG_TYPES['D'])\n\n return data\n return wrapper",
"def _log(fn):\n @wraps(fn)\n def wrapper(self, *args, **kwargs):\n self.log.append(fn.__name__ + ' :: args={} kwargs={}'.format(args, kwargs))\n return fn(self, *args, **kwargs)\n return wrapper",
"def logging(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n res = func(*args, **kwargs)\n print(func.__name__, args, kwargs)\n return res\n return wrapper",
"def function_logger(orig_func):\n # Logging initialization\n import logging\n logger = logging.getLogger(__name__)\n\n @wraps(orig_func)\n def wrapper(*args, **kwargs):\n logger.info('Ran with args: {}, and kwargs: {}'.format(args, kwargs))\n return orig_func(*args, **kwargs)\n\n return wrapper",
"def _get_logger(self):",
"def main():\n logger = setup_logger()\n\n logger.debug('a debug message')\n logger.info('an info message')\n logger.warning('a warning message')\n logger.error('an error message')\n logger.critical('a critical message')",
"def _logger(cls, func):\n\t\t@wraps(func)\n\t\tdef wrapper(self, *args, **kwargs):\n\t\t\tlogging.info('func - {}, args - {}, kwargs - {}'.format(func.__name__, args, kwargs))\n\t\t\treturn func(*args, **kwargs)\n\n\t\treturn wrapper",
"def logged(meth):\n def wrapper(*args):\n print(\"LOGGING {meth} {args}\".format(**locals()))\n return meth(*args) #self, ... other args\n return wrapper",
"def _debug_wrap(func):\n\n def wrapper(*args, **kwargs):\n _debug_print(f\"{datetime.datetime.now()} - About to run: {func.__name__}\")\n ret_val = func(*args, **kwargs)\n _debug_print(f\"{datetime.datetime.now()} - Completed run: {func.__name__}\")\n return ret_val\n\n return wrapper",
"def _performance_log(func):\n\n def wrapper(*arg):\n \"\"\" wrapper \"\"\"\n\n start = datetime.datetime.now()\n\n # Code execution\n res = func(*arg)\n\n if _log_performance:\n usage = resource.getrusage(resource.RUSAGE_SELF)\n memory_process = (usage[2])/1000\n\n delta = datetime.datetime.now() - start\n delta_milliseconds = int(delta.total_seconds() * 1000)\n\n _logger.info(\"PERFORMANCE - {0} - milliseconds |{1:>8,}| - memory MB |{2:>8,}|\"\n .format(func.__name__,\n delta_milliseconds,\n memory_process))\n\n return res\n\n return wrapper",
"def _regr_basic():",
"def logged(func):\n def wrapper(*args, **kwargs):\n print(’you called {.__name__}({}{}{})’.format(\n func,\n str(list(args))[1:-1], \n ’, ’ if kwargs else ’’,\n ’, ’.join(’{}={}’.format(*pair) for pair in kwargs.items()),\n ))\n val = func(*args, **kwargs)\n print(’it returned’, val)\n return val",
"def get_logger():\n return PLLogger.GetLogger(\"testintel\")",
"def logIt(msg):\n utils = CONFIG['utils'].logIt(msg) #@UnusedVariable",
"def convertToREST(function):\n def wrapper(self, *args, **kwd):\n \"\"\"\n Log the call, and the result of the call\n \"\"\"\n try:\n retval = function(self, *args, **kwd)\n except (ValueError, AttributeError), log:\n LOG('SlapTool', INFO, 'Converting ValueError to NotFound, real error:',\n error=True)\n raise NotFound(log)\n except SoftwareInstanceNotReady, log:\n self.REQUEST.response.setStatus(408)\n self.REQUEST.response.setHeader('Cache-Control', 'private')\n return self.REQUEST.response\n except ValidationFailed:\n LOG('SlapTool', INFO, 'Converting ValidationFailed to ValidationFailed,'\\\n ' real error:',\n error=True)\n raise ValidationFailed\n except Unauthorized:\n LOG('SlapTool', INFO, 'Converting Unauthorized to Unauthorized,'\\\n ' real error:',\n error=True)\n raise Unauthorized\n\n self.REQUEST.response.setHeader('Content-Type', 'text/xml; charset=utf-8')\n return '%s' % retval\n wrapper.__doc__ = function.__doc__\n return wrapper",
"def do_log(self, arg):\n arg = \" %s :custom log\" % (arg)\n log(arg)",
"def wrapper(*args):",
"def openPyLangLogger(self, logger_fn, rm_obj_prefix='dut.rm.',\n logger_header='', logger_footer='',\n logger_label='rm_py_source'):\n pass",
"def important(func):\n\n def decorated(*args, **kwargs):\n \"\"\"Decorated method.\"\"\"\n runLog.important(func(*args, **kwargs))\n\n return decorated",
"def quick_debug(func):\n def func_wrapper(*args, **kwargs):\n # set logging level as DEBUG for this function sepecifically.\n debug = kwargs.get('debug', False)\n if debug:\n logging.disable(logging.NOTSET)\n logger.setLevel(logging.DEBUG)\n logger.debug(\"============================================= {}\".format(func.__name__))\n\n # remove the debug argument before passing onto the function.\n kwargs.pop('debug', None)\n\n # pass args and kwargs as is to the function\n ret_val = func(*args, **kwargs)\n\n # reset logging level to the glo\n logging.disable(logging.DEBUG)\n\n # return ret_val as is\n return ret_val\n\n return func_wrapper"
]
| [
"0.56776714",
"0.56146",
"0.55475384",
"0.55475384",
"0.5509798",
"0.5460044",
"0.5414567",
"0.5409551",
"0.5404668",
"0.53456235",
"0.53394705",
"0.5327236",
"0.53170985",
"0.5291978",
"0.5214487",
"0.52052015",
"0.5194438",
"0.51273423",
"0.5111953",
"0.51106703",
"0.5083045",
"0.507145",
"0.50707555",
"0.503924",
"0.5025247",
"0.5020203",
"0.4956821",
"0.49515384",
"0.49490604",
"0.49375457"
]
| 0.6281851 | 0 |