query (string, 9 to 9.05k chars) | document (string, 10 to 222k chars) | metadata (dict) | negatives (sequence, length 30) | negative_scores (sequence, length 30) | document_score (string, 4 to 10 chars) | document_rank (string, 2 classes)
---|---|---|---|---|---|---|
Adds extra device names that we know explicitly from some external source. | def addExtraDevices(self):
# These tables were extracted from
# pirates/src/piratesgui/GameOptions.py.
ati_device_list = [
["ATI MOBILITY/RADEON X700", 0x5653],
[1, "Radeon X1950 XTX Uber - Limited Edition", 0x7248],
[1, "Radeon X1950 XTX Uber - Limited Edition Secondary", 0x7268],
[1, "Radeon X800 CrossFire Edition", 0x554D],
[1, "Radeon X800 CrossFire Edition Secondary", 0x556D],
[1, "Radeon X850 CrossFire Edition", 0x5D52],
[1, "Radeon X850 CrossFire Edition Secondary", 0x5D72],
["Radeon X550/X700 Series", 0x564F],
["ATI FireGL T2", 0x4154],
["ATI FireGL T2 Secondary", 0x4174],
["ATI FireGL V3100", 0x5B64],
["ATI FireGL V3100 Secondary", 0x5B74],
["ATI FireGL V3200", 0x3E54],
["ATI FireGL V3200 Secondary", 0x3E74],
["ATI FireGL V3300", 0x7152],
["ATI FireGL V3300 Secondary", 0x7172],
["ATI FireGL V3350", 0x7153],
["ATI FireGL V3350 Secondary", 0x7173],
["ATI FireGL V3400", 0x71D2],
["ATI FireGL V3400 Secondary", 0x71F2],
["ATI FireGL V5000", 0x5E48],
["ATI FireGL V5000 Secondary", 0x5E68],
["ATI FireGL V5100", 0x5551],
["ATI FireGL V5100 Secondary", 0x5571],
["ATI FireGL V5200", 0x71DA],
["ATI FireGL V5200 Secondary", 0x71FA],
["ATI FireGL V5300", 0x7105],
["ATI FireGL V5300 Secondary", 0x7125],
["ATI FireGL V7100", 0x5550],
["ATI FireGL V7100 Secondary", 0x5570],
["ATI FireGL V7200", 0x5D50],
["ATI FireGL V7200 ", 0x7104],
["ATI FireGL V7200 Secondary", 0x5D70],
["ATI FireGL V7200 Secondary ", 0x7124],
["ATI FireGL V7300", 0x710E],
["ATI FireGL V7300 Secondary", 0x712E],
["ATI FireGL V7350", 0x710F],
["ATI FireGL V7350 Secondary", 0x712F],
["ATI FireGL X1", 0x4E47],
["ATI FireGL X1 Secondary", 0x4E67],
["ATI FireGL X2-256/X2-256t", 0x4E4B],
["ATI FireGL X2-256/X2-256t Secondary", 0x4E6B],
["ATI FireGL X3-256", 0x4A4D],
["ATI FireGL X3-256 Secondary", 0x4A6D],
["ATI FireGL Z1", 0x4147],
["ATI FireGL Z1 Secondary", 0x4167],
["ATI FireMV 2200", 0x5B65],
["ATI FireMV 2200 Secondary", 0x5B75],
["ATI FireMV 2250", 0x719B],
["ATI FireMV 2250 Secondary", 0x71BB],
["ATI FireMV 2400", 0x3151],
["ATI FireMV 2400 Secondary", 0x3171],
["ATI FireStream 2U", 0x724E],
["ATI FireStream 2U Secondary", 0x726E],
["ATI MOBILITY FIRE GL 7800", 0x4C58],
["ATI MOBILITY FIRE GL T2/T2e", 0x4E54],
["ATI MOBILITY FireGL V3100", 0x5464],
["ATI MOBILITY FireGL V3200", 0x3154],
["ATI MOBILITY FireGL V5000", 0x564A],
["ATI MOBILITY FireGL V5000 ", 0x564B],
["ATI MOBILITY FireGL V5100", 0x5D49],
["ATI MOBILITY FireGL V5200", 0x71C4],
["ATI MOBILITY FireGL V5250", 0x71D4],
["ATI MOBILITY FireGL V7100", 0x7106],
["ATI MOBILITY FireGL V7200", 0x7103],
["ATI MOBILITY RADEON", 0x4C59],
["ATI MOBILITY RADEON 7500", 0x4C57],
["ATI MOBILITY RADEON 9500", 0x4E52],
["ATI MOBILITY RADEON 9550", 0x4E56],
["ATI MOBILITY RADEON 9600/9700 Series", 0x4E50],
["ATI MOBILITY RADEON 9800", 0x4A4E],
["ATI Mobility Radeon HD 2300", 0x7210],
["ATI Mobility Radeon HD 2300 ", 0x7211],
["ATI Mobility Radeon HD 2400", 0x94C9],
["ATI Mobility Radeon HD 2400 XT", 0x94C8],
[1, "ATI Mobility Radeon HD 2600", 0x9581],
[1, "ATI Mobility Radeon HD 2600 XT", 0x9583],
["ATI Mobility Radeon X1300", 0x714A],
["ATI Mobility Radeon X1300 ", 0x7149],
["ATI Mobility Radeon X1300 ", 0x714B],
["ATI Mobility Radeon X1300 ", 0x714C],
["ATI Mobility Radeon X1350", 0x718B],
["ATI Mobility Radeon X1350 ", 0x718C],
["ATI Mobility Radeon X1350 ", 0x7196],
["ATI Mobility Radeon X1400", 0x7145],
["ATI Mobility Radeon X1450", 0x7186],
["ATI Mobility Radeon X1450 ", 0x718D],
["ATI Mobility Radeon X1600", 0x71C5],
["ATI Mobility Radeon X1700", 0x71D5],
["ATI Mobility Radeon X1700 ", 0x71DE],
["ATI Mobility Radeon X1700 XT", 0x71D6],
[1, "ATI Mobility Radeon X1800", 0x7102],
[1, "ATI Mobility Radeon X1800 XT", 0x7101],
[1, "ATI Mobility Radeon X1900", 0x7284],
[1, "ATI Mobility Radeon X2300", 0x718A],
[1, "ATI Mobility Radeon X2300 ", 0x7188],
["ATI MOBILITY RADEON X300", 0x5461],
["ATI MOBILITY RADEON X300 ", 0x5460],
["ATI MOBILITY RADEON X300 ", 0x3152],
["ATI MOBILITY RADEON X600", 0x3150],
["ATI MOBILITY RADEON X600 SE", 0x5462],
["ATI MOBILITY RADEON X700", 0x5652],
["ATI MOBILITY RADEON X700 ", 0x5653],
["ATI MOBILITY RADEON X700 Secondary", 0x5673],
[1, "ATI MOBILITY RADEON X800", 0x5D4A],
[1, "ATI MOBILITY RADEON X800 XT", 0x5D48],
["ATI Radeon 9550/X1050 Series", 0x4153],
["ATI Radeon 9550/X1050 Series Secondary", 0x4173],
["ATI RADEON 9600 Series", 0x4150],
["ATI RADEON 9600 Series ", 0x4E51],
["ATI RADEON 9600 Series ", 0x4151],
["ATI RADEON 9600 Series ", 0x4155],
["ATI RADEON 9600 Series ", 0x4152],
["ATI RADEON 9600 Series Secondary", 0x4E71],
["ATI RADEON 9600 Series Secondary ", 0x4171],
["ATI RADEON 9600 Series Secondary ", 0x4170],
["ATI RADEON 9600 Series Secondary ", 0x4175],
["ATI RADEON 9600 Series Secondary ", 0x4172],
[1, "ATI Radeon HD 2900 XT", 0x9402],
[1, "ATI Radeon HD 2900 XT ", 0x9403],
[1, "ATI Radeon HD 2900 XT ", 0x9400],
[1, "ATI Radeon HD 2900 XT ", 0x9401],
["ATI Radeon X1200 Series", 0x791E],
["ATI Radeon X1200 Series ", 0x791F],
[1, "ATI Radeon X1950 GT", 0x7288],
[1, "ATI Radeon X1950 GT Secondary", 0x72A8],
[1, "ATI RADEON X800 GT", 0x554E],
[1, "ATI RADEON X800 GT Secondary", 0x556E],
[1, "ATI RADEON X800 XL", 0x554D],
[1, "ATI RADEON X800 XL Secondary", 0x556D],
[1, "ATI RADEON X850 PRO", 0x4B4B],
[1, "ATI RADEON X850 PRO Secondary", 0x4B6B],
[1, "ATI RADEON X850 SE", 0x4B4A],
[1, "ATI RADEON X850 SE Secondary", 0x4B6A],
[1, "ATI RADEON X850 XT", 0x4B49],
[1, "ATI RADEON X850 XT Platinum Edition", 0x4B4C],
[1, "ATI RADEON X850 XT Platinum Edition Secondary", 0x4B6C],
[1, "ATI RADEON X850 XT Secondary", 0x4B69],
["ATI Radeon Xpress 1200 Series", 0x793F],
["ATI Radeon Xpress 1200 Series ", 0x7941],
["ATI Radeon Xpress 1200 Series ", 0x7942],
["ATI Radeon Xpress Series", 0x5A61],
["ATI Radeon Xpress Series ", 0x5A63],
["ATI Radeon Xpress Series ", 0x5A62],
["ATI Radeon Xpress Series ", 0x5A41],
["ATI Radeon Xpress Series ", 0x5A43],
["ATI Radeon Xpress Series ", 0x5A42],
["ATI Radeon Xpress Series ", 0x5954],
["ATI Radeon Xpress Series ", 0x5854],
["ATI Radeon Xpress Series ", 0x5955],
["ATI Radeon Xpress Series ", 0x5974],
["ATI Radeon Xpress Series ", 0x5874],
["ATI Radeon Xpress Series ", 0x5975],
["Radeon 9500", 0x4144],
["Radeon 9500 ", 0x4149],
["Radeon 9500 PRO / 9700", 0x4E45],
["Radeon 9500 PRO / 9700 Secondary", 0x4E65],
["Radeon 9500 Secondary", 0x4164],
["Radeon 9500 Secondary ", 0x4169],
["Radeon 9600 TX", 0x4E46],
["Radeon 9600 TX Secondary", 0x4E66],
["Radeon 9600TX", 0x4146],
["Radeon 9600TX Secondary", 0x4166],
["Radeon 9700 PRO", 0x4E44],
["Radeon 9700 PRO Secondary", 0x4E64],
["Radeon 9800", 0x4E49],
["Radeon 9800 PRO", 0x4E48],
["Radeon 9800 PRO Secondary", 0x4E68],
["Radeon 9800 SE", 0x4148],
["Radeon 9800 SE Secondary", 0x4168],
["Radeon 9800 Secondary", 0x4E69],
["Radeon 9800 XT", 0x4E4A],
["Radeon 9800 XT Secondary", 0x4E6A],
["Radeon X1300 / X1550 Series", 0x7146],
["Radeon X1300 / X1550 Series Secondary", 0x7166],
["Radeon X1300 Series", 0x714E],
["Radeon X1300 Series ", 0x715E],
["Radeon X1300 Series ", 0x714D],
["Radeon X1300 Series ", 0x71C3],
["Radeon X1300 Series ", 0x718F],
["Radeon X1300 Series Secondary", 0x716E],
["Radeon X1300 Series Secondary ", 0x717E],
["Radeon X1300 Series Secondary ", 0x716D],
["Radeon X1300 Series Secondary ", 0x71E3],
["Radeon X1300 Series Secondary ", 0x71AF],
["Radeon X1300/X1550 Series", 0x7142],
["Radeon X1300/X1550 Series ", 0x7180],
["Radeon X1300/X1550 Series ", 0x7183],
["Radeon X1300/X1550 Series ", 0x7187],
["Radeon X1300/X1550 Series Secondary", 0x7162],
["Radeon X1300/X1550 Series Secondary ", 0x71A0],
["Radeon X1300/X1550 Series Secondary ", 0x71A3],
["Radeon X1300/X1550 Series Secondary ", 0x71A7],
["Radeon X1550 64-bit", 0x7147],
["Radeon X1550 64-bit ", 0x715F],
["Radeon X1550 64-bit ", 0x719F],
["Radeon X1550 64-bit Secondary", 0x7167],
["Radeon X1550 64-bit Secondary ", 0x717F],
["Radeon X1550 Series", 0x7143],
["Radeon X1550 Series ", 0x7193],
["Radeon X1550 Series Secondary", 0x7163],
["Radeon X1550 Series Secondary ", 0x71B3],
["Radeon X1600 Pro / Radeon X1300 XT", 0x71CE],
["Radeon X1600 Pro / Radeon X1300 XT Secondary", 0x71EE],
["Radeon X1600 Series", 0x7140],
["Radeon X1600 Series ", 0x71C0],
["Radeon X1600 Series ", 0x71C2],
["Radeon X1600 Series ", 0x71C6],
["Radeon X1600 Series ", 0x7181],
["Radeon X1600 Series ", 0x71CD],
["Radeon X1600 Series Secondary", 0x7160],
["Radeon X1600 Series Secondary ", 0x71E2],
["Radeon X1600 Series Secondary ", 0x71E6],
["Radeon X1600 Series Secondary ", 0x71A1],
["Radeon X1600 Series Secondary ", 0x71ED],
["Radeon X1600 Series Secondary ", 0x71E0],
["Radeon X1650 Series", 0x71C1],
["Radeon X1650 Series ", 0x7293],
["Radeon X1650 Series ", 0x7291],
["Radeon X1650 Series ", 0x71C7],
["Radeon X1650 Series Secondary", 0x71E1],
["Radeon X1650 Series Secondary ", 0x72B3],
["Radeon X1650 Series Secondary ", 0x72B1],
["Radeon X1650 Series Secondary ", 0x71E7],
[1, "Radeon X1800 Series", 0x7100],
[1, "Radeon X1800 Series ", 0x7108],
[1, "Radeon X1800 Series ", 0x7109],
[1, "Radeon X1800 Series ", 0x710A],
[1, "Radeon X1800 Series ", 0x710B],
[1, "Radeon X1800 Series ", 0x710C],
[1, "Radeon X1800 Series Secondary", 0x7120],
[1, "Radeon X1800 Series Secondary ", 0x7128],
[1, "Radeon X1800 Series Secondary ", 0x7129],
[1, "Radeon X1800 Series Secondary ", 0x712A],
[1, "Radeon X1800 Series Secondary ", 0x712B],
[1, "Radeon X1800 Series Secondary ", 0x712C],
[1, "Radeon X1900 Series", 0x7243],
[1, "Radeon X1900 Series ", 0x7245],
[1, "Radeon X1900 Series ", 0x7246],
[1, "Radeon X1900 Series ", 0x7247],
[1, "Radeon X1900 Series ", 0x7248],
[1, "Radeon X1900 Series ", 0x7249],
[1, "Radeon X1900 Series ", 0x724A],
[1, "Radeon X1900 Series ", 0x724B],
[1, "Radeon X1900 Series ", 0x724C],
[1, "Radeon X1900 Series ", 0x724D],
[1, "Radeon X1900 Series ", 0x724F],
[1, "Radeon X1900 Series Secondary", 0x7263],
[1, "Radeon X1900 Series Secondary ", 0x7265],
[1, "Radeon X1900 Series Secondary ", 0x7266],
[1, "Radeon X1900 Series Secondary ", 0x7267],
[1, "Radeon X1900 Series Secondary ", 0x7268],
[1, "Radeon X1900 Series Secondary ", 0x7269],
[1, "Radeon X1900 Series Secondary ", 0x726A],
[1, "Radeon X1900 Series Secondary ", 0x726B],
[1, "Radeon X1900 Series Secondary ", 0x726C],
[1, "Radeon X1900 Series Secondary ", 0x726D],
[1, "Radeon X1900 Series Secondary ", 0x726F],
[1, "Radeon X1950 Series", 0x7280],
[1, "Radeon X1950 Series ", 0x7240],
[1, "Radeon X1950 Series ", 0x7244],
[1, "Radeon X1950 Series Secondary", 0x72A0],
[1, "Radeon X1950 Series Secondary ", 0x7260],
[1, "Radeon X1950 Series Secondary ", 0x7264],
["Radeon X300/X550/X1050 Series", 0x5B60],
["Radeon X300/X550/X1050 Series ", 0x5B63],
["Radeon X300/X550/X1050 Series Secondary", 0x5B73],
["Radeon X300/X550/X1050 Series Secondary ", 0x5B70],
["Radeon X550/X700 Series ", 0x5657],
["Radeon X550/X700 Series Secondary", 0x5677],
["Radeon X600 Series", 0x5B62],
["Radeon X600 Series Secondary", 0x5B72],
["Radeon X600/X550 Series", 0x3E50],
["Radeon X600/X550 Series Secondary", 0x3E70],
["Radeon X700", 0x5E4D],
["Radeon X700 PRO", 0x5E4B],
["Radeon X700 PRO Secondary", 0x5E6B],
["Radeon X700 SE", 0x5E4C],
["Radeon X700 SE Secondary", 0x5E6C],
["Radeon X700 Secondary", 0x5E6D],
["Radeon X700 XT", 0x5E4A],
["Radeon X700 XT Secondary", 0x5E6A],
["Radeon X700/X550 Series", 0x5E4F],
["Radeon X700/X550 Series Secondary", 0x5E6F],
[1, "Radeon X800 GT", 0x554B],
[1, "Radeon X800 GT Secondary", 0x556B],
[1, "Radeon X800 GTO", 0x5549],
[1, "Radeon X800 GTO ", 0x554F],
[1, "Radeon X800 GTO ", 0x5D4F],
[1, "Radeon X800 GTO Secondary", 0x5569],
[1, "Radeon X800 GTO Secondary ", 0x556F],
[1, "Radeon X800 GTO Secondary ", 0x5D6F],
[1, "Radeon X800 PRO", 0x4A49],
[1, "Radeon X800 PRO Secondary", 0x4A69],
[1, "Radeon X800 SE", 0x4A4F],
[1, "Radeon X800 SE Secondary", 0x4A6F],
[1, "Radeon X800 Series", 0x4A48],
[1, "Radeon X800 Series ", 0x4A4A],
[1, "Radeon X800 Series ", 0x4A4C],
[1, "Radeon X800 Series ", 0x5548],
[1, "Radeon X800 Series Secondary", 0x4A68],
[1, "Radeon X800 Series Secondary ", 0x4A6A],
[1, "Radeon X800 Series Secondary ", 0x4A6C],
[1, "Radeon X800 Series Secondary ", 0x5568],
[1, "Radeon X800 VE", 0x4A54],
[1, "Radeon X800 VE Secondary", 0x4A74],
[1, "Radeon X800 XT", 0x4A4B],
[1, "Radeon X800 XT ", 0x5D57],
[1, "Radeon X800 XT Platinum Edition", 0x4A50],
[1, "Radeon X800 XT Platinum Edition ", 0x554A],
[1, "Radeon X800 XT Platinum Edition Secondary", 0x4A70],
[1, "Radeon X800 XT Platinum Edition Secondary ", 0x556A],
[1, "Radeon X800 XT Secondary", 0x4A6B],
[1, "Radeon X800 XT Secondary ", 0x5D77],
[1, "Radeon X850 XT", 0x5D52],
[1, "Radeon X850 XT Platinum Edition", 0x5D4D],
[1, "Radeon X850 XT Platinum Edition Secondary", 0x5D6D],
[1, "Radeon X850 XT Secondary", 0x5D72],
]
vendorId = 0x1002
for entry in ati_device_list:
if len(entry) == 3:
flag, deviceName, deviceId = entry
else:
deviceName, deviceId = entry
self.devices[(vendorId, deviceId)] = deviceName.strip()
nvidia_device_list = [
[0x014F, "GeForce 6200"],
[0x00F3, "GeForce 6200"],
[0x0221, "GeForce 6200"],
[0x0163, "GeForce 6200 LE"],
[0x0162, "GeForce 6200SE TurboCache(TM)"],
[0x0161, "GeForce 6200 TurboCache(TM)"],
[0x0162, "GeForce 6200SE TurboCache(TM)"],
[0x0160, "GeForce 6500"],
[1, 0x0141, "GeForce 6600"],
[1, 0x00F2, "GeForce 6600"],
[1, 0x0140, "GeForce 6600 GT"],
[1, 0x00F1, "GeForce 6600 GT"],
[1, 0x0142, "GeForce 6600 LE"],
[1, 0x00F4, "GeForce 6600 LE"],
[1, 0x0143, "GeForce 6600 VE"],
[1, 0x0147, "GeForce 6700 XL"],
[1, 0x0041, "GeForce 6800"],
[1, 0x00C1, "GeForce 6800"],
[1, 0x0047, "GeForce 6800 GS"],
[1, 0x00F6, "GeForce 6800 GS"],
[1, 0x00C0, "GeForce 6800 GS"],
[1, 0x0045, "GeForce 6800 GT"],
[1, 0x00F9, "GeForce 6800 Series GPU"],
[1, 0x00C2, "GeForce 6800 LE"],
[1, 0x0040, "GeForce 6800 Ultra"],
[1, 0x00F9, "GeForce 6800 Series GPU"],
[1, 0x0043, "GeForce 6800 XE"],
[1, 0x0048, "GeForce 6800 XT"],
[1, 0x0218, "GeForce 6800 XT"],
[1, 0x00C3, "GeForce 6800 XT"],
[0x01DF, "GeForce 7300 GS"],
[0x0393, "GeForce 7300 GT"],
[0x01D1, "GeForce 7300 LE"],
[0x01D3, "GeForce 7300 SE"],
[0x01DD, "GeForce 7500 LE"],
[1, 0x0392, "GeForce 7600 GS"],
[1, 0x0392, "GeForce 7600 GS"],
[1, 0x02E1, "GeForce 7600 GS"],
[1, 0x0391, "GeForce 7600 GT"],
[1, 0x0394, "GeForce 7600 LE"],
[1, 0x00F5, "GeForce 7800 GS"],
[1, 0x0092, "GeForce 7800 GT"],
[1, 0x0091, "GeForce 7800 GTX"],
[1, 0x0291, "GeForce 7900 GT/GTO"],
[1, 0x0290, "GeForce 7900 GTX"],
[1, 0x0293, "GeForce 7900 GX2"],
[1, 0x0294, "GeForce 7950 GX2"],
[0x0322, "GeForce FX 5200"],
[0x0321, "GeForce FX 5200 Ultra"],
[0x0323, "GeForce FX 5200LE"],
[0x0326, "GeForce FX 5500"],
[0x0326, "GeForce FX 5500"],
[0x0312, "GeForce FX 5600"],
[0x0311, "GeForce FX 5600 Ultra"],
[0x0314, "GeForce FX 5600XT"],
[0x0342, "GeForce FX 5700"],
[0x0341, "GeForce FX 5700 Ultra"],
[0x0343, "GeForce FX 5700LE"],
[0x0344, "GeForce FX 5700VE"],
[0x0302, "GeForce FX 5800"],
[0x0301, "GeForce FX 5800 Ultra"],
[0x0331, "GeForce FX 5900"],
[0x0330, "GeForce FX 5900 Ultra"],
[0x0333, "GeForce FX 5950 Ultra"],
[0x0324, "GeForce FX Go5200 64M"],
[0x031A, "GeForce FX Go5600"],
[0x0347, "GeForce FX Go5700"],
[0x0167, "GeForce Go 6200/6400"],
[0x0168, "GeForce Go 6200/6400"],
[1, 0x0148, "GeForce Go 6600"],
[1, 0x00c8, "GeForce Go 6800"],
[1, 0x00c9, "GeForce Go 6800 Ultra"],
[1, 0x0098, "GeForce Go 7800"],
[1, 0x0099, "GeForce Go 7800 GTX"],
[1, 0x0298, "GeForce Go 7900 GS"],
[1, 0x0299, "GeForce Go 7900 GTX"],
[0x0185, "GeForce MX 4000"],
[0x00FA, "GeForce PCX 5750"],
[0x00FB, "GeForce PCX 5900"],
[0x0110, "GeForce2 MX/MX 400"],
[0x0111, "GeForce2 MX200"],
[0x0110, "GeForce2 MX/MX 400"],
[0x0200, "GeForce3"],
[0x0201, "GeForce3 Ti200"],
[0x0202, "GeForce3 Ti500"],
[0x0172, "GeForce4 MX 420"],
[0x0171, "GeForce4 MX 440"],
[0x0181, "GeForce4 MX 440 with AGP8X"],
[0x0173, "GeForce4 MX 440-SE"],
[0x0170, "GeForce4 MX 460"],
[0x0253, "GeForce4 Ti 4200"],
[0x0281, "GeForce4 Ti 4200 with AGP8X"],
[0x0251, "GeForce4 Ti 4400"],
[0x0250, "GeForce4 Ti 4600"],
[0x0280, "GeForce4 Ti 4800"],
[0x0282, "GeForce4 Ti 4800SE"],
[0x0203, "Quadro DCC"],
[0x0309, "Quadro FX 1000"],
[0x034E, "Quadro FX 1100"],
[0x00FE, "Quadro FX 1300"],
[0x00CE, "Quadro FX 1400"],
[0x0308, "Quadro FX 2000"],
[0x0338, "Quadro FX 3000"],
[0x00FD, "Quadro PCI-E Series"],
[1, 0x00F8, "Quadro FX 3400/4400"],
[1, 0x00CD, "Quadro FX 3450/4000 SDI"],
[1, 0x004E, "Quadro FX 4000"],
[1, 0x00CD, "Quadro FX 3450/4000 SDI"],
[1, 0x00F8, "Quadro FX 3400/4400"],
[1, 0x009D, "Quadro FX 4500"],
[1, 0x029F, "Quadro FX 4500 X2"],
[0x032B, "Quadro FX 500/FX 600"],
[0x014E, "Quadro FX 540"],
[0x014C, "Quadro FX 540 MXM"],
[0x032B, "Quadro FX 500/FX 600"],
[0X033F, "Quadro FX 700"],
[0x034C, "Quadro FX Go1000"],
[0x00CC, "Quadro FX Go1400"],
[0x031C, "Quadro FX Go700"],
[0x018A, "Quadro NVS with AGP8X"],
[0x032A, "Quadro NVS 280 PCI"],
[0x00FD, "Quadro PCI-E Series"],
[0x0165, "Quadro NVS 285"],
[0x017A, "Quadro NVS"],
[0x018A, "Quadro NVS with AGP8X"],
[0x0113, "Quadro2 MXR/EX"],
[0x017A, "Quadro NVS"],
[0x018B, "Quadro4 380 XGL"],
[0x0178, "Quadro4 550 XGL"],
[0x0188, "Quadro4 580 XGL"],
[0x025B, "Quadro4 700 XGL"],
[0x0259, "Quadro4 750 XGL"],
[0x0258, "Quadro4 900 XGL"],
[0x0288, "Quadro4 980 XGL"],
[0x028C, "Quadro4 Go700"],
[1, 0x0295, "NVIDIA GeForce 7950 GT"],
[0x03D0, "NVIDIA GeForce 6100 nForce 430"],
[0x03D1, "NVIDIA GeForce 6100 nForce 405"],
[0x03D2, "NVIDIA GeForce 6100 nForce 400"],
[0x0241, "NVIDIA GeForce 6150 LE"],
[0x0242, "NVIDIA GeForce 6100"],
[0x0245, "NVIDIA Quadro NVS 210S / NVIDIA GeForce 6150LE"],
[1, 0x029C, "NVIDIA Quadro FX 5500"],
[1, 0x0191, "NVIDIA GeForce 8800 GTX"],
[1, 0x0193, "NVIDIA GeForce 8800 GTS"],
[1, 0x0400, "NVIDIA GeForce 8600 GTS"],
[1, 0x0402, "NVIDIA GeForce 8600 GT"],
[0x0421, "NVIDIA GeForce 8500 GT"],
[0x0422, "NVIDIA GeForce 8400 GS"],
[0x0423, "NVIDIA GeForce 8300 GS"],
]
vendorId = 0x10de
for entry in nvidia_device_list:
if len(entry) == 3:
flag, deviceId, deviceName = entry
else:
deviceId, deviceName = entry
self.devices[(vendorId, deviceId)] = deviceName.strip() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_extra_args(self):\n self.parser.add_argument('--device', dest='device', type=str, help='Device ID, e.g. d--0001')",
"def addDevice(self, node, fullDeviceName, device):",
"def load_devices():",
"def addDeviceDescriptor(string: str, deviceDescriptor: cern.japc.core.DeviceDescriptor) -> None:\n ...",
"def _mergeDevicesByNameXMEGA(self, devices):\n\t\t# copy the devices, since this array will be modified\n\t\tdevs = list(devices)\n\t\tmerged = []\n\n\t\twhile len(devs) > 0:\n\t\t\tcurrent = devs[0]\n\t\t\tdevs.remove(current)\n\n\t\t\tmatches = []\n\t\t\tdevice_type = current.ids.getAttribute('type')[0]\n\n\t\t\tif device_type != None:\n\t\t\t\tself.log.info(\"ByName: Searching for device with type '%s'\" % device_type)\n\n\t\t\t\tfor dev in devs:\n\t\t\t\t\tif dev.ids.getAttribute('type')[0] == device_type:\n\t\t\t\t\t\t# A3 none|b and bu|u are different enough to warrant\n\t\t\t\t\t\t# a new device file\n\t\t\t\t\t\tif device_type == 'a3':\n\t\t\t\t\t\t\tif dev.ids.getAttribute('pin_id')[0] in self._getCategoryPinIdAVR(current):\n\t\t\t\t\t\t\t\tmatches.append(dev)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tmatches.append(dev)\n\n\t\t\tfor match in matches:\n\t\t\t\tdevs.remove(match)\n\t\t\t\tcurrent = current.getMergedDevice(match)\n\n\t\t\tif len(matches) == 0:\n\t\t\t\tself.log.info(\"ByName: no match for device: \" + current.id.string)\n\n\t\t\tself.log.debug(\"ByName:\\nResulting device:\\n\" + str(current))\n\t\t\tmerged.append(current)\n\n\t\treturn merged",
"def _derive_extra_metadata(self, extra_metadata):\n extra_metadata['platform']['Family'] = extra_metadata['platform']['Platform Family Name']\n\n # Add platform number if derivable from file\n if self.__class__ is not SAFESentinel1:\n extra_metadata['platform']['Family'] += \"-%s\" % extra_metadata['platform']['Platform Number']",
"def AddExtraDevice(self, guid_to_code_map: Dict[str, str]) -> None:\n self._extra_devices.update(guid_to_code_map)",
"def add_device(self, noInit=True, **kwargs):\n self.epicsLive.add_device(noInit=noInit, **kwargs)\n aliases = self.epicsLive._aliases\n if not self._det.get('epicsLive'):\n self._det['epicsLive'] = {}\n self._det['epicsLive'].update({'attrs': aliases})",
"async def get_discovered_device_names(self):\n json = self._api_call(\"app/monitors/%s/devices\" % self.sense_monitor_id)\n self._devices = await [entry[\"name\"] for entry in json]\n return self._devices",
"def combine_device_addrs(*args, **kwargs):\n return _uhd_swig.combine_device_addrs(*args, **kwargs)",
"def add_device(self, field, device, uc):\n self._devices[field] = device\n self._uc[field] = uc",
"def get_extra_attributes(self, device: str) -> dict:\n raise NotImplementedError()",
"def _find_devices_mac(self):\n self.keyboards.append(Keyboard(self))\n self.mice.append(MightyMouse(self))\n self.mice.append(Mouse(self))",
"def _mergeDevicesByName(self, devices):\n\t\tavrDevices = []\n\t\txmegaDevices = []\n\t\tstm32Devices = []\n\t\tresult = []\n\n\t\tfor dev in devices:\n\t\t\tif dev.ids.intersection.platform == 'avr':\n\t\t\t\tif dev.ids.intersection.family == 'xmega':\n\t\t\t\t\txmegaDevices.append(dev)\n\t\t\t\telse:\n\t\t\t\t\tavrDevices.append(dev)\n\t\t\telif dev.ids.intersection.platform == 'stm32':\n\t\t\t\tstm32Devices.append(dev)\n\t\t\telse:\n\t\t\t\tresult.append(dev)\n\n\t\tavrDevices = self._mergeDevicesByNameAVR(avrDevices)\n\t\txmegaDevices = self._mergeDevicesByNameXMEGA(xmegaDevices)\n\t\tstm32Devices = self._mergeDevicesByNameSTM32(stm32Devices)\n\n\t\tresult.extend(avrDevices)\n\t\tresult.extend(xmegaDevices)\n\t\tresult.extend(stm32Devices)\n\n\t\treturn result",
"def separate_device_addr(*args, **kwargs):\n return _uhd_swig.separate_device_addr(*args, **kwargs)",
"def default_device_names_for_instance(self,\n instance,\n root_device_name,\n *block_device_lists):\n self.prep_for_spawn(context=None, instance=instance)",
"def _find_special(self):\n charnames = self._get_char_names()\n for eventdir in glob.glob('/sys/class/input/event*'):\n char_name = os.path.split(eventdir)[1]\n if char_name in charnames:\n continue\n name_file = os.path.join(eventdir, 'device', 'name')\n with open(name_file) as name_file:\n device_name = name_file.read().strip()\n if device_name in self.codes['specials']:\n self._parse_device_path(\n self.codes['specials'][device_name],\n os.path.join('/dev/input', char_name))",
"def _import_devices(self) -> None:\n self._devices.clear()\n\n # Exctract all devices\n for device in self._udev.list_devices():\n # Skip devices without mapping\n if not device.device_node or self.helper.hide_virtual_device(device):\n continue\n self._devices[device.sys_name] = Device.import_udev(device)",
"def load_devices(self, source=None):\n if source is not None:\n return\n init = self.measure['init']\n devices_file = init['devices']\n devices_list = from_yaml_to_devices(devices_file)\n for dev in devices_list:\n self.devices[dev.properties['name']] = dev\n if 'outputs' in dev.properties:\n self.output_devices.append(dev)\n print('Added %s to the experiment' % dev)\n if dev.properties['type'] == \"Rotation Stage\":\n self.rotation_stages.append(dev.properties['name'])",
"def _load_device_names_and_attributes(\n scanner: DeviceScanner,\n device_name_uses_executor: bool,\n extra_attributes_uses_executor: bool,\n seen: set[str],\n found_devices: list[str],\n) -> tuple[dict[str, str | None], dict[str, dict[str, Any]]]:\n host_name_by_mac: dict[str, str | None] = {}\n extra_attributes_by_mac: dict[str, dict[str, Any]] = {}\n for mac in found_devices:\n if device_name_uses_executor and mac not in seen:\n host_name_by_mac[mac] = scanner.get_device_name(mac)\n if extra_attributes_uses_executor:\n try:\n extra_attributes_by_mac[mac] = scanner.get_extra_attributes(mac)\n except NotImplementedError:\n extra_attributes_by_mac[mac] = {}\n return host_name_by_mac, extra_attributes_by_mac",
"def test_device_add_from_file(self, gateway_with_devs):\n assert 'daq' in gateway_with_devs._devs\n assert 'pel' in gateway_with_devs._devs\n assert 'sg' in gateway_with_devs._devs\n assert 'not_a_driver' not in gateway_with_devs._devs",
"def test_change_name_of_the_devicetrue():",
"def addAllDescriptors(string: str, deviceDescriptor: cern.japc.core.DeviceDescriptor, parameterDescriptor: cern.japc.core.ParameterDescriptor, valueDescriptor: cern.japc.value.ValueDescriptor) -> None:\n ...",
"def _modify_other_devices(self, node, other_devices, kernel_devices, dpdk_devices):\n\n odevices_len = len(other_devices)\n if odevices_len > 0:\n print(\n \"\\nThese device(s) are currently NOT being used \" \"by VPP or the OS.\\n\"\n )\n VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)\n question = \"\\nWould you like to give any of these devices\"\n question += \" back to the OS [Y/n]? \"\n answer = self._ask_user_yn(question, \"Y\")\n if answer == \"y\":\n vppd = {}\n for dit in other_devices.items():\n dvid = dit[0]\n device = dit[1]\n question = \"Would you like to use device {} for\".format(dvid)\n question += \" the OS [y/N]? \"\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n if (\n \"unused\" in device\n and len(device[\"unused\"]) != 0\n and device[\"unused\"][0] != \"\"\n ):\n driver = device[\"unused\"][0]\n ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)\n if ret:\n logging.debug(\"Could not bind device {}\".format(dvid))\n else:\n vppd[dvid] = device\n for dit in vppd.items():\n dvid = dit[0]\n device = dit[1]\n kernel_devices[dvid] = device\n del other_devices[dvid]\n\n odevices_len = len(other_devices)\n if odevices_len > 0:\n print(\"\\nThese device(s) are still NOT being used \" \"by VPP or the OS.\\n\")\n VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)\n question = \"\\nWould you like use any of these for VPP [y/N]? \"\n answer = self._ask_user_yn(question, \"N\")\n if answer == \"y\":\n vppd = {}\n for dit in other_devices.items():\n dvid = dit[0]\n device = dit[1]\n question = \"Would you like to use device {} \".format(dvid)\n question += \"for VPP [y/N]? \"\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n vppd[dvid] = device\n for dit in vppd.items():\n dvid = dit[0]\n device = dit[1]\n if (\n \"unused\" in device\n and len(device[\"unused\"]) != 0\n and device[\"unused\"][0] != \"\"\n ):\n driver = device[\"unused\"][0]\n logging.debug(\n \"Binding device {} to driver {}\".format(dvid, driver)\n )\n ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)\n if ret:\n logging.debug(\"Could not bind device {}\".format(dvid))\n else:\n dpdk_devices[dvid] = device\n del other_devices[dvid]",
"def pop_adv_devices(self):\r\n if self.localSDK.devList:\r\n del self.localSDK.devList[:]\r\n try:\r\n self.localSDK.get_devices() # Get list of boards from KSDK manifest file\r\n except IOError:\r\n self.localSDK.devList = ['None']\r\n return",
"def fillNameFromDiscoverResponse(tv):\n if ('informations' in tv and\n 'general' in tv['informations'] and\n 'device' in tv['informations']['general'] and\n 'friendlyName' in tv['informations']['general']['device']):\n tv['computed']['name'] = tv['informations']['general']['device']['friendlyName']",
"def device_adcs(self):\n return [\"SIS 3302\", \"SIS 3305\"]",
"def test_change_name_of_the_devicefalse():",
"def _mergeDevicesByNameAVR(self, devices):\n\t\t# copy the devices, since this array will be modified\n\t\tdevs = list(devices)\n\t\tmerged = []\n\n\t\twhile len(devs) > 0:\n\t\t\tcurrent = devs[0]\n\t\t\tdevs.remove(current)\n\n\t\t\tmatches = []\n\t\t\tsize_id = current.ids.getAttribute('size_id')[0]\n\n\t\t\tif size_id != None:\n\t\t\t\tname = current.ids.getAttribute('name')[0]\n\t\t\t\tdevice_type = current.ids.getAttribute('type')[0]\n\t\t\t\tfamily = name[len(size_id):]\n\n\t\t\t\tif not (family == \"\" and device_type == None):\n\t\t\t\t\tdevice_type = self._getCategoryTypeAVR(current)\n\n\t\t\t\t\tself.log.info(\"ByName: Searching for device ending in '\"\n\t\t\t\t\t\t\t\t + family + \"' and '\" + str(device_type) + \"'\")\n\n\t\t\t\t\tfor dev in devs:\n\t\t\t\t\t\tdname = dev.ids.getAttribute('name')[0]\n\t\t\t\t\t\tdsize_id = dev.ids.getAttribute('size_id')[0]\n\n\t\t\t\t\t\t# if they do not have a size-id they are probably unmergable\n\t\t\t\t\t\tif dsize_id != None:\n\t\t\t\t\t\t\tdfamily = dname[len(dsize_id):]\n\n\t\t\t\t\t\t\t# perpare for type comparison\n\t\t\t\t\t\t\t# we should only merge when the family is the same,\n\t\t\t\t\t\t\t# and if the type is the same\n\n\t\t\t\t\t\t\tif dfamily == family and dev.ids.getAttribute('type')[0] in device_type:\n\t\t\t\t\t\t\t\tmatches.append(dev)\n\n\t\t\t# The following code is Atmel's fault with their stupid naming schemes.\n\t\t\t# the AT90's, ATmega's and ATtiny's have special merging rules\n\t\t\tif current.id.family == \"at90\":\n\t\t\t\tname = current.id.name\n\n\t\t\t\t# Some Devices are just not in the same group\n\t\t\t\tif name in ['1', '2', '3', '216', '316', '646', '647', '1286', '1287']:\n\t\t\t\t\t# these are not the matches you are looking for *move hand*\n\t\t\t\t\tmatches = []\n\t\t\t\t# these are not the devices you want to matched with\n\t\t\t\tfor match in matches:\n\t\t\t\t\tif match.id.name in ['1', '2', '3', '216', '316', '646', '647', '1286', '1287']:\n\t\t\t\t\t\tmatches.remove(match)\n\t\t\t\t\t\tbreak\n\t\t\t\t# but these are:\n\t\t\t\tnamesA = [ ['1', '2', '216'], ['3', '316'], ['646', '647', '1286', '1287'] ]\n\t\t\t\tfor names in namesA:\n\t\t\t\t\tif name in names:\n\t\t\t\t\t\tfor dev in [d for d in devs if dev.id.family == \"at90\"]:\n\t\t\t\t\t\t\tfor dname in dev.ids.getAttribute('name'):\n\t\t\t\t\t\t\t\tif dname in names:\n\t\t\t\t\t\t\t\t\tmatches.append(dev)\n\n\t\t\tif current.id.family == \"atmega\":\n\t\t\t\tname = current.id.name\n\n\t\t\t\tif current.ids.getAttribute('type')[0] in [None, 'none', 'p', 'a', 'pa']:\n\t\t\t\t\t# Some Devices are just not in the same group\n\t\t\t\t\tif name in ['8', '16', '32', '64', '128']:\n\t\t\t\t\t\t# these are not the matches you are looking for *move hand*\n\t\t\t\t\t\tmatches = []\n\t\t\t\t\t# these are not the devices you want to be matched with\n\t\t\t\t\tfor match in matches:\n\t\t\t\t\t\tif match.id.name in ['8', '16', '32', '64', '128']:\n\t\t\t\t\t\t\tmatches.remove(match)\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t# but these are:\n\t\t\t\t\tnamesA = [ ['16', '32'], ['64', '128'] ]\n\t\t\t\t\tfor names in namesA:\n\t\t\t\t\t\tif name in names:\n\t\t\t\t\t\t\tfor dev in devs:\n\t\t\t\t\t\t\t\tif dev.id.family == \"atmega\" and dev.ids.getAttribute('type')[0] in [None, 'none', 'p', 'a', 'pa']:\n\t\t\t\t\t\t\t\t\tfor dname in dev.ids.getAttribute('name'):\n\t\t\t\t\t\t\t\t\t\tif dname in names:\n\t\t\t\t\t\t\t\t\t\t\tmatches.append(dev)\n\n\t\t\tif current.id.family == \"attiny\":\n\t\t\t\tname = current.id.name\n\t\t\t\tnames = ['4', '5', '9', 
'10']\n\t\t\t\tif name in names:\n\t\t\t\t\tfor dev in devs:\n\t\t\t\t\t\tif dev.id.family == \"attiny\":\n\t\t\t\t\t\t\tfor dname in dev.ids.getAttribute('name'):\n\t\t\t\t\t\t\t\tif dname in names:\n\t\t\t\t\t\t\t\t\tmatches.append(dev)\n\n\t\t\t\t# Some Devices are just not in the same group\n\t\t\t\tif name in ['28', '20', '40']:\n\t\t\t\t\t# these are not the matches you are looking for *move hand*\n\t\t\t\t\tmatches = []\n\t\t\t\t# these are not the devices you want to matched with\n\t\t\t\tfor match in matches:\n\t\t\t\t\tif match.id.name in ['28', '20', '40']:\n\t\t\t\t\t\tmatches.remove(match)\n\t\t\t\t\t\tbreak\n\n\t\t\tfor match in matches:\n\t\t\t\tdevs.remove(match)\n\t\t\t\tcurrent = current.getMergedDevice(match)\n\n\t\t\tif len(matches) == 0:\n\t\t\t\tself.log.info(\"ByName: no match for device: \" + current.id.string)\n\n\t\t\tself.log.debug(\"ByName:\\nResulting device:\\n\" + str(current))\n\t\t\tmerged.append(current)\n\n\t\treturn merged",
"def addDevice(self, device):\n if device.name in self.devices:\n log.error(\"'%s' already part of '%s'\", device.name, self.name)\n else:\n self.devices[device.name] = device\n return self"
] | [
"0.6235411",
"0.6018107",
"0.5875037",
"0.5769226",
"0.5737719",
"0.5699172",
"0.5628838",
"0.56115973",
"0.55473393",
"0.5530301",
"0.5512926",
"0.55042124",
"0.54661566",
"0.5442146",
"0.54258853",
"0.5409304",
"0.53862745",
"0.5350149",
"0.53231454",
"0.52631706",
"0.52626175",
"0.5261758",
"0.5253251",
"0.5244742",
"0.5227782",
"0.5210206",
"0.51869375",
"0.5185414",
"0.51717913",
"0.5170142"
] | 0.64580566 | 0 |
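The document in the row above registers known GPU names by building a `(vendorId, deviceId) -> name` lookup from entries that are either `[name, device_id]` or `[flag, name, device_id]`. Below is a minimal, self-contained sketch of that parsing pattern; the `sample_entries` list, the `devices` dict, and the printed lookup are illustrative assumptions, not part of the dataset's own code.

```python
# Hypothetical standalone sketch of the mixed 2/3-element entry parsing shown above.
ATI_VENDOR_ID = 0x1002  # PCI vendor id used for the ATI table in the row's document

sample_entries = [
    ["ATI FireGL T2", 0x4154],                     # [name, device_id]
    [1, "Radeon X800 CrossFire Edition", 0x554D],  # [flag, name, device_id]
]

devices = {}
for entry in sample_entries:
    if len(entry) == 3:
        _flag, name, device_id = entry  # flagged entries carry a leading marker
    else:
        name, device_id = entry
    devices[(ATI_VENDOR_ID, device_id)] = name.strip()

print(devices[(0x1002, 0x4154)])  # -> ATI FireGL T2
```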
Counts the frequencies of samples of given variables ``vars`` and calculates probabilities with additive smoothing. | def get_probs(self, *vars):
freqs = self.freq_counts([self.data.get_column_view(v)[0] for v in vars], [len(v.values) for v in vars])
k = np.prod([len(v.values) for v in vars])
return (freqs + self.alpha) / (np.sum(freqs) + self.alpha*k) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_posterior_probs(vars_):\n vars_.weighted_sums += np.power(vars_.dprime_map[vars_.focus],2) * vars_.visual_field\n vars_.post_probs = np.exp(vars_.weighted_sums) * vars_.prior_prob\n vars_.post_probs /= np.sum(vars_.post_probs)",
"def countVarFreq(list_models_vars_freq):\n list_variables_total = []\n for model_var_freq in list_models_vars_freq:\n variables_names = list(model_var_freq.dict_freq_var.keys())\n list_variables_total.extend(variables_names)\n \n counter_frec_variables = Counter(list_variables_total)\n dict_frec_variables = dict(counter_frec_variables)\n return dict_frec_variables",
"def calc_feature_probs(image_type, image_data, smoothing):\n counts = np.array([np.sum(image_data.features[image_data.labels == value], axis=0) + smoothing for value in range(image_type.categories)])\n denoms = np.array([np.count_nonzero(image_data.labels == value) + (smoothing * image_type.feature_kinds) for value in range(image_type.categories)])\n return counts / denoms[:, np.newaxis, np.newaxis]",
"def count_vars(scope=''):\n v = get_vars(scope)\n return sum([np.prod(var.shape.as_list()) for var in v])",
"def estimate_pxy(x,y,label,smoothing,vocab):\n log_probabilities = defaultdict(float)\n corpus_counts = get_corpus_counts(x, y, label)\n total = sum(corpus_counts.values())\n for word in vocab:\n log_probabilities[word] = np.log(((corpus_counts[word] if word in corpus_counts else 0) + smoothing) / (total + len(vocab) * smoothing))\n return log_probabilities",
"def sum_model_probs(model, uniq_words):\n sum_probs = 0\n for word in uniq_words:\n sum_probs += model.get_prob(word)\n sum_probs += (voc_size - len(uniq_words)) * model.get_prob_by_word_freq(0)\n return sum_probs",
"def plot_vars(\n self,\n vars,\n axes=None,\n bins=None,\n start=None,\n stop=None,\n edges=None,\n transform=None,\n ):\n if self._delayed_mode:\n for name, var in vars.items():\n if not compatible_partitions(var, self._masks[0]):\n raise IncompatiblePartitions(\"plot_vars\", var, self._masks[0])\n else:\n for name, var in vars.items():\n if len(var) != len(self._masks[0]):\n raise ValueError(\n f\"The variable '{name}' has length '{len(var)}', but the masks have length '{len(self._masks[0])}'\"\n )\n\n hists = []\n labels = [\"initial\"] + [f\"N - {i}\" for i in self._names] + [\"N\"]\n\n bins = [None] * len(vars) if bins is None else bins\n start = [None] * len(vars) if start is None else start\n stop = [None] * len(vars) if stop is None else stop\n edges = [None] * len(vars) if edges is None else edges\n transform = [None] * len(vars) if transform is None else transform\n\n if axes is not None:\n axes = axes\n else:\n axes = []\n for (name, var), b, s1, s2, e, t in zip(\n vars.items(), bins, start, stop, edges, transform\n ):\n ax = coffea.util._gethistogramaxis(\n name, var, b, s1, s2, e, t, self._delayed_mode\n )\n axes.append(ax)\n\n checklengths = [\n len(x) == len(vars) for x in (axes, bins, start, stop, edges, transform)\n ]\n if not all(checklengths):\n raise ValueError(\n \"vars, axes, bins, start, stop, edges, and transform must be the same length\"\n )\n\n if not self._delayed_mode:\n for (name, var), axis in zip(vars.items(), axes):\n h = hist.Hist(\n axis,\n hist.axis.Integer(0, len(labels), name=\"N-1\"),\n )\n arr = awkward.flatten(var)\n h.fill(arr, awkward.zeros_like(arr))\n for i, mask in enumerate(self.result().masks, 1):\n arr = awkward.flatten(var[mask])\n h.fill(arr, awkward.full_like(arr, i, dtype=int))\n hists.append(h)\n\n else:\n for (name, var), axis in zip(vars.items(), axes):\n h = hist.dask.Hist(\n axis,\n hist.axis.Integer(0, len(labels), name=\"N-1\"),\n )\n arr = dask_awkward.flatten(var)\n h.fill(arr, dask_awkward.zeros_like(arr))\n for i, mask in enumerate(self.result().masks, 1):\n arr = dask_awkward.flatten(var[mask])\n h.fill(arr, dask_awkward.full_like(arr, i, dtype=int))\n hists.append(h)\n\n return hists, labels",
"def probability(self, item):\n count = self.counter.get(item, 0)\n if self.smoothing_dict:\n smooth_count = self.smoothing_dict.get(count, count)\n assert smooth_count > 0\n return smooth_count / self.smooth_total\n else:\n return count / self.total",
"def probability(cpts, term, obs):\r\n \r\n \r\n # term is a list e.g., ['x_1', '0']\r\n # flip refers to the assignment either '0' false or '1' true\r\n flip = term[1]\r\n # the term itself\r\n term = term[0]\r\n # accumulator variable\r\n answer = 0\r\n # this loop locates where in the CPT we're looking\r\n for clause in range(len(cpts)):\r\n if cpts[clause][0] == term:\r\n index = clause\r\n # focus on our term\r\n cpt = cpts[index]\r\n # this loop checks if there are no preconditions\r\n # if not, then we immediately know the probability and can return\r\n for m in range(len(cpt[1])):\r\n if cpt[1][m][-2][1] == '1':\r\n if cpt[1][m][0] == [[]]:\r\n answer = cpt[1][m][-1]\r\n # list of the variables we have observed\r\n have = []\r\n if obs != []:\r\n for k in obs:\r\n have.append(k[0])\r\n # list of variables we need to know in order to calculate the probability\r\n needed = []\r\n for prob in range(len(cpt[1])):\r\n for j in cpt[1][prob][0]:\r\n if j != []:\r\n if j[0] not in needed:\r\n needed.append(j[0])\r\n # conditional logic based on the known variables\r\n for required in needed:\r\n if required not in have:\r\n # deep copy our observations list\r\n obs2 = []\r\n obs3 = []\r\n for observs in obs:\r\n obs2.append(observs)\r\n obs3.append(observs)\r\n # if we need to know a variable but don't have it\r\n # then we allow it to be either 0 or 1\r\n obs3.append([required,'1'])\r\n obs2.append([required,'0'])\r\n # computes probability if the unknown term is true, times \r\n # the probability that the unknown term is true, plus the\r\n # probability if the unknown term is false, times the \r\n # probability that the unknown term is false\r\n answer = (probability(cpts, [term,flip], obs3) * probability(cpts, [required,'1'], obs)) + (probability(cpts, [term,flip], obs2) * (probability(cpts, [required,'0'], obs)))\r\n # this loop looks complicated but all it's doing is finding the correct\r\n # line in the CPT\r\n if cpt[1][prob][-2][1] == '1':\r\n count = 1\r\n for i in range(len(cpt[1][prob][0])):\r\n if cpt[1][prob][0][i] in obs:\r\n count *= 1\r\n else:\r\n count = 0\r\n if count == 1:\r\n answer += cpt[1][prob][-1]\r\n\r\n\r\n # this computes the probability that the term is true, so if we asked \r\n # for the probability that it is false, just return 1 - answer\r\n if flip == '0':\r\n return 1 - answer\r\n return answer",
"def direct_sample(self, trial_count):\n count = 0\n\n for i in xrange(trial_count):\n values = {}\n\n for letter in self.letters:\n prob = self.variables[letter].get_prob(values)\n values[letter] = self.sample(prob)\n\n if values[self.query.variable]:\n count += 1\n\n return float(count) / trial_count",
"def _aggregate_grads_and_vars(self, grads_and_vars_list, weights=None):\n aggregated = {}\n log.info('Number of grads and vars to aggregate: {}'.format(\n len(grads_and_vars_list)))\n if weights is None:\n assert False, 'Equally aggregated, debug point'\n weights = [None] * len(grads_and_vars_list)\n for gv_list, wt in zip(grads_and_vars_list, weights):\n for g, v in gv_list:\n if g is not None:\n if v in aggregated:\n log.info('Variable matched in the dictionary: {}'.format(v.name))\n if wt is None:\n aggregated[v].append(g)\n log.info('Applied default weight 1.0')\n else:\n aggregated[v].append(g * wt)\n log.info('Applied weight {}'.format(wt))\n else:\n log.info('Variable created in the dictionary: {}'.format(v.name))\n if wt is None:\n aggregated[v] = [g]\n log.info('Applied default weight 1.0')\n else:\n aggregated[v] = [g * wt]\n log.info('Applied weight {}'.format(wt))\n result = []\n for v in aggregated.keys():\n log.info('Variable {} Count {}'.format(v.name, len(aggregated[v])))\n aggregated[v] = tf.add_n(aggregated[v])\n result.append((aggregated[v], v))\n return result",
"def observed_species(counts):\n return (counts!=0).sum()",
"def inv_freq(x, count, bins):\n total = float(count.sum())\n weight = 0.0\n for k in range(count.shape[0]-1):\n c, b0,b1 = float(count[k]),float(bins[k]),float(bins[k+1])\n try:\n weight += (total/c)*((x>=b0).Managerfloat())*((x<b1).float())\n except:\n weight = (total/c)*((x>=b0).float())*((x<b1).float())\n return weight",
"def add_variables_summaries(grads_and_vars, step=None, with_histogram=True):\n if not grads_and_vars:\n return\n vars = [v for g, v in grads_and_vars]\n for var, var_name in zip(vars, unique_var_names(vars)):\n if isinstance(var, tf.IndexedSlices):\n var_values = var.values\n else:\n var_values = var\n if with_histogram:\n tf.summary.histogram(\n name='summarize_vars/' + var_name + '_value',\n data=var_values,\n step=step)\n tf.summary.scalar(\n name='summarize_vars/' + var_name + '_value_norm',\n data=tf.linalg.global_norm([var_values]),\n step=step)",
"def prob(self, w):\n return self.counts[w] / self.total_count",
"def calc_weights(freqs, delay):\n if np.array(freqs).shape[0] == 1:\n phase = 2.0 * np.pi * freqs * delay\n\n else:\n phase = 2.0 * np.pi * freqs * delay[:, np.newaxis]\n\n return np.exp(-1j * phase)",
"def probability_density(dic):\n\n var = dic['var']\n par = dic['par']\n y1 = dic['y']\n y = y1.conjugate() * y\n return dic_result(var,par,y)",
"def plot_vars(\n self,\n vars,\n axes=None,\n bins=None,\n start=None,\n stop=None,\n edges=None,\n transform=None,\n ):\n if self._delayed_mode:\n for name, var in vars.items():\n if not compatible_partitions(var, self._masksonecut[0]):\n raise IncompatiblePartitions(\"plot_vars\", var, self._masksonecut[0])\n else:\n for name, var in vars.items():\n if len(var) != len(self._masksonecut[0]):\n raise ValueError(\n f\"The variable '{name}' has length '{len(var)}', but the masks have length '{len(self._masksonecut[0])}'\"\n )\n\n histsonecut, histscutflow = [], []\n labels = [\"initial\"] + list(self._names)\n\n bins = [None] * len(vars) if bins is None else bins\n start = [None] * len(vars) if start is None else start\n stop = [None] * len(vars) if stop is None else stop\n edges = [None] * len(vars) if edges is None else edges\n transform = [None] * len(vars) if transform is None else transform\n\n if axes is not None:\n axes = axes\n else:\n axes = []\n for (name, var), b, s1, s2, e, t in zip(\n vars.items(), bins, start, stop, edges, transform\n ):\n ax = coffea.util._gethistogramaxis(\n name, var, b, s1, s2, e, t, self._delayed_mode\n )\n axes.append(ax)\n\n checklengths = [\n len(x) == len(vars) for x in (axes, bins, start, stop, edges, transform)\n ]\n if not all(checklengths):\n raise ValueError(\n \"vars, axes, bins, start, stop, edges, and transform must be the same length\"\n )\n\n if not self._delayed_mode:\n for (name, var), axis in zip(vars.items(), axes):\n honecut = hist.Hist(\n axis,\n hist.axis.Integer(0, len(labels), name=\"onecut\"),\n )\n hcutflow = honecut.copy()\n hcutflow.axes.name = name, \"cutflow\"\n\n arr = awkward.flatten(var)\n honecut.fill(arr, awkward.zeros_like(arr))\n hcutflow.fill(arr, awkward.zeros_like(arr))\n\n for i, mask in enumerate(self.result().masksonecut, 1):\n arr = awkward.flatten(var[mask])\n honecut.fill(arr, awkward.full_like(arr, i, dtype=int))\n histsonecut.append(honecut)\n\n for i, mask in enumerate(self.result().maskscutflow, 1):\n arr = awkward.flatten(var[mask])\n hcutflow.fill(arr, awkward.full_like(arr, i, dtype=int))\n histscutflow.append(hcutflow)\n\n else:\n for (name, var), axis in zip(vars.items(), axes):\n honecut = hist.dask.Hist(\n axis,\n hist.axis.Integer(0, len(labels), name=\"onecut\"),\n )\n hcutflow = honecut.copy()\n hcutflow.axes.name = name, \"cutflow\"\n\n arr = dask_awkward.flatten(var)\n honecut.fill(arr, dask_awkward.zeros_like(arr))\n hcutflow.fill(arr, dask_awkward.zeros_like(arr))\n\n for i, mask in enumerate(self.result().masksonecut, 1):\n arr = dask_awkward.flatten(var[mask])\n honecut.fill(arr, dask_awkward.full_like(arr, i, dtype=int))\n histsonecut.append(honecut)\n\n for i, mask in enumerate(self.result().maskscutflow, 1):\n arr = dask_awkward.flatten(var[mask])\n hcutflow.fill(arr, dask_awkward.full_like(arr, i, dtype=int))\n histscutflow.append(hcutflow)\n\n return histsonecut, histscutflow, labels",
"def __smooth_emission_params(self):\n params_count = {}\n unique_symbols = []\n for key, value in self.emission_dict.items():\n if key[0] not in unique_symbols:\n unique_symbols.append(key[0])\n \n n = len(unique_symbols)\n # n refers to the number of observations/symbols \n\n for state in self.states:\n params_count[state] = [0,0,0]\n # print(params_count[state])\n # key is the state, value is list [total no. of symbols, total no. of non-zero probability, probability p]\n # i.e. [Ts, v, p]\n for key, value in self.emission_dict.items():\n if state in key:\n params_count[state][0] += 1\n if value != 0:\n params_count[state][1] += 1\n else:\n continue\n params_count[state][2] += 1/(params_count[state][0] + params_count[state][1])\n # p = 1/(Ts+v)\n \n for state in self.states:\n for key, value in self.emission_dict.items():\n if state in key:\n if value != 0:\n self.emission_dict[key] = value - params_count[state][2]\n else:\n self.emission_dict[key] = (params_count[state][2]*params_count[state][2])/n-params_count[state][2]\n # v*p/n-v",
"def likelihood_sample(self, trial_count):\n count = 0\n\n sum_query_weights = 0\n sum_total_weights = 0\n\n for i in xrange(trial_count):\n values = {}\n\n sample_weight = 1.0\n\n for letter in self.letters:\n prob = self.variables[letter].get_prob(values)\n\n # Fix the evidence variables\n if letter in self.query.evidence:\n values[letter] = self.query.evidence[letter]\n\n if (values[letter]):\n sample_weight *= prob\n else:\n sample_weight *= (1 - prob)\n else:\n values[letter] = self.sample(prob)\n\n if values[self.query.variable]:\n sum_query_weights += sample_weight\n\n sum_total_weights += sample_weight\n\n return float(sum_query_weights) / sum_total_weights",
"def add_pseudocounts(self, pseudocounts):\n logging.info(\"Adding pseudocounts...\")\n if pseudocounts[0] > 0:\n self._t += pseudocounts[0]\n self.normalize_transition()\n if pseudocounts[1] > 0:\n self._e += pseudocounts[1]\n self.normalize_emission()\n if pseudocounts[2] > 0:\n self._i += pseudocounts[2]\n self.normalize_initial()",
"def get_noise_distribution(corpus: List[str],\n vocabulary: np.ndarray,\n dist_alpha: float\n ) -> List[int]:\n all_words = [word for text in corpus for word in text]\n arr = np.array(list(map(\n lambda x: all_words.count(x)**dist_alpha, vocabulary\n )))\n return arr/arr.sum() # frequencies, normalised, in order of vocabulary",
"def update_frequencies():\n pass",
"def probability(prods, prod_dict_As, count_dict):\n for p in prods:\n if p not in prod_dict_As:\n raise Exception(\"Think we cannot make the product {}.\".format(p))\n # Argh, Python, this is a reference!\n #possible_As = prod_dict_As[prods[0]]\n possible_As = set( prod_dict_As[prods[0]] )\n for p in prods[1:]:\n possible_As &= prod_dict_As[p]\n ret = []\n for A in possible_As:\n count = 1\n for p in prods:\n count *= count_dict[(p,A)]\n ret.append((A,count))\n return ret",
"def log_prob(self, sents):\n log_prob = 0\n for sent in sents:\n log_prob += self.sent_log_prob(sent)\n return log_prob",
"def smooth(item_count, nr_tokens, type=\"min\"): #change type of smoothing? NLTK if freq dists\n if type == \"ele\":\n smoothed_count = item_count + nr_tokens * 0.5\n else:\n smoothed_count = item_count + (1 / nr_tokens)\n return smoothed_count",
"def freqs(self, xs):\n return [self.freq(x) for x in xs]",
"def sslm_counts_init(self, obs_variance, chain_variance, sstats):\n W = self.vocab_len\n T = self.num_time_slices\n\n log_norm_counts = np.copy(sstats)\n log_norm_counts /= sum(log_norm_counts)\n log_norm_counts += 1.0 / W\n log_norm_counts /= sum(log_norm_counts)\n log_norm_counts = np.log(log_norm_counts)\n\n # setting variational observations to transformed counts\n self.obs = (np.repeat(log_norm_counts, T, axis=0)).reshape(W, T)\n # set variational parameters\n self.obs_variance = obs_variance\n self.chain_variance = chain_variance\n\n # compute post variance, mean\n for w in range(W):\n self.variance[w], self.fwd_variance[w] = self.compute_post_variance(w, self.chain_variance)\n self.mean[w], self.fwd_mean[w] = self.compute_post_mean(w, self.chain_variance)\n\n self.zeta = self.update_zeta()\n self.e_log_prob = self.compute_expected_log_prob()",
"def get_spike_counts(self, gather=True):\n raise NotImplementedError",
"def compute_weight_by_frequency(labels):\n p = tf.greater(labels, 0)\n pf = tf.to_float(p)\n positives = tf.reduce_sum(pf, axis=-1, keepdims=True) + tf.zeros_like(pf)\n negatives = tf.reduce_sum(1 - pf, axis=-1, keepdims=True) + tf.zeros_like(pf)\n total = positives + negatives\n weights = tf.where(p, negatives / total, positives / total)\n return weights"
] | [
"0.57587546",
"0.5575326",
"0.53477293",
"0.52793074",
"0.51654255",
"0.5154966",
"0.5104432",
"0.50244606",
"0.49709153",
"0.49697486",
"0.49534038",
"0.491249",
"0.4908834",
"0.48904198",
"0.48888883",
"0.48864844",
"0.48855996",
"0.48635137",
"0.4834516",
"0.48303828",
"0.48217052",
"0.47621202",
"0.47539562",
"0.47398427",
"0.4729863",
"0.47260767",
"0.47241467",
"0.47239938",
"0.47229415",
"0.47207573"
] | 0.7346631 | 0 |
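The `get_probs` document above applies additive (Laplace) smoothing to joint frequency counts, i.e. `(freqs + alpha) / (sum(freqs) + alpha * k)` where `k` is the number of value combinations. The following is a minimal NumPy sketch of just that formula on a standalone array; the `smoothed_probs` helper and the sample counts are hypothetical and not taken from the dataset's class.

```python
import numpy as np

def smoothed_probs(freqs: np.ndarray, alpha: float = 1.0) -> np.ndarray:
    # k = number of cells in the joint count table (product of variable cardinalities)
    k = freqs.size
    return (freqs + alpha) / (freqs.sum() + alpha * k)

counts = np.array([[3, 0], [1, 6]])       # hypothetical joint counts of two binary variables
probs = smoothed_probs(counts, alpha=1.0)
print(probs)                              # every cell gets nonzero probability
assert np.isclose(probs.sum(), 1.0)       # smoothed cells still sum to 1
```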
If not given, computes the absolute total info gain for attributes a and b. Generates an Interaction object. | def attribute_interactions(self, a, b, total_rel_ig_ab=None):
var_a = self.data.domain.variables[a]
var_b = self.data.domain.variables[b]
ig_a = self.info_gains[var_a.name]
ig_b = self.info_gains[var_b.name]
if not total_rel_ig_ab:
ig_ab = ig_a + ig_b - (self.class_entropy + self.h(self.get_probs(var_a, var_b))) + \
self.h(self.get_probs(var_a, var_b, self.data.domain.variables[-1]))
else:
ig_ab = ig_a + ig_b - total_rel_ig_ab * self.class_entropy
inter = Interaction(var_a, var_b, ig_a, ig_b, ig_ab, self.class_entropy)
return inter | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def infoGain(self,attr, data, target_attr):\n remainder = 0\n p = 0\n ent = 0\n for ele in target_attr:\n if ele == 1:\n p +=1\n \n q = p / (len(target_attr)) \n if 0 < q < 1:\n ent = -((q * math.log2(q)) + ((1-q) * math.log2(1-q))) \n \n unique = list(pd.unique(self.data_set[attr])) \n l = self.data_set[attr]\n for ele in unique:\n pk =0\n nk=0\n j=0\n for i in range (0, len(data)): #len (l) changed to len(data)\n j = j+1\n ele1 = l[i]\n if ele1 == ele:\n out = target_attr[i]\n if out == 1:\n pk += 1\n else:\n nk += 1\n if (pk+nk) != 0:\n q1 = pk / (pk +nk)\n if 0 < q1 < 1:\n e = -((q1 * math.log2(q1)) + ((1-q1) * math.log2(1-q1)))\n remainder += (pk + nk)/(len(target_attr)) * e\n \n return (ent - remainder, attr)",
"def IMPORTANCE(attribute_name, examples, attribues):\n p = 0\n n = 0\n # First count the number of p and n \n for item in examples:\n if item[-1] == 'Yes':\n p += 1\n else:\n n += 1\n Gain = B(p/(p+n)) - REMAINDER(attribute_name, examples, attribues)\n return Gain",
"def info_gain(Ex, a, nan=True):\n # Check whether examples and attributes have the same lengths.\n if len(Ex) != len(a):\n raise ValueError(\"Ex and a must be of the same size.\")\n\n # Compute the entropy of examples\n H_Ex = entropy(list(Counter(Ex).values()))\n\n # If nan is True, replace all nan values in a by the string \"__nan__\"\n if nan:\n a = ['__nan__' if isinstance(x, float) and math.isnan(x) else x for x in a]\n \n # Compute the sum of all values v in a\n sum_v = 0\n for v in set(a):\n Ex_a_v = [x for x, t in zip(Ex, a) if t == v]\n sum_v += (len(Ex_a_v) / len(Ex)) *\\\n (entropy(list(Counter(Ex_a_v).values())))\n\n # Return result\n return H_Ex - sum_v",
"def information_gain(Y, attr):\n initial_gain = entropy(Y)\n\n temp_Y = Y.tolist()\n temp_attr = attr.tolist()\n\n temp_attr = list(np.unique(attr))\n\n for a in temp_attr:\n l = []\n count = 0\n for j in attr:\n if (j == a):\n l.append(temp_Y[count])\n count+=1\n initial_gain -= ((len(l) / len(temp_Y)) * entropy(pd.Series(l)))\n return initial_gain",
"def gain(self, target_attr, attr, debug=False):\n current_entropy = self.entropy(target_attr)[0]\n # print\n # print attr\n\n gain = current_entropy - self.remainder(target_attr=target_attr, attr=attr)\n if debug is True:\n print attr, \": \", gain\n return gain",
"def _calculate_information_gain(self, cur_state, next_state, next_label):\n n = len(cur_state)\n information_gain_per_action = np.zeros((n, self.action_dim))\n prob_prev = self.classifier.get_class1_prob(obs=cur_state)\n\n for i in range(self.action_dim):\n obs_i = np.copy(next_state)\n obs_i[:, -self.action_dim:] = cur_state[:, -self.action_dim:]\n obs_i[:, - i - 1] = next_state[:, -i - 1]\n\n prob_i = self.classifier.get_class1_prob(obs=obs_i)\n class_1_gain = (prob_i - prob_prev) * next_label[:, 0]\n class_0_gain = (prob_i - prob_prev) * (1 - next_label)[:, 0]\n\n if self.positive_only:\n class_1_gain[class_1_gain < 0] = 0\n class_0_gain[class_0_gain < 0] = 0\n else:\n class_0_gain = - class_0_gain\n\n information_gain_per_action[:, - i - 1] = (class_1_gain + class_0_gain)\n\n return information_gain_per_action",
"def _calculate_information_gain(self, obs, label):\n n = len(obs)\n information_gain_per_action = np.zeros((n, self.action_dim))\n\n obs_null = np.copy(obs)\n obs_null[:, -self.action_dim:] = self.classifier.missing_value\n prob_null = self.classifier.get_class1_prob(obs=obs_null)\n\n for i in range(self.action_dim):\n obs_i = np.copy(obs)\n for j in range(self.action_dim):\n if i != j:\n obs_i[:, - j - 1] = self.classifier.missing_value\n prob_i = self.classifier.get_class1_prob(obs=obs_i)\n class_1_gain = (prob_i - prob_null) * label[:, 0]\n class_0_gain = (prob_i - prob_null) * (1 - label)[:, 0]\n\n if self.positive_only:\n class_1_gain[class_1_gain < 0] = 0\n class_0_gain[class_0_gain < 0] = 0\n else:\n class_0_gain = - class_0_gain\n\n information_gain_per_action[:, - i - 1] = (class_1_gain + class_0_gain)\n\n return information_gain_per_action",
"def info_gain_ratio(Ex, a, nan=True):\n # Check whether examples and attributes have the same lengths.\n if len(Ex) != len(a):\n raise ValueError(\"Ex and a must be of the same size.\")\n\n # Compute information gain ratio as IG/IV\n return info_gain(Ex, a, nan) / intrinsic_value(Ex, a, nan)",
"def calc_iam(a_1, a_2, a_3, a_4, a_5, a_6, aoi, loss_method):\n if loss_method == 'Janotte':\n iam = 1 - a_1 * abs(aoi) - a_2 * aoi**2\n\n if loss_method == 'Andasol':\n iam = (1 - a_1 * abs(aoi) - a_2 * aoi**2 - a_3 * aoi**3 - a_4 * aoi**4\n - a_5 * aoi**5 - a_6 * aoi**6)\n return iam",
"def elemental_descriptor(A1_ion, A2_ion, B_ion):\n ele_A1 = mg.Element(A1_ion)\n ele_A2 = mg.Element(A2_ion)\n ele_B = mg.Element(B_ion)\n ele_O = mg.Element('O') \n # A/B ion oxidation state \n common_oxidation_states_A1 = ele_A1.common_oxidation_states[0]\n common_oxidation_states_A2 = ele_A2.common_oxidation_states[0]\n common_oxidation_states_A = np.mean(common_oxidation_states_A1 + common_oxidation_states_A2)\n common_oxidation_states_B = ele_B.common_oxidation_states[0]\n # ionic radius property\n ionic_radius_A1 = float(str(ele_A1.average_ionic_radius)[:-4])\n ionic_radius_A2 = float(str(ele_A2.average_ionic_radius)[:-4])\n ionic_radius_A = (ionic_radius_A1+ ionic_radius_A2)/2\n ionic_radius_B = float(str(ele_B.average_ionic_radius)[:-4])\n ionic_radius_O = float(str(ele_O.average_ionic_radius)[:-4])\n # Tolerance factor \n TF = (ionic_radius_A + ionic_radius_O)/(np.sqrt(2)*(ionic_radius_B + ionic_radius_O))\n # Octahedral factor\n OF = ionic_radius_B/ionic_radius_O \n # ionic_radius ratios\n ionic_ration_AO = ionic_radius_A / ionic_radius_O\n ionic_ration_BO = ionic_radius_B / ionic_radius_O\n # averaged electronegativity for A and B atoms\n Pauling_electronegativity_A1 = ele_A1.X\n Pauling_electronegativity_A2 = ele_A2.X\n Pauling_electronegativity_A = (Pauling_electronegativity_A1 + Pauling_electronegativity_A2)/2\n Pauling_electronegativity_B = ele_B.X\n Pauling_electronegativity_O = ele_O.X\n # Difference in the electronegativity for A-O and B-O\n Diff_A_O = Pauling_electronegativity_A - Pauling_electronegativity_O\n Diff_B_O = Pauling_electronegativity_B - Pauling_electronegativity_O\n return [common_oxidation_states_A, common_oxidation_states_B, Pauling_electronegativity_A, Pauling_electronegativity_B, TF, OF, ionic_ration_AO, ionic_ration_BO, Diff_A_O, Diff_B_O]",
"def _calculate_information_gain(self, cur_state, next_state):\n\n n = len(cur_state)\n information_gain_per_action = np.zeros((n, self.action_dim))\n\n prob_cur = self.classifier.get_class1_prob(obs=cur_state)\n prob_next = self.classifier.get_class1_prob(obs=next_state)\n information_gain_true = (prob_next - prob_cur).reshape(-1, 1)\n\n next_state_null = np.copy(next_state)\n next_state_null[:, -self.action_dim:] = self.classifier.missing_value\n prob_next_null = self.classifier.get_class1_prob(next_state_null)\n\n for i in range(self.action_dim):\n next_state_i = np.copy(next_state)\n next_state_i[:, -self.action_dim:] = self.classifier.missing_value\n next_state_i[:, -i - 1] = next_state[:, -i - 1]\n\n prob_next_i = self.classifier.get_class1_prob(obs=next_state_i)\n information_gain_per_action[:, -i - 1] = prob_next_i - prob_next_null\n\n information_gain_sum = np.sum(information_gain_per_action, axis=1, keepdims=True)\n ratio = information_gain_true / information_gain_sum\n ratio[information_gain_sum == 0] = 0\n information_gain_per_action = information_gain_per_action * ratio\n return information_gain_per_action",
"def information_gain(features, attribute_index, targets):\r\n\r\n possible_feature_values = [0,1]\r\n \r\n possible_classifications = [0,1]\r\n \r\n feature = features[:,attribute_index]\r\n \r\n \r\n number_of_samples = len(feature)\r\n \r\n import math\r\n \r\n \r\n #current_entropy = np.sum([-(len(targets[targets==possible_classification])/number_of_samples)*math.log(len(targets[targets==possible_classification])/number_of_samples, 2) for possible_classification in possible_classifications])\r\n \r\n terms_to_be_summed_for_current_entropy = []\r\n \r\n for classification in possible_classifications:\r\n \r\n number_of_elements_with_this_classification = len(targets[targets==classification])\r\n \r\n p_for_this_classification = number_of_elements_with_this_classification/len(targets)\r\n \r\n if p_for_this_classification != 0:\r\n terms_to_be_summed_for_current_entropy.append(-p_for_this_classification*math.log(p_for_this_classification,2))\r\n else:\r\n terms_to_be_summed_for_current_entropy.append(0)\r\n \r\n current_entropy = np.sum(terms_to_be_summed_for_current_entropy)\r\n \r\n \r\n \r\n terms_to_be_summed_for_weighted_entropy = []\r\n \r\n for possible_value in possible_feature_values:\r\n \r\n targets_split_by_feature_value = targets[feature.flatten() == possible_value]\r\n \r\n if len(targets_split_by_feature_value) != 0:\r\n \r\n \r\n weight_of_feature_value = len(targets_split_by_feature_value)/len(targets)\r\n \r\n terms_for_entropy_within_subset = []\r\n \r\n for classification in possible_classifications:\r\n \r\n number_of_subset_elements_with_this_classification = len(targets_split_by_feature_value[targets_split_by_feature_value==classification])\r\n \r\n p_in_subset_for_this_classification = number_of_subset_elements_with_this_classification/len(targets_split_by_feature_value)\r\n \r\n if p_in_subset_for_this_classification != 0:\r\n terms_for_entropy_within_subset.append(-p_in_subset_for_this_classification*math.log(p_in_subset_for_this_classification,2))\r\n else:\r\n terms_for_entropy_within_subset.append(0)\r\n \r\n entropy_within_subset = np.sum(terms_for_entropy_within_subset)\r\n \r\n terms_to_be_summed_for_weighted_entropy.append(weight_of_feature_value*entropy_within_subset)\r\n \r\n weighted_entropy = np.sum(terms_to_be_summed_for_weighted_entropy)\r\n \r\n \r\n #current_entropy = np.sum(terms_to_be_summed_for_current_entropy)\r\n \r\n #weighted_entropy = np.sum([(len(feature[feature==possible_value])/number_of_samples)*(len(targets[feature==possible_value][targets[feature==possible_value]==possible_classification])/len(targets[feature==possible_value]))*math.log((len(targets[feature==possible_value][targets[feature==possible_value]==possible_classification])/len(targets[feature==possible_value])), 2) for possible_classification in possible_classifications for possible_value in possible_feature_values])\r\n\r\n information_gain = current_entropy - weighted_entropy \r\n \r\n return information_gain",
"def _calculate_information_gain(self, obs, label):\n n = len(obs)\n information_gain_per_action = np.zeros((n, self.action_dim))\n\n obs_null = np.copy(obs)\n obs_null[:, -self.action_dim:] = self.classifier.missing_value\n ce_loss_null = self.classifier.calculate_ce_loss(obs=obs_null, label=label)\n\n for i in range(self.action_dim):\n obs_i = np.copy(obs)\n for j in range(self.action_dim):\n if i != j:\n obs_i[:, - j - 1] = self.classifier.missing_value\n ce_loss_i = self.classifier.calculate_ce_loss(obs=obs_i, label=label)\n\n information_gain_per_action[:, - i - 1] = (ce_loss_null - ce_loss_i)[:, 0]\n\n return information_gain_per_action",
"def calculate_distance(a,b,data):\r\n d=0 #distance\r\n for i in range(data.numAttributes):\r\n if a[i]!=data.labelMissingData and b[i]!=data.labelMissingData: \r\n if not data.attributeInfo[i][0]: #Discrete Attribute\r\n if a[i] != b[i]:\r\n d+=1\r\n else: #Continuous Attribute\r\n min_bound=float(data.attributeInfo[i][1][0])\r\n max_bound=float(data.attributeInfo[i][1][1])\r\n d+=abs(float(a[i])-float(b[i]))/float(max_bound-min_bound) #Kira & Rendell, 1992 -handling continiuous attributes\r\n return d",
"def informationGain(data, class_label, attribute, indices=None):\n\tsubset = data[:] if indices == None else data.loc[indices]\n\t\n\tsublist = subset[attribute].tolist()\n\tvalues = list(set(sublist))\n\tinfoGain = entropyOnSubset(subset, class_label)\n\t\n\t#print (sublist)\n\t\n\tfor i in values:\n\t\tindex = list(subset.index[subset[attribute] == i])\n\t\tinfoGain -= sublist.count(i)/len(sublist) * entropyOnSubset(subset, class_label, index)\n\n\t\n\treturn infoGain",
"def info_gain(left, right, current_uncertainty):\n p = float(len(left)) / (len(left) + len(right))\n return current_uncertainty - p * gini(left) - (1 - p) * gini(right)",
"def info_gain(self, left, right, current_uncertainty):\n p = float(len(left)) / (len(left) + len(right))\n return current_uncertainty - p * self.gini(left) - (1 - p) * self.gini(right)",
"def getInfoGain(self, data, index):\n # count for True Positive, True Negitive, False Positive and False Negitive\n TP = 0\n TN = 0\n FP = 0\n FN = 0\n\n for record in data:\n attrValue = record[index]\n clsValue = record[-1]\n\n if attrValue == 'True':\n if clsValue == 'en':\n TP += 1\n else:\n TN += 1\n elif attrValue == 'False':\n if clsValue == 'en':\n FP += 1\n else:\n FN += 1\n\n # calculate class entropy and entropy of each value of an attribute\n E_init = self.getEntropy(self.enClass, self.nlClass)\n E_attr = self.getAttrEntropy(TP, TN, FP, FN)\n\n infoGain = E_init - E_attr\n\n self.entropy = E_init\n return infoGain",
"def attributeSelection(data, attributes, class_label, indices=None):\n\tbest = 0\n\tbestIndex = 0\n\tcounter = 0\n\tfor i in attributes:\n\t\tinfoG = informationGain(data, class_label, i, indices)\n\t\tif infoG > best:\n\t\t\tbest = infoG\n\t\t\tbestIndex = counter\n\t\tcounter += 1 \n\t\n\treturn bestIndex",
"def information_gain(f1, f2):\n\n ig = ee.entropyd(f1) - conditional_entropy(f1, f2)\n return ig",
"def impurity_gain(node, attribute):\n data_subset1 = filter_data(node.data,node.ancestors)\n data_counts = list(Counter(data_subset1['Class']).values())\n base_impurity = impurity(data_counts)\n num_values = len(data_subset1)\n impurity_sum = 0\n \n for value in [0,1]:\n data_subset2 = filter_data(node.data, node.ancestors + [(attribute,value)])\n subset_counts = list(Counter(data_subset2['Class']).values())\n impurity_sum += (len(data_subset2)/num_values) * impurity(subset_counts)\n \n return base_impurity - impurity_sum",
"def synergy(g1, g2, c):\n return mutual_info(joint_dataset(g1, g2), c) -\\\n mutual_info(g1, c) - mutual_info(g2, c)",
"def attribute(self, attribute):\n value = 3\n if self.age == \"child\":\n value -= 1\n if attribute == \"physique\" or attribute == \"phy\":\n if self.age == \"adult\":\n value += 1\n if self.gender == \"male\":\n value += 1\n elif self.gender == \"female\":\n value -= 1\n\n if attribute == \"sensitivity\" or attribute == \"sns\":\n if self.age == \"child\":\n value += 2\n if self.gender == \"male\":\n value -= 1\n elif self.gender == \"female\":\n value += 1\n\n if attribute == \"agility\" or attribute == \"agi\":\n if self.age == \"child\":\n value += 1 # to be equally as high as adult and young\n elif self.age == \"elder\":\n value -= 1\n\n if attribute == \"mind\" or attribute == \"mnd\":\n if self.age == \"elder\":\n value += 1\n\n for feature in self.features:\n if feature.name == \"blood\":\n for key in feature.modifiers:\n if attribute == key:\n value += feature.modifiers[key]\n\n if value < 1:\n value = 1\n return value",
"def return_infogain(instances, labels):\n # some initial calculations\n infogain = dict.fromkeys(range(instances.shape[1]), 0)\n cnt = Counts(instances, labels)\n len_instances = instances.shape[0]\n feature_frequency = cnt.count_document_frequency()\n label_frequency = cnt.count_label_frequency()\n label_feature_frequency = cnt.count_label_feature_frequency()\n label_probability = [(label_frequency[label] / len_instances) for label in label_frequency.keys()]\n initial_entropy = -sum([prob * math.log(prob, 2) for prob in label_probability if prob != 0])\n # assign infogain values to each feature\n for feature in feature_frequency.keys():\n # calculate positive entropy\n frequency = feature_frequency[feature]\n if frequency > 0:\n feature_probability = frequency / len_instances\n positive_label_probabilities = []\n for label in labels:\n if label_feature_frequency[label][feature] > 0:\n positive_label_probabilities.append(label_feature_frequency[label][feature] / frequency)\n else:\n positive_label_probabilities.append(0)\n positive_entropy = -sum([prob * math.log(prob, 2) for prob in positive_label_probabilities if prob != 0])\n else:\n positive_entropy = 0\n # calculate negative entropy\n inverse_frequency = len_instances - feature_frequency[feature]\n negative_probability = inverse_frequency / len_instances\n negative_label_probabilities = [((label_frequency[label] - label_feature_frequency[label][feature]) / inverse_frequency) for label in labels]\n negative_entropy = -sum([prob * math.log(prob, 2) for prob in negative_label_probabilities if prob != 0])\n # based on positive and negative entropy, calculate final entropy\n final_entropy = positive_entropy - negative_entropy\n infogain[feature] = initial_entropy - final_entropy\n return infogain",
"def calc_tohit(attr, level):\n return level + calc_attr_mod(attr)",
"def informationGain2(data, attribute):\n \n split_data = splitBy(data, attribute) \n weighted_entropies = 0\n \n for set in split_data:\n weighted_entropies += len(set) / len(data) * entropy2(set) \n \n columnIG = entropy2(data) - weighted_entropies\n \n return columnIG",
"def _impurity(y, y1, y2, sample_weights=None):\n # YOUR CODE HERE\n # begin answer\n weight_1=len(y1)\n weight_2=len(y2)\n meal_1=np.sum(y1)/float(weight_1)\n meal_2=np.sum(y2)/float(weight_2)\n diff=meal_1-meal_2\n sum_var=weight_1*weight_2*diff*diff/(weight_1+weight_2)\n # end answer\n return sum_var",
"def attenuation(distance, specific_attenuation, maximum_attenuation):\n return maximum_attenuation * (1. - exp(-distance * specific_attenuation / maximum_attenuation))",
"def getCombination(mu1, mu2, sig1, sig2, confidence1, confidence2):\n\tglobal alpha, beta, gamma\n\n\t#Standard Bayesian\n\t# sigNew = math.sqrt(math.pow(sig1, 2) + math.pow(sig2, 2))\n\t# muNew = u1 + u2\n\t# return muNew, sigNew \n\n\t##In accordance with the nature papers:\n\tsigNew = (math.pow(sig1,2) * math.pow(sig2, 2)) \\\n\t/ float((math.pow(sig1,2) + math.pow(sig2, 2)))\n\tinv1 = 1 / float((math.pow(sig1, 2)))\n\tinv2 = 1 / float((math.pow(sig2, 2)))\n\tsumInverses = inv1 + inv2\n\n\t##inverse standard deviations squared\n\t# w1 = inv1 / float(sumInverses)\n\t# w2 = inv2 / float(sumInverses)\n\n\t## equal weighting\n\t# w1 = .5\n\t# w2 = .5\n\n\t## weightings based off of confidence\n\t# summation = confidence1 + confidence2\n\t# w1 = confidence1 / float(summation)\n\t# w2 = confidence2 / float(summation)\n\n\t##weightings with exponentials\n\t# w1 = w1**.001\n\t# w2 = w2**.001\n\t# newSummation = w1 + w2\n\t# w1 = w1 / float(newSummation)\n\t# w2 = w2 / float(newSummation)\n\n\t##weightings with polynomial factors\n\tw1 = (beta * confidence1 + alpha)**gamma \n\tw2 = (beta * confidence2 + alpha)**gamma \n\tnewSummation = w1 + w2\n\tw1 = w1 / float(newSummation)\n\tw2 = w2 / float(newSummation)\n\n\tmuNew = w1 * mu1 + w2 * mu2\n\treturn muNew, sigNew",
"def get_info_gain(true_rows, false_rows, current_impurity):\n avg_impurity = (len(true_rows)/(len(true_rows)+len(false_rows))) * get_gini(true_rows) + \\\n (len(false_rows)/(len(true_rows)+len(false_rows))) * get_gini(false_rows)\n return current_impurity - avg_impurity"
] | [
"0.6119889",
"0.5544115",
"0.5507098",
"0.54734284",
"0.54698795",
"0.53078294",
"0.53001773",
"0.52430487",
"0.52211547",
"0.5215387",
"0.52137595",
"0.5157385",
"0.51392186",
"0.5137646",
"0.50982416",
"0.5096558",
"0.50840324",
"0.50741106",
"0.50459766",
"0.50332594",
"0.50250083",
"0.50064546",
"0.499943",
"0.49883932",
"0.49685577",
"0.49639657",
"0.49429196",
"0.49381125",
"0.4896013",
"0.4885274"
] | 0.6960144 | 0 |
Computes the Interaction objects for n most informative pairs of attributes. For this to work, ``interaction_matrix`` must be called first. It uses a partial sort and then a full sort on the remaining n elements to get the indices of attributes. | def get_top_att(self, n):
if not self.int_M_called:
raise IndexError("Call interaction_matrix first!")
flat_indices = np.argpartition(np.tril(-self.int_matrix, -1).ravel(), n - 1)[:n]
# TODO: Consider using the partial sort from the bottleneck module for faster sorting
row_indices, col_indices = np.unravel_index(flat_indices, self.int_matrix.shape)
min_elements_order = np.argsort(-self.int_matrix[row_indices, col_indices])
row_indices, col_indices = row_indices[min_elements_order], col_indices[min_elements_order]
return [self.attribute_interactions(row_indices[k], col_indices[k],
self.int_matrix[row_indices[k], col_indices[k]]) for k in range(n)] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def interaction_matrix(self):\n\n self.int_M_called = True\n int_M = np.zeros((self.n, self.n))\n for k in range(self.n):\n for j in range(k+1):\n o = self.attribute_interactions(k, j)\n int_M[k, j] = o.rel_total_ig_ab # Store total information gain\n int_M[j, k] = o.rel_total_ig_ab # TODO: Maybe storing interactions too is not a bad idea\n # TODO: We can than easily sort either by total gain or by positive interaction\n for k in range(self.n):\n int_M[k, k] = self.info_gains[self.data.domain.attributes[k].name]\n self.int_matrix = Orange.misc.distmatrix.DistMatrix(int_M)",
"def get_object_intent_by_index(self, i):\n obj_row = self.np_table[i, :]\n att_inds = obj_row.nonzero()[0]\n atts = [self.attributes[j] for j in att_inds]\n return set(atts)",
"def top_n_combined(self, n):\n top = set()\n for feat_set in self.itervalues():\n tuples = sorted(feat_set.items(), reverse=True, key=itemgetter(1))\n best = {feat for feat, _ in tuples[:n]}\n top |= best\n return top",
"def top_n(self, n):\n top = {}\n for code, feat_set in self.iteritems():\n tuples = sorted(feat_set.items(), reverse=True, key=itemgetter(1))\n best = {feat for feat, _ in tuples[:n]}\n top[code] = best\n return top",
"def top_items(self, n=10, filter=None):\n if n > len(self): n = len(self)\n order = np.argsort(self)\n if filter is None:\n indices = order[-1:-n-1:-1]\n return [(self.label(idx), self[idx]) for idx in indices]\n idx = -1\n results = []\n while len(results) != n and idx >= -len(order):\n where = order[idx]\n label = self.label(where)\n if filter(label):\n results.append((label, self[where]))\n idx -= 1\n return results",
"def omission_index(n, sample_size):\n \n \"randomly pick some subset of sample_size agents\"\n index = np.sort(np.random.choice(n,sample_size,replace=False))\n \"double up index to choose x and y positions columns. both are used.\"\n index2 = np.repeat(2*index,2) \n \"nudge every second item to take the ith+1 column (y coordinate corresponding to chosen x)\"\n index2[1::2] += 1\n return index, index2",
"def calculate_top_interventions(X: pd.DataFrame, y_proba: np.array, n: int):\n top_n_rec = (\n pd.DataFrame(y_proba, columns=[\"false\", \"true\"])\n .sort_values(\"true\", ascending=False)\n .head(n)\n )\n\n intervention_cols = [col for col in X.columns if \"i_\" in col[:2]]\n df_result = X.loc[top_n_rec.index, intervention_cols]\n\n rec_interv = list()\n for i in range(len(df_result)):\n row = df_result.iloc[i]\n interventions = row[row == 1]\n interventions = (\n interventions.index.str.replace(\"i_\", \"\").str.replace(\"_\", \" \").tolist()\n )\n rec_interv.append(interventions + [top_n_rec.iloc[i][\"true\"]])\n\n df_rec_interv = pd.DataFrame(rec_interv)\n return df_rec_interv",
"def topMatches(prefs, person, n=5, similarity=sim_pearson):\n all_matches = [(similarity(prefs, person, other), other) \n for other in prefs.keys()\n if person != other]\n all_matches.sort()\n all_matches.reverse()\n return all_matches[0:n]",
"def get_similar_products(user_input_emb, ref_catalog, n = 5):\r\n sim_list = []\r\n for i in range(len(ref_catalog)):\r\n desc_id = ref_catalog.iloc[i]['id']\r\n emb = ref_catalog.iloc[i]['desc_embedding']\r\n cos_sim = compute_cosine_sim(emb,user_input_emb)\r\n sim_list.append((desc_id, cos_sim))\r\n top_n = sorted(sim_list, key= lambda tup: tup[1], reverse = True)[:n]\r\n return top_n",
"def interaction_context(interaction, i):\n if i == 0: return None\n\n tgt_eid = interaction[i].behavior_target_id()\n if tgt_eid is None: return None\n\n for j in range(i-1,-1,-1):\n print(\"interaction: {} {}\".format(interaction[j].agent_eid, tgt_eid))\n if interaction[j].agent_eid == tgt_eid:\n return interaction[j]\n\n return None",
"def _interaction(self, entity):\n\n # Get parameters\n att_range = np.array([agent.a_range for agent in entity], dtype=float)[:,None]\n att_strength = np.array([agent.get_advantage for agent in entity])[:,None]\n team_index = np.array([agent.team for agent in entity])\n alliance_matrix = team_index[:,None]==team_index[None,:]\n att_strength[team_index==TEAM1_BACKGROUND,] += self.BLUE_ADV_BIAS\n att_strength[team_index==TEAM2_BACKGROUND,] += self.RED_ADV_BIAS\n\n # Get distance between all agents\n x, y = np.array([agent.get_loc() for agent in entity]).T\n dx = np.subtract(*np.meshgrid(x,x))\n dy = np.subtract(*np.meshgrid(y,y))\n distance = np.hypot(dx, dy)\n\n # Get influence matrix\n infl_matrix = np.less(distance, att_range)\n infl_matrix = infl_matrix * att_strength\n friend_count = (infl_matrix*alliance_matrix).sum(axis=0)-1 # -1 to not count self\n enemy_count = (infl_matrix*~alliance_matrix).sum(axis=0)\n mask = enemy_count == 0\n\n # Add background advantage bias\n loc_background = [self._static_map[agent.get_loc()] for agent in entity]\n friend_count[loc_background==team_index] += self.STOCH_ATTACK_BIAS\n enemy_count[~(loc_background==team_index)] += self.STOCH_ATTACK_BIAS\n\n # Interaction\n if self.STOCH_ATTACK:\n result = self.np_random.rand(*friend_count.shape) < friend_count / (friend_count + enemy_count)\n else:\n result = friend_count > enemy_count\n result[mask] = True\n\n return result",
"def _combinations(n_features, n_args, interaction_only):\n comb = combinations if interaction_only else combinations_w_r\n return comb(range(n_features), n_args)",
"def _compute_ind_mat(n, m, nb_coeff):\r\n\r\n ind_mat = np.zeros((nb_coeff, n))\r\n curr_idx = 0\r\n for indexes in itr.combinations_with_replacement(range(m), n):\r\n ind_mat[curr_idx] = np.array(indexes)\r\n curr_idx += 1\r\n\r\n return ind_mat",
"def test_sort_more_than_n(self):\n e1 = Experience(rid=1, uid=3, experience=100)\n e2 = Experience(rid=1, uid=1, experience=89)\n e3 = Experience(rid=1, uid=12, experience=1343)\n e4 = Experience(rid=1, uid=22, experience=1839)\n e5 = Experience(rid=1, uid=2, experience=20)\n db.session.add(e1)\n db.session.add(e2)\n db.session.add(e3)\n db.session.add(e4)\n db.session.add(e5)\n db.session.commit()\n list = top_n_in_order(1, 3)\n self.assertEqual([(22, 1839), (12, 1343), (3, 100)], list)",
"def top_matches(prefs, person, n=5, similarity=sim_pearson):\n scores = [(similarity(prefs, person, other), other)\n for other in prefs if other != person]\n\n scores.sort()\n scores.reverse()\n return scores[0:n]",
"def top_n_similar(base_h_id, comp_hotels, n_hotels=None, axes_omissions=[]):\n axes = get_axes(axes_omissions)\n similar_hotels = []\n base_hotel_chromosomes = get_hotel_chromosomes([base_h_id])[base_h_id]\n comp_hotel_chromosomes = get_hotel_chromosomes(comp_hotels)\n for c in comp_hotels:\n aggregate_similarity, similarity = get_similarity(\n base_hotel_chromosomes, comp_hotel_chromosomes[c], axes)\n similar_hotels.append((c, aggregate_similarity, similarity))\n similar_hotels.sort(key=itemgetter(1), reverse=True)\n if n_hotels:\n return similar_hotels[:n_hotels]\n else:\n return similar_hotels",
"def interaction_responses(interaction, i):\n\n agent_eid = interaction[i].agent_eid\n\n # no agent, no response (REM: that's not right, but is given how we're creating interactions right now)\n if agent_eid is None: return []\n\n end = interaction[i].end_clock\n\n for j in range(i+1, len(interaction)):\n next = interaction[j]\n if next.start_clock <= end and next.behavior_target_id() == agent_eid:\n yield (interaction[j])",
"def process_actions(self, n_steps, actions):\n # Each row of actions is one time step,\n # row contains action indices for all agents\n # Convert to [time, agents, l_action]\n # so each agent gets its own 1-hot row vector\n actions_1hot = np.zeros([n_steps, self.n_agents, self.l_action], dtype=int)\n grid = np.indices((n_steps, self.n_agents))\n actions_1hot[grid[0], grid[1], actions] = 1\n # Convert to format [time*agents, agents-1, l_action]\n # so that the set of <n_agent> actions at each time step\n # is duplicated <n_agent> times, and each duplicate\n # now contains all <n_agent>-1 actions representing\n # the OTHER agents actions\n list_to_interleave = []\n for n in range(self.n_agents):\n # extract all actions except agent n's action\n list_to_interleave.append( actions_1hot[:, np.arange(self.n_agents)!=n, :] )\n # interleave\n actions_others_1hot = np.zeros([self.n_agents*n_steps, self.n_agents-1, self.l_action])\n for n in range(self.n_agents):\n actions_others_1hot[n::self.n_agents, :, :] = list_to_interleave[n]\n # In-place reshape of actions to [time*n_agents, l_action]\n actions_1hot.shape = (n_steps*self.n_agents, self.l_action)\n\n return actions_1hot, actions_others_1hot",
"def most_informative_features(self, n=100):\n\t# The set of (fname, fval) pairs used by this classifier.\n\tfeatures = set()\n\t# The max & min probability associated w/ each (fname, fval)\n\t# pair. Maps (fname,fval) -> float.\n\tmaxprob = defaultdict(lambda: 0.0)\n\tminprob = defaultdict(lambda: 1.0)\n\n\tfor (label, fname), probdist in self._feature_probdist.items():\n\t\tfor fval in probdist.samples():\n\t\t\tfeature = (fname, fval)\n\t\t\tfeatures.add( feature )\n\t\t\tp = probdist.prob(fval)\n\t\t\tprint p\n\t\t\tmaxprob[feature] = max(p, maxprob[feature])\n\t\t\tminprob[feature] = min(p, minprob[feature])\n\t\t\tif minprob[feature] == 0:\n\t\t\t\tfeatures.discard(feature)\n\t\t\t# print maxprob\n\t\t\t# print minprob\n\n\n\t# Convert features to a list, & sort it by how informative\n\t# features are.\n\tfeatures = sorted(features,\n\t key=lambda feature_: minprob[feature_]/maxprob[feature_])\n\treturn features[:n]",
"def _generate_interaction_histogram(interactions, num_users, num_items):\n histogram = np.zeros(num_items)\n np.add.at(histogram, interactions, 1)\n # Check that there's one interaction per user\n if histogram.sum() != num_users:\n raise ValueError(\"The sum of interactions must be equal to the number of users\")\n return histogram",
"def _compute_attr_histograms(\n self, \n importance_list: List[np.ndarray], \n concept_list: List[np.ndarray],\n label_list: List[int],\n n_attrs: int\n ) -> Dict:\n all_histograms = {}\n for k in self.keep_nuclei_list:\n all_histograms[k] = {}\n\n attrs = [c[np.argsort(s)[-k:]] for c, s in zip(concept_list, importance_list)]\n attrs = np.concatenate(attrs, axis=0) # (#samples x k) x #attrs \n attrs[attrs == inf] = 0 # ensure no weird values in attributes \n attrs = minmax_scale(attrs) \n attrs = np.reshape(attrs, (-1, k, n_attrs)) # #samples x k x #attrs \n attrs = list(attrs)\n\n for t in range(self.n_tumors):\n\n # i. extract the samples of type t\n selected_attrs = [a for l, a in zip(label_list, attrs) if l==t]\n selected_attrs = np.concatenate(selected_attrs, axis=0)\n\n # iii. build the histogram for all the attrs (dim = #nuclei x attr_types)\n all_histograms[k][t] = np.array(\n [self.build_hist(selected_attrs[:, attr_id]) for attr_id in range(selected_attrs.shape[1])]\n )\n return all_histograms",
"def analytical_energies(n):\n\n energies = []\n for nx in range(n):\n for ny in range(n):\n energies.append(energy(nx,ny))\n energies = np.sort(energies)\n return energies",
"def specified_unchanging_attributes(self) -> List[int]:\n indices = []\n\n for idx, (cpi, epi) in enumerate(zip(self.condition, self.effect)):\n if isinstance(epi, ProbabilityEnhancedAttribute):\n if cpi != self.cfg.classifier_wildcard and \\\n epi.does_contain(cpi):\n indices.append(idx)\n else:\n if cpi != self.cfg.classifier_wildcard and \\\n epi == self.cfg.classifier_wildcard:\n indices.append(idx)\n\n return indices",
"def _nn(self, d, n=1):\n self._log.debug(\"generating hash for descriptor\")\n d_v = d.vector()\n d_h = self.lsh_functor.get_hash(d_v)\n\n def comp_descr_dist(d2_v):\n return self._distance_function(d_v, d2_v)\n\n with self._model_lock:\n self._log.debug(\"getting near hashes\")\n hi = self.hash_index\n if hi is None:\n # Make on-the-fly linear index\n hi = LinearHashIndex()\n # not calling ``build_index`` because we already have the int\n # hashes.\n hi.index = numpy.array(list(self.hash2uuids_kvstore.keys()))\n near_hashes, _ = hi.nn(d_h, n)\n\n self._log.debug(\"getting UUIDs of descriptors for nearby hashes\")\n neighbor_uuids = []\n for h_int in map(bit_vector_to_int_large, near_hashes):\n # If descriptor hash not in our map, we effectively skip it.\n # Get set of descriptor UUIDs for a hash code.\n #: :type: set[collections.Hashable]\n near_uuids = self.hash2uuids_kvstore.get(h_int, set())\n # Accumulate matching descriptor UUIDs to a list.\n neighbor_uuids.extend(near_uuids)\n self._log.debug(\"-- matched %d UUIDs\", len(neighbor_uuids))\n\n self._log.debug(\"getting descriptors for neighbor_uuids\")\n neighbors = \\\n list(self.descriptor_index.get_many_descriptors(neighbor_uuids))\n\n # Done with model parts at this point, so releasing lock.\n\n self._log.debug(\"ordering descriptors via distance method '%s'\",\n self.distance_method)\n self._log.debug('-- getting element vectors')\n neighbor_vectors = elements_to_matrix(neighbors,\n report_interval=1.0)\n self._log.debug('-- calculating distances')\n distances = list(map(comp_descr_dist, neighbor_vectors))\n self._log.debug('-- ordering')\n ordered = sorted(zip(neighbors, distances),\n key=lambda p: p[1])\n self._log.debug('-- slicing top n=%d', n)\n return list(zip(*(ordered[:n])))",
"def _compute_hist_distances(\n self,\n all_histograms: Dict,\n n_attr: int\n ) -> np.ndarray:\n all_distances = np.empty((self.n_keep_nuclei, self.n_class_pairs, n_attr))\n for k_id , k in enumerate(self.keep_nuclei_list):\n omega = 0\n for tx in range(self.n_tumors):\n for ty in range(self.n_tumors):\n if tx < ty:\n for attr_id in range(n_attr):\n all_distances[k_id, omega, attr_id] = wasserstein_distance(\n all_histograms[k][tx][attr_id],\n all_histograms[k][ty][attr_id]\n )\n omega += 1\n return all_distances",
"def test_sort_fewer_than_n(self):\n e1 = Experience(rid=1, uid=3, experience=100)\n e2 = Experience(rid=1, uid=1, experience=89)\n e3 = Experience(rid=1, uid=12, experience=1343)\n db.session.add(e1)\n db.session.add(e2)\n db.session.add(e3)\n db.session.commit()\n list = top_n_in_order(1,5)\n self.assertEqual([(12, 1343), (3, 100), (1, 89)], list)",
"def matIxs( n ):\n rows, cols = np.indices( (n,n) )\n row = rows.flatten()\n col = cols.flatten()\n \n return map( lambda x: Vector( x[0], x[1] ), zip( col, row ) )",
"def load_interaction(self):\n processed_file_path = os.path.join(\n self.processed_path, f\"{self.dataset_name}_interaction.npz\"\n )\n if not os.path.exists(os.path.join(processed_file_path)):\n try:\n self.preprocess()\n except FileNotFoundError:\n print(\"origin file is broken, re-download it\")\n raw_file_path = os.path.join(self.raw_path, f\"{self.dataset_name}.zip\")\n os.remove(raw_file_path)\n self.download()\n finally:\n self.preprocess()\n data = get_dataframe_from_npz(processed_file_path)\n print(\"-\" * 80)\n print(\"Raw interaction statistics\")\n print(\n tabulate(\n data.agg([\"count\", \"nunique\"]),\n headers=data.columns,\n tablefmt=\"psql\",\n disable_numparse=True,\n )\n )\n print(\"-\" * 80)\n if self.min_o_c > 0:\n data = filter_user_item_order(\n data, min_u_c=self.min_u_c, min_i_c=self.min_i_c, min_o_c=self.min_o_c\n )\n elif self.min_u_c > 0 or self.min_i_c > 0:\n data = filter_user_item(data, min_u_c=self.min_u_c, min_i_c=self.min_i_c)\n\n print(\"-\" * 80)\n print(\n \"Interaction statistics after filtering \"\n + f\"-- min_u_c:{self.min_u_c}, min_i_c:{self.min_i_c}, min_o_c:{self.min_o_c}.\"\n )\n print(\n tabulate(\n data.agg([\"count\", \"nunique\"]),\n headers=data.columns,\n tablefmt=\"psql\",\n disable_numparse=True,\n )\n )\n print(\"-\" * 80)\n return data",
"def cy_process_recommendations(entities, scores, n=10):\n r = c_funcs.cy_aggregate_scores(entities, scores, n)\n heapq.heapify(r)\n return {'result': [{\"item\": k, \"score\": v} for k, v in heapq.nlargest(\n n, r, key= lambda x: x[1])]}",
"def oprime_inds(self, obj_inds):\n if type(obj_inds) == set:\n obj_inds = list(obj_inds)\n try:\n common_intent = self.np_table[obj_inds[0], :].copy()\n except IndexError:\n return set(range(len(self.attributes)))\n else:\n for obj_ind in obj_inds[1:]:\n common_intent &= self.np_table[obj_ind, :]\n return common_intent.nonzero()[0]"
] | [
"0.60471845",
"0.54465884",
"0.52681583",
"0.5024728",
"0.4996432",
"0.49814323",
"0.49585322",
"0.49543592",
"0.49077043",
"0.4857076",
"0.48561823",
"0.48455074",
"0.4830593",
"0.48272932",
"0.48052135",
"0.47973144",
"0.47867075",
"0.4769542",
"0.47667968",
"0.47630936",
"0.4744796",
"0.4721506",
"0.47088555",
"0.4677185",
"0.46437612",
"0.4614968",
"0.46045065",
"0.45853588",
"0.45741454",
"0.45653203"
] | 0.72928554 | 0 |
Returns the list of names of args/kwargs without defaults from `fun` signature. | def get_required_kwargs(fun, skip_positional=0):
sig = inspect.signature(fun)
# the params from signature with up to skip_positional filtered out
# (less only if there is not enough of positional args)
params = [(name, param) for i, (name, param) in enumerate(sig.parameters.items())
if i >= skip_positional or param.kind not in
[inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.POSITIONAL_ONLY]]
return [
name for name, param in params if param.default is inspect.Parameter.empty
and param.kind in [inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY]
] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getArgs(func):\n # exclude the defaults at the end (hence the [:-1])\n args = list(utils.flatten(inspect.getargspec(func)[:-1]))\n return set(args).difference(set([None]))",
"def list_kwargs(func):\n \n details = inspect.getargspec(func)\n nopt = len(details.defaults)\n \n return details.args[-nopt:]",
"def get_kwd_args(func):\n try:\n sig = inspect.signature(func)\n except AttributeError:\n args, _, _, defaults = inspect.getargspec(func)\n if defaults:\n kwonlyargs = args[-len(defaults):]\n else:\n kwonlyargs = []\n else:\n kwonlyargs = {p.name:p.default for p in sig.parameters.values()\n if p.default is not p.empty}\n\n return kwonlyargs",
"def derive_args(func):\n args = inspect.getfullargspec(func).args\n if args and is_selfish_name(args[0]):\n del args[0]\n return args",
"def get_default_args(func):\n signature = inspect.signature(func)\n return {\n k: v.default\n for k, v in signature.parameters.items()\n if v.default is not inspect.Parameter.empty\n }",
"def get_keyword_args(function):\n argspec = inspect.getargspec(function)\n kwargs = argspec.args[len(argspec.args) - len(argspec.defaults):]\n kwargs = {arg: value for arg, value in zip(kwargs, argspec.defaults)}\n return kwargs",
"def extract_keywords(func):\n if hasattr(func, 'im_func'):\n func = func.im_func\n\n try:\n return func.func_code.co_varnames[-len(func.func_defaults):]\n except (TypeError, ValueError, IndexError):\n return tuple()",
"def missingArgs(func, argdict):\n return set(getRequiredArgs(func)).difference(argdict)",
"def get_arguments(callable, exclude):\n info = arginfo(callable)\n defaults = info.defaults or []\n defaults = [None] * (len(info.args) - len(defaults)) + list(defaults)\n return {name: default for (name, default) in zip(info.args, defaults)\n if name not in exclude}",
"def _getargs(fn_sig):\n params = fn_sig.parameters\n args = []\n for k, v in params.items():\n if (v.kind & v.POSITIONAL_OR_KEYWORD) == v.POSITIONAL_OR_KEYWORD:\n args.append(k)\n else:\n msg = \"%s argument type unsupported in jitclass\" % v.kind\n raise errors.UnsupportedError(msg)\n return args",
"def invalid_args(func, argdict):\r\n args, _, keywords, _ = inspect.getargspec(func)\r\n if keywords:\r\n return set() # All accepted\r\n return set(argdict) - set(args)",
"def invalidArgs(func, argdict):\n args, varargs, varkw, defaults = inspect.getargspec(func)\n if varkw:\n return set() # All accepted\n return set(argdict) - set(args)",
"def argnames(method):\n return [arg for arg in method.__code__.co_varnames if arg != \"self\"]",
"def func_var_names(func):\n names = func.__code__.co_varnames[:func.__code__.co_argcount]\n return names",
"def unused_kwargs(kw):\n fn_kw = dict(base_class=None,\n base_name=None, name=None, base_arg=None, base_kw=None, parent=None,\n infer_kw=None, in_shape='BCD', base_shape=None, out_shape='BCD', tuple_out=False,\n forward_arg=None, forward_kw=None, initialization=None, activation=None, )\n return {k:v for k, v in kw.items() if k not in fn_kw}",
"def _get_args(function, varargs=False):\n\n try:\n params = signature(function).parameters\n except ValueError:\n # Error on builtin C function\n return []\n args = [\n key\n for key, param in params.items()\n if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)\n ]\n if varargs:\n varargs = [\n param.name\n for param in params.values()\n if param.kind == param.VAR_POSITIONAL\n ]\n if len(varargs) == 0:\n varargs = None\n return args, varargs\n else:\n return args",
"def get_python_function_arguments(f):\n # Note that we only return non-optional arguments (we assume that any optional args are not specified).\n # This allows to, e.g., accept max(a, b, *more, name='') as a binary function\n param_specs = inspect.getfullargspec(f)\n annotations = param_specs.annotations\n arg_names = param_specs.args\n defaults = param_specs.defaults # \"if this tuple has n elements, they correspond to the last n elements listed\n # in args\"\n if defaults:\n arg_names = arg_names[:-len(defaults)]\n return (arg_names, annotations)",
"def filter_kwargs(function, **kwargs):\n\n kwargs = deepcopy(kwargs)\n if sys.version_info[0] >= 3:\n args = function.__code__.co_varnames\n else:\n args = function.func_code.co_varnames\n\n args = set(kwargs.keys()) - set(args)\n for key in args:\n kwargs.pop(key)\n\n return kwargs",
"def get_num_positional_args(fun):\n sig = inspect.signature(fun)\n return len([\n name for name, param in sig.parameters.items() if param.kind in\n [inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.POSITIONAL_ONLY]\n ])",
"def remove_unused_args(args, thnn_args):\n def clean_name(name):\n name = name[:name.index('[')] if '[' in name else name\n if name.endswith('_'):\n name = name[:-1]\n return name\n uses = set([clean_name(arg['name']) for arg in thnn_args])\n uses.add('output_mask')\n args = [arg for arg in args if arg['name'] in uses]\n for arg in args:\n if 'default' in arg:\n del arg['default']\n return args",
"def get_misused_opt_arg_dec():\n return list(incompletely_used_decorators.values())",
"def filter_args(func, keys):\n filtered = {}\n sign = list(signature(func).parameters.keys())\n for k, v in {**keys}.items():\n if k in sign:\n filtered[k] = v\n return filtered",
"def filter_extra_accepted_kwargs(fun, kwargs, skip_positional=0):\n sig = inspect.signature(fun)\n # the params from signature with up to skip_positional filtered out\n # (less only if there is not enough of positional args)\n params = [(name, param) for i, (name, param) in enumerate(sig.parameters.items())\n if i >= skip_positional or param.kind not in\n [inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.POSITIONAL_ONLY]]\n extra = [\n name for (name, param) in params\n if param.kind in [inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY]\n ]\n return {name: value for name, value in kwargs.items() if name in extra}",
"def get_arg_name(args):\n names = []\n for arg in args:\n if type(arg).__name__ == 'ID':\n names.append(arg.name)\n elif type(arg).__name__ == 'UnaryOp':\n names.append(arg.expr.name)\n elif type(arg).__name__ == 'StructRef':\n #############################################\n # So far, we don't care about this situation:\n # fun(a->b)\n # POSSIBLE CODE HERE\n #############################################\n names.append(None)\n return names",
"def _get_param_names(self):\r\n return sorted([p\r\n for p in self.__dict__\r\n if p != 'additional_args'])",
"def get_input_arguments(kwargs, function, warn=True):\n np.set_printoptions(threshold=20)\n print('\\narguments to {}:'.format(function.__qualname__))\n params = inspect.signature(function)\n input_kwargs = {}\n not_arguments = {}\n for k, v in kwargs.items():\n if k in params.parameters:\n input_kwargs[k] = v\n print_item(k, v)\n else:\n not_arguments[k] = v\n if warn:\n print('\\nother arguments:')\n for k, v in not_arguments.items():\n #print('{}: {}'.format(k, v))\n print_item(k, v)\n print('\\n')\n return input_kwargs",
"def get_input_arguments(kwargs, function, warn=True):\n np.set_printoptions(threshold=20)\n print('\\narguments to {}:'.format(function.__qualname__))\n params = inspect.signature(function)\n input_kwargs = {}\n not_arguments = {}\n for k, v in kwargs.items():\n if k in params.parameters:\n input_kwargs[k] = v\n print_item(k, v)\n else:\n not_arguments[k] = v\n if warn:\n print('\\nother arguments:')\n for k, v in not_arguments.items():\n #print('{}: {}'.format(k, v))\n print_item(k, v)\n print('\\n')\n return input_kwargs",
"def _get_function_defaults(func: FunctionType) -> dict[str, Any]:\n # extracted bit from inspect.signature... ~20x faster\n pos_count = func.__code__.co_argcount\n arg_names = func.__code__.co_varnames\n\n defaults = func.__defaults__ or ()\n\n non_default_count = pos_count - len(defaults)\n positional_args = arg_names[:pos_count]\n\n output = {\n name: defaults[offset]\n for offset, name in enumerate(positional_args[non_default_count:])\n }\n if func.__kwdefaults__:\n output.update(func.__kwdefaults__)\n return output",
"def getargspec(self,obj):\n\n if inspect.isfunction(obj):\n func_obj = obj\n elif inspect.ismethod(obj):\n func_obj = obj.im_func\n else:\n raise TypeError, 'arg is not a Python function'\n args, varargs, varkw = inspect.getargs(func_obj.func_code)\n return args, varargs, varkw, func_obj.func_defaults",
"def signature(function):\n\tdesc = inspect.getargspec(function)\n\tif desc[3]:\n\t\tldefault = len(desc[3])\n\t\tdefault = desc[3]\n\t\tsign = ','.join(desc[0][:-ldefault])\n\telse:\n\t\tldefault = 0\n\t\tdefault=[]\n\t\tsign = ','.join(desc[0])\t\n\tfor n,v in zip(desc[0][-ldefault:],default):\n\t\tsign += ','+n+\"=\"+str(v)\t\n\tif desc[1]:\n\t\tsign +=',*'+desc[1]\n\tif desc[2]:\n\t\tsign +=',**'+desc[2]\t\n\tif sign and sign[0]==',': sign = sign[1:]\n\treturn sign"
] | [
"0.7373131",
"0.70837766",
"0.70198065",
"0.6812793",
"0.6719347",
"0.66698635",
"0.66303456",
"0.65023136",
"0.6472078",
"0.6378711",
"0.63185936",
"0.63058794",
"0.62759775",
"0.6262447",
"0.6222361",
"0.62066025",
"0.6155284",
"0.59993124",
"0.59842724",
"0.5961036",
"0.5934872",
"0.5926313",
"0.5886774",
"0.58828527",
"0.5840137",
"0.5815416",
"0.5815416",
"0.5793373",
"0.57825714",
"0.5780995"
] | 0.7168854 | 1 |
When a team is created, its survey is automatically created. | def test_create_team_creates_survey(self):
user = User.create(name='User Foo', email='[email protected]')
user.put()
code = 'trout viper'
team_response = self.testapp.post_json(
'/api/teams',
{
'name': 'Team Foo',
'code': code,
'program_id': self.ep_program.uid,
},
headers=self.login_headers(user),
)
team_dict = json.loads(team_response.body)
survey_result = Survey.get(team_id=team_dict['uid'])
self.assertEqual(len(survey_result), 1)
survey = survey_result[0]
return user, team_dict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_teams_create(self):\n pass",
"def test_create_team(self):\n pass",
"def test_generate_survey(self):\n\n result = generate_survey.apply((self.user.id,\n self.report.get_daily().id)).get()\n self.assertTrue(result, \"should create a survey given a valid daily report and user on the team\")\n\n result = generate_survey.apply((self.admin.id,\n self.report.get_daily().id)).get()\n self.assertFalse(result, \"User is not on this team therefore survey shouldn't be created\")",
"def create_challenge_team(request, challenge_pk):\n\tif request.method == \"POST\":\n\t\tteam_name = request.POST[\"team-name\"]\n\t\t\n\t\tnew_team = ChallengeTeam()\n\t\tnew_team.team_name = team_name\n\t\t\n\t\tselected_challenge = Challenge.objects.get(pk = challenge_pk)\n\t\tnew_team.challenge = selected_challenge\n\t\t\n\t\tnew_team.save()\n\t\t\n\t\treturn redirect(\"/challenge/view/\" + str(challenge_pk))\n\t\t\n\telse:\n\t\tselected_challenge = Challenge.objects.get(pk = challenge_pk)\n\t\t\n\t\tcontext = RequestContext(request, {\"challenge_name\" : selected_challenge.name})\n\t\treturn render_to_response(\"encourage/create_team.html\", context)",
"def perform_create(self, serializer):\n team = get_object_or_404(models.Team, pk=self.kwargs.get('pk'))\n\n return serializer.save(team=team)",
"def create(self, body):\n\t\tif self.has_permission('RightTPI') is False:\n\t\t\tself.no_access()\n\n\t\tid_survey = uuid.uuid4()\n\t\tid_language_content = MultiLang.set(body['name'], True)\n\n\t\twith Database() as db:\n\t\t\tdb.insert(Table(id_survey, id_language_content, body['survey_type']))\n\t\t\tdb.commit()\n\n\t\treturn {\n\t\t\t'id_survey': id_survey,\n\t\t\t'message': 'survey successfully created'\n\t\t}",
"def create_empty_survey(self, tournament_date: str) -> json:\n payload = {\n \"title\": \"Side ASS Community Poll - \" + tournament_date\n }\n return self.make_request(RequestTypes.POST, payload)",
"def test_teams_save_team_v1(self):\n pass",
"def test_get_for_team(self):\n user, team_dict = self.test_create_team_creates_survey()\n response = self.testapp.get(\n '/api/teams/{}/survey'.format(team_dict['uid']),\n headers=self.login_headers(user),\n )\n survey_dict = json.loads(response.body)\n self.assertTrue(survey_dict['uid'].startswith('Survey'))",
"def test_create_new_team(self):\n default_user = AnotherUserFactory(email_confirmed=True)\n token = Token.objects.get(user=default_user)\n self.client.credentials(\n HTTP_AUTHORIZATION=f'Token {token.key}')\n\n data = {\n 'name': 'Griffons',\n 'description': 'Only strong souls can be joined us.'\n }\n response = self.client.post(reverse('api:teams-list'), data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertTrue(Team.objects.filter(name=data['name']).exists())",
"def _create_test_survey(self):\n return SurveyForm.create(self.test_survey_name, self.test_form)",
"def create_team(request):\n if request.method == 'POST':\n email = request.session.get('email', None)\n team_name = request.POST.get('team_name', None)\n team = Team(name=team_name)\n team.save()\n\n message = \"Team created, please use the cool search feature and assign yourself to the team\"\n messages.add_message(request, messages.INFO, message)\n return redirect('teamsapp:teams')\n else:\n raise Http404('Not allowed')",
"def create_team_action(request):\n # Create the team.\n now = datetime.utcnow()\n user_id = request.context.user_id\n user = load_user(request.db, user_id)\n # Select a round based on the user's badges.\n round_ids = find_round_ids_with_badges(request.db, user['badges'], now)\n if len(round_ids) == 0:\n # The user does not have access to any open round.\n raise ApiError('not qualified for any open round')\n if len(round_ids) > 1:\n # XXX The case where a user has badges for multiple open rounds\n # is currently handled by picking the first one, which is the\n # one that has the greatest id. This is unsatisfactory.\n pass\n round_id = round_ids[0]\n round_ = load_round(request.db, round_id, now)\n if not round_['is_registration_open']:\n raise ApiError('registration is closed')\n # Create the team.\n team_id = create_user_team(request.db, user_id, now)\n # Create a participation.\n create_participation(request.db, team_id, round_id, now=now)\n # Ensure the user gets team credentials.\n reset_user_principals(request)\n return {'success': True}",
"def test_createteam(self):\n p1, p2, p3 = self.create3persons()\n t = model.Team(name='Tigers', persons=[p1, p2, p3])\n id = t.store()\n t2 = model.Team(id=id)\n self.assertEqual(t.name, t2.name)\n self.assertEqual(t.persons, t2.persons)",
"async def create_team(new_team: BaseTeam, db_handler: DBHandler = Depends(database_dependency)):\n try:\n inserted_record = await db_handler.insert_team(new_team=new_team)\n inserted_record = init_BaseTeam(inserted_record)\n except DBHandlerException as e:\n return JSONResponse(status_code=400)\n\n return inserted_record",
"def initialize_survey(self, **kwargs):",
"def test_create_new_form(self):\n\n survey = self._create_test_survey()\n assert survey is not None\n\n new_survey = SurveyForm.get(self.test_survey_name)\n assert new_survey is not None\n assert new_survey.form == self.test_form",
"def team_post_save_callback(sender, instance, **kwargs): # pylint: disable=unused-argument\n changed_fields = instance.field_tracker.changed()\n # Don't emit events when we are first creating the team.\n if not kwargs['created']:\n for field in changed_fields:\n if field not in instance.FIELD_BLACKLIST:\n truncated_fields = truncate_fields(\n str(changed_fields[field]),\n str(getattr(instance, field))\n )\n truncated_fields['team_id'] = instance.team_id\n truncated_fields['team_id'] = instance.team_id\n truncated_fields['field'] = field\n\n emit_team_event(\n 'edx.team.changed',\n instance.course_id,\n truncated_fields\n )",
"def save_team(name, tla, shortName, areaName, email):\n try:\n Teams(name=name, tla=tla, shortName=shortName, areaName=areaName, email=email).save()\n print(\"Success\")\n except:\n print(\"Failure\")",
"def creat_team(self):\n te = Teams()\n per = Persons()\n teamlist = []\n for one in per.find({'role':'leader'},{'team_name'}):\n if one['team_name'] not in teamlist:\n teamlist.append(one['team_name'])\n # print len(teamlist)\n for team in teamlist:\n tmp = {'name': '', 'leader_email': '', 'person_emails': []}\n tmp['name'] = team\n tmp['leader_email'] = per.get_one({'team_name':team,'role':'leader'})['email']\n for one in per.find({'team_name':team},{'email'}):\n tmp['person_emails'].append(one['email'])\n print tmp\n search_t = te.get_one({'name':team})\n if search_t is None:\n te.insert_one(tmp)\n else:\n te.update_one({'name':team,'leader_email':'','person_emails':''},tmp,cover=True)",
"def survey_new(request):\n if request.user.is_authenticated:\n if not request.user.groups.filter(name='Survey Creators').exists():\n raise Http404(\"Page not found\")\n else:\n raise Http404(\"Page not found\")\n\n my_surveys = Survey.objects.filter(author=request.user).order_by('title')\n\n if request.method == \"POST\":\n form = SurveyForm(request.POST)\n if form.is_valid():\n survey = form.save(commit=False)\n survey.author = request.user\n survey.save()\n messages.add_message(request, messages.INFO, \"Created new survey \" + survey.title,)\n return redirect('skip_logic:survey_detail', survey_slug=survey.slug)\n else:\n new_slug = ''.join(random.choice(string.ascii_uppercase +\n string.ascii_lowercase +\n string.digits) for _ in range(8))\n form = SurveyForm(initial={'slug': new_slug,\n 'title': \"My New Survey\"})\n\n return render(request, 'skip_logic/survey_edit.html', {'form': form, 'my_surveys': my_surveys})",
"def test_post_team(self):\n response = self.client.post(url_for('teams'),\n data={\n 'name': 'test team',\n 'capacity': 11,\n 'number_players': 6,\n 'pitch_postcode': 'E1 6LT',\n 'time': '2019-01-01 13:00'\n })\n self.assertEqual(response.status_code, 201)\n self.assertIn(b'Team created successfully', response.data)\n self.assertEqual(db.session.query(Team).count(), 1)",
"def add_team(self):\n team = Team(self.context, ResourcePath(\"team\", self.resource_path))\n team._parent_collection = self.parent_collection\n qry = ServiceOperationQuery(self, \"team\", None, team, None, team)\n self.context.add_query(qry)\n\n def _construct_create_team_request(request):\n cur_qry = self.context.current_query\n if cur_qry.id == qry.id:\n request.method = HttpMethod.Put\n request.set_header('Content-Type', \"application/json\")\n request.data = json.dumps(request.data)\n\n self.context.before_execute(_construct_create_team_request, False)\n return team",
"def setUp(self):\n TCBase.setUp(self)\n\n # ---\n\n resp = self.request(\n self.client.post,\n '/admin/survey',\n {\n 'title': 'title',\n 'description': 'description',\n 'start_date': '2018-01-01',\n 'end_date': '2018-03-01',\n 'target': ujson.dumps([1, 3])\n },\n self.admin_access_token\n )\n\n survey_id = self.get_response_data(resp)['id']\n\n self.json_request(\n self.client.post,\n '/admin/survey/question',\n {\n 'survey_id': survey_id,\n 'questions': [\n {\n 'title': 'title',\n 'is_objective': False\n },\n {\n 'title': 'title',\n 'is_objective': False\n }\n ]\n },\n self.admin_access_token\n )",
"def addTeam(request):\n registered = False\n if request.method == 'POST':\n team_form = TeamForm(data=request.POST)\n if team_form.is_valid():\n team = team_form.save()\n registered = True\n else:\n print(team_form.errors)\n else:\n team_form = TeamForm()\n return render(request,'footBallApp/team.html',\n {'team_form':team_form,\n 'registered':registered})",
"def test_posting_a_teammate(self):\n response = self.client.post(\n '/team/all/', {'name': 'New Name',\n 'email': '[email protected]',\n 'slackhandle': '@NewTeam'},\n format='json')\n self.assertEqual(response.data, {'status': 201,\n \"data\": {'id': 1, 'name': 'New Name',\n 'email': '[email protected]',\n 'slackhandle': '@NewTeam'}})",
"def test_create_invalid_name(self):\r\n print(\"Create survey with invalid name\")\r\n s_name = \"\"\r\n c_id = 1\r\n questions = [1, 2]\r\n\r\n prev_noSurveys = len(Survey.query.all())\r\n self.assertEqual(self.system.create_survey(s_name, c_id, questions), 0)\r\n curr_noSurveys = len(Survey.query.all())\r\n self.assertEqual(prev_noSurveys, curr_noSurveys)",
"def test_meeting_create(self):\n pass",
"def add_post():\n\tt_id = db.survey.insert(\n\t\tquestion = request.vars.question,\n\t\tuser_email = request.vars.email,\n\t\tuser_name = get_user_name_from_email(request.vars.email),\n\t\topt1 = request.vars.opt1,\n\t\topt2 = request.vars.opt2,\n\t\topt3 = request.vars.opt3,\n\t\topt4 = request.vars.opt4,\n\t\t#created_on_human = humanize.naturaltime(datetime.datetime.utcnow()),\n\n\t)\n\tt = db.survey(t_id)\n\treturn response.json(dict(post=t))",
"def createTimeLapseSurvey(self, fnames_obs, fnames_sim):\n return ValueError('Not yet implemented')"
] | [
"0.6586207",
"0.6538541",
"0.64314604",
"0.6378735",
"0.63344073",
"0.6198191",
"0.61724335",
"0.6105733",
"0.6045564",
"0.60214174",
"0.59957623",
"0.59651864",
"0.5957437",
"0.5896016",
"0.5873307",
"0.5823892",
"0.5811888",
"0.5769924",
"0.57607037",
"0.5720423",
"0.56908405",
"0.5682166",
"0.5680779",
"0.5661776",
"0.56235474",
"0.5620401",
"0.55943656",
"0.55890584",
"0.5584811",
"0.55694294"
] | 0.75194645 | 0 |
You can get the survey for a team you own. | def test_get_for_team(self):
user, team_dict = self.test_create_team_creates_survey()
response = self.testapp.get(
'/api/teams/{}/survey'.format(team_dict['uid']),
headers=self.login_headers(user),
)
survey_dict = json.loads(response.body)
self.assertTrue(survey_dict['uid'].startswith('Survey')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_questionnaire(self, url, survey_path):\n pass",
"def getSurveys(self, **kwargs):\n response = self.request(\"getSurveys\", **kwargs)\n # print response\n surveys = None\n if response:\n surveys = OrderedDict()\n for survey in response[\"Result\"][\"Surveys\"]:\n surveys[survey['SurveyID']] = survey\n return surveys",
"def test_create_team_creates_survey(self):\n user = User.create(name='User Foo', email='[email protected]')\n user.put()\n\n code = 'trout viper'\n\n team_response = self.testapp.post_json(\n '/api/teams',\n {\n 'name': 'Team Foo',\n 'code': code,\n 'program_id': self.ep_program.uid,\n },\n headers=self.login_headers(user),\n )\n team_dict = json.loads(team_response.body)\n\n survey_result = Survey.get(team_id=team_dict['uid'])\n self.assertEqual(len(survey_result), 1)\n survey = survey_result[0]\n\n return user, team_dict",
"def show_surveys():\n\n return render_template('home.html', surveys=surveys)",
"def list_surveys(self):\n\n url = self.endpoint('surveys')\n survey_dict = issue_request('GET', url, headers=self.headers)\n\n return survey_dict",
"def visualize_survey(self):\n # Test if current nwb file contains Survey table\n if 'behavior' in self.model.nwb.processing:\n list_surveys = [v for v in self.model.nwb.processing['behavior'].data_interfaces.values()\n if v.neurodata_type == 'SurveyTable']\n if len(list_surveys) > 0:\n ShowSurveyDialog(nwbfile=self.model.nwb)",
"def sample_survey(self, **kwargs):",
"def survey_detail(request, survey_slug):\n if request.user.is_authenticated:\n if not request.user.groups.filter(name='Survey Creators').exists():\n raise Http404(\"Page not found\")\n else:\n raise Http404(\"Page not found\")\n\n survey = get_object_or_404(Survey, slug=survey_slug)\n my_surveys = Survey.objects.filter(author=request.user).order_by('title')\n\n if request.user == survey.author:\n return render(request,\n 'skip_logic/survey_detail.html',\n {'survey': survey, 'my_surveys': my_surveys,})\n else:\n raise Http404(\"Page not found\")",
"def get_teams():",
"def get(self, id_survey=None):\n\t\tif self.has_permission('RightTPI') is False:\n\t\t\tself.no_access()\n\n\t\twith Database() as db:\n\t\t\tif id_survey is None:\n\t\t\t\tdata = db.query(Table).all()\n\t\t\telse:\n\t\t\t\tdata = db.query(Table).get(id_survey)\n\n\t\treturn {\n\t\t\t'data': data\n\t\t}",
"def test_generate_survey(self):\n\n result = generate_survey.apply((self.user.id,\n self.report.get_daily().id)).get()\n self.assertTrue(result, \"should create a survey given a valid daily report and user on the team\")\n\n result = generate_survey.apply((self.admin.id,\n self.report.get_daily().id)).get()\n self.assertFalse(result, \"User is not on this team therefore survey shouldn't be created\")",
"def survey(request, survey_id):\n u = request.user\n survey_id = int(survey_id)\n if request.method =='POST':\n try:\n survey_meta = Survey.objects.get(id=survey_id)\n except Survey.DoesNotExist:\n return render_to_response('survey/m/notexist.html')\n survey = eval(\"%s.objects.get(user=request.user, uuid_token=request.POST['uuid_token'])\"%survey_meta.model_name)\n form = eval(\"%sForm( request.POST, instance=survey)\"%survey_meta.model_name)\n \n if form.is_valid():\n survey.completed = True\n survey.complete_date = datetime.datetime.now() \n form.save()\n return render_to_response('survey/m/completed.html')\n else:\n return render_to_response('survey/m/basic.html', \n {'form':form,\n 'survey_id': survey_id,\n 'uuid': survey.uuid_token,\n 'errors':form.errors})\n else:\n uuid = \"\"\n form = None \n try:\n s = Survey.objects.get(id=survey_id)\n status = eval(\"%s.objects.get(user=u,survey=s)\"%s.model_name)\n form = eval(\"%sForm()\"%s.model_name)\n except Survey.DoesNotExist:\n return render_to_response('survey/m/notexist.html')\n\n return render_to_response('survey/m/basic.html', {'form':form,\n 'survey_id': survey_id,\n 'uuid_token': status.uuid_token},\n context_instance=RequestContext(request))",
"def get_teams(self):\n url = 'teams'\n result = self.get(url)\n return result.get('teams', result)",
"def get_team(self):\n try:\n team_id = self.request.GET.get('team')\n if team_id is not None:\n team_id = int(team_id)\n return self.get_available_teams().get(pk=team_id)\n return self.get_available_teams().latest()\n except (Team.DoesNotExist, ValueError):\n return None",
"def inquiry_section(self):\n return self._get_child_page_of_type(ForTeamsPage)",
"def home_surveys(request):\n\n result = {}\n \n result['surveys'] = []\n\n u = request.user\n\n # get surveys\n surveys = Survey.objects.all()\n for s in surveys:\n status, created = eval(\"%s.objects.get_or_create(survey=s, user=u)\"%s.model_name)\n if created:\n status.uuid_token = uuid.uuid4()\n status.save()\n if not status.completed:\n result['surveys'].append(s.summary())\n\n return JSONHttpResponse( result )",
"def test_retrieve_team(self):\n pass",
"def test_spector_init_getsurvey_fromobj(obj_dirobj):\n\tobj = obj_dirobj\n\tobj.survey = 'cfht'\n\n\ts = spector.Spector(obj=obj)\n\n\tassert s.survey == 'cfht'\n\n\ts = spector.Spector(obj=obj, survey='hsc')\n\n\tassert s.survey == 'hsc'",
"def get_participant_team_details_for_challenge(request, challenge_pk):\n\n challenge = get_challenge_model(challenge_pk)\n if has_user_participated_in_challenge(request.user, challenge_pk):\n participant_team = get_participant_team_of_user_for_a_challenge(\n request.user, challenge_pk\n )\n serializer = ParticipantTeamSerializer(participant_team)\n return Response(serializer.data, status=status.HTTP_200_OK)\n else:\n response_data = {\n \"error\": f\"The user {request.user.username} has not participanted in {challenge.title}\"\n }\n return Response(response_data, status=status.HTTP_404_NOT_FOUND)",
"def for_teams(self):\n return self._get_child_page_of_type(ForTeamsPage)",
"def test_teams_get_team_v1(self):\n pass",
"def test_meeting_registrants_questions_get(self):\n pass",
"def get_team(self, team_reference, include_users=False):\n url = 'teams/{0}'.format(team_reference)\n result = self.get(url, {'include_users': include_users})\n #TODO: check how included users returned\n return result.get('team', result)",
"def build_teams(event, context):\n global surveys, projects\n\n if not event:\n raise (\"You must provide the cohort ID as data in the event\")\n\n # Use the DAO to grab the list of all of the surveys\n surveys = peopledao.get_all_student_surveys(event)\n print(\"Found {} surveys\".format(len(surveys)))\n\n # Sort the incoming surveys to help the algorithm produce the best results\n # Note: Can't have just one of the element reverse sorted, so must to multiple sorts\n # Multiple sorts must be performed _least_ significant to _most_\n surveys.sort(\n key=lambda survey: (str(survey[\"fields\"].get(SURVEY_TRACK_FIELD, \"\"))), reverse=False,\n )\n surveys.sort(\n key=lambda survey: (str(survey[\"fields\"].get(SURVEY_PRODUCT_OPT_OUT_FIELD, \"\"))), reverse=True,\n )\n surveys.sort(\n key=lambda survey: (str(survey[\"fields\"].get(SURVEY_ETHNICITIES_FIELD, \"\"))), reverse=False,\n )\n surveys.sort(\n key=lambda survey: (str(survey[\"fields\"].get(SURVEY_GENDER_FIELD, \"\"))), reverse=True,\n )\n\n for survey in surveys:\n print(\n f\"{survey['fields'].get('Track', '-'):<10}\"\n f\"{survey['fields'].get('Gender', '-'):<30}\"\n f\"{str(survey['fields'].get('Ethnicities', '-')):<50}\"\n )\n\n projects = projectsdao.get_all_active_projects(event)\n print(\"Found {} projects\".format(len(projects)))\n\n while surveys:\n # print(\"\\n\")\n # print(\"*\" * 120)\n # print(\"Making pass with {} students left\".format(len(surveys)))\n # print(\"*\" * 120)\n\n best_assignment = __get_best_assignment()\n\n if best_assignment.project is None:\n print(\"\\n\")\n print(\"*\" * 120)\n print(\"!!!Unable to match student: {}\", surveys.pop())\n print(\"*\" * 120)\n else:\n # project_name = best_assignment.project[\"fields\"][PROJECT_NAME_FIELD]\n # student_name = best_assignment.student[\"fields\"][SURVEY_STUDENT_NAME_FIELD][0]\n\n # print(\"\\n\")\n # print(\"*\" * 120)\n # print(\n # \"Assigning {} to project {} based on score: {}\".format(\n # student_name, project_name, best_assignment.score\n # )\n # )\n # print(\"*\" * 120)\n\n assignments.append(best_assignment)\n\n surveys.remove(best_assignment.student)\n\n print(\"\\n\")\n print(\"=\" * 120)\n print(\"Team assignments\")\n print(\"=\" * 120)\n\n # This sorting is just so they display nicely in the output\n assignments.sort(\n key=lambda x: (\n x[0][\"fields\"].get(PROJECT_NAME_FIELD),\n x[1][\"fields\"].get(SURVEY_TRACK_FIELD),\n x[1][\"fields\"].get(SURVEY_GENDER_FIELD, \"\"),\n str(x[1][\"fields\"].get(SURVEY_ETHNICITIES_FIELD, \"\")),\n )\n )\n\n # Output the final assignments and write them to the DAO\n TABLE_FORMAT_STRING = \"{:<35} {:>6} {:<30} {:<85} {:<55} {:>5}\"\n\n print(TABLE_FORMAT_STRING.format(\"Project\", SURVEY_TRACK_FIELD, \"Gender\", \"Ethnicities\", \"Opt Out\", \"TZ\",))\n\n print(\"=\" * 120)\n\n for assignment in assignments:\n print(\n TABLE_FORMAT_STRING.format(\n assignment.project[\"fields\"][PROJECT_NAME_FIELD],\n assignment.student[\"fields\"][SURVEY_TRACK_FIELD],\n assignment.student[\"fields\"].get(SURVEY_GENDER_FIELD, \"-\"),\n str(assignment.student[\"fields\"].get(SURVEY_ETHNICITIES_FIELD, list(\"-\"))).strip(\"[]\"),\n str(assignment.student[\"fields\"].get(\"Product Opt Out Text\", list(\"-\"))).strip(\"[]\"),\n assignment.student[\"fields\"].get(SURVEY_STUDENT_TIMEZONE_FIELD, \"-\"),\n )\n )\n\n # This actually writes the teams to the DAO\n projectsdao.assign_student_to_project(assignment.student, assignment.project, assignment.score)",
"def get(self) -> Optional[es.ExpectationSuite]:\n _client = client.get_instance()\n path_params = [\n \"project\",\n _client._project_id,\n \"featurestores\",\n self._feature_store_id,\n \"featuregroups\",\n self._feature_group_id,\n \"expectationsuite\",\n ]\n\n return es.ExpectationSuite.from_response_json(\n _client._send_request(\"GET\", path_params)\n )",
"def test_get_open_requests_by_team(self):\n pass",
"def get_participant_team_challenge_list(request, participant_team_pk):\n try:\n participant_team = ParticipantTeam.objects.get(pk=participant_team_pk)\n except ParticipantTeam.DoesNotExist:\n response_data = {\"error\": \"Participant Team does not exist\"}\n return Response(response_data, status=status.HTTP_404_NOT_FOUND)\n\n if request.method == \"GET\":\n challenge = Challenge.objects.filter(\n participant_teams=participant_team\n ).order_by(\"-id\")\n paginator, result_page = team_paginated_queryset(challenge, request)\n serializer = ChallengeSerializer(\n result_page, many=True, context={\"request\": request}\n )\n response_data = serializer.data\n return paginator.get_paginated_response(response_data)",
"def test_get_request_only_from_one_team(self):\n another_user3 = AnotherUserFactory(username='anotheruser3', email='[email protected]')\n another_user4 = AnotherUserFactory(username='anotheruser4', email='[email protected]')\n another_user5 = AnotherUserFactory(username='anotheruser5', email='[email protected]')\n another_user6 = AnotherUserFactory(username='anotheruser6', email='[email protected]')\n another_team = TeamFactory(owner=another_user3,\n name='Soul Eaters',\n description='We`ll destroy all the souls. And the age of darkness will come')\n UserTeamRequestFactory(\n user=another_user4,\n team=another_team,\n )\n UserTeamRequestFactory(\n user=another_user5,\n team=another_team,\n )\n UserTeamRequestFactory(\n user=another_user6,\n team=another_team,\n )\n params = {'teamID': self.team.id}\n response = self.client.get(reverse('api:user-team-requests-get-requests-for-team'), params)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data.get('results')), 2)",
"def team_tester(request):\n\n\t# Look for the team size entered by the user\n\tteam_size = int(request.GET.get('team_size', False))\n\n\t# If user has entered information...\n\tif team_size:\n\n\t\t# Get the rest of the information from the form\n\t\tscores_up = int(request.GET.get('scores_up', False))\n\t\tscores_count = int(request.GET.get('scores_count', False))\n\t\tsumstat = request.GET.get('sumstat', False)\n\t\ttime = request.GET.get('time', False)\n\t\tgymnast_list = []\n\t\tfor i in range(1, team_size+1):\n\t\t\tgymnast_search_id = \"gymnast_search\" + str(i)\n\t\t\tgymnast_list.append(request.GET.get(gymnast_search_id, False))\n\n\t\t# Set the date range \n\t\tnow = datetime.datetime.now()\n\t\tif time==\"year\":\n\t\t\tdate_range = [now-relativedelta(years=1), now]\n\t\telif time == \"season\":\n\t\t\tdate_range = [datetime.date(2019, 10, 13), now] # Since last world championships\n\t\telse:\n\t\t\tdate_range = [datetime.date(2016, 8, 21), now] # Since last olympics\n\n\t\t# Loop through the list of gymnasts and get scores\n\t\ttable_data = []\n\t\tfor gymnast in gymnast_list:\n\t\t\tgymnast = Gymnast.objects.get(name=gymnast)\n\t\t\tthis_gymnast_scores = []\n\t\t\tthis_gymnast_scores.append(gymnast)\n\t\t\tfor sub_event in [\"VT\", \"UB\", \"BB\", \"FX\"]:\n\t\t\t\tscores = Score.objects.filter(gymnast=gymnast, \n\t\t\t\t\tmeet__in=Meet.objects.filter(start_date__range=date_range), event__in=Event.objects.filter(name=sub_event), score_num=1)\n\t\t\t\tif scores.count() > 0:\n\t\t\t\t\tif sumstat == \"avg\":\n\t\t\t\t\t\tscores_sumstat = scores.aggregate(Avg('score'))['score__avg']\n\t\t\t\t\telif sumstat == \"max\":\n\t\t\t\t\t\tscores_sumstat = scores.aggregate(Max('score'))['score__max']\n\t\t\t\telse:\n\t\t\t\t\tscores_sumstat = \"\"\n\t\t\t\tthis_gymnast_scores.append(scores_sumstat)\n\t\t\ttable_data.append(this_gymnast_scores)\n\n\t\t# Select the scores that go up and the scores that count\n\t\tfor i in range(1, 5):\n\t\t\t# Get the list of all scores on this event\n\t\t\tevent_scores = [col[i] for col in table_data]\n\t\t\t# Get the sort order of these scores\n\t\t\tsort_order = np.argsort(np.argsort(event_scores)) # See https://github.com/numpy/numpy/issues/8757\n\t\t\tsort_order = team_size - 1 - sort_order\n\t\t\t# Replace each score with a tuple of the score and the class that we'll use for the td of each score\n\t\t\tfor j, row in enumerate(table_data):\n\t\t\t\t# For scores that count\n\t\t\t\tif sort_order[j] < scores_count:\n\t\t\t\t\ttable_data[j][i] = [table_data[j][i], \"counts\"]\n\t\t\t\telif sort_order[j] < scores_up:\n\t\t\t\t\ttable_data[j][i] = [table_data[j][i], \"up\"]\n\t\t\t\telse:\n\t\t\t\t\ttable_data[j][i] = [table_data[j][i], \"not_used\"]\n\n\t\t# Calculate total row\n\t\ttotal_row = [\"Team Total\", 0, 0, 0, 0]\n\t\tfor row in table_data:\n\t\t\tfor i in range(1, 5):\n\t\t\t\tif row[i][1] == \"counts\" and (not isinstance(row[i][0], str)):\n\t\t\t\t\ttotal_row[i] = total_row[i] + row[i][0]\n\t\ttable_data.append(total_row)\n\t\tteam_total = sum(total_row[1:5])\n\t\tprint(table_data)\n\telse:\n\t\tteam_size=5\n\t\tscores_up=4\n\t\tscores_count=3\n\t\tsumstat = \"avg\"\n\t\ttime = \"year\"\n\t\tgymnast_list = []\n\t\ttable_data = []\n\t\tteam_total = \"\"\n\n\n\n\tcontext = {\n\t\t'team_size': team_size,\n\t\t'scores_up': scores_up,\n\t\t'scores_count': scores_count,\n\t\t'sumstat': sumstat,\n\t\t'time': time,\n\t\t'gymnast_list': gymnast_list,\n\t\t'table_data': table_data,\n\t\t'team_total': team_total,\n\t}\n\n\treturn render(request, 
'team_tester.html', context=context)",
"def getTeam(self):\n return self.team"
] | [
"0.6343129",
"0.631515",
"0.6284795",
"0.6031469",
"0.58984464",
"0.58517706",
"0.5640203",
"0.56234926",
"0.5607942",
"0.55881244",
"0.55276686",
"0.5523524",
"0.55202866",
"0.54980105",
"0.5456152",
"0.545253",
"0.53732574",
"0.5364306",
"0.53133166",
"0.52972543",
"0.52571255",
"0.52467567",
"0.52289015",
"0.5205351",
"0.5199649",
"0.51658607",
"0.5162792",
"0.5162286",
"0.51415884",
"0.51039827"
] | 0.757884 | 0 |
You can't get a survey for someone else's team. | def test_get_for_other_forbidden(self):
user, team_dict = self.test_create_team_creates_survey()
other = User.create(name='Other', email='[email protected]')
other.put()
self.testapp.get(
'/api/teams/{}/survey'.format(team_dict['uid']),
headers=self.login_headers(other),
status=403,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_for_team(self):\n user, team_dict = self.test_create_team_creates_survey()\n response = self.testapp.get(\n '/api/teams/{}/survey'.format(team_dict['uid']),\n headers=self.login_headers(user),\n )\n survey_dict = json.loads(response.body)\n self.assertTrue(survey_dict['uid'].startswith('Survey'))",
"def test_survey_doesnt_exist(self):\n data = {\n 'experiment_version': '1',\n 'response_version': 1,\n 'person_id': 'joemamma',\n 'survey_id': 'foosurvey',\n 'flow_id': '20141113',\n 'question_id': '1',\n 'updated_ts': self.timestamp(),\n 'question_text': 'how was lunch?',\n 'variation_id': '1'\n }\n\n resp = self.client.post(\n reverse('heartbeat-api'),\n content_type='application/json',\n data=json.dumps(data))\n\n assert resp.status_code == 400\n errors = json.loads(resp.content)['errors']\n assert (\n errors['survey_id'] ==\n [u'Object with name=foosurvey does not exist.']\n )",
"def test_create_team_creates_survey(self):\n user = User.create(name='User Foo', email='[email protected]')\n user.put()\n\n code = 'trout viper'\n\n team_response = self.testapp.post_json(\n '/api/teams',\n {\n 'name': 'Team Foo',\n 'code': code,\n 'program_id': self.ep_program.uid,\n },\n headers=self.login_headers(user),\n )\n team_dict = json.loads(team_response.body)\n\n survey_result = Survey.get(team_id=team_dict['uid'])\n self.assertEqual(len(survey_result), 1)\n survey = survey_result[0]\n\n return user, team_dict",
"def get_questionnaire(self, url, survey_path):\n pass",
"def test_meeting_registrants_questions_get(self):\n pass",
"def test_generate_survey(self):\n\n result = generate_survey.apply((self.user.id,\n self.report.get_daily().id)).get()\n self.assertTrue(result, \"should create a survey given a valid daily report and user on the team\")\n\n result = generate_survey.apply((self.admin.id,\n self.report.get_daily().id)).get()\n self.assertFalse(result, \"User is not on this team therefore survey shouldn't be created\")",
"def test_getting_one_question_with_no_meetup(self):\n response = self.get_one_question_with_invalid_meetup()\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)",
"def test_survey_disabled(self):\n survey = SurveyFactory.create(enabled=False)\n\n data = {\n 'experiment_version': '1',\n 'response_version': 1,\n 'person_id': 'joemamma',\n 'survey_id': survey.name,\n 'flow_id': '20141113',\n 'question_id': '1',\n 'updated_ts': self.timestamp(),\n 'question_text': 'how was lunch?',\n 'variation_id': '1'\n }\n\n resp = self.client.post(\n reverse('heartbeat-api'),\n content_type='application/json',\n data=json.dumps(data))\n\n assert resp.status_code == 400\n errors = json.loads(resp.content)['errors']\n assert (\n errors['survey_id'] ==\n ['survey \"%s\" is not enabled' % survey.name]\n )",
"def test_getting_one_question_with_valid_meetup(self):\n response = self.get_one_question_with_valid_meetup()\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def survey_detail(request, survey_slug):\n if request.user.is_authenticated:\n if not request.user.groups.filter(name='Survey Creators').exists():\n raise Http404(\"Page not found\")\n else:\n raise Http404(\"Page not found\")\n\n survey = get_object_or_404(Survey, slug=survey_slug)\n my_surveys = Survey.objects.filter(author=request.user).order_by('title')\n\n if request.user == survey.author:\n return render(request,\n 'skip_logic/survey_detail.html',\n {'survey': survey, 'my_surveys': my_surveys,})\n else:\n raise Http404(\"Page not found\")",
"def test_question_without_choices(self):\n set_up_user(self)\n self.assertFalse(self.user.is_superuser)\n\n question_no_choices = create_question_without_choices(question_text=\"Question wihout Choices.\", days=-1)\n url = reverse('polls:detail', args=(question_no_choices.id,))\n response = self.client.get(url)\n self.assertEqual(response.status_code, 404)",
"def test_no_question(self):\n set_up_user(self)\n self.assertFalse(self.user.is_superuser)\n\n url = reverse('polls:detail', args=(1,))\n response = self.client.get(url)\n self.assertEqual(response.status_code, 404)",
"def test_get_open_requests_by_team(self):\n pass",
"def test_vote_view_with_question_which_has_no_choices(self):\n past_question_without_choices = create_question(question_text=\"Test \\\n question without choices\", days=-30, create_choice=False)\n response = self.client.get(reverse('polls:vote', \n args=(past_question_without_choices.id,)))\n self.assertEqual(response.status_code, 404)",
"def survey(request, survey_id):\n u = request.user\n survey_id = int(survey_id)\n if request.method =='POST':\n try:\n survey_meta = Survey.objects.get(id=survey_id)\n except Survey.DoesNotExist:\n return render_to_response('survey/m/notexist.html')\n survey = eval(\"%s.objects.get(user=request.user, uuid_token=request.POST['uuid_token'])\"%survey_meta.model_name)\n form = eval(\"%sForm( request.POST, instance=survey)\"%survey_meta.model_name)\n \n if form.is_valid():\n survey.completed = True\n survey.complete_date = datetime.datetime.now() \n form.save()\n return render_to_response('survey/m/completed.html')\n else:\n return render_to_response('survey/m/basic.html', \n {'form':form,\n 'survey_id': survey_id,\n 'uuid': survey.uuid_token,\n 'errors':form.errors})\n else:\n uuid = \"\"\n form = None \n try:\n s = Survey.objects.get(id=survey_id)\n status = eval(\"%s.objects.get(user=u,survey=s)\"%s.model_name)\n form = eval(\"%sForm()\"%s.model_name)\n except Survey.DoesNotExist:\n return render_to_response('survey/m/notexist.html')\n\n return render_to_response('survey/m/basic.html', {'form':form,\n 'survey_id': survey_id,\n 'uuid_token': status.uuid_token},\n context_instance=RequestContext(request))",
"def test_no_participant(self):\n response = self.client.get(self.api_link)\n self.assertEqual(response.status_code, 404)",
"def test_geeting_meetups_with_no_questions(self):\n response = self.get_question_with_invalid_meetup()\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertEqual(response.content,\n b'{\"error\":\"There are no questions\"}')",
"def test_ask_question_non_existent_meetup(self):\n access_token = self.get_access_token(USER_REGISTRATION, USER_LOGIN)\n res = self.client().post(\n '/api/v2/meetups/3/questions',\n headers=self.get_authentication_headers(access_token),\n data=json.dumps(QUESTION)\n )\n response_msg = json.loads(res.data.decode(\"UTF-8\"))\n self.assertEqual(res.status_code, 404)\n self.assertEqual(response_msg[\"message\"][\"error\"], \"Meetup with id '3' doesn't exist!\")",
"def test_survey_has_no_answers(self):\n\n survey = self._create_test_survey()\n assert len(survey.get_answers()) == 0",
"def test_listing_for_none_existent_team(self):\n resp = self.client.get(\n reverse('incidents', kwargs={'team_id': '9de98e0c-8bf9-414c-b397-05acb136935f'})\n )\n\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(resp.json(), {'error': '9de98e0c-8bf9-414c-b397-05acb136935f does not exist'})",
"def test_get_one_for_other_team_forbidden(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n team.put()\n teammate = User.create(name='teammate', email='[email protected]',\n owned_teams=[team.uid])\n teammate.put()\n user = User.create(name='foo', email='[email protected]')\n user.put()\n response = self.testapp.get(\n '/api/teams/{}/users/{}'.format(team.uid, teammate.uid),\n headers=self.login_headers(user),\n status=403,\n )",
"def test_teams_get_team_v1(self):\n pass",
"def prompt_for_survey(console: io.IO):\n msg = ('Would you like to take a survey to provide your feedback for '\n 'the deployment process? [y/N]')\n\n do_survey = prompt.binary_prompt(msg, console, default=False)\n if do_survey:\n webbrowser.open_url(_SURVEY_LINK)",
"def test_multiple_users_same_survey(self):\n input_data = 'foo bar'\n\n # user A inputs an answer\n self.launchSurvey(self.client_a, 'test', 'textfield')\n self.post(self.client_a, {'name-answer': input_data})\n\n # user B gets taken straight to summary as survey is complete\n self.launchSurvey(self.client_b, 'test', 'textfield')\n last_url_b = self.cache[self.client_b]['last_url']\n self.assertIn('/questionnaire/test/textfield/789/group/0/summary', last_url_b)\n\n # user B manually navigates to answer and can view the value that user A entered\n self.get(self.client_b, '/questionnaire/test/textfield/789/group/0/block')\n last_response_b = self.cache[self.client_b]['last_response']\n self.assertEqual(last_response_b.status_code, 200)\n self.assertIn(input_data, last_response_b.get_data(True))\n\n # user A continues through playback page and submits\n self.post(self.client_a, {})\n self.post(self.client_a, action=None)\n\n # user B tries to submit value\n self.post(self.client_b, {'name-answer': 'bar baz'})\n last_response_b = self.cache[self.client_b]['last_response']\n self.assertEqual(last_response_b.status_code, 401)",
"def test_no_question_for_admin(self):\n set_up_super_user(self)\n self.assertTrue(self.user.is_superuser)\n\n url = reverse('polls:detail', args=(1,))\n response = self.client.get(url)\n self.assertEqual(response.status_code, 404)",
"def test_get_request_only_from_one_team(self):\n another_user3 = AnotherUserFactory(username='anotheruser3', email='[email protected]')\n another_user4 = AnotherUserFactory(username='anotheruser4', email='[email protected]')\n another_user5 = AnotherUserFactory(username='anotheruser5', email='[email protected]')\n another_user6 = AnotherUserFactory(username='anotheruser6', email='[email protected]')\n another_team = TeamFactory(owner=another_user3,\n name='Soul Eaters',\n description='We`ll destroy all the souls. And the age of darkness will come')\n UserTeamRequestFactory(\n user=another_user4,\n team=another_team,\n )\n UserTeamRequestFactory(\n user=another_user5,\n team=another_team,\n )\n UserTeamRequestFactory(\n user=another_user6,\n team=another_team,\n )\n params = {'teamID': self.team.id}\n response = self.client.get(reverse('api:user-team-requests-get-requests-for-team'), params)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data.get('results')), 2)",
"def test_question_with_out_choices(self):\n question = create_question(question_text='Question with out choices', days=0, choices=[])\n response = self.client.get(reverse('polls:details', args=(question.id, )))\n self.assertEqual(response.status_code, 404)",
"def sample_survey(self, **kwargs):",
"def test_user_has_no_answers(self):\n\n survey = self._create_test_survey()\n assert not survey.has_user_answered_survey(self.student)\n assert len(survey.get_answers()) == 0",
"def test_question_with_out_choices(self):\n question = create_question(question_text='Question with out choices', days=0, choices=[])\n response = self.client.get(reverse('polls:results', args=(question.id, )))\n self.assertEqual(response.status_code, 404)"
] | [
"0.7120516",
"0.6282421",
"0.626813",
"0.618336",
"0.6113319",
"0.6037205",
"0.598216",
"0.5848799",
"0.57429224",
"0.5724888",
"0.570673",
"0.5694278",
"0.5633859",
"0.5616271",
"0.56087244",
"0.5571042",
"0.5523372",
"0.55163455",
"0.5508447",
"0.55008894",
"0.54575586",
"0.5450695",
"0.5427534",
"0.5421945",
"0.542176",
"0.54206014",
"0.54193246",
"0.54139",
"0.5396808",
"0.53813386"
] | 0.67539746 | 1 |
Client dict should have portal-friendly metric labels. | def test_metric_labels(self):
team_id = 'Team_foo'
m1 = Metric.create(name='Foo Condition', label='foo_condition')
m2 = Metric.create(name='Bar Condition', label='bar_condition')
Metric.put_multi([m1, m2])
survey = Survey.create(team_id=team_id, metrics=[m1.uid, m2.uid])
survey.put()
user = User.create(name='foo', email='[email protected]',
owned_teams=[team_id])
user.put()
response = self.testapp.get(
'/api/surveys/{}'.format(survey.uid),
headers=self.login_headers(user),
)
logging.info(response.body)
self.assertEqual(
json.loads(response.body)['metric_labels'],
{m1.uid: 'foo_condition', m2.uid: 'bar_condition'},
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_host_configuration_metrics1(self):\n pass",
"def test_get_host_configuration_metrics(self):\n pass",
"def mock_client_fixture():\n with mock.patch(f\"{PROMETHEUS_PATH}.prometheus_client\") as client:\n counter_client = mock.MagicMock()\n client.Counter = mock.MagicMock(return_value=counter_client)\n setattr(counter_client, \"labels\", mock.MagicMock(return_value=mock.MagicMock()))\n yield counter_client",
"def mock_client_fixture():\n with mock.patch(f\"{PROMETHEUS_PATH}.prometheus_client\") as client:\n counter_client = mock.MagicMock()\n client.Counter = mock.MagicMock(return_value=counter_client)\n setattr(counter_client, \"labels\", mock.MagicMock(return_value=mock.MagicMock()))\n yield counter_client",
"def build_metric_labels(self):\n response = [ \n {\n \"key\": \"response_code\", \n \"value\": \"0\"\n }\n ]\n return response",
"def get_clients_names_and_ips(_namespace):\n\n meta_dict_fmt = {\"name\": \"\", \"pod_ip\": \"\"}\n label_client = \"client\"\n\n cl_lst = get_client_lst(_namespace, label_client)\n pods_meta = []\n for cl in cl_lst:\n pod_name = cl.metadata.name\n if label_client in pod_name:\n pods_meta.append(dict(meta_dict_fmt, name=pod_name, pod_ip=cl.status.pod_ip))\n\n return pods_meta",
"def test_metrics(client):\n response = client.get(\"/metrics\")\n assert response.status_code == 200",
"def test_metric_map_values(self):\n url = reverse(\"metrics\")\n client = APIClient()\n\n params = {\"source_type\": Provider.PROVIDER_OCP}\n url = url + \"?\" + urlencode(params, quote_via=quote_plus) + \"&limit=11\"\n response = client.get(url, **self.headers).data[\"data\"]\n self.assertEqual(len(COST_MODEL_METRIC_MAP), len(response))\n for metric in COST_MODEL_METRIC_MAP:\n self.assertIsNotNone(metric.get(\"source_type\"))\n self.assertIsNotNone(metric.get(\"metric\"))\n self.assertIsNotNone(metric.get(\"label_metric\"))\n self.assertIsNotNone(metric.get(\"label_measurement_unit\"))\n self.assertIsNotNone(metric.get(\"default_cost_type\"))",
"def test_metrics_server(self):\n validate_metrics_server()",
"def MultiReadClientMetadata(self, client_ids):\n res = {}\n for client_id in client_ids:\n self._ValidateClientId(client_id)\n md = self.metadatas.get(client_id, {})\n res[client_id] = objects.ClientMetadata(\n certificate=md.get(\"certificate\"),\n fleetspeak_enabled=md.get(\"fleetspeak_enabled\"),\n first_seen=md.get(\"first_seen\"),\n ping=md.get(\"ping\"),\n clock=md.get(\"clock\"),\n ip=md.get(\"ip\"),\n last_foreman_time=md.get(\"last_foreman_time\"),\n last_crash_timestamp=md.get(\"last_crash_timestamp\"),\n startup_info_timestamp=md.get(\"startup_info_timestamp\"))\n\n return res",
"def test_cw_metrics(self):\n\n instances = set()\n result = self.cw_client.list_metrics(Namespace=\"CWAgent\", MetricName=\"cpu_usage_system\")\n for i in result[\"Metrics\"]:\n instances.add(i[\"Dimensions\"][0][\"Value\"])\n\n for key, value in self.cdk_output_map.items():\n if \"Instance\" in key:\n self.assertTrue(value in instances)",
"def talk_clients(self):\n c = self.conn.cursor()\n\n c.execute('''SELECT value FROM headers WHERE header = 'To' AND value NOT LIKE '%,%';''')\n\n clients = {'android': 0, 'Adium': 0, 'BlackBerry': 0, 'Festoon': 0, 'fire': 0,\n 'Gush': 0, 'Gaim': 0, 'gmail': 0, 'Meebo': 0, 'Miranda': 0,\n 'Psi': 0, 'iChat': 0, 'iGoogle': 0, 'IM+': 0, 'Talk': 0,\n 'Trillian': 0, 'Unknown': 0\n }\n for row in c.fetchall():\n try:\n domain = row[0].split('@', 1)[1]\n resource_part = domain.split('/', 1)[1]\n except IndexError: # Throws when the address does not have an @ or a / in the string.\n continue\n\n unknown = True\n for client in clients:\n if client in resource_part:\n clients[client] += 1\n unknown = False\n\n if unknown:\n clients['Unknown'] += 1\n\n for client in clients.keys():\n if clients[client] is 0:\n del clients[client]\n\n trace = pgo.Pie(\n labels=clients.keys(),\n values=clients.values(),\n marker=dict(\n colors=[\n self.config.get('color', 'primary'),\n self.config.get('color', 'secondary'),\n ]\n )\n )\n\n layout_args = plotly_default_layout_options()\n layout_args['title'] = 'Chat Clients'\n del layout_args['xaxis']\n del layout_args['yaxis']\n\n layout = pgo.Layout(**layout_args)\n\n return plotly_output(pgo.Figure(data=[trace], layout=layout))",
"def clients(self):\n\n try:\n req = requests.get(self.root_url + \"/clients\")\n except requests.exceptions.ConnectionError as e:\n req = None\n print(str(e), file=sys.stderr)\n except Exception as e:\n print(\"Unknown error making a request to the Sensu API\", file=sys.stderr)\n print(str(e), file=sys.stderr)\n\n if req and req.status_code == 200:\n dat = req.json()\n for host in dat:\n self.metrics.append(('sensu_status', host['status'], {'host': host['name'], 'dc': host['dc']}))",
"def clients():\n pass",
"def _CheckLabelIndex(self):\n self.assertEqual(\n list(search.SearchClients(\"label:Label2\", token=self.token)),\n [self.client_id])",
"def __init__(self):\n super().__init__()\n self.metric = 'KAPPA'",
"def _create_kube_apiserver_metrics_instance(self, instance):\n kube_apiserver_metrics_instance = deepcopy(instance)\n endpoint = instance.get('prometheus_url')\n prometheus_url = endpoint\n\n # Allow using a proper URL without introducing a breaking change since\n # the scheme option is deprecated.\n if not match('^https?://.*$', endpoint):\n scheme = instance.get('scheme', self.DEFAULT_SCHEME)\n prometheus_url = \"{0}://{1}\".format(scheme, endpoint)\n\n kube_apiserver_metrics_instance['prometheus_url'] = prometheus_url\n\n # Most set ups are using self signed certificates as the APIServer can be used as a CA.\n ssl_verify = instance.get('ssl_verify', self.DEFAULT_SSL_VERIFY)\n kube_apiserver_metrics_instance['ssl_verify'] = ssl_verify\n\n # We should default to supporting environments using RBAC to access the APIServer.\n bearer_token_auth = instance.get('bearer_token_auth', self.DEFAULT_BEARER_TOKEN_AUTH)\n kube_apiserver_metrics_instance['bearer_token_auth'] = bearer_token_auth\n\n return kube_apiserver_metrics_instance",
"def create_client_hello_dictionary(self, path_tracefile):\n packets = self.get_client_hello_packets(path_tracefile)\n client_hello_dict = dict()\n for packet in packets:\n servername = self.get_client_hello_servername(packet)\n if servername:\n ip = packet.getlayer(IP).dst\n client_hello_dict[ip] = servername\n return client_hello_dict",
"def calc_distances(client_list):\n distances = {}\n for x in client_list:\n distances[x] = {}\n for y in client_list:\n distances[x][y] = dis(x, y)\n return distances",
"def info(client):\n\n return client.get_info()",
"def test_get_deployment_metric(self):\n pass",
"def get_metrics(self) -> dict:\n return self.metric_dict",
"def __init__(self):\n super().__init__()\n self.metric = 'ACURCY'",
"def test_model(self, clients_to_test=None, set_to_use='test'):\n metrics = {}\n\n if clients_to_test is not None:\n raise NotImplementedError(\"Client selection not yet implemented\")\n\n metrics_futures = []\n for cs in self.client_servers:\n metrics_future = cs.test_model.remote(\n clients_to_test=None, set_to_use=set_to_use)\n metrics_futures.append(metrics_future)\n for future in metrics_futures:\n metrics.update(ray.get(future))\n \n return metrics",
"def test_create_metrics_dict(self):\n # binary tasks have 1 class at class definition.\n num_classes = 1\n metrics_dict = create_metrics_dict(num_classes)\n assert 'iou_1' in metrics_dict.keys()\n assert 'iou_2' not in metrics_dict.keys()\n\n num_classes = 3\n metrics_dict = create_metrics_dict(num_classes)\n assert 'iou_1' in metrics_dict.keys()\n assert 'iou_2' in metrics_dict.keys()\n assert 'iou_3' not in metrics_dict.keys()\n del metrics_dict",
"def set_metrics(self):",
"def _make_metric_dict(self, metrics):\n exon = None\n hgvs_c = None\n hgvs_p = None\n hgvs_three = None\n hgvs_basep = None\n hgvs_aap = None\n\n tx = metrics.split(':')[1]\n hgnc = metrics.split(':')[0]\n raw_exon = metrics.split(':')[2]\n if raw_exon != 'wholegene':\n exon = metrics.split(':')[2][4:]\n hgvs_c = metrics.split(':')[3]\n full_c = ':'.join([tx, hgvs_c])\n hgvs_parser = Hgvs(full_c)\n\n hgvs_basep = hgvs_parser.start\n try:\n hgvs_p = metrics.split(':')[4]\n full_p = ':'.join([tx, hgvs_p])\n hgvs_parser = Hgvs(full_p)\n hgvs_three = hgvs_parser.threep\n hgvs_aap = hgvs_parser.start[3:]\n except IndexError:\n hgvs_p = None\n full_p = None\n hgvs_three = None\n hgvs_aap = None\n\n return {\n 'AAP': hgvs_aap,\n 'BASEP': hgvs_basep,\n 'EXON': exon,\n 'HGNC': hgnc,\n 'HGVSC': hgvs_c,\n 'HGVSP1': hgvs_p,\n 'HGVSP3': hgvs_three,\n 'SOURCE': self.tfx_type,\n 'SPLICE': None,\n 'TXC': tx,\n 'VFX': self.veff,\n 'PVT': None\n }",
"def metrics_group():",
"async def test__get_labels():\n # Uppercase 'C'\n assert ImageConfig._get_labels(json.loads('{\"Config\":{\"Labels\":{\"x\":\"5\"}}}')) == {\n \"x\": \"5\"\n }\n\n # Lowercase 'C'\n assert ImageConfig._get_labels(json.loads('{\"config\":{\"Labels\":{\"x\":\"5\"}}}')) == {\n \"x\": \"5\"\n }\n\n # Missing 'Labels'\n assert ImageConfig._get_labels(json.loads('{\"Config\":{}}')) == {}",
"def _get_metric_config(self, config):\n metric_config = dict()\n metric_config['include_metrics'] = config.get('include_metrics', {})\n metric_config['exclude_metrics'] = config.get('exclude_metrics', {})\n return metric_config"
] | [
"0.58376956",
"0.57469577",
"0.5601619",
"0.5601619",
"0.5514434",
"0.55044305",
"0.5325245",
"0.5324136",
"0.5164732",
"0.5106117",
"0.50848985",
"0.5079339",
"0.5064023",
"0.50074315",
"0.5007054",
"0.4984392",
"0.4983079",
"0.4963031",
"0.49587086",
"0.4958586",
"0.49579126",
"0.49354708",
"0.4930611",
"0.49288118",
"0.49218765",
"0.49194738",
"0.4916294",
"0.49119967",
"0.4905249",
"0.49031913"
] | 0.57984996 | 1 |
Pause pattern while self.pauseNow is True; return immediately if self.playStatus == False | def pauseCheck(self):
while (self.playStatus == False and self.pauseNow == True):
self.isPause = True
time.sleep(.25)
self.isPause = False
return self.playStatus | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pause(self):\n while 1:\n if self.is_paused:\n time.sleep(1)\n else:\n break",
"def pause(self):\n if self.status()['state'] == \"playing\":\n self.toggle_pause()",
"def test_pause(self):\n source = procedural.WhiteNoise(0.5)\n player = media.Player()\n player.queue(source)\n player.play()\n start_time = time.time()\n\n stage = 0\n while player.source:\n if stage == 0 and time.time() - start_time > 0.25:\n player.pause()\n stage = 1\n if stage == 1 and time.time() - start_time > 0.75:\n player.play()\n stage = 2\n player.dispatch_events()",
"def _pause(self):\n data_paused = None\n while self.target.is_active and data_paused != '01':\n data_paused = self._mem_read(self.data['paused'][0], 1)\n time.sleep(self.pause_time)\n self.data['paused'][1] = data_paused\n return",
"def paused(self) -> bool:",
"def pause_play(self):\n\n if self.estado == gst.STATE_PAUSED \\\n or self.estado == gst.STATE_NULL \\\n or self.estado == gst.STATE_READY:\n self.__play()\n\n elif self.estado == gst.STATE_PLAYING:\n self.__pause()",
"def grab_paused(self):\n\t\tif time.time() - self.time_paused > PAUSE_INTERVAL:\n\t\t\tself.status.state = 'stop'",
"def pause(self):\n\t\tpass",
"def pause(self):\n self.paused_time = time.time()\n self.paused = True",
"def _control_pause(self):\n self.player.pause()",
"def pause(self):\n pass",
"def pause(self):\n pass",
"def pause(self):\n \n self.pause = True",
"def pause(self):\n self.block.mobile = not self.block.mobile\n if not self.paused:\n self.paused = True\n # Also print paused message\n self.screen.print(\"PAUSED\")\n else:\n self.paused = False\n self.screen.print(\"\")\n # Also reset tick time\n self.t = time.time()",
"def test_pause_queue(self):\n source = procedural.WhiteNoise(0.5)\n player = media.Player()\n player.pause()\n player.queue(source)\n\n while player.source:\n player.dispatch_events()\n player.play()",
"def pause(self):\n self.paused = True\n # FIXME?: Why is this not doing anything? Shouldn't it be calling into the player API?",
"def pause(self):\n if self._pause:\n self._pause = False\n else:\n self._pause = True\n self.step() # trigger the next step",
"def pause(self):\n raise NotImplementedError()",
"def pause(self):\n raise NotImplementedError()",
"def _pause(self, delay):\n start = time.time()\n end = start + delay\n while time.time() < end:\n yield",
"def _pause(self, delay):\n start = time.time()\n end = start + delay\n while time.time() < end:\n yield",
"def start_pause(self, **kwargs):\n if self.is_on:\n self.turn_off()\n else:\n self.turn_on()",
"def pause(): # real signature unknown; restored from __doc__\n pass",
"def pause(self):\n self._cleanup()\n self._paused = True",
"def __pause(self):\n\n self.set_state(gst.STATE_PAUSED)",
"def pause(self) -> None:\n self._running.clear()",
"def is_paused(self):\n\t\treturn self.pause",
"def on_pause(self):\r\n store = get_store()\r\n store.put(\"pause\", value=self.sm.current)\r\n return True",
"def auto_play_pause(self):\r\n if ActiveCheck.not_active():\r\n return\r\n \r\n if not self.playing:\r\n return # Suppress activity\r\n\r\n player = self.get_player()\r\n if not player.auto:\r\n return\r\n self.auto_delay_waiting = True\r\n pause = player.pause\r\n if self.speed_step >= 0:\r\n pause = self.speed_step\r\n delay_ms = int(pause*1000)\r\n self.mw.after(delay_ms)\r\n return",
"def pause(self):\n pass\n # self.condor_object.hold()"
] | [
"0.7879247",
"0.7591245",
"0.75521106",
"0.7544259",
"0.7394605",
"0.72653115",
"0.72582483",
"0.72100365",
"0.7209901",
"0.7202345",
"0.7187789",
"0.7187789",
"0.7176371",
"0.7118577",
"0.71078545",
"0.70944405",
"0.70625925",
"0.70505774",
"0.70505774",
"0.70348316",
"0.70348316",
"0.7028751",
"0.7026555",
"0.7010036",
"0.69880146",
"0.6966372",
"0.6936847",
"0.68888897",
"0.6860881",
"0.68599504"
] | 0.78421515 | 1 |
For now, we are only returning the label for the first authorization. | def get_label(self):
auth = self.authorizations[0]
return auth.label | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_label ( self ):\n if self._label is not None:\n return self._label\n return user_name_for( self.name )",
"def _get_label(self):\n return self.label",
"def _get_label ( self ):\n if self._label is not None:\n return self._label\n return self.name",
"def get_label(self):\n return self.label",
"def get_label(self):\n return self.label",
"def get_label(self):\n return self.label",
"def get_label(self):\n return self.label",
"def label(self):\n return self.label_",
"def get_label(urs):\n return assign_term(urs)[1]",
"def label(self):\n return self._label_",
"def first_label(self):\r\n return self.labels.split(',')[0]",
"def label(self):\r\n return self._label",
"def get_label ( self ):\n return self.label",
"def GetLabel(self):\r\n\r\n return self.label",
"def GetLabel(self):\r\n\r\n return self.label",
"def label(self) -> str:\n return self[\"label\"]",
"def label(self):\n return self._label",
"def label(self):\n return self._label",
"def label(self):\n return self._label",
"def label(self):\n return self._label",
"def label(self):\n return self._label",
"def label(self):\n return self._label",
"def label(self):\n return self._label",
"def label(self):\n return self._label",
"def label(self):\n return self._label",
"def label(self):\n return self._label",
"def get_label(self):\n return self._label",
"def label(self):\r\n return self._name",
"def label(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"label\")",
"def label(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"label\")"
] | [
"0.7064338",
"0.6762954",
"0.65058815",
"0.6465353",
"0.6465353",
"0.6465353",
"0.6465353",
"0.6433205",
"0.6383421",
"0.6355541",
"0.6347802",
"0.6343887",
"0.63365865",
"0.6327651",
"0.6327651",
"0.6300275",
"0.6296131",
"0.6296131",
"0.6296131",
"0.6296131",
"0.6296131",
"0.6296131",
"0.6296131",
"0.6296131",
"0.6296131",
"0.6296131",
"0.62883145",
"0.6279365",
"0.62559634",
"0.62559634"
] | 0.8055274 | 0 |
Change the value of every pixel by following x_n = 0.5x_p^2 where x_n is the new value and x_p is the original value | def change_value(image):
out = None
#####################################
# START YOUR CODE HERE #
#####################################
image = image / 255
out = np.empty_like(image)
height, width, _ = image.shape
for h in range(height):
for w in range(width):
x_p = image[h,w]
x_n = (x_p * x_p) * 0.5
out[h,w] = x_n
######################################
# END OF YOUR CODE #
######################################
return out | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def change_X(X):\n _X = swap_pixels(X.X)\n\n X.update(_X)\n\n return X",
"def recolorPixels(x,y,px, newColorArray):\r\n for i in range(0+coeff1*x,coeff1+coeff1*x):\r\n for j in range(0+coeff1*y,coeff1+coeff1*y):\r\n px[i,j]=newColorArray[x][y]",
"def set_pixel(self, x, y, value):\n if x < 0 or x > 7 or y < 0 or y > 15:\n # Ignore out of bounds pixels.\n return\n if y < 8:\n self.set_led( y * 16 + x, value)\n else:\n self.set_led((y-8) * 16 + (x+8), value)",
"def set_pixel(self, x, y, value):\n if x < 0 or x > 7 or y < 0 or y > 7:\n # Ignore out of bounds pixels.\n return\n\n self.set_led(y * 16 + ((x + 7) % 8), value)",
"def set_pixel(self, x, y, value):\r\n \r\n # Rotation and mirroring\r\n a = x\r\n x = y\r\n y = 7-a\r\n \r\n # From the baseclass\r\n if x < 0 or x > 7 or y < 0 or y > 7:\r\n # Ignore out of bounds pixels.\r\n return\r\n # Set green LED based on 1st bit in value.\r\n self.set_led(y * 16 + x, 1 if value & Display.COLOR_GREEN > 0 else 0)\r\n # Set red LED based on 2nd bit in value.\r\n self.set_led(y * 16 + x + 8, 1 if value & Display.COLOR_RED > 0 else 0)",
"def x_redim(self, x):\n x[0:4] *= self.r_scale\n return x",
"def transform(self, x):\n x = x * 0.5 + 0.5\n x_grey = x[:, [2], :, :] * 299 / 1000 + x[:, [1], :, :] * \\\n 587 / 1000 + x[:, [0], :, :] * 114 / 1000\n x_grey = (x_grey - 0.5) / 0.5\n return x_grey",
"def spread(self, n=2):\n for point in self.points:\n point *= n",
"def inplace(block_size=20000):\n y = np.empty(len(x))\n for k in range(len(x) // block_size + 1):\n b, e = k * block_size, (k+1) * block_size\n y[b:e] = x[b:e]\n y[b:e] *= .25\n y[b:e] += .75\n y[b:e] *= x[b:e]\n y[b:e] -= 1.5\n y[b:e] *= x[b:e]\n y[b:e] -= 2\n\n return y",
"def forward(self, x):\n\n x, _ = equiangular_calculator(x, self.ratio)\n x = x.permute(0, 3, 1, 2)\n x = F.interpolate(x, scale_factor=(self.kernel_size, self.kernel_size), mode=\"nearest\")\n x = reformat(x)\n return x",
"def raw():\n return (((.25 * x) + .75) * x - 1.5) * x - 2",
"def set_pixel(self, x, y, v):\n self.buf[y][x] = v & 0x07",
"def preprocess(self, x: paddle.Tensor) -> paddle.Tensor:\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.image_encoder.img_size - h\n padw = self.image_encoder.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x",
"def hash_pixel(p, n, max_value):\n multiplier = np.flip(np.array([2] * len(p)) ** range(0, len(p)))\n return sum(p // ((max_value // n) + 1) * multiplier)",
"def preprocess_input(img):\n img /= 255.\n img -= 0.5\n img *= 2.\n return img",
"def _update_node(node, value):\n node.N += 1\n node.W += value\n node.Q = node.W / node.N",
"def intensity(self, value: int, /) -> None:",
"def update(board: Board, pawn_value: int, x: int, y: int) -> None:\n\n put_pawn_at(board, pawn_value, x, y)\n block_row(board, pawn_value, y)\n block_column(board, pawn_value, x)\n block_diagonals(board, pawn_value, x, y)",
"def smoothen_image(image):\n window = square(3)\n def _replace_center_with_one_if_five_neighbors_are_different_than_0(values):\n \"\"\"\n For each location in the input image, the value returned by the function is the value assigned to that location.\n That's why, naturally, the function needs to return a scalar.\n :param values:\n :return: a scalar representing the value to be set at the current location in the input image\n \"\"\"\n greater_than_0 = 0\n for entry in values:\n if entry > 0:\n greater_than_0 += 1\n if greater_than_0 >= 5:\n return 1\n else:\n return 0\n\n \"\"\"\n This call will take windows of the shape given by the footprint, send them as an 1D array to the _replace function\n and return the value that is to be set in the center of the window. The edges are ignored (for now)\n \"\"\"\n new_image = generic_filter(image, _replace_center_with_one_if_five_neighbors_are_different_than_0, footprint = window)\n return new_image",
"def normalize(x):\n # TODO: Implement Function\n \n return x/255",
"def mold_image(image, config=None):\n if np.max(image) <= 1 and np.min(image) >= 0:\n image[:,:,:3] = image[:,:,:3]*2.0 - 1.0\n elif np.min(image) >= 0:\n image[:, :, :3] = image[:, :, :3] * (1.0/127.5) - 1.0\n return image.astype(np.float32)",
"def setPixel(self, x, y, val):\r\n self.__buffer[y][x].setValue(val)",
"def prox(self, x):\n if self.regularize:\n x[:self.offset(x)] /= (1 + 2 * self.tau * self.lambda_)\n return x",
"def prox(self, x):\n if self.regularize:\n x[:self.offset(x)] /= (1 + 2 * self.tau * self.lambda_)\n return x",
"def prox(self, x):\n if self.regularize:\n x[:self.offset(x)] /= (1 + 2 * self.tau * self.lambda_)\n return x",
"def change_brightness(img,k=0):\n\n img_copy = np.copy(img)\n img_copy = img_copy.astype(int)\n img_copy += k\n\n return img_copy",
"def change( p ):\n red = p[0]\n green = p[1]\n blue = p[2]\n return [ 255-red, 255-green, 255-blue ]",
"def normalize(x):\n a = 0\n b = 1\n scale_min = 0\n scale_max = 255\n return a + ( ( (x - scale_min)*(b - a) )/( scale_max - scale_min ) )",
"def pixel(self, x: int, y: int, colour: int, /) -> None:",
"def upscale(x, n):\n if n == 1:\n return x\n x_shape = tf.shape(x)\n height, width = x_shape[1], x_shape[2]\n return tf.image.resize_nearest_neighbor(x, [n * height, n * width])\n # return tf.batch_to_space(tf.tile(x, [n**2, 1, 1, 1]), [[0, 0], [0, 0]], n)"
] | [
"0.6009499",
"0.6003074",
"0.5953545",
"0.58825284",
"0.5864363",
"0.57073236",
"0.5685161",
"0.56669635",
"0.56421584",
"0.55719",
"0.5569989",
"0.5528347",
"0.55130744",
"0.54917955",
"0.54803175",
"0.5448064",
"0.5439173",
"0.5424856",
"0.5372966",
"0.5365994",
"0.53644747",
"0.53590024",
"0.53584045",
"0.53584045",
"0.53584045",
"0.5341684",
"0.5328572",
"0.53074026",
"0.53045136",
"0.52895194"
] | 0.7488139 | 0 |
Removes a value from the set. Returns true if the set contained the specified element. | def remove(self, val: int) -> bool:
if val in self.set:
self.set.remove(val);
self.nums.remove(val);
return True;
return False; | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove(self, val: int) -> bool:\n if val in self.set:\n self.set.remove(val)\n return True\n return False",
"def remove(self, val: int) -> bool:\n temp = self.randomSet.pop(val, False)\n return True if temp != False else temp",
"def remove(self, val: int) -> bool:\n if val in self.value_set:\n self.value_set.remove(val)\n if val in self.values:\n self.values.remove(val)\n return True\n else:\n return False",
"def remove(self, value: object) -> bool:\n for _ in range(self.da.length()):\n if value == self.da[_]:\n self.da.remove_at_index(_)\n return True\n return False",
"def remove(self, val: int) -> bool:\n value = val in self.container\n self.container.discard(val)\n return value",
"def remove(self, el: T) -> bool:\n if el in self:\n del self[el]\n return True\n else:\n return False",
"def remove(self, element):\n\n currentNodePointer = self.head\n # case where the first node has the element as value then erase the value\n if(currentNodePointer.getData() == element):\n self.head = self.head.getNext()\n return True\n \n while(currentNodePointer.getNext() is not None):\n if(currentNodePointer.getNext().getData() == element):\n currentNodePointer.setNext(currentNodePointer.getNext().getNext())\n return True\n else:\n currentNodePointer = currentNodePointer.getNext()\n return False",
"def remove_value(self, value: Hashable) -> bool:\n\t\treturn self.remove_values([value])",
"def remove(self, val: int) -> bool:\n \n # print(self.ind)\n # no value\n if val not in self.ind or not self.ind[val]:\n return False\n else:\n remove_ind = self.ind[val].pop() # random removal\n if not self.ind[val]:\n self.ind.pop(val)\n \n # set will not add duplicate values. So adding first is OK evenif the last elem is the one to delete\n self.ind[self.items[-1]].add(remove_ind)\n self.ind[self.items[-1]].discard(len(self.items)-1)\n self.items[-1], self.items[remove_ind] = self.items[remove_ind], self.items[-1]\n self.items.pop(-1)\n # print(self.ind)\n return True",
"def remove(self, val):\n i = self.d.get(val)\n if i is None:\n return False\n assert 0 <= i < len(self.l)\n last_val = self.l[-1]\n if val != last_val:\n self.d[last_val] = i\n self.l[i] = last_val\n del self.d[val]\n _ = self.l.pop()\n return True",
"def remove(self, val: int) -> bool:\n if self.d.get(val):\n del self.d[val]\n return True\n else:\n return False",
"def remove(self, val: int) -> bool:\n if val in self.l:\n self.l.remove(val)\n return True\n return False",
"def remove(self, val):\n if val in self.numSet:\n # remove from numSet\n self.numSet.discard(val)\n # remove from valToIndex\n index = self.valToIndex[val]\n del self.valToIndex[val]\n # remove from nums & update the index of the swapped value\n valToSwap = self.nums[self.size - 1]\n self.swap(index, self.size - 1)\n self.valToIndex[valToSwap] = index\n # don't forget to decrease the size\n self.size -= 1\n return True\n else:\n return False",
"def remove(self, val: int) -> bool:\n if val in self.dict:\n last_element, idx = self.list[-1], self.dict[val]\n self.list[idx], self.dict[last_element] = last_element, idx\n self.list.pop()\n self.dict.pop(val)\n return True\n return False",
"def remove(self, val: int) -> bool:\n if val in self.dict:\n last_element, idx = self.list[-1], self.dict[val]\n self.list[idx], self.dict[last_element] = last_element, idx\n self.list.pop()\n self.dict.pop(val)\n return True\n return False",
"def remove(self, val: int) -> bool:\n if val in self.data:\n self.data.pop(val)\n self.total -= 1\n return True\n return False",
"def remove(self, val: int) -> bool:\n if val in self.hashmap:\n last_elem, idx = self.array[-1], self.hashmap[val]\n self.array[idx], self.hashmap[last_elem] = last_elem, idx\n self.array.pop()\n self.hashmap.pop(val)\n return True\n return False",
"def remove(self, val: int) -> bool:\n if val not in self.dict:\n return False\n last_ele, idx = self.list[-1], self.dict[val]\n self.list[idx], self.dict[last_ele] = last_ele, idx\n\n self.list.pop()\n del self.dict[val]\n return True",
"def remove(self, val: int) -> bool:\n if not self.idx[val]:\n return False\n last = self.elements[-1]\n to_remove = self.idx[val].pop()\n self.elements[to_remove] = last\n self.idx[last].add(to_remove)\n self.idx[last].discard(len(self.elements) - 1)\n\n self.elements.pop()\n return True",
"def remove(self, value):\n found = False\n for i in range(len(self.data)):\n if self.data[i] != value:\n pass\n else:\n found = True\n self.__delitem__(i)\n break\n if not found:\n raise ValueError",
"def remove(self, val: int) -> bool:\n if val in self.dict:\n idx, last_elem = self.dict[val], self.list[-1]\n self.list[idx] = last_elem\n self.dict[last_elem] = idx\n self.list.pop()\n self.dict.pop(val)\n return True\n return False",
"def remove(self, val: int) -> bool:\n if val in self.dict:\n idx, last_elem = self.dict[val], self.list[-1]\n self.list[idx] = last_elem\n self.dict[last_elem] = idx\n self.list.pop()\n self.dict.pop(val)\n return True\n return False",
"def remove(self, val: int) -> bool:\n if val not in self._dict:\n return False\n idx = self._dict[val]\n last_elem = self._array[-1]\n self._array[idx], self._array[-1] = self._array[-1], self._array[idx]\n self._dict[last_elem] = idx\n self._dict.pop(val)\n self._array.pop()\n return True",
"def remove(self, val):\n temp = self.table.pop(val, None)\n if temp is None:\n return False\n return True",
"def remove(self, val: int) -> bool:\n if val in self.arr:\n index, lastVal = self.map[val], self.arr[-1]\n self.arr[index], self.arr[-1] = lastVal, self.arr[index]\n self.map[lastVal] = index\n self.arr.pop()\n self.map.pop(val)\n return True\n return False",
"def remove(self, val):\n in_ds = False\n if val in self.ds:\n self.ds.remove(val)\n in_ds = True\n return in_ds",
"def remove(self, val: int) -> bool:\n if val not in self.map:\n return False\n rm_idx = self.map[val]\n last_idx = len(self.slot) - 1\n last_val = self.slot[last_idx]\n self.slot[rm_idx] = last_val\n self.map[last_val] = rm_idx\n del self.map[val]\n self.slot.pop()\n return True",
"def remove(self, val: int) -> bool:\n if not self.dict[val]: return False\n last_num = self.arr[-1]\n removed_idx = self.dict[val].pop()\n\n self.dict[last_num].add(removed_idx)\n self.arr[removed_idx] = last_num\n\n self.dict[last_num].discard(len(self.arr) - 1)\n self.arr.pop()\n\n return True",
"def remove(self, val):\n if val in self.dic:\n i = self.dic[val]\n if i<len(self.data)-1:\n self.data[i]=self.data[-1]\n self.dic[self.data[i]]=i\n self.data.pop()\n self.dic.pop(val,0)\n return True\n else:\n return False",
"def remove(self, val):\n if val not in self.map:\n return False\n \n to_remove_idx = self.map[val]\n self.map.pop(val)\n if to_remove_idx != len(self.nums) - 1:\n to_swap = self.nums[-1]\n self.nums[-1], self.nums[to_remove_idx] = self.nums[to_remove_idx], self.nums[-1]\n self.map[to_swap] = to_remove_idx\n self.nums = self.nums[:-1]\n return True"
] | [
"0.7793552",
"0.7472986",
"0.7311967",
"0.7115737",
"0.70873374",
"0.69927806",
"0.6945106",
"0.68983823",
"0.68566453",
"0.68520516",
"0.68365693",
"0.68351394",
"0.68248504",
"0.6823558",
"0.6823558",
"0.6814836",
"0.67781126",
"0.67604584",
"0.67402226",
"0.6724619",
"0.6722487",
"0.6722487",
"0.6668758",
"0.6653026",
"0.66081506",
"0.660104",
"0.65816087",
"0.6524802",
"0.65124404",
"0.64917535"
] | 0.7555588 | 1 |
Indicates whether the identifier provided is contained in this namespace. | def contains(self, identifier):
uri = identifier if isinstance(identifier, six.string_types) else (
identifier.uri if isinstance(identifier, Identifier) else None
)
return uri.startswith(self._uri) if uri else False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __contains__(self, identifier):\n # following breaks some tests, what is the expected behaviour?\n # return any(m.unique_id.endswith(identifier) for m in self)\n return any(m.unique_id == identifier for m in self)",
"def is_declared(self, identifier: str) -> bool:\n if identifier in self._declarations:\n return True\n if self.parent is not None:\n return self.parent.is_declared(identifier)\n return False",
"async def namespace_exists(self, namespace: str) -> bool:\n return await self.AD.state.namespace_exists(namespace)",
"def containsUri(self, *args):\n return _libsbml.XMLNamespaces_containsUri(self, *args)",
"def hasNS(self, *args):\n return _libsbml.XMLNamespaces_hasNS(self, *args)",
"def __contains__(self, name):\n return (name in self._defs) or \\\n ((self._parent is not None) and (name in self._parent))",
"def __contains__(self, name):\n\n return name in self._wdict",
"def stillLookingForPrefix(self, prefix):\n return prefix in self._prefixToIdentifiers",
"def hasURI(self, *args):\n return _libsbml.XMLNamespaces_hasURI(self, *args)",
"def hasNamespaceURI(self, *args):\n return _libsbml.XMLToken_hasNamespaceURI(self, *args)",
"def is_in_namespace(variable_names, namespace, func_logic=all):\n assert hasattr(variable_names, \"__iter__\"), \"`variable_names` should be either a single string on an object or an iterable of strings of variable names\"\n if isinstance(variable_names, str):\n variable_names = [variable_names]\n namespace = set(namespace)\n return func_logic(map(lambda x: x in namespace, variable_names))",
"def __contains__(self, name):\n return name in self.__resources",
"def is_local(self, identifier: str) -> bool:\n return identifier in self._declarations",
"def _namespace_requested(self, namespace):\r\n if namespace is None:\r\n return False\r\n namespace_tuple = self._tuplefy_namespace(namespace)\r\n if namespace_tuple[0] in IGNORE_DBS:\r\n return False\r\n elif namespace_tuple[1] in IGNORE_COLLECTIONS:\r\n return False\r\n else:\r\n return self._tuple_requested(namespace_tuple)",
"def __contains__(self, uri):\n\t\treturn uri in self._uris",
"def __contains__(self, name):\n return name in self._variables",
"def __contains__(self, key):\n return self._get(key, self.root) is not None",
"def __contains__(self, gid: uuid.UUID) -> bool:\n return gid in self._nodes",
"def __contains__(self, rname):\n return rname in self.keys()",
"def __contains__(self, key):\n node, _ = Treap._find_node(key, self.root)\n return node is not None",
"def hasPrefix(self, *args):\n return _libsbml.XMLNamespaces_hasPrefix(self, *args)",
"def hasNamespaceNS(self, *args):\n return _libsbml.XMLToken_hasNamespaceNS(self, *args)",
"def __contains__(self, name):\n return hasattr(self, name)",
"def exists( identifier ):\n return note.exists(identifier)",
"def __contains__(self, n):\n try:\n return n in self.node\n except TypeError:\n return False",
"def __contains__(self, name):\n try:\n self[name]\n return True\n except KeyError:\n return False",
"def exists(identifier, network):\n foo = next(load(identifier, network), None)\n return foo is not None",
"def __contains__(self, var: Union[Variable, int]) -> bool:\n if isinstance(var, Variable):\n return var in self.scope_vars\n return var in self.scope_vids",
"def contains(self, key: int) -> bool:\n if key in self.d:\n return True\n else:\n return False",
"def is_named(self):\n return isinstance(self.uri, rdflib.URIRef)"
] | [
"0.695668",
"0.6656405",
"0.6532839",
"0.64874536",
"0.6400062",
"0.63559645",
"0.6343124",
"0.6330577",
"0.6319895",
"0.6311052",
"0.6293539",
"0.62231743",
"0.619792",
"0.6197703",
"0.6196918",
"0.6167409",
"0.6140008",
"0.60929984",
"0.60690624",
"0.60648376",
"0.60636157",
"0.6054152",
"0.60397786",
"0.6031862",
"0.60316724",
"0.6014142",
"0.6007349",
"0.6005266",
"0.5988968",
"0.5967082"
] | 0.77918166 | 0 |
Verify that the output table has the header items listed in field_names. | def assertTableHeaders(self, output_lines, field_names):
table = self.parser.table(output_lines)
headers = table['headers']
for field in field_names:
self.assertIn(field, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def assert_show_fields(self, show_output, field_names):\n\n # field_names = ['name', 'description']\n # show_output = [{'name': 'fc2b98d8faed4126b9e371eda045ade2'},\n # {'description': 'description-821397086'}]\n # this next line creates a flattened list of all 'keys' (like 'name',\n # and 'description' out of the output\n all_headers = [item for sublist in show_output for item in sublist]\n for field_name in field_names:\n self.assertIn(field_name, all_headers)",
"def test_check_header_missing_fields(self):\r\n\r\n # Default header, should not generate any errors/warnings\r\n header = ['AAA', 'XXX', 'YYY',\r\n 'ZZZ']\r\n errors = []\r\n warnings = []\r\n\r\n errors, warnings = check_header(header,\r\n errors,\r\n warnings,\r\n sample_id_ix=0,\r\n desc_ix=3,\r\n bc_ix=1,\r\n linker_primer_ix=2,\r\n added_demultiplex_field=None)\r\n\r\n expected_errors = [\r\n 'Found header field AAA, expected field SampleID\\t0,0',\r\n 'Found header field XXX, expected field BarcodeSequence\\t0,1',\r\n 'Found header field YYY, expected field LinkerPrimerSequence\\t0,2',\r\n 'Found header field ZZZ, last field should be Description\\t0,3']\r\n expected_warnings = []\r\n\r\n self.assertEqual(errors, expected_errors)\r\n self.assertEqual(warnings, expected_warnings)",
"def assert_table_structure(self, items, field_names):\n for item in items:\n for field in field_names:\n self.assertIn(field, item)",
"def test_check_header_required_fields(self):\r\n\r\n # Default header, should not generate any errors/warnings\r\n header = [\r\n 'SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n errors = []\r\n\r\n errors = check_header_required_fields(header,\r\n errors,\r\n sample_id_ix=0,\r\n desc_ix=4,\r\n bc_ix=1,\r\n linker_primer_ix=2,\r\n added_demultiplex_field='run_prefix')\r\n\r\n expected_errors = []\r\n\r\n self.assertEqual(errors, expected_errors)\r\n\r\n # Should find all as errors if not named correctly\r\n header = ['AAA', 'BBB', 'CCC', 'DDD',\r\n 'EEE']\r\n errors = []\r\n\r\n errors = check_header_required_fields(header,\r\n errors,\r\n sample_id_ix=0,\r\n desc_ix=4,\r\n bc_ix=1,\r\n linker_primer_ix=2,\r\n added_demultiplex_field='run_prefix')\r\n\r\n expected_errors = [\r\n 'Found header field AAA, expected field SampleID\\t0,0',\r\n 'Found header field BBB, expected field BarcodeSequence\\t0,1',\r\n 'Found header field CCC, expected field LinkerPrimerSequence\\t0,2',\r\n 'Found header field EEE, last field should be Description\\t0,4',\r\n 'Missing added demultiplex field run_prefix\\t-1,-1']\r\n\r\n self.assertEqual(errors, expected_errors)",
"def has_headers(self):\n for column in self.columns:\n if column.header:\n return True\n return False",
"def _check_headers(cursor, headers):\n all_columns = set(chain.from_iterable(_columns(cursor, table) for table in DATA_TABLES))\n for header in headers:\n if header not in all_columns:\n raise ValueError('column {} not recognized'.format(header))",
"def testFields(self):\n requested_fields = [\"FormNumber\", \"Title\"]\n table = self.auth.table(self.dataset,\n self.table2,\n fields=requested_fields)\n table_columns = table[0].keys()\n for x in requested_fields:\n self.assertTrue(x in table_columns)\n # Account for the extra '__mmetadata' key\n self.assertEqual(len(requested_fields) + 1, len(table_columns))",
"def check_headerEntries(self, expected, found):\n\n # spreadsheets must have either a barcode field or a object ID field, but both are not required\n header1 = ('original master', 'object', 'barcode')\n header2 = ('original master', 'object',\n 'object identifier\\n(edit heading to specify type ' +\n '- e.g. barcode)')\n expected = self.remove_annoying(header1, header2, expected, found)\n\n bad_entries = []\n\n for header in expected:\n if header not in found:\n bad_entries.append(header)\n\n if bad_entries:\n self.raise_excelerror(\"Incorrect header entry for {0}.\"\n .format(bad_entries))\n return True",
"def test_header_row(self):\n header_row = self.view_class().header_row\n if not header_row:\n return\n\n response = self.view_class().get()\n # Some formatting needs to be done so that the header row\n # is compliant with the CSV dialect - all fields need\n # to be quoted.\n quoted_header_row = '\"{}\"'.format('\",\"'.join(header_row))\n self.assertContains(response, quoted_header_row)",
"def check_headerRow(self, expected, found):\n\n # spreadsheets must have either a barcode field or a object ID field, but both are not required\n header1 = 'barcode'\n header2 = ('object identifier\\n(edit heading to specify type' +\n ' - e.g. barcode)')\n expected = self.remove_annoying(header1, header2, expected, found)\n\n missing = []\n\n for header in expected:\n if header not in found:\n missing.append(header)\n\n if missing:\n self.raise_excelerror(\"Missing required value- {0}.\"\n .format(missing))\n\n return True",
"def _test_df_headers(self, df):\n assert list(df.columns.values) == [\n 'Appliances', 'BathroomsFull', 'BathroomsHalf', 'Bedrooms',\n 'DateListed', 'Description', 'MlsId', 'MlsName', 'Price',\n 'Rooms', 'StreetAddress'\n ]",
"def test_fasta_get_headers(self):\r\n\r\n header_records = mfau.get_record_headers(full_file_name)\r\n\r\n if debug:\r\n for header_record in header_records:\r\n print header_record.strip()\r\n\r\n self.assertGreaterEqual(len(header_records), 0)",
"def test_csv_reader_header_fields(process_data):\n data = process_data(file_name_or_type='clean_map.csv')\n header_fields = list(data[0].keys())\n assert header_fields == [\n 'Country',\n 'City',\n 'State_Or_Province',\n 'Lat',\n 'Long',\n 'Altitude'\n ]",
"def test_missing_header(barred_tac_list_importer, logger, db_conn):\n expect_failure(barred_tac_list_importer, exc_message='Metadata header, cannot find the column headers - tac, '\n '10000110')",
"def test_check_header(self):\r\n\r\n # Default header, should not generate any errors/warnings\r\n header = ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence',\r\n 'Description']\r\n errors = []\r\n warnings = []\r\n\r\n errors, warnings = check_header(header,\r\n errors,\r\n warnings,\r\n sample_id_ix=0,\r\n desc_ix=3,\r\n bc_ix=1,\r\n linker_primer_ix=2,\r\n added_demultiplex_field=None)\r\n\r\n expected_errors = []\r\n expected_warnings = []\r\n\r\n self.assertEqual(errors, expected_errors)\r\n self.assertEqual(warnings, expected_warnings)",
"def test_fetchSpecificHeaderFieldsWithoutHeaders(self):\n self.assertFetchSpecificFieldsWithEmptyList(\"HEADER.FIELDS\")",
"def test_fetchSpecificHeaderFieldsNotWithoutHeaders(self):\n self.assertFetchSpecificFieldsWithEmptyList(\"HEADER.FIELDS.NOT\")",
"def check_column_names(expected_headings, found_headings):\n column_missing = False\n column_additional = False\n\n # Check if column headings exist\n logging.info(\"Checking column headers are correct.\")\n diff_columns = set(expected_headings) - set(found_headings)\n if len(diff_columns) > 0:\n column_missing = True\n\n # Check whether there are any additional columns (could need renaming)\n extra_columns = set(found_headings) - set(expected_headings)\n if len(extra_columns) > 0:\n column_additional = True\n\n # Check for duplicate headings\n # NOTE: As mangle_dupe_cols=True, any duplicate columns will be stored in the form column.1.... column.N\n # We use this to avoid overwriting data. However, to identify duplicate original columns, we need to remove\n # these prior to checking for dups\n main_column_names = [i.split(\".\")[0] for i in found_headings]\n duplicate_headings = len(main_column_names) > len(set(main_column_names))\n if duplicate_headings:\n logging.error(\"Duplicate headings identified.\")\n if column_missing:\n logging.error(\"Missing headers identified:\")\n print(diff_columns)\n if column_additional:\n logging.error(\"Additional headers identified:\")\n print(extra_columns)\n if column_missing or column_additional or duplicate_headings:\n logging.info(\n \"File will not pass checks as I am unable to tell \"\n \"what to do with the columns on my own.\"\n )\n return False\n return True",
"def validate_column_names(self, cols):\n self.stdout.write('Verifying CSV header')\n csv_cols = set(cols)\n if self.required_csv_columns <= csv_cols:\n return True\n else:\n missing_cols = set(self.required_csv_columns).difference(csv_cols)\n raise ValidationError(\n \"These columns '{0}' are required, but missing in the CSV \"\n \"file.\".format(\n ', '.join(missing_cols)\n )\n )",
"def assert_has_fields(obj: dict, fields: List[str]) -> None:\n for field in fields:\n assert field in obj.keys()",
"def is_header(fields):\n if len(fields) < 11:\n return None\n # Test a column which should usually be a number in data lines and never a number in header lines.\n try:\n float(fields[8])\n return False\n except ValueError:\n pass\n first_field = fields[0]\n # An explicitly commented line is a header.\n if first_field.startswith('#'):\n return True\n # The first field in a header is usually these two (and never these in data lines).\n if first_field.lower() == 'sample' or first_field.lower() == 'family':\n return True\n # Fallback 1: There should never be a number in a header line. If we find one, it's a data line.\n for field in fields:\n try:\n float(field)\n return False\n except ValueError:\n pass\n # Fallback 2: Just test whether any of the known labels is in the line.\n for label in LABELS:\n if label in fields:\n return True\n for label in LABELS:\n if label.lower() in fields:\n return True",
"def check_header_required_fields(header,\r\n errors,\r\n sample_id_ix,\r\n desc_ix,\r\n bc_ix,\r\n linker_primer_ix,\r\n added_demultiplex_field=None):\r\n\r\n header_checks = {\r\n sample_id_ix: \"SampleID\",\r\n desc_ix: \"Description\",\r\n bc_ix: \"BarcodeSequence\",\r\n linker_primer_ix: \"LinkerPrimerSequence\"\r\n }\r\n\r\n for curr_check in header_checks:\r\n if (header[curr_check] != header_checks[curr_check] and\r\n header_checks[curr_check] == \"Description\"):\r\n errors.append('Found header field %s, last field should be %s' %\r\n (header[curr_check], header_checks[curr_check]) +\r\n '\\t%d,%d' % (0, curr_check))\r\n elif (header[curr_check] != header_checks[curr_check] and\r\n header_checks[curr_check] != \"Description\"):\r\n errors.append('Found header field %s, expected field %s' %\r\n (header[curr_check], header_checks[curr_check]) +\r\n '\\t%d,%d' % (0, curr_check))\r\n\r\n if added_demultiplex_field:\r\n if added_demultiplex_field not in header:\r\n errors.append('Missing added demultiplex field %s\\t%d,%d' %\r\n (added_demultiplex_field, -1, -1))\r\n\r\n return errors",
"def header(self, fields):\n return fields",
"def check_headers(self, headers):\n h = headers.values()[0]\n\n if 'DT' in PAR:\n if h.dt != PAR.DT:\n h.dt = PAR.DT\n\n if 'NT' in PAR:\n if h.nt != PAR.NT:\n print 'Warning: h.nt != PAR.NT'\n\n if 'NREC' in PAR:\n if h.nr != PAR.NREC:\n print 'Warning: h.nr != PAR.NREC'\n\n return h",
"def test_headers(self):\n msg = self.shortDescription()\n self.assertTrue(False, msg=msg)\n pass",
"def test_empty_cols_allowed(self):\n self.test_table.allow_empty_columns = True\n self.test_table.change_header(Path=1, SectionType=3, Value=4)\n self.assertEqual(self.test_table._header, [\"Path\", None, \"SectionType\",\n \"Value\"])",
"def validate_match_columns(import_log, field_names, model_class, header_row):\n errors = []\n column_matches = import_log.import_setting.columnmatch_set.all()\n for field_name in field_names:\n field_object, model, direct, m2m = model_class._meta.get_field_by_name(field_name)\n if (direct and\n model and\n not field_object.blank):\n field_matches = column_matches.filter(field_name=field_name)\n if field_matches:\n if field_matches[0].column_name not in header_row:\n errors += [\"{0} is required but is not in your spreadsheet. \".format(field_object.verbose_name)]\n else:\n errors += [\"{0} is required but has no match.\".format(field_object.verbose_name)]\n \n return errors",
"def test_headers(self):\n self.assert_expected_token_value()",
"def test_field_names(self):\n\n for mb_model in self.mb_model_list:\n mb_fields = mb_model._meta.fields\n db_cols = connection.introspection.get_table_description(\n self.cursor, mb_model._meta.db_table)\n\n for i in range(0, len(mb_model._meta.fields)):\n self.assertEqual(\n mb_fields[i].column,\n db_cols[i].name\n )",
"def writeheader(writer):\n writer.writerow(dict((fn, fn) for fn in writer.fieldnames))"
] | [
"0.71665496",
"0.69928867",
"0.69691926",
"0.6916123",
"0.6892314",
"0.682082",
"0.6780092",
"0.67301995",
"0.6690977",
"0.66392064",
"0.6528734",
"0.6500211",
"0.6498844",
"0.64815634",
"0.6458742",
"0.64520425",
"0.63763213",
"0.62508994",
"0.62471277",
"0.6220344",
"0.6196196",
"0.6174555",
"0.61480606",
"0.6146654",
"0.6146383",
"0.6097303",
"0.608629",
"0.60840505",
"0.6082498",
"0.60718906"
] | 0.84269035 | 0 |
Create a dictionary from an output | def _get_property_from_output(self, output):
obj = {}
items = self.parser.listing(output)
for item in items:
obj[item['Property']] = str(item['Value'])
return obj | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_output(output):\n lines = output.splitlines()[3:-1]\n r = {}\n for line in lines:\n kv = filter(None, line.split('|'))\n kv = [x.strip() for x in kv]\n r.update({kv[0]: kv[1]})\n return r",
"def _get_output_dictionary(self):\n\n return_dictionary = {}\n\n for output_path in self.provided_outputs:\n return_dictionary[output_path.full_path] = self.get_value(output_path)\n\n return return_dictionary",
"def output_from_json(self, output: Dict[str, Any]) -> OutputInfo:",
"def construct_output_dict():\n list_of_recipes = construct_list_of_recipes()\n output_dict = {}\n for recipe_list in list_of_recipes:\n recipe_instance = construct_recipe_object(recipe_list)\n recipe_dict = recipe_instance.construct_json_rep_obj()\n for k, v in recipe_dict.iteritems():\n output_dict[k] = v\n output_dict = filter_output_dict(output_dict)\n return {'recipes': output_dict}",
"def test_string_to_dict(self):\n actual_result = IperfParser(OUTPUT_RESULT).to_parse()\n self.assertEqual(actual_result, IPERF_PARSER_EXPECTED_RESULT)",
"def parseDuMap(output):\n #z00du00(DB-SL-MSL-CH-SCH) : 00-00-0-0-0 01-01-0-0-0 04-04-2-0-0 05-05-2-0-0\n # 02-02-1-1-0 03-03-1-1-0 02-02-1-0-0 03-03-1-0-0\n duMap = {}\n for l in output:\n \n l_a = l.split(\":\")\n #print l_a\n #sys.exit(1)\n du = l_a[0]\n # string of 00-00-0-0-0 01-01-0-0-0\n sbChs = l_a[1]\n \n #z00du00(DB-SL-MSL-CH-SCH)\n # get 0 and from z00du0 9\n partDu = getDuPart(du)\n \n sbChArr = getAllSlChSbCh(sbChs)\n \n duMap[partDu] = sbChArr\n \n \n return duMap",
"def genie_parse_output(self) -> Union[Dict[str, Any], List[Any]]:\n structured_result = genie_parse(self.genie_platform, self.channel_input, self.result)\n return structured_result",
"def _result_to_dict(line):\n f = line.split(':;')\n return {'server': f[0], 'os_name': f[1], 'status': f[2], 'ipv4': f[3]}",
"def makeDict(result_list):\n \n result_dict = dict()\n for line in result_list:\n if line[0] == 'set_property' and line[3] == 'get_ports':\n if line[4] not in result_dict:\n result_dict[line[4]] = dict()\n result_dict[line[4]][line[1]] = line[2]\n\n return result_dict",
"def _parse_output_variables(self):\n self._output_variables_by_name = {}\n self._output_variables_by_type = {}\n for ov in self._output_variables:\n # parse the variable to get individual parts\n parsed_variable = self.parse_variable(ov)\n variable_name = parsed_variable.get('name')\n variable_type = parsed_variable.get('type')\n\n # store the variables in dict by name (e.g. \"status_code\")\n self._output_variables_by_name[variable_name] = {'variable': ov}\n\n # store the variables in dict by name-type (e.g. \"status_code-String\")\n self._output_variables_by_type[f'{variable_name}-{variable_type}'] = {'variable': ov}",
"def parsed_output(output_elements):\n parsed_output = {}\n for stanza, stanza_value in output_elements.items():\n fake_section = MagicMock()\n fake_section.options = {}\n fake_section.name = stanza\n parsed_output.update({stanza: fake_section})\n for option, value in stanza_value.items():\n fake_setting = MagicMock()\n fake_setting.name = option\n fake_setting.value = value\n parsed_output[stanza].options.update({option: fake_setting})\n return parsed_output",
"def from_thread_result_to_dictionary(returned_result):\n keys = []\n values = []\n\n for returned_result_item in returned_result:\n keys.append(returned_result_item[0])\n values.append(returned_result_item[1])\n\n dictionary = dict(zip(keys, values))\n return dictionary",
"def data_from_result():\n return dict(\n gene=\"BRAF\",\n strand=\"-\",\n tx_pos_range=(1802, 1921),\n alt_pos_range=(140453074, 140453193),\n alt_aln_method=\"splign\",\n tx_exon_id=780494,\n alt_exon_id=1927263\n )",
"def get_outputs():\n outputs = {}\n for obj in vars(acsploit.output).values():\n if hasattr(obj, 'OUTPUT_NAME'):\n outputs[obj.OUTPUT_NAME] = obj\n\n return outputs",
"def convertToDict(self): \n out = dict()\n out[\"Value\"] = self.value \n out[\"Odds\"] = self.odds \n out[\"Path\"] = self.path\n out[\"Curated\"] = self.curated \n out[\"Edit Distance\"] = self.edit_distance \n out[\"Edit Distance Stem\"] = self.edit_distance_stem\n out[\"Source ID\"] = self.source_id\n out[\"Match\"] = self.match \n out[\"Offset Start\"] = self.offset_start \n out[\"Offset End\"] = self.offset_end\n return out",
"def get_dict_of_sequential_param__output(dev_cfg, name, output_name):\n one = get_sequential_params(dev_cfg, name)\n two = []\n for i in range(len(one)):\n two.append(f\"{output_name}{i+1}\")\n\n return dict(zip(one, two))",
"def to_dict(input):\n output = dict()\n for key, value in input.asDict().items():\n if isinstance(value, ParseResults):\n output[key] = value.asList()\n else:\n output[key] = value\n\n return output",
"def get_outputs_dict(process):\n return {\n link_triplet.link_label: link_triplet.node\n for link_triplet in process.get_outgoing(link_type=(LinkType.RETURN, LinkType.CREATE))\n }",
"def finalize_output_dict(self, output_dict):\n return {key: output_dict[key].cpu().numpy() for key in output_dict.keys()}",
"def process_java_acc_output(output):\n return_value = {}\n output = output.split(\"\\n\")\n for line in output:\n # Line has relevant info\n if line.lower().startswith(\"total\"):\n values = {}\n # Remove \"total\" keyword\n line = line[6:]\n # Seperate the two valuable parts\n line_list = line.split(\",\")\n for segment in line_list:\n part = segment.split(\":\")\n # Extract key and value\n values[part[0][-8:]] = int(part[1])\n return_value[line[:6]] = values\n return return_value",
"def parse_output(use_json, output):\n return json.loads(output[0]) if use_json else parse_info.construct_tree(output)",
"def _process_raw_output(self,\r\n raw_output_fp,\r\n log_fp,\r\n output_observation_map_fp):\r\n result = {}\r\n query_id_field = 0\r\n flag_field = 1\r\n subject_id_field = 2\r\n output_observation_map_f = open(output_observation_map_fp, 'w')\r\n log_f = open(log_fp, 'w')\r\n for e in MinimalSamParser(open(raw_output_fp, 'U')):\r\n query_id = e[query_id_field]\r\n subject_id = e[subject_id_field]\r\n flag = int(e[flag_field])\r\n if (flag != 4):\r\n try:\r\n result[subject_id].append(query_id)\r\n except KeyError:\r\n result[subject_id] = [query_id]\r\n log_f.write('\\t'.join(e))\r\n log_f.write('\\n')\r\n\r\n log_f.close()\r\n for e in result.items():\r\n output_observation_map_f.write(\r\n '%s\\t%s\\n' %\r\n (e[0], '\\t'.join(e[1])))\r\n output_observation_map_f.close()\r\n return result",
"def from_json(output: Dict[str, Any]) -> Any:\n\n result: Any = OutputInfo(\n OutputIndex(bytes.fromhex(output[\"hash\"]), output[\"index\"]),\n output[\"timelock\"],\n output[\"amount\"],\n bytes.fromhex(output[\"spend_key\"]),\n )\n result.state = InputState(output[\"state\"])\n return result",
"def parse_list_output(output):\n lines = output.splitlines()\n keys = filter(None, lines[1].split('|'))\n keys = [x.lower().strip() for x in keys]\n r = []\n for line in lines[3:-1]:\n if len(line.split()) <= 1:\n continue\n values = filter(None, line.split('|'))\n values = [x.strip() for x in values]\n assert len(keys) == len(values)\n record = dict(zip(keys, values))\n r.append(record)\n return r",
"def _parse_output(self, output):\n return [json.loads(x) for x in output]",
"def test_string_to_dict2(self):\n actual_result = IperfParser(OUTPUT_RESULT_UDP).to_parse()\n self.assertEqual(actual_result, PARSER_EXPECTED_RESULT2)",
"def output_to_dict(\n sensi_orders: Tuple[int, ...], mode: str, output_tuple: Tuple\n ) -> Dict:\n output_dict = {}\n index = 0\n if not isinstance(output_tuple, tuple):\n output_tuple = (output_tuple,)\n if mode == MODE_FUN:\n if 0 in sensi_orders:\n output_dict[FVAL] = output_tuple[index]\n index += 1\n if 1 in sensi_orders:\n output_dict[GRAD] = output_tuple[index]\n index += 1\n if 2 in sensi_orders:\n output_dict[HESS] = output_tuple[index]\n elif mode == MODE_RES:\n if 0 in sensi_orders:\n output_dict[RES] = output_tuple[index]\n index += 1\n if 1 in sensi_orders:\n output_dict[SRES] = output_tuple[index]\n return output_dict",
"def parse_show_as_object(self, raw_output):\n items = self.parse_show(raw_output)\n o = {}\n for item in items:\n o.update(item)\n return o",
"def sysinfo_scrape(output):\r\n # Create a dictionary\r\n return_dict = dict()\r\n for idx, line in enumerate(output.strip().split(\"\\n\")):\r\n tokens = re.split(r\"\\s{2,}\", line)\r\n if idx == 0:\r\n return_dict[\"Name\"] = tokens[-1]\r\n else:\r\n try:\r\n name, value = tokens[-1].split(\":\")\r\n return_dict[name.strip()] = value.strip()\r\n except ValueError:\r\n # Handle extra lines in the logo\r\n pass\r\n\r\n return return_dict",
"def decode_results(self, outputs):\n ..."
] | [
"0.7458908",
"0.6636358",
"0.6539277",
"0.64794725",
"0.6453821",
"0.64392745",
"0.6378547",
"0.6347167",
"0.6300203",
"0.62663984",
"0.6249434",
"0.6200246",
"0.617561",
"0.6171171",
"0.61182153",
"0.6114068",
"0.61129606",
"0.60749435",
"0.6059385",
"0.6053162",
"0.60514975",
"0.6038417",
"0.6034911",
"0.6034067",
"0.60279477",
"0.59937805",
"0.5978123",
"0.5965041",
"0.5958704",
"0.5950834"
] | 0.6664644 | 1 |
Wait until object reaches given status. | def wait_for_object_status(self, object_name, object_id, status,
timeout=120, interval=3):
cmd = self.object_cmd(object_name, 'show')
start_time = time.time()
while time.time() - start_time < timeout:
if status in self.cinder(cmd, params=object_id):
break
time.sleep(interval)
else:
self.fail("%s %s did not reach status %s after %d seconds."
% (object_name, object_id, status, timeout)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wait_for_status(self, status):\n code = self.instance.state['Code']\n while code != status:\n time.sleep(3)\n self.instance.reload()\n code = self.instance.state['Code']",
"def check_completion(self):\n\n time.sleep(3)\n while self.status == 0:\n pass",
"def wait(self, timeout=600):\n s = datetime.datetime.now()\n status = json.loads(self.get())\n while status['status'] != 'COMPLETE':\n status = self.get()\n e = datetime.datetime.now()\n if (e - s).seconds > timeout:\n raise RuntimeError('timeout')\n return status",
"def waitUntilSuccess():",
"def waitStatus(j, wtype='Load'):\n timeout = 1\n curIter = 0\n maxIter = 60\n done = False\n while not done:\n stat = j.GetStatus(wtype)\n if stat == \"complete\":\n done = True\n else:\n curIter = curIter + 1\n if curIter > maxIter:\n raise ValueError(\"timeout waiting\")\n time.sleep(timeout)",
"def wait_on_object_state ( aws_object, target_state, sleep_time = 10, max_wait = 300, failure_state = None ) :\n while aws_object.state != target_state :\n if aws_object.state == failure_state :\n break\n if max_wait <= 0 :\n break\n time.sleep( sleep_time )\n max_wait -= sleep_time\n aws_object.update( )\n\n return aws_object.state == target_state",
"def waitfor(self):\r\n finished = False\r\n while finished == False:\r\n time.sleep(5)\r\n finished = self.isFinished()",
"def wait(self):\n\t\twhile True:\n\t\t\tr1 = self.zaberSend(self.translation[\"hor\"], self.cmd[\"returnStatus\"], data=0)\n\t\t\tr2 = self.zaberSend(self.translation[\"ver\"], self.cmd[\"returnStatus\"], data=0)\n\t\t\tif r1[2] == 0 and r2[2] == 0:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\ttime.sleep(.01)",
"def wait_for_status(self, status, timeout=-1):\n if self.status == volume_status.NONE:\n log.debug(\n 'Attempted to wait for a status ({0}) on a non-existent volume'.format(status))\n return False # no volume means not worth waiting\n else:\n start_time = time.time()\n end_time = start_time + timeout\n if timeout == -1:\n checks = \"infinite\"\n wait_time = 5\n wait_forever = True\n else:\n checks = 10\n wait_time = float(timeout) / checks\n wait_forever = False\n while wait_forever or time.time() <= end_time:\n if self.status == status:\n log.debug(\"Volume {0} ({1}) has reached status '{2}'\"\n .format(self.volume_id, self.fs.get_full_name(), status))\n return True\n elif not self.volume_id:\n log.debug(\"No volume ID; not waiting for desired status ({0})\"\n .format(status))\n return False\n else:\n log.debug('Waiting for volume {0} (status \"{1}\"; {2}) to reach status \"{3}\". '\n 'Remaining checks: {4}'.format(self.volume_id, self.status,\n self.fs.get_full_name(), status, checks))\n if timeout != -1:\n checks -= 1\n time.sleep(wait_time)\n log.debug('Wait for volume {0} ({1}) to reach status {2} timed out. Current status {3}.'\n .format(self.volume_id, self.fs.get_full_name(), status, self.status))\n return False",
"def wait(self):\n return (self.status == self.STATUS_WAIT)",
"def wait_ready_to_use(self, status=True, timeout=TIMEOUT):\n LOGGER.info(\n f\"Wait for {self.kind} {self.name} status to be {'' if status else 'not '}ready to use\"\n )\n\n samples = TimeoutSampler(\n wait_timeout=timeout,\n sleep=1,\n exceptions_dict=PROTOCOL_ERROR_EXCEPTION_DICT,\n func=lambda: self.instance.get(\"status\", {}).get(\"readyToUse\", None)\n == status,\n )\n for sample in samples:\n if sample:\n return",
"def wait(self):\n\t\traise NotImplementedError(\"must be redeclared\")",
"def wait(self):\n response = self._client.api.operations[self.id].wait.get()\n\n try:\n if response.json()['metadata']['status'] == 'Failure':\n raise exceptions.LXDAPIException(response)\n except KeyError:\n # Support for legacy LXD\n pass",
"def waitForCompletion(self):\n\n while(json.loads(self.robot.device())['state']!=0):\n time.sleep(0.1)\n continue\n\n return",
"def do_wait(self):\n pass",
"def wait_progress(self):\n pass",
"def wait_progress(self):\n pass",
"def wait(self):\n pass",
"def wait(self):\n pass",
"def set_status_update_waiter(self):\n status_message_update_waiter = self.status_message_update_waiter\n if (status_message_update_waiter is not None):\n self.status_message_update_waiter = None\n status_message_update_waiter.set_result(...)",
"def wait(self, _id):\n while not self._actions[_id].done:\n sleep(1e-3)",
"def wait_for_instance_status(config, status):\n client = config.create_api_client()\n InstanceId = config.get('InstanceId')\n while True:\n time.sleep(20)\n req = DescribeInstancesRequest.DescribeInstancesRequest()\n result = do_action(client, req)\n items = result[\"Instances\"][\"Instance\"]\n lookups = {item['InstanceId']: item for item in items}\n if lookups[InstanceId]['Status'] == status:\n return\n else:\n click.echo(\"Instance's current status: {}; transfer to status {} ...\".format(\n lookups[InstanceId]['Status'], status\n ))",
"def wait(self, timeoout=None, state=\"C-completed\"):",
"def waitUntilFinished():",
"def waitUntilFinished():",
"def waitUntilFinished():",
"def waitUntilFinished():",
"def wait(self):\n time.sleep(self.next())",
"def wait(self, ms=None):\r\n util.raiseNotDefined()",
"def run_and_wait():\n self.busy.put(True)\n action()\n self.busy.put(False)\n status._finished(success=True)"
] | [
"0.73892504",
"0.72170204",
"0.71951866",
"0.70215213",
"0.6863582",
"0.6744071",
"0.6706966",
"0.66450965",
"0.6635064",
"0.65745586",
"0.6519117",
"0.6512439",
"0.65002567",
"0.6495888",
"0.6489756",
"0.6467292",
"0.6467292",
"0.6445491",
"0.6445491",
"0.64176154",
"0.6408975",
"0.63799983",
"0.63768154",
"0.63565546",
"0.63565546",
"0.63565546",
"0.63565546",
"0.6323276",
"0.6309911",
"0.62933964"
] | 0.7880691 | 0 |
Check that object deleted successfully. | def check_object_deleted(self, object_name, object_id, timeout=60):
cmd = self.object_cmd(object_name, 'show')
try:
start_time = time.time()
while time.time() - start_time < timeout:
if object_id not in self.cinder(cmd, params=object_id):
break
except exceptions.CommandFailed:
pass
else:
self.fail("%s %s not deleted after %d seconds."
% (object_name, object_id, timeout)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _objectDeleted(self, obj):\n pass",
"def do_deleting(self, request, obj, obj_display, obj_id):\n try:\n with transaction.atomic(savepoint=False):\n self.log_deletion(request, obj, obj_display)\n self.delete_model(request, obj)\n\n return self.response_delete(request, obj_display, obj_id)\n except ValidationError as ex:\n for message in ex.messages:\n self.message_user(request, message, messages.ERROR)\n return False",
"def test_delete_object(self):\n u = self.d.user('example')\n u.delete()\n\n method, url, data, headers = self.d._fetcher.last_request\n self.assertEqual(method, 'DELETE')\n self.assertEqual(url, '/users/example')",
"def delete(self, obj):",
"def test_delete(self):\n\n value = self.instance.delete()\n self.client.delete_instance.assert_called_once_with('nginx')\n self.assertEqual(value, self.client.delete_instance.return_value)",
"def delete_success(self, id_):\n rv = self.post((id_, self.delete_url), dict(post='yes'))\n assert not self.verify_object({self.id_field: id_})\n return rv",
"def test_post_deletion_success(self):\n url = reverse(\n 'post-detail',\n args=[\n self.topic1.url_name,\n self.post1.id\n ]\n )\n self.client.credentials(\n HTTP_AUTHORIZATION = 'Token ' + self.user1.auth_token.key\n )\n response = self.client.delete(url)\n\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n deleted_post = Post.objects.filter(\n id=self.post1.id,\n )\n self.assertFalse(deleted_post.exists())",
"def delete(self) -> bool:\n return False",
"def delete(self, *args, **kwargs):\n return 0",
"def delete(self, request, *args, **kwargs):\r\n self.object = self.get_object()\r\n success_url = self.get_success_url()\r\n self.object.delete()\r\n messages.success(self.request, self.success_message)\r\n return HttpResponseRedirect(success_url)",
"def check_vs_deleted(vs_name, created_objects):\r\n if keep_objects:\r\n return\r\n api_instance = client.CustomObjectsApi()\r\n val = 0\r\n while val < 12:\r\n try:\r\n api_response = api_instance.get_namespaced_custom_object(\r\n group=\"snapshot.storage.k8s.io\",\r\n version=\"v1\",\r\n plural=\"volumesnapshots\",\r\n name=vs_name,\r\n namespace=namespace_value\r\n )\r\n LOGGER.debug(api_response)\r\n time.sleep(15)\r\n LOGGER.info(f\"Volume Snapshot Delete : Checking deletion for {vs_name}\")\r\n val += 1\r\n except ApiException:\r\n LOGGER.info(f\"Volume Snapshot Delete : {vs_name} deletion confirmed\")\r\n return\r\n LOGGER.error(f\"Volume Snapshot Delete : {vs_name} is not deleted , asserting\")\r\n clean_with_created_objects(created_objects)\r\n assert False",
"def _notify_delete(self, cuds_object):",
"def do_delete(self, arg):\n \treturn False",
"def after_delete(self, obj, st):\n pass",
"def do_destroy(self, arg):\n obj = self.verify(arg, 2)\n if obj:\n del storage.all()[obj]\n storage.save()",
"def delObject(self, obj):\n\n try:\n req = self._delete(obj.selfUrl)\n if req.status_code == requests.codes.ok:\n print('object {0} deleted'.format(obj.id))\n return req.status_code\n else:\n print('not deleted', req.status_code)\n return req.status_code\n\n\n except requests.exceptions.RequestException as err:\n print('del request failed:', err)",
"def test_object_delete(self):\n self.add_attachments() # attach the attachments\n\n # we have 2 attachments\n self.assertEqual(3, self.eightythreeb.attachment_set.all().count())\n # delete a single object\n self.eightythreeb.attachment_set.all()[0].delete()\n # we should now have 2 active attachments\n self.assertEqual(2, self.eightythreeb.attachment_set.all().count())\n # and 1 deleted\n self.assertEqual(1, self.eightythreeb.attachment_set.deleted().count())",
"def delete(self,force=False):\n if self.state == ObjState.created or self.state == ObjState.absent:\n return (0,'')\n else:\n return (1,'cannot delete a live disk (only absent or user created ones)')",
"def check_deletion():\n\n if newrev == zero:\n ERROR(\"[POLICY] Refusing to delete this ref\")\n sys.exit(1)",
"def delete():",
"def test_delete_success(self):\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': self.notification.id})\n response = self.client.delete(url)\n\n self.assertEqual(response.status_code, 200)",
"def check_vs_content_deleted(vs_content_name, created_objects):\r\n if keep_objects:\r\n return\r\n api_instance = client.CustomObjectsApi()\r\n val = 0\r\n while val < 12:\r\n try:\r\n api_response = api_instance.get_cluster_custom_object(\r\n group=\"snapshot.storage.k8s.io\",\r\n version=\"v1beta1\",\r\n plural=\"volumesnapshotcontents\",\r\n name=vs_content_name\r\n )\r\n LOGGER.debug(api_response)\r\n time.sleep(15)\r\n LOGGER.info(f\"Volume Snapshot Content Delete : Checking deletion {vs_content_name}\")\r\n val += 1\r\n except ApiException:\r\n LOGGER.info(f\"Volume Snapshot Content Delete : {vs_content_name} deletion confirmed\")\r\n return\r\n LOGGER.error(f\"Volume Snapshot Content Delete : {vs_content_name} is not deleted , asserting\")\r\n clean_with_created_objects(created_objects)\r\n assert False",
"def before_delete(self, obj, st):\n pass",
"def test_delete(self):\n # Create a thread with two messages\n thread = self.create_thread()\n message = mommy.make(\n Message, thread=thread, sender=thread.first_message.sender)\n\n # Delete the second message\n message = Message.objects.get(pk=message.pk)\n message.delete()\n\n # Verify the thread now has one message\n thread = Thread.objects.get(pk=thread.pk)\n self.assertEqual(thread.total_messages, 1)\n\n # Verify the message status is now deleted\n deleted_message = Message.objects.with_deleted().get(pk=message.pk)\n self.assertEqual(deleted_message.status, 'deleted')",
"async def deleted(self, value):\n pass",
"def test_data_object_untrash(self):\n pass",
"def test_delete(self):\n thread = self.create_thread()\n ut = UserThread.objects.get(\n user=thread.recipients.first(), thread=thread)\n ut_id = ut.pk\n ut.delete()\n ut = UserThread.objects.with_deleted().get(pk=ut_id)\n self.assertEqual(ut.status, 'deleted')",
"def test_delete(self):\n self.assertFalse(self.user1.ad_deleted)\n self.assertTrue(self.user1.active)\n url = '/api/users/{}/'.format(self.user1.ad_guid)\n data = {'Deleted': True}\n response = self.client.put(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 202)\n user = DepartmentUser.objects.get(pk=self.user1.pk) # Refresh from db\n self.assertTrue(user.ad_deleted)\n self.assertFalse(user.active)\n self.assertTrue(user.in_sync)\n # Also delete a second object, to check for silly 'empty string' collisions.\n url = '/api/users/{}/'.format(self.user2.ad_guid)\n response = self.client.put(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 202)",
"def test_delete(client):\n rv = delete(client, 'Michael')\n assert json.loads(rv.data.decode())['code'] == 0\n assert json.loads(rv.data.decode())['owner'] == 'Michael'",
"def delete(self):\n ..."
] | [
"0.7490332",
"0.7215603",
"0.7164006",
"0.7086588",
"0.70412916",
"0.70006657",
"0.69678736",
"0.6930805",
"0.6886216",
"0.68708795",
"0.68376404",
"0.68198115",
"0.6819284",
"0.6812539",
"0.679975",
"0.67971224",
"0.6778325",
"0.6737792",
"0.6717377",
"0.66977274",
"0.6691106",
"0.66881365",
"0.66801673",
"0.66766256",
"0.667495",
"0.66547996",
"0.6645462",
"0.6639732",
"0.6626414",
"0.66254675"
] | 0.7400769 | 1 |
check sparsemaxloss kernel against numpy | def _test_sparsemax_loss_against_numpy(self, dtype, random, use_gpu):
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
q = np.zeros((test_obs, 10))
q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1
tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu)
np_loss = self._np_sparsemax_loss(z, q).astype(dtype)
self.assertAllCloseAccordingToType(
np_loss, tf_loss_out, half_atol=1e-2, half_rtol=5e-3)
self.assertShapeEqual(np_loss, tf_loss_op) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _test_sparsemax_loss_positive(self, dtype, random, use_gpu):\n z = random.uniform(low=-3, high=3, size=(test_obs, 10))\n q = np.zeros((test_obs, 10))\n q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1\n\n tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu)\n\n self.assertAllCloseAccordingToType(np.abs(tf_loss_out), tf_loss_out)\n self.assertShapeEqual(np.zeros(test_obs), tf_loss_op)",
"def _test_sparsemax_loss_zero(self, dtype, random, use_gpu):\n # construct z and q, such that z_k >= 1 + max_{j!=k} z_k holds for\n # delta_0 = 1.\n z = random.uniform(low=-3, high=3, size=(test_obs, 10))\n z[:, 0] = np.max(z, axis=1) + 1.05\n\n q = np.zeros((test_obs, 10))\n q[:, 0] = 1\n\n tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu)\n tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(z, dtype, use_gpu)\n\n self.assertAllCloseAccordingToType(np.zeros(test_obs), tf_loss_out)\n self.assertShapeEqual(np.zeros(test_obs), tf_loss_op)\n\n self.assertAllCloseAccordingToType(q, tf_sparsemax_out)\n self.assertShapeEqual(q, tf_sparsemax_op)",
"def _test_sparsemax_loss_of_inf(self, dtype, random, use_gpu):\n q = np.asarray([[0, 0, 1], [0, 0, 1], [0, 0, 1], [0, 0, 1]])\n z_neg = np.asarray([\n [0, -np.inf, 0],\n [0, -np.inf, -np.inf],\n [-np.inf, -np.inf, 0],\n [-np.inf, -np.inf, -np.inf],\n ]).astype(dtype)\n z_pos = np.asarray([[0, np.inf, 0], [0, np.inf,\n np.inf], [np.inf, np.inf, 0],\n [np.inf, np.inf, np.inf]]).astype(dtype)\n z_mix = np.asarray([[0, np.inf, 0], [0, np.inf, -np.inf],\n [-np.inf, np.inf, 0], [-np.inf, np.inf,\n -np.inf]]).astype(dtype)\n\n _, tf_loss_neg = self._tf_sparsemax_loss(z_neg, q, dtype, use_gpu)\n self.assertAllCloseAccordingToType([0.25, np.inf, 0, np.nan], tf_loss_neg)\n\n _, tf_loss_pos = self._tf_sparsemax_loss(z_pos, q, dtype, use_gpu)\n self.assertAllCloseAccordingToType([np.nan, np.nan, np.nan, np.nan],\n tf_loss_pos)\n\n _, tf_loss_mix = self._tf_sparsemax_loss(z_mix, q, dtype, use_gpu)\n self.assertAllCloseAccordingToType([np.nan, np.nan, np.nan, np.nan],\n tf_loss_mix)",
"def isenhance(kernel):\n length = len(kernel)\n count = 0.0\n\n # if B(i, j) = 0, this matrix should not be enhanced\n if kernel[0] != 1:\n return False\n\n for item in kernel:\n if item == 1:\n count += 1\n\n if count / length >= 0.65 and (kernel[length - 2] == 1 or kernel[length - 1] == 1):\n return True\n else:\n return False",
"def _test_sparsemax_loss_of_nan(self, dtype, random, use_gpu):\n q = np.asarray([[0, 0, 1], [0, 0, 1], [0, 0, 1]])\n z_nan = np.asarray([[0, np.nan, 0], [0, np.nan, np.nan],\n [np.nan, np.nan, np.nan]]).astype(dtype)\n\n _, tf_loss_nan = self._tf_sparsemax_loss(z_nan, q, dtype, use_gpu)\n self.assertAllCloseAccordingToType([np.nan, np.nan, np.nan], tf_loss_nan)",
"def get_kernel_supports(self):\n return np.zeros((self.n_nodes, self.n_nodes)) + self.n_gaussians",
"def test_fixedkernel(self):\r\n X = np.random.rand(30, 4)\r\n K = np.dot(X, X.T)\r\n kernel = GPy.kern.fixed(4, K)\r\n kern = GPy.kern.poly(5, degree=4)\r\n self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose))",
"def nlm_fast(data,FS,BS,sigma,dev = None, proc = None):\n\n if dev is None:\n dev = imgtools.__DEFAULT_OPENCL_DEVICE__\n\n if dev is None:\n raise ValueError(\"no OpenCLDevice found...\")\n\n if proc is None:\n proc = OCLProcessor(dev,absPath(\"kernels/nlm_fast.cl\"),options=\"-D FS=%i -D BS=%i\"%(FS,BS))\n\n img = dev.createImage_like(data)\n\n distImg = dev.createImage_like(data)\n\n distImg = dev.createImage_like(data, mem_flags = \"READ_WRITE\")\n tmpImg = dev.createImage_like(data, mem_flags = \"READ_WRITE\")\n tmpImg2 = dev.createImage_like(data, mem_flags = \"READ_WRITE\")\n\n accBuf = dev.createBuffer(data.size,\n mem_flags = cl.mem_flags.READ_WRITE,\n dtype = np.float32)\n\n weightBuf = dev.createBuffer(data.size,\n mem_flags = cl.mem_flags.READ_WRITE,\n dtype = np.float32)\n\n\n dev.writeImage(img,data);\n dev.writeBuffer(weightBuf,np.zeros_like(data,dtype=np.float32));\n\n for dx in range(BS+1):\n for dy in range(-BS,BS+1):\n proc.runKernel(\"dist\",img.shape,None,img,tmpImg,np.int32(dx),np.int32(dy))\n proc.runKernel(\"convolve\",img.shape,None,tmpImg,tmpImg2,np.int32(1))\n proc.runKernel(\"convolve\",img.shape,None,tmpImg2,distImg,np.int32(2))\n\n proc.runKernel(\"computePlus\",img.shape,None,img,distImg,accBuf,weightBuf,\n np.int32(img.shape[0]),np.int32(img.shape[1]),\n np.int32(dx),np.int32(dy),np.float32(sigma))\n\n if any([dx,dy]):\n proc.runKernel(\"computeMinus\",img.shape,None,img,distImg,accBuf,weightBuf,\n np.int32(img.shape[0]),np.int32(img.shape[1]),\n np.int32(dx),np.int32(dy),np.float32(sigma))\n\n acc = dev.readBuffer(accBuf,dtype=np.float32).reshape(data.shape)\n weights = dev.readBuffer(weightBuf,dtype=np.float32).reshape(data.shape)\n\n return acc/weights",
"def test_is_product_entangled_state_2_sys():\n ent_vec = max_entangled(4)\n np.testing.assert_equal(is_product_vector(ent_vec, dim=[4, 4]), False)",
"def compute(self, node, input_vals):\r\n #start = time.time()\r\n\r\n #assert len(input_vals) == 1\r\n strides = node.const_attr[1]\r\n ksize = node.const_attr[0]\r\n ish = list(input_vals[0].shape)\r\n input = input_vals[0]\r\n output = np.zeros([ish[0],(ish[1]-ksize[1])//strides[1]+1,(ish[2]-ksize[2])//strides[2]+1,ish[3]])\r\n osh = output.shape\r\n #print(osh)\r\n for i in range(osh[1]):\r\n for j in range(osh[2]):\r\n output[:,i,j,:] = np.amax(input[:,i*strides[1]:(i+1)*strides[1],j*strides[1]:(j+1)*strides[1],:],axis=(1,2))\r\n #end = time.time() \r\n #print(\"max_pool\") \r\n #print(end - start) \r\n return output\r\n \r\n #assert False\r",
"def test_kernel_matrix(kernel, sample):\n sample = [ele for ele in sample] # consumed several times\n\n potato = KernelMethod(kernel)\n mat = potato.matrix(sample)\n assert np.all(np.linalg.eigvals(mat) > 0) or np.isclose(\n [np.min(np.linalg.eigvals(mat))], [0]\n )",
"def indicator_kernel(h: np.ndarray, Xi: np.ndarray, x: np.ndarray) -> np.ndarray:\n return (Xi - x) == 0",
"def test_check_sparse(self):\n x, x_rand, s = self.create_testdata()\n task = mmRDTR()\n #check that a dense array x is passed thru unchanged\n check = task.check_sparse(x)\n self.assertEqual(np.all(check==x),True)\n #check that a sparse matrix s is converted to a numpy array\n check = task.check_sparse(s)\n self.assertIsInstance(check,np.ndarray)\n self.assertEqual(np.all(check==s.todense()),True)",
"def _test_gradient_against_numpy(self, dtype, random, use_gpu):\n z = random.uniform(low=-3, high=3, size=(test_obs, 10))\n q = np.zeros((test_obs, 10))\n q[np.arange(0, test_obs), np.random.randint(0, 10, size=test_obs)] = 1\n\n logits = constant_op.constant(z.astype(dtype), name='z')\n sparsemax_op = sparsemax(logits)\n loss_op = sparsemax_loss(logits, sparsemax_op, q.astype(dtype))\n loss_grad_op = gradients_impl.gradients(loss_op, [logits])[0]\n\n with self.test_session(use_gpu=use_gpu):\n tf_grad = loss_grad_op.eval()\n np_grad = self._np_sparsemax_loss_grad(z, q).astype(dtype)\n\n self.assertAllCloseAccordingToType(\n np_grad, tf_grad, half_atol=1e-2, half_rtol=5e-3)\n self.assertShapeEqual(np_grad, loss_grad_op)",
"def test_softmax():\r\n x = T.fmatrix('x')\r\n\r\n z = T.nnet.softmax(x)\r\n f = theano.function([x], z, mode=mode_without_gpu)\r\n f_gpu = theano.function([x], z, mode=mode_with_gpu)\r\n assert f.maker.fgraph.toposort()[-1].op == T.nnet.softmax\r\n assert isinstance(f_gpu.maker.fgraph.toposort()[-2].op,\r\n cuda.nnet.GpuSoftmax)\r\n\r\n def cmp(n, m):\r\n #print \"test_softmax\",n,m\r\n data = numpy.arange(n * m, dtype='float32').reshape(n, m)\r\n out = f(data)\r\n gout = f_gpu(data)\r\n assert numpy.allclose(out, gout), numpy.absolute(out - gout)\r\n\r\n #we need to test n>32*1024 to check that we make the block loop.\r\n cmp(2, 5)\r\n cmp(2 << 15, 5)\r\n cmp(4074, 400)\r\n cmp(0, 10)\r\n cmp(784, 784)\r\n cmp(4, 1000)\r\n cmp(4, 1024)\r\n cmp(4, 2000)\r\n cmp(4, 2024)\r\n # The GTX285 don't have enough shared memory.\r\n cmp(4, 4074)\r\n # The GTX580, 680 and kepler don't have enough shared memory.\r\n cmp(2, 10000)\r\n cmp(128, 16 * 1024)\r\n cmp(128, 64 * 1024)",
"def test_gauss_kernel():\n\n gauss = gauss_kernel(2, 5)\n\n assert gauss.shape == (5, 5)\n assert gauss[2, 2] == 0.039788735772973836",
"def max():\n valid=result_alpha.F>0\n src_data.F[valid]=np.maximum( src_data.F[valid],result_data.F[valid] )",
"def test_is_product_entangled_state_3_sys():\n ent_vec = max_entangled(4)\n np.testing.assert_equal(is_product_vector(ent_vec, dim=[2, 2, 2, 2]), False)",
"def test_float32_float64_equivalence(is_sparse):\n rng = np.random.RandomState(0)\n X = rng.rand(10, 2)\n\n if is_sparse:\n X[X < 0.8] = 0\n X = sp.csr_matrix(X)\n\n km64 = BisectingKMeans(n_clusters=3, random_state=0).fit(X)\n km32 = BisectingKMeans(n_clusters=3, random_state=0).fit(X.astype(np.float32))\n\n assert_allclose(km32.cluster_centers_, km64.cluster_centers_)\n assert_array_equal(km32.labels_, km64.labels_)",
"def isdense(qob):\n return isinstance(qob, np.ndarray)",
"def Kernel(x, y):\n\n Result = (np.dot(x_train[x, :], x_train[y, :])+1)**5 # Polynomial\n #Result = (np.dot(x_train[x, :], x_train[y, :])+1) # Linear\n #Gaussian\n \"\"\"\n sigma = 1\n if np.ndim(x_train[x, :]) == 1 and np.ndim(x_train[y, :]) == 1:\n Result = np.exp(- (np.linalg.norm(x_train[x, :] - x_train[y, :], 2)) ** 2 / (2 * sigma ** 2))\n elif (np.ndim(x_train[x, :]) > 1 and np.ndim(x_train[y, :]) == 1) or (np.ndim(x_train[x, :]) == 1 and np.ndim(x_train[y, :]) > 1):\n Result = np.exp(- (np.linalg.norm(x_train[x, :] - x_train[y, :], 2, axis=1) ** 2) / (2 * sigma ** 2))\n elif np.ndim(x_train[x, :]) > 1 and np.ndim(x_train[y, :]) > 1:\n Result = np.exp(- (np.linalg.norm(x[:, np.newaxis] - y[np.newaxis, :], 2, axis=2) ** 2) / (2 * sigma ** 2))\n \"\"\"\n return Result",
"def check_supported(x, indices, v, y, kernel_name=\"inplace_update\"):\n shape_indices = indices.get(\"shape\")\n shape_v = v.get(\"shape\")\n dtype_v = v.get(\"dtype\").lower()\n reg_v_len = 1\n for i in range(1, len(shape_v)):\n reg_v_len = reg_v_len * shape_v[i]\n\n if dtype_v in (\"float32\", \"int32\"):\n dtype_size = 4\n else:\n dtype_size = 2\n reg_v_size = reg_v_len * dtype_size\n\n try:\n if len(shape_indices) != 1 or (reg_v_size % 32 != 0):\n return False\n\n except RuntimeError:\n return False\n\n return True",
"def fssd_grid_search_kernel(p, X, test_locs, list_kernel):\n V = test_locs\n n_cand = len(list_kernel)\n objs = np.zeros(n_cand)\n for i in range(n_cand):\n ki = list_kernel[i]\n objs[i] = power_criterion(p, X, ki, test_locs)\n logging.info(\"(%d), obj: %5.4g, k: %s\" % (i, objs[i], str(ki)))\n\n # Widths that come early in the list\n # are preferred if test powers are equal.\n\n besti = objs.argmax()\n return besti, objs",
"def test_kernel(binary_matrix, result):\n\n # get the kernel from the gaussian elimination.\n pivots = (binary_matrix.T != 0).argmax(axis=0)\n nonpivots = np.setdiff1d(range(len(binary_matrix[0])), pivots)\n\n kernel = []\n for col in nonpivots:\n col_vector = binary_matrix[:, col]\n null_vector = np.zeros((binary_matrix.shape[1]), dtype=int)\n null_vector[col] = 1\n for i in pivots:\n first_entry = np.where(binary_matrix[:, i] == 1)[0][0]\n if col_vector[first_entry] == 1:\n null_vector[i] = 1\n kernel.append(null_vector.tolist())\n\n # get the nullspace from the _kernel function.\n nullspace = _kernel(binary_matrix)\n\n for nullvec in kernel:\n assert nullvec in nullspace.tolist()\n\n assert (nullspace == result).all()",
"def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n num_classes = W.shape[1]\n #print('num_classes = ', num_classes)\n num_train = X.shape[0]\n #print('num_train = ', num_train)\n \n min_score = 0.0\n shifted_scores = np.zeros(W.shape[1])\n #max_score = np.zeros(W.shape[1])\n max_score = 0.0\n \n loss_array = np.zeros(y.shape[0])\n for i in range(num_train):\n scores = X[i].dot(W)\n #print('scores dimensions = ', scores.shape)\n #print('scores = ', scores)\n #print('i =', i, 'y = ', y[i])\n min_score = np.min(scores)\n max_score = np.max(scores)\n #print(min_score,max_score)\n shifted_scores = np.multiply(-1,scores + abs(min_score))\n #print(scores)\n #print(shifted_scores)\n exp_scores = np.exp(shifted_scores)\n norm = np.amax(exp_scores)\n norm_scores = np.divide(exp_scores,norm)\n loss_array[i] = np.multiply(-1,np.log(norm_scores[y[i]]/(np.sum(norm_scores)-norm_scores[y[i]])))\n #print(loss_array)\n for j in range(num_classes): \n\t\n if j == y[i]: \n dW[:,j] = np.multiply(norm_scores[y[i]],1-norm_scores[y[i]])\n else:\n dW[:,j] = np.multiply(-1,np.multiply(norm_scores[y[i]],norm_scores[y[j]]))\n\t\t\t\n\t\t\t\n loss = np.amax(loss_array)\n\n # Add regularization to the loss.\n loss = 0.5 * reg * np.sum(W * W) + loss\n \n \n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW",
"def TST_LCE(S,N1,N_per,alpha,model_C2ST, w_C2ST, b_C2ST, device,dtype):\r\n np.random.seed(seed=1102)\r\n torch.manual_seed(1102)\r\n torch.cuda.manual_seed(1102)\r\n N = S.shape[0]\r\n f = torch.nn.Softmax()\r\n output = f(model_C2ST(S).mm(w_C2ST) + b_C2ST)\r\n # pred_C2ST = output.max(1, keepdim=True)[1]\r\n STAT = abs(output[:N1,0].type(torch.FloatTensor).mean() - output[N1:,0].type(torch.FloatTensor).mean())\r\n STAT_vector = np.zeros(N_per)\r\n for r in range(N_per):\r\n ind = np.random.choice(N, N, replace=False)\r\n # divide into new X, Y\r\n ind_X = ind[:N1]\r\n ind_Y = ind[N1:]\r\n # print(indx)\r\n STAT_vector[r] = abs(output[ind_X,0].type(torch.FloatTensor).mean() - output[ind_Y,0].type(torch.FloatTensor).mean())\r\n S_vector = np.sort(STAT_vector)\r\n threshold = S_vector[np.int(np.ceil(N_per * (1 - alpha)))]\r\n threshold_lower = S_vector[np.int(np.ceil(N_per * alpha))]\r\n h = 0\r\n if STAT.item() > threshold:\r\n h = 1\r\n return h, threshold, STAT",
"def use_blas(self):\r\n #the gemm version only support that case\r\n if self.out_mode == 'valid' and self.dx == 0 and self.dy == 0:\r\n #We use a faster version in those case.\r\n if (self.imshp != self.imshp_logical or\r\n self.kshp != self.kshp_logical or\r\n self.unroll_patch or\r\n self.unroll_batch > 0 or\r\n self.unroll_kern > 0):\r\n\r\n return False\r\n return True\r\n return False",
"def test_maxv_keyword(self):\n # Set maxv to 200\n byt = bytscl(self.array2, maxv=200)\n control = numpy.sum(self.array2 >= 200)\n total = numpy.sum(byt == 255)\n self.assertEqual(total, control)",
"def _sparse_tanimotokernel(data_1, data_2):\n\n norm_1 = np.array(data_1.power(2).sum(axis=1).reshape(data_1.shape[0], 1))\n norm_2 = np.array(data_2.power(2).sum(axis=1).reshape(data_2.shape[0], 1))\n prod = data_1.dot(data_2.T).A\n\n divisor = (norm_1 + norm_2.T - prod) + np.finfo(data_1.dtype).eps\n result = prod / divisor\n return result",
"def __test_softmax():\n\n test1 = softmax(tf.constant(np.array([[1001, 1002], [3, 4]]), dtype=tf.float32))\n with tf.Session() as sess:\n test1 = sess.run(test1)\n __test_all_close(\"Softmax test 1\", test1, np.array([[0.26894142, 0.73105858],\n [0.26894142, 0.73105858]]))\n\n test2 = softmax(tf.constant(np.array([[-1001, -1002]]), dtype=tf.float32))\n with tf.Session() as sess:\n test2 = sess.run(test2)\n __test_all_close(\"Softmax test 2\", test2, np.array([[0.73105858, 0.26894142]]))\n\n print(\"Basic (non-exhaustive) softmax tests pass\\n\")"
] | [
"0.6425822",
"0.64143616",
"0.6307287",
"0.61605656",
"0.6009254",
"0.5761981",
"0.5626876",
"0.5618227",
"0.5602798",
"0.5556589",
"0.55460817",
"0.55382633",
"0.5535278",
"0.5496034",
"0.5479103",
"0.545446",
"0.5430234",
"0.542532",
"0.5417887",
"0.54107744",
"0.5393759",
"0.53824836",
"0.537803",
"0.53690964",
"0.53657967",
"0.53626627",
"0.5317982",
"0.53026897",
"0.5301595",
"0.52953035"
] | 0.6560093 | 0 |
check sparsemaxloss transfers nan | def _test_sparsemax_loss_of_nan(self, dtype, random, use_gpu):
q = np.asarray([[0, 0, 1], [0, 0, 1], [0, 0, 1]])
z_nan = np.asarray([[0, np.nan, 0], [0, np.nan, np.nan],
[np.nan, np.nan, np.nan]]).astype(dtype)
_, tf_loss_nan = self._tf_sparsemax_loss(z_nan, q, dtype, use_gpu)
self.assertAllCloseAccordingToType([np.nan, np.nan, np.nan], tf_loss_nan) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _test_sparsemax_loss_of_inf(self, dtype, random, use_gpu):\n q = np.asarray([[0, 0, 1], [0, 0, 1], [0, 0, 1], [0, 0, 1]])\n z_neg = np.asarray([\n [0, -np.inf, 0],\n [0, -np.inf, -np.inf],\n [-np.inf, -np.inf, 0],\n [-np.inf, -np.inf, -np.inf],\n ]).astype(dtype)\n z_pos = np.asarray([[0, np.inf, 0], [0, np.inf,\n np.inf], [np.inf, np.inf, 0],\n [np.inf, np.inf, np.inf]]).astype(dtype)\n z_mix = np.asarray([[0, np.inf, 0], [0, np.inf, -np.inf],\n [-np.inf, np.inf, 0], [-np.inf, np.inf,\n -np.inf]]).astype(dtype)\n\n _, tf_loss_neg = self._tf_sparsemax_loss(z_neg, q, dtype, use_gpu)\n self.assertAllCloseAccordingToType([0.25, np.inf, 0, np.nan], tf_loss_neg)\n\n _, tf_loss_pos = self._tf_sparsemax_loss(z_pos, q, dtype, use_gpu)\n self.assertAllCloseAccordingToType([np.nan, np.nan, np.nan, np.nan],\n tf_loss_pos)\n\n _, tf_loss_mix = self._tf_sparsemax_loss(z_mix, q, dtype, use_gpu)\n self.assertAllCloseAccordingToType([np.nan, np.nan, np.nan, np.nan],\n tf_loss_mix)",
"def _check_loss(self, loss):\n assert not np.isnan(loss), \"Model diverged with loss = NaN\"",
"def _test_sparsemax_loss_positive(self, dtype, random, use_gpu):\n z = random.uniform(low=-3, high=3, size=(test_obs, 10))\n q = np.zeros((test_obs, 10))\n q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1\n\n tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu)\n\n self.assertAllCloseAccordingToType(np.abs(tf_loss_out), tf_loss_out)\n self.assertShapeEqual(np.zeros(test_obs), tf_loss_op)",
"def _test_sparsemax_loss_zero(self, dtype, random, use_gpu):\n # construct z and q, such that z_k >= 1 + max_{j!=k} z_k holds for\n # delta_0 = 1.\n z = random.uniform(low=-3, high=3, size=(test_obs, 10))\n z[:, 0] = np.max(z, axis=1) + 1.05\n\n q = np.zeros((test_obs, 10))\n q[:, 0] = 1\n\n tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu)\n tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(z, dtype, use_gpu)\n\n self.assertAllCloseAccordingToType(np.zeros(test_obs), tf_loss_out)\n self.assertShapeEqual(np.zeros(test_obs), tf_loss_op)\n\n self.assertAllCloseAccordingToType(q, tf_sparsemax_out)\n self.assertShapeEqual(q, tf_sparsemax_op)",
"def max():\n valid=result_alpha.F>0\n src_data.F[valid]=np.maximum( src_data.F[valid],result_data.F[valid] )",
"def test_canonicalize_nan(self):\r\n sio = StringIO()\r\n handler = logging.StreamHandler(sio)\r\n handler.setLevel(logging.ERROR)\r\n logging.getLogger('theano.gof.opt').addHandler(handler)\r\n try:\r\n x = vector()\r\n f = theano.function([x], x + numpy.nan)\r\n finally:\r\n logging.getLogger('theano.gof.opt').removeHandler(handler)\r\n # Ideally this test would only catch the maxed out equilibrium\r\n # optimizer error message, but to be safe in case this message\r\n # is modified in the future, we assert that there is no error\r\n # at all.\r\n assert not sio.getvalue()",
"def test_add_nans_to_weights():\n # create input sparse matrix with one empty row (j=2)\n coords = np.array([[0, 3, 1, 0], [0, 3, 1, 2]])\n data = np.array([4.0, 5.0, 7.0, 9.0])\n Matin = sps.COO(coords, data, shape=(4, 4))\n\n # this is what is expected to come out (Nan added at i=0, j=2)\n coords = np.array([[0, 3, 1, 0, 2], [0, 3, 1, 2, 0]])\n data = np.array([4.0, 5.0, 7.0, 9.0, np.nan])\n expected = sps.COO(coords, data, shape=(4, 4))\n\n Matout = xe.smm.add_nans_to_weights(xr.DataArray(Matin, dims=('in', 'out')))\n assert np.allclose(expected.todense(), Matout.data.todense(), equal_nan=True)\n\n # Matrix without empty rows should return the same\n coords = np.array([[0, 3, 1, 0, 2], [0, 3, 1, 2, 1]])\n data = np.array([4.0, 5.0, 7.0, 9.0, 10.0])\n Matin = sps.COO(coords, data, shape=(4, 4))\n\n Matout = xe.smm.add_nans_to_weights(xr.DataArray(Matin, dims=('in', 'out')))\n assert np.allclose(Matin.todense(), Matout.data.todense())",
"def assert_no_nans(x):\n assert not torch.isnan(x).any()",
"def test_optimizer():\r\n nan_detected = [False]\r\n\r\n def detect_nan(i, node, fn):\r\n for output in fn.outputs:\r\n if numpy.isnan(output[0]).any():\r\n print '*** NaN detected ***'\r\n theano.printing.debugprint(node)\r\n print 'Inputs : %s' % [input[0] for input in fn.inputs]\r\n print 'Outputs: %s' % [output[0] for output in fn.outputs]\r\n nan_detected[0] = True\r\n break\r\n\r\n x = theano.tensor.dscalar('x')\r\n mode = theano.compile.MonitorMode(post_func=detect_nan)\r\n mode = mode.excluding('fusion')\r\n f = theano.function([x], [theano.tensor.log(x) * x],\r\n mode=mode)\r\n # Test that the fusion wasn't done\r\n assert len(f.maker.fgraph.apply_nodes) == 2\r\n f(0) # log(0) * 0 = -inf * 0 = NaN\r\n\r\n # Test that we still detect the nan\r\n assert nan_detected[0]",
"def test_detect_nan():\r\n nan_detected = [False]\r\n\r\n def detect_nan(i, node, fn):\r\n for output in fn.outputs:\r\n if numpy.isnan(output[0]).any():\r\n print '*** NaN detected ***'\r\n theano.printing.debugprint(node)\r\n print 'Inputs : %s' % [input[0] for input in fn.inputs]\r\n print 'Outputs: %s' % [output[0] for output in fn.outputs]\r\n nan_detected[0] = True\r\n break\r\n\r\n x = theano.tensor.dscalar('x')\r\n f = theano.function([x], [theano.tensor.log(x) * x],\r\n mode=theano.compile.MonitorMode(\r\n post_func=detect_nan))\r\n f(0) # log(0) * 0 = -inf * 0 = NaN\r\n assert nan_detected[0]",
"def _autocheck_nan(self):\n # assert np.isnan(self.W).any() == False, \"W matrix should not contain NaN values.\"\n assert np.isnan(self.Win).any() == False, \"Win matrix should not contain NaN values.\"\n if self.Wfb is not None:\n assert np.isnan(self.Wfb).any() == False, \"Wfb matrix should not contain NaN values.\"",
"def check_nan(self):\n # generate array for easier handling\n values = np.swapaxes(self.psf.psf_value, 0, 2)\n fail_count = 0\n\n # loop over energies\n for i, arr in enumerate(values):\n energy_hi = self.psf.energy_hi[i]\n energy_lo = self.psf.energy_lo[i]\n\n # check if bin is outside of safe energy threshold\n if self.psf.energy_thresh_lo > energy_hi:\n continue\n if self.psf.energy_thresh_hi < energy_lo:\n continue\n\n # loop over offsets\n for arr2 in arr:\n\n # loop over deltas\n for v in arr2:\n\n # check for nan\n if math.isnan(v.value):\n # add to fail counter\n fail_count += 1\n break\n\n results = {}\n if fail_count == 0:\n results[\"status\"] = \"ok\"\n else:\n results[\"status\"] = \"failed\"\n results[\"n_failed_bins\"] = fail_count\n\n self.results[\"nan\"] = results",
"def mask_nan_keep_loss(y_true, y_pred):\n y_pred, y_true, num_notnan = mask_nan(y_true, y_pred)\n loss = K.sum((K.flatten(y_pred) - K.flatten(y_true)) ** 2) / num_notnan\n return tf.where(~tf.math.is_nan(loss), loss, 0)",
"def _test_sparsemax_loss_against_numpy(self, dtype, random, use_gpu):\n z = random.uniform(low=-3, high=3, size=(test_obs, 10))\n q = np.zeros((test_obs, 10))\n q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1\n\n tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu)\n np_loss = self._np_sparsemax_loss(z, q).astype(dtype)\n\n self.assertAllCloseAccordingToType(\n np_loss, tf_loss_out, half_atol=1e-2, half_rtol=5e-3)\n self.assertShapeEqual(np_loss, tf_loss_op)",
"def _midn_loss_mine_hardest_negative(self, labels, losses):\n batch, num_classes = utils.get_tensor_shape(labels)\n indices_0 = tf.range(batch, dtype=tf.int64)\n indices_1 = utils.masked_argmax(data=losses, mask=1.0 - labels, dim=1)\n indices = tf.stack([indices_0, indices_1], axis=-1)\n negative_masks = tf.sparse_to_dense(\n indices, [batch, num_classes], sparse_values=1.0)\n return tf.add(labels, negative_masks)",
"def test_add_nans_to_weights():\n # create input sparse matrix with one empty row (j=2)\n row = np.array([0, 3, 1, 0])\n col = np.array([0, 3, 1, 2])\n data = np.array([4., 5., 7., 9.])\n Matin = sps.coo_matrix((data, (row, col)), shape=(4, 4))\n\n # this is what is expected to come out (Nan added at i=0, j=2)\n row = np.array([0, 3, 1, 0, 2])\n col = np.array([0, 3, 1, 2, 0])\n data = np.array([4., 5., 7., 9., np.nan])\n expected = sps.coo_matrix((data, (row, col)), shape=(4, 4))\n\n Matout = xe.smm.add_nans_to_weights(Matin)\n assert np.allclose(expected.toarray(), Matout.toarray(), equal_nan=True)\n\n # Matrix without empty rows should return the same\n row = np.array([0, 3, 1, 0, 2])\n col = np.array([0, 3, 1, 2, 1])\n data = np.array([4., 5., 7., 9., 10.])\n Matin = sps.coo_matrix((data, (row, col)), shape=(4, 4))\n\n Matout = xe.smm.add_nans_to_weights(Matin)\n assert np.allclose(Matin.toarray(), Matout.toarray())",
"def max_missed_valid(prediction, label, amount):\n fp_vec = (prediction > 0) & (label == 0)\n valid_loss_max = np.nan_to_num(amount[fp_vec].max())\n return valid_loss_max",
"def test_negative_sampling_self_adversarial_loss(self):\n loss_fct = NSSALoss(margin=1.0, adversarial_temperature=1.0)\n self.assertIs(loss_fct._reduction_method, torch.mean)\n\n pos_scores = torch.tensor([0.0, 0.0, -0.5, -0.5])\n neg_scores = torch.tensor([0.0, 0.0, -1.0, -1.0])\n\n # ≈ result of softmax\n weights = torch.tensor([0.37, 0.37, 0.13, 0.13])\n\n # neg_distances - margin = [-1., -1., 0., 0.]\n # sigmoids ≈ [0.27, 0.27, 0.5, 0.5]\n log_sigmoids = torch.tensor([-1.31, -1.31, -0.69, -0.69])\n intermediate = weights * log_sigmoids\n # sum over the softmax dim as weights sum up to 1\n neg_loss = torch.sum(intermediate, dim=-1)\n\n # pos_distances = [0., 0., 0.5, 0.5]\n # margin - pos_distances = [1. 1., 0.5, 0.5]\n # ≈ result of sigmoid\n # sigmoids ≈ [0.73, 0.73, 0.62, 0.62]\n log_sigmoids = torch.tensor([-0.31, -0.31, -0.48, -0.48])\n pos_loss = torch.mean(log_sigmoids)\n\n # expected_loss ≈ 0.34\n expected_loss = (-pos_loss - neg_loss) / 2.0\n\n loss = loss_fct(pos_scores, neg_scores, weights).item()\n\n self.assertAlmostEqual(expected_loss, 0.77, delta=0.02)\n self.assertAlmostEqual(expected_loss, loss, delta=0.02)",
"def test_check_null_weight_with_nonzeros() -> None:\n sample_weight = np.ones_like(y_toy)\n sw_out, X_out, y_out = check_null_weight(sample_weight, X_toy, y_toy)\n np.testing.assert_almost_equal(sw_out, sample_weight)\n np.testing.assert_almost_equal(X_out, X_toy)\n np.testing.assert_almost_equal(y_out, y_toy)",
"def test_nan_check(self):\n values_with_nans = np.array([1, 2, 3, np.nan, np.nan])\n\n with LogCapture(\"puma\") as log:\n _ = hist_w_unc(values_with_nans, bins=4)\n log.check(\n (\n \"puma\",\n \"WARNING\",\n \"Histogram values contain 2 nan values!\",\n )\n )",
"def missing_values():\n print('Missings in the train data:', train_data.isnull().sum())",
"def test_nan_input(self):\n self.cube_uv_down.data.fill(np.nan)\n msg = (\n \"The radiation flux in UV downward contains data \"\n \"that is negative or NaN. Data should be >= 0.\"\n )\n with self.assertRaisesRegex(ValueError, msg):\n calculate_uv_index(self.cube_uv_down)",
"def test_finite(self):\n \n Number_of_tests = 1000\n low = -1000\n high = 1000\n for i in range(Number_of_tests):\n x = np.random.rand(100) * (high - low) + low\n y = aux_functions.softmax_base(x)\n\n # This should be True if all are finite\n all_finite = np.isfinite(y).all()\n self.assertTrue(all_finite)",
"def test_to_knx_max_exceeded(self):\n with pytest.raises(ConversionError):\n DPTSceneNumber.to_knx(DPTSceneNumber.value_max + 1)",
"def min():\n valid=result_alpha.F>0\n src_data.F[valid]=np.minimum( src_data.F[valid],result_data.F[valid] )",
"def _check_nan(self, vector):\n return np.isnan(vector).sum() > 0",
"def testExpectedNaNOpOutputs(self):\n check_numerics_callback.enable_check_numerics()\n\n # Empty input tensor\n x = constant_op.constant(1, dtype=dtypes.float32, shape=[0, 1, 1, 1])\n scale = constant_op.constant([1], dtype=dtypes.float32)\n offset = constant_op.constant([1], dtype=dtypes.float32)\n\n # Calling fused_batch_norm with an empty input should output a NaN in the\n # latter four outputs without triggering the check_numerics callback\n batch_norm_res = gen_nn_ops._fused_batch_norm(\n x=x, scale=scale, offset=offset, mean=[], variance=[])\n\n _, batch_mean, batch_variance, _, _ = self.evaluate(batch_norm_res)\n\n self.assertTrue(np.isnan(batch_mean.squeeze()))\n self.assertTrue(np.isnan(batch_variance.squeeze()))",
"def ReplaceInvalid(arr, max_value=None):\n with np.warnings.catch_warnings():\n np.warnings.filterwarnings('ignore')\n arr[arr < 0.0] = np.nan\n if max_value:\n arr[arr > max_value] = np.nan",
"def nanmse(\n pred: Tensor,\n target: Tensor) -> Tensor:\n\n mask = torch.isnan(target)\n cnt = torch.sum(~mask, dtype=target.dtype)\n\n mse = torch.pow(pred - target, 2).sum() / cnt\n\n return mse",
"def _no_nan(self, feature: np.array) -> bool:\n if not np.any(np.isnan(feature)):\n return True\n else:\n return False"
] | [
"0.6993612",
"0.68045354",
"0.6742524",
"0.64270777",
"0.6119363",
"0.59418494",
"0.5930874",
"0.5891737",
"0.583687",
"0.58328086",
"0.57153714",
"0.567385",
"0.56433874",
"0.5623153",
"0.5609487",
"0.5586741",
"0.5584325",
"0.5562963",
"0.5493455",
"0.5464474",
"0.54510564",
"0.544543",
"0.5434016",
"0.5432499",
"0.5422803",
"0.542211",
"0.5421355",
"0.54155755",
"0.54033184",
"0.5401504"
] | 0.7484194 | 0 |
check sparsemaxloss is infinity safe | def _test_sparsemax_loss_of_inf(self, dtype, random, use_gpu):
q = np.asarray([[0, 0, 1], [0, 0, 1], [0, 0, 1], [0, 0, 1]])
z_neg = np.asarray([
[0, -np.inf, 0],
[0, -np.inf, -np.inf],
[-np.inf, -np.inf, 0],
[-np.inf, -np.inf, -np.inf],
]).astype(dtype)
z_pos = np.asarray([[0, np.inf, 0], [0, np.inf,
np.inf], [np.inf, np.inf, 0],
[np.inf, np.inf, np.inf]]).astype(dtype)
z_mix = np.asarray([[0, np.inf, 0], [0, np.inf, -np.inf],
[-np.inf, np.inf, 0], [-np.inf, np.inf,
-np.inf]]).astype(dtype)
_, tf_loss_neg = self._tf_sparsemax_loss(z_neg, q, dtype, use_gpu)
self.assertAllCloseAccordingToType([0.25, np.inf, 0, np.nan], tf_loss_neg)
_, tf_loss_pos = self._tf_sparsemax_loss(z_pos, q, dtype, use_gpu)
self.assertAllCloseAccordingToType([np.nan, np.nan, np.nan, np.nan],
tf_loss_pos)
_, tf_loss_mix = self._tf_sparsemax_loss(z_mix, q, dtype, use_gpu)
self.assertAllCloseAccordingToType([np.nan, np.nan, np.nan, np.nan],
tf_loss_mix) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _test_sparsemax_loss_of_nan(self, dtype, random, use_gpu):\n q = np.asarray([[0, 0, 1], [0, 0, 1], [0, 0, 1]])\n z_nan = np.asarray([[0, np.nan, 0], [0, np.nan, np.nan],\n [np.nan, np.nan, np.nan]]).astype(dtype)\n\n _, tf_loss_nan = self._tf_sparsemax_loss(z_nan, q, dtype, use_gpu)\n self.assertAllCloseAccordingToType([np.nan, np.nan, np.nan], tf_loss_nan)",
"def _test_sparsemax_loss_positive(self, dtype, random, use_gpu):\n z = random.uniform(low=-3, high=3, size=(test_obs, 10))\n q = np.zeros((test_obs, 10))\n q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1\n\n tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu)\n\n self.assertAllCloseAccordingToType(np.abs(tf_loss_out), tf_loss_out)\n self.assertShapeEqual(np.zeros(test_obs), tf_loss_op)",
"def _test_sparsemax_loss_zero(self, dtype, random, use_gpu):\n # construct z and q, such that z_k >= 1 + max_{j!=k} z_k holds for\n # delta_0 = 1.\n z = random.uniform(low=-3, high=3, size=(test_obs, 10))\n z[:, 0] = np.max(z, axis=1) + 1.05\n\n q = np.zeros((test_obs, 10))\n q[:, 0] = 1\n\n tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu)\n tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(z, dtype, use_gpu)\n\n self.assertAllCloseAccordingToType(np.zeros(test_obs), tf_loss_out)\n self.assertShapeEqual(np.zeros(test_obs), tf_loss_op)\n\n self.assertAllCloseAccordingToType(q, tf_sparsemax_out)\n self.assertShapeEqual(q, tf_sparsemax_op)",
"def _check_loss(self, loss):\n assert not np.isnan(loss), \"Model diverged with loss = NaN\"",
"def max():\n valid=result_alpha.F>0\n src_data.F[valid]=np.maximum( src_data.F[valid],result_data.F[valid] )",
"def test_finite(self):\n \n Number_of_tests = 1000\n low = -1000\n high = 1000\n for i in range(Number_of_tests):\n x = np.random.rand(100) * (high - low) + low\n y = aux_functions.softmax_base(x)\n\n # This should be True if all are finite\n all_finite = np.isfinite(y).all()\n self.assertTrue(all_finite)",
"def test_eval_one_max(self):\n f0 = np.ones((10, 5))\n self.assertTrue(np.isinf(eval_one_max(f0)[0]))",
"def early_stop(val_loss):\n\n assert isinstance(val_loss, list)\n\n if val_loss[-1] > val_loss[-2] > val_loss[-3] > val_loss[-4] > val_loss[-5] > val_loss[-6]:\n return True\n else:\n return False",
"def test_010_softmax():\n u = ACTIVATION_DIFF_ACCEPTANCE_VALUE\n P = softmax(np.array([2.44756739, 2.13945115]).astype(TYPE_FLOAT))\n E = np.array([0.57642539, 0.42357461]).astype(TYPE_FLOAT)\n assert np.all(np.abs(P-E) < u)\n\n for _ in range(NUM_MAX_TEST_TIMES):\n N: int = np.random.randint(1, NUM_MAX_BATCH_SIZE)\n M: int = np.random.randint(2, NUM_MAX_NODES)\n X = MAX_ACTIVATION_VALUE * np.random.randn(N, M).astype(TYPE_FLOAT)\n np.all(np.isfinite(softmax(X)))",
"def check_overflow(self):\n self.stateC = self.toConceptual(self.state)\n\n check_inf = torch.any(torch.isinf(self.stateC)) or torch.any(\n torch.isinf(self.state))\n check_nan = torch.any(torch.isnan(self.stateC)) or torch.any(\n torch.isnan(self.state))\n\n if check_inf or check_nan:\n return True\n else:\n return False",
"def test_Max_Iteration_NotZero(self):\n\t\tself.assertRaises(calc.OutOfRangeError, calc.it, M([[1 + 1j]]), 0 + 0j, 0)",
"def _test_sparsemax_loss_against_numpy(self, dtype, random, use_gpu):\n z = random.uniform(low=-3, high=3, size=(test_obs, 10))\n q = np.zeros((test_obs, 10))\n q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1\n\n tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu)\n np_loss = self._np_sparsemax_loss(z, q).astype(dtype)\n\n self.assertAllCloseAccordingToType(\n np_loss, tf_loss_out, half_atol=1e-2, half_rtol=5e-3)\n self.assertShapeEqual(np_loss, tf_loss_op)",
"def max_missed_valid(prediction, label, amount):\n fp_vec = (prediction > 0) & (label == 0)\n valid_loss_max = np.nan_to_num(amount[fp_vec].max())\n return valid_loss_max",
"def torch_isnotfinite(x):\n not_inf = ((x + 1) != x)\n not_nan = (x == x)\n return 1 - (not_inf & not_nan)",
"def max_power_out_existing_storage_rule(_m, g, y, s, t):\r\n\r\n return m.p_out[g, y, s, t] - (m.P_OUT_MAX[g] * (1 - m.F[g, y])) <= 0",
"def max_energy_existing_storage_rule(_m, g, y, s, t):\r\n\r\n return m.q[g, y, s, t] - m.Q_MAX[g] <= 0",
"def __test_softmax():\n\n test1 = softmax(tf.constant(np.array([[1001, 1002], [3, 4]]), dtype=tf.float32))\n with tf.Session() as sess:\n test1 = sess.run(test1)\n __test_all_close(\"Softmax test 1\", test1, np.array([[0.26894142, 0.73105858],\n [0.26894142, 0.73105858]]))\n\n test2 = softmax(tf.constant(np.array([[-1001, -1002]]), dtype=tf.float32))\n with tf.Session() as sess:\n test2 = sess.run(test2)\n __test_all_close(\"Softmax test 2\", test2, np.array([[0.73105858, 0.26894142]]))\n\n print(\"Basic (non-exhaustive) softmax tests pass\\n\")",
"def max_power_in_existing_storage_rule(_m, g, y, s, t):\r\n\r\n return m.p_in[g, y, s, t] - (m.P_IN_MAX[g] * (1 - m.F[g, y])) <= 0",
"def test_to_knx_max_exceeded(self):\n with pytest.raises(ConversionError):\n DPTSceneNumber.to_knx(DPTSceneNumber.value_max + 1)",
"def test_negative_sampling_self_adversarial_loss(self):\n loss_fct = NSSALoss(margin=1.0, adversarial_temperature=1.0)\n self.assertIs(loss_fct._reduction_method, torch.mean)\n\n pos_scores = torch.tensor([0.0, 0.0, -0.5, -0.5])\n neg_scores = torch.tensor([0.0, 0.0, -1.0, -1.0])\n\n # ≈ result of softmax\n weights = torch.tensor([0.37, 0.37, 0.13, 0.13])\n\n # neg_distances - margin = [-1., -1., 0., 0.]\n # sigmoids ≈ [0.27, 0.27, 0.5, 0.5]\n log_sigmoids = torch.tensor([-1.31, -1.31, -0.69, -0.69])\n intermediate = weights * log_sigmoids\n # sum over the softmax dim as weights sum up to 1\n neg_loss = torch.sum(intermediate, dim=-1)\n\n # pos_distances = [0., 0., 0.5, 0.5]\n # margin - pos_distances = [1. 1., 0.5, 0.5]\n # ≈ result of sigmoid\n # sigmoids ≈ [0.73, 0.73, 0.62, 0.62]\n log_sigmoids = torch.tensor([-0.31, -0.31, -0.48, -0.48])\n pos_loss = torch.mean(log_sigmoids)\n\n # expected_loss ≈ 0.34\n expected_loss = (-pos_loss - neg_loss) / 2.0\n\n loss = loss_fct(pos_scores, neg_scores, weights).item()\n\n self.assertAlmostEqual(expected_loss, 0.77, delta=0.02)\n self.assertAlmostEqual(expected_loss, loss, delta=0.02)",
"def sparse_softmax(sparse, dim):\n dtype = sparse.dtype\n device = sparse.device\n\n # softmax is non-linear operation, so sparse tensors must\n # be coalesced.\n sparse = sparse.coalesce()\n inf = float('inf')\n indices = sparse._indices()\n values = sparse._values()\n\n if dim < sparse.sparse_dim():\n nnz = sparse._nnz()\n\n # compute pool indices\n size = sparse.size()\n strides = torch.ones((sparse.sparse_dim(), 1), dtype=indices.dtype, device=indices.device)\n for i in reversed(range(sparse.sparse_dim() - 1)):\n strides[i, 0] = strides[i + 1, 0] * size[i + 1]\n strides[dim, 0] = 0\n\n pool = (indices * strides).sum(dim=0)\n i2p = {}\n for i in range(nnz):\n c = int(pool[i])\n if c not in i2p:\n i2p[c] = len(i2p)\n pool[i] = i2p[c]\n\n # compute max\n dense_size = tuple(size[sparse.sparse_dim():])\n mx = torch.empty((pool.max() + 1,) + dense_size, dtype=dtype, device=device)\n mx[:] = -inf\n for n in range(nnz):\n p = pool[n]\n mx[p] = torch.max(mx[p], values[n])\n\n # apply exp to (v - mx) and sum the results\n exp_values = torch.empty_like(values)\n exp_sums = torch.zeros_like(mx)\n for n in range(nnz):\n p = pool[n]\n v = exp_values[n] = (values[n] - mx[p]).exp()\n exp_sums[p] = exp_sums[p] + v\n\n # normalize with the sum of exponents\n for n in range(nnz):\n p = pool[n]\n exp_values[n] = exp_values[n] / exp_sums[p]\n\n return torch.sparse_coo_tensor(indices,\n exp_values,\n sparse.size(),\n dtype=dtype, device=device)\n\n elif dim < sparse.sparse_dim() + sparse.dense_dim():\n return torch.sparse_coo_tensor(indices,\n F.softmax(values, dim - sparse.sparse_dim() + 1),\n sparse.size(),\n dtype=dtype, device=device)\n else:\n raise ValueError(\n '`dim(=%s)` must be smaller than `sparse_dim(=%s) + dense_dim(=%s)`'\n % (dim, sparse.sparse_dim(), sparse.dense_dim()))",
"def spatial_argmax(logit):\n weights = F.softmax(logit.view(logit.size(0), -1), dim=-1).view_as(logit)\n\n\n \n two = (torch.ones_like(logit)*2).mean(dim=(-2,-1))\n \n\n true = torch.stack((logit.mean(dim=[-2,-1]),(weights.sum(1) * torch.linspace(-1, 1, logit.size(2)).to(logit.device)[None]).sum(1),(weights.sum(2) * torch.linspace(-1, 1, logit.size(1)).to(logit.device)[None]).sum(1)), 1)\n # print(true.size())\n\n false = torch.stack((logit.mean(dim=[-2,-1]),two,two), 1)\n\n # print(false.size())\n\n # print(logit.mean(dim=[-2,-1])[:,None].repeat(1,3).size())\n\n return torch.where(logit.mean(dim=[-2,-1])[:,None].repeat(1,3) > 0,true,false)",
"def test_Max_Iteration_Negative(self):\n\t\tself.assertRaises(calc.OutOfRangeError, calc.it, M([[1 + 1j]]), 0 + 0j, -10)",
"def softmax_to_dense(sparse, dim):\n dtype = sparse.dtype\n device = sparse.device\n dense = to_dense(sparse, fill_value=-float('inf'))\n r = F.softmax(dense, dim)\n # softmax on empty lines results nan, replace with zeros to match the definition\n r[r != r] = 0\n return r",
"def isinf(x):\n return False",
"def is_infinitesimal(G):\n return False",
"def fun(self, x):\n if np.any(x > 0):\n return np.inf\n else:\n return 0",
"def non_negative_capacity_rule(_m, g, y):\r\n\r\n return - m.x_c[g, y] <= 0",
"def sparsemax(logits, name=None):\n\n with ops.name_scope(name, \"sparsemax\", [logits]) as name:\n logits = ops.convert_to_tensor(logits, name=\"logits\")\n obs = array_ops.shape(logits)[0]\n dims = array_ops.shape(logits)[1]\n\n # In the paper, they call the logits z.\n # The mean(logits) can be substracted from logits to make the algorithm\n # more numerically stable. the instability in this algorithm comes mostly\n # from the z_cumsum. Substacting the mean will cause z_cumsum to be close\n # to zero. However, in practise the numerical instability issues are very\n # minor and substacting the mean causes extra issues with inf and nan\n # input.\n z = logits\n\n # sort z\n z_sorted, _ = nn.top_k(z, k=dims)\n\n # calculate k(z)\n z_cumsum = math_ops.cumsum(z_sorted, axis=1)\n k = math_ops.range(\n 1, math_ops.cast(dims, logits.dtype) + 1, dtype=logits.dtype)\n z_check = 1 + k * z_sorted > z_cumsum\n # because the z_check vector is always [1,1,...1,0,0,...0] finding the\n # (index + 1) of the last `1` is the same as just summing the number of 1.\n k_z = math_ops.reduce_sum(math_ops.cast(z_check, dtypes.int32), axis=1)\n\n # calculate tau(z)\n # If there are inf values or all values are -inf, the k_z will be zero,\n # this is mathematically invalid and will also cause the gather_nd to fail.\n # Prevent this issue for now by setting k_z = 1 if k_z = 0, this is then\n # fixed later (see p_safe) by returning p = nan. This results in the same\n # behavior as softmax.\n k_z_safe = math_ops.maximum(k_z, 1)\n indices = array_ops.stack([math_ops.range(0, obs), k_z_safe - 1], axis=1)\n tau_sum = array_ops.gather_nd(z_cumsum, indices)\n tau_z = (tau_sum - 1) / math_ops.cast(k_z, logits.dtype)\n\n # calculate p\n p = math_ops.maximum(\n math_ops.cast(0, logits.dtype), z - tau_z[:, array_ops.newaxis])\n # If k_z = 0 or if z = nan, then the input is invalid\n p_safe = array_ops.where(\n math_ops.logical_or(\n math_ops.equal(k_z, 0), math_ops.is_nan(z_cumsum[:, -1])),\n array_ops.fill([obs, dims], math_ops.cast(float(\"nan\"), logits.dtype)),\n p)\n\n return p_safe",
"def test_non_zero_loss(self):\n # Reset models.\n self.model.load_state_dict(self.initial_model_dict)\n self.actor_model.load_state_dict(self.initial_actor_model_dict)\n\n polybeast.learn(*self.learn_args)\n\n self.assertNotEqual(self.stats[\"total_loss\"], 0.0)\n self.assertNotEqual(self.stats[\"pg_loss\"], 0.0)\n self.assertNotEqual(self.stats[\"baseline_loss\"], 0.0)\n self.assertNotEqual(self.stats[\"entropy_loss\"], 0.0)"
] | [
"0.72919095",
"0.7114687",
"0.6878583",
"0.6437763",
"0.6230124",
"0.6111207",
"0.60838383",
"0.60647756",
"0.6005",
"0.58566064",
"0.5836664",
"0.5805594",
"0.57603514",
"0.5743544",
"0.5705851",
"0.56542385",
"0.5634526",
"0.5634205",
"0.56317574",
"0.5618935",
"0.55948126",
"0.5581037",
"0.5580506",
"0.5558328",
"0.55413747",
"0.55396956",
"0.55277216",
"0.5515617",
"0.5513378",
"0.55018854"
] | 0.7511145 | 0 |
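The two rows above ("transfers nan" and "is infinity safe") exercise edge-case behaviour of the sparsemax loss from Martins and Astudillo (2016), "From Softmax to Sparsemax". Assuming that paper's loss definition, L(z, q) = -q.z + 1/2 * sum_{j in S(z)} (z_j^2 - tau(z)^2) + 1/2 * ||q||^2, the first expected value 0.25 in tf_loss_neg can be checked by hand: for z = [0, -inf, 0] and q = [0, 0, 1], the sparsemax support is {1, 3} with tau(z) = -0.5, so L = 0 + 1/2 * ((0 - 0.25) + (0 - 0.25)) + 1/2 * 1 = 0.25. A NaN logit, or a row whose support cannot be determined (all -inf, or any +inf), leaves tau undefined, which is consistent with the NaN expectations in both rows and with the "If k_z = 0 or if z = nan, then the input is invalid" handling visible in the sparsemax implementation listed among the negatives.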
check sparsemaxloss proposition 4 | def _test_sparsemax_loss_positive(self, dtype, random, use_gpu):
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
q = np.zeros((test_obs, 10))
q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1
tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu)
self.assertAllCloseAccordingToType(np.abs(tf_loss_out), tf_loss_out)
self.assertShapeEqual(np.zeros(test_obs), tf_loss_op) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _test_sparsemax_loss_zero(self, dtype, random, use_gpu):\n # construct z and q, such that z_k >= 1 + max_{j!=k} z_k holds for\n # delta_0 = 1.\n z = random.uniform(low=-3, high=3, size=(test_obs, 10))\n z[:, 0] = np.max(z, axis=1) + 1.05\n\n q = np.zeros((test_obs, 10))\n q[:, 0] = 1\n\n tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu)\n tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(z, dtype, use_gpu)\n\n self.assertAllCloseAccordingToType(np.zeros(test_obs), tf_loss_out)\n self.assertShapeEqual(np.zeros(test_obs), tf_loss_op)\n\n self.assertAllCloseAccordingToType(q, tf_sparsemax_out)\n self.assertShapeEqual(q, tf_sparsemax_op)",
"def test_check_sparse(self):\n x, x_rand, s = self.create_testdata()\n task = mmRDTR()\n #check that a dense array x is passed thru unchanged\n check = task.check_sparse(x)\n self.assertEqual(np.all(check==x),True)\n #check that a sparse matrix s is converted to a numpy array\n check = task.check_sparse(s)\n self.assertIsInstance(check,np.ndarray)\n self.assertEqual(np.all(check==s.todense()),True)",
"def _test_sparsemax_loss_of_inf(self, dtype, random, use_gpu):\n q = np.asarray([[0, 0, 1], [0, 0, 1], [0, 0, 1], [0, 0, 1]])\n z_neg = np.asarray([\n [0, -np.inf, 0],\n [0, -np.inf, -np.inf],\n [-np.inf, -np.inf, 0],\n [-np.inf, -np.inf, -np.inf],\n ]).astype(dtype)\n z_pos = np.asarray([[0, np.inf, 0], [0, np.inf,\n np.inf], [np.inf, np.inf, 0],\n [np.inf, np.inf, np.inf]]).astype(dtype)\n z_mix = np.asarray([[0, np.inf, 0], [0, np.inf, -np.inf],\n [-np.inf, np.inf, 0], [-np.inf, np.inf,\n -np.inf]]).astype(dtype)\n\n _, tf_loss_neg = self._tf_sparsemax_loss(z_neg, q, dtype, use_gpu)\n self.assertAllCloseAccordingToType([0.25, np.inf, 0, np.nan], tf_loss_neg)\n\n _, tf_loss_pos = self._tf_sparsemax_loss(z_pos, q, dtype, use_gpu)\n self.assertAllCloseAccordingToType([np.nan, np.nan, np.nan, np.nan],\n tf_loss_pos)\n\n _, tf_loss_mix = self._tf_sparsemax_loss(z_mix, q, dtype, use_gpu)\n self.assertAllCloseAccordingToType([np.nan, np.nan, np.nan, np.nan],\n tf_loss_mix)",
"def max_score_test(self):\n max_score_tuple = self.results.max_score(molecules=[\"DDSPDLPK\"])\n assert max_score_tuple[0] == 1 # score\n assert max_score_tuple[3].scaling_factor == 100 # intensity\n\n assert self.results.max_score(molecules=[\"_DDSPDLPK_\"]) == [0, None, None, None]\n return",
"def _test_sparsemax_loss_of_nan(self, dtype, random, use_gpu):\n q = np.asarray([[0, 0, 1], [0, 0, 1], [0, 0, 1]])\n z_nan = np.asarray([[0, np.nan, 0], [0, np.nan, np.nan],\n [np.nan, np.nan, np.nan]]).astype(dtype)\n\n _, tf_loss_nan = self._tf_sparsemax_loss(z_nan, q, dtype, use_gpu)\n self.assertAllCloseAccordingToType([np.nan, np.nan, np.nan], tf_loss_nan)",
"def max():\n valid=result_alpha.F>0\n src_data.F[valid]=np.maximum( src_data.F[valid],result_data.F[valid] )",
"def test_return_sparse():\n X = Vectorizer(strategy=\"bow\", return_sparse=True).fit_transform(X_text, y10)\n assert all(pd.api.types.is_sparse(X[c]) for c in X.columns)",
"def max_power_in_existing_storage_rule(_m, g, y, s, t):\r\n\r\n return m.p_in[g, y, s, t] - (m.P_IN_MAX[g] * (1 - m.F[g, y])) <= 0",
"def max_power_in_candidate_storage_rule(_m, g, y, s, t):\r\n\r\n return m.p_in[g, y, s, t] - sum(m.x_c[g, j] for j in m.Y if j <= y) <= 0",
"def test_is_product_entangled_state_3_sys():\n ent_vec = max_entangled(4)\n np.testing.assert_equal(is_product_vector(ent_vec, dim=[2, 2, 2, 2]), False)",
"def max_power_out_existing_storage_rule(_m, g, y, s, t):\r\n\r\n return m.p_out[g, y, s, t] - (m.P_OUT_MAX[g] * (1 - m.F[g, y])) <= 0",
"def _test_sparsemax_loss_against_numpy(self, dtype, random, use_gpu):\n z = random.uniform(low=-3, high=3, size=(test_obs, 10))\n q = np.zeros((test_obs, 10))\n q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1\n\n tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu)\n np_loss = self._np_sparsemax_loss(z, q).astype(dtype)\n\n self.assertAllCloseAccordingToType(\n np_loss, tf_loss_out, half_atol=1e-2, half_rtol=5e-3)\n self.assertShapeEqual(np_loss, tf_loss_op)",
"def max_power_out_candidate_storage_rule(_m, g, y, s, t):\r\n\r\n return m.p_out[g, y, s, t] - sum(m.x_c[g, j] for j in m.Y if j <= y) <= 0",
"def test_max_score(self):\n\n \"\"\"\n Create the test data.\n \"\"\"\n tokenizer = Tokenizer(stem=False)\n posts = [\n \"Erdogan with threats to attack regime forces 'everywhere' in Syria\",\n \"Damascus says Erdogan 'disconnected from reality' after threats\",\n ]\n\n corpus = [ Document(post, tokenizer.tokenize(post)) for post in posts ]\n\n extractor = TokenExtractor()\n scorer = TFIDFScorer({ 'erdogan': 1, 'threats': 2 }, 10)\n candidates = extractor.extract(corpus)\n scores = scorer.score(candidates, normalize_scores=True)\n self.assertTrue(all( score <= 1 for score in scores.values() ))",
"def test_is_product_entangled_state_2_sys():\n ent_vec = max_entangled(4)\n np.testing.assert_equal(is_product_vector(ent_vec, dim=[4, 4]), False)",
"def spatial_argmax(logit):\n weights = F.softmax(logit.view(logit.size(0), -1), dim=-1).view_as(logit)\n\n\n \n two = (torch.ones_like(logit)*2).mean(dim=(-2,-1))\n \n\n true = torch.stack((logit.mean(dim=[-2,-1]),(weights.sum(1) * torch.linspace(-1, 1, logit.size(2)).to(logit.device)[None]).sum(1),(weights.sum(2) * torch.linspace(-1, 1, logit.size(1)).to(logit.device)[None]).sum(1)), 1)\n # print(true.size())\n\n false = torch.stack((logit.mean(dim=[-2,-1]),two,two), 1)\n\n # print(false.size())\n\n # print(logit.mean(dim=[-2,-1])[:,None].repeat(1,3).size())\n\n return torch.where(logit.mean(dim=[-2,-1])[:,None].repeat(1,3) > 0,true,false)",
"def max_energy_existing_storage_rule(_m, g, y, s, t):\r\n\r\n return m.q[g, y, s, t] - m.Q_MAX[g] <= 0",
"def __test_softmax():\n\n test1 = softmax(tf.constant(np.array([[1001, 1002], [3, 4]]), dtype=tf.float32))\n with tf.Session() as sess:\n test1 = sess.run(test1)\n __test_all_close(\"Softmax test 1\", test1, np.array([[0.26894142, 0.73105858],\n [0.26894142, 0.73105858]]))\n\n test2 = softmax(tf.constant(np.array([[-1001, -1002]]), dtype=tf.float32))\n with tf.Session() as sess:\n test2 = sess.run(test2)\n __test_all_close(\"Softmax test 2\", test2, np.array([[0.73105858, 0.26894142]]))\n\n print(\"Basic (non-exhaustive) softmax tests pass\\n\")",
"def max_energy_candidate_storage_rule(_m, g, y, s, t):\r\n\r\n return m.q[g, y, s, t] - sum(m.x_c[g, j] for j in m.Y if j <= y) <= 0",
"def analyze_sensitivity_sparse_grid(sparse_grid,max_order=2):\n from pyapprox.multivariate_polynomials import \\\n define_poly_options_from_variable_transformation\n from pyapprox.adaptive_sparse_grid import \\\n convert_sparse_grid_to_polynomial_chaos_expansion\n pce_opts=define_poly_options_from_variable_transformation(\n sparse_grid.variable_transformation)\n pce = convert_sparse_grid_to_polynomial_chaos_expansion(\n sparse_grid,pce_opts)\n pce_main_effects,pce_total_effects=\\\n get_main_and_total_effect_indices_from_pce(\n pce.get_coefficients(),pce.get_indices())\n\n interaction_terms, pce_sobol_indices = get_sobol_indices(\n pce.get_coefficients(),pce.get_indices(),max_order=max_order)\n \n return SensivitityResult(\n {'main_effects':pce_main_effects,\n 'total_effects':pce_total_effects,\n 'sobol_indices':pce_sobol_indices,\n 'sobol_interaction_indices':interaction_terms,\n 'pce':pce})",
"def test_zero(self):\n result = max_triple_mul([1, 2, 0])\n self.assertEqual(result, 0)",
"def validate(self, current_index): \n loss_out = []\n gts_cat = torch.LongTensor()\n pred_cat = torch.LongTensor()\n Validation = self.datasetManager.get_validation_dataloader()\n length = len(Validation)\n print('\\nValidation : %i steps'%length)\n for i, batch in tqdm.tqdm(enumerate(Validation)):\n batch = self.to_device(batch)\n img = batch[0]\n gts = batch[1]\n out = self.network(img)\n out = self.softmax(out)\n loss = self.loss(out,gts)\n pred = torch.argmax(out, 1, keepdim = True)\n pred = pred.view(-1)\n loss_out.append(loss.item())\n \n gts_cat = torch.cat((gts_cat,gts.cpu()),0)\n pred_cat = torch.cat((pred_cat,pred.cpu()),0)\n\n f1_score = sklearn.metrics.f1_score(gts_cat,pred_cat, average = 'macro')\n Kappa = sklearn.metrics.cohen_kappa_score(gts_cat,pred_cat)\n Accuracy = sklearn.metrics.accuracy_score(gts_cat,pred_cat) \n \n self.tb_writer.add_scalar(\"f1 score\",f1_score,current_index)\n self.tb_writer.add_scalar('Kappa score',Kappa,current_index)\n self.tb_writer.add_scalar('Accuracy', Accuracy, current_index)\n self.tb_writer.add_scalar('Validation Loss', np.mean(loss_out), current_index)\n \n return np.mean(loss_out)",
"def test_010_softmax():\n u = ACTIVATION_DIFF_ACCEPTANCE_VALUE\n P = softmax(np.array([2.44756739, 2.13945115]).astype(TYPE_FLOAT))\n E = np.array([0.57642539, 0.42357461]).astype(TYPE_FLOAT)\n assert np.all(np.abs(P-E) < u)\n\n for _ in range(NUM_MAX_TEST_TIMES):\n N: int = np.random.randint(1, NUM_MAX_BATCH_SIZE)\n M: int = np.random.randint(2, NUM_MAX_NODES)\n X = MAX_ACTIVATION_VALUE * np.random.randn(N, M).astype(TYPE_FLOAT)\n np.all(np.isfinite(softmax(X)))",
"def test_sparsity(self):\n\n self.validator.adata.X = self.validator.adata.X.toarray()\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.warnings,\n [\n \"WARNING: Sparsity of 'X' is 0.875 which is greater than 0.5, \"\n \"and it is not a 'scipy.sparse.csr_matrix'. It is \"\n \"STRONGLY RECOMMENDED to use this type of matrix for \"\n \"the given sparsity.\"\n ],\n )",
"def test_maxIndex(self):\t\t\n self.assertEqual(attempt.maxIndexZ, 113)\n self.assertEqual(attempt.maxIndexW, 134)",
"def is_sparsity_enabled(cls):\n total,sp100,sp50 = 0,0,0\n for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:\n total += 1\n mask_sum = mask.sum()\n mask_numel = mask.numel()\n if mask_sum == mask_numel:\n sp100 += 1\n elif mask_sum*2 == mask_numel:\n sp50 += 1\n\n assert (total == sp100 or total == sp50), \"Inconsistent model sparsity\"\n if total == sp100:\n return False\n elif total == sp50:\n return True",
"def ncore(self):",
"def test_is_product_entangled_state():\n ent_vec = max_entangled(3)\n np.testing.assert_equal(is_product_vector(ent_vec), False)",
"def max_cardinality():\r\n #create a list containing the number of each vertex involvement.\r\n array = []\r\n for i in adj:\r\n array += [i[0],i[1]]\r\n\r\n #compute the degree by counting the involment\r\n degree = Counter(array).most_common()\r\n\r\n #retrieve the degree only\r\n degree_ = [ i[1] for i in degree]\r\n\r\n degree_ = np.array(degree_)\r\n \r\n max_m = None\r\n \r\n #check if m is valid\r\n for i in range(degree[0][1]+2)[2:]:\r\n \r\n #valid if there are at least m vertex with degree equals to at least m-1 \r\n if i < len(np.where(degree_>=i-1)[0]):\r\n max_m = i\r\n else:\r\n break\r\n max_m += 1\r\n print(f'maximum possible clique cardinality :{max_m}')\r\n return max_m",
"def sparse_softmax(sparse, dim):\n dtype = sparse.dtype\n device = sparse.device\n\n # softmax is non-linear operation, so sparse tensors must\n # be coalesced.\n sparse = sparse.coalesce()\n inf = float('inf')\n indices = sparse._indices()\n values = sparse._values()\n\n if dim < sparse.sparse_dim():\n nnz = sparse._nnz()\n\n # compute pool indices\n size = sparse.size()\n strides = torch.ones((sparse.sparse_dim(), 1), dtype=indices.dtype, device=indices.device)\n for i in reversed(range(sparse.sparse_dim() - 1)):\n strides[i, 0] = strides[i + 1, 0] * size[i + 1]\n strides[dim, 0] = 0\n\n pool = (indices * strides).sum(dim=0)\n i2p = {}\n for i in range(nnz):\n c = int(pool[i])\n if c not in i2p:\n i2p[c] = len(i2p)\n pool[i] = i2p[c]\n\n # compute max\n dense_size = tuple(size[sparse.sparse_dim():])\n mx = torch.empty((pool.max() + 1,) + dense_size, dtype=dtype, device=device)\n mx[:] = -inf\n for n in range(nnz):\n p = pool[n]\n mx[p] = torch.max(mx[p], values[n])\n\n # apply exp to (v - mx) and sum the results\n exp_values = torch.empty_like(values)\n exp_sums = torch.zeros_like(mx)\n for n in range(nnz):\n p = pool[n]\n v = exp_values[n] = (values[n] - mx[p]).exp()\n exp_sums[p] = exp_sums[p] + v\n\n # normalize with the sum of exponents\n for n in range(nnz):\n p = pool[n]\n exp_values[n] = exp_values[n] / exp_sums[p]\n\n return torch.sparse_coo_tensor(indices,\n exp_values,\n sparse.size(),\n dtype=dtype, device=device)\n\n elif dim < sparse.sparse_dim() + sparse.dense_dim():\n return torch.sparse_coo_tensor(indices,\n F.softmax(values, dim - sparse.sparse_dim() + 1),\n sparse.size(),\n dtype=dtype, device=device)\n else:\n raise ValueError(\n '`dim(=%s)` must be smaller than `sparse_dim(=%s) + dense_dim(=%s)`'\n % (dim, sparse.sparse_dim(), sparse.dense_dim()))"
] | [
"0.6196355",
"0.6067124",
"0.6020071",
"0.5861591",
"0.58333623",
"0.5814174",
"0.5738434",
"0.56803995",
"0.5659591",
"0.5651032",
"0.56506085",
"0.5645035",
"0.5634631",
"0.5630038",
"0.56013834",
"0.55691886",
"0.5534191",
"0.5532042",
"0.5531974",
"0.5514072",
"0.5472693",
"0.54624623",
"0.5460857",
"0.54596454",
"0.5446048",
"0.54304004",
"0.5419724",
"0.54086477",
"0.53883314",
"0.5369503"
] | 0.660775 | 0 |
check sparsemaxloss proposition 5 | def _test_sparsemax_loss_zero(self, dtype, random, use_gpu):
# construct z and q, such that z_k >= 1 + max_{j!=k} z_k holds for
# delta_0 = 1.
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
z[:, 0] = np.max(z, axis=1) + 1.05
q = np.zeros((test_obs, 10))
q[:, 0] = 1
tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu)
tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(z, dtype, use_gpu)
self.assertAllCloseAccordingToType(np.zeros(test_obs), tf_loss_out)
self.assertShapeEqual(np.zeros(test_obs), tf_loss_op)
self.assertAllCloseAccordingToType(q, tf_sparsemax_out)
self.assertShapeEqual(q, tf_sparsemax_op) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _test_sparsemax_loss_positive(self, dtype, random, use_gpu):\n z = random.uniform(low=-3, high=3, size=(test_obs, 10))\n q = np.zeros((test_obs, 10))\n q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1\n\n tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu)\n\n self.assertAllCloseAccordingToType(np.abs(tf_loss_out), tf_loss_out)\n self.assertShapeEqual(np.zeros(test_obs), tf_loss_op)",
"def _test_sparsemax_loss_of_inf(self, dtype, random, use_gpu):\n q = np.asarray([[0, 0, 1], [0, 0, 1], [0, 0, 1], [0, 0, 1]])\n z_neg = np.asarray([\n [0, -np.inf, 0],\n [0, -np.inf, -np.inf],\n [-np.inf, -np.inf, 0],\n [-np.inf, -np.inf, -np.inf],\n ]).astype(dtype)\n z_pos = np.asarray([[0, np.inf, 0], [0, np.inf,\n np.inf], [np.inf, np.inf, 0],\n [np.inf, np.inf, np.inf]]).astype(dtype)\n z_mix = np.asarray([[0, np.inf, 0], [0, np.inf, -np.inf],\n [-np.inf, np.inf, 0], [-np.inf, np.inf,\n -np.inf]]).astype(dtype)\n\n _, tf_loss_neg = self._tf_sparsemax_loss(z_neg, q, dtype, use_gpu)\n self.assertAllCloseAccordingToType([0.25, np.inf, 0, np.nan], tf_loss_neg)\n\n _, tf_loss_pos = self._tf_sparsemax_loss(z_pos, q, dtype, use_gpu)\n self.assertAllCloseAccordingToType([np.nan, np.nan, np.nan, np.nan],\n tf_loss_pos)\n\n _, tf_loss_mix = self._tf_sparsemax_loss(z_mix, q, dtype, use_gpu)\n self.assertAllCloseAccordingToType([np.nan, np.nan, np.nan, np.nan],\n tf_loss_mix)",
"def test_check_sparse(self):\n x, x_rand, s = self.create_testdata()\n task = mmRDTR()\n #check that a dense array x is passed thru unchanged\n check = task.check_sparse(x)\n self.assertEqual(np.all(check==x),True)\n #check that a sparse matrix s is converted to a numpy array\n check = task.check_sparse(s)\n self.assertIsInstance(check,np.ndarray)\n self.assertEqual(np.all(check==s.todense()),True)",
"def _test_sparsemax_loss_of_nan(self, dtype, random, use_gpu):\n q = np.asarray([[0, 0, 1], [0, 0, 1], [0, 0, 1]])\n z_nan = np.asarray([[0, np.nan, 0], [0, np.nan, np.nan],\n [np.nan, np.nan, np.nan]]).astype(dtype)\n\n _, tf_loss_nan = self._tf_sparsemax_loss(z_nan, q, dtype, use_gpu)\n self.assertAllCloseAccordingToType([np.nan, np.nan, np.nan], tf_loss_nan)",
"def max_score_test(self):\n max_score_tuple = self.results.max_score(molecules=[\"DDSPDLPK\"])\n assert max_score_tuple[0] == 1 # score\n assert max_score_tuple[3].scaling_factor == 100 # intensity\n\n assert self.results.max_score(molecules=[\"_DDSPDLPK_\"]) == [0, None, None, None]\n return",
"def test_max_score(self):\n\n \"\"\"\n Create the test data.\n \"\"\"\n tokenizer = Tokenizer(stem=False)\n posts = [\n \"Erdogan with threats to attack regime forces 'everywhere' in Syria\",\n \"Damascus says Erdogan 'disconnected from reality' after threats\",\n ]\n\n corpus = [ Document(post, tokenizer.tokenize(post)) for post in posts ]\n\n extractor = TokenExtractor()\n scorer = TFIDFScorer({ 'erdogan': 1, 'threats': 2 }, 10)\n candidates = extractor.extract(corpus)\n scores = scorer.score(candidates, normalize_scores=True)\n self.assertTrue(all( score <= 1 for score in scores.values() ))",
"def max_power_in_existing_storage_rule(_m, g, y, s, t):\r\n\r\n return m.p_in[g, y, s, t] - (m.P_IN_MAX[g] * (1 - m.F[g, y])) <= 0",
"def max_power_out_existing_storage_rule(_m, g, y, s, t):\r\n\r\n return m.p_out[g, y, s, t] - (m.P_OUT_MAX[g] * (1 - m.F[g, y])) <= 0",
"def max_power_out_candidate_storage_rule(_m, g, y, s, t):\r\n\r\n return m.p_out[g, y, s, t] - sum(m.x_c[g, j] for j in m.Y if j <= y) <= 0",
"def max_power_in_candidate_storage_rule(_m, g, y, s, t):\r\n\r\n return m.p_in[g, y, s, t] - sum(m.x_c[g, j] for j in m.Y if j <= y) <= 0",
"def max():\n valid=result_alpha.F>0\n src_data.F[valid]=np.maximum( src_data.F[valid],result_data.F[valid] )",
"def test_return_sparse():\n X = Vectorizer(strategy=\"bow\", return_sparse=True).fit_transform(X_text, y10)\n assert all(pd.api.types.is_sparse(X[c]) for c in X.columns)",
"def test_010_softmax():\n u = ACTIVATION_DIFF_ACCEPTANCE_VALUE\n P = softmax(np.array([2.44756739, 2.13945115]).astype(TYPE_FLOAT))\n E = np.array([0.57642539, 0.42357461]).astype(TYPE_FLOAT)\n assert np.all(np.abs(P-E) < u)\n\n for _ in range(NUM_MAX_TEST_TIMES):\n N: int = np.random.randint(1, NUM_MAX_BATCH_SIZE)\n M: int = np.random.randint(2, NUM_MAX_NODES)\n X = MAX_ACTIVATION_VALUE * np.random.randn(N, M).astype(TYPE_FLOAT)\n np.all(np.isfinite(softmax(X)))",
"def max_energy_candidate_storage_rule(_m, g, y, s, t):\r\n\r\n return m.q[g, y, s, t] - sum(m.x_c[g, j] for j in m.Y if j <= y) <= 0",
"def __test_softmax():\n\n test1 = softmax(tf.constant(np.array([[1001, 1002], [3, 4]]), dtype=tf.float32))\n with tf.Session() as sess:\n test1 = sess.run(test1)\n __test_all_close(\"Softmax test 1\", test1, np.array([[0.26894142, 0.73105858],\n [0.26894142, 0.73105858]]))\n\n test2 = softmax(tf.constant(np.array([[-1001, -1002]]), dtype=tf.float32))\n with tf.Session() as sess:\n test2 = sess.run(test2)\n __test_all_close(\"Softmax test 2\", test2, np.array([[0.73105858, 0.26894142]]))\n\n print(\"Basic (non-exhaustive) softmax tests pass\\n\")",
"def max_energy_existing_storage_rule(_m, g, y, s, t):\r\n\r\n return m.q[g, y, s, t] - m.Q_MAX[g] <= 0",
"def test_sparsity(self):\n\n self.validator.adata.X = self.validator.adata.X.toarray()\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.warnings,\n [\n \"WARNING: Sparsity of 'X' is 0.875 which is greater than 0.5, \"\n \"and it is not a 'scipy.sparse.csr_matrix'. It is \"\n \"STRONGLY RECOMMENDED to use this type of matrix for \"\n \"the given sparsity.\"\n ],\n )",
"def _test_sparsemax_loss_against_numpy(self, dtype, random, use_gpu):\n z = random.uniform(low=-3, high=3, size=(test_obs, 10))\n q = np.zeros((test_obs, 10))\n q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1\n\n tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu)\n np_loss = self._np_sparsemax_loss(z, q).astype(dtype)\n\n self.assertAllCloseAccordingToType(\n np_loss, tf_loss_out, half_atol=1e-2, half_rtol=5e-3)\n self.assertShapeEqual(np_loss, tf_loss_op)",
"def ncore(self):",
"def is_sparsity_enabled(cls):\n total,sp100,sp50 = 0,0,0\n for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:\n total += 1\n mask_sum = mask.sum()\n mask_numel = mask.numel()\n if mask_sum == mask_numel:\n sp100 += 1\n elif mask_sum*2 == mask_numel:\n sp50 += 1\n\n assert (total == sp100 or total == sp50), \"Inconsistent model sparsity\"\n if total == sp100:\n return False\n elif total == sp50:\n return True",
"def validate(self, s):\r\n\r\n nodes = [i for i, a in enumerate(s) if\r\n a != cf.SLEEP and self.network.get_node(i).energy >= (cf.COMMUNICATION_ENERGY + cf.SENSING_ENERGY) ] # get list of the active nodes\r\n\r\n for t in range(cf.NUM_TARGETS):\r\n no_sense_prob = 0\r\n for n in nodes:\r\n no_sense_prob += self.sensing_log_matrix[t][n]\r\n if no_sense_prob >= self.sensing_log_threshold:\r\n break\r\n\r\n if no_sense_prob < self.sensing_log_threshold:\r\n return False\r\n\r\n return True",
"def test_large_sdp(self):\n SHAPE = (256, 256)\n rows = SHAPE[0]\n cols = SHAPE[1]\n X = Variable(*SHAPE)\n Z = Variable(rows+cols, rows+cols)\n prob = Problem(Minimize(0.5*at.trace(Z)),\n [X[0, 0] >= 1, Z[0:rows, rows:rows+cols] == X, Z >> 0, Z == Z.T])\n prob.solve(solver=\"SCS\")\n self.assertAlmostEqual(prob.value, 1.0)",
"def validate(self, current_index): \n loss_out = []\n gts_cat = torch.LongTensor()\n pred_cat = torch.LongTensor()\n Validation = self.datasetManager.get_validation_dataloader()\n length = len(Validation)\n print('\\nValidation : %i steps'%length)\n for i, batch in tqdm.tqdm(enumerate(Validation)):\n batch = self.to_device(batch)\n img = batch[0]\n gts = batch[1]\n out = self.network(img)\n out = self.softmax(out)\n loss = self.loss(out,gts)\n pred = torch.argmax(out, 1, keepdim = True)\n pred = pred.view(-1)\n loss_out.append(loss.item())\n \n gts_cat = torch.cat((gts_cat,gts.cpu()),0)\n pred_cat = torch.cat((pred_cat,pred.cpu()),0)\n\n f1_score = sklearn.metrics.f1_score(gts_cat,pred_cat, average = 'macro')\n Kappa = sklearn.metrics.cohen_kappa_score(gts_cat,pred_cat)\n Accuracy = sklearn.metrics.accuracy_score(gts_cat,pred_cat) \n \n self.tb_writer.add_scalar(\"f1 score\",f1_score,current_index)\n self.tb_writer.add_scalar('Kappa score',Kappa,current_index)\n self.tb_writer.add_scalar('Accuracy', Accuracy, current_index)\n self.tb_writer.add_scalar('Validation Loss', np.mean(loss_out), current_index)\n \n return np.mean(loss_out)",
"def test_is_product_entangled_state_3_sys():\n ent_vec = max_entangled(4)\n np.testing.assert_equal(is_product_vector(ent_vec, dim=[2, 2, 2, 2]), False)",
"def max_power_existing_thermal_rule(_m, g, y, s, t):\r\n\r\n return m.p[g, y, s, t] - (m.P_MAX[g] * (1 - m.F[g, y])) <= 0",
"def test_maxv_keyword(self):\n # Set maxv to 200\n byt = bytscl(self.array2, maxv=200)\n control = numpy.sum(self.array2 >= 200)\n total = numpy.sum(byt == 255)\n self.assertEqual(total, control)",
"def analyze_sensitivity_sparse_grid(sparse_grid,max_order=2):\n from pyapprox.multivariate_polynomials import \\\n define_poly_options_from_variable_transformation\n from pyapprox.adaptive_sparse_grid import \\\n convert_sparse_grid_to_polynomial_chaos_expansion\n pce_opts=define_poly_options_from_variable_transformation(\n sparse_grid.variable_transformation)\n pce = convert_sparse_grid_to_polynomial_chaos_expansion(\n sparse_grid,pce_opts)\n pce_main_effects,pce_total_effects=\\\n get_main_and_total_effect_indices_from_pce(\n pce.get_coefficients(),pce.get_indices())\n\n interaction_terms, pce_sobol_indices = get_sobol_indices(\n pce.get_coefficients(),pce.get_indices(),max_order=max_order)\n \n return SensivitityResult(\n {'main_effects':pce_main_effects,\n 'total_effects':pce_total_effects,\n 'sobol_indices':pce_sobol_indices,\n 'sobol_interaction_indices':interaction_terms,\n 'pce':pce})",
"def calc_sparsity (data): \n matrix_size = data.shape[0]*data.shape[1] # Number of possible interactions in the matrix\n num_purchases = len(data.nonzero()[0]) # Number of items interacted with\n sparsity = 100*(1 - (num_purchases/matrix_size))\n print('{:.2f} % of the user interaction matrix is sparse'.format(sparsity,2))",
"def test_to_knx_max_exceeded(self):\n with pytest.raises(ConversionError):\n DPTSceneNumber.to_knx(DPTSceneNumber.value_max + 1)",
"def TST_LCE(S,N1,N_per,alpha,model_C2ST, w_C2ST, b_C2ST, device,dtype):\r\n np.random.seed(seed=1102)\r\n torch.manual_seed(1102)\r\n torch.cuda.manual_seed(1102)\r\n N = S.shape[0]\r\n f = torch.nn.Softmax()\r\n output = f(model_C2ST(S).mm(w_C2ST) + b_C2ST)\r\n # pred_C2ST = output.max(1, keepdim=True)[1]\r\n STAT = abs(output[:N1,0].type(torch.FloatTensor).mean() - output[N1:,0].type(torch.FloatTensor).mean())\r\n STAT_vector = np.zeros(N_per)\r\n for r in range(N_per):\r\n ind = np.random.choice(N, N, replace=False)\r\n # divide into new X, Y\r\n ind_X = ind[:N1]\r\n ind_Y = ind[N1:]\r\n # print(indx)\r\n STAT_vector[r] = abs(output[ind_X,0].type(torch.FloatTensor).mean() - output[ind_Y,0].type(torch.FloatTensor).mean())\r\n S_vector = np.sort(STAT_vector)\r\n threshold = S_vector[np.int(np.ceil(N_per * (1 - alpha)))]\r\n threshold_lower = S_vector[np.int(np.ceil(N_per * alpha))]\r\n h = 0\r\n if STAT.item() > threshold:\r\n h = 1\r\n return h, threshold, STAT"
] | [
"0.6566375",
"0.6140244",
"0.6020924",
"0.583102",
"0.5816435",
"0.5766244",
"0.57547736",
"0.5742387",
"0.5733611",
"0.5728958",
"0.569015",
"0.56716436",
"0.5660839",
"0.56137866",
"0.5600897",
"0.55933416",
"0.55928576",
"0.5521913",
"0.54795593",
"0.5471057",
"0.54647976",
"0.5448373",
"0.54449046",
"0.54373384",
"0.54353124",
"0.54339373",
"0.5423581",
"0.5422626",
"0.5422484",
"0.53889555"
] | 0.61877185 | 1 |
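The "proposition 4" and "proposition 5" rows check two properties of that same sparsemax loss: the loss is non-negative, and it is exactly zero (with sparsemax(z) equal to the one-hot target q) when the margin condition z_k >= 1 + max_{j != k} z_j holds, which is why the proposition 5 test sets z[:, 0] = np.max(z, axis=1) + 1.05. The reference helpers these tests compare against (_np_sparsemax_loss, _tf_sparsemax_loss, _tf_sparsemax) are not part of this excerpt; the NumPy sketch below is only an illustration of the quantity being tested, written under the assumption that it matches the paper's definition, and it does not handle the inf/NaN cases from the earlier rows.

import numpy as np

def np_sparsemax(z):
    # Project each row of z onto the probability simplex:
    # sort descending, find the support size k(z), then threshold by tau(z).
    z_sorted = np.sort(z, axis=1)[:, ::-1]
    k = np.arange(1, z.shape[1] + 1)
    z_cumsum = np.cumsum(z_sorted, axis=1)
    support = 1 + k * z_sorted > z_cumsum
    k_z = support.sum(axis=1)
    tau = (z_cumsum[np.arange(z.shape[0]), k_z - 1] - 1) / k_z
    return np.maximum(0.0, z - tau[:, np.newaxis])

def np_sparsemax_loss(z, q):
    # L(z, q) = -q.z + 0.5 * sum_{j in S(z)} (z_j^2 - tau(z)^2) + 0.5 * ||q||^2
    p = np_sparsemax(z)
    s = p > 0                      # support S(z)
    tau = np.where(s, z - p, 0.0)  # on the support, p = z - tau
    return (-np.sum(q * z, axis=1)
            + 0.5 * np.sum(s * (z ** 2 - tau ** 2), axis=1)
            + 0.5 * np.sum(q ** 2, axis=1))

With a one-hot q at index k and z_k >= 1 + max_{j != k} z_j, the support collapses to {k} and tau = z_k - 1, so the three terms sum to -z_k + (z_k - 0.5) + 0.5 = 0, matching the assertAllCloseAccordingToType(np.zeros(test_obs), tf_loss_out) check in the proposition 5 row.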
Sets the export_host of this ExportResponseMetadata. | def export_host(self, export_host):
self._export_host = export_host | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def response_host(self, response_host):\n\n self._response_host = response_host",
"def response_host(self, response_host):\n\n self._response_host = response_host",
"def host(self, host):\n\n self._host = host",
"def host(self, host):\n\n self._host = host",
"def host(self, host):\n\n self._host = host",
"def host(self, host):\n\n self._host = host",
"def host(self, host: str):\n\n self._host = host",
"def host(self, host):\n if host is None:\n raise ValueError(\"Invalid value for `host`, must not be `None`\")\n\n self._host = host",
"def host(self, host: str):\n if host is None:\n raise ValueError(\"Invalid value for `host`, must not be `None`\") # noqa: E501\n\n self._host = host",
"def set_host(self, host: str) -> None:\n _LOGGER.debug(\"Setting host to %s\", host)\n host_url = urlparse(host)\n self.scheme = host_url.scheme or \"http\"\n self.host = host_url.netloc or host_url.path\n self.base_url = f\"{self.scheme}://{self.host}\"\n self.api_url = f\"{self.base_url}/apps/api/{self.app_id}\"",
"def set_creation_host(self, host: str) -> None:\n self.metadata.data[\"creation_host\"] = host",
"def host(self, host) :\n\t\ttry :\n\t\t\tself._host = host\n\t\texcept Exception as e:\n\t\t\traise e",
"def sethost(self, host):\n self.__host = host",
"def Host(self, h):\r\n\r\n self.host = h\r\n return self",
"def set_service_host(self, host):\n self._api_host = f\"https://{host}\"",
"def host_name(self, host_name):\n\n self._host_name = host_name",
"def host_name(self, host_name):\n\n self._host_name = host_name",
"def hostname(self, hostname):\n\n self._hostname = hostname",
"def hostname(self, hostname):\n\n self._hostname = hostname",
"def registry_host(self, registry_host: str):\n\n self._registry_host = registry_host",
"def host_num(self, host_num):\n\n self._host_num = host_num",
"def hostname(self, hostname):\n self._hostname = hostname\n return self",
"def export_date(self, export_date):\n\n self._export_date = export_date",
"def admin_host(self, admin_host):\n\n self._admin_host = admin_host",
"def set_hostname(self, hostname):\n raise NotImplementedError()",
"def setServerHost(self, serverHost):\n\n _ExceptionUtil.raiseOnError(\n internals.blpapi_SessionOptions_setServerHost(self.__handle,\n serverHost))",
"def host_version(self, host_version):\n if host_version is None:\n raise ValueError(\"Invalid value for `host_version`, must not be `None`\") # noqa: E501\n\n self._host_version = host_version",
"def dst_hostname(self, dst_hostname):\n\n self._dst_hostname = dst_hostname",
"def host_ip(self, host_ip):\n\n self._host_ip = host_ip",
"def proxy_host(self, proxy_host):\n\n self._proxy_host = proxy_host"
] | [
"0.7138213",
"0.7138213",
"0.644298",
"0.644298",
"0.644298",
"0.644298",
"0.6365714",
"0.6363546",
"0.6119689",
"0.5977553",
"0.5908788",
"0.58681583",
"0.5725987",
"0.5704862",
"0.5632454",
"0.5604846",
"0.5604846",
"0.5506752",
"0.5506752",
"0.5476766",
"0.54253507",
"0.5403806",
"0.53860843",
"0.53830934",
"0.5342106",
"0.52974904",
"0.5283787",
"0.5248919",
"0.5229912",
"0.5226388"
] | 0.8150576 | 0 |
Sets the export_date of this ExportResponseMetadata. | def export_date(self, export_date):
self._export_date = export_date | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_date(self, date):\n self.data['date'] = date",
"def set_date(self, date):\n self.date = date\n return",
"def set_date(self, date):\n self.date = date",
"def date(self, date):\n\n self._date = date",
"def date(self, date):\n\n self._date = date",
"def date(self, date):\n\n self._date = date",
"def date(self, date):\n\n self._date = date",
"def date(self, date):\n\n self._date = date",
"def date(self, date):\n self._date = date",
"def set_extracte_date(self, extracte_date):\n if extracte_date is not None:\n self.extracte_date = extracte_date",
"def _set_last_exported_date(self, cr, uid, external_session, date, context=None):\n return True",
"def snapshot_date(self, snapshot_date):\n\n self._snapshot_date = snapshot_date",
"def date(self, date):\n if self.local_vars_configuration.client_side_validation and date is None: # noqa: E501\n raise ValueError(\"Invalid value for `date`, must not be `None`\") # noqa: E501\n\n self._date = date",
"def date(self, date):\n if date is None:\n raise ValueError(\n \"Invalid value for `date`, must not be `None`\"\n ) # noqa: E501\n\n self._date = date",
"def _date(self, _date):\n\n self.__date = _date",
"def _date(self, _date):\n\n self.__date = _date",
"def authorization_date(self, authorization_date):\n\n self._authorization_date = authorization_date",
"def set_datetime(self, date):\n self.date = date",
"def mod_date(self, mod_date):\n\n self._mod_date = mod_date",
"def _date(self, _date: datetime):\n if _date is None:\n raise ValueError(\"Invalid value for `_date`, must not be `None`\") # noqa: E501\n\n self.__date = _date",
"def announcement_date(self, announcement_date):\n\n self._announcement_date = announcement_date",
"def expiration_date(self, expiration_date):\n\n self._expiration_date = expiration_date",
"def set_date(self, date):\n self.date = self.date_to_local(date)\n # ephem deals only in UTC\n self.site.date = ephem.Date(self.date_to_utc(self.date))",
"def revision_date(self, revision_date):\n\n self._revision_date = revision_date",
"def set_access_date(self, access_date):\n\t\t\n\t\tif (access_date.__class__ != str or access_date ==\"\") and (access_date.__class__ != time.struct_time or len(access_date) != 9 ):\n\t\t\traise InvalidParameterError(\"access_date\", \"access_date is not in a proper format\")\n\t\ttry:\n\t\t\tif access_date.__class__ == str:\n\t\t\t\ttmp_ad = time.strptime(access_date, '%S %M %H %d %m %Y')\n\t\t\telif access_date.__class__ == time.struct_time:\n\t\t\t\ttmp_ad = access_date\n\t\t\tself.__access_date = datetime(tmp_ad[0], tmp_ad[1], tmp_ad[2], tmp_ad[3], tmp_ad[4], tmp_ad[5])\n\t\texcept:\n\t\t\traise InvalidDate, \"date is not valid access_date is not in a proper format\"",
"def license_date(self, license_date):\n\n self._license_date = license_date",
"def date(self, new_date):\n self._date.date = new_date",
"def completion_date(self, completion_date):\n\n self._completion_date = completion_date",
"def date(self, date):\n self.value = date.strftime(\"%Y-%m-%d\") if date else \"\"",
"def set_publishdate_extractor(self, extractor):\r\n if not extractor:\r\n raise ValueError(\"extractor must not be null!\")\r\n self.extract_publishdate = extractor"
] | [
"0.64057446",
"0.63387674",
"0.63318104",
"0.62219906",
"0.62219906",
"0.62219906",
"0.62219906",
"0.62219906",
"0.6208642",
"0.6150361",
"0.6148115",
"0.597754",
"0.59448314",
"0.59330714",
"0.5932477",
"0.5932477",
"0.59302837",
"0.59251916",
"0.5893984",
"0.5889508",
"0.5869775",
"0.5869477",
"0.57849216",
"0.5764885",
"0.57600737",
"0.57261586",
"0.5693605",
"0.56526333",
"0.56038797",
"0.5603519"
] | 0.8478781 | 0 |
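
The export_date record above (and the two ExportResponseMetadata records that follow) captures the property-setter pattern emitted by OpenAPI/Swagger-style code generators. Below is a minimal, self-contained sketch of that pattern; the class and attribute names come from the records, while the getter and the datetime type check are illustrative assumptions added here.

import datetime


class ExportResponseMetadata:
    """Minimal stand-in for the generated model referenced in these records."""

    def __init__(self):
        self._export_date = None

    @property
    def export_date(self):
        """Gets the export_date of this ExportResponseMetadata."""
        return self._export_date

    @export_date.setter
    def export_date(self, export_date):
        """Sets the export_date of this ExportResponseMetadata."""
        # The type check is an assumption for this sketch; generated setters
        # often assign the value without validation.
        if export_date is not None and not isinstance(export_date, datetime.datetime):
            raise ValueError("export_date must be a datetime or None")
        self._export_date = export_date


meta = ExportResponseMetadata()
meta.export_date = datetime.datetime(2024, 1, 1, tzinfo=datetime.timezone.utc)
print(meta.export_date.isoformat())
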
Sets the requested_object_list of this ExportResponseMetadata. | def requested_object_list(self, requested_object_list):
self._requested_object_list = requested_object_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def exported_object_list(self, exported_object_list):\n\n self._exported_object_list = exported_object_list",
"def set_response_list(self, r_list):\n self.response_list = r_list",
"def set_objects(self, objects: list):\n self._objects = objects",
"def set_object_list(self, query, fields, sort, start, limit, is_public_request):\n xform = None\n\n try:\n enable_etag = True\n\n if not is_public_request:\n xform = self.get_object()\n self.data_count = xform.num_of_submissions\n enable_etag = self.data_count < SUBMISSION_RETRIEVAL_THRESHOLD\n\n where, where_params = get_where_clause(query)\n\n if where:\n # pylint: disable=attribute-defined-outside-init\n self.object_list = self.object_list.extra(\n where=where, params=where_params\n )\n\n if (start and limit or limit) and (not sort and not fields):\n start_index = start if start is not None else 0\n end_index = limit if start is None or start == 0 else start + limit\n # pylint: disable=attribute-defined-outside-init\n self.object_list = filter_queryset_xform_meta_perms(\n self.get_object(), self.request.user, self.object_list\n )\n # pylint: disable=attribute-defined-outside-init\n self.object_list = self.object_list[start_index:end_index]\n elif (sort or limit or start or fields) and not is_public_request:\n try:\n query = filter_queryset_xform_meta_perms_sql(\n self.get_object(), self.request.user, query\n )\n # pylint: disable=protected-access\n has_json_fields = sort and ParsedInstance._has_json_fields(\n _get_sort_fields(sort)\n )\n should_query_json_fields = fields or has_json_fields\n\n if self._should_paginate():\n retrieval_threshold = getattr(\n settings, \"SUBMISSION_RETRIEVAL_THRESHOLD\", 10000\n )\n query_param_keys = self.request.query_params\n page = int(\n query_param_keys.get(self.paginator.page_query_param, 1)\n )\n page_size = int(\n query_param_keys.get(\n self.paginator.page_size_query_param,\n retrieval_threshold,\n )\n )\n start = (page - 1) * page_size\n limit = page_size\n\n if sort is None:\n # Paginated data needs to be sorted. We order by\n # id ascending if sort is empty\n sort = '{\"_id\": 1}'\n\n if should_query_json_fields:\n data = query_fields_data(\n xform,\n fields=fields,\n query=query,\n sort=sort,\n start_index=start,\n limit=limit,\n )\n # pylint: disable=attribute-defined-outside-init\n self.object_list = data\n else:\n data = query_data(\n xform,\n query=query,\n sort=sort,\n start_index=start,\n limit=limit,\n json_only=not self.kwargs.get(\"format\") == \"xml\",\n )\n # pylint: disable=attribute-defined-outside-init\n self.object_list = data\n except NoRecordsPermission:\n # pylint: disable=attribute-defined-outside-init\n self.object_list = []\n\n # ETags are Disabled for XForms with Submissions that surpass\n # the configured SUBMISSION_RETRIEVAL_THRESHOLD setting\n if enable_etag:\n sql = params = None\n\n if xform:\n sql, params = get_sql_with_params(\n xform,\n query=query,\n sort=sort,\n start_index=start,\n limit=limit,\n fields=fields,\n )\n\n setattr(\n self,\n \"etag_hash\",\n (get_etag_hash_from_query(sql, params)),\n )\n except ValueError as e:\n raise ParseError(str(e)) from e\n except DataError as e:\n raise ParseError(str(e)) from e",
"def put_list(self, request, **kwargs):\r\n response = super(BaseCorsResource, self).put_list(request, **kwargs)\r\n return self.add_cors_headers(response, True)",
"def get_object_list(self, request):\r\n\r\n self._reset_collection()\r\n return self._meta.queryset.clone()",
"def post_list(self, request, **kwargs):\n response = super(BaseCorsResource, self).post_list(request, **kwargs)\n response['Access-Control-Allow-Origin'] = '*'\n response['Access-Control-Expose-Headers'] = 'Location'\n return response",
"def set(self, request, _object):\n\n value = request._get_parameter_value(self)\n value.object = _object",
"def extend(self, object_list):\n self.data['object'].extend(object_list)\n self.data['id'].extend(range(self.start_id, self.start_id+len(object_list)))\n for col in self.cols:\n if col != 'object' and col != 'id':\n self.data[col].extend([None]*(len(self.data[\"id\"] ) - len(self.data[col])))\n self.start_id += len(object_list)\n return self",
"def view_list(self, view_list):\n\n self._view_list = view_list",
"def __init__(self, export_host=None, export_date=None, requested_object_list=None, exported_object_list=None): # noqa: E501 # noqa: E501\n\n self._export_host = None\n self._export_date = None\n self._requested_object_list = None\n self._exported_object_list = None\n self.discriminator = None\n\n if export_host is not None:\n self.export_host = export_host\n if export_date is not None:\n self.export_date = export_date\n if requested_object_list is not None:\n self.requested_object_list = requested_object_list\n if exported_object_list is not None:\n self.exported_object_list = exported_object_list",
"def resources(self, value):\n self._resource_objects = value",
"def request_object_update(self, AgentID, SessionID, ID_CacheMissType_list = None):\n\n packet = Message('RequestMultipleObjects',\n Block('AgentData',\n AgentID = AgentID,\n SessionID = SessionID),\n *[Block('ObjectData',\n CacheMissType = ID_CacheMissType[1],\n ID = ID_CacheMissType[0]) for ID_CacheMissType in ID_CacheMissType_list])\n\n # enqueue the message, send as reliable\n self.region.enqueue_message(packet, True)",
"def origin_list(self, origin_list: List[Origin]):\n\n self._origin_list = origin_list",
"def post_list(self, request, **kwargs):\r\n #logger.debug(\"post list %s\\n%s\" % (request, kwargs));\r\n response = super(BaseCorsResource, self).post_list(request, **kwargs)\r\n return self.add_cors_headers(response, True)",
"def fields_in_list(self, fields_in_list):\n\n self._fields_in_list = fields_in_list",
"def fields_in_list(self, fields_in_list):\n\n self._fields_in_list = fields_in_list",
"def list(self, request, *args, **kwargs):\n query = self.filter_queryset(self.get_queryset())\n if isinstance(query, ErrorResponse):\n return query\n default_limit = DEFAULT_LIST_LIMIT\n limit, offset, range_errors = self.get_offset_limit_params(default_limit)\n if range_errors:\n return ErrorResponse(data=range_errors)\n\n self.object_list = get_object_list(offset, limit, query)\n\n # Default is to allow empty querysets. This can be altered by setting\n # `.allow_empty = False`, to raise 404 errors on empty querysets.\n if not self.allow_empty and not self.object_list:\n warnings.warn(\n 'The `allow_empty` parameter is due to be deprecated. '\n 'To use `allow_empty=False` style behavior, You should override '\n '`get_queryset()` and explicitly raise a 404 on empty querysets.',\n PendingDeprecationWarning\n )\n class_name = self.__class__.__name__\n error_msg = self.empty_error % {'class_name': class_name}\n raise Http404(error_msg)\n\n # the pagination is not supported, use offset and limit\n serializer = self.get_serializer(self.object_list, many=True)\n return self.generate_list_response(query, self.object_list, serializer, offset, limit)",
"def setListDoc(self, list):\n if list is None: list__o = None\n else: list__o = list._o\n libxml2mod.xmlSetListDoc(list__o, self._o)",
"def SetLists(self,IndList,ObjList):\n \n if not len(IndList)==len(ObjList):\n raise ValueError(\"IndList and ObjList must be of the same length\")\n \n if not prod([type(x)==tuple for x in IndList]):\n IndFormatError=1\n elif not prod([len(x)==3 for x in IndList]):\n IndFormatError=1\n \n IndFormatError=0\n \n if IndFormatError:\n raise ValueError(\"IndList must be list of 3-tuples\")\n \n if not prod([shape(x) == self.__shape for x in ObjList])==1:\n print([shape(x) for x in ObjList])\n raise ValueError(\"ObjList must contain arrays of the same shape as the BZO (shape %s)\"%str(self.__shape))\n \n \n self.__IndList=IndList\n self.__ObjList=ObjList\n \n self.__Set_NumList() \n self.__SortLists()",
"def setOutputs(self, output_list):\n self.output_list = output_list",
"def resolve_objects(self, object_list: List[Downloadable], show_progress: bool):\n resolve_objects_drs_hostname_from_id(\n object_list,\n self.resolved_compact_drs,\n f\"http://{self.hostname}/mds/aggregate/info\",\n )\n progress_bar = (\n tqdm(desc=f\"Resolving objects\", total=len(object_list))\n if show_progress\n else InvisibleProgress()\n )\n for entry in object_list:\n add_drs_object_info(entry)\n # sugar to allow download objects to self download\n entry._manager = self\n progress_bar.update(1)",
"def SetImageList(self, imageList):\r\n\r\n self._imageList = imageList",
"def apply_authorization_limits(self, request, object_list):\n return object_list.filter(user=request.user)",
"def contact_list(self, contact_list):\n \n self._contact_list = contact_list",
"def set_target_stocks_list(self, list_of_stocks):\n self.target_stocks = list_of_stocks",
"def _set_listonly(self, value):\n if not value and self.__listonly:\n self.__listonly = False\n self.clear_preprocessed()",
"def photoset_list(request, queryset=None, **kwargs):\n if queryset is None:\n queryset = Photoset.objects.all()\n \n if 'queryset' in kwargs:\n del kwargs['queryset']\n \n if 'template_name' not in kwargs:\n kwargs['template_name'] = 'flickrsets/photoset/list.html'\n \n if 'template_object_name' not in kwargs:\n kwargs['template_object_name'] = 'photoset'\n \n if 'paginate_by' not in kwargs:\n kwargs['paginate_by'] = getattr(\n app_settings,\n 'PHOTOSET_LIST_VIEW_PAGINATE_BY')\n \n return list_detail.object_list(request, queryset, **kwargs)",
"def set_event_list(self):\n self.eventList = self.get_event_list()",
"def marshal(self, request, _object):\n\n raise NotImplementedError()"
] | [
"0.65758",
"0.58558273",
"0.5800644",
"0.5349972",
"0.51523393",
"0.5088856",
"0.5084719",
"0.5048292",
"0.50420725",
"0.5036184",
"0.50234246",
"0.50130266",
"0.4954567",
"0.4948551",
"0.49092585",
"0.49018076",
"0.49018076",
"0.48524174",
"0.48476678",
"0.47991368",
"0.479118",
"0.47851488",
"0.47762388",
"0.47753882",
"0.47480747",
"0.47454834",
"0.47053242",
"0.46894065",
"0.4686309",
"0.4675035"
] | 0.843938 | 0 |
Sets the exported_object_list of this ExportResponseMetadata. | def exported_object_list(self, exported_object_list):
self._exported_object_list = exported_object_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, export_host=None, export_date=None, requested_object_list=None, exported_object_list=None): # noqa: E501 # noqa: E501\n\n self._export_host = None\n self._export_date = None\n self._requested_object_list = None\n self._exported_object_list = None\n self.discriminator = None\n\n if export_host is not None:\n self.export_host = export_host\n if export_date is not None:\n self.export_date = export_date\n if requested_object_list is not None:\n self.requested_object_list = requested_object_list\n if exported_object_list is not None:\n self.exported_object_list = exported_object_list",
"def requested_object_list(self, requested_object_list):\n\n self._requested_object_list = requested_object_list",
"def setOutputs(self, output_list):\n self.output_list = output_list",
"def export_date(self, export_date):\n\n self._export_date = export_date",
"def set_response_list(self, r_list):\n self.response_list = r_list",
"def set_objects(self, objects: list):\n self._objects = objects",
"def nfs_export(self, nfs_export):\n\n self._nfs_export = nfs_export",
"def list(self, list):\n if list is None:\n raise ValueError(\"Invalid value for `list`, must not be `None`\") # noqa: E501\n\n self._list = list",
"def __init__(self, export_tuples=None):\n self._exports = export_tuples if export_tuples else []",
"def on_get_export(self, req, resp):\n resp.body = json.dumps(self.concord_list, ensure_ascii=False, indent=\"\\t\")",
"def listExport(self, params):\n\n records = self.list(params)\n\n return self.decodeDataToExport(records, params.get('exportColumns'))",
"def listExport(self, params):\n\n records = self.list(params)\n\n return self.decodeDataToExport(records, params.get('exportColumns'))",
"def manage_exportObject(\n self,\n id='',\n download=None,\n RESPONSE=None,\n REQUEST=None\n ):\n if not id:\n # can't use getId() here (breaks on \"old\" exported objects)\n id = self.id\n if getattr(id, '__func__', None) is not None:\n id = id()\n ob = self\n else:\n ob = self._getOb(id)\n\n suffix = 'zexp'\n\n if download:\n with BytesIO() as f:\n ob._p_jar.exportFile(ob._p_oid, f)\n result = f.getvalue()\n\n if RESPONSE is not None:\n RESPONSE.setHeader('Content-type', 'application/data')\n RESPONSE.setHeader(\n 'Content-Disposition',\n make_content_disposition('inline', f'{id}.{suffix}')\n )\n return result\n\n f = os.path.join(CONFIG.clienthome, f'{id}.{suffix}')\n with open(f, 'w+b') as fd:\n ob._p_jar.exportFile(ob._p_oid, fd)\n\n if REQUEST is not None:\n return self.manage_main(\n self, REQUEST,\n manage_tabs_message=f'\"{id}\" successfully exported to \"{f}\"',\n title='Object exported'\n )",
"def set_hidden_list(hidden_list, user=None):\n\tif isinstance(hidden_list, str):\n\t\thidden_list = json.loads(hidden_list)\n\n\t# set as hidden\n\tfor module_name in hidden_list:\n\t\tset_hidden(module_name, user, 1)\n\n\t# set as seen\n\tfor module_name in list(set(get_all_icons()) - set(hidden_list)):\n\t\tset_hidden(module_name, user, 0)\n\n\tif user:\n\t\tclear_desktop_icons_cache()\n\telse:\n\t\tfrappe.clear_cache()",
"def export_host(self, export_host):\n\n self._export_host = export_host",
"def output_groups(self, output_groups: List[str]):\n\n self._output_groups = output_groups",
"def save_asset_metadata_list(self, asset_metadata_list, user_id, import_only=False):\n # Determine course key to use in bulk operation. Use the first asset assuming that\n # all assets will be for the same course.\n asset_key = asset_metadata_list[0].asset_id\n course_key = asset_key.course_key\n\n with self.bulk_operations(course_key):\n original_structure = self._lookup_course(course_key).structure\n index_entry = self._get_index_if_valid(course_key)\n new_structure = self.version_structure(course_key, original_structure, user_id)\n course_assets = new_structure.setdefault('assets', {})\n\n assets_by_type = self._save_assets_by_type(\n course_key, asset_metadata_list, course_assets, user_id, import_only\n )\n\n for asset_type, assets in assets_by_type.items():\n new_structure['assets'][asset_type] = list(assets)\n\n # update index if appropriate and structures\n self.update_structure(course_key, new_structure)\n\n if index_entry is not None:\n # update the index entry if appropriate\n self._update_head(course_key, index_entry, asset_key.branch, new_structure['_id'])",
"def download(\n self,\n object_list: List[Downloadable],\n save_directory: str = \".\",\n show_progress: bool = False,\n unpack_packages: bool = True,\n delete_unpacked_packages: bool = False,\n ) -> Dict[str, Any]:\n\n self.cache_hosts_wts_tokens(object_list)\n output_dir = Path(save_directory)\n\n completed = {\n entry.object_id: DownloadStatus(filename=entry.file_name)\n for entry in object_list\n }\n\n for entry in object_list:\n # handle bundles first\n if entry.object_type is DRSObjectType.bundle:\n # append the filename to the directory path and\n child_dir = Path(save_directory, entry.file_name)\n # call download with the children object list\n child_status = self.download(\n entry.children,\n child_dir,\n show_progress,\n unpack_packages,\n delete_unpacked_packages,\n )\n # when complete, append the return status\n completed[entry.object_id] = child_status\n continue\n\n if entry.hostname is None:\n logger.critical(\n f\"{entry.hostname} was not resolved, skipping {entry.object_id}.\"\n f\"Skipping {entry.file_name}\"\n )\n completed[entry.object_id].status = \"error (resolving DRS host)\"\n continue\n\n # check to see if we have tokens\n if entry.hostname not in self.known_hosts:\n logger.critical(\n f\"{entry.hostname} is not present in this commons remote user access.\"\n f\"Skipping {entry.file_name}\"\n )\n completed[entry.object_id].status = \"error (resolving DRS host)\"\n continue\n if self.known_hosts[entry.hostname].available is False:\n logger.critical(\n f\"Was unable to get user authorization from {entry.hostname}. Skipping {entry.file_name}\"\n )\n completed[entry.object_id].status = \"error (no auth)\"\n continue\n\n drs_hostname = entry.hostname\n access_token = self.get_fresh_token(drs_hostname)\n\n if access_token is None:\n logger.critical(\n f\"No access token defined for {entry.object_id}. Skipping\"\n )\n completed[entry.object_id].status = \"error (no access token)\"\n continue\n # TODO refine the selection of access_method\n if len(entry.access_methods) == 0:\n logger.critical(\n f\"No access methods defined for {entry.object_id}. 
Skipping\"\n )\n completed[entry.object_id].status = \"error (no access methods)\"\n continue\n access_method = entry.access_methods[0][\"access_id\"]\n\n download_url = get_download_url_using_drs(\n drs_hostname,\n entry.object_id,\n access_method,\n access_token,\n )\n\n if download_url is None:\n completed[entry.object_id].status = \"error\"\n continue\n\n completed[entry.object_id].start_time = datetime.now(timezone.utc)\n filepath = output_dir.joinpath(entry.file_name)\n res = download_file_from_url(\n url=download_url, filename=filepath, show_progress=show_progress\n )\n\n # check if the file is a package; if so, unpack it in place\n ext = os.path.splitext(entry.file_name)[-1]\n if unpack_packages and ext in PACKAGE_EXTENSIONS:\n try:\n mds_entry = self.metadata.get(entry.object_id)\n except Exception:\n mds_entry = {} # no MDS or object not in MDS\n logger.debug(\n f\"{entry.file_name} is not a package and will not be expanded\"\n )\n\n # if the metadata type is \"package\", then unpack\n if mds_entry.get(\"type\") == \"package\":\n try:\n unpackage_object(filepath)\n except Exception as e:\n logger.critical(\n f\"{entry.file_name} had an issue while being unpackaged: {e}\"\n )\n res = False\n\n if delete_unpacked_packages:\n filepath.unlink()\n if res:\n completed[entry.object_id].status = \"downloaded\"\n logger.debug(\n f\"object {entry.object_id} has been successfully downloaded.\"\n )\n else:\n completed[entry.object_id].status = \"error\"\n logger.debug(f\"object {entry.object_id} has failed to be downloaded.\")\n completed[entry.object_id].end_time = datetime.now(timezone.utc)\n\n return completed",
"def setListDoc(self, list):\n if list is None: list__o = None\n else: list__o = list._o\n libxml2mod.xmlSetListDoc(list__o, self._o)",
"def plane_list(self, new_list):\n self.__plane_list = new_list",
"def export(self, value):\n \n self._export = bool(value)",
"def SetImageList(self, imageList):\r\n\r\n self._imageList = imageList",
"def import_(self, exported, update=False):\n for path in exported:\n kv = exported[path]\n fn = self.update if update else self.write\n fn(path, **kv)",
"def fields_in_list(self, fields_in_list):\n\n self._fields_in_list = fields_in_list",
"def fields_in_list(self, fields_in_list):\n\n self._fields_in_list = fields_in_list",
"def exportList(self, list_id):\n params = {'LIST_ID' : list_id,\n 'EXPORT_TYPE' : 'ALL',\n 'EXPORT_FORMAT': 'CSV',\n 'FILE_ENCODING': 'utf-8'}\n xrequest = xml_str(self.buildRequestEtree('ExportList', params))\n xresults = self.request(xrequest)\n xpath = '/Envelope/Body/RESULT/FILE_PATH'\n return xresults.xpath(xpath)[0].text",
"def resolve_objects(self, object_list: List[Downloadable], show_progress: bool):\n resolve_objects_drs_hostname_from_id(\n object_list,\n self.resolved_compact_drs,\n f\"http://{self.hostname}/mds/aggregate/info\",\n )\n progress_bar = (\n tqdm(desc=f\"Resolving objects\", total=len(object_list))\n if show_progress\n else InvisibleProgress()\n )\n for entry in object_list:\n add_drs_object_info(entry)\n # sugar to allow download objects to self download\n entry._manager = self\n progress_bar.update(1)",
"def set_blists(self, blists):\n self.blists = blists[:]",
"def setDownload(self, filename, data, response, bytes):\n #print data\n #serializedData=NSPropertyListSerialization.dataFromPropertyList_format_errorDescription_(data, NSPropertyListXMLFormat_v1_0, None)\n #print serializedData\n defaults=NSUserDefaults.standardUserDefaults()\n downloads=defaults.objectForKey_('downloads')\n downloads = [package for package in downloads if package[0]!=filename]\n downloads.append((filename, data, NSArchiver.archivedDataWithRootObject_(response), bytes))\n defaults.removeObjectForKey_('downloads')\n defaults.setObject_forKey_(downloads, 'downloads')",
"def view_list(self, view_list):\n\n self._view_list = view_list"
] | [
"0.57387066",
"0.572405",
"0.5720836",
"0.50837755",
"0.5052673",
"0.4954005",
"0.49233595",
"0.48638776",
"0.48131937",
"0.47819278",
"0.47225076",
"0.47225076",
"0.47171348",
"0.46870238",
"0.46680313",
"0.46198332",
"0.46057516",
"0.45436734",
"0.45278898",
"0.45272794",
"0.45091638",
"0.4498648",
"0.44917497",
"0.44813567",
"0.44813567",
"0.44757375",
"0.4452348",
"0.44139603",
"0.44012398",
"0.43921265"
] | 0.8515711 | 0 |
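
One of the negatives in the record above shows the four-argument constructor of the same generated model. The hedged sketch below puts the constructor and the list-valued fields together; the to_dict helper and the sample values are assumptions added for illustration only and are not part of the original snippet.

import datetime


class ExportResponseMetadata:
    def __init__(self, export_host=None, export_date=None,
                 requested_object_list=None, exported_object_list=None):
        # Generated models typically store each argument on a private attribute.
        self._export_host = export_host
        self._export_date = export_date
        self._requested_object_list = requested_object_list
        self._exported_object_list = exported_object_list

    def to_dict(self):
        # Convenience helper for this sketch only.
        return {
            "export_host": self._export_host,
            "export_date": self._export_date,
            "requested_object_list": self._requested_object_list,
            "exported_object_list": self._exported_object_list,
        }


meta = ExportResponseMetadata(
    export_host="backup-01.example.org",  # hypothetical host name
    export_date=datetime.datetime(2024, 1, 1, tzinfo=datetime.timezone.utc),
    requested_object_list=["vm-101", "vm-102"],
    exported_object_list=["vm-101"],
)
print(meta.to_dict())
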
Clean credentials and batch environment. It cleans the user's token credential and the batch environment, and deletes all Docker containers. This command is executed by root in the prolog. | def clean_environment(ctx, token):
try:
out = ctx.obj.clean_environment(token)
print_message(out)
except BaseException as e:
print_error(e.message) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean(self, data):\n required = {'admin_token', 'token'}\n api.validate(data, required)\n admin_token = data['admin_token']\n force = True\n self.credentials_module.authorize_admin(admin_token)\n token = data['token']\n containers = self.credentials_module.list_containers(token)\n if containers:\n self.docker_module.clean_containers(containers, force)\n exceptions.make_log(\"info\", \"Delete containers\")\n\n token_info = self.credentials_module.get_token(token)\n self.batch_module.clean_environment(token_info, admin_token)\n exceptions.make_log(\"info\", \"Batch system cleaned\")\n self.credentials_module.remove_token_from_cache(token)\n exceptions.make_log(\"info\", \"Delete token: %s\" % token)\n return token",
"def clean():\n\tprint(\"Started cleaning....\")\n\trc = call(\"./clean.sh\", shell=True)\n\tprint(\"Cleaning done.\")",
"def clean(c):\n clean_docker(c)\n clean_repo(c)",
"def clean_user_tokens() -> None:\n asyncio.run(clean_old_user_tokens())",
"def clear_datastore():\n local('lib/remote_api_shell.py tweetlocker -p /_/shell -c '\n '\"from lib.utils import clear_datastore; clear_datastore()\"',\n capture=False)",
"def clean_master():",
"def clean_workspace(self):\n try:\n if self.cleanup_resources.get('srpm_path'):\n os.remove(self.cleanup_resources.get('srpm_path'))\n if self.cleanup_resources.get('platform_pot_path'):\n os.remove(self.cleanup_resources.get('platform_pot_path'))\n if self.cleanup_resources.get('src_tar_dir'):\n rmtree(self.cleanup_resources.get('src_tar_dir'), ignore_errors=True)\n if self.cleanup_resources.get('extract_dir'):\n rmtree(self.cleanup_resources.get('extract_dir'), ignore_errors=True)\n if self.cleanup_resources.get('download_dir'):\n rmtree(self.cleanup_resources.get('download_dir'), ignore_errors=True)\n except OSError as e:\n self.app_logger('ERROR', \"Failed to clean sandbox! Due to %s\" % e)",
"def clean():\n user_init.clean_setup()",
"def clean():\n sudo(\"rm -rf %(admin_webroot)s\" % env)",
"def esp32_app_clean(ctx):\n _run_idf_script(ctx, \"fullclean\")",
"def cleanup(self):\n with hide(\"output\", \"warnings\", \"running\"):\n self.stop_all()\n self._execute_standard(\"rm -rf {model_repo}\".format(model_repo=MODEL_REPO))\n self._execute_root(\"docker rmi --force $(docker images -q)\", warn_only=True)\n self._execute_root(\"docker network rm clipper_nw\", warn_only=True)",
"def cleanup():\n cmd='docker rmi --force $(docker images -a -q)'\n bash_command(\"Deleting all images\", cmd)",
"def clean(all):\n docker_clean(all)",
"def clean_env():\n for key in ['FOO', 'THOR', 'IRON', 'NAME', 'PERSONAL_DIR']:\n os.environ.pop(key, None)",
"def clean():\n if system() == 'Windows':\n os.system('cls')\n else:\n os.system('clear')",
"def CleanUp(self):\n self.cmd.CleanUp()",
"def cleanup(self,context,result):\n if self.do_cleanup:\n try:\n return_code, stdout, stderr= runProgram([context.gsec_path,\n \"-user\", context.user_name,\n \"-password\", context.user_password,\n \"-delete\", self.user_name],[])\n except:\n result.note_exception(cause=\"Resource cleanup: Can't remove user.\")\n result[\"user_name\"] = self.user_name\n return\n else:\n if return_code != 0:\n self.fail_and_annotate_streams(result, Result.ERROR,'GSEC','Delete user',\n stdout,stderr)",
"def on_shutdown(self):\n self.factory.core.master_local_token.cleanup()\n self.api.stop()",
"def clean(self):\n self.run(['git', 'reset', '--hard', 'HEAD'])\n self.run(['git', 'clean', '-fdx'])\n self.run(['git', 'checkout', 'origin/master'])",
"def clean_session(self):\n unused_entries = ['root_freespace', 'home_freespace', 'hardvideo',\n 'optional_partitions', 'boot_id', 'greeter', 'display',\n 'boot_size', 'root_size', 'swap_size', 'home_size',\n 'root_id', 'lvm', 'swap_id', 'home_id', 'luks',\n 'user_passwd', 'root_passwd', 'desktop', 'gpu_driver',\n 'vga_controller', 'gpu_proprietary', 'desktop_extra']\n\n for unused in unused_entries:\n del self.user[unused]",
"def cleanup():\n management.call_command('cleanup')",
"def env_cleanup(self):\n pass",
"def clean_repo(c):\n c.run('git clean -ffdx')\n c.run('git reset --hard')",
"def _clean(base_dir):\n # remove the snakemake cache\n shutil.rmtree(os.path.join(base_dir, \".snakemake\"), ignore_errors=True)\n\n # remove seq2science caches\n shutil.rmtree(os.path.expanduser(os.path.join(xdg.XDG_CACHE_HOME, \"seq2science\")), ignore_errors=True)\n\n # remove historic seq2science cache location\n shutil.rmtree(os.path.expanduser(f\"~/.config/seq2science/\"), ignore_errors=True)\n\n print(\"All cleaned up!\")",
"def clean(self, util):\n if os.path.exists(self._ephemeral_caches):\n with util.Task(\"\"\"Cleaning ephemeral caches\"\"\"):\n with open(self._ephemeral_caches, \"r\") as ephemeral_log:\n for ephemeral_cache in ephemeral_log.readlines():\n self.delete(os.path.join(self._cache_dir,\n ephemeral_cache.strip()))\n\n self.delete(self._ephemeral_caches)",
"def hard_reset(self) -> None:\n os.system('rm -fr \"$HOME/.daf/\"')",
"def cleanup(self):\n\n print \"Cleaning up...\",\n sys.stdout.flush()\n\n builddir = os.path.join(self.build)\n\n comm = 'rm -rf '+builddir\n #+' '+libdir+' '+logdir\n (output, error, retz) = runShellCommand(comm)\n\n print \"done.\"",
"def clean(context):\n print(f\"Attempting to forcefully remove image {IMAGE_NAME}:{IMAGE_VER}\")\n context.run(f\"docker rmi {IMAGE_NAME}:{IMAGE_VER} --force\")\n print(f\"Successfully removed image {IMAGE_NAME}:{IMAGE_VER}\")",
"def clean_all(self):\n for p in ['process_manager.py', 'mongo']:\n cmd = (\"ps aux | grep %s | grep -v grep | awk '{ print $2 }'\"\n \" | xargs kill -s 9\") % p\n self._ssh(cmd, use_pwd=False)",
"def destroy_env(self):\n self.dut.send_expect(\"quit\", \"# \")\n time.sleep(2)"
] | [
"0.74475706",
"0.61287606",
"0.6078283",
"0.6048094",
"0.59989303",
"0.5986939",
"0.59787303",
"0.59101623",
"0.590741",
"0.5895125",
"0.58949953",
"0.58100575",
"0.5757535",
"0.5750923",
"0.5743235",
"0.57311714",
"0.5729552",
"0.57249904",
"0.57111883",
"0.5707326",
"0.56993735",
"0.5683606",
"0.56785977",
"0.56676626",
"0.5645302",
"0.56395113",
"0.563655",
"0.56269854",
"0.56172734",
"0.5599983"
] | 0.6538844 | 1 |
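
The clean_environment record above pairs a Click-style command with a client object stored on the CLI context. The sketch below reconstructs that shape as a runnable, stand-alone example; DummyClient, print_message and print_error are placeholders for the project's own client and output helpers, and only the overall pattern mirrors the original.

import click


class DummyClient:
    """Placeholder for the real service client kept on ctx.obj."""

    def clean_environment(self, token):
        if not token:
            raise ValueError("a token is required")
        return "environment cleaned for token %s" % token


def print_message(msg):
    click.echo(msg)


def print_error(msg):
    click.echo("ERROR: %s" % msg, err=True)


@click.group()
@click.pass_context
def cli(ctx):
    # The real CLI would build an authenticated client here.
    ctx.obj = DummyClient()


@cli.command()
@click.argument("token")
@click.pass_context
def clean_environment(ctx, token):
    """Clean the user's credentials and batch environment."""
    try:
        print_message(ctx.obj.clean_environment(token))
    except Exception as e:
        print_error(str(e))


if __name__ == "__main__":
    cli()
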
Delete a container or a list of containers. | def container_delete(ctx, token, container_ids, force):
try:
out = ctx.obj.container_delete(token, container_ids, force)
print_message(out)
except exceptions.DockerException as e:
m = e.message
print_error(m) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_container(self, container: Container):",
"def DeleteContainers(self):\n for container in itertools.chain(*list(self.containers.values())):\n container.Delete()",
"def delete_container(ContainerName=None):\n pass",
"def delete_container(self, account, container):\n \n pass",
"async def remove(self, container, uids):",
"def remove(self, container):\n pass",
"def delete_container(self, filesystem, acc_dir, cont_dir, account, container):\n try:\n # create path\n path = self.create_path(filesystem, acc_dir, cont_dir, account, container)\n self.logger.debug(('DELETE container called for path: %(path)s'),\n {'path' : path})\n # call container library to confirm if container is empty or not\n self.logger.debug('Called list container interface of library')\n list_obj = ListObjectWithStatus()\n self.asyn_helper.call(\"list_container\", \\\n path, list_obj, CONTAINER_LISTING_LIMIT, '', '', '', '')\n status = list_obj.get_return_status()\n self.logger.info(('Status from container library comes '\n 'out to be: %(status)s'), {'status' : status})\n if status != OsdExceptionCode.OSD_OPERATION_SUCCESS:\n return status\n container_list = list_obj.object_record\n self.logger.debug('Got container list')\n if container_list:\n self.logger.debug('object list found in container!')\n raise HTTPConflict()\n # call container library to delete container\n self.logger.debug('Called delete container interface of library')\n status_obj = Status()\n self.asyn_helper.call(\"delete_container\", path, status_obj)\n status = status_obj.get_return_status()\n self.logger.info(('Status from container library comes '\n 'out to be: %(status)s'),\n {'status' : status})\n return status\n except Exception as err:\n self.logger.error(('container DELETE failed for account/container:'\n ' %(account)s/%(container)s '\n 'close failure: %(exc)s : %(stack)s'),\n {'account' : account, 'container' : container,\n 'exc': err, 'stack': ''.join(traceback.format_stack())})\n raise err",
"def test_delete_generic_container(self):\n container_resp = self.behaviors.create_container('name', 'generic', [])\n self._check_container_create_response(container_resp)\n\n # delete container and check the response\n del_resp = self.behaviors.delete_container(container_resp.ref)\n self.assertEqual(del_resp.status_code, 204)\n\n # check the container is actually deleted\n get_resp = self.container_client.get_container(container_resp.ref)\n self.assertEqual(get_resp.status_code, 404)",
"def test_remove(self):\n\n message = {\"method\": \"remove\",\n \"params\": {\"elem\": self.container_to_remove}}\n response = yield self._get_response(message)\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"remove\")\n self.assertIsInstance(response[\"result\"], list)\n\n container_name = \"/\" + self.container_to_remove\n\n containers = {i[0]: i[1] for i in response[\"result\"]}\n self.assertNotIn(container_name, containers.keys(),\n \"Container has found\")",
"def _delete_all_containers(self):\n for container_ref in self.created_entities['container']:\n self.barbicanclient.containers.delete(container_ref)",
"def _delete(self, variables):\n required_vars = ['container']\n variables_dict = self._get_vars(variables, required=required_vars)\n\n container_name = variables_dict.pop('container')\n object_name = variables_dict.pop('object', None)\n\n if object_name:\n self.swift.delete_object(container_name, object_name)\n else:\n self.swift.delete_container(container_name)\n\n self.state_change = True",
"def delete(self, path):\n params = request.args.to_dict()\n if params.get(\"instances\"):\n int_list = params.get(\"instances\")\n return items_delete_response(path, int_list)\n abort(405)",
"def delete(args):\n if args.tag is not None:\n tag = str(args.tag)\n interface = DigitalOceanSetup.create_interface()\n # Delete everything matching the tag\n interface.destroy_machines_by_tag(tag)\n elif args.delete_list:\n server_list = read_server_file()\n if len(server_list) == 1:\n interface = DigitalOceanSetup.create_interface()\n droplet_details = server_list[0]\n # Download the save game from the server\n if args.save:\n eprint(\"Running Ansible...\")\n os.environ[\"ANSIBLE_HOST_KEY_CHECKING\"] = \"False\"\n process = subprocess.Popen([\"ansible-playbook\", \"-i\",\n droplet_details[\"name\"] + \",\",\n \"--private-key\", \"~/.ssh/id_rsa\",\n \"save-factorio.yml\"],\n stdout=subprocess.PIPE)\n out, _ = process.communicate()\n eprint(out)\n # Now destory the droplet\n interface.destroy_machine_by_id(droplet_details[\"id\"])\n # Save empty list to file\n save_dict_to_file(\"servers.json\", [])\n else:\n eprint(\"Too many or no items in server list.\")\n else:\n eprint(\"Missing arguments.\")",
"def delete_volumes(volumes):\n if type(volumes) is not list:\n volumes = [volumes]\n for volume in volumes:\n command = 'cinder delete %s' % volume['id']\n a = Popen(command.split(), stdout=STDOUT, stderr=STDERR).communicate()[0]",
"def DELETE(self, req):\n account_partition, accounts, container_count = \\\n self.account_info(self.account_name, req)\n if not accounts:\n return HTTPNotFound(request=req)\n container_partition, containers = self.app.container_ring.get_nodes(\n self.account_name, self.container_name)\n headers = self._backend_requests(req, len(containers),\n account_partition, accounts)\n self._clear_container_info_cache(req)\n resp = self.make_requests(\n req, self.app.container_ring, container_partition, 'DELETE',\n req.swift_entity_path, headers)\n # Indicates no server had the container\n if resp.status_int == HTTP_ACCEPTED:\n return HTTPNotFound(request=req)\n return resp",
"def delete():",
"def remove(self, **kwargs):\n return self.client.api.remove_container(self.id, **kwargs)",
"def cli(ctx):\n stopped = click.style(\"Stopped\", fg=\"red\")\n removed = click.style(\"Removed\", fg=\"blue\")\n for container in ctx.docker.get_containers():\n name = container.hostname\n node_name = ''.join([i for i in name if not i.isdigit()])\n image_name = container.dictionary['Config']['Image']\n if node_name in TO_KILL:\n container.stop(timeout=0)\n else:\n container.stop(timeout=5)\n # container.execute(\"poweroff\", \"root\", \"/\", False)\n # container.wait()\n ctx.log(\"Container %s --> %s\" % (name, stopped))\n container.remove(v=False, link=False, force=True)\n ctx.log(\"Container %s --> %s\" % (name, removed))\n ctx.state['containers'].remove(container.short_id)\n ctx.state.fast_dump()\n # remove untagged image\n if not image_name.startswith(ctx.prefix):\n ctx.docker.remove_image(image_name, force=True)\n ctx.docker.remove_network()",
"def test_destroy(set_env, container: Container, docker_client: DockerClient):\n # pylint: disable=unused-argument\n assert container\n\n from dockerdb.commands.destroy import destroy\n\n destroy()\n\n with pytest.raises(NotFound):\n docker_client.containers.get(container_id=container.name)",
"def deleteImage(job):\n job = Job().updateJob(\n job,\n log='Started to Delete Docker images\\n',\n status=JobStatus.RUNNING,\n )\n docker_client = None\n try:\n deleteList = job['kwargs']['deleteList']\n error = False\n\n try:\n docker_client = docker.from_env(version='auto')\n\n except docker.errors.DockerException as err:\n logger.exception('Could not create the docker client')\n job = Job().updateJob(\n job,\n log='Failed to create the Docker Client\\n' + str(err) + '\\n',\n status=JobStatus.ERROR,\n )\n raise DockerImageError('Could not create the docker client')\n\n for name in deleteList:\n try:\n docker_client.images.remove(name, force=True)\n\n except Exception as err:\n logger.exception('Failed to remove image')\n job = Job().updateJob(\n job,\n log='Failed to remove image \\n' + str(err) + '\\n',\n )\n error = True\n if error is True:\n job = Job().updateJob(\n job,\n log='Failed to remove some images',\n status=JobStatus.ERROR,\n notify=True,\n progressMessage='Errors deleting some images'\n )\n else:\n job = Job().updateJob(\n job,\n log='Removed all images',\n status=JobStatus.SUCCESS,\n notify=True,\n progressMessage='Removed all images'\n )\n except Exception as err:\n logger.exception('Error with job')\n job = Job().updateJob(\n job,\n log='Error with job \\n ' + str(err) + '\\n',\n status=JobStatus.ERROR,\n\n )\n finally:\n if docker_client:\n docker_client.close()",
"def delete_command(arguments: List[str]) -> None:\n if len(arguments) != 2:\n print('Required 1 argument for create command') # noqa: WPS421\n return\n token = token_load.load()\n logic.delete(token, gist_id=arguments[1])",
"def destroy_all(self) -> None:\n try:\n containers = self.docker.containers.list(\n all=True,\n filters={\n 'label': LABEL_TASK_ID,\n },\n )\n\n for container in containers:\n container.remove(force=True)\n\n except requests.exceptions.ConnectionError:\n raise ProviderError('Docker engine unavailable')",
"def remove_reagents_from_container(request):\n container_id = int(request.POST['container_id'])\n positions = request.POST['positions']\n positions = json.loads(positions)\n current_container = Container.objects.get(id=container_id)\n\n for position in positions:\n row = int(position[0])\n column = int(position[1])\n print(row, column)\n position_query = ContainerContent.objects.filter(row__exact=row,\n column__exact=column,\n container=current_container,\n )\n print(position_query)\n position_query.delete()\n return JsonResponse({'success': True})",
"def destroyContainer(tag): #@NoSelf",
"def test_destroy_container(self):\n pass",
"def deleteNode(*args, **kwds):\n nodes = args\n if len(args) < 1:\n nodes = cmds.ls(sl=1)\n \n for node in nodes:\n node_lst = [node]\n if isinstance(node, (list, tuple)):\n node_lst = node\n\n for n in node_lst:\n if cmds.objExists(str(n)):\n cmds.delete(str(n), **kwds)\n else:\n cmds.warning(\"# Don’t exist - \" + node)",
"def delete_container_policy(ContainerName=None):\n pass",
"def test_delete_rsa_container(self):\n secret_urls = self.secret_behaviors.create_n_secrets(3)\n container_resp = self.behaviors.create_rsa_container(\n 'name', secret_urls[0], secret_urls[1], secret_urls[2])\n self._check_container_create_response(container_resp)\n\n # delete container and check the response\n del_resp = self.behaviors.delete_container(container_resp.ref)\n self.assertEqual(del_resp.status_code, 204)\n\n # check the container is actually deleted\n get_resp = self.container_client.get_container(container_resp.ref)\n self.assertEqual(get_resp.status_code, 404)",
"def remove_containers(client, prefix=DOCK_CONTAINER_NAME_PREFIX):\n\n containers = get_containers_names(client)\n for c in containers:\n if prefix in c:\n remove_container_by_name(client, c)",
"def delete(self, *names):\n if len(names) != 1:\n raise RedisClusterException(\"deleting multiple keys is not implemented in pipeline command\")\n\n return self.execute_command('DEL', names[0])"
] | [
"0.80328494",
"0.7434844",
"0.7386382",
"0.7215402",
"0.6816788",
"0.66818416",
"0.66644293",
"0.65226024",
"0.6510037",
"0.65017575",
"0.64464664",
"0.62738705",
"0.62296575",
"0.6196632",
"0.6169314",
"0.60266644",
"0.6006505",
"0.59684145",
"0.5931678",
"0.59242857",
"0.59042394",
"0.587911",
"0.5875679",
"0.5870813",
"0.58686817",
"0.5846463",
"0.58434004",
"0.5828131",
"0.57959306",
"0.5778915"
] | 0.7737605 | 1 |
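
container_delete above accepts several container ids plus a force flag. A hedged sketch of how that argument shape is typically wired in Click follows; the client class and exception type are stand-ins for the original ctx.obj and exceptions.DockerException.

import click


class DockerError(Exception):
    pass


class DummyDockerClient:
    def container_delete(self, token, container_ids, force):
        if not container_ids:
            raise DockerError("no container ids given")
        prefix = "forcefully " if force else ""
        return "%sdeleted: %s" % (prefix, ", ".join(container_ids))


@click.command()
@click.argument("token")
@click.argument("container_ids", nargs=-1)
@click.option("--force", is_flag=True, help="Remove containers even if running.")
@click.pass_obj
def container_delete(client, token, container_ids, force):
    """Delete a container or a list of containers."""
    try:
        click.echo(client.container_delete(token, list(container_ids), force))
    except DockerError as e:
        click.echo("ERROR: %s" % e, err=True)


if __name__ == "__main__":
    container_delete(obj=DummyDockerClient())
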
floor the point to the next lower multiple of bucket_size | def bucketize(point, bucket_size):
return bucket_size * math.floor(point / bucket_size) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bucketize(point, bucket_size):\n return bucket_size * math.floor(point / bucket_size)",
"def bucketize(point, bucket_size):\n return bucket_size * math.floor(point / bucket_size)",
"def bucket_boundaries(self, bucket):\n\n if bucket < 0 or bucket >= self.total_buckets:\n raise IndexError('bucket %d out of range' % bucket)\n if bucket == self.total_buckets - 1:\n return (self._lower_bounds[bucket], float('Inf'))\n return (self._lower_bounds[bucket], self._lower_bounds[bucket + 1])",
"def _wrap(self, point: float):\n\n if point == self.ub:\n return point\n width = self.ub - self.lb\n return ((point - self.lb) % width) + self.lb",
"def _wrap(self, point: float):\n\n if point == self.ub:\n return point\n width = self.ub - self.lb\n return ((point - self.lb) % width) + self.lb",
"def get_new_size(old_size, buckets):\n if buckets is None:\n return old_size\n else:\n w, h = old_size\n for (w_b, h_b) in buckets:\n if w_b >= w and h_b >= h:\n return w_b, h_b\n\n return old_size",
"def frequency_bucket_floor(bucket_index):\n\tfraction = bucket_index / FREQUENCY_BUCKETS\n\tlog_range = [math.log(edge, 2) for edge in HEARING_RANGE]\n\tlog_floor = log_range[0] + fraction * (log_range[1] - log_range[0])\n\treturn 2 ** log_floor",
"def ceil_inplace(a):",
"def _splitBucket(self, bucket):\n idx = self.buckets.index(bucket)\n self.buckets.pop(idx)\n middle = int(bucket.low + (bucket.high - bucket.low)/2)\n \n bucketLow = Bucket(bucket.low, middle, bucket.refreshed)\n bucketHigh = Bucket(middle+1, bucket.high, refreshed.refreshed)\n \n self.buckets.append(bucketLow)\n self.buckets.append(bucketHigh)\n \n for bucket in bucket.nodes:\n if bucketLow.inRange(bucket):\n bucketLow.addNode(bucket)\n else:\n bucketHigh.addNode(bucket)\n \n return (bucketLow, bucketHigh)",
"def _bucket_boundaries(max_length, min_length=8, length_bucket_step=1.1):\n assert length_bucket_step > 1.0\n x = min_length\n boundaries = []\n while x < max_length:\n boundaries.append(x)\n x = max(x + 1, int(x * length_bucket_step))\n return boundaries",
"def bucket_for_value(self, value):\n\n # bisect.bisect_left is wrong because the buckets are of [lower, upper) form\n return bisect.bisect(self._lower_bounds, value) - 1",
"def relative_position_bucket(relative_position,\n bidirectional: bool = True,\n num_buckets: int = 32,\n max_distance: int = 128):\n ret = 0\n relative_position = -relative_position\n if bidirectional:\n assert num_buckets % 2 == 0, 'When bidirectional is True, the number of buckets must be ' \\\n 'divisible by 2.'\n num_buckets //= 2\n ret = ret + (relative_position < 0).astype(np.int32) * num_buckets\n relative_position = np.abs(relative_position)\n else:\n # Clip all the negative values to 0\n relative_position = np.clip(relative_position, a_min=0, a_max=None)\n # Now, the relative_position is in the range [0, inf)\n\n # Half of the buckets deal with the exact increments,\n # i.e., 0, 1, 2, ..., max_exact - 1, where max_exact = num_buckets // 2\n max_exact = num_buckets // 2\n is_small = relative_position < max_exact\n\n # The other half of the buckets are for logarithmically bigger bins in positions up to\n # max_distance\n val_if_large = max_exact + (\n np.log(relative_position.astype(np.float32) / max_exact)\n / math.log(max_distance / max_exact) * (num_buckets - max_exact)).astype(np.int32)\n val_if_large = np.minimum(val_if_large, num_buckets - 1)\n ret = ret + np.where(is_small, relative_position, val_if_large)\n return ret",
"def near_split(x, num_bins=None, size_bins=None):\n if num_bins:\n quotient, remainder = divmod(x, num_bins)\n return [quotient + 1] * remainder + [quotient] * (num_bins - remainder)\n elif size_bins:\n return near_split(x, num_bins=int(np.ceil(x / size_bins)))",
"def near_split(x, num_bins=None, size_bins=None):\n if num_bins:\n quotient, remainder = divmod(x, num_bins)\n return [quotient + 1] * remainder + [quotient] * (num_bins - remainder)\n elif size_bins:\n return near_split(x, num_bins=int(np.ceil(x / size_bins)))",
"def smooth5(size: int) -> int:\n if size < 6:\n return size\n if not size % 2:\n return size\n\n new = np.inf\n power5 = 1\n while power5 < size:\n power35 = power5\n while power35 < size:\n power2 = 2 ** ((-int(-size // power35) - 1).bit_length())\n n = power2 * power35\n if n == size:\n return new\n elif n < new:\n new = n\n power35 *= 3\n if power35 == size:\n return new\n if power35 < new:\n new = power35\n power5 *= 5\n if power5 == size:\n return new\n if power5 < new:\n new = power5\n return new",
"def _bucket_boundaries(self, max_length, min_length=8, length_bucket_step=1.1):\n assert min_length <= max_length\n assert length_bucket_step > 1.0\n x = min_length\n boundaries = []\n while x < max_length:\n boundaries.append(x)\n x = max(x + 1, int(x * length_bucket_step))\n return boundaries",
"def RoundUp(value, boundary):\n return (value + boundary - 1) & ~(boundary - 1)",
"def compute_pool(in_size):\n return (in_size - 2) // 2 + 1",
"def floor_inplace(a):",
"def lower_bound(self) -> float:\n ...",
"def FixedWidthBucketer(width, num_finite_buckets=100):\n return Bucketer(width=width, growth_factor=0.0,\n num_finite_buckets=num_finite_buckets)",
"def estimate_bucket_pipeline(bucket_boundaries, num_samples, safe=True):\n if len(bucket_boundaries) < 2:\n raise ValueError('Bucket boundaries must contain at least 2 values')\n\n batch_step = 8\n\n batch_sizes = []\n for boundary in bucket_boundaries:\n batch_size = num_samples / (boundary - 1)\n batch_size = np.floor(batch_size / batch_step) if safe \\\n else np.round(batch_size / batch_step)\n batch_size *= batch_step\n\n if safe and batch_size < batch_step:\n if len(batch_sizes) < 2:\n raise ValueError('Too few samples per batch')\n\n return bucket_boundaries[:len(batch_sizes) - 1], batch_sizes, bucket_boundaries[len(batch_sizes) - 1]\n\n batch_sizes.append(max(batch_step, batch_size.astype(int)))\n\n return bucket_boundaries[:-1], batch_sizes, bucket_boundaries[-1]",
"def _prep_buckets(buckets, len_x):\n if isinstance(buckets, int):\n lims = np.linspace(0, len_x-1, buckets+1, dtype=int)\n else:\n lims = buckets\n buckets = len(lims)-1\n\n # Determine center of each bucket\n mids = np.rint(np.convolve(lims, np.ones(2), 'valid') / 2).astype(int)\n mids[0] = 0\n mids[-1] = len_x - 1\n\n return lims, mids",
"def floor(self, tuple_data, val, nearest = 1):\r\n retval = val - (val % nearest) if val != None else None\r\n return retval",
"def calculate_large_constant(self, bound, real_reduction_iterations):#factor):\n minimum_exponent = round(90/(real_reduction_iterations-1))#math.ceil(math.log(bound, 10) * factor)\n \n return ZZ(10 ** minimum_exponent)",
"def pt2index(self, point: float, nbits: int, alignleft=True, tol=0.0) -> int:\n assert isinstance(nbits, int)\n\n if self.periodic:\n point = self._wrap(point)\n\n if point > self.ub + tol:\n raise OutOfDomainError(\"Point {0} exceepds upper bound {1}\".format(point, self.ub+tol))\n if point < self.lb - tol:\n raise OutOfDomainError(\"Point {0} exceepds lower bound {1}\".format(point, self.lb-tol))\n\n bucket_fraction = 2**nbits * (point - self.lb) / (self.ub - self.lb)\n\n index = math.floor(bucket_fraction) if alignleft else math.ceil(bucket_fraction)\n\n # Catch numerical errors when point == self.ub\n # if alignleft is True and index >= 2**nbits:\n # index = (2**nbits) - 1\n\n return index",
"def all_bucket_boundaries(self):\n\n lower = self._lower_bounds[0]\n for i in xrange(1, self.total_buckets):\n upper = self._lower_bounds[i]\n yield (lower, upper)\n lower = upper\n\n yield (lower, float('Inf'))",
"def upsample_nearest(input, size=None, scale_factor=None):\n return interpolate(input, size, scale_factor, 'nearest')",
"def testBucketSize(self):\n b = SomeBucket()\n fit = b.add(1000)\n self.assertEqual(100, fit)",
"def example_ten():\n x = list(range(10**6))\n i = bisect_left(x, 991234)"
] | [
"0.7870765",
"0.7870765",
"0.6348058",
"0.61719537",
"0.61719537",
"0.5968876",
"0.59520507",
"0.5859526",
"0.5772838",
"0.57719916",
"0.5732552",
"0.57283777",
"0.5643887",
"0.5643887",
"0.5638288",
"0.56255656",
"0.5601112",
"0.55790997",
"0.55427814",
"0.5534332",
"0.55317855",
"0.5513875",
"0.5488901",
"0.5468757",
"0.5430644",
"0.5424693",
"0.5416462",
"0.54140425",
"0.53989995",
"0.53902507"
] | 0.79474443 | 0 |
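
bucketize above floors a value to the lower edge of its bucket, which also behaves correctly for negative values and fractional bucket sizes. A small usage sketch (the function is repeated here so the example is self-contained):

import math


def bucketize(point, bucket_size):
    """Floor the point to the next lower multiple of bucket_size."""
    return bucket_size * math.floor(point / bucket_size)


print(bucketize(23.7, 10))    # 20   -> bucket [20, 30)
print(bucketize(-3.2, 10))    # -10  -> flooring, not truncation toward zero
print(bucketize(0.45, 0.25))  # 0.25
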
buckets the points and counts how many in each bucket | def make_histogram(points, bucket_size):
return Counter(bucketize(point, bucket_size) for point in points) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_histogram(points, bucket_size):\n return Counter(bucketize(point, bucket_size) for point in points)",
"def make_histogram(points, bucket_size):\n return Counter(bucketize(point, bucket_size) for point in points)",
"def bucketize(point, bucket_size):\r\n return bucket_size * math.floor(point / bucket_size)",
"def get_buckets(self, first, last, num_buckets, hertz_cutoff=float(5)):\n # Pensar en la posibilidad de no aplicar PCA, permitir utilizar fft sobre una feature diferente, por ejemplo raiz-cuadrada(x2 + y2 + z2)\n if self.pca == True:\n pca = PCA(n_components=1, copy=True, whiten=True)\n numpy_data = array(self.data)\n transformed_dataset = PCA.fit_transform(pca, numpy_data)\n slice=transformed_dataset[first:last]\n else:\n slice = self.data[first:last]\n slice = [column[0] for column in slice]\n \n transformed = fft.fft(slice)\n absolute = [abs(complex) for complex in transformed]\n\n frequencies = self.get_frequencies()\n\n buckets = [0 for i in range(num_buckets)]\n width = hertz_cutoff / num_buckets\n sum_of_buckets = 0.0000001\n for i in range(1, len(absolute)):\n index = int(frequencies[i] / width)\n if index >= num_buckets:\n break\n buckets[index] += absolute[i]\n sum_of_buckets += absolute[i]\n\n #if args.normalize == 't':\n # buckets = map(lambda x: x/sum_of_buckets, buckets)\n\n return buckets",
"def test_bins(self):\n min_val = 0\n max_val = 1\n buckets = 10\n values_per_bucket = 10\n\n import numpy\n\n data = list(numpy.linspace(min_val, max_val, buckets * values_per_bucket))\n bins = numpy.linspace(min_val, max_val + sys.float_info.epsilon, buckets + 1)\n digitized = numpy.digitize(data, bins)\n counts = numpy.bincount(digitized)\n self.assertEqual(buckets + 1, len(counts))\n self.assertEqual(0, counts[0])\n for bucket in counts[1:]:\n self.assertEqual(values_per_bucket, bucket)",
"def bucketize(point, bucket_size):\n return bucket_size * math.floor(point / bucket_size)",
"def bucketize(point, bucket_size):\n return bucket_size * math.floor(point / bucket_size)",
"def _prep_buckets(buckets, len_x):\n if isinstance(buckets, int):\n lims = np.linspace(0, len_x-1, buckets+1, dtype=int)\n else:\n lims = buckets\n buckets = len(lims)-1\n\n # Determine center of each bucket\n mids = np.rint(np.convolve(lims, np.ones(2), 'valid') / 2).astype(int)\n mids[0] = 0\n mids[-1] = len_x - 1\n\n return lims, mids",
"def initial_clusters(self, points):\n groups = {}\n d = int(256 / (self.initial_k))\n for i in range(self.initial_k):\n j = i * d\n groups[(j, j, j)] = []\n for i, p in enumerate(points):\n # if i%100000 == 0:\n # print('processing pixel:', i)\n go = min(groups.keys(), key=lambda c: euclidean_distance(p, c)) \n groups[go].append(p)\n return [g for g in groups.values() if len(g) > 0]",
"def compute_histogram(self):\n # compute distance between points \n distmatrix = np.sqrt(pdist(self.points))\n if not self.mean_dist:\n self.mean_dist = np.mean(distmatrix)\n distmatrix = distmatrix/self.mean_dist\n distmatrix = squareform(distmatrix)\n #compute angles between points\n angles = compute_angles(self.points)\n #quantize angles to a bin\n tbins = np.floor(angles / (2 * pi / self.nbins_theta))\n lg = np.logspace(self.r1, self.r2, num=5)\n #quantize radious to bins\n rbins = np.ones(angles.shape) * -1\n for r in lg:\n counts = (distmatrix < r) \n rbins = rbins + counts.astype(int) \n return rbins, tbins",
"def grid_point_count(self):\n return pytools.product(self.grid_point_counts())",
"def GetPointsInBucket(self, , p_int=..., p_int=..., p_int=...):\n ...",
"def total(h):\r\n\treturn sum(i.points() for i in h)",
"def list_buckets():\n pass",
"def grid_point_counts(self):\n return [high-low for low, high in self._Limits]",
"def get_number_of_posts_per_bucket(dataset, min_time, max_time):\n\n buckets_rdd = dataset.map(lambda rec: (get_bucket(rec, min_time.timestamp(),\n max_time.timestamp()), 1)).\\\n reduceByKey(lambda c1, c2: c1 + c2)\n return buckets_rdd",
"def get_buckets(self, stamp_token):\n are_buckets_ready, buckets = (\n gen_quantile_ops.quantile_accumulator_get_buckets(\n quantile_accumulator_handles=[self._quantile_accumulator_handle],\n stamp_token=stamp_token))\n return are_buckets_ready[0], buckets[0]",
"def gal_count(clusters):\n sum = 0\n for x in clusters:\n sum += x.ngal\n return sum",
"def htable(nbuckets):",
"def build_histogram(iterator, key):\n buckets = defaultdict(int)\n values = {}\n\n num_objects = 0\n for obj in iterator:\n num_objects += 1\n\n try:\n val = obj[key]\n except (KeyError, TypeError):\n continue\n\n value_hash = hashlib.sha1()\n value_hash.update(syaml.dump_config(sort_yaml_obj(val)).encode())\n value_hash = value_hash.hexdigest()\n\n buckets[value_hash] += 1\n values[value_hash] = val\n\n return [\n (h, buckets[h], float(buckets[h]) / num_objects, values[h])\n for h in sorted(buckets.keys(), key=lambda k: -buckets[k])\n ]",
"def runcount(test_keys, sigma, sigma_max, sigma_step,\n npoints_min, npoints_max, npoints_step):\n run = 1\n for key in test_keys:\n if key:\n while sigma < sigma_max:\n npoints = npoints_min\n while npoints < npoints_max:\n npoints += npoints_step\n run += 1\n sigma += sigma_step\n return run",
"def __init__(self):\n self.buckets = collections.defaultdict(list)",
"def sum_of_reoccurring_data_points(x):\n unique, counts = np.unique(x, return_counts=True)\n counts[counts < 2] = 0\n return np.sum(counts * unique)",
"def point_count(N, S):\n\n x, y = make_grid(N)\n\n xc, yc = np.zeros_like(x), np.zeros_like(y)\n # grids for holding result of mandelbrot check\n \n z_binary = np.zeros( (N, N) )\n z_density = np.zeros( (N, N) )\n\n for (xi, i) in zip(x, xrange(N)):\n for (yi, j) in zip(y, xrange(N)):\n\n z = 0 ; s = 0\n c = complex( xi , yi ) \n abs_z = np.sqrt( z*z.conjugate() )\n # initial values for z, c, |z|, and step count\n\n for k in xrange(S):\n\n if abs_z > 2:\n break\n else:\n z_prim = z*z + c\n abs_z = np.sqrt( z_prim*z_prim.conjugate() )\n z = z_prim \n s += 1\n z_density[j, i] += 1\n \n\n\n if abs_z < 2:\n z_binary[j, i] = 1\n \n return z_binary, z_density",
"def __init__(self, bucket_ranges):\n # An array of the histogram bucket boundaries, such as 1, 10, 30, 100\n self.__bucket_ranges = list(bucket_ranges)\n last_value = None\n for i in self.__bucket_ranges:\n if last_value is not None and i < last_value:\n raise ValueError(\"The bucket_ranges argument must be sorted.\")\n else:\n last_value = i\n\n # __counts[i] holds the total number of values we have seen >= to __boundaries[i-1] and < __boundaries[i]\n self.__counts = [0] * len(bucket_ranges)\n # __overflows holds the number of values >= __boundaries[-1]\n self.__overflow = 0\n # The minimum and maximum values seen.\n self.__min = None\n self.__max = None\n # The total number of values collected.\n self.__total_count = 0\n # The sum of the values collected\n self.__total_values = 0",
"def hash_point(self, point) -> int:\n\n hash_value = 7\n hash_value = 53 * hash_value + hash(point.id)\n hash_value = 53 * hash_value + hash(point.cat)\n hash_value = 53 * hash_value + int(point.lat * point.lat)\n hash_value = 53 * hash_value + int(point.lon * point.lon)\n return hash_value",
"def length(self):\n # Loop through all buckets\n # Count number of key-value entries in each bucket\n\n # could be done with 1 line with comprehension\n # return sum(bucket.length() for bucket in self.buckets)\n\n total_entries = 0\n\n for linked_list in self.buckets:\n total_entries += linked_list.length()\n\n return total_entries",
"def get_clusters(self,points):\n self.points = points\n self.__dabest = [self.__cmeans(points,i) for i in range(self.__start,self.__end)]\n ##self.hull = \n return self.__dabest",
"def count_constellations(points):\n\n num_points = len(points)\n edges = np.zeros((num_points, num_points), np.bool)\n for i in range(num_points):\n edges[i, i] = True\n point_i = points[i]\n for j in range(i+1, num_points):\n edges[i, j] = (point_i - points[j]) <= THRESHOLD\n edges[j, i] = edges[i, j]\n\n visited = set()\n constellations = []\n for i in range(num_points):\n if i in visited:\n continue\n\n constellations.append(build_constellation(edges, i, visited))\n\n return len(constellations)",
"def buckets(self):\n return self.indexed"
] | [
"0.7339186",
"0.7339186",
"0.6404966",
"0.63198906",
"0.63145477",
"0.6261517",
"0.6261517",
"0.6104376",
"0.6046917",
"0.5988437",
"0.59414226",
"0.58722836",
"0.58446556",
"0.5792215",
"0.57895434",
"0.57877",
"0.56928134",
"0.5675051",
"0.56433356",
"0.5634637",
"0.5604564",
"0.5580229",
"0.55766076",
"0.55704105",
"0.55604106",
"0.5548378",
"0.55383205",
"0.5535823",
"0.5523993",
"0.55128175"
] | 0.7430182 | 0 |
returns a random draw from a standard normal distribution | def random_normal():
return inverse_normal_cdf(random.random()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def random_normal():\n return inverse_normal_cdf(random.random())",
"def normal(mean, std):\n\n return random.gauss(mean, std)",
"def get_standard_normal_distribution():\n return np.random.normal(0, 1)",
"def draw_normal(self):\n means, scale = self.get_means_and_scales()\n return np.random.normal(means,scale,size=[self.sims,means.shape[0]]).T",
"def draw_normal_initial(self):\n means, scale = self.get_means_and_scales_from_q()\n return np.random.normal(means,scale,size=[self.sims,means.shape[0]]).T",
"def get_random_vector(self, mean, standard_deviaton):\n result = []\n for i in xrange(len(mean)):\n result.append(np.random.normal(mean[i], standard_deviaton[i]))\n return result",
"def draw_random_u(d):\n mu = np.zeros(d)\n cov = np.eye(d)\n u = multivariate_normal.rvs(mean=mu, cov=cov)\n return u / np.linalg.norm(u)",
"def normal_sample(mu, sigma):\n return mu + sigma * torch.randn_like(sigma)",
"def _gen_normal(self, count, **kwargs):\n normal = scipy.stats.norm(loc=kwargs['mean'], scale=kwargs['stdev'])\n rvs = normal.rvs(count)\n return rvs",
"def calcRandNorm(mean,std,seed,var):\n\n varR = (seed*(1.0+var)-seed*(1.0-var))\n val = np.random.random(size=np.size(varR))*varR+seed\n np.clip(val,0.01,0.99,out=val)\n val = sps.norm.ppf(val,loc=mean,scale=std)\n return val",
"def random():\r\n return R.NextDouble()",
"def generate_normal_data(avg_strike, avg_dip, n=10, noise_std=5, porp=2):\n opp_strike = avg_strike + 180\n if opp_strike > 360: \n opp_strike -= 360\n strike = avg_strike * np.ones(n)\n strike[n//porp:] = opp_strike\n dip = avg_dip * np.ones(n)\n \n # Add noise\n strike += noise_std * np.random.randn(n)\n dip += noise_std * np.random.randn(n)\n\n # Filter out things out of a reasonable range\n strike[dip > 90] -= 180\n dip[dip > 90] = 180 - dip[dip>90]\n\n strike[dip < 0] -= 180\n dip[dip < 0] *= -1\n\n strike[strike < 0] += 360\n strike[strike > 360] -= 360\n \n\n normal = geometric_functions.plane2normal(strike, dip)\n slip = geometric_functions.normal_slip(*normal)\n\n return strike, dip, normal, slip",
"def sample_from_truncated_normal(mean, std, clip_a, clip_b, size=None):\n a, b = (clip_a - mean) / std, (clip_b - mean) / std\n r = stats.truncnorm.rvs(a, b, size=size)\n return r * std + mean",
"def stdProbabilityNorm(self,std=False):\n sv = str(scipy.__version__).split('.')\n if int(sv[0])==0 and int(sv[1])==15:\n self.raiseAWarning('SciPy 0.15 detected! In this version, the normalization factor for normal distributions was modified.')\n self.raiseAWarning('Using modified value...')\n return 1.0/np.sqrt(np.pi/2.)\n else:\n return 1.0/np.sqrt(2.*np.pi)",
"def stdProbabilityNorm(self):\n return 0.5",
"def stdProbabilityNorm(self):\n return 1./factorial(self.alpha-1)",
"def __call__(self, shape):\n return np.random.normal(loc=self.mean, scale=self.stddev, size=shape)",
"def standard_normal(weight_shape):\n return np.random.normal(size=weight_shape)",
"def MakeNormalPlot(ys, root=None, line_options={}, **options):\n # TODO: when n is small, generate a larger sample and desample\n n = len(ys)\n xs = [random.normalvariate(0.0, 1.0) for i in range(n)]\n #xs=EstimateRankits(n)\n pyplot.clf()\n pyplot.plot(sorted(xs), sorted(ys), 'b.', markersize=3, **line_options)\n \n myplot.Show(xlabel = 'Standard normal values',\n legend=False,\n **options)",
"def normal_distr(x, mu, sigma, s=1):\n \n return s * 1/(sigma * torch.sqrt(torch.tensor(2 * np.pi))) * torch.exp((-1/2) * ((x - mu) / sigma) ** 2)",
"def _get_gaussian_random(self):\n u1 = generateRandom()\n u2 = generateRandom()\n if u1 < 1e-6:\n u1 = 1e-6\n return sqrt(-2 * log(u1)) * cos(2 * pi * u2)",
"def sample_from(self):\n return numpy.random.normal(self.mu, math.sqrt(self.sigma))",
"def test_randn_normal_distribution():\n\n seed = 28041995\n pts = 10**5\n alpha = 0.05\n expected_mean = 0.0\n expected_var = 1.0\n\n dpnp.random.seed(seed)\n res = dpnp.asnumpy(dpnp.random.randn(pts))\n var = numpy.var(res)\n mean = numpy.mean(res)\n assert math.isclose(var, expected_var, abs_tol=0.03)\n assert math.isclose(mean, expected_mean, abs_tol=0.03)",
"def random(N, D, rng):\n samples = rng.randn(N, D)\n norm = np.sqrt(np.sum(samples*samples, axis=1))\n return samples/norm[:,None]",
"def glorot_normal(seed=None):\n return lambda shape, dtype, batch_ndims=0: _initialize( # pylint: disable=g-long-lambda\n shape, dtype, batch_ndims,\n scale=1., mode='fan_avg', distribution='truncated_normal', seed=seed)",
"def _random_standard_centers(n=100):\n generator = mn(mean=np.array([0, 0]),\n cov=np.array([[1.0, 0.0], [0.0, 1.0]]))\n return [mn(mean=pt, cov=np.array([[1.0, 0.0], [0.0, 1.0]]))\n for pt in generator.rvs(size=n)]",
"def test_normal(self):\r\n s = np.random.normal(-0.42, 0.55, 5000)\r\n plt.hist(s, 30, density=False)\r\n plt.xlabel('Interlayer point energy [eV]')\r\n plt.ylabel('Frequency')\r\n plt.show()",
"def normal_init(self, shape):\n return np.random.normal(size=(shape[0],shape[1]))*0.01",
"def stdProbabilityNorm(self):\n B = factorial(self.alpha-1)*factorial(self.beta-1)/factorial(self.alpha+self.beta-1)\n norm = 1.0/(2**(self.alpha+self.beta-1)*B)\n return norm",
"def test_2_normal(self):\n print(\"test 2: normal distributions\")\n\n mean = self.means[0]\n dispersion = self.dispersions[0]\n\n for i, x in enumerate(self.X):\n print(i+1, normal(x, mean, dispersion), sep=' : ')"
] | [
"0.80372727",
"0.8026352",
"0.7671266",
"0.758368",
"0.72377944",
"0.6984565",
"0.67650646",
"0.6753574",
"0.67039895",
"0.6645325",
"0.66249055",
"0.65704095",
"0.6569231",
"0.6554965",
"0.65050215",
"0.6493456",
"0.6487899",
"0.64656204",
"0.64537066",
"0.64235955",
"0.64082295",
"0.6407857",
"0.64057165",
"0.63974816",
"0.6377026",
"0.6367465",
"0.63468117",
"0.634032",
"0.6316116",
"0.62966305"
] | 0.80891997 | 0 |
Transform request data to a dict with 2 levels of depth | def request_data_to_dict(data):
if not isinstance(data, ImmutableMultiDict):
raise ValueError('Input must be ImmutableMultiDict type.')
res = {}
for (key, value) in data.to_dict().items():
matches = re.match('(.*)\[(.*)\]', key)
if matches:
(key_lv_1, key_lv_2) = matches.groups()
if key_lv_1 not in res:
res[key_lv_1] = {}
res[key_lv_1][key_lv_2] = value
else:
res[key] = value
return res | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _to_request_dict(self):\n return {\"attr1\": self.attr1, \"attr2\": \"test\"}",
"def to_dict(self, request) -> Dict[str, Any]:\n adict = self.__dict__.copy()\n adict[\"url\"] = self.href(adict[\"url\"], request)\n adict[\"img\"] = self.href(adict[\"img\"], request)\n if self.children:\n adict[\"children\"] = [child.to_dict(request) for child in self.children]\n return adict",
"def all_request_data(include_args=False) -> Union[dict, list]:\n base_data = request.values if include_args else request.form\n data = dict(base_data.copy())\n if request.json is not None:\n if isinstance(request.json, list):\n return request.json\n data.update(request.json)\n return data",
"def buildRequestToDict(self, uID, request, approval):\n result = {}\n result['uID'] = uID\n result['request'] = request\n result['approval'] = approval\n return result",
"def _prepare_multipart_form_data(data):\n output = dict()\n for key in data:\n output[key] = (None, data[key])\n return output",
"def convert(data):\n return {k: [d[k] for d in data] for k in data[0].keys()}",
"def _prepare_data(\n self,\n request_data: Optional[Dict[str, Any]] = None,\n ) -> Dict[str, Any]:\n if request_data is None:\n request_data = {}\n request_data['page.rows'] = self._rows_in_page\n if self._current_row:\n request_data['page.number'] = \\\n self._current_row // self._rows_in_page + 1\n else:\n # Page number starts from 0\n page_number = self._min_row // self._rows_in_page\n # But for request page number starts from 1\n request_data['page.number'] = page_number + 1\n self._current_row = self._rows_in_page * page_number\n return request_data",
"def _prepare_payload(self):\n\n requests_json = []\n for qry in self._current_query.queries:\n request = qry.build_request()\n requests_json.append(self._serialize_request(request, len(requests_json)))\n\n return {\"requests\": requests_json}",
"def _flatten_dictionary(self, params, parent=None):\r\n data = OrderedDict()\r\n for key, val in params.items():\r\n full_key = parent + \"[\" + key + \"]\" if parent else key\r\n if isinstance(val, dict):\r\n data.update(self._flatten_dictionary(val, full_key))\r\n else:\r\n data[full_key] = val\r\n return data",
"def to_dict(self, data):\n return json.loads(json.dumps(data))",
"def buildCheckRequestToDict(self, uID, request, firstname, lastname):\n result = {}\n result['uID'] = uID\n result['request'] = request\n result['firstname'] = firstname\n result['lastname'] = lastname\n return result",
"def get_request_dict(request: Union[str, bytes, HttpRequest, dict]) -> Dict[str, str]:\n if isinstance(request, (str, bytes)):\n try:\n return json.loads(request)\n except Exception:\n print('Must be given a valid JSON')\n raise\n if not isinstance(request, dict):\n return vars(request)\n return request",
"def normalise_bookmarks(self, data):\n return {\n k: v.__dict__ for k, v in data.items()\n }",
"def _get_url_params_as_dict(_request):\n return _multi_dict_to_dict(_request.args)",
"def flat_to_nested(self, data: dict, original_data, target, method):\n data[target] = method(original_data)\n return data",
"def _collect_data(self):\n data = {\n \"K\": self.K,\n \"root\": self.root\n }\n return data",
"def request_data():\n if request.method in ('POST', \"PUT\"):\n return request.get_json(force=True)\n else:\n return request.values",
"def post_dict(self):\r\n contents = self.request_content\r\n\r\n # The POST dict will contain a list of values for each key.\r\n # None of our parameters are lists, however, so we map [val] --> val\r\n # If the list contains multiple entries, we pick the first one\r\n try:\r\n post_dict = urlparse.parse_qs(contents, keep_blank_values=True)\r\n return {\r\n key: list_val[0]\r\n for key, list_val in post_dict.items()\r\n }\r\n\r\n except:\r\n return dict()",
"def parse (self, request):\n\n data = {}\n body_start = request.find('\\r\\n\\r\\n')\n if body_start == -1:\n data['body'] = None\n else:\n data['body'] = request[body_start+4:]\n parts = request.split(' ', 2)\n data['method'] = parts[0]\n data['resource'] = parts[1]\n return (data)",
"def _flatten_dict(self, obj, prefix=''):\n\n encoded_dict = QueryDict('').copy()\n\n if hasattr(obj, 'items'):\n for key, value in obj.items():\n\n item_key = '%(prefix)s%(key)s' % { 'prefix': prefix, 'key': key }\n\n # Flatten lists for formsets and model choice fields\n if isinstance(value, list):\n for i, item in enumerate(value):\n\n if isinstance(item, dict):\n\n # Flatten nested object to work with formsets\n item_prefix = '%(key)s-%(index)d-' % { 'key': key, 'index': i }\n encoded_dict.update(self._flatten_dict(item, prefix=item_prefix))\n\n # ID for use with model multi choice fields\n id_value = item.get('id', None)\n if id_value:\n encoded_dict.update({ key: id_value })\n\n else:\n\n # Value for use with model multi choice fields\n encoded_dict.update({ key: item })\n\n # ID for use with model choice fields\n elif isinstance(value, dict):\n encoded_dict[item_key] = value.get('id', value)\n\n # Keep JavaScript null as Python None\n elif value is None:\n encoded_dict[item_key] = None\n\n # Other values are used directly\n else:\n encoded_dict[item_key] = unicode(value)\n\n return encoded_dict",
"def nested_to_flat(self, data: dict, target: str, **kwargs):\n data.update(data.pop(target, {}))\n return data",
"def _build_request_data(request):\n\n # webob (pyramid)\n if WebobBaseRequest and isinstance(request, WebobBaseRequest):\n return _build_webob_request_data(request)\n\n # django\n if DjangoHttpRequest and isinstance(request, DjangoHttpRequest):\n return _build_django_request_data(request)\n\n # django rest framework\n if RestFrameworkRequest and isinstance(request, RestFrameworkRequest):\n return _build_django_request_data(request)\n\n # werkzeug (flask)\n if WerkzeugRequest and isinstance(request, WerkzeugRequest):\n return _build_werkzeug_request_data(request)\n\n # tornado\n if TornadoRequest and isinstance(request, TornadoRequest):\n return _build_tornado_request_data(request)\n\n # bottle\n if BottleRequest and isinstance(request, BottleRequest):\n return _build_bottle_request_data(request)\n\n # Sanic\n if SanicRequest and isinstance(request, SanicRequest):\n return _build_sanic_request_data(request)\n\n # falcon\n if FalconRequest and isinstance(request, FalconRequest):\n return _build_falcon_request_data(request)\n\n # Plain wsgi (should be last)\n if isinstance(request, dict) and 'wsgi.version' in request:\n return _build_wsgi_request_data(request)\n\n # FastAPI (built on top of Starlette, so keep the order)\n if FastAPIRequest and isinstance(request, FastAPIRequest):\n return _build_fastapi_request_data(request)\n\n # Starlette (should be the last one for Starlette based frameworks)\n if StarletteRequest and isinstance(request, StarletteRequest):\n return _build_starlette_request_data(request)\n\n return None",
"def _build_payload(data):\n\n for k, v in data.items():\n data[k] = _transform(v, key=(k,))\n\n payload = {\n 'access_token': SETTINGS['access_token'],\n 'data': data\n }\n\n return payload",
"def __json__(self) -> dict[Any, Any]:\n return self.dict(\n include={\n **{k: ... for k in self.dict().keys() if k != \"input\"},\n \"input\": {\n \"dataset\": {\"id\"},\n \"asset\": {\"id\"},\n },\n },\n exclude={\n \"steps\": {\"__all__\": {\"id\"}},\n },\n )",
"def to_representation(self, data):\n items = super(DictSerializer, self).to_representation(data)\n return {item[self.dict_key]: item for item in items}",
"def as_dict(self):\n return dict((key, value) for key, value, depth in self.entries.itervalues())",
"def json2dict(data):\n sub_all = data.get(\"values\")\n keys = data.get(\"fields\")\n dic_all = []\n for sub in sub_all:\n x = dict(zip(keys, sub))\n if not x['datastring'] is None:\n x['datastring'] = json.loads(x.get('datastring'))\n dic_all.append(x)\n return dic_all",
"def make_dicts(self):\n self._dicts = [tree.to_dict() for tree in self.reaction_trees]\n self._update_route_dict(self._dicts, \"dict\")",
"def data_to_dict(data: Data) -> Dict[str, Any]:\n properties = data.serialize_init_args(obj=data)\n properties[\"__type\"] = data.__class__.__name__\n return properties",
"def flatten(data, delim='_'):\n result = {}\n\n def flatten_dict(keys, name=''):\n if isinstance(keys, collections.MutableMapping):\n for value in keys:\n flatten_dict(keys[value], \"{}{}{}\".format(name, value, delim))\n elif isinstance(keys, list):\n count = 0\n for value in keys:\n if isinstance(value, collections.MutableMapping):\n flatten_dict(value, \"{}{}{}\".format(name, count, delim))\n else:\n result[name[:-1]] = keys\n count += 1\n else:\n result[name[:-1]] = keys\n\n flatten_dict(data)\n return result"
] | [
"0.6378962",
"0.63289046",
"0.6227822",
"0.60884583",
"0.6052619",
"0.60420406",
"0.60407573",
"0.6025036",
"0.59494644",
"0.5901705",
"0.5876816",
"0.5818334",
"0.5817537",
"0.57859",
"0.5757327",
"0.5757108",
"0.5747224",
"0.5745498",
"0.5735476",
"0.5723966",
"0.57147676",
"0.57069033",
"0.56878793",
"0.5649065",
"0.56477416",
"0.5638119",
"0.5636117",
"0.5631413",
"0.55703944",
"0.5537921"
] | 0.7102085 | 0 |
Fades all outputs to the given color and waits for it to complete. | def FadeOutputs(box, color, steps=50):
for output in box:
output.Fade(color=color, steps=steps)
time.sleep(steps / (float(box.frequency) / len(box))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def do_fade_colour(l, leds, r, g, b, duration):\n l._do_multi_led_command(\n create_fade_colour_command, leds, r, g, b, duration\n )",
"def color_chase(self, color: tuple = CYAN, wait: float = DEFAULT_SPEED):\n for i in range(self.np.n):\n self.np[i] = color\n time.sleep(wait)\n self.np.show()\n return True",
"def _colour_loop(self, colours, seconds=None, milliseconds=None, fade=True):\n colours = self.convert_to_colour_list(colours) #Forces a list of colours into an actual python list\n if len(colours)<2:\n colours.append(\"#000000\") #Blink between black and the specified colour if only one provided\n \n #Start with the first colour immediately:\n if fade:\n self.fade(colours[0])\n else:\n self.set(colours[0])\n step_time = self.clean_time_in_milliseconds(seconds, milliseconds, default_seconds=1, minimum_milliseconds=50)\n \n #Do the loop\n i = 1 #We're moving to the second colour now\n total_colours = len(colours)\n while not self._sequence_stop_signal:\n #Resolve our colour\n next_colour = colours[i]\n i = (i+1) % total_colours #ensures we are never asking for more colours than provided\n if fade: #Fading is a blocking process, thus we let the fade loop use up the time\n _latest_colour = self.fade(next_colour, fade_time=step_time, check=False)\n else: #Set is instant, so we need to consume the step time\n _latest_colour = self.set(next_colour, fade=False, check=False)\n self.sleep(step_time/1000) #NB fade uses milliseconds!!\n #Return the latest colour\n return self.sync_channels()",
"def fadeToRGB(self, color: tuple):\n r, g, b = color\n self._sendi2c('c', [r, g, b])",
"def theaterChase(self, color, wait_ms=50, iterations=10):\n for j in range(iterations):\n for q in range(3):\n for i in range(0, self.LEDS, 3):\n self.ring.setPixelColor(i + q, color)\n self.ring.show()\n time.sleep(wait_ms / 1000.0)\n for i in range(0, self.LEDS, 3):\n self.ring.setPixelColor(i + q, 0)",
"def fade(startColor, endColor, steps, interval, strip):\r\n lastUpdate = utime.time() - interval\r\n for i in range(0, steps):\r\n print(\"range step: \", steps)\r\n red = ((startColor[0] * (steps - i)) + (endColor[0] * i)) // steps\r\n green = ((startColor[1] * (steps - i)) + (endColor[1] * i)) // steps\r\n blue = ((startColor[2] * (steps - i)) + (endColor[2] * i)) // steps\r\n \r\n while ((utime.time() - lastUpdate) < interval):\r\n pass\r\n setStrip(strip, (red, green, blue))\r\n lastUpdate = utime.time()",
"def FadeOut(self):\r\n\r\n while 1:\r\n self._alpha_amount -= 10\r\n if self._alpha_amount <= 0:\r\n self._alpha_amount = 255\r\n return\r\n\r\n self.SetTransparent(self._alpha_amount)\r\n wx.SafeYield()\r\n wx.MilliSleep(15)",
"def theaterChase(self, color, wait_ms=50, iterations=10):\n\t\tstrip = self._strip\n\t\tfor j in range(iterations):\n\t\t\tfor q in range(3):\n\t\t\t\tfor i in range(0, strip.numPixels(), 3):\n\t\t\t\t\tstrip.setPixelColor(i+q, color)\n\t\t\t\tstrip.show()\n\t\t\t\ttime.sleep(wait_ms/1000.0)\n\t\t\t\tfor i in range(0, strip.numPixels(), 3):\n\t\t\t\t\tstrip.setPixelColor(i+q, 0)",
"def cycle_colors(colors=(\"red\", \"green\", \"blue\"), delay_secs=1):\n set_color('black') # Start with all LED's \"off\"\n\n for c in colors:\n print(\"LEDs are all \" + c)\n set_color(c)\n update()\n sleep(delay_secs)",
"def led_theaterChase(strip, color, wait_ms=50, iterations=5):\n for j in range(iterations):\n for q in range(3):\n for i in range(0, strip.numPixels()-q, 3):\n strip.setPixelColor(i+q, color)\n strip.show()\n gevent.sleep(wait_ms/1000.0)\n for i in range(0, strip.numPixels()-q, 3):\n strip.setPixelColor(i+q, 0)",
"def theaterChase(strip, color, wait_ms=50, iterations=10):\r\n for j in range(iterations):\r\n for q in range(3):\r\n for i in range(0, strip.numPixels(), 3):\r\n strip.setPixelColor(i+q, color)\r\n strip.show()\r\n time.sleep(wait_ms/1000.0)\r\n for i in range(0, strip.numPixels(), 3):\r\n strip.setPixelColor(i+q, 0)",
"def theaterChase(strip, color, wait_ms=50, iterations=10):\n for j in range(iterations):\n for q in range(3):\n for i in range(0, strip.numPixels(), 3):\n strip.setPixelColor(i+q, color)\n strip.show()\n time.sleep(wait_ms/1000.0)\n for i in range(0, strip.numPixels(), 3):\n strip.setPixelColor(i+q, 0)",
"def theaterChase(strip, color, wait_ms=50, iterations=10):\n for j in range(iterations):\n for q in range(3):\n for i in range(0, strip.numPixels(), 3):\n strip.setPixelColor(i+q, color)\n strip.show()\n time.sleep(wait_ms/1000.0)\n for i in range(0, strip.numPixels(), 3):\n strip.setPixelColor(i+q, 0)",
"def theaterChase(strip, color, wait_ms=50, iterations=1):\n\tfor j in range(iterations):\n\t\tfor q in range(3):\n\t\t\tfor i in range(0, strip.numPixels(), 3):\n\t\t\t\tstrip.setPixelColor(i+q, color)\n\t\t\tstrip.show()\n\t\t\ttime.sleep(wait_ms/1000.0)\n\t\t\tfor i in range(0, strip.numPixels(), 3):\n\t\t\t\tstrip.setPixelColor(i+q, 0)",
"def fade_display():\n for col in range(5):\n for row in range(5):\n brightness = microbit.display.get_pixel(col, row)\n # reduce by one, but make sure it's still in 0 to 9\n brightness = clamp(MIN_BRIGHTNESS, brightness - 1, MAX_BRIGHTNESS)\n microbit.display.set_pixel(col, row, brightness)",
"def fade_out(self, duration: int = 1):\n original_brightness = self.np.brightness\n\n step_level = 0.01\n sleep_cycle = duration / (original_brightness / step_level)\n\n while self.np.brightness > 0:\n # FIXME :\n # Im not totally sure why, but...\n # self.np.brightness -= step_level\n # causes self.np.brightness of 0.1 to become 0.09000000000000001\n # and i dont feel like figuring out why right now\n self.np.brightness = round(self.np.brightness - step_level, 2)\n self.np.show()\n time.sleep(sleep_cycle)\n\n self.np.fill(OFF)\n self.np.show()\n\n # Reset brightness to original value now that pixels are OFF\n self.np.brightness = original_brightness\n\n return True",
"def startColorLoop():\n b.set_group(1, 'on', True)\n b.set_group(1, 'bri', 254)\n b.set_group(1, 'hue', 255)\n b.set_group(1, 'sat', 255)\n b.set_group(1, 'effect', 'colorloop')",
"def flash_red(self, duration=0.2):\n self.pen_color = wx.RED\n self.Refresh(True)\n t = time.time()\n while time.time() - t < duration:\n time.sleep(0.001)\n self.pen_color = wx.WHITE\n self.Refresh(True)",
"def fadeLED( gpio, startVal, stopVal ):\n\t#convert passed values into usable format for pi-blaster (i.e 0 - 1)\n\tRGBstartVal = startVal / 255\n\tRGBstopVal = stopVal / 255\n\t#debug\n\tprint RGBstartVal, startVal, RGBstopVal, stopVal;\n\t#set the current LED values to the start value\n\tcurrentVal = RGBstartVal\n\tif RGBstartVal < RGBstopVal:\n\t\twhile currentVal < RGBstopVal:\n\t\t\tos.system(\"echo \\\"{0}={1}\\\" > /dev/pi-blaster\" .format(gpio,currentVal))\n\t\t\tcurrentVal = currentVal + STEP;\n\t\t\ttime.sleep(FADESPEED)\n\t\t\tprint currentVal\n\telif RGBstartVal > RGBstopVal:\n\t\t while currentVal > RGBstopVal:\n\t\t\tos.system(\"echo \\\"{0}={1}\\\" > /dev/pi-blaster\" .format(gpio,currentVal))\n currentVal = currentVal - STEP;\n time.sleep(FADESPEED)\n print currentVal\n\treturn;",
"def test_blink(self):\n display = get_display(1)\n display.register_state(main.Fade)\n prev = display.strand[0]\n for i in range(1000):\n display.tick()\n assert(all(0 <= display.strand[0][i] <= 255 for i in range(3)))\n assert display.strand[0] != prev\n prev = display.strand[0]",
"def animate():\n for c in itertools.cycle(['|', '/', '-', '\\\\']):\n if done:\n break\n sys.stdout.write('\\rloading ' + c)\n sys.stdout.flush()\n time.sleep(0.1)\n sys.stdout.write('\\rDone! ')",
"def colorEyes(self, color, fade_duration = 0.2):\n\n\t\tif color in self.colors:\n\t\t\tcolor = self.colors[color]\n\n\t\tself.leds.fadeRGB(\"FaceLeds\", color, fade_duration)",
"def drive_to_color(self, color):\n while not self.color_sensor.color == color:\n self.right_motor.run_forever(speed_sp=150)\n self.left_motor.run_forever(speed_sp=150)\n self.right_motor.stop(stop_action=ev3.Motor.STOP_ACTION_BRAKE)\n self.left_motor.stop(stop_action=ev3.Motor.STOP_ACTION_BRAKE)\n if color == ev3.ColorSensor.COLOR_RED:\n ev3.Sound.play(\"/home/robot/csse120/assets/sounds/Enter_Sandman.wav\").wait()\n elif color == ev3.ColorSensor.COLOR_BLUE:\n ev3.Sound.play(\"/home/robot/csse120/assets/sounds/Luke_Bryan_-_That_s_My_Kind_Of_Night_with_Lyrics_.wav\").wait()\n elif color == ev3.ColorSensor.COLOR_BLACK:\n ev3.Sound.play(\"/home/robot/csse120/assets/sounds/Semi-Charmed_Life_1_.wav\").wait()",
"def jump(self, colours, seconds=None, milliseconds=None):\n return self.run_sequence(self._colour_loop, colours=colours, seconds=seconds, milliseconds=milliseconds, fade=False)",
"def colorEyes(self, color, fade_duration = 0.2):\n\n\t\tif color in self.colors:\n\t\t\tcolor = colors[color]\n\n\t\tself.leds.fadeRGB(\"FaceLeds\", color, fade_duration)",
"def fadeout(self, time):\r\n check_mixer()\r\n sdl.Mix_FadeOutChannel(self.chan, time)",
"def Demo(controller_name, outputs):\n print 'Initiating controller %r ...\\n' % controller_name\n box = getattr(controller, controller_name).FirstDevice(outputs=outputs)\n print '\\nFade to white and back.'\n FadeOutputs(box, '#fff')\n FadeOutputs(box, '#000')\n print 'Fade to a random color and back to black, ad nauseum.'\n while True:\n FadeOutputs(box, utils.RandomColor())\n FadeOutputs(box, '#000')",
"def fadeOut(self):\n self.fadeout_counter += 1\n for tone in self.tones:\n amp = tone.getAmplitude()\n # print(\"Fadeout call %i: new amp is %f, with delta %f\" % (self.fadeout_counter,\n # amp, constants.fadeout_multiplier))\n\n if amp > 0.005:\n amp *= constants.fadeout_multiplier\n else:\n amp = 0\n self.resetFadeout()\n tone.setAmplitude(amp)\n\n # this bit is a workaround for LoopingCall\n # getting stuck on the first call when using\n # qt4reactor\n if self.ui and self.fadeout_counter == 1:\n self.ui.flicker()",
"def led_rainbow(strip, wait_ms=2, iterations=1):\n for j in range(256*iterations):\n for i in range(strip.numPixels()):\n strip.setPixelColor(i, color_wheel((i+j) & 255))\n strip.show()\n gevent.sleep(wait_ms/1000.0)",
"def colorwipe(strip, color, wait_ms=1):\n for k in range(strip.numPixels()):\n strip.setPixelColor(k, color)\n strip.show()\n time.sleep(wait_ms/1000.0)\n strip.setPixelColor(k, color)\n strip.show()\n time.sleep(wait_ms/1000.0)"
] | [
"0.6856355",
"0.66404843",
"0.64948034",
"0.6415791",
"0.6347536",
"0.62956667",
"0.6144092",
"0.6049132",
"0.5983142",
"0.59646887",
"0.5947122",
"0.5939072",
"0.59066415",
"0.58729315",
"0.57774615",
"0.5768261",
"0.5764425",
"0.5718582",
"0.56916755",
"0.56897503",
"0.56601495",
"0.5651298",
"0.56409705",
"0.5637987",
"0.5633521",
"0.5524392",
"0.5522047",
"0.55021185",
"0.5493574",
"0.5489311"
] | 0.8030329 | 0 |
Returns the name the function should have in the Python API, based on the C++ function name. For entry_type 'function', the cpp_name is used unmodified; otherwise strip everything before the first underscore, so that | def to_py_name(cpp_name, entry_type):
if entry_type == 'function':
return cpp_name
first_underscore = cpp_name.find('_')
assert(first_underscore != -1)
return cpp_name[first_underscore + 1:] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _plugin_funcname(func):\n funcname = func.__name__.rstrip(\"_\")\n if funcname.startswith(\"__\"):\n return funcname + \"__\"\n return funcname",
"def wrapper_function_name(text):\n text = GLGenerator.split_to_body_and_ext(text)\n body = text[0]\n ext = text[1]\n for suffix, replacement in FUNCTION_SUFFIXES.items():\n if body.endswith(suffix):\n body = body[:-len(suffix)] + replacement\n break\n text = body + ext\n res = util.to_snake_case(text[2:])\n return res",
"def get_fun_name(line):\n match = re.match(r'(function|macro)\\s*\\((\\w+)', line)\n if not match:\n return\n return match.groups()[1]",
"def _function_name(func):\n return \"Calling the function: def {}()\".format(func.__name__)",
"def decode_cpp_function_names(self) -> None:\n with Popen(['c++filt'], stdin=PIPE, stdout=PIPE, universal_newlines=True) as proc:\n for func in self.source_functions:\n proc.stdin.write(func.name + '\\n')\n proc.stdin.flush()\n func.pretty_name = proc.stdout.readline().rstrip('\\n\\r')",
"def cython_functionname(self, t, cycyt=None):\n if cycyt is None:\n t = self.canon(t)\n if isinstance(t, basestring):\n return t, self.cython_functionnames[t]\n elif t[0] in self.base_types:\n return t, self.cython_functionnames[t[0]]\n return self.cython_functionname(t, self.cython_functionnames[t[0]])\n d = {}\n for key, x in zip(self.template_types[t[0]], t[1:-1]):\n if isinstance(x, basestring):\n val = self.cython_functionnames[x] if x in self.cython_functionnames \\\n else x\n elif isinstance(x, Number):\n val = str(x).replace('-', 'Neg').replace('+', 'Pos')\\\n .replace('.', 'point')\n elif x[0] in self.base_types:\n val = self.cython_functionnames[x[0]]\n else:\n _, val = self.cython_functionname(x, self.cython_functionnames[x[0]])\n d[key] = val\n return t, cycyt.format(**d)",
"def get_function_name_at(self, address):\n pass",
"def make_python_name(self, name):\n # FIXME see cindex.SpellingCache\n for k, v in [('<', '_'), ('>', '_'), ('::', '__'), (',', ''), (' ', ''),\n (\"$\", \"DOLLAR\"), (\".\", \"DOT\"), (\"@\", \"_\"), (\":\", \"_\"),\n ('-', '_')]:\n if k in name: # template\n name = name.replace(k, v)\n # FIXME: test case ? I want this func to be neutral on C valid\n # names.\n if name.startswith(\"__\"):\n return \"_X\" + name\n if len(name) == 0:\n pass\n elif name[0] in \"01234567879\":\n return \"_\" + name\n return name",
"def get_func_name(func, resolv_alias=True, win_characters=True):\r\n if hasattr(func, '__module__'):\r\n module = func.__module__\r\n else:\r\n try:\r\n module = inspect.getmodule(func)\r\n except TypeError:\r\n if hasattr(func, '__class__'):\r\n module = func.__class__.__module__\r\n else:\r\n module = 'unknown'\r\n if module is None:\r\n # Happens in doctests, eg\r\n module = ''\r\n if module == '__main__':\r\n try:\r\n filename = os.path.abspath(inspect.getsourcefile(func))\r\n except:\r\n filename = None\r\n if filename is not None:\r\n # mangling of full path to filename\r\n parts = filename.split(os.sep)\r\n if parts[-1].startswith('<ipython-input'):\r\n # function is defined in an IPython session. The filename\r\n # will change with every new kernel instance. This hack\r\n # always returns the same filename\r\n parts[-1] = '__ipython-input__'\r\n filename = '-'.join(parts)\r\n if filename.endswith('.py'):\r\n filename = filename[:-3]\r\n module = module + '-' + filename\r\n module = module.split('.')\r\n if hasattr(func, 'func_name'):\r\n name = func.func_name\r\n elif hasattr(func, '__name__'):\r\n name = func.__name__\r\n else:\r\n name = 'unknown'\r\n # Hack to detect functions not defined at the module-level\r\n if resolv_alias:\r\n # TODO: Maybe add a warning here?\r\n if hasattr(func, 'func_globals') and name in func.func_globals:\r\n if not func.func_globals[name] is func:\r\n name = '%s-alias' % name\r\n if inspect.ismethod(func):\r\n # We need to add the name of the class\r\n if hasattr(func, 'im_class'):\r\n klass = func.im_class\r\n module.append(klass.__name__)\r\n if os.name == 'nt' and win_characters:\r\n # Stupid windows can't encode certain characters in filenames\r\n name = _clean_win_chars(name)\r\n module = [_clean_win_chars(s) for s in module]\r\n return module, name",
"def get_function_raw_name_at(self, address):\n pass",
"def function_name(parameters):",
"def function_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function_name\")",
"def function_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function_name\")",
"def name(self):\n\t\treturn self._func_name",
"def function_name(cls):\n function_name = String(cls.__name__).snakecase().lower()\n return function_name",
"def _name_from_args(func, _, params):\n return \"{}_{}\".format(func.__name__, \"_\".join(str(arg) for arg in params.args))",
"def _make_class_name(name):\n return name[0].upper() + name[1:] + \"Ufunc\"",
"def __write_cpp_func_name(self, cpp_file, return_type, object_suffix, in_header):\n if in_header:\n func_suffix = \";\"\n else:\n func_suffix = \" {\"\n func_name = \"Make\" + self.class_name + object_suffix + \"()\" + func_suffix\n if len(return_type + \" \" + func_name) > 80:\n print(return_type, file=cpp_file)\n print(func_name, file=cpp_file)\n else:\n print(return_type + \" \" + func_name, file=cpp_file)",
"def _GetMapEntryTypeName(field_name: str) -> str:\n capitalized_name_components = map(str.capitalize, field_name.split(\"_\"))\n\n return f\"{''.join(capitalized_name_components)}Entry\"",
"def get_function_name():\n\n # inspect.stack()[0][2] returns name of this function\n function_name = inspect.stack()[1][3]\n\n return function_name",
"def function_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"function_name\")",
"def get_name(name, file: str) -> str:\n return os.path.basename(file) if name == \"__main__\" else name",
"def name_from_dist(dist_func):\n return str(dist_func).split()[0].split('.')[-1][:-4]",
"def function_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"function_name\")",
"def function_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"function_name\")",
"def get_function_name(ifunc, *, scoped=False, mangle=False):\n\n name = _translate_function_name(interrogate_function_name(ifunc), mangle)\n\n if scoped:\n parent = interrogate_function_class(ifunc)\n if parent:\n name = get_type_name(parent, scoped=True, mangle=mangle) + '.' + name\n\n return name",
"def funcName():\r\n import sys\r\n return sys._getframe(1).f_code.co_name",
"def _get_func_name(func):\n parts = []\n module = inspect.getmodule(func)\n if module:\n parts.append(module.__name__)\n\n qualname = func.__qualname__\n if qualname != func.__name__:\n parts.append(qualname[: qualname.find(\".\")])\n\n parts.append(func.__name__)\n return \".\".join(parts)",
"def __name__(self):\n return '_'.join([function.__name__ for function in self.functions])",
"def fname(func):\n return \"%s.%s\" % (func.__module__, func.__name__)"
] | [
"0.67665726",
"0.6633372",
"0.64924026",
"0.64650005",
"0.6389167",
"0.63741195",
"0.6252101",
"0.62447554",
"0.62166333",
"0.61651427",
"0.61233187",
"0.6080773",
"0.6080773",
"0.60069233",
"0.59802777",
"0.5970677",
"0.5940581",
"0.5938672",
"0.5932276",
"0.5924244",
"0.5922279",
"0.59160954",
"0.5912154",
"0.59005755",
"0.59005755",
"0.5896942",
"0.58967555",
"0.58890486",
"0.587486",
"0.5863453"
] | 0.84663165 | 0 |
Returns the name the property should have in the Python API, based on the C++ struct name. | def property_to_py_name(cpp_struct_name):
first_underscore = cpp_struct_name.find('_')
assert first_underscore != -1
return cpp_struct_name[first_underscore + 1:] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def PropertyName(self) -> str:",
"def property_name(self) -> str:\n return str(self.prop_name)",
"def _get_name(x):\r\n if isinstance(x, Property) or isinstance(x, KeyIndex):\r\n return x.name\r\n elif isinstance(x, Edge):\r\n return x.label\r\n raise RuntimeError(\"Invalid object type {}\".format(type(x)))",
"def name(self):\n return self.prop.key",
"def name(self):\n return self.proto.name",
"def get_property_name(self, iprop):\n pname = _pychidg.f90wrap_get_property_name(self=self._handle, iprop=iprop)\n return pname",
"def get_property_name(name):\n name = _strip(name)\n return name",
"def getName(obj):",
"def name(self) -> str:\n return self.proto.name",
"def name(self): # -> Any | str:\n ...",
"def propertyName(self, p_int): # real signature unknown; restored from __doc__\n return \"\"",
"def get_property_field_name(name):\n name = _strip(name)\n return _PROPERTY_FIELD_PREFIX + name",
"def name(self):\n return self.properties.get('name', None)",
"def property_name(self, name: str) -> None:\n name = str(name)\n if len(name) > 100:\n name = name[:100]\n self.prop_name = name",
"def name(self) -> str: # pragma: no cover",
"def name(self):\n return self.properties.get('name')",
"def name(self):\n return self.properties.get('name')",
"def name(self) -> str:",
"def name(self) -> str:",
"def name(self) -> str:",
"def name(self) -> str:",
"def name(self) -> str:",
"def name(self): # -> Any:\n ...",
"def to_PyGetSetDef_entry(cpp_struct_name, py_name, doc):\r\n return 'PROPERTY_FORWARDER(%s, \"%s\", %s)' % (\r\n cpp_struct_name, py_name, doc)",
"def getprop(name):\n return _slp.getprop(name)",
"def get_name(self):\n return self.thing.name",
"def prop(self):\n return getattr(self, name)",
"def py_field_name(self, field):\n name = field.name\n name = as_identifier(name)\n if self.options(field).convert_case:\n name = from_camel_case(name)\n name = self._mangle_name(name)\n return name",
"def get_name(self) -> str:\n return self.__name",
"def _get_name(self):\n return self.__name"
] | [
"0.7078016",
"0.6528718",
"0.65224946",
"0.6521",
"0.6296151",
"0.6226451",
"0.622544",
"0.6218055",
"0.61775655",
"0.615941",
"0.6148613",
"0.6143576",
"0.6141541",
"0.61411786",
"0.6134688",
"0.6088676",
"0.6088676",
"0.6035033",
"0.6035033",
"0.6035033",
"0.6035033",
"0.6035033",
"0.6016793",
"0.6000106",
"0.5993892",
"0.5983944",
"0.59800386",
"0.59767205",
"0.59413713",
"0.5932973"
] | 0.8263242 | 0 |
Determines the Python method type (METH_NOARGS or METH_VARARGS) from the C++ argument list and type of function. | def get_type(args_str, entry_type):
# The C-method-implementations accept self as the first argument,
# so a one-argument method will be invoked with zero arguments in Python.
no_args = 1 if entry_type == "method" else 0
return ("METH_NOARGS" if len(args_str.split(",")) == no_args
else "METH_VARARGS") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_pytype(self, c_arg, parse_arg):\n if isinstance(c_arg, FunctionAddress):\n return 'O'\n else:\n try:\n return pytype_parse_registry[(parse_arg.dtype, parse_arg.precision)]\n except KeyError as e:\n raise NotImplementedError(\"Type not implemented for argument collection : \"+str(type(parse_arg))) from e",
"def method ( return_type = Any, *arg_types, **kwarg_types ):\n # The following is a 'hack' to get around what seems to be a Python bug\n # that does not pass 'return_type' and 'arg_types' through to the scope of\n # 'callback' below:\n kwarg_types[''] = ( return_type, arg_types )\n\n def callback ( frame, method_name, func ):\n\n # This undoes the work of the 'hack' described above:\n return_type, arg_types = kwarg_types['']\n del kwarg_types['']\n\n # Add a 'fake' positional argument as a place holder for 'self':\n arg_types = ( Any, ) + arg_types\n\n # Make the sure the first argument is a function:\n if type( func ) is not FunctionType:\n raise TypeError, (\"'method' must immediately precede a method \"\n \"definition.\")\n\n # Make sure the return type is a trait (if not, coerce it to one):\n return_type = _trait_for( return_type )\n\n # Make up the list of arguments defined by the function we are wrapping:\n code = func.func_code\n func_name = func.func_name\n arg_count = code.co_argcount\n var_names = code.co_varnames[ : arg_count ]\n defaults = func.func_defaults or ()\n defaults = ( Missing, ) * (arg_count - len( defaults )) + defaults\n arg_traits = []\n n = len( arg_types )\n if n > len( var_names ):\n raise TraitError, (\"Too many positional argument types specified \"\n \"in the method signature for %s\" % func_name)\n for i, name in enumerate( var_names ):\n if (i > 0) and (i < n):\n if name in kwarg_types:\n raise TraitError, (\"The '%s' argument is defined by both \"\n \"a positional and keyword argument in \"\n \"the method signature for %s\" %\n ( name, func_name ) )\n trait = arg_types[i]\n else:\n try:\n trait = kwarg_types[ name ]\n del kwarg_types[ name ]\n except:\n # fixme: Should this be an error (missing parameter type?)\n trait = Any\n arg_traits.append( name )\n arg_traits.append( Trait( defaults[i], _trait_for( trait ) ) )\n\n # Make sure there are no unaccounted for type parameters left over:\n if len( kwarg_types ) > 0:\n names = kwarg_types.keys()\n if len( names ) == 1:\n raise TraitError, (\"The '%s' method signature keyword defines \"\n \"a type for an argument which '%s' does not \"\n \"have.\" % ( names[0], func_name ))\n else:\n names.sort()\n raise TraitError, (\"The %s method signature keywords define \"\n \"types for arguments which '%s' does not have.\" % (\n ', '.join( [ \"'%s'\" % name for name in names ] ),\n func_name ))\n\n # Otherwise, return a method wrapper for the function:\n frame.f_locals[ method_name ] = CTraitMethod( func_name, func,\n tuple( [ return_type ] + arg_traits ) )\n\n _add_assignment_advisor( callback )",
"def arg_type(self):\n\n arg_type = self.ctype\n\n if 'int' in arg_type:\n arg_type = 'int'\n\n if self.is_list:\n arg_type = 'list of {}'.format(arg_type)\n\n if 'required' in self.qualifiers:\n arg_type = \"{}, optional\".format(arg_type)\n\n return arg_type",
"def method(rtype):\n\n def decorator(func):\n argcount = func.__code__.co_argcount\n argnames = func.__code__.co_varnames[:argcount]\n ndefaults = 0\n if func.__defaults__:\n ndefaults = len(func.__defaults__)\n\n argNames = func.__code__.co_varnames[(argcount - ndefaults):]\n\n if ndefaults < (argcount - 1):\n raise cSyntaxError(\n 'Type declarations missing from arguments %(args)r in the BLM '\n 'method %(func)s().' % {\n 'args': list(reversed(argnames))[ndefaults:],\n 'func': func.__name__,})\n params = []\n if func.__defaults__:\n params = [ arg._instantiate(name) for arg, name in\n zip(func.__defaults__, argNames)]\n\n func.__defaults__ = None\n m = ExternalMethod(func.__name__, func)\n if rtype:\n m.rtype = rtype._instantiate('result')\n m.params = params\n\n return m\n\n return decorator",
"def argument_types(self):\r\n class ArgumentsIterator(collections.Sequence):\r\n def __init__(self, parent):\r\n self.parent = parent\r\n self.length = None\r\n\r\n def __len__(self):\r\n if self.length is None:\r\n self.length = conf.lib.clang_getNumArgTypes(self.parent)\r\n\r\n return self.length\r\n\r\n def __getitem__(self, key):\r\n # FIXME Support slice objects.\r\n if not isinstance(key, int):\r\n raise TypeError(\"Must supply a non-negative int.\")\r\n\r\n if key < 0:\r\n raise IndexError(\"Only non-negative indexes are accepted.\")\r\n\r\n if key >= len(self):\r\n raise IndexError(\"Index greater than container length: \"\r\n \"%d > %d\" % ( key, len(self) ))\r\n\r\n result = conf.lib.clang_getArgType(self.parent, key)\r\n if result.kind == TypeKind.INVALID:\r\n raise IndexError(\"Argument could not be retrieved.\")\r\n\r\n return result\r\n\r\n assert self.kind == TypeKind.FUNCTIONPROTO\r\n return ArgumentsIterator(self)",
"def get_func_type(self, *args):\n return _ida_hexrays.cfunc_t_get_func_type(self, *args)",
"def test_method():\n class TestClass(object):\n\n def typed(self, arg1):\n # type: (TestClass, int) -> None\n pass\n\n def untyped(self, arg1):\n # type: (int) -> None\n pass\n\n assert get_type_hints(TestClass.typed, globals(), locals()) == {\n 'return': type(None),\n 'self': TestClass,\n 'arg1': int\n }\n assert get_type_hints(TestClass.untyped) == {\n 'return': type(None),\n 'arg1': int\n }",
"def get_method_sig(method):\n\n # The return value of ArgSpec is a bit weird, as the list of arguments and\n # list of defaults are returned in separate array.\n # eg: ArgSpec(args=['first_arg', 'second_arg', 'third_arg'],\n # varargs=None, keywords=None, defaults=(42, 'something'))\n argspec = inspect.getargspec(method)\n arg_index=0\n args = []\n\n # Use the args and defaults array returned by argspec and find out\n # which arguments has default\n for arg in argspec.args:\n default_arg = _get_default_arg(argspec.args, argspec.defaults, arg_index)\n if default_arg.has_default:\n args.append(\"%s=%s\" % (arg, default_arg.default_value))\n else:\n args.append(arg)\n arg_index += 1\n return \"%s(%s)\" % (method.__name__, \", \".join(args))",
"def get_func_type(self, *args):\n return _ida_hexrays.cfuncptr_t_get_func_type(self, *args)",
"def get_member_type(*args):\n return _ida_hexrays.get_member_type(*args)",
"def trait_method ( func, return_type, **arg_types ):\n # Make the sure the first argument is a function:\n if type( func ) is not FunctionType:\n if type( return_type ) is not FunctionType:\n raise TypeError, \"First or second argument must be a function.\"\n else:\n func, return_type = return_type, func\n\n # Make sure the return type is a trait (if not, coerce it to one):\n return_type = _trait_for( return_type )\n\n # Make up the list of arguments defined by the function we are wrapping:\n code = func.func_code\n arg_count = code.co_argcount\n var_names = code.co_varnames[ : arg_count ]\n defaults = func.func_defaults or ()\n defaults = ( Missing, ) * (arg_count - len( defaults )) + defaults\n arg_traits = []\n for i, name in enumerate( var_names ):\n try:\n trait = arg_types[ name ]\n del arg_types[ name ]\n except:\n # fixme: Should this be a hard error (i.e. missing parameter type?)\n trait = Any\n arg_traits.append( name )\n arg_traits.append( Trait( defaults[i], _trait_for( trait ) ) )\n\n # Make sure there are no unaccounted for type parameters left over:\n if len( arg_types ) > 0:\n names = arg_types.keys()\n if len( names ) == 1:\n raise TraitError, (\"The '%s' keyword defines a type for an \"\n \"argument which '%s' does not have.\" % (\n names[0], func.func_name ))\n else:\n names.sort()\n raise TraitError, (\"The %s keywords define types for arguments \"\n \"which '%s' does not have.\" % (\n ', '.join( [ \"'%s'\" % name for name in names ] ),\n func.func_name ))\n\n # Otherwise, return a method wrapper for the function:\n return CTraitMethod( func.func_name, func,\n tuple( [ return_type ] + arg_traits ) )",
"def _preprocess_typecheck(argSig, argspecs, slf_or_clsm=False):\n # todo: Maybe move also slf-logic here\n vargs = argspecs.varargs\n try:\n kw = argspecs.keywords\n except AttributeError:\n kw = argspecs.varkw\n try:\n kwonly = argspecs.kwonlyargs\n except AttributeError:\n kwonly = None\n if not vargs is None or not kw is None:\n arg_type_lst = list(get_Tuple_params(argSig))\n if not vargs is None:\n vargs_pos = (len(argspecs.args)-1) \\\n if slf_or_clsm else len(argspecs.args)\n # IndexErrors in this section indicate that a child-method was\n # checked against a parent's type-info with the child featuring\n # a more wider type on signature level (e.g. adding vargs)\n try:\n vargs_type = typing.Sequence[arg_type_lst[vargs_pos]]\n except IndexError:\n vargs_type = typing.Sequence[typing.Any]\n try:\n arg_type_lst[vargs_pos] = vargs_type\n except IndexError:\n arg_type_lst.append(vargs_type)\n if not kw is None:\n kw_pos = len(argspecs.args)\n if slf_or_clsm:\n kw_pos -= 1\n if not vargs is None:\n kw_pos += 1\n if not kwonly is None:\n kw_pos += len(kwonly)\n try:\n kw_type = typing.Dict[str, arg_type_lst[kw_pos]]\n except IndexError:\n kw_type = typing.Dict[str, typing.Any]\n try:\n arg_type_lst[kw_pos] = kw_type\n except IndexError:\n arg_type_lst.append(kw_type)\n return typing.Tuple[tuple(arg_type_lst)]\n else:\n return argSig",
"def tagFunctionTypeDecidingMethod(self, parentTagType):\n # DOC {{{\n # }}}\n\n # CODE {{{\n if (parentTagType == PythonTag.TT_CLASS):\n return PythonTag.TT_METHOD\n else:\n return PythonTag.TT_FUNCTION\n # }}}",
"def _get_types(func, clsm, slf, clss = None, prop_getter = False,\n unspecified_type = Any, infer_defaults = None):\n func0 = util._actualfunc(func, prop_getter)\n # check consistency regarding special case with 'self'-keyword\n if not slf:\n argNames = util.getargnames(util.getargspecs(func0))\n if len(argNames) > 0:\n if clsm:\n if argNames[0] != 'cls':\n util._warn_argname('classmethod using non-idiomatic cls argname',\n func0, slf, clsm, clss)\n if clss is None and (slf or clsm):\n if slf:\n assert util.is_method(func) or isinstance(func, property)\n if clsm:\n assert util.is_classmethod(func)\n clss = util.get_class_that_defined_method(func)\n assert hasattr(clss, func.__name__)\n args, res = _funcsigtypes(func, slf or clsm, clss, None, prop_getter,\n unspecified_type = unspecified_type, infer_defaults = infer_defaults)\n return _match_stub_type(args), _match_stub_type(res)",
"def cpp_type(self, t):\n t = self.canon(t)\n if isinstance(t, basestring):\n if t in self.base_types:\n return self.cpp_types[t]\n # must be tuple below this line\n tlen = len(t)\n if 2 == tlen:\n if 0 == t[1]:\n return self.cpp_type(t[0])\n elif self.isrefinement(t[1]):\n if t[1][0] in self.cpp_types:\n subtype = self.cpp_types[t[1][0]]\n if callable(subtype):\n subtype = subtype(t[1], self)\n return subtype\n else:\n return self.cpp_type(t[0])\n else:\n last = '[{0}]'.format(t[-1]) if isinstance(t[-1], int) else t[-1]\n return self._cpp_type_add_predicate(self.cpp_type(t[0]), last)\n elif 3 <= tlen:\n assert t[0] in self.template_types\n assert len(t) == len(self.template_types[t[0]]) + 2\n template_name = self.cpp_types[t[0]]\n assert template_name is not NotImplemented\n template_filling = []\n kinds = self.argument_kinds.get(t, ((Arg.NONE,),)*(tlen-2))\n for x, kind in zip(t[1:-1], kinds):\n if kind is Arg.LIT:\n x = self.cpp_literal(x)\n elif kind is Arg.TYPE:\n x = self.cpp_type(x)\n elif kind is Arg.VAR:\n x = self._cpp_var_name(x)\n elif isinstance(x, bool):\n x = self.cpp_types[x]\n elif isinstance(x, Number):\n x = str(x)\n else:\n try:\n x = self.cpp_type(x) # Guess it is a type?\n except TypeError:\n x = self._cpp_var_name(x) # Guess it is a variable\n template_filling.append(x)\n cppt = '{0}< {1} >'.format(template_name, ', '.join(template_filling))\n if 0 != t[-1]:\n last = '[{0}]'.format(t[-1]) if isinstance(t[-1], int) else t[-1]\n cppt = self._cpp_type_add_predicate(cppt, last)\n return cppt",
"def parameterTypes(self, p_int): # real signature unknown; restored from __doc__\n return []",
"def argument_type(arg):\n types = (int, float)\n \n for t in types:\n try:\n return type(t(arg))\n except ValueError:\n continue\n \n return str",
"def infer_function_call(func, func_type, argtypes):\n from numba2 import phase\n\n if is_method(func_type):\n func = func_type.parameters[0]\n argtypes = [func_type.parameters[1]] + list(argtypes)\n else:\n func = func.const\n\n # TODO: Support recursion !\n\n if len(func.overloads) == 1 and not func.opaque:\n argtypes = fill_missing_argtypes(func.py_func, tuple(argtypes))\n\n env = fresh_env(func, argtypes)\n func, env = phase.typing(func, env)\n # env[\"numba.typing.restype\"]\n if func_type is None:\n func_type = env[\"numba.typing.signature\"]\n return func, func_type, env[\"numba.typing.restype\"]",
"def get_func(name, argtypes=None, restype=c_int, lib=libDE):\n logger.debug(\"Getting NewWordFinder API function: 'name': '{}', 'argtypes': '{}',\"\n \" 'restype': '{}'.\".format(name, argtypes, restype))\n func = getattr(lib, name)\n if argtypes is not None:\n func.argtypes = argtypes\n if restype is not c_int:\n func.restype = restype\n logger.debug(\"NewWordFinder API function '{}' retrieved.\".format(name))\n return func",
"def _build_comute_argtype(num_nd, num_nd_write):\n ret = [_xc_func_p, ctypes.c_size_t]\n ret += [_ndptr] * num_nd\n ret += [_ndptr_w] * num_nd_write\n return tuple(ret)",
"def _getargs(fn_sig):\n params = fn_sig.parameters\n args = []\n for k, v in params.items():\n if (v.kind & v.POSITIONAL_OR_KEYWORD) == v.POSITIONAL_OR_KEYWORD:\n args.append(k)\n else:\n msg = \"%s argument type unsupported in jitclass\" % v.kind\n raise errors.UnsupportedError(msg)\n return args",
"def get_types(*args, **kwargs) -> list:\n arg_types = []\n for arg in args:\n arg_types.append(type(arg))\n for values in kwargs.values():\n arg_types.append(type(values))\n return arg_types",
"def GetType(self, *args, **kwargs):\n pass",
"def cpp_type_to_python(self, ot: str):\n t = ot\n t = remove_cvref(t)\n t = self._remove_variable_type_prefix(t)\n try:\n return cpp_base_type_to_python(t)\n except KeyError:\n pass\n if is_function_pointer_type(t):\n func = function_pointer_type_info(t)\n args = \",\".join([self.cpp_type_to_python(arg.type) for arg in func.args])\n return f'Callable[[{args}], {self.cpp_type_to_python(func.ret_type)}]'\n\n if is_function_type(t):\n func = function_type_info(t)\n args = \",\".join([self.cpp_type_to_python(arg.type) for arg in func.args])\n return f'Callable[[{args}], {self.cpp_type_to_python(func.ret_type)}]'\n\n if is_pointer_type(t):\n cpp_base = self.resolve_to_basic_type_remove_const(pointer_base(t))\n if is_pointer_type(cpp_base) or is_array_type(cpp_base):\n return f'\"level 2 pointer:{t}\"' # un-convertible: level 2 pointer\n if cpp_base in ARRAY_BASES:\n return ARRAY_BASES[cpp_base]\n return self.cpp_type_to_python(cpp_base)\n if is_array_type(t):\n b = array_base(t)\n if b in ARRAY_BASES: # special case: string array\n return ARRAY_BASES[b]\n base = self.cpp_type_to_python(b)\n return f'List[{base}]'\n if is_tuple_type(t):\n es = tuple_elements(t)\n bases = [self.cpp_type_to_python(i) for i in es]\n bases_str = \",\".join(bases)\n return f'Tuple[{bases_str}]'\n\n # check classes\n objects = self.objects\n if t in objects:\n o = objects[t]\n if isinstance(o, GeneratorClass) or isinstance(o, GeneratorEnum):\n return t.replace(\"::\", \".\").strip(\" .\") # todo fix this\n if isinstance(o, GeneratorTypedef):\n return self.cpp_type_to_python(o.target)\n\n if t.startswith(\"(anonymous\"):\n return f'\"{t}\"'\n\n # this means this is\n logger.warning(\"%s might be an internal symbol, failed to resolve to basic type\", t)\n return t",
"def test_method_reference_explicit_type_arguments_for_generic_type(self):\n self.assert_contains_method_reference_expression_in_m(\n parse.parse(setup_java_class(\"List<String>::size;\")))",
"def overload_method(typ, attr, **kwargs):\n return _overload_method_common(typ, attr, **kwargs)",
"def overload_classmethod(typ, attr, **kwargs):\n return _overload_method_common(types.TypeRef(typ), attr, **kwargs)",
"def get_types(func):\n return _get_types(func, util.is_classmethod(func), util.is_method(func))",
"def getmethparlist(ob):\n defText = callText = \"\"\n # bit of a hack for methods - turn it into a function\n # but we drop the \"self\" param.\n # Try and build one for Python defined functions\n args, varargs, varkw = inspect.getargs(ob.__code__)\n items2 = args[1:]\n realArgs = args[1:]\n defaults = ob.__defaults__ or []\n defaults = [\"=%r\" % (value,) for value in defaults]\n defaults = [\"\"] * (len(realArgs)-len(defaults)) + defaults\n items1 = [arg + dflt for arg, dflt in zip(realArgs, defaults)]\n if varargs is not None:\n items1.append(\"*\" + varargs)\n items2.append(\"*\" + varargs)\n if varkw is not None:\n items1.append(\"**\" + varkw)\n items2.append(\"**\" + varkw)\n defText = \", \".join(items1)\n defText = \"(%s)\" % defText\n callText = \", \".join(items2)\n callText = \"(%s)\" % callText\n return defText, callText",
"def getmethparlist(ob):\n defText = callText = \"\"\n # bit of a hack for methods - turn it into a function\n # but we drop the \"self\" param.\n # Try and build one for Python defined functions\n args, varargs, varkw = inspect.getargs(ob.__code__)\n items2 = args[1:]\n realArgs = args[1:]\n defaults = ob.__defaults__ or []\n defaults = [\"=%r\" % (value,) for value in defaults]\n defaults = [\"\"] * (len(realArgs)-len(defaults)) + defaults\n items1 = [arg + dflt for arg, dflt in zip(realArgs, defaults)]\n if varargs is not None:\n items1.append(\"*\" + varargs)\n items2.append(\"*\" + varargs)\n if varkw is not None:\n items1.append(\"**\" + varkw)\n items2.append(\"**\" + varkw)\n defText = \", \".join(items1)\n defText = \"(%s)\" % defText\n callText = \", \".join(items2)\n callText = \"(%s)\" % callText\n return defText, callText"
] | [
"0.61319065",
"0.60803515",
"0.5884141",
"0.5874044",
"0.58434784",
"0.5799301",
"0.56729174",
"0.56608915",
"0.5459384",
"0.54216975",
"0.54152423",
"0.53767866",
"0.53465176",
"0.5273557",
"0.52425903",
"0.51958585",
"0.51934904",
"0.513849",
"0.5122222",
"0.5119435",
"0.5089043",
"0.5086425",
"0.5076506",
"0.5019748",
"0.50111264",
"0.50091416",
"0.4995413",
"0.49849144",
"0.49705145",
"0.49705145"
] | 0.6993184 | 0 |
Creates one entry for a PyMethodDef array from the entries for one function (as returned by parse_file). | def to_PyMethodDef_entry(items):
entry_type = items[0]
items = items[1:]
if entry_type == 'method':
return 'FORWARDER(%s, %s, "%s", %s)' % items
elif entry_type == 'function':
return 'FREE_FORWARDER(%s, %s, "%s", %s)' % items
elif entry_type == 'method_template':
return 'FORWARDER(%s<common_type>, %s, "%s", %s)' % items
else:
assert False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def to_PyMethodDef(name, entries, extra_includes):\r\n\r\n methodEntries = [to_PyMethodDef_entry(items) for items in entries]\r\n if name is not None:\r\n methodDef = ('static PyMethodDef %s_methods[] = {\\n ' % name +\r\n ',\\n '.join(methodEntries) + ',\\n ')\r\n else:\r\n methodDef = ',\\n'.join(methodEntries) + ',\\n '\r\n\r\n for include in extra_includes:\r\n methodDef += '#include \"%s\"\\n' % include\r\n\r\n if name is not None:\r\n methodDef += '{nullptr,nullptr,0,nullptr} // Sentinel\\n};'\r\n return methodDef",
"def _make_methods(functions, modname):\n methods_table = []\n codes = []\n for funcname, flags, code in functions:\n cfuncname = \"%s_%s\" % (modname, funcname)\n if 'METH_KEYWORDS' in flags:\n signature = '(PyObject *self, PyObject *args, PyObject *kwargs)'\n else:\n signature = '(PyObject *self, PyObject *args)'\n methods_table.append(\n \"{\\\"%s\\\", (PyCFunction)%s, %s},\" % (funcname, cfuncname, flags))\n func_code = \"\"\"\n static PyObject* {cfuncname}{signature}\n {{\n {code}\n }}\n \"\"\".format(cfuncname=cfuncname, signature=signature, code=code)\n codes.append(func_code)\n\n body = \"\\n\".join(codes) + \"\"\"\n static PyMethodDef methods[] = {\n %(methods)s\n { NULL }\n };\n static struct PyModuleDef moduledef = {\n PyModuleDef_HEAD_INIT,\n \"%(modname)s\", /* m_name */\n NULL, /* m_doc */\n -1, /* m_size */\n methods, /* m_methods */\n };\n \"\"\" % dict(methods='\\n'.join(methods_table), modname=modname)\n return body",
"def make_c_header(self):\n res = \\\n\"\"\"PyThreadState* ___madz_LANG_python_thread_state; //Holds Thread State for this interpreter\nPyObject *___madz_LANG_python_wrapper_module; //Hold Pointer to the _madz.py file representing this plugin\ntypedef struct{{\n{function_pointers}\n}}___madz_LANG_python_TYPE_;\n___madz_LANG_python_TYPE_ ___madz_LANG_python_OUTPUT;\nvoid ___madz_init_imports();\n{fn_dec}\n\n\"\"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n #TODO function_pointers, all same except\n fragments ={\"fn_dec\" : \"\", \"function_pointers\" : \"\"}\n fn = \"\"\"{rettype}{fnname}({args});\\n\"\"\"\n pointer = \"\"\" {prettype} (*{nodename})({args});\\n\"\"\"\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n frg = {\n \"prettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n\n }\n fragments[\"fn_dec\"] += fn.format(**frg)\n fragments[\"function_pointers\"] += pointer.format(**frg)\n if fragments[\"function_pointers\"] == \"\":\n fragments[\"function_pointers\"] = \"uint8_t _madz_empty;\"\n return res.format(**fragments)",
"def main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--input', help='Input .py file', nargs='+')\n args = parser.parse_args()\n\n mod_func = []\n\n for pyfile in args.input:\n tree = ast.parse(open(pyfile).read())\n\n methods = sorted({node.name for node in ast.walk(tree)\n if isinstance(node, ast.FunctionDef)})\n mod_func.extend([[pyfile, methods[i]] for i in range(len(methods))])\n\n write_csv(\"meth_func.csv\", mod_func)",
"def visit_FunctionDef(self, node):\n self.manager.found[\"funcs\"].append({\"name\":node.name,\n \"lineno\":node.lineno,\n \"namespace\":\".\".join(self.parent)})",
"def make_module_hook(self):\n res = \\\n\"\"\"{fname} = shared_object.{fname}\n {fname}.restype = POINTER({structname})\n {varname} = {fname}()\n\n\"\"\"\n fragments ={\n \"varname\": self._namespace_mangle(self.namespace) + \"_plugin\",\n \"fname\": \"___madz_LANG_python_get_out_struct\" if self.namespace == \"\" else \"___madz_LANG_python_get_\"+self._namespace_mangle(self.namespace) + \"_struct\",\n \"structname\": self.python_madz_types + (\"OUTSTRUCT\" if self.namespace == \"\" else self._namespace_mangle(self.namespace))\n }\n\n return res.format(**fragments)",
"def make_c_function_stubs(self):\n fn =\\\n\"\"\"{rettype} {fnname}({args}){{\n {rettype} ret;\n\n ret = {cast_and_deref}___madz_LANG_python_OUTPUT.{nodename}({argnames});\n\n return ret;\n}}\n\n\"\"\"\n fn_no_return =\\\n\"\"\"{rettype} {fnname}({args}){{\n ___madz_LANG_python_OUTPUT.{nodename}({argnames});\n return;\n}}\n\n\"\"\"\n res = \"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n fragments = {\n \"maybe_parentheses\": \")\" if isinstance(node.type.return_type.get_type(),pdl.TypeStruct) else \"\",\n \"cast_and_deref\": self.make_c_cast_deref_string(c_gen, node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n \"argnames\":\",\".join(map(\n lambda a: a.name,\n node.type.args))\n }\n res += (fn if not isinstance(node.type.return_type, pdl.TypeTypeNone) else fn_no_return).format(**fragments)\n return res",
"def make_function_callbacks(self):\n res = \"\"\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n frags={\n \"name\": node.name,\n \"nameupper\": self.python_madz_deftypes + \"___\" + node.name,\n \"sanitize\": \"_sanitize_python_callback\" if isinstance(node.type.return_type.get_type(), pdl.TypePointer) else \"_python_callback\"\n }\n res += \\\n\"\"\"\n temp = cast({sanitize}(user_code_module.{name}, {nameupper}), {nameupper})\n keepers['{nameupper}'] = temp\n _plugin.contents.{name} = temp\n\"\"\".format(**frags)\n return res",
"def visit_FunctionDef(self, node):\n if node.args.kwarg == 'kwargs':\n functions[node.name] = {}\n # functions[node.name] = {'firstline': node.lineno}\n # sigend = max(node.lineno, lastline(node.args))\n # functions[node.name]['sigend'] = sigend\n functions[node.name]['args'] = [argument.id for argument in node.args.args if argument.id != 'self']\n for argument in functions[node.name]['args']:\n arguments[argument] = ''\n\n # docstring = ast.get_docstring(node)\n # docstringlength = len(docstring.split('\\n')) if docstring else -1\n # functions[node.name]['docend'] = sigend+docstringlength\n # functions[node.name]['lastline'] = lastline(node)\n self.generic_visit(node)",
"def create_included_function_list_readme():\n import iteration_utilities\n from iteration_utilities import Iterable\n from itertools import chain\n from operator import itemgetter\n from astropy.table import Table\n from astropy.io.ascii import RST\n\n rtd_link = '`{name} <http://iteration-utilities.readthedocs.io/en/latest/api/{file}.html#{module}.{name}>`_'\n\n module_to_file = {'iteration_utilities': 'cfuncs',\n 'iteration_utilities._cfuncs': 'cfuncs',\n 'iteration_utilities._helpers._performance': 'helper',\n 'iteration_utilities._recipes._additional': 'additional',\n 'iteration_utilities._recipes._core': 'core',\n }\n\n it = Iterable(chain(iteration_utilities._cfuncs.__dict__.items(),\n iteration_utilities._helpers._performance.__dict__.items(),\n iteration_utilities._recipes._core.__dict__.items(),\n iteration_utilities._recipes._additional.__dict__.items())\n # Exclude PY2 variable and private functions\n ).filterfalse(lambda x: x[0].startswith(('PY2', '_'))\n # Exclude everything that has no __module__\n ).filter(lambda x: hasattr(x[1], '__module__')\n # Only include functions that come from the package\n ).filter(lambda x: x[1].__module__.startswith('iteration_utilities')\n # Remove duplicate names\n ).unique_everseen(itemgetter(0)\n # Sort lexically\n ).get_sorted(key=lambda x: x[0].lower())\n\n it = Iterable(it\n # Create a Sphinx link from function name and module\n ).map(lambda i: rtd_link.format(file = module_to_file[i[1].__module__],\n module = i[1].__module__,\n name = i[0])\n # Group into 4s so we get a 4 column Table\n ).grouper(4, fillvalue=''\n # Convert to list because Table expects it.\n ).as_list()\n\n return '\\n'.join(RST().write(Table(rows=it)))",
"def _create_function(self, expr):\n bb_entry = self.fn.append_basic_block('entry')\n builder = ll.IRBuilder(bb_entry)\n\n lj = LLVMJitCallbackPrinter(self.module, builder, self.fn,\n func_arg_map=self.param_dict)\n\n ret = self._convert_expr(lj, expr)\n\n if self.signature.ret_arg:\n output_fp_ptr = builder.bitcast(self.fn.args[self.signature.ret_arg],\n ll.PointerType(self.fp_type))\n for i, val in enumerate(ret):\n index = ll.Constant(ll.IntType(32), i)\n output_array_ptr = builder.gep(output_fp_ptr, [index])\n builder.store(val, output_array_ptr)\n builder.ret(ll.Constant(ll.IntType(32), 0)) # return success\n else:\n lj.builder.ret(self._wrap_return(lj, ret))\n\n strmod = str(self.module)\n return strmod",
"def make_func_declarations(self):\n\n\t\tfor name in self.func_dict:\n\t\t\tbody = Lexer(self.func_dict[name]).get_tokens()\n\t\t\ti = body.index('\\\\') + 1 #Start of parameters\n\t\t\tj = body.match_paren(i)\n\t\t\tparam_tokens = body[i + 1: j] #Stuff inside parentheses\n\t\t\t#\t\t\tprint \"param list:\", param_tokens\n\n\t\t\tparams = self.split_params(param_tokens)\n\t\t\tparams = map(lambda n: n.split(':'), params)\n\t\t\t#params is now [[<name>,<type>],...]\n\t\t\tc_types = map(lambda n: self.convert_type(*n), params)\n\t\t\t#\t\t\tprint c_types\n\n\t\t\treturn_type = ''\n\t\t\t# +2 to skip over \")\" and \":\"\n\t\t\tif body[j+2] == '(': #Function returns another function\n\t\t\t\t# +3 for [\")\",\"->\",\"<type>\"]\n\t\t\t\tfor x in xrange(j+2, body.match_paren(j+2)+3):\n\t\t\t\t\treturn_type += body[x]\n\t\t\telse: #Function returns a concrete type\n\t\t\t\treturn_type = body[j+2] #+2 to skip over \")\" and \":\"\n\n\t\t\tfunc_type = self.convert_type(name, return_type)\n\t\t\t#\t\t\tprint \"params\", params\n\t\t\t#\t\t\tprint \"c_types\", c_types\n\t\t\t#while True:exec raw_input() in globals(), locals()\n\t\t\tself.cpp_declarations[name] = func_type + '(' + ', '.join(c_types) + ')'\n\n\t\tself.cpp_declarations['main'] = 'int main()' #actually this isn't used",
"def create_included_function_list():\n import iteration_utilities\n from iteration_utilities import Iterable\n from itertools import chain\n from operator import itemgetter\n from astropy.table import Table\n from astropy.io.ascii import RST\n\n it = Iterable(chain(iteration_utilities._cfuncs.__dict__.items(),\n iteration_utilities._helpers._performance.__dict__.items(),\n iteration_utilities._recipes._core.__dict__.items(),\n iteration_utilities._recipes._additional.__dict__.items())\n # Exclude PY2 variable and private functions\n ).filterfalse(lambda x: x[0].startswith(('PY2', '_'))\n # Exclude everything that has no __module__\n ).filter(lambda x: hasattr(x[1], '__module__')\n # Only include functions that come from the package\n ).filter(lambda x: x[1].__module__.startswith('iteration_utilities')\n # Remove duplicate names\n ).unique_everseen(itemgetter(0)\n # Sort lexically\n ).get_sorted(key=lambda x: x[0].lower())\n\n it = Iterable(it\n # Create a Sphinx link from function name and module\n ).map(lambda i: ':py:func:`~{}.{}`'.format(i[1].__module__, i[0])\n # Group into 4s so we get a 4 column Table\n ).grouper(4, fillvalue=''\n # Convert to list because Table expects it.\n ).as_list()\n\n return '\\n'.join(RST().write(Table(rows=it)))",
"def add_functions(specification: Mapping[str, Any]) -> Mapping[str, Any]:\n\n # Class 'Mapping' does not define '__setitem__', so the '[]' operator cannot be used on its instances\n specification[\"functions\"][\"list\"] = []\n specification[\"functions\"][\"list_long\"] = []\n specification[\"functions\"][\"list_short\"] = []\n\n specification[\"functions\"][\"primary\"] = []\n specification[\"functions\"][\"primary_list_long\"] = []\n specification[\"functions\"][\"primary_list_short\"] = []\n\n specification[\"functions\"][\"modifier\"] = []\n specification[\"functions\"][\"modifier_list_long\"] = []\n specification[\"functions\"][\"modifier_list_short\"] = []\n\n specification[\"functions\"][\"to_short\"] = {}\n specification[\"functions\"][\"to_long\"] = {}\n\n for func_name in specification[\"functions\"][\"info\"]:\n\n abbreviated_name = specification[\"functions\"][\"info\"][func_name][\"abbreviation\"]\n\n specification[\"functions\"][\"list\"].extend((func_name, abbreviated_name))\n\n specification[\"functions\"][\"list_long\"].append(func_name)\n specification[\"functions\"][\"list_short\"].append(abbreviated_name)\n\n if specification[\"functions\"][\"info\"][func_name][\"type\"] == \"primary\":\n specification[\"functions\"][\"primary\"].append(func_name)\n specification[\"functions\"][\"primary\"].append(abbreviated_name)\n specification[\"functions\"][\"primary_list_long\"].append(func_name)\n specification[\"functions\"][\"primary_list_short\"].append(abbreviated_name)\n else:\n specification[\"functions\"][\"modifier\"].append(func_name)\n specification[\"functions\"][\"modifier\"].append(abbreviated_name)\n specification[\"functions\"][\"modifier_list_long\"].append(func_name)\n specification[\"functions\"][\"modifier_list_short\"].append(abbreviated_name)\n\n specification[\"functions\"][\"to_short\"][abbreviated_name] = abbreviated_name\n specification[\"functions\"][\"to_short\"][func_name] = abbreviated_name\n\n specification[\"functions\"][\"to_long\"][abbreviated_name] = func_name\n specification[\"functions\"][\"to_long\"][func_name] = func_name\n\n specification[\"functions\"][\"list\"] = list(set(specification[\"functions\"][\"list\"]))\n\n return specification",
"def add_function_entry(self, name=None):\n return self._build_op('function_entry', [], name=name)",
"def make_cpp_func_bodies(self):\n\t\tfor name, body in self.func_bodies.iteritems():\n\t\t\tt = Lexer(body).get_tokens()\t\t\t\n\t\t\tS = [] #Stack\n\t\t\tx = 0\n\t\t\twhile x < len(t):\n\t\t\t\tif t[x] == '(': #function call begins\n\t\t\t\t\tx += 1\n\t\t\t\t\tS.append(self.FUNCS_DICT.get(t[x], t[x]) + '(')\n\t\t\t\telif t[x] == ')': #function call ends\n\t\t\t\t\tacc = ''\n\t\t\t\t\twhile S[-1][-1] != '(':\n\t\t\t\t\t\t#pop off params until function call is reached\n\t\t\t\t\t\tacc = S.pop() + ',' + acc\n\t\t\t\t\t# [:-1] to strip off comma at the end\n\t\t\t\t\tS.append(S.pop() + acc[:-1] + ')') #S.pop() gives function\n\t\t\t\telse:\n\t\t\t\t\tS.append(self.convert_atom(t[x]))\n\t\t\t\tx += 1\n\t\t\tself.cpp_func_bodies[name] = S[0]",
"def enhance_function_signatures(specification: Mapping[str, Any]) -> Mapping[str, Any]:\n\n for func in specification[\"functions\"][\"signatures\"]:\n\n # Add primary parent functions to modifier functions\n if specification[\"functions\"][\"signatures\"][func][\"func_type\"] == \"modifier\":\n specification[\"functions\"][\"signatures\"][func][\"primary_function\"] = specification[\n \"functions\"\n ][\"info\"][func][\"primary_function\"]\n\n for i, sig in enumerate(specification[\"functions\"][\"signatures\"][func][\"signatures\"]):\n args = sig[\"arguments\"]\n req_args = []\n pos_args = []\n opt_args = []\n mult_args = []\n\n for arg in args:\n # Multiple argument types\n if arg.get(\"multiple\", False):\n if arg[\"type\"] in [\"Function\", \"Modifier\"]:\n mult_args.extend(arg.get(\"values\", []))\n elif arg[\"type\"] in [\"NSArg\"]:\n # Complex and Composite signature has this\n mult_args.extend(arg.get(\"values\", []))\n elif arg[\"type\"] in [\"StrArgNSArg\", \"StrArg\"]:\n\n mult_args.append(arg[\"type\"])\n\n # Optional, position dependent - will be added after req_args based on order in bel_specification\n elif arg.get(\"optional\", False) and arg.get(\"position\", False):\n if arg[\"type\"] in [\"Function\", \"Modifier\"]:\n pos_args.append(arg.get(\"values\", []))\n elif arg[\"type\"] in [\"StrArgNSArg\", \"NSArg\", \"StrArg\"]:\n pos_args.append(arg[\"type\"])\n\n # Optional, position independent\n elif arg.get(\"optional\", False):\n if arg[\"type\"] in [\"Function\", \"Modifier\"]:\n opt_args.extend(arg.get(\"values\", []))\n elif arg[\"type\"] in [\"StrArgNSArg\", \"NSArg\", \"StrArg\"]:\n opt_args.append(arg[\"type\"])\n\n # Required arguments, position dependent\n else:\n if arg[\"type\"] in [\"Function\", \"Modifier\"]:\n req_args.append(arg.get(\"values\", []))\n elif arg[\"type\"] in [\"StrArgNSArg\", \"NSArg\", \"StrArg\"]:\n req_args.append(arg[\"type\"])\n\n specification[\"functions\"][\"signatures\"][func][\"signatures\"][i][\n \"req_args\"\n ] = copy.deepcopy(req_args)\n specification[\"functions\"][\"signatures\"][func][\"signatures\"][i][\n \"pos_args\"\n ] = copy.deepcopy(pos_args)\n specification[\"functions\"][\"signatures\"][func][\"signatures\"][i][\n \"opt_args\"\n ] = copy.deepcopy(opt_args)\n specification[\"functions\"][\"signatures\"][func][\"signatures\"][i][\n \"mult_args\"\n ] = copy.deepcopy(mult_args)\n\n return specification",
"def to_PyGetSetDef(name, entries):\r\n getSetDefEntries = [to_PyGetSetDef_entry(*items) for items in entries]\r\n getSetDef = ('static PyGetSetDef %s_getseters[] = {\\n ' % name +\r\n ',\\n '.join(getSetDefEntries) + ',\\n ')\r\n getSetDef += '{nullptr,nullptr,nullptr,nullptr,nullptr} // Sentinel\\n};'\r\n return getSetDef",
"def createFunction(self, entryPoint: ghidra.program.model.address.Address, name: unicode) -> ghidra.program.model.listing.Function:\n ...",
"def _exported_functions(self):\n\n mod_base = self.obj_parent.DllBase\n exp_dir = self.obj_parent.export_dir()\n\n # PE files with a large number of functions will have arrays\n # that spans multiple pages. Thus the first entries may be valid, \n # last entries may be valid, but middle entries may be invalid\n # (paged). In the various checks below, we test for None (paged)\n # and zero (non-paged but invalid RVA). \n\n # Array of RVAs to function code \n address_of_functions = obj.Object('Array',\n offset = mod_base + self.AddressOfFunctions,\n targetType = 'unsigned int',\n count = self.NumberOfFunctions,\n vm = self.obj_native_vm)\n # Array of RVAs to function names \n address_of_names = obj.Object('Array',\n offset = mod_base + self.AddressOfNames,\n targetType = 'unsigned int',\n count = self.NumberOfNames,\n vm = self.obj_native_vm)\n # Array of RVAs to function ordinals \n address_of_name_ordinals = obj.Object('Array',\n offset = mod_base + self.AddressOfNameOrdinals,\n targetType = 'unsigned short',\n count = self.NumberOfNames,\n vm = self.obj_native_vm)\n\n # When functions are exported by Name, it will increase\n # NumberOfNames by 1 and NumberOfFunctions by 1. When \n # functions are exported by Ordinal, only the NumberOfFunctions\n # will increase. First we enum functions exported by Name \n # and track their corresponding Ordinals, so that when we enum\n # functions exported by Ordinal only, we don't duplicate. \n\n seen_ordinals = []\n\n # Handle functions exported by name *and* ordinal \n for i in range(self.NumberOfNames):\n\n name_rva = address_of_names[i]\n ordinal = address_of_name_ordinals[i]\n\n if name_rva in (0, None):\n continue\n\n # Check the sanity of ordinal values before using it as an index\n if ordinal == None or ordinal >= self.NumberOfFunctions:\n continue\n\n func_rva = address_of_functions[ordinal]\n\n if func_rva in (0, None):\n continue\n\n # Handle forwarded exports. If the function's RVA is inside the exports \n # section (as given by the VirtualAddress and Size fields in the \n # DataDirectory), the symbol is forwarded. Return the name of the \n # forwarded function and None as the function address. \n\n if (func_rva >= exp_dir.VirtualAddress and\n func_rva < exp_dir.VirtualAddress + exp_dir.Size):\n n = self._name(func_rva)\n f = obj.NoneObject(\"Ordinal function {0} in module {1} forwards to {2}\".format(\n ordinal, str(self.obj_parent.BaseDllName or ''), n))\n else:\n n = self._name(name_rva)\n f = func_rva\n\n # Add the ordinal base and save it \n ordinal += self.Base\n seen_ordinals.append(ordinal)\n\n yield ordinal, f, n\n\n # Handle functions exported by ordinal only \n for i in range(self.NumberOfFunctions):\n\n ordinal = self.Base + i\n\n # Skip functions already enumberated above \n if ordinal not in seen_ordinals:\n\n func_rva = address_of_functions[i]\n\n if func_rva in (0, None):\n continue\n\n seen_ordinals.append(ordinal)\n\n # There is no name RVA \n yield ordinal, func_rva, obj.NoneObject(\"Name RVA not accessible\")",
"def _build_function_definition(self, spec, decl, param_decls, body):\n declaration = self._build_declarations(spec, [dict(decl=decl, init=None)])[0]\n\n # Adding \"list\" to type.\n spec.name = [spec.name]\n return ast.FuncDef(spec, declaration, param_decls, body)",
"def export_for_pydoc(self, module_globals):\n module_all = module_globals.setdefault(\"__all__\", [])\n for k, v in sorted(self.constants.items()):\n module_globals[k] = v\n module_all.append(k)\n for k, v in sorted(self.enums.items()):\n module_globals[k] = v\n module_all.append(k)\n for fname, (argtypes, argtuple, restype) in sorted(\n self.fundecls.items()):\n prototype = \"def {}{}: pass\".format(\n fname, inspect.formatargspec(argtuple._fields))\n d = {}\n exec(prototype, globals(), d)\n func = d[fname]\n for arg, argtype in zip(argtuple._fields, argtypes):\n func.__annotations__[arg] = argtype\n func.__annotations__[\"return\"] = restype\n module_globals[fname] = func\n module_all.append(fname)",
"def entry_for_one_func(nom, func):\r\n args, varargs, varkw, defaults = inspect.getargspec(func)\r\n argspec = inspect.formatargspec(args, varargs, varkw, defaults)\r\n return entry(nom,\r\n argspec=argspec,\r\n funcdoc=func.__doc__)",
"def funcs_in_script(filename):\n f = open(filename, 'r')\n lines = f.readlines()\n f.close()\n\n N = len(lines)\n funcs = []\n for n in range(N):\n line = lines[n]\n\n ###################################################\n # RETRIEVE FUNCTION NAME #\n ###################################################\n if not line[:4] == 'def ':\n continue\n if not '(' in line:\n continue\n end = line.index('(')\n name = line[4:end]\n\n ###################################################\n # RETRIEVE DOCSTRING HEADER #\n ###################################################\n header = ''\n for m in range(n, N - 1):\n line = lines[m]\n\n # this should not happen (when coded in python syntax, a closing\n # parenthesis must appear first)\n if m > n and line[:4] == 'def ':\n break\n\n # this marks the end of the function definition\n if '):' in line:\n hline = lines[m + 1] # potential docstring header line\n # if it exists, then here\n\n\n # remove leading white spaces:\n while hline[0] == ' ':\n hline = hline[1:]\n\n # check whether it is in fact (the start of) a docstring\n if hline[:3] not in ['\"\"\"', \"'''\"]:\n break\n\n # take the first line of this docstring\n header = hline[3:-1]\n\n # remove docstring closing:\n if header[-3:] in ['\"\"\"', \"'''\"]:\n header = header[:-3]\n\n # ignore outdated functions if labelled as such:\n if header.lower()[:10] == '[outdated]':\n name = None\n if header.lower()[:1] == '*':\n name = None\n break\n\n if name is None:\n continue\n\n funcs.append([name, header])\n\n return funcs",
"def update_free_function_info(self):\n\n for eachModule in self.package_info.module_info:\n if eachModule.use_all_free_functions:\n free_functions = self.source_ns.free_functions(allow_empty=True)\n for eachFunction in free_functions:\n if eachModule.is_decl_in_source_path(eachFunction):\n function_info = CppFreeFunctionInfo(eachFunction.name)\n function_info.module_info = eachModule\n function_info.decl = eachFunction\n eachModule.free_function_info.append(function_info)\n\n else:\n for eachFunction in eachModule.free_function_info:\n functions = self.source_ns.free_functions(eachFunction.name,\n allow_empty=True)\n if len(functions) == 1:\n eachFunction.decl = functions[0]",
"def _FunctionDef(self, t):\n self.write(\"\\n\")\n # check decorators\n if len(t.decorator_list) != 1 or not isinstance(t.decorator_list[0], ast.Attribute):\n self.RaiseError(t, \"Function definitions require a single pyflamegpu decorator of either 'pyflamegpu.agent_function', 'pyflamegpu.agent_function_condition' or 'pyflamegpu.device_function'\") \n # FLAMEGPU_AGENT_FUNCTION\n if t.decorator_list[0].attr == 'agent_function' and t.decorator_list[0].value.id == 'pyflamegpu':\n if getattr(t, \"returns\", False):\n self.RaiseWarning(t, \"Function definition return type not supported on 'pyflamegpu.agent_function'\")\n self.fill(f\"FLAMEGPU_AGENT_FUNCTION({t.name}, \")\n self.dispatchFGPUFunctionArgs(t)\n self.write(\")\")\n # FLAMEGPU_DEVICE_FUNCTION\n elif t.decorator_list[0].attr == 'device_function' and t.decorator_list[0].value.id == 'pyflamegpu':\n self.fill(f\"FLAMEGPU_DEVICE_FUNCTION \")\n if t.returns:\n self.dispatchType(t.returns)\n else:\n self.write(\"void\")\n self.write(f\" {t.name}(\")\n self.dispatchFGPUDeviceFunctionArgs(t)\n self.write(\")\")\n # add to list of defined functions that can be called\n self._device_functions.append(t.name)\n # FLAMEGPU_DEVICE_FUNCTION\n elif t.decorator_list[0].attr == 'agent_function_condition' and t.decorator_list[0].value.id == 'pyflamegpu':\n # check for return annotation\n if not hasattr(t, \"returns\"):\n self.RaiseError(t, \"Agent function conditions must have a 'bool' return type specified as a return type annotation\")\n # check for return annotation type\n if not isinstance(t.returns, ast.Name):\n self.RaiseError(t, \"Agent function conditions return type must be 'bool'\")\n if t.returns.id is not 'bool':\n self.RaiseError(t, \"Agent function conditions return type must be 'bool'\")\n # check to ensure no arguments (discard any with a warning)\n if t.args.args:\n self.RaiseWarning(t, \"Agent function conditions does not support arguments. These will be discarded.\")\n # write the agent function macro\n self.fill(f\"FLAMEGPU_AGENT_FUNCTION_CONDITION({t.name})\")\n else:\n self.RaiseError(t, \"Function definition uses an unsupported decorator. Must use either 'pyflamegpu.agent_function', 'pyflamegpu.agent_function_condition' or 'pyflamegpu.device_function'\")\n self.enter()\n self.dispatch(t.body)\n self.leave()",
"def parse_capi(lines):\n pattern = r'(\\w+)\\s+(\\**)\\s*(\\w+)\\((.*)\\)' # Float32 *sin(...)\n pexcept = r'except (\\??)(.*)'\n\n functions = []\n for line in lines:\n if line.strip():\n m = re.match(pattern, line)\n restype, stars, fname, argtypes = m.groups()\n rest = line[len(m.group(0)):].strip()\n if rest:\n maybe, badval = re.match(pexcept, rest).groups()\n else:\n maybe, badval = None, None\n\n restype = parse_type(\"%s %s\" % (restype, \" \".join(stars)))\n argtypes = map(parse_type, argtypes.split(','))\n signature = Function(restype, argtypes)\n functions.append(Py_Function(fname, signature, maybe, badval))\n\n return functions",
"def entry_for_one_method(nom, method):\r\n # TODO(lhosken) : This is darned similar to entry_for_one_func. Merge 'em?\r\n # (Punted so far since funcdoc indentation made my head hurt)\r\n assert inspect.ismethod(method)\r\n args, varargs, varkw, defaults = inspect.getargspec(method)\r\n # args[:1] instead of args to discard \"self\" arg\r\n argspec = inspect.formatargspec(args[1:], varargs, varkw, defaults)\r\n return entry(nom,\r\n argspec=argspec,\r\n funcdoc=(method.__doc__ or \"\").replace(\"\\n\", \" \"))",
"def make_function_stubs(self):\n res = \"\"\n for node in self.description.declarations() + self.description.definitions():\n if isinstance(node.type,pdl.TypeFunction):\n res += \"def {}({}):\\n pass\".format(node.name, \", \".join(map(\n lambda t: \"{}\".format(t.name), node.type.args)) )\n\n return res",
"def __init__(self, total, function_name, param_sorts, return_sort):\r\n super(FunctionDecl, self).__init__()\r\n global functions\r\n self.total = total\r\n self.function_name = function_name\r\n self.param_sorts = param_sorts\r\n self.return_sort = return_sort\r\n self.basic = basic\r\n self.static = static\r\n\r\n function_info = []\r\n function_info.append(static)\r\n function_info.append(param_sorts)\r\n function_info.append(return_sort)\r\n functions[function_name] = function_info"
] | [
"0.75998366",
"0.5944248",
"0.55603445",
"0.5401207",
"0.5332072",
"0.5176276",
"0.5170217",
"0.5108655",
"0.50803465",
"0.5051816",
"0.50399566",
"0.5013269",
"0.4962817",
"0.4942569",
"0.49400118",
"0.49136788",
"0.4911339",
"0.49019086",
"0.4900917",
"0.48928633",
"0.4855824",
"0.48545307",
"0.48012346",
"0.47907102",
"0.4769449",
"0.47669825",
"0.47159237",
"0.4712289",
"0.47002876",
"0.4698508"
] | 0.7316116 | 1 |
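A minimal usage sketch for the to_PyMethodDef_entry function in the record above. The entry tuple is hypothetical, and the meaning of its four trailing fields (C++ callable, METH_* flags, Python-visible name, doc identifier) is an assumption read off the FORWARDER format string:

    # hypothetical entry tuple; only its shape is taken from the function
    entry = ('method', 'Frame_set_title', 'METH_VARARGS', 'set_title', 'set_title_doc')
    print(to_PyMethodDef_entry(entry))
    # -> FORWARDER(Frame_set_title, METH_VARARGS, "set_title", set_title_doc)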
Creates one entry for a PyGetSetDef array from the entries for one property struct (as returned by parse_file). | def to_PyGetSetDef_entry(cpp_struct_name, py_name, doc):
return 'PROPERTY_FORWARDER(%s, "%s", %s)' % (
cpp_struct_name, py_name, doc) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def to_PyGetSetDef(name, entries):\r\n getSetDefEntries = [to_PyGetSetDef_entry(*items) for items in entries]\r\n getSetDef = ('static PyGetSetDef %s_getseters[] = {\\n ' % name +\r\n ',\\n '.join(getSetDefEntries) + ',\\n ')\r\n getSetDef += '{nullptr,nullptr,nullptr,nullptr,nullptr} // Sentinel\\n};'\r\n return getSetDef",
"def getsetdescr_attach(space, py_obj, w_obj, w_userdata=None):\n py_getsetdescr = cts.cast('PyGetSetDescrObject*', py_obj)\n if isinstance(w_obj, GetSetProperty):\n py_getsetdef = make_GetSet(space, w_obj)\n assert space.isinstance_w(w_userdata, space.w_type)\n w_obj = W_GetSetPropertyEx(py_getsetdef, w_userdata)\n # now w_obj.getset is py_getsetdef, which was freshly allocated\n # XXX how is this ever released?\n # XXX assign to d_dname, d_type?\n assert isinstance(w_obj, W_GetSetPropertyEx)\n py_getsetdescr.c_d_getset = w_obj.getset",
"def get_property_setters(self, doclist):\n\t\tfrom webnotes.utils import cstr\n\t\tproperty_dict = {}\n\t\t# final property dict will be\n\t\t# {\n\t\t#\tdoc_type: {\n\t\t#\t\tfieldname: [list of property setter dicts]\n\t\t#\t}\n\t\t# }\n\n\t\tdoc_type_list = list(set(\n\t\t\td.doctype=='DocType' and d.name or d.parent\n\t\t\tfor d in doclist))\n\t\tin_string = '\", \"'.join(doc_type_list)\n\t\tfor ps in webnotes.conn.sql(\"\"\"\\\n\t\t\tSELECT doc_type, field_name, property, property_type, value\n\t\t\tFROM `tabProperty Setter`\n\t\t\tWHERE doc_type IN (\"%s\")\"\"\" % in_string, as_dict=1):\n\t\t\tproperty_dict.setdefault(ps.get('doc_type'),\n\t\t\t\t\t{}).setdefault(cstr(ps.get('field_name')), []).append(ps)\n\n\t\treturn property_dict, doc_type_list",
"def getset(self, name, value):\r\n return self.format_bulk('GETSET', name, value)",
"def AddGet_SetGet_array_element_test(type: str):\n m = pyflamegpu.ModelDescription(\"model\")\n ed = m.Environment()\n add_func = getattr(ed, f\"newPropertyArray{type}\")\n get_func = getattr(ed, f\"getProperty{type}\")\n set_func = getattr(ed, f\"setProperty{type}\")\n b = [0] * ARRAY_TEST_LEN\n c = [0] * ARRAY_TEST_LEN\n for i in range(ARRAY_TEST_LEN):\n b[i] = i\n c[i] = ARRAY_TEST_LEN-i\n add_func(\"a\", b)\n for i in range(ARRAY_TEST_LEN):\n assert get_func(\"a\", i) == b[i]\n assert set_func(\"a\", i, c[i]) == b[i]\n for i in range(ARRAY_TEST_LEN):\n assert get_func(\"a\", i) == c[i]",
"def AddGet_SetGet_array_test(type: str):\n m = pyflamegpu.ModelDescription(\"model\")\n ed = m.Environment()\n add_func = getattr(ed, f\"newPropertyArray{type}\")\n get_func = getattr(ed, f\"getPropertyArray{type}\")\n set_func = getattr(ed, f\"setPropertyArray{type}\")\n b = [0] * ARRAY_TEST_LEN\n c = [0] * ARRAY_TEST_LEN\n for i in range(ARRAY_TEST_LEN):\n b[i] = i\n c[i] = ARRAY_TEST_LEN-i\n add_func(\"a\", b)\n a = get_func(\"a\")\n for i in range(ARRAY_TEST_LEN):\n assert a[i] == b[i]\n set_func(\"a\", c)\n for i in range(ARRAY_TEST_LEN):\n assert a[i] == b[i]\n a = get_func(\"a\")\n for i in range(ARRAY_TEST_LEN):\n assert a[i] == c[i]",
"def AddGet_SetGet_test(type: str):\n m = pyflamegpu.ModelDescription(\"model\")\n ed = m.Environment()\n add_func = getattr(ed, f\"newProperty{type}\")\n get_func = getattr(ed, f\"getProperty{type}\")\n set_func = getattr(ed, f\"setProperty{type}\")\n add_func(\"a\", 1)\n assert get_func(\"a\") == 1\n assert set_func(\"a\", 2) == 1\n assert get_func(\"a\") == 2",
"def parse_set(field, star_range):\n ranges = tuple(parse_range(r, star_range) for r in field.split(\",\"))\n return crontab.Set(ranges)",
"def get_prop_spec(client_factory, spec_type, properties):\r\n prop_spec = client_factory.create('ns0:PropertySpec')\r\n prop_spec.type = spec_type\r\n prop_spec.pathSet = properties\r\n return prop_spec",
"def _get_set(self, key, operation, create=False, decode=False):\n return self._get_by_type(key, operation, create, b'set', set(), decode=decode)",
"def set_properties(struct):",
"def get_prop_spec(client_factory, spec_type, properties):\n prop_spec = client_factory.create('ns0:PropertySpec')\n prop_spec.type = spec_type\n prop_spec.pathSet = properties\n return prop_spec",
"def __properties__(self) -> dict:\r\n parameters = [\r\n d for d in dir(self) if (d[0] != \"_\") and (d.count(\"set\") == 0)\r\n and (d.count(\"_c\") == 0) and (d.count(\"_f\") == 0)\r\n ]\r\n\r\n return self.__as_json__(parameters)",
"def patch_set(self, *patch_tuples):\n return PatchSet(*patch_tuples)",
"def getSets():",
"def create_sets(self,FD_SET=[],VA_SET=[]):\n \n self.m.S = Set(initialize=self.sectors, doc='sectors')\n\n if self.EORA is True:\n self.m.rROW = Set(initialize=self.countries+['ROW'],ordered=True, doc='regions including export')\n self.m.R = Set(initialize=self.countries+['ROW'],ordered=True, doc='regions')\n else:\n self.m.rROW = Set(initialize=self.countries,ordered=True, doc='regions including export')\n self.m.R = Set(initialize=self.countries,ordered=True, doc='regions')\n\n if self.EORA is True:\n self.m.fdemand = Set(initialize=['P3h', 'P3n','P3g', 'P51','P52','P53'], doc='Final Demand')\n else:\n self.m.fdemand = Set(initialize=self.fd_cat, doc='Final Demand')\n\n if self.EORA is True:\n self.m.VA = Set(initialize=['VA'], doc='value added')\n else:\n self.m.VA = Set(initialize=VA_SET, doc='value added')",
"def build_up_set_params(set_name, series):\n set_params = {}\n if (set_name):\n set_params['name'] = set_name\n if (series):\n set_params['series'] = series\n return set_params",
"def mspatchc_ExtractPatchHeaderToFile(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"PatchFileName\", \"PatchHeaderFileName\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def build_property_spec(client_factory, type=\"VirtualMachine\",\r\n properties_to_collect=[\"name\"],\r\n all_properties=False):\r\n property_spec = client_factory.create('ns0:PropertySpec')\r\n property_spec.all = all_properties\r\n property_spec.pathSet = properties_to_collect\r\n property_spec.type = type\r\n return property_spec",
"def hset(self, name, key, value):\r\n return self.format_multi_bulk('HSET', name, key, value)",
"def _create_parameter_set_names_array(self):\n return xarray.DataArray(list(self._parameter_set_names.values()),\n coords=[list(self._parameter_set_names.keys())],\n dims=[_hash_coordinate_key],\n name=_set_coordinate_key)",
"def svn_client_propset(char_propname, svn_string_t_propval, char_target, svn_boolean_t_recurse, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass",
"def get_specification(self) -> Dict:\n specification = {\n 'version': VERSION,\n 'metadata': {\n 'twoColumn': True,\n 'layout': self.autolayout\n },\n 'nodes': []\n }\n\n def strip_io(io_list: list, direction) -> list:\n \"\"\"\n Strips every input/output from metadata and leaves only\n `name` and `type` keys.\n \"\"\"\n return [\n {\n 'name': io['name'],\n 'type': io['type'],\n 'direction': direction\n }\n for io in io_list\n ]\n\n toremove = set()\n for key, node in self.nodes.items():\n try:\n node_cls = load_class(node.cls_name)\n except (ModuleNotFoundError, ImportError, Exception) as err:\n msg = f'Could not add {node_cls}. Reason:'\n _LOGGER.warn('-' * len(msg))\n _LOGGER.warn(msg)\n _LOGGER.warn(err)\n _LOGGER.warn('-' * len(msg))\n toremove.add(key)\n continue\n parameterschema = node_cls.form_parameterschema()\n\n properties = []\n for name, props in parameterschema['properties'].items():\n new_property = {'name': name}\n\n if 'default' in props:\n new_property['default'] = props['default']\n\n if 'description' in props:\n new_property['description'] = props['description']\n\n def add_default(default_val):\n if new_property.get('default') is None:\n new_property['default'] = default_val\n\n # Case for an input with range defined\n if 'enum' in props:\n new_property['type'] = 'select'\n new_property['values'] = list(map(str, props['enum']))\n add_default(new_property['values'][0])\n # Case for a single value input\n elif 'type' in props:\n if 'array' in props['type']:\n new_property['type'] = 'list'\n if 'items' in props and 'type' in props['items']:\n dtype = props['items']['type']\n new_property['dtype'] = dtype\n add_default([])\n elif 'boolean' in props['type']:\n new_property['type'] = 'checkbox'\n add_default(False)\n elif 'string' in props['type']:\n new_property['type'] = 'text'\n add_default('')\n elif 'integer' in props['type']:\n new_property['type'] = 'integer'\n add_default(0)\n elif 'number' in props['type']:\n new_property['type'] = 'number'\n add_default(0)\n elif 'object' in props['type']:\n # Object arguments should be defined in specification\n # as node inputs, rather than properties\n new_property = None\n else:\n new_property['type'] = 'text'\n add_default('')\n # If no type is specified then text is used\n else:\n new_property['type'] = 'text'\n add_default('')\n\n if new_property is not None:\n properties.append(new_property)\n\n specification['nodes'].append({\n 'name': node.name,\n 'type': node.type,\n 'category': node.category,\n 'properties': properties,\n 'interfaces': strip_io(\n self.io_mapping[node.type]['inputs'],\n 'input'\n ) + strip_io(\n self.io_mapping[node.type]['outputs'],\n 'output'\n )\n })\n\n for key in toremove:\n del self.nodes[key]\n return specification",
"def _get_descriptor_set():\n proto = pb.FileDescriptorSet()\n\n # The FileDescriptor for tensorflow.ranking.internal.ExampleListWithContext.\n file_proto = proto.file.add(\n name=_FILE_NAME, package=_PACKAGE, syntax=\"proto3\")\n message_proto = file_proto.message_type.add(name=_MESSAGE_NAME)\n message_proto.field.add(\n name=_EXAMPLES_FIELD_NAME,\n number=1,\n type=pb.FieldDescriptorProto.TYPE_BYTES,\n label=pb.FieldDescriptorProto.LABEL_REPEATED)\n message_proto.field.add(\n name=_CONTEXT_FIELD_NAME,\n number=2,\n type=pb.FieldDescriptorProto.TYPE_BYTES)\n\n return proto",
"def select_patch(self, pset, name):\n new_pset = {}\n local = []\n for p in pset.pop(name, []):\n if p.path:\n new_pset[p.pop()] = [p]\n else:\n local = p.slist\n return (local, new_pset)",
"def PopulateArrayFromDictionary(self, array_prop, src, name, dst):\n prop = array_prop.item_type\n sub = {\n 'namespace': API_UTIL_NAMESPACE,\n 'name': name,\n 'src': src,\n 'dst': dst,\n }\n\n sub['type'] = self._type_manager.GetCppType(prop),\n if array_prop.optional:\n val = ('%(namespace)s::PopulateOptionalArrayFromDictionary'\n '(*%(src)s, \"%(name)s\", &%(dst)s)')\n else:\n val = ('%(namespace)s::PopulateArrayFromDictionary'\n '(*%(src)s, \"%(name)s\", &%(dst)s)')\n\n return val % sub",
"def getSet(unique_name):",
"def getSet(unique_name):",
"def mspatchc_CreatePatchFile(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"OldFileName\", \"NewFileName\", \"PatchFileName\", \"OptionFlags\", \"OptionData\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def mspatchc_CreatePatchFileEx(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"OldFileCount\", \"OldFileInfoArray\", \"NewFileName\", \"PatchFileName\", \"OptionFlags\", \"OptionData\", \"ProgressCallback\", \"CallbackContext\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)"
] | [
"0.71594757",
"0.5288491",
"0.50017947",
"0.49717405",
"0.49567866",
"0.49039754",
"0.48913658",
"0.47898185",
"0.47757462",
"0.47685832",
"0.47616416",
"0.47450364",
"0.47120082",
"0.46812397",
"0.46606937",
"0.46587437",
"0.46313342",
"0.46271035",
"0.4625974",
"0.46091345",
"0.4594545",
"0.45931476",
"0.45852926",
"0.45817614",
"0.4572646",
"0.45679078",
"0.4567237",
"0.4567237",
"0.45665428",
"0.45549092"
] | 0.5890328 | 1 |
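For the to_PyGetSetDef_entry function in the record above, a quick illustrative call; the argument values (a C++ property-struct name, the Python attribute name, and a doc identifier) are hypothetical:

    # hypothetical arguments for illustration only
    print(to_PyGetSetDef_entry('Frame_title_prop', 'title', 'title_doc'))
    # -> PROPERTY_FORWARDER(Frame_title_prop, "title", title_doc)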
Creates a string of a C PyGetSetDef array named <name>_getseters, containing all entries in the list (as created by to_PyGetSetDef_entry). | def to_PyGetSetDef(name, entries):
getSetDefEntries = [to_PyGetSetDef_entry(*items) for items in entries]
getSetDef = ('static PyGetSetDef %s_getseters[] = {\n ' % name +
',\n '.join(getSetDefEntries) + ',\n ')
getSetDef += '{nullptr,nullptr,nullptr,nullptr,nullptr} // Sentinel\n};'
return getSetDef | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getSets():",
"def get_drivers():\n return [str(d) for d in drivers.values()]",
"def getset(self, name, value):\r\n return self.format_bulk('GETSET', name, value)",
"def get_reader_funcs():\n return READERS",
"def get_all(self):\n return [self.get(name) for name in self.factories.iterkeys()]",
"def clsnames_affecting_onsets(self):\n # type: () -> Set[str]\n output = set()\n output.update(self.NONGRACE_NOTEHEAD_CLSNAMES)\n output.update(self.REST_CLSNAMES)\n output.update(self.MEASURE_SEPARATOR_CLSNAMES)\n output.update(self.TIME_SIGNATURES)\n output.add('repeat_measure')\n return output",
"def get_supported_sets(self):\n return _SUPPORTED_SETS",
"def get_encoder_names(cls) -> list[str]:\n return cls.backbone_names",
"def getBuilderNames():",
"def list_builders(self) -> List[str]:\n return sorted(_iter_builder_names(self._ns2data_dir))",
"def get_set_types(self):\n if not self._refreshed:\n self.refresh()\n return self._setTypes",
"def get_setup_names(self):\n self.setup_names = list(self._optimetrics.GetSetupNames())\n return self.setup_names.copy()",
"def getSets(unique_name=None):",
"def _create_parameter_set_names_array(self):\n return xarray.DataArray(list(self._parameter_set_names.values()),\n coords=[list(self._parameter_set_names.keys())],\n dims=[_hash_coordinate_key],\n name=_set_coordinate_key)",
"def listBuilderNames():",
"def listBuilderNames():",
"def sets(self):\n return self._loaded_and_cached(gdxcc.GMS_DT_SET)",
"def get_decoders_names(self):\n if self.replay_source is None:\n return [\"P\" + str(self.parameters_common_index) + \".\" + str(self.parameters_fs_index) + \"_E\" \\\n + str(self.get_encoder_number())]\n\n if self.helper_decoders_one_class:\n decoders_names = [\"P\" + str(self.parameters_common_index) + \".\" +\n str(self.parameters_fs_index) + \".\" +\n str(self.parameters_helper_index) + \".\" +\n str(self.parameters_incremental_index) +\n \"_T\" + str(self.test_index) + \"_S\" + str(i) + \"_\" +\n self.replay_source + \"_1\" for i in range(len(self.test_structure))]\n else:\n decoders_names = [\"P\" + str(self.parameters_common_index) + \".\" +\n str(self.parameters_fs_index) + \".\" +\n str(self.parameters_helper_index) + \".\" +\n str(self.parameters_incremental_index) +\n \"_T\" + str(self.test_index) + \"_S\" + str(i) + \"_\" +\n self.replay_source for i in range(len(self.test_structure))]\n\n decoders_names[0] = \"P\" + str(self.parameters_common_index) + \".\" + str(self.parameters_fs_index) + \"_E\" \\\n + str(self.get_encoder_number())\n\n return decoders_names",
"def get_property_setters(self, doclist):\n\t\tfrom webnotes.utils import cstr\n\t\tproperty_dict = {}\n\t\t# final property dict will be\n\t\t# {\n\t\t#\tdoc_type: {\n\t\t#\t\tfieldname: [list of property setter dicts]\n\t\t#\t}\n\t\t# }\n\n\t\tdoc_type_list = list(set(\n\t\t\td.doctype=='DocType' and d.name or d.parent\n\t\t\tfor d in doclist))\n\t\tin_string = '\", \"'.join(doc_type_list)\n\t\tfor ps in webnotes.conn.sql(\"\"\"\\\n\t\t\tSELECT doc_type, field_name, property, property_type, value\n\t\t\tFROM `tabProperty Setter`\n\t\t\tWHERE doc_type IN (\"%s\")\"\"\" % in_string, as_dict=1):\n\t\t\tproperty_dict.setdefault(ps.get('doc_type'),\n\t\t\t\t\t{}).setdefault(cstr(ps.get('field_name')), []).append(ps)\n\n\t\treturn property_dict, doc_type_list",
"def to_PyGetSetDef_entry(cpp_struct_name, py_name, doc):\r\n return 'PROPERTY_FORWARDER(%s, \"%s\", %s)' % (\r\n cpp_struct_name, py_name, doc)",
"def exporters():\n return dict(_exporters)",
"def getTestSets():\n return list(_testsetdict.keys())",
"def getBuildSets():",
"def get_functions():\n\treturn [f for f in globals() if f.startswith('make_')]",
"def get_generators(as_list=False):\n\n return cmakeutil.get_generators(CMakeBuilder.path, as_list)",
"def get(self, opset: OpsetVersion) -> Optional[Set[Callable]]:\n return self._functions.get(opset)",
"def getAllKnownCallees():\n callees = set()\n for callee in calls:\n callees.add(callee[1])\n return callees",
"def get_a_list_of_testset_names() -> str:\n message = 'The available test sets are:'\n for testset in sorted(DATASETS.keys(), reverse=True):\n message += '\\n%20s: %s' % (testset, DATASETS[testset].get('description', ''))\n return message",
"def getListCreators(self):\n return _libsbml.ModelHistory_getListCreators(self)",
"def get_tools(cls):\n pass"
] | [
"0.5476404",
"0.5249653",
"0.51922673",
"0.51390755",
"0.5105949",
"0.50751984",
"0.5071153",
"0.5018218",
"0.50118506",
"0.5000767",
"0.4974314",
"0.4955749",
"0.49450973",
"0.49285832",
"0.49245515",
"0.49245515",
"0.4890076",
"0.48870137",
"0.48566785",
"0.48400316",
"0.48038697",
"0.47798193",
"0.47735214",
"0.4766151",
"0.47636086",
"0.4747328",
"0.4725793",
"0.47217333",
"0.4716518",
"0.4705116"
] | 0.72217596 | 0 |
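A sketch of what the to_PyGetSetDef function in the record above generates for a single hypothetical entry; the expected C output is shown as comments with indentation approximated:

    # one hypothetical (cpp_struct_name, py_name, doc) entry
    entries = [('Frame_title_prop', 'title', 'title_doc')]
    print(to_PyGetSetDef('Frame', entries))
    # static PyGetSetDef Frame_getseters[] = {
    #     PROPERTY_FORWARDER(Frame_title_prop, "title", title_doc),
    #     {nullptr,nullptr,nullptr,nullptr,nullptr} // Sentinel
    # };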
Creates a string of a C PyMethodDef array named <name>_methods, containing all the entries in the list (as created by to_PyMethodDef_entry). Emits a #include line for each file in the extra_includes list after the regular entries (before the sentinel). | def to_PyMethodDef(name, entries, extra_includes):
methodEntries = [to_PyMethodDef_entry(items) for items in entries]
if name is not None:
methodDef = ('static PyMethodDef %s_methods[] = {\n ' % name +
',\n '.join(methodEntries) + ',\n ')
else:
methodDef = ',\n'.join(methodEntries) + ',\n '
for include in extra_includes:
methodDef += '#include "%s"\n' % include
if name is not None:
methodDef += '{nullptr,nullptr,0,nullptr} // Sentinel\n};'
return methodDef | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def to_PyMethodDef_entry(items):\r\n\r\n entry_type = items[0]\r\n items = items[1:]\r\n if entry_type == 'method':\r\n return 'FORWARDER(%s, %s, \"%s\", %s)' % items\r\n elif entry_type == 'function':\r\n return 'FREE_FORWARDER(%s, %s, \"%s\", %s)' % items\r\n elif entry_type == 'method_template':\r\n return 'FORWARDER(%s<common_type>, %s, \"%s\", %s)' % items\r\n else:\r\n assert False",
"def _make_methods(functions, modname):\n methods_table = []\n codes = []\n for funcname, flags, code in functions:\n cfuncname = \"%s_%s\" % (modname, funcname)\n if 'METH_KEYWORDS' in flags:\n signature = '(PyObject *self, PyObject *args, PyObject *kwargs)'\n else:\n signature = '(PyObject *self, PyObject *args)'\n methods_table.append(\n \"{\\\"%s\\\", (PyCFunction)%s, %s},\" % (funcname, cfuncname, flags))\n func_code = \"\"\"\n static PyObject* {cfuncname}{signature}\n {{\n {code}\n }}\n \"\"\".format(cfuncname=cfuncname, signature=signature, code=code)\n codes.append(func_code)\n\n body = \"\\n\".join(codes) + \"\"\"\n static PyMethodDef methods[] = {\n %(methods)s\n { NULL }\n };\n static struct PyModuleDef moduledef = {\n PyModuleDef_HEAD_INIT,\n \"%(modname)s\", /* m_name */\n NULL, /* m_doc */\n -1, /* m_size */\n methods, /* m_methods */\n };\n \"\"\" % dict(methods='\\n'.join(methods_table), modname=modname)\n return body",
"def create_included_function_list():\n import iteration_utilities\n from iteration_utilities import Iterable\n from itertools import chain\n from operator import itemgetter\n from astropy.table import Table\n from astropy.io.ascii import RST\n\n it = Iterable(chain(iteration_utilities._cfuncs.__dict__.items(),\n iteration_utilities._helpers._performance.__dict__.items(),\n iteration_utilities._recipes._core.__dict__.items(),\n iteration_utilities._recipes._additional.__dict__.items())\n # Exclude PY2 variable and private functions\n ).filterfalse(lambda x: x[0].startswith(('PY2', '_'))\n # Exclude everything that has no __module__\n ).filter(lambda x: hasattr(x[1], '__module__')\n # Only include functions that come from the package\n ).filter(lambda x: x[1].__module__.startswith('iteration_utilities')\n # Remove duplicate names\n ).unique_everseen(itemgetter(0)\n # Sort lexically\n ).get_sorted(key=lambda x: x[0].lower())\n\n it = Iterable(it\n # Create a Sphinx link from function name and module\n ).map(lambda i: ':py:func:`~{}.{}`'.format(i[1].__module__, i[0])\n # Group into 4s so we get a 4 column Table\n ).grouper(4, fillvalue=''\n # Convert to list because Table expects it.\n ).as_list()\n\n return '\\n'.join(RST().write(Table(rows=it)))",
"def main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--input', help='Input .py file', nargs='+')\n args = parser.parse_args()\n\n mod_func = []\n\n for pyfile in args.input:\n tree = ast.parse(open(pyfile).read())\n\n methods = sorted({node.name for node in ast.walk(tree)\n if isinstance(node, ast.FunctionDef)})\n mod_func.extend([[pyfile, methods[i]] for i in range(len(methods))])\n\n write_csv(\"meth_func.csv\", mod_func)",
"def __build_method__(self) -> str:\n out = \"\"\n for imp in self.__base_imports__:\n out += imp + \"\\n\"\n return out + self.__method + \"\\n\" + self.__return__",
"def add_methods(self):\n for name in self.WRAP_METHODS_LIST: self.add_method_list(name)\n for name in self.WRAP_METHODS_NDA: self.add_method_nda(name)\n for name in self.WRAP_METHODS_2NDA: self.add_method_double_nda(name)",
"def getmethparlist(ob):\n defText = callText = \"\"\n # bit of a hack for methods - turn it into a function\n # but we drop the \"self\" param.\n # Try and build one for Python defined functions\n args, varargs, varkw = inspect.getargs(ob.__code__)\n items2 = args[1:]\n realArgs = args[1:]\n defaults = ob.__defaults__ or []\n defaults = [\"=%r\" % (value,) for value in defaults]\n defaults = [\"\"] * (len(realArgs)-len(defaults)) + defaults\n items1 = [arg + dflt for arg, dflt in zip(realArgs, defaults)]\n if varargs is not None:\n items1.append(\"*\" + varargs)\n items2.append(\"*\" + varargs)\n if varkw is not None:\n items1.append(\"**\" + varkw)\n items2.append(\"**\" + varkw)\n defText = \", \".join(items1)\n defText = \"(%s)\" % defText\n callText = \", \".join(items2)\n callText = \"(%s)\" % callText\n return defText, callText",
"def getmethparlist(ob):\n defText = callText = \"\"\n # bit of a hack for methods - turn it into a function\n # but we drop the \"self\" param.\n # Try and build one for Python defined functions\n args, varargs, varkw = inspect.getargs(ob.__code__)\n items2 = args[1:]\n realArgs = args[1:]\n defaults = ob.__defaults__ or []\n defaults = [\"=%r\" % (value,) for value in defaults]\n defaults = [\"\"] * (len(realArgs)-len(defaults)) + defaults\n items1 = [arg + dflt for arg, dflt in zip(realArgs, defaults)]\n if varargs is not None:\n items1.append(\"*\" + varargs)\n items2.append(\"*\" + varargs)\n if varkw is not None:\n items1.append(\"**\" + varkw)\n items2.append(\"**\" + varkw)\n defText = \", \".join(items1)\n defText = \"(%s)\" % defText\n callText = \", \".join(items2)\n callText = \"(%s)\" % callText\n return defText, callText",
"def _methods_of(cls):\n # The idea of unbound methods exists in Python 2 and was removed in\n # Python 3, so \"inspect.ismethod\" is used here for Python 2 and\n # \"inspect.isfunction\" for Python 3.\n all_methods = inspect.getmembers(\n cls, predicate=lambda x: inspect.ismethod(x) or inspect.isfunction(x))\n methods = [m for m in all_methods if not m[0].startswith(\"_\")]\n\n help_groups = {}\n for m in methods:\n group = getattr(m[1], \"help_group\", \"0\")\n help_groups.setdefault(group, []).append(m)\n\n if len(help_groups) > 1:\n # we should sort methods by groups\n methods = []\n for group in sorted(help_groups.items(), key=lambda x: x[0]):\n if methods:\n # None -> empty line between groups\n methods.append((None, None))\n methods.extend(group[1])\n return methods",
"def create_included_function_list_readme():\n import iteration_utilities\n from iteration_utilities import Iterable\n from itertools import chain\n from operator import itemgetter\n from astropy.table import Table\n from astropy.io.ascii import RST\n\n rtd_link = '`{name} <http://iteration-utilities.readthedocs.io/en/latest/api/{file}.html#{module}.{name}>`_'\n\n module_to_file = {'iteration_utilities': 'cfuncs',\n 'iteration_utilities._cfuncs': 'cfuncs',\n 'iteration_utilities._helpers._performance': 'helper',\n 'iteration_utilities._recipes._additional': 'additional',\n 'iteration_utilities._recipes._core': 'core',\n }\n\n it = Iterable(chain(iteration_utilities._cfuncs.__dict__.items(),\n iteration_utilities._helpers._performance.__dict__.items(),\n iteration_utilities._recipes._core.__dict__.items(),\n iteration_utilities._recipes._additional.__dict__.items())\n # Exclude PY2 variable and private functions\n ).filterfalse(lambda x: x[0].startswith(('PY2', '_'))\n # Exclude everything that has no __module__\n ).filter(lambda x: hasattr(x[1], '__module__')\n # Only include functions that come from the package\n ).filter(lambda x: x[1].__module__.startswith('iteration_utilities')\n # Remove duplicate names\n ).unique_everseen(itemgetter(0)\n # Sort lexically\n ).get_sorted(key=lambda x: x[0].lower())\n\n it = Iterable(it\n # Create a Sphinx link from function name and module\n ).map(lambda i: rtd_link.format(file = module_to_file[i[1].__module__],\n module = i[1].__module__,\n name = i[0])\n # Group into 4s so we get a 4 column Table\n ).grouper(4, fillvalue=''\n # Convert to list because Table expects it.\n ).as_list()\n\n return '\\n'.join(RST().write(Table(rows=it)))",
"def method_list(self):\n\t\tmethod_names = list(self.methods.keys())\n\t\tmethod_names.sort()\n\t\tmethod_list = []\n\t\tfor mn in method_names:\n\t\t\tmethod_list += [self.methods[mn]]\n\t\treturn method_list",
"def generate_ext_stub(cls):\n # Qualified name (C Version)\n qname = '_'.join(symbol_table.prefix+[cls.name])\n self.exts.append(qname)\n\n if self.config.verbose:\n import sys\n mod_name = '.'.join(symbol_table.prefix[1:]+[cls.name])\n sys.stdout.write('\\r'+' '*80)\n sys.stdout.write('\\rgenerating glue code for %s'%mod_name)\n sys.stdout.flush()\n\n # Consolidate all methods, defined and inherited\n cls.scan_methods()\n \n # chpl_defs = ChapelScope(chpl_stub)\n ci = self.ClassInfo(cls)\n\n # if self.server:\n # ci.impl = self.pkg_impl\n\n ci.stub.new_def(babel.externals(cls.get_scoped_id()))\n ci.stub.new_def(babel.builtin_stub_functions(cls.get_scoped_id()))\n \n has_contracts = ior_template.generateContractChecks(cls)\n self.gen_default_methods(cls, has_contracts, ci)\n\n #print qname, map(lambda x: x[2][1]+x[2][2], cls.all_methods)\n for method in cls.all_methods:\n (Method, Type, Name, Attrs, Args, \n Except, From, Requires, Ensures, DocComment) = method\n ci.epv.add_method((method, Type, Name, Attrs, \n babel.drop_rarray_ext_args(Args),\n Except, From, Requires, Ensures, DocComment))\n\n # all the methods for which we would generate a server impl\n impl_methods = babel.builtins+cls.get_methods()\n impl_methods_names = [sidlir.method_method_name(m) for m in impl_methods]\n\n # client\n for method in cls.all_methods:\n has_impl = sidlir.method_method_name(method) in impl_methods_names\n self.generate_client_method(symbol_table, method, ci, has_impl)\n\n if self.server:\n class_methods = filter(sidlir.is_not_static, impl_methods)\n static_methods = filter(sidlir.is_static, impl_methods)\n\n # # Class\n # ci.impl.new_def(gen_doc_comment(cls.doc_comment, chpl_stub)+\n # 'class %s_Impl {'%qname)\n # splicer = '.'.join(cls.qualified_name+['Impl'])\n # ci.impl.new_def('// DO-NOT-DELETE splicer.begin(%s)'%splicer)\n # ci.impl.new_def('// DO-NOT-DELETE splicer.end(%s)'%splicer)\n # for method in class_methods: \n # self.generate_server_method(symbol_table, method, ci)\n\n # ci.impl.new_def('} // class %s_Impl'%qname)\n # ci.impl.new_def('')\n # ci.impl.new_def('')\n\n # # Static\n # if static_methods:\n # ci.impl.new_def('// all static member functions of '+qname)\n # ci.impl.new_def(gen_doc_comment(cls.doc_comment, chpl_stub)+\n # '// FIXME: chpl allows only one module per library //'+\n # ' module %s_static_Impl {'%qname)\n\n # for method in static_methods:\n # self.generate_server_method(symbol_table, method, ci)\n\n # ci.impl.new_def('//} // module %s_static_Impl'%qname)\n # ci.impl.new_def('')\n # ci.impl.new_def('')\n\n\n # # Chapel Stub (client-side Chapel bindings)\n # self.generate_chpl_stub(chpl_stub, qname, ci)\n \n # # Because of Chapel's implicit (filename-based) modules it\n # # is important for the Chapel stub to be one file, but we\n # # generate separate files for the cstubs\n # self.pkg_chpl_stub.new_def(chpl_stub)\n\n # Stub (in C), the order of these definitions is somewhat sensitive\n ci.stub.genh_top(ir.Import(qname+'_IOR'))\n ci.stub.gen(ir.Import(ci.stub._name))\n\n pkg_name = '_'.join(symbol_table.prefix)\n ci.stub.gen(ir.Import(pkg_name))\n ci.stub.write()\n\n # IOR\n ior_template.generate_ior(ci, with_ior_c=self.server, _braid_config=self.config )\n ci.ior.write()\n\n # Skeleton\n if self.server:\n self.generate_skeleton(ci, qname)\n\n # Convenience header\n ext_h = CFile(qname)\n ext_h.genh(ir.Import(qname+'_IOR'))\n ext_h.genh(ir.Import(qname+'_Stub'))\n ext_h.write()\n\n # Makefile\n self.classes.append(qname)",
"def _get_methods(self):\n\n methods = inspect.getmembers(self, predicate=callable)\n method_list = set()\n\n for name, _ in methods:\n if (name in ('proxy', 'start', 'stop', 'part', 'join',)\n or name[0] == '_'):\n continue\n\n method_list.add(name)\n\n return method_list",
"def get_methods_docstr(\r\n cls_or_func: Callable,\r\n methods: Union[Sequence[Callable], Mapping[str, Any]],\r\n ignore_kwargs: Optional[List[str]] = None) -> str:\r\n method_template = \"\"\"\r\n Methods\r\n -------\r\n method=\"{name}\"\r\n\r\n .. code-block:: python\r\n\r\n {mainname}(..., {kwargs_sig})\r\n \"\"\"\r\n res = cls_or_func.__doc__ or \"\"\r\n mainname = cls_or_func.__name__\r\n\r\n def_ignore_kwargs = [\"params\"]\r\n if ignore_kwargs is None:\r\n ignore_kwargs = []\r\n ignore_kwargs = ignore_kwargs + def_ignore_kwargs\r\n\r\n if isinstance(methods, dict):\r\n generator = methods.items() # type: Union[ItemsView[str, Any], Generator[Tuple[str, Any], None, None]]\r\n elif isinstance(methods, list):\r\n generator = ((method.__name__, method) for method in methods)\r\n else:\r\n raise TypeError(\"methods must be a list or a dict\")\r\n\r\n for name, method in generator:\r\n # get the signatures\r\n sigparams = inspect.signature(method).parameters\r\n kwargs_sig_list = ['method=\"%s\"' % name]\r\n kwargs_sig_list2 = [\"%s=%s\" % (pname, val) for pname, val in _get_default_parameters(sigparams, ignore_kwargs)]\r\n kwargs_sig_list = kwargs_sig_list + ([\"*\"] if len(kwargs_sig_list2) > 0 else []) + kwargs_sig_list2\r\n kwargs_sig = \", \".join(kwargs_sig_list)\r\n\r\n # add the method name\r\n methoddoc = method.__doc__\r\n res = res + method_template.format(\r\n mainname=mainname,\r\n name=name,\r\n kwargs_sig=kwargs_sig,\r\n )\r\n if methoddoc is not None:\r\n method_doc = _add_indent(method.__doc__, \" \" * 4)\r\n res = res + method_doc\r\n return res",
"def list_methods(self):\n return list(self.methods.keys())",
"def list_methods(self, request, context):\n response = ListMethodsResponse()\n for method in self._delegator.list_methods(request.component):\n response.methods.append(method)\n return response",
"def getRegisteredMethods(file):\n\n return nfdd.getNativeFunctions(file)",
"def listMethods(self, req):\n for method in self.all_methods(req):\n yield method.name",
"def list_methods(client: Client) -> List[str]:\n return client._client.ListMethods()",
"def get_extension_funcs():\n raise NotImplementedError()",
"def _method_calls(fn):\n return [x[1] for x in re.findall(METHOD, getsource(fn))]",
"def getMethods(iface):\n return getElements(iface, IMethod).items()",
"def filter_methods(methods: list) -> list:\n \n if OCTOPUS_INCLUSION_PATTERNS:\n methods = filter_on_inclusion(OCTOPUS_INCLUSION_PATTERNS, methods)\n \n elif OCTOPUS_EXCLUSION_PATTERNS:\n methods = filter_on_exclusion(OCTOPUS_EXCLUSION_PATTERNS, methods) \n \n return methods",
"def method_decl(self):\r\n return '\\t{\"%s\", %s, %s, \"%s\"}' % (\r\n self.name, self.name, self.method, self.doc)",
"def all_methods(self, req):\n for provider in self.method_handlers:\n for candidate in provider.xmlrpc_methods():\n # Expand all fields of method description\n yield Method(provider, *candidate)",
"def get_access_methods(object_info: dict) -> List[str]:\n if object_info is None:\n logger.critical(\"no access methods defined for this file\")\n return []\n return object_info[\"access_methods\"]",
"def methodSignature(self, req, method):\n p = self.get_method(method)\n return [','.join([RPC_TYPES[x] for x in sig]) for sig in p.xmlrpc_signatures()]",
"def make_c_header(self):\n res = \\\n\"\"\"PyThreadState* ___madz_LANG_python_thread_state; //Holds Thread State for this interpreter\nPyObject *___madz_LANG_python_wrapper_module; //Hold Pointer to the _madz.py file representing this plugin\ntypedef struct{{\n{function_pointers}\n}}___madz_LANG_python_TYPE_;\n___madz_LANG_python_TYPE_ ___madz_LANG_python_OUTPUT;\nvoid ___madz_init_imports();\n{fn_dec}\n\n\"\"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n #TODO function_pointers, all same except\n fragments ={\"fn_dec\" : \"\", \"function_pointers\" : \"\"}\n fn = \"\"\"{rettype}{fnname}({args});\\n\"\"\"\n pointer = \"\"\" {prettype} (*{nodename})({args});\\n\"\"\"\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n frg = {\n \"prettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n\n }\n fragments[\"fn_dec\"] += fn.format(**frg)\n fragments[\"function_pointers\"] += pointer.format(**frg)\n if fragments[\"function_pointers\"] == \"\":\n fragments[\"function_pointers\"] = \"uint8_t _madz_empty;\"\n return res.format(**fragments)",
"def available_methods():\n return {mc.method_id: mc for mc in MethodFactory.method_classes}",
"def _fc_function_definitions(self) -> str:\n result = 'extern \"C\" {\\n\\n'\n for namespace in self.namespaces:\n for member in namespace.members:\n result += member.fortran_c_wrapper()\n\n result += '}\\n\\n'\n return result"
] | [
"0.6497351",
"0.6201312",
"0.59603804",
"0.5856507",
"0.57363343",
"0.5670631",
"0.56094706",
"0.56094706",
"0.5410812",
"0.5366973",
"0.5323587",
"0.52034914",
"0.51885706",
"0.51738644",
"0.5158081",
"0.51455",
"0.51223594",
"0.5052642",
"0.5005883",
"0.49957657",
"0.49465442",
"0.49348673",
"0.4926541",
"0.4903981",
"0.49019453",
"0.49009365",
"0.48925713",
"0.48799035",
"0.48462152",
"0.48262075"
] | 0.7831075 | 0 |
Writes an html-file documenting the passed in methods, using the docstrings (as returned by parse_file) | def write_method_doc(file_name, entries):
with open(file_name, 'w', newline='\n') as f:
f.write('<table border="0">')
f.write('<tr><td><b>Method</b></td><td><b>Description</b></td></tr>')
for items in sorted(entries, key=itemgetter(3)):
f.write('<tr><td valign="top">%s</td><td>%s</td></tr>' %
(items[3], doc_to_html(items[4])))
f.write('</table>') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _write_member_documentation_pages(\n documenter: sphinx.ext.autodoc.Documenter):\n for entry in _get_documenter_members(documenter):\n if entry.is_inherited:\n continue\n if (entry.overload and entry.overload.overload_id and\n re.fullmatch('[0-9]+', entry.overload.overload_id)):\n logger.warning('Unspecified overload id: %s', entry.object_name)\n member_rst_path = os.path.join(documenter.env.app.srcdir, 'python', 'api',\n entry.page_name + '.rst')\n objtype = entry.documenter.objtype\n member_content = ''\n if objtype == 'class':\n member_content += ':duplicate-local-toc:\\n\\n'\n member_content += sphinx_utils.format_directive(\n 'tensorstore-python-apidoc',\n options=dict(\n fullname=entry.full_name,\n objtype=objtype,\n importname=entry.import_name,\n objectdescription=True,\n subscript=entry.subscript,\n overload=cast(ParsedOverload, entry.overload).overload_id,\n ),\n )\n pathlib.Path(member_rst_path).write_text(member_content)\n _write_member_documentation_pages(entry.documenter)",
"def method(name, doc):\n import html\n\n params = method_params(doc)\n doc = html.escape(doc)\n return string.Template(METHOD_TEMPLATE).substitute(\n name=name, params=params, doc=doc\n )",
"def writeHtmlFile(nodes, functionName, filename, errorsOnly):\n fout = open(filename, 'w')\n fout.write('<html>\\n')\n fout.write('<head>\\n')\n fout.write(' <style type=\"text/css\">\\n')\n fout.write(' body { font-size: 0.8em }\\n')\n fout.write(' th { background-color: #A3C159; text-transform: uppercase }\\n')\n fout.write(' td { background-color: white; vertical-align: text-top }\\n')\n fout.write(' pre { background-color: #EEEEEE }\\n')\n fout.write(' </style>\\n')\n fout.write('</head>\\n')\n fout.write('<body>\\n')\n\n fout.write('<a href=\"index.htm\">Home</a> -- ')\n if errorsOnly:\n fout.write('<a href=\"all-'+functionName+'.htm\">All test cases</a>')\n else:\n fout.write('<a href=\"errors-'+functionName+'.htm\">Error test cases</a>')\n fout.write('<br><br>')\n\n testclass = None\n num = 0\n for node in nodes:\n if errorsOnly and node['expected']=='':\n continue\n if trimname(node['functionName']) == functionName:\n num = num + 1\n\n if not testclass:\n testclass = node['testclass']\n fout.write('<h1>' + node['testclass'] + '::' + functionName + '</h1>')\n fout.write('<table border=\"0\" cellspacing=\"0\">\\n')\n fout.write(' <tr><th>Nr</th><th>Code</th><th>Expected</th></tr>\\n')\n\n fout.write(' <tr><td>' + str(num) + '</td>')\n fout.write('<td><pre>' + strtoxml(node['code']).replace('\\\\n', '\\n') + '</pre></td>')\n fout.write('<td>' + strtoxml(node['expected']).replace('\\\\n', '<br>') + '</td>')\n fout.write('</tr>\\n')\n\n if testclass != None:\n fout.write('</table>\\n');\n fout.write('</body></html>\\n')\n fout.close()",
"def makeDocFile(self):\n\n f_out = \"%s/%s-doc.php\" % (self.dir_out, self.project_id)\n version = max(self.versions)\n\n with open(f_out, 'w') as f:\n f.write(\"<!DOCTYPE html>\\n\" \\\n \"<html xmlns=\\\"http://www.w3.org/1999/xhtml\\\">\\n\" \\\n \"<head>\\n\" \\\n \"<meta http-equiv=\\\"Content-Type\\\" content=\\\"text/html; charset=utf-8\\\"/>\\n\" \\\n \"\\n\" \\\n \"<title>Kit&Pack − Ultimate Power Booster</title>\\n\" \\\n \"<link rel=\\\"shortcut icon\\\" type=\\\"image/png\\\" href=\\\"../favicon.png\\\"/>\" \\\n \"<link rel=\\\"stylesheet\\\" type=\\\"text/css\\\" href=\\\"../css/doc-2.css\\\" />\\n\"\n \"\\n\" \\\n \"</head>\\n\" \\\n \"<body>\\n\" \\\n \"\\n\" \\\n \"<h1>Ultimate Power Booster</h1>\" \\\n \"\\n\")\n\n # Write a list of other versions of the documentation\n f.write(\"<p>Versions de cette documentation.</p>\\n\")\n f.write(\"<ul>\\n\")\n for v in self.versions:\n f.write(\"\\t<li><a href=\\\"%s.php\\\">%s</a></li>\\n\" % (\n v, v))\n f.write(\"</ul>\\n\\n\")\n\n f.write(\"<?php\\n\" \\\n \"include(\\\"%s.php\\\")\\n\" \\\n \"?>\\n\" \\\n \"\\n\" \\\n \"</body>\\n\" \\\n \"</html>\" % (version))",
"def store_documentation(self, file_path, *args, **dargs):\n pass",
"def get_methods_docstr(\r\n cls_or_func: Callable,\r\n methods: Union[Sequence[Callable], Mapping[str, Any]],\r\n ignore_kwargs: Optional[List[str]] = None) -> str:\r\n method_template = \"\"\"\r\n Methods\r\n -------\r\n method=\"{name}\"\r\n\r\n .. code-block:: python\r\n\r\n {mainname}(..., {kwargs_sig})\r\n \"\"\"\r\n res = cls_or_func.__doc__ or \"\"\r\n mainname = cls_or_func.__name__\r\n\r\n def_ignore_kwargs = [\"params\"]\r\n if ignore_kwargs is None:\r\n ignore_kwargs = []\r\n ignore_kwargs = ignore_kwargs + def_ignore_kwargs\r\n\r\n if isinstance(methods, dict):\r\n generator = methods.items() # type: Union[ItemsView[str, Any], Generator[Tuple[str, Any], None, None]]\r\n elif isinstance(methods, list):\r\n generator = ((method.__name__, method) for method in methods)\r\n else:\r\n raise TypeError(\"methods must be a list or a dict\")\r\n\r\n for name, method in generator:\r\n # get the signatures\r\n sigparams = inspect.signature(method).parameters\r\n kwargs_sig_list = ['method=\"%s\"' % name]\r\n kwargs_sig_list2 = [\"%s=%s\" % (pname, val) for pname, val in _get_default_parameters(sigparams, ignore_kwargs)]\r\n kwargs_sig_list = kwargs_sig_list + ([\"*\"] if len(kwargs_sig_list2) > 0 else []) + kwargs_sig_list2\r\n kwargs_sig = \", \".join(kwargs_sig_list)\r\n\r\n # add the method name\r\n methoddoc = method.__doc__\r\n res = res + method_template.format(\r\n mainname=mainname,\r\n name=name,\r\n kwargs_sig=kwargs_sig,\r\n )\r\n if methoddoc is not None:\r\n method_doc = _add_indent(method.__doc__, \" \" * 4)\r\n res = res + method_doc\r\n return res",
"def get_doc(filename: str) -> str:\n\n # Create the header.\n doc = \"# `\" + filename.split(\"/\")[-1] + \"`\\n\\n\"\n\n lines: List[str] = Path(filename).read_text().split(\"\\n\")\n\n for i in range(len(lines)):\n # Create a class description.\n if lines[i].startswith(\"class\"):\n # Skip private classes.\n match = re.search(\"class _(.*):\", lines[i])\n if match is not None:\n continue\n # Add the name of the class\n class_name = re.search(\"class (.*):\", lines[i]).group(1)\n doc += f\"## `{class_name}`\\n\\n\"\n # Add an example.\n class_example = f\"`from tdw.{filename[:-3].replace('/', '.')} import \" + re.sub(r\"(.*)\\((.*)\\)\", r'\\1',\n class_name) + \"`\"\n doc += class_example + \"\\n\\n\"\n doc += PyDocGen.get_class_description(lines, i)\n # Parse an enum.\n if re.search(r\"class (.*)\\(Enum\\):\", lines[i]) is not None:\n doc += \"\\n\\n\" + PyDocGen.get_enum_values(lines, i)\n doc += \"\\n\\n***\\n\\n\"\n # Create a function description.\n elif lines[i].strip().startswith(\"def\"):\n # Skip private functions.\n match = re.search(\"def _(.*)\", lines[i])\n if match is not None and \"__init__\" not in lines[i]:\n continue\n # Append the function description.\n doc += PyDocGen.get_function_documentation(lines, i) + \"\\n\\n***\\n\\n\"\n\n # Move the \"main class\" to the top of the document.\n main_class_name = ''.join(x.capitalize() or '_' for x in filename[:-3].split('_'))\n main_class = re.search(\"(## `\" + main_class_name + \"`((.|\\n)*))\", doc)\n if main_class is not None:\n main_class = main_class.group(1)\n doc_header = re.search(\"(.*)\\n\\n\", doc).group(0)\n doc_temp = doc.replace(main_class, \"\").replace(doc_header, \"\")\n doc = doc_header + main_class + doc_temp\n\n return doc",
"def generate_headers(src_files, out_root, doc_root):\r\n\r\n if not os.path.exists(out_root):\r\n os.makedirs(out_root)\r\n did_print_heading = False\r\n changed = False\r\n for (name, files) in src_files:\r\n if files.__class__ == str:\r\n src = files\r\n files = (src,)\r\n else:\r\n src = files[0]\r\n\r\n dst = src.replace(\".hh\", \"-method-def.hh\")\r\n dst = dst.replace(\".cpp\", \"-method-def.hh\")\r\n dst = os.path.join(out_root, os.path.split(dst)[1])\r\n\r\n dst_doc = src.replace(\".hh\", '-methods.txt')\r\n dst_doc = dst_doc.replace(\".cpp\", '-methods.txt')\r\n dst_doc_filename = os.path.split(dst_doc)[1]\r\n dst_doc_filename = os.path.join(doc_root, dst_doc_filename)\r\n\r\n dst_prop_doc = src.replace(\".cpp\", '-properties.txt')\r\n dst_doc_prop_filename = os.path.split(dst_prop_doc)[1]\r\n dst_doc_prop_filename = os.path.join(doc_root, dst_doc_prop_filename)\r\n\r\n if util.changed(src, dst):\r\n if not did_print_heading:\r\n print(\"* Generating Python method definitions.\")\r\n did_print_heading = True\r\n generate(files, dst, dst_doc_filename, dst_doc_prop_filename, name)\r\n changed = True\r\n if not changed:\r\n print(\"* Python method definitions up to date.\")",
"def __html__(self, file_path:str):\n raise NotImplementedError",
"def generate(src_file_names,\r\n dst_file_name,\r\n dst_doc_file_name,\r\n dst_property_doc_file_name,\r\n name):\r\n methods = []\r\n properties = []\r\n extra_includes = []\r\n entries = (methods, properties)\r\n for src_file_name in src_file_names:\r\n check_file(src_file_name)\r\n m, p = parse_file(src_file_name)\r\n methods.extend(m)\r\n properties.extend(p)\r\n\r\n extra_includes.extend(find_extra_include(src_file_name))\r\n if len(entries[0]) == 0 and len(entries[1]) == 0:\r\n print(\"No entries found in %s.\" % src_file_name)\r\n exit(1)\r\n\r\n write_result(dst_file_name, name, entries, extra_includes, src_file_names)\r\n write_method_doc(dst_doc_file_name, entries[0])\r\n write_property_doc(dst_property_doc_file_name, entries[1])",
"def generateHtml(self, tokens, html, css):\n\n\t\tf = open(html, \"w\")\n\t\tf.write(\"\"\"<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n<title>Document</title>\n</head>\n\t\t\"\"\")\n\t\tif os.path.exists(\"css/default.css\"):\n\t\t\tstyle = open(\"css/default.css\", \"r\").read()\n\t\telse:\n\t\t\tstyle = open(f\"{css}css/default.css\", \"r\").read()\n\t\tf.write(f\"<style>\\n{style}\\n</style>\\n\")\n\t\tf.write(\"<body>\")\n\t\tf.write('<div class=\"markdown-body\">')\n\t\tfor t in tokens:\n\t\t\tf.write(t.html)\n\t\tf.write(\"</div>\")\n\t\tf.write(\"</body>\")\n\t\tf.write(\"</html>\")\n\t\tf.close()",
"def test_fs_func_docstrings(self):\n for func in self.fs_f:\n self.assertIsNot(func[1].__doc__, None,\n \"{:s} method needs a docstring\".format(func[0]))\n self.assertTrue(len(func[1].__doc__) >= 1,\n \"{:s} method needs a docstring\".format(func[0]))",
"def __init__(self,\n source_path='./*.py',\n template_path='./docs/templates/*_template.md',\n output_path='./docs/documentation.md',\n ignore=['extra']\n ):\n\n template_files = glob.glob(template_path)\n # filename = t.split('/')[-1]\n self.sources = {os.path.basename(s).split('.')[0]: os.path.normpath(s) for s in glob.glob(source_path) if not any(i in s for i in ignore)}\n self.templates = {os.path.basename(t).split('_')[0]: os.path.normpath(t) for t in template_files}\n self.output_path = output_path\n\n self.template_content = {}\n for k, v in self.templates.items():\n path = v\n with open(path, 'r') as template_file:\n self.template_content[k] = template_file.read()\n\n self.text = ''\n self.classes = []\n self.headers = ['Params', 'Returns', 'Attributes']\n self.hierarchy = [\n 'class',\n 'method',\n 'parameter',\n 'pinfo',\n 'extra'\n ]\n self.tab_length = 6",
"def store_documentation(self, file_path, css_path=None):\n html = self.get_documentation(css_path)[1]\n with open(file_path, \"w+\", encoding=\"utf8\") as f:\n f.write(html)",
"def doctest_markup_files(fnames):\n for fname in fnames:\n with open(fname, 'rt') as fobj:\n res = list(fobj)\n out, errs = doctest_markup(res)\n for err_tuple in errs:\n print('Marked line %s unchanged because \"%s\"' % err_tuple)\n with open(fname, 'wt') as fobj:\n fobj.write(''.join(out))",
"def function_to_document(foo, bar):\n return foo + bar",
"def fini_doc(self):\n raise NotImplementedError()",
"def main_docstring():",
"def opendocs():\n _open_file('_build/index.html')",
"def docstrings(param1, param2):\n return \"example string\"",
"def _generate(self, markup=None):\n raise NotImplementedError",
"def dumpDoc(modulename, directory=None):\n docco = getObjectsDefinedIn(modulename, directory)\n print('codegrab.py - ReportLab Documentation Utility')\n print('documenting', modulename + '.py')\n print('-------------------------------------------------------')\n print()\n if docco.functions == []:\n print('No functions found')\n else:\n print('Functions:')\n for f in docco.functions:\n print(f.proto)\n print(' ' + f.doc)\n\n if docco.classes == []:\n print('No classes found')\n else:\n print('Classes:')\n for c in docco.classes:\n print(c.name)\n print(' ' + c.doc)\n for m in c.methods:\n print(m.proto) # it is already indented in the file!\n print(' ' + m.doc)\n print()",
"def func_doc():",
"def documento():\r\n\tpass",
"def test_methods(self):\n\n #log\n self.logger.debug('\\n\\nExecute test methods:\\n-----------------------------')\n\n\n \n #test methods here\n #------------------------------------------------------------------\n\n #dummy_method\n self.dummy_method()\n\n #stylesheet_test\n #self.stylesheet_test(self.wdgt_explanation)\n\n #------------------------------------------------------------------\n\n\n\n #log\n self.logger.debug('\\n\\n-----------------------------\\nFinished test methods.')",
"def test_user_func_docstrings(self):\n for func in self.student_f:\n self.assertIsNot(func[1].__doc__, None,\n \"{:s} method needs a docstring\".format(func[0]))\n self.assertTrue(len(func[1].__doc__) >= 1,\n \"{:s} method needs a docstring\".format(func[0]))",
"def test_doc():\n pass",
"def write(self):\n with open(self.outputFile, \"w\") as outputFile:\n self.writeHeader(outputFile)\n # Get the classnames and sort them to get consistent ordering.\n names = [cls.name for cls in self.classes]\n classes = dict([(cls.name, cls) for cls in self.classes])\n for name in sorted(names):\n if self.verbosity > 1:\n utils.log(name)\n cls = classes[name]\n cls.write(outputFile)\n\n # can't just use pprint library because\n # pep8 will complain about formatting\n outputFile.write('\\npostMethods = \\\\\\n [(\\'')\n for i, tup in enumerate(self.postSignatures):\n url, request, response = tup\n if i != 0:\n outputFile.write(' (\\'')\n outputFile.write(url)\n outputFile.write('\\',\\n ')\n outputFile.write(request)\n outputFile.write(',\\n ')\n outputFile.write(response)\n outputFile.write(')')\n if i == len(self.postSignatures) - 1:\n outputFile.write(']\\n')\n else:\n outputFile.write(',\\n')",
"def write_result(file_name, name, entries, extra_includes, src_file_names):\r\n\r\n with open(file_name, 'w', newline='\\n') as f:\r\n f.write('// Generated by %s\\n' % os.path.basename(__file__))\r\n f.write('// Based on %s: %s\\n' %\r\n ((\"this file\" if len(src_file_names) < 2 else\r\n \"these files\"), \", \".join(src_file_names)))\r\n methods = entries[0]\r\n if len(methods) != 0:\r\n f.write(to_PyMethodDef(name, methods, extra_includes))\r\n f.write('\\n')\r\n\r\n properties = entries[1]\r\n if len(properties) != 0:\r\n f.write('\\n')\r\n f.write(to_PyGetSetDef(name, properties))",
"def test_user_func_docstrings(self):\n for func in self.user_f:\n self.assertIsNot(func[1].__doc__, None,\n \"{:s} method needs a docstring\".format(func[0]))\n self.assertTrue(len(func[1].__doc__) >= 1,\n \"{:s} method needs a docstring\".format(func[0]))"
] | [
"0.60740525",
"0.59743536",
"0.59427845",
"0.5824418",
"0.5816547",
"0.57680935",
"0.5738121",
"0.5711054",
"0.56876665",
"0.5671265",
"0.56643975",
"0.56380713",
"0.5587306",
"0.55807567",
"0.5538951",
"0.55344105",
"0.5531077",
"0.5515236",
"0.54965585",
"0.54924417",
"0.54919994",
"0.54866713",
"0.5455307",
"0.543309",
"0.5432222",
"0.5399194",
"0.538335",
"0.5373138",
"0.53405875",
"0.53238773"
] | 0.75820845 | 0 |
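A minimal, self-contained sketch of the table-writing pattern in the write_method_doc row above, assuming the entries reduce to (name, docstring) pairs; html.escape stands in for the source's doc_to_html helper and render_doc_table is an illustrative name, not part of the dataset.

# Illustrative sketch only -- mirrors the pattern of write_method_doc above.
import html
from operator import itemgetter

def render_doc_table(file_name, entries):
    # entries: (name, docstring) pairs, sorted by name, much as the
    # original sorts its richer tuples on itemgetter(3).
    with open(file_name, 'w', newline='\n') as f:
        f.write('<table border="0">')
        f.write('<tr><td><b>Method</b></td><td><b>Description</b></td></tr>')
        for name, doc in sorted(entries, key=itemgetter(0)):
            f.write('<tr><td valign="top">%s</td><td>%s</td></tr>' %
                    (name, html.escape(doc)))
        f.write('</table>')

render_doc_table('methods.html', [('get_size', 'Returns the image size.'),
                                  ('set_size', 'Resizes the image.')])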
Writes an html-file documenting the passed in properties, using the docstrings (as returned by parse_file). Expects a list of (property-name, doc-str) tuples. | def write_property_doc(file_name, entries):
if len(entries) == 0:
return
with open(file_name, 'w', newline='\n') as f:
f.write('<!-- Generated by %s -->' % os.path.basename(__file__))
f.write('<table border="0">')
f.write('<tr><td><b>Property</b></td><td><b>Description</b></td></tr>')
for items in entries:
f.write('<tr><td valign="top">%s</td><td>%s</td></tr>' %
(items[1], doc_to_html(items[2])))
f.write('</table>') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def render_to_file(properties,file):\n properties['tempfile']=None\n properties['remove_temp']=True\n properties['outfile']=file",
"def write_html(filelist):\n tmp = tempfile.NamedTemporaryFile(mode=\"w+\", suffix=\".html\", delete=False)\n\n tmp.write(r\"\"\"<!doctype html>\n <html>\n <head>\n <style type=\"text/css\">\n body {\n background-color: #DDD;\n padding: 10px;\n }\n img {\n display: block;\n margin: 0 auto 20px auto;\n box-shadow: 4px 4px 10px #333;\n }\n </style>\n </head>\n <body>\"\"\")\n\n for arg in filelist:\n tmp.write('<img src=\"file://%s\" width=\"100%%\" />\\n' % arg)\n\n tmp.write(r\"\"\"</body>\n </html>\"\"\")\n\n tmp.close()\n\n return tmp.name",
"def generateHtml(self, tokens, html, css):\n\n\t\tf = open(html, \"w\")\n\t\tf.write(\"\"\"<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n<title>Document</title>\n</head>\n\t\t\"\"\")\n\t\tif os.path.exists(\"css/default.css\"):\n\t\t\tstyle = open(\"css/default.css\", \"r\").read()\n\t\telse:\n\t\t\tstyle = open(f\"{css}css/default.css\", \"r\").read()\n\t\tf.write(f\"<style>\\n{style}\\n</style>\\n\")\n\t\tf.write(\"<body>\")\n\t\tf.write('<div class=\"markdown-body\">')\n\t\tfor t in tokens:\n\t\t\tf.write(t.html)\n\t\tf.write(\"</div>\")\n\t\tf.write(\"</body>\")\n\t\tf.write(\"</html>\")\n\t\tf.close()",
"def write_html_file(out_table, outpath):\r\n page_out = PAGE_HTML % ('Taxa Summaries', out_table)\r\n out = open(outpath, \"w+\")\r\n out.write(page_out)\r\n out.close()",
"def write_properties(self, inputfile):\n raise NotImplementedError(\n \"Writing to this file format is not yet implemented\")",
"def write_file(file_descriptor, boxes):\n global _XHTML_HEADER\n\n impl = xml.dom.minidom.getDOMImplementation()\n newdoc = impl.createDocument(None, \"root\", None)\n\n file_descriptor.write(_XHTML_HEADER)\n file_descriptor.write(to_unicode(\"<body>\\n\"))\n for box in boxes:\n xml_str = to_unicode(\"%s\") % box.get_xml_tag(newdoc).toxml()\n file_descriptor.write(\n to_unicode(\"<p>\") + xml_str + to_unicode(\"</p>\\n\")\n )\n file_descriptor.write(to_unicode(\"</body>\\n</html>\\n\"))",
"def store_documentation(self, file_path, css_path=None):\n html = self.get_documentation(css_path)[1]\n with open(file_path, \"w+\", encoding=\"utf8\") as f:\n f.write(html)",
"def write_html_file(out_table, outpath):\r\n page_out = PAGE_HTML % (outpath, out_table)\r\n out = open(outpath, \"w+\")\r\n out.write(page_out)\r\n out.close()",
"def makeDocFile(self):\n\n f_out = \"%s/%s-doc.php\" % (self.dir_out, self.project_id)\n version = max(self.versions)\n\n with open(f_out, 'w') as f:\n f.write(\"<!DOCTYPE html>\\n\" \\\n \"<html xmlns=\\\"http://www.w3.org/1999/xhtml\\\">\\n\" \\\n \"<head>\\n\" \\\n \"<meta http-equiv=\\\"Content-Type\\\" content=\\\"text/html; charset=utf-8\\\"/>\\n\" \\\n \"\\n\" \\\n \"<title>Kit&Pack − Ultimate Power Booster</title>\\n\" \\\n \"<link rel=\\\"shortcut icon\\\" type=\\\"image/png\\\" href=\\\"../favicon.png\\\"/>\" \\\n \"<link rel=\\\"stylesheet\\\" type=\\\"text/css\\\" href=\\\"../css/doc-2.css\\\" />\\n\"\n \"\\n\" \\\n \"</head>\\n\" \\\n \"<body>\\n\" \\\n \"\\n\" \\\n \"<h1>Ultimate Power Booster</h1>\" \\\n \"\\n\")\n\n # Write a list of other versions of the documentation\n f.write(\"<p>Versions de cette documentation.</p>\\n\")\n f.write(\"<ul>\\n\")\n for v in self.versions:\n f.write(\"\\t<li><a href=\\\"%s.php\\\">%s</a></li>\\n\" % (\n v, v))\n f.write(\"</ul>\\n\\n\")\n\n f.write(\"<?php\\n\" \\\n \"include(\\\"%s.php\\\")\\n\" \\\n \"?>\\n\" \\\n \"\\n\" \\\n \"</body>\\n\" \\\n \"</html>\" % (version))",
"def write_properties(self, prop_filename):\n # Collect list of all keys in self.plats that have True values,\n # but change \"windows\" to \"win64\" because build-sanity is annoying.\n sanity_plats = [\n (x if x != \"windows\" else \"win64\")\n for x in self.plats.keys() if self.plats[x]\n ]\n with open(prop_filename, \"w\") as prop:\n prop.write(\"CURRENT_BUILD_NUMBER={}\\n\".format(self.bld_num))\n prop.write(\"VERSION={}\\n\".format(self.version))\n prop.write(\"DISTROS={}\\n\".format(\" \".join(sanity_plats)))\n prop.write(\"TESTRUNNER_BRANCH={}\\n\".format(self.testrunner_branch))\n if self.use_magma:\n prop.write(\"EXTRA_TEST_PARAMS={}\\n\".format(\"bucket_storage=magma\"))",
"def create_html_files(filenames, options):\n html_files = []\n status('creating HTML files...\\n',options)\n for i in range(len(filenames)):\n filename = filenames[i]\n status('* %s (%d of %d)... ' % (filename, i+1, len(filenames)), options)\n html_file = txt2htmlfilename(filename)\n docutils.core.publish_file(source=None, source_path=filename,\n destination_path=html_file, \n reader=None, reader_name='standalone',\n parser=None, parser_name='restructuredtext',\n writer=None, writer_name='html',\n settings=None, settings_spec=None,\n settings_overrides=HTML_WRITER_OVERRIDES)\n html_files.append(html_file)\n status('OK\\n', options)\n return html_files",
"def create_html_page(htmldata, filename):\n begin = \"<html>\\n\\n<body>\\n\\n<p>\\n\"\n end = \"\\n</p>\\n\\n</body>\\n\\n</html>\"\n full_text = begin + htmldata + end\n f = open(filename, \"w\")\n f.write(full_text)\n f.close()",
"def save_html(self, report_summary, file_name, folder):\n myfile = open(file_name, \"w\")\n myfile.write(t('! DOCTYPE html') + nl())\n myfile.write(t('html') + nl())\n myfile.write(t('head') + nl())\n myfile.write(t('link type=\"text/css\" rel=\"stylesheet\" ') + nl())\n\n myfile.write(html_space(4) + t('style'))\n myfile.write('table{width= 100%; border-collapse:collapse; border:1px solid black collapse}')\n myfile.write('th,td {padding:3px}' + nl())\n myfile.write(html_space(8) + 'td.detail{background-color:#D5DF93; font-size:20; '\n 'font-family:Helvetica, Arial, Sans Serif; font-weight:bold}' + nl())\n myfile.write(html_space(8) + 'td.detail1{font-size:20; '\n 'font-family:Helvetica, Arial, Sans Serif; font-weight:bold}' + nl())\n myfile.write(html_space(8) + 'td.detail2{font-size:20;'\n ' font-family:Helvetica, Arial, Sans Serif}' + nl())\n myfile.write(html_space(8) + 'td.header0{background-color:#8fac3a; font-size:20;'\n ' font-family:Helvetica, Arial, Sans Serif; font-weight:bold}' + nl())\n myfile.write(html_space(8) + 'td.header1{background-color:#E6E6E6; font-size:20;'\n ' font-family:Helvetica, Arial, Sans Serif; font-weight:bold}' + nl())\n myfile.write(html_space(8) + 'td.header2{font-size:20; width:50%}' + nl())\n myfile.write(html_space(4) + t('/style') + nl())\n\n myfile.write(t('/head') + nl())\n myfile.write(t('body') + nl())\n\n # Project summary\n self.company_name = str(report_summary[\"ProfileSummary\"]['CompanyName'])\n self.company_logo = str(report_summary[\"ProfileSummary\"]['CompanyLogo'])\n\n self.group_team_name = str(report_summary[\"ProfileSummary\"]['Group/TeamName'])\n self.designer = str(report_summary[\"ProfileSummary\"]['Designer'])\n self.project_title = str(report_summary['ProjectTitle'])\n self.sub_title = str(report_summary['Subtitle'])\n self.job_number = str(report_summary['JobNumber'])\n self.client = str(report_summary['Client'])\n additional_comments = str(report_summary['AdditionalComments'])\n\n # Seated angle design parameters\n connectivity = str(self.connectivity)\n shear_force = str(self.shear_force)\n column_sec = str(self.column_section)\n column_fu = str(self.column_fu)\n beam_sec = str(self.beam_section)\n seated_angle_section = str(self.angle_sec)\n top_angle_section = str(self.top_angle)\n angle_fu = str(self.angle_fu)\n\n bolt_type = str(self.bolt_type)\n is_hsfg = self.is_hsfg\n bolt_grade = str(self.bolt_grade)\n bolt_diameter = str(self.bolt_diameter)\n bolt_fu = str(self.bolt_fu)\n is_environ_corrosive = self.is_environ_corrosive\n\n # Design Preferences\n detail_gap = str(self.detail_gap)\n bolt_hole_clearance = str(self.bolt_hole_clearance)\n bolt_hole_type = str(self.bolt_hole_type)\n bolt_material_grade = self.bolt_fu_overwrite\n slip_factor_mu_f = self.mu_f\n min_edge_multiplier = self.min_edge_multiplier\n type_of_edge = self.type_of_edge\n design_method = self.design_method\n\n # Calculation outputs\n bolts_provided = str(self.bolts_provided)\n bolts_required = str(self.bolts_required)\n\n number_of_rows = str(self.num_rows)\n number_of_cols = str(self.num_cols)\n edge = str(self.edge_dist)\n gauge = str(self.gauge)\n pitch = str(self.pitch)\n end = str(self.end_dist)\n\n kb = str(self.k_b)\n beam_w_t = str(self.beam_w_t)\n beam_fu = str(self.beam_fu)\n dia_hole = str(self.bolt_hole_diameter)\n shear_capacity = str(self.bolt_shear_capacity)\n bearing_capacity = str(self.bolt_bearing_capacity)\n\n check_pass = \"<p align=left style=color:green><b>Pass</b></p>\"\n check_fail = \"<p align=left 
style=color:red><b>Fail</b></p>\"\n\n if self.safe == True:\n remark = check_pass\n elif self.safe == False:\n remark = check_fail\n\n # -----------------------------------------------------------------------------------\n rstr = self.design_report_header()\n # -----------------------------------------------------------------------------------\n\n # ---------------------------------- Design conclusion ------------------------------\n rstr += t('table border-collapse= \"collapse\" border=\"1px solid black\" width= 100% ') + nl()\n\n rstr += design_summary_row(0, \"Design Conclusion\", \"header0\", col_span=\"2\")\n\n row = [1, \"Seated Angle\", remark]\n rstr += t('tr')\n rstr += html_space(1) + t('td class=\"detail1 \"') + space(row[0]) + row[1] + t('/td')\n rstr += t('td class=\"detail1\"') + row[2] + t('/td') + nl()\n # rstr += t('td class=\"header1 safe\"') + row[3] + t('/td')\n rstr += t('/tr')\n\n rstr += design_summary_row(0, \"Seated Angle\", \"header0\", col_span=\"2\")\n rstr += design_summary_row(0, \"Connection Properties\", \"detail\", col_span=\"2\")\n rstr += design_summary_row(0, \"Connection \", \"detail1\", col_span=\"2\")\n rstr += design_summary_row(1, \"Connection Title\", \"detail2\", text_two=\" Seated Angle\")\n rstr += design_summary_row(1, \"Connection Type\", \"detail2\", text_two=\" Shear Connection\")\n rstr += design_summary_row(0, \"Connection Category\", \"detail1\")\n rstr += design_summary_row(1, \"Connectivity\", \"detail2\", text_two=str(connectivity))\n rstr += design_summary_row(1, \"Beam Connection\", \"detail2\", text_two=\"Bolted\")\n rstr += design_summary_row(1, \"Column Connection\", \"detail2\", text_two=\"Bolted\")\n rstr += design_summary_row(0, \"Loading (Factored Load)\", \"detail1\")\n rstr += design_summary_row(1, \"Shear Force (kN)\", \"detail2\", text_two=str(shear_force))\n rstr += design_summary_row(0, \"Components \", \"detail1\", col_span=\"2\")\n rstr += design_summary_row(1, \"Column Section\", \"detail1\", text_two=str(column_sec), text_two_css=\"detail2\")\n rstr += design_summary_row(2, \"Material\", \"detail2\", text_two=\"Fe \" + str(column_fu))\n rstr += design_summary_row(2, \"Hole\", \"detail2\", text_two=str(bolt_hole_type))\n rstr += design_summary_row(1, \"Beam Section\", \"detail1\", text_two=str(beam_sec), text_two_css=\"detail2\")\n rstr += design_summary_row(2, \"Material\", \"detail2\", text_two=\"Fe \" + str(beam_fu))\n rstr += design_summary_row(2, \"Hole\", \"detail2\", text_two=str(bolt_hole_type))\n rstr += design_summary_row(1, \"Seated Angle Section\", \"detail1\", text_two=str(seated_angle_section),\n text_two_css=\"detail2\")\n rstr += design_summary_row(2, \"Material\", \"detail2\", text_two=\"Fe \" + str(angle_fu))\n rstr += design_summary_row(2, \"Hole\", \"detail2\", text_two=str(bolt_hole_type))\n rstr += design_summary_row(1, \"Top Angle Section\", \"detail1\", text_two=str(top_angle_section),\n text_two_css=\"detail2\")\n rstr += design_summary_row(2, \"Material\", \"detail2\", text_two=\"Fe \" + str(angle_fu))\n rstr += design_summary_row(2, \"Hole\", \"detail2\", text_two=bolt_hole_type)\n rstr += design_summary_row(1, \"Bolts\", \"detail1\", col_span=\"2\")\n rstr += design_summary_row(2, \"Type\", \"detail2\", text_two=bolt_type)\n rstr += design_summary_row(2, \"Grade\", \"detail2\", text_two=bolt_grade)\n rstr += design_summary_row(2, \"Diameter (mm)\", \"detail2\", text_two=bolt_diameter)\n rstr += design_summary_row(2, \"Bolts - Required\", \"detail2\", text_two=bolts_required)\n rstr += 
design_summary_row(2, \"Bolts - Provided\", \"detail2\", text_two=bolts_provided)\n rstr += design_summary_row(2, \"Rows\", \"detail2\", text_two=number_of_rows)\n rstr += design_summary_row(2, \"Columns\", \"detail2\", text_two=number_of_cols)\n rstr += design_summary_row(2, \"Gauge (mm)\", \"detail2\", text_two=gauge)\n rstr += design_summary_row(2, \"Pitch (mm)\", \"detail2\", text_two=pitch)\n rstr += design_summary_row(2, \"End Distance (mm)\", \"detail2\", text_two=end)\n rstr += design_summary_row(2, \"Edge Distance (mm)\", \"detail2\", text_two=edge)\n rstr += design_summary_row(0, \"Assembly\", \"detail1\", col_span=\"2\")\n rstr += design_summary_row(1, \"Column-Beam Clearance (mm)\", \"detail2\", text_two=detail_gap,\n text_two_css=\"detail2\")\n\n rstr += \" \" + nl() + t('/table')\n rstr += t('h1 style=\"page-break-before:always\"') # page break\n rstr += t('/h1')\n\n # -----------------------------------------------------------------------------------\n rstr += self.design_report_header()\n # -----------------------------------------------------------------------------------\n\n # --------------------------------- Design Preferences ------------------------------\n # Write your code here\n\n\n # -----------------------------------------------------------------------------------\n rstr += self.design_report_header()\n # -----------------------------------------------------------------------------------\n\n # ------------------------------------ DESIGN CHECKS ---------------------------------\n rstr += t('table width = 100% border-collapse= \"collapse\" border=\"1px solid black\" table-layout:fixed')\n rstr += t('tr')\n rstr += t('td style=\"width:200px;\"')\n rstr += t('td width=\"50%\"')\n rstr += t('td width=\"50%\"')\n rstr += t('td style=\"width:50px;\"')\n rstr += t('/tr')\n rstr += design_check_row(\"Design Check\", \"\", \"\", \"\", col_span=\"4\", text_one_css=\"detail\")\n\n rstr += design_check_row(\"Check\", \"Required\", \"Provided\", \"Remark\", text_one_css=\"header1\",\n text_two_css=\"header1\", text_three_css=\"header1\", text_four_css=\"header1\")\n\n # Bolt\n rstr += design_check_row(\"Bolt Checks\", \"\", \"\", \"\", col_span=\"4\", text_one_css=\"detail\")\n\n # Bolt shear capacity (kN)\n const = str(round(math.pi / 4 * 0.78, 4))\n if is_hsfg == False:\n req_field = \"<i>V</i><sub>dsb</sub> = bolt_fu*(pi*0.78/4)*bolt_diameter^2/(√3)/\" \\\n \"<i>gamma<sub>mb</sub></i><br> [cl. 10.3.3]\"\n prov_field = \"<i>V</i><sub>dsb</sub> = \" + bolt_fu + \"*(\" + const + \")*\" + bolt_diameter + \"^2/\" \\\n + \"(√3)/1.25/1000 <br> \" + space(2) + \"= \" + shear_capacity\n elif is_hsfg == True:\n if bolt_hole_type == \"Standard\":\n K_h = str(1.0)\n elif bolt_hole_type == \"Oversized\":\n K_h = str(0.85)\n req_field = \"HSFG bolt shear capacity:\"\n # req_field += \"<br> <i>V</i><sub>dsf</sub> = mu_f*n_e*K_h*A_nb*f_0/<i>gamma<sub>mb</sub></i>\"\n req_field += \"<br> [cl. 10.3.3]\"\n prov_field = \"<i>V</i><sub>dsf</sub> = (\"\n prov_field += str(\n slip_factor_mu_f) + \")*(1)*(\" + K_h + \")*(\" + const + \"*\" + bolt_diameter + \"^2)<br>\" + space(2) + \\\n \"*(0.70*\" + bolt_fu + \")\" + \"/1.25/1000 <br> \" + space(2) + \"= \" + shear_capacity\n rstr += design_check_row(\"Bolt shear capacity (kN)\", req_field, prov_field, \" \")\n\n # Bolt bearing capacity (kN)\n # req_field = \"<i>V<sub>dpb</sub></i> = 2.5*k<sub>b</sub>*bolt_diameter*critical_thickness\" \\\n # +\"<br> *<i>f</i><sub>u</sub>/<i>gamma<sub>mb</sub></i><br> [Cl. 
10.3.4]\"\n req_field = \"<i>V<sub>dpb</sub></i>:<br> [Cl. 10.3.4]\"\n if is_hsfg == False:\n prov_field = \"<i>V</i><sub>dpb</sub> = 2.5*\" + kb + \"*\" + bolt_diameter + \"*\" + beam_w_t + \"*\" \\\n + beam_fu + \"/1.25/1000) <br>\" + space(2) + \" = \" + bearing_capacity + \" kN\"\n elif is_hsfg == True:\n prov_field = 'N/A'\n rstr += design_check_row(\"Bolt bearing capacity (kN)\", req_field, prov_field, \"\")\n\n # Bolt capacity (kN)\n req_field = \"min (bolt_shear_capacity, bolt_bearing_capacity)\"\n prov_field = \"min (\" + str(self.bolt_shear_capacity) + \", \" + str(self.bolt_bearing_capacity) + \") = \" \\\n + str(self.bolt_value)\n rstr += design_check_row(\"Bolt capacity (kN)\", req_field, prov_field, \"\")\n\n # No. of bolts\n # bolts = str(round(float(shear_force) / float(str(self.bolt_value)), 1))\n bolts_req_based_on_force = (math.ceil(float(shear_force) / self.bolt_value))\n if bolts_req_based_on_force > self.bolts_provided:\n remark = check_fail\n else:\n remark = check_pass\n # req_field = \"shear_force/ bolt_value = \" + str(shear_force) + \"/\" + str(self.bolt_value) + \" = \" \\\n req_field = str(shear_force) + \"/\" + str(self.bolt_value) + \" = \" \\\n + str(bolts_req_based_on_force)\n rstr += design_check_row(\"No. of bolts\", req_field, bolts_provided, remark)\n\n rstr += design_check_row(\"No. of columns\", \" \", number_of_cols, \" \")\n rstr += design_check_row(\"No. of row(s)\", \" ≤ 2\", number_of_rows, \" \")\n\n # Bolt pitch (mm)\n if self.pitch >= self.min_pitch and self.pitch <= self.max_spacing:\n remark = check_pass\n # req_field = \" ≥ 2.5*bolt_diameter ,<br> ≤ min(32*thickness_governing_min, 300) \"\n req_field = \"<br> ≥ 2.5* \" + bolt_diameter + \" = \" + str(self.min_pitch) + \",<br> ≤ min(32*\" + \\\n str(self.thickness_governing_min) + \", 300) = \" + str(self.max_spacing) + \"<br> [cl. 10.2.2] <br>\"\n prov_field = pitch\n elif self.pitch < self.min_pitch or self.pitch > self.max_spacing:\n if self.num_rows == 1:\n remark = \" \"\n req_field = \"N/A\"\n prov_field = \"N/A\"\n else:\n remark = check_fail\n # req_field = \" ≥ 2.5*bolt_diameter ,<br> ≤ min(32*thickness_governing_min, 300)\"\n req_field = \"<br> ≥ 2.5* \" + bolt_diameter + \" = \" + str(\n self.min_pitch) + \",<br> ≤ min(32*\" + \\\n str(self.thickness_governing_min) + \", 300) = \" + str(self.max_spacing) + \"<br> [cl. 10.2.2] <br>\"\n prov_field = pitch\n rstr += design_check_row(\"Bolt pitch (mm)\", req_field, prov_field, remark)\n\n # Bolt gauge (mm)\n if self.gauge >= self.min_gauge and self.gauge <= self.max_spacing:\n remark = check_pass\n elif self.gauge < self.min_gauge or self.gauge > self.max_spacing:\n remark = check_fail\n # req_field = \" ≥ 2.5*bolt_diameter ,<br> ≤ min(32*thickness_governing_min, 300)\"\n req_field = \"<br> ≥ 2.5*\" + bolt_diameter + \" = \" + str(self.min_gauge) + \",<br> ≤ min(32*\" + \\\n str(self.thickness_governing_min) + \", 300) = \" + str(self.max_spacing) + \"<br> [cl. 10.2.2] <br>\"\n rstr += design_check_row(\"Bolt gauge (mm)\", req_field, gauge, remark)\n\n # End distance (mm)\n if self.end_dist >= self.min_end_dist:\n remark = check_pass\n elif self.end_dist < self.min_end_dist:\n remark = check_fail\n # req_field = \" ≥\" + str(self.min_edge_multiplier) + \"*bolt_hole_diameter\" + \" [cl. 
10.2.4.2]\"\n req_field = \"<br> ≥\" + str(self.min_edge_multiplier) + \"*\" + dia_hole + \" = \" + str(self.min_end_dist)\n rstr += design_check_row(\"End distance (mm)\", req_field, end, remark)\n\n # Edge distance (mm)\n if self.edge_dist >= self.min_edge_dist and self.edge_dist <= self.max_edge_dist:\n remark = check_pass\n elif self.edge_dist < self.min_edge_dist or self.edge_dist > self.max_edge_dist:\n remark = check_fail\n # req_field = \" ≥\" + str(self.min_edge_multiplier) + \"*bolt_hole_diameter,\"\n req_field = \" ≥\" + str(self.min_edge_multiplier) + \"*\" + dia_hole + \" = \" + str(self.min_edge_dist) + \" [cl. 10.2.4.2]<br>\"\n # Cl 10.2.4.3 if members are exposed to corrosive influences\n if is_environ_corrosive == \"Yes\":\n req_field += \"<br><br> As the members are exposed to corrosive influences: \"\n # req_field += \"<br> ≤ min(12*thickness_governing_min*sqrt(250/f_y),<br>\" + space(\n # 2) + \" 40+4*thickness_governing_min)\"\n req_field += \"<br> [Cl 10.2.4.3]\"\n req_field += \"<br> ≤ min(12*\" + str(self.thickness_governing_min) + \"*sqrt(250/\" \\\n + str(self.angle_fy) + \"), 40 + 4*\" + str(self.thickness_governing_min)\\\n + \") = \" + str(self.max_edge_dist)\n elif is_environ_corrosive == \"No\":\n # req_field += \"<br><br> ≤ 12*thickness_governing_min*sqrt(250/f_y)\"\n req_field += \"<br> ≤ 12*\" + str(self.thickness_governing_min) + \"sqrt(250/\" \\\n + str(self.angle_fy) + \") = \" + str(self.max_edge_dist) + \"[Cl 10.2.4.3]\"\n rstr += design_check_row(\"Edge distance (mm)\", req_field, edge, remark)\n\n # Seated angle\n rstr += design_check_row(\"Seated Angle \" + str(self.angle_sec), \"\", \"\", \"\", col_span=\"4\",\n text_one_css=\"detail\")\n\n # Seated angle length\n if connectivity == \"Column flange-Beam flange\":\n # req_field = \"= min(supported_beam_width,<br>\"+space(2)+\"supporting_column_width)\"\n req_field = \" <br> = min(\" + str(self.beam_w_f) + \", \" + str(self.column_w_f) + \")\"\n prov_field = str(self.angle_l)\n elif connectivity == \"Column web-Beam flange\":\n # limiting_angle_length = self.column_d - 2 * self.column_f_t - 2 * self.column_R1 - self.root_clearance_col\n # self.angle_l = int(math.ceil(min(self.beam_w_f, limiting_angle_length)))\n # req_field = \"= min(width of supported beam, <br>\" + space(2) + \\\n # \"column_depth - 2*column_flange_thickness<br>\" + space(2) +\\\n # \" - 2*column_R1 - root_clearance_col)\"\n req_field = \"<br> = min(\" + str(self.beam_w_f) \\\n + \", \" + str(self.column_d) + \" - 2*\" + str(self.column_f_t) \\\n + \" - 2*\" + str(self.column_R1) + \" - \" + str(self.root_clearance_col) + \")\"\n prov_field = str(self.angle_l)\n # As the seated angle length is a determined/calculated parameter, there is no design 'check' remark\n rstr += design_check_row(\"Length (mm)\", req_field, prov_field, \" \")\n\n # Length of outstanding leg\n if self.outstanding_leg_length_required < self.angle_B:\n remark = check_pass\n elif self.outstanding_leg_length_required > self.angle_B:\n remark = check_fail\n # req_field = \"b = (R*\" + sub(\"gamma\", \"m0\") + \"/(\" + sub(\"f\", \"yw\") +\\\n # \"*beam_web_thickness))<br>\" + space(2) + \"+ beam_column_clear_gap\"\n req_field = \"<br>[Cl. 
8.7.4]\"\n req_field += \"<br> = (\" + str(self.shear_force) + \"*1000*\" + str(self.gamma_m0) + \"/(\" + str(self.beam_fy) \\\n + \"*\" + str(self.beam_w_t) + \")) + \" + str(self.detail_gap)\n prov_field = str(self.angle_B)\n rstr += design_check_row(\"Outstanding leg length (mm)\", req_field, prov_field, remark)\n\n # For angle thickness\n # Shear capacity of outstanding leg\n if self.outstanding_leg_shear_capacity > self.shear_force:\n remark = check_pass\n elif self.outstanding_leg_shear_capacity < self.shear_force:\n remark = check_fail\n req_field = sub(\"V\", \"dp\") + \" ≥ V <br>\"\n req_field += sub(\"V\", \"dp\") + \" ≥ \" + str(self.shear_force) + \"kN <br> [Cl. 8.4.1]\"\n # prov_field = sub(\"V\", \"dp\") + \"=\" + sub(\"A\", \"v\") + sub(\"f\", \"yw\") + \"/ (√ 3 *\" + sub(\"gamma\", \"m0\") + \")\"\n prov_field = \"<br>\" + space(1) + \"= (\" + str(self.angle_l) + \"*\" + str(self.angle_t)\\\n + \")*\" + str(self.angle_fy) + \"/ (√ 3 *\" + str(self.gamma_m0)\\\n + \")<br>\" + space(1) + \"= \" + str(self.outstanding_leg_shear_capacity)\n rstr += design_check_row(\"Shear capacity of outstanding leg (kN)\", req_field, prov_field,\n remark)\n\n # Moment capacity of outstanding leg\n if self.is_shear_high == False:\n req_field = \"As V ≤ 0.6 \" + sub(\"V\", \"d\")\n req_field += \",<br>[Cl 8.2.1.2] is applicable <br>\"\n req_field += sub(\"M\", \"d\") + \" ≥ Moment at root of angle\"\n req_field += \"<br>\" + sub(\"M\", \"d\") + \" ≥ \" + str(self.moment_at_root_angle)\n prov_field = sub(\"M\", \"d\") + \" = min(\" + sub(\"beta\", \"b\") + sub(\"Z\", \"e\") + sub(\"f\", \"y\")\n prov_field += \"/\" + sub(\"gamma\", \"m0\") + \", <br>\" + space(1) +\\\n \" 1.5\" + sub(\"Z\", \"e\") + sub(\"f\",\"y\") + \"/\" + sub(\"gamma\", \"m0\") + \")\"\n prov_field += \"<br>\" + space(1) + \" = min(1.0* \" + str(self.angle_l) + \"*(\" + str(self.angle_t) + \"^2/6)*\"\n prov_field += str(self.angle_fy) + \"/\" + str(self.gamma_m0) + \",<br>\" + space(2) \\\n + \" 1.5*\" + str(self.angle_l) + \"*(\" + str(self.angle_t) + \"^2/6)*\"\n prov_field += str(self.angle_fy) + \"/\" + str(self.gamma_m0) + \")\"\n prov_field += \"<br>\" + space(1) + \"= \" + str(self.moment_capacity_angle)\n\n elif self.is_shear_high == True:\n req_field = \"As V ≥ 0.6 \" + sub(\"V\", \"d\")\n req_field += \",<br>[Cl 8.2.1.3] is applicable\"\n req_field += \"<br>\" + sub(\"M\", \"dv\") + \" ≥ Moment at root of angle\"\n req_field += \"<br>\" + sub(\"M\", \"dv\") + \" ≥ \" + str(self.moment_at_root_angle) + \"<br>\"\n prov_field = sub(\"M\", \"dv\") + \"= min((1 - beta)\" + sub(\"M\", \"d\") + \" , \"\n prov_field += \"1.2 \" + sub(\"Z\", \"e\") + sub(\"f\", \"y\") + \"/\" + sub(\"gamma\", \"m0\") + \") <br>\"\n prov_field += space(1) + \"where, <br>\" + space(2) + \"beta = ((2V/\" + sub(\"V\", \"d\")\\\n + \")-1)^2 = \" + str(round(self.moment_high_shear_beta, 4)) + \"<br>\"\n prov_field += \"<br>\" + sub(\"M\", \"dv\") + \" = \" + \"min((1 - \" + str(round(self.moment_high_shear_beta, 4))\\\n + \")<br>\" + space(1) + \"*1.0*(\" + str(self.angle_l) + \"*\" + str(self.angle_t) + \"^2/6)*\"\n prov_field += str(self.angle_fy) + \"/\" + str(self.gamma_m0) + \" , \"\n prov_field += \"<br>\" + space(1) + \"1.2*(\" + str(self.angle_l) + \"*\" + str(self.angle_t) + \"^2/6)*\"\n prov_field += str(self.angle_fy) + \"/\" + str(self.gamma_m0) + \")\"\n prov_field += \"<br>\" + space(1) + \" = \" + str(self.moment_capacity_angle)\n\n if self.moment_capacity_angle > self.moment_at_root_angle:\n remark = check_pass\n elif 
self.moment_capacity_angle < self.moment_at_root_angle:\n remark = check_fail\n rstr += design_check_row(\"Moment capacity of outstanding leg (kN-mm)\", req_field,\n prov_field, remark)\n\n # Top angle\n rstr += design_check_row(\"Top Angle\", \"\", \"\", \"\", col_span=\"4\", text_one_css=\"detail\")\n req_field = \"Recommended size (based on stability only): \" + str(self.top_angle_recommended)\n prov_field = \"User selected size: \" + str(self.top_angle)\n rstr += design_check_row(\"Section \", req_field, prov_field, \" \")\n\n # End distance (mm)\n if self.top_angle_end_dist_beam <= self.min_end_dist or \\\n self.top_angle_end_dist_column <= self.min_end_dist:\n remark = check_fail\n else:\n remark = check_pass\n req_field = \" ≥\" + str(self.min_edge_multiplier) + \"*bolt_hole_diameter\" + \" [cl. 10.2.4.2]\"\n req_field += \"<br> ≥\" + str(self.min_edge_multiplier) + \"*\" + dia_hole + \" = \" + str(self.min_end_dist)\n prov_field = \" on leg connected to Beam: \" + str(self.top_angle_end_dist_beam)\n prov_field += \"<br> on leg connected to Column: \" + str(self.top_angle_end_dist_column)\n rstr += design_check_row(\"End distance (mm)\", req_field, prov_field, remark)\n\n\n rstr += t('/table')\n rstr += t('h1 style=\"page-break-before:always\"')\n rstr += t('/h1')\n\n # -----------------------------------------------------------------------------------\n rstr += self.design_report_header()\n # -----------------------------------------------------------------------------------\n\n # Connection images (views)\n rstr += t('table width = 100% border-collapse= \"collapse\" border=\"1px solid black\"')\n\n # row = [0, \"Views\", \" \"]\n # rstr += t('tr')\n # rstr += t('td colspan=\"2\" class=\" detail\"') + space(row[0]) + row[1] + t('/td')\n # rstr += t('/tr')\n rstr += design_summary_row(0, \"Views\", \"detail\", col_span=\"2\")\n\n if self.safe is True:\n png = folder + \"/images_html/3D_Model.png\"\n datapng = '<object type=\"image/PNG\" data= %s width =\"450\"></object\">' % png\n\n side = folder + \"/images_html/seatSide.png\"\n dataside = '<object type=\"image/PNG\" data= %s width =\"400\"></object>' % side\n\n top = folder + \"/images_html/seatTop.png\"\n datatop = '<object type=\"image/PNG\" data= %s width =\"400\"></object>' % top\n\n front = folder + \"/images_html/seatFront.png\"\n datafront = '<object type=\"image/PNG\" data= %s width =\"450\"></object>' % front\n\n row = [0, datapng, datatop]\n rstr += t('tr') + nl()\n rstr += html_space(4) + t('td align=\"center\" class=\" header2\"') + space(row[0]) + row[1] + t('/td') + nl()\n rstr += html_space(4) + t('td align=\"center\" class=\" header2\"') + row[2] + t('/td') + nl()\n rstr += t('/tr' + nl())\n\n row = [0, dataside, datafront]\n rstr += t('tr') + nl()\n rstr += html_space(4) + t('td align=\"center\" class=\" header2\"') + space(row[0]) + row[1] + t('/td') + nl()\n rstr += html_space(4) + t('td align=\"center\" class=\" header2 \"') + row[2] + t('/td') + nl()\n rstr += t('/tr') + nl()\n\n else:\n pass\n\n rstr += t('/table') + nl() + \" \" + nl()\n rstr += t('h1 style=\"page-break-before:always\"')\n rstr += t('/h1')\n\n # -----------------------------------------------------------------------------------\n rstr += self.design_report_header()\n # -----------------------------------------------------------------------------------\n\n rstr += t('hr')\n rstr += t('/hr') + nl() + \" \" + nl()\n\n rstr += t('table width = 100% border-collapse= \"collapse\" border=\"1px solid black\"') + nl()\n rstr += html_space(1) + 
t('''col width=30%''')\n rstr += html_space(1) + t('''col width=70%''') + nl()\n\n rstr += html_space(1) + t('tr') + nl()\n row = [0, \"Additional Comments\", additional_comments]\n rstr += html_space(2) + t('td class= \"detail1\"') + space(row[0]) + row[1] + t('/td') + nl()\n rstr += html_space(2) + t('td class= \"detail2\" align=\"justified\"') + row[2] + t('/td') + nl()\n rstr += html_space(1) + t('/tr') + nl()\n\n rstr += t('/table') + nl()\n\n myfile.write(rstr)\n myfile.write(t('/body'))\n myfile.write(t('/html'))\n myfile.close()",
"def save_html_files(dir_out, htmls, filenames):\n html_directory = os.path.join(dir_out, \"html\")\n for html, filename in zip(htmls, filenames):\n export_path = os.path.join(html_directory, filename + \".html\")\n with codecs.open(export_path, \"w\", encoding=\"utf-8\") as invoice_file:\n invoice_file.writelines(html)",
"def write_project_file(html_files, options):\n if not options.default_topic:\n options.default_topic = html_files[0]\n\n settings = {\n 'chm_file': options.chm_file,\n 'hhc_file': options.hhc_file,\n 'default_topic': options.default_topic,\n 'display_compile_progress': ['Yes','No'][not(options.verbose)],\n 'full_text_search_on': ['Yes','No'][not(options.full_text_search)],\n 'language': LANGUAGES[options.language_code.lower()],\n 'title': options.title,\n 'files': '\\n'.join(html_files),\n }\n\n status('creating project file (%s...) ' % options.hhp_file, options)\n\n f = open(options.hhp_file,'w')\n print >> f, HHP_TEMPLATE % settings\n f.close()\n\n status('OK\\n', options)",
"def write_contents_file(filenames, options):\n contents = []\n status('creating contents file (%s...) \\n' % options.hhc_file, options)\n\n for i in range(len(filenames)):\n filename = filenames[i]\n\n status('* %s (%d of %d)... ' % (filename, i+1, len(filenames)), options)\n\n # this should really be relative\n html_filename = txt2htmlfilename(filename)\n writer=Writer()\n writer.section_filename = html_filename\n\n pub = docutils.core.Publisher()\n pub.set_reader('standalone', None, 'restructuredtext')\n pub.writer = writer\n settings = pub.get_settings(output_encoding='iso-8859-1')\n pub.settings._destination = ''\n pub.source = docutils.io.FileInput(source_path=filename, \n encoding=settings.input_encoding)\n pub.destination = docutils.io.StringOutput(\n encoding=settings.output_encoding)\n pub.document = pub.reader.read(pub.source, pub.parser, pub.settings)\n pub.apply_transforms()\n output = pub.writer.write(pub.document, pub.destination)\n pub.writer.assemble_parts()\n contents.append(output)\n\n status('OK\\n', options)\n\n f = open(options.hhc_file,'w')\n print >> f, HHC_HEADER + ''.join(contents) + HHC_FOOTER\n f.close()",
"def write_to_html_file(self, data: str):\n try:\n os.mkdir(\"../\" + self.uri)\n except FileExistsError:\n pass\n\n f = open(\"../\" + self.uri + self.file_name, \"w\")\n f.write(data)\n print(\"[WRITE] written to .html file\")\n f.close()",
"def write_method_doc(file_name, entries):\r\n\r\n with open(file_name, 'w', newline='\\n') as f:\r\n f.write('<table border=\"0\">')\r\n f.write('<tr><td><b>Method</b></td><td><b>Description</b></td></tr>')\r\n for items in sorted(entries, key=itemgetter(3)):\r\n f.write('<tr><td valign=\"top\">%s</td><td>%s</td></tr>' %\r\n (items[3], doc_to_html(items[4])))\r\n f.write('</table>')",
"def make_html_table(filelist):\n pre=open(\"HTML_Header.txt\").read()\n out=[]\n for file in filelist:\n x=load_file(file)[1]\n out.append(\"<tr>\")\n out.append(\"<th>{}</th>\".format(x[0]))\n out.append(\"<th>{}</th>\".format(x[2]))\n out.append(\"<th>{}</th>\".format(x[1]))\n out.append(\"<th>{}</th>\".format(x[6]))\n out.append(\"<th>{}</th>\".format(x[7]))\n out.append(\"<th>{}</th>\".format(x[8]))\n out.append(\"<th>{}</th>\".format(x[9]))\n out.append(\"<th>{}</th>\".format(x[12]))\n out.append(\"<th>link</th>\")\n out.append(\"</tr>\")\n out.append(\"</table>\")\n \n for i in range(0,len(out)):\n pre=pre+out[i]+\"\\n\"\n \n path=os.getcwd()\n os.chdir(\"Ausgabe\")\n open(\"table.html\",\"w\").write(pre)\n os.chdir(path)",
"def to_file(self, html_file: str = None) -> None:\n if not html_file:\n html_file = f\"{self.id}.html\"\n\n with open(html_file, \"w\") as f:\n f.write(self.soup.html)",
"def write_properties(props):\n root = Element('{%s}coreProperties' % COREPROPS_NS)\n for attr in (\"creator\", \"title\", \"description\", \"subject\", \"identifier\",\n \"language\"):\n SubElement(root, '{%s}%s' % (DCORE_NS, attr)).text = getattr(props, attr)\n\n for attr in (\"created\", \"modified\"):\n value = datetime_to_W3CDTF(getattr(props, attr))\n SubElement(root, '{%s}%s' % (DCTERMS_NS, attr),\n {'{%s}type' % XSI_NS:'%s:W3CDTF' % DCTERMS_PREFIX}).text = value\n\n for attr in (\"lastModifiedBy\", \"category\", \"contentStatus\", \"version\",\n \"revision\", \"keywords\"):\n SubElement(root, '{%s}%s' % (COREPROPS_NS, attr)).text = getattr(props, attr)\n\n if props.lastPrinted is not None:\n SubElement(root, \"{%s}lastPrinted\" % COREPROPS_NS).text = datetime_to_W3CDTF(props.lastPrinted\n )\n return tostring(root)",
"def writeHtmlFile(nodes, functionName, filename, errorsOnly):\n fout = open(filename, 'w')\n fout.write('<html>\\n')\n fout.write('<head>\\n')\n fout.write(' <style type=\"text/css\">\\n')\n fout.write(' body { font-size: 0.8em }\\n')\n fout.write(' th { background-color: #A3C159; text-transform: uppercase }\\n')\n fout.write(' td { background-color: white; vertical-align: text-top }\\n')\n fout.write(' pre { background-color: #EEEEEE }\\n')\n fout.write(' </style>\\n')\n fout.write('</head>\\n')\n fout.write('<body>\\n')\n\n fout.write('<a href=\"index.htm\">Home</a> -- ')\n if errorsOnly:\n fout.write('<a href=\"all-'+functionName+'.htm\">All test cases</a>')\n else:\n fout.write('<a href=\"errors-'+functionName+'.htm\">Error test cases</a>')\n fout.write('<br><br>')\n\n testclass = None\n num = 0\n for node in nodes:\n if errorsOnly and node['expected']=='':\n continue\n if trimname(node['functionName']) == functionName:\n num = num + 1\n\n if not testclass:\n testclass = node['testclass']\n fout.write('<h1>' + node['testclass'] + '::' + functionName + '</h1>')\n fout.write('<table border=\"0\" cellspacing=\"0\">\\n')\n fout.write(' <tr><th>Nr</th><th>Code</th><th>Expected</th></tr>\\n')\n\n fout.write(' <tr><td>' + str(num) + '</td>')\n fout.write('<td><pre>' + strtoxml(node['code']).replace('\\\\n', '\\n') + '</pre></td>')\n fout.write('<td>' + strtoxml(node['expected']).replace('\\\\n', '<br>') + '</td>')\n fout.write('</tr>\\n')\n\n if testclass != None:\n fout.write('</table>\\n');\n fout.write('</body></html>\\n')\n fout.close()",
"def makeHTMLIndexFile(self):\n part1 = \"\"\"<html>\n <body>\n <title>Index</title>\n <div id=\"pg_body\">\n <div id=\"testSuitesTitle\">TestSuites</div>\n <div id=\"resultsTitle\">Results</div>\n <div id=\"testSuites\">\n \"\"\"\n part2 = self.makeLinks()\n part3 = \"\"\"</div>\n <div id=\"results\">\n <iframe id=\"loadHTMLResults\" name=\"loadHTMLResults\" frameborder=\"0\" src=\"statistics.html\" style=\"height:100%;width:100%;\"></iframe>\n </div>\n <div id=\"footer\">Test Engineer(s) :\"\"\"+testEngineers+\"\"\"</div>\n </div>\n </body>\n </html>\n\t\t<style>\n\t\tbody{\n margin:0;\n }\n\t\t#pg_body{\n\t\twidth=100%;\n\t\ttext-align:center;\n\t\t}\n\t\t#testSuitesTitle{\n\t\twidth:25%;\n\t\tfloat:left;\n\t\tbackground-color:#6495ED;\n\t\tfont-weight:bold;\n\t\tcolor:white;\n\t\t}\n\t\t#resultsTitle{\n\t\twidth:75%;\n\t\tfloat:right;\n\t\tbackground-color:#6495ED;\n\t\tfont-weight:bold;\n\t\tcolor:white;\n\t\t}\n\t\t#testSuites{\n\t\twidth:25%;\n\t\tfloat:left;\n\t\tbackground-color:lightgrey;\n\t\tfont-weight:bold;\n\t\ttext-align:left;\n\t\theight:94%;\n\t\toverflow:scroll;\n\t\t}\n\t\t#results{\n\t\twidth:75%;\n\t\tbackground-color:white;\n\t\tfloat:right;\n\t\ttext-align:left;\n\t\theight:94%;\n\t\toverflow:scroll;\n\t\t}\n\t\t#footer{\n\t\twidth:100%;\n\t\ttext-align:left;\n\t\tcolor:lightgrey;\n\t\tbackground-color:#6495ED;\n\t\t}\n\t\t</style>\n \"\"\"\n \n page = (part1+part2+part3)\n f = open(self.dir+'/index.html','w')\n f.write(page)\n f.close",
"def docGenerator(docRequirements, docFilePath):\n\tamount = int(docRequirements[0])\n\tsize = docRequirements[1]\n\tunit = docRequirements[2].lower()\n\tif not(isValidUnit(unit)):\n\t\tprint \"Unit is incorrect.\"\n\t\treturn\n\tprint \"Creating %s files, each %s%s in size...\" % (amount, size, unit)\n\troundDown = int(float(size))\n\tfilename = fileToUse(roundDown, unit)\n\tnumOfWrites = calcNumOfWrites(roundDown, filename, unit)\n\tfor i in range(0, amount):\n\t\tfor j in range(0, numOfWrites):\n\t\t\twith open(filename) as base:\n\t\t\t\twith open(docFilePath+\"file_%03d.txt\" % i, \"a\") as output:\n\t\t\t\t\toutput.write(base.read())\n\t\tconvertedSize = convertFromBytes(int(os.path.getsize(output.name)), unit)\n\t\tprint \"Created file %s of %s%s size.\" % (output.name, convertedSize, unit)\n\tprint \"Generated %s %s%s files locally.\" % (amount, size, unit)\n\tbase.close()\n\tpushDocsFromDir(docFilePath)",
"def collect_html(args):\n url_list = args.url_list\n output_dir = args.output_dir\n\n print(url_list)\n\n # do some checks\n try: \n assert os.path.exists(url_list), 'url_list must exist'\n assert os.path.exists(output_dir), 'output_dir must exist'\n except AssertionError as err: \n logger.error('Failed check: {}'.format(err)) \n return \n\n urls = common.read_file(url_list)\n \n for url in urls: \n logger.debug(url) \n\n html = spy_tools.collect_html(url)\n out = url.split('/')\n output = os.path.join(output_dir, out[-1] + '.html')\n common.write_file(html, output)",
"def generate(src_file_names,\r\n dst_file_name,\r\n dst_doc_file_name,\r\n dst_property_doc_file_name,\r\n name):\r\n methods = []\r\n properties = []\r\n extra_includes = []\r\n entries = (methods, properties)\r\n for src_file_name in src_file_names:\r\n check_file(src_file_name)\r\n m, p = parse_file(src_file_name)\r\n methods.extend(m)\r\n properties.extend(p)\r\n\r\n extra_includes.extend(find_extra_include(src_file_name))\r\n if len(entries[0]) == 0 and len(entries[1]) == 0:\r\n print(\"No entries found in %s.\" % src_file_name)\r\n exit(1)\r\n\r\n write_result(dst_file_name, name, entries, extra_includes, src_file_names)\r\n write_method_doc(dst_doc_file_name, entries[0])\r\n write_property_doc(dst_property_doc_file_name, entries[1])",
"def write_index_html(wk_dir,region_dict,metrics_filename,ext=\"png\"):\n # Make lists of the metrics and figure files to display\n metrics_dir = os.path.join(wk_dir,metrics_dir_name)\n metric_list = sorted([\n f for f in os.listdir(metrics_dir) if f.endswith('_summary.csv')])\n plot_list=[]\n fig_list=sorted([f for f in os.listdir(wk_dir+'/'+figure_dir_name)])\n for keyword in ['lag','correlations','twodpdf']:\n plot_list.append([f for f in fig_list if (keyword in f)]) # sort datasets\n subtitle_list=['Autocorrelation','2D Histograms','Correlation maps']\n\n # Start working on html text. Each line is appened to a list that\n # is then written to file.\n html_file=['<html>\\n',\n '<body>','<head><title>ASoP-Coherence</title></head>\\n',\n '<br><h1>ASoP-Coherence results</h1>\\n','<h2>Contents</h2>\\n',\n '<dl>\\n','<dt><a href=\"#Metrics\">Metrics</a></dt>\\n',\n '<dt><a href=\"#Figures\">Figures</a></dt>\\n',\n '<dd><a href=\"#Autocorrelation\">Autocorrelation</a></dd>\\n',\n '<dd><a href=\"#2D-Histograms\">2D Histograms</a></dd>\\n',\n '<dd><a href=\"#Correlation-maps\">Correlation Maps</a></dd>\\n',\n '</dl>\\n''<section id=\"Metrics\">\\n','<br><h2>Metrics</h2>\\n']\n html_file.append('<h3>Intermittency Metrics</h3>\\n')\n\n # Display metrics JSON in dashboard option\n metrics_json = os.path.basename(metrics_filename)\n metrics_relocated = os.path.join(metrics_dir_name,metrics_json)\n tmp='<p><a href=\"'+metrics_relocated+'\" target=\"_blank\">'+metrics_json+'</a></p>\\n'\n html_file.append(tmp)\n\n # Link CSV tables for download\n html_file.append('<h3>Tables</h3>\\n')\n for metric_file in metric_list:\n metric_path = os.path.join(metrics_dir_name,metric_file)\n html_file.append('<p><a href=\"{0}\">{1}</a></p>\\n'.format(metric_path,metric_file))\n html_file.append('<br>\\n')\n html_file.append('</section>\\n')\n\n # Add figures\n html_file.append('<section id=\"Figures\">\\n')\n html_file.append('<h2>Figures</h2>\\n')\n for title,category in zip(subtitle_list,plot_list):\n html_file.append('<section id='+title.replace(' ','-')+'>\\n')\n html_file.append('<h3>{0}</h3>\\n'.format(title))\n # Adjust figure width for autocorrelation\n fwidth = \"647\"\n if title==\"Autocorrelation\":\n fwidth=\"450\"\n for region in region_dict:\n html_file.append('<h4>{0}</h4>\\n'.format(region.replace('_',' ')))\n region_fig = [f for f in category if (region.replace(\" \",\"_\") in f)]\n for fig in region_fig:\n tmp = '<p><a href=\"{0}\" target=\"_blank\" alt={0}>' + \\\n '<img src=\"{0}\" width={1} alt=\"{0}\"></a></p>\\n'\n html_file.append(\n tmp.format(os.path.join(figure_dir_name,fig),fwidth))\n html_file.append('</section>\\n')\n html_file.append('</section>\\n')\n\n html_file.append('</body>\\n</html>\\n')\n filename=wk_dir+'/index.html'\n with open(filename,'w') as html_page:\n html_page.writelines(html_file)",
"def write_reference_pages(printfile: Optional[TextIO], do_print: bool, reflist: list, refdict: dict,\n citelist: list, name_table: dict, point_locations: dict) -> None:\n # for ref in tqdm(reflist):\n for ref in reflist:\n if ref.cite_key != \"<pending>\":\n if do_print:\n write_reference_page(printfile, do_print, ref, citelist, refdict, name_table, point_locations)\n else:\n with open(WEBOUT_PATH + \"references/\" + ref.cite_key + \".html\", \"w\", encoding=\"utf-8\") as outfile:\n write_reference_page(outfile, do_print, ref, citelist, refdict, name_table, point_locations)",
"def write_output(directory, name, html):\n if not os.path.isdir(directory):\n os.mkdir(directory)\n with open(os.path.join(directory, '.'.join((name, 'html'))), 'w') as f:\n f.write(beautify(html))",
"def save(self):\n f=open(\"{}/{}.html\".format(self.path,self.name),\"w\")\n f.write(\"<html>\\n <head>\\n\")\n for c in self.css:\n f.write(\" <link rel=\\\"Stylesheet\\\" href=\\\"{}\\\" />\\n\".format(c))\n f.write(\" </head>\\n</body>\\n\")\n for line in self.template.split(\"\\n\"):\n f.write(\" {}\\n\".format(line))\n f.write(\" </body>\\n</html>\")\n f.close()"
] | [
"0.587501",
"0.5714326",
"0.55795753",
"0.55168897",
"0.5469178",
"0.54641604",
"0.5401127",
"0.5393503",
"0.538135",
"0.5371805",
"0.53659755",
"0.53104347",
"0.52841675",
"0.5282833",
"0.5242343",
"0.51993394",
"0.5180886",
"0.51743466",
"0.51662326",
"0.510156",
"0.50791055",
"0.50417876",
"0.50383294",
"0.5017193",
"0.50149155",
"0.49858952",
"0.49795738",
"0.49697083",
"0.49648133",
"0.49563548"
] | 0.7246242 | 0 |
Generate the Python method-def header and html documentation for the c++-file indicated by src_file_name, by locating "special" C-comments. The header is saved to dst_file_name and the html documentation to dst_doc_file_name. The name is used for the PyMethodDef and PyGetSetDef. | def generate(src_file_names,
dst_file_name,
dst_doc_file_name,
dst_property_doc_file_name,
name):
methods = []
properties = []
extra_includes = []
entries = (methods, properties)
for src_file_name in src_file_names:
check_file(src_file_name)
m, p = parse_file(src_file_name)
methods.extend(m)
properties.extend(p)
extra_includes.extend(find_extra_include(src_file_name))
if len(entries[0]) == 0 and len(entries[1]) == 0:
print("No entries found in %s." % src_file_name)
exit(1)
write_result(dst_file_name, name, entries, extra_includes, src_file_names)
write_method_doc(dst_doc_file_name, entries[0])
write_property_doc(dst_property_doc_file_name, entries[1]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_headers(src_files, out_root, doc_root):\r\n\r\n if not os.path.exists(out_root):\r\n os.makedirs(out_root)\r\n did_print_heading = False\r\n changed = False\r\n for (name, files) in src_files:\r\n if files.__class__ == str:\r\n src = files\r\n files = (src,)\r\n else:\r\n src = files[0]\r\n\r\n dst = src.replace(\".hh\", \"-method-def.hh\")\r\n dst = dst.replace(\".cpp\", \"-method-def.hh\")\r\n dst = os.path.join(out_root, os.path.split(dst)[1])\r\n\r\n dst_doc = src.replace(\".hh\", '-methods.txt')\r\n dst_doc = dst_doc.replace(\".cpp\", '-methods.txt')\r\n dst_doc_filename = os.path.split(dst_doc)[1]\r\n dst_doc_filename = os.path.join(doc_root, dst_doc_filename)\r\n\r\n dst_prop_doc = src.replace(\".cpp\", '-properties.txt')\r\n dst_doc_prop_filename = os.path.split(dst_prop_doc)[1]\r\n dst_doc_prop_filename = os.path.join(doc_root, dst_doc_prop_filename)\r\n\r\n if util.changed(src, dst):\r\n if not did_print_heading:\r\n print(\"* Generating Python method definitions.\")\r\n did_print_heading = True\r\n generate(files, dst, dst_doc_filename, dst_doc_prop_filename, name)\r\n changed = True\r\n if not changed:\r\n print(\"* Python method definitions up to date.\")",
"def rewriteDocstringForPython (docstring):\n\n # Take out the C++ comment start and end.\n\n docstring = docstring.replace('/**', '').replace('*/', '')\n p = re.compile('^(\\s*)\\*([ \\t]*)', re.MULTILINE)\n docstring = p.sub(r'\\2', docstring)\n\n # Rewrite some of the data type references to equivalent Python types.\n # (Note: this rewriting affects only the documentation comments inside\n # classes & methods, not the method signatures.)\n\n docstring = docstring.replace(r'const char *', 'string ')\n docstring = docstring.replace(r'const char* ', 'string ')\n docstring = docstring.replace(r'an unsigned int', 'a long integer')\n docstring = docstring.replace(r'unsigned int', 'long')\n docstring = docstring.replace(r'const std::string&', 'string')\n docstring = docstring.replace(r'const std::string', 'string')\n docstring = docstring.replace(r'std::string', 'string')\n docstring = docstring.replace(r'NULL', 'None')\n docstring = docstring.replace(r'@c true', '@c True')\n docstring = docstring.replace(r'@c false', '@c False')\n\n # Also use Python syntax instead of \"const XMLNode*\" etc.\n\n p = re.compile(r'const (%?)(' + r') ?(\\*|&)', re.DOTALL)\n docstring = p.sub(rewriteClassRef, docstring) \n p = re.compile(r'(%?)(' + r') ?(\\*|&)', re.DOTALL)\n docstring = p.sub(rewriteClassRef, docstring) \n\n # Need to escape the quotation marks:\n\n docstring = docstring.replace('\"', \"'\")\n docstring = docstring.replace(r\"'\", r\"\\'\")\n\n # Python method cross-references won't be made by doxygen unless\n # the method reference is written without arguments.\n\n p = re.compile('(\\s+)(\\S+?)::(\\w+\\s*)(\\([^)]*?\\))', re.MULTILINE)\n docstring = p.sub(translatePythonCrossRef, docstring)\n p = re.compile('(@see\\s+)(\\w+\\s*)(\\([^)]*?\\))')\n docstring = p.sub(translatePythonSeeRef, docstring)\n\n # Friggin' doxygen escapes HTML character codes, so the hack we have to\n # do for Javadoc turns out doesn't work for the Python documentation.\n # Kluge around it.\n\n docstring = re.sub(r'\\\\f\\$\\\\geq\\\\f\\$', '>=', docstring)\n docstring = re.sub(r'\\\\f\\$\\\\leq\\\\f\\$', '<=', docstring)\n docstring = re.sub(r'\\\\f\\$\\\\times\\\\f\\$', '*', docstring)\n\n # SWIG does some bizarre truncation of leading characters that\n # happens to hit us because of how we have to format verbatim's.\n # This tries to kluge around it: \n p = re.compile('@verbatim.+?@endverbatim', re.DOTALL)\n docstring = p.sub(indentVerbatimForPython, docstring)\n\n return docstring",
"def make_c_header(self):\n res = \\\n\"\"\"PyThreadState* ___madz_LANG_python_thread_state; //Holds Thread State for this interpreter\nPyObject *___madz_LANG_python_wrapper_module; //Hold Pointer to the _madz.py file representing this plugin\ntypedef struct{{\n{function_pointers}\n}}___madz_LANG_python_TYPE_;\n___madz_LANG_python_TYPE_ ___madz_LANG_python_OUTPUT;\nvoid ___madz_init_imports();\n{fn_dec}\n\n\"\"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n #TODO function_pointers, all same except\n fragments ={\"fn_dec\" : \"\", \"function_pointers\" : \"\"}\n fn = \"\"\"{rettype}{fnname}({args});\\n\"\"\"\n pointer = \"\"\" {prettype} (*{nodename})({args});\\n\"\"\"\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n frg = {\n \"prettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n\n }\n fragments[\"fn_dec\"] += fn.format(**frg)\n fragments[\"function_pointers\"] += pointer.format(**frg)\n if fragments[\"function_pointers\"] == \"\":\n fragments[\"function_pointers\"] = \"uint8_t _madz_empty;\"\n return res.format(**fragments)",
"def rewriteDocstringForCSharp (docstring):\n\n # Preliminary: rewrite some of the data type references to equivalent\n # C# types. (Note: this rewriting affects only the documentation\n # comments inside classes & methods, not the actual method signatures.)\n\n docstring = docstring.replace(r'const char *', 'string ')\n docstring = docstring.replace(r'const char* ', 'string ')\n docstring = docstring.replace(r'an unsigned int', 'a long integer')\n docstring = docstring.replace(r'unsigned int', 'long')\n docstring = docstring.replace(r'const std::string&', 'string')\n docstring = docstring.replace(r'const std::string &', 'string ')\n docstring = docstring.replace(r'const std::string', 'string')\n docstring = docstring.replace(r'std::string', 'string')\n docstring = docstring.replace(r'const ', '')\n docstring = docstring.replace(r'NULL', 'null')\n docstring = docstring.replace(r'boolean', 'bool')\n\n # Use C# syntax instead of \"const XMLNode*\" etc.\n\n p = re.compile(r'const (%?)(' + r')( ?)(\\*|&)', re.DOTALL)\n docstring = p.sub(rewriteClassRefAddingSpace, docstring) \n p = re.compile(r'(%?)(' + r')( ?)(\\*|&)', re.DOTALL)\n docstring = p.sub(rewriteClassRefAddingSpace, docstring) \n\n # <code> has its own special meaning in C#; we have to turn our input\n # file's uses of <code> into <c>. Conversely, we have to turn our\n # uses of verbatim to <code>.\n\n p = re.compile(r'<code>(.+?)</code>', re.DOTALL)\n docstring = p.sub(r'<c>\\1</c>', docstring)\n p = re.compile('@verbatim(.+?)@endverbatim', re.DOTALL)\n docstring = p.sub(r'<code>\\1</code>', docstring)\n\n # Do replacements on some documentation text we sometimes use.\n\n p = re.compile(r'antimonyConstants([@.])')\n docstring = p.sub(r'antimonycs.antimony\\1', docstring)\n\n # Fix @link for constants that we forgot conditionalize in the source.\n\n p = re.compile(r'@link +([A-Z_0-9]+?)@endlink', re.DOTALL)\n docstring = p.sub(r'@link antimony.\\1@endlink', docstring)\n\n # Can't use math symbols. Kluge around it.\n\n docstring = re.sub(r'\\\\f\\$\\\\geq\\\\f\\$', '>=', docstring)\n docstring = re.sub(r'\\\\f\\$\\\\leq\\\\f\\$', '<=', docstring)\n docstring = re.sub(r'\\\\f\\$\\\\times\\\\f\\$', '*', docstring)\n\n # Some additional special cases.\n\n docstring = docstring.replace(r'SBML_formulaToString()', 'antimonycs.antimony.formulaToString()')\n docstring = docstring.replace(r'SBML_parseFormula()', 'antimonycs.antimony.parseFormula()')\n\n # Need to escape the quotation marks:\n\n docstring = docstring.replace('\"', \"'\")\n docstring = docstring.replace(r\"'\", r\"\\'\") \n\n return docstring",
"def to_PyMethodDef(name, entries, extra_includes):\r\n\r\n methodEntries = [to_PyMethodDef_entry(items) for items in entries]\r\n if name is not None:\r\n methodDef = ('static PyMethodDef %s_methods[] = {\\n ' % name +\r\n ',\\n '.join(methodEntries) + ',\\n ')\r\n else:\r\n methodDef = ',\\n'.join(methodEntries) + ',\\n '\r\n\r\n for include in extra_includes:\r\n methodDef += '#include \"%s\"\\n' % include\r\n\r\n if name is not None:\r\n methodDef += '{nullptr,nullptr,0,nullptr} // Sentinel\\n};'\r\n return methodDef",
"def get_doc(filename: str) -> str:\n\n # Create the header.\n doc = \"# `\" + filename.split(\"/\")[-1] + \"`\\n\\n\"\n\n lines: List[str] = Path(filename).read_text().split(\"\\n\")\n\n for i in range(len(lines)):\n # Create a class description.\n if lines[i].startswith(\"class\"):\n # Skip private classes.\n match = re.search(\"class _(.*):\", lines[i])\n if match is not None:\n continue\n # Add the name of the class\n class_name = re.search(\"class (.*):\", lines[i]).group(1)\n doc += f\"## `{class_name}`\\n\\n\"\n # Add an example.\n class_example = f\"`from tdw.{filename[:-3].replace('/', '.')} import \" + re.sub(r\"(.*)\\((.*)\\)\", r'\\1',\n class_name) + \"`\"\n doc += class_example + \"\\n\\n\"\n doc += PyDocGen.get_class_description(lines, i)\n # Parse an enum.\n if re.search(r\"class (.*)\\(Enum\\):\", lines[i]) is not None:\n doc += \"\\n\\n\" + PyDocGen.get_enum_values(lines, i)\n doc += \"\\n\\n***\\n\\n\"\n # Create a function description.\n elif lines[i].strip().startswith(\"def\"):\n # Skip private functions.\n match = re.search(\"def _(.*)\", lines[i])\n if match is not None and \"__init__\" not in lines[i]:\n continue\n # Append the function description.\n doc += PyDocGen.get_function_documentation(lines, i) + \"\\n\\n***\\n\\n\"\n\n # Move the \"main class\" to the top of the document.\n main_class_name = ''.join(x.capitalize() or '_' for x in filename[:-3].split('_'))\n main_class = re.search(\"(## `\" + main_class_name + \"`((.|\\n)*))\", doc)\n if main_class is not None:\n main_class = main_class.group(1)\n doc_header = re.search(\"(.*)\\n\\n\", doc).group(0)\n doc_temp = doc.replace(main_class, \"\").replace(doc_header, \"\")\n doc = doc_header + main_class + doc_temp\n\n return doc",
"def __init__ (self, isInternal, docstring, name, args, isConst):\n\n self.name = name\n self.isConst = isConst\n self.isInternal = isInternal\n\n if isInternal:\n if language == 'java':\n # We have a special Javadoc doclet that understands a non-standard\n # Javadoc tag, @internal. When present in the documentation string\n # of a method, it causes it to be excluded from the final\n # documentation output. @internal is something doxygen offers.\n #\n p = re.compile('(\\s+?)\\*/', re.MULTILINE)\n self.docstring = p.sub(r'\\1* @internal\\1*/', docstring)\n elif language == 'csharp':\n # We mark internal methods in a different way for C#.\n self.docstring = docstring\n else:\n self.docstring = \" @internal\\n\" + docstring\n else:\n self.docstring = docstring\n\n # In Java and C#, if a method is const and swig has to translate the type,\n # then for some reason swig cannot match up the resulting doc strings\n # that we put into %javamethodmodifiers. The result is that the java\n # documentation for the methods are empty. I can't figure out why, but\n # have figured out that if we omit the argument list in the doc string\n # that is put on %javamethodmodifiers for such case, swig does generate \n # the comments for those methods. This approach is potentially dangerous\n # because swig might attach the doc string to the wrong method if a\n # methods has multiple versions with varying argument types, but the\n # combination doesn't seem to arise in antimony currently, and anyway,\n # this fixes a real problem in the Java documentation for antimony.\n\n if language == 'java' or language == 'csharp':\n if isConst and (args.find('unsigned int') >= 0):\n self.args = ''\n elif not args.strip() == '()':\n if isConst:\n self.args = args + ' const'\n else:\n self.args = args\n else:\n if isConst:\n self.args = '() const'\n else:\n self.args = ''\n else:\n self.args = args",
"def convert_doxygen_docstring(lines, name):\n\n lines = lines[:]\n newlines = []\n indent = 0\n reading_desc = False\n\n while lines:\n line = lines.pop(0)\n if line.startswith(\"////\"):\n continue\n\n line = line.rstrip()\n if line.startswith('///<'):\n strline = line[4:]\n else:\n strline = line\n\n strline = strline.lstrip('/ \\t')\n\n if strline == \"**\" or strline == \"*/\":\n continue\n\n if strline.startswith(\"** \"):\n strline = strline[3:]\n elif strline.startswith(\"* \"):\n strline = strline[2:]\n elif strline == \"*\":\n strline = \"\"\n\n strline = strline.lstrip(' \\t')\n\n if strline.startswith('@'):\n special = strline.split(' ', 1)[0][1:]\n if special == 'par' and strline.endswith(':') and lines and '@code' in lines[0]:\n newlines.append(' '*indent + strline[5:] + ':')\n newlines.append('')\n line = lines.pop(0)\n offset = line.index('@code')\n while lines:\n line = lines.pop(0)\n if '@endverbatim' in line or '@endcode' in line:\n break\n newlines.append(' ' + line[offset:])\n\n newlines.append('')\n continue\n elif special == \"verbatim\" or special == \"code\":\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. code-block:: guess')\n newlines.append('')\n offset = line.index('@' + special)\n while lines:\n line = lines.pop(0)\n if '@endverbatim' in line or '@endcode' in line:\n break\n newlines.append(' ' + line[offset:])\n\n newlines.append('')\n continue\n elif special == \"f[\":\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. math::')\n newlines.append('')\n offset = line.index('@' + special)\n while lines:\n line = lines.pop(0)\n if '@f]' in line:\n break\n newlines.append(' ' + line[offset:])\n\n newlines.append('')\n continue\n elif special == 'param':\n #TODO\n #if extra is not None:\n # _, name, desc = strline.split(' ', 2)\n # extra['param:' + name] = desc\n continue\n elif special == 'deprecated':\n if newlines and newlines[-1]:\n newlines.append('')\n\n _, value = strline.split(' ', 1)\n\n # I'd love to use the proper Sphinx deprecated tag, but it\n # requires a version number, whereas Doxygen doesn't.\n newlines.append('*Deprecated:* ' + convert_doxygen_format(value, name))\n newlines.append('')\n continue\n elif special in ('brief', 'return', 'returns'):\n #TODO\n #if extra is not None:\n # _, value = strline.split(' ', 1)\n # extra[special] = value\n continue\n elif special == 'details':\n strline = strline[9:]\n elif special == 'sa' or special == 'see':\n if newlines and newlines[-1]:\n newlines.append('')\n\n _, value = strline.split(' ', 1)\n values = value.split(',')\n\n for i, value in enumerate(values):\n result = resolve_reference(value.partition('(')[0], name)\n if result:\n values[i] = ':{0}:`{1}`'.format(*result)\n else:\n values[i] = ':obj:`{0}`'.format(value)\n\n if special == 'see':\n newlines.append('See {}.'.format(', '.join(values)))\n else:\n newlines.append('See also {}.'.format(', '.join(values)))\n newlines.append('')\n continue\n elif special in ('note', 'warning'):\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. %s:: ' % (special))\n newlines.append('')\n newlines.append(' ' + convert_doxygen_format(strline[2 + len(special):], name))\n while lines and lines[0].strip(' *\\t/'):\n line = lines.pop(0).lstrip(' *\\t')\n newlines.append(' ' + convert_doxygen_format(line, name))\n\n newlines.append('')\n continue\n elif special == 'since':\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. 
versionadded:: ' + strline[7:])\n newlines.append('')\n continue\n else:\n print(\"Unhandled documentation tag: @\" + special)\n\n if strline or len(newlines) > 0:\n newlines.append(' '*indent + convert_doxygen_format(strline, name))\n\n return newlines",
"def GenPy(mod,fname):\n f = open(fname, 'w')\n title = \"\"\"#\n# This file is generated automatically\n# Author:IAN\n# http://www.iknot.org\n\"\"\"\n f.write(title)\n for i in mod.__dict__.keys():\n s = \"def \" + i + \"():\" + \"\\n\"\n f.write(s)\n s = \" return\"\n f.write(s + \"\\n\")\n f.close()\n kcs_ui.message_noconfirm('py file saved to:%s'%(fname))",
"def __write_cpp_func_name(self, cpp_file, return_type, object_suffix, in_header):\n if in_header:\n func_suffix = \";\"\n else:\n func_suffix = \" {\"\n func_name = \"Make\" + self.class_name + object_suffix + \"()\" + func_suffix\n if len(return_type + \" \" + func_name) > 80:\n print(return_type, file=cpp_file)\n print(func_name, file=cpp_file)\n else:\n print(return_type + \" \" + func_name, file=cpp_file)",
"def docstring_hack():\n pass",
"def gen_header(cmd_list):\n\ts = \"/* Warning: This file is automatically generated. Do not modify. */\\n\"\n\ts += \"#ifndef COMMGEN_H\\n\"\n\ts += \"#define COMMGEN_H\\n\\n\"\n\ts += \"#ifdef __cplusplus\\n\"\n\ts += \"extern \\\"C\\\" {\\n\"\n\ts += \"#endif\\n\\n\"\n\ts += \"#include <stdint.h>\\n\\n\"\n\ts += gen_struct_def(cmd_list)\n\ts += \"/* To avoid the volatile qualifier being a pain in the ass, the main loop\\n\"\n\ts += \" * accesses the DataReal struct through this pointer. */\\n\"\n\ts += \"extern volatile struct comm_data_t *Data;\\n\\n\"\n\ts += \"/* Parse a packet, update the struct, and send a reply. */\\n\"\n\ts += \"void parse_packet(uint8_t *buf, uint16_t count);\\n\\n\"\t\n\tfor c in cmd_list:\n\t\ts += gen_send_proto(c) + \"\\n\"\n\t\ts + gen_parse_proto(c) + \"\\n\"\n\ts += gen_packing_protos()\n\ts += gen_build_str_dec()\n\t#s += \"void send_packet(uint8_t *data, uint16_t count);\\n\\n\"\n\ts += \"#ifdef __cplusplus\\n\"\n\ts += \"}\\n\"\n\ts += \"#endif\\n\\n\"\t\n\ts += \"#endif\\n\"\n\treturn s",
"def main_docstring():",
"def gen_capi(args):\n\n if not args.header:\n return \"\"\n\n cmd = [\"ctags\", \"-x\", \"--c-kinds=fpsgx\", args.header]\n\n process = Popen(cmd, stdout=PIPE, stderr=PIPE)\n out, err = process.communicate()\n\n if process.returncode:\n return \"\"\n\n titles = {\n \"nvm_geo\": \"Geometry\",\n \"nvm_buf\": \"Buffer Allocation\",\n \"nvm_dev\": \"Device Management\",\n \"nvm_addr\": \"Addressing\",\n \"nvm_cmd\": \"Raw Commands\",\n \"nvm_vblk\": \"Virtual Block\",\n \"nvm_bbt\": \"Bad-Block-Table\"\n }\n docs = {}\n\n lib = {}\n for line in out.split(\"\\n\"):\n parts = (\" \".join(line.split())).split(\" \")[:2]\n if len(parts) < 2:\n continue\n\n name, kind = parts\n ns = \"_\".join(name.split(\"_\")[:2])\n\n if ns not in lib:\n lib[ns] = {}\n\n if kind not in lib[ns]:\n lib[ns][kind] = []\n\n lib[ns][kind].append(name)\n\n for ns in lib:\n\n if \"prototype\" in lib[ns]:\n ordering = [\n \"bbt_get\", \"bbt_set\", \"bbt_mark\", \"bbt_flush\",\n \"addr_erase\", \"addr_read\", \"addr_write\", \"addr_check\",\n \"addr_.*2\",\n \"vblk_erase\", \"vblk_p?read\", \"vblk_p?write\", \"vblk_pad\",\n \"lba_p?read\", \"lba_p?write\",\n \"_alloc\", \"_fill\", \"_free\", \"_pr\",\n \"_get_\", \"_set_\"\n ]\n\n ordered = []\n for order in ordering:\n for func in lib[ns][\"prototype\"]:\n if re.search(order, func):\n if func not in ordered:\n ordered.append(func)\n\n lib[ns][\"prototype\"] = list(\n set(lib[ns][\"prototype\"]) -\n set(ordered)\n ) + ordered\n\n title = \"%s - %s\" % (ns, titles[ns]) if ns in titles else ns\n\n rst = \"\\n\".join([\n \".. _sec-capi-%s:\" % ns, \"\",\n title,\n \"=\" * len(title),\n \"\", \"\"\n ])\n\n if \"typedefs\" in lib[ns]:\n for typedef in lib[ns][\"typedefs\"]:\n rst += \"\\n\".join([\n typedef,\n \"-\" * len(typedef), \"\",\n \".. doxygentypedef:: %s\" % typedef,\n \"\", \"\"\n ])\n\n for mangler in [\"struct\", \"externvar\"]:\n if mangler in lib[ns]:\n for struct in lib[ns][mangler]:\n rst += \"\\n\".join([\n struct,\n \"-\" * len(struct), \"\",\n \".. doxygenstruct:: %s\" % struct,\n \" :members:\",\n \"\", \"\"\n ])\n\n if \"enum\" in lib[ns]:\n for enum in lib[ns][\"enum\"]:\n rst += \"\\n\".join([\n enum,\n \"-\" * len(enum), \"\",\n \".. doxygenenum:: %s\" % enum,\n \"\", \"\"\n ])\n\n if \"prototype\" in lib[ns]:\n for func in lib[ns][\"prototype\"]:\n rst += \"\\n\".join([\n func,\n \"-\" * len(func), \"\",\n \".. doxygenfunction:: %s\" % func,\n \"\", \"\"\n ])\n\n docs[ns] = rst\n\n return docs",
"def generate(self, src_fname: str):\n fname, _ = os.path.splitext(src_fname)\n graph_name, _ = os.path.splitext(os.path.basename(self.pb_file))\n header_fname = '{}.hpp'.format(fname)\n header_snippet = Snippet(\"get_ctx.hpp\")\n header_snippet.template_vars[\"header_guard\"] = \"_{}_H\".format(fname.upper())\n header_snippet.template_vars[\"graph_name\"] = graph_name\n header_snippet.template_vars[\"placeholders\"] = []\n\n composer = Composer()\n container = SnippetContainer(\"get_ctx.cpp\")\n container.template_vars[\"graph_name\"] = graph_name\n container.template_vars[\"placeholders\"] = []\n container.add_header('\"{}\"'.format(header_fname))\n\n print(\"Parsing {}\".format(self.pb_file))\n graph_info, layers = parse_pb(self.pb_file)\n\n # TODO better snippet construction abstraction\n for layer_id, layer in enumerate(layers, 1):\n for op_name in layer:\n op_info = graph_info[op_name]\n op_type = op_info[\"op_type\"]\n if op_type == \"Placeholder\":\n out_tname, _, _ = op_info[\"output_tensor\"][0]\n container.template_vars[\"placeholders\"].append(out_tname)\n header_snippet.template_vars[\"placeholders\"].append(out_tname)\n elif op_type == 'Const':\n for out_tname, out_dtype, _ in op_info[\"output_tensor\"]:\n pre_tname = self._prepare_tensor_name(out_tname)\n idx_fname = \"{}.idx\".format(pre_tname)\n snippet = CreateTensorIdxSnippet(self.embed_data_dir, out_tname,\n idx_fname=idx_fname,\n tf_dtype=out_dtype)\n container.add_snippet(snippet)\n idx_path = os.path.join(self.idx_dir, idx_fname)\n value = op_info[\"output_content\"][out_tname]\n self._save_data(idx_path, value, out_dtype)\n elif op_type == \"Add\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n output, _, _ = op_info[\"output_tensor\"][0]\n tf_dtype = op_info[\"input_tensor\"][0][1]\n snippet = AddOpSnippet(inputs, output, tf_dtype=tf_dtype)\n container.add_snippet(snippet)\n elif op_type == \"ArgMax\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n output, out_dtype, _ = op_info[\"output_tensor\"][0]\n _, in_dtype, _ = op_info[\"input_tensor\"][0]\n snippet = ArgMaxOpSnippet(inputs, output, in_dtype, out_dtype)\n container.add_snippet(snippet)\n elif op_type == \"Dequantize\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n output, out_dtype, _ = op_info[\"output_tensor\"][0]\n snippet = DequantizeOpSnippet(inputs, output, out_dtype)\n container.add_snippet(snippet)\n elif op_type == \"Max\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n output, out_dtype, out_shape = op_info[\"output_tensor\"][0]\n if len(out_shape) == 0: # dirty hack for uTensor\n out_shape = [1]\n snippet = MaxOpSnippet(inputs, output, out_dtype, out_shape)\n container.add_snippet(snippet)\n elif op_type == \"Min\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n output, out_dtype, out_shape = op_info[\"output_tensor\"][0]\n if len(out_shape) == 0: # dirty hack for uTensor\n out_shape = [1]\n snippet = MinOpSnippet(inputs, output, out_dtype, out_shape)\n container.add_snippet(snippet)\n elif op_type == \"QuantizeV2\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n outputs = [tname for tname, _, _ in op_info[\"output_tensor\"]]\n out_dtype = op_info[\"output_tensor\"][0][1]\n snippet = QuantizeV2OpSnippet(inputs, outputs, out_dtype)\n container.add_snippet(snippet)\n elif op_type == \"QuantizedMatMul\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n outputs = [tname for tname, _, _ in op_info[\"output_tensor\"]]\n 
x_dtype = op_info[\"input_tensor\"][0][1]\n w_dtype = op_info[\"input_tensor\"][1][1]\n out_dtype = op_info[\"output_tensor\"][0][1]\n snippet = QuantizedMatMulOpSnippet(inputs, outputs, x_dtype, w_dtype, out_dtype)\n container.add_snippet(snippet)\n elif op_type == \"QuantizedRelu\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n outputs = [tname for tname, _, _ in op_info[\"output_tensor\"]]\n _, in_dtype, _ = op_info[\"input_tensor\"][0]\n _, qout_dtype, _ = op_info[\"output_tensor\"][0]\n out_dtypes = [t[1] for t in op_info[\"output_tensor\"][1:]]\n snippet = QuantizedReluOpSnippet(inputs, outputs, in_dtype, out_dtypes, qout_dtype)\n container.add_snippet(snippet)\n elif op_type == \"RequantizationRange\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n outputs = [tname for tname, _, _ in op_info[\"output_tensor\"]]\n _, out_dtype, _ = op_info[\"output_tensor\"][0]\n snippet = RequantizationRangeOpSnippet(inputs, outputs, out_dtype)\n container.add_snippet(snippet)\n elif op_type == \"Requantize\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n outputs = [tname for tname, _, _ in op_info[\"output_tensor\"]]\n _, qout_dtype, _ = op_info[\"output_tensor\"][0]\n _, range_dtype, _ = op_info[\"output_tensor\"][1]\n snippet = RequantizeOpSnippet(inputs, outputs, qout_dtype, range_dtype)\n container.add_snippet(snippet)\n elif op_type == \"Reshape\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n output, _, _ = op_info[\"output_tensor\"][0]\n snippet = ReshapeOpSnippet(inputs, output)\n container.add_snippet(snippet)\n else:\n raise ValueError(\"unsupported op type in uTensor: {}, try quantizing your graph\".format(op_type))\n if self.debug_cmt:\n comments = [\"<<< Graph Layer {}\".format(layer_id), \n \">>> Graph Layer {}\".format(layer_id+1)]\n cmt_snippet = CommentSnippet(comments)\n container.add_snippet(cmt_snippet)\n composer.add_snippet(container)\n\n print(\"Generate header file: {}\".format(header_fname))\n with open(header_fname, \"w\") as wf:\n wf.write(header_snippet.render())\n print(\"Generate source file: {}\".format(src_fname))\n with open(src_fname, \"w\") as wf:\n wf.write(composer.compose())",
"def write_method_doc(file_name, entries):\r\n\r\n with open(file_name, 'w', newline='\\n') as f:\r\n f.write('<table border=\"0\">')\r\n f.write('<tr><td><b>Method</b></td><td><b>Description</b></td></tr>')\r\n for items in sorted(entries, key=itemgetter(3)):\r\n f.write('<tr><td valign=\"top\">%s</td><td>%s</td></tr>' %\r\n (items[3], doc_to_html(items[4])))\r\n f.write('</table>')",
"def test_0_check_xc_docstring(self):\n self.banner(\"Checking the docstring on your extra credit.\") \n filename = self.find_file('project9_xc.py')\n self.check_docstring(filename)",
"def make_c_function_stubs(self):\n fn =\\\n\"\"\"{rettype} {fnname}({args}){{\n {rettype} ret;\n\n ret = {cast_and_deref}___madz_LANG_python_OUTPUT.{nodename}({argnames});\n\n return ret;\n}}\n\n\"\"\"\n fn_no_return =\\\n\"\"\"{rettype} {fnname}({args}){{\n ___madz_LANG_python_OUTPUT.{nodename}({argnames});\n return;\n}}\n\n\"\"\"\n res = \"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n fragments = {\n \"maybe_parentheses\": \")\" if isinstance(node.type.return_type.get_type(),pdl.TypeStruct) else \"\",\n \"cast_and_deref\": self.make_c_cast_deref_string(c_gen, node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n \"argnames\":\",\".join(map(\n lambda a: a.name,\n node.type.args))\n }\n res += (fn if not isinstance(node.type.return_type, pdl.TypeTypeNone) else fn_no_return).format(**fragments)\n return res",
"def modify_header():\n\n print_debug_info()\n if not bool(int(vim.eval(\"g:BHModify\"))):\n return\n\n if not should_do_write():\n debug(\"should not write this buffer.\")\n return\n\n if not has_header():\n debug(\"This file has no header.\")\n return add_header()\n\n # only if the suffix is supported and we have a method to strip the comment.\n if not ((\"extract_comment_%s\" % SUFFIX) in globals() and suffix_is_supported()):\n return\n\n comment = globals()[\"extract_comment_%s\" % SUFFIX]()\n debug(\"comment: %s\" % str(comment))\n if not comment:\n debug(\"comment is empty\")\n return\n\n comment_dict = {}\n\n if len(comment) < 3:\n # Less than 3 lines of original comment, put them in Description part.\n comment_dict['Description'] = '\\n'.join(comment)\n else:\n comment_dict = read_comment(comment)\n if \"\" in comment_dict:\n del comment_dict[\"\"]\n new_header_dict = read_comment(globals().get(\"%s_header\" % SUFFIX).rstrip().splitlines())\n debug(\"new\")\n debug(set(new_header_dict.keys()))\n debug(set(comment_dict.keys()))\n debug(\"end\")\n if not set(new_header_dict.keys()) == set(comment_dict.keys()):\n return prepend_header(render_header(comment_dict))\n else:\n debug(\"do not modify header since we already have the same header.\")",
"def _make_source(name, init, body):\n code = \"\"\"\n #include <Python.h>\n\n %(body)s\n\n PyMODINIT_FUNC\n PyInit_%(name)s(void) {\n %(init)s\n }\n \"\"\" % dict(\n name=name, init=init, body=body,\n )\n return code",
"def generate_header():\n header_file = AUTOGEN_WARNING\n header_file += \"/// /file atomic_nuclear_data.h\\n\"\n header_file += \"/// /author Andrew Davis ([email protected])\\n\"\n header_file += \"///\\n\"\n header_file += (\n \"/// /brief Implements all the fundamental atomic & nuclear data data\\n\"\n )\n header_file += \"#include <map>\\n\"\n header_file += \"\\n\"\n header_file += \"namespace pyne\\n\"\n header_file += \"{\\n\"\n header_file += (\n \" /// main function to be called when you wish to load the nuclide data \\n\"\n )\n header_file += \" /// into memory \\n\"\n header_file += \" void _load_atomic_mass_map_memory();\\n\"\n header_file += \" /// function to create mapping from nuclides in id form\\n\"\n header_file += \" /// to their atomic masses\\n\"\n header_file += \" \\n\"\n header_file += \" void _insert_atomic_mass_map();\\n\"\n header_file += \" \\n\"\n header_file += \" /// function to create mapping from nuclides in id form \\n\"\n header_file += \" /// to their natural abundances\\n\"\n header_file += \" void _insert_abund_map();\\n\"\n header_file += \" \\n\"\n header_file += (\n \" /// Mapping from nuclides in id form to their natural abundances\\n\"\n )\n header_file += \" extern std::map<int,double> natural_abund_map;\\n\"\n header_file += \" \\n\"\n header_file += \" /// Mapping from nuclides in id form to their atomic masses.\\n\"\n header_file += \" extern std::map<int,double> atomic_mass_map;\\n\"\n header_file += \" \\n\"\n header_file += (\n \" /// Mapping from nuclides in id form to the associated error in \\n\"\n )\n header_file += \" /// abdundance \\n\"\n header_file += \" extern std::map<int,double> atomic_mass_error_map;\\n\"\n header_file += \"} // namespace pyne\\n\"\n return header_file",
"def write_result(file_name, name, entries, extra_includes, src_file_names):\r\n\r\n with open(file_name, 'w', newline='\\n') as f:\r\n f.write('// Generated by %s\\n' % os.path.basename(__file__))\r\n f.write('// Based on %s: %s\\n' %\r\n ((\"this file\" if len(src_file_names) < 2 else\r\n \"these files\"), \", \".join(src_file_names)))\r\n methods = entries[0]\r\n if len(methods) != 0:\r\n f.write(to_PyMethodDef(name, methods, extra_includes))\r\n f.write('\\n')\r\n\r\n properties = entries[1]\r\n if len(properties) != 0:\r\n f.write('\\n')\r\n f.write(to_PyGetSetDef(name, properties))",
"def getComments(source):\n\n markup = []\n for f in source:\n markup += extractMarkup(f)\n\n docs = collateDocs(markup)\n return docs",
"def cppdoc(self, irc, msg, args, num, req):\n self.googleq('www.cplusplus.com/reference/', req, num, irc)",
"def to_PyGetSetDef_entry(cpp_struct_name, py_name, doc):\r\n return 'PROPERTY_FORWARDER(%s, \"%s\", %s)' % (\r\n cpp_struct_name, py_name, doc)",
"def gen_cheader(protocol):\n\ts = \"\"\"/* Junior Design Sp2018 Final Project\n * Robot Firmware - RPi <-> Microcontroller Communication\n * Nick Ames 2018\n * WARNING: This file is automatically generated by gen-files.py\n * Any changes you make will be erased.\n */\n#include <stdfix.h>\n#include <stdint.h>\n#include \"config.h\"\n\n\"\"\"\n\ts += \"struct comm_data_t {\\n\"\n\tfor r in protocol:\n\t\ts += \"\\t\" + r.size + \" \" + r.name + \"; /* \" + r.desc + \" */\\n\"\n\ts += \"};\\n\\n\"\n\tfor r in protocol:\n\t\ts += \"%s get_%s(void); /* %s */\\n\"%(r.size, r.name, r.desc)\n\t\ts += \"void set_%s(%s); /* %s */\\n\\n\"%(r.name, r.size, r.desc)\n\ts += \"\"\"extern volatile struct comm_data_t Data;\"\"\"\n\treturn s",
"def __GetWrapperFileName(cls, src):\n return FileUtils.GetBinPathForFile(src).replace('.i', '.swig.cc')",
"def rewriteDocstringForJava (docstring):\n\n # Preliminary: rewrite some of the data type references to equivalent\n # Java types. (Note: this rewriting affects only the documentation\n # comments inside classes & methods, not the method signatures.)\n\n docstring = docstring.replace(r'const char *', 'String ')\n docstring = docstring.replace(r'const char* ', 'String ')\n docstring = docstring.replace(r'an unsigned int', 'a long integer')\n docstring = docstring.replace(r'unsigned int', 'long')\n docstring = docstring.replace(r'const std::string&', 'String')\n docstring = docstring.replace(r'const std::string &', 'String ')\n docstring = docstring.replace(r'const std::string ', 'String ')\n docstring = docstring.replace(r'std::string', 'String')\n docstring = docstring.replace(r'NULL', 'null')\n\n # Also use Java syntax instead of \"const XMLNode*\" etc.\n\n p = re.compile(r'const (%?)(' + r')( ?)(\\*|&)', re.DOTALL)\n docstring = p.sub(rewriteClassRefAddingSpace, docstring) \n p = re.compile(r'(%?)(' + r')( ?)(\\*|&)', re.DOTALL)\n docstring = p.sub(rewriteClassRefAddingSpace, docstring) \n\n # Do the big work.\n\n docstring = sanitizeForHTML(docstring)\n\n # Fix up for a problem introduced by sanitizeForHTML: it puts {@link ...}\n # into the arguments of functions mentioned in @see's, if the function\n # has more than one argument. This gets rid of the @link's. This should\n # be fixed properly some day.\n\n p = re.compile(r'((@see|@throws)\\s+[\\w\\\\ ,.\\'\"=<>()#]*?){@link\\s+([^}]+?)}')\n while re.search(p, docstring) != None:\n docstring = p.sub(r'\\1\\3', docstring)\n\n # Inside of @see, change double colons to pound signs.\n\n docstring = re.sub('(@see\\s+\\w+)::', r'\\1#', docstring)\n\n # The syntax for @see is slightly different: method names need to have a\n # leading pound sign character. This particular bit of code only handles\n # a single @see foo(), which means the docs have to be written that way.\n # Maybe someday in the future it should be expanded to handle\n # @see foo(), bar(), etc., but I don't have time right now to do it.\n\n docstring = re.sub('(@see\\s+)([\\w:.]+)\\(', r'\\1#\\2(', docstring)\n\n # Remove the '*' character that Javadoc doesn't want to see in @see's.\n # (This doesn't make a difference; javadoc still can't match up the refs.)\n\n # p = re.compile('@see[\\s\\w.:,()#]+[*][\\s\\w.:,()*#]')\n # docstring = p.sub(removeStar, docstring)\n\n # The syntax for @link is vastly different.\n \n p = re.compile('@link([\\s/*]+[\\w\\s,.:#()*]+[\\s/*]*[\\w():#]+[\\s/*]*)@endlink', re.DOTALL)\n docstring = p.sub(r'{@link \\1}', docstring)\n\n # Outside of @see and other constructs, dot is used to reference members\n # instead of C++'s double colon.\n\n docstring = docstring.replace(r'::', '.')\n\n # Need to escape quotation marks. The reason is that the\n # %javamethodmodifiers directives created for use with SWIG will\n # themselves be double-quoted strings, and leaving embedded quotes\n # will completely screw that up.\n\n docstring = docstring.replace('\"', \"'\")\n docstring = docstring.replace(r\"'\", r\"\\'\")\n\n return docstring",
"def dumpDoc(modulename, directory=None):\n docco = getObjectsDefinedIn(modulename, directory)\n print('codegrab.py - ReportLab Documentation Utility')\n print('documenting', modulename + '.py')\n print('-------------------------------------------------------')\n print()\n if docco.functions == []:\n print('No functions found')\n else:\n print('Functions:')\n for f in docco.functions:\n print(f.proto)\n print(' ' + f.doc)\n\n if docco.classes == []:\n print('No classes found')\n else:\n print('Classes:')\n for c in docco.classes:\n print(c.name)\n print(' ' + c.doc)\n for m in c.methods:\n print(m.proto) # it is already indented in the file!\n print(' ' + m.doc)\n print()",
"def c_comment(self, token: Token):\n if token.value.startswith(\"/**\"):\n self.doc_comments.append(PrefixCppDocComment(token))"
] | [
"0.67592466",
"0.58502406",
"0.58490044",
"0.5828653",
"0.5795212",
"0.5750393",
"0.57325697",
"0.5689374",
"0.5534153",
"0.5486456",
"0.5484368",
"0.54745",
"0.546431",
"0.54593277",
"0.5433575",
"0.5428016",
"0.5416794",
"0.5406283",
"0.537415",
"0.53738326",
"0.53385717",
"0.5337847",
"0.5298798",
"0.52854407",
"0.5277117",
"0.525556",
"0.5236064",
"0.5234041",
"0.52104205",
"0.5208136"
] | 0.6063957 | 1 |
Generate headers with a Python method-def array and html documentation tables for the listed source files. | def generate_headers(src_files, out_root, doc_root):
if not os.path.exists(out_root):
os.makedirs(out_root)
did_print_heading = False
changed = False
for (name, files) in src_files:
if files.__class__ == str:
src = files
files = (src,)
else:
src = files[0]
dst = src.replace(".hh", "-method-def.hh")
dst = dst.replace(".cpp", "-method-def.hh")
dst = os.path.join(out_root, os.path.split(dst)[1])
dst_doc = src.replace(".hh", '-methods.txt')
dst_doc = dst_doc.replace(".cpp", '-methods.txt')
dst_doc_filename = os.path.split(dst_doc)[1]
dst_doc_filename = os.path.join(doc_root, dst_doc_filename)
dst_prop_doc = src.replace(".cpp", '-properties.txt')
dst_doc_prop_filename = os.path.split(dst_prop_doc)[1]
dst_doc_prop_filename = os.path.join(doc_root, dst_doc_prop_filename)
if util.changed(src, dst):
if not did_print_heading:
print("* Generating Python method definitions.")
did_print_heading = True
generate(files, dst, dst_doc_filename, dst_doc_prop_filename, name)
changed = True
if not changed:
print("* Python method definitions up to date.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_included_function_list_readme():\n import iteration_utilities\n from iteration_utilities import Iterable\n from itertools import chain\n from operator import itemgetter\n from astropy.table import Table\n from astropy.io.ascii import RST\n\n rtd_link = '`{name} <http://iteration-utilities.readthedocs.io/en/latest/api/{file}.html#{module}.{name}>`_'\n\n module_to_file = {'iteration_utilities': 'cfuncs',\n 'iteration_utilities._cfuncs': 'cfuncs',\n 'iteration_utilities._helpers._performance': 'helper',\n 'iteration_utilities._recipes._additional': 'additional',\n 'iteration_utilities._recipes._core': 'core',\n }\n\n it = Iterable(chain(iteration_utilities._cfuncs.__dict__.items(),\n iteration_utilities._helpers._performance.__dict__.items(),\n iteration_utilities._recipes._core.__dict__.items(),\n iteration_utilities._recipes._additional.__dict__.items())\n # Exclude PY2 variable and private functions\n ).filterfalse(lambda x: x[0].startswith(('PY2', '_'))\n # Exclude everything that has no __module__\n ).filter(lambda x: hasattr(x[1], '__module__')\n # Only include functions that come from the package\n ).filter(lambda x: x[1].__module__.startswith('iteration_utilities')\n # Remove duplicate names\n ).unique_everseen(itemgetter(0)\n # Sort lexically\n ).get_sorted(key=lambda x: x[0].lower())\n\n it = Iterable(it\n # Create a Sphinx link from function name and module\n ).map(lambda i: rtd_link.format(file = module_to_file[i[1].__module__],\n module = i[1].__module__,\n name = i[0])\n # Group into 4s so we get a 4 column Table\n ).grouper(4, fillvalue=''\n # Convert to list because Table expects it.\n ).as_list()\n\n return '\\n'.join(RST().write(Table(rows=it)))",
"def html_index(source_files: iter([SourceFile]), compile_root: str) -> str:\n def single_summary(source_file: SourceFile) -> str:\n (covered, lines) = source_file.coverage_stats()\n (br_covered, br_count, _, _) = source_file.branch_stats()\n (fn_covered, fn_count) = source_file.function_stats()\n (coverage_percent, coverage_health) = to_percentage(covered, lines, 90, 75)\n (branch_percent, branch_health) = to_percentage(br_covered, br_count, 75, 50)\n (fn_percent, fn_health) = to_percentage(fn_covered, fn_count, 90, 75)\n\n\n return '''<tr>\n <td><a href=\"{}\">{}</a></td>\n <td class=\"cov-health-{}\" title=\"{}/{}\">{}%</td>\n <td class=\"cov-health-{}\" title=\"{}/{}\">{}%</td>\n <td class=\"cov-health-{}\" title=\"{}/{}\">{}%</td>\n </tr>'''.format(\n to_html_filename(source_file.source_name),\n escape(source_file.source_name),\n coverage_health, covered, lines, coverage_percent,\n branch_health, br_covered, br_count, branch_percent,\n fn_health, fn_covered, fn_count, fn_percent\n )\n\n title = escape(compile_root)\n\n html_res = [\"\"\"\n <!DOCTYPE html>\n <html>\n <head>\n <title>Coverage report for \"\"\" + title + \"\"\"</title>\n <style type=\"text/css\">\n /*<![CDATA[*/\n .cov-health-all { background-color: #80FF80; }\n .cov-health-zero { background-color: black; color: white; }\n .cov-health-good { background-color: yellow; }\n .cov-health-normal { background-color: orange; }\n .cov-health-bad { background-color: red; }\n td { text-align: right; padding: 0.1em 0.5em; }\n td:first-child { text-align: left; }\n table { border-collapse: collapse; }\n tr { border: 1px solid black; }\n /*]]>*/\n </style>\n <script src=\"sorttable.js\"></script>\n </head>\n <body>\n <h1>Coverage report for \"\"\" + title + \"\"\"</h1>\n <div><table class=\"sortable\">\n <thead><tr><th>File</th><th>Lines</th><th>Branch</th><th>Functions</th></tr></thead>\n <tbody>\n \"\"\"]\n\n html_res.extend(single_summary(s) for s in source_files)\n html_res.append('</tbody></table></div></body></html>')\n\n return '\\n'.join(html_res)",
"def __init__(self,\n source_path='./*.py',\n template_path='./docs/templates/*_template.md',\n output_path='./docs/documentation.md',\n ignore=['extra']\n ):\n\n template_files = glob.glob(template_path)\n # filename = t.split('/')[-1]\n self.sources = {os.path.basename(s).split('.')[0]: os.path.normpath(s) for s in glob.glob(source_path) if not any(i in s for i in ignore)}\n self.templates = {os.path.basename(t).split('_')[0]: os.path.normpath(t) for t in template_files}\n self.output_path = output_path\n\n self.template_content = {}\n for k, v in self.templates.items():\n path = v\n with open(path, 'r') as template_file:\n self.template_content[k] = template_file.read()\n\n self.text = ''\n self.classes = []\n self.headers = ['Params', 'Returns', 'Attributes']\n self.hierarchy = [\n 'class',\n 'method',\n 'parameter',\n 'pinfo',\n 'extra'\n ]\n self.tab_length = 6",
"def _generate_header_files(self):\n return True",
"def generate_docs(self) -> List[Path]:\n outputs = []\n for file in self.files:\n if (stem := file.stem) == \"__init__\":\n # We might have more than one __init__.py file depending on package structure and these files shouldn't\n # contain methods, so we don't want to convert them\n continue\n\n if not (doc := get_doc(file)):\n continue # No docstring returned, skip this file\n doc = doc[33:] # First 33 characters are not required for our docs\n\n # Write the output we've generated to a file\n (output := self.directory / f\"{stem}.md\").write_text(generate_header(stem) + doc)\n outputs.append(output)\n return outputs",
"def write_method_doc(file_name, entries):\r\n\r\n with open(file_name, 'w', newline='\\n') as f:\r\n f.write('<table border=\"0\">')\r\n f.write('<tr><td><b>Method</b></td><td><b>Description</b></td></tr>')\r\n for items in sorted(entries, key=itemgetter(3)):\r\n f.write('<tr><td valign=\"top\">%s</td><td>%s</td></tr>' %\r\n (items[3], doc_to_html(items[4])))\r\n f.write('</table>')",
"def generate_headers(self):\n raise NotImplementedError()",
"def Show_Headers( self ):\r\n self.system.Change_Seq( \"Header\" )",
"def create_file_overview_doc() -> None:\n folder_file_docstring = generate_folder_file_docstrings_dict()\n\n table_markdown = []\n for folder in folder_file_docstring:\n dictionary_of_files = folder_file_docstring[folder]\n\n folder_text = folder\n for file in sorted(dictionary_of_files):\n new_row = f\"| {folder_text:{COLUMN_WIDTHS[0]}}| {file:{COLUMN_WIDTHS[1]}}| {folder_file_docstring[folder][file]:{COLUMN_WIDTHS[2]}}|\\n\"\n table_markdown.append(new_row)\n folder_text = \" \"\n\n # adding blank row at the end of every folder\n table_markdown.append(BLANK_ROW)\n\n # Folders that do not contain any files with docstrings are added separately to the file-overview\n for folder_name in sorted(DESC_FOR_NO_FILE_FOLDERS):\n new_row = f\"| {folder_name:{COLUMN_WIDTHS[0]}}| {SPACEBAR:{COLUMN_WIDTHS[1]}}| {DESC_FOR_NO_FILE_FOLDERS[folder_name]:{COLUMN_WIDTHS[2]}}|\\n\"\n table_markdown.extend([new_row, BLANK_ROW])\n\n with open(DEVELOPER_DOC_PATH, \"r\") as dev_file:\n doc_data = dev_file.readlines()\n\n doc_data[TABLE_OFFSET - 1 :] = table_markdown[:-1]\n updated_data = \"\".join(doc_data)\n\n with open(DEVELOPER_DOC_PATH, \"w\") as dev_file:\n dev_file.write(updated_data)",
"def _header(self, path, files):\n headers = [fits.getheader(os.path.join(path, f))\n for f in sorted(files)]\n N = len(headers)\n\n def mean_key(headers, key, comment, type):\n return (np.mean([type(h[key]) for h in headers]), comment)\n\n h = fits.Header()\n h['BUNIT'] = 'e-/s'\n h['ORIGIN'] = 'Zwicky Transient Facility', 'Data origin'\n h['OBSERVER'] = 'ZTF Robotic Software', 'Observer'\n h['INSTRUME'] = 'ZTF/MOSAIC', 'Instrument name'\n h['OBSERVAT'] = 'Palomar Observatory', 'Observatory'\n h['TELESCOP'] = 'Palomar 48-inch', 'Observatory telescope'\n h['OBSLON'] = -116.8597, 'Observatory longitude (deg)'\n h['OBSLAT'] = 33.3483, 'Observatory latitude (deg E)'\n h['OBSALT'] = 1706., 'Observatory altitude (m)'\n h['IMGTYPE'] = 'object', 'Image type'\n h['NIMAGES'] = N, 'Number of images in stack'\n h['EXPOSURE'] = (sum([_['EXPOSURE'] for _ in headers]),\n 'Total stack exposure time (s)')\n if len(headers) == 0:\n return h\n\n h['MAGZP'] = 25.0, 'Magnitude zero point, solar color'\n h['MAGZPRMS'] = (\n np.sqrt(np.sum([h.get('MAGZPRMS', 0)**2 for h in headers])) / N,\n 'Mean MAGZP RMS')\n h['PCOLOR'] = headers[0]['PCOLOR']\n h['CLRCOEFF'] = mean_key(headers, 'CLRCOEFF',\n 'Mean color coefficient', float)\n\n h['OBSJD1'] = float(headers[0]['OBSJD']), 'First shutter start time'\n h['OBSJDN'] = float(headers[-1]['OBSJD']), 'Last shutter start time'\n h['OBSJDM'] = mean_key(\n headers, 'OBSJD', 'Mean shutter start time', float)\n\n wcsfn = sorted(files)[0]\n wcs = WCS(fits.getheader(os.path.join(path, wcsfn),\n extname='SANGLE'))\n h.update(wcs.to_header())\n h['WCSORIGN'] = wcsfn\n\n h['DBPID'] = (','.join([str(_['DBPID']) for _ in headers]),\n 'Database processed-image IDs')\n h['DESG'] = headers[0]['DESG'], 'Target designation'\n for k, comment in {\n 'RH': 'Mean heliocentric distance (au)',\n 'DELTA': 'Mean observer-target distance (au)',\n 'PHASE': 'Mean Sun-target-observer angle (deg)',\n 'RDOT': 'Mean heliocentric radial velocity, km/s',\n 'SELONG': 'Mean solar elongation, deg',\n 'SANGLE': 'Mean projected target->Sun position angle, deg',\n 'VANGLE': 'Mean projected velocity position angle, deg',\n 'TRUEANOM': 'Mean true anomaly (osculating), deg',\n 'TMTP': 'Mean T-Tp (osculating), days',\n 'TGTRA': 'Mean target RA, deg',\n 'TGTDEC': 'Mean target Dec, deg',\n 'TGTDRA': 'Mean target RA*cos(dec) rate of change,arcsec/s',\n 'TGTDDEC': 'Mean target Dec rate of change, arcsec/s',\n 'TGTRASIG': 'Mean target RA 3-sigma uncertainty, arcsec',\n 'TGTDESIG': 'Mean target Dec 3-sigma uncertainty, arcsec',\n }.items():\n try:\n h[k] = mean_key(headers, k, comment, float)\n except ValueError:\n # target rates might be empty strings\n h[k] = ''\n\n return h",
"def first_header():\n return \"\"\"\n<th>Target\n<th>Date\n<th colspan=\"2\">UT\n<th>Exp\n<th>Cycle\n<th>No. of\n<th>Filters\n<th>XxY\n<th>Speed\n<th>NX1xNY1\n<th>X1\n<th>Y1\n<th>NX2xNY2\n<th>X2\n<th>Y2\n<th>Grat.\n<th>Slit\n<th>Slit\n<th>ID\n<th>PI\n<th align=\"left\">Comment\n\"\"\"",
"def generate_header():\n header_file = AUTOGEN_WARNING\n header_file += \"/// /file atomic_nuclear_data.h\\n\"\n header_file += \"/// /author Andrew Davis ([email protected])\\n\"\n header_file += \"///\\n\"\n header_file += (\n \"/// /brief Implements all the fundamental atomic & nuclear data data\\n\"\n )\n header_file += \"#include <map>\\n\"\n header_file += \"\\n\"\n header_file += \"namespace pyne\\n\"\n header_file += \"{\\n\"\n header_file += (\n \" /// main function to be called when you wish to load the nuclide data \\n\"\n )\n header_file += \" /// into memory \\n\"\n header_file += \" void _load_atomic_mass_map_memory();\\n\"\n header_file += \" /// function to create mapping from nuclides in id form\\n\"\n header_file += \" /// to their atomic masses\\n\"\n header_file += \" \\n\"\n header_file += \" void _insert_atomic_mass_map();\\n\"\n header_file += \" \\n\"\n header_file += \" /// function to create mapping from nuclides in id form \\n\"\n header_file += \" /// to their natural abundances\\n\"\n header_file += \" void _insert_abund_map();\\n\"\n header_file += \" \\n\"\n header_file += (\n \" /// Mapping from nuclides in id form to their natural abundances\\n\"\n )\n header_file += \" extern std::map<int,double> natural_abund_map;\\n\"\n header_file += \" \\n\"\n header_file += \" /// Mapping from nuclides in id form to their atomic masses.\\n\"\n header_file += \" extern std::map<int,double> atomic_mass_map;\\n\"\n header_file += \" \\n\"\n header_file += (\n \" /// Mapping from nuclides in id form to the associated error in \\n\"\n )\n header_file += \" /// abdundance \\n\"\n header_file += \" extern std::map<int,double> atomic_mass_error_map;\\n\"\n header_file += \"} // namespace pyne\\n\"\n return header_file",
"def print_doc_tables(services, events):\n\n head_1 = \"\"\"### API services\n\nTBD provides the following services:\n\n|SERVICES | DESCRIPTION|\n|---| ---|\"\"\"\n\n head_2 = \"\"\"### API events\n\nTBD listens and consumes the following messages from the bus:\n\n|MESSAGES CONSUMED | DESCRIPTION|\n|---| ---|\"\"\"\n\n head_3 = \"\"\"TBD generates and publishes the following messages:\n\n|MESSAGES PUBLISHED | DESCRIPTION|\n|---| ---|\"\"\"\n\n def table_row(event_type):\n s = \"\"\n s += \"|[*%s*](#%s) | Some description that needs to be writen manually |\" \\\n % (\n event_type,\n event_type.replace('.', '-')\n )\n return s\n\n print(head_1)\n for s in services:\n print(table_row(s.routing_key))\n\n print()\n print()\n print(head_2)\n for e in events:\n print(table_row(e.routing_key))\n\n print()\n print()\n print(head_3)\n for e in events:\n print(table_row(e.routing_key))",
"def archives_doc(ctx: click.Context, sources: Set[Path], state: State) -> None:\n modules = {\n file.parts[-1]: parse_module(str(file.absolute())).serialize()\n for file in sources\n }\n\n out(modules)\n ctx.exit(0)",
"def _generate_headlines(self):\n includes = set()\n for decl in self._ast.decls:\n includes.add(decl.cpp_file)\n for include in includes:\n yield f'#include \"{include}\"'\n yield '#include \"third_party/pybind11/include/pybind11/pybind11.h\"'\n yield ''\n yield 'namespace py = pybind11;'\n yield ''",
"def toc(self):\n toc = []\n header_cells = (cell for cell in self.markdown_cells() if cell.source.startswith(\"##\"))\n for header_cell in header_cells:\n header = header_cell.source.splitlines()[0].strip().split()\n txt = ' '.join(header[1:])\n url = '#'.join([self.html_url, '-'.join(header[1:])])\n toc.append(\" \" * (len(header[0]) - 2) + f\"- [{txt}]({url})\")\n return toc",
"def get_doc(filename: str) -> str:\n\n # Create the header.\n doc = \"# `\" + filename.split(\"/\")[-1] + \"`\\n\\n\"\n\n lines: List[str] = Path(filename).read_text().split(\"\\n\")\n\n for i in range(len(lines)):\n # Create a class description.\n if lines[i].startswith(\"class\"):\n # Skip private classes.\n match = re.search(\"class _(.*):\", lines[i])\n if match is not None:\n continue\n # Add the name of the class\n class_name = re.search(\"class (.*):\", lines[i]).group(1)\n doc += f\"## `{class_name}`\\n\\n\"\n # Add an example.\n class_example = f\"`from tdw.{filename[:-3].replace('/', '.')} import \" + re.sub(r\"(.*)\\((.*)\\)\", r'\\1',\n class_name) + \"`\"\n doc += class_example + \"\\n\\n\"\n doc += PyDocGen.get_class_description(lines, i)\n # Parse an enum.\n if re.search(r\"class (.*)\\(Enum\\):\", lines[i]) is not None:\n doc += \"\\n\\n\" + PyDocGen.get_enum_values(lines, i)\n doc += \"\\n\\n***\\n\\n\"\n # Create a function description.\n elif lines[i].strip().startswith(\"def\"):\n # Skip private functions.\n match = re.search(\"def _(.*)\", lines[i])\n if match is not None and \"__init__\" not in lines[i]:\n continue\n # Append the function description.\n doc += PyDocGen.get_function_documentation(lines, i) + \"\\n\\n***\\n\\n\"\n\n # Move the \"main class\" to the top of the document.\n main_class_name = ''.join(x.capitalize() or '_' for x in filename[:-3].split('_'))\n main_class = re.search(\"(## `\" + main_class_name + \"`((.|\\n)*))\", doc)\n if main_class is not None:\n main_class = main_class.group(1)\n doc_header = re.search(\"(.*)\\n\\n\", doc).group(0)\n doc_temp = doc.replace(main_class, \"\").replace(doc_header, \"\")\n doc = doc_header + main_class + doc_temp\n\n return doc",
"def generate_header_from_declarations(function_declarations, verbose=True):\n header = \"\"\n for (f_name, (f_dims, f_dict)) in function_declarations.iteritems():\n s = header_from_function_name_and_args(f_name, f_dict[\"args\"])\n header += s + \"\\n\"\n\n return header",
"def __header(self, conf):\n result = \"\"\n\n i = conf[\"conf_json\"][0]\n result += \"\"\"\n<a id='toc'></a>\n# %s\n\n**Version:** %s <br/>\n**API URL:** <a href=\"%s\">%s</a><br/>\n**Contact:** %s<br/>\n**License:** %s<br/>\n\n\n\n## <a id=\"description\"></a>Description [back to top](#toc)\n\n%s\n\n%s\"\"\" % (\n i[\"title\"],\n i[\"version\"],\n i[\"base\"] + i[\"url\"],\n i[\"base\"] + i[\"url\"],\n i[\"contacts\"],\n i[\"license\"],\n i[\"description\"],\n self.__parameters(),\n )\n # (i[\"title\"], i[\"version\"], i[\"base\"] + i[\"url\"], i[\"base\"] + i[\"url\"], i[\"contacts\"], i[\"contacts\"], i[\"license\"],\n # \"\".join([\"<li>[%s](#%s): %s</li>\" % (op[\"url\"], op[\"url\"], op[\"description\"].split(\"\\n\")[0])\n # for op in self.conf_json[1:]]),\n # i[\"description\"], self.__parameters())\n return markdown(result)",
"def generate_docs(self, sections):\n for docs, code in sections:\n yield docs + code",
"def to_html(self) -> str:\n source_name = escape(self.source_name)\n (covered, lines) = self.coverage_stats()\n lines_stats = \"{} / {} ({} lines of code)\".format(covered, lines, len(self.source_code))\n (br_covered, br_count, calls_covered, calls_count) = self.branch_stats()\n branch_stats = \"{} / {}\".format(br_covered, br_count)\n call_stats = \"{} / {}\".format(calls_covered, calls_count)\n (fn_covered, fn_count) = self.function_stats()\n fn_stats = \"{} / {}\".format(fn_covered, fn_count)\n\n self.decode_cpp_function_names()\n\n result = [\"\"\"\n <!DOCTYPE html>\n <html>\n <head>\n <meta charset=\"utf-8\">\n <title>Coverage report of file \"\"\" + source_name + \"\"\"</title>\n <style type=\"text/css\">\n /*<![CDATA[*/\n .cov-health-zero td { color: white; }\n .cov-health-zero a { color: #CCCCFF; }\n .cov-health-zero a:visited { color: #FFCCFF; }\n .cov-health-zero:nth-child(odd) td { background-color: #CC0000; }\n .cov-health-zero:nth-child(even) td { background-color: #DD0000; }\n .cov-health-na td { color: silver; }\n .cov-health-na td:nth-child(2) { visibility: hidden; }\n .branch { cursor: help; }\n .branch-taken { color: silver; }\n .branch-taken:hover { color: black; }\n .branch-not-taken { color: red; }\n .branch-not-taken:hover { color: maroon; }\n #source tbody td:last-child, #funcs tbody td:first-child\n { text-align: left; font-family: monospace; white-space: pre; }\n .sortable { border-collapse: collapse; }\n div { width: 100%; overflow: hidden; }\n .sortable td { text-align: right; padding-left: 2em; }\n .sortable tbody tr:nth-child(odd) { background-color: #FFFFCC; }\n .sortable tbody tr:nth-child(even) { background-color: #FFFFDD; }\n #source tbody tr:hover td:last-child { font-weight: bold; }\n #source tbody td:first-child { max-width: 7em; font-size: smaller; word-wrap: break-word; }\n #source tbody td:nth-child(2) { font-size: smaller; color: silver; }\n #summary { float: right; border-collapse: collapse; }\n #summary td { border: 1px solid black; }\n caption { font-weight: bold; }\n /*]]>*/\n </style>\n <script src=\"sorttable.js\"></script>\n </head>\n <body>\n <p><a href=\"index.html\">⇐ Back</a> | Go to line #<input type=\"number\" id=\"goto\" /></p>\n <h1>\"\"\" + source_name + \"\"\"</h1>\n <div>\n <table id=\"summary\">\n <caption>Summary</caption>\n <tr><td>Lines</td><td>\"\"\" + lines_stats + \"\"\"</td></tr>\n <tr><td>Branches</td><td>\"\"\" + branch_stats + \"\"\"</td></tr>\n <tr><td>Calls</td><td>\"\"\" + call_stats + \"\"\"</td></tr>\n <tr><td><a href=\"#functions\">Functions</a></td><td>\"\"\" + fn_stats + \"\"\"</td></tr>\n </ul>\n </table>\n <table class=\"sortable\" id=\"source\">\n <thead><tr><th>Branches</th><th>Cov</th><th>Line</th><th class=\"sorttable_nosort\">Source</th></tr></thead>\n <tbody>\n \"\"\"]\n result.extend(line.to_html() for line in self.source_code)\n result.append(\"\"\"\n </tbody>\n </table>\n </div>\n <h2 id=\"functions\">Functions</h2>\n <div>\n <table class=\"sortable\" id=\"funcs\">\n <thead><tr><th>Function</th><th>Calls</th><th>Ret.</th><th>Blk. Exec.</th></tr></thead>\n <tbody>\"\"\")\n result.extend(func.to_html() for func in self.source_functions)\n result.append(\"\"\"\n </tbody>\n </table>\n </div>\n <script>\n //<![CDATA[\n document.getElementById('goto').onchange = function()\n {\n location = \"#line-\" + this.value;\n }\n //]]>\n </script>\n </body>\n </html>\n \"\"\")\n return '\\n'.join(result)",
"def generate_headerfile(template, n_division=10000, df=6, start_chi=25, filepath=\"Chi2PLookup.h\", verbose=False):\n divisor = \"const int Chi2PLookup::divisor = {};\".format(n_division)\n\n names = []\n cutoff = []\n p_values_arrays = []\n degrees_of_freedom = range(1, df+1)\n\n if verbose:\n print(\"Generating p-value arrays...\")\n print(\" df={}\".format(df))\n print(\" precision={}\".format(n_division))\n\n for df in degrees_of_freedom:\n var_name = \"pValues_{}\".format(df)\n names.append(var_name)\n max_chi = max_chi_value(df=df, start_chi=start_chi)\n cutoff.append(max_chi)\n n_elements = max_chi * n_division\n\n chi_values = (val / n_division for val in range(0, n_elements + 1))\n p_values = (str(1 - chi2.cdf(val, df)) for val in chi_values)\n\n if verbose:\n print(\"\\tAdding p-values array to template for degree of freedom = {} ...\".format(df))\n\n p_values_arrays.append(\"double {}[] = {{{}}};\".format(var_name, \", \".join(p_values)))\n\n cutoff_array = \"const int Chi2PLookup::cutoff[] = {{{}}};\".format(\", \".join([str(i) for i in cutoff]))\n p_values_array_of_arrays = \"const double * Chi2PLookup::pValues[] = {{{}}};\\n\".format(\", \".join(names))\n\n template = template.format(divisor, cutoff_array, \"\\n\".join(p_values_arrays), p_values_array_of_arrays)\n\n if verbose:\n print(\"Saving file to: {}\".format(os.path.abspath(filepath)))\n\n with open(filepath, \"w\") as outfile:\n outfile.write(template)\n\n return template",
"def generate(src_file_names,\r\n dst_file_name,\r\n dst_doc_file_name,\r\n dst_property_doc_file_name,\r\n name):\r\n methods = []\r\n properties = []\r\n extra_includes = []\r\n entries = (methods, properties)\r\n for src_file_name in src_file_names:\r\n check_file(src_file_name)\r\n m, p = parse_file(src_file_name)\r\n methods.extend(m)\r\n properties.extend(p)\r\n\r\n extra_includes.extend(find_extra_include(src_file_name))\r\n if len(entries[0]) == 0 and len(entries[1]) == 0:\r\n print(\"No entries found in %s.\" % src_file_name)\r\n exit(1)\r\n\r\n write_result(dst_file_name, name, entries, extra_includes, src_file_names)\r\n write_method_doc(dst_doc_file_name, entries[0])\r\n write_property_doc(dst_property_doc_file_name, entries[1])",
"def scan_docs():\n\n\n def scan_file(fn):\n f = open(fn)\n\n for l in f:\n m = re.search(r\"\\.\\. (\\w+):: ([.\\w+]+)\", l)\n\n if not m:\n continue\n\n name_kind[m.group(2)] = m.group(1)\n\n for i in os.listdir(\"source\"):\n if i.endswith(\".rst\"):\n scan_file(os.path.join(\"source\", i))\n\n for i in os.listdir(\"source/inc\"):\n scan_file(os.path.join(\"source\", \"inc\", i))",
"def make_table_header(table_file, function_list):\n\tstring = '%10s' %(\" \")\n\tfor function in function_list:\n\t\tstring += \"\\t\"+'%32s' %(\"f_\"+str(function-1))\n\ttable_file.write(string+\"\\n\")\n\tstring = '%10s' %(\" \")+ \"\\t\" + \"------------\"*4*len(function_list)\n\ttable_file.write(string+\"\\n\")\n\treturn None",
"def create_included_function_list():\n import iteration_utilities\n from iteration_utilities import Iterable\n from itertools import chain\n from operator import itemgetter\n from astropy.table import Table\n from astropy.io.ascii import RST\n\n it = Iterable(chain(iteration_utilities._cfuncs.__dict__.items(),\n iteration_utilities._helpers._performance.__dict__.items(),\n iteration_utilities._recipes._core.__dict__.items(),\n iteration_utilities._recipes._additional.__dict__.items())\n # Exclude PY2 variable and private functions\n ).filterfalse(lambda x: x[0].startswith(('PY2', '_'))\n # Exclude everything that has no __module__\n ).filter(lambda x: hasattr(x[1], '__module__')\n # Only include functions that come from the package\n ).filter(lambda x: x[1].__module__.startswith('iteration_utilities')\n # Remove duplicate names\n ).unique_everseen(itemgetter(0)\n # Sort lexically\n ).get_sorted(key=lambda x: x[0].lower())\n\n it = Iterable(it\n # Create a Sphinx link from function name and module\n ).map(lambda i: ':py:func:`~{}.{}`'.format(i[1].__module__, i[0])\n # Group into 4s so we get a 4 column Table\n ).grouper(4, fillvalue=''\n # Convert to list because Table expects it.\n ).as_list()\n\n return '\\n'.join(RST().write(Table(rows=it)))",
"def second_header():\n return \"\"\"\n<th>\n<th>start\n<th>start\n<th>end\n<th>(secs)\n<th>time\n<th>frames\n<th>\n<th>bin\n<th>\n<th>\n<th>\n<th>\n<th>\n<th>\n<th>\n<th>\n<th>width\n<th>angle\n<th>\n<th>\n<th>\n<th>\n\"\"\"",
"def format_report_header(self):",
"def make_table_header(*headers):\n return [[Cell(h, bold=True) for h in headers]]",
"def make_html_table(filelist):\n pre=open(\"HTML_Header.txt\").read()\n out=[]\n for file in filelist:\n x=load_file(file)[1]\n out.append(\"<tr>\")\n out.append(\"<th>{}</th>\".format(x[0]))\n out.append(\"<th>{}</th>\".format(x[2]))\n out.append(\"<th>{}</th>\".format(x[1]))\n out.append(\"<th>{}</th>\".format(x[6]))\n out.append(\"<th>{}</th>\".format(x[7]))\n out.append(\"<th>{}</th>\".format(x[8]))\n out.append(\"<th>{}</th>\".format(x[9]))\n out.append(\"<th>{}</th>\".format(x[12]))\n out.append(\"<th>link</th>\")\n out.append(\"</tr>\")\n out.append(\"</table>\")\n \n for i in range(0,len(out)):\n pre=pre+out[i]+\"\\n\"\n \n path=os.getcwd()\n os.chdir(\"Ausgabe\")\n open(\"table.html\",\"w\").write(pre)\n os.chdir(path)"
] | [
"0.6388133",
"0.63291883",
"0.6162227",
"0.61605513",
"0.605548",
"0.5960463",
"0.5946018",
"0.58367574",
"0.582316",
"0.58196837",
"0.58120084",
"0.5807799",
"0.5753701",
"0.57469726",
"0.57268125",
"0.5716415",
"0.5679393",
"0.56756175",
"0.5639785",
"0.5602997",
"0.55872995",
"0.5579383",
"0.5572189",
"0.5553682",
"0.55275744",
"0.5508838",
"0.5499171",
"0.5488672",
"0.5480619",
"0.54677904"
] | 0.77620685 | 0 |
Construct an instance of ``client_class`` and register it under given alias. | def create_connection(self, alias='async', client_class=AsyncElasticsearch, **kwargs):
kwargs.setdefault('serializer', serializer)
conn = self._conns[alias] = client_class(**kwargs)
return conn | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def register_client(self, client, client_name):\n self.clients[client_name] = client",
"def create_client(self, module_name, version, client_class):\n # NOTE(kiennt): Get created client rather create a new one.\n # The key is the combination of module_name and version.\n # because we can create multiple clients of a module with\n # different versions.\n client = self.created_clients.get(module_name + version)\n if client:\n return client\n module_client = self._import_client(module_name)\n try:\n client = getattr(module_client, client_class)(\n version=version,\n session=self._sess)\n self.created_clients[module_name+version] = client\n return client\n except Exception as err:\n raise err",
"def register_aliases(self, aliases, plugin_class):\n for alias in aliases:\n self.plugins[alias] = plugin_class",
"def get_client_class(api_name, version, version_map):\r\n try:\r\n client_path = version_map[str(version)]\r\n except (KeyError, ValueError):\r\n msg = _(\"Invalid %(api_name)s client version '%(version)s'. must be \"\r\n \"one of: %(map_keys)s\")\r\n msg = msg % {'api_name': api_name, 'version': version,\r\n 'map_keys': ', '.join(version_map.keys())}\r\n raise exceptions.UnsupportedVersion(msg)\r\n\r\n return import_class(client_path)",
"def __init__(self, integration, client_class=None):\n endpoint_url = integration.setting(\n MinIOUploaderConfiguration.ENDPOINT_URL).value\n\n _, host, _, _, _ = urlsplit(endpoint_url)\n\n if not client_class:\n client_class = boto3.client\n\n if callable(client_class):\n client_class = functools.partial(client_class, endpoint_url=endpoint_url)\n else:\n self.client = client_class\n\n super(MinIOUploader, self).__init__(integration, client_class, host)",
"def register(self, client):\n self.clients.append(client)",
"def __init__(self, client, **kwargs):\n self._ac = client\n self._wrapped = kwargs",
"def make_client(instance):\n network_client = utils.get_client_class(\n API_NAME,\n instance._api_version[API_NAME],\n API_VERSIONS)\n LOG.debug('Instantiating network client: %s', network_client)\n\n endpoint = instance.get_endpoint_for_service_type(\n API_NAME,\n region_name=instance._region_name,\n )\n\n return network_client(\n username=instance._username,\n tenant_name=instance._project_name,\n password=instance._password,\n region_name=instance._region_name,\n auth_url=instance._auth_url,\n endpoint_url=endpoint,\n token=instance.auth.get_token(instance.session),\n insecure=instance._insecure,\n ca_cert=instance._cacert,\n )",
"def __init__(self, rest_class, client, endpoint_prefix=''):\n\n self._rest_class = self._mapToRestClass(rest_class)\n self._rest_client = client\n\n # endpoint is always lowercase\n self._endpoint = '%s%s/' % (endpoint_prefix, self._mapToRestClass(rest_class).lower())",
"def create_client(self) -> None:\n pass",
"def add_client(name):\n return create_client(name)",
"def _create_instance(cls, configuration, auth_type):\n auth = ClientAuthFactory.get(\n username=configuration.username,\n password=configuration.password,\n auth_type=auth_type\n )\n instance = HttpClient(configuration.url, auth)\n cls._INSTANCES[configuration] = instance\n return instance",
"def __init__(self, client):\n self.client = client",
"def __init__(self, client):\n\n self.client = client",
"def __init__(self, client_auth_type, client_id, client_secret=None):\n self.client_auth_type = client_auth_type\n self.client_id = client_id\n self.client_secret = client_secret",
"def make_client(self, context):\n return Client(self.settings['client_routing'], context=context)",
"def __init__(self, client, name):\n if not isinstance(client, couch.Client):\n raise Exception(\"'client' arg must be instance of couch.Client\")\n\n self.client = client\n self.name = name",
"def buildProtocol(self, addr):\n if hasattr(settings, \"DISCORD_SESSION_CLASS\"):\n protocol_class = class_from_module(\n settings.DISCORD_SESSION_CLASS, fallback=DiscordClient\n )\n protocol = protocol_class()\n else:\n protocol = DiscordClient()\n\n protocol.factory = self\n protocol.sessionhandler = self.sessionhandler\n return protocol",
"def create_client(service, region, access_key_id, secret_access_key):\n client = boto3.client(service,\n region_name=region,\n aws_access_key_id=access_key_id,\n aws_secret_access_key=secret_access_key\n )\n return client",
"def create_client(service, region, access_key_id, secret_access_key):\n client = boto3.client(service,\n region_name=region,\n aws_access_key_id=access_key_id,\n aws_secret_access_key=secret_access_key\n )\n return client",
"def make_client(service_key, constructor=None, options=None, **kwargs):\n cloud = get_config(service_key=service_key, options=options, **kwargs)\n if not constructor:\n constructor = cloud_config._get_client(service_key)\n return cloud.get_legacy_client(service_key, constructor)",
"def register_driver(self, driver, alias):\n return self._drivers.register(driver, alias)",
"def __init__(self, client):\n self._client = client",
"def __init__(self,\n alias_name=None,\n client_subnet_whitelist=None,\n smb_config=None,\n view_path=None):\n\n # Initialize members of the class\n self.alias_name = alias_name\n self.client_subnet_whitelist = client_subnet_whitelist\n self.smb_config = smb_config\n self.view_path = view_path",
"def __new__(cls, host=None, user=None, client=None):\n cls.__check_parameters(host=host, user=user)\n if client is None:\n raise InvalidClientException(\"Integrated Client during connection creation can't be None\")\n return super(Connection, cls).__new__(cls, host=host, user=user, client=client)",
"def __init__(self, client=None):\n self._client = client",
"def client():\n\n client = Client()\n return client",
"def client(self, hostname_or_ip):\n hostname, aliases, ip = self.resolve(hostname_or_ip)\n try:\n client = Client.objects.get(name=hostname)\n printer_name = client.label_printer.cups_printer_name\n self.cups_server = client.label_printer.cups_server_hostname\n cups_hostname = self.cups_server.hostname\n self._label_printer = client.label_printer\n except Client.DoesNotExist:\n self.cups_server = 'localhost' # default\n cups_hostname = self.cups_server.hostname\n self._client = ClientTuple(hostname, aliases, ip, None, cups_hostname)\n try:\n printer_name = self.label_printer.cups_printer_name\n except AttributeError:\n printer_name = None\n self._client = ClientTuple(hostname, aliases, ip, printer_name, cups_hostname)",
"def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)",
"def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)"
] | [
"0.5862595",
"0.57551134",
"0.57218677",
"0.56946874",
"0.56426543",
"0.5604221",
"0.5513979",
"0.5479135",
"0.5450147",
"0.54027754",
"0.53822577",
"0.5351544",
"0.5310474",
"0.53071845",
"0.5302867",
"0.5245894",
"0.52256596",
"0.5221313",
"0.52094626",
"0.52094626",
"0.5206279",
"0.52039605",
"0.51951146",
"0.51834553",
"0.51795316",
"0.5174824",
"0.5164689",
"0.51492316",
"0.5148325",
"0.5148325"
] | 0.64954954 | 0 |
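
A minimal usage sketch of the alias-registration pattern in the `create_connection` document above; the `Connections` wrapper, the `DummyClient` stand-in, and the `hosts` value are illustrative assumptions, not part of the record (which targets `AsyncElasticsearch` with a default serializer):

# Hypothetical sketch: build a client instance and register it under an alias for later lookup.
# DummyClient stands in for AsyncElasticsearch so the snippet runs without external dependencies.
class DummyClient:
    def __init__(self, **kwargs):
        self.config = kwargs


class Connections:
    def __init__(self):
        self._conns = {}

    def create_connection(self, alias='async', client_class=DummyClient, **kwargs):
        # Construct the client and remember it under the given alias.
        conn = self._conns[alias] = client_class(**kwargs)
        return conn

    def get_connection(self, alias='async'):
        return self._conns[alias]


registry = Connections()
registry.create_connection(alias='async', hosts=['http://localhost:9200'])
assert registry.get_connection('async').config['hosts'] == ['http://localhost:9200']
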
Perform outer indexing on dask array `x`, one dimension at a time. It is assumed that `indices` is suitably normalised (no ellipsis, etc.) | def _dask_oindex(x, indices):
axis = 0
for index in indices:
x = da.take(x, index, axis=axis)
# If axis wasn't dropped by a scalar index:
if not isinstance(index, Integral):
axis += 1
return x | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dask_getitem(x, indices):\n indices = _simplify_index(indices, x.shape)\n try:\n out = x[indices]\n except NotImplementedError:\n out = _dask_oindex(x, indices)\n # dask does culling anyway as part of optimization, but it first calls\n # ensure_dict, which copies all the keys, presumably to speed up the\n # case where most keys are retained. A lazy indexer is normally used to\n # fetch a small part of the data.\n if np.product(out.numblocks) < 0.5 * np.product(x.numblocks):\n dsk = dask.optimization.cull(out.dask, out.__dask_keys__())[0]\n out.dask = dask.highlevelgraph.HighLevelGraph.from_collections(out.name, dsk)\n return out",
"def broadcast_index(values, indices):\r\n assert_array(indices, shape=(...,) + values.shape[:-1])\r\n indexed_values = jp.take_along_axis(\r\n values.reshape((1,) + values.shape),\r\n indices.reshape((-1,) + values.shape[:-1] + (1,)),\r\n axis=-1,\r\n )\r\n flat_result = jp.squeeze(indexed_values, axis=-1)\r\n return flat_result.reshape(indices.shape)",
"def take_along_axis(a, indices, axis):\n\n if indices.dtype.kind not in ('i', 'u'):\n raise IndexError('`indices` must be an integer array')\n\n if axis is None:\n a = a.ravel()\n axis = 0\n\n ndim = a.ndim\n\n axis = internal._normalize_axis_index(axis, ndim)\n\n if ndim != indices.ndim:\n raise ValueError(\n '`indices` and `a` must have the same number of dimensions')\n\n fancy_index = []\n for i, n in enumerate(a.shape):\n if i == axis:\n fancy_index.append(indices)\n else:\n ind_shape = (1,) * i + (-1,) + (1,) * (ndim - i - 1)\n fancy_index.append(cupy.arange(n).reshape(ind_shape))\n\n return a[tuple(fancy_index)]",
"def flatten_idx(idx, axis=-1):\n idx = numpy.asanyarray(idx)\n if not idx.dtype.kind in ('i', 'u'):\n idx = idx.astype(int)\n preshape = idx.shape[:axis]\n postshape = idx.shape[axis:]\n stride = int(numpy.product(postshape[1:])) #1 if applied to empty\n #The index on this axis moves stride elements in flat\n outidx = idx.flatten() * stride #makes a copy\n #First add the offsets to get us to [..., idx @ axis = 0, 0...)\n outidx += numpy.repeat(\n numpy.arange(0, len(outidx), int(numpy.product(postshape)),\n dtype=idx.dtype),\n numpy.product(postshape))\n #Now offsets for non-zero on the trailing axes [0, 0, ... 0@axis, ...]\n outidx += numpy.tile(numpy.arange(0, stride, dtype=idx.dtype),\n int(numpy.product(preshape)) * idx.shape[axis])\n return outidx",
"def index(x, axis, index_spec):\n idx = [slice(None)] * x.ndim\n idx[axis] = index_spec\n\n indexer = tuple(idx)\n return indexer",
"def multi_index(t, indices):\n if K._BACKEND == 'theano':\n return t[tuple(indices)]\n #from operator import getitem\n # Use native Theano indexing. \n #return getitem(t, tuple(indices)) # Equivalent to t[indices].\n else:\n return _tf_multi_index(t, indices)",
"def _simplify_index(indices, shape):\n # First clean up and check indices, unpacking ellipsis and boolean arrays\n indices = da.slicing.normalize_index(indices, shape)\n out = []\n axis = 0\n for index in indices:\n if index is not np.newaxis:\n length = shape[axis]\n axis += 1\n # If there is 1-D fancy index on this axis, try to convert to slice\n if isinstance(index, np.ndarray) and index.ndim == 1:\n try:\n index = _range_to_slice(index)\n except ValueError:\n pass\n else:\n index = da.slicing.normalize_slice(index, length)\n out.append(index)\n return tuple(out)",
"def ravel_index(x, dims):\n i = 0\n for dim, j in zip(dims, x):\n i *= dim\n i += j\n return i",
"def get_slice(x, indices):\n return x[indices]",
"def indices(dimensions, dtype=int, sparse=False):\n\n if not isinstance(dimensions, (tuple, list)):\n pass\n elif len(dimensions) > 2 or len(dimensions) == 0:\n pass\n elif dtype != int:\n pass\n elif sparse:\n pass\n else:\n return dpnp_indices(dimensions)\n\n return call_origin(numpy.indices, dimensions, dtype, sparse)",
"def array_array_index(array, indices):\n if indices.shape[1] == 1:\n return array[np.arange(array.shape[0]), indices[:, 0]].reshape(indices.shape)\n\n stride = np.arange(indices.shape[0])*array.shape[1]\n indices_mod = indices + stride[:, None]\n indices_flat = indices_mod.ravel()\n return array.ravel()[indices_flat].reshape(indices.shape).copy()",
"def pndindex(*args):\n return np.ndindex(*args)",
"def pndindex(*args):\r\n return np.ndindex(*args)",
"def select_at_indexes(indexes, tensor):\n dim = len(indexes.shape)\n assert indexes.shape == tensor.shape[:dim]\n num = indexes.numel()\n t_flat = tensor.view((num,) + tensor.shape[dim:])\n s_flat = t_flat[torch.arange(num), indexes.view(-1)]\n return s_flat.view(tensor.shape[:dim] + tensor.shape[dim + 1:])",
"def _index(tensor_3d, tensor_2d):\n x, y, z = tensor_3d.size()\n t = tensor_3d.reshape(x * y, z)\n tt = tensor_2d.reshape(x * y)\n v = t[torch.arange(x * y), tt]\n v = v.reshape(x, y)\n return v",
"def flattened_indices_from_row_col_indices(row_indices, col_indices, num_cols):\n return (row_indices * num_cols) + col_indices",
"def _tf_multi_index(t, indices):\n # Note: this is far from a full implementation of Theano fancy\n # indexing, use with care.\n assert K._BACKEND == 'tensorflow'\n from collections import Sequence\n import tensorflow as tf\n\n if not isinstance(indices, Sequence):\n raise ValueError(indices)\n\n if len(indices) == 1:\n return tf.gather(t, indices[0]) # gather() suffices for 1d\n if K.ndim(t) == len(indices):\n # Index n-dimensional tensor with n indices: pack the indices\n # from e.g. [[i_0, i_1, ...] [j_0, j_1, ...]] to [[i_0, j_0],\n # [i_1, j_1], ...] and use gather_nd()\n # (https://www.tensorflow.org/api_docs/python/array_ops.html#gather_nd)\n # TODO: check that all i in indices have ndim n-1 \n # TODO: support broadcasting for numpy arrays with np.broadcast_to()\n #indices = tf.pack(list(indices), axis=len(indices)-1)\n indices = tf.pack(list(indices), axis=-1)\n # indices = tf.Print(indices, [indices], 'indices', summarize=100)\n return tf.gather_nd(t, indices)\n else:\n raise NotImplementedError('index {} with {}'.format(t, indices))",
"def safe_indexing(X, indices):\n if hasattr(X, \"iloc\"):\n # Work-around for indexing with read-only indices in pandas\n indices = indices.copy() # if indices.flags.writeable else indices.copy()\n # Pandas Dataframes and Series\n try:\n return X.iloc[indices]\n except ValueError:\n # Cython typed memoryviews internally used in pandas do not support\n # readonly buffers.\n # TODO: that was commented\n # warnings.warn(\"Copying input dataframe for slicing.\",\n # DataConversionWarning)\n return X.copy().iloc[indices]\n elif hasattr(X, \"shape\"):\n if hasattr(X, 'take') and (hasattr(indices, 'dtype') and\n indices.dtype.kind == 'i'):\n # This is often substantially faster than X[indices]\n return X.take(indices, axis=0)\n else:\n return X[indices]\n else:\n return [X[idx] for idx in indices]",
"def select_indices(arr,index_arr,axis=-1):\n shape_list=(lambda x,y: [ 1 if dim!=x else y for dim in range(len(arr.shape))] )\n indices_list=[np.reshape(np.arange(length),shape_list(length_id,length))\n for length_id,length in enumerate(arr.shape)]\n indices_list[axis]=index_arr\n return arr.ravel()[np.ravel_multi_index(indices_list,dims=arr.shape)]",
"def gather_nd_python(a_np, indices_np):\n a_shape = a_np.shape\n indices_np = indices_np.astype(\"int32\")\n indices_shape = indices_np.shape\n assert len(indices_shape) > 1\n assert indices_shape[0] <= len(a_shape)\n b_shape = list(indices_shape[1:])\n for i in range(indices_shape[0], len(a_shape)):\n b_shape.append(a_shape[i])\n b_np = np.zeros(b_shape)\n for idx in np.ndindex(*indices_shape[1:]):\n a_idx = []\n for i in range(indices_shape[0]):\n indices_pos = tuple([i] + list(idx))\n a_idx.append(indices_np[indices_pos])\n b_np[idx] = a_np[tuple(a_idx)]\n return b_np",
"def all_neighbor_simplices_real_idx(n_dim, indices):\n all_simplices = base_idx_neighbor_idx_simplices(n_base=indices.shape[0],\n n_neighbors=indices.shape[1],\n n_dim=n_dim)\n base_vector_indices = all_simplices[:, 0]\n neighbors_indices = indices[base_vector_indices]\n #if debug:\n # print(os.getpid(), 'eee', neighbors_indices.shape, all_simplices[:,1:].shape)\n neighbors_indices = array_array_index(neighbors_indices,\n all_simplices[:,1:])\n #if debug:\n # print(os.getpid(), 'fff')\n simplices_real_indices = np.vstack([base_vector_indices.T,\n neighbors_indices.T]).T\n\n return simplices_real_indices",
"def apply_index(data, idx):\n data = numpy.asanyarray(data)\n idx = numpy.asanyarray(idx)\n if len(idx.shape) != 2:\n raise ValueError(\"idx must have dimensions 2, not {0}\".format(\n len(idx.shape)))\n if len(data.shape) < 2:\n raise ValueError(\"data must have at least dimensions 2\")\n if idx.shape[0] != data.shape[0]:\n raise ValueError(\"data and idx must have same size in \"\n \"0th dimension\")\n if not idx.shape[1] in data.shape[1:]:\n raise ValueError(\"Size of idx dimension 1 must match a dimension in \"\n \"data\")\n idx_dim = data.shape[1:].index(idx.shape[1]) + 1\n return numpy.rollaxis(\n numpy.rollaxis(data, idx_dim, 1) #make time and index dim adjacent\n #get a 2d array where every element matches index of first axis\n [numpy.mgrid[0:idx.shape[0], slice(idx.shape[1])][0],\n idx, #2d array, every element is desired index of second axis\n ...] #and the other axes come along for the ride\n , 1, idx_dim + 1) #and put index dim back in place",
"def _index_dset(dset, indices):\n # get dset and arr shape\n dset_shape = dset.shape\n arr_shape = _get_dset_shape(dset, indices)\n\n # create empty array of dset dtype\n arr = np.empty(arr_shape, dtype=dset.dtype)\n\n # get arr and dset indices for each dimension in indices\n dset_indices = []\n arr_indices = []\n for i, dset_inds in enumerate(indices):\n if isinstance(dset_inds, (int, np.integer)):\n # this dimension is len 1, so slice is fine\n arr_indices.append([slice(None)])\n dset_indices.append([[dset_inds]])\n\n elif isinstance(dset_inds, slice):\n # this dimension is just a slice, so slice is fine\n arr_indices.append([slice(None)])\n dset_indices.append([dset_inds])\n\n elif isinstance(dset_inds, list):\n if isinstance(dset_inds[0], (int, np.integer)):\n # this is a list of integers, append slice\n arr_indices.append([slice(None)])\n dset_indices.append([dset_inds])\n elif isinstance(dset_inds[0], slice):\n # this is a list of slices, need list of slice lens\n slens = [_get_slice_len(s, dset_shape[i]) for s in dset_inds]\n ssums = [sum(slens[:j]) for j in range(len(slens))]\n arr_inds = [slice(s, s + l) for s, l in zip(ssums, slens)]\n arr_indices.append(arr_inds)\n dset_indices.append(dset_inds)\n\n # iterate over each of the 4 axes and fill the array\n for blt_arr, blt_dset in zip(arr_indices[0], dset_indices[0]):\n for spw_arr, spw_dset in zip(arr_indices[1], dset_indices[1]):\n for freq_arr, freq_dset in zip(arr_indices[2], dset_indices[2]):\n for pol_arr, pol_dset in zip(arr_indices[3], dset_indices[3]):\n # index dset and assign to arr\n arr[blt_arr, spw_arr, freq_arr, pol_arr] = dset[\n blt_dset, spw_dset, freq_dset, pol_dset\n ]\n\n return arr",
"def join_indices(\n self, *inds, dirs=None, return_transposed_shape_data=False\n ):\n # Format index_batches to be a list of lists of indices.\n if isinstance(inds[0], Iterable):\n index_batches = list(map(list, inds))\n else:\n index_batches = [list(inds)]\n # Remove empty batches.\n index_batches = [b for b in index_batches if len(b) > 0]\n\n if dirs is None:\n warnings.warn(\n \"In join_indices, dirs was not given and is thus generated to \"\n \"be [1,...,1].\"\n )\n dirs = [1] * len(index_batches)\n else:\n if not isinstance(dirs, Iterable):\n dirs = [dirs]\n assert len(dirs) == len(index_batches)\n\n if not index_batches:\n # Nothing to be done. However, join_indices should always return an\n # array independent of the original, so we take a view.\n if return_transposed_shape_data:\n return (\n self.view(),\n self.shape.copy(),\n self.qhape.copy(),\n self.dirs.copy(),\n )\n else:\n return self.view()\n\n # Group dirs together with index_batches so that they get sorted\n # together.\n index_batches_with_dirs = [\n b + [d] for b, d in zip(index_batches, dirs)\n ]\n\n # Create the permutation for transposing the tensor. At the same time\n # transpose and sort index_batches.\n # We create trivial one-index batches for all the indices that are not\n # going to be joined, so that all indices are in some batch. Then we\n # sort the batches by the first index in each one.\n joined = set(sum(index_batches, []))\n not_joined = [[i] for i in range(len(self.shape)) if i not in joined]\n all_batches = not_joined + index_batches_with_dirs\n all_batches.sort(key=opr.itemgetter(0))\n # The batches are now in right order, and we just have to turn this\n # into a permutation of the indices.\n # The a[:-1] conditional statement leaves out the dirs when creating\n # the permutation.\n perm = sum((a[:-1] if len(a) > 1 else a for a in all_batches), [])\n # Filter out the trivial batches we added a few lines above.\n index_batches_with_dirs = [b for b in all_batches if len(b) > 1]\n # Separate the dirs and the batches now that sorting is done.\n dirs = [b[-1] for b in index_batches_with_dirs]\n index_batches = [b[:-1] for b in index_batches_with_dirs]\n # Sort the indices inside each batch according to the permutation perm.\n index_batches = [list(map(perm.index, b)) for b in index_batches]\n res = self.transpose(perm)\n\n if return_transposed_shape_data:\n transposed_shape = res.shape.copy()\n transposed_qhape = res.qhape.copy()\n transposed_dirs = res.dirs.copy()\n\n # For each batch that consists of a single index, we only need to flip\n # its direction to match what's in dirs. 
Do that, and then remove those\n # batches from index_batches.\n # We traverse index_batches in reverse order so that removing elements\n # from the end doesn't mess up the loop.\n for i, b in reversed(tuple(enumerate(index_batches))):\n if len(b) == 1:\n if res.dirs[b[0]] != dirs[i]:\n res = res.flip_dir(b[0])\n del dirs[i]\n del index_batches[i]\n\n if not index_batches:\n # If no indices are left, there is no need to join anything.\n if return_transposed_shape_data:\n return res, transposed_shape, transposed_qhape, transposed_dirs\n else:\n return res\n\n # Find out the remaining, new indices after the joining.\n cumulant = 0\n new_inds = []\n for b in index_batches:\n new_inds.append(b[0] - cumulant)\n cumulant += len(b) - 1\n\n # Reverse index_batches and dirs for the future so that we first\n # process the indices at the end.\n index_batches.reverse()\n dirs.reverse()\n\n # For every non-zero block in res, reshape the block and add it to the\n # right key in new_sects. However, every item in new_sects will consist\n # of several blocks that need to be concatenated. Because of this,\n # new_sects[k] is a list of lists [k_part1, k_part2, ..., k_partn,\n # reshaped_block], where k_parts are the qnums of the indices that were\n # joined. Thus by later sorting these lists we get them in the right\n # order for concatenation.\n new_sects = {}\n # Go through every valid index instead of every key in sects, because\n # blocks of zeros may be concatenated with other blocks.\n valid_ks = (\n qcomb\n for qcomb in itt.product(*res.qhape)\n if res.is_valid_key(qcomb)\n )\n del_slcs = [slice(b[1], b[-1] + 1) for b in index_batches]\n get_slcs = [slice(b[0], b[-1] + 1) for b in index_batches]\n dir_batches = [[res.dirs[i] for i in batch] for batch in index_batches]\n for k in valid_ks:\n v = res[k]\n new_k = list(k)\n new_shp = list(v.shape)\n k_parts = []\n for b, dir_b, dir_new, del_slc, get_slc in zip(\n index_batches, dir_batches, dirs, del_slcs, get_slcs\n ):\n k_part = k[get_slc]\n k_parts.append(k_part)\n k_part = map(opr.mul, k_part, dir_b)\n new_qnum = self._qod_func(sum(k_part) * dir_new)\n new_k[b[0]] = new_qnum\n del new_k[del_slc]\n new_shp[b[0]] = fct.reduce(opr.mul, v.shape[get_slc])\n del new_shp[del_slc]\n k_parts.reverse()\n new_k = tuple(new_k)\n l = new_sects.setdefault(new_k, [])\n l.append(k_parts + [v.reshape(new_shp)])\n\n # Concatenator is a helper function that recursively concatenates the\n # pieces together. 
It is called once for every index in a batch.\n def concatenator(l, i=0):\n if i == len(l[0]) - 2:\n l = [el[-1] for el in l]\n else:\n l = [tuple(g) for k, g in itt.groupby(l, opr.itemgetter(i))]\n l = tuple(map(lambda k: concatenator(k, i=i + 1), l))\n return np.concatenate(l, new_inds[i])\n\n for k, v in new_sects.items():\n # These are the new blocks, just need to concatenate.\n v.sort()\n new_sects[k] = concatenator(v)\n res.sects = new_sects\n\n # Compute the new shape, qhape and dir.\n for new_d, batch in zip(dirs, index_batches):\n product_of_tuple = lambda l: fct.reduce(opr.mul, l)\n cart_prod_of_dims = itt.product(\n *tuple(res.shape[i] for i in batch)\n )\n new_dim = list(map(product_of_tuple, cart_prod_of_dims))\n\n qhps = ([q * res.dirs[i] for q in res.qhape[i]] for i in batch)\n cartesian_product_of_qims = itt.product(*tuple(qhps))\n new_qim = map(sum, cartesian_product_of_qims)\n new_qim = (q * new_d for q in new_qim)\n new_qim = list(map(self._qod_func, new_qim))\n\n # Still need to concatenate.\n # Sort by new_qim.\n if new_qim:\n new_qim, new_dim = zip(*sorted(zip(new_qim, new_dim)))\n new_qim, new_dim = list(new_qim), list(new_dim)\n n = 0\n q = new_qim[n]\n i = 1\n while i < len(new_qim):\n if new_qim[i] == q:\n new_dim[n] += new_dim[i]\n del new_qim[i]\n del new_dim[i]\n else:\n n = i\n q = new_qim[n]\n i += 1\n\n res.shape[batch[0]] = new_dim\n del res.shape[batch[1] : batch[0] + len(batch)]\n res.qhape[batch[0]] = new_qim\n del res.qhape[batch[1] : batch[0] + len(batch)]\n res.dirs[batch[0]] = new_d\n del res.dirs[batch[1] : batch[0] + len(batch)]\n\n if return_transposed_shape_data:\n return res, transposed_shape, transposed_qhape, transposed_dirs\n else:\n return res",
"def _at_index(data, indices, keepdim=None, padding=np.nan):\n if not (keepdim is None or keepdim in ['data', 'index']):\n raise TypeError('unexpected argument keepdim={}'.format(keepdim))\n\n data = np.asarray(data)\n indices = np.asarray(indices)\n i = indices[indices < data.size]\n\n if keepdim is None:\n return data[i]\n elif keepdim == 'data':\n res = np.full(data.size, padding)\n res[i] = data[i]\n return res\n elif keepdim == 'index':\n res = np.full(indices.size, padding)\n if i.size !=0:\n res[0:indices.size-1] = data[i]\n return res",
"def aggregate_relative_indices(\n array: np.ndarray,\n indices: Iterable[int],\n starting_index_func: Callable,\n aggregate_func: Callable\n):\n starting_index = starting_index_func(array)\n get_indices = [\n starting_index + i\n for i in indices\n if 0 <= starting_index + i < len(array)\n ]\n return aggregate_func(array[get_indices])",
"def _ravel_shape_indices(dimensions, dtype=int, chunks=None):\n\n indices = [\n dask.array.arange(\n 0,\n numpy.prod(dimensions[i:], dtype=dtype),\n numpy.prod(dimensions[i + 1:], dtype=dtype),\n dtype=dtype,\n chunks=c\n )\n for i, c in enumerate(chunks)\n ]\n\n indices = da_blockwise(\n _ravel_shape_indices_kernel, tuple(range(len(indices))),\n *sum([(a, (i,)) for i, a in enumerate(indices)], tuple()),\n dtype=dtype\n )\n\n return indices",
"def take_along_axis(x1, indices, axis):\n\n x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)\n indices_desc = dpnp.get_dpnp_descriptor(\n indices, copy_when_nondefault_queue=False\n )\n if x1_desc and indices_desc:\n if x1_desc.ndim != indices_desc.ndim:\n pass\n elif not isinstance(axis, int):\n pass\n elif axis >= x1_desc.ndim:\n pass\n elif x1_desc.ndim == indices_desc.ndim:\n val_list = []\n for i in list(indices_desc.shape)[:-1]:\n if i == 1:\n val_list.append(True)\n else:\n val_list.append(False)\n if not all(val_list):\n pass\n else:\n return dpnp_take_along_axis(x1, indices, axis)\n else:\n return dpnp_take_along_axis(x1, indices, axis)\n\n return call_origin(numpy.take_along_axis, x1, indices, axis)",
"def batched_index_select(input, dim, index):\n views = [input.shape[0]] + [1 if i != dim else -1 for i in range(1, len(input.shape))]\n expanse = list(input.shape)\n expanse[0] = -1\n expanse[dim] = -1\n index = index.view(views).expand(expanse)\n return torch.gather(input, dim, index)",
"def indXtoJ(indX):\n return np.unravel_index(indX % xx.size, xx.shape)"
] | [
"0.6630981",
"0.65513384",
"0.625214",
"0.60793054",
"0.5899021",
"0.58930767",
"0.584284",
"0.58320093",
"0.57988596",
"0.57887155",
"0.57549566",
"0.5752462",
"0.57381105",
"0.5714157",
"0.5673157",
"0.5671463",
"0.56660175",
"0.56258434",
"0.56149113",
"0.5592513",
"0.5555908",
"0.554421",
"0.55425954",
"0.55237263",
"0.5509066",
"0.54842067",
"0.5483267",
"0.54597306",
"0.54322946",
"0.5421832"
] | 0.75664073 | 0 |
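
A small, hypothetical demonstration of the `_dask_oindex` document above: each scalar or 1-D index is applied to its own axis with `da.take`, and the axis counter advances only when the index is not a scalar. The array shape and index values below are invented for illustration.

# Per-axis outer indexing; the helper is copied from the record so the snippet is self-contained.
from numbers import Integral

import dask.array as da
import numpy as np


def _dask_oindex(x, indices):
    axis = 0
    for index in indices:
        x = da.take(x, index, axis=axis)
        # A scalar index drops its axis, so only advance for non-scalar indices.
        if not isinstance(index, Integral):
            axis += 1
    return x


x = da.from_array(np.arange(24).reshape(2, 3, 4), chunks=2)
# Outer indexing: rows [0, 1], scalar 2 on the middle axis, columns [1, 3].
result = _dask_oindex(x, (np.array([0, 1]), 2, np.array([1, 3])))
print(result.compute().shape)  # (2, 2)
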
Determine appropriate name for callable `f` (akin to function name). | def _callable_name(f):
try:
return f.__name__
except AttributeError:
if isinstance(f, partial):
return f.func.__name__
return f.__class__.__name__ | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_callable_name(func):\n if isinstance(func, functools.partial):\n return get_callable_name(func.func)\n else:\n return func.__name__",
"def funcname(func):\n try:\n return '%s()' % func.__name__\n except AttributeError:\n return repr(func)",
"def name_func(func, num, params):\n return \"%s_%s_%s\" % (\n func.__name__, int(num),\n parameterized.to_safe_name('_'.join((params.args[0].__name__, params.args[1].__name__)))\n )",
"def _function_name(func):\n return \"Calling the function: def {}()\".format(func.__name__)",
"def name(cls):\n return arg.s()(cls.func).func.__name__",
"def name(self):\n\t\treturn self._func_name",
"def get_func_name(func, resolv_alias=True, win_characters=True):\r\n if hasattr(func, '__module__'):\r\n module = func.__module__\r\n else:\r\n try:\r\n module = inspect.getmodule(func)\r\n except TypeError:\r\n if hasattr(func, '__class__'):\r\n module = func.__class__.__module__\r\n else:\r\n module = 'unknown'\r\n if module is None:\r\n # Happens in doctests, eg\r\n module = ''\r\n if module == '__main__':\r\n try:\r\n filename = os.path.abspath(inspect.getsourcefile(func))\r\n except:\r\n filename = None\r\n if filename is not None:\r\n # mangling of full path to filename\r\n parts = filename.split(os.sep)\r\n if parts[-1].startswith('<ipython-input'):\r\n # function is defined in an IPython session. The filename\r\n # will change with every new kernel instance. This hack\r\n # always returns the same filename\r\n parts[-1] = '__ipython-input__'\r\n filename = '-'.join(parts)\r\n if filename.endswith('.py'):\r\n filename = filename[:-3]\r\n module = module + '-' + filename\r\n module = module.split('.')\r\n if hasattr(func, 'func_name'):\r\n name = func.func_name\r\n elif hasattr(func, '__name__'):\r\n name = func.__name__\r\n else:\r\n name = 'unknown'\r\n # Hack to detect functions not defined at the module-level\r\n if resolv_alias:\r\n # TODO: Maybe add a warning here?\r\n if hasattr(func, 'func_globals') and name in func.func_globals:\r\n if not func.func_globals[name] is func:\r\n name = '%s-alias' % name\r\n if inspect.ismethod(func):\r\n # We need to add the name of the class\r\n if hasattr(func, 'im_class'):\r\n klass = func.im_class\r\n module.append(klass.__name__)\r\n if os.name == 'nt' and win_characters:\r\n # Stupid windows can't encode certain characters in filenames\r\n name = _clean_win_chars(name)\r\n module = [_clean_win_chars(s) for s in module]\r\n return module, name",
"def fname(func: Callable) -> str:\n return \"{}.{}\".format(func.__module__, func.__name__)",
"def _get_func_name(func):\n parts = []\n module = inspect.getmodule(func)\n if module:\n parts.append(module.__name__)\n\n qualname = func.__qualname__\n if qualname != func.__name__:\n parts.append(qualname[: qualname.find(\".\")])\n\n parts.append(func.__name__)\n return \".\".join(parts)",
"def func(f):\n return func_custom(f.func_name)(f)",
"def function_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function_name\")",
"def function_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function_name\")",
"def get_function_name(ifunc, *, scoped=False, mangle=False):\n\n name = _translate_function_name(interrogate_function_name(ifunc), mangle)\n\n if scoped:\n parent = interrogate_function_class(ifunc)\n if parent:\n name = get_type_name(parent, scoped=True, mangle=mangle) + '.' + name\n\n return name",
"def get_callable_from_string(f_name):\n try:\n mod_name, func_name = get_mod_func(f_name)\n if mod_name == \"\" and func_name == \"\":\n raise AttributeError(\"%s couldn't be converted to a module or function name\" % f_name)\n\n module = __import__(mod_name)\n\n if func_name == \"\":\n func_name = mod_name # The common case is an eponymous class\n\n return getattr(module, func_name)\n\n except (ImportError, AttributeError), exc:\n raise RuntimeError(\"Unable to create a callable object for '%s': %s\" % (f_name, exc))",
"def function_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"function_name\")",
"def _name_from_args(func, _, params):\n return \"{}_{}\".format(func.__name__, \"_\".join(str(arg) for arg in params.args))",
"def function_name(cls):\n function_name = String(cls.__name__).snakecase().lower()\n return function_name",
"def get_function_name():\n\n # inspect.stack()[0][2] returns name of this function\n function_name = inspect.stack()[1][3]\n\n return function_name",
"def fname(func):\n return \"%s.%s\" % (func.__module__, func.__name__)",
"def function_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"function_name\")",
"def function_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"function_name\")",
"def function_name(parameters):",
"def function_name_to_string(func):\n if func == statistical_parity_difference:\n return \"Statistical Parity Difference\"\n if func == theil_index:\n return \"Theil Index\"\n if func == equal_opportunity_difference:\n return \"Equal Opportunity Difference\"\n if func == disparate_impact:\n return \"Disparate Impact\"\n if func == average_odds_difference:\n return \"Average Odds Difference\"\n if func == auc:\n return \"AUC\"\n if func == binary_accuracy:\n return \"Binary Accuracy\"",
"def get_function_name(wrapped, instance, args, kwargs):\n return wrapped.__name__",
"def display_name(self) -> str:\n return f\"{self.func.__module__}.{self.func.__qualname__}\"",
"def _uniquify_name(self, name, callable):\n while True:\n try:\n callable(name)\n name += u'_'\n except:\n break\n return name",
"def get_function_name(self):\n return self.__function",
"def get_qual_name(func: object) -> str:\n return func.__module__ + \".\" + func.__name__",
"def get_function(name):\n \n # Check if already a function\n if callable(name):\n return name\n \n if not isinstance(name, str):\n raise ValueError(f'{name} must be callable or a string.')\n \n if name in globals(): \n if callable(globals()[name]):\n f = globals()[name]\n else:\n raise ValueError(f'global {name} is not callable')\n else:\n # try to import\n m_name, f_name = name.rsplit('.', 1)\n module = importlib.import_module(m_name)\n f = getattr(module, f_name)\n \n return f",
"def get_class_decoder_function_name(name):\n name = get_class_functional_name(name)\n return 'decode_{0}'.format(name)"
] | [
"0.72435784",
"0.68455863",
"0.67366433",
"0.6668289",
"0.6594174",
"0.6500698",
"0.6486582",
"0.6452623",
"0.6450915",
"0.64476234",
"0.640476",
"0.640476",
"0.63907254",
"0.63632125",
"0.63278747",
"0.6300249",
"0.6296908",
"0.6286463",
"0.62828344",
"0.6204712",
"0.6204712",
"0.61979127",
"0.61744064",
"0.614954",
"0.61047083",
"0.6092441",
"0.607495",
"0.6051154",
"0.6028006",
"0.60267836"
] | 0.8215907 | 0 |
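
A brief, self-contained sketch of the fallback order in the `_callable_name` document above: the plain `__name__`, then the wrapped function of a `functools.partial`, then the class name of a callable instance. The sample callables are made up for illustration.

# Name resolution falls back from __name__ to partial.func.__name__ to the class name.
from functools import partial


def _callable_name(f):
    try:
        return f.__name__
    except AttributeError:
        if isinstance(f, partial):
            return f.func.__name__
        return f.__class__.__name__


def greet(name, punctuation='!'):
    return 'Hello, ' + name + punctuation


class Greeter:
    def __call__(self, name):
        return 'Hi, ' + name


print(_callable_name(greet))                            # greet
print(_callable_name(partial(greet, punctuation='?')))  # greet
print(_callable_name(Greeter()))                        # Greeter
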
Transform data (`keep` is user-specified second-stage index). | def __call__(self, data, keep):
return self.transform(data, keep) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def transform(self, dataframe: DataFrame) -> DataFrame:",
"def convert_index_select(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n index = g.get_node(op.input(\"Index\")[0])\n axis = op.attr(\"dim\")\n out = _op.transform.take(x, index, axis, mode=\"wrap\")\n g.add_node(op.output(\"Out\")[0], out)",
"def transform(self, data: pd.DataFrame, columns: list, verbose: int=1) -> pd.DataFrame:",
"def transform():",
"def transform(self, data):",
"def transform(self, dataset, labels):\n print(f\"Dropping {len(self.deficient)} deficient features...\")\n dataset.drop(columns=self.deficient, inplace=True)\n print(f\"Scanning {len(dataset)} samples for duplicates...\")\n duplicates = dataset.duplicated()\n print(f\"Dropping {sum(duplicates)} duplicate samples...\")\n dataset.drop(index=dataset.index[duplicates], inplace=True)\n dataset.reset_index(drop=True, inplace=True)\n labels.drop(labels=labels.index[duplicates], inplace=True)\n labels.reset_index(drop=True, inplace=True)\n return dataset, labels",
"def preprocess(data, to_drop=[]):\n \n columns = data.columns.to_list()\n \n # split data to numeric vs categorical\n numeric_features = data.select_dtypes(include=[\n 'int64', 'float64']).columns\n \n if len(to_drop) > 0:\n categorical_features = data.select_dtypes(include=[\n 'object']).drop(to_drop, axis=1).columns\n print(categorical_features)\n else: \n categorical_features = data.select_dtypes(include=[\n 'object']).columns\n \n categorical_transformer = Pipeline(steps=[\n ('imputer', SimpleImputer(strategy='most_frequent', fill_value='missing'))])\n \n numerical_transformer = Pipeline(steps=[\n ('imputer', SimpleImputer(strategy='median')),\n ('scaler', RobustScaler())\n ])\n # missing_values = np.nan\n \n# Bundle preprocessing for numerical and categorical data\n preprocessor = ColumnTransformer(\n transformers=[\n ('num', numerical_transformer, numeric_features),\n ('cat', categorical_transformer, categorical_features)\n ])\n\n my_pipeline = Pipeline(steps=[('preprocessor', preprocessor) ])\n \n for col in to_drop:\n columns.remove(col)\n print('Hello')\n \n trans_data = my_pipeline.fit_transform(data)\n return trans_data#pd.DataFrame(#, columns=columns) ",
"def transform_dataframe (input_data) : \n # PARTITIONING data based on portofolio value and \n # ORDERING the number of client in descending order \n # Then FILTERING based on their numbering \n\n w = Window.partitionBy(\"NMPTF\").orderBy(col(\"TOTAL_CLIENTS\").desc())\n transformed_df1 = ( input_data.select(\"*\",row_number()\n .over(w).alias('NUMBER'))\n .where(col(\"NUMBER\")<=3) )\n transformed_df_final = transformed_df1.drop(\"NUMBER\")\n \n\n return transformed_df_final",
"def re_transform(old_db,new_db,retransform=[]): \n for x in retransform:\n dfn = pd.concat([old_db[x],new_db[x]],axis = 1)\n dfn.columns = ['old','new']\n \n grouping = dfn.groupby(['old']).agg(lambda x:stats.mode(x)[0]) #get mode\n nan_newvalues = dfn['new'][dfn['old'].isnull()].reset_index(drop=True)\n\n values_imputate = [(grouping['new'] - z).idxmin() for z in nan_newvalues]\n dfn['old'][dfn['old'].isnull()] = values_imputate\n new_categ = dfn['old']\n new_db.loc[:,x] = new_categ\n return new_db",
"def transform(self, data: pd.DataFrame) -> pd.DataFrame:\n data = data[data.language == 'English'] # type: ignore\n data = data[['premise', 'hypothesis', 'label']] # type: ignore\n return data",
"def preprocess(df):\n drop_cols = ['duration_ms', 'key', 'mode', 'time_signature', 'popularity', 'tempo']\n drop_cols += ['track_id', 'track_name', 'artist_name']\n for col in drop_cols:\n if col in list(df.columns):\n df = df.drop(columns=col)\n return df",
"def transform(self, data):\n data[self.field] = data[self.field].fillna(-1)\n return data",
"def reconstruct_input(self, ix):",
"def transform(self, data: pd.DataFrame) -> pd.DataFrame:\n return pd.DataFrame(self.pipeline.transform(data))",
"def _common_preprocess(self, data):\n\n data = data.drop('id', axis=1) \n data = data.drop(['17', '488', 'B01AF', 'H01AB'], axis=1, errors='ignore')\n\n # drop age outliers\n idx = data[(data['age'] > 99)].index\n data = data.drop(idx)\n\n # drop rows with CKD\n idx = data[((data['585'] != 0) | (data['586'] != 0)) &\n (data['ckd'] == 0)].index\n data = data.drop(idx)\n data = data.drop(['585', '586'], axis=1)\n\n return data",
"def preprocess_feature(df):",
"def transform(self, X, y=None):\n if isinstance(X, pd.DataFrame):\n return X[self.columns_to_keep_]\n else:\n return X[:, self.columns_to_keep_]",
"def inverse_transform(self, df):\n return df",
"def transform(self, data: pd.DataFrame):\n raise NotImplementedError",
"def transform(self, X, y=None):\n return X.drop(columns=self.columns_to_remove)",
"def transform(self, X, y=None):\n X_ww = infer_feature_types(X)\n if len(self._text_columns) == 0:\n return X_ww\n\n provenance = {}\n for col in self._text_columns:\n transformed = self._lsa_pipeline.transform(X_ww[col])\n X_ww.ww[\"LSA({})[0]\".format(col)] = pd.Series(\n transformed[:, 0], index=X_ww.index\n )\n X_ww.ww[\"LSA({})[1]\".format(col)] = pd.Series(\n transformed[:, 1], index=X_ww.index\n )\n provenance[col] = [\"LSA({})[0]\".format(col), \"LSA({})[1]\".format(col)]\n self._provenance = provenance\n\n X_t = X_ww.ww.drop(columns=self._text_columns)\n return X_t",
"def transform(self, data: pd.DataFrame) -> pd.DataFrame:\n data = data[['premise', 'hypothesis', 'label']] # type: ignore\n return data",
"def transform(self, df):\n\t\tdf = self.__parse_json(df)\n\t\tdf = self.__fillnan(df)\n\t\tdf = self.__parse_dates(df)\n\t\tdf['budget'] = df['budget'].apply(lambda x: self.missing_budget_imputing if int(x) == 0 else x)\n\t\tdf['has_collections'] = df['belongs_to_collection'].isna().astype(int)\n\t\tdf['homepage'] = df['homepage'].isna().astype(int)\n\t\tdf['is_en'] = df['original_language'].apply(lambda x: 1 if x == 'en' else 0)\n\t\tdf = self.__encode_genre_transform(df)\n\t\tdf = self.__top_countries_and_companies_transform(df)\n\t\tdf = self.__bin_columns_transform(df)\n\t\tdf.drop(\n\t\t\t['release_date', 'original_language', 'production_countries', 'production_companies', 'id', 'backdrop_path',\n\t\t\t 'imdb_id', 'poster_path', 'video', 'belongs_to_collection', 'status', 'runtime',\n\t\t\t 'original_title', 'overview', 'tagline', 'title'], axis=1, inplace=True)\n\t\treturn df",
"def prepare_data(train, test):\n # change the name of the target column\n train.rename(columns={\"revenue\": \"target\"}, inplace=True)\n # map bool values to yes and no\n train[\"Weekend\"] = train[\"Weekend\"].map({True: \"Yes\", False: \"No\"})\n test[\"Weekend\"] = test[\"Weekend\"].map({True: \"Yes\", False: \"No\"})\n # set the id col as index\n train.set_index(\"id\", inplace=True)\n test.set_index(\"id\", inplace=True)\n\n # seperate the fetures and the target\n X_train = train.drop(\"target\", axis=1).copy()\n y_train = train[\"target\"].copy()\n X_test = test.copy()\n\n # select numerical and categorical columns\n num_cols = X_train.select_dtypes(exclude=\"object\").columns.tolist()\n cat_cols = X_train.select_dtypes(include=\"object\").columns.tolist()\n\n # numerical pipeline\n num_pipe = make_pipeline(SimpleImputer(strategy=\"mean\"))\n\n # categorical pipeline\n cat_pipe = make_pipeline(\n SimpleImputer(strategy=\"constant\", fill_value=\"NA\"),\n OneHotEncoder(handle_unknown=\"ignore\", sparse=False),\n )\n\n # full pipeline for data preprocessing\n full_pipe = ColumnTransformer(\n [(\"num\", num_pipe, num_cols), (\"cat\", cat_pipe, cat_cols)]\n )\n return X_train, y_train, X_test, full_pipe",
"def _transform(self, X, y=None):\n # lazy imports to avoid hard dependency\n from tsfresh import extract_features\n\n Xt = extract_features(\n X,\n column_id=X.columns[0],\n column_value=X.columns[3],\n column_kind=X.columns[2],\n column_sort=X.columns[1],\n **self.default_fc_parameters_,\n )\n\n # When using the long input format, tsfresh seems to sort the index,\n # here we make sure we return the dataframe in the sort order as the\n # input data\n instances = X.iloc[:, 0].unique()\n Xt = Xt.reindex(instances)\n return Xt",
"def stage_two_preprocessing(data: pd.Series) -> pd.Series:\n # designed to be run after remove_contractions\n data_ = data.dropna()\n data_ = remove_punctuation(data_)\n data_ = numbers_to_words(data_)\n data_ = remove_stopwords(data_)\n return data_",
"def preprocess_time(data, metadata):\n timestamp_name = metadata[\"timestamp_name\"]\n if timestamp_name == \"\":\n timestamp_name = \"fake_ts\"\n data[timestamp_name] = data.index\n\n data[timestamp_name] = pd.to_datetime(data[timestamp_name])\n data.sort_values(by=[timestamp_name], inplace=True)\n data.set_index([timestamp_name], inplace=True)\n\n return data",
"def samples_keep(self,index):\n\n\t\tif isinstance(index, (int, long)): index = range(self.samples)[-index:]\n\n\t\tself.sampled_topics = np.take(self.sampled_topics,index,axis=0)\n\t\tself.tt = np.take(self.tt,index,axis=2)\n\t\tself.dt = np.take(self.dt,index,axis=2)\n\n\t\tself.samples = len(index)",
"def clean(data, skip_transformations=False, target=False):\n data = convert_type(data)\n data = category_grouping(data)\n if target:\n target = data[['Target']]\n data = data.drop(columns='Target')\n x_train, x_test, y_train, y_test = f.train_test(data, target)\n x_train, y_train = resample_vals(x_train, y_train)\n x_train = x_train.assign(Train=lambda x: 1)\n x_test = x_test.assign(Train=lambda x: 0)\n data = pd.concat([x_train, x_test])\n data = onehot_features(data)\n data = log_trans(data, test=True)\n data = cap_outliers(data, test=True)\n data = scale(data, test=True)\n x_train = data.loc[data['Train'] == 1]\n x_test = data.loc[data['Train'] == 0]\n return x_train, x_test, y_train, y_test\n data = onehot_features(data)\n if skip_transformations:\n return data\n data = log_trans(data)\n data = cap_outliers(data)\n data = scale(data)\n return data",
"def _transform(self, dataset):\n raise NotImplementedError()"
] | [
"0.55106413",
"0.54800296",
"0.53775775",
"0.5366347",
"0.5208422",
"0.52055204",
"0.51941574",
"0.5188958",
"0.5141277",
"0.51131773",
"0.5106497",
"0.510201",
"0.50470954",
"0.5045959",
"0.502488",
"0.49978232",
"0.49935693",
"0.4965403",
"0.49543115",
"0.4952945",
"0.4943503",
"0.49151474",
"0.49149868",
"0.4914079",
"0.49016622",
"0.4893758",
"0.48916516",
"0.4870366",
"0.4851304",
"0.48486742"
] | 0.5873317 | 0 |
Initialises the ``InputDevice`` object and starts ``pifacecad.SwitchEventListener``. Also registers callbacks to the ``press_key`` method. | def __init__(self):
self.cad = pifacecad.PiFaceCAD()
self.listener = pifacecad.SwitchEventListener(chip=self.cad)
for i in range(8):
self.listener.register(i, pifacecad.IODIR_FALLING_EDGE, self.press_key)
self.listener.activate()
atexit.register(self.atexit) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start_listener():\n listener = keyboard.Listener(\n on_press=on_press\n )\n listener.start()",
"def setInput(self):\n gpio.setup(self.bcm_id, gpio.IN, pull_up_down=self.pull)\n self.mode = gpio.IN",
"def startCallback (self):\n if self.hasCallback:\n return\n # set up IRQ interrupt function. GPIO.setmode should alreay have been called\n GPIO.setup(self.IRQ_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n GPIO.add_event_detect (self.IRQ_PIN, GPIO.FALLING)\n GPIO.add_event_callback (self.IRQ_PIN, AHF_LickDetectorCallback) \n self.hasCallack = True\n # state of touches from one invocation to next, used in callback to separate touches from untouches\n self.prevTouches = self.mpr121.touched()",
"def __init__(self, dev_path, caps_mapping, pulse_handler):\n self.caps_mapping = caps_mapping\n self.pulse_handler = pulse_handler\n\n self.device = evdev.InputDevice(dev_path)\n\n if self._is_capable_of(caps_mapping):\n # Every 0.1 seconds is good enough\n self.handler_timeout = GLib.timeout_add(100, self._handle_events)\n else:\n self.device.close()",
"def listen(device_input, callback):\n while True:\n time.sleep(0.01)\n event = readControlDataRaw(device_input)\n (control_id, control_type, event_type, value) = parseControlEvent(event)\n if control_id != -1:\n callback(control_id, control_type, event_type, value)",
"def listen(self):\n\n if not self.key_data:\n self.key_data = {}\n for i in range(1024):\n self.key_data[i] = False\n\n if not self.axis_data:\n self.axis_data = {}\n for i in range(self.controller.get_numaxes()):\n self.axis_data[i] = 0.0\n\n if not self.button_data:\n self.button_data = {}\n for i in range(self.controller.get_numbuttons()):\n self.button_data[i] = False\n\n if not self.hat_data:\n self.hat_data = {}\n for i in range(self.controller.get_numhats()):\n self.hat_data[i] = (0, 0)\n\n debug_toggle = True\n print_state_toggle = True\n\n # These parameters define how frequesnt speed setting sent over serial to arduino\n speed_threshold = 10.0 # sets update threshold\n speed_step = 1 # sets acceleration\n speed_delay = 0.01 # delay per 1 step in sec\n\n mode_switch = \"j\" # control mode: k - keyboard, j - joystick\n\n # Parameters for keyboard control mode\n speed = 0.0\n speed_current = 0\n direction = \"r\" # r - release, f - forward, b - backward\n direction_current = \"r\"\n\n # Parameters for joystick control mode\n speed_l = 0\n speed_r = 0\n prev_speed_l = 0\n prev_speed_r = 0\n prev_btn = False\n\n while True:\n prev = self.axis_data\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n self.key_data[event.key] = True\n elif event.type == pygame.KEYUP:\n self.key_data[event.key] = False\n if event.type == pygame.JOYAXISMOTION:\n self.axis_data[event.axis] = round(event.value,2)\n elif event.type == pygame.JOYBUTTONDOWN:\n self.button_data[event.button] = True\n elif event.type == pygame.JOYBUTTONUP:\n self.button_data[event.button] = False\n elif event.type == pygame.JOYHATMOTION:\n self.hat_data[event.hat] = event.value\n\n # check for exit command\n if self.button_data[9] or self.key_data[pygame.QUIT] or self.key_data[pygame.K_ESCAPE]:\n pygame.quit()\n break\n\n # toggle debug\n if self.key_data[pygame.K_d]:\n if debug_toggle:\n print(\"Toggle debug\")\n self.ser.write(b'd')\n debug_toggle = False\n else:\n debug_toggle = True\n\n # print out motors status\n if self.key_data[pygame.K_p]:\n if print_state_toggle:\n self.ser.write(b'p')\n if self.ser.in_waiting:\n print (self.ser.readline())\n print_state_toggle = False\n else:\n print_state_toggle = True\n\n if self.key_data[pygame.K_1] and mode_switch != \"k\":\n mode_switch = \"k\"\n\n if self.key_data[pygame.K_2] and mode_switch != \"j\":\n print(\"Joystick mode: ON\")\n mode_switch = \"j\"\n\n if mode_switch == \"k\": # keyboard control mode\n # accelearte forward\n if self.key_data[pygame.K_a] and direction != \"r\":\n if speed < 255.0:\n speed = speed + speed_step\n sleep(speed_delay)\n # accelerate backward\n if self.key_data[pygame.K_z] and direction != \"r\":\n if speed > 0.0:\n speed = speed - speed_step\n sleep(speed_delay)\n\n if self.key_data[pygame.K_UP] and direction != \"f\":\n direction = \"f\"\n if self.key_data[pygame.K_DOWN] and direction != \"b\":\n direction = \"b\"\n if self.key_data[pygame.K_UP] == False and direction == \"f\":\n direction = \"r\"\n if self.key_data[pygame.K_DOWN] == False and direction == \"b\":\n direction = \"r\"\n\n if math.fabs(speed - speed_current) > speed_threshold or direction != direction_current:\n # print(\"{0}, {1}, {2}, {3}\".format(speed, speed_current, direction, direction_current))\n direction_current = direction\n if direction == \"r\":\n speed = 0.0\n speed_current = int(speed)\n str_r = \"sr\" + direction_current + str(speed_current) + \"e\"\n str_l = \"sl\" + direction_current + str(speed_current) + \"e\"\n print(str_l)\n print(str_r)\n 
self.ser.write(str_r.encode())\n self.ser.write(str_l.encode())\n\n if(self.key_data[pygame.K_LEFT]):\n str_rf = \"srf\" + str(speed_current) + \"e\"\n self.ser.write(str_rf.encode())\n str_lf = \"slf\" + str(int(speed_current*0.9)) + \"e\"\n self.ser.write(str_lf.encode())\n elif(self.key_data[pygame.K_RIGHT]):\n str_rb = \"srf\" + str(int(speed_current*0.9)) + \"e\"\n self.ser.write(str_rb.encode())\n str_lb = \"slf\" + str(speed_current) + \"e\"\n self.ser.write(str_lb.encode())\n\n if (self.key_data[pygame.K_UP] == False and self.key_data[pygame.K_DOWN] == False) and (self.key_data[pygame.K_a] == False and self.key_data[pygame.K_z] == False):\n speed = 0\n speed_current = speed\n direction = \"r\"\n direction_current = direction\n self.ser.write(b'srze')\n self.ser.write(b'slze')\n\n if mode_switch == \"j\": # joystick control mode\n if self.ser.in_waiting:\n data = str(self.ser.readline().strip())\n data = data[2 :len(data)-1]\n print(data)\n #self.aio.send('Team Hacky Slackers', data)\n\n prev_speed_l = speed_l\n prev_speed_r = speed_r\n speed_threshold = 1\n\n #simplified linear mapping for controller\n speed_l = int((self.axis_data[0]*(-50)) + 90)\n speed_r = int(math.fabs(self.axis_data[3]*255))\n #print(self.axis_data)\n #print(\"curr_l: {0}, perv_l: {1}, curr_r:{2}, perv_r:{3}\".format(speed_l, prev_speed_l, speed_r,prev_speed_r))\n\n if self.axis_data[0] < -0.05 and math.fabs(speed_l - prev_speed_l) > speed_threshold:\n str_lf = \"slf\" + str(speed_l) + \"e\"\n self.ser.write(str_lf.encode())\n elif self.axis_data[0] > 0.05 and math.fabs(speed_l - prev_speed_l) > speed_threshold:\n str_lb = \"slb\" + str(speed_l) + \"e\"\n self.ser.write(str_lb.encode())\n\n\n if self.axis_data[3] < -0.03 and math.fabs(speed_r - prev_speed_r) > speed_threshold:\n str_rf = \"srf\" + str(speed_r) + \"e\"\n self.ser.write(str_rf.encode())\n elif self.axis_data[3] > 0.03 and math.fabs(speed_r - prev_speed_r) > speed_threshold:\n str_rb = \"srb\" + str(speed_r) + \"e\"\n self.ser.write(str_rb.encode())\n\n if ( self.axis_data[0] >= -0.05 and self.axis_data[0] <= 0.05 ) and ( self.axis_data[3] >= -0.05 and self.axis_data[3] <= 0.05 ):\n speed_l = 90\n speed_r = 0\n self.ser.write(b'srze')\n self.ser.write(b'slze')\n\n #Logic to call RFID scan only once per click of R1 button\n # if(prev_btn != self.button_data[5]):\n # prev_btn = self.button_data[5]\n # if self.button_data[5] :\n # print(\"Scanning for RFID Card\")\n # self.ser.write(\"i\".encode())\n\n # clear()\n # pprint.pprint(self.button_data)\n # pprint.pprint(self.axis_data)\n # pprint.pprint(self.hat_data)",
"def start(self):\n\n buttons = {}\n for pin in self._pin_nums:\n buttons[\"button_\" + str(pin)] = pin\n\n self._gpio = self.init_interface(\"gpio\",\n impl=self._impl, \n **buttons)\n\n d_len = len(self._dir)\n b_len = len(self._bounce)\n for i, button in enumerate(buttons):\n # Initiliaze every button\n dire = self._dir[i % d_len]\n self._directions.append(dire)\n\n boun = self._bounce[i % b_len]\n self._debounce.append(boun)\n\n self._button_init(button, dire, boun)",
"def start(self):\n if self._start_event is None:\n _call_spawn_callbacks(self)\n hub = get_my_hub(self) # pylint:disable=undefined-variable\n self._start_event = hub.loop.run_callback(self.switch)",
"def setup_platform(hass, config, add_devices_callback, discovery_info=None):\n add_devices_callback([\n HE853Switch('OviSwitch', STATE_ON),\n HE853Switch('AC', STATE_OFF)\n ])",
"def on_switch(self, callback):\n self._switch_callback = callback",
"def start(self):\n self.has_event = False\n self.running = True\n self._condition.acquire()\n self._thread = threading.Thread(target=read_input, args=(self,))\n self._thread.start()",
"def setup(self, callback=False, display=\"lcd\"):\n self.display_medium = display\n self._setup_gpio_in()\n if callback:\n self._add_event_detect()\n self._add_event_callback()",
"def __init__(self, graphics_controller):\n super(KeyHandler, self).__init__()\n self._key_handler = window.key.KeyStateHandler()\n self._key_release_mappings = {}\n self._key_press_mappings = {}\n self._key_down_mappings = {}\n\n graphics_controller.add_key_handler(\n self._key_handler,\n on_press=lambda *args:\n self._handle_key_event(*args, self._key_press_mappings),\n on_release=lambda *args:\n self._handle_key_event(*args, self._key_release_mappings))",
"def setup(self):\n\n for row_pin in keypad_row_pins:\n #Set up row-pins\n self.gpio.setup(row_pin, self.gpio.OUT)\n\n for col_pin in keypad_col_pins:\n #Set up col-pins\n self.gpio.setup(col_pin, self.gpio.IN)",
"def setupInputEventHandlers(self):\n\n default.Script.setupInputEventHandlers(self)\n self.inputEventHandlers.update(\n self.structuralNavigation.inputEventHandlers)\n\n self.inputEventHandlers[\"sayAllHandler\"] = \\\n input_event.InputEventHandler(\n Script.sayAll,\n cmdnames.SAY_ALL)\n\n self.inputEventHandlers[\"panBrailleLeftHandler\"] = \\\n input_event.InputEventHandler(\n Script.panBrailleLeft,\n cmdnames.PAN_BRAILLE_LEFT,\n False) # Do not enable learn mode for this action\n\n self.inputEventHandlers[\"panBrailleRightHandler\"] = \\\n input_event.InputEventHandler(\n Script.panBrailleRight,\n cmdnames.PAN_BRAILLE_RIGHT,\n False) # Do not enable learn mode for this action",
"def monitor(self):\n\n # Log beginning of process\n board_logger = self.get_board_logger()\n board_logger.info(\"Beginning monitor of input for pin %s.\", \\\n self.__pin)\n \n # Set input status of pin for board object\n self.set_input_status(GPIO.input(self.__pin))\n status = \"ALARM\" if self.get_input_status() else \"RECOVERY\"\n board_logger.info(\"Initital status: %s\", status)\n\n # Deal with an error status upon power failure\n if self.get_input_status() == 1: self.initiate_event()\n\n # Monitor pin until KeyBoardInterrupt is detected\n while True:\n\n # Log monitoring\n board_logger.info(\"Monitoring for pin changes...\")\n \n # Wait for a change in pin status\n GPIO.wait_for_edge(self.__pin, GPIO.BOTH)\n\n sleep(0.005) #debounce for 5ms\n\n if self.get_input_status() != GPIO.input(self.__pin):\n \n # Set input status of pin\n self.set_input_status(GPIO.input(self.__pin))\n\n # Initiate event\n self.initiate_event()",
"def setup_gpio(self):\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(self._input_pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)",
"def input_key_event(self, key, custom_key=None):\n\n key_event = INPUT_ACTION_SWITCHER.get(key)\n if key_event == \"-1\":\n key_event = custom_key\n self.android_device_driver.adb.exec_adb_cmd(\"shell input keyevent \" +\n key_event).wait()",
"def setup(self):\n if not self._gpio_setup:\n if self._gpio is None:\n try:\n import RPi.GPIO as GPIO\n self._gpio = GPIO\n except ImportError:\n raise ImportError('This library requires the RPi.GPIO module\\nInstall with: sudo apt install python-rpi.gpio')\n self._gpio.setmode(self._gpio.BCM)\n self._gpio.setwarnings(False)\n self._gpio.setup(self.cs_pin, self._gpio.OUT)\n self._gpio.setup(self.dc_pin, self._gpio.OUT, initial=self._gpio.LOW, pull_up_down=self._gpio.PUD_OFF)\n self._gpio.setup(self.reset_pin, self._gpio.OUT, initial=self._gpio.HIGH, pull_up_down=self._gpio.PUD_OFF)\n self._gpio.setup(self.busy_pin, self._gpio.IN, pull_up_down=self._gpio.PUD_OFF)\n\n if self._spi_bus is None:\n import spidev\n self._spi_bus = spidev.SpiDev()\n\n self._spi_bus.open(0, self.cs_channel)\n self._spi_bus.no_cs = True\n self._spi_bus.max_speed_hz = 5000000\n\n self._gpio_setup = True\n\n self._gpio.output(self.reset_pin, self._gpio.LOW)\n time.sleep(0.1)\n self._gpio.output(self.reset_pin, self._gpio.HIGH)\n time.sleep(0.1)\n\n self._gpio.output(self.reset_pin, self._gpio.LOW)\n time.sleep(0.1)\n self._gpio.output(self.reset_pin, self._gpio.HIGH)\n\n self._busy_wait(1.0)\n\n # Sending init commands to display\n self._send_command(AC073TC1_CMDH, [0x49, 0x55, 0x20, 0x08, 0x09, 0x18])\n\n self._send_command(AC073TC1_PWR, [0x3F, 0x00, 0x32, 0x2A, 0x0E, 0x2A])\n\n self._send_command(AC073TC1_PSR, [0x5F, 0x69])\n\n self._send_command(AC073TC1_POFS, [0x00, 0x54, 0x00, 0x44])\n\n self._send_command(AC073TC1_BTST1, [0x40, 0x1F, 0x1F, 0x2C])\n\n self._send_command(AC073TC1_BTST2, [0x6F, 0x1F, 0x16, 0x25])\n\n self._send_command(AC073TC1_BTST3, [0x6F, 0x1F, 0x1F, 0x22])\n\n self._send_command(AC073TC1_IPC, [0x00, 0x04])\n\n self._send_command(AC073TC1_PLL, [0x02])\n\n self._send_command(AC073TC1_TSE, [0x00])\n\n self._send_command(AC073TC1_CDI, [0x3F])\n\n self._send_command(AC073TC1_TCON, [0x02, 0x00])\n\n self._send_command(AC073TC1_TRES, [0x03, 0x20, 0x01, 0xE0])\n\n self._send_command(AC073TC1_VDCS, [0x1E])\n\n self._send_command(AC073TC1_T_VDCS, [0x00])\n\n self._send_command(AC073TC1_AGID, [0x00])\n\n self._send_command(AC073TC1_PWS, [0x2F])\n\n self._send_command(AC073TC1_CCSET, [0x00])\n\n self._send_command(AC073TC1_TSSET, [0x00])",
"def __init__(self):\n super().__init__()\n\n # Robot state\n self.ask_mode = False\n\n # Connect two large motors on output ports B and C\n self.sound = Sound()\n self.leds = Leds()\n self.p1 = TouchSensor(INPUT_1)\n self.p2 = TouchSensor(INPUT_2)\n self.p3 = TouchSensor(INPUT_3)\n self.p4 = TouchSensor(INPUT_4)",
"def setup():\n GPIO.setmode(GPIO.BCM)\n for pin in [config.gpio_pin_p1_stretch,\n config.gpio_pin_p1_serve,\n config.gpio_pin_p2_stretch,\n config.gpio_pin_p2_serve]:\n GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n\n input_reader_thread = threading.Thread(target=input_reader_worker)\n input_reader_thread.setDaemon(True)\n input_reader_thread.start()",
"def __init__(self, cfg_gpio, cb_function=None, cb_mode_change=None, logger=None):\n\t\n\t\tself.gpio = GpioWrapper()\n\t\n\t\t# CONSTANTS\n\t\tself.RGB_PWM_FREQ = 100\n\t\n\t\t#TEMP - DEBUG - EXPERIMENTAL\n\t\tself.int_encoder = None\n\t\tself.int_enabled = True\n\t\n\t\t# configuration (dictionary)\n\t\tself.cfg_gpio = cfg_gpio\n\n\t\t# callbacks\n\t\tif cb_function is not None:\n\t\t\tself.callback_function = cb_function\n\t\t\tstaticmethod(self.callback_function)\n\t\t\n\t\tif cb_mode_change is not None:\n\t\t\tself.callback_mode_change = cb_mode_change\n\t\t\tstaticmethod(self.callback_mode_change)\n\t\t\t\n\t\t# (optional) logger\n\t\tif logger is not None:\n\t\t\tself.LOG_TAG = 'GPIO'\n\t\t\tself.logger = logger\n\t\t\tself.__printer(\"GpioController initializing.\",level=LL_DEBUG)\n\t\n\t\t# pins\n\t\tself.pins_state = {}\t\t# pin (previous) state\n\t\tself.pins_function = {}\t\t# pin function(s)\n\t\tself.pins_config = {}\t\t# consolidated config, key=pin\n\t\t\n\t\t# mode sets\n\t\tself.ms_all = {}\t\t\t# contains the different modesets, key=modeset name\n\t\tself.ms_authorative = []\t# list of modeset of which we have an authority\n\t\t\n\t\t# events\n\t\tself.event_mode_change = [] # list of event dicts, connected to mode changes\n\t\tself.event_mode_command = []\n\t\t\n\t\t# default long press time\n\t\tself.long_press_ms = 800\n\n\t\t# experimental -- detect speed\n\t\tself.encoder_last_chg = datetime.now()\n\t\tself.encoder_last_speed = None\n\t\tself.encoder_fast_count = 0\n\t\t\n\t\tif callable(self.callback_function):\n\t\t\tself.__gpio_setup(self.int_handle_switch,self.int_handle_encoder)\n\t\telse:\n\t\t\tself.__gpio_setup()\n\t\t\t\n\t\t# inform our parent that we have an authorative modeset, so it can inform the world of its state\n\t\tif callable(self.callback_function):\n\t\t\tmode_change_params = []\n\t\t\n\t\t\tfor modesetid in self.ms_authorative:\n\t\t\t\tfor modeset in self.ms_all[modesetid]:\n\t\t\t\t\tmode_change_params.append(modeset['mode'])\n\t\t\t\t\tmode_change_params.append(modeset['state'])\n\t\t\t\t\t\n\t\t\tif len(mode_change_params) > 0:\n\t\t\t\tself.callback_function('MODE-CHANGE',mode_change_params)\t# TODO. CHANGE to *mode_change_params\n\t\t\t\t#self.callback_mode_change(mode_change_params,init=True)",
"def initialize():\n for pin in sorted(OUTPUT_PINS.values()):\n _enable_pin(pin, OUT)\n\n for pin in sorted(INPUT_PINS.values()):\n _enable_pin(pin, IN)",
"def launch ():\n def start_switch (event):\n log.info(\"switch %s has come up\" % event.dpid)\n log.info(event.connection.ports)\n sw = switches_by_dpid.get(event.dpid)\n\n if sw is None:\n # New switch\n sw = TopoSwitch(event.connection)\n switches_by_dpid[event.dpid] = sw\n sw.connect(event.connection)\n else:\n sw.connect(event.connection)\n core.openflow.addListenerByName(\"ConnectionUp\", start_switch)",
"def __init__(self):\n super().__init__()\n\n self._registry = {}\n el = gremlin.event_handler.EventListener()\n el.joystick_event.connect(self._joystick_cb)",
"def listen(self):\n while self.active:\n self.handle_input()",
"def init(self):\n self.connect_to_switches()\n self.reset_states()",
"def listen(self):\n if self.listening:\n return\n\n if self.mode == gpio.IN:\n # Remove any existing detection\n gpio.remove_event_detect(self.bcm_id)\n\n # Use separate callbacks for rising and falling edges\n gpio.add_event_detect(self.bcm_id, gpio.BOTH,\n callback=self._edge)\n\n self.listening = True",
"def start(self):\n #TODO add thread checking, should only be 1 thread per serial interface\n self.connected = True\n t1 = Thread(target=self._read_cardiochip) \n t1.daemon = True\n t1.start()\n print \"Started CardioChip reader\"",
"def main():\n\tleds = [KeyboardLed(number, indicator) for number, indicator in get_available_leds()]\n\n\tkeyboard_hook = pyxhook.HookManager()\n\tkeyboard_hook.KeyDown = lambda event: flash_leds_synced(leds) \n\tkeyboard_hook.HookKeyboard()\n\n\tkeyboard_hook.start()"
] | [
"0.6211504",
"0.6013261",
"0.5729392",
"0.56921124",
"0.5665231",
"0.5625194",
"0.5592315",
"0.5584637",
"0.5559815",
"0.5458288",
"0.54401493",
"0.53518695",
"0.5323422",
"0.5239154",
"0.5230533",
"0.5202559",
"0.51961684",
"0.5169283",
"0.5162053",
"0.51603216",
"0.5147439",
"0.51465404",
"0.5142052",
"0.5136302",
"0.51332533",
"0.51206106",
"0.5095838",
"0.50722307",
"0.5064706",
"0.5059349"
] | 0.6555623 | 0 |
Wrapper over _get_variable_wrapper() to get weights, with weights decay factor in loss. | def _get_weights_wrapper(
name, shape, dtype=tf.float32, initializer=initializers.xavier_initializer(),
weights_decay_factor=None
):
weights = _get_variable_wrapper(
name=name, shape=shape, dtype=dtype, initializer=initializer
)
if weights_decay_factor is not None and weights_decay_factor > 0.0:
weights_wd = tf.multiply(
tf.nn.l2_loss(weights), weights_decay_factor, name=name + '/l2loss'
)
tf.add_to_collection('losses', weights_wd)
return weights | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _variable_with_weight_decay(self, shape, stddev, wd):\n\n initializer = tf.truncated_normal_initializer(stddev=stddev)\n var = tf.get_variable('weights', shape=shape,\n initializer=initializer)\n\n# if wd and (not tf.get_variable_scope().reuse):\n# weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')\n# tf.add_to_collection('losses', weight_decay)\n return var",
"def _variable_with_weight_decay(name, shape, stddev, wd):\n dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n var = tf.get_variable(name, shape,\n initializer=tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))\n # add weight decay term to 'losses' collection, so the sum of all loss in 'losses' collection\n # will be the total/final loss\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def _variable_with_weight_decay(name, shape, stddev, wd):\n\n #var = _variable_on_cpu(name, shape, tf.truncated_normal_initializer(stddev=stddev))\n var = weight_variable(shape)\n if wd is not None:\n weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def _variable_with_weight_decay(name, shape, stddev, wd):\n dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n var = _variable_on_cpu(\n name,\n shape,\n tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))\n if wd is not None:\n weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def _variable_with_weight_decay(name, shape, stddev, wd):\n dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n var = _variable_on_cpu(name, shape, tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))\n\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name = 'weight_loss')\n tf.add_to_collection('losses', weight_decay)\n \n return var",
"def _variable_with_weight_decay(name, shape, stddev, wd, use_xavier=True):\n if use_xavier:\n # initializer = tf.contrib.layers.xavier_initializer()\n initializer = tf.initializers.glorot_uniform()\n else:\n initializer = tf.truncated_normal_initializer(stddev=stddev)\n var = _variable_on_cpu(name, shape, initializer)\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def _variable_with_weight_decay(name, shape, stddev, wd):\n var = variable(\n name,\n shape,\n initializer=tf.truncated_normal_initializer(stddev=stddev, dtype=tf.float32))\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def _variable_with_weight_decay(name, shape, stddev, wd):\n dtype = tf.float32\n var = _variable_on_cpu(\n name,\n shape,\n tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def _variable_with_weight_decay(name, shape, stddev, wd):\n dtype = tf.float32\n var = _variable_on_cpu(\n name,\n shape,\n tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def _variable_with_weight_decay(name, shape, stddev, wd):\n var = _variable_on_cpu(name, shape,\n tf.truncated_normal_initializer(stddev=stddev))\n if wd:\n weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def _variable_with_weight_decay(name, shape, stddev, wd):\n var = _variable_on_cpu(\n name,\n shape,\n tf.truncated_normal_initializer(stddev=stddev, dtype=tf.float32))\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def _variable_with_weight_decay_orig(name, shape, stddev, wd):\n dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n var = _variable_on_cpu(\n name,\n shape,\n tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def _variable_with_weight_decay(self, name, shape, wd):\n var = self._variable_on_device(\n name,\n shape,\n tf.contrib.layers.xavier_initializer_conv2d(uniform=True))\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd,\n name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var\n\n # We will replicate the model structure for the training subgraph, as well\n # as the evaluation subgraphs, while sharing the trainable parameters.",
"def variable_with_weight_decay(kernel_shape, initializer, wd):\n w = tf.get_variable(name=\"weights\", shape=kernel_shape, dtype=tf.float32, initializer=initializer)\n\n collection = tf.GraphKeys.REGULARIZATION_LOSSES\n if wd and (not tf.get_variable_scope().reuse):\n weight_decay = tf.multiply(tf.nn.l2_loss(w), wd, name=\"w_loss\")\n tf.add_to_collection(collection, weight_decay)\n variable_summaries(w)\n return w",
"def variable_with_weight_decay(kernel_shape, initializer, wd):\n w = tf.get_variable('weights', kernel_shape, tf.float32, initializer=initializer)\n\n collection_name = tf.GraphKeys.REGULARIZATION_LOSSES\n if wd and (not tf.get_variable_scope().reuse):\n weight_decay = tf.multiply(tf.nn.l2_loss(w), wd, name='w_loss')\n tf.add_to_collection(collection_name, weight_decay)\n variable_summaries(w)\n return w",
"def _variable_with_weight_decay(name, shape, wd = 0.0):\n var = _variable_on_cpu(name, shape, tf.contrib.layers.xavier_initializer())\n # print(\"change var\")\n # var = tf.Variable(tf.truncated_normal(shape, mean= 0.0, stddev = 1.0), name = name)\n if wd != 0.0:\n weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def get_weights(self):\n params = self.weights\n return K.batch_get_value(params)",
"def _variable_with_weight_decay(name, shape, stddev, wd, fresh_init = True, init_tensor=0, verbose=False):\n if verbose:\n print(\"The name of the variable: {}\".format(name))\n dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n\n\n if fresh_init:\n var = _variable_on_cpu(\n name,\n shape,\n tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))\n else:\n var = _variable_on_cpu_with_constant(\n name,\n init_tensor)\n\n if verbose:\n print(\"Here's the variable of name {}:\".format(name))\n my_vars = tf.Print(var, [var], message=\"This is var: \")\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def _get_weights(self, name, shape, dtype=None, initializer=None):\n\n if initializer is None: initializer = self.initializer\n else: initializer = initializers.get(initializer)\n # Set default dtype if not specified\n if dtype is None: dtype = hub.dtype\n\n # Get regularizer if necessary\n regularizer = None\n if hub.use_global_regularizer: regularizer = hub.get_global_regularizer()\n # Get constraint if necessary\n constraint = hub.get_global_constraint()\n # Get weights\n weights = tf.get_variable(name, shape, dtype=dtype, initializer=initializer,\n regularizer=regularizer, constraint=constraint)\n\n # If weight dropout is positive, dropout and return\n if self.weight_dropout > 0:\n return linker.dropout(weights, self.weight_dropout, rescale=True)\n\n # Binarize weights if required\n if hub.binarize_weights:\n # See this paper: https://arxiv.org/pdf/1602.02830.pdf\n return self.binarize_weights(weights)\n\n # If no mask is needed to be created, return weight variable directly\n if not any([self.prune_is_on, self.being_etched, hub.force_to_use_pruner]):\n return weights\n\n # Register, context.pruner should be created in early model.build\n assert context.pruner is not None\n # Merged lottery logic into etch logic\n if self.prune_is_on:\n assert not self.being_etched\n self.etch = 'lottery:prune_frac={}'.format(self.prune_frac)\n\n # Register etch kernel to pruner\n masked_weights = context.pruner.register_to_kernels(weights, self.etch)\n\n # Return\n assert isinstance(masked_weights, tf.Tensor)\n return masked_weights",
"def weight_variable(shape, name=None, trainable=True, is_conv=True):\n\n if is_conv:\n initializer = tf.contrib.layers.xavier_initializer_conv2d()\n else:\n initializer = tf.contrib.layers.xavier_initializer()\n\n initializer = tf.truncated_normal_initializer(0, 0.02)\n weights = tf.get_variable(name, shape=shape, dtype=tf.float32, initializer=initializer, trainable=trainable)\n\n return weights",
"def get_weights(self):\n return self.__weights",
"def get_weights(self, var_id: int, batch_no: int) -> ndarray:\n pass",
"def GetWeights(self) -> numpy.ndarray:\n return numpy.concatenate(list(\n variable_ndarray.flatten() for variable_ndarray in\n self._layer.get_weights()))",
"def _decay(self):\n wd_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n log.info('Weight decay variables')\n [log.info(x) for x in wd_losses]\n log.info('Total length: {}'.format(len(wd_losses)))\n if len(wd_losses) > 0:\n return tf.add_n(wd_losses)\n else:\n log.warning('No weight decay variables!')\n return 0.0",
"def get_weights(self):\n weights = []\n for layer in self.layers:\n weights += layer.weights\n return K.batch_get_value(weights)",
"def weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.get_variable(\n 'weight', initializer=initial, regularizer=tf.nn.l2_loss)",
"def get_weights(self):",
"def weight_variable(name, shape, initializer=tf.contrib.layers.xavier_initializer()):\n variable = tf.get_variable(name, initializer=initializer(shape))\n return variable",
"def get_weights(self):\n return self.w",
"def get_weights(self):\n return self.w"
] | [
"0.77637476",
"0.77271247",
"0.7695299",
"0.75827795",
"0.75666004",
"0.75396603",
"0.7501326",
"0.7492685",
"0.7492685",
"0.74766654",
"0.74586254",
"0.74359125",
"0.73115295",
"0.7291328",
"0.72828215",
"0.72800875",
"0.7099408",
"0.68967044",
"0.67947274",
"0.6773664",
"0.67584455",
"0.6724593",
"0.66693103",
"0.6657834",
"0.66566205",
"0.66195154",
"0.6566922",
"0.6550411",
"0.65263057",
"0.65263057"
] | 0.78818905 | 0 |
Get variables in a triple pattern | def get_vars(triple):
return set([v for k, v in triple.items() if v.startswith('?')]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def variables(s):\n result = set([])\n def walk(s):\n if is_variable(s):\n result.add(s)\n else:\n for arg in s.args:\n walk(arg)\n walk(s)\n return result",
"def variables(self):\n return tuple(flatten([a.variables for a in self.args]))",
"def get_variable_matches(text):\n return _property_pattern.findall(text)",
"def vars(cls):\n for key in dir(cls):\n if key.startswith('var_'):\n yield key[4:]",
"def get_list_slice_vars(list_):\n\n # Dump unwanted portions\n array, list_ = list_.split(\"[\")\n list_ = list_.split(\"]\", 1)[0]\n\n # Split at ':'\n variables = list_.split(\":\")\n var_count = len(variables)\n\n step = \"\"\n\n # If step provided\n if var_count == 3:\n\n # If provided, store provided values\n start, stop, step = variables\n else:\n\n # Else store start, stop with default step\n start, stop = variables\n\n # If values are not provided by user, fall back to defaults\n\n # Set start default to 0\n if not start:\n start = \"0\"\n\n # Set stop default to array length\n if not stop:\n stop = \"Array.length\"\n\n # Set step default to 1\n if not step:\n step = \"1\"\n\n # Return stripped array with extracted values\n return array, start, stop, step",
"def get_variable_names(text):\n names = []\n if '@@' in text:\n matches = _property_pattern.findall(text)\n for token, key in matches:\n names.append(key)\n\n return names",
"def get_loop_vars(rule):\n traverse_list = [rule]\n loop_vars = []\n while(traverse_list):\n one_rule = traverse_list.pop()\n operator = one_rule[0]\n operate_nums = one_rule[1:]\n # print(\"operate_nums: %s\" % operate_nums)\n for operate_num in operate_nums:\n if isinstance(operate_num, str):\n loop_vars.append(operate_num)\n elif isinstance(operate_num, list):\n traverse_list.append(operate_num)\n else:\n continue\n # remove redundant vars, and keep the order\n ans_vars = list(set(loop_vars))\n ans_vars.sort(key=loop_vars.index)\n return ans_vars",
"def find_connected_pattern(variables, triples):\n pos = 0\n for triple in triples:\n tripleVars = get_vars(triple['triple'])\n if len(variables & tripleVars) > 0:\n return triple, pos, variables | tripleVars\n pos += 1\n return None, None, variables",
"def variable_parser(var_list, prefix):\r\n ret_list = []\r\n for var in var_list:\r\n varname = var.name\r\n varprefix = varname.split('/')[0]\r\n if varprefix == prefix:\r\n ret_list.append(var)\r\n return ret_list",
"def get_vars_and_coefficients(elements, start=3):\n return [var for var in elements[start + 1::2]], [float(coeff) for coeff in elements[start::2]]",
"def variables_referenced(text):\n return set(substitution_pattern.findall(text))",
"def variable_parser(var_list, prefix):\r\n ret_list = []\r\n for var in var_list:\r\n varname = var.name\r\n varprefix = varname.split('/')[0]\r\n if varprefix == prefix:\r\n ret_list.append(var)\r\n elif prefix in varname:\r\n ret_list.append(var)\r\n return ret_list",
"def get_variables(interface1, interface2, interface3):\n variables = {\n \"interface1\": interface1,\n \"interface2\": interface2,\n \"interface3\": interface3,\n\n # Interface 2 - ingress\n \"settings_receive\": {\n \"state\": \"receive\",\n \"iface-ref\": interface2,\n },\n\n # Interface 2 - egress\n \"settings_transmit\": {\n \"state\": \"transmit\",\n \"iface-ref\": interface2,\n },\n\n # Interface 2 - ingress/egress\n \"settings_both\": {\n \"state\": \"both\",\n \"iface-ref\": interface2,\n },\n\n # Interface 3 - ingress/egress\n \"settings_if2\": {\n \"state\": \"both\",\n \"iface-ref\": interface3,\n },\n\n # IP addresses for traffic test\n \"tg_to_dut_if1_ip\": \"192.168.1.1\",\n \"dut_to_tg_if1_ip\": \"192.168.1.2\",\n \"tg_to_dut_if2_ip\": \"192.168.2.1\",\n \"dut_to_tg_if2_ip\": \"192.168.2.2\",\n \"prefix\": 24,\n }\n return variables",
"def get_variables(self):\n return [self.g_t, self.m_t]",
"def test_simple_extraction_of_values(self):\n\t\tself.assertEqual([\"a\", \"b\"], au.extract_variables(bf.And([bf.Var(\"b\"), bf.Var(\"a\")])), \"Invalid variables extracted, expected [a, b].\")",
"def regex_findall_variables(raw_string: Text) -> List[Text]:\n try:\n match_start_position = raw_string.index(\"$\", 0)\n except ValueError:\n return []\n\n vars_list = []\n while match_start_position < len(raw_string):\n\n # Notice: notation priority\n # $$ > $var\n\n # search $$\n dollar_match = dolloar_regex_compile.match(raw_string, match_start_position)\n if dollar_match:\n match_start_position = dollar_match.end()\n continue\n\n # search variable like ${var} or $var\n var_match = variable_regex_compile.match(raw_string, match_start_position)\n if var_match:\n var_name = var_match.group(1) or var_match.group(2)\n vars_list.append(var_name)\n match_start_position = var_match.end()\n continue\n\n curr_position = match_start_position\n try:\n # find next $ location\n match_start_position = raw_string.index(\"$\", curr_position + 1)\n except ValueError:\n # break while loop\n break\n\n return vars_list",
"def extract_variables(content: Any) -> Set:\n if isinstance(content, (list, set, tuple)):\n variables = set()\n for item in content:\n variables = variables | extract_variables(item)\n return variables\n\n elif isinstance(content, dict):\n variables = set()\n for key, value in content.items():\n variables = variables | extract_variables(value)\n return variables\n\n elif isinstance(content, str):\n return set(regex_findall_variables(content))\n\n return set()",
"def find_template_variables(code):\n return re.findall(re_template_var, code)",
"def match(pattern, data, myvars=None):\n if myvars is None:\n myvars = {}\n if type(pattern) is ListType and len(pattern) >= 1:\n # 'variables' are ['varname']\n myvars[pattern[0]] = data\n return 1, myvars\n if type(pattern) is not TupleType:\n return (pattern == data), myvars\n if len(data) != len(pattern):\n return 0, myvars\n for pattern, data in map(None, pattern, data):\n same, myvars = match(pattern, data, myvars)\n if not same:\n break\n return same, myvars",
"def _get_vars(symbol: Union[str, int]) -> str:\n if isinstance(symbol, str):\n return {\n 'circle': 'var b1=n.round(t,2);',\n 'square': 'var b1=n.round(t,2);',\n 'diamond': 'var b1=n.round(t*1.3,2);',\n 'hexagram': 'var b1=n.round(t,2);var b2=n.round(t/2,2);var b3=n.round(t*Math.sqrt(3)/2,2);'\n }[symbol]\n return {\n 37: 'var d1=n.round(t*1.2,2);var d2=n.round(t*1.6,2);var d3=n.round(t*0.8,2);',\n 38: 'var d1=n.round(t*1.2,2);var d2=n.round(t*1.6,2);var d3=n.round(t*0.8,2);',\n 39: 'var d1=n.round(t*1.2,2);var d2=n.round(t*1.6,2);var d3=n.round(t*0.8,2);',\n 40: 'var d1=n.round(t*1.2,2);var d2=n.round(t*1.6,2);var d3=n.round(t*0.8,2);',\n 34: 'var d1=n.round(t,2);',\n 33: 'var d1=n.round(t*1.4,2);',\n 35: 'var d1=n.round(t*1.2,2);var d2=n.round(t*0.85,2);',\n 36: 'var d1=n.round(t/2,2);var d2=n.round(t,2);'\n }[symbol]",
"def vars(svars):\n return np.array([pm.var(var) for var in svars.split()])",
"def __splitVariableNames(self, name, indexes):\n if name == 'x':\n var = self.xCoordinates[indexes[0]][indexes[1]]\n elif name == 'y':\n var = self.yCoordinates[indexes[0]][indexes[1]]\n elif name == 'z':\n var = self.zCoordinates[indexes[0]][indexes[1]]\n elif name == 'colorMap':\n var = self.colorMapCoordinates[indexes[0]][indexes[1]]\n elif name == 'clusterLabels':\n var = self.clusterLabels[indexes[0]][indexes[1]]\n elif name == 'mixtureLabels':\n var = self.mixtureLabels[indexes[0]][indexes[1]]\n elif name == 'mixtureMeans':\n var = self.mixtureMeans[indexes[0]][indexes[1]]\n elif name == 'mixtureCovars':\n var = self.mixtureCovars[indexes[0]][indexes[1]]\n\n # The variable can contain brackets {} (when the symbol \"|\" is present in\n # the variable name), e.g.:\n # DataName|Input|{RavenAuxiliary|variableName|initial_value}\n # or it can look like:\n # DataName|Input|variableName\n\n if var is not None:\n result = [None] * 3\n if '|input|' in var.lower():\n match = re.search(r\"(\\|input\\|)\", var.lower())\n elif '|output|' in var.lower():\n match = re.search(r\"(\\|output\\|)\", var.lower())\n else:\n self.raiseAnError(IOError, f'In Plot {self.name}, the input coordinate {name} has not specified an \"Input\" or \"Output\" (case insensitive). e.g., sourceName|Input|aVariable) in {var}')\n startLoc, endLoc = match.start(), match.end()\n result = [var[:startLoc].strip(), var[startLoc+1:endLoc-1].strip(), var[endLoc:].strip()]\n if '{' in result[-1] and '}' in result[-1]:\n locLower, locUpper = result[-1].find(\"{\"), result[-1].rfind(\"}\")\n result[-1] = result[-1][locLower + 1:locUpper].strip()\n else:\n result = None\n\n return result",
"def getVariables(self):\n statVars = [self[vn] for vn in self.statVars]\n timeVars = [self[vn] for vn in self.timeVars]\n return statVars + timeVars",
"def scanvars(reader, frame, locals):\n import tokenize\n import keyword\n vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__\n for ttype, token, start, end, line in tokenize.generate_tokens(reader):\n if ttype == tokenize.NEWLINE:\n break\n if ttype == tokenize.NAME and token not in keyword.kwlist:\n if lasttoken == '.':\n if parent is not __UNDEF__:\n value = getattr(parent, token, __UNDEF__)\n vars.append((prefix + token, prefix, value))\n else:\n where, value = lookup(token, frame, locals)\n vars.append((token, where, value))\n elif token == '.':\n prefix += lasttoken + '.'\n parent = value\n else:\n parent, prefix = None, ''\n lasttoken = token\n return vars",
"def variables(self):\n return [term.variable for term in self.terms]",
"def _variable_pattern(self):\n variable_pattern = r'#([A-Za-z]+)' # match literal (#App,#Trigger) at beginning of String\n variable_pattern += r':([\\d]+)' # app id (:7979)\n variable_pattern += r':([A-Za-z0-9_\\.\\-\\[\\]]+)' # variable name (:variable_name)\n variable_pattern += r'!(StringArray|BinaryArray|KeyValueArray' # variable type (array)\n variable_pattern += r'|TCEntityArray|TCEnhancedEntityArray' # variable type (array)\n variable_pattern += r'|String|Binary|KeyValue|TCEntity|TCEnhancedEntity' # variable type\n variable_pattern += r'|(?:(?!String)(?!Binary)(?!KeyValue)' # non matching for custom\n variable_pattern += r'(?!TCEntity)(?!TCEnhancedEntity)' # non matching for custom\n variable_pattern += r'[A-Za-z0-9_-]+))' # variable type (custom)\n return variable_pattern",
"def make_vars(tups):\n return dict([(varname, value) for varname, value in tups])",
"def test_complex_extraction_of_values(self):\n\t\ta = bf.Var(\"a\")\n\t\tb = bf.Var(\"b\")\n\t\tc = bf.Var(\"c\")\n\t\tformula = bf.And([bf.Or([b, a, c]), bf.Or([bf.Not(a), bf.Not(c)]), bf.Not(b)])\n\t\tself.assertEqual([\"a\", \"b\", \"c\"], au.extract_variables(formula), \"Invalid variables extracted, expected [a, b, c].\")",
"def getVariables(self)->Dict[str,str]:\n pass",
"def variables(self):\n return {u for u in self if u.type == 'var'}"
] | [
"0.65504885",
"0.62275475",
"0.60280734",
"0.58704334",
"0.58324313",
"0.5800524",
"0.5797577",
"0.57811517",
"0.5696571",
"0.56887144",
"0.5681551",
"0.56710494",
"0.5640094",
"0.56376356",
"0.56341684",
"0.56109506",
"0.56105363",
"0.55877894",
"0.5572952",
"0.5562894",
"0.55621636",
"0.5539044",
"0.5523845",
"0.5510052",
"0.5499931",
"0.5490798",
"0.54800904",
"0.5470635",
"0.5470623",
"0.5462828"
] | 0.6889056 | 0 |
Find the first pattern in a set of triple patterns connected to a set of variables | def find_connected_pattern(variables, triples):
pos = 0
for triple in triples:
tripleVars = get_vars(triple['triple'])
if len(variables & tripleVars) > 0:
return triple, pos, variables | tripleVars
pos += 1
return None, None, variables | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def first_match(s,patterns):\n\n for p in patterns:\n m=p.match(s)\n if m:\n return p,m\n return None,None",
"def __extract_pattern_nodes(graph):\n tp_nodes = graph.subjects(RDF.type, AGORA.TriplePattern)\n for tpn in tp_nodes:\n subject = list(graph.objects(tpn, AGORA.subject)).pop()\n predicate = list(graph.objects(tpn, AGORA.predicate)).pop()\n obj = list(graph.objects(tpn, AGORA.object)).pop()\n subject_str = list(graph.objects(subject, RDFS.label)).pop().toPython()\n predicate_str = graph.qname(predicate)\n if (obj, RDF.type, AGORA.Variable) in graph:\n object_str = list(graph.objects(obj, RDFS.label)).pop().toPython()\n else:\n object_str = list(graph.objects(obj, AGORA.value)).pop().toPython()\n __plan_patterns[tpn] = '{} {} {}'.format(subject_str, predicate_str, object_str)",
"def _get_rule_pattern(sequent_side, context_variables):\n pattern = list()\n left_context = False\n right_context = False\n together = list()\n # This is for the together, see below\n prev_context = False\n\n for member_index in range(len(sequent_side)):\n # Context variable\n if sequent_side[member_index] in context_variables:\n prev_context = True\n if member_index == 0:\n left_context = True\n if member_index == len(sequent_side) - 1:\n right_context = True\n\n # Non-context variable (formula)\n else:\n pattern.append(sequent_side[member_index])\n if not prev_context and len(pattern) > 1:\n together.append((len(pattern) - 2, len(pattern) - 1)) # last index, prev to last index of pattern\n prev_context = False\n\n return pattern, left_context, right_context, together",
"def triples():",
"def find_one_independent_choose(all_set_variables):\n task_list = []\n for key in all_set_variables:\n value = all_set_variables[key]\n choose_keywords = list(value)\n for choose_keyword in choose_keywords:\n set_vars = value[choose_keyword]\n task_list.append((key, choose_keyword))\n task_list = add_more_important_tasks(\n choose_keyword, all_set_variables, task_list\n )\n logging.debug(task_list)\n return task_list[0]",
"def FIRST(L):\n global fi,eps\n R=set()\n eps_appear=False\n for x in L:\n eps_appear=False\n if not x.isTerminal():\n for o in fi[x]:\n if o==eps:\n eps_appear=True\n else:\n R.add(o)\n if eps not in fi[x]:\n break\n elif x!=eps:\n R.add(x)\n break\n else: # x==eps\n eps_appear=True\n if eps_appear:\n R.add(eps)\n if len(R)==0:\n R.add(eps)\n return R",
"def basic_find_one_independent_choose(all_set_variables):\n task_list = []\n for choose_keyword in list(all_set_variables):\n # for choose_keyword, set_vars in six.iteritems(value):\n task_list.append(choose_keyword)\n task_list = basic_add_more_important_tasks(\n choose_keyword, all_set_variables, task_list\n )\n logging.debug(task_list)\n return task_list[0]",
"def pythTripletFinder(sum1):\n a = 0\n b = 0\n sum1 = int(sum1)\n for x in range(1,sum1):\n\tfor y in range(1,sum1):\n\t if (x*x + y*y) == (sum1 - x -y)**2 :\n\t\treturn x,y,sum1-x-y\n return 0,0,0",
"def get_first():\n for s in TERMINAL_SET:\n # For each terminal, initialize First with itself.\n sym = SYMBOL_DICT[s]\n sym.first_set = set([s])\n\n for s in NON_TERMINAL_SET:\n sym = SYMBOL_DICT[s]\n if sym.is_nullable:\n sym.first_set = set(['null'])\n else:\n sym.first_set = set()\n\n while True:\n first_set_is_stable = True\n for p in PRODUCTION_LIST:\n sym_left = symbol_for_str(p.left)\n if p.right[0] == 'null':\n sym_left.first_set.update(set(['null']))\n continue\n previous_first_set = set(sym_left.first_set)\n\n for s in p.right:\n # For X -> Y..., First(X) = First(X) U First(Y)\n sym_right = symbol_for_str(s)\n sym_left.first_set.update(sym_right.first_set)\n # For X -> Y1 Y2 ... Yi-1 , if Y1...Yi-1 is all nullable\n # Then First(X) = First(X) U First(Y1) U First(Y2) ...\n if sym_right.is_nullable:\n continue\n else:\n break\n\n if previous_first_set != sym_left.first_set:\n first_set_is_stable = False\n\n if first_set_is_stable:\n break",
"def exactly_one(variables):\n cnf = [variables]\n n = len(variables)\n\n for i in range(n):\n for j in range(i + 1, n):\n v1 = variables[i]\n v2 = variables[j]\n cnf.append([-v1, -v2])\n\n return cnf",
"def getDirectFollowSets(self, FIRST):\n self.init_follow = {v:set() for v in self.v }\n self.containsFOLLOWOf = set()\n for v in self.v:\n if v == self.np[0][0]: # Starting Production\n self.init_follow[v] = set(['$']) # $ is in follow of 'S' applying rule 1\n for prod in self.g[v]:\n for i in range(len(prod)):\n if prod[i] in self.v and i+1 < len(prod):\n if prod[i+1] in self.t:\n self.init_follow[prod[i]] |= set([prod[i+1]])\n else:\n t = i + 1\n while t < len(prod) and prod[t] in self.nullables_map:\n if self.nullables_map[prod[t]] == True:\n self.init_follow[prod[i]] |= FIRST[prod[t]]-set(['epsilon'])\n else:\n self.init_follow[prod[i]] |= FIRST[prod[t]]\n break\n t += 1\n if t >= len(prod): # every thing on rhs of prod[i] could produce epsison, rule - 3\n self.containsFOLLOWOf |= set([(prod[i], v)])\n else: #prod[i+1] is a non nullable prod or prod[t] was a terminal\n if prod[t] in self.t:\n self.init_follow[prod[i]] |= set([prod[t]])\n else:\n self.init_follow[prod[i]] |= FIRST[prod[t]]-set(['epsilon'])\n\n elif prod[i] in self.v:\n self.containsFOLLOWOf |= set([(prod[i], v)]) # applying rule 2\n\n #self.containsFOLLOWOf = set([(a, b) for (a, b) in self.containsFOLLOWOf if a != b]) # remove the self loops\n return self.init_follow",
"def get_matching(variables, strict=True, single=True, **criteria):\n matching = []\n for var in variables:\n for crit_name, crit_info in criteria.items():\n if getattr(var, crit_name) == crit_info:\n continue\n else:\n break\n else:\n matching.append(var)\n\n if not matching and strict:\n raise RuntimeError(\"No matching variables were found.\")\n if single:\n if len(matching) > 1:\n raise RuntimeError(\n f\"Expected to find 1 matching variable. Found '{matching}'.\"\n )\n if not matching:\n return ()\n return matching[0]\n return tuple(matching)",
"def solve_part2(start):\n inputs = load_inputs(False)\n all_matches = []\n tiles = inputs.keys()\n for elem in tiles:\n matches = defaultdict(list)\n for elem2 in tiles:\n if elem != elem2 and compare_tile(inputs[elem], inputs[elem2]):\n l = matches[elem]\n l.append(elem2)\n if matches[elem]:\n all_matches.append(matches[elem])\n\n # start frmo an aribtrary corner\n # find a match, rotate me so that the match is along the right side\n # fill in properly oriented match\n # repeat, for row = 1+, consider top-match and left-match\n\n # for eery rotations / orientation, look fot the pattern",
"def greedy_selector(self):\n r_k = 0 \n best_route = []\n cities_to_visit = [i for i in range(1, self.city_count)]\n for _ in range(1, self.city_count):\n s_ind = np.argmax([self.tau[(r_k, u)] for u in cities_to_visit])\n s_k = cities_to_visit.pop(s_ind)\n best_route.append((r_k, s_k))\n r_k = s_k\n best_route.append((r_k, 0))\n \n shortest_path = np.sum([self.phi[(p)] for p in best_route])\n return best_route, shortest_path",
"def get_prelu_pattern():\n @mb.program(input_specs=[mb.TensorSpec(shape=([get_new_symbol(), get_new_symbol(),\n get_new_symbol(), get_new_symbol()])), ])\n def prelu_pattern(x):\n # perm value can be anything, it will be checked in \"is_var_constraint_satisifed\" method\n x = mb.transpose(x=x, perm=[0,1,2,3], name=\"transpose\")\n return _prelu_pattern(x)\n\n return prelu_pattern",
"def sat_generate_candidate_assignments(self):\n # YOUR CODE HERE\n short = min(len(c) for c in self.clauses)\n for c in self.clauses:\n if len(c) == short:\n return set(c.literals)\n # return (set(x.literals) for x in self.clauses if len(x) == min(len(c) for c in self.clauses))",
"def __extract_patterns_and_spaces(self):\n\n def __decorate_nodes(nodes, space):\n \"\"\"\n Performs a backward search from a list of pattern nodes and assigns a set of search spaces\n to all encountered nodes.\n :param nodes: List of pattern nodes that belongs to a search space\n :param space: List of search space id\n :return:\n \"\"\"\n for n in nodes:\n if n not in self.__node_spaces:\n self.__node_spaces[n] = set([])\n self.__node_spaces[n].add(space)\n pred_nodes = self.__plan_graph.subjects(AGORA.next, n)\n __decorate_nodes(pred_nodes, space)\n\n # Extract all search spaces in the plan and build a dictionary of subjects-to-ignore per each of them.\n # Ignored subjects are those that won't be dereferenced due to a explicit graph pattern (object) filter,\n # e.g. ?s doap:name \"jenkins\" -> All ?s that don't match the filter will be ignored.\n self.__spaces = set(self.__plan_graph.subjects(RDF.type, AGORA.SearchSpace))\n self.__subjects_to_ignore = dict([(sp, set([])) for sp in self.__spaces])\n\n patterns = list(self.__plan_graph.subjects(RDF.type, AGORA.TriplePattern))\n for tp in patterns:\n # A triple pattern belongs to a UNIQUE search space\n space = list(self.__plan_graph.subjects(AGORA.definedBy, tp)).pop()\n self.__patterns[tp] = {'space': space}\n\n # Depending on the format of each triple pattern (either '?s a Concept' or '?s prop O'),\n # it is required to extract different properties.\n tp_pred = list(self.__plan_graph.objects(tp, predicate=AGORA.predicate)).pop()\n\n if tp_pred == RDF.type: # ?s a Concept\n self.__patterns[tp]['type'] = list(self.__plan_graph.objects(tp, predicate=AGORA.object)).pop()\n try:\n check_type = list(self.__plan_graph.objects(tp, predicate=AGORA.checkType)).pop().toPython()\n except IndexError:\n check_type = True\n self.__patterns[tp]['check'] = check_type\n else: # ?s prop O\n self.__patterns[tp]['property'] = tp_pred\n tp_obj = list(self.__plan_graph.objects(tp, predicate=AGORA.object)).pop()\n if (tp_obj, RDF.type, AGORA.Literal) in self.__plan_graph: # In case O is a Literal\n self.__patterns[tp]['filter_object'] = list(self.__plan_graph.objects(tp_obj, AGORA.value)).pop()\n elif isinstance(tp_obj, URIRef):\n self.__patterns[tp]['filter_object'] = tp_obj\n\n tp_sub = list(self.__plan_graph.objects(tp, predicate=AGORA.subject)).pop()\n if isinstance(tp_sub, URIRef):\n self.__patterns[tp]['filter_subject'] = tp_sub\n\n # Get all pattern nodes (those that have a byPattern properties) of the search plan and search backwards\n # in order to set the scope of each search space.\n nodes = list(self.__plan_graph.subjects(AGORA.byPattern, tp))\n for n in nodes:\n if n not in self.__node_patterns:\n self.__node_patterns[n] = set([])\n self.__node_patterns[n].add(tp)\n __decorate_nodes(nodes, space)",
"def C(relatorlist,quit_at=float('inf')):\n F,rels=fg.parseinputwords(relatorlist)\n if not all(r==F.cyclic_reduce(r) for r in rels):\n raise ValueError(\"Relators are not cyclically reduced.\")\n thepieces=pieces(rels)\n minnumberpieces=quit_at\n def min_string_piece_expression(whatsleft,thepieces,quit_at):\n # recursively determine the minimal expression of the string whatsleft as a concatenation of elements of thepieces, or stop once it is determined that any such expression requires at least quit_at many pieces\n # find a piece that agrees with a prefix of whatsleft and the recurse on the suffix\n if not whatsleft:\n return 0\n minexp=quit_at\n for p in thepieces:\n if p!=whatsleft[:len(p)]:\n continue\n else:\n minexp=min(minexp,1+min_string_piece_expression(whatsleft[len(p):],thepieces,minexp-1))\n return minexp\n def min_relator_piece_expression(relator,thepieces,quit_at):\n # This is first step in recursive search. Here we want to find a piece p such that for relator r we can write p=xy and r=yzx, with y nontrivial. That is, in this step only we think of r as cyclic word and allow first piece that wraps.\n r=relator()\n minexp=quit_at\n for p in thepieces:\n if len(p)>len(r):\n continue\n possiblestartingindices=[] # for given p there may be different possible choices of y\n for startingindex in range(len(r)-len(p)+1,len(r)+1):\n if p==(r+r)[startingindex:startingindex+len(p)]:\n possiblestartingindices.append(startingindex)\n if not possiblestartingindices:\n continue\n for startingindex in possiblestartingindices:\n # found a way to fit p into r spanning the beginning of r. This accounts for x and y part of r. Now recursively find shortest expression of z=whatsleft as a concatenation of pieces.\n whatsleft=(r+r)[startingindex+len(p):startingindex+len(r)]\n if not whatsleft:\n return 1\n else:\n minexp=min(minexp,1+min_string_piece_expression(whatsleft,thepieces,minexp-1))\n return minexp\n for thisrelator in rels:\n minnumberpieces=min(minnumberpieces,min_relator_piece_expression(thisrelator,thepieces,minnumberpieces))\n return minnumberpieces",
"def GetInitialPatterns(graph, temporal = False):\n initialPatternList = []\n candidateEdges = graph.edges.values()\n while candidateEdges:\n edge1 = candidateEdges.pop(0)\n matchingEdges = [edge1]\n nonmatchingEdges = []\n graph1 = Graph.CreateGraphFromEdge(edge1)\n if temporal:\n graph1.TemporalOrder()\n for edge2 in candidateEdges:\n graph2 = Graph.CreateGraphFromEdge(edge2)\n if temporal:\n graph2.TemporalOrder()\n if Graph.GraphMatch(graph1,graph2):\n matchingEdges.append(edge2)\n else:\n nonmatchingEdges.append(edge2)\n if len(matchingEdges) > 1:\n # Create initial pattern\n pattern = Pattern.Pattern()\n pattern.definition = Graph.CreateGraphFromEdge(matchingEdges[0])\n if temporal:\n pattern.definition.TemporalOrder()\n pattern.instances = []\n for edge in matchingEdges:\n pattern.instances.append(Pattern.CreateInstanceFromEdge(edge))\n pattern.evaluate(graph)\n initialPatternList.append(pattern)\n candidateEdges = nonmatchingEdges\n return initialPatternList",
"def solve_part1(start):\n inputs = load_inputs(False)\n two_matches = []\n tiles = inputs.keys()\n for elem in tiles:\n matches = defaultdict(list)\n for elem2 in tiles:\n if elem != elem2 and compare_tile(inputs[elem], inputs[elem2]):\n l = matches[elem]\n l.append(elem2)\n\n if len(matches[elem]) == 2:\n print matches\n two_matches.append(elem)\n\n return reduce((lambda x, y: int(x) * int(y)), two_matches)",
"def match(pattern, data, myvars=None):\n if myvars is None:\n myvars = {}\n if type(pattern) is ListType and len(pattern) >= 1:\n # 'variables' are ['varname']\n myvars[pattern[0]] = data\n return 1, myvars\n if type(pattern) is not TupleType:\n return (pattern == data), myvars\n if len(data) != len(pattern):\n return 0, myvars\n for pattern, data in map(None, pattern, data):\n same, myvars = match(pattern, data, myvars)\n if not same:\n break\n return same, myvars",
"def _find_combination_targets(self) -> None:\n for cell_pattern, pattern_index in self._scratch.primes.items():\n self._combinations_after_pattern(cell_pattern, pattern_index)",
"def make_pattern_set(self):\n \n _pattern = []\n for x in range(1,9):\n _pattern.append(self.make_pattern())\n \n self.pattern = _pattern",
"def solve_part1(puzzle_input):\n try:\n banks = [int(bank) for bank in puzzle_input[0].split('\\t')]\n except:\n banks = puzzle_input\n\n existing_patterns = []\n current_pattern = banks\n existing_patterns.append(make_pattern(current_pattern))\n\n cont = True\n\n print('start here')\n while cont:\n next_pattern = update_banks(current_pattern)\n cp = make_pattern(next_pattern)\n\n if cp in existing_patterns:\n cont = False\n else:\n existing_patterns.append(cp)\n\n current_pattern = next_pattern\n\n return len(existing_patterns)",
"def _generate_immediate_neighbours(pattern: str) -> list:\n generated = []\n for i in range(len(pattern)):\n if pattern[i] == 'A':\n generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_A])\n elif pattern[i] == 'C':\n generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_C])\n elif pattern[i] == 'T':\n generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_T])\n elif pattern[i] == 'G':\n generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_G])\n\n return generated",
"def find_best_reference_set(points):\n\n # Group points by color\n grouped = defaultdict(list)\n for point in points:\n grouped[point.color].append(point)\n\n # Brute force search on all combinations of points with unique colors\n possibilities = product(*[grouped[key] for key in grouped])\n return min(possibilities, key=summed_distances)",
"def _find_repeating_patterns(self):\n for node in self.reactions():\n # We are only interesting of starting at the very first reaction\n if any(self.graph[mol] for mol in self._reactants_nodes(node)):\n continue\n actions = self._list_reactions(node)\n if len(actions) < 5:\n continue\n\n hashes = [\n self._reaction_hash(rxn1, rxn2)\n for rxn1, rxn2 in zip(actions[:-1:2], actions[1::2])\n ]\n for idx, (hash1, hash2) in enumerate(zip(hashes[:-1], hashes[1:])):\n if hash1 == hash2:\n self._hide_reaction(actions[idx * 2])\n self._hide_reaction(actions[idx * 2 + 1])\n self.has_repeating_patterns = True\n # The else-clause prevents removing repeating patterns in the middle of a route\n else:\n break",
"def validategp(d_set, pattern):\n # pattern = [('2', '+'), ('4', '+')]\n min_supp = d_set.thd_supp\n n = d_set.attr_size\n gen_pattern = GP()\n \"\"\"type gen_pattern: GP\"\"\"\n bin_arr = np.array([])\n\n for gi in pattern.gradual_items:\n arg = np.argwhere(np.isin(d_set.valid_bins[:, 0], gi.gradual_item))\n if len(arg) > 0:\n i = arg[0][0]\n valid_bin = d_set.valid_bins[i]\n if bin_arr.size <= 0:\n bin_arr = np.array([valid_bin[1], valid_bin[1]])\n gen_pattern.add_gradual_item(gi)\n else:\n bin_arr[1] = valid_bin[1].copy()\n temp_bin = np.multiply(bin_arr[0], bin_arr[1])\n supp = float(np.sum(temp_bin)) / float(n * (n - 1.0) / 2.0)\n if supp >= min_supp:\n bin_arr[0] = temp_bin.copy()\n gen_pattern.add_gradual_item(gi)\n gen_pattern.set_support(supp)\n if len(gen_pattern.gradual_items) <= 1:\n return pattern\n else:\n return gen_pattern",
"def _getTempsDelexpression(listedMatrix):\n\n \"\"\"\n each template is implemented as a boolean expression of symbols\n where each symbol represents each one of the 26 neighbors\n\n \"\"\"\n str1 = ''.join(str(e) for e in listedMatrix)\n a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z = map(exprvar, str1)\n\n direction1 = (~(a) & ~(b) & ~(c) & ~(j) & ~(k) & ~(l) & ~(r) & ~(s) & ~(t) & p & (d | e | f | m | n | u | v | w | g | h | i | o | q | x | y | z)) | \\\n (~(a) & ~(b) & ~(c) & ~(d) & ~(e) & ~(f) & ~(g) & ~(h) & ~(i) & v & (d | e | f | m | n | u | v | w | g | h | i | o | q | x | y | z)) | \\\n (~(a) & ~(b) & ~(c) & ~(j) & ~(k) & ~(l) & ~(r) & ~(s) & ~(t) & ~(d) & ~(e) & ~(f) & ~(g) & ~(g) & ~(h) & ~(i) & y & (m | n | u | w | o | q | x | z)) | \\\n (~(a) & ~(b) & ~(c) & ~(k) & ~(e) & ~(d & j) & ~ (l & f) & p & v) | \\\n (~(a) & ~(b) & ~(k) & ~(e) & c & v & p & ~(j & d) & ((l & ~(f)) | (~(l) & f))) | \\\n (a & v & p & ~(b) & ~(c) & ~(k) & ~(e) & ~(l & f) & ((j & ~(d)) | (~(j) & d))) | \\\n (~(a) & ~(b) & ~(k) & ~(e) & n & v & p & ~(j & d)) | \\\n (~(b) & ~(c) & ~(k) & ~(e) & m & v & p & ~(l & f)) | \\\n (~(b) & ~(k) & ~(e) & a & n & v & p & ((j & ~(d)) | (~(j) & d))) | \\\n (~(b) & ~(k) & ~(e) & c & m & v & p & ((l & ~(f)) | (~(l) & f))) | \\\n (~(a) & ~(b) & ~(c) & ~(j) & ~(k) & ~(l) & ~(r) & ~(s) & ~(t) & ~(d) & ~(e) & ~(g) & ~(h) & q & y) | \\\n (~(a) & ~(b) & ~(c) & ~(j) & ~(k) & ~(l) & ~(r) & ~(s) & ~(t) & ~(e) & ~(f) & ~(h) & ~(i) & o & y) | \\\n (~(a) & ~(b) & ~(c) & ~(j) & ~(k) & ~(r) & ~(s) & ~(d) & ~(e) & ~(f) & ~(g) & ~(h) & ~(i) & w & y) | \\\n (~(a) & ~(b) & ~(c) & ~(d) & ~(e) & ~(f) & ~(g) & ~(h) & ~(i) & ~(k) & ~(l) & ~(s) & ~(t) & u & y)\n return expr(direction1, simplify=True)",
"def get_pattern(flags: dict, input_tensors: list,\n output_tensors: list, tensor_list: list, tensor_map: dict):\n # If nothing matches, default pattern would be opaque pattern\n matched_pattern = OpPatternRecognizer._apply_pattern_rules(flags,\n input_tensors,\n output_tensors,\n tensor_list,\n tensor_map)\n matched_subpattern = OpPatternRecognizer.apply_subpattern_rules(flags,\n input_tensors,\n output_tensors,\n tensor_list,\n tensor_map)\n matched_special_op = OpPatternRecognizer.apply_spec_rules(flags,\n input_tensors,\n output_tensors,\n tensor_list,\n tensor_map)\n return matched_pattern, matched_subpattern, matched_special_op"
] | [
"0.56770355",
"0.54898673",
"0.5466495",
"0.54459643",
"0.5443161",
"0.5358336",
"0.527935",
"0.5254748",
"0.525361",
"0.5221164",
"0.52143013",
"0.51818883",
"0.51779795",
"0.5138044",
"0.5081138",
"0.5056731",
"0.49985862",
"0.49973455",
"0.4994104",
"0.49817485",
"0.498029",
"0.49581414",
"0.49504864",
"0.49482137",
"0.49240586",
"0.4914231",
"0.49096102",
"0.48860654",
"0.4877829",
"0.48738474"
] | 0.7778533 | 0 |
returns the frequency of a tone. formulas from | def tone_to_freq(tone):
return math.pow(2, (tone - 69.0) / 12.0) * 440.0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_tone_frequency(self):\n return self.tone_frequency",
"def tone(n, base_freq=440.0):\n # -2 -1 0 1 2 3 4 5 6 7 8 9 10 11 12\n # G G# A A# B C C# D D# E F F# G G# A\n # G Ab A Bb B C Db D Eb E F Gb G Ab A\n return base_freq * 2 ** (n/12)",
"def freq():",
"def freq(self) -> int:",
"def frequencies(self):\r\n\r\n #XXX Use NFFT in the method in order to calculate these, without having\r\n #to calculate the spectrum:\r\n f, spectrum = tsa.get_spectra(self.input.data, method=self.method)\r\n return f",
"def get_frequency(self, detune=0) -> float:\n return np.power(2, (self._cents + detune)/1200) * 440",
"def get_frequency(self,):\n\n # TODO: Find way to appropriately reconvert the frequency to its initial\n # TODO: Value or alert that the value is APPROXIMATE\n FTW = int (0)\n freq = int(0)\n\n FTW_bytes = self._read('CFTW0')\n FTW = FTW.from_bytes(FTW_bytes,'big')\n freq = FTW*self.clock_freq/2**32\n\n print('Latest frequency set: ', \"{:.2e}\".format(freq), 'Hz')\n print(['%.2e' % elem for elem in self.frequencies])\n\n return self.frequencies",
"def get_cw_freq(self):\n return self.get_frequency(self.synth)",
"def frequencies(self):\n radii = self.radii\n freqs = (1 / (self.shape[0] * self.pixel[0])) * radii\n return freqs",
"def frequencies(self):\r\n\r\n self.method['Fs'] = self.method.get('Fs', self.input.sampling_rate)\r\n NFFT = self.method.get('NFFT', 64)\r\n Fs = self.method.get('Fs')\r\n freqs = tsu.get_freqs(Fs, NFFT)\r\n lb_idx, ub_idx = tsu.get_bounds(freqs, self.lb, self.ub)\r\n\r\n return freqs[lb_idx:ub_idx]",
"def get_freq(self, surface_option=None, a=[]):\n\n if (surface_option is None) or (len(a) == 0): return self.modes['freq']\n return self.modes['freq'] + self.get_surface_correction(surface_option, a)",
"def get_frequency(self):\r\n return self.f",
"def fft_frequency(fft, index):\n\treturn index * AUDIO_RATE / len(fft) / 2 # Same as in fft_index, see above",
"def GetFrequency(self):\n ...",
"def mils_to_freq(m):\n f = root_pitch['freq']*(2**(float(m)/12000))\n return f;",
"def midi_to_frequency(midi_note):\n return round(440.0 * 2 ** ((midi_note - 69) * (1.0 / 12.0)), 1)",
"def midi_to_frequency(midi_note: Union[float, int]) -> float:\n half_tone = 2 ** (1 / 12)\n return 440. * half_tone ** (midi_note - 69.)",
"def freq(self, frequency: Optional[int]):",
"def pure_tone(freq: float, sr: float = 128, dur: float = 4, device=None):\n time = torch.arange(int(sr * dur), device=device).float() / sr\n return torch.cos(2 * math.pi * freq * time)",
"def get_frequency(self):\r\n x = self.query('FREQ?')\r\n if x == None: return None\r\n return float(x)",
"def note_to_frequency(note, a_tuning=440):\n note = note - 69 # A above middle C = 0. note(69) = ('A', 5)\n return (a_tuning * 2.0 ** (note / 12.0))",
"def to_frequency(self, tuning=440.0):\n\n NOTES = 'CcDdEFfGgAaB'\n base = NOTES.find('A')\n\n octave_delta = self.octave - Note.BASE_OCTAVE # 0\n octave_halfsteps = octave_delta * 12 # 0\n offset = NOTES.find(self.name) - base # -1\n halfsteps = octave_halfsteps + offset # -2\n freq = tuning * (1.059463 ** halfsteps)\n\n return freq",
"def freq(self, value: int, /) -> None:",
"def frequency(self):\n return self._pca.frequency",
"def getFreq(self,):\n\t\treturn self.freq;",
"def get_frequency(self):\r\n # print '*********in get freq'\r\n self.cntr.run('FREQ 1')\r\n f_0_ = self.cntr.get_measurements(1)\r\n self.f_0 = f_0_[0]\r\n self.cntr.run('FREQ 2')\r\n f_rep_ = self.cntr.get_measurements(1)\r\n self.f_rep = f_rep_[0]",
"def frequency(self):\n return float(self.get_frequency())",
"def getFundFreq(self, data, sampleRate):\n sp = SignalProc.SignalProc(256, 128)\n sp.data = data\n sp.sampleRate = sampleRate\n # spectrogram is not necessary if we're not returning segments\n segment = Segment.Segmenter(sp, sampleRate)\n pitch, y, minfreq, W = segment.yin(minfreq=100, returnSegs=False)\n # we use NaNs to represent \"no F0 found\"\n if pitch.size == 0:\n return float(\"nan\"), float(\"nan\")\n\n segs = segment.convert01(pitch > minfreq)\n segs = segment.deleteShort(segs, 5)\n if len(segs) == 0:\n return float(\"nan\"), float(\"nan\")\n else:\n pitch = pitch[np.where(pitch>minfreq)]\n return round(np.min(pitch)), round(np.max(pitch))",
"def frequency(self):\n return self.reference_clock_speed / 4096 / self.prescale_reg",
"def frequencies(self):\r\n\r\n # Get the sampling rate from the seed time-series:\r\n self.method['Fs'] = self.method.get('Fs', self.seed.sampling_rate)\r\n NFFT = self.method.get('NFFT', 64)\r\n Fs = self.method.get('Fs')\r\n freqs = tsu.get_freqs(Fs, NFFT)\r\n lb_idx, ub_idx = tsu.get_bounds(freqs, self.lb, self.ub)\r\n\r\n return freqs[lb_idx:ub_idx]"
] | [
"0.7913658",
"0.77914226",
"0.7399102",
"0.7215293",
"0.7153386",
"0.7143531",
"0.7114183",
"0.70356035",
"0.69720876",
"0.695965",
"0.69546825",
"0.6954073",
"0.69424343",
"0.69303775",
"0.6903233",
"0.6880722",
"0.68684185",
"0.6841452",
"0.6833891",
"0.6807867",
"0.67693275",
"0.6758636",
"0.6753116",
"0.67447096",
"0.67231023",
"0.67198473",
"0.6716079",
"0.67141247",
"0.6709416",
"0.6704682"
] | 0.87280464 | 0 |
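The tone_to_freq document in the row above applies the equal-temperament formula freq = 440 * 2 ** ((tone - 69) / 12), i.e. MIDI note 69 (A4) is pinned to 440 Hz and each semitone scales the frequency by 2 ** (1/12). A minimal runnable sketch of the same formula, assuming standard A440 tuning:

import math

def tone_to_freq(tone):
    # Equal temperament: MIDI note 69 (A4) is 440 Hz; each semitone
    # multiplies the frequency by 2 ** (1/12).
    return math.pow(2, (tone - 69.0) / 12.0) * 440.0

print(tone_to_freq(69))            # 440.0
print(round(tone_to_freq(60), 2))  # 261.63, middle C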
this function adds 5 cards from the deck to the hand | def deal_poker_hand(self, deck):
for i in range(5):
self.hand.append(deck.drawCard()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_card(self, added_cards):\n\n self.hand[:0] = added_cards",
"def add_a_card_to_hand(self, hand, deck):\n hand.append(deck.pop())",
"def deal(self, num_cards=7):\n self.deck.shuffle()\n for player in self.players:\n for i in range(num_cards):\n self.hands[player].append(self.deck.draw())",
"def add_card(self, card):\r\n self.hand.append(card)",
"def addCardToHand(self, card):\r\n self.hand.append(card)",
"def add_card(self, card):\n self.cards.append(card)\n self.sum_hand(self.cards)",
"def deal(numhands, n=5, deck=[r+s for r in '23456789TJQKA' for s in 'SHDC']):\n # deals numhands hands with n cards each.\n random.shuffle(deck)\n return [deck[n*i:n*(i+1)] for i in range(numhands)]",
"def dealHand(deck):\n hand = [] \n for i in range(7): \n hand.append(deck.pop())\n return hand",
"def copies_in_top_five(self, deck):\n question_string = \"After drawing your opening hand with one copy of {card}, how likely is it that another copy of {card} is in the top five cards of your deck?\"\n answer_suffix = 'percent'\n # That's another reason why we don't choose a card earlier: we might be\n # interested in a card with a specific quality.\n chosen_card = random.choice([ card for card in deck.decklist if card.count > 1 ])\n remaining_copies = chosen_card.count - 1\n remaining_deck = sum([c.count for c in deck.decklist]) - 7\n\n in_top_five_chance = hypergeom.sf(1, remaining_deck, remaining_copies, 5)\n in_top_five_chance = in_top_five_chance * 100\n correct_string = \"{:.2f}\".format(in_top_five_chance)\n\n wrongs = self.gen_wrong(in_top_five_chance, 'percent', 4)\n possible = wrongs + [correct_string]\n random.shuffle(possible)\n\n print \"Chance of a copy of {} in the next five cards: {}\".format(chosen_card.name, correct_string)\n return question_string.format(card=chosen_card.name), correct_string, possible, answer_suffix, chosen_card",
"def add_card(self, card):\n #Determines if there are more than one.\n if isinstance(card,list):\n for element in range(len(card)):\n #Creates a new hand\n new_hand=Hand([card[element]])\n #Adds hand to list\n self.cards+= new_hand.cards\n else:\n new_hand=Hand([card])\n self.cards+= new_hand.cards",
"def add_cards(self, cards):\n self.get_cards().extend(cards)",
"def add_card_to_hand(self, card):\n self.hand.append(card)",
"def dealDraw(deck):\n hand = [] \n for i in range(7): \n hand.append(deck.pop())\n return hand",
"def move_cards(self, hand, num):\n \n # Check to see if the deck has enough cards\n if len(self.cards) < num:\n print(\"There aren't enough cards in the stack\")\n return\n \n for i in range(num):\n hand.cards.append(self.cards.pop())",
"def add_card(self, card):\n self.hand.append(card)",
"def add_card(self, card):\n if not isinstance(card, Card):\n raise TypeError(\"'card' must be a card object.\")\n # append new card to list of cards in the hand\n self.cards.append(card)\n self.total = card + self.total\n # aces require a little more work\n if card.rank == 14:\n self.soft = True\n self.num_aces += 1\n self.num_hard_aces += 1\n # account for soft hands\n if self.total > 21 and self.soft:\n self.total -= 10\n self.num_hard_aces -= 1\n self.soft = False\n # catch the edge case where you're delt 12+ aces\n if self.total > 21:\n self.total -= 10\n self.num_hard_aces -= 1\n self.soft = False\n if self.num_hard_aces > 0:\n self.soft = True\n if self.total > 21:\n self.bust = True",
"def sum_hand(self, cards):\n self.totalValue = 0\n for card in cards:\n self.totalValue += DeckOfCards.value(self, card)\n\n for card in cards:\n if self.totalValue > 21 and 'A' in card:\n self.totalValue -= 10\n \n if self.totalValue > 21:\n self.keepGoing = False\n print(f\"{self.name} busted!\")",
"def add_card(self, card):\n self.unpack_cards()\n card.dealt(self)\n self.card_list.append(card)\n self.num_cards.set(self.num_cards.get()+1)\n # pretty inefficient to unpack and pack on every card addition...\n self.pack_cards() \n if self.empty.get() is True:\n self.empty.set(False)\n self.toggle_empty_hand()",
"def rank_five_cards(cards):\n\n # List of all card values\n values = sorted([card.number for card in cards])\n\n # Checks if hand is a straight\n is_straight = all([values[i] == values[0] + i for i in range(5)])\n\n # Additional straight check\n if not is_straight:\n\n # Weakest straight\n is_straight = all(values[i] == values[0] + i for i in range(4)) and values[4] == 12\n\n # Rotate values as the ace is weakest in this case\n values = values[1:] + values[:1]\n\n # Checks if hand is a flush\n is_flush = all([card.suit == cards[0].suit for card in cards])\n\n # Get card value counts\n value_count = {value: values.count(value) for value in values}\n\n # Sort value counts by most occuring\n sorted_value_count = sorted([(count, value) for value, count in value_count.items()],\n reverse=True)\n\n # Get all kinds (e.g. four of a kind, three of a kind, pair)\n kinds = [value_count[0] for value_count in sorted_value_count]\n\n # Get values for kinds\n kind_values = [value_count[1] for value_count in sorted_value_count]\n\n # Royal flush\n if is_straight and is_flush and values[0] == 8:\n return [ROYAL_FLUSH] + [str(value) for value in values]\n # Straight flush\n if is_straight and is_flush:\n return [STRAIGHT_FLUSH] + kind_values\n # Four of a kind\n if kinds[0] == 4:\n return [FOUR_OF_A_KIND] + kind_values\n # Full house\n if kinds[0] == 3 and kinds[1] == 2:\n return [FULL_HOUSE] + kind_values\n # Flush\n if is_flush:\n return [FLUSH] + kind_values\n # Straight\n if is_straight:\n return [STRAIGHT] + kind_values\n # Three of a kind\n if kinds[0] == 3:\n return [THREE_OF_A_KIND] + kind_values\n # Two pair\n if kinds[0] == 2 and kinds[1] == 2:\n return [TWO_PAIR] + kind_values\n # Pair\n if kinds[0] == 2:\n return [PAIR] + kind_values\n # No pair\n return [HIGH_CARD] + kind_values",
"def move_cards(self, hand, num=1):\n for i in range(num):\n hand.add_card(self.pop_card())",
"def makedeck(deck):\r\n #making deck of cards\r\n SUITS = [\"Hearts\",\"Diamonds\",\"Clubs\",\"Spades\"]\r\n VALUES = [\"A\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"10\",\"J\",\"Q\",\"K\"]\r\n for e in SUITS:\r\n for i in VALUES:\r\n card = i+\" \"+e\r\n deck.append(card)",
"def deal_cards():\n for _ in range(2):\n user_cards.append(random.choice(deck))\n dealer_cards.append(random.choice(deck))",
"def move_cards(self, hand, num):\n for i in range(num):\n hand.add_card(self.pop_card())",
"def deal(deck): \r\n hand = []\r\n for n in range(2): \r\n deck, hand = draw(deck, hand)\r\n \r\n return deck, hand",
"def add_card(self, card):\n self.decklist.append(card)",
"def hit(self, deck):\n try:\n self.hand.append(deck.pop(0))\n except IndexError:\n print('There are no more cards in the deck!')",
"def create_hand(self):\n print(\"card len\")\n print(len(self.cards))\n\n my_hand = Hand()\n for index in range(5):\n my_hand.add_card(self.cards.pop())\n\n print(\"card len\")\n print(len(self.cards))\n print(\"hand len\")\n print(len(my_hand.cards))\n return my_hand",
"def fresh_deck():\r\n deck = sorted(range(13)*4)\r\n for i in range(52):\r\n deck[i] += 1\r\n for i in range(12):\r\n deck[-(i+1)] = 10\r\n return deck",
"def add_card(self, card):\n self.deckcards.append(card)",
"def add(self, cards):\n\n super().add(cards)\n self._update_value()"
] | [
"0.72352415",
"0.7053274",
"0.6955573",
"0.6879287",
"0.6851322",
"0.68399423",
"0.68385714",
"0.6816445",
"0.68131894",
"0.680666",
"0.6777246",
"0.67551434",
"0.6744151",
"0.672975",
"0.6721525",
"0.67009944",
"0.6674014",
"0.6656667",
"0.6654007",
"0.66452503",
"0.6627303",
"0.66247976",
"0.66110593",
"0.66085947",
"0.658203",
"0.6579294",
"0.6576704",
"0.65348023",
"0.6525534",
"0.65169966"
] | 0.7919288 | 0 |
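The deal_poker_hand document in the row above appends five cards drawn from a deck via drawCard(). A self-contained sketch of the same draw-five pattern; the Deck class here is hypothetical and only exists so the snippet runs on its own:

import random

class Deck:
    # Hypothetical minimal deck, not taken from the row above.
    def __init__(self):
        self.cards = [r + s for r in "23456789TJQKA" for s in "SHDC"]
        random.shuffle(self.cards)

    def drawCard(self):
        return self.cards.pop()

deck = Deck()
hand = [deck.drawCard() for _ in range(5)]
print(hand)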
prints all cards in hand | def print_hand(self):
for card in self.hand:
card.printCard() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_hand(self):\n for card in self.hand:\n print(card)",
"def player_show_hand(self):\n for card in self.get_hand():\n print(card.get_card())",
"def show_hand(self):\n\n print(f\"{self.name.title()}'s cards are:\")\n for card in self.hand:\n print(card.get_card_details())",
"def show(self):\r\n for card in self.cards_list:\r\n print(card)",
"def print_deck(self):\n\n ls = []\n for card in self.deck:\n ls.append(card.get_card())\n print(ls)",
"def print_cards(self, all_cards=True):\n # print(\"Cards:\")\n result = \"\"\n cards = self.cards\n if all_cards:\n cards += self.cards_on_table\n for c in cards:\n result += str(c)\n return result",
"def print_hands(self):\n # Clear the terminal and reprint round header\n os.system(\"clear\")\n self.print_header\n\n # Only display one of the dealers cards if they are still playing\n if not self.round_winner:\n print()\n print(\"Dealer's Cards\")\n print(\"=\" * 25)\n print(\"UNKNOWN\")\n for card in self.dealer.cards:\n if card != self.dealer.cards[0]:\n print(f\"{card.game_value} of {card.suit}\")\n print(\"-\"*25)\n print(\"TOTAL = ?\")\n print()\n\n print(\"Player's Cards\")\n print(\"=\" * 25)\n for card in self.player.cards:\n print(f\"{card.game_value} of {card.suit}\")\n print(\"-\" * 25)\n print(\"TOTAL = \" + str(self.player.sum_cards()))\n print()\n\n # Display the players cards and all of the dealers cards\n elif self.round_winner:\n print()\n print(\"Dealer's Cards\")\n print(\"=\" * 25)\n for card in self.dealer.cards:\n print(f\"{card.game_value} of {card.suit}\")\n print(\"-\" * 25)\n print(\"TOTAL = \" + str(self.dealer.sum_cards()))\n print()\n\n print(\"Player's Cards\")\n print(\"=\" * 25)\n for card in self.player.cards:\n print(f\"{card.game_value} of {card.suit}\")\n print(\"-\" * 25)\n print(\"TOTAL = \" + str(self.player.sum_cards()))\n print()\n pass",
"def print_cards(cards):\r\n string = ''\r\n for c in cards:\r\n suit = c[0]\r\n if suit == 1:\r\n suit = \"\\u2665\" # heart\r\n elif suit == 2:\r\n suit = \"\\u2660\" # Spade\r\n elif suit == 3:\r\n suit = \"\\u2666\" # Diamond\r\n else:\r\n suit = \"\\u2663\" # club\r\n\r\n num = c[1]\r\n if num == 11:\r\n num = 'J'\r\n elif num == 12:\r\n num = 'Q'\r\n elif num == 13:\r\n num = 'K'\r\n else:\r\n num = str(num)\r\n\r\n string = string + num + suit + ' '\r\n return string",
"def printAll(self, cards, output):\r\n for (card, num) in cards.items():\r\n self.ts.addCards(card, num)\r\n self.ts.writeCards(output)",
"def print(self):\n\n for domino in self.hand:\n print(domino)",
"def show_card(self):\n return self.hands.show(0)",
"def print_card(self, index=0):\n print self._cards[index]",
"def display_hand(self):\n \n print (self._hand)",
"def displayHand(hand):\r\n for letter in hand.keys():\r\n for j in range(hand[letter]):\r\n print(letter,end=\" \") # print all on the same line\r\n print() # print an empty line\r",
"def displayHand(hand):\n for letter in hand.keys():\n for j in range(hand[letter]):\n print letter, # print all on the same line\n print # print an empty line",
"def displayHand(hand: d_si) -> None:\n for letter in hand.keys():\n for _ in range(hand[letter]):\n print(letter,end=\" \")\n print()",
"def print_hand(hand: list, output: bool = True) -> str:\n # Split each card ASCII art into lines\n hand = [card_factory(**card).split(\"\\n\") for card in hand]\n\n screenshot = \"\"\n # The max display is 80 char long, so 6 card max can fit in one row\n # We will split the hand into slices of 6 cards to print\n for row in [hand[x: x + 6] for x in range(0, len(hand), 6)]:\n # Print line x of each cards in the hand\n for line_number in range(len(hand[0])):\n # Adjusting the spacing so the cards are centered\n current_line = \" \" + \" \" * (7 - len(row)) * 5\n for card in row:\n current_line += card[line_number] + \" \"\n\n screenshot += current_line + \"\\n\"\n if output:\n print(current_line)\n\n return screenshot",
"def print_card(card):\n\n titles = [\"Ones\", \"Twos\", \"Threes\", \"Fours\", \"Fives\", \"Sixes\", \n \"One pair\", \"Two Pairs\", \"Three of\", \"Four of\", \"Straigth\",\n \"Big straight\", \"House\", \"Yatzy\"]\n \n print(\"+---------+-----------------+-------+\")\n print(\"| Index | Name | Score |\")\n print(\"+---------+-----------------+-------+\")\n\n for i in range(len(card)):\n print(\"| {:>7} | {:<15} | {:<5} |\".format(i, titles[i], card[i]))\n\n print(\"+---------+-----------------+-------+\")",
"async def cards_per_hand(ctx):\n message = NNB.cards_per_hand()\n await ctx.send(message)",
"def revealAll(aDeck):\r\n cardNames=''\r\n for x in range(len(aDeck)):\r\n card= aDeck[x]\r\n if (card[\"Name\"] == \"Joker\"):\r\n cardNames += card[\"Suite\"] + \" \" + card[\"Name\"]\r\n else:\r\n cardNames += card[\"Name\"] + \" of \" +card[\"Suite\"]\r\n cardNames += \"\\n\" \r\n return (cardNames)",
"def print_cards(list_var):\n\tfor i in range(len(list_var)):\n\t\tprint(\"player %d cards are\" %i,list_var[i])",
"def __str__(self):\r\n if len(self.hand) < 1:\r\n return \"No cards in hand.\"\r\n else:\r\n output = \"%s's hand contains\" % (self.tag.capitalize())\r\n for card in self.hand:\r\n output += \" %s\" % (card)\r\n return output + \".\"",
"def cards_per_hand(self):\n s = \"\"\n for id in self.player_id_list:\n name = self.players[id].name\n cards = len(self.players[id])\n s += \"{} has {} cards.\\n\".format(name, cards)\n return s[:-1]",
"def print_hand(self):\n if self.cheating:\n print(\"You're cheating!\")\n print(\"until you reroll it!\")\n print(\"\"\"\nYou rolled:\na = [ {} ]\nb = [ {} ]\n\nYou are in Stage {}\n \"\"\".format(self.die_a, self.die_b, self.stage))",
"def displayDiscarded(self):\n print(\"Discarded :\")\n if len(self.discarded) == 0:\n print(\"*no discard yet*\")\n else:\n for card in self.discarded:\n print(card.toString(), end=\" \")\n print()",
"def pr(x):\n Card.print_pretty_cards(x)",
"def __str__(self):\n string = \"Hand contains \"\n h = self.hand\n \n for i in range(len(h)):\n string += str(h[i].get_suit()) + str(h[i].get_rank()) + \" \"\n \n return string",
"def displayHands(p_hand, d_hand):\n os.system('clear') # Call to OS clear the screen to clean up output\n print(\"\\nPlayer hand: \", p_hand.showHand())\n print(\"Player score: \", p_hand.handSum())\n\n print(\"\\nDealer hand: \", d_hand.showHand())\n print(\"Dealer score: \", d_hand.handSum())",
"def __repr__(self):\n return f\"Deck({self.cards})\"",
"def play_all(self):\n for _ in range(self._hand.size()):\n card = self._hand.pop()\n self._active.push(card)\n self._money = self._money + card.money\n self._attack = self._attack + card.attack\n print '\\nPlayed all cards!'"
] | [
"0.86974907",
"0.83651304",
"0.8254563",
"0.79101205",
"0.790326",
"0.78862673",
"0.7541825",
"0.74812996",
"0.7473956",
"0.7269598",
"0.7237259",
"0.7206232",
"0.72048044",
"0.71964407",
"0.7182787",
"0.7144437",
"0.71311176",
"0.70919424",
"0.6971113",
"0.6928701",
"0.6875076",
"0.6819464",
"0.6742277",
"0.67186135",
"0.6685823",
"0.6683121",
"0.6659518",
"0.6651436",
"0.6644035",
"0.65541804"
] | 0.8903049 | 0 |
There are values in the xls that have descriptions in one cell and the value to the left, this function is a helper in those cases | def get_horizontal_field_value(xls, row_index, description_index, fields_count=1, description=None, partial_match=False):
if description:
actual_description = get_cell_value(xls, row_index, description_index)
if not actual_description:
raise ValueError("empty cell at coordinate: {}:{}".format(row_index, description_index))
mismatch = False
if partial_match:
if description not in actual_description:
mismatch = True
else:
if description != actual_description:
mismatch = True
if mismatch:
raise ValueError("Mismatch between expected description and actual description: \"{}\" != \"{}\""
.format(description, actual_description))
output = []
for i in range(1, fields_count + 1, 1):
cell_value = get_cell_value(xls, row_index, description_index + i)
if cell_value is not None:
output.append(cell_value)
if not output:
return None
return ' '.join(v for v in output) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_data_labels(sheet, row, col):\n final_column = col\n header_row = _FIELDS['cell_value']['header']['row']\n # Abstract this sort of thing\n header = sheet.cell(row + header_row, final_column).value\n while any(header.startswith(label) for label\n in _FIELDS['isotherm tabular']['labels']):\n final_column += 1\n header = sheet.cell(row + header_row, final_column).value\n return [sheet.cell(row + header_row, i).value for i in\n range(col, final_column)]",
"def find_empty_cell():\n\n for x in range(1, 38):\n if worksheet.cell((x, 1)).value == \"\":\n date_cell = worksheet.cell((x, 1))\n description_cell = worksheet.cell((x, 2))\n payment_received_cell = worksheet.cell((x, 3))\n rent_due_cell = worksheet.cell((x, 4))\n\n return date_cell, description_cell, payment_received_cell, rent_due_cell\n\n else:\n continue",
"def extract_cell_data(sheet, rows, cols):\n extract = []\n for row in rows:\n text = []\n for col in cols:\n cell_name = col + str(row)\n cell_value = str(sheet[cell_name].value).strip()\n cell_value = sheet[cell_name].value\n \n if isinstance(cell_value, datetime.datetime):\n cell_value = cell_value.strftime(\"%Y-%m-%d\")\n elif cell_value is None:\n continue\n else:\n cell_value = str(cell_value).strip()\n \n text.append(cell_value)\n text = \" \".join(text)\n if text:\n extract.append(text)\n else:\n continue\n if extract:\n return \" \".join(extract)\n else:\n return \"\"",
"def icd9_descriptions(row):\n\n if 1 <= row['icd9_code'] <= 139:\n val = 'Parasitic_Disease'\n elif 140 <= row['icd9_code'] <= 239:\n val = 'Neoplasm'\n elif 240 <= row['icd9_code'] <= 279:\n val = 'Endocrine'\n elif 280 <= row['icd9_code'] <= 289:\n val = \"Blood\"\n elif 290 <= row['icd9_code'] <= 319:\n val = \"Mental_Disorder\"\n elif 320 <= row['icd9_code'] <= 389:\n val = \"Nervous_System\"\n elif 390 <= row['icd9_code'] <= 459:\n val = \"Circulatory_System\"\n elif 460 <= row['icd9_code'] <= 519:\n val = \"Respiratory_System\"\n elif 520 <= row['icd9_code'] <= 579:\n val = \"Digestive_System\"\n elif 580 <= row['icd9_code'] <= 629:\n val = \"Genitourinary_System\"\n elif 630 <= row['icd9_code'] <= 679:\n val = \"Pregnancy\"\n elif 680 <= row['icd9_code'] <= 709:\n val = \"Skin\"\n elif 710 <= row['icd9_code'] <= 739:\n val = \"Musculoskeletal\"\n elif 740 <= row['icd9_code'] <= 759:\n val = \"Congenital_Anomalies\"\n elif 760 <= row['icd9_code'] <= 779:\n val = \"Perinatal\"\n elif 780 <= row['icd9_code'] <= 799:\n val = \"Ill-Defined\"\n elif 800 <= row['icd9_code'] <= 999:\n val = \"Injury/Poison\"\n elif row['icd9_code'] < .4:\n val = \"Supplemental_factors\"\n elif .4 <= row['icd9_code'] < .7:\n val = \"External_Cause_Inj_Poison\"\n elif .7 <= row['icd9_code'] < .9:\n val = \"Morphology_of_Neoplasms\"\n else:\n val = row['icd9_code']\n\n return val",
"def cpi_data():\n\n\n cpi_workbook = xlrd.open_workbook(\n 'corruption_perception_index.xlsx')\n cpi_sheet = cpi_workbook.sheets()[0]\n\n\n cpi_title_rows = zip(cpi_sheet.row_values(1), cpi_sheet.row_values(2))\n cpi_titles = [t[0] + ' ' + t[1] for t in cpi_title_rows]\n cpi_titles = [t.strip() for t in cpi_titles]\n cpi_rows = [cpi_sheet.row_values(r) for r in range(3, cpi_sheet.nrows)]\n\n\n cpi_types = get_types(cpi_sheet.row(3))\n cpi_titles[0] = cpi_titles[0] + ' Duplicate'\n cpi_table = get_table(cpi_rows, cpi_types, cpi_titles)\n\n return cpi_table",
"def _get_xls_row_vals(self, row):\n return [v.value for v in row]",
"def si(content):\n import types\n if isinstance(content, openpyxl.cell.Cell):\n if isinstance(content.value, types.NoneType):\n return unicode('')\n else:\n return unicode(content.value).strip()\n elif isinstance(content, xlrd.sheet.Cell):\n if content.ctype == xlrd.sheet.XL_CELL_EMPTY:\n return unicode('')\n else:\n return unicode(content.value).strip()\n return unicode(content).strip()",
"def check_headerRow(self, expected, found):\n\n # spreadsheets must have either a barcode field or a object ID field, but both are not required\n header1 = 'barcode'\n header2 = ('object identifier\\n(edit heading to specify type' +\n ' - e.g. barcode)')\n expected = self.remove_annoying(header1, header2, expected, found)\n\n missing = []\n\n for header in expected:\n if header not in found:\n missing.append(header)\n\n if missing:\n self.raise_excelerror(\"Missing required value- {0}.\"\n .format(missing))\n\n return True",
"def test_parse_sample_sheet(self):\n pass",
"def read_xd_master_file(path, errorpointer):\n filepointer = open(path, 'r')\n for line in filepointer.readlines():\n if 'TITLE' in line:\n compound_name = line.partition('!')[2].lstrip().rstrip()\n if 'CELL' in line:\n cell = [float(i) for i in line.split(\" \") if '.' in i]\n break\n filepointer.close()\n try:\n return compound_name, cell\n except:\n errorpointer.write(path + '\\n')\n return None, None",
"def getHeaderRowPosition(sheetData):\n for index, row in enumerate(sheetData):\n if row[1] != '':\n return index\n return 0",
"def load_plothrm_detailed(filename):\n\n filename = Path(filename)\n\n book = xlrd.open_workbook(filename)\n xlsx_file = pd.ExcelFile(filename)\n\n df_seq = None\n df_meta = None\n\n for sheetidx, sheetname in enumerate(xlsx_file.sheet_names):\n\n print(f'Parsing \"{filename}\" sheet \"{sheetname}\"', flush=True)\n\n try:\n\n sheetnumber = sheetidx + 1\n worksheet = book.sheet_by_name(sheetname)\n\n # try to load a value from the row and column\n def value_default(row, col, d):\n if row >= worksheet.nrows or col >= worksheet.ncols:\n return d\n s = worksheet.cell_value(row, col)\n if type(s) is str and len(s) == 0:\n return d\n else:\n return s\n\n # load metadata\n meta = dict()\n meta['sheetname'] = sheetname\n meta['sheetnumber'] = sheetnumber\n meta['plothrm_version'] = str(worksheet.cell_value(0, 1))\n meta['data_filename'] = str(worksheet.cell_value(1, 1))\n meta['seq_filename'] = str(worksheet.cell_value(2, 1))\n meta['nchan'] = int(worksheet.cell_value(3, 1))\n meta['nsamp'] = int(worksheet.cell_value(4, 1))\n meta['sampling_hz'] = float(worksheet.cell_value(5, 1))\n meta['opt_zero_above'] = float(value_default(6, 2, np.nan))\n meta['opt_remove_baselines'] = bool (value_default(6, 4, 0))\n meta['opt_remove_sync'] = bool (value_default(6, 6, 0))\n meta['opt_channel_smooth'] = bool (value_default(6, 8, 0))\n meta['opt_sample_smooth'] = float(value_default(6, 10, np.nan))\n meta['height_res'] = float(worksheet.cell_value(7, 1))\n meta['sync_bound'] = float(worksheet.cell_value(8, 1))\n # TODO load region data at rows 9, 10, 11 (Excel 1-based index rows 10-12)\n\n for k, v in meta.items():\n meta[k] = [v]\n meta = pd.DataFrame(meta)\n\n if df_meta is None:\n df_meta = meta\n else:\n df_meta = df_meta.append(meta)\n\n # load sequence data\n df = pd.read_excel(filename, sheetname, header=13, na_values=['ERROR', 'Infinity'])\n df['sheetname'] = sheetname\n df['sheetnumber'] = sheetnumber\n if df_seq is None:\n df_seq = df\n else:\n df_seq = df_seq.append(df)\n\n except Exception as e:\n print(f' error parsing worksheet \"{sheetname}\": {e}')\n continue\n\n return df_seq.reset_index(drop=True), df_meta.reset_index(drop=True)",
"def get_retired():\n retired = []\n cell_list = sheet.findall(\"RETIRE\", in_column=4)\n\n for i in cell_list:\n item = sheet.cell(i.row, i.col - 1).value\n retired.append(str(item))\n\n return retired",
"def parse_columns():\n # Fetch columns letters\n old_column_letter = get_old_numbers_column()\n new_columns_letters = get_new_numbers_columns()\n\n # Fetch old numbers\n old_column_cells = sheet[f'{old_column_letter}15':f'{old_column_letter}120']\n old_numbers_cells = []\n for ii in old_column_cells:\n if re.match('[0-9]{7}', str(ii[0].value)):\n old_numbers_cells.append(ii)\n print(old_numbers_cells)\n\n ##----- WORKING UNTIL HERE -----##\n \n # Fetch all new numbers (for each format)\n # Creates an array of arrays of cells ([[CellA, CellB], [CellC, CellD]])\n for ii in old_numbers_cells:\n new_numbers_cells_array = []\n for ij in new_columns_letters:\n new_columns_cells = [f'{ij}15', f'{ij}120']\n new_numbers_cells = []\n for jj in new_columns_cells:\n if re.search('[0-9]{7}'):\n new_numbers_cells.append(ii)\n new_numbers_cells_array.append(new_numbers_cells)\n\n # Combines all the cells :\n # Creates an array of tuples, easier to work with..\n work_tuples = []\n for ii in old_numbers_cells:\n # First we create an array..\n work_array = [int(ii.value)]\n # Then we fill it..\n for ij in new_numbers_cells_array:\n # Using the current position in the old cells array...\n work_array.append(ij[old_numbers_cells.index(ii)])\n # Finally, the array is parsed as a tuple and added to the list\n work_tuples.append(tuple(work_array))\n\n return work_tuples",
"def read_data(filename):\n \n # Iterate over all X-values. Y-values are stored in colummns of particular worksheet\n for x in range(0,13):\n\n wb = xlrd.open_workbook(filename)\n ws = wb.sheet_by_index(0)\n\n # This position of metadata doesn't change its relative position from sheet-to-sheet\n n_energy = int(ws.cell_value(1,3))\n n_iter = int(ws.cell_value(4,3))\n Rows_to_Skip = 15\n\n # Rename columns\n column_names = [str(x) for x in range(0,n_iter)]\n column_names.insert(0,'nan')\n column_names.insert(0,'KE')\n\n # Read data using pandas\n df_data = pd.read_excel(io = filename,\n sheet_name=x,\n skiprows = Rows_to_Skip,\n names = column_names,\n index_col='KE'\n )\n # Drop the second column as it is always supposed to be false\n df_data.drop(columns=df_data.columns[0],inplace=True)\n \n # Get x_data as the index \n x_array = np.array(df_data.index).reshape(len(df_data.index),1)\n \n # If we encounter first sheet\n if x==0:\n y = df_data.to_numpy()\n \n # Stack with the cummulative y built till now\n else:\n y = np.hstack((y, df_data.to_numpy()))\n \n # Ideally x_array should be (481, 1), and y should be (481, 169)\n return x_array, y",
"def test_get_cell(workbook):\n assert workbook.get_cell(3,1) == '507906000030242007'",
"def calc_col_info(self):\n print('\\nCOLUMN VALUE INFORMATION\\n' +\n '----------------------------')\n results.append('\\nCOLUMN VALUE INFORMATION\\n' +\n '----------------------------')\n li = []\n for x in range(0, self.tot_col):\n print(str('\\n' + self.file_list[0][x]) +\n '\\n--------------') # Prints name of column\n\n results.append('\\n' + self.file_list[0][x] +\n '\\n--------------')\n\n for y in range(1, self.tot_rows + 1):\n li.append(self.file_list[y][x])\n li_no_empty = [x for x in li if x != ''] # List with no empty fields\n\n # MAX & MIN VALUE\n print('Maximum value: ' + str(max(li)))\n print('Minimum value: ' + str(min(li_no_empty)))\n results.append('Maximum value: ' + str(max(li)))\n results.append('Minimum value: ' + str(min(li_no_empty)))\n\n # MAX & MIN LENGTH\n li_b = []\n li_c = []\n for a in range(0, len(li)):\n li_b.append(len(li[a]))\n\n print('Maximum length: ' + str(max(li_b)))\n results.append('Maximum length: ' + str(max(li_b)))\n\n for b in range(0, len(li_no_empty)):\n li_c.append(len(li_no_empty[b]))\n\n print('Minimum length: ' + str(min(li_c)))\n results.append('Minimum length: ' + str(min(li_c)))\n\n del li_b[:]\n del li_c[:]\n\n # DISTINCT\n unique_set = set(li) # Counts blanks\n unique_set.discard('') # Does not account for null values\n unique_count = len(unique_set)\n\n print('Distinct values: ' + str(unique_count))\n results.append('Distinct values: ' + str(unique_count))\n\n # DUPLICATES\n value_count = {}\n for c in li:\n value_count[c] = value_count.get(c, 0) + 1\n dups = {key: value for key, value in value_count.items() if value > 1}\n sorted_dups = sorted(dups.items(), key=operator.itemgetter(1))\n\n print('\\nDuplicate values\\n' +\n '-------')\n results.append('\\nDuplicate values\\n' +\n '-------')\n\n for item in sorted_dups:\n print('{}'.format(str(item[0])) + ' : ' + str(item[1]))\n results.append('{}'.format(str(item[0])) + ' : ' + str(item[1]))\n\n # for key, num in dups.items():\n # print('{} : {}'.format(key, num))\n # results.append('{} : {}'.format(key, num))\n\n del li[:]",
"def iter_xls_cols():\n from itertools import product,count\n from string import ascii_lowercase\n for size in count(1):\n for let in product(ascii_lowercase, repeat=size):\n yield \"\".join(let)",
"def process_exp_value(exp_data):\n VALUE_XPATH = 'hunterdb:Value/child::text()'\n DOI_XPATH = 'hunterdb:Source/hunterdb:DOI/child::text()'\n PREFERRED_XPATH = '@hunterdb:preferredValue'\n doi_values = exp_data.xpath(DOI_XPATH, namespaces=HUNTER_DB_NAMESPACE_DICT)\n values_values = exp_data.xpath(VALUE_XPATH, namespaces=HUNTER_DB_NAMESPACE_DICT)\n preferred = exp_data.xpath(PREFERRED_XPATH, namespaces=HUNTER_DB_NAMESPACE_DICT)\n if len(preferred) == 1:\n preferred = bool(preferred[0])\n else:\n preferred= False\n if len(doi_values) == 1 & len(values_values) == 1:\n return (float(values_values[0]), doi_values[0], preferred)\n else:\n LOGGER.debug(\"values: {}\", values_values)\n LOGGER.debug(\"doi_values: {}\", doi_values)",
"def _get_table_val(val):\n text = val.text.strip()\n if val.br:\n val = \", \".join(text.split('\\r\\n'))\n elif val.sup:\n val = \"\".join(map(str, val.contents))\n elif NON_BREAK_SPACE in text:\n val = \", \".join(text.split(f' {NON_BREAK_SPACE} {NON_BREAK_SPACE} '))\n else:\n val = text\n\n return val",
"def getInfoFromExcel(excel_path):\n book = xlrd.open_workbook(excel_path)\n field = book.sheets()[0].row_values(0)\n insid_index = field.index('id')\n tester_index = field.index('Responsible Tester')\n #tester_index = field.index('Tester')\n insid = book.sheets()[0].col_values(insid_index)\n tester = book.sheets()[0].col_values(tester_index)\n insid.pop(0) #delete 'id'\n tester.pop(0) #delete 'tester'\n #print insid\n #print tester\n info = {}\n for i in range(len(insid)):\n info[int(insid[i])] = str(tester[i])\n \n return info",
"def _report_xls_render_space_extra_kaiser(self):\n return None",
"def format_sheet(sheet_to_be_formatted, output_name):\n\n wb = load_workbook(sheet_to_be_formatted)\n # ws = book['Sheet1']\n ws = wb.active\n\n columns_to_keep_unhidden = ['index', 'company_name','price_90_days_ago', 'price_30_days_ago', 'price_21_days_ago', \n 'price_7_days_ago', 'price_most_recent', \n 'price_change_7_days', 'price_change_21_days', 'price_change_30_days', 'price_change_90_days', \n 'constant_price_drop_7', 'constant_price_drop_21']\n\n ## Columns to be formated\n percentage_columns = ['price_change_7_days', 'price_change_21_days', 'price_change_30_days', 'price_change_90_days']\n\n ## Creating dicts\n percentage_columns_dict = {}\n \n columns_to_keep_dict = {}\n for column_cell in ws.iter_cols(1, ws.max_column): # iterate column cell\n\n ## Hiding daily prices columns\n\n if column_cell[0].value not in columns_to_keep_unhidden:\n col = column_cell[0].column_letter\n ws.column_dimensions[col].hidden= True\n\n if column_cell[0].value in percentage_columns:\n percentage_columns_dict[column_cell[0].value] = column_cell[0].column_letter\n \n if column_cell[0].value in columns_to_keep_unhidden:\n columns_to_keep_dict[column_cell[0].value] = column_cell[0].column_letter\n \n \n # Percentage format can be only set to cells. Thus we need to iterrate through rows of applicable columns\n row_nums = len(list(ws.rows))\n print(row_nums)\n for v in percentage_columns_dict.values():\n for row_num in range(2, row_nums):\n ws[f'{v}{row_num}'].number_format = '0.00%'\n \n# Setting width for selected columns\n for k in columns_to_keep_dict.keys():\n col = columns_to_keep_dict[k]\n print(col)\n ws.column_dimensions[col].width = 20\n\n\n\n wb.save(output_name)",
"def test_extract_column_zero_and_one_correct_content():\n data = \"\"\"[email protected],Records manager\[email protected],Network engineer\[email protected],Electrical engineer\[email protected],Science writer\"\"\"\n result = extract.extract_data_given_column(data, 0)\n assert \"[email protected]\" in result\n result = extract.extract_data_given_column(data, 1)\n assert \"Records manager\" in result",
"def _get_errors(sheet, row, col):\n field = _FIELDS['primary data']\n val = sheet.cell(row + field['row'], col + field['column']).value\n if not val:\n return []\n final_row = row + field['row']\n error = sheet.cell(final_row, col + field['column']).value\n while error:\n final_row += 1\n error = sheet.cell(final_row, col + field['column']).value\n return [sheet.cell(i, col + field['column']).value\n for i in range(row + field['row'], final_row)]",
"def lectxl(NOM):\n #NOM=input(\"nom du fichier:\")#interactif\n #NOM=str(NOM +\".xlsx\")\n workbook = xlrd.open_workbook(NOM)\n SheetNameList = workbook.sheet_names()\n worksheet = workbook.sheet_by_name(SheetNameList[0])\n num_rows = worksheet.nrows \n f=[NOM]\n for curr_row in range(0,num_rows):\n row = worksheet.row(curr_row)\n f.append(row)\n return f",
"def get_header_au(row):\n rules = [\"Time\", \"Smile\", \"AU\"]\n #header = row[0:2]\n header=row\n #print row\n result = []\n i = 0\n #for all values in the header\n for h in header:\n print h\n if h in rules or h[0:2] in rules or 'AU' in h:\n result.append([h, i])\n i = i + 1\n # print result\n return result",
"def simple_cell_string(self, values):\n if len(values) == 0:\n return '!'\n elif len(values) == 1:\n return self.number_strings[values[0]]\n else:\n return '.'",
"def checkRows( self ):\n\n for x in [0,3,6]:\n firstVal = self.__grid[x]\n secondVal = self.__grid[x+1]\n thirdVal = self.__grid[x+2]\n\n compiledVal = str(firstVal) + str(secondVal) + str(thirdVal)\n\n if 'xx' in compiledVal.lower():\n\n return ('X', compiledVal)\n\n elif 'oo' in compiledVal.lower():\n\n return ('O', compiledVal) \n\n elif compiledVal.lower() == 'x2x' or \\\n compiledVal.lower() == 'x5x' or \\\n compiledVal.lower() == 'x8x':\n\n return ('X', compiledVal)\n \n return None",
"def testDictionary(xlsxFile):\n names, allGrades = getNamesGrades.getNamesGrades(xlsxFile)\n if xlsxFile == None or xlsxFile == \"\":\n xlsxFile = \"test.xlsx\"\n wb = Workbook(xlsxFile)\n wb = openpyxl.load_workbook(xlsxFile)\n ws = wb.active\n\n def addGrade(gradeName, gradeColumn):\n \"\"\" adds Grade, first Argument is string of grade Name,\n second Argument is integer of column\"\"\"\n\n grades = []\n sheet = wb[wb.sheetnames[0]]\n for i in range(0,13):\n sourceValue = sheet.cell(row=10+i, column=gradeColumn).value\n grades.append(sourceValue)\n allGrades[wb.sheetnames[0]] [gradeName] = grades\n return allGrades\n\n xlsXCoordinates = [ (\"Ø Mitarbeit\", 5),\n (\"Ø Kurztest\", 4),\n (\"Ø Klausur\", 3)]\n for i in range(len(xlsXCoordinates)):\n addGrade(xlsXCoordinates[i][0], xlsXCoordinates[i][1] )\n\n return names, allGrades"
] | [
"0.57297254",
"0.54683656",
"0.544114",
"0.5382319",
"0.53744954",
"0.5310725",
"0.5275922",
"0.52674615",
"0.5236666",
"0.5231945",
"0.5164695",
"0.51571536",
"0.51552546",
"0.5144152",
"0.5134118",
"0.5122181",
"0.51046264",
"0.51006085",
"0.5028648",
"0.50078714",
"0.50003433",
"0.49611586",
"0.49491304",
"0.4941765",
"0.49241692",
"0.49224707",
"0.49176118",
"0.4903964",
"0.4881909",
"0.48617876"
] | 0.62578756 | 0 |
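The get_horizontal_field_value document in the row above reads a label cell, optionally checks it against an expected description, then joins the non-empty cells immediately next to it. A simplified, self-contained sketch of that pattern, with a toy grid and a hypothetical get_cell_value helper standing in for the one the original relies on:

GRID = [
    ["Name", "Jane", "Doe", None],
    ["Age", 42, None, None],
]

def get_cell_value(xls, row_index, col_index):
    # Hypothetical stand-in for the helper the original function depends on.
    value = xls[row_index][col_index]
    return None if value in (None, "") else value

def horizontal_field_value(xls, row_index, description_index,
                           fields_count=1, description=None):
    # Simplified version of the pattern: the label sits in one cell and the
    # value(s) occupy the cells immediately after it.
    if description is not None:
        actual = get_cell_value(xls, row_index, description_index)
        if actual != description:
            raise ValueError(f"expected {description!r}, found {actual!r}")
    parts = []
    for offset in range(1, fields_count + 1):
        cell = get_cell_value(xls, row_index, description_index + offset)
        if cell is not None:
            parts.append(str(cell))
    return " ".join(parts) if parts else None

print(horizontal_field_value(GRID, 0, 0, fields_count=2, description="Name"))  # Jane Doe
print(horizontal_field_value(GRID, 1, 0, description="Age"))                   # 42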
The user clicked to update their favorites. This checks whether or not to remove the athlete in the session as a favorite | def update_favorites():
check_favorite = Favorite.query.filter(Favorite.favorited_item==session["athlete_id"]).first()
route = f'/athletes/{session["athlete_id"]}'
if check_favorite is None:
new_update = Favorite(id=current_user.id, favorited_item=session["athlete_id"])
db.session.add(new_update)
else:
db.session.delete(check_favorite)
db.session.commit()
return redirect(route) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_favourites(self, item_info, status):\r\n if status == \"Add\":\r\n return self.model.add_to_favourites(item_info)\r\n elif status == \"Remove\":\r\n return self.model.delete_from_favourites(item_info)",
"def favourite(self, favourite):\n\n self._favourite = favourite",
"def toggle_favorite(self, user, article, is_favoriting):\n if user not in article.favorited_by.all() and is_favoriting:\n article.favorited_by.add(user)\n if user in article.favorited_by.all() and not is_favoriting:\n article.favorited_by.remove(user)\n article.favoritesCount = article.favorited_by.all().count()\n article.save()",
"def save_to_favorites_list():\n\n #get show id from the event handler/post request\n show_id = str(request.form.get(\"id\"))\n #get button content from the event handler/post request\n button_content = request.form.get(\"button_content\")\n\n button_content_encoded = button_content.encode('utf-8')\n\n #save utf-8 encoded checkmark as a string variable\n check_mark = \"\\xe2\\x9c\\x93\"\n\n #find the current logged in user\n email = session.get(\"current_user\")\n\n if email:\n\n #use email to find the user_id\n user_id = User.find_user_id_with_email(email)\n\n #if the show has not been favorited yet\n if check_mark not in button_content_encoded:\n #add row in favorites table\n favorite = Favorite.add_to_favorites(show_id, user_id)\n\n #pass back the show_id and that the show has been favorited\n payload = {\"show_id\":show_id,\"favorite\":\"True\"}\n return jsonify(payload)\n else:\n #delete row in favorites table\n Favorite.delete_favorite(show_id)\n\n #pass back the show_id and that the show has been unfavorited\n payload = {\"show_id\":show_id,\"favorite\":\"False\"}\n return jsonify(payload)\n else:\n flash(\"You need to be logged in to see that page.\")\n return redirect(\"/login\")",
"def remove_favourite(recipe_id):\r\n if \"user\" in session:\r\n user = coll_users.find_one({\"username_lower\": session[\"user\"]})[\"_id\"]\r\n coll_users.update_one(\r\n {\"_id\": ObjectId(user)},\r\n {\"$pull\": {\"user_favs\": ObjectId(recipe_id)}})\r\n coll_recipes.update(\r\n {\"_id\": ObjectId(recipe_id)}, {\"$inc\": {\"favourites\": -1}})\r\n return redirect(url_for(\r\n \"recipes.recipe_detail\",\r\n recipe_id=recipe_id))\r\n else:\r\n flash(\"You must be logged in to perform that action!\")\r\n return redirect(url_for(\"users.login\"))",
"def favourite():\n\n # user is adding or deleting a favourite\n if request.method == \"POST\":\n\n # user is adding a station from 'stations.html'\n if request.form.get(\"add\"):\n\n # max limit of 5 favourites per user\n if len(Favourite.query.filter(Favourite.user_id==session[\"user_id\"]).all()) > 4:\n\n return redirect(url_for(\"stations\", error=\"limit\"))\n\n # remember id of station to add\n station_id = request.form.get(\"add\")\n\n # check user hasn't already favourited station\n if(Favourite.query.filter(Favourite.user_id==session[\"user_id\"],Favourite.station_id==station_id).first()):\n\n return redirect(url_for(\"stations\", error=\"taken\"))\n\n # add favourite to db for user\n addFav = Favourite(user_id=session[\"user_id\"],station_id=station_id)\n db.session.add(addFav)\n db.session.commit()\n\n return redirect(url_for(\"stations\", success=True))\n\n # user is deleting a station from 'favourites.html'\n elif request.form.get(\"delete\"):\n\n station_id = request.form.get(\"delete\")\n\n delFav = Favourite.query.filter(Favourite.user_id==session[\"user_id\"],Favourite.station_id==station_id).first()\n db.session.delete(delFav)\n db.session.commit()\n\n return redirect(url_for(\"favourite\", deleted=True))\n\n # user is viewing favourites via GET\n else:\n favourites = Favourite.query.filter(Favourite.user_id==session[\"user_id\"]).all()\n\n return render_template(\"favourites.html\", username=get_username(), favourites=favourites)",
"def remove_from_fav(request, q_id):\n if request.method == 'POST':\n Quotes.objects.remove_from_user_fav(request.session['id'], q_id)\n return redirect('/quotes')",
"def add_favourite(recipe_id):\r\n if \"user\" in session:\r\n user = coll_users.find_one(\r\n {\"username_lower\": session[\"user\"]})[\"_id\"]\r\n coll_users.update_one(\r\n {\"_id\": ObjectId(user)},\r\n {\"$push\": {\"user_favs\": ObjectId(recipe_id)}})\r\n coll_recipes.update(\r\n {\"_id\": ObjectId(recipe_id)}, {\"$inc\": {\"favourites\": 1}})\r\n return redirect(url_for(\r\n \"recipes.recipe_detail\",\r\n recipe_id=recipe_id))\r\n else:\r\n flash(\"You must be logged in to perform that action!\")\r\n return redirect(url_for(\"users.login\"))",
"def favorited(self: Article, request: Request):\n if not request.user:\n return False\n\n if self in request.user.favorites:\n return True\n\n return False",
"def post_favorite(request, pk=None):\n post = Post.objects.get(pk=pk).original_or_self()\n if post.favorites.filter(pk=request.user.pk).exists():\n post.favorites.remove(request.user)\n else:\n post.favorites.add(request.user)\n post.save()\n\n referer = request.META['HTTP_REFERER']\n if referer:\n return redirect(referer)\n else:\n return redirect('posts:post', pk=post.pk)",
"def favourite_delete(self, data, sesh):\n\n\t\t# Verify fields\n\t\ttry: DictHelper.eval(data, ['id'])\n\t\texcept ValueError as e: return Services.Effect(error=(1001, [(f, \"missing\") for f in e.args]))\n\n\t\t# Remove the thrower from the logged in thrower's favourites and return\n\t\t#\tthe result\n\t\treturn Services.Effect(\n\t\t\tFavourites.remove(sesh['thrower']['_id'], data['id'])\n\t\t)",
"def delete_favorite_food(self, user_answer_choice_id_substitute):\n self.cursor = self.data_base.cursor(MySQLCursorPrepared)\n self.cursor.execute(\"\"\"DELETE FROM Favorite where id = {}\"\"\"\n .format(int(user_answer_choice_id_substitute)))\n self.data_base.commit()",
"def add_to_fav(request, q_id):\n if request.method == 'POST':\n Quotes.objects.add_to_user_fav(request.session['id'], q_id)\n return redirect('/quotes')",
"def favorite(self, favorite: bool):\n if favorite is None:\n raise ValueError(\"Invalid value for `favorite`, must not be `None`\")\n\n self._favorite = favorite",
"def mark_favorite(request, object_id):\n feed_item = get_object_or_404(FeedItem, id=object_id)\n fav_item, is_new = FavoriteItem.objects.get_or_create(feed_item=feed_item)\n if request.is_ajax():\n return JSONResponse({'status': 'ok', 'text': 'Marked as favorite'}, False)\n return redirect(request.META.get('HTTP_REFERER', 'feed_item_list'))",
"def remove_from_fav(request, favorite_id):\n # Gets a favorite designated by favorite_id or returns 404\n favorite = get_object_or_404(Favorite, pk=favorite_id)\n favorite.delete()\n\n print(\"{}, {} a été supprimé des favoris\".format(\n favorite.products.name, favorite.products.brand))\n\n return redirect(request.META['HTTP_REFERER'])",
"def unmark_favorite(request, object_id):\n fav_item = get_object_or_404(FavoriteItem, feed_item__id=object_id)\n fav_item.delete()\n if request.is_ajax():\n return JSONResponse({'status': 'ok', 'text': 'Unmarked favorite'}, False)\n return redirect(request.META.get('HTTP_REFERER', 'feed_item_list'))",
"def unfavorite(self, item):\n self._createAction(item, \"unfavorite\")",
"def favorite(self) -> bool:\n return self._favorite",
"def favorite(request, movie_id):\n\n movie = get_object_or_404(Movie, pk=movie_id)\n try:\n if movie.liked:\n movie.liked = False\n else:\n movie.liked = True\n movie.save()\n except (KeyError, Movie.DoesNotExist):\n return JsonResponse({'success': False})\n else:\n return JsonResponse({'success': True})",
"def rm_from_fav(show_id, name):\n\n db = get_db()\n db.execute(\n 'DELETE FROM shows_users WHERE show_id = ? and user_id = ?',\n (show_id, session['user_id'])\n )\n\n flash('\\\"%s\\\" has been successfully removed from your favourite TV Shows!' % name)\n db.commit()\n return redirect(request.referrer)",
"def delete_favorite(request):\n company_id = request.data.get('id')\n company = Company.objects.get(id=company_id)\n\n request.user.profile.companies.remove(company)\n return Response({'favorite': False})",
"def favorite(self):\n url = \"https://api.imgur.com/3/image/{0}/favorite\".format(self.id)\n return self._imgur._send_request(url, needs_auth=True, method='POST')",
"def get_favorite(self):\n\n\t\treturn self.__favorite",
"def favorite(self):\n url = \"https://api.imgur.com/3/album/{0}/favorite\".format(self.id)\n return self._imgur._send_request(url, needs_auth=True, method=\"POST\")",
"def delete(self, request, *args, **kwargs):\n favorites = self.get_object()\n favorites_list = favorites.anuncios.all()\n if favorites_list:\n for favorite in favorites_list:\n favorites.anuncios.remove(favorite)\n msg_success = 'Se han eliminado todos los anuncios de favoritos.'\n messages.success(request, msg_success)\n else:\n msg_info = 'No hay favoritos para eliminar'\n messages.info(request, msg_info)\n return HttpResponseRedirect(self.get_success_url())",
"def update_favorite_things():\n data = request.data\n favorite_things = json.loads(data)\n print(favorite_things)\n connection = mongo_connect()\n if(favorite_things[\"action\"] == \"add\"):\n connection[\"users\"].update_one(\n {\"_id\": ObjectId(session[\"_id\"])},\n {\"$push\": {\n favorite_things[\"type\"]: ObjectId(favorite_things[\"item_id\"])\n }\n }\n )\n else:\n connection[\"users\"].update_one(\n {\"_id\": ObjectId(session[\"_id\"])},\n {\"$pull\":\n {\n favorite_things[\"type\"]:\n ObjectId(favorite_things[\"item_id\"])\n }\n }\n )\n resp = jsonify(success=True)\n return resp",
"async def _timein_delete(self, *, favourite_name):\n\t\t\n\t\tif favourite_name not in self.cache:\n\t\t\tawait self.bot.say(\"Favourite with that name does not exist\")\n\t\t\treturn\n\t\t\t\n\t\tself.favourites.pop(favourite_name, None)\n\t\tself.cache.pop(favourite_name, None)\n\t\t\n\t\tf = \"data/timein/cache.json\"\n\t\tdataIO.save_json(f, self.cache)\n\t\tf = \"data/timein/favourites.json\"\n\t\tdataIO.save_json(f, self.favourites)\n\t\t\n\t\tawait self.bot.say(\"Favourite \\\"\" + favourite_name + \"\\\" deleted\")",
"def cheer(self, songs):\n if self.favourite_song in songs:\n return \"Whoo!\"",
"def test_display_favorite(self):\n\n result = self.client.get(\"/view_favorites\")\n self.assertIn(b\"s1925148\", result.data)"
] | [
"0.69176424",
"0.688864",
"0.68283784",
"0.66788083",
"0.6618724",
"0.65838104",
"0.64054716",
"0.62992626",
"0.6211721",
"0.6205134",
"0.61849916",
"0.61616564",
"0.6160226",
"0.60770786",
"0.60408217",
"0.60285735",
"0.6025413",
"0.60150605",
"0.60038364",
"0.59932923",
"0.5900618",
"0.58442664",
"0.5835583",
"0.5826513",
"0.5825047",
"0.58093995",
"0.58006513",
"0.57760805",
"0.57739866",
"0.57455295"
] | 0.7853664 | 0 |
Adds hopping conjugates to self.dict. | def add_conjugates(self):
# declare new dict
self.new_dict = copy.deepcopy(self.dict)
# iterate over items
for i in range(len(self.dict)):
for rel_tag, hopping in self.dict[i].items():
x, y, z, j = rel_tag
reverse_tag = (-x, -y, -z, i)
reverse_hopping = np.conjugate(np.transpose(hopping))
if reverse_tag not in self.new_dict[j]:
self.new_dict[j][reverse_tag] = reverse_hopping
# done
self.dict = self.new_dict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def buildDict(self, words):\n for word in words:\n self.word_set.add(word)\n for candidate in self.candidates(word):\n self.neighbors[candidate] += 1",
"def makeGraphDictionary(self):\n graph_dict_incomplete = {}\n # dictionary contains all links, no matter if they are functional\n for i in range(0, len(self._partner_indices)):\n graph_dict_incomplete[i] = set(self._partner_indices[i])\n if self._variant[0] == \"V0_instant\":\n self.graph_dict = graph_dict_incomplete\n else:\n # helper\n link_list = []\n link_list2 = []\n for vertex in graph_dict_incomplete:\n self.setKeyDictionary(dictionary=self.graph_dict,\n key=vertex,\n value=set())\n for neighbour in graph_dict_incomplete[vertex]:\n # Iterate through all plants and the neighbours\n # If a new pair occurs it will be appended in link_list2\n # If the pair occurs again it wll be appended in link_list\n # This means that the link (or rgf process) is finished\n # for both plants\n if {neighbour, vertex} not in link_list2:\n link_list2.append({vertex, neighbour})\n else:\n # plants are only put in the dict. if they occur more\n # than once, i.e. both partners have finished rgf\n link_list.append({vertex, neighbour})\n self.setKeyDictionary(dictionary=self.graph_dict,\n key=vertex,\n value=neighbour)",
"def build_adj_dict(self, Set):\n \t\tif len(Set):\n\n \t\t\tfor bubble in Set:\t\n \t\t\t\t\n \t\t\t\tdistance = dist(bubble.Bubble_last_pos, self.Bubble_last_pos)\n \t\t\t\tif distance <= (bubble.Bubble_radius * 1.2 + self.Bubble_radius * 1.2):\n \t\t\t\t\t# add edge between new bubble and existing bubble\n \t\t\t\t\tif bubble.color not in self.adj_dict.keys():\n\t\t\t\t\t\tself.adj_dict.setdefault(bubble.color,[]).append(bubble)\n\t\t\t\t\telse:\n\t\t\t\t\t\tif bubble not in self.adj_dict[bubble.color]:\n\t\t\t\t\t\t\tself.adj_dict[bubble.color].append(bubble)\n\n\t\t\t\t\t\n\t\t\t\t\tif self.color not in bubble.adj_dict.keys(): \n\t\t\t\t\t\tbubble.adj_dict.setdefault(self.color,[]).append(self)\n\t\t\t\t\telse:\n\t\t\t\t\t\tif self not in bubble.adj_dict[self.color]:\n\t\t\t\t\t\t\tbubble.adj_dict[self.color].append(self)\n\t\t\t\t\t\n\t\t\n\t\treturn self.adj_dict",
"def augment(self):\n n1 = { 'edges': [ self.next_insert['pred'], self.next_insert ], 'pred': self.next_insert['pred'] }\n n2 = { 'edges': [ n1, self.next_insert ], 'pred': n1 }\n self.next_insert['pred'] = n2\n self.next_insert = n2\n self.nodect += 2",
"def appendBropts(self, key, value):\n # type: (str, tp.Any)->None\n new_value = value\n if key in self._ifAttributes['bridge-opts']:\n new_value = self._ifAttributes['bridge-opts'][key] + value\n self.replaceBropt(key, new_value)",
"def add(counts):\n if counts:\n for k in grammar.keys():\n grammar[k] = grammar[k] + counts[k]",
"def buildDict(self, dict):\n for word in dict:\n self.add(word)",
"def buildDict(self, dict):\n for word in dict:\n self.s.add(word)\n self.length_set = set([len(word) for word in dict])",
"def copy_forward_mapping(self) -> Dict[str, Set[str]]:\n return deepcopy(self._forward_mapping)",
"def insert(self, word):\r\n t = self.trie\r\n \r\n for w in word: \r\n if w not in t: \r\n t[w] = {}\r\n t = t[w]\r\n t['#'] = True\r\n print(self.trie)",
"def buildDict(self, dict):\n self.all_words = set(dict)\n self.wc_dict = collections.defaultdict(int)\n for w in dict:\n for wc in self.get_wildcards(w):\n self.wc_dict[wc] += 1",
"def buildDict(self, words):\n self.dict = collections.defaultdict(set)\n for word in words:\n for i in xrange(len(word)):\n self.dict[word[:i] + '*' + word[i+1:]].add(word[i])",
"def add_ngrams(mydict,sentence):\n ngrams = get_ngrams(sentence,2,3)\n for ngram in ngrams:\n if ngram in mydict:\n mydict[ngram]+=1\n else:\n mydict[ngram]=1\n return mydict",
"def addWord(self, word: str) -> None:\n curr = self.trie\n for char in word:\n if char not in curr:\n curr[char] = {}\n curr = curr[char]\n curr['$'] = {}",
"def buildDict(self, words):\n for word in words:\n length = len(word)\n key = \"{}/{}\".format(length, word[0])\n ls = self.origin.get(key, [])\n ls.append(word)\n self.origin[key] = ls",
"def _add_dictionary(self, current, added):\n for key in added:\n if key in current and isinstance(current[key], collections.Mapping):\n self._add_dictionary(current[key], added[key])\n else:\n current[key] = added[key]",
"def make_chains(corpus):\n c_dict = {}\n\n for x in range(len(corpus)):\n if x < (len(corpus)-2): # not in edge\n bigram_tuple = tuple([corpus[x],corpus[x+1]])\n if bigram_tuple in c_dict:\n c_dict[bigram_tuple].append(corpus[x+2])\n else:\n c_dict[bigram_tuple] = [corpus[x+2]]\n else:\n bigram_tuple = tuple([corpus[-2],corpus[-1]]) # ran twice. Why?\n c_dict.setdefault(bigram_tuple) # could set a default word? Empty list?\n\n return c_dict",
"def addWord(self, word: str) -> None:\n trie = self.trie\n for c in word:\n if c not in trie:\n trie[c] = dict()\n trie = trie[c]\n trie['#'] = '#'",
"def intents_clustering(self):\n self.phrs2intents = {}\n number_of_other = 10000;\n for i in range(len(self.data)):\n for ut in self.data[i]['utterances']:\n if ut['speaker'] == 'USER':\n if 'segments' in ut.keys():\n for seg in ut['segments']:\n if 'annotations' in seg.keys():\n for anno in seg['annotations']:\n name = anno['name']\n if ut['text'] not in self.phrs2intents.keys():\n self.phrs2intents[ ut['text'] ] = [name]\n elif name not in self.phrs2intents[ ut['text'] ]:\n self.phrs2intents[ ut['text'] ].append(name)\n else:\n if number_of_other > 0:\n self.phrs2intents[ ut['text'] ] = ['other']\n number_of_other -= 1\n self.X = np.array(list(self.phrs2intents.keys()))",
"def _create_connections(self):\n self.predecessors = {}\n self.successors = {}\n for nd in self.nodes:\n self.predecessors[nd.name] = []\n self.successors[nd.name] = []\n\n for (nd_out, nd_in) in self.edges:\n self.predecessors[nd_in.name].append(nd_out)\n self.successors[nd_out.name].append(nd_in)",
"def create_tunnel(cave_from, cave_to):\n caves[cave_from].append(cave_to)\n caves[cave_to].append(cave_from)",
"def update_adj_dict(self,word2,same_line):\n if word2 in self.adj_dict:\n if same_line:\n self.adj_dict[word2] = (self.adj_dict[word2][0]+1,\n self.adj_dict[word2][1])\n else:\n self.adj_dict[word2] = (self.adj_dict[word2][0], \n self.adj_dict[word2][1]+1)\n else:\n if same_line: \n self.adj_dict[word2] = (1, 0)\n else:\n self.adj_dict[word2] = (0, 1)",
"def addWord(self, word: 'str') -> 'None':\n p=self.dictword\n for s in word:\n if s not in p:\n p[s]={}\n p=p[s]\n else:\n p=p[s]\n p['#']=None",
"def _set_hop(self, v, load=False):\n try:\n t = YANGDynClass(v,base=YANGListType(\"hop_id\",yc_hop_pyangbind_example__input_LocatorRecord_rloc_explicit_locator_path_hop, yang_name=\"hop\", parent=self, is_container='list', user_ordered=True, path_helper=self._path_helper), is_container='list', yang_name=\"hop\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True)\n except (TypeError, ValueError):\n raise ValueError(\"\"\"hop must be of a type compatible with base=YANGListType(\"hop_id\",yc_hop_pyangbind_example__input_LocatorRecord_rloc_explicit_locator_path_hop, yang_name=\"hop\", parent=self, is_container='list', user_ordered=True, path_helper=self._path_helper), is_container='list', yang_name=\"hop\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True\"\"\")\n self.__hop = t\n if hasattr(self, '_set'):\n self._set()",
"def add_children_to_parents(self, mutated_pop_dict, mating_pop_dict):\n\n print('Combining parent and child generations')\n\n merged_networks_dict = OrderedDict()\n\n for id, G in mutated_pop_dict.items():\n new_id = ''.join(\n [random.choice(string.ascii_letters + string.digits)\n for i in range(10)]\n )\n merged_networks_dict[new_id] = copy.deepcopy(G)\n for id, G in mating_pop_dict.items():\n merged_networks_dict[id] = copy.deepcopy(G)\n\n return merged_networks_dict",
"def _add_state(self, prefix):\n for i in range(len(self.states)):\n self.states[i] = prefix + self.states[i]\n\n self.q_0 = prefix + self.q_0\n\n for i in range(len(self.final)):\n self.final[i] = prefix + self.final[i]\n\n keys = list(self.transition.keys())\n for key in keys:\n new_key = prefix + key\n self.transition[new_key] = []\n for i in range(len(self.transition[key])):\n self.transition[new_key].append(prefix + self.transition[key][i])\n del self.transition[key]",
"def insert(self, word: str) -> None:\n curr_chars = self.chars\n for c in list(word):\n if c not in curr_chars:\n curr_chars[c] = {}\n curr_chars = curr_chars[c]\n\n curr_chars[self.end_of_word] = self.end_of_word",
"def add_vertex(self, v):\n self[v] = {}",
"def add_vertex(self, v):\n self[v] = {}",
"def _set_hop_id(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n try:\n t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name=\"hop-id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True)\n except (TypeError, ValueError):\n raise ValueError(\"\"\"hop_id must be of a type compatible with base=unicode, is_leaf=True, yang_name=\"hop-id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True\"\"\")\n self.__hop_id = t\n if hasattr(self, '_set'):\n self._set()"
] | [
"0.5047579",
"0.5037985",
"0.502265",
"0.5007653",
"0.49975562",
"0.49723896",
"0.49697816",
"0.48349583",
"0.48268276",
"0.48259962",
"0.48032284",
"0.47840226",
"0.4778364",
"0.47758386",
"0.47497863",
"0.4746505",
"0.47428873",
"0.4741161",
"0.47241557",
"0.47111377",
"0.4697564",
"0.46961874",
"0.46941915",
"0.46901202",
"0.46575382",
"0.46238127",
"0.46138385",
"0.460321",
"0.460321",
"0.458946"
] | 0.65367895 | 0 |
Shift input ids one token to the right, and wrap the last non-pad token (usually <eos>). | def shift_tokens_right(self, input_ids, pad_token_id):
prev_output_tokens = input_ids.clone()
index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
prev_output_tokens[:, 0] = input_ids.gather(1, index_of_eos).squeeze()
prev_output_tokens[:, 1:] = input_ids[:, :-1]
return prev_output_tokens | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def shift_tokens_right(input_ids, pad_token_id):\r\n prev_output_tokens = input_ids.clone()\r\n index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)\r\n prev_output_tokens[:, 0] = input_ids.gather(1, index_of_eos).squeeze()\r\n prev_output_tokens[:, 1:] = input_ids[:, :-1]\r\n return prev_output_tokens",
"def shift_tokens_right(self,input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):\n shifted_input_ids = input_ids.new_zeros(input_ids.shape)\n shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()\n shifted_input_ids[:, 0] = decoder_start_token_id\n\n assert pad_token_id is not None, \"self.model.config.pad_token_id has to be defined.\"\n # replace possible -100 values in labels by `pad_token_id`\n shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)\n\n return shifted_input_ids",
"def shift_tokens_right(input_ids: jnp.array, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:\n shifted_input_ids = jnp.zeros_like(input_ids)\n shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])\n shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)\n\n shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)\n return shifted_input_ids",
"def pad_tokens(x, max_length, pad_token_id,\n truncate_from=\"left\",\n pad_from=\"left\"):\n assert truncate_from in (\"left\", \"right\")\n assert pad_from in (\"left\", \"right\")\n if len(x) > max_length:\n if truncate_from == \"left\":\n return x[-max_length:]\n else:\n return x[:max_length]\n elif len(x) < max_length:\n padding = [pad_token_id] * (max_length - len(x))\n if pad_from == \"left\":\n return padding + x\n else:\n return x + padding\n else:\n return x",
"def shift_right(input, pad=2):\n return tf.concat((tf.ones_like(input[:, :1]) * pad, input[:, :-1]), 1)",
"def align_tokens(tokens, target_token_index):\n seq_len = self.seq_len - 2\n if len(tokens) > seq_len:\n start_index = max(0, int(target_token_index - seq_len / 2 + middle_shift))\n start_index = min(start_index, len(tokens) - seq_len)\n while tokens[start_index].startswith('#') and start_index + seq_len > target_token_index + 1:\n start_index -= 1\n start_index = max(0, start_index)\n tokens = tokens[start_index : start_index + seq_len]\n target_token_index -= start_index\n tokens = ['[CLS]', ] + tokens + ['[SEP]', ]\n target_token_index += 1\n return tokens, target_token_index",
"def wrap_pad(input, size):\n M1 = tf.concat([input[:, :, -size[1]:, :], input, input[:, :, 0:size[1], :]], 2)\n M1 = tf.concat([M1[:, -size[0]:, :, :], M1, M1[:, 0:size[0], :, :]], 1)\n return M1",
"def unpad(plain):\n return plain[:-ord(plain[-1])]",
"def truncate_sequences(\n self,\n ids: List[int],\n token_boxes: List[List[int]],\n pair_ids: Optional[List[int]] = None,\n pair_token_boxes: Optional[List[List[int]]] = None,\n labels: Optional[List[int]] = None,\n num_tokens_to_remove: int = 0,\n truncation_strategy: Union[str, TruncationStrategy] = \"longest_first\",\n stride: int = 0,\n ) -> Tuple[List[int], List[int], List[int]]:\n if num_tokens_to_remove <= 0:\n return ids, token_boxes, pair_ids, pair_token_boxes, labels, [], [], []\n\n if not isinstance(truncation_strategy, TruncationStrategy):\n truncation_strategy = TruncationStrategy(truncation_strategy)\n\n overflowing_tokens = []\n overflowing_token_boxes = []\n overflowing_labels = []\n if truncation_strategy == TruncationStrategy.LONGEST_FIRST:\n for _ in range(num_tokens_to_remove):\n if pair_ids is None or len(ids) > len(pair_ids):\n if not overflowing_tokens:\n window_len = min(len(ids), stride + 1)\n else:\n window_len = 1\n overflowing_tokens.extend(ids[-window_len:])\n overflowing_token_boxes.extend(token_boxes[-window_len:])\n overflowing_labels.extend(labels[-window_len:])\n ids = ids[:-1]\n token_boxes = token_boxes[:-1]\n labels = labels[:-1]\n else:\n if not overflowing_tokens:\n window_len = min(len(pair_ids), stride + 1)\n else:\n window_len = 1\n overflowing_tokens.extend(pair_ids[-window_len:])\n overflowing_token_boxes.extend(pair_token_boxes[-window_len:])\n pair_ids = pair_ids[:-1]\n pair_token_boxes = pair_token_boxes[:-1]\n elif truncation_strategy == TruncationStrategy.ONLY_FIRST:\n if len(ids) > num_tokens_to_remove:\n window_len = min(len(ids), stride + num_tokens_to_remove)\n overflowing_tokens = ids[-window_len:]\n overflowing_token_boxes = token_boxes[-window_len:]\n overflowing_labels = labels[-window_len:]\n ids = ids[:-num_tokens_to_remove]\n token_boxes = token_boxes[:-num_tokens_to_remove]\n labels = labels[:-num_tokens_to_remove]\n else:\n logger.error(\n f\"We need to remove {num_tokens_to_remove} to truncate the input \"\n f\"but the first sequence has a length {len(ids)}. \"\n f\"Please select another truncation strategy than {truncation_strategy}, \"\n \"for instance 'longest_first' or 'only_second'.\"\n )\n elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None:\n if len(pair_ids) > num_tokens_to_remove:\n window_len = min(len(pair_ids), stride + num_tokens_to_remove)\n overflowing_tokens = pair_ids[-window_len:]\n overflowing_token_boxes = pair_token_boxes[-window_len:]\n pair_ids = pair_ids[:-num_tokens_to_remove]\n pair_token_boxes = pair_token_boxes[:-num_tokens_to_remove]\n else:\n logger.error(\n f\"We need to remove {num_tokens_to_remove} to truncate the input \"\n f\"but the second sequence has a length {len(pair_ids)}. \"\n f\"Please select another truncation strategy than {truncation_strategy}, \"\n \"for instance 'longest_first' or 'only_first'.\"\n )\n\n return (\n ids,\n token_boxes,\n pair_ids,\n pair_token_boxes,\n labels,\n overflowing_tokens,\n overflowing_token_boxes,\n overflowing_labels,\n )",
"def truncate_pad(line, num_steps, padding_token):\n if len(line) > num_steps:\n return line[:num_steps] # Truncate\n return line + [padding_token] * (num_steps - len(line)) # Pad",
"def truncate_pad(line, num_steps, padding_token):\n if len(line) > num_steps:\n return line[:num_steps] # Truncate\n return line + [padding_token] * (num_steps - len(line)) # Pad",
"def truncate_pad(line, num_steps, padding_token):\n if len(line) > num_steps:\n return line[:num_steps] # Truncate\n return line + [padding_token] * (num_steps - len(line)) # Pad",
"def pad_decoder_inp_targ(self, max_len, pad_id):\n\t\twhile len(self.dec_input) < max_len:\n\t\t\tself.dec_input.append(pad_id)\n\t\twhile len(self.target) < max_len:\n\t\t\tself.target.append(pad_id)",
"def shift(self, t, word):\n return t[1:] + (word,)",
"def pad_batch(self, ids):\r\n max_len = max([len(x) for x in ids])\r\n return [x + [0] * (max_len - len(x)) for x in ids]",
"def shift(t, word):\n return t[1:] + (word,)",
"def pad_query_input(self, max_len, pad_id):\n\t\twhile len(self.query_input) < max_len:\n\t\t\tself.query_input.append(pad_id)",
"def pad_encoder_input(self, max_len, pad_id):\n while len(self.enc_input) < max_len:\n self.enc_input.append(pad_id)",
"def collate_tokens(\n values,\n pad_idx,\n eos_idx=None,\n left_pad=False,\n move_eos_to_beginning=False,\n pad_to_length=None,\n pad_to_multiple=1,\n):\n size = max(v.size(0) for v in values)\n size = size if pad_to_length is None else max(size, pad_to_length)\n if pad_to_multiple != 1 and size % pad_to_multiple != 0:\n size = int(((size - 0.1) // pad_to_multiple + 1) * pad_to_multiple)\n res = values[0].new(len(values), size).fill_(pad_idx)\n\n def copy_tensor(src, dst):\n assert dst.numel() == src.numel()\n if move_eos_to_beginning:\n if eos_idx is None:\n # if no eos_idx is specified, then use the last token in src\n dst[0] = src[-1]\n else:\n dst[0] = eos_idx\n dst[1:] = src[:-1]\n else:\n dst.copy_(src)\n\n for i, v in enumerate(values):\n copy_tensor(v, res[i][size - len(v):] if left_pad else res[i][:len(v)])\n return res",
"def _dynamic_padding(self, batch_data, pad_id):\n pad_p_len = min(self.max_p_len, max(batch_data['passage_length']))\n pad_q_len = min(self.max_q_len, max(batch_data['question_length']))\n batch_data['passage_token_ids'] = [(ids + [pad_id] * (pad_p_len - len(ids)))[: pad_p_len]\n for ids in batch_data['passage_token_ids']]\n batch_data['question_token_ids'] = [(ids + [pad_id] * (pad_q_len - len(ids)))[: pad_q_len]\n for ids in batch_data['question_token_ids']]\n return batch_data, pad_p_len, pad_q_len",
"def truncate_sequences(self,\n ids,\n pair_ids=None,\n num_tokens_to_remove=0,\n truncation_strategy='longest_first',\n stride=0):\n if num_tokens_to_remove <= 0:\n return ids, pair_ids, []\n\n if truncation_strategy == 'longest_first':\n overflowing_tokens = []\n if pair_ids is None or len(ids) <= len(pair_ids):\n for _ in range(num_tokens_to_remove):\n if pair_ids is None or len(ids) >= len(pair_ids):\n overflowing_tokens = [ids[-1]] + overflowing_tokens\n ids = ids[:-1]\n else:\n pair_ids = pair_ids[:-1]\n window_len = min(len(ids), stride)\n else:\n for _ in range(num_tokens_to_remove):\n if pair_ids is None or len(ids) > len(pair_ids):\n overflowing_tokens = [ids[-1]] + overflowing_tokens\n ids = ids[:-1]\n else:\n pair_ids = pair_ids[:-1]\n window_len = min(len(ids), stride)\n if window_len > 0:\n overflowing_tokens = ids[-window_len:] + overflowing_tokens\n elif truncation_strategy == 'only_first':\n assert len(ids) > num_tokens_to_remove\n window_len = min(len(ids), stride + num_tokens_to_remove)\n overflowing_tokens = ids[-window_len:]\n ids = ids[:-num_tokens_to_remove]\n elif truncation_strategy == 'only_second':\n assert pair_ids is not None and len(pair_ids) > num_tokens_to_remove\n window_len = min(len(pair_ids), stride + num_tokens_to_remove)\n overflowing_tokens = pair_ids[-window_len:]\n pair_ids = pair_ids[:-num_tokens_to_remove]\n elif truncation_strategy == 'do_not_truncate':\n raise ValueError(\n \"Input sequence are too long for max_length. Please select a truncation strategy.\"\n )\n else:\n raise ValueError(\n \"Truncation_strategy should be selected in ['longest_first', 'only_first', 'only_second', 'do_not_truncate']\"\n )\n return (ids, pair_ids, overflowing_tokens)",
"def trim_input_ids(input_ids: torch.tensor, pad_token_id, mask_token_id, num_masks: int):\n assert input_ids.shape[0] == 1\n input_ids_without_pad = [x for x in input_ids[0] if x != pad_token_id]\n\n trimmed_input_ids = []\n mask_count = 0\n for input_id in input_ids_without_pad:\n if input_id == mask_token_id:\n if mask_count >= num_masks:\n continue\n mask_count += 1\n trimmed_input_ids.append(input_id)\n\n return torch.tensor([trimmed_input_ids], dtype=torch.long, device=input_ids.device)",
"def build_inputs_with_special_tokens(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None\n ) -> List[int]:\n\n if token_ids_1 is None:\n return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n cls = [self.cls_token_id]\n sep = [self.sep_token_id]\n return cls + token_ids_0 + sep + sep + token_ids_1 + sep",
"def normalise_tag_id(input_id):\n return input_id.replace(\" \", \"\").replace(\"\\n\", \"\").replace(\"\\r\", \"\")",
"def pad_encoder_input(self, max_sen_len, pad_id):\n while len(self.enc_input) < max_sen_len:\n self.enc_input.append(pad_id)",
"def collate_tokens(values, pad_idx, eos_idx, left_pad, move_eos_to_beginning=False, pad_sequence=1):\n #size = max(v.size(0) for v in values)\n orig_size = max(v.size(0) for v in values)\n size = 0\n if pad_sequence > 1:\n size = orig_size // pad_sequence * pad_sequence\n if orig_size % pad_sequence > 0:\n size += pad_sequence\n else:\n size = orig_size\n res = values[0].new(len(values), size).fill_(pad_idx)\n\n def copy_tensor(src, dst):\n assert dst.numel() == src.numel()\n if move_eos_to_beginning:\n assert src[-1] == eos_idx\n dst[0] = eos_idx\n dst[1:] = src[:-1]\n else:\n dst.copy_(src)\n\n for i, v in enumerate(values):\n copy_tensor(v, res[i][size - len(v):] if left_pad else res[i][:len(v)])\n return res",
"def cn_whole_word_mask(input_tokens, ref_ids):\n\n for i in range(len(input_tokens)):\n if i in ref_ids:\n # We move it back by -1 as the ref_ids start at 1, not 0\n input_tokens[i-1] = \"##\" + input_tokens[i-1]\n\n input_tokens = _whole_word_mask(input_tokens)\n\n return input_tokens",
"def pad_seq(seq, max_seq_len=0):\n if max_seq_len:\n pad_len = max_seq_len - len(seq)\n if pad_len > 0:\n return np.concatenate([seq, np.zeros(pad_len, dtype=np.int64)])\n elif pad_len < 0: # chop to fit\n two_last_tokens = seq[-2:]\n out = seq[:max_seq_len]\n out[-2:] = two_last_tokens\n return out.astype(np.int64)\n return seq.astype(np.int64)",
"def full_tokens(tokens):\n target_length = roundup2(len(tokens))\n padding_length = target_length - len(tokens)\n tokens = [PADDING_TOKEN] * padding_length + tokens\n return tokens",
"def shift_right(self):\n self.pointer = (self.pointer + 1) % len(self.data)"
] | [
"0.8174382",
"0.7355779",
"0.730172",
"0.62766033",
"0.5987434",
"0.59027076",
"0.5735923",
"0.5567176",
"0.5503809",
"0.54715633",
"0.54715633",
"0.54715633",
"0.5460714",
"0.54170334",
"0.5338661",
"0.53169227",
"0.52875674",
"0.52819985",
"0.5207631",
"0.51908386",
"0.5179993",
"0.5175452",
"0.5173635",
"0.51706654",
"0.5155161",
"0.5131358",
"0.51284117",
"0.50838935",
"0.5081181",
"0.50639814"
] | 0.81405616 | 1 |
Visualizes in a pyplot window an image and a label pair from provided paths. For reading files, Pillow is used, so all paths and formats must be Pillow-compatible. The task definition is used to define colors for label ids (see panoptic_parts/utils/defs/template_v1.0.yaml). | def visualize_from_paths(image_path, label_path, task_def_path):
# sid2color is a mapping from all possible sids to colors
with open(task_def_path) as fp:
task_def = yaml.load(fp, Loader=yaml.Loader)
sid2color = task_def['sid2color']
# add colors for all sids that may exist in labels, but don't have a color from task_def
sid2color.update({sid: sid2color[-1] # we use the void color here
for sid in range(task_def['max_sid'])
if not (sid in task_def['valid_sids'] or sid in sid2color)})
# reduce resolution for faster execution
image = Image.open(image_path)
label = Image.open(label_path)
uids = np.array(label, dtype=np.int32)
# optionally transform parts ids
# here we trasform the pids from the original dataset to another set of pids according
# to sid2pids_groups, where parts for some scene-level semantic classes are grouped
# TODO(panos): consider moving this functionality to colorize_label
if 'sid2pids_groups' in task_def.keys():
uids = _transform_uids(uids, task_def['max_sid'], task_def['sid2pids_groups'])
# create the colorized label images
uids_sem_inst_parts_colored, uids_sem_colored, uids_sem_inst_colored = \
experimental_colorize_label(uids,
sid2color=sid2color,
return_sem=True,
return_sem_inst=True,
emphasize_instance_boundaries=True)
# plot
# initialize figure for plotting
_, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2)
# for ax in axes:
# ax.set_axis_off()
ax1.imshow(image)
ax1.set_title('image')
ax2.imshow(uids_sem_colored)
ax2.set_title('labels colored on semantic level')
ax3.imshow(uids_sem_inst_colored)
ax3.set_title('labels colored on semantic and instance levels')
ax4.imshow(uids_sem_inst_parts_colored)
ax4.set_title('labels colored on semantic, instance, and parts levels')
plt.show() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_oneshot_task(pairs):\n fig,(ax1,ax2) = plt.subplots(2)\n ax1.matshow(pairs[0][0].reshape(300,300),cmap='gray')\n img = concat_images(pairs[1])\n ax1.get_yaxis().set_visible(False)\n ax1.get_xaxis().set_visible(False)\n ax2.matshow(img,cmap='gray')\n plt.xticks([])\n plt.yticks([])\n plt.show()",
"def run_path_visualisation(paths, config, modulesConfig):\n all_targets = [os.path.basename(config[s][\"target\"]) for s in config.sections]\n all_target_tasks = {os.path.basename(config[s][\"target\"]):s for s in config.sections}\n \n added_tasks = []\n prepared_paths = []\n for path in paths:\n prepared_tasks = []\n for idx, task in enumerate(list(reversed(path))):\n s_module, s_name, *identifier = task.split(\" \")\n\n # Special Rule For Join Module To Have A Connection To Another Module\n special_connection = False\n if s_module == \"processing_join\":\n args = config[task]\n con_module, con_name, *identifier = all_target_tasks.get(os.path.basename(args[\"joinwith\"]), s_module+\"_SPECIAL \"+s_name+\"_SPECIAL\").split(\" \")\n special_connection = {\n \"connection_to_module\" : con_module,\n \"connection_to_name\" : con_name,\n \"will_be_created\" : (os.path.basename(args[\"joinwith\"]) in all_targets)\n }\n\n prepared_tasks.append({\n 'module':s_module,\n 'name':s_name,\n 'display': (task not in added_tasks),\n 'specialConnection': special_connection,\n 'last': (idx == len(path) - 1),\n 'attributes': config[task]\n })\n added_tasks.append(task)\n prepared_paths.append(prepared_tasks)\n logger.debug(\"Path prepared for visualization!\")\n render_path_visualisation(config['projectRoot'], config['projectName'], prepared_paths)",
"def plot12(self, dataset, ts_string_indices, source_jpg_folder='jpg_images', extension='jpg', rows=3, cols=4,\n outfname='Sample Frames.png', cmap=None, gui_color='green'):\n # Settings ############################################################\n font_label_box = {\n 'color': 'green',\n 'size': 16,\n }\n font_steering = {'family': 'monospace',\n # 'color': 'darkred',\n 'weight': 'normal',\n 'size': 20,\n }\n ROWS = rows\n COLS = cols\n NUM_IMAGES = ROWS * COLS\n\n # Figure ##############################################################\n # figsize = [width, height]\n fig = plt.figure(figsize=PAPER_A3_LAND, facecolor='white')\n fig.suptitle(\"Sample frames, Dataset: {}\".format(dataset.data_folder), fontsize=20)\n\n for i, ts_string_index in enumerate(ts_string_indices):\n rec = dataset.df.loc[ts_string_index]\n\n timestamp_string = rec['datetime'].strftime(\"%D %H:%M:%S.\") + \"{:.2}\".format(\n str(rec['datetime'].microsecond))\n\n if 'steering_pred_signal' in dataset.df.columns:\n this_label = \"{}\\n{:0.2f}/{:0.2f} steering \\n{:0.2f} throttle\".format(timestamp_string,\n rec['steering_signal'],\n rec['steering_pred_signal'],\n rec['throttle_signal'])\n else:\n this_label = \"{}\\n{:0.2f}/ steering \\n{:0.2f} throttle\".format(timestamp_string, rec['steering_signal'],\n rec['throttle_signal'])\n\n ax = fig.add_subplot(ROWS, COLS, i + 1)\n\n # Main Image ##########################################################\n jpg_path = os.path.join(dataset.path_dataset, source_jpg_folder, ts_string_index + '.' + extension)\n assert os.path.exists(jpg_path), \"{} does not exist\".format(jpg_path)\n img = mpl.image.imread(jpg_path)\n ax.imshow(img, cmap=cmap)\n # plt.title(str_label)\n\n # Data box ########################################################\n\n # ax.axes.get_xaxis().set_visible(False)\n # ax.axes.get_yaxis().set_visible(False)\n t = ax.text(5, 25, this_label, color=gui_color, alpha=1)\n # t = plt.text(0.5, 0.5, 'text', transform=ax.transAxes, fontsize=30)\n t.set_bbox(dict(facecolor='white', alpha=0.7, edgecolor='none'))\n\n # Steering widget HUD #################################################\n # Steering HUD: Actual steering signal\n steer_actual = ''.join(['|' if v else '-' for v in dataset.linear_bin(rec['steering_signal'])])\n text_steer = ax.text(80, 105, steer_actual, fontdict=font_steering, horizontalalignment='center',\n verticalalignment='center', color=gui_color)\n # Steering HUD: Predicted steering angle\n if 'steering_pred_signal' in dataset.df.columns:\n steer_pred = ''.join(['◈' if v else ' ' for v in dataset.linear_bin(rec['steering_pred_signal'])])\n text_steer_pred = ax.text(80, 95, steer_pred, fontdict=font_steering, horizontalalignment='center',\n verticalalignment='center', color='red')\n\n outpath = os.path.join(dataset.path_dataset, outfname)\n fig.savefig(outpath)\n logging.debug(\"Wrote Sample Frames figure to {}\".format(outpath))",
"def _convert_path_list_to_images_and_labels(self, path_list, is_one_shot_task):\n number_of_pairs = int(len(path_list) / 2)\n pairs_of_images = [np.zeros(\n (number_of_pairs, self.image_height, self.image_height, 1)) for i in range(2)]\n labels = np.zeros((number_of_pairs, 1))\n\n for pair in range(number_of_pairs):\n image = Image.open(path_list[pair * 2])\n image = np.asarray(image).astype(np.float64)\n image = image / image.std() - image.mean()\n\n pairs_of_images[0][pair, :, :, 0] = image\n image = Image.open(path_list[pair * 2 + 1])\n image = np.asarray(image).astype(np.float64)\n image = image / image.std() - image.mean()\n\n pairs_of_images[1][pair, :, :, 0] = image\n if not is_one_shot_task:\n if (pair + 1) % 2 == 0:\n labels[pair] = 0\n else:\n labels[pair] = 1\n\n else:\n if pair == 0:\n labels[pair] = 1\n else:\n labels[pair] = 0\n\n if not is_one_shot_task:\n random_permutation = np.random.permutation(number_of_pairs)\n labels = labels[random_permutation]\n pairs_of_images[0][:, :, :,\n :] = pairs_of_images[0][random_permutation, :, :, :]\n pairs_of_images[1][:, :, :,\n :] = pairs_of_images[1][random_permutation, :, :, :]\n\n return pairs_of_images, labels",
"def makeGanttChart(mode, fileName):\n\n figType = OUTPUT_FILE_EXTENSION # \"png\", \"eps\"\n\n # extract the figure name and target directory (to store the figures)\n figureFileName = fileName[0:-6]\n k = figureFileName.rfind(\"/\") + 1\n figureFileName = figureFileName[k:]\n k = fileName.rfind(\"/\") + 1\n targetDirectory = \"\"\n if k == 0:\n targetDirectory = \"./\"\n else:\n targetDirectory = fileName[0:k]\n targetFname = targetDirectory + figureFileName + \".\" + figType\n\n # import the tasks\n tasks = parseTasks(mode, fileName)\n machines = sorted(getMachines(tasks))\n orders = sorted(getOrders(tasks))\n processingUnits = sorted(getProccessingUnits(tasks))\n operations = sorted(getOperations(tasks))\n\n if mode == \"MTS\":\n tasks = consolidateSiblingTasks(tasks, machines)\n tasks = removeDuplicateTasks(tasks)\n if checkForOverlappingTasks(tasks, machines):\n print(\"ERROR! Found overlapping tasks, check your input file!\")\n exit(5)\n\n # Print all of the read tasks in DEBUG mode\n if MYDEBUG:\n for t in tasks:\n t.print()\n\n # build the figure\n fig = plt.figure(figsize=(10, 5), dpi=DPI) # <------ USER OPTION HERE -----------------\n ax = fig.add_subplot(111)\n ax.set_title(figureFileName)\n\n # set up the axes\n y_pos = np.arange(len(machines))\n ax.set_yticks(y_pos)\n ax.set_ylim(min(y_pos) - 0.7, max(y_pos) + 0.7)\n ax.set_yticklabels(machines)\n ax.set_xlabel(\"Time (Hours)\")\n x_pos = np.arange(math.ceil(getMakeSpan(tasks))+1)\n ax.set_xticks(x_pos)\n ax.set_axisbelow(True)\n ax.grid(b=True, which=\"major\", axis=\"x\", alpha=0.5)\n\n # assign a unique color to each order and each operation\n # http://matplotlib.org/examples/color/colormaps_reference.html\n cmapOrders = plt.cm.Pastel2(np.linspace(0, 1, len(orders)))\n cmapOperations = plt.cm.Pastel2(np.linspace(0, 1, len(operations)))\n\n # plot the task rectangles\n # https://stackoverflow.com/questions/21397549/stack-bar-plot-in-matplotlib-and-add-label-to-each-section-and-suggestions\n for i, m in enumerate(machines):\n compatibleTasks = []\n for t in tasks:\n if m == t.machine:\n compatibleTasks.append(t)\n slots = [] # time slots for machine m\n for ct in compatibleTasks:\n for ct in compatibleTasks:\n thisSlot = (ct.tBegin, ct.tEnd)\n if thisSlot not in slots:\n slots.append(thisSlot)\n slots = sorted(slots)\n if mode == \"SCH\":\n for s, slt in enumerate(slots):\n thisBatchSize = \"\"\n thisOperation = \"\"\n for ct in compatibleTasks:\n if (ct.tBegin, ct.tEnd) == slt:\n thisBatchSize = ct.batchSize\n thisOperation = ct.operation\n thisColor = cmapOperations[operations.index(thisOperation)]\n h = ax.barh(i, width=slots[s][1]-slots[s][0], left=slots[s][0], align='center', color=thisColor)\n bl = h[0].get_xy()\n x = 0.5*h[0].get_width() + bl[0]\n y = 0.5*h[0].get_height() + bl[1]\n ax.text(x, y, str(thisBatchSize), ha='center',va='center')\n elif mode == \"MTS\":\n for s, slt in enumerate(slots):\n # Get the MAIN task corresponding to the current time slot\n currentTask = 0\n for ct in compatibleTasks:\n if (ct.tBegin, ct.tEnd) == slt:\n currentTask = ct\n # Plot the unique task\n if len(currentTask.subtasks) == 0:\n duration = slots[s][1]-slots[s][0]\n thisColor = cmapOrders[orders.index(currentTask.order)]\n\n h = []\n h.append(ax.barh(i, width=duration, left=slots[s][0], align='center', color=\"grey\", alpha=0.7))\n h.append(ax.barh(i, width=duration - 2*MARGIN, left=slots[s][0] + MARGIN, align='center',\n color=thisColor, height=0.65, linewidth=0))\n bl = h[0][0].get_xy()\n x = 0.5*h[0][0].get_width() + bl[0]\n y 
= 0.5*h[0][0].get_height() + bl[1]\n thisBatchSize = currentTask.batchSize\n ax.text(x, y, str(thisBatchSize), ha='center',va='center', size=LABEL_SIZE)\n else:\n # Plot first the MAIN task\n duration = slots[s][1]-slots[s][0]\n barHandles = []\n barHandles.append(ax.barh(i, width=duration, left=slots[s][0],\n align='center', color=\"grey\", alpha=0.7))\n bl = barHandles[0][0].get_xy()\n l = slots[s][0] + MARGIN\n # Plot the SUB tasks\n for counter, thisSub in enumerate(currentTask.subtasks):\n thisColor = cmapOrders[orders.index(thisSub.order)]\n partialDuration = (thisSub.batchSize/currentTask.batchSize) * duration - \\\n 2*MARGIN/len(currentTask.subtasks)\n barHandles.append(ax.barh(i, width=partialDuration, left=l, align='center', height=0.65, linewidth=0,\n color=thisColor))\n bl = barHandles[-1][0].get_xy()\n x = 0.5*barHandles[-1][0].get_width() + bl[0]\n y = 0.5*barHandles[-1][0].get_height() + bl[1]\n thisBatchSize = thisSub.batchSize\n ax.text(x, y, str(thisBatchSize), ha='center',va='center', size=LABEL_SIZE)\n l = l + partialDuration\n else:\n print(\"INVALID MODE\")\n exit(5)\n\n # Show / print the figure\n fig.savefig(targetFname, dpi=DPI)\n # if MYDEBUG:\n # plt.show()\n plt.clf()\n plt.close()\n\n\n # plot a legend (print in different file)\n if PLOT_LEGENDS:\n if mode == \"SCH\":\n pat = []\n leg = plt.figure(figsize=(5, 5), dpi=DPI)\n frame = plt.gca()\n frame.axes.get_xaxis().set_visible(False)\n frame.axes.get_yaxis().set_visible(False)\n leg.patch.set_visible(False)\n for op in operations:\n thisColor = cmapOperations[operations.index(op)]\n pat.append(mpatches.Patch(color=thisColor, label=op))\n plt.legend(handles=pat)\n leg.savefig(targetDirectory + figureFileName + \"_legend.\" + figType, dpi=DPI)\n elif mode == \"MTS\":\n pat = []\n leg = plt.figure(figsize=(5, 5), dpi= DPI)\n frame = plt.gca()\n frame.axes.get_xaxis().set_visible(False)\n frame.axes.get_yaxis().set_visible(False)\n leg.patch.set_visible(False)\n for ord in orders:\n thisColor = cmapOrders[orders.index(ord)]\n pat.append(mpatches.Patch(color=thisColor, label=ord))\n plt.legend(handles=pat)\n leg.savefig(targetDirectory + figureFileName + \"_legend.\" + figType, dpi=DPI)\n else:\n print(\"INVALID MODE\")\n exit(5)",
"def visualize(**images):\n n = len(images)\n plt.figure(figsize=(16, 5))\n for i, (name, image) in enumerate(images.items()):\n plt.subplot(1, n, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.title(' '.join(name.split('_')).title())\n plt.imshow(image)\n plt.show()\n # plt.savefig('./drive/My Drive/Colab Notebooks/TACK/Large/result' + ' '.join(name.split('_')).title() + '.png')",
"def _load_img_label(self):\n name = self._launch_file_b()\n self._img_label.configure(text=name)",
"def plots_from_files(imspaths, figsize=(10,5), rows=1, titles=None, maintitle=None):\n f = plt.figure(figsize=figsize)\n if maintitle is not None:\n plt.suptitle(maintitle, fontsize=16) \n for i in range(len(imspaths)):\n sp = f.add_subplot(rows, len(imspaths)//rows, i+1)\n sp.axis('Off')\n if titles is not None: sp.set_title(titles[i], fontsize=16)\n img = plt.imread(imspaths[i])\n plt.imshow(img)",
"def draw_label_on_image(root_folder_path,root_folder_name,img_name,img_type,class_name,bb_color,bb_list):\n img_path=os.path.join(root_folder_path,root_folder_name,img_type,img_name+\".png\")\n img=cv2.imread(img_path)\n for each_bb in bb_list:\n cv2.rectangle(img,(each_bb[0],each_bb[2]),(each_bb[1],each_bb[3]),bb_color,3)\n cv2.putText(img,class_name,(each_bb[0],each_bb[3]),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0),2,cv2.LINE_AA)\n cv2.imwrite(img_path,img)",
"def plot_images(imgs, lbls=None, cols=4, rows=5, save_path=None):\n # numpy array to list\n if not type(imgs) is list:\n imgs = list(imgs)\n # 1. check if the parameters are correct\n assert(len(imgs) == cols*rows)\n if not lbls is None:\n assert(len(lbls) == len(imgs))\n\n # 2. create the figure\n fig = plt.figure()\n\n # 3. iterate over the rows and cols\n for i in range(1, cols*rows + 1):\n # add the next subplot\n fig.add_subplot(rows, cols, i)\n # plot the image\n plt.imshow(imgs[i-1])\n # check if a lbls list is specified\n if not lbls is None:\n # show the lbl of the image\n plt.title(lbls[i-1])\n plt.tight_layout(pad=0.6)\n\n if not save_path is None:\n plt.savefig(save_path)\n else: \n plt.show()",
"def read_files_and_visualize(data):\n\n image = cv2.imread(data[0])\n label = cv2.imread(data[1], 0)\n name = data[1].split('/')[-1].split('.')[0]\n obj_label = None\n\n if generator_options.save_label_preview:\n obj_label = []\n if os.path.isfile(data[2]):\n with open(data[2], 'r') as f:\n obj = csv.reader(f, delimiter=',')\n for row in obj:\n row = [int(r.split('.')[0]) if index != 0 else r\n for index, r in enumerate(row)]\n obj_label.append(row)\n\n else:\n label_vals = np.unique(label)\n for val in label_vals:\n obj_label.append([_LABEL_DEF_FULL[val], 0, 0, 0, 0])\n\n save_visuals(image, label, obj_label, name)",
"def load_imgsLabels(self, image_paths):\n \n# label = image_paths[-1]\n \n images = self.load_images(image_paths)\n \n images = self.resize_images(images)\n \n images_list = self.greyscale_images(images)\n\n return images_list",
"def get_path_image(path_data, label, filename):\n\n return path_data.joinpath(f'label_{label}', filename)",
"def work(q, example_paths, label_paths, total_files, export_path_example, export_path_label, size, layover, input_size):\n\n while not q.empty():\n try:\n i = q.get(False)\n except Empty:\n break\n\n # Show progress\n utils.print_process(total_files - q.qsize(), total_files)\n\n # We assume that related examples and labels have the same index in the path lists\n example_path = example_paths[i]\n label_path = label_paths[i]\n\n # Creates masks for the image pairs\n mask_image(example_path, export_path_example, size, layover, input_size)\n mask_image(label_path, export_path_label, size, layover, input_size)\n\n q.task_done()",
"def plot_labels(lbl: scipy.ndarray, lbl_count: int) -> None:\n color_map = scipy.rand(lbl_count, 3)\n color_map = matplotlib.colors.ListedColormap(color_map)\n plt.imshow(lbl, cmap=color_map)\n plt.show()",
"def visualize(**images):\n n_images = len(images)\n plt.figure(figsize=(20,8))\n for idx, (name, image) in enumerate(images.items()):\n plt.subplot(1, n_images, idx + 1)\n plt.xticks([]); \n plt.yticks([])\n # get title from the parameter names\n plt.title(name.replace('_',' ').title(), fontsize=20)\n plt.imshow(image)\n plt.savefig('sample_gt_pred_2_max.jpeg')\n plt.show()",
"def plot_image(img, label=\"\"): \n if img.shape[0] == 3:\n img = img.transpose(1,2,0)\n fig,ax = plt.subplots(1)\n sns.set_style('white')\n ax.imshow(np.asarray(img))\n if label!=\"\":\n plt.title(number_label[label])\n return fig,ax",
"def plot(path, subjects):\n transformToXYZmm = np.array([[-3.125, 0, 0, 81.250], [0, 3.125, 0, -115.625], [0, 0, 6, -54.000], [0, 0, 0, 1.000]])\n data = data_load.load_data(path, subjects)\n dimx = int(data[0][\"meta\"][\"dimx\"][0])\n dimy = int(data[0][\"meta\"][\"dimy\"][0])\n dimz = int(data[0][\"meta\"][\"dimz\"][0])\n coordToCol = data[0][\"meta\"][\"coordToCol\"][0][0]\n images = {}\n max_val = 0\n voxels = np.load(\"data/general_selected_500_1.npy\")\n directory = os.listdir(\"data/input/\")\n bar = pyprind.ProgBar(len(directory), title='Info extraction and Image Building')\n bar2 = pyprind.ProgBar(len(images.keys()), title='Saving Pictures')\n for file in directory:\n file_name = \"data/input/{}\".format(file)\n fh = open(file_name)\n activation_values = np.asarray(list(map(lambda x: float(x), filter(lambda x: x != '', fh.read().split(\",\")))))\n fh.close()\n plot_matrix = np.zeros((dimx, dimy, dimz))\n for x in range(dimx):\n for y in range(dimy):\n for z in range(dimz):\n indice = coordToCol[x][y][z]\n if indice != 0:\n if indice in list(voxels):\n voxel_indice = list(voxels).index(indice)\n value = activation_values[voxel_indice]\n if abs(value) > max_val:\n max_val = abs(value)\n plot_matrix[x][y][z] = value\n image = nib.Nifti1Image(plot_matrix, transformToXYZmm)\n images[file_name] = image\n bar.update(force_flush=True)\n print(bar)\n for image in images:\n plotting.plot_glass_brain(images[image], display_mode='ortho', vmax=max_val, plot_abs=False, threshold=None, colorbar=True, output_file=\"{}-wom1.png\".format(image))\n bar2.update(force_flush=True)\n print(bar2)",
"def plotImages(image_list, name_list, path, as_grey, toSave=False):\n\n\tfig = plt.figure()\n\timageCoordinate = 100 + 10*len(image_list) + 1\n\ti = 0\n\n\tfor image in image_list:\n\t\tfig.add_subplot(imageCoordinate)\n\t\tplt.title(name_list[i])\n\t\tplt.axis('off')\n\t\tplt.imshow(image)\n\t\tif as_grey:\n\t\t\tplt.set_cmap('gray')\n\n\t\timageCoordinate += 1\n\t\ti += 1\n\n\tif toSave:\n\t\tplt.savefig(path + \".jpg\",bbox_inches='tight')\n\tplt.show()",
"def plot_list_image_path(list_image_path, log_image_path=False):\n i = 1\n nb_img = len(list_image_path)\n plt.figure(figsize=(10, 2 * nb_img))\n for image_path in list_image_path:\n if not os.path.isfile(image_path):\n continue\n img = load_img(image_path)\n plt.subplot(math.ceil(nb_img/3) + 1, 3, i)\n i += 1\n if log_image_path:\n plt.title(image_path)\n plt.imshow(img)\n plt.axis('off')\n plt.show()",
"def _plot_imgs_and_ctrs_np(self, config) -> None:\n imgs = config['imgs']\n ctrs = config['ctrs']\n orig_slices = config['slices']\n plane = config['plane']\n title = config['title']\n draw_only_ctrs = config['draw_only_ctrs']\n file_name_prefix = config['file_name_prefix']\n labels = config['labels']\n\n create_folder(self._output_folder)\n if plane != PlaneTypes.ALL: # Draw single plane\n # This should plot a single image with all the contours overlayed\n first_img_shape = imgs[0].shape\n # Validate the size of the images\n for c_img in imgs:\n if c_img.shape != first_img_shape:\n raise Exception('The shape of the images must be the same')\n\n slices = get_slices(orig_slices, imgs[0], plane)\n for c_slice in slices:\n draw_slice = should_display_slice(ctrs, c_slice, plane, draw_only_ctrs)\n if draw_slice:\n fig, ax = plt.subplots(1, len(imgs), squeeze=True, figsize=(8, 8))\n for img_idx, c_img in enumerate(imgs):\n img_slice = get_proper_plane(c_img, plane, c_slice)\n ctrs_slice = [get_proper_plane(np_ctr, plane, c_slice) for np_ctr in ctrs]\n if len(imgs) > 1:\n plot_slice(img_slice, ctrs_slice, ax[img_idx], labels)\n else:\n plot_slice(img_slice, ctrs_slice, ax, labels)\n c_title = F'{title} {plane.value} {c_slice:04d}'\n file_name = F'{file_name_prefix}_{plane.value}_{c_slice:04d}'\n plt.title(c_title, fontsize=20)\n pylab.savefig(join(self._output_folder, F'{file_name}.jpg'), bbox_inches='tight')\n else:\n if len(imgs) != 1:\n raise Exception('The number of image allowed for Plane type ALL must be 1')\n # In this case it should plot 3 images, one for each plane. Here we force it to plot\n # only the middle slice\n c_img = imgs[0]\n plt.subplots(1, 3, squeeze=True, figsize=(8 * 3, 8))\n for id_plane, plane in enumerate([PlaneTypes.AXIAL, PlaneTypes.SAGITTAL, PlaneTypes.CORONAL]):\n ax = plt.subplot(1, 3, id_plane + 1)\n c_slice = get_slices(SliceMode.MIDDLE, c_img, plane)[0]\n img_slice = get_proper_plane(c_img, plane, c_slice)\n ctrs_slice = [get_proper_plane(np_ctr, plane, c_slice) for np_ctr in ctrs]\n plot_slice(img_slice, ctrs_slice, ax, labels)\n c_title = F'{title} ALL {c_slice:04d}'\n plt.title(c_title, fontsize=20)\n\n file_name = F'{file_name_prefix}_{plane.value}_{c_slice:04d}'\n pylab.savefig(join(self._output_folder, F'{file_name}.jpg'), bbox_inches='tight')\n\n self._close_figure()",
"def save_images(self, samples, label=None, dir=\"\"):\n if label is None:\n label = self.global_step_\n fig = plt.figure()\n self.net_.eval()\n self.dist.visualize(fig, samples, self.energy)\n plot_fn = os.path.join(dir, f\"samples_{label}.png\")\n fig.savefig(plot_fn)\n plt.close(fig)",
"def plot_segmentation_prediction(\n prediction: ndarray,\n label: ndarray,\n raw_img: Image,\n raw_label: Image,\n img_shape: tuple,\n img_name: str,\n save_path: str,\n) -> None:\n raw_img = raw_img.resize(img_shape)\n\n final_mask = mark_boundaries(raw_img, prediction == 1, [255, 0, 0])\n final_mask = mark_boundaries(final_mask, prediction == 2, [0, 255, 0])\n final_mask = mark_boundaries(final_mask, prediction == 3, [0, 0, 255])\n\n final_seg_mask = zeros(img_shape + (3,), uint8)\n final_seg_mask[prediction == 1] = [255, 0, 0]\n final_seg_mask[prediction == 2] = [0, 255, 0]\n final_seg_mask[prediction == 3] = [0, 0, 255]\n\n final_label = mark_boundaries(raw_img, label[1], [255, 0, 0])\n final_label = mark_boundaries(final_label, label[2], [0, 255, 0])\n\n if label.shape[0] == 4:\n final_label = mark_boundaries(final_label, label[3], [0, 0, 255])\n\n fig = plt.figure(figsize=(14, 14))\n\n fig.add_subplot(2, 2, 1)\n plt.imshow(final_mask)\n\n plt.title(\"Prediction\")\n\n fig.add_subplot(2, 2, 2)\n plt.imshow(final_seg_mask)\n plt.title(\"Prediction - mask\")\n\n fig.add_subplot(2, 2, 3)\n plt.imshow(final_label)\n plt.title(\"Reference\")\n\n raw_label = array(raw_label)\n raw_label[(raw_label == [255, 255, 0]).sum(axis=2) == 3] = [255, 0, 0]\n raw_label = Image.fromarray(raw_label)\n raw_label = raw_label.resize(img_shape)\n \n fig.add_subplot(2, 2, 4)\n plt.imshow(raw_label)\n plt.title(\"Reference - mask\")\n\n plt.savefig(join(save_path, img_name))\n plt.close()",
"def plot_images_grid(images, labels, title):\n images = images.cpu()\n labels = labels.cpu()\n \n assert type(images[0]) is torch.Tensor, 'Image to plot is not torch.Tensor'\n image_size = int(np.sqrt(images[0].shape[0]))\n \n fig = plt.figure(figsize=(10,4))\n for idx in range(10):\n ax = fig.add_subplot(2,10/2,idx+1, xticks=[], yticks=[])\n ax.imshow(images[idx].view(image_size, image_size), cmap = 'gray')\n label = labels[idx].item()\n ax.set_title(label)\n #end\n fig.suptitle(title, fontsize = 14)\n plt.show()\n plt.close('all')",
"def label_visualize(img_dir):\n img = scipy.misc.imread(img_dir).astype(np.uint8)\n yo = np.nonzero(img == 1)\n visual = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n\n for i in range(0, 34):\n index = np.nonzero(img == i)\n visual[index + (0,)] = labels[i][0]\n visual[index + (1,)] = labels[i][1]\n visual[index + (2,)] = labels[i][2]\n\n scipy.misc.imsave('./' + img_dir.split('/')[-1], visual)",
"def plot_labelled_images(x, y, filename, y_predict=None):\n\n plt.clf()\n\n nplot = 10\n nrows = 2\n ncols = 5\n\n # randomly choose which images from the dataset to plot \n random_indices = np.random.choice(x.shape[0], size=nplot, replace=False)\n\n figure = plt.gcf()\n\n for i, index in enumerate(random_indices):\n ax = figure.add_subplot(nrows, ncols, i + 1, xticks=[], yticks=[])\n\n # plot image\n ax.imshow(np.squeeze(x[index]))\n\n # add label as title of image\n label_index = np.argmax(y[index])\n label = label_names[label_index]\n\n # if predicted labels have been supplied in addition to true labels, show both\n if y_predict is not None:\n predicted_label_index = np.argmax(y_predict[index])\n predicted_label = label_names[predicted_label_index]\n title = \"true={}\\n(predicted={})\".format(label, predicted_label)\n\n # else only show true labels\n else:\n title = \"true={}\".format(label)\n\n ax.set_title(title)\n\n size = figure.get_size_inches()\n figure.set_size_inches(size[0]*2, size[1]*2)\n\n plt.savefig(filename, bbox_inches='tight')",
"def img_viewer_examples(images, labels, prediction = None, size=0, greyscale=False):\n batchSize = min(size, images.shape[0])\n \n if size == 0:\n batchSize = images.shape[0]\n\n # I CAN TAKE THE BATCH_SIZE from the images size/shape according the sent data type\n no_of_columns = round(math.sqrt(batchSize))\n no_of_rows = math.ceil(batchSize / no_of_columns)\n print(\"batch size {}, no_of_rows {}, no_of_columns {}\".format(batchSize, no_of_rows, no_of_columns))\n fig = plt.figure(figsize=(no_of_columns*1.25, no_of_rows*1.5))\n # (width, height)\n for idx in np.arange(batchSize):\n ax = fig.add_subplot(no_of_rows, no_of_columns,\n idx+1, xticks=[], yticks=[])\n if greyscale:\n ax.imshow(np.squeeze(images[idx]), cmap='gray')\n else:\n ax.imshow(np.squeeze(images[idx]))\n # print out the correct label for each image\n # .item() gets the value contained in a Tensor\n # WAIT FOR TASNEEM TO SEE THE RETURNED DATA TYPE\n if not prediction is None:\n ax.set_title(\"{} ({})\".format(str(prediction[idx]), str(labels[idx])),\n color=(\"green\" if prediction[idx] == labels[idx] else \"red\"))\n else:\n ax.set_title(str(labels[idx]))",
"def plot_image_comparison(name, img_arr):\n\n plt.clf()\n fig = plt.figure()\n\n # divide the images into rows and columns\n num_imgs = len(img_arr)\n columns = num_imgs // 2\n rows = math.ceil(num_imgs / columns)\n\n for i, vals in enumerate(img_arr):\n fig.add_subplot(rows, columns, i+1)\n plt.imshow(vals[\"img\"], vmin=0, vmax=255)\n plt.axis(\"off\")\n plt.title(vals[\"title\"], fontsize=8)\n\n plt.savefig(f\"{name}/comparison.jpeg\")",
"def plot_images(images, labels=None, proba=None, ncols=5, models_names=None,\n classes = ['airplane', 'automobile', 'bird', 'cat', 'deer',\n 'dog', 'frog', 'horse', 'ship', 'truck'], **kwargs):\n if isinstance(models_names, str):\n models_names = (models_names, )\n if not isinstance(proba, (list, tuple)):\n proba = (proba, )\n if models_names is None:\n models_names = ['']\n else:\n if models_names is None:\n models_names = ['Model ' + str(i+1) for i in range(len(proba))]\n\n # if the classes names are not specified they can be implicitely infered from the `proba` shape,\n if classes is None:\n if proba[0] is not None:\n classes = [str(i) for i in range(proba[0].shape[1])]\n elif labels is None:\n pass\n elif proba[0] is None:\n raise ValueError('Specify classes')\n\n n_items = len(images)\n nrows = (n_items // ncols) + 1\n fig, ax = plt.subplots(nrows, ncols, **kwargs)\n ax = ax.flatten()\n for i in range(n_items):\n ax[i].imshow(images[i])\n if labels is not None: # plot images with labels\n true_class_name = classes[labels[i]]\n title = 'Label: {}'.format(true_class_name)\n if proba[0] is not None: # plot images with labels and predictions\n for j, model_proba in enumerate(proba): # the case of preidctions of several models\n class_pred = np.argmax(model_proba, axis=1)[i]\n class_proba = model_proba[i][class_pred]\n pred_class_name = classes[class_pred]\n title += '\\n {0} pred: {1}, p = {2:.2f}'.format(models_names[j], pred_class_name, class_proba)\n ax[i].title.set_text(title)\n ax[i].grid(b=None)\n\n for i in range(n_items, nrows * ncols):\n fig.delaxes(ax[i])",
"def show_result(inputs, labels, outputs):\n num_classes = outputs.size(1)\n outputs = outputs.argmax(dim=1).detach().cpu().numpy()\n if num_classes == 2:\n outputs *= 255\n mask = outputs[0].reshape((360, 640))\n fig, ax = plt.subplots(1, 2, figsize=(20, 1 * 5))\n ax[0].imshow(inputs[0, :3, :, ].detach().cpu().numpy().transpose((1, 2, 0)))\n ax[0].set_title('Image')\n ax[1].imshow(labels[0].detach().cpu().numpy().reshape((360, 640)), cmap='gray')\n ax[1].set_title('gt')\n plt.show()\n plt.figure()\n plt.imshow(mask, cmap='gray')\n plt.title('Pred')\n plt.show()"
] | [
"0.62449753",
"0.5956047",
"0.5905638",
"0.5834986",
"0.5807051",
"0.57767344",
"0.5769278",
"0.5720147",
"0.5714393",
"0.5708018",
"0.55924964",
"0.55918765",
"0.55361027",
"0.55186796",
"0.5489448",
"0.5472695",
"0.54674464",
"0.54264355",
"0.5421594",
"0.54154986",
"0.5411966",
"0.54003215",
"0.53931046",
"0.539129",
"0.5387641",
"0.5376994",
"0.5375564",
"0.53652376",
"0.5361416",
"0.53431624"
] | 0.80462617 | 0 |
Flying formation box calculation | def calculateFFBox(qOfFlights):
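    # NOTE: the grid layout (rows x columns) for the formation box is not implemented
    # yet; only the two-flight case is sketched in the commented-out lines below.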
# if qOfFlights == 2: rows=2; columns=1
# else:
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_ft(self):\n \n # Create a function which is able to evaluate B**2\n ffunc = scipy.interpolate.interp1d(self.psigrid, self.e.getF()[self.tind])\n def b2_func(R, Z, psi):\n bt = ffunc(psi)/R\n br = -self.psifunc.ev(R, Z, dy=1)/R\n bz = self.psifunc.ev(R, Z, dx=1)/R\n \n return bt**2 + br**2 + bz**2\n \n\n def b_bmax2(R,Z,psi):\n b2 = b2_func(R,Z,psi)\n return b2 / np.max(b2)\n \n def b_bmax(R,Z,psi):\n return np.sqrt(b_bmax2(R,Z,psi))\n \n # Evaluate the flux-surface averaged h^2 and h, as required\n fsa_h2 = self.fs_average(b_bmax2)\n fsa_h = self.fs_average(b_bmax)\n \n # This is the function which gets flux-surface averaged in equation (7)\n def ftl_func(R,Z,psi):\n h = b_bmax(R,Z,psi)\n h2 = b_bmax2(R,Z,psi)\n \n return (1 - (np.sqrt(1 - h) * (1 + 0.5 * h)))/h2\n \n \n # Equation 6, 7 in Lin-Liu\n fs_ftu = 1 - fsa_h2 / fsa_h**2 * (1 - np.sqrt(1 - fsa_h) * (1 + 0.5 * fsa_h))\n fs_ftl = 1 - fsa_h2 * self.fs_average(ftl_func)\n # Equation 18, 19 \n om = 0.75\n self.fs_ft = om*fs_ftu + (1-om)*fs_ftl",
"def box_function(width_left, width_right, shift, sigma, x):\n\n prefactor = 2.0 * 0.25\n left = erf( (1.0/width_left * x + 1.0/width_left * shift + 1.0) / ( sigma * math.sqrt(2.0)) )\n right = erf( (1.0/width_right * x + 1.0/width_right * shift - 1.0) / ( sigma * math.sqrt(2.0)) )\n\n return prefactor * (left - right)",
"def box_mul():\n\tu0=r.uniform(0,1)\n\tu1=r.uniform(0,1)\n\tz0 = m.sqrt((-2) * m.log(u0)) * m.cos(2 * m.pi * u1)\n\tz1 = m.sqrt((-2) * m.log(u0)) * m.sin(2 * m.pi * u1)\n\treturn (z0, z1)",
"def _fv(self):\n return self.beta * (self.x ** self.c)",
"def calc_mag(self):\n mag = np.sum(self.box)\n return mag",
"def factor(self):\r\n\t\t\r\n\t\t# get gcf\r\n\t\tg = self.extract()\r\n\t\t\r\n\t\t# invert and multiply\r\n\t\tv = g.invert()\r\n\t\tf = self.multiply(v)\r\n\t\t\r\n\t\treturn f,g",
"def f(self):\n return self.g() + self.h()",
"def box_refinement(box, gt_box):\n box = box.astype(np.float32)\n gt_box = gt_box.astype(np.float32)\n\n height = box[:, 2] - box[:, 0]\n width = box[:, 3] - box[:, 1]\n center_y = box[:, 0] + 0.5 * height\n center_x = box[:, 1] + 0.5 * width\n\n gt_height = gt_box[:, 2] - gt_box[:, 0]\n gt_width = gt_box[:, 3] - gt_box[:, 1]\n gt_center_y = gt_box[:, 0] + 0.5 * gt_height\n gt_center_x = gt_box[:, 1] + 0.5 * gt_width\n\n dy = (gt_center_y - center_y) / height\n dx = (gt_center_x - center_x) / width\n dh = np.log(gt_height / height)\n dw = np.log(gt_width / width)\n\n return np.stack([dy, dx, dh, dw], axis=1)",
"def fce(B):\n return wce(B)/(2.*np.pi)",
"def p2f(self):\n\n stale = self.m_f\n self.m_f = self.v.b / self.m_v",
"def magic_box(l: Tensor):\n return torch.exp(l - l.detach())",
"def _F(x,gam):\n beta = np.sqrt(1 - gam**-2)\n B = 1 + 0.5 * (gam**2 - 1)\n C = 10 * x * gam * beta * (2 + gam * beta)\n C /= 1 + x**2 * (gam**2 - 1)\n\n F_1 = (17 - 3 * x**2 / (2 - x)**2 - C) * np.sqrt(1 - x)\n F_2 = 12 * (2 -x) - 7 * x**2 / (2 - x) - 3 * x**4 / (2 - x)**3\n F_3 = np.log((1 + np.sqrt(1 - x)) / np.sqrt(x))\n\n return B * F_1 + F_2 * F_3",
"def box_refinement(box, gt_box):\n\n width = box[:, 2] - box[:, 0]\n height = box[:, 3] - box[:, 1]\n center_x = box[:, 0] + 0.5 * width\n center_y = box[:, 1] + 0.5 * height\n\n gt_width = gt_box[:, 2] - gt_box[:, 0]\n gt_height = gt_box[:, 3] - gt_box[:, 1]\n gt_center_x = gt_box[:, 0] + 0.5 * gt_width\n gt_center_y = gt_box[:, 1] + 0.5 * gt_height\n\n dx = (gt_center_x - center_x) / width\n dy = (gt_center_y - center_y) / height\n dw = torch.log(gt_width / width)\n dh = torch.log(gt_height / height)\n\n result = torch.stack([dx, dy, dw, dh], dim=1)\n return result",
"def calc_magnitude(box,octant):\n # Read the Mi(z=2) magnitudes for the box.\n miz2 = FH.read_file(box)['Miz2'][:]\n # Read the index for each QSO in the octant, and get the Mi(z=2).\n data = FH.read_file(octant)\n zz = data['Z']\n dmod = data['DMOD']\n miz2 = miz2[data['INDX']]\n # Now convert to apparent i-band magnitude using the k-correction.\n # If a tabulated k-correction is available, use that, otherwise\n # default to a power-law continuum approximation.\n # See discussion in Ross++13, Appendix B and Section 4.\n kfile=os.getenv('MOCKINGDESI_BASE')+\"/data/qso-iband-k-correction.txt\"\n if os.path.exists(kfile):\n print(\"Using K-correction from \"+kfile)\n kcorr = np.loadtxt(kfile)\n kcorr = np.interp(zz,kcorr[:,1],kcorr[:,2])\n else:\n print(\"Using power-law K-correction\")\n alpha = -0.5\n kcorr = -2.5*(1+alpha)*np.log10( (1+zz)/(1+2.0) )\n gmi = np.poly1d([0.1502,-0.9886,2.147,-1.758,0.6397])\t# See notes.\n rmi = np.poly1d([-0.1482,1.636,-6.716,12.55,-10.39,3.017])\n magi = miz2 + dmod + kcorr\t# e.g. Ross++13, Eq. 5\n magg = magi + gmi(zz.clip(0.5,3.5))\n magr = magi + rmi(zz.clip(0.5,3.5))\n # and write the results\n data = {}\n data['GMAG'] = magg.astype('f4')\n data['RMAG'] = magr.astype('f4')\n FH.write_file(octant,data)\n #",
"def Transformed(self, *args):\n return _Bnd.Bnd_Box_Transformed(self, *args)",
"def _wf(self, p):\n r = self.faces - p\n n = norm(r, axis=2)\n num = row_wise_dot(r[:, 0, :], np.cross(r[:, 1, :], r[:, 2, :]))\n den = n[:, 1] * n[:, 2] * n[:, 0]\n for i in range(3):\n j = (i + 1) % 3\n k = (i + 2) % 3\n den += row_wise_dot(r[:, i, :], r[:, j, :]) * n[:, k]\n return 2*np.arctan2(num, den)",
"def fs_form_factor(x_c_m,t_c,mach,max_t_sweep):\n return (1.0+(0.6/x_c_m)*t_c+100.0*(t_c**4))*(1.34*(mach**0.18)*((np.cos(np.deg2rad(max_t_sweep)))**0.28))",
"def F_trans(self):\n rho_H1 = self.edp_par['rho_H1'].value\n Z_H1 = self.edp_par['Z_H1'].value\n sigma_H1 = self.edp_par['sigma_H1'].value\n rho_M = self.edp_par['rho_M'].value\n sigma_M = self.edp_par['sigma_M'].value\n psi = self.edp_par['psi'].value \n common_scale = self.edp_par['common_scale'].value\n \n # Calculate the intermediate variables\n alpha = self.qz*cos(psi) - self.qx*sin(psi)\n Z_CH2 = Z_H1 - sigma_H1\n Z_W = Z_H1 + sigma_H1\n DeltaZ_H = Z_W - Z_CH2\n \n # Calculate the Gaussian part \n FG = -rho_M*sigma_M * exp(-0.5*(alpha*sigma_M)**2)\n FG += 2*rho_H1*sigma_H1 * cos(alpha*Z_H1) * exp(-0.5*(alpha*sigma_H1)**2)\n FG *= np.sqrt(2*pi)\n \n # Calculate the strip part\n FS = -2 * sin(alpha*Z_CH2) / alpha\n \n # Calculate the bridging part\n FB = 1 / (alpha + pi/DeltaZ_H)\n FB += 1 / (alpha - pi/DeltaZ_H)\n FB *= sin(alpha*Z_W) + sin(alpha*Z_CH2)\n FB *= 0.5\n FB -= (sin(alpha*Z_W)-sin(alpha*Z_CH2)) / alpha\n \n return common_scale * (FG + FS + FB)",
"def F_trans(self):\n common_scale = self.edp_par['common_scale'].value\n R_HM = self.edp_par['R_HM'].value\n X_h = self.edp_par['X_h'].value\n psi = self.edp_par['psi'].value \n arg = self.qz*X_h*np.cos(psi) - self.qx*X_h*np.sin(psi)\n return common_scale * (R_HM*np.cos(arg) - 1)",
"def vf(gravedad, tiempo):\r\n #se realiza un multiplicacion y el valor se le asigna a la variable vf\r\n vf=gravedad*tiempo\r\n #se regresa vf\r\n return vf",
"def box(times, signal, f0=None, fn=None, df=None, Nbin=10, qmi=0.005, qma=0.75 ):\n #-- initialize some variables needed in the FORTRAN module\n n = len(times)\n T = times.ptp()\n u = np.zeros(n)\n v = np.zeros(n)\n \n #-- frequency vector and variables\n nf = (fn-f0)/df\n if f0<2./T: f0=2./T\n \n #-- calculate EEBLS spectrum and model parameters\n power,depth,qtran,in1,in2 = eebls.eebls(times,signal,u,v,nf,f0,df,Nbin,qmi,qma,n)\n frequencies = np.linspace(f0,fn,nf)\n \n #-- to return parameters of fit, do this:\n # pars = [max_freq,depth,qtran+(1./float(nb)),(in1-1)/float(nb),in2/float(nb)]\n return frequencies,power",
"def sigma_xx_to_a_to_ff(self, Q, f):\n if f == \"e\":\n mf = me\n # gall = self.gaee\n elif f == \"mu\":\n mf = mmu\n # gall = self.gamumu\n mx = self.mx\n if Q >= 2.0 * mf and Q >= 2.0 * mx:\n # gaxx = self.gaxx\n # ma = self.ma\n # width_a = self.width_a\n ret_val = 0.0\n assert ret_val.imag == 0\n assert ret_val.real >= 0\n return ret_val.real\n else:\n return 0.0",
"def compute_fade(f):\n\n return 6 * f**5 - 15 * f**4 + 10 * f**3",
"def _get_jittered_box(self, box, mode):\n\n '''\n a, b = scale_jitter_coefficient[0], scale_jitter_coefficient[1]\n m, n = center_jitter_coefficient[0], center_jitter_coefficient[1]\n f = center_jitter_factor\n sqrt(awbhf^2) / 2 = mw + w/2 ------> m = (f*sqrt(ab)*sqrt(h/w)-1)*0.5\n sqrt(awbhf^2) / 2 = nh + h/2 ------> n = (f*sqrt(ab)*sqrt(w/h)-1)*0.5\n '''\n scale_jitter_factor = self.scale_jitter_factor[mode]\n center_jitter_factor = self.center_jitter_factor[mode]\n\n scale_jitter_coefficient = torch.exp(torch.randn(2) * scale_jitter_factor).clamp(0.25, 4)\n center_jitter_coefficient = (scale_jitter_coefficient.prod().sqrt() * torch.Tensor([(box[3]/box[2]).sqrt(), (box[2]/box[3]).sqrt()]) * center_jitter_factor - 1).clamp(0)\n\n scale_jitter = box[2:4] * scale_jitter_coefficient\n center_jitter = box[0:2] + 0.5 * box[2:4] + (torch.rand(2)-0.5) * box[2:4] * center_jitter_coefficient\n\n return torch.cat((center_jitter - 0.5 * scale_jitter, scale_jitter), dim=0)",
"def compute_force(X, V, bl, ip, box, gamma, kT, dt):\n N = len(X)\n F = np.zeros((N, 3))\n Fcube = np.zeros((N, N, 3))\n inv_box = np.zeros((3, 3))\n for i in range(3): inv_box[i, i] = 1.0 / box[i, i]\n g = np.zeros(3)\n rij = np.zeros(3)\n vij = np.zeros(3)\n a = 0.0\n nr = 0.0\n fpair = 0.0\n\n vir = 0.0\n sigma = np.zeros(3)\n volume = np.linalg.det(box)\n\n for i in range(N):\n for j in range(i):\n rij = X[i] - X[j]\n g = matvecmul(inv_box, rij)\n g = g - np.round_(g, 0, np.empty_like(g))\n rij = matvecmul(box, g)\n vij = V[i] - V[j]\n\n a = ip[bl[i]-1, bl[j]-1]\n nr = norm_numba(rij)\n\n fc = a * wr(nr)\n fpair = fc \\\n - gamma * wr(nr)**2 * dot_numba(rij, vij) / nr \\\n + sqrt(2.0*gamma*kT) * wr(nr) * np.random.randn() / sqrt(dt)\n Fcube[i, j, :] = fpair / nr * rij\n Fcube[j, i, :] = -fpair / nr * rij\n\n vir += Fcube[i, j, :] @ rij\n sigma += Fcube[i, j, :] * rij\n\n # kinetic part of stress tensor\n for i in range(N):\n sigma += V[i] * V[i]\n\n sigma = sigma / volume\n F = np.sum(Fcube, 1)\n\n return F, vir, sigma",
"def fCalc(self):\n # A dictionary composed of all internal and boundary points\n allPoints = dict(self.internalPoints.items() + self.boundaryPoints.items())\n\n for pointLabel in allPoints.keys():\n # Compute fE, fW, fN and fW only for internal mesh points\n if allPoints[pointLabel].type.lower() == 'internal':\n xLabel = pointLabel[0]\n yLabel = pointLabel[1]\n x = self.internalPoints[(xLabel,yLabel)].x\n y = self.internalPoints[(xLabel,yLabel)].y\n xE = allPoints[(xLabel + 1,yLabel)].x\n xW = allPoints[(xLabel - 1,yLabel)].x\n yN = allPoints[(xLabel,yLabel + 1)].y\n yS = allPoints[(xLabel,yLabel - 1)].y\n \n if (xE - x)/self.h < -0.000001 or (xE - x)/self.h > 1.000001:\n errorMessage = '**Error! (xE - x)/h for the point with label ('+str(xLabel) + ',' + str(yLabel) + ') and coordinate (' + str(x) + ',' + str(y) + ') is not between zero and one: (xE - x)/h = ' + str((xE - x)/self.h)\n raise customError(errorMessage)\n else:\n self.internalPoints[(xLabel,yLabel)].fE = (xE - x)/self.h\n\n # Note that in the following we use -0.000001 and 1.000001 \n # instead of 0 and 1, respectively, to avoid problems with\n # with very small fractions. For example if the fractions is\n # greater than one by 2.22e-16 the condition (x - xW)/self.h > 1\n # will be false and the code returns an error\n if (x - xW)/self.h < -0.000001 or (x - xW)/self.h > 1.000001:\n errorMessage = '**Error! (x - xW)/h for the point with label ('+str(xLabel) + ',' + str(yLabel) + ') and coordinate (' + str(x) + ',' + str(y) + ') is not between zero and one: (x - xW)/h = ' + str((x - xW)/self.h)\n raise customError(errorMessage)\n else:\n self.internalPoints[(xLabel,yLabel)].fW = (x - xW)/self.h\n \n if (yN - y)/self.h < -0.000001 or (yN - y)/self.h > 1.000001:\n errorMessage = '**Error! (yN - y)/h for the point with label ('+str(xLabel) + ',' + str(yLabel) + ') and coordinate (' + str(x) + ',' + str(y) + ') is not between zero and one: (yN - y)/h = ' + str((yN - y)/self.h)\n raise customError(errorMessage)\n else:\n self.internalPoints[(xLabel,yLabel)].fN = (yN - y)/self.h\n \n if (y - yS)/self.h < -0.000001 or (y - yS)/self.h > 1.000001:\n errorMessage = '**Error! (y - yS)/h for the point with label ('+str(xLabel) + ',' + str(yLabel) + ') and coordinate (' + str(x) + ',' + str(y) + ') is not between zero and one: (y - yS)/h = ' + str((y - yS)/self.h)\n raise customError(errorMessage)\n else:\n self.internalPoints[(xLabel,yLabel)].fS = (y - yS)/self.h\n \n # Calculate the coeeficients requried to compute the Laplacian \n self.internalPoints[(xLabel,yLabel)].LapCoeffCalc()",
"def f(self):\n\n if self._f is not None:\n return(self._f)\n if self.larmor is None:\n return(None)\n if self._ppm is not None:\n self._f = (self._ppm - self._ppmshift) * self.larmor * 1e-6;\n return(self._f)\n return(None)",
"def f_fixed(self):\n self.fx_free = self.fy_free = self.fz_free = False\n return self",
"def test_figure34(self):\n star = 0.1\n current = 1.37\n func = lambda x: x**6 + 3 * x - 4\n\n logging.info(\"\\nCONFIRMING FIGURE 3.4\")\n rf_results = undertest.regula_falsi(func, star, current, 100)",
"def F(self, combo_name='Combo 1'):\n\n # Calculate and return the global force vector\n return matmul(inv(self.T()), self.f(combo_name))"
] | [
"0.64013517",
"0.6395771",
"0.6101848",
"0.60527843",
"0.5713939",
"0.5679622",
"0.5649706",
"0.5587416",
"0.5569904",
"0.55439585",
"0.5499885",
"0.5495949",
"0.54799724",
"0.547841",
"0.54695714",
"0.5405273",
"0.5377096",
"0.53553575",
"0.53518355",
"0.53368825",
"0.5336279",
"0.53353",
"0.5322059",
"0.53028977",
"0.5301791",
"0.5276786",
"0.5271405",
"0.5244533",
"0.5242322",
"0.52260244"
] | 0.6472747 | 0 |
Calculate track [degrees] between flights [degrees] | def calculateTrackBetweenFlights(lat1,lon1,lat2,lon2):
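    # geographiclib's Inverse() solves the inverse geodesic problem on the WGS84
    # ellipsoid; 'azi1' is the initial bearing, in degrees, from point 1 towards point 2.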
return Geodesic.WGS84.Inverse(lat1,lon1, lat2, lon2)['azi1'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def steps_to_angle():\n pass",
"def getFlightAngles():\n\n flight_angles = RoboCaller().call(\"getFlightAngles\", \"int\")\n for i in range(len(flight_angles)):\n flight_angles[i] = (flight_angles[i] + 2**15) % 2**16 - 2**15\n return flight_angles",
"def getTheta(self, trackWidth):\n leftDist = leftUS.sensor_detect()\n print(\"LEFT US: \" + str(leftDist))\n rightDist = rightUS.sensor_detect()\n print(\"RIGHT US: \" + str(rightDist))\n #totalWidth (hypotenuse) = leftUS + rightUS + robotWidth\n totalWidth = leftDist + rightDist + 6\n try:\n print(math.acos(trackWidth/totalWidth))\n return math.acos(trackWidth/totalWidth)\n except ValueError:\n return 0",
"def wind_bearing(self) -> float:\r\n return self._first_timeserie[\"data\"][\"instant\"][\"details\"][\r\n \"wind_from_direction\"\r\n ]",
"def _angle_of_attack(self, rel_wind, blade_chord):\n # blade_chord_vector - (relative_wind + pi)\n # rel_oposite = rel_wind.rotated(math.pi)\n aoa_rad = rel_wind.theta - blade_chord.theta\n aoa_rad = vec.normalize_angle(aoa_rad)\n aoa_360 = aoa_rad * 360 / math.tau\n return aoa_rad, aoa_360",
"def lunar_phase(cls, tee):\n return mod(cls.lunar_longitude(tee) - cls.hindu_solar_longitude(tee), 360)",
"def get_track(segment_courant):\n\n\t# construction segment vertical pour calculer la route.\n\tseg_calcul = g.Segment(segment_courant.start, g.Point(segment_courant.start.x,segment_courant.start.y + 100))\n\ttrack = np.arccos(seg_calcul.scal(segment_courant)/\n\t\t\t\t\t ((seg_calcul.norm())*(segment_courant.norm())))\n\n\t#track positive pour un virage a droite\n\tif seg_calcul.det(segment_courant)>0:\n\t\t#print(\"track=\", -track * (RAD2DEG))\n\t\treturn -track #en RAD\n\telse :\n\t\t#print(\"track=\", track * (RAD2DEG))\n\t\treturn track #en RAD",
"async def get_radec(self, **kwargs: Any) -> Tuple[float, float]:\n return float(self._telescope.position.ra.degree), float(self._telescope.position.dec.degree)",
"def get_true_bearing(shot, top):\n tripidx = shot[\"trip\"]\n\n if tripidx != -1:\n decl = top[\"trips\"][tripidx][KEY_DECLINATION]\n else:\n decl = 0\n\n return shot[\"compass\"] + decl",
"def compute_steering_angle(self, frame):\n preprocessed = img_preprocess(frame)\n X = np.asarray([preprocessed])\n #steering_angle = self.model.predict(X)[0]\n steering_angle = self.model(X, training=False)[0]\n\n logging.debug('new steering angle: %s' % steering_angle)\n return int(steering_angle + 0.5) # round the nearest integer",
"def get_fde(forecasted_trajectory, gt_trajectory) -> float:\n fde = torch.sqrt(\n (forecasted_trajectory[:,-1, 0] - gt_trajectory[:,-1, 0]) ** 2\n + (forecasted_trajectory[:,-1, 1] - gt_trajectory[:,-1, 1]) ** 2\n )\n return fde.mean()",
"def calculate_bearing_difference(current_bearing, previous_bearing):\n\n difference = current_bearing - previous_bearing\n\n while difference < -180:\n difference += 360\n while difference > 180:\n difference -= 360\n\n return difference",
"def meters_to_decimal_degrees(value):\n return value * 360.0 / EARTH_RADIUS",
"def rhumb_bearing(start,finish):\n s = math.pi * np.squeeze(np.array(start)) / 180\n f = math.pi * np.squeeze(np.array(finish)) / 180\n\n delta_lat = math.log(math.tan(math.pi/4 + f[0]/2)/\n math.tan(math.pi/4 + s[0]/2))\n delta_lon = f[1]-s[1]\n\n if abs(delta_lon) > math.pi:\n if delta_lon > 0:\n delta_lon = -2*math.pi + delta_lon\n else:\n delta_lon = 2*math.pi + delta_lon\n\n res = 180*math.atan2(delta_lon,delta_lat)/math.pi\n\n return (res + 360) % 360",
"def phase(self):\n return -self.attrs['RFphase']*2*np.pi",
"def degrees(x):\n return 0.0",
"def direction_diff(direction_a, direction_b):\n diff = abs(direction_a - direction_b)\n return diff if diff < math.pi else 2*math.pi - diff",
"def comp_angle_opening_magnet(self):\n\n if self.W1 > 0:\n Rbo = self.get_Rbo()\n return float(2 * arcsin(self.W1 / (2 * Rbo)))\n else:\n return self.comp_angle_magnet()",
"def angle(self) -> float:\n ...",
"def getAngDist(ra1, dec1, ra2, dec2): \n \n delt_lon = (ra1 - ra2)*np.pi/180.\n delt_lat = (dec1 - dec2)*np.pi/180.\n # Haversine formula\n dist = 2.0*np.arcsin( np.sqrt( np.sin(delt_lat/2.0)**2 + np.cos(dec1*np.pi/180.)*np.cos(dec2*np.pi/180.)*np.sin(delt_lon/2.0)**2 ) ) \n\n return dist/np.pi*180.",
"def deg2rad(a):",
"def angle(self) -> int:",
"def get_turn_degrees(self):\n self.turn_degrees = 360/self.num_protrusions\n return self.turn_degrees",
"def angle2fD(angle,units='degrees'):\n if units=='degrees':\n angle*=(math.pi/180.0)\n X = 4.0*math.tan(angle/4.0)\n fD = 1.0/X\n return fD",
"def ra2phi(ra: float) -> float:\n return np.pi / 180.0 * ra",
"def tp2rd(tht,phi):\n ra = phi/np.pi*180.0\n dec = -1*(tht/np.pi*180.0-90.0)\n return ra,dec",
"def convert_angle(self, event):\n try:\n #Compare other unit to one unit(degree)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"arcminute\": 0.016667, \"arcsecond\": 0.000278, \"circle\": 360, \"degree\": 1.0, \"gon\": 0.9, \"gradian\": 0.9, \"mil(Nato)\": 0.05625, \"mil(Soviet Union)\": 0.06, \"mil(Sweden)\": 0.057143, \"octant\": 45.0, \"quadrant\": 90.0, \"radian\": 57.29578, \"revolution\": 360.0, \"sextant\": 60.0, \"sign\": 30.0, \"turn\": 360.0}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)",
"def parang (hourangle, declination, latitude):\n\n return -np.arctan2 (-np.sin (hourangle),\n np.cos (declination) * np.tan (latitude)\n - np.sin (declination) * np.cos (hourangle))",
"def rad2deg(a):",
"def bearing(start,finish):\n\n s = math.pi * np.squeeze(np.array(start)) / 180\n f = math.pi * np.squeeze(np.array(finish)) / 180\n\n y = math.sin(f[1] - s[1]) * math.cos(f[0])\n x = math.cos(s[0])*math.sin(f[0]) - math.sin(s[0])*math.cos(f[0])*math.cos(f[1] - s[1])\n\n return math.atan2(y,x)/math.pi * 180 % 360"
] | [
"0.61580503",
"0.6018533",
"0.5900657",
"0.5821334",
"0.5786489",
"0.5732119",
"0.5710629",
"0.5670621",
"0.5646427",
"0.56237847",
"0.55656844",
"0.55639803",
"0.55621606",
"0.5554743",
"0.5529634",
"0.5524188",
"0.55181473",
"0.5487468",
"0.5469649",
"0.5459477",
"0.54507184",
"0.5448711",
"0.54375315",
"0.54375297",
"0.54278725",
"0.5424233",
"0.5396216",
"0.5389427",
"0.53861237",
"0.537966"
] | 0.7136936 | 0 |
Checking if tracks match | def checkTracks(track1,track2):
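    # Two tracks count as matched when they differ by no more than the
    # TRACKS_DIFFERENCE threshold (presumably a module-level constant in the source).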
    matched = abs(track1 - track2) <= TRACKS_DIFFERENCE
    return matched | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_matching_tracks(self):\n\n # 5037: Pop 101 (feat. Anami Vice) by Marianas Trench\n # 8755 : Satisfied (feat. Miguel & Queen Latifah) by Sia\n # 6699 : Un Besito Mas (feat. Juan Luis Guerra) by Jesse & Joy\n targets = {5037: '2fGFaTDbE8aS4f31fM0XE4',\n 8755: '1ybJ2itxCxPCPkcA9sOgTO',\n 6699: '1182pxG4uNxr3QqIH8b8k0',\n }\n\n matches = {track.i_id: track.id\n for track in self.tracks\n if track.i_id in targets}\n\n for i_id, s_id in targets.iteritems():\n self.assertEqual(s_id, matches[i_id])",
"def _match_tracks(artist, title, mb_tracks):\n # pylint: disable=R0914\n dbg(\"artists is %s\", artist)\n dbg(\"title is %s\", title)\n title_artist_str = c.g + title + c.w, c.g + artist + c.w\n xprint(\"\\nSearching for %s by %s\\n\\n\" % title_artist_str)\n\n def dtime(x):\n \"\"\" Format time to M:S. \"\"\"\n return time.strftime('%M:%S', time.gmtime(int(x)))\n\n # do matching\n for track in mb_tracks:\n ttitle = track['title']\n length = track['length']\n xprint(\"Search : %s%s - %s%s - %s\" % (c.y, artist, ttitle, c.w,\n dtime(length)))\n q = \"%s %s\" % (artist, ttitle)\n w = q = ttitle if artist == \"Various Artists\" else q\n query = generate_search_qs(w, 0, result_count=50)\n dbg(query)\n have_results = _search(q, query, splash=False, pre_load=False)\n\n if not have_results:\n xprint(c.r + \"Nothing matched :(\\n\" + c.w)\n continue\n\n results = g.model.songs\n s, score = _best_song_match(results, artist + \" \" + ttitle, length)\n cc = c.g if score > 85 else c.y\n cc = c.r if score < 75 else cc\n xprint(\"Matched: %s%s%s - %s \\n[%sMatch confidence: \"\n \"%s%s]\\n\" % (c.y, s.title, c.w, fmt_time(s.length),\n cc, score, c.w))\n yield s",
"def match_track_spotify(\n track: Track,\n access_token: str,\n match_title=True,\n match_album=True,\n match_artist=True,\n *match_custom\n) -> bool:\n # Make sure all the custom attributes are valid\n for req in match_custom:\n if not hasattr(track, req):\n raise AttributeError\n spotify_results = spotify_track_search(\n \"{} {}\".format(track.title, track.artist)\n if track.artist != UNKNOWN_ARTIST\n else track.title,\n access_token,\n )\n if \"error\" in spotify_results:\n print(\"error {} {}\".format(spotify_results[\"status\"], spotify_results[\"error\"]))\n return False\n for strack in spotify_results:\n if match_title and strack[\"name\"] != track.title:\n continue\n if match_artist and strack[\"artists\"][0][\"name\"] != track.artist:\n continue\n if match_album and strack[\"album\"][\"name\"] != track.album:\n continue\n reqs_matched = False if match_custom else True\n for req in match_custom:\n if req not in strack:\n raise AttributeError\n if strack[req] != getattr(track, req):\n reqs_matched = False\n break\n if not reqs_matched:\n continue\n track.spotify_id = strack[\"id\"]\n track.save()\n return True\n return False",
"async def async_is_playing_new_track(self):\n if self._playing_mediabrowser and self._media_source_uri is not None:\n # don't trigger new track flag for local mediabrowser files\n return False\n \n if self._icecast_name != None:\n import unicodedata\n artmed = unicodedata.normalize('NFKD', str(self._media_artist) + str(self._media_title)).lower()\n artmedd = u\"\".join([c for c in artmed if not unicodedata.combining(c)])\n if artmedd.find(self._icecast_name.lower()) != -1 or artmedd.find(self._source.lower()) != -1:\n # don't trigger new track flag for icecast streams where track name contains station name or source name; save some energy by not quering last.fm with this\n self._media_image_url = None\n return False\n\n if self._media_artist != self._media_prev_artist or self._media_title != self._media_prev_title:\n return True\n else:\n return False",
"def album_detection(user):\n seen_tracks = user.seen_tracks\n\n list_of_albums = {}\n album_number_of_tracks = {}\n for track in seen_tracks:\n if \"album\" not in track:\n continue\n\n if track[\"name\"] == \"sanjake\":\n continue\n\n album_name = track[\"album\"][\"name\"]\n if album_name not in list_of_albums:\n list_of_albums[album_name] = 0\n album_number_of_tracks[album_name] = track[\"album\"][\"total_tracks\"]\n list_of_albums[album_name] += 1\n\n if list_of_albums[album_name] > 1 and list_of_albums[album_name] == album_number_of_tracks[album_name]:\n print(f\"Album search detected: {album_name}, number of tracks: {album_number_of_tracks[album_name]}\")\n print(f\"User: {user.email_address}\")",
"def test_return_track():\n from ..lbvTracks import get_lbv_track\n spiral_arm, track = survey.get_spiral_slice(track = \"carina_near\", \n return_track = True)\n track2 = get_lbv_track(reid_track = \"carina_near\")\n\n\n assert np.allclose(track, track2, equal_nan = True)",
"def check_video_pruning(self, artist, name, title):\n\n\t\tweeders = ['cover','live','vevo','remix']\t\t\t# words that we want to ignore in our video search\n\t\tname_contains_weed_word = any(weed_word in name.lower() for weed_word in weeders) \n\t\tartist_cointains_weed_word = any(weed_word in artist.lower() for weed_word in weeders)\n\t\tvideo_title_contains_weed_word = any(weed_word in title.lower() for weed_word in weeders)\n\n\t\t# ensure that the artist or track name does not actually include the weeders Ex. live house\n\t\tif video_title_contains_weed_word and (name_contains_weed_word is False and artist_cointains_weed_word is False):\n\t\t\tret_val = True\n\t\telse:\n\t\t\tret_val = False\n\n\n\n\t\t# check duration of song\n\n\t\treturn ret_val",
"def test_get_pl_tracks(self):\n\n # Playlist 1\n result1 = self.client.get(\"playlist/pl1\")\n self.assertEqual(result1.status_code, 200)\n self.assertIn(b\"Track 1\", result1.data)\n self.assertIn(b\"Track 3\", result1.data)\n self.assertNotIn(b\"Track 5\", result1.data)\n\n # Playlist 2\n result2 = self.client.get(\"playlist/pl2\")\n self.assertEqual(result2.status_code, 200)\n self.assertIn(b\"Track 4\", result2.data)\n self.assertIn(b\"Track 5\", result2.data)\n self.assertNotIn(b\"Track 1\", result2.data)",
"def test_display_tracks(self):\n\n result = self.client.get(\"/tracks\")\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Track 1\", result.data)\n self.assertIn(b\"Track 5\", result.data)",
"def isTrackSolo(*args, **kwargs):\n pass",
"def tracks_are_similar(track_a, track_b):\n sim_ratio_ab = track_a.similarity_ratio(track_b)\n sim_ratio_ba = track_b.similarity_ratio(track_a)\n sim_ratio = max(sim_ratio_ab, sim_ratio_ba)\n log.info(f\"Similarity checks: {sim_ratio_ab}, {sim_ratio_ba}\")\n return sim_ratio >= MINIMUM_ACCEPTED_TRACK_MATCH_RATIO",
"def match(self) -> bool:",
"def isTrackSelected(*args, **kwargs):\n pass",
"def matches(self):\n return False",
"def test_songs_played(self):\n self.assertEqual(self.show.song_booleans, {\n 'you-enjoy-myself': 1,\n 'tweezer': 0\n })",
"def test_two_tracks_same_album(self):\n self.add_mp3(filename='1.mp3')\n self.add_mp3(filename='2.mp3')\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, True)\n self.assertEqual(self.get_album_count(), 1)\n album = Album.get_by_artist_album(self.app.curs, 'Artist', 'Album')\n self.assertNotEqual(album, None)\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.album_type, 'album')\n self.assertEqual(album.totalseconds, 4)\n self.assertEqual(album.totaltracks, 2)",
"def is_trombone_track(track):\n for message in track:\n if message.type == \"program_change\" and hasattr(message, \"program\"): \n return 56 < message.program and message.program < 65\n return False",
"def on_track(self):\n for goal in self.goals:\n if not goal.on_track:\n return False\n return True",
"def test_get_all_need_transform_two_tracks(self):\n track = Track(artist='Artist', album='Album', title='Title')\n pk = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n pk = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n self.assertEqual(self.get_track_count(), 2)\n\n tracks = Track.get_all_need_transform(self.app.curs, 1)\n self.assertEqual(len(tracks), 2)",
"def test_audio_features(self):\n\n # 1ehPJRt49h6N0LoryqKZXq, 8737: How Far I'll Go (Alessia Cara Version) by Alessia Cara\n # 2fGFaTDbE8aS4f31fM0XE4, 5037: Pop 101 (feat. Anami Vice) by Marianas Trench\n targets = {8737: {'danceability': 0.317,\n 'energy': 0.562,\n 'key': 9,\n 'loudness': -9.609,\n 'mode': 1,\n 'speechiness': 0.395,\n 'acousticness': 0.124,\n 'instrumentalness': 0.000144,\n 'liveness': 0.0667,\n 'valence': 0.127,\n 'tempo': 181.100,\n 'duration_ms': 175507,\n 'time_signature': 4,\n },\n 5037: {'danceability': 0.756,\n 'energy': 0.658,\n 'key': 11,\n 'loudness': -6.128,\n 'mode': 0,\n 'speechiness': 0.202,\n 'acousticness': 0.0581,\n 'instrumentalness': 0,\n 'liveness': 0.0674,\n 'valence': 0.640,\n 'tempo': 120.018,\n 'duration_ms': 247829,\n 'time_signature': 4,\n },\n }\n\n results = {track.i_id: track for track in self.tracks if track.i_id in targets}\n\n for target, expecteds in targets.iteritems():\n result = results[target]\n for key, expected in expecteds.iteritems():\n self.assertEqual(result.__getattr__(key), expected)",
"def is_matching(self, plays):\n match = self.plays_to_edges(plays)\n return nx.is_maximal_matching(self.graph, match) # TODO",
"def tracks(self):\r\n return None",
"def test_track(self):\r\n track = FedexTrackRequest(CONFIG_OBJ)\r\n track.TrackPackageIdentifier.Type = 'TRACKING_NUMBER_OR_DOORTAG'\r\n track.TrackPackageIdentifier.Value = '798114182456'\r\n track.send_request()\r\n \r\n for match in track.response.TrackDetails:\r\n # This should be the same tracking number on the response that we\r\n # asked for in the request.\r\n self.assertEqual(match.TrackingNumber, tracking_num)",
"def test_two_tracks_mismatched_album(self):\n self.add_mp3(filename='1.mp3')\n self.add_mp3(filename='2.mp3', set_album=True, album='Album 2')\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, False)\n self.assertIn('changed to', status)\n self.assertEqual(self.get_album_count(), 0)",
"def test_get_all_need_transform_no_tracks_matched(self):\n track = Track(artist='Artist', album='Album', title='Title', last_transform=1)\n pk = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n self.assertEqual(self.get_track_count(), 1)\n\n tracks = Track.get_all_need_transform(self.app.curs, 1)\n self.assertEqual(len(tracks), 0)",
"def test_tracks():\n f = Level3File(get_test_data('nids/KOUN_SDUS34_NSTTLX_201305202016'))\n for data in f.sym_block[0]:\n if 'track' in data:\n x, y = np.array(data['track']).T\n assert len(x)\n assert len(y)",
"def played(p1, p2):\n conn, cur = connect()\n if p1 > p2:\n p1, p2 = p2, p1\n cur.execute(\"SELECT * FROM MATCHES WHERE P1 = %s and P2 = %s;\", (p1, p2,))\n row = cur.fetchone()\n conn.close()\n return row is not None",
"def matches(self):\n pass",
"def _trackHasTrackOrSubTrackItems(self, track):\n if (\n len(track.items()) > 0 or\n (isinstance(track, hiero.core.VideoTrack) and len( [ item for item in itertools.chain(*track.subTrackItems()) ] ) > 0)\n ):\n return True\n else:\n return False",
"def testForcedTrackTrue(self):\n\n trackLine = _buildTrackLine(20, 'audio',\n {'hello': 'goodbye', 'forced_track': '1'})\n\n trackID, trackType, trackDict = tools._trackInfo(trackLine)\n\n self.assertTrue(\n 'forced_track' in trackDict.keys()\n )\n\n self.assertEqual(\n trackDict['forced_track'],\n '1'\n )"
] | [
"0.7484322",
"0.7024405",
"0.6608633",
"0.6429307",
"0.6215027",
"0.6211795",
"0.61840993",
"0.617188",
"0.615257",
"0.61429",
"0.61068577",
"0.60989845",
"0.6077977",
"0.6003023",
"0.6001033",
"0.5992812",
"0.59848595",
"0.59843254",
"0.59752345",
"0.59705555",
"0.5970469",
"0.5955978",
"0.5937416",
"0.59163517",
"0.59126526",
"0.5897822",
"0.58872354",
"0.5883942",
"0.5856121",
"0.57914907"
] | 0.75236106 | 0 |
Returns the latitude and longitude of a point at a distance dist [m] with a degree deg from lat,lon | def getPoint(lat,lon,deg,dist):
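    # geographiclib's Direct() solves the forward geodesic problem on the WGS84 ellipsoid:
    # given a start point, an azimuth [deg] and a distance [m] it returns the destination
    # as 'lat2'/'lon2' in the result dict.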
    point = {}
    geo = Geodesic.WGS84.Direct(lat, lon, deg, dist)
    point['LAT'] = geo['lat2']
    point['LON'] = geo['lon2']
    return point | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def nearlonlat_zl(lon,lat,lonp,latp): # needed for the next function get_FVCOM_bottom_temp \r\n # approximation for small distance \r\n cp=np.cos(latp*np.pi/180.) \r\n dx=(lon-lonp)*cp\r\n dy=lat-latp \r\n xi=np.argmin(abs(dx)) \r\n yi=np.argmin(abs(dy))\r\n min_dist=111*np.sqrt(dx[xi]**2+dy[yi]**2)\r\n return xi,yi,min_dist",
"def dd2dm(lat,lon):\r\n lat_d = int(abs(lat)) #calculate latitude degrees\r\n lat_m = (abs(lat) - lat_d) * 60. #calculate latitude minutes\r\n\r\n lon_d = int(abs(lon))\r\n lon_m = (abs(lon) - lon_d) * 60.\r\n \r\n la=lat_d*100.+lat_m\r\n lo=lon_d*100.+lon_m\r\n return la,lo",
"def dist_in_meters(coords, pt, is_geo=False):\n xe = coords[:, 0]\n ye = coords[:, 1]\n xp = pt[0]\n yp = pt[1]\n if is_geo:\n d = _get_dist_geo(xe, ye, xp, yp)\n else:\n d = np.sqrt(np.square(xe - xp) + np.square(ye - yp))\n return d",
"def calcPosition (lat, lon):\n nauticalMilePerLat = 60.00721\n nauticalMilePerLongitude = 60.10793\n rad = math.pi / 180.0\n milesPerNauticalMile = 1.15078\n \n y = lat * nauticalMilePerLat\n x = math.cos(lat * rad) * lon * nauticalMilePerLongitude\n\n return x * milesPerNauticalMile * 1609.344, y * milesPerNauticalMile * 1609.344",
"def _getXYZ ( lon, lat ):\n d2r = pi / 180.\n rlon, rlat = ( d2r * lon, d2r * lat )\n x = cos(rlat) * cos(rlon)\n y = cos(rlat) * sin(rlon)\n z = sin(rlat)\n return (x,y,z)",
"def project(self, (lng, lat)):\n x = lng * DEG_TO_RAD\n lat = max(min(MAX_LATITUDE, lat), -MAX_LATITUDE)\n y = lat * DEG_TO_RAD\n y = math.log(math.tan((math.pi / 4) + (y / 2)))\n return (x*EARTH_RADIUS, y*EARTH_RADIUS)",
"def gps_to_coords(self,lat,lon):\n\n if (lat <= self.min_lat or lat >= self.max_lat or lon <= self.min_lon or lon >= self.max_lon):\n return (-1,-1)\n\n lat_spot = int((self.max_lat-lat)/self.lat_step)\n lon_spot = int((lon-self.min_lon)/self.lon_step)\n #print \"lat: %f lon: %f lat_spot: %f lon_spot: %f\" % (lat,lon,lat_spot,lon_spot)\n return (lat_spot,lon_spot)",
"def getPoint(self, a):\n lng = self.source.center.lng + (self.target.center.lng - self.source.center.lng) * min(a, 1)\n lat = self.source.center.lat + (self.target.center.lat - self.source.center.lat) * min(a, 1)\n return lng, lat",
"def coord_distance(lat1, lon1, lat2, lon2):\n lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2\n km = 2 * 6367 * math.asin(math.sqrt(a))\n mi = 0.621371 * km\n return mi",
"def lat_lon_box(lat, dist):\n r_earth = 6371.\n d_2r = dist/(2.*r_earth)\n dlat = 2. * (d_2r)\n dlon = 2. * np.arcsin((np.sin(d_2r))/(np.cos(lat)))\n dlat *= 180./np.pi\n dlon *= 180./np.pi\n return abs(dlat), abs(dlon)",
"def point(pt, angle, dist):\n x, y = pt\n return dist * cos(angle) + x, dist * sin(angle) + y,",
"def coord_dist_meters(lat1, lon1, lat2, lon2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = math.sin(dlat/2)**2 + math.cos(lat1) * \\\n math.cos(lat2) * math.sin(dlon/2)**2\n c = 2 * math.asin(math.sqrt(a))\n r = 6371000 # Radius of earth in meters. Use 3956 for miles\n return c * r",
"def ll2xyz(lon_pt,lat_pt):\n\n xPt = np.cos(lat_pt) * np.cos(lon_pt)\n yPt = np.cos(lat_pt) * np.sin(lon_pt)\n zPt = np.sin(lat_pt)\n return [xPt,yPt,zPt]",
"def calculateDistanceBetweenPoints(lat1,lon1,lat2,lon2):\n\treturn Geodesic.WGS84.Inverse(lat1,lon1, lat2, lon2)['s12']",
"def distance_to_point(self, lat, lon):\n R = 6371. # Radius of earth in km\n lat = np.radians(lat)\n lon = np.radians(lon)\n dlat = lat - xu.radians(self['lat'].values)\n dlon = lon - xu.radians(self['lon'].values)\n a = xu.sin(dlat/2)**2 + xu.cos(lat) * xu.cos(xu.radians(self['lat'].values)) * \\\n xu.sin(dlon/2)**2\n c = 2 * xu.arctan2(xu.sqrt(a), xu.sqrt(1.0-a))\n return R*c",
"def compute_loc(self, loc, dist, bearing):\n lat = self.nwsli_provider[loc][\"lat\"]\n lon = self.nwsli_provider[loc][\"lon\"]\n # shortcut\n if dist == 0:\n return lon, lat\n meters = distance(float(dist), \"MI\").value(\"M\")\n northing = meters * math.cos(math.radians(bearing)) / 111111.0\n easting = (\n meters\n * math.sin(math.radians(bearing))\n / math.cos(math.radians(lat))\n / 111111.0\n )\n return lon + easting, lat + northing",
"def lat_lons(self):",
"def getlatlon(self):\n lat = np.pi/2.0 - self._th\n time = self.gettime()\n lon = self._phi - 2*np.pi*time/86164.09164\n return lat, lon",
"def map(self, lat, long):\r\n rxy = self._r*np.sqrt(1-np.cos(lat))\r\n x = rxy*np.cos(long)\r\n y = rxy*np.sin(long)\r\n return (x, y)",
"def map(self, lat, long):\r\n rxy = self._r*lat/(np.pi/2)\r\n x = rxy*np.cos(long)\r\n y = rxy*np.sin(long)\r\n return (x, y)",
"def coords(self) -> Tuple[float, float]:\n return self.lat, self.lng",
"def get_latlon_point(self, row, col):\n p1 = Proj(self.src.crs)\n window = rasterio.windows.Window(col, row, 1, 1)\n trnsfrm = self.src.window_transform(window)\n T1 = trnsfrm * Affine.translation(0.5, 0.5)\n p2 = Proj(proj='latlong', datum='WGS84')\n x, y = self.src.xy(row, col)\n lon, lat = transform(p1, p2, x, y)\n return lat, lon",
"def merc(lat, lon):\n\tr_major = 6378137.000\n\tx = r_major * math.radians(lon)\n\tscale = x/lon\n\ty = 180.0/math.pi * math.log(math.tan(math.pi/4.0 + lat * (math.pi/180.0)/2.0)) * scale\n\treturn (x, y)",
"def lat_lng(row):\r\n lat = row[\"latitude\"]\r\n lng = row[\"longitude\"]\r\n n = int(lat/GRANULARITY)\r\n nlat_start = n * GRANULARITY\r\n nlat_end = nlat_start + GRANULARITY\r\n nlg=int(lng/GRANULARITY)\r\n nlng_start = nlg * GRANULARITY\r\n nlng_end = nlng_start + GRANULARITY\r\n latlng=[(nlat_start,nlng_start), (nlat_start,nlng_end), (nlat_end,nlng_end), (nlat_end,nlng_start)]\r\n return latlng",
"def calculate_distance(self, my_lat, my_lon, pts, is_mile=False):\n scale = 1.0\n if is_mile:\n scale = 0.000621371\n\n if isinstance(my_lat, tf.Tensor) or isinstance(pts, tf.Tensor):\n return tf.sqrt(((my_lat - pts[:, 0]) * 110000)**2 + ((my_lon - pts[:, 1]) * 90000) **\n 2) * scale\n else:\n return np.sqrt(((my_lat - pts[:, 0]) * 110000)**2 + ((my_lon - pts[:, 1]) * 90000) **\n 2) * scale",
"def coordinate(self) -> Tuple[float, float]:\n return self.lat, self.lon",
"def getDist(lat1,long1,lat2,long2):\n\tlat1 = math.radians(lat1)\n\tlong1 = math.radians(long1)\n\tlat2 = math.radians(lat2)\n\tlong2 = math.radians(long2)\n\tR = 6371 # km\n\td = cmath.acos(cmath.sin(lat1) * cmath.sin(lat2) + \\\n\tcmath.cos(lat1) * cmath.cos(lat2) *\n\tcmath.cos(long2 - long1)) * R\n\treturn abs(d) # cast to float",
"def coord_distance(lat1, lon1, lat2, lon2):\n\tlon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])\n\tdlon = lon2 - lon1\n\tdlat = lat2 - lat1\n\ta = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2\n\tc = 2 * math.asin(math.sqrt(a))\n\tkm = 6367 * c \n\treturn km",
"def coords_to_gps(self,coords):\n return ((self.max_lat - (self.lat_step * (0.5+coords[0]))),(self.min_lon + (self.lon_step * (0.5+coords[1]))))",
"def get_latlon():\n\t\n iss.compute() # Get the lat/long values from ephem\n long_value = [float(i) for i in str(iss.sublong).split(\":\")]\n if long_value[0] < 0:\n long_value[0] = abs(long_value[0])\n cam.exif_tags['GPS.GPSLongitudeRef'] = \"W\"\n else:\n cam.exif_tags['GPS.GPSLongitudeRef'] = \"E\"\n cam.exif_tags['GPS.GPSLongitude'] = '%d/1,%d/1,%d/10' % (long_value[0], long_value[1], long_value[2]*10)\n lat_value = [float(i) for i in str(iss.sublat).split(\":\")]\n if lat_value[0] < 0:\n lat_value[0] = abs(lat_value[0])\n cam.exif_tags['GPS.GPSLatitudeRef'] = \"S\"\n else:\n cam.exif_tags['GPS.GPSLatitudeRef'] = \"N\"\n cam.exif_tags['GPS.GPSLatitude'] = '%d/1,%d/1,%d/10' % (lat_value[0], lat_value[1], lat_value[2]*10)\n return (iss.sublat / degree, iss.sublong / degree)"
] | [
"0.6628833",
"0.6418653",
"0.63349956",
"0.6279293",
"0.6263374",
"0.6240362",
"0.61993515",
"0.61689377",
"0.61456704",
"0.613241",
"0.6097714",
"0.6058454",
"0.6048507",
"0.60454327",
"0.6029386",
"0.6023586",
"0.60026014",
"0.6000665",
"0.59811217",
"0.59565634",
"0.59213364",
"0.59075135",
"0.5907362",
"0.5903214",
"0.5902074",
"0.5889967",
"0.58852965",
"0.587671",
"0.5876278",
"0.587599"
] | 0.7408886 | 0 |
Use the current date, add ".0", to build a suffix for the Docker tag. | def _build_tag_suffix() -> str:
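    # Current UTC time converted to the local timezone; rendered as ".YYYYMMDD.0",
    # e.g. ".20240527.0".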
now = datetime.datetime.now(tz=datetime.timezone.utc).astimezone()
return now.strftime(".%Y%m%d.0") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tag_time():\n return time.strftime(\"%Y-%m-%d_%I.%M%p_\")",
"def build_image_name(self, tag):\n return self.repository_name + ':' + tag",
"def date_tag():\n import pylab\n pylab.figtext(0.04, 0.02, str(datetime.datetime.today())[:16], size=8)",
"def docker_image_tag(self, app):\n return f\"briefcase/{app.bundle}.{app.app_name.lower()}:{app.target_vendor}-{app.target_codename}\"",
"def get_image_name(name: str, tag: str, image_prefix: str = \"\") -> str:\n versioned_tag = name.strip() + \":\" + tag.strip()\n if image_prefix:\n versioned_tag = image_prefix.strip().rstrip(\"/\") + \"/\" + versioned_tag\n return versioned_tag",
"def next_deploy_tag(location):\n ensure_dir(location)\n with utils.cd(location):\n timestamp = datetime.utcnow()\n date = timestamp.strftime('%F')\n cmd = ['/usr/bin/git', 'tag', '--list']\n tag_fmt = os.path.join(TAG_PREFIX, '{}', '*')\n cmd.append(tag_fmt.format(date))\n seq = len(subprocess.check_output(cmd).splitlines()) + 1\n tag_fmt = os.path.join(TAG_PREFIX, '{0}', '{1:04d}')\n return tag_fmt.format(date, seq)",
"def do_version_tag(args, image_name_tag, image_name):\n if args.versiontag is True:\n date_stamp = \"{:%Y%m%d%H%M%S}\".format(datetime.now())\n version_tag = args.tag + '-' + date_stamp\n image_name_version_tag = f\"{image_name}:{version_tag}\"\n return_code = tag(image_name_tag, image_name_version_tag)\n if return_code == 0:\n push(args, image_name_version_tag)",
"def file_suffix(self):\n return f'{self.image_count:05}' if self.sequential_naming else \\\n datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")",
"def to_filetag(self) -> str:\n return self.strftime(f\"{self.FormatCode.YEAR.WITH_CENTURY}{self.FormatCode.MONTH.NUM}{self.FormatCode.DAY.NUM}\")",
"def date() -> str:\n\n return datetime.strftime(datetime.today(), _fmt)",
"def create_simple_exp_name():\n now = datetime.datetime.now(dateutil.tz.tzlocal())\n timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')\n return timestamp",
"def generate_image_filename():\n now = datetime.now().strftime('%a-%w-%b-%H:%M:%S')\n return 'CCTV_{0}.jpg'.format(now)",
"def _create_time_stamp() -> str:\n\n return datetime.datetime.now().strftime(\"%Y%m%d\")",
"def current_date_time_stamp():\n return datetime.now().strftime('%Y.%m.%d %H:%M:%S.%f')[:-7]",
"def get_datecode():\n now = datetime.utcnow()\n return now.strftime(\"%Y%m%d\")",
"def __formatDate(self, num):\n if len(num) < 2:\n num = '0'+num\n return num",
"def format_version(epoch, version, release):\n full_version = \"{}:{}\".format(epoch, version) if epoch else version\n if release:\n full_version += \"-{}\".format(release)\n return full_version",
"def format_release_version(version, build_id_to_inject):\n subs = version.split(\".\")\n subs[-1] = build_id_to_inject\n return '.'.join(subs)",
"def container_image_name(registry, component_name, version):\n if version is None:\n image = component_name + ':dev'\n else:\n image = '%s/%s:%s' % (registry, component_name, version)\n\n return image",
"def get_current_date(fmt=\"%Y-%m-%d\"):\n return datetime.datetime.now().strftime(fmt)",
"def _generate_output_name(extension):\n output_name = 'TDG_{:%Y-%m-%d_%H-%M-%S}.{}'.format(datetime.now(), extension)\n return output_name",
"def get_version_string():\n major, minor, micro, patch, tag, relnum, is_release = VERSION\n\n version = '%s.%s' % (major, minor)\n\n if micro or patch:\n version += '.%s' % micro\n\n if patch:\n version += '.%s' % patch\n\n if tag != 'final':\n if tag == 'rc':\n version += ' RC'\n else:\n version += ' %s ' % tag\n\n version += '%s' % relnum\n\n if not is_release:\n version += ' (dev)'\n\n return version",
"def ingame_formatted(dt: datetime) -> str:\n return dt.strftime(\"%Y - %B\")",
"def date_stamp():\n return datetime.fromtimestamp(time()).strftime('%Y.%m.%d')",
"def get_file_name(image_dir, image_name_prefix, current_count):\n if imageNumOn:\n # you could also use os.path.join to construct image path file_path\n file_path = image_dir+ \"/\"+image_name_prefix+str(current_count)+\".jpg\"\n else:\n right_now = datetime.datetime.now()\n file_path = (\"%s/%s%04d%02d%02d-%02d%02d%02d.jpg\"\n % (image_dir, image_name_prefix,\n right_now.year, right_now.month, right_now.day,\n right_now.hour, right_now.minute, right_now.second))\n return file_path",
"def generate_envoy_image_name_from_tag(image_tag: str) -> str:\n image_prefix = get_envoy_image_prefix(image_tag)\n envoy_image = \"{prefix}:{hash}\".format(prefix=image_prefix, hash=image_tag)\n return envoy_image",
"def make_version_string(version_info):\n\n version_info = list(version_info)\n\n numbers = []\n while version_info and isinstance(version_info[0], int):\n numbers.append(str(version_info.pop(0)))\n version_str = '.'.join(numbers)\n\n if not version_info:\n return version_str\n\n assert len(version_info) % 2 == 0\n while version_info:\n suffix_type = version_info.pop(0)\n suffix_number = version_info.pop(0)\n\n if suffix_type in {'a', 'b', 'rc'}:\n suffix = f'{suffix_type}{suffix_number}'\n elif suffix_type in {'dev', 'post'}:\n suffix = f'.{suffix_type}{suffix_number}'\n else:\n raise ValueError(f\"Unknown suffix type '{suffix_type}'\")\n version_str += suffix\n\n return version_str",
"def format_date(value: int) -> str:\n\n return (datetime(1970, 1, 1) + timedelta(milliseconds=value)).strftime('%Y%m%d')",
"def default_timestamp():\n date = datetime.datetime.now().replace(microsecond=0)\n return date",
"def do_latest_tag(args, image_name_tag, image_name):\n if args.latest is True:\n if tag(image_name_tag, image_name+':latest'):\n push(args, image_name+':latest')"
] | [
"0.6210457",
"0.60344297",
"0.60149807",
"0.5938053",
"0.58830386",
"0.58757097",
"0.58196324",
"0.57140756",
"0.56970084",
"0.5608625",
"0.5577733",
"0.55712014",
"0.54981995",
"0.54633343",
"0.54130644",
"0.5384314",
"0.5372933",
"0.536144",
"0.53580433",
"0.5355393",
"0.53384244",
"0.5324651",
"0.5294476",
"0.5293207",
"0.5228636",
"0.5219814",
"0.52112895",
"0.5203191",
"0.51979357",
"0.51935875"
] | 0.7845467 | 0 |
Determine the sposi version to use; parse "wip" in a special way. | def osi_version() -> str:
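    # sp_osi may be None (autodetect the version), the literal "wip" (autodetect and
    # append the work-in-progress suffix), or an explicit version string returned as-is.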
if sp_osi is None:
return find.find_sp_osi_version()
if sp_osi == "wip":
return find.find_sp_osi_version() + defs.VERSION_WIP_SUFFIX
return sp_osi | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_friendly_of_version(self, ofproto):\n if ofproto.OFP_VERSION == 1:\n _of_version = \"1.0\"\n elif ofproto.OFP_VERSION == 4:\n _of_version = \"1.3\"\n else:\n _of_version = \"Unknown version \" + \\\n str(ofproto.OFP_VERSION)\n return _of_version",
"def test_fireweather():\n res = vtec.get_ps_string(\"FW\", \"A\")\n assert res == \"Fire Weather Watch\"\n res = vtec.get_ps_string(\"FW\", \"W\")\n assert res == \"Red Flag Warning\"",
"def test_fireweather():\n res = vtec.get_ps_string(\"FW\", \"A\")\n assert res == \"Fire Weather Watch\"\n res = vtec.get_ps_string(\"FW\", \"W\")\n assert res == \"Red Flag Warning\"",
"def get_ipver_str(ip_version):\n return IP_VERSION_DICT.get(ip_version, '')",
"def get_ip_version(network):\r\n if netaddr.IPNetwork(network).version == 6:\r\n return \"IPv6\"\r\n elif netaddr.IPNetwork(network).version == 4:\r\n return \"IPv4\"",
"def _version(self):\r\n # using url to distinguish between page versions.\r\n if self.product_page_url.find(\"/groceries/\")>1:\r\n return \"groceries\"\r\n\r\n if \"PDP-Version2\" in self.tree_html.xpath(\"//body/@class\")[0]:\r\n return \"version-2\"\r\n\r\n return \"direct\"",
"def _extract_nos_version(self, data) -> None:\n match = re.search(r'Junos: (\\S+)', data)\n if match:\n self.version = match.group(1).strip()\n else:\n self.logger.warning(\n f'Cannot parse version from {self.address}:{self.port}')\n self.version = \"all\"",
"def get_version():\n ver = '0.0.0'\n req = restcall(0, 'config', 10.0)\n if req['text'] is not None:\n try: \n tree = ET.fromstring(req['text'])\n ver = tree.findall('app_version')[0].text\n if ver is None:\n ver = '0.0.0'\n _LOGGER.info(\"ISY: firmware version: %s\", ver)\n except ET.ParseError:\n _LOGGER.error(\"No version information found on ISY.\")\n return ver",
"def get_ip_version(network):\n if netaddr.IPNetwork(network).version == 6:\n return \"IPv6\"\n elif netaddr.IPNetwork(network).version == 4:\n return \"IPv4\"",
"def get_version(self) -> str:\n return 'syslog2IRC'",
"def _ismip(lp):\n return lp.isMIP() or len(lp.sos1) or len(lp.sos2)",
"def get_Version(param):\n\n line = subprocess.check_output(['grep', 'Processed with ASKAPsoft', param])\n str_line = line.decode('utf-8')\n\n askapsoft = re.findall('ASKAPsoft\\ version\\ [0-9].+', str_line)[0].split()[-1]\n\n return askapsoft",
"def parse_version(header, data):\n log = unpack('<I', data)\n game, save = unpack('<7sxf', header)\n if save == -1:\n save = unpack('<I', header)\n if save == 37:\n save = 37.0\n else:\n save /= (1<<16)\n version = get_version(game.decode('ascii'), round(save, 2), log)\n return version, game.decode('ascii'), round(save, 2), log",
"def get_version():\n return '%d.%d.%d' % version_info",
"def version(self):\n version = self.get_rpc().getnetworkinfo()[\"subversion\"]\n version = version.replace(\"/\", \"\").replace(\"Satoshi:\", \"v\")\n return version",
"def wordnet_pos(pos):\n pos = (pos or '').strip(punctuation + whitespace).lower()\n if pos.startswith('j') or 'adj'.startswith(pos):\n return wordnet.ADJ\n elif pos.startswith('v'):\n return wordnet.VERB\n elif pos.startswith('n'):\n return wordnet.NOUN\n elif pos.startswith('r') or 'adv'.startswith(pos):\n return wordnet.ADV\n elif pos.startswith('in') or 'prep'.startswith(pos):\n return u'p'\n elif pos.startswith('fw'):\n return u'v'\n else:\n return None",
"def get_fw_version(self):\n summary = self.get_version_summary()\n pattern = '\\$.*? .*? .*? .*? .*? .*? .*? .*? (.*?) \\r\\n' \n fw_version = re.findall(pattern,summary).pop()\n return fw_version",
"def get_soc_name():\n return get_soc_spec(\"SOC_VERSION\")",
"def _fix_version(name, version):\n version = sanitise_utf8(version)\n if version.lower().startswith(name.lower()):\n version = version[len(name):].lstrip()\n # Some engines unfortunately include usage instructions in the version\n # string (apparently for the sake of kgsGTP); try to clean this up.\n if len(version) > 64:\n # MoGo\n a, b, c = version.partition(\". Please read http:\")\n if b:\n return a\n # Pachi\n a, b, c = version.partition(\": I'm playing\")\n if b:\n return a\n # Other\n return version.split()[0]\n return version",
"def determine_senior_version(model_versions: dict) -> str:\r\n senior_ver = parse(\"0.0\")\r\n for ver in model_versions.values():\r\n if senior_ver < parse(ver):\r\n senior_ver = parse(ver)\r\n return str(senior_ver)",
"def protocol(ver):\r\n if ver == 1:\r\n return 1\r\n\r\n if ver == 2:\r\n return 2\r\n\r\n\r\n raise ValueError",
"def get_nipper_version():\n nipper_version = nipper_xml.find(\"./information/generator/version\").text\n return nipper_version",
"def siteipstr(self) :\n\t\ttry :\n\t\t\treturn self._siteipstr\n\t\texcept Exception as e:\n\t\t\traise e",
"def version(p):\n # Input file\n f = '/'.join([p, 'collector.stats'])\n check_path(f)\n\n # Open file with universal newline support\n with open(f, 'rU') as fh:\n for line in fh.readlines():\n if 'Appliance version' in line:\n version = line.split(':')[1].strip()\n break\n\n return version",
"def get_version(self):\n\t\ttry:\n\t\t\tp = subprocess.Popen([self.sexpath], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\texcept:\n\t\t\traise RuntimeError(\"Could not run SExtractor. Is the path '%s' correct ? If not, specify sexpath='/path/to/sextractor'\" % self.sexpath)\n\t\tout, err = p.communicate()\n\t\tversion_match = re.search(\"[Vv]ersion ([0-9\\.])+\", err.decode(encoding='UTF-8'))\n\t\tif version_match is False:\n\t\t\traise RuntimeError(\"Could not determine SExctractor version, check the output of running '%s'\" % (self.sexpath))\n\t\tversion = str(version_match.group()[8:])\n\t\tassert len(version) != 0\n\t\treturn version",
"def machinesig(p):\n l = license(p)\n if l is not None:\n machinesig = l.split('-')[2]\n else:\n machinesig = None\n\n return machinesig",
"def _wnpos(pos: str) -> str:\n pos = pos.lower()\n wnpos = \"n\"\n\n if pos.startswith(\"j\"):\n wnpos = \"a\"\n elif pos[0] in ('n', 'r', 'v'):\n wnpos = pos[0]\n\n return wnpos",
"def software(s):\n try:\n import maya.mel as mel\n version = mel.eval(\"$tmp = getApplicationVersionAsFloat();\")\n return \"Maya, %s\" % version\n except ImportError:\n pass\n return \"Unknown software.\"",
"def getVersionNumber(binaryString, startPos=0):\n if (len(binaryString) - startPos) < PRIMARY_HEADER_BYTE_SIZE:\n raise Error(\"packet header is too small\")\n return ((binaryString[startPos + 0] & 0xE0) >> 5)",
"def do_get_version(self, arg):\n arg = arg\n print(self.phil.if_version)"
] | [
"0.57119393",
"0.5551508",
"0.5551508",
"0.5520948",
"0.52187574",
"0.5203324",
"0.51943564",
"0.5150682",
"0.51446897",
"0.5108943",
"0.5092248",
"0.50849783",
"0.5068254",
"0.5054343",
"0.50500673",
"0.5028197",
"0.5011638",
"0.5010556",
"0.5002635",
"0.4990394",
"0.49899623",
"0.4975151",
"0.49614167",
"0.49481848",
"0.49346983",
"0.49302632",
"0.49282604",
"0.49263418",
"0.4915106",
"0.49147263"
] | 0.6766283 | 0 |
Rebuild the container for a single component. | def build_component(component: str) -> None:
parts: Final = component.split("-", maxsplit=1)
if len(parts) != 2: # noqa: PLR2004 # this will go away with match/case
sys.exit(f"Internal error: build_component() invoked with {component=!r}")
kolla_component, kolla_service = parts
build: Final = prepare.build_dockerfile(cfg, files, kolla_component, kolla_service)
with tempfile.NamedTemporaryFile(
mode="wt", encoding="UTF-8", prefix="Dockerfile."
) as dockerfile:
dockerfile.write(build.dockerfile)
dockerfile.flush()
subprocess.check_call(["ls", "-l", "--", dockerfile.name])
subprocess.check_call(["cat", "--", dockerfile.name])
cmd: Final[list[str | pathlib.Path]] = [
"docker",
"build",
"-t",
f"storpool/{build.container_name}{cfg.tag_suffix}",
"--rm",
*(["--no-cache"] if no_cache else []),
*(["--pull"] if pull else []),
"-f",
dockerfile.name,
"--",
datadir,
]
cmd_str: Final = shlex.join(str(word) for word in cmd)
cfg.diag(lambda: f"Running `{cmd_str}`")
try:
subprocess.run(cmd, check=True)
except (OSError, subprocess.CalledProcessError) as err:
sys.exit(f"Could not run `{cmd_str}`: {err}") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build(self):\n self.rebuild = False\n self.redraw = True",
"def rebuild(context):\n clean(context)\n build(context, cache=False)",
"def main(\n *,\n component: list[str],\n no_cache: bool,\n pull: bool,\n quiet: bool,\n release: str,\n sp_osi: str | None,\n tag_suffix: str | None,\n) -> None:\n\n def build_component(component: str) -> None:\n \"\"\"Rebuild the container for a single component.\"\"\"\n parts: Final = component.split(\"-\", maxsplit=1)\n if len(parts) != 2: # noqa: PLR2004 # this will go away with match/case\n sys.exit(f\"Internal error: build_component() invoked with {component=!r}\")\n kolla_component, kolla_service = parts\n build: Final = prepare.build_dockerfile(cfg, files, kolla_component, kolla_service)\n\n with tempfile.NamedTemporaryFile(\n mode=\"wt\", encoding=\"UTF-8\", prefix=\"Dockerfile.\"\n ) as dockerfile:\n dockerfile.write(build.dockerfile)\n dockerfile.flush()\n subprocess.check_call([\"ls\", \"-l\", \"--\", dockerfile.name])\n subprocess.check_call([\"cat\", \"--\", dockerfile.name])\n\n cmd: Final[list[str | pathlib.Path]] = [\n \"docker\",\n \"build\",\n \"-t\",\n f\"storpool/{build.container_name}{cfg.tag_suffix}\",\n \"--rm\",\n *([\"--no-cache\"] if no_cache else []),\n *([\"--pull\"] if pull else []),\n \"-f\",\n dockerfile.name,\n \"--\",\n datadir,\n ]\n cmd_str: Final = shlex.join(str(word) for word in cmd)\n cfg.diag(lambda: f\"Running `{cmd_str}`\")\n try:\n subprocess.run(cmd, check=True)\n except (OSError, subprocess.CalledProcessError) as err:\n sys.exit(f\"Could not run `{cmd_str}`: {err}\")\n\n if release not in prepare.ALL_RELEASES:\n sys.exit(\n f\"Unsupported release {release!r}, must be one of {' '.join(prepare.ALL_RELEASES)}\"\n )\n if any(comp for comp in component if comp not in ALL_COMPONENTS):\n sys.exit(f\"Unrecognized components, must be one or more of {' '.join(ALL_COMPONENTS)}\")\n cfg: Final = build_config(quiet=quiet, release=release, sp_osi=sp_osi, tag_suffix=tag_suffix)\n\n datadir: Final = cfg.topdir / defs.DATA_DIR\n files: Final = prepare.prepare_data_files(cfg, datadir)\n\n for comp in component:\n build_component(comp)",
"def _update_container(self):\n client = docker.from_env()\n self.container = client.containers.get(self.session.container_id)",
"def build(self):\n for component, type in self.__get_data(\"comps\").items():\n self.add_comp(component, type)\n\n self.logger.info('Build of {} finished'.format(self.name))",
"def refresh(self):\n\t\tself.win.refresh()\n\t\tfor c in self.components:\n\t\t\tc.refresh()",
"def rebuild(self):\n self.set_image(self.ui_manager.get_universal_empty_surface())\n\n if self.text_block is not None:\n self.text_block.set_dimensions((self.rect_width, -1))\n\n self.relative_rect.height = self.text_block.rect.height\n self.relative_rect.width = self.text_block.rect.width\n self.rect.width = self.text_block.rect.width\n self.rect.height = self.text_block.rect.height",
"def rebuild(self, target_context, payload):\n return compiler.CompileResult._rebuild(target_context, *payload)",
"def build_container_image(self) -> None:\n print_version_of_tools()\n try:\n self.fs_watcher.start()\n runner = PluginsRunner(self,\n self.plugins_conf,\n self.plugin_files,\n self.keep_plugins_running,\n plugins_results=self.data.plugins_results)\n runner.run()\n finally:\n self.fs_watcher.finish()",
"def container_refresh(self, kwargs=None):\n scode, hosts = Rest.get('Host')\n filter = {}\n n = 1\n e = {}\n data = []\n for host in hosts:\n os.environ[\"DOCKER_HOST\"] = host['Ip'] + \":\" + str(host['Port'])\n self.client = docker.from_env()\n filter['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n try:\n containers = self.client.containers.list(all, **kwargs)\n except docker.errors.APIError as e:\n Console.error(e.explanation)\n Rest.delete('Container', filter)\n continue\n if len(containers) == 0:\n print(\"No containers exist \" + str(host['Ip']))\n Rest.delete('Container', filter)\n continue\n\n for containerm in containers:\n container = containerm.__dict__['attrs']\n container['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n data.append(container)\n d = {}\n d['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n d['Id'] = container['Id']\n d['Name'] = container['Name']\n d['Image'] = container['Config']['Image']\n d['Status'] = container['State']['Status']\n d['StartedAt'] = container['State']['StartedAt']\n e[n] = d\n n = n + 1\n Rest.delete('Container', filter)\n Rest.post('Container', data)\n Console.ok(str(Printer.dict_table(e, order=['Ip', 'Id', 'Name', 'Image', 'Status', 'StartedAt'])))",
"def update(self, ca):\n\n self.components = ca.get_all_components()",
"def _rebuild(self):\n for shape, record in iter(self):\n self.write_row(shape, record)\n self.__isBuilt = True",
"def rebuild(self):\n self.from_samples(self.samples)",
"def _component_changed(self, old, new):\n canvas = self.canvas\n if old is not None:\n canvas.remove(old)\n if new is not None:\n canvas.add(new)",
"def _refresh(self):\r\n\r\n # Remove all the widgets from the layout\r\n self.__clear_layout()\r\n\r\n # Check if adding a top stretch is needed\r\n if self.top_stretch:\r\n self.__layout.addStretch()\r\n\r\n # Re-build layout from list\r\n for widget in self.widgets_list:\r\n self.__layout.addWidget(widget)\r\n\r\n # Check if adding a bottom stretch is needed\r\n if self.bottom_stretch:\r\n self.__layout.addStretch()",
"def components(build_reset, monkeypatch):\n controllers, visuals, htmls = create_components()\n\n app = App(__name__, rows=len(visuals), sidebar=True)\n for controller in controllers:\n # pylint: disable=protected-access\n assert COMPONENT_REGISTRY[controller._uuid] == controller\n app.add_sidebar(controller)\n\n for vis in visuals:\n # pylint: disable=protected-access\n assert COMPONENT_REGISTRY[vis._uuid] == vis\n app.add(vis)\n\n for htm in htmls:\n # pylint: disable=protected-access\n assert COMPONENT_REGISTRY[htm._uuid] == htm\n app.add_sidebar(htm)\n\n assert len(COMPONENT_REGISTRY) == len(controllers) + 2 * len(visuals) + len(htmls)\n\n # pylint: disable=protected-access\n app._build()\n\n # run second time to make sure nothing weird happens with subsequent builds\n app._build()\n\n with server_check(app) as server:\n yield server",
"def reBuild(self): # redefine the rebuild method for loss function (polymorphism)\n self.updateRange()\n self.buildLine()\n self.normalize() # normalize loss function to have total area of 1 ",
"def update_container():\n return exec_fn(_update_container)",
"def build(self):\n\n raise NotImplementedError(\"Implement build() method\")",
"def _get_container(self) -> Container:\n obj = self.get_container()\n return to_container(obj)",
"def reload(self):\n self.containers = list(filter(_check_alive_container, self.containers))",
"def build_container(client):\n client.images.build(path=os.path.join(os.path.abspath(\"\"), \"docker\"), tag=\"scrape_light\")",
"def update_component():\n # written by Anders Deleuran, andersholdendeleuran.com\n import Grasshopper as gh\n\n def call_back(e):\n \"\"\"Defines a callback action\"\"\"\n gh_env.Component.ExpireSolution(False)\n # Get the Grasshopper document\n ghDoc = gh_env.Component.OnPingDocument()\n # Schedule this component to expire\n ghDoc.ScheduleSolution(loop_interval, gh.Kernel.GH_Document.GH_ScheduleDelegate(call_back))",
"def build(self, rebuild=False):\n if not self._final or rebuild:\n\n LOGGER.info(\"Use %s to create background\", self.name)\n image = self._build_background()\n\n LOGGER.info(\"Use %s to concatenate images\", self.name)\n image = self._build_matrix(image)\n\n LOGGER.info(\"Use %s to assemble final image\", self.name)\n self._final = self._build_final_image(image)\n\n LOGGER.info(\"Use %s to draw texts\", self.name)\n self._build_texts(self._final)\n\n if self._outlines:\n LOGGER.info(\"Use %s to outline boundary borders\", self.name)\n self._build_outlines(self._final)\n\n return self._final",
"def add(self, *components):\n for component in components:\n if component.container is not None:\n component.container.remove(component)\n component.container = self\n self._components.extend(components)",
"def components(self, value):\n if value.shape != self._components.shape:\n raise ValueError(\n \"Trying to replace components of shape {} with some of \"\n \"shape {}\".format(self.components.shape, value.shape)\n )\n else:\n np.copyto(self._components, value, casting=\"safe\")",
"def build (self):\n raise NotImplementedError",
"def returnDocker(self):\r\n # Ensure there's a widget to return\r\n if self.widget:\r\n self.widgetDocker.setWidget(self.widget)\r\n self.widget = None\r\n self.widgetDocker = None",
"def build(self):\n self._remove_swarm_keys()\n self._remove_pod_keys()\n self._set_image()\n self._translate_docker_properties()",
"def build(self):\n pass"
] | [
"0.5731498",
"0.5508753",
"0.5297388",
"0.5239832",
"0.52090853",
"0.50444996",
"0.5013207",
"0.4967973",
"0.49337313",
"0.49103594",
"0.48967397",
"0.48830613",
"0.48656592",
"0.48563054",
"0.48440525",
"0.48366556",
"0.48345816",
"0.48149598",
"0.48129988",
"0.4789811",
"0.47855335",
"0.47750816",
"0.4740757",
"0.4702435",
"0.47014713",
"0.46739227",
"0.46696195",
"0.46671712",
"0.46665528",
"0.46315068"
] | 0.55748755 | 1 |
Group some characteristics by postal code area (first 3 letters) | def postalcode_area_studies():
dfpawnshop = pd.read_csv(pawnmtl.csv)
cpdic = getPostalCodeDic()
for ik in cpdic.keys():
print ik, cpdic[ik] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_postalCA(self):\n \n index = self.index\n \n if len(self.words[index]['word']) != 3:\n return None, 0\n postal = self.words[index]['word']\n index += 1\n if index == self.length:\n return None, 0\n \n if len(self.words[index]['word']) != 3:\n return None, 0\n postal += self.words[index]['word']\n \n return postal, 2",
"def area_code(self):\n return self.number[:3]",
"def padded_area_code(phone_number):\r\n area_code = grab_area_code(phone_number)\r\n return area_code + \"*******\"",
"def extract_postcode(s):\n pc_regex = r'([Gg][Ii][Rr] 0[Aa]{2})|((([A-Za-z][0-9]{1,2})|(([A-Za-z][A-Ha-hJ-Yj-y]'\n pc_regex += r'[0-9]{1,2})|(([A-Za-z][0-9][A-Za-z])|([A-Za-z][A-Ha-hJ-Yj-y][0-9]?[A-Za-z]'\n pc_regex += r'))))\\s?[0-9][A-Za-z]{2})'\n\n re_search = re.search(pc_regex, s)\n if re_search:\n p = re_search.group(0)\n else:\n p = ''\n return p",
"def parse_postalUS(self):\n \n index = self.index\n \n # US Postal Code\n if len(self.words[index]['word']) != 5 or not self.words[index]['word'].isdigit():\n return None, 0\n postal = self.words[index]['word']\n \n if index + 1 < self.length:\n if self.words[index+1]['word'] == '-':\n index += 2\n if index == self.length:\n return None, 0\n if len(self.words[index]['word']) == 4 and self.words[index]['word'].isdigit():\n postal += '-' + self.words[index]['word']\n return postal, 3\n else:\n return postal, 1\n \n return postal, 1",
"def country(alpha_2_code: str) -> None:",
"def postcode(full_address):\n return capture_address_element(POSTCODE_PATTERN, full_address)",
"def get_postal_codes(pts):\n codigos = np.zeros((len(pts),))\n for i, p in tqdm(enumerate(pts), desc=\"GETTING POSTAL CODES\"):\n p = Point(p[0], p[1])\n for j in range(cod_postales.shape[0]):\n if cod_postales.geometry.iloc[j].contains(p):\n codigos[i] = cod_postales.geocodigo.iloc[j]\n return codigos[codigos != 0]",
"def addresses( data ) :\n return list( set(chain.from_iterable( [ re.sub(r'\\[.*?\\]\\s+','',x['C1']).split('; ') for x in data ] )))",
"def geo_coder(house_number, boro_code, street_name, zip_code): \r\n wa1 = '1B{}{}{}{}{}C{}{}'.format(rightpad(house_number, 16), rightpad('', 38), boro_code, rightpad('', 10), rightpad(street_name, 32), rightpad('', 113), rightpad(zip_code, 5))\r\n wa1 = rightpad(wa1, 1200)\r\n wa2 = rightpad('', 4300)\r\n NYCGeo.NYCgeo(wa1, wa2)\r\n return wa1, wa2",
"def main(postalcode):\n places = postalcodes_mexico.places(postalcode)\n click.echo(places)\n return 0",
"def process_capture_groups(group):\n group = group[0][0].replace(',', '')\n group = group.split(' ')\n street_type = group[-1]\n street_name = ' '.join(group[:-1])\n if street_type in STREET_ABBREVIATION_TO_NAME.keys():\n street_type = STREET_ABBREVIATION_TO_NAME[street_type]\n return street_name, street_type",
"def find_places(query):\n parts = str(query).split(' ')\n for i, p in enumerate(parts):\n p = p.replace('-', ' ').strip()\n try:\n postal_code = int(p)\n if len(postal_code) == 4:\n print(postal_code, parts[i+1])\n # Check \n #response = get_osm_location(\"{postal_code} {name}\")\n #lon = response['lon']\n #lat = response['lat']\n #poly = \n except Exception as e:\n continue",
"def normalise_postcode(postcode):\n\n postcode = NON_ALPHA_RE.sub(\"\", postcode.upper())\n postcode = postcode[:-3] + \" \" + postcode[-3:]\n if POSTCODE_RE.match(postcode):\n return postcode\n return None",
"def postal():\r\n return _linecache.getline(\r\n _os.path.join(_os.path.abspath(_os.path.dirname(__file__)), 'ca_postal_codes.csv'),\r\n _random.randrange(0, 917358)\r\n ).strip(\"\\n\")",
"def clean_postcodes(postcodes):\n postcode_df = pd.DataFrame({'Postcode':postcodes})\n postcode_df['Postcode'] = postcode_df['Postcode'].str.upper()\n\n # If length is not 7 get rid of spaces. This fixes e.g. \"SW19 2AZ\" -> \"SW192AZ\"\n postcode_df['Postcode'] = postcode_df['Postcode'].where(\n postcode_df['Postcode'].str.len() == 7, postcode_df['Postcode'].str.replace(\" \", \"\"))\n\n # If length is 5 (e.g. \"W67HZ\") add two spaces in the middle (-> \"W6 7HZ\")\n postcode_df['Postcode'] = postcode_df['Postcode'].where(\n postcode_df['Postcode'].str.len() != 5,\n postcode_df['Postcode'].str[:2]+ \" \" + postcode_df['Postcode'].str[2:])\n\n # If length is 6 (e.g. \"SW72AZ\") add a space in the middle and end(-> \"SW7 2AZ\")\n postcode_df['Postcode'] = postcode_df['Postcode'].where(\n postcode_df['Postcode'].str.len() != 6,\n postcode_df['Postcode'].str[:3]+ \" \" + postcode_df['Postcode'].str[3:])\n\n return postcode_df['Postcode'].to_numpy()",
"def clean_address(self, s):\n # The letter \"O\" instead of the numeral \"0\" is a common mistake.\n s = re.sub(\n r\"\\b[A-Z][O0-9][A-Z]\\s?[O0-9][A-Z][O0-9]\\b\", lambda x: x.group(0).replace(\"O\", \"0\"), clean_string(s)\n )\n for k, v in province_or_territory_abbreviations().items():\n # Replace a province/territory name with its abbreviation.\n s = re.sub(\n r\"[,\\n ]+\"\n r\"\\(?\" + k + r\"\\)?\"\n r\"(?=(?:[,\\n ]+Canada)?(?:[,\\n ]+[A-Z][0-9][A-Z]\\s?[0-9][A-Z][0-9])?\\Z)\",\n \" \" + v,\n s,\n )\n # Add spaces between province/territory abbreviation, FSA and LDU and remove \"Canada\".\n return re.sub(\n r\"[,\\n ]+\" r\"([A-Z]{2})\" r\"(?:[,\\n ]+Canada)?\" r\"[,\\n ]+([A-Z][0-9][A-Z])\\s?([0-9][A-Z][0-9])\" r\"\\Z\",\n r\" \\1 \\2 \\3\",\n s,\n )",
"def postal_codes(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"postal_codes\")",
"def _remove_area_code(phone):\n\n if not phone.startswith('+46'):\n return phone\n else:\n return '0' + phone[3:]",
"def cadena_palabras_con_a(cadena):\n lista=cadena.split(\" \")\n total = \"\"\n for palabra in lista:\n if palabra.startswith(\"a\") or palabra.startswith(\"A\"):\n total += palabra + \" \"\n return total",
"def genCharGroup(self):\n alphabet = list('abcdefghijklmnopqrstuvwxyz') #Creates a list of all the alphabet characters\n group = []\n count = 0\n while count != 3: #While the loop total does not equal 3\n i = random.choice(alphabet) #Make a random choice\n alphabet.remove(i) #Remove it from the alphabet\n group.append(i) #And add it to the group array\n count += 1 #Add one to the loop total\n return str(''.join(group)) #Return the string of 3 characters to the user",
"def ranCharGroup(self):\n group = self.genCharGroup() + ' ' + self.genCharGroup() + ' ' + self.genCharGroup() + ' ' + self.genCharGroup()\n return group #Returns a string of 4 character groups",
"def character_map(text):\n\n print(f\"Total character count: {len(text)}\\n\")\n\n characters = sorted(list(set(text))) # Get sorted list of individual characters\n n_to_char = {}\n char_to_n = {}\n\n num = 0\n for char in characters:\n n_to_char[num] = char\n char_to_n[char] = num\n num += 1\n\n return characters, n_to_char, char_to_n",
"def form_pw_groups(self, p123, pw):\n p1_x = p123['p1_x']\n p2_x = p123['p2_x']\n p3_x = p123['p3_x']\n pw1 = pw[p1_x, :]\n pw2 = pw[p2_x, :]\n pw3 = pw[p3_x, :]\n # [B, N, 3(x,y,z), 3(p1,p2,p3)]\n pw_groups = torch.cat([pw1[:, :, np.newaxis], pw2[:, :, np.newaxis], pw3[:, :, np.newaxis]], 2)\n return pw_groups",
"def __convert_group(n):\n output = ''\n\n if(n == '100'):\n output = \"CIEN \"\n elif(n[0] != '0'):\n output = CENTENAS[int(n[0]) - 1]\n\n k = int(n[1:])\n if(k <= 20):\n output += UNIDADES[k]\n else:\n if((k > 30) & (n[2] != '0')):\n output += '%sY %s' % (DECENAS[int(n[1]) - 2], UNIDADES[int(n[2])])\n else:\n output += '%s%s' % (DECENAS[int(n[1]) - 2], UNIDADES[int(n[2])])\n\n return output",
"def name_area(lon, lat):\n \n point = Point(lon, lat)\n area = [point.within(polygon) for polygon in districts.geometry]\n \n return(districts[area].EDNAME.values)",
"def geocode(postcode):\n key = current_app.config.get(\"OS_PLACES_API_KEY\")\n formatted_addresses = FormattedAddressLookup(key=key).by_postcode(postcode)\n response = [{\"formatted_address\": address} for address in formatted_addresses if address]\n return Response(json.dumps(response), mimetype=\"application/json\")",
"def extract_zipcode(full_address):\n full_address = full_address.strip()\n last_space_index = full_address.rindex(\" \")\n zipcode = full_address[last_space_index + 1 : ]\n return zipcode",
"def get_3letter(countries):\n url = URL(\"http://www.worldatlas.com/aatlas/ctycodes.htm\")\n html = url.download()\n dom = DOM(html)\n country_table = dom.by_tag('table.tableWrap')\n iso2_list = []\n iso3_list = []\n density_list = []\n\n for table in country_table:\n\n # Check if the ISO-2 Code is in our wikipedia dictionary, than add every value to a separate list.\n for country in table('tr')[1:]:\n iso2_code = country.by_tag('td.cell02')[0].content.strip()\n iso3_code = country.by_tag('td.cell03')[0].content.strip()\n print iso2_code, iso3_code\n if iso2_code in countries:\n iso2_list.append(iso2_code)\n iso3_list.append(iso3_code)\n density_list.append(countries[iso2_code])\n\n # A check to see if we miss countries from the wikipedia page.\n for iso2 in iso2_list:\n if iso2 in countries:\n pass\n else:\n print 'MISSING', iso2\n\n f1 = open(\"countrycodes.txt\", \"w\")\n # Reformat the data to fit the json.dump function.\n json_d = []\n for i in range(len(iso2_list)):\n json_d.append({'iso2': iso2_list[i], 'iso3': iso3_list[i], 'density': density_list[i]})\n f1.write(\"{0},{1}\\n\".format(iso2_list[i], iso3_list[i]))\n with open('densities.txt', 'a') as f:\n json.dump(json_d, f, indent=4)",
"def formatAddress():\n # Strings to load data\n stringFile = '/Users/Louis/Documents/Research/Code/cleanedData/'\n days = {'cleaned01-Dec-2015':2,# tuesday\n 'cleaned02-Dec-2015':3,# wednesday\n 'cleaned03-Dec-2015':4,# ...\n 'cleaned04-Dec-2015':5,\n 'cleaned07-Dec-2015':1,\n 'cleaned08-Dec-2015':2,\n 'cleaned09-Dec-2015':3,\n 'cleaned10-Dec-2015':4,\n 'cleaned11-Dec-2015':5,\n 'cleaned14-Dec-2015':1,\n 'cleaned15-Dec-2015':2,\n 'cleaned16-Dec-2015':3,\n 'cleaned17-Dec-2015':4,\n 'cleaned18-Dec-2015':5,\n 'cleaned21-Dec-2015':1}\n \n # Store results\n addresses = []\n CourierSuppliedAddresses = []\n \n for day in days.keys():\n # Configuration for CSV reading\n with open(stringFile+day+'_modified.csv') as csvfile:\n # Dictionary containing the info\n reader = csv.DictReader(csvfile,delimiter = ',')\n # print(day)\n \n for row in reader:\n addresses.append(row['Address'])\n CourierSuppliedAddresses.append(row['CourierSuppliedAddress'])\n \n addresses = list(set(addresses))\n addresses.sort()\n \n CourierSuppliedAddresses = list(set(CourierSuppliedAddresses))\n CourierSuppliedAddresses.sort()\n return addresses, CourierSuppliedAddresses"
] | [
"0.6344374",
"0.57528186",
"0.568245",
"0.56237084",
"0.5567839",
"0.5556221",
"0.54706067",
"0.54336405",
"0.53458565",
"0.5316024",
"0.5240724",
"0.5203791",
"0.5191933",
"0.51890206",
"0.51539314",
"0.5117294",
"0.5079633",
"0.5059187",
"0.50442296",
"0.50300467",
"0.50061685",
"0.49985257",
"0.49704504",
"0.49182338",
"0.48948717",
"0.4870776",
"0.48670864",
"0.48631632",
"0.48580334",
"0.4849378"
] | 0.61772764 | 1 |
Fills in placeholders with previous entries (if any are available); should be called via AJAX (similar to evaluate) | def placeholders_fill_in_last_response():
task_key = request.vars.task_key
if auth.is_logged_in():
rows = db(task_query(task_key)).select()
if len(rows) > 1:
raise RuntimeError("DB error: learn table has too many (%s) entries with task_key=%s, user_id=%s " % (len(rows), task_key, auth.user_id))
if len(rows) == 1:
responses = rows.first().responses
evaluations = rows.first().evaluations
js_tpl_fillin = " fill_in_placeholder( placeholders['%(task_key)s'][%(nr)s], '%(response)s' ); \n "
js_tpl_highlight = " highlight_placeholder( placeholders['%(task_key)s'][%(nr)s], '%(state)s' );\n"
js_result_fillin = []
js_result_highlight = []
for nr, response, state in zip(range(len(responses)), responses, evaluations):
response = escape_quotes( response )
js_result_fillin.append(js_tpl_fillin % locals())
js_result_highlight.append( js_tpl_highlight % locals() )
if len(evaluations) > len(responses): # syntax (or other context error)
syntax_error = evaluations[-1]
return ( ''.join(js_result_fillin)
+ wrap_js_settimeout( "alert('%s\\n\\n%s'); \n" % ("Neteisingas lygiavimas..?", escape_quotes( syntax_error ) ) )
)
return ''.join(js_result_fillin+['\n']+js_result_highlight)
return "" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reload_placeholder(update):\n pass",
"def FillForm(string_for_substitution, dictionary_of_vars):\n return_string = string_for_substitution\n for i in re.findall(\"//%%(.*)%%//\", string_for_substitution):\n return_string = re.sub(\"//%%\" + i + \"%%//\", dictionary_of_vars[i],\n return_string)\n return return_string",
"def setPlaceholderStrings(self, dataslice):\n\t\tself.placeholderstrings = []\n\t\tfor valuelist in dataslice:\n\t\t\tplaceholders = ['?'] * len(valuelist)\n\t\t\tself.placeholderstrings.append(', '.join(placeholders))",
"def insert_evaluate_variables(text, var_dict):\n if isinstance(text, list):\n text.insert(0, '{% load quest_render_tags %}')\n rndr_string = '\\n'.join(text)\n else:\n rndr_string = r'{% load quest_render_tags %} ' + text\n\n var_dict_rendered = {}\n for key, values in var_dict.iteritems():\n var_dict_rendered[key] = values[1]\n\n tmplte = Template(rndr_string)\n cntxt = Context(var_dict_rendered)\n return tmplte.render(cntxt)",
"def handle_data(self, data):\n if data.strip():\n self._content_list.append((self._current_tag, data))\n self._html += f\"{{{'placeholder_'+str(self._index)}}}\"\n self._index += 1",
"def process_request(request):\n initial_data = request.POST[\"initial_data\"]\n if re.search(\"datetime.date\\\\((.*?)\\\\)\", initial_data):\n date_val = re.findall(\"datetime.date\\\\((.*?)\\\\)\", initial_data)\n for date in date_val:\n dates = list(map(int, date.split(\", \")))\n initial_data = re.sub(\"datetime.date\\\\((.*?)\\\\)\",\n \"'\" + datetime.date(dates[0], dates[1], dates[2]).strftime(\"%d %B, %Y\") + \"'\",\n initial_data, 1)\n initial_data = json.loads(initial_data.replace(\"'\", \"\\\"\"))\n\n old_data = json.loads(request.POST[\"prev_data\"].replace(\"'\", \"\\\"\")) if \"prev_data\" in request.POST else None\n data = []\n try:\n if old_data is not None:\n data += old_data\n\n data.append({\n \"short_description\": request.POST[\"short_description\"],\n \"particulars\": request.POST[\"particulars\"],\n \"quantity\": request.POST[\"quantity\"],\n \"unit\": request.POST[\"unit\"],\n \"unit_price\": request.POST[\"unit_price\"],\n \"total_cost\": str(float(request.POST[\"quantity\"]) * float(request.POST[\"unit_price\"]))\n })\n except MultiValueDictKeyError:\n data = old_data\n\n return initial_data, data",
"def on_submit(self, text):\n self.pp = [float(i.text) for i in self.text_boxes]\n self.pp_values = self.pp.copy()\n self.pp_mapping()\n self.redraw()",
"def replace_params(self):\n raw_sql = self.raw_sql\n for placeholder in self.to_replace:\n newreg = re.compile(placeholder)\n repl = self.get_replacement_value(placeholder)\n if repl:\n raw_sql = newreg.sub(str(repl), raw_sql)\n self.sql = raw_sql",
"def replace_placeholders(self, placeholder_dict):\n\n for placeholder, value in placeholder_dict.items():\n placeholder_wrapped = f\"{self.marker_string}{placeholder}{self.marker_string}\"\n\n if placeholder not in self.unresolved_placeholders:\n self.hd.log.warn(f\"Placeholder {placeholder} not found in sequence.\")\n else:\n self.sequence = self.sequence.replace(f\"{placeholder_wrapped}\", str(value))\n self.unresolved_placeholders.discard(placeholder)",
"def _create_placeholders(self):\n raise NotImplementedError",
"def callback(self):\r\n self.entry_contents1=self.e1.get()\r\n self.entry_contents2=self.e2.get()\r\n self.entry_contents3=self.e3.get()\r\n self.entry_contents4=self.e4.get()\r\n self.entry_contents5=self.e5.get()\r\n self.entry_contents6=self.e6.get()",
"def _fill_template_text(\n self,\n template: Dict[Text, Any],\n template_vars: Dict[Text, Any]\n ) -> Dict[Text, Any]:\n line_text_keys = [\"text\", \"altText\", \"label\", \"uri\"]\n try:\n for key in line_text_keys:\n if key in template:\n template[key] = template[key].format(**template_vars)\n except KeyError as e:\n logger.exception(\n \"Failed to fill line template '{}'. \"\n \"Tried to replace '{}' but could not find \"\n \"a value for it. There is no slot with this \"\n \"name nor did you pass the value explicitly \"\n \"when calling the template. Return template \"\n \"without filling the template. \"\n \"\".format(template, e.args[0]))\n return template",
"def _prefill_placeholders(placeholders, files, user_values):\n placeholder_values = dict()\n\n for placeholder in placeholders:\n if placeholder in files:\n # Placeholder name is a filename, so get the path\n path = files[placeholder]\n if not isinstance(path, str):\n try:\n path = path(**user_values)\n except ValueError:\n # Placeholder could not be pre-filled given the supplied\n # values by the user.\n continue\n\n # Add the path as possible placeholder value\n placeholder_values[placeholder] = path\n\n return placeholder_values",
"def register_inputs(self, args_):\n # TODO Should we be able to rebuild?\n def traversal_function(obj):\n if obj.id.value not in self.placeholders:\n self.placeholders[obj.id.value] = obj\n self.input_placeholder_ids.append(obj.id.value)\n\n self.input_placeholder_ids = []\n Role.nested_object_traversal(args_, traversal_function, PlaceHolder)\n self.input_placeholder_ids = tuple(self.input_placeholder_ids)",
"def test_placeholders(self):\n page = create_page('page', 'page.html', 'en', published=True)\n url = reverse('api:placeholder-list')\n response = self.client.get(url, formst='json')\n self.assertEqual(len(response.data), 1)\n self.assertEqual(response.data[0]['slot'], 'content')\n page2 = create_page('page2', 'feature.html', 'en', published=True)\n response = self.client.get(url, formst='json')\n self.assertEqual(len(response.data), 3)\n self.assertEqual(response.data[1]['slot'], 'feature')\n self.assertEqual(response.data[2]['slot'], 'content')",
"def fill_template(template, replacements):\n content = template\n for src, target in replacements.iteritems():\n content = content.replace(src, target)\n return content",
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n 'group_id': 'group_id',\n 'first_name': 'first_name',\n 'starter': 'starter',\n 'main': 'main',\n 'dessert': 'dessert',\n 'special_diet': 'special_diet',\n 'requirements': 'requirements',\n }\n\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields[field].widget.attrs['class'] = 'stripe-style-input'",
"def update():\n #here’s the \"back end\" that outputs a JSON array of up to 10 places (i.e., cities)\n #that fall within the specified bounds (i.e., within the rectangle defined by those corners)\n\n # the request.args is bringing a \"dictionary\" object for you(from update() function in scripts.js)\n\n #in Python a programmer can raise an error exception at any point in a program\n #The statements used to deal with exceptions are raise and except\n\n #ensure parameters are present\n if not request.args.get(\"sw\"):\n raise RuntimeError(\"missing sw\")\n if not request.args.get(\"ne\"):\n raise RuntimeError(\"missing ne\")\n\n\n # A regular expression is a special sequence of characters that helps you match or find other strings or sets of strings\n #The module re provides full support for Perl-like regular expressions in Python.\n\n #ensure parameters are in lat,lng format\n if not re.search(\"^-?\\d+(?:\\.\\d+)?,-?\\d+(?:\\.\\d+)?$\", request.args.get(\"sw\")):\n raise RuntimeError(\"invalid sw\")\n\n #This function searches for first occurrence of RE pattern within string with,it returns a match object on success, none on failure\n if not re.search(\"^-?\\d+(?:\\.\\d+)?,-?\\d+(?:\\.\\d+)?$\", request.args.get(\"ne\")):\n raise RuntimeError(\"invalid ne\")\n\n # explode southwest corner into two variables\n (sw_lat, sw_lng) = [float(s) for s in request.args.get(\"sw\").split(\",\")]\n\n # explode northeast corner into two variables\n (ne_lat, ne_lng) = [float(s) for s in request.args.get(\"ne\").split(\",\")]\n\n # find 10 cities within view, pseudorandomly chosen if more within view\n if (sw_lng <= ne_lng):\n\n # doesn't cross the antimeridian\n rows = db.execute(\"\"\"SELECT * FROM places\n WHERE :sw_lat <= latitude AND latitude <= :ne_lat AND (:sw_lng <= longitude AND longitude <= :ne_lng)\n GROUP BY country_code, place_name, admin_code1\n ORDER BY RANDOM()\n LIMIT 10\"\"\",\n sw_lat=sw_lat, ne_lat=ne_lat, sw_lng=sw_lng, ne_lng=ne_lng)\n\n else:\n\n # crosses the antimeridian\n rows = db.execute(\"\"\"SELECT * FROM places\n WHERE :sw_lat <= latitude AND latitude <= :ne_lat AND (:sw_lng <= longitude OR longitude <= :ne_lng)\n GROUP BY country_code, place_name, admin_code1\n ORDER BY RANDOM()\n LIMIT 10\"\"\",\n sw_lat=sw_lat, ne_lat=ne_lat, sw_lng=sw_lng, ne_lng=ne_lng)\n\n # output places as JSON; jsonify takes the python dictionary(list) and converts it to a JSON object\n return jsonify(rows)",
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n 'code': 'Enter coupon code',\n }\n\n self.fields['code'].widget.attrs['autofocus'] = True\n for field in self.fields:\n placeholder = f'{placeholders[field]}'\n\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields[field].label = False",
"def resubmit(self, _):\n kw = {}\n for k, v in self.form.items():\n if v.edit_text != PARAM_DEFAULT_VALUE:\n kw[k] = v.edit_text\n try:\n self.execution.update_job_params(**kw)\n except QMapError as e:\n self.widget.original_widget = urwid.Text(e)",
"def render( request, etype, value, tb ):",
"def review():\n entry = request.args.get(\"q\")\n html = entry_dict[entry]['html']\n raw = entry_dict[entry]['html_raw']\n if entry in memorize_dict:\n memo_obj_list = memorize_dict[entry]\n else:\n memo_obj_list = []\n return render_template(\"main.html\",\n entry=entry,\n raw=html,\n html=html,\n memoObjList=memo_obj_list)",
"def placeholders(self):\n x = [i.placeholder for i in self._input_desc]\n return x[0] if len(x) == 1 else x",
"def prepare_template(self, rest_handler, key=''):\n template_values = {}\n template_values['page_title'] = self.format_title('Edit Question')\n template_values['main_content'] = self.get_form(rest_handler, key=key)\n\n return template_values",
"def resubmit(self, _):\n kw = {}\n for k,v in self.form.items():\n if v.edit_text != PARAM_DEFAULT_VALUE:\n kw[k] = v.edit_text\n if self.callback is None:\n try:\n self.job.resubmit(**kw)\n except QMapError as e:\n self.widget.original_widget = urwid.Text(e)\n else:\n self.callback(**kw)",
"def test_prep_fields_called_html_output(self):\n pass",
"def place_holder(self, xpath, loop_index, text_from_xml, text_index, ids, eng_list):\r\n try:\r\n element_text = self.object.element_get_property(self.util.client,\r\n xpath[loop_index]['zone'],\r\n xpath[loop_index]['xpath'],\r\n xpath[loop_index]['index'],\r\n \"placeholder\", self.logger_name)\r\n if element_text:\r\n self.logger.info(\"Testing StringID == \" + str(ids[text_index]))\r\n self.logger.info(\"English Text == \" + eng_list[text_index])\r\n self.util.text_compare2(self.common, text_from_xml[text_index], element_text, ids[text_index],\r\n self.logger_name)\r\n except:\r\n print\" Value not found\"",
"def show_results(ninja_id):\n query = \"SELECT \" # get the ninja based on ninja id\n data = {} # ninja id\n mysql = # connect to mysql \n ninja = mysql.query_db(query, data)\n return render_template ('submitted_info.html', ninja = ninja) # [{ninja_data: \"stuff\"}]",
"def update_query(self):\n text = self.lineedit.text()\n self.results = self.lookup(text)\n self.update_ui()\n self.copy_entry(self.table.currentRow(), self.table.currentColumn())",
"def _generatePlaceholderText(self, obj, **args):\n result = [x for x in obj.getAttributes() if x.startswith('placeholder-text:')]\n return [x.replace('placeholder-text:', '') for x in result]"
] | [
"0.55820477",
"0.55585647",
"0.53613096",
"0.5329101",
"0.5301409",
"0.51870173",
"0.5107881",
"0.50597113",
"0.50417787",
"0.50273234",
"0.5014483",
"0.4979621",
"0.49399748",
"0.48602873",
"0.48537135",
"0.4786001",
"0.4781956",
"0.4773322",
"0.47684172",
"0.47568554",
"0.47341394",
"0.47331437",
"0.47262847",
"0.47261584",
"0.47133416",
"0.47077265",
"0.4678634",
"0.46696335",
"0.46648195",
"0.46557376"
] | 0.71383286 | 0 |
Returns True only if the requested leave can be granted | def isLeaveLeft(self,leave_type,days):
if leave_type == 1 :
return days<=self.earned_balance
elif leave_type == 2 :
return days<=self.hp_balance
elif leave_type == 3 :
return days*2<=self.hp_balance
else :
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def can_leave_team(uid):\n current_user = get_user(uid=uid)\n current_team = api.team.get_team(current_user[\"tid\"])\n if current_team[\"team_name\"] == current_user[\"username\"]:\n return False\n if current_team[\"creator\"] == uid and current_team[\"size\"] != 1:\n return False\n if len(api.submissions.get_submissions(uid=uid)) > 0:\n return False\n return True",
"def allow_exit(self, mover):\n return True",
"def test_not_logged_user_cannot_leave(self):\n\n utils.test_not_logged_cannot_access(self, self.url)",
"def can_act(self) -> bool:\n return self.cooldown < 1",
"def can_act(self) -> bool:\n return self.cooldown < 1",
"def can_throw(life):\n\treturn can_hold_item(life)",
"def canAct(self) -> bool:\n return self.cooldown < 1",
"def all_leave(self):\n return self.num_leaves == self.num_workers",
"def can_exit(self) -> bool:\n return False",
"async def leave(ctx, *, check=\"\"):\r\n # if botv.isAdmin(ctx.message.author) and check == \"now, bot\":\r\n # if necessary, save checks can go here; check presently commented out because botv can\r\n # fail to initialize in testing\r\n await bot.say(\"Allan, please add dialogue!\")\r\n quit()",
"def can_act(self, **kwargs):\n return True",
"def can_exist_outside_of_game(self):\n return True",
"def can_exist_outside_of_game(self):\n return True",
"def gameOver(self):\n\t\treturn self.lives == 0",
"def can_make_action(self) -> bool:\n return not(self.has_pending_action or self.is_dead())",
"def canCancel(self) -> bool:\n ...",
"def canCancel(self) -> bool:\n ...",
"def canCancel(self) -> bool:\n ...",
"def canCancel(self) -> bool:\n ...",
"def check_allowed(self):\n if self.state_model.op_state in [\n DevState.FAULT,\n DevState.UNKNOWN,\n DevState.ON,\n ]:\n tango.Except.throw_exception(\n f\"Disable() is not allowed in current state {self.state_model.op_state}\",\n \"Failed to invoke Disable command on SdpMasterLeafNode.\",\n \"SdpMasterLeafNode.Disable() \",\n tango.ErrSeverity.ERR,\n )\n return True",
"def can_callback(ir):\n return isinstance(ir, Call) and ir.can_reenter()",
"def _onchange_restrict_access(self, stage_id):\n print('----------',self.env.uid)\n # if self.env.uid != 1 :\n raise exceptions.Warning('You are not allowed to change the stages, Please contact the Administrator')\n return True\n return {}",
"def on_leave(self, event):\n self.pre_check(event)\n self.remove_player(event.guild.id)",
"def can_flyover(self):\n return False",
"async def leave(ctx):\n if ctx.message.channel.name.lower() not in tod_channels:\n return\n\n room = ctx.message.channel.name.lower()\n if room not in tod_games:\n await amor_manager.say(\"Truth Or Dare not in progress in {}\".format(room))\n else:\n player = ctx.message.author.name\n if player.lower() not in list(tod_games[room]['participants'].keys()):\n await amor_manager.say(\"{}, you cannot leave the game if you have not joined\".format(player))\n elif player == tod_games[room]['host']:\n await amor_manager.say(\"{}, you cannot leave the game you're the host\".format(player))\n else:\n del tod_games[room]['participants'][player.lower()]\n await amor_manager.say(\"{} has left Truth or Dare.\".format(player))",
"def allowedToEnter(self):\n if base.cr.isPaid():\n return True\n place = base.cr.playGame.getPlace()\n myHoodId = ZoneUtil.getCanonicalHoodId(place.zoneId)\n if myHoodId in \\\n (ToontownGlobals.ToontownCentral,\n ToontownGlobals.MyEstate,\n ToontownGlobals.GoofySpeedway,\n ):\n # trialer going to TTC/Estate/Goofy Speedway, let them through\n return True\n return False",
"def game_allowed(self, uid=0):\n return True",
"def check_if_can_evolve(self):\n # This sounds similar to generate actions\n pass",
"def can_reach_exit(self, position):\n return self.__verify_exit_path(position)",
"def game_over(self):\n return self.lives() < 0"
] | [
"0.6118813",
"0.5955954",
"0.5953626",
"0.59532577",
"0.59532577",
"0.585946",
"0.5846823",
"0.57779187",
"0.5727706",
"0.5714538",
"0.5692051",
"0.5681357",
"0.5681357",
"0.55972326",
"0.55932873",
"0.5589373",
"0.5589373",
"0.5589373",
"0.5589373",
"0.55799425",
"0.556054",
"0.555443",
"0.5546013",
"0.5505967",
"0.5505404",
"0.5465385",
"0.54457235",
"0.54285806",
"0.5425259",
"0.54203814"
] | 0.60508454 | 1 |
For cross-validation, extract val_perc% of the training set as the validation set. | def get_train_val(train: datasets, test_transform: transforms,
dataset: str, val_perc: float = 0.1):
dataset_length = train.data.shape[0]
directory = 'datasets/val_permutations/'
create_if_not_exists(directory)
file_name = dataset + '.pt'
if os.path.exists(directory + file_name):
perm = torch.load(directory + file_name)
else:
perm = torch.randperm(dataset_length)
torch.save(perm, directory + file_name)
train.data = train.data[perm]
train.targets = np.array(train.targets)[perm]
test_dataset = ValidationDataset(train.data[:int(val_perc * dataset_length)],
train.targets[:int(val_perc * dataset_length)],
transform=test_transform)
train.data = train.data[int(val_perc * dataset_length):]
train.targets = train.targets[int(val_perc * dataset_length):]
return train, test_dataset | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def split_train_validation_and_test(num_examples, val_percentage, test_percentage):\n all_samples_idx = np.arange(num_examples)\n np.random.shuffle(all_samples_idx)\n test_examples = int(np.ceil(num_examples * test_percentage))\n val_examples = int(np.ceil(num_examples * val_percentage))\n # Train and validation indexes\n train_idx = all_samples_idx[0:len(all_samples_idx) - test_examples - val_examples]\n val_idx = all_samples_idx[len(all_samples_idx) - test_examples - val_examples:len(all_samples_idx) - test_examples]\n test_idx = all_samples_idx[len(all_samples_idx) - test_examples:]\n train_idx.sort()\n val_idx.sort()\n test_idx.sort()\n\n return [train_idx, val_idx, test_idx]",
"def train_val_split(X, Y, val_percentage):\n dataset_size = X.shape[0]\n idx = np.arange(0, dataset_size)\n np.random.shuffle(idx) \n \n train_size = int(dataset_size*(1-val_percentage))\n idx_train = idx[:train_size]\n idx_val = idx[train_size:]\n X_train, Y_train = X[idx_train], Y[idx_train]\n X_val, Y_val = X[idx_val], Y[idx_val]\n return X_train, Y_train, X_val, Y_val",
"def train_val_split(X, Y, val_percentage):\n dataset_size = X.shape[0]\n idx = np.arange(0, dataset_size)\n np.random.shuffle(idx) \n \n train_size = int(dataset_size*(1-val_percentage))\n idx_train = idx[:train_size]\n idx_val = idx[train_size:]\n X_train, Y_train = X[idx_train], Y[idx_train]\n X_val, Y_val = X[idx_val], Y[idx_val]\n return X_train, Y_train, X_val, Y_val",
"def generate_train_val_split(cls, examples, labels, pct_val):\n num_classes = len(set(labels))\n\n num_val_images = int(len(examples) * pct_val) // num_classes\n\n val_x = []\n val_y = []\n train_x = []\n train_y = []\n\n cts = {x: 0 for x in range(num_classes)}\n for img, class_idx in zip(examples, labels):\n # allow labeled\n if cts[class_idx] < num_val_images:\n val_x.append(img)\n val_y.append(class_idx)\n cts[class_idx] += 1\n else:\n train_x.append(img)\n train_y.append(class_idx)\n\n val_x = np.stack(val_x)\n train_x = np.stack(train_x)\n return val_x, val_y, train_x, train_y",
"def get_scores(self, X_val):\n \n if not self.clf_fit:\n raise RuntimeError('Call clf.fit before clf.predict.')\n \n # Create predictions from learners\n preds = list()\n for i in range(self.num_base_learners):\n pred = self.clfs[i].predict(X_val)\n preds.append(pred)\n \n # Average results\n preds = np.vstack(preds)\n preds = preds.T\n \n scores = list()\n for pred in preds:\n scores.append(float(sum(pred))/float(preds.shape[1]))\n \n return scores",
"def stratifier(self, data, labels, classifiers, cv, output_dir):\n\t\tresults_proba = collections.defaultdict(dict)\n\t\tdict_y_test = collections.defaultdict()\n\t\tsss = StratifiedShuffleSplit(n_splits=cv, test_size=0.2, random_state=3)\n\t\tsss.get_n_splits(data, labels)\n\t\ti = 1\n\t\tself.logger.info('Training processing ...')\n\t\tloop = sss.split(data, labels)\n\t\tt = tqdm(loop)\n\t\tl = collections.defaultdict(dict)\n\t\tfor train_index, test_index in t:\n\t\t\tt.set_description('Cross-validation n°')\n\t\t\tx_train, x_test = data.values[train_index], data.values[test_index]\n\t\t\ty_train, y_test = labels[train_index], labels[test_index]\n\t\t\tdict_y_test[i] = y_test\n\t\t\tresults_proba, tmp_l = \\\n\t\t\t\tself.classification(\n\t\t\t\t\ti, classifiers, results_proba, x_train, x_test, y_train, y_test)\n\t\t\t[l[d].update(tmp_l[d]) for d in tmp_l]\n\t\t\ti += 1\n\t\t[l[clf].update({'Mean': np.mean(np.asarray(list(l[clf].values())))})\n\t\t for clf in l]\n\t\tlog_cv = pd.DataFrame(l)\n\t\tlog_cv.index.names = ['Cross-validation']\n\t\tlog_cv.to_csv(output_dir + '/Cross-validation_accuracy.csv',\n\t\t index=True, sep='\\t')\n\t\tprint('Cross-validation results : \\n')\n\t\tprint(log_cv)\n\n\t\treturn results_proba, dict_y_test, classifiers",
"def cross_validation_score(self, model, x, y, cv, groups):\n losses = []\n for train_idx, test_idx in cv.split(x, y, groups):\n x_tr, x_te = x[train_idx], x[test_idx]\n y_tr, y_te = y[train_idx], y[test_idx]\n\n model.fit(x_tr, y_tr)\n if self.is_classier:\n test_preds = model.predict_proba(x_te)[:, 1]\n else:\n test_preds = model.predict(x_te)[:,]\n loss = self.loss_metric(y_true=y_te, y_pred=test_preds)\n losses.append(loss)\n return np.mean(losses)",
"def get_training_and_validation_df():\n df = get_cleaned_processed_df()\n val_df = pd.DataFrame.from_csv(VALIDATION_DATA_PATH)\n y_train = df.pop(\"label\")\n y_val = val_df.pop(\"label\")\n\n df, val_df = complete_columns(df, val_df)\n df.fillna(0, inplace=True)\n val_df.fillna(0, inplace=True)\n df = fill_text_features(df)\n val_df = fill_text_features(val_df)\n\n df = drop_text_features(df)\n val_df = drop_text_features(val_df)\n return df.values, y_train, val_df.values, y_val",
"def cv_performance(posTrainData,negTrainData, num_folds):\n length = len(negTrainData)\n splits = split_cv(length, num_folds)\n accuracy_array = []\n for split in splits:\n accuracy = 0\n train_pos = []\n train_neg = []\n test_neg = []\n test_pos = []\n for x in split.train:\n train_pos.append(posTrainData[x])\n train_neg.append(negTrainData[x])\n for x in split.test:\n test_pos.append(posTrainData[x])\n test_neg.append(negTrainData[x])\n nb = Nb(train_pos,train_neg)\n confusion=nb.confusion_matrix(test_pos,test_neg)\n accuracy = nb.accuracy(confusion)\n accuracy_array.append(accuracy)\n\n return accuracy_array",
"def cross_validation_accuracy(clf, X, labels, k):\n ###TODO\n\n cv = KFold(n=len(labels),n_folds=k)\n accuracies = []\n\n \n for train_indices, test_indices in cv:\n \n clf.fit(X[train_indices], labels[train_indices])\n predicted = clf.predict(X[test_indices])\n acc = accuracy_score(labels[test_indices], predicted)\n accuracies.append(acc)\n \n #print('accuracies = ',accuracies) \n #avg = np.mean(accuracies,dtype=np.float64)\n return(np.mean(accuracies,dtype=np.float64))",
"def crossValidate(dataset, folds):\n\tshuffle(dataset)\n\tcv_results = []\n\tprecision_recall_acc = []\n\tfoldSize = int(len(dataset)/folds)\n\tfor i in range(0,len(dataset),foldSize):\n\t\t# preparing data\n\t\tvalD = dataset[i:i+foldSize]\n\t\ttestD = dataset[:i]+dataset[i+foldSize:] #list(set(dataset)-set(dataset[i:i+foldSize]))\n\t\t# Training\n\t\tprint(\"*\"*60)\n\t\tprint(\"Training on data-set size \"+str(len(testD))+\" of batch \"+str(i/(foldSize)))\n\t\tclassi = trainClassifier(testD)\n\t\t# Prediction on validation data \n\t\tprint(\"Predicting on heldout data-set size...\"+str(len(valD))+\" of batch \"+str(i/(foldSize)))\n\t\ty_true = list(map(lambda t: t[1], valD))\n\t\ty_pred = predictLabels(valD,classi)\t\t\n\t\t# Performance Metrics\t\t\n\t\t# average based on macro as it calculate metrics for each label, and find their unweighted mean.\n\t\tprecision_recall = list(precision_recall_fscore_support(y_true, y_pred, average='macro'))\n\t\tacc = accuracy_score(y_true,y_pred)\n\t\tprecision_recall[-1] = acc\n\t\tprint(precision_recall)\n\t\tprecision_recall_acc.append(precision_recall)\n\tdf = pd.DataFrame(precision_recall_acc,columns = [\"Precision\",\"Recall\",\"F1 score\",\"Accuracy Score\"])\n\tprint(df)\n\tcv_results = df.mean().tolist()\n\treturn cv_results",
"def validate(val_loader, net, epoch):\n batch_time = meter.TimeMeter(True)\n data_time = meter.TimeMeter(True)\n prec = meter.ClassErrorMeter(topk=[1], accuracy=True)\n retrieval_map = meter.RetrievalMAPMeter()\n\n # testing mode\n net.eval()\n\n total_seen_class = [0 for _ in range(40)]\n total_right_class = [0 for _ in range(40)]\n\n for i, (views, dps, pcs, labels) in enumerate(val_loader):\n batch_time.reset()\n\n views = views.to(device=config.device)\n pcs = pcs.to(device=config.device)\n dps = views.to(device=config.device)\n labels = labels.to(device=config.device)\n\n f_pc, f_mv, f_dp, _, _, _, de_p, de_v, de_d, dis_p, dis_v, dis_d, cls_p, cls_v, cls_d, fts, preds = net(pcs, views, dps) # bz x C x H x W\n # prec.add(preds.data, labels.data)\n\n prec.add(preds.data, labels.data)\n retrieval_map.add(fts.detach() / torch.norm(fts.detach(), 2, 1, True), labels.detach())\n for j in range(views.size(0)):\n total_seen_class[labels.data[j]] += 1\n total_right_class[labels.data[j]] += (np.argmax(preds.data.cpu(), 1)[j] == labels.cpu()[j])\n\n if i % config.print_freq == 0:\n print(f'Epoch: [{epoch}][{i}/{len(val_loader)}]\\t'\n f'Batch Time {batch_time.value():.3f}\\t'\n f'Epoch Time {data_time.value():.3f}\\t'\n f'Prec@1 {prec.value(1):.3f}\\t')\n\n mAP = retrieval_map.mAP()\n print(f' instance accuracy at epoch {epoch}: {prec.value(1)} ')\n print(\n f' mean class accuracy at epoch {epoch}: {(np.mean(np.array(total_right_class) / np.array(total_seen_class, dtype=np.float)))} ')\n print(f' map at epoch {epoch}: {mAP} ')\n return prec.value(1), mAP",
"def cross_val_pred_both(model, X_train, y_train, X_test, cv=5, n_class=2, problem_type='infer'):\n if problem_type == 'infer':\n problem_type = get_problem_type(y_train)\n if problem_type == 'classification':\n pred_train = np.zeros((len(y_train), n_class))\n pred_test = np.zeros((len(X_test), n_class))\n else:\n pred_train = np.zeros(len(y_train))\n pred_test = np.zeros(len(X_test))\n \n if cv > 1:\n kfold=KFold(len(X_train), n_folds=cv)\n\n if problem_type == 'classification':\n for train_index, test_index in kfold:\n model.fit(X_train.iloc[train_index], y_train.iloc[train_index])\n pred_train[test_index] = model.predict_proba(X_train.iloc[test_index])\n pred_test = pred_test + model.predict_proba(X_test)\n else:\n for train_index, test_index in kfold:\n model.fit(X_train.iloc[train_index], y_train.iloc[train_index])\n pred_train[test_index] = model.predict(X_train.iloc[test_index])\n pred_test = pred_test + model.predict(X_test) \n\n pred_test = pred_test/float(cv)\n elif cv == 1:\n if problem_type == 'classification':\n model.fit(X_train, y_train)\n pred_train = model.predict_proba(X_train)\n pred_test = model.predict_proba(X_test)\n else:\n model.fit(X_train, y_train)\n pred_train = model.predict(X_train)\n pred_test = model.predict(X_test) \n return pred_train, pred_test",
"def fit_cv(self, train_loader, val_src, val_trg, device):\r\n\r\n val_src = torch.as_tensor(val_src).float()\r\n val_trg = torch.as_tensor(val_trg).float()\r\n\r\n val_src = val_src.to(device)\r\n val_trg = val_trg.to(device)\r\n\r\n optimizer = optim.Adam(self.parameters(), lr=self.learning_rate)\r\n criterion = torch.nn.MSELoss(reduction='mean')\r\n\r\n history = np.zeros((self.num_epochs, 2))\r\n\r\n for epoch in range(self.num_epochs):\r\n self.train()\r\n train_epoch_loss = 0\r\n for i, (src, trg) in enumerate(train_loader):\r\n src = torch.as_tensor(src).float()\r\n src = src.to(device)\r\n trg = torch.as_tensor(trg).float()\r\n trg = trg.to(device)\r\n\r\n train_output = self.forward(src) # 1x197\r\n\r\n loss = criterion(train_output, trg)\r\n\r\n loss.backward()\r\n optimizer.step()\r\n train_epoch_loss += loss.item()\r\n\r\n\r\n # on validation set\r\n self.eval()\r\n val_output = self.forward(val_src)\r\n loss = criterion(val_output, val_trg)\r\n val_epoch_loss = loss.item()\r\n history[epoch] = [train_epoch_loss/(i+1), val_epoch_loss]\r\n\r\n print('Epoch: {}/{} Train Loss: {:.4f} Validation Loss:{:.4f}'\r\n .format(epoch, self.num_epochs, train_epoch_loss/(i+1), val_epoch_loss))\r\n\r\n if train_epoch_loss/(i+1) < self.threshold:\r\n break\r\n\r\n return history[:epoch]",
"def report_cv_stats(n_fold, model, samples, labels, comment=None):\n\n # compute n-fold cross validation accuracy for model\n accuracy = cross_validation.cross_val_score(model, samples, labels, cv=n_fold)\n\n # compute mean and standard deviation\n accuracy_m = accuracy.mean()\n accuracy_s = accuracy.std()\n\n text = \"\"\n if comment:\n text = \"(\" + comment + \")\"\n\n print(\"Accuracy\" + text + \": %0.2f (+/- %0.2f)\" % (accuracy_m * 100, accuracy_s * 100 * 2))\n\n return accuracy_m, accuracy_s",
"def _compute_final_accuracies(self, meval):\n valid_accuracy = self.eval_child_model(meval, self.data_loader, 'val')\n if self.hparams.eval_test:\n test_accuracy = self.eval_child_model(meval, self.data_loader, 'test')\n else:\n test_accuracy = 0\n tf.logging.info('Test Accuracy: {}'.format(test_accuracy))\n return valid_accuracy, test_accuracy",
"def cross_validation(whole_train_data, whole_train_labels, k, k_fold):\n accuracies = []\n for i in range(k_fold):\n train_data, train_labels, validation_data, validation_labels = split_train_and_validation(whole_train_data, whole_train_labels, i, k_fold)\n accuracy = knn(train_data, train_labels, validation_data, validation_labels, k)\n accuracies.append(accuracy)\n avg_accuracy = np.mean(accuracies)\n return avg_accuracy",
"def validate(val_loader, model, epoch, cfg):\n batch_time = meter.TimeMeter(True)\n data_time = meter.TimeMeter(True)\n prec = meter.ClassErrorMeter(topk=[1], accuracy=True)\n\n # testing mode\n model.eval()\n\n for i, (shapes, labels) in enumerate(val_loader):\n batch_time.reset()\n # bz x 12 x 3 x 224 x 224\n labels = labels.long().view(-1)\n shapes = Variable(shapes)\n labels = Variable(labels)\n\n # shift data to GPU\n if cfg.cuda:\n shapes = shapes.cuda()\n labels = labels.cuda()\n\n # forward, backward optimize\n preds = model(shapes)\n\n if cfg.have_aux:\n preds, aux = preds\n\n prec.add(preds.data, labels.data)\n\n if i % cfg.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Batch Time {batch_time:.3f}\\t'\n 'Epoch Time {data_time:.3f}\\t'\n 'Prec@1 {top1:.3f}\\t'.format(\n epoch, i, len(val_loader), batch_time=batch_time.value(),\n data_time=data_time.value(), top1=prec.value(1)))\n\n print('mean class accuracy at epoch {0}: {1} '.format(epoch, prec.value(1)))\n\n return prec.value(1)",
"def _get_val_metrics(\n self,\n ) -> Tuple[_Metrics, torch.Tensor, torch.Tensor, torch.Tensor]:\n # Turn off batch-norm updates\n self.model.eval()\n\n with torch.no_grad():\n metrics = _Metrics()\n\n for val_img, val_gt in tqdm(\n self.val_loader, desc=\"Validating\", leave=False\n ):\n val_img = val_img.to(self.device)\n val_gt = val_gt.to(self.device)\n\n with autocast(enabled=self.config.mixed_precision):\n val_pred = self.model(val_img)[0]\n metrics.class_loss += self.class_loss_fn(val_pred, val_gt)\n\n metrics.accuracy += self._get_acc(val_pred, val_gt)\n metrics.f1_score += self._get_f1(val_pred, val_gt)\n\n metrics.class_loss /= len(self.val_loader)\n metrics.accuracy /= len(self.val_loader)\n metrics.f1_score /= len(self.val_loader)\n\n return metrics, val_img, val_gt, torch.sigmoid(val_pred)",
"def percent_accuracy(self, test_set, predicted_values):\r\n\r\n correct = 0\r\n for i in range(len(test_set)):\r\n if test_set[i].classification == predicted_values[i]:\r\n correct += 1\r\n return correct / len(test_set)",
"def validate(val_loader, net, epoch, print_pr=False):\n batch_time = meter.TimeMeter(True)\n data_time = meter.TimeMeter(True)\n prec = meter.ClassErrorMeter(topk=[1], accuracy=True)\n retrieval_map = meter.RetrievalMAPMeter()\n\n # testing mode\n net.eval()\n\n total_seen_class = [0 for _ in range(40)]\n total_right_class = [0 for _ in range(40)]\n\n for i, (views, pcs, labels) in enumerate(val_loader):\n batch_time.reset()\n\n views = views.to(device=config.device)\n pcs = pcs.to(device=config.device)\n labels = labels.to(device=config.device)\n\n preds, fts = net(pcs, views, get_fea=True) # bz x C x H x W\n\n # prec.add(preds.data, labels.data)\n\n prec.add(preds.data, labels.data)\n retrieval_map.add(fts.detach()/torch.norm(fts.detach(), 2, 1, True), labels.detach())\n for j in range(views.size(0)):\n total_seen_class[labels.data[j]] += 1\n total_right_class[labels.data[j]] += (np.argmax(preds.data,1)[j] == labels.cpu()[j])\n\n\n if i % config.print_freq == 0:\n print(f'Epoch: [{epoch}][{i}/{len(val_loader)}]\\t'\n f'Batch Time {batch_time.value():.3f}\\t'\n f'Epoch Time {data_time.value():.3f}\\t'\n f'Prec@1 {prec.value(1):.3f}\\t'\n f'Mean Class accuracy {(np.mean(np.array(total_right_class)/np.array(total_seen_class,dtype=np.float))):.3f}')\n\n mAP = retrieval_map.mAP()\n print(f' instance accuracy at epoch {epoch}: {prec.value(1)} ')\n print(f' mean class accuracy at epoch {epoch}: {(np.mean(np.array(total_right_class)/np.array(total_seen_class,dtype=np.float)))} ')\n print(f' map at epoch {epoch}: {mAP} ')\n if print_pr:\n print(f'pr: {retrieval_map.pr()}')\n return prec.value(1), mAP",
"def train_with_validation_provided(self, features, labels, val_features, val_labels):\n hist = self.model.fit(\n features, labels, batch_size=self.config['training']['batch_size'],\n epochs=self.config['training']['epochs'],\n validation_data=(val_features, val_labels),\n validation_freq=self.config['training']['validation_frequency'],\n callbacks=[TensorBoard(log_dir=self.config['model']['tensorboard_dir'])])\n return hist",
"def validation_step(self):\n # NO NEED TO CHANGE THIS FUNCTION\n logits = self.model.forward(self.X_val)\n loss = cross_entropy_loss(Y_val, logits)\n\n accuracy_train = calculate_accuracy(\n X_train, Y_train, self.model)\n accuracy_val = calculate_accuracy(\n X_val, Y_val, self.model)\n return loss, accuracy_train, accuracy_val",
"def set_train_test_validation_fraction(self, train_fraction, test_fraction, validation_fraction) -> None:\n total = train_fraction + test_fraction + validation_fraction\n self.__train_fraction = float(train_fraction) / total\n self.__test_fraction = float(test_fraction) / total\n self.__validation_fraction = float(validation_fraction) / total",
"def cross_validate(pipeline, data, cv=4):\n print \"Running cross validation...\"\n (Xcv, ycv) = data\n kfold = KFold(n_splits=cv, shuffle=True, random_state=42)\n results = []\n for train_idx, val_idx in kfold.split(Xtrain):\n pipeline.fit(Xcv[train_idx], ycv[train_idx])\n results.append(accuracy_score(\n ycv[val_idx], pipeline.predict(Xcv[val_idx])\n ))\n print \"{} +/- {}\".format(np.mean(results), np.std(results))",
"def train_dev_test_split(data, train_pct=0.7):\n train_len, dev_len, test_len = create_split_bounds(len(data), train_pct)\n\n # Train (70%)\n train = data[0:train_len]\n\n # Dev (15%)\n dev_ub = (train_len + dev_len)\n dev = data[train_len:dev_ub]\n\n # Test (15%)\n test = data[dev_ub:]\n\n assert \"One of the sets contains an unexpected number of elements\", \\\n (len(train) == train_len and len(dev) == dev_len and len(test) == test_len)\n\n return train, dev, test",
"def _doValidation(self, val_dl: torch.utils.data.DataLoader):\n\n # Initialize variables for tracking loss, correct predictions, total samples, and labels\n val_loss = 0.0\n correct = 0\n total = 0\n true_labels = []\n pred_labels = []\n\n # Set the model to evaluation mode (disables gradient computation and dropout)\n self.eval()\n\n # Disable gradient tracking for efficiency\n with torch.no_grad():\n # Iterate over the validation data loader\n for x_batch, y_batch in val_dl:\n # Forward pass to obtain model predictions\n y_pred = self.forward(x_batch)\n # Compute the loss between the predictions and the ground truth\n loss = self.criterion(y_pred, y_batch)\n val_loss += loss.item()\n\n # Get the predicted labels by selecting the maximum value along the second dimension\n _, predicted = torch.max(y_pred.data, 1)\n # Update the count of total samples and correct predictions\n total += y_batch.size(0)\n correct += (predicted == y_batch).sum().item()\n\n # Extend the true and predicted labels lists\n true_labels.extend(y_batch.tolist())\n pred_labels.extend(predicted.tolist())\n\n # Compute the average validation loss\n val_loss /= len(val_dl)\n # Calculate the weighted F1 score for the true and predicted labels\n val_f1 = f1_score(true_labels, pred_labels, average='weighted') * 100\n\n # Return the validation loss, F1 score, true labels, and predicted labels\n return val_loss, val_f1, true_labels, pred_labels",
"def splitTrainValidate(df, perc_training = 0.8):\n train = df.sample(frac=perc_training)#, random_state=200)\n validate = df.drop(train.index)\n return (train, validate)",
"def svm_cv(self, nsplits: int = 5) -> (float, float, float):\r\n c_cand = [0.1, 0.5, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 50, 100]\r\n cv = KFold(n_splits=nsplits)\r\n acc_result = []\r\n for c in c_cand:\r\n acc_result_c = []\r\n for train, test in cv.split(self.x):\r\n x_train = self.x[train, :]\r\n x_test = self.x[test, :]\r\n y_train = self.y[train]\r\n y_test = self.y[test]\r\n model = SVC(C=c, gamma='auto').fit(x_train, y_train)\r\n y_predict = model.predict(x_test)\r\n acc_result_c.append(binary_acc(y_test, y_predict))\r\n acc_result.append(np.mean(acc_result_c))\r\n best_c = c_cand[acc_result.index(max(acc_result))]\r\n return max(acc_result), np.std(acc_result), best_c",
"def _cross_val(self, X, y, classifier, nfolds=10):\n\n # Make sure dimensions agree\n assert X.shape[0] == y.shape[0], \"Number of observations should equal\" \\\n \"number of labels.\"\n\n # Concatenate data in order to shuffle without changing X-y correspondence\n data = np.c_[X, y]\n\n # Shuffle data (swaps rows when 2D - works OK for us)\n np.random.seed(42)\n np.random.shuffle(data)\n\n # Split data into (almost) equal folds (returns a list of arrays)\n # and we cast the list into a numpy array in order to do list indexing\n data = np.array(np.array_split(data, nfolds))\n\n # Do the k-fold cross-validation\n accs = []\n for k in range(nfolds):\n # Get current test set\n X_k_test = data[k][:, :-1]\n y_k_test = data[k][:, -1]\n\n # Get remaining indices and current training set\n remaining_idx = [i for i, v in enumerate(data) if i != k]\n X_k_train = np.vstack(data[remaining_idx])[:, :-1]\n y_k_train = np.vstack(data[remaining_idx])[:, -1]\n\n # Fit and predict with classifier\n classifier.fit(X_k_train, y_k_train)\n yhat = classifier.predict(X_k_test)\n\n # Store error rate\n accs.append(self._accuracy(y_k_test, yhat))\n\n return np.array(accs)"
] | [
"0.64801544",
"0.60825336",
"0.60825336",
"0.60382175",
"0.6003134",
"0.5998898",
"0.5993488",
"0.59562606",
"0.59560895",
"0.59555876",
"0.59392226",
"0.58753514",
"0.5857634",
"0.5845691",
"0.5838349",
"0.58149654",
"0.5813357",
"0.58046526",
"0.58020353",
"0.5790992",
"0.5790922",
"0.5783583",
"0.5779352",
"0.57604885",
"0.57227194",
"0.57022685",
"0.5702145",
"0.5696298",
"0.5678812",
"0.56635547"
] | 0.7022956 | 0 |
DNS query to get TXT record list of google networks | def google_rr_dns_query(record: str) -> Optional[str]:
try:
res = resolver.resolve(record, 'TXT')
return str(res.rrset[0].strings[0], 'utf-8')
except (resolver.NoAnswer, resolver.NXDOMAIN) as error:
raise NetworkError(f'Error querying TXT record for {record}: {error}') from error | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getdns(self):\r\n filename = r\"dns_profiles.txt\"\r\n fp = open(filename)\r\n data = []\r\n for lines in fp.readlines():\r\n data.append(list(map(float, lines.split())))\r\n #use the fundamental string function 'append','split' to extract floating point number\r\n fp.close()\r\n dns_data = np.array(data) #transfer list to array\r\n self.dns_z = dns_data[:, 0] / 1000 #z-plus -> z/h\r\n self.dns_u = dns_data[:, 1] # u-plus\r\n self.dns_uw = dns_data[:, 2]\r\n self.dns_uu = dns_data[:, 3]\r\n self.dns_ww = dns_data[:, 4]\r\n self.dns_vv = dns_data[:, 5]\r\n self.dns_tau = dns_data[:, 7]\r\n self.dns_tot = dns_data[:, 8]",
"def gethostbyname(self, hostname, dnsserv='192.112.36.4'):\n ipaddrlist = []\n cnames = []\n temp = []\n if(self.caching):\n rcache = RecordCache(self.ttl)\n rcord = rcache.lookup(hostname, Type.ANY, Class.IN)\n if(rcord):\n for rec in rcord:\n if rec.type_ == Type.A:\n arec = rec.rdata\n ipaddrlist.append(arec.address)\n elif rec.type_ == Type.CNAME:\n crec = rec.rdata\n cnames.append(crec.cname)\n if ipaddrlist:\n return hostname, cnames, ipaddrlist\n elif cnames:\n return self.gethostbyname(cnames[0], dnsserv)\n \n\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.settimeout(self.timeout)\n\n # Create and send query\n question = Question(Name(str(hostname)), Type.A, Class.IN)\n header = Header(9001, 0, 1, 0, 0, 0)\n header.qr = 0\n header.opcode = 0\n header.rd = 1\n query = Message(header, [question])\n sock.sendto(query.to_bytes(), (str(dnsserv), 53))\n\n # Receive response\n data = sock.recv(2048)\n response = Message.from_bytes(data)\n print(\"Number of answers: \" +str(len(response.answers)))\n print(\"Number of authorities: \" + str(len(response.authorities)))\n print(\"Number of additionals: \" + str(len(response.additionals)))\n\n # Get data\n aliaslist = cnames\n ipaddrlist = []\n dnslist = []\n \n while response.answers:\n for answer in response.answers:\n if answer.type_ == Type.A:\n print(\"found A RR\")\n if(self.caching):\n rcache.add_record(answer)\n ipaddrlist.append(answer.rdata.address)\n if answer.type_ == Type.CNAME:\n aliaslist.append(answer.rdata.cname)\n if answer.type_ == Type.NS:\n dnslist.append(answer.rdata.nsdname)\n if ipaddrlist:\n return hostname, aliaslist, ipaddrlist\n elif aliaslist:\n question = Question(Name(aliaslist[0]), Type.A, Class.IN)\n query = Message(header, [question])\n sock.sendto(query.to_bytes(), (dnsserv, 53))\n data = sock.recv(2048)\n response = Message.from_bytes(data)\n elif dnslist:\n nsname = dnslist.pop()\n maybe_dnsserv = self.getnsaddr(nsname, response.additionals)\n if maybe_dnsserv:\n dnsserv = maybe_dnsserv\n else:\n pass\n sock.sendto(query.to_bytes(), (dnsserv, 53))\n data = sock.recv(2048)\n response = Message.from_bytes(data)\n else:\n break\n\n if response.authorities:\n for authority in response.authorities:\n if authority.type_ != Type.NS:\n pass\n dnslist.append(authority.rdata.nsdname)\n while dnslist:\n nsname = dnslist.pop()\n maybe_next_dnsserv = self.getnsaddr(nsname, response.additionals)\n if maybe_next_dnsserv:\n next_dns_serv = maybe_next_dnsserv\n else:\n pass\n (hname, aliasl, ipaddrl) = self.gethostbyname(hostname, nsname)\n if ipaddrl:\n return hname, aliasl, ipaddrl",
"def cli(ctx, domain, ip_address, hostname):\n zone = getzone(domain)\n #print('.%s:%s:%s' % (domain, ip_address, hostname))\n for r in zone:\n if r['type'] == 'CNAME':\n print('C%s:%s' %( r['name'], r['content']))\n elif r['type'] == 'TXT':\n print('\\'%s:%s' %( r['name'], r['content']))\n elif r['type'] == 'MX':\n pass\n elif r['type'] == 'A':\n print('=%s:%s' %( r['name'], r['content']))\n else:\n exit('unknown DNS record type: %s' % r['type'])",
"def test_getdnsrecords(self, kasserver):\n assert kasserver.get_dns_records(\"example.com\") == self.RESPONSE_PARSED",
"def print_all_dns_records():\n for domain in sorted(get_domains()):\n dns_records = get_domain_dns_records(domain)\n print(domain)\n pprint(dns_records)\n print(\"*\" * 50)\n # TODO: poor man's rate limiter. improve?\n time.sleep(2)",
"def get_dns(self):\n return self.mycam.devicemgmt.GetDNS()",
"def google_dns_resolver(target):\n url = f\"https://dns.google/resolve?name={target}&type=A\"\n \n r = requests.get(url=url)\n if r.status_code != 200:\n return None\n else:\n for result in json.loads(r.text)[\"Answer\"]:\n try:\n if not (\n ipaddress.IPv4Address(result[\"data\"]).is_private\n or ipaddress.IPv4Address(result[\"data\"]).is_loopback\n or ipaddress.IPv4Address(result[\"data\"]).is_link_local\n ):\n return result[\"data\"]\n else:\n continue\n except ipaddress.AddressValueError:\n continue\n # if the loop terminates without any result return None\n return None",
"def google_dns_resolver(target):\n url = f\"https://dns.google/resolve?name={target}&type=A\"\n \n r = requests.get(url=url)\n if r.status_code != 200:\n return None\n else:\n for result in json.loads(r.text)[\"Answer\"]:\n try:\n if not (\n ipaddress.IPv4Address(result[\"data\"]).is_private\n or ipaddress.IPv4Address(result[\"data\"]).is_loopback\n or ipaddress.IPv4Address(result[\"data\"]).is_link_local\n ):\n return result[\"data\"]\n else:\n continue\n except ipaddress.AddressValueError:\n continue\n # if the loop terminates without any result return None\n return None",
"def get_dns_list(self):\n return self.get_ipv4_dns_list()",
"def get_name_servers(self, \n ipv4_gateway_mac: str = '01:23:45:67:89:0a',\n ipv6_gateway_mac: str = '01:23:45:67:89:0b',\n domain: str = 'google.com') -> List[Dict[str, str]]:\n\n # region Clear results list\n ns_servers: List[Dict[str, str]] = list()\n self.results.clear()\n # endregion\n\n # region Start sniffer\n if not self.quiet:\n self.base.print_info('Get NS records of domain: ' + domain + ' ...')\n self._sniff_start(self.your_mac_address, self.your_ipv4_address, self.your_ipv6_address, 53)\n # endregion\n\n # region Send DNS queries\n raw_socket: socket = socket(AF_PACKET, SOCK_RAW)\n raw_socket.bind((self.network_interface, 0))\n\n name_servers_addresses = self.base.get_system_name_servers()\n for name_server_address in name_servers_addresses:\n if self.base.ip_address_validation(name_server_address):\n if self.base.ip_address_in_network(name_server_address, self.your_ipv4_network):\n name_server_mac: str = self.arp_scan.get_mac_address(self.network_interface, name_server_address)\n else:\n name_server_mac: str = ipv4_gateway_mac\n dns_query = self.dns.make_ns_query(ethernet_src_mac=self.your_mac_address,\n ethernet_dst_mac=name_server_mac,\n ip_src=self.your_ipv4_address,\n ip_dst=name_server_address,\n udp_src_port=randint(2049, 65535),\n udp_dst_port=53,\n transaction_id=randint(1, 65535),\n name=domain)\n raw_socket.send(dns_query)\n # endregion\n\n # region Resolve NS servers\n sleep(5)\n self._sniff_stop()\n\n ns_servers_names: List[str] = list()\n ns_servers_addresses: List[str] = list()\n\n for ns_server in self.results:\n ns_servers_names.append(ns_server['NS'])\n\n for ns_server_name in ns_servers_names:\n try:\n ns_server_addresses = gethostbyname_ex(ns_server_name)\n if len(ns_server_addresses) > 0:\n for ns_server_address in ns_server_addresses[2]:\n if ns_server_address not in ns_servers_addresses:\n ns_servers_addresses.append(ns_server_address)\n except herror:\n pass\n\n for ns_server_address in ns_servers_addresses:\n if self.base.ip_address_validation(ns_server_address):\n ns_servers.append({'IPv4 address': ns_server_address,\n 'MAC address': ipv4_gateway_mac})\n if self.base.ipv6_address_validation(ns_server_address):\n ns_servers.append({'IPv6 address': ns_server_address,\n 'MAC address': ipv6_gateway_mac})\n\n return ns_servers\n # endregion",
"def get_botnet_domains():\n\n fw = \"<HTTPS://YOUR_FORTIGATE_IP:YOUR_FORTIGATE_PORT>\"\n\n path = \"/api/v2/monitor/system/botnet-domains/hits/?access_token=\"\n\n token = \"<YOUR_API_KEY>\"\n\n content_filter = \"\"\n\n if content_filter != \"\":\n url = fw + path + token + content_filter\n else:\n url = fw + path + token\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n \n try:\n r = requests.get(url, verify=False).json()\n except Exception:\n print(\"Something went wrong. Is the url correct? Exiting...\")\n sys.exit()\n\n for key in r['results']:\n print()\n for k,v in key.items():\n print(\"{0:6} : {1}\".format(k.upper(), str(v)))",
"def get_dns_records_from_godaddy(self) -> list:\n\n headers = {\"Authorization\": \"sso-key {}:{}\".format(self.api_key, self.secret_key)}\n dns_records = []\n for dns_record in self.dns_records:\n url = \"https://api.godaddy.com/v1/domains/{}/records/{}/{}\".format(dns_record[\"domain\"],\n dns_record[\"dns_record_type\"],\n dns_record[\"name\"])\n dns_records.append(get(url, headers=headers).text)\n return dns_records",
"def collect_results(name: str) -> dict:\n full_response = {}\n\n target_name = dns.name.from_text(name)\n\n # lookup CNAME\n response = lookup(target_name, dns.rdatatype.CNAME)\n cnames = []\n if response is not None:\n for answers in response.answer:\n for answer in answers:\n cnames.append({\"name\": answer, \"alias\": name})\n\n # lookup A\n response = lookup(target_name, dns.rdatatype.A)\n arecords = []\n\n if response is not None:\n for answers in response.answer:\n a_name = answers.name\n for answer in answers:\n if answer.rdtype == 1: # A record\n arecords.append({\"name\": a_name, \"address\": str(answer)})\n\n # lookup AAAA\n response = lookup(target_name, dns.rdatatype.AAAA)\n aaaarecords = []\n\n if response is not None:\n for answers in response.answer:\n aaaa_name = answers.name\n for answer in answers:\n if answer.rdtype == 28: # AAAA record\n aaaarecords.append({\"name\": aaaa_name, \"address\": str(answer)})\n\n # lookup MX\n response = lookup(target_name, dns.rdatatype.MX)\n mxrecords = []\n if response is not None:\n for answers in response.answer:\n mx_name = answers.name\n for answer in answers:\n if answer.rdtype == 15: # MX record\n mxrecords.append({\"name\": mx_name,\n \"preference\": answer.preference,\n \"exchange\": str(answer.exchange)})\n\n full_response[\"CNAME\"] = cnames\n full_response[\"A\"] = arecords\n full_response[\"AAAA\"] = aaaarecords\n full_response[\"MX\"] = mxrecords\n\n return full_response",
"def dns_retentions(self):\n url_path = 'dns/retentions'\n self.logger.debug(\"Get possible retentions for '/dns' per each granularity\")\n return self._common_get(url_path)",
"def collect_results(name: str) -> dict:\n full_response = {}\n target_name = dns.name.from_text(name)\n # lookup CNAME\n response = lookup(target_name, dns.rdatatype.CNAME)\n cnames = []\n for answers in response.answer:\n for answer in answers:\n cnames.append({\"name\": answer, \"alias\": name})\n # lookup A\n response = lookup(target_name, dns.rdatatype.A)\n arecords = []\n for answers in response.answer:\n a_name = answers.name\n for answer in answers:\n if answer.rdtype == 1: # A record\n arecords.append({\"name\": a_name, \"address\": str(answer)})\n # lookup AAAA\n response = lookup(target_name, dns.rdatatype.AAAA)\n aaaarecords = []\n for answers in response.answer:\n aaaa_name = answers.name\n for answer in answers:\n if answer.rdtype == 28: # AAAA record\n aaaarecords.append({\"name\": aaaa_name, \"address\": str(answer)})\n # lookup MX\n response = lookup(target_name, dns.rdatatype.MX)\n mxrecords = []\n for answers in response.answer:\n mx_name = answers.name\n for answer in answers:\n if answer.rdtype == 15: # MX record\n mxrecords.append({\"name\": mx_name,\n \"preference\": answer.preference,\n \"exchange\": str(answer.exchange)})\n\n full_response[\"CNAME\"] = cnames\n full_response[\"A\"] = arecords\n full_response[\"AAAA\"] = aaaarecords\n full_response[\"MX\"] = mxrecords\n\n return full_response",
"def reverse_dns_sna(ipaddress):\n\n r = requests.get(\"http://api.statdns.com/x/%s\" % ipaddress)\n\n if r.status_code == 200:\n names = []\n\n for item in r.json()['answer']:\n name = str(item['rdata']).strip(\".\")\n names.append(name)\n\n return names\n elif r.json()['code'] == 503:\n # NXDOMAIN - no PTR record\n return None",
"def test_getdnsrecord(self, kasserver):\n assert (\n kasserver.get_dns_record(\"www.example.com\", \"A\") == self.RESPONSE_PARSED[0]\n )",
"def query_dns_records(event, context):\n ids = ['SOA', 'TXT', 'MX', 'NS', 'DNSKEY']\n dn = event['queryStringParameters'][query_parameter].lower()\n body = {'scanDate': (datetime.datetime.now(datetime.timezone.utc) +\n datetime.timedelta(hours=8)).isoformat().upper()[:26],\n 'scanRecordTypes': ids,\n 'domain': dn,\n 'records': {}}\n\n try:\n try:\n for record_type in ids:\n try:\n answers = dns.resolver.query(dn, record_type)\n records = []\n for data in answers:\n records.append(data.to_text())\n body['records'][record_type] = records\n except (dns.resolver.NoAnswer, dns.resolver.NoNameservers, dns.exception.Timeout):\n pass # might fail per record_type, perfectly fine\n\n # insert into DynamoDB\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table(table_name)\n table.put_item(Item=body)\n status_code = 200\n result = json.dumps(body)\n\n except dns.resolver.NXDOMAIN:\n status_code = 404 # domain no longer exists, or domain not found :)\n result = ''\n\n except KeyError: # insufficient queryStringParameters\n status_code = 400\n result = ''\n\n return {'statusCode': status_code,\n 'headers': headers,\n 'body': result}",
"def dns(self, **kwargs):\n self.logger.debug(f\"Get RealTime DNS data\")\n url_path = 'dns'\n body = self._make_body(kwargs)\n return self._common_post(request_path=url_path, body=body)",
"def parse_digtxt(querystr,resultset):\n response = pydig.query(querystr, 'txt')\n for elem in response[0].split():\n if 'include:' in elem:\n resultset = parse_digtxt(elem[8:], resultset)\n else:\n if 'ip4' in elem:\n if elem[4:] not in resultset:\n resultset[elem[4:]] = \"GCP\"\n if 'ip6' in elem:\n if elem[4:] not in resultset:\n resultset[elem[4:]] = \"GCP\"\n return resultset",
"def process_google_rr_ranges(record: str, loader_class):\n networks = []\n includes = []\n\n for field in google_rr_dns_query(record).split(' '):\n match = RE_IPV4.match(field)\n if match:\n networks.append(loader_class(match.groupdict()['prefix']))\n continue\n\n match = RE_IPV6.match(field)\n if match:\n networks.append(loader_class(match.groupdict()['prefix']))\n continue\n\n match = RE_INCLUDE.match(field)\n if match:\n include = match.groupdict()['rr']\n networks.extend(\n process_google_rr_ranges(include, loader_class)\n )\n includes.append(include)\n continue\n\n return networks",
"def get_dynamic_dns(self):\n return self.mycam.devicemgmt.GetDynamicDNS()",
"def dns(self):\n\n return value_list_to_comma('DNS', self._peer.dns)",
"def _read_dns_(dns, cnt):\r\n \r\n dn_names = None\r\n dn_ids = None\r\n dn_iaps = [None]*10\r\n \r\n for dn in dns.DN:\r\n if dn.ref == 'Name':\r\n dn_names = dn.value\r\n if dn.ref == 'DNId':\r\n dn_ids = dn.value\r\n if dn.ref == 'IAP':\r\n dn_iaps[0] = dn.value\r\n if dn.ref == 'IAP2':\r\n dn_iaps[1] = dn.value\r\n if dn.ref == 'IAP3':\r\n dn_iaps[2] = dn.value\r\n if dn.ref == 'IAP4':\r\n dn_iaps[3] = dn.value\r\n if dn.ref == 'IAP5':\r\n dn_iaps[4] = dn.value\r\n if dn.ref == 'IAP6':\r\n dn_iaps[5] = dn.value\r\n if dn.ref == 'IAP7':\r\n dn_iaps[6] = dn.value\r\n if dn.ref == 'IAP8':\r\n dn_iaps[7] = dn.value\r\n if dn.ref == 'IAP9':\r\n dn_iaps[8] = dn.value\r\n if dn.ref == 'IAP10':\r\n dn_iaps[9] = dn.value\r\n \r\n logger.info('Parsed DN names: %s' % dn_names)\r\n logger.info('Parsed DN ids: %s' % dn_ids)\r\n logger.info('Parsed DN iaps: %s' % dn_iaps)\r\n \r\n for i in range(len(dn_names)):\r\n mydn = Dn()\r\n mydn.set_id(dn_ids[i])\r\n mydn.set_name(dn_names[i])\r\n myiaps = [None]*10\r\n for j in range(10):\r\n myiaps[j] = dn_iaps[j][i]\r\n mydn.set_iaps(myiaps)\r\n cnt.add_dn(mydn)\r\n return cnt",
"def create_dns_dictionary(self, path_tracefile):\n responses = self.get_dns_responses(path_tracefile)\n dns_dict = dict()\n for response in responses:\n for x in range(response[DNS].ancount): # answer count, how many IP adresses are returned for the query\n try: # answer count could also include 'DNS SRV Resource Record' which does not have a 'rrname' attribute so ancount is wrong if there is such a record -> TODO get amount of DNSRR instead of using ancount\n domain = getattr(response[DNSRR][x], 'rrname').decode(\"utf-8\") # domain (this is returned in bytes so decode)\n ip = getattr(response[DNSRR][x], 'rdata') # IP adres of the domain, TODO make this work for multiple ip adresses for one domain (Test with [0] at end)\n dns_dict[ip] = domain[:-1] #remove last char '.' \n except:\n continue\n return dns_dict",
"def _lv_pydns_lookup(name):\n if not DNS.defaults[\"server\"]:\n DNS.DiscoverNameServers()\n req = DNS.Request(name=name, qtype=\"srv\", protocol=\"udp\")\n for retries_left in [3, 2, 1, 0]:\n try:\n response = req.req()\n if response and response.header[\"tc\"]:\n # truncated, rerun with tcp\n req = DNS.Request(name=name, qtype=\"srv\", protocol=\"tcp\")\n continue\n break\n except DNS.Base.DNSError:\n if not retries_left:\n raise\n time.sleep(1) # retry after sleeping a second\n if not response or not response.answers:\n return []\n result = []\n for a in response.answers:\n if a[\"typename\"].lower() != \"srv\":\n continue\n if isinstance(a[\"data\"], list):\n result.extend(a[\"data\"])\n else:\n result.append(a[\"data\"])\n return result",
"def _lv_dns_lookup(name):\n if dns is None:\n return _lv_pydns_lookup(name)\n resp = dns.resolver.query(name, \"srv\")\n if resp.response.flags & dns.flags.TC:\n resp = dns.resolver.query(name, \"srv\", tcp=True)\n return [(a.priority, a.weight, a.port, a.target.to_text(True)) for a in resp]",
"def recursive_dns_lookup(target_name, qtype, root_servers_list):\n\n # Base case\n if not root_servers_list:\n return None\n\n # Create dns query based on the target_name (website)\n # and qtype (queue type: CNAME, A, AAAA, or MX)\n dns_query = dns.message.make_query(target_name, qtype)\n\n for server in root_servers_list:\n # Doing a try catch to check if the dns server times out,\n # if it does then we continue and try another server\n try:\n query_response = dns.query.udp(dns_query, server, 3)\n except dns.exception.Timeout:\n continue\n # If there's an answer in the response\n if query_response.answer:\n # Search through the response.answer for possible answers\n for response_answers in query_response.answer:\n #print(\"response_answers: \", response_answers)\n for response_answer in response_answers:\n #print(\"Response_answer\", response_answer)\n target_name = str(response_answer)[:-1] # Removes the period at the end\n #print(\"Target_name\", target_name)\n # If we don't get the reponse we're after then\n # continue searching through the root_servers\n if response_answer.rdtype != qtype:\n if response_answer.rdtype == 5:\n return recursive_dns_lookup(target_name, qtype, ROOT_SERVERS)\n else:\n # Return the answer we wanted\n return query_response\n else: # If there isn't an answer in the response then we check additional\n\n # If we do have something in additional then get the stuff inside\n if query_response.additional:\n ip_addresses = []\n for response_additional in query_response.additional:\n #print(\"response_additional: \", response_additional)\n # Convert to string then send to function for parsing the address out\n response_additional_str = str(response_additional)\n\n #print(\"function get_address resp:\", resp)\n resp_elements = response_additional_str.split()\n #print(\"function get_address resp_elements:\", resp_elements)\n ip_address = []\n for resp_element in resp_elements:\n #print(\"function get_address resp_element:\", resp_element)\n if resp_element != 'A':\n continue\n else:\n #print(\"function get_address resp_element = A:\", resp_element)\n #print(\"function get_address address:\", resp_elements[-1])\n ip_address.append(resp_elements[-1])\n ip_addresses += ip_address\n\n return recursive_dns_lookup(target_name, qtype, ip_addresses)",
"def test_dns(self):\n rv = extract_ids(X509_DNS_ONLY)\n assert [\n DNSPattern(b\"www.twistedmatrix.com\"),\n DNSPattern(b\"twistedmatrix.com\")\n ] == rv",
"def getNodeDNS(self,node):\n data = self.connect('get','nodes/%s/dns' % (node),None)\n return data"
] | [
"0.6515697",
"0.6348239",
"0.6325451",
"0.6286375",
"0.62465525",
"0.6237042",
"0.6229455",
"0.6229455",
"0.6196531",
"0.6195094",
"0.6082494",
"0.60716206",
"0.60715824",
"0.6052533",
"0.60521525",
"0.60312563",
"0.60121626",
"0.5998611",
"0.5934739",
"0.59268296",
"0.5904943",
"0.58739",
"0.58312863",
"0.5818731",
"0.5806879",
"0.5799662",
"0.57967144",
"0.57844275",
"0.5781621",
"0.5740916"
] | 0.6482904 | 1 |
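A minimal usage sketch for the google_rr_dns_query helper shown above, assuming dnspython is installed, the helper and its NetworkError exception are importable, and that the record name _netblocks.google.com (a commonly used source of Google's published ranges, not taken from this row) is the one being queried:

    # Fetch the SPF-style TXT record and pull out the ip4:/ip6: prefixes.
    spf_text = google_rr_dns_query('_netblocks.google.com')
    prefixes = [field.split(':', 1)[1]
                for field in spf_text.split()
                if field.startswith(('ip4:', 'ip6:'))]
    print(prefixes)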
Fill the missing values (NaN) in a column with the mean value of the group the row belongs to. The rows are grouped based on the values of another column. | def fill_with_group_average(df, group, column):
    # Fill NaNs in `column` with the mean of the group each row belongs to.
    df[column].fillna(df.groupby(group)[column].transform('mean'), inplace=True)
return df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fill_mean(df):\n df = df.fillna(df.mean().fillna(0).to_dict())\n return df",
"def mean_impute(self, column_val):\n mean = np.mean(column_val)\n column_val = column_val.fillna(mean)\n return column_val",
"def filling_nan_values(df: pd.DataFrame) -> pd.DataFrame: \n ratio = df.count()/len(df) \n cols = ratio[ratio < 1].index\n for col in cols: \n print(f\"Filling Column:{col}\")\n df[col] = df[col].fillna(df[col].mean())\n return df",
"def mean_replace_nan(dataframe, median=False):\n tmp = dataframe\n\n if median:\n tmp_med = tmp[median]\n tmp_med = tmp_med.fillna(tmp_med.median())\n \n tmp = tmp.fillna(tmp.mean())\n\n if median:\n tmp[tmp_med.columns] = tmp_med\n\n return tmp",
"def set_nan_rows_to_normalized_mean(X):\n\n C_norm = np.linalg.norm(X, axis=1)\n\n C_real = np.mean(X[~np.isnan(C_norm), :], axis=0)\n C_real = C_real / np.linalg.norm(C_real, axis=0)\n\n # Set the NaN rows to the mean.\n X[np.isnan(C_norm), :] = np.tile(C_real, (sum(np.isnan(C_norm)), 1))\n\n return X",
"def mean_nan(A):\n dat = np.ma.masked_array(A, np.isnan(A))\n mean = np.mean(dat, axis=0)\n return mean.filled(np.nan)",
"def replace_nan(data):\r\n lst_ind = np.array(['valence_intensity', 'anger_intensity',\r\n 'fear_intensity', 'sadness_intensity', 'joy_intensity'])\r\n for i in lst_ind:\r\n native = data[:][i]\r\n avg = np.nanmean(native)\r\n data[:][i] = np.where(np.isnan(native), avg, native)\r\n return data",
"def replace_missingvalues_bandmean(X):\n if X.ndim != 4:\n raise ValueError('Input not valid, no [pic, row, column, band] data format')\n\n zeros = np.where(X[:,:,:] == 0)\n\n bandmean = {}\n\n for i in sorted(np.unique(zeros[3])):\n bandmean.update({i:np.mean(X[:,:,:,i])})\n\n for i in range(0,len(zeros[0])):\n pic, row, column, band = zeros[0][i],zeros[1][i],zeros[2][i],zeros[3][i]\n mean = bandmean.get(band)\n X[pic,row,column,band] = int(mean)\n\n return X",
"def __fillnan(df):\n\t\tcol_names = ['budget', 'popularity', 'runtime', 'vote_average', 'vote_count']\n\t\tfor col_name in col_names:\n\t\t\tdf[col_name] = df[col_name].fillna(df[col_name].median())\n\t\treturn df",
"def scipy_nanmean(x, axis=0):\n x, axis = _chk_asarray(x,axis)\n x = x.copy()\n Norig = x.shape[axis]\n factor = 1.0-np.sum(np.isnan(x),axis)*1.0/Norig\n\n x[np.isnan(x)] = 0\n return np.mean(x,axis)/factor",
"def fill_nan(A):\n\tinds = np.arange(A.shape[0])\n\tgood = np.where(np.isfinite(A))\n\tA[np.isnan(A)] = np.interp(inds[np.isnan(A)], inds[good], A[good])\n\treturn A",
"def compute_column_means_with_incomplete_data(df):\n X = np.array(df)\n return np.nanmean(X, axis = 0)",
"def compute_column_means_with_incomplete_data(df):\n X = np.array(df)\n return np.nanmean(X, axis = 0)",
"def fill_lower_part(data, index, aver_num=3):\n if type(data) is LidarDataset:\n d = data['data']\n else:\n d = data\n to_fill = np.ma.masked_invalid(d[..., index:index+aver_num]).mean(axis=-1)[..., np.newaxis]\n d[..., :index] = to_fill",
"def fill_nan(array):\n idx = np.arange(array.shape[0])\n good = np.where(np.isfinite(array))\n interp = interpolate.interp1d(idx[good], array[good], bounds_error=False)\n return np.where(np.isfinite(array), array, interp(idx))",
"def fill_weather_nans(column, df, agg_func='median'):\n # aggregate data to obtain median value for a particular site, month, and day\n agg_weather_df = pd.DataFrame(df.groupby(['site_id', 'month', 'day'])[column].agg(agg_func))\n\n # check for missing values in the aggregated data\n if agg_weather_df[column].isnull().any():\n # fill NaNs using interpolation\n agg_df = agg_weather_df[column].interpolate(limit_direction='both',\n inplace=True)\n agg_weather_df.update(agg_df, overwrite=False)\n\n # set index before updating input DataFrame\n df.set_index(['site_id', 'month', 'day'], inplace=True)\n df.update(agg_weather_df, overwrite=False)\n\n # reset index\n df.reset_index(inplace=True)",
"def fill_nan(x):\n (n_rows, wdw) = x.shape\n new_x = np.zeros((n_rows,wdw)); new_x[:] = np.nan\n for i in range(n_rows):\n indMissing = np.where(np.isnan(x[i,:]))[0]\n l = len(x[i,indMissing]) #number of MVs\n if l < 4*wdw/5: #20% available values otherwise discarded\n new_x[i,:] = x[i,:]\n if l > 0 and indMissing[0] == 0: #missing value at index 0 \n c = 0\n while c + 1 < len(indMissing) and indMissing[c+1] == indMissing[c] + 1:\n c += 1\n new_x[i,:c+1] = x[i,c+1] #first nans replaced by first non nan value\n indMissing = np.where(np.isnan(new_x[i,:]))[0]\n l = len(new_x[i,indMissing])\n if l > 0 and indMissing[0] > 0:\n new_x[i,:] = interpolate1d(new_x[i,:]) #interpolate intermediate nans\n ind = np.where(~np.isnan(new_x).all(axis=1))[0]\n new_x = new_x[ind] #remove NaNs \n \n return new_x, ind",
"def nanmean(array_data, axis=0):\n\n mdat = np.ma.masked_array(array_data, np.isnan(array_data));\n retval = np.mean(mdat, axis=axis);\n \n return retval;",
"def fill_nan_in_numeric(df):\n print(\" --- Filling NaN in Numerics.\")\n thresh = get_min_filled_threshold(df)\n columns = df.columns\n numerical = [x for x in columns if x.startswith('n_')]\n # fill NaN with mean or median, based on std dev\n for col in numerical:\n filled = get_non_missing_count(df[col])\n if filled < thresh:\n df[col] = df[col].fillna(-1)\n else:\n std = df[col].std()\n if std < 1:\n mean = df[col].mean()\n df[col] = df[col].fillna(mean)\n else:\n median = df[col].median()\n df[col] = df[col].fillna(mean)\n\n print(\" --- Finished filling NaN in Numerics.\")\n return df",
"def nanmean(self, axis=0, **kwargs) -> \"Dataset\":\n return self.aggregate(axis=axis, func=np.nanmean, **kwargs)",
"def fill_median(df):\n df = df.fillna(df.median().fillna(0).to_dict())\n return df",
"def fill_missing(self):\n df = self.df\n # Filling with default values\n logger.debug(\"Filling from distributions...\")\n for field in HeatStrokeDataFiller.default_map or field in HeatStrokeDataFiller.positive_default:\n if field not in df.columns:\n logger.warning(\"(%s) missing from data-frame columns\" % field)\n continue\n logger.debug(\"Setting missing in \\\"%s\\\" to default: %s\" % (field, HeatStrokeDataFiller.default_map[field]))\n default_value = HeatStrokeDataFiller.default_map[field]\n\n where = HeatStrokeDataFiller.find_where_missing(df, field, find_nan=True, find_str=False)\n how_many_to_fill = np.sum(where)\n if field in HeatStrokeDataFiller.positive_default:\n # Use default positive dietributions\n distribution = HeatStrokeDataFiller.positive_default[field]\n df[field].loc[where] = distribution(how_many_to_fill)\n else:\n logger.debug(\"Using default %s for field: %s\" % (default_value, field))\n # Use default values\n df[field].loc[where] = np.array([default_value] * how_many_to_fill)\n\n # Filling with Zeros\n logger.debug(\"Fillling with zeros...\")\n for field in HeatStrokeDataFiller.fields_to_fill_with_zero:\n if field not in df.columns:\n logger.warning(\"\\\"%s\\\" missing from columns\" % field)\n continue\n logger.debug(\"Setting missing in \\\"%s\\\" to 0\" % field)\n\n where = HeatStrokeDataFiller.find_where_missing(df, field, find_nan=True, find_str=True)\n how_many_to_fill = np.sum(where)\n df[field].loc[where] = np.zeros(how_many_to_fill)\n\n # Filling in columns with the average from the rest of the column\n logger.debug(\"Filling with agerages...\")\n for field in HeatStrokeDataFiller.fields_to_fill_with_average:\n if field not in df.columns:\n logger.warning(\"\\\"%s\\\" missing from data-frame columns\" % field)\n continue\n\n where = HeatStrokeDataFiller.find_where_missing(df, field, find_nan=True, find_str=True)\n data = df[field][np.invert(where)]\n mean = np.mean(data)\n std = np.std(data)\n if mean == np.nan or std == np.nan:\n mean, std = (0, 0)\n logger.debug(\"Setting missing in \\\"%s\\\" with: %.3f +/- %.3f\" % (field, mean, std))\n how_many_to_fill = np.sum(where)\n df[field].loc[where] = mean + std * np.random.random(how_many_to_fill)\n\n fields_not_modified = set(df.columns) - set(HeatStrokeDataFiller.default_map.keys()) - HeatStrokeDataFiller.fields_to_fill_with_zero - HeatStrokeDataFiller.fields_to_fill_with_zero\n logger.debug(\"Fields not modified: %s\" % fields_not_modified.__str__())\n return df",
"def fill_missing(df,strategy='mean', missingValue=np.nan):\n c_name = [n for n,d in df.dtypes if d != 'string' and d != 'boolean']\n imp = Imputer(inputCols=c_name,outputCols=c_name,strategy=strategy, missingValue=missingValue).fit(df)\n return imp,imp.transform(df)",
"def nanmean(x: torch.FloatTensor):\n nan_mask = torch.isnan(x)\n denominator = (~nan_mask).sum()\n if denominator.eq(0):\n return torch.full((1, ), fill_value=float('nan'), device=x.device)\n else:\n numerator = x[~nan_mask].sum()\n return torch.true_divide(numerator, denominator)",
"def mean_of_group(gb):\n if type(gb.get_group(1)) is pd.DataFrame:\n d = {}\n for name, df in gb:\n mean = np.nanmean(df.values)\n d.update({name: mean})\n s = pd.Series(d)\n return s\n \n else:\n items= gb.get_group(1).items\n d = {key: {} for key in items}\n for name, p in gb:\n for i in items:\n mean = np.nanmean(p[i].values)\n d[i].update({name: mean})\n df = pd.DataFrame(d)\n return df",
"def fill_data(column, data):\n data[column].fillna(data[column].value_counts().index[0], inplace=True)",
"def missing_values(df):\n\n # penalise missing review scores\n df[\"prop_review_score\"].fillna(-1, inplace=True)\n df[\"prop_location_score1\"].fillna(-1, inplace=True)\n df[\"prop_location_score2\"].fillna(-1, inplace=True)\n df[\"visitor_hist_starrating\"].fillna(-1, inplace=True)\n df[\"visitor_hist_adr_usd\"].fillna(-1, inplace=True)\n\n # replace price by mean of hotels with same starrating\n mean_price_starrating = df.groupby(\"prop_starrating\")[\"prop_log_historical_price\"].transform(\"mean\")\n df[\"prop_log_historical_price\"].fillna(mean_price_starrating, inplace=True)\n\n # fill by worst possible value in dataset\n aff_min = df[\"srch_query_affinity_score\"].min()\n df[\"srch_query_affinity_score\"].fillna(aff_min, inplace=True)\n\n # TODO: is dit worst???? hoezo is verder weg slechter?\n orig_max = df[\"orig_destination_distance\"].max()\n df[\"orig_destination_distance\"].fillna(orig_max, inplace=True)\n\n # remaining mv's are replaced by mean of column\n # df = df.fillna(df.mean())\n print(\"er zijn nog zoveel nans: \", df.isnull().sum().sum())\n\n return df",
"def fillna_median(data, columns, grouping=False, val='median', verbose=True):\n for col in columns:\n if grouping:\n data[col].fillna(data.groupby(grouping)[col].transform(val), inplace=True)\n meds = data.groupby(grouping)[col].median()\n else:\n meds = data[col].median()\n data[col].fillna(meds, inplace=True)\n if verbose:\n print('Medians: ')\n print(meds)",
"def fill_nan_in_category(df):\n print(\" --- Filling NaN in Categories.\")\n columns = df.columns\n categorical = [x for x in columns if x.startswith('c_')]\n df[categorical] = df[categorical].fillna('missing')\n print(\" --- Finished filling NaN in Categories.\")\n return df",
"def fillna(self, value=None, downcast=None):\n raise NotImplementedError(\"isna is not defined for MultiIndex\")"
] | [
"0.733223",
"0.6911329",
"0.6804728",
"0.6377485",
"0.63724154",
"0.6184612",
"0.6125718",
"0.61130387",
"0.61074495",
"0.60799503",
"0.6020397",
"0.6011322",
"0.6011322",
"0.59590447",
"0.58487135",
"0.5830829",
"0.5822329",
"0.58109444",
"0.57864994",
"0.57757205",
"0.57416546",
"0.57337564",
"0.572438",
"0.5717974",
"0.57159275",
"0.5703465",
"0.56843853",
"0.5662889",
"0.5654533",
"0.5648324"
] | 0.8269875 | 0 |
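A minimal usage sketch for the fill_with_group_average helper shown above, using hypothetical column names:

    import numpy as np
    import pandas as pd

    df = pd.DataFrame({'city': ['A', 'A', 'B', 'B'],
                       'price': [10.0, np.nan, 20.0, np.nan]})
    df = fill_with_group_average(df, 'city', 'price')
    # The NaNs become 10.0 for group 'A' and 20.0 for group 'B'.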
Return all the rows (with all columns) where the value in a certain 'column' is greater than the average value of that column, i.e. rows where row.column > mean(data.column). | def get_rows_greater_than_avg(df, column):
    df = df[df[column] > df[column].mean()]
return df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filter_rows_by_highest_abs_val_mean(df, max_=MAX_NUM_ROWS):\n top_rows = numpy.abs(df.mean(axis=1)).nlargest(max_)\n return df.ix[top_rows.index]",
"def demo_one_filter():\n data = [1.3, 2.7, 0.8, 4.1, 4.3, -0.1]\n avg = np.mean(data)\n print \"average value is:\", avg\n\n # create iterator that filters to keep only above average data\n above_avg_iter = filter(lambda x: x > avg, data) # returns iterator for data above the avg\n\n print \"values strictly above average are:\", list(above_avg_iter)",
"def drop_rows_with_outliers(df, columns, sigma=3):\n selection = np.full(len(df.index), True, dtype=np.dtype('bool'))\n if not isinstance(columns, list):\n columns = [columns]\n for var in columns:\n std_var = np.std(df[var])\n mean_var = np.mean(df[var])\n in_range = np.logical_and(df[var] > mean_var - sigma*std_var,\n df[var] < mean_var + sigma*std_var)\n selection = np.logical_and(selection, in_range)\n return df[selection]",
"def get_mean_difference(self, data):\n # Create a temporary blank list.\n temp = []\n\n # Get the number of columns in the DataFrame.\n col = data.shape[1]\n\n # Iterate the number of columns and only select the column having\n # the data for means. Since there is only two groups, the subtraction\n # will be hardcoded. There are two possible scenarios where the first\n # mean is larger than the second mean or vise versa. When the difference\n # is acquired, add it to the temporary list.\n for x in range(col):\n if x % 2 == 0:\n if data.loc[0][x] >= data.loc[1][x]:\n diff = data.loc[0][x] - data.loc[1][x]\n temp.append(diff)\n elif data.loc[0][x] < data.loc[1][x]: \n diff = data.loc[1][x] - data.loc[0][x]\n temp.append(diff)\n\n # Convert the list to a Series.\n means = pd.Series(temp)\n\n return means",
"def remove_outliers(df, std_threshold: float = 3):\n\n df = df[np.abs(df - df.mean()) <= (std_threshold * df.std())]\n return df",
"def process_dataframe(self, dataframe):\n return dataframe[dataframe.ix[:,0] < dataframe.ix[:,1]]",
"def process_dataframe(self, dataframe):\n return dataframe[dataframe.ix[:,0] < dataframe.ix[:,1]]",
"def filter(data,col,low,high):\n inds = np.where(data[:,col]>=low)\n data_trim = data[inds]\n inds = np.where(data_trim[:,col]<=high)\n data_trim = data_trim[inds]\n return data_trim",
"def filter(data,col,low,high):\n inds = np.where(data[:,col]>=low)\n data_trim = data[inds]\n inds = np.where(data_trim[:,col]<=high)\n data_trim = data_trim[inds]\n return data_trim",
"def checkStdDev(df,thr):\n greaterThanThreshold = True\n positions= np.array([])\n for i in range(1,df.shape[0]):\n stdDev = np.std(df.iloc[i,1:].astype(np.longdouble))\n if (stdDev < thr):\n greaterThanThreshold = False\n positions = np.append(positions,i)\n \n return greaterThanThreshold",
"def remove(df,column_to_filter,standard_deviations=3):\n import math\n #This function will flatten the row of the dataframe\n def flatten_column(row):\n return tuple(float(x) for x in row)\n stats = df.select(column_to_filter).rdd.flatMap(flatten_column).stats()\n mean = stats.mean()\n variance = stats.variance()\n stddev = math.sqrt(variance)\n stddev_threshhold = stddev*standard_deviations\n print(stddev_threshhold)\n from pyspark.sql.functions import lit\n df = df.where(\"abs({column_to_filter} - {mean}) > {stddev_threshhold}\"\\\n .format(column_to_filter=column_to_filter,mean=mean,stddev_threshhold=stddev_threshhold))\n return df",
"def __call__(self, x):\n return np.mean(self.observations <= x)",
"def return_in_norm_df(df, col, sigma):\n return np.abs(df[col] - df[col].mean()) <= (sigma*df[col].std())",
"def filter_rows_by_variance(df, max_=MAX_NUM_ROWS):\n top_rows = df.var(axis=1).nlargest(max_)\n return df.ix[top_rows.index]",
"def record_high(df, df2, agg='max'):\n if agg == 'max':\n return df[df['max'] > df2['max']].drop('mean', axis=1)\n elif agg == 'mean':\n return df[df['mean'] > df2['mean']].drop('max', axis=1)\n else:\n raise ValueError('unknown test')",
"def remove_outliers(self, matrix):\n input = matrix[:, :-1]\n row_incides_to_delete = []\n for j, column in enumerate(input.transpose()):\n self.feature_means.append(np.mean(column))\n self.feature_stds.append(np.std(column))\n\n for i, row in enumerate(input):\n cell = input[i, j]\n if cell > self.feature_means[j] + 3 * self.feature_stds[j] or cell < self.feature_means[j] - 3 * \\\n self.feature_stds[j]:\n row_incides_to_delete.append(i)\n matrix = np.delete(matrix, row_incides_to_delete, 0)\n return matrix, len(list(set(row_incides_to_delete)))",
"def column_stats(table, column):\n conn = psycopg2.connect(dbname='db', user='grok')\n # Establish cursor\n cursor = conn.cursor()\n try:\n # Execute query\n cursor.execute('SELECT '+column+' from '+table+';')\n records = cursor.fetchall()\n except:\n return []\n values = []\n for row in records:\n values.append(row)\n values = np.array(values)\n return (np.mean(values), np.median(values))",
"def check_outlier(dataframe, col_name):\n low_limit, up_limit = outlier_thresholds(dataframe, col_name, 0.05, 0.95)\n if dataframe[(dataframe[col_name] > up_limit) |\n (dataframe[col_name] < low_limit)].any(axis=None):\n return True\n else:\n return False",
"def filter_outliers(data: pd.Series, std: int=3) -> pd.Series:\n return data[(data - data.mean()).abs() <= (std * data.std())]",
"def handle_invalid(x, column_names=None):\n\n invalid_value = -999.0\n invalid_threshold = 0.7\n\n # Remove columns with a pct of invalid values above 70%\n pct_undef = (x <= invalid_value).mean(axis=0)\n below_thresh = pct_undef < invalid_threshold\n\n print(f\"{(~below_thresh).sum()} columns are above the invalid threshold. Removing\", end=\"\\n\\t\")\n if column_names is not None:\n print(*column_names[~below_thresh], sep=\"\\n\\t\")\n column_names = column_names[below_thresh]\n\n x = x[:, below_thresh]\n\n # Replace -999 with mean value of remaining values for each column still in dataset\n for i in range(x.shape[1]):\n col = x[:, i]\n mean = col[col > invalid_value].mean()\n col[col <= invalid_value] = mean\n\n return x, column_names",
"def replace(df,column_to_filter,standard_deviations=3):\n import math\n #This function will flatten the row of the dataframe\n def flatten_column(row):\n return tuple(float(x) for x in row)\n stats = df.select(column_to_filter).rdd.flatMap(flatten_column).stats()\n mean = stats.mean()\n variance = stats.variance()\n stddev = math.sqrt(variance)\n stddev_threshhold = stddev*standard_deviations\n # print(stddev_threshhold)\n from pyspark.sql.functions import lit,abs\n from pyspark.sql.functions import when\n\n df = df.withColumn(column_to_filter,\n when((abs(df[column_to_filter] - mean) > stddev_threshhold) & ((df[column_to_filter] - mean) > 0), (mean+stddev_threshhold))\n .otherwise(df[column_to_filter]))\n df = df.withColumn(column_to_filter,\n when((abs(df[column_to_filter] - mean) > stddev_threshhold) & ((df[column_to_filter] - mean) < 0), (mean-stddev_threshhold))\n .otherwise(df[column_to_filter]))\n\n return df",
"def get_outliers(a_dataframe):\n outliers_list = []\n for category in a_dataframe.dtypes.keys():\n try:\n column = a_dataframe.loc[:, category]\n mean = np.mean(column) # check if category is numeric\n except TypeError:\n pass\n else:\n # print_hist(column, category)\n st_dev = np.std(column)\n limit_hi = mean + 2 * st_dev\n limit_lo = mean - 2 * st_dev\n flag_bad = (column < limit_lo) | (column > limit_hi)\n if category != \"fnlwgt\": # skip 'fnlwgt' var. 'cos I'll delete it\n outliers_list.append(flag_bad)\n num_outliers = sum(flag_bad)\n print_stats(category, column,\n limit_hi, limit_lo,\n num_outliers\n )\n\n return outliers_list",
"def detect_outlier(column, max_dev=2):\n column_mean = np.mean(column)\n column_std = np.std(column)\n dist_from_mean = abs(column - column_mean)\n outlier_filter = dist_from_mean > max_dev * column_std\n ids = np.arange(len(column))\n return ids[outlier_filter]",
"def extract_relevant_rows(df, column_name, column_value, not_equal=False):\n\n if not_equal:\n return df.loc[df[column_name] != column_value]\n\n return df.loc[df[column_name] == column_value]",
"def reject_outliers(self, data, m=2):\n std = np.std(data)\n return data[abs(data - np.median(data)) < m * std]",
"def compute_column_means_with_incomplete_data(df):\n X = np.array(df)\n return np.nanmean(X, axis = 0)",
"def compute_column_means_with_incomplete_data(df):\n X = np.array(df)\n return np.nanmean(X, axis = 0)",
"def remove_outliers(df, column_pup, maxdev = 2.5, allowp=0.1, \r\n column_x = None, column_y = None, left = None, right = None, top = None, bottom = None):\r\n \r\n # off-screen samples\r\n ## check if proper argumnets are passed\r\n if None in [column_x, column_y, left, right, top, bottom]:\r\n warnings.warn(\"Screen information not properly specified. Out-of-screen samples will not be removed.\")\r\n df[column_pup+'_rm'] = df[column_pup]\r\n ## remove out-of-screen samples\r\n else:\r\n conditions = ((df[column_x] < left) | (df[column_x] > right) | (df[column_y] < top) | (df[column_y] > bottom))\r\n df[column_pup+'_rm'] = np.where(conditions, np.nan, df[column_pup])\r\n \r\n # samples with a large SD\r\n mean = df[column_pup+'_rm'].mean(skipna=True)\r\n std = df[column_pup+'_rm'].std(skipna=True)\r\n \r\n # if std is reasonably small then no outlier will be declared\r\n if std >= allowp*mean:\r\n lower = mean - maxdev*std\r\n upper = mean + maxdev*std\r\n conditions2 = ((df[column_pup+'_rm']<lower) | (df[column_pup+'_rm']>upper))\r\n df[column_pup+'_rm'] = np.where(conditions2, np.nan, df[column_pup+'_rm'])\r\n \r\n return df",
"def outlier_determine_threshold(df, col):\r\n df = df.copy(deep=True)\r\n keep_looping = True\r\n number_of_loops = 1\r\n thresh = 5\r\n while keep_looping:\r\n if number_of_loops >= 10:\r\n break\r\n mask_outliers = is_outlier(df[col], thresh=thresh).astype(int)\r\n dfout_index = df.iloc[np.where(mask_outliers>0)].index\r\n pct_outliers = len(dfout_index)/len(df)\r\n if pct_outliers == 0:\r\n if thresh > 5:\r\n thresh = thresh - 5\r\n elif thresh == 5:\r\n return thresh\r\n else:\r\n thresh = thresh - 1\r\n elif pct_outliers <= 0.01:\r\n keep_looping = False\r\n else:\r\n thresh_multiplier = int((pct_outliers/0.01)*0.5)\r\n thresh = thresh*thresh_multiplier\r\n number_of_loops += 1\r\n print(' %s Outlier threshold = %d' %(col, thresh))\r\n return thresh",
"def filter_rows_by_max_abs_val(df, max_=MAX_NUM_ROWS):\n df_temp = df.abs()\n top_rows = df_temp.max(axis=1).nlargest(max_)\n return df.ix[top_rows.index]"
] | [
"0.6379522",
"0.6149635",
"0.58458763",
"0.5754565",
"0.56743234",
"0.56359506",
"0.56359506",
"0.56054884",
"0.56054884",
"0.54775643",
"0.5467246",
"0.5397171",
"0.5395946",
"0.5346881",
"0.5316547",
"0.5313301",
"0.5276455",
"0.52631354",
"0.524703",
"0.52108675",
"0.52035844",
"0.517738",
"0.517637",
"0.51031053",
"0.5086962",
"0.5054679",
"0.5054679",
"0.50439554",
"0.50415796",
"0.50398886"
] | 0.8586459 | 0 |
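A minimal usage sketch for the get_rows_greater_than_avg helper shown above, using a hypothetical 'score' column:

    import pandas as pd

    df = pd.DataFrame({'name': ['a', 'b', 'c'], 'score': [1, 2, 9]})
    above_avg = get_rows_greater_than_avg(df, 'score')
    # mean(score) is 4, so only the row with score 9 is returned.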
Takes a junit XML filename or a path to such a file. Extracts the testsuite node from that file and adds it to junit_docker.xml, in the process giving the testsuite a name (the suite param) and changing the classname prefix from tests. to {suite}. Finally, it removes the original file. This is done because Jenkins did not handle multiple exposed junit files well; the classnames are changed so that the tests are grouped by the container they ran in. | def merge_to_junit_xml(filename: str, suite: str) -> None:
junit_docker = Path("junit_docker.xml")
if junit_docker.exists():
tree = ElementTree.parse(junit_docker)
root = tree.getroot()
        # Iterate over a copy of the children so removing a matching suite is safe.
        for testsuite in list(root):
            if testsuite.get("name", None) == suite:
                root.remove(testsuite)
x_tree = ElementTree.parse(filename)
x_root = x_tree.getroot()
x_root[0].attrib["name"] = suite
fix_classname(x_root[0], suite)
root.append(x_root[0])
else:
tree = ElementTree.parse(filename)
root = tree.getroot()
root[0].attrib["name"] = suite
fix_classname(root[0], suite)
tree.write(junit_docker)
os.remove(filename) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def as_xunit(self, filename):\n suite_node = ElementTree.Element(\"testsuite\")\n suite_node.attrib[\"name\"] = self.testsuite.name\n suite_node.attrib[\"tests\"] = str(self.testsuite.ntests)\n suite_node.attrib[\"failures\"] = str(self.testsuite.nfailed)\n if self.testsuite.package:\n suite_node.attrib[\"package\"] = self.testsuite.package\n\n cases = self.testsuite.testcases\n for testcase in cases:\n case_node = ElementTree.SubElement(suite_node, \"testcase\")\n case_node.attrib[\"classname\"] = testcase.classname\n case_node.attrib[\"name\"] = testcase.name\n if testcase.failed:\n failure_node = ElementTree.SubElement(case_node, \"failure\")\n failure_node.attrib[\"type\"] = TEST_FAILURE_TYPE\n failure_node.text = testcase.failure_descr\n # Serialize to file\n tree = ElementTree.ElementTree(suite_node)\n tree.write(filename, encoding=\"utf-8\", xml_declaration=True)",
"def fetch_junit(dir_name, url):\r\n dir_to_look = dir_name\r\n failed_junit = []\r\n onlyfiles = [f for f in listdir(dir_to_look) if isfile(join(dir_to_look, f))]\r\n \"\"\" if multiple files are there check all files \"\"\"\r\n for i in onlyfiles:\r\n update_dir = str(directory) + \"/\"\r\n xmldoc = minidom.parse(update_dir + i) # parse file\r\n testsuite = xmldoc.getElementsByTagName(\"testsuite\")[0]\r\n status = xmldoc.getElementsByTagName(\"testsuite\")[0].getAttribute(\"failures\")\r\n if status != \"0\":\r\n testcase = testsuite.getElementsByTagName(\"testcase\")\r\n t_name = testsuite.getElementsByTagName(\"testcase\")[0].getAttribute(\"name\")\r\n for test_cases in testcase:\r\n classname = test_cases.getAttribute(\"classname\")\r\n name = test_cases.getAttribute(\"name\")\r\n failure = test_cases.getElementsByTagName(\"failure\") # check for failure exception\r\n for failed_test in failure:\r\n junit_test = classname + \".\" + name\r\n failed_junit.append(junit_test) # append all tests to a list\r\n\r\n \"\"\"com.cs.tools.content.MyDecksLoaderTest.testGetSlidesXMLHasImageAndThumbnailUrls\r\n package - com.cs.tools.content\r\n group - MyDecksLoaderTest\r\n test_name - testGetSlidesXMLHasImageAndThumbnailUrls\"\"\"\r\n for j in failed_junit:\r\n \"\"\" \r\n Apply some regular expression to find test_name and group and package\r\n \"\"\"\r\n lst1 = j.split('.')\r\n test_name = lst1[-1]\r\n group = lst1[-2]\r\n val1 = re.sub(r'.[a-zA-Z]*$', \"\", j)\r\n package = re.sub(r'.[a-zA-Z]*$', \"\", val1)\r\n # Generate URL to publish failed test link in stash/bitbucket\r\n url = url + \"testReport/junit/\" + package + \"/\" + group + \"/\" + test_name\r\n print(\"[\" + j + \"] (\" + url + \")\")",
"def make_testsuite(testsuite: Dict) -> NoReturn:\n # validate testsuite format\n load_testsuite(testsuite)\n\n testsuite_config = testsuite[\"config\"]\n testsuite_path = testsuite_config[\"path\"]\n testsuite_variables = convert_variables(\n testsuite_config.get(\"variables\", {}), testsuite_path\n )\n\n logger.info(f\"start to make testsuite: {testsuite_path}\")\n\n # create directory with testsuite file name, put its testcases under this directory\n testsuite_path = ensure_file_abs_path_valid(testsuite_path)\n testsuite_dir, file_suffix = os.path.splitext(testsuite_path)\n # demo_testsuite.yml => demo_testsuite_yml\n testsuite_dir = f\"{testsuite_dir}_{file_suffix.lstrip('.')}\"\n\n for testcase in testsuite[\"testcases\"]:\n # get referenced testcase content\n testcase_file = testcase[\"testcase\"]\n testcase_path = __ensure_absolute(testcase_file)\n testcase_dict = load_test_file(testcase_path)\n testcase_dict.setdefault(\"config\", {})\n testcase_dict[\"config\"][\"path\"] = testcase_path\n\n # override testcase name\n testcase_dict[\"config\"][\"name\"] = testcase[\"name\"]\n # override base_url\n base_url = testsuite_config.get(\"base_url\") or testcase.get(\"base_url\")\n if base_url:\n testcase_dict[\"config\"][\"base_url\"] = base_url\n # override verify\n if \"verify\" in testsuite_config:\n testcase_dict[\"config\"][\"verify\"] = testsuite_config[\"verify\"]\n # override variables\n # testsuite testcase variables > testsuite config variables\n testcase_variables = convert_variables(\n testcase.get(\"variables\", {}), testcase_path\n )\n testcase_variables = merge_variables(testcase_variables, testsuite_variables)\n # testsuite testcase variables > testcase config variables\n testcase_dict[\"config\"][\"variables\"] = convert_variables(\n testcase_dict[\"config\"].get(\"variables\", {}), testcase_path\n )\n testcase_dict[\"config\"][\"variables\"].update(testcase_variables)\n\n # override weight\n if \"weight\" in testcase:\n testcase_dict[\"config\"][\"weight\"] = testcase[\"weight\"]\n\n # make testcase\n testcase_pytest_path = make_testcase(testcase_dict, testsuite_dir)\n pytest_files_run_set.add(testcase_pytest_path)",
"def save_xunit(self,filename):\n f = open(filename,'w')\n f.write('<?xml version=\"1.0\" encoding=\"UTF-8\"?>')\n f.write('<testsuite name=\"fbtest\" tests=\"%i\" errors=\"%i\" failures=\"%i\" untested=\"%i\" skip=\"%i\">' %\n (len(self.results),self.get_error_count(),self.get_fail_count(),\n self.get_untested_count(),self.get_skipped_count()))\n for result in self.values():\n if result.outcome == Result.PASS:\n f.write('<testcase classname=\"Test\" name=\"%s\" time=\"%.3f\" />' % (\n result.id,result.get_elapsed()))\n else:\n f.write('<testcase classname=\"Test\" name=\"%s\" time=\"%.3f\">' % (\n result.id,result.get_elapsed()))\n if result.outcome == Result.ERROR:\n if result.has_key(Result.EXCEPTION):\n e = result[Result.EXCEPTION]\n exc = e[:e.find(':')]\n msg = e[e.find(':')+2:]\n exc = exc[exc.find(\"'\")+1:exc.rfind(\"'\")]\n msg = msg.lstrip()\n f.write('<error type=%s message=%s>' % (self._quoteattr(exc),\n self._quoteattr(msg)))\n f.write('</error>')\n else:\n msg = result.get_cause()\n f.write('<error type=\"error\" message=%s>' % (self._quoteattr(msg)))\n f.write('</error>')\n elif result.outcome == Result.FAIL:\n for key in ['ISQL_stripped_diff','Python_stripped_diff',\n 'ISQL_stderr_stripped_diff',\n 'Python_stderr_stripped_diff']:\n if result.has_key(key):\n cdata = as_utf8(result[key])\n f.write('<failure type=\"fail\" message=%s>' % self._quoteattr(result.get_cause()))\n f.write('<![CDATA[%s]]>' % escape_cdata(cdata))\n f.write('</failure>')\n elif result.outcome == Result.UNTESTED:\n f.write('<failure type=\"untested\" message=%s>' % self._quoteattr(result.get_cause()))\n f.write('</failure>')\n elif result.outcome == Result.SKIPPED:\n f.write('<failure type=\"skipped\" message=%s>' % self._quoteattr(result.get_cause()))\n f.write('</failure>')\n f.write('</testcase>')\n f.write('</testsuite>')\n f.close()",
"def extract_suite_name(file_path, project_name):\n\n suite_name = str(project_name) + \".\"\n suite_name = suite_name + os.path.splitext(str(file_path).replace(os_sep, \".\"))[0]\n return suite_name",
"def runTestSuites(self):\n \n self.testsuitesToXML()\n \n\n tss = []\n jobStatus = {}\n for t in self.testsuites:\n d = t.testsuitedir\n runner = os.path.join(self.basepath, 'testSuiteRunner.py')\n tdir = os.path.join(d, 'testsuite.out')\n cmd = 'python %s %s>& %s' % (runner, d,tdir)\n #print 'about to popen the cmd: %s' % cmd\n tss.append((t.name, popen2.Popen3(cmd)))\n jobStatus[t.name] = ('running', nowSecs())\n ntests = len(tss)\n printJobStatus(jobStatus)\n\n while tss:\n toRemove = [p for p in tss if p[1].poll() != -1]\n if toRemove:\n [tss.remove(p) for p in toRemove]\n for p in toRemove:\n jobStatus[p[0]] = ('completed', nowSecs())\n\n printJobStatus(jobStatus)\n time.sleep(10)\n\n print 'all %d tests have completed' % ntests",
"def create_test(self, test_case, file_name):\n with open(os.path.join(self.tests, file_name), 'w+') as f:\n f.write(test_case)",
"def suite():\n # patch it to work here\n package_def = 'app.test'\n\n suite = unittest.TestSuite()\n\n for other_suite in iter_suites(package_def):\n suite.addTest(other_suite)\n return suite",
"def gen_junit(self):\n\n test_attrs = [\n \"polarion-project-id\", \"polarion-custom-description\",\n \"polarion-custom-plannedin\", \"polarion-custom-isautomated\",\n \"polarion-custom-tags\"\n ]\n\n test_attrs_values = [\n self.args.ts, self.args.desc,\n self.args.rel, True, self.args.tags\n ]\n\n # This block allows for a dynamic dictionary to be created\n # depending on arguments passed.\n props = {\n key: value for key, value in zip(test_attrs,\n test_attrs_values)\n if value is not None\n }\n\n self._gen_polarion_property_file(test_attrs, test_attrs_values,\n self.args.tr, self.args.tc,\n property_file=self.args.pf)\n\n test_case = [TestCase(self.args.tc.pop(0), '', self.args.et)]\n\n if len(self.args.tc) >= 1:\n for cases in self.args.tc:\n test_case.append(TestCase(cases, '', self.args.et))\n\n testsuite = [TestSuite(self.args.project, test_case, properties=props)]\n\n with open(self.args.output_f, 'w') as results:\n TestSuite.to_file(results, testsuite)\n if self.args.ur:\n self._upload(self.polarion_url, self.args.output_f,\n self.username, self.password)",
"def parametrize(testcase_klass, filename='', filepath=''):\n testloader = unittest.TestLoader()\n testnames = testloader.getTestCaseNames(testcase_klass)\n suite = unittest.TestSuite()\n for name in testnames:\n suite.addTest(testcase_klass(name, filename=filename, filepath=filepath))\n return suite",
"def generate_test_suite(errors, output_file):\n test_suite = ElementTree.Element('testsuite')\n test_suite.attrib['errors'] = str(len(errors))\n test_suite.attrib['failures'] = str(0)\n test_suite.attrib['name'] = 'Cppcheck errors'\n test_suite.attrib['tests'] = str(len(errors))\n test_suite.attrib['time'] = str(1)\n\n for file_name, errors in errors.items():\n test_case = ElementTree.SubElement(test_suite,\n 'testcase',\n name=os.path.relpath(file_name))\n for error in errors:\n ElementTree.SubElement(test_case,\n 'error',\n file=os.path.relpath(error.file),\n line=str(error.line),\n message='{}: {}'.format(error.line, error.message))\n\n tree = ElementTree.ElementTree(test_suite)\n tree.write(output_file, encoding='utf-8', xml_declaration=True)",
"def build_test_suite(loader, tests, pattern, test_case_factory):\n suite = unittest.TestSuite()\n data_root = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'spec')\n assert os.path.exists(data_root)\n test_files = []\n absroot = os.path.abspath(data_root)\n for (dirpath, dirnames, filenames) in os.walk(absroot):\n for filename in filenames:\n if filename.endswith(\".txt\"):\n test_file = os.path.join(dirpath, filename)\n test_files.append(test_file)\n test_files.sort()\n for test_file in test_files:\n test_name = test_file[len(absroot)+1:]\n spec_test = _read_spec_test(test_file)\n test_class = test_case_factory(test_file, test_name, spec_test)\n if test_class:\n suite.addTests(loader.loadTestsFromTestCase(test_class))\n return suite",
"def run_test(self, testcase, name, options):\n name = options.suite+'_'+name\n cmd = options.solver+' '\n if not options.cat_options is None:\n cmd += options.cat_options+' '\n cmd += options.file\n print( \"Running test suite '%s' test '%s' command '%s'\" % \\\n (options.suite, name, cmd))\n pyutilib.subprocess.run(cmd, outfile=options.currdir+'test_'+name+\".out\")\n testcase.failUnlessFileEqualsBaseline(\n options.currdir+'test_'+name+\".out\",\n options.currdir+'test_'+name+\".txt\")",
"def parse_test_files():\n a_copy = PY_FILES[::]\n for f in a_copy:\n if 'test' in f:\n TEST_FILES.append(f)\n PY_FILES.remove(f)",
"def main():\n # add all new test suites per test module here\n suite_date = test_date.suite()\n suite_ng = test_ng.suite()\n suite_page = test_page.suite()\n suite_container = test_container.suite()\n\n # add the suite to be tested here\n alltests = unittest.TestSuite((suite_date,\n suite_ng,\n suite_page,\n suite_container))\n\n # run the suite\n runner = unittest.TextTestRunner()\n runner.run(alltests)",
"def __merge_container_reports(self):\n print('Copying container output xml files to top level')\n files_to_merge = []\n try:\n for suite in self.execution_file_json['suites']:\n if 'suitefile' in suite:\n name = suite['suitefile'].replace('.robot', '')\n else:\n name = suite['suitedirectory']\n print('Copying xml file for suite: %s' % name)\n output_xml_path = os.path.join(self.output_path, name, ParallelRunner.ROBOT_XML.replace('SUITE', name))\n destination_path = os.path.join(self.output_path, ParallelRunner.ROBOT_XML.replace('SUITE', name))\n shutil.copyfile(src=output_xml_path, dst=destination_path)\n files_to_merge.append(destination_path)\n except Exception:\n pass\n print('Merging container output xml into html report')\n try:\n log_path = os.path.join(self.output_path, 'allsuites_log.html')\n report_path = os.path.join(self.output_path, 'allsuites_report.html')\n rebot(*files_to_merge, name='AllSuites', log=log_path, report=report_path)\n except Exception as e:\n print('Error merging container xml output: %s' % str(e))\n raise",
"def _process_test_suite(self, logfile):\r\n\r\n print '***' * 10\r\n print 'Output will be generated in folder {}\\n'.format(self.output_dir_path)\r\n\r\n command = 'robot --outputdir {0} -r {1}_report.html -l {1}_log.html -o {1}_output.xml {1}.robot'.format(\r\n self.output_dir_path, self.name)\r\n\r\n return self._run_command(command, logfile)",
"def scan_for_tests(self):\n log_info(\"scanning for tests in '%s'\" % self.directory)\n for filename in os.listdir(self.directory):\n base, ext = os.path.splitext(filename)\n fullname = os.path.join(self.directory, filename)\n if ext == SUITE:\n if base.endswith(CLEANUP):\n base = base.rsplit(CLEANUP, 1)[0]\n self.cleanup[base] = fullname\n else:\n self.suites[base] = fullname\n if ext == CONFIGURATION:\n self.conf[base] = fullname\n if ext == TEST:\n self.tests[base] = fullname",
"def suite():\n # Get a list of all files.\n files = glob.glob(os.path.join(os.path.dirname(os.path.abspath(\n inspect.getfile(inspect.currentframe()))), \"test*.py\"))\n files = [os.path.splitext(os.path.basename(_i))[0] for _i in files]\n\n modules = []\n # try to import all files.\n for module in files:\n try:\n module = __import__(module, globals(), locals())\n except:\n warnings.warn(\"Module %s could not be imported\" % module)\n continue\n modules.append(module)\n\n suite = unittest.TestSuite()\n for module in modules:\n for attrib in dir(module):\n value = getattr(module, attrib)\n try:\n if issubclass(value, unittest.TestCase):\n suite.addTest(unittest.makeSuite(value, \"test\"))\n except:\n pass\n return suite",
"def _run_testsuite(self, pyunit_testcase):\n suite = unittest.defaultTestLoader.loadTestsFromTestCase(\n pyunit_testcase\n )\n suite_result = unittest.TextTestRunner().run(suite)\n\n # Since we can't reliably inspect the individual testcases of a PyUnit\n # suite, we put all results into a single \"testcase\" report. This\n # will only list failures and errors and not give detail on individual\n # assertions like with MultiTest.\n testcase_report = TestCaseReport(\n name=self._TESTCASE_NAME, uid=self._TESTCASE_NAME\n )\n\n for call, error in suite_result.errors:\n assertion_obj = assertions.RawAssertion(\n description=str(call), content=str(error).strip(), passed=False\n )\n testcase_report.append(\n schemas.base.registry.serialize(assertion_obj)\n )\n\n for call, error in suite_result.failures:\n assertion_obj = assertions.RawAssertion(\n description=str(call), content=str(error).strip(), passed=False\n )\n testcase_report.append(\n schemas.base.registry.serialize(assertion_obj)\n )\n\n # In case of no failures or errors we need to explicitly mark the\n # testsuite as passed.\n if not testcase_report.entries:\n log_entry = entries_base.Log(\n \"All PyUnit testcases passed\", description=\"PyUnit success\"\n )\n testcase_report.append(schemas.base.registry.serialize(log_entry))\n\n testcase_report.runtime_status = RuntimeStatus.FINISHED\n\n # We have to wrap the testcase report in a testsuite report.\n return TestGroupReport(\n name=pyunit_testcase.__name__,\n uid=pyunit_testcase.__name__,\n category=ReportCategories.TESTSUITE,\n entries=[testcase_report],\n )",
"def setUpSuite():\n global _output_dir\n global _suite_configured\n\n if _suite_configured:\n return\n\n def remove_output_dir():\n global _output_dir\n if _output_dir != '':\n try:\n shutil.rmtree(_output_dir)\n except FileNotFoundError:\n pass\n\n atexit.register(remove_output_dir)\n _output_dir = tempfile.mkdtemp(dir=TESTS_DIR)\n\n os.environ['VOC_BUILD_DIR'] = os.path.join(_output_dir, 'build')\n os.environ['VOC_DIST_DIR'] = os.path.join(_output_dir, 'dist')\n\n # If the code has been precompiled, we don't have to\n # compile it as part of the test suite setup.\n precompile = os.environ.get('PRECOMPILE', 'true').lower() == 'true'\n if not precompile:\n _suite_configured = True\n return\n\n proc = subprocess.Popen(\n \"ant java\",\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n shell=True,\n )\n\n try:\n out, err = proc.communicate(timeout=30)\n except subprocess.TimeoutExpired:\n proc.kill()\n out, err = proc.communicate()\n raise\n\n if proc.returncode != 0:\n raise Exception(\"Error compiling java sources: \" + out.decode('ascii'))\n\n _suite_configured = True",
"def create_task(testset_path):\n task_suite = unittest.TestSuite() # 测试套件\n testsets = load_testcases_by_path(testset_path)\n print('testsets ----> %s\\n' % testsets)\n for testset in testsets:\n print('testset ----> %s\\n' % testset)\n suite = create_suite(testset)",
"def _writeMockResultFile(result):\n with open(result.filename, 'w') as f:\n f.write('<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n')\n if len(result.suites) > 1 or result.noSuitesRoot is False:\n f.write('<testsuites>\\n')\n for suite in result.suites:\n f.write('<testsuite tests=\"'+str(suite.tests)+'\" failures=\"'+str(suite.fail)+'\" time=\"'+str(suite.time)+'\" errors=\"'+str(suite.errors)+'\" name=\"'+suite.name+'\">\\n')\n for case in suite.cases:\n f.write('<testcase name=\"'+case.name+'\" status=\"run\" time=\"'+str(case.time)+'\" classname=\"'+case.classname+'\">\\n')\n for error in case.errorList:\n f.write('<failure message=\"'+error.value+'\" type=\"'+error.value+'\"/>\\n')\n f.write('</testcase>\\n')\n f.write('</testsuite>\\n')\n if len(result.suites) > 1 or result.noSuitesRoot is False:\n f.write('</testsuites>\\n')",
"def write_tests(project_name, root_dir):\r\n test_path = get_file_path(root_dir, \"tests\", \"%s_tests.py\" % project_name) #Get the path for setup.py\r\n test_content = get_test_text(project_name)\r\n \r\n test_file = open(test_path, 'w')\r\n test_file.write(test_content)\r\n test_file.close()\r\n print_file(test_path)",
"def testsuite():\n \n tests = unittest.TestSuite()\n\n parse_tests = unittest.makeSuite(ParseTestCase, 'test')\n tests = unittest.TestSuite( (tests, parse_tests) )\n\n return tests",
"def main():\r\n args = getargs()\r\n dir_name = args.dir_name\r\n url = args.url\r\n fetch_junit(dir_name, url)",
"def extract_testsuite(self, testsuite, info):\n\n for testcase in testsuite.getchildren():\n\n self.extract_testcase(testcase, info, testsuite.get(\"name\"))",
"def construct_case(filename, name):\n\n def make_test(test_name, definition, i):\n def m(self):\n if name in SKIP_TESTS.get(self.es_version, ()) or name in SKIP_TESTS.get(\n \"*\", ()\n ):\n raise SkipTest()\n self.run_code(definition)\n\n m.__doc__ = \"%s:%s.test_from_yaml_%d (%s): %s\" % (\n __name__,\n name,\n i,\n \"/\".join(filename.split(\"/\")[-2:]),\n test_name,\n )\n m.__name__ = \"test_from_yaml_%d\" % i\n return m\n\n with open(filename) as f:\n tests = list(yaml.load_all(f))\n\n attrs = {\"_yaml_file\": filename}\n i = 0\n for test in tests:\n for test_name, definition in test.items():\n if test_name in (\"setup\", \"teardown\"):\n attrs[\"_%s_code\" % test_name] = definition\n continue\n\n attrs[\"test_from_yaml_%d\" % i] = make_test(test_name, definition, i)\n i += 1\n\n return type(name, (YamlTestCase,), attrs)",
"def run_test_suite(self, test_config):\n # Folder to store suite results\n test_config['test_suite_start_time'] = datetime.datetime.now().strftime(\n '%Y%m%dT%H%M%S')\n\n instance = cluster_local.UseLocalInstances()\n for i in range(test_config['repeat']):\n self.run_benchmark(test_config, instance, copy=i)\n\n suite_dir_name = '{}_{}'.format(test_config['test_suite_start_time'],\n test_config['test_id'])\n reporting.process_folder(\n os.path.join(self.workspace, 'results', suite_dir_name),\n report_config=self.auto_test_config)",
"def proc_docker_file(directory):\n print \"TASK-RUNNING\"\n os.rename(directory, directory + '_working')\n directory += '_working'\n try:\n dockerstack_agent.builder.do_build(directory)\n rmtree(directory)\n except Exception as e:\n traceback.print_exc()\n print \"TASK-ERROR\"\n raise e\n #finally:\n #Remove the directory\n\n print \"TASK-COMPLETE\""
] | [
"0.6713883",
"0.58498186",
"0.5828557",
"0.57560843",
"0.5434094",
"0.54201895",
"0.54086524",
"0.53342706",
"0.532954",
"0.53058827",
"0.52480894",
"0.5212408",
"0.52075857",
"0.5189893",
"0.51804215",
"0.5173441",
"0.5135969",
"0.5100733",
"0.5098095",
"0.50912726",
"0.50742126",
"0.5048579",
"0.5036182",
"0.5020875",
"0.50128347",
"0.5000995",
"0.4999133",
"0.49889016",
"0.49798828",
"0.4979831"
] | 0.84821534 | 1 |
get all versions of inmanta packages into a freeze file, to make the environment inside docker like the one outside | def pip_lock_file() -> None:
with open("requirements.freeze.all", "w") as ff:
subprocess.check_call([sys.executable, "-m", "pip", "freeze"], stdout=ff)
with open("requirements.freeze.tmp", "w") as ff:
subprocess.check_call(["grep", "inmanta", "requirements.freeze.all"], stdout=ff)
# pip freeze can produce lines with @ that refer to folders outside the container
# see also https://github.com/pypa/pip/issues/8174
# also ignore inmanta-dev-dependencies as this is pinned in the requirements.dev.txt
with open("requirements.freeze", "w") as ff:
subprocess.check_call(
[
"grep",
"-v",
"-e",
"@",
"-e",
"inmanta-dev-dependencies",
"-e",
"inmanta-module-",
"requirements.freeze.tmp",
],
stdout=ff,
)
yield | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def freeze():\n dependencies = sh('pip freeze', capture=True).split(os.linesep)\n\n with open('requirements.txt', 'w') as file:\n for dep in dependencies:\n if not dep.startswith('bones-testing'):\n file.write(dep+'\\n')",
"def freeze():\n proc = subprocess.run(['pip', 'freeze'], stdout=subprocess.PIPE)\n with open('requirements.txt', 'wb') as fout:\n fout.write(proc.stdout)",
"def packages():",
"def prod_server():\n sh(\"bin/pip freeze -r requirements.txt production/requirements.txt\")",
"def test_make_freeze(self):\n expected_output = ['Flask==0.10.1']\n output = list(requirementsfinder.make_freeze(self.fullexample_path))\n self.assertEqual(expected_output,output)",
"def _freeze(requirements, python):\n output = []\n try:\n version_out = subprocess.check_output(\n [python, \"--version\"], stderr=subprocess.STDOUT)\n output.append(version_out)\n version_all = version_out.decode('utf-8').split()[1]\n version = '.'.join(version_all.split('.')[:2])\n with fixtures.TempDir() as temp:\n output.append(subprocess.check_output(\n [python, '-m', 'venv', temp.path]))\n pip_bin = os.path.join(temp.path, 'bin', 'pip')\n output.append(subprocess.check_output(\n [pip_bin, 'install', '-U', 'pip', 'setuptools', 'wheel']))\n output.append(subprocess.check_output(\n [pip_bin, 'install', '-r', requirements]))\n freeze = subprocess.check_output(\n [pip_bin, 'freeze'])\n output.append(freeze)\n return (version, _parse_freeze(freeze.decode('utf-8')))\n except Exception as exc:\n if isinstance(exc, subprocess.CalledProcessError):\n output.append(exc.output)\n raise Exception(\n \"Failed to generate freeze: %s %s\"\n % (b'\\n'.join(output).decode('utf-8'), exc))",
"def get_package_list():\n pip_freeze = subprocess.check_output(('pip', 'freeze')).decode('utf8')\n package_list = [x.strip().split('==') for x in pip_freeze.split('\\n') if x.find('==') != -1]\n package_list = [(x[0].lower(), x[1]) for x in package_list]\n return package_list",
"def set_installed_packages():\n global INSTALLED_PACKAGES, REQUIRED_VERSION\n if INSTALLED_PACKAGES:\n return\n\n if os.path.exists(BIN_PYTHON):\n pip = subprocess.Popen(\n (BIN_PYTHON, '-m', 'pip', 'freeze'),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n (stdout, stderr) = pip.communicate()\n pip.wait()\n\n INSTALLED_PACKAGES = [normalize_package_name(r.decode().split('==')[0].lower()) for r in stdout.split()]\n REQUIRED_VERSION = next((package for package in INSTALLED_PACKAGES if re.match(r'^lore[!<>=]', package)), None)\n if REQUIRED_VERSION:\n REQUIRED_VERSION = re.split(r'[!<>=]', REQUIRED_VERSION)[-1]",
"def getInstalledPackages():\n reqs = subprocess.check_output([sys.executable,\n '-m', 'pip', 'freeze'])\n installed_packages = [r.decode().split('==')[0]\n for r in reqs.split()]\n return installed_packages",
"def complete_env() -> Python:\n return Python([\n 'click==0.0.1',\n 'googleapis-common-protos==0.0.1',\n 'numpy==0.0.1',\n 'pandas==0.0.1',\n 'Pillow==0.0.1',\n 'requests==0.0.1',\n 'scikit-learn==0.0.1',\n 'torch==0.0.1',\n 'urllib3==0.0.1',\n 'PyYAML==0.0.1',\n ]) # `verta` and `cloudpickle` included by default",
"def listpacks(all: bool=False) -> [str, str]:\n\t# execute command\n\tcommand = ['pip', 'freeze']\n\tif all:\n\t\tcommand.append('--all')\n\tproc = subprocess.Popen(command, stdout=subprocess.PIPE)\n\tproc.wait()\n\n\t# process returned data\n\tlines = proc.stdout.read().decode('utf8')\n\tlines = list(\n\t\tfilter(\n\t\t\tlambda inf: inf[0] and inf[0].split(' ')[0].lower() != '-e',\n\t\t\tmap(\n\t\t\t\tlambda inf: list(map(\n\t\t\t\t\tlambda x: x.lower().strip(),\n\t\t\t\t\tinf.split('==')\n\t\t\t\t\t)),\n\t\t\t\tlines.split('\\n')\n\t\t\t)\n\t\t)\n\t)\n\n\treturn lines",
"def install_deps():\n with open('requirements.txt', 'r') as f:\n packages = f.readlines()\n new_pkgs = []\n for resource in packages:\n new_pkgs.append(resource.strip())\n return new_pkgs",
"def freeze():\n do('export FLASK_CONFIG=config/dev.py && %s/bin/python manage.py freeze' % venv_path)",
"def build_env_wheels() -> Iterable[Path]:\n return []",
"def _get_package_data() -> list[list[str]]:\n moddata = []\n modlist: tuple[str, ...] = (\n \"click\",\n \"cryptography\",\n \"globus_cli\",\n \"globus_sdk\",\n \"jmespath\",\n \"requests\",\n )\n if verbosity() < 2:\n modlist = (\"globus_cli\", \"globus_sdk\", \"requests\")\n\n for mod in modlist:\n cur = [mod]\n try:\n loaded_mod = __import__(mod)\n except ImportError:\n loaded_mod = None\n\n for attr in (\"__version__\", \"__file__\", \"__path__\"):\n # if loading failed, be sure to pad with error messages\n if loaded_mod is None:\n cur.append(\"[import failed]\")\n continue\n\n try:\n attrval = getattr(loaded_mod, attr)\n except AttributeError:\n attrval = \"\"\n cur.append(attrval)\n moddata.append(cur)\n\n return moddata",
"def get_used_release_specs(package, installed_version=None):",
"def update_requirements():\n\n require('code_root', provided_by=env.environments)\n requirements = os.path.join(env.code_root, 'requirements')\n sdists = os.path.join(requirements, 'sdists')\n base_cmd = ['pip install']\n base_cmd += ['-q -E %(virtualenv_root)s' % env]\n base_cmd += ['--no-index --find-links=file://%s' % sdists]\n # install GDAL by hand, before anything else that might depend on it\n cmd = base_cmd + ['--no-install \"GDAL==1.6.1\"']\n sudo(' '.join(cmd), user=env.deploy_user)\n # this directory won't exist if GDAL was already installed\n if files.exists('%(virtualenv_root)s/build/GDAL' % env):\n sudo('rm -f %(virtualenv_root)s/build/GDAL/setup.cfg' % env, user=env.deploy_user)\n with cd('%(virtualenv_root)s/build/GDAL' % env):\n sudo('%(virtualenv_root)s/bin/python setup.py build_ext '\n '--gdal-config=gdal-config '\n '--library-dirs=/usr/lib '\n '--libraries=gdal1.6.0 '\n '--include-dirs=/usr/include/gdal '\n 'install' % env, user=env.deploy_user)\n # force reinstallation of OpenBlock every time\n with settings(warn_only=True):\n sudo('pip uninstall -y -E %(virtualenv_root)s ebpub ebdata obadmin' % env)\n for file_name in ['ebpub.txt', 'ebdata.txt', 'obadmin.txt', 'openrural.txt']:\n apps = os.path.join(requirements, file_name)\n cmd = base_cmd + ['--requirement %s' % apps]\n sudo(' '.join(cmd), user=env.deploy_user)",
"def get_installed_packages():\n global INSTALLED_PACKAGES\n chk = Popen(\"{} -m pip freeze\".format(sys.executable),\n shell=True, stdout=PIPE)\n installed = chk.communicate()[0].decode().splitlines()\n for pkg in installed:\n item = pkg.split(\"==\")\n INSTALLED_PACKAGES[item[0]] = item[1]",
"def full_pip_freeze(docker_image, pip):\n\n match = None\n pip_vers = None\n\n try:\n\n cmd = ['sudo', 'docker', 'run', '--env', \"LD_LIBRARY_PATH=''\", '--rm', '-ti',\n '--entrypoint={}'.format(pip), docker_image, 'freeze']\n\n print(' '.join(cmd))\n r = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE, universal_newlines=True)\n r.wait()\n raw_output = str(r.stdout.read())\n\n package_vers_dict = {}\n raw_output = raw_output.split('\\n')\n output = []\n\n for op in raw_output:\n if op.find('==') > -1:\n output.append(op)\n\n if len(output) == 0:\n print('No packages for pip {}'.format(pip_vers))\n return (pip_vers, {})\n\n output = [item.split('==') for item in output]\n\n for val in output:\n package_vers_dict[val[0]] = val[1]\n\n\n except Exception as e:\n print('error extractiong pip info')\n print(e)\n\n return (pip_vers, package_vers_dict)",
"def getversions(package_name: str) -> list:\n\t# execute command\n\tproc = subprocess.Popen(['pip', 'install', package_name+'==CRASHME'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\tproc.wait()\n\n\t# processed returned data\n\tlines = proc.stderr.read().decode('utf8')\n\tsearchterm = \"(from versions:\"\n\tstart = lines.find(searchterm) + len(searchterm)\n\tend = lines.find(\")\", start)\n\tlines = lines[start:end].split(',')\n\tlines = list(map(lambda x: x.strip(), lines))\n\n\treturn lines",
"def list_package(all: bool = False) -> List[List[str]]:\n if not all:\n pkgs_info = read_installation_records()\n else:\n pkgs_info = []\n for pkg in pkg_resources.working_set:\n pkgs_info.append([pkg.project_name, pkg.version])\n\n return pkgs_info",
"def packages(self):\n return []",
"def bootstrap():\n local('virtualenv fabric_factory/ve')",
"def format(session):\n session.install('-rrequirements-dev.txt')\n run_yapf(session, diff=False)",
"def get_imported_packages(self):\n package_versions_dict = {'python': sys.version, 'SasView': sas.system.version.__version__}\n err_version_dict = {}\n no_version_list = []\n # Generate a list of standard modules by looking at the local python library\n try:\n standard_lib = [path.stem.split('.')[0] for path in pathlib.Path(pathlib.__file__)\n .parent.absolute().glob('*')]\n except Exception:\n standard_lib = ['abc', 'aifc', 'antigravity', 'argparse', 'ast', 'asynchat', 'asyncio', 'asyncore',\n 'base64', 'bdb', 'binhex', 'bisect', 'bz2', 'calendar', 'cgi', 'cgitb', 'chunk', 'cmd',\n 'code', 'codecs', 'codeop', 'collections', 'colorsys', 'compileall', 'concurrent',\n 'configparser', 'contextlib', 'contextvars', 'copy', 'copyreg', 'cProfile', 'crypt',\n 'csv', 'ctypes', 'curses', 'dataclasses', 'datetime', 'dbm', 'decimal', 'difflib',\n 'dis', 'distutils', 'doctest', 'email', 'encodings', 'ensurepip', 'enum', 'filecmp',\n 'fileinput', 'fnmatch', 'formatter', 'fractions', 'ftplib', 'functools', 'genericpath',\n 'getopt', 'getpass', 'gettext', 'glob', 'graphlib', 'gzip', 'hashlib', 'heapq', 'hmac',\n 'html', 'http', 'idlelib', 'imaplib', 'imghdr', 'imp', 'importlib', 'inspect', 'io',\n 'ipaddress', 'json', 'keyword', 'lib2to3', 'linecache', 'locale', 'logging', 'lzma',\n 'mailbox', 'mailcap', 'mimetypes', 'modulefinder', 'msilib', 'multiprocessing', 'netrc',\n 'nntplib', 'ntpath', 'nturl2path', 'numbers', 'opcode', 'operator', 'optparse', 'os',\n 'pathlib', 'pdb', 'pickle', 'pickletools', 'pipes', 'pkgutil', 'platform', 'plistlib',\n 'poplib', 'posixpath', 'pprint', 'profile', 'pstats', 'pty', 'pyclbr', 'pydoc',\n 'pydoc_data', 'py_compile', 'queue', 'quopri', 'random', 're', 'reprlib', 'rlcompleter',\n 'runpy', 'sched', 'secrets', 'selectors', 'shelve', 'shlex', 'shutil', 'signal',\n 'site-packages', 'site', 'smtpd', 'smtplib', 'sndhdr', 'socket', 'socketserver', 'sqlite3',\n 'sre_compile', 'sre_constants', 'sre_parse', 'ssl', 'stat', 'statistics', 'string',\n 'stringprep', 'struct', 'subprocess', 'sunau', 'symbol', 'symtable', 'sysconfig',\n 'tabnanny', 'tarfile', 'telnetlib', 'tempfile', 'test', 'textwrap', 'this', 'threading',\n 'timeit', 'tkinter', 'token', 'tokenize', 'trace', 'traceback', 'tracemalloc', 'tty',\n 'turtle', 'turtledemo', 'types', 'typing', 'unittest', 'urllib', 'uu', 'uuid', 'venv',\n 'warnings', 'wave', 'weakref', 'webbrowser', 'wsgiref', 'xdrlib', 'xml', 'xmlrpc',\n 'zipapp', 'zipfile', 'zipimport', 'zoneinfo', '_aix_support', '_bootlocale',\n '_bootsubprocess', '_collections_abc', '_compat_pickle', '_compression', '_markupbase',\n '_osx_support', '_pydecimal', '_pyio', '_py_abc', '_sitebuiltins', '_strptime',\n '_threading_local', '_weakrefset', '__future__', '__phello__', '__pycache__']\n standard_lib.extend(sys.builtin_module_names)\n standard_lib.append(\"sas\")\n\n for module_name in sys.modules.keys():\n\n package_name = module_name.split('.')[0]\n\n # A built in python module or a local file, which have no version, only the python/SasView version\n if package_name in standard_lib or package_name in package_versions_dict:\n continue\n\n # Import module\n try:\n package = __import__(package_name)\n except Exception as e:\n err_version_dict[package_name] = f\"Unknown: {e} when attempting to import module\"\n continue\n\n # Retrieving the modules version using the __version__ attribute\n if hasattr(package, '__version__'):\n # Module has __version__ attribute\n try:\n package_versions_dict[package_name] = package.__version__\n continue\n except Exception as e:\n # 
Unable to access module\n err_version_dict[package_name] = f\"Unknown: {e} when attempting to access {package_name} \" \\\n f\"version using .__version__\"\n pass\n\n # Retrieving the modules version using the pkg_resources package\n # Unreliable, so second option\n try:\n package_versions_dict[package_name] = pkg_resources.get_distribution(package_name).version\n except Exception:\n # Modules that cannot be found by pkg_resources\n pass\n else:\n continue\n\n # Modules version number could not be attained by any of the previous methods\n\n no_version_list.append(package_name)\n\n # Currently not required for any packages used by SasView\n # Retrieving the modules version using the version attribute\n # if hasattr(package, 'version'):\n # # Module has version attribute\n # try:\n # if isinstance(package.version, str):\n # print(package)\n # package_versions_dict[package_name] = package.version\n # continue\n # except Exception as e:\n # # Unable to access module\n # err_version_dict[package_name] = f\"Unknown: {e} when attempting to access {package_name} \" \\\n # f\"version using .version\"\n # pass\n\n # Clean up\n package_versions_dict = self.remove_duplicate_modules(package_versions_dict)\n no_version_dict = self.format_no_version_list(package_versions_dict, no_version_list)\n\n return {\"results\": package_versions_dict, \"no_results\": no_version_dict, \"errors\": err_version_dict}",
"def cmd_generate_requirements(): \n \n for env in ('dev', 'test'):\n source = Path(ROOT, \"requirements\", f\"{env}.txt\")\n target = Path(ROOT, \"requirements\", f\"{env}.in\")\n os.system(f\"pip-compile --output-file={source} {target}\")",
"def cmd_generate_requirements(): \n \n for env in ('dev', 'test'):\n source = Path(ROOT, \"requirements\", f\"{env}.txt\")\n target = Path(ROOT, \"requirements\", f\"{env}.in\")\n os.system(f\"pip-compile --output-file={source} {target}\")",
"def install_deps():\n default = open('requirements.txt', 'r').readlines()\n new_pkgs = []\n links = []\n for resource in default:\n if 'git+https' in resource:\n pkg = resource.split('#')[-1]\n links.append(resource.strip())\n new_pkgs.append(pkg.replace('egg=', '').rstrip())\n else:\n new_pkgs.append(resource.strip())\n return new_pkgs, links",
"def create_package_list(base):\n\n return [base] + [\"{}.{}\".format(base, pkg) for pkg in find_packages(base)]",
"def list_packages(pretty=False) -> Dict:\n\n packages = dict()\n lp = Commands._list_packages()\n inst_packages = lp.stdout.split('\\n')[:-1]\n\n for package in inst_packages:\n name, version = package.split('==')[0], package.split('==')[1]\n packages[name] = version\n \n if pretty:\n import json\n return json.dumps(packages, sort_keys=True, indent=4)\n return packages"
] | [
"0.71111107",
"0.6972196",
"0.6772253",
"0.6622416",
"0.6376752",
"0.62745297",
"0.6273083",
"0.62702495",
"0.6251193",
"0.60614055",
"0.5943885",
"0.592271",
"0.58881456",
"0.5876001",
"0.58746445",
"0.57682693",
"0.5724752",
"0.570974",
"0.56927276",
"0.56916934",
"0.5672256",
"0.5661367",
"0.56589544",
"0.5656562",
"0.5647212",
"0.56129205",
"0.56129205",
"0.5594862",
"0.5592861",
"0.5581197"
] | 0.70190084 | 1 |
Return the list of docker files that should be used to run the tests against. | def _get_dockerfiles_for_test() -> str:
project_root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
dockerfiles_dir = os.path.join(project_root_dir, "dockerfiles")
if sys.version_info[0:2] == (3, 6):
return os.path.join(dockerfiles_dir, "centos7.Dockerfile")
elif sys.version_info[0:2] == (3, 9):
return os.path.join(dockerfiles_dir, "rocky8.Dockerfile")
else:
raise Exception(
"Running the tests with INMANTA_TEST_INFRA_SETUP=true is only supported using a python3.6 or python3.9 venv"
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_docker_files():\n docker_dirs = []\n if os.path.exists(TMP_DIR):\n docker_dirs = [os.path.join(TMP_DIR, d) for d in os.listdir(TMP_DIR)\n if os.path.isdir(os.path.join(TMP_DIR, d)) and\n not d.endswith('_working')]\n docker_dirs.sort(key=lambda x: os.path.getmtime(x))\n\n return docker_dirs",
"def get_test_files():\n repo_fs()\n return TEST_FILES",
"def find_docker_compose_services():\n dir_list = os.listdir(BASE_DIR)\n directories = [d for d in dir_list if os.path.isdir(os.path.join(BASE_DIR, d))]\n\n return [d for d in directories if 'docker-compose.yml' in os.listdir(os.path.join(BASE_DIR, d))]",
"def get_dockerfile_content(self):\n\n dockerfile_content: List[str] = [\n 'FROM nginx:latest',\n '# Update and install required packages',\n 'RUN apt-get update',\n 'RUN apt-get install vim -y',\n '',\n 'COPY ./.docker/config/nginx.conf /etc/nginx/conf.d/nginx.conf',\n '',\n 'ENTRYPOINT [\"nginx\"]',\n 'CMD [\"-g\",\"daemon off;\"]'\n ]\n return dockerfile_content",
"def _configFiles(self):\n import glob\n ret = [] \n for ext in self.configManager.extensions:\n ret.extend(\n glob.glob(f\"{self.pipelinesDir}/{self.pipeName}/*{ext}\"))\n return ret",
"def get_test_files(self):\n raise NotImplementedError",
"def _build_docker_images(self):\n print(f\"+ building {len(self.neurodocker_specs)} Docker images\")\n self.docker_status = []\n for sha1, neurodocker_dict in self.neurodocker_specs.items():\n try:\n print(\"++ building image: {}\".format(neurodocker_dict))\n cg.docker_main(\n self.working_dir,\n neurodocker_dict,\n sha1,\n build_context=self.build_context,\n )\n self.docker_status.append(\"docker ok\")\n except Exception as e:\n self.docker_status.append(\n \"failed to build image with SHA1 {}: {}\".format(sha1, e)\n )",
"def containers():\n # TODO: can there be multiple names?\n cmd = [ 'docker', 'ps', '--format', '{{.Names}}' ]\n with popen_text(cmd) as docker:\n for ln in docker.stdout:\n yield ln[:-1]",
"def get_eval_config_files(self):\n return list(\n resources.get_files_in_folder(\n \"config/tests/evaluation/evaluate_test_configs\"))",
"def get_postprocess_config_files(self):\n return list(\n resources.get_files_in_folder(\n \"config/tests/postprocessing/postprocess_test_configs\"))",
"def get_test_files():\n test_files = os.listdir('./test')\n return [\n create_test_file_name(test_file)\n for test_file in test_files\n if is_valid_test_file(test_files)\n ]",
"def getDockerfiles(fileList) -> dict:\n paths = {}\n for word in fileList:\n if \"/Dockerfile\" in word:\n try:\n path = word.split('/')\n image = path[0]\n tag = path[1]\n paths[len(paths)] = (image, tag)\n except IndexError: \n print(\"Image name and tag are required in path as \\'image/tag/Dockerfile\\'\")\n print(paths)\n return paths",
"def get_yml_files():\n repo_fs()\n return YML_FILES",
"def list_docker_images():\n raw_result = subprocess.getstatusoutput('docker images')\n return result_handler(raw_result)",
"def resolve_running_docker_containers():\n container_ids = terminal.docker_ps(ps_filter='name={}'.format(CONDUCTR_NAME_PREFIX))\n container_names = [terminal.docker_inspect(container_id, '{{.Name}}')[1:] for container_id in container_ids]\n return sorted(container_names)",
"def get_tests():\n\tret = []\n\tfor walk_tuple in os.walk(webnotes.defs.modules_path):\n\t\tfor test_file in filter(lambda x: x.startswith('test') and x.endswith('.py'), walk_tuple[2]):\n\t\t\tdir_path = os.path.relpath(walk_tuple[0], webnotes.defs.modules_path)\n\t\t\tif dir_path=='.':\n\t\t\t\tret.append(test_file[:-3])\n\t\t\telse:\n\t\t\t\tret.append(dir_path.replace('/', '.') + '.' + test_file[:-3])\t\t\t\n\treturn ret",
"def docker():\n try:\n client = docker_from_env(\n version=os.environ.get('DOCKER_API_VERSION', '1.24'))\n\n containers = []\n\n for container in client.containers.list():\n include_container = False\n if INTERESTING_CONTAINERS.search(container.name):\n include_container = True\n else:\n for tag in container.image.attrs.get('RepoTags', []):\n if INTERESTING_TAGS.match(tag):\n include_container = True\n break\n\n if not include_container:\n continue\n\n docker_metrics = {\n \"stats_type\": \"docker\",\n \"docker\": {\n \"id\": container.short_id,\n \"name\": container.name,\n \"status\": container.status,\n \"labels\": [\"%s=%s\" % (k, v)\n for k, v in container.labels.items()],\n \"tags\": container.image.attrs['RepoTags'],\n 'created': container.image.attrs['Created'],\n }\n }\n if 'version' in container.labels:\n docker_metrics['docker']['image_version'] = \\\n container.labels['version']\n containers.append(docker_metrics)\n\n except Exception as exc:\n logging.debug(\"Error gathering Docker info: %s\", exc)\n return []\n\n return containers",
"def dockerfile_dir(self):\n return self._dockerfile_dir",
"def gather_tests(self):\n rosie_tests_dir = os.path.join(cp_tests_dir(),\n \"circuitpython\",\n \"rosie_tests\")\n test_files = []\n for test in os.scandir(rosie_tests_dir):\n # TODO: implement exclusions by board\n if test.path.endswith(\".py\"):\n test_files.append(TestObject(test.path))\n\n return test_files",
"def get_config_files(self):\n flag, i = self.inotify\n\n if flag:\n kwargs = {}\n\n if PY3:\n kwargs['timeout_s'] = 0\n\n filenames = set()\n\n for event in i.event_gen(**kwargs):\n if event is None:\n break\n\n filenames.add(event[3])\n\n return list(filenames)\n\n else:\n return os.listdir(self.watch)",
"def test_get_container_assets(self):\n pass",
"def get_all_envs(op_root, op_version=None):\n\n if not op_version:\n op_version = get_oarphpy_version(op_root)\n \n envs = []\n dockers_dir = os.path.join(op_root, 'docker')\n for fname in os.listdir(dockers_dir):\n if fname.endswith('.Dockerfile'):\n class Env(DockerEnv):\n DOCKERFILE_PATH = os.path.join(dockers_dir, fname)\n IMAGE_NAME = fname.replace('.Dockerfile', '')\n IMAGE_VERSION = op_version\n SRC_ROOT = op_root\n envs.append(Env)\n return envs",
"def test_requirements_docker():\n with open(\".docker/Pipfile\") as f:\n pipfile_contents = toml.load(f)\n docker_requirements = set(list(pipfile_contents[\"packages\"].keys()))\n\n pip_requirements = get_parsed_requirements(\"requirements.txt\")\n assert pip_requirements.issubset(\n docker_requirements\n ), f\"Docker Pipfile misses: {pip_requirements.difference(docker_requirements)}\"",
"def get_test_files(dirname):\n if not os.path.isdir(dirname):\n return []\n path = dirname + \"/{}\"\n return list(map(path.format, sorted(os.listdir(dirname))))",
"def docker_compose_file(pytestconfig: Any) -> Any:\n return os.path.join(str(pytestconfig.rootdir), \"./\", \"docker-compose.yml\")",
"def ls():\n # TODO: listing all availabe containers form sequence\n return",
"def files_exist(self):\n\n passed = []\n warned = []\n failed = []\n ignored = []\n\n # NB: Should all be files, not directories\n # List of lists. Passes if any of the files in the sublist are found.\n #: test autodoc\n try:\n _, short_name = self.nf_config[\"manifest.name\"].strip(\"\\\"'\").split(\"/\")\n except ValueError:\n log.warning(\"Expected manifest.name to be in the format '<repo>/<pipeline>'. Will assume it is '<pipeline>'.\")\n short_name = self.nf_config[\"manifest.name\"].strip(\"\\\"'\").split(\"/\")\n\n files_fail = [\n [\".gitattributes\"],\n [\".gitignore\"],\n [\".nf-core.yml\"],\n [\".editorconfig\"],\n [\".prettierignore\"],\n [\".prettierrc.yml\"],\n [\"CHANGELOG.md\"],\n [\"CITATIONS.md\"],\n [\"CODE_OF_CONDUCT.md\"],\n [\"CODE_OF_CONDUCT.md\"],\n [\"LICENSE\", \"LICENSE.md\", \"LICENCE\", \"LICENCE.md\"], # NB: British / American spelling\n [\"nextflow_schema.json\"],\n [\"nextflow.config\"],\n [\"README.md\"],\n [os.path.join(\".github\", \".dockstore.yml\")],\n [os.path.join(\".github\", \"CONTRIBUTING.md\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"bug_report.yml\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"config.yml\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"feature_request.yml\")],\n [os.path.join(\".github\", \"PULL_REQUEST_TEMPLATE.md\")],\n [os.path.join(\".github\", \"workflows\", \"branch.yml\")],\n [os.path.join(\".github\", \"workflows\", \"ci.yml\")],\n [os.path.join(\".github\", \"workflows\", \"linting_comment.yml\")],\n [os.path.join(\".github\", \"workflows\", \"linting.yml\")],\n [os.path.join(\"assets\", \"email_template.html\")],\n [os.path.join(\"assets\", \"email_template.txt\")],\n [os.path.join(\"assets\", \"sendmail_template.txt\")],\n [os.path.join(\"assets\", f\"nf-core-{short_name}_logo_light.png\")],\n [os.path.join(\"conf\", \"modules.config\")],\n [os.path.join(\"conf\", \"test.config\")],\n [os.path.join(\"conf\", \"test_full.config\")],\n [os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo_light.png\")],\n [os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo_dark.png\")],\n [os.path.join(\"docs\", \"output.md\")],\n [os.path.join(\"docs\", \"README.md\")],\n [os.path.join(\"docs\", \"README.md\")],\n [os.path.join(\"docs\", \"usage.md\")],\n [os.path.join(\"lib\", \"nfcore_external_java_deps.jar\")],\n [os.path.join(\"lib\", \"NfcoreTemplate.groovy\")],\n [os.path.join(\"lib\", \"Utils.groovy\")],\n [os.path.join(\"lib\", \"WorkflowMain.groovy\")],\n ]\n\n files_warn = [\n [\"main.nf\"],\n [os.path.join(\"assets\", \"multiqc_config.yml\")],\n [os.path.join(\"conf\", \"base.config\")],\n [os.path.join(\"conf\", \"igenomes.config\")],\n [os.path.join(\".github\", \"workflows\", \"awstest.yml\")],\n [os.path.join(\".github\", \"workflows\", \"awsfulltest.yml\")],\n [os.path.join(\"lib\", f\"Workflow{short_name[0].upper()}{short_name[1:]}.groovy\")],\n [\"modules.json\"],\n [\"pyproject.toml\"],\n ]\n\n # List of strings. 
Fails / warns if any of the strings exist.\n files_fail_ifexists = [\n \"Singularity\",\n \"parameters.settings.json\",\n \".nf-core.yaml\", # yml not yaml\n os.path.join(\"bin\", \"markdown_to_html.r\"),\n os.path.join(\"conf\", \"aws.config\"),\n os.path.join(\".github\", \"workflows\", \"push_dockerhub.yml\"),\n os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"bug_report.md\"),\n os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"feature_request.md\"),\n os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo.png\"),\n \".markdownlint.yml\",\n \".yamllint.yml\",\n os.path.join(\"lib\", \"Checks.groovy\"),\n os.path.join(\"lib\", \"Completion.groovy\"),\n os.path.join(\"lib\", \"Workflow.groovy\"),\n ]\n files_warn_ifexists = [\".travis.yml\"]\n\n # Remove files that should be ignored according to the linting config\n ignore_files = self.lint_config.get(\"files_exist\", [])\n\n def pf(file_path):\n return os.path.join(self.wf_path, file_path)\n\n # First - critical files. Check that this is actually a Nextflow pipeline\n if not os.path.isfile(pf(\"nextflow.config\")) and not os.path.isfile(pf(\"main.nf\")):\n failed.append(\"File not found: nextflow.config or main.nf\")\n raise AssertionError(\"Neither nextflow.config or main.nf found! Is this a Nextflow pipeline?\")\n\n # Files that cause an error if they don't exist\n for files in files_fail:\n if any([f in ignore_files for f in files]):\n continue\n if any([os.path.isfile(pf(f)) for f in files]):\n passed.append(f\"File found: {self._wrap_quotes(files)}\")\n else:\n failed.append(f\"File not found: {self._wrap_quotes(files)}\")\n\n # Files that cause a warning if they don't exist\n for files in files_warn:\n if any([f in ignore_files for f in files]):\n continue\n if any([os.path.isfile(pf(f)) for f in files]):\n passed.append(f\"File found: {self._wrap_quotes(files)}\")\n else:\n warned.append(f\"File not found: {self._wrap_quotes(files)}\")\n\n # Files that cause an error if they exist\n for file in files_fail_ifexists:\n if file in ignore_files:\n continue\n if os.path.isfile(pf(file)):\n failed.append(f\"File must be removed: {self._wrap_quotes(file)}\")\n else:\n passed.append(f\"File not found check: {self._wrap_quotes(file)}\")\n\n # Files that cause a warning if they exist\n for file in files_warn_ifexists:\n if file in ignore_files:\n continue\n if os.path.isfile(pf(file)):\n warned.append(f\"File should be removed: {self._wrap_quotes(file)}\")\n else:\n passed.append(f\"File not found check: {self._wrap_quotes(file)}\")\n\n # Files that are ignoed\n for file in ignore_files:\n ignored.append(f\"File is ignored: {self._wrap_quotes(file)}\")\n\n return {\"passed\": passed, \"warned\": warned, \"failed\": failed, \"ignored\": ignored}",
"def _get_pinned_docker_images() -> Mapping[str, Mapping[str, str]]:\n\n pinned_docker_images_file = resources_dir / \"pinned_docker_images.cfg\"\n all_pinned_docker_images = ConfigParser()\n all_pinned_docker_images.read(pinned_docker_images_file)\n return all_pinned_docker_images",
"def list_test_instances():\n run('ls -1 %s' % env.site_root)",
"def test_get_file_executors(self):\n pass"
] | [
"0.7441755",
"0.64881",
"0.64146507",
"0.63096654",
"0.6147907",
"0.61174136",
"0.6047549",
"0.6022984",
"0.6019697",
"0.59687704",
"0.59673595",
"0.5958125",
"0.5927277",
"0.5920401",
"0.5885158",
"0.58813536",
"0.58701384",
"0.58612794",
"0.58572733",
"0.5732197",
"0.5731872",
"0.5722287",
"0.56964636",
"0.56830007",
"0.56588197",
"0.56580746",
"0.5653032",
"0.5645791",
"0.5645636",
"0.56436586"
] | 0.75666106 | 0 |
log_loss / cross_entropy / categorical_crossentropy X is the logits y is labels (num_examples, 1) Note that y is not onehot encoded vector. It can be computed as y.argmax(axis=1) from onehot encoded vectors of labels if required. | def cross_entropy(X, y, using_onehot=True):
M = y.shape[0]
if using_onehot :
log_likelihood = -np.log(np.max(X * y, -1))
else:
        log_likelihood = -np.log(X[range(M), y])  # select the logit corresponding to the class given by y
loss = np.sum(log_likelihood) / M
return loss | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def softmax_cross_entropy(y, label):\r\n losses = np.sum((- np.log(y + g_epsilon) * label), axis=1)\r\n return losses\r\n pass",
"def cross_entropy_loss(self, logits, labels):\n return F.cross_entropy(logits, labels)",
"def softmax_cross_entropy_loss(logit, labels):\n p = softmax(logit)\n loss_i = - labels * np.log(p + 1e-8)\n return np.mean(loss_i)",
"def cross_entropy_loss(logits, labels, label_smoothing=0., dtype=jnp.float32):\n num_classes = logits.shape[-1]\n labels = jax.nn.one_hot(labels, num_classes, dtype=dtype)\n if label_smoothing > 0:\n labels = labels * (1 - label_smoothing) + label_smoothing / num_classes\n logp = jax.nn.log_softmax(logits.astype(dtype))\n return -jnp.mean(jnp.sum(logp * labels, axis=-1))",
"def loss(logits, labels):\n labels = tf.to_int64(labels)\n# labels = tf.to_float(labels)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='xentropy')\n# y_conv = tf.nn.softmax(logits)\n# cross_entropy = -tf.reduce_sum(labels*tf.log(y_conv))\n return tf.reduce_mean(cross_entropy, name='xentropy_mean')",
"def onehot_cross_entropy_loss(\n logits,\n labels,\n loss_reduction = LossReductionType\n .SUM_BY_NONZERO_WEIGHTS,\n **kwargs):\n del kwargs\n vocab_size = logits.shape[-1]\n labels_one_hot = jax.nn.one_hot(labels.astype(jnp.int32), vocab_size)\n weights = jax.numpy.where(labels > 0, 1, 0)\n return softmax_cross_entropy(\n logits,\n labels_one_hot,\n weights=weights,\n loss_reduction=loss_reduction)",
"def loss(y, y_pred):\n # assert_is_binary(y)\n # assert_is_stochastic(y_pred)\n is_binary(y)\n is_stochastic(y_pred)\n\n # prevent taking the log of 0\n eps = np.finfo(float).eps\n\n # each example is associated with a single class; sum the negative log\n # probability of the correct label over all samples in the batch.\n # observe that we are taking advantage of the fact that y is one-hot\n # encoded!\n cross_entropy = -np.sum(y * np.log(y_pred + eps))\n return cross_entropy",
"def loss(params: hk.Params, batch, label) -> jnp.ndarray:\r\n logits = net.apply(params, batch)\r\n labels = jax.nn.one_hot(label, n_classes)\r\n\r\n # Cross Entropy Loss\r\n softmax_xent = -jnp.sum(labels * jax.nn.log_softmax(logits))\r\n softmax_xent /= labels.shape[0]\r\n return softmax_xent",
"def loss(labels, logits):\n return sparse_categorical_crossentropy(labels, logits, from_logits=True)",
"def cross_entropy(t,y):\r\n #print(-1*t*np.log(y))\r\n #print(np.shape(np.log(y)))\r\n #print(np.shape(t))\r\n return t*np.log(y)*(-1)",
"def multiclass_log_loss(y_true, y_pred):\n eps=1e-15\n predictions = np.clip(y_pred, eps, 1 - eps)\n\n # normalize row sums to 1\n predictions /= predictions.sum(axis=1)[:, np.newaxis]\n\n actual = np.zeros(y_pred.shape)\n n_samples = actual.shape[0]\n actual[np.arange(n_samples), y_true.astype(int)] = 1\n vectsum = np.sum(actual * np.log(predictions))\n loss = -1.0 / n_samples * vectsum\n return loss",
"def _bce_loss_with_logits(output, labels, **kwargs):\n return F.binary_cross_entropy_with_logits(output, labels, reduction='none', **kwargs)",
"def cross_entropy(input: Tensor, target: Tensor) -> Tensor:\n norm_log = log_softmax(input, 1)\n\n np_one_hot = np.eye(input.shape[1])[target.data]\n tensor_one_hot = tensor(np_one_hot, 'one-hot', False, True)\n\n mask = -norm_log * tensor_one_hot\n mask_sum = sum(mask, 1)\n loss = sum(mask_sum, 0)\n\n return loss / input.shape[0]",
"def sigmoid_cross_entropy(y, label):\r\n losses = - np.log(y + g_epsilon) * label - np.log(1.0 - y + g_epsilon) * (1.0 - label)\r\n return losses",
"def xentropy_loss(self, logits, labels):\n labels = tf.cast(labels, tf.int32)\n logits = tf.reshape(logits, [tf.shape(logits)[0], -1, self.num_classes])\n labels = tf.reshape(labels, [tf.shape(labels)[0], -1])\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits, labels=labels, name=\"loss\")\n\n return loss",
"def softmax_cross_entropy(logit, onehot, axis=-1):\n return SoftmaxCrossEntropy(axis).forward(logit, onehot)",
"def cross_entropy(y, y_hat):\n return -tf.math.log(\n tf.gather_nd(y_hat, tf.reshape(y, (-1, 1)), batch_dims=1)\n )",
"def onehot_argmax(logits):\n return T.extra_ops.to_one_hot(T.argmax(logits,-1),logits.shape[-1])",
"def cross_entropy(predictions, targets):\n likelihood = targets * np.log(predictions)\n return -np.sum(likelihood) / predictions.shape[0]",
"def get_cross_entropy(self):\n assert (self.dataset is not None) and (self.labels is not None), 'Logistic Regression requires a dataset and labels.'\n potential = 0.0\n logits = self.dataset @ self.parameters[:self.dataset.shape[1]]\n max_logits = torch.max(torch.zeros(logits.shape[0]),logits)\n potential = (-logits @ self.labels.t() + torch.sum(max_logits) + torch.sum(\n torch.log(torch.exp(-max_logits)+torch.exp(logits - max_logits))))# * n.reciprocal())\n return potential",
"def crossentropy_loss(y_true, y_pred):\n ce = tf.keras.losses.categorical_crossentropy(y_true, y_pred, from_logits=True) \n return ce",
"def categorical_crossentropy(self, y_hat, y):\n y_hat[y_hat == 0] = 10 ** -10\n return -np.sum(y * np.log(y_hat))",
"def multiclass_log_loss(y_true, y_pred, eps=1e-15):\r\n predictions = np.clip(y_pred, eps, 1 - eps)\r\n\r\n # normalize row sums to 1\r\n predictions /= predictions.sum(axis=1)[:, np.newaxis]\r\n\r\n actual = np.zeros(y_pred.shape)\r\n n_samples = actual.shape[0]\r\n actual[np.arange(n_samples), y_true.astype(int)] = 1\r\n vectsum = np.sum(actual * np.log(predictions))\r\n loss = -1.0 / n_samples * vectsum\r\n return loss",
"def binary_cross_entropy(y_true, y_preds):\n return np.sum(y_true * np.log(y_preds) + (1 - y_true) * np.log(1 - y_preds))",
"def cross_entropy(predicted, target):\n batch_size, num_classes = predicted.shape\n\n e_x = predicted.exp()\n log_e_x = e_x.log()\n a = log_sum_x_trick(predicted)\n x_n_offset = predicted - a\n\n exp_xn_offset = x_n_offset.exp()\n\n sum_exp_xn_offset = exp_xn_offset.sum(axis=1, keepdims=True)\n log_sum_exp_xn_offset = sum_exp_xn_offset.log()\n denominator = a + log_sum_exp_xn_offset\n log_softmax = log_e_x - denominator\n\n labels = to_one_hot(target, num_classes)\n prod = log_softmax*labels\n total = prod.sum()\n batch_size = tensor.Tensor(-batch_size)\n\n total = total / batch_size\n\n return total",
"def loss(logits, labels):\n labels = tf.to_int64(labels)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits, labels, name='xentropy')\n return tf.reduce_mean(cross_entropy, name='xentropy_mean')",
"def categorical_crossentropy(predictions, targets):\n return theano.tensor.nnet.categorical_crossentropy(predictions, targets)",
"def categorical_crossentropy(predictions, targets):\n return theano.tensor.nnet.categorical_crossentropy(predictions, targets)",
"def loss(logits, labels):\r\n labels = tf.to_int64(labels)\r\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\r\n labels=labels, logits=logits, name='xentropy')\r\n return tf.reduce_mean(cross_entropy, name='xentropy_mean')",
"def loss(logits, labels):\r\n labels = tf.to_int64(labels)\r\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\r\n labels=labels, logits=logits, name='xentropy')\r\n return tf.reduce_mean(cross_entropy, name='xentropy_mean')"
] | [
"0.7834664",
"0.7679004",
"0.7651251",
"0.7648797",
"0.7558988",
"0.7466316",
"0.74370956",
"0.73649174",
"0.7329104",
"0.731339",
"0.7216395",
"0.72037876",
"0.71983844",
"0.71960145",
"0.71808535",
"0.7131986",
"0.7122538",
"0.7113255",
"0.70872647",
"0.7056309",
"0.7031268",
"0.70239174",
"0.70205927",
"0.7016109",
"0.7015288",
"0.7009635",
"0.7000881",
"0.7000881",
"0.6996683",
"0.6996683"
] | 0.8200977 | 0 |
Goes through the first column of input table and returns the first sequence of dates it finds. | def get_dates(raw_table) -> "list of dates":
dates = []
found_first = False
for i, dstr in enumerate([raw_table[i][0] for i in range(0, len(raw_table))]):
if dstr:
if len(dstr.split("/")) == 3:
d = datetime.datetime.strptime(dstr, '%m/%d/%Y')
elif len(dstr.split("-")) == 3:
d = datetime.datetime.strptime(dstr, '%Y-%m-%d')
else:
# Not necessarily an error, could just be a non-date cell
logging.debug("unknown date-format: {}".format(dstr))
continue
dates.append(d)
if not found_first:
found_first = True
logging.debug("Found first date: '{}' at i: {}".format(d.isoformat(), i))
elif found_first:
logging.debug("Last date: {}".format(d))
break
return dates | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def min_date(self, rows: List[Row], column: DateColumn) -> Date:\n cell_values = [row.values[column.name] for row in rows if row.values[column.name] is not None]\n if not cell_values:\n return Date(-1, -1, -1)\n if not all([isinstance(value, Date) for value in cell_values]):\n raise ExecutionError(f\"Invalid values for date selection function: {cell_values}\")\n return min(cell_values) # type: ignore",
"def select_date(self, rows: List[Row], column: DateColumn) -> Date:\n dates: List[Date] = []\n for row in rows:\n cell_value = row.values[column.name]\n if isinstance(cell_value, Date):\n dates.append(cell_value)\n\n return dates[0] if dates else Date(-1, -1, -1) # type: ignore",
"def _first_good_date(self, day):\n count = 0\n while True:\n try:\n self.data.loc[day - timedelta(count)]\n return day - timedelta(count)\n except KeyError:\n count += 1",
"def find_next_valid(data, date):\n correct_date = None\n while correct_date is None:\n try:\n _ = data.loc[date]\n correct_date = date\n except KeyError:\n date = add_time(date, day=1)\n return correct_date",
"def test_get_date_column_index_first_col(self, one_row_worksheet):\n\n actual_result = one_row_worksheet.get_date_column_index()\n assert actual_result == 0",
"def find_first_value(self, value, closest=False):\n value = pd.to_datetime(value)\n value = column.as_column(value).as_numerical[0]\n return self.as_numerical.find_first_value(value, closest=closest)",
"def first(x):\n try:\n x = x.to_series()\n except AttributeError:\n pass\n return list(x)[0]",
"def next_release_date(date):\n df = get_release_dates()\n df = df[df['ReleaseDate'] > date]\n return df['ReleaseDate'].iloc[0]",
"def next_determination_date(ddates,clo_idx):\n dd = ddates.loc[ddates['Fund']==clo_idx,'Determination Date']\n next_ddate = min(dd.loc[dd>pd.Timestamp.today()], key=lambda s: (s-pd.Timestamp.today()))\n return next_ddate",
"def test_ordinal_first(self):\n with open(\"tests/data_files/labor_day_dates.txt\", \"r\") as dates_file:\n dates_strings_list = dates_file.read().splitlines()\n\n for date_string in dates_strings_list:\n test_date = date(*[int(p) for p in date_string.split(\"-\")])\n labor_day = get_by_values(Ordinal.first, Weekday.Monday, Month.September, test_date.year)\n\n self.assertEquals(test_date, labor_day)",
"def next(self):\n return self.from_date(self.date_b)",
"def findNextDate(cnx, firstdate):\n\n cur = cnx.cursor()\n cur.execute(\"SELECT gross_date FROM boxoffice ORDER BY gross_date DESC LIMIT 1\")\n\n try:\n lastdate = cur.fetchone()[0]\n nextdate = datetime.strptime(lastdate, '%Y-%m-%d') + timedelta(days=1)\n nextdate = nextdate.strftime('%Y-%m-%d')\n except TypeError:\n nextdate = firstdate\n finally:\n cur.close()\n return nextdate",
"def get_first_selection(table, column_name):\n def replace(entry):\n if pd.isnull(entry):\n return None\n else:\n return re.sub(r',.*', '', entry)\n assert (isinstance(table, Table)), \"Input not a supported type.\"\n column = table.apply(replace, column_name)\n return table.append_column(column_name, column)",
"def first_day_of_month(date):\n return date.replace(day=1)",
"def first_day_of_month(date):\n return date.replace(day=1)",
"def _get_next_order_date(self, line, start_date):\n self.ensure_one()\n next_date = fields.Date.from_string(self.start_date)\n while next_date <= start_date:\n next_date = self.__get_next_term_date(\n next_date, line.ordering_unit, line.ordering_interval)\n return next_date",
"def first(seq):\n return next(iter(seq))",
"def start1(self): \n return self.ddmmyyyy(self.rowTime.start)",
"def find_first_meeting(quarter_start, meeting_days, meeting_start_td):\n # Find DOW\n numeric_meeting_days = [NUMERIC_DAY_BY_LETTER[day] for day in meeting_days]\n first_meeting_day = next(date for date in date_candidates(quarter_start) if date.weekday() in numeric_meeting_days)\n # first_meeting_day is a datetime w/ the time set at midnight before the first meeting\n # adding the meeting_start_td yields a datetime instance\n first_meeting = first_meeting_day + meeting_start_td\n\n return first_meeting_day + meeting_start_td",
"def next_payment_date(ddates,clo_idx):\n dd = ddates.loc[ddates['Fund']==clo_idx,'Payment Date'].dropna()\n next_date = min(dd.loc[dd>pd.Timestamp.today()], key=lambda s: (s-pd.Timestamp.today()))\n return next_date",
"def _get_first_element(cls, d):\n\n t = np.where(d[:, 2] > 0)[0]\n if len(t):\n return d[t[0], 0], d[t[0], 1], t[0]\n return None, None, None",
"def get_by_date(sequence, date):\r\n item = filter_by_date(sequence, date, date)\r\n return item.pop() if item else None",
"def get_date(row):\n year, month = row[['CompetitionOpenSinceYear', 'CompetitionOpenSinceMonth']]\n if not pd.isnull(year):\n return pd.Timestamp(int(year), int(month), 1)",
"def get_date_from_line(s):\n match = re.search(r'\\d{4}-\\d{2}-\\d{2}', s)\n first_date = datetime.strptime(match.group(), '%Y-%m-%d').date()\n return first_date",
"def first_of_next_month(ref_date):\n year, month = add_months(ref_date.year, ref_date.month, 1)\n return type(ref_date)(year, month, 1)",
"def get_dates_from_table(table):\n ths = table.find_all('th', attrs={'class': 'tide-table__day'})\n return [\n (date.fromisoformat(th.attrs['data-date']), int(th.attrs.get('colspan', 0)))\n for th in ths\n ]",
"def first(self, offset: pandas.DateOffset):\n return DataFrameDefault.register(pandas.DataFrame.first)(self, offset)",
"def _get_first_end_date(self):\n ins = acm.FInstrument['SACPI']\n market = \"internal\"\n start_year = acm.Time.DateAddDelta(acm.Time.FirstDayOfYear(self.start_date), 0, 0, -1)\n this_year_prices = acm.FPrice.Select(\"instrument='%s' and market='%s' and day>'%s' and day<'%s'\" \n % (ins.Name(), market, start_year, self.start_date))\n\n prices = sorted(this_year_prices, key=lambda price: price.Day(), reverse=True)\n last_sacpi_day = prices[0].Day()\n sacpi_plus_five_m = acm.Time.FirstDayOfMonth(acm.Time.DateAddDelta(last_sacpi_day, 0, 5, 0))\n return sacpi_plus_five_m",
"def test_get_index_of_day_one_day_list(self):\n days = [\"15.07.2013\"]\n self._test_find_day(days)\n self._test_giod(days, \"16.07.2013\", 0,\n -1, \"Find not existing day in an One-Day-List\")\n self._test_giod(days, \"16.07.2013\", 1,\n 0, \"Find not existing day in an One-Day-List with next=1.\")\n self._test_giod(days, \"16.07.2013\", -1,\n 0, \"Find not existing day in an One-Day-List with next=-1.\")\n self._test_giod(days, \"10.07.2013\", 0,\n -1, \"Find not existing day in an One-Day-List\")\n self._test_giod(days, \"10.07.2013\", 1,\n 0, \"Find not existing day in an One-Day-List with next=1.\")\n self._test_giod(days, \"10.07.2013\", -1,\n 0, \"Find not existing day in an One-Day-List with next=-1.\")",
"def fetch_first(self, tablename):\n\n query = 'select * from ' + tablename + \" ASC LIMIT 1\"\n try:\n self.__cur.execute(query)\n except Exception as e:\n self.__conn.rollback()\n raise e\n fetcheddata = self.__cur.fetchall()\n if fetcheddata:\n fetcheddata = fetcheddata[0]\n fetcheddata = self.__helper._functions__rowtodict([fetcheddata])\n return fetcheddata[0]\n return None"
] | [
"0.6035555",
"0.60046345",
"0.5898772",
"0.5828134",
"0.58066106",
"0.5686725",
"0.5653263",
"0.56269705",
"0.56151915",
"0.5590684",
"0.54638934",
"0.54195327",
"0.54144526",
"0.5412872",
"0.5412872",
"0.5394708",
"0.5388206",
"0.5380963",
"0.5372471",
"0.5351405",
"0.5299014",
"0.5291047",
"0.5286068",
"0.52644354",
"0.52565",
"0.5244639",
"0.52194184",
"0.5198938",
"0.5197125",
"0.51918703"
] | 0.65179384 | 0 |
Returns the list of tweets with a given hashtag in JSON format | def getByHashtags(hashtag):
# set page_limits. The default is 1
pages_limit = request.args.get('pages_limit') or 1
pages_limit = int(pages_limit)
raw_response = get_response(tw_api, 'search/tweets', { 'q': '#' + hashtag, 'count': 100 }, pages_limit)
list_response = convert_resp2list(raw_response)
return jsonify(list_response) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filter_by_hashtag(tweets: list, hashtag: str) -> list:\n tweets_with_hashtag = {} # findall(): Kui tekstis on rohkem kui üks regulaaravaldisele vastav alamsõne saab kõikide vastete järjendi moodustada funktsiooniga findall()\n pattern = r\"#\\w+\" # \\w : tähed, numbrid, alakriips, + : 1 või rohkem\n for tweet in tweets: # r\"string\" on \"raw\" tüüpi string, mis tähendab, et kurakaldkriipsud(\"\\\") jäetakse teksti alles.\n find_hashtag = re.findall(pattern, tweet.content) # word:\\w\\w\\w. Regulaaravaldisele vastab täpne sõne \"word:\" ning sellele järgnevad 3 suvalist tähte.\n if find_hashtag:\n tweets_with_hashtag.setdefault(ht, []).append(tweet)\n return tweets_with_hashtag[hashtag]",
"def get_hashtag_tweets(self, hashtag,\n count=settings.TWITTER_DEFAULT_LIMIT):\n url = urljoin(self.base_url, \"/search/tweets.json\")\n response = self.session.get(\n url,\n params={\n \"q\": hashtag,\n \"count\": count,\n \"include_entities\": True\n },\n auth=self.__auth,\n )\n data = response.json()\n if response.ok:\n data = [Tweet(tweet_data) for tweet_data in data['statuses']]\n else:\n if 'error' in data:\n raise TwitterException(data['error'], code=response.status_code)\n elif 'errors' in data:\n error = data['errors'][0]\n raise TwitterException(error['message'], code=response.status_code)\n return data",
"def get_tweets_by_hashtag_route(hashtag):\n response, code = get_tweets_by_hashtag(\n hashtag, request.args.get('limit', 30))\n return jsonify(response), code",
"def get_tweets(hashtag):\n api = twitter.Api(consumer_key=TWITTER_API_CONSUMER_KEY,\n consumer_secret=TWITTER_API_CONSUMER_SECRET,\n access_token_key=TWITTER_API_ACCESS_TOKEN_KEY,\n access_token_secret=TWITTER_API_ACCESS_TOKEN_SECRET)\n\n query = (f\"q=%23{HASHTAG}%20-RT\"\n f\"&result_type=recent&since=2019-01-01&count={NUM_TWEETS}\")\n results = api.GetSearch(raw_query=query)\n\n return [\n format_tweet(tweet.AsDict())\n for tweet in results\n ]",
"def gettweets(request):\n temp = json.loads(request.body)\n print (temp['hashtags'])\n return Response(tw_fetcher.gethashes(temp['hashtags']), status=status.HTTP_201_CREATED)",
"def analyze_hashtag(self, hashtag, count=200):\n tweets = []\n\n for x in xrange(0, int(count / 100)):\n tweets.extend(self.tweet_fetcher.get_tweets(hashtag))\n\n analyzed_tweets = sort_tweets(self.sa.classify(tweets))\n\n self.analyzed_tweets = analyzed_tweets\n\n return analyzed_tweets",
"def get_hashtag_info(self, hashtag):\n uri = 'hashtags/' + hashtag\n return self.make_request(uri)",
"def extract_hashtags(tweet):\n tknzr = TweetTokenizer()\n hashtags = [token.lower() for token in tknzr.tokenize(tweet) if re.match(hashtag_re, token)]\n return hashtags",
"def list_tweets():\n tweets = []\n tuples = query_db('''\n select message.*, user.* from message, user\n where message.author_id = user.user_id\n order by message.pub_date desc limit ?''', [PER_PAGE])\n for tuple in tuples:\n tweet = {}\n tweet[\"username\"] = tuple['username']\n tweet[\"email\"] = tuple['email']\n tweet[\"text\"] = tuple['text']\n tweet[\"pub_date\"] = tuple['pub_date']\n tweets.append(tweet)\n return jsonify({'tweets':tweets}),200",
"def hashtags(self):\n return [tag[\"text\"] for tag in self.status.hashtags]",
"def buildHashtagsDict(tweets):\n hashtags = {}\n for tweet in tweets:\n if tweet['entities']['hashtags']:\n for hashtag in tweet['entities']['hashtags']:\n tag = hashtag['text'].lower().strip()\n if tag not in hashtags:\n hashtags[tag] = 1\n else:\n hashtags[tag] += 1\n return hashtags",
"def get_hashtags():\r\n hashtags_list = cache.get('hashtags-list')\r\n if hashtags_list is None:\r\n pipeline = [\r\n {\"$unwind\": \"$entities\"},\r\n {\"$unwind\": \"$entities.hashtags\"},\r\n {\"$unwind\": \"$entities.hashtags.text\"},\r\n {\"$group\": {\"_id\": \"$entities.hashtags.text\", \"count\":\r\n {\"$sum\": 1}}},\r\n {\"$sort\": SON([(\"count\", -1), (\"_id\", -1)])}]\r\n\r\n hashtags = mongo_coll_tweets.aggregate(pipeline)\r\n hashtags_list = []\r\n for hashtag in hashtags:\r\n hashtags_list.append((list(hashtag.values())[1], list(hashtag.values())[0]))\r\n\r\n cache.set('hashtags-list', hashtags_list,\r\n cfg['flask_cache_timeout'] * 60)\r\n\r\n return hashtags_list",
"def remove_hashtag(lista_tweets):\n\n novos_tweets = []\n\n for tweet in lista_tweets:\n texto = re.sub(r\"#\\S+\", \"\", tweet)\n novos_tweets.append(texto)\n\n return novos_tweets",
"def get_tweets(keyword):\n url = 'http://search.twitter.com/search.json?q='\n\n page = urllib.urlopen('%s%s' % (url, keyword))\n blob = page.read()\n jsonblob = json.loads(blob)\n return jsonblob",
"def get_hashtags(list):\n hashtags = []\n for h in list:\n hashtags.append(h['text'])\n return hashtags",
"def get_tweets():\r\n tweets = models.Tweet.query.all()\r\n output = []\r\n\r\n for tweet in tweets:\r\n tweet_data = {'id': tweet.id,\r\n 'content': tweet.text_content,\r\n 'username': tweet.username,\r\n 'timestamp': tweet.timestamp.isoformat(),\r\n 'likes_count': models.Like.query.filter(models.Like.post_id == tweet.id).count(),\r\n 'retweets_count': models.Retweet.query.filter(models.Retweet.post_id == tweet.id).count()}\r\n\r\n output.append(tweet_data)\r\n\r\n return {\"tweets\": output}",
"def hashtags(max: int = None):\n for hashtag in client.hashtags(max=max):\n print(json.dumps(hashtag))",
"def get_tweets():\n broken_json = read_tweets()\n #\n # Remove the last comma and wrap in a json list\n #\n parsed = json.loads('[%s]' % broken_json[:-1])\n return parsed",
"def get_hashtags(self):\n\t\t# Only first level comments should be checked for hashtag. Maybe.\n\t\tpassl",
"def get_tweets(user, num = 200):\n tweets = []\n \n for tweet in user.home_timeline(count = num):\n edited_tweet = tweet.text\n edited_tweet = edited_tweet.encode(encoding='UTF-8', errors='Ignore') \n tweets.append(edited_tweet)\n return tweets",
"def ajax_get_hashtags():\r\n f = request.args.get('f', 0, type=int)\r\n t = request.args.get('t', 0, type=int)\r\n\r\n hashtags_list = get_hashtags()\r\n\r\n try:\r\n if t == 0:\r\n return jsonify(dict(hashtags_list[f:]))\r\n elif t > len(hashtags_list):\r\n return jsonify(dict(hashtags_list[f:]))\r\n else:\r\n return jsonify(dict(hashtags_list[f:t]))\r\n except:\r\n return False",
"def process_tweet(tweet):\n d = {}\n d['hastags'] = [hashtag['text'] for hashtag in tweet['entities']['hashtags']]\n d['text'] = tweet['text']\n d['user'] = tweet['user']['screen_name']\n d['user_loc'] = tweet['user']['location']\n return d",
"def show_search_results():\n\n #Get values from search-box via AJAX\n current_keyword = request.form.get('search').lower()\n print \"**********************\"\n print current_keyword\n print \"**********************\"\n tweets = get_tweets_by_api(term=current_keyword)\n\n result = []\n\n for tweet in tweets:\n # Exclude retweets since they appear as duplicatses to endu ser\n if tweet.retweeted_status is None:\n # Convert tweet text from unicode to text\n tweet_id = tweet.id\n text = unicodedata.normalize('NFKD', tweet.text).encode('ascii', 'ignore')\n # Find URL in text and bind to url\n # url = re.search('((?:http|https)(?::\\\\/{2}[\\\\w]+)(?:[\\\\/|\\\\.]?)(?:[^\\\\s\"]*))', text)\n url = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', text)\n # Remove URL from text\n text_wo_url = re.sub(r'^https?:\\/\\/.*[\\r\\n]*', '', text, flags=re.MULTILINE)\n # Handle / Name\n user = unicodedata.normalize('NFKD', tweet.user.screen_name).encode('ascii', 'ignore')\n # Count of favorites\n favorite_count = tweet.favorite_count\n #Return dictionary of hashtags with hashtag as key and number of occurances as value\n if tweet.hashtags:\n # Convert hashtags from unicode to string\n ht_list = []\n for hashtag in tweet.hashtags:\n ht_str = unicodedata.normalize('NFKD', hashtag.text).encode('ascii', 'ignore')\n ht_list.append(ht_str.lower())\n hashtags = Counter(ht_list)\n else:\n hashtags = tweet.hashtags\n # Convert tweet from unicode to datetime\n created_at = tweet.created_at\n # format created_at string to ISO 8610\n created_at_str = time.strftime('%Y-%m-%d %H:%M:%S', time.strptime(created_at, '%a %b %d %H:%M:%S +0000 %Y'))\n # create a moment from the string\n created_at = moment.date(created_at_str, 'YYYY-MM-DD HH:mm:ss')\n result.append({'created_at': created_at_str, 'tweet_text': text_wo_url, 'user': user,\n 'favorite_count': favorite_count, 'hashtags': hashtags,\n 'url': url, 'tweet_id': tweet_id})\n\n print \"&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&\"\n print result\n print \"&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&\"\n\n return jsonify(result=result) #, tweets",
"def readHashtags():\n next_max_id = True\n reader = HashtagReader()\n while next_max_id:\n if next_max_id is True:\n next_max_id = ''\n _ = api.getUserFeed(usernameId=userId, maxid=next_max_id)\n reader.items.extend(api.LastJson.get('items', []))\n next_max_id = api.LastJson.get('next_max_id', '')\n reader.checkBannedTags()\n reader.printHashtagsDict()",
"def get_top_hashtags_from_twitter_api(country='Japan', extended_search=True, debug=False):\n trends = get_top_trends_from_twitter(country=country, exclude_hashtags=False)\n trends = json.loads(trends)\n\n trending_hashtags = [t['label'] for t in trends]\n\n #print(json.dumps(trends, indent=4, ensure_ascii=False))\n\n queries = [t['query'] for t in trends]\n\n if debug:\n #[print(x) for x in trends]\n #[print(x) for x in queries]\n queries = [queries[0]]\n\n full_hashtags_list = []\n for query in queries:\n #print(query)\n # there is no country filter, but there is language filter at least\n if country == 'Japan':\n responses = api.GetSearch(term=query, locale='ja', return_json=True)\n try: responses = responses['statuses']\n except: print(responses)\n else:\n responses = api.GetSearch(term=query, return_json=True)\n try: responses = responses['statuses']\n except: print(responses)\n\n #print(json.dumps(responses, indent=4, ensure_ascii=False))\n\n trend_hashtags_list = []\n for response in responses:\n if debug: print(json.dumps(response, indent=4, ensure_ascii=False))\n text = response['text']\n\n hashtags_list = response['entities']['hashtags']\n\n if len(hashtags_list) > 0:\n hashtags_list = [h['text'] for h in hashtags_list]\n [trend_hashtags_list.append(h) for h in hashtags_list]\n\n full_hashtags_list.append(trend_hashtags_list)\n\n flat_hashtags_list = [item for sublist in full_hashtags_list for item in sublist]\n\n # turn it into a set to clear duplicates, then append #\n flat_hashtags_list = list(set(flat_hashtags_list))\n flat_hashtags_list = ['#'+h for h in flat_hashtags_list]\n\n flat_tier_list = []\n for h in flat_hashtags_list:\n if h in trending_hashtags:\n flat_tier_list.append(1)\n else:\n flat_tier_list.append(2)\n\n output = []\n for hashtag, tier in zip(flat_hashtags_list, flat_tier_list):\n output.append({\n \"label\": hashtag,\n \"tier\": tier\n })\n\n sorted_output = sorted(output, key=lambda x: x['tier'])\n\n output_json = json.dumps(sorted_output, ensure_ascii=False)\n return output_json",
"def get_tweets(self):\r\n now = datetime.datetime.now()\r\n tweet_json = self.api.get_tweets(self.last, now)\r\n self.last = now\r\n return [Tweet(x) for x in tweet_json]",
"def get_posts(username):\r\n\r\n # Authenticate to Twitter\r\n auth = tweepy.OAuthHandler(twitter_credentials.CONSUMER_KEY, twitter_credentials.CONSUMER_SECRET)\r\n auth.set_access_token(twitter_credentials.ACCESS_TOKEN, twitter_credentials.ACCESS_TOKEN_SECRET)\r\n\r\n api = tweepy.API(auth)\r\n\r\n try:\r\n api.verify_credentials()\r\n print(\"Authentication OK\")\r\n except:\r\n print(\"Error during authentication\")\r\n\r\n alltweets=[]\r\n\r\n new_tweets = api.user_timeline(screen_name = username,count=200,tweet_mode='extended')\r\n status = new_tweets[0]\r\n json_str = json.dumps(status._json)\r\n\r\n #convert to string\r\n json_str = json.dumps(status._json)\r\n #deserialise string into python object\r\n parsed = json.loads(json_str)\r\n print(json.dumps(parsed, indent=4, sort_keys=True))\r\n\r\n # save most recent tweets\r\n alltweets.extend(new_tweets)\r\n\r\n # save the id of the oldest tweet less one\r\n oldest = alltweets[-1].id - 1\r\n\r\n # keep grabbing tweets until there are no tweets left to grab\r\n while len(new_tweets) > 0:\r\n print(f\"getting tweets before {oldest}\")\r\n\r\n # all subsiquent requests use the max_id param to prevent duplicates\r\n new_tweets = api.user_timeline(screen_name=username, count=200, max_id=oldest,tweet_mode='extended')\r\n\r\n # save most recent tweets\r\n alltweets.extend(new_tweets)\r\n\r\n # update the id of the oldest tweet less one\r\n oldest = alltweets[-1].id - 1\r\n\r\n print(f\"...{len(alltweets)} tweets downloaded so far\")\r\n\r\n\r\n outtweets=[]\r\n\r\n\r\n for item in alltweets:\r\n\r\n mined = {\r\n 'tweet_id': item.id,\r\n 'name': item.user.name,\r\n 'screen_name': item.user.screen_name,\r\n 'retweet_count': item.retweet_count,\r\n 'lang' : item.lang,\r\n 'text': item.full_text,\r\n 'mined_at': datetime.datetime.now(),\r\n 'created_at': item.created_at,\r\n 'favourite_count': item.favorite_count,\r\n 'hashtags': item.entities['hashtags'],\r\n 'status_count': item.user.statuses_count,\r\n 'location': item.place,\r\n 'source_device': item.source\r\n }\r\n\r\n try:\r\n mined['retweet_text'] = item.retweeted_status.full_text # In case the tweet is a RT, there is a need to\r\n # retrieve the retweet_text field which contains the full comment (up to 280 char) accompanying the retweet\r\n except:\r\n mined['retweet_text'] = ''\r\n\r\n outtweets.extend([mined])\r\n\r\n return outtweets",
"def get_tweets(api):\n return api.user_timeline()",
"def get_readable_hashtags(cls, hashtag_list):\n\n list_of_readable_hashtags = []\n\n for hashtag in hashtag_list:\n hashtag = '#' + hashtag + \" \"\n list_of_readable_hashtags.append(hashtag)\n\n return list_of_readable_hashtags",
"def list_user_tweets(username):\n userdata = query_db('select * from user where username = ?',\n [username], one=True)\n if userdata is None:\n abort(404)\n else:\n user_details = {\"username\": userdata['username'],\"user_id\":userdata['user_id']}\n\n followed = False\n if request.json.get('user_id') is not None:\n followed = query_db('''select 1 from follower where\n follower.who_id = ? and follower.whom_id = ?''',\n [request.json.get('user_id'), user_details.get('user_id')],\n one=True) is not None\n\n user_tweets = []\n if user_details is None:\n return jsonify({'message': 'User not found'}), 404\n tuples = query_db('''\n select message.*, user.* from message, user where\n user.user_id = message.author_id and user.user_id = ?\n order by message.pub_date desc limit ?''',\n [user_details['user_id'], PER_PAGE])\n\n for tuple in tuples:\n user_tweet = {}\n user_tweet[\"username\"] = tuple['username']\n user_tweet[\"email\"] = tuple['email']\n user_tweet[\"text\"] = tuple['text']\n user_tweet[\"pub_date\"] = tuple['pub_date']\n user_tweets.append(user_tweet)\n\n return jsonify({'user_tweets':user_tweets, 'followed' : followed, 'user_details':user_details}),200"
] | [
"0.76560086",
"0.7615131",
"0.7581406",
"0.74934655",
"0.7097482",
"0.6735754",
"0.66883725",
"0.6683702",
"0.6654017",
"0.6516785",
"0.6509575",
"0.6500619",
"0.64606947",
"0.64300966",
"0.6401342",
"0.6375272",
"0.63466364",
"0.62755454",
"0.62103456",
"0.62082505",
"0.6136515",
"0.6128049",
"0.6107556",
"0.6088074",
"0.60502565",
"0.6015545",
"0.5948045",
"0.5926561",
"0.591168",
"0.58975863"
] | 0.8051795 | 0 |
Test density function for multiple values at once | def test_density_multiple(self):
earth = PREM()
radii = np.linspace(0, 6500e3, 6501)
expected = [earth.density(r) for r in radii]
assert np.array_equal(earth.density(radii), expected) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_density_multiple(self):\n earth = CoreMantleCrustModel()\n radii = np.linspace(0, 6500e3, 6501)\n expected = [earth.density(r) for r in radii]\n assert np.array_equal(earth.density(radii), expected)",
"def test_probability_density(self):\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data)\n X = np.array([2000., 200., 0.])\n expected_result = 0.032245296420409846\n\n # Run\n result = copula.probability_density(X)\n\n # Check\n assert expected_result - 1e-16 < result < expected_result + 1e-16",
"def _check_density(density, n_features):\n if density == \"auto\":\n density = 1 / np.sqrt(n_features)\n\n elif density <= 0 or density > 1:\n raise ValueError(\"Expected density in range ]0, 1], got: %r\" % density)\n return density",
"def test_density(self):\n earth = CoreMantleCrustModel()\n assert earth.density(0) == 14\n assert earth.density(1e6) == 14\n assert earth.density(3.464e6) == 14\n assert earth.density(3.5e6) == 3.4\n assert earth.density(5e6) == 3.4\n assert earth.density(6.338e6) == 3.4\n assert earth.density(6.378e6) == 2.9",
"def probability_density(self, X):\n raise NotImplementedError",
"def sd(vals):",
"def rate_density(x, a):\n return a * x",
"def testDensityCalculation(self):\n known_densities = np.array([1.76776695e-01, 1.76776695e-01, 1.76776695e-01,\n 4.59619433e-01, 4.59619433e-01, 1.76776695e-01, 5.00000000e-01, \n 8.84538011e-02, 3.40206909e-02, 2.26040275e-04])\n densities = nb._get_local_densities() \n np.testing.assert_allclose(densities, known_densities)",
"def test_density(self):\n sol = Mader(p_cj=3.0e11, d_cj=8.0e5, gamma=3.0, u_piston=0.0)\n r = np.array([0.7, 0.8])\n t = 6.25e-6\n solrt = sol(r, t)\n np.testing.assert_allclose(solrt.density[0], 2.26666666666663)",
"def _estimate_density(self, x):\n\n self.density_, self.bins_ = np.histogram(x, bins=10, density=True)",
"def example():\n\tN=9000\n\t#true distribution -> uniform between 0 and 3\n\ttrue = np.zeros(N)\n\ttrue[:N/3-1] = 0.5\n\ttrue[N/3:2*N/3-1] = 1.5\n\ttrue[2*N/3:] = 2.5\n\t# measured distribution -> fixed normal distribution with sigma=.5 and mean=1.5\n\tmeasured = np.zeros(N)\n\tmeasured[:1300-1] = 0.5\n\tmeasured[1300:1300+6000-1] = 1.5\n\tmeasured[1300+6000:] = 2.5\n\t# Also return the generating true pdf\n\tx = np.linspace(-1, 5, 500)\n\tpdf = np.zeros([2, len(x)])\n\tpdf[0] = x\n\tpdf[1] = scs.uniform.pdf(x, 0, 3)\n\n\treturn measured, true, pdf, [0,1,2,3]",
"def test_kde_scipy(limits):\n data = np.random.normal(0, 1, 10000)\n grid, density_own = _kde(data, custom_lims=limits)\n density_sp = gaussian_kde(data).evaluate(grid)\n np.testing.assert_almost_equal(density_own.sum(), density_sp.sum(), 1)",
"def find_density(attr, D, h):\n d = D.shape[1]\n n = D.shape[0]\n total = 0\n for xi in D:\n kernel = find_kernel_value(attr, xi, h, d)\n total += kernel\n return total / (n * h ** d)",
"def density(self):\r\n return self.count_ones() / float(self.xspan * self.yspan)",
"def test_density(self, radius, density):\n earth = PREM()\n assert earth.density(radius) == pytest.approx(density, rel=1e-5)",
"def test_exercise_1():\n a, b = 5, 0\n fvals = []\n grid = np.linspace(-3, 4)\n for value in grid:\n fvals.append(get_test_function(value, a, b))\n plt.plot(grid, fvals)",
"def KDE(x, (ll, ul)=('',''),res=1024.):\n #r.assign(\"x\", x)\n \n if ll :\n rn=arange(ll,ul,(ul-ll)/res)\n #print x.shape,rn.shape\n est = kde.gaussian_kde(x.ravel()).evaluate(rn)\n #r.assign(\"ll\", ll)\n #r.assign(\"ul\", ul)\n #est = r('density(x,from=ll, to=ul)') #trims the density borders\n else:\n ll = min(x)\n ul = max(x)\n rn=arange(ll,ul,(ul-ll)/res)\n est = kde.gaussian_kde(x).evaluate(rn)\n #est = r('density(x)')\n print 'No - KDE'\n return {'y':est,'x':rn}",
"def density(self, arg):\n out = 0\n for weight, mean, std in zip(self.weights, self.means, self.stds):\n scale = std * self.data['maturity']**.5\n loc = ((mean - self.data['riskfree']) *\n self.data['maturity'] - scale**2)\n out += weight * scs.norm(loc, scale).pdf(arg)\n return out",
"def prob_density_func(xs,norm=True,data_range='data'):\n if data_range=='data':\n dist_keys = set(xs)\n elif data_range=='ext_data':\n dist_keys = range(min(xs),max(xs)+1)\n else:\n dist_keys = data_range\n \n pdf = dict([(k,0.0) for k in dist_keys])\n for x in xs:\n pdf[x] += 1.0\n if norm:\n pdf.update([(k,pdf[k]/sum(pdf.values())) for k in pdf.keys()])\n return pdf",
"def make_conditional_density(bgm_fit, threshold, sigma, width):\n pass",
"def test_plot_density_no_subset():\n model_ab = from_dict(\n {\n \"a\": np.random.normal(size=200),\n \"b\": np.random.normal(size=200),\n }\n )\n model_bc = from_dict(\n {\n \"b\": np.random.normal(size=200),\n \"c\": np.random.normal(size=200),\n }\n )\n axes = plot_density([model_ab, model_bc])\n assert axes.size == 3",
"def get_density(xs, ys, mu, sigma, DIMENSION=2):\n return np.array([[kde(np.array([x,y]), mu, sigma, DIMENSION) for x in xs] for y in ys])",
"def _calc_density(x: np.ndarray, y: np.ndarray):\n from scipy.stats import gaussian_kde\n\n # Calculate the point density\n xy = np.vstack([x, y])\n z = gaussian_kde(xy)(xy)\n\n min_z = np.min(z)\n max_z = np.max(z)\n\n # Scale between 0 and 1\n scaled_z = (z - min_z) / (max_z - min_z)\n\n return scaled_z",
"def test_stddev(self):\n self.assertEqual(stddev(list1, sample=False), np.std(list1))\n self.assertEqual(stddev(list1), np.std(list1, ddof=1))",
"def test_densities():\n\n actual, r, wt = GridGenerator.make_grid(400)\n grid = 4*pi*r**2*wt\n\n data = AtomData()\n\n print(\"\\nINTEGRATED DENSITY TEST\")\n print(\"=======================\")\n for a in list(data.nuclear_charge.keys()):\n atom = Atom(a)\n Nel = data.electron_count[a]\n d0, d1, g0, g1, t0, t1, l0, l1 = atom.get_densities(r)\n\n # Count electrons per spin channel\n s_occ = AtomData.s_occ.get(a, [0, 0])\n p_occ = AtomData.p_occ.get(a, [0, 0])\n d_occ = AtomData.d_occ.get(a, [0, 0])\n f_occ = AtomData.f_occ.get(a, [0, 0])\n nela = np.sum(s_occ[0])+np.sum(p_occ[0])+np.sum(d_occ[0])+np.sum(f_occ[0])\n nelb = np.sum(s_occ[1])+np.sum(p_occ[1])+np.sum(d_occ[1])+np.sum(f_occ[1])\n assert(nela+nelb == Nel)\n\n id0 = np.dot(d0, grid)\n id1 = np.dot(d1, grid)\n\n diff_0 = id0 - nela\n percent_diff_0 = 100*diff_0/nela\n\n # Check to catch for Hydrogen having no beta electrons\n if nelb > 0.0:\n diff_1 = id1 - nelb\n percent_diff_1 = 100*diff_1/nelb\n else:\n diff_1 = 0.0\n percent_diff_1 = 0.0\n\n print(\"{:>3} - N_0 = ({:4.1f}) {:+2.6e}%, N_1 = ({:4.1f}) {:+2.6e}%, {:}\".format(a, id0, percent_diff_0, id1, percent_diff_1, \"PASSED\" if max(abs(diff_0), abs(diff_1)) < 1e-4 else \"FAILED - \"))\n\n print(\"\\nINTEGRATED KINETIC TEST\")\n print(\"=======================\")\n for a in list(data.ke_test.keys()):\n atom = Atom(a)\n t_bm = data.ke_test[a]\n d0, d1, g0, g1, t0, t1, l0, l1 = atom.get_densities(r)\n\n it0 = np.dot(t0, grid)\n it1 = np.dot(t1, grid)\n itot = it0 + it1\n\n diff = itot - t_bm\n print(\"{:>3} - T = {:+.6e}%, {:}\".format(a, 100*diff/t_bm, \"PASSED\" if abs(100*diff/t_bm) < 1e-2 else \"FAILED - \"))\n\n\n # The integral of the Laplacian over all space should be 0. Check that.\n print(\"\\nINTEGRATED LAPLACIAN TEST\")\n print(\"=========================\")\n for a in list(AtomData.ke_test.keys()):\n atom = Atom(a)\n\n d0, d1, g0, g1, t0, t1, l0, l1 = atom.get_densities(r)\n\n il0 = np.dot(grid, l0)\n il1 = np.dot(grid, l1)\n print(\"{:>3} - L_0 = {:+.6e}, L_1 = {:+.6e}, {:}\".format(a, il0, il1, \"PASSED\" if max(abs(il0), abs(il1)) < 1e-6 else \"FAILED - \"))\n\n\n print(\"\\nFINITE DIFFERENCE GRADIENT TEST\")\n print(\"===============================\")\n print(\"Testing gradient evaluation function against finite difference estimate...\")\n ne = Atom(\"Ne\") # Let's use \"the guvnor\"\n # We only need to test a few points around the core\n fdh = 1e-8\n fdr = np.arange(0.9, 0.9+fdh*10, fdh)\n d0, d1, g0, g1, t0, t1, l0, l1 = ne.get_densities(fdr)\n\n # First the first central difference\n fdiff = (d0[2:]-d0[:-2])/(2*fdh) # Construct the central difference\n if np.allclose(fdiff, g0[1:-1], atol=1e-1): # finite difference is not perfect, so lenient tollerance\n print(\"Gradient: PASSED\")\n else:\n print(\"Gradient: FAILED -\")\n\n print(\"\\nELEMENT COLOR FUNCTIONS TEST\")\n print(\"===========================\")\n test_obj = [Atom(\"H\"), Atom(\"C\"), Atom(\"O\")]\n test_str = [\"H\", \"C\", \"O\"]\n ref = np.array([[1., 1., 1.], [0.565, 0.565, 0.565], [1. 
, 0.051, 0.051]])\n\n if np.allclose( np.array(get_colors_for_elements(test_obj)), ref):\n print(\"\\nColor from objects: PASSED\")\n else:\n print(\"\\nColor from objects: FAILED -\")\n\n if np.allclose( np.array(get_colors_for_elements(test_str)), ref):\n print(\"Color from strings: PASSED\")\n else:\n print(\"Color from strings: FAILED -\")\n\n if HAVE_LIBXC:\n test_functional='GGA_X_PBE'\n print(\"\\nATOMIC EXCHANGE ENERGIES WITH {}\".format(test_functional))\n print(\"============================================\")\n for a in list(data.ke_test.keys()):\n atom = Atom(a)\n nE, vrho, vsigma, vtau, vlapl = atom.libxc_eval(r, functional=test_functional, restricted=False)\n Exc = (np.dot(nE, grid)).item()\n print('{:3s} {:.10f}'.format(a, Exc))\n else:\n print(\"\\nNot doing energy calculations due to lack of libxc.\\n\")",
"def test_f_divergence(alpha, dist1, dist2):\n def f_alpha(alpha):\n if alpha == 1:\n def f(x):\n return x * np.log2(x)\n elif alpha == -1:\n def f(x):\n return - np.log2(x)\n else:\n def f(x):\n return 4.0 / (1.0 - alpha**2) * (1.0 - np.power(x, (1.0 + alpha) / 2))\n return f\n\n def f_tsallis(alpha):\n if alpha == 1:\n def f(x):\n return -np.log2(x)\n else:\n def f(x):\n return (np.power(x, 1.0 - alpha) - 1.0) / (alpha - 1.0)\n return f\n\n test_functions = [\n (f_alpha(alpha), partial(alpha_divergence, alpha=alpha)),\n (f_tsallis(alpha), partial(tsallis_divergence, alpha=alpha)),\n ]\n\n for f, div_func in test_functions:\n div1 = f_divergence(dist1, dist2, f)\n div2 = div_func(dist1, dist2)\n assert div1 == pytest.approx(div2, abs=1e-1)",
"def test_multiple(self):\n df = self.df.copy()\n n = df.index.size\n arr = df.values\n out = np_cross_ratios(arr)\n self.assertTrue(np.isfinite(out).any())\n self.assertTrue((out[np.isfinite(out)] > 0).all())\n self.assertTrue(out.shape == (n, self.d, self.d))",
"def test_F(x, y, level):\n if len(x) < 2 or len(y) < 2:\n return True\n vx = np.var(x, 0, ddof=1)\n vy = np.var(y, 0, ddof=1)\n vx, vy = vx[vx*vy>0], vy[vx*vy>0]\n if len(vx)==0:\n return False\n F = vx/vy\n p_value = stat.f.cdf(F, len(x)-1, len(y)-1)\n p_value = 2*np.min([p_value, 1-p_value], axis=0)\n if np.any(p_value < level):\n return False\n else:\n return True",
"def test_density_exp(self):\n run_bottleneck.remote(100, 1, 10, render=False)",
"def density(self):\n raise TypeError(\"The density function is not support on a Multigraph.\")"
] | [
"0.72719777",
"0.6636601",
"0.62585735",
"0.6246495",
"0.6211138",
"0.61260253",
"0.6116143",
"0.61090654",
"0.60379136",
"0.603717",
"0.60360193",
"0.6025429",
"0.5937065",
"0.5931737",
"0.58895713",
"0.58884156",
"0.58661264",
"0.5830155",
"0.58052385",
"0.5774332",
"0.5773483",
"0.57370794",
"0.5716788",
"0.5691888",
"0.56750166",
"0.5664361",
"0.5650916",
"0.5650025",
"0.56336975",
"0.56317353"
] | 0.73319197 | 0 |
Test density function for multiple values at once | def test_density_multiple(self):
earth = CoreMantleCrustModel()
radii = np.linspace(0, 6500e3, 6501)
expected = [earth.density(r) for r in radii]
assert np.array_equal(earth.density(radii), expected) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_density_multiple(self):\n earth = PREM()\n radii = np.linspace(0, 6500e3, 6501)\n expected = [earth.density(r) for r in radii]\n assert np.array_equal(earth.density(radii), expected)",
"def test_probability_density(self):\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data)\n X = np.array([2000., 200., 0.])\n expected_result = 0.032245296420409846\n\n # Run\n result = copula.probability_density(X)\n\n # Check\n assert expected_result - 1e-16 < result < expected_result + 1e-16",
"def _check_density(density, n_features):\n if density == \"auto\":\n density = 1 / np.sqrt(n_features)\n\n elif density <= 0 or density > 1:\n raise ValueError(\"Expected density in range ]0, 1], got: %r\" % density)\n return density",
"def test_density(self):\n earth = CoreMantleCrustModel()\n assert earth.density(0) == 14\n assert earth.density(1e6) == 14\n assert earth.density(3.464e6) == 14\n assert earth.density(3.5e6) == 3.4\n assert earth.density(5e6) == 3.4\n assert earth.density(6.338e6) == 3.4\n assert earth.density(6.378e6) == 2.9",
"def probability_density(self, X):\n raise NotImplementedError",
"def sd(vals):",
"def rate_density(x, a):\n return a * x",
"def testDensityCalculation(self):\n known_densities = np.array([1.76776695e-01, 1.76776695e-01, 1.76776695e-01,\n 4.59619433e-01, 4.59619433e-01, 1.76776695e-01, 5.00000000e-01, \n 8.84538011e-02, 3.40206909e-02, 2.26040275e-04])\n densities = nb._get_local_densities() \n np.testing.assert_allclose(densities, known_densities)",
"def test_density(self):\n sol = Mader(p_cj=3.0e11, d_cj=8.0e5, gamma=3.0, u_piston=0.0)\n r = np.array([0.7, 0.8])\n t = 6.25e-6\n solrt = sol(r, t)\n np.testing.assert_allclose(solrt.density[0], 2.26666666666663)",
"def _estimate_density(self, x):\n\n self.density_, self.bins_ = np.histogram(x, bins=10, density=True)",
"def example():\n\tN=9000\n\t#true distribution -> uniform between 0 and 3\n\ttrue = np.zeros(N)\n\ttrue[:N/3-1] = 0.5\n\ttrue[N/3:2*N/3-1] = 1.5\n\ttrue[2*N/3:] = 2.5\n\t# measured distribution -> fixed normal distribution with sigma=.5 and mean=1.5\n\tmeasured = np.zeros(N)\n\tmeasured[:1300-1] = 0.5\n\tmeasured[1300:1300+6000-1] = 1.5\n\tmeasured[1300+6000:] = 2.5\n\t# Also return the generating true pdf\n\tx = np.linspace(-1, 5, 500)\n\tpdf = np.zeros([2, len(x)])\n\tpdf[0] = x\n\tpdf[1] = scs.uniform.pdf(x, 0, 3)\n\n\treturn measured, true, pdf, [0,1,2,3]",
"def test_kde_scipy(limits):\n data = np.random.normal(0, 1, 10000)\n grid, density_own = _kde(data, custom_lims=limits)\n density_sp = gaussian_kde(data).evaluate(grid)\n np.testing.assert_almost_equal(density_own.sum(), density_sp.sum(), 1)",
"def find_density(attr, D, h):\n d = D.shape[1]\n n = D.shape[0]\n total = 0\n for xi in D:\n kernel = find_kernel_value(attr, xi, h, d)\n total += kernel\n return total / (n * h ** d)",
"def density(self):\r\n return self.count_ones() / float(self.xspan * self.yspan)",
"def test_density(self, radius, density):\n earth = PREM()\n assert earth.density(radius) == pytest.approx(density, rel=1e-5)",
"def test_exercise_1():\n a, b = 5, 0\n fvals = []\n grid = np.linspace(-3, 4)\n for value in grid:\n fvals.append(get_test_function(value, a, b))\n plt.plot(grid, fvals)",
"def KDE(x, (ll, ul)=('',''),res=1024.):\n #r.assign(\"x\", x)\n \n if ll :\n rn=arange(ll,ul,(ul-ll)/res)\n #print x.shape,rn.shape\n est = kde.gaussian_kde(x.ravel()).evaluate(rn)\n #r.assign(\"ll\", ll)\n #r.assign(\"ul\", ul)\n #est = r('density(x,from=ll, to=ul)') #trims the density borders\n else:\n ll = min(x)\n ul = max(x)\n rn=arange(ll,ul,(ul-ll)/res)\n est = kde.gaussian_kde(x).evaluate(rn)\n #est = r('density(x)')\n print 'No - KDE'\n return {'y':est,'x':rn}",
"def density(self, arg):\n out = 0\n for weight, mean, std in zip(self.weights, self.means, self.stds):\n scale = std * self.data['maturity']**.5\n loc = ((mean - self.data['riskfree']) *\n self.data['maturity'] - scale**2)\n out += weight * scs.norm(loc, scale).pdf(arg)\n return out",
"def prob_density_func(xs,norm=True,data_range='data'):\n if data_range=='data':\n dist_keys = set(xs)\n elif data_range=='ext_data':\n dist_keys = range(min(xs),max(xs)+1)\n else:\n dist_keys = data_range\n \n pdf = dict([(k,0.0) for k in dist_keys])\n for x in xs:\n pdf[x] += 1.0\n if norm:\n pdf.update([(k,pdf[k]/sum(pdf.values())) for k in pdf.keys()])\n return pdf",
"def make_conditional_density(bgm_fit, threshold, sigma, width):\n pass",
"def test_plot_density_no_subset():\n model_ab = from_dict(\n {\n \"a\": np.random.normal(size=200),\n \"b\": np.random.normal(size=200),\n }\n )\n model_bc = from_dict(\n {\n \"b\": np.random.normal(size=200),\n \"c\": np.random.normal(size=200),\n }\n )\n axes = plot_density([model_ab, model_bc])\n assert axes.size == 3",
"def get_density(xs, ys, mu, sigma, DIMENSION=2):\n return np.array([[kde(np.array([x,y]), mu, sigma, DIMENSION) for x in xs] for y in ys])",
"def _calc_density(x: np.ndarray, y: np.ndarray):\n from scipy.stats import gaussian_kde\n\n # Calculate the point density\n xy = np.vstack([x, y])\n z = gaussian_kde(xy)(xy)\n\n min_z = np.min(z)\n max_z = np.max(z)\n\n # Scale between 0 and 1\n scaled_z = (z - min_z) / (max_z - min_z)\n\n return scaled_z",
"def test_stddev(self):\n self.assertEqual(stddev(list1, sample=False), np.std(list1))\n self.assertEqual(stddev(list1), np.std(list1, ddof=1))",
"def test_densities():\n\n actual, r, wt = GridGenerator.make_grid(400)\n grid = 4*pi*r**2*wt\n\n data = AtomData()\n\n print(\"\\nINTEGRATED DENSITY TEST\")\n print(\"=======================\")\n for a in list(data.nuclear_charge.keys()):\n atom = Atom(a)\n Nel = data.electron_count[a]\n d0, d1, g0, g1, t0, t1, l0, l1 = atom.get_densities(r)\n\n # Count electrons per spin channel\n s_occ = AtomData.s_occ.get(a, [0, 0])\n p_occ = AtomData.p_occ.get(a, [0, 0])\n d_occ = AtomData.d_occ.get(a, [0, 0])\n f_occ = AtomData.f_occ.get(a, [0, 0])\n nela = np.sum(s_occ[0])+np.sum(p_occ[0])+np.sum(d_occ[0])+np.sum(f_occ[0])\n nelb = np.sum(s_occ[1])+np.sum(p_occ[1])+np.sum(d_occ[1])+np.sum(f_occ[1])\n assert(nela+nelb == Nel)\n\n id0 = np.dot(d0, grid)\n id1 = np.dot(d1, grid)\n\n diff_0 = id0 - nela\n percent_diff_0 = 100*diff_0/nela\n\n # Check to catch for Hydrogen having no beta electrons\n if nelb > 0.0:\n diff_1 = id1 - nelb\n percent_diff_1 = 100*diff_1/nelb\n else:\n diff_1 = 0.0\n percent_diff_1 = 0.0\n\n print(\"{:>3} - N_0 = ({:4.1f}) {:+2.6e}%, N_1 = ({:4.1f}) {:+2.6e}%, {:}\".format(a, id0, percent_diff_0, id1, percent_diff_1, \"PASSED\" if max(abs(diff_0), abs(diff_1)) < 1e-4 else \"FAILED - \"))\n\n print(\"\\nINTEGRATED KINETIC TEST\")\n print(\"=======================\")\n for a in list(data.ke_test.keys()):\n atom = Atom(a)\n t_bm = data.ke_test[a]\n d0, d1, g0, g1, t0, t1, l0, l1 = atom.get_densities(r)\n\n it0 = np.dot(t0, grid)\n it1 = np.dot(t1, grid)\n itot = it0 + it1\n\n diff = itot - t_bm\n print(\"{:>3} - T = {:+.6e}%, {:}\".format(a, 100*diff/t_bm, \"PASSED\" if abs(100*diff/t_bm) < 1e-2 else \"FAILED - \"))\n\n\n # The integral of the Laplacian over all space should be 0. Check that.\n print(\"\\nINTEGRATED LAPLACIAN TEST\")\n print(\"=========================\")\n for a in list(AtomData.ke_test.keys()):\n atom = Atom(a)\n\n d0, d1, g0, g1, t0, t1, l0, l1 = atom.get_densities(r)\n\n il0 = np.dot(grid, l0)\n il1 = np.dot(grid, l1)\n print(\"{:>3} - L_0 = {:+.6e}, L_1 = {:+.6e}, {:}\".format(a, il0, il1, \"PASSED\" if max(abs(il0), abs(il1)) < 1e-6 else \"FAILED - \"))\n\n\n print(\"\\nFINITE DIFFERENCE GRADIENT TEST\")\n print(\"===============================\")\n print(\"Testing gradient evaluation function against finite difference estimate...\")\n ne = Atom(\"Ne\") # Let's use \"the guvnor\"\n # We only need to test a few points around the core\n fdh = 1e-8\n fdr = np.arange(0.9, 0.9+fdh*10, fdh)\n d0, d1, g0, g1, t0, t1, l0, l1 = ne.get_densities(fdr)\n\n # First the first central difference\n fdiff = (d0[2:]-d0[:-2])/(2*fdh) # Construct the central difference\n if np.allclose(fdiff, g0[1:-1], atol=1e-1): # finite difference is not perfect, so lenient tollerance\n print(\"Gradient: PASSED\")\n else:\n print(\"Gradient: FAILED -\")\n\n print(\"\\nELEMENT COLOR FUNCTIONS TEST\")\n print(\"===========================\")\n test_obj = [Atom(\"H\"), Atom(\"C\"), Atom(\"O\")]\n test_str = [\"H\", \"C\", \"O\"]\n ref = np.array([[1., 1., 1.], [0.565, 0.565, 0.565], [1. 
, 0.051, 0.051]])\n\n if np.allclose( np.array(get_colors_for_elements(test_obj)), ref):\n print(\"\\nColor from objects: PASSED\")\n else:\n print(\"\\nColor from objects: FAILED -\")\n\n if np.allclose( np.array(get_colors_for_elements(test_str)), ref):\n print(\"Color from strings: PASSED\")\n else:\n print(\"Color from strings: FAILED -\")\n\n if HAVE_LIBXC:\n test_functional='GGA_X_PBE'\n print(\"\\nATOMIC EXCHANGE ENERGIES WITH {}\".format(test_functional))\n print(\"============================================\")\n for a in list(data.ke_test.keys()):\n atom = Atom(a)\n nE, vrho, vsigma, vtau, vlapl = atom.libxc_eval(r, functional=test_functional, restricted=False)\n Exc = (np.dot(nE, grid)).item()\n print('{:3s} {:.10f}'.format(a, Exc))\n else:\n print(\"\\nNot doing energy calculations due to lack of libxc.\\n\")",
"def test_f_divergence(alpha, dist1, dist2):\n def f_alpha(alpha):\n if alpha == 1:\n def f(x):\n return x * np.log2(x)\n elif alpha == -1:\n def f(x):\n return - np.log2(x)\n else:\n def f(x):\n return 4.0 / (1.0 - alpha**2) * (1.0 - np.power(x, (1.0 + alpha) / 2))\n return f\n\n def f_tsallis(alpha):\n if alpha == 1:\n def f(x):\n return -np.log2(x)\n else:\n def f(x):\n return (np.power(x, 1.0 - alpha) - 1.0) / (alpha - 1.0)\n return f\n\n test_functions = [\n (f_alpha(alpha), partial(alpha_divergence, alpha=alpha)),\n (f_tsallis(alpha), partial(tsallis_divergence, alpha=alpha)),\n ]\n\n for f, div_func in test_functions:\n div1 = f_divergence(dist1, dist2, f)\n div2 = div_func(dist1, dist2)\n assert div1 == pytest.approx(div2, abs=1e-1)",
"def test_multiple(self):\n df = self.df.copy()\n n = df.index.size\n arr = df.values\n out = np_cross_ratios(arr)\n self.assertTrue(np.isfinite(out).any())\n self.assertTrue((out[np.isfinite(out)] > 0).all())\n self.assertTrue(out.shape == (n, self.d, self.d))",
"def test_F(x, y, level):\n if len(x) < 2 or len(y) < 2:\n return True\n vx = np.var(x, 0, ddof=1)\n vy = np.var(y, 0, ddof=1)\n vx, vy = vx[vx*vy>0], vy[vx*vy>0]\n if len(vx)==0:\n return False\n F = vx/vy\n p_value = stat.f.cdf(F, len(x)-1, len(y)-1)\n p_value = 2*np.min([p_value, 1-p_value], axis=0)\n if np.any(p_value < level):\n return False\n else:\n return True",
"def test_density_exp(self):\n run_bottleneck.remote(100, 1, 10, render=False)",
"def density(self):\n raise TypeError(\"The density function is not support on a Multigraph.\")"
] | [
"0.73319197",
"0.6636601",
"0.62585735",
"0.6246495",
"0.6211138",
"0.61260253",
"0.6116143",
"0.61090654",
"0.60379136",
"0.603717",
"0.60360193",
"0.6025429",
"0.5937065",
"0.5931737",
"0.58895713",
"0.58884156",
"0.58661264",
"0.5830155",
"0.58052385",
"0.5774332",
"0.5773483",
"0.57370794",
"0.5716788",
"0.5691888",
"0.56750166",
"0.5664361",
"0.5650916",
"0.5650025",
"0.56336975",
"0.56317353"
] | 0.72719777 | 1 |
Ensure gitfusionuser has permissions to write to depot. | def check_p4gf_user_write_permission(self):
gf_client_map = P4.Map()
gf_client_map.insert("//...", "//client/...")
utp = p4gf_protect.UserToProtect(self.ctx.p4)
prot = utp.user_to_protect(p4gf_const.P4GF_USER)
gf_write_filter = prot.map_for_perm(p4gf_protect.WRITE)
gf_write_filter = P4.Map.join(gf_write_filter, gf_client_map)
if not gf_write_filter.includes('//{depot}/...'.format(depot=p4gf_const.P4GF_DEPOT)):
raise RuntimeError(_('permission denied')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_writable_(self):\n self._check_within_context_()\n if self._mode != 'w':\n raise Exception('Cannot update database: read only mode')",
"def fix_permissions(cls):\n\n try:\n build_dir = environ[\"TRAVIS_BUILD_DIR\"]\n commands = [\n \"sudo chown -R travis:travis %s\" % (build_dir),\n \"sudo chgrp -R travis %s\" % (build_dir),\n \"sudo chmod -R g+rwX %s\" % (build_dir),\n \"sudo chmod 777 -Rf %s.git\" % (build_dir + directory_separator),\n r\"sudo find %s -type d -exec chmod g+x '{}' \\;\" % (build_dir),\n ]\n\n for command in commands:\n Helpers.Command(command, True).execute()\n\n if (\n Helpers.Command(\"git config core.sharedRepository\", False).execute()\n == \"\"\n ):\n Helpers.Command(\n \"git config core.sharedRepository group\", False\n ).execute()\n except KeyError:\n pass",
"def has_write_permission(request):\n user = request.user\n return user.is_superuser",
"def _check_stream_writable(self, fe_commit):\n if not self._current_branch.stream_name:\n return\n prefix = self._current_branch.writable_stream_name + '/'\n for fe_file in fe_commit['files']:\n gwt_path = fe_file['path']\n depot_path = self.ctx.gwt_path(gwt_path).to_depot()\n if depot_path.startswith(prefix):\n continue\n\n human_msg = (_(\n \"Cannot commit {sha1} '{gwt_path}' to '{depot_path}'.\"\n \" Paths not in stream '{stream}' are read-only for branch '{b}'.\")\n .format( sha1 = p4gf_util.abbrev(fe_commit['sha1'])\n , gwt_path = gwt_path\n , depot_path = depot_path\n , stream = self._current_branch.writable_stream_name\n , b = self._current_branch.branch_id ))\n raise PreflightException(human_msg)",
"def setup_permissions():\n sudo('chown %s:%s -R %s' % (env.apache_user, env.apache_user, env.whole_path_symlinked))",
"def _validate_can_write(self):\n if self._mode not in WRITE_MODES:\n raise IOError(\"File is not writable\")\n if self.Writable == 'no':\n raise IOError(\"'Writable' flag is 'no'\")",
"def testUpdateAccessAllowed(self):\n for user in (self.contributor, self.delegate, self.owner, self.root):\n response = self.runPut(user, sequencer=self.hiseq2000.sodar_uuid, data=self.post_data)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(data[\"vendor_id\"], self.post_data[\"vendor_id\"])",
"def ensure_correct_user(self):\n username = getpass.getuser()\n # xxx thow a suitable exception.\n assert username == 'debrepo', ('this command must be run as user `debrepo`, not', username)",
"def require_project_writer(project):\n if not test_project_writer(project):\n raise cherrypy.HTTPError(403)",
"def canwrite(self):\n return False",
"def check_writable ( self,\n fspath, mkdir_chown=False, mkdir_chmod=False, mkdir_p=True\n ):\n success = False\n\n ERRNOS_IGNORE = { errno.EACCES, }\n\n try:\n if self.do_touch ( fspath ):\n success = True\n\n except IOError as ioerr:\n if ioerr.errno == errno.EPERM:\n pass\n elif ioerr.errno == errno.ENOENT:\n try:\n if self.dodir (\n os.path.dirname ( fspath ),\n chown=mkdir_chown, chmod=mkdir_chmod, mkdir_p=mkdir_p\n ) and self.do_touch ( fspath ):\n success = True\n\n except ( OSError, IOError ) as err:\n if err.errno == errno.EPERM:\n pass\n elif err.errno in ERRNOS_IGNORE:\n self.error (\n 'Got {name} with unexpected '\n 'errno={code:d} ({code_name})\\n'.format (\n name = err.__class__.__name__,\n code = err.errno,\n code_name = errno.errorcode [err.errno],\n )\n )\n else:\n raise\n # -- end <try again>\n elif ioerr.errno in ERRNOS_IGNORE:\n self.error (\n 'Got {name} with unexpected '\n 'errno={code:d} ({code_name})\\n'.format (\n name = ioerr.__class__.__name__,\n code = ioerr.errno,\n code_name = errno.errorcode [ioerr.errno],\n )\n )\n else:\n raise\n return success",
"def test_rename_org_permissions(client: Client) -> None:\n with dev_login(client, 'admin'):\n # Create an org\n resp = client.post('/api/v1/org', json={\n 'name': 'testorg12'\n })\n assert 200 <= resp.status_code <= 300\n org_id = resp.json['id']\n\n with dev_login(client, 'user'):\n # Check that can't rename the org\n resp = client.put(f'/api/v1/org/{org_id}/rename/kevinwasheretestrename')\n assert resp.status_code == 403",
"def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)",
"def test_user_cannot_write(app, resource):\n with app.user():\n data = {}\n\n # Try to post something\n app.client.post('/' + resource,\n data=data,\n assert_status=403)\n\n # Create fake item, try to patch/delete it\n _id = app.data.driver.db[resource].insert({})\n app.client.patch('/%s/%s' % (resource, _id),\n data=data,\n assert_status=403)\n app.client.delete('/%s/%s' % (resource, _id),\n assert_status=403)",
"def require_project_administrator(project):\n if not test_project_administrator(project):\n raise cherrypy.HTTPError(403)",
"def check_permission():\n if IS_ADMIN:\n out_info(\"Running as Root/Admin\")\n else:\n out_warning(\"Running without root/admin privileges\")",
"def test_update_write_fail(self):\n self.task_storage.add(self.my_task)\n os.chmod(self.test_task_filename, 0400)\n\n self.assertRaises(IOError, self.task_storage.update, self.my_task)",
"def MakeWritable():\n return shell.ShellCommand(\n name = \"make writable\",\n haltOnFailure = 1,\n description = [\"making writable\"],\n descriptionDone = [\"made writable\"],\n command = [\"chmod\", \"-R\", \"+w\", \".\"],\n )",
"def can_edit_or_403(self, user):\n if self.get_permission_level(user) < self.OWNER_PERMISSION:\n raise PermissionDenied\n return True",
"def can_write(self, auth_param: str) -> bool:\n perms = self._get_workspace_permissions([auth_param])\n return self._has_write_perm(perms.get(auth_param, WorkspacePermission.NONE))",
"def has_write_permission(request):\n # TODO: Stop users from writing other users' pageranks. Why should that be so hard?\n return request.user.is_authenticated",
"def test_project_writer(project):\n if is_server_administrator():\n return True\n if is_project_administrator(project):\n return True\n if is_project_writer(project):\n return True\n return False",
"def can_edit_or_403(self, user):\n if user.id != self.game_master.id:\n raise PermissionDenied\n return True",
"def deploy():\n with cd(\"~/public_html/\"):\n run(\"/usr/local/cpanel/3rdparty/bin/git pull\")\n\n with cd(\"~/public_html/skin/frontend/gemz/default/tools/\"):\n run(\"grunt default\")\n #sudo(\"/scripts/enablefileprotect\")",
"def test_new_write_permissions(self, context, permissions):\n\n context.config_exists.return_value = False\n permissions.return_value = False\n\n runner = CliRunner()\n result = runner.invoke(cli_node_new_configuration, [\n \"--name\", \"some-name\",\n \"--environment\", \"application\"\n ])\n\n # check that error is produced\n self.assertEqual(result.output[:7], \"[error]\")\n\n # check non-zero exit code\n self.assertEqual(result.exit_code, 1)",
"def setup_volume_access( user_email, volume_name, caps, RG_port, slice_secret, RG_closure=None ):\n client = connect_syndicate()\n \n try:\n rc = ensure_volume_access_right_exists( user_email, volume_name, caps )\n assert rc is True, \"Failed to create access right for %s in %s\" % (user_email, volume_name)\n \n except Exception, e:\n logger.exception(e)\n return False\n \n RG_name = syndicate_provisioning.make_gateway_name( \"OpenCloud\", \"RG\", volume_name, \"localhost\" )\n RG_key_password = syndicate_provisioning.make_gateway_private_key_password( RG_name, slice_secret )\n \n try:\n rc = syndicate_provisioning.ensure_RG_exists( client, user_email, volume_name, RG_name, \"localhost\", RG_port, RG_key_password, closure=RG_closure )\n except Exception, e:\n logger.exception(e)\n return False\n \n return True",
"def check_write_permission():\n if platform != \"android\":\n return True\n from android.permissions import Permission, check_permission\n permission = Permission.WRITE_EXTERNAL_STORAGE\n return check_permission(permission)",
"def check_write_permission():\n if platform != \"android\":\n return True\n from android.permissions import Permission, check_permission\n permission = Permission.WRITE_EXTERNAL_STORAGE\n return check_permission(permission)",
"def write_authorize(cls, user, obj):\n if not cls._meta.model.published_where_is_examiner(user).filter(id=obj.id):\n raise PermissionDenied()\n if obj.id == None:\n raise PermissionDenied() # We only allow update",
"def test_auth_sharable_admin(self):\n self.do_sharable(True, 'pattieblack', None, tenant='froggy',\n is_admin=True)"
] | [
"0.61675584",
"0.59262735",
"0.5913495",
"0.57361317",
"0.57273996",
"0.5724241",
"0.5694993",
"0.56913006",
"0.56799525",
"0.56661433",
"0.56573737",
"0.5642319",
"0.5618935",
"0.56070924",
"0.5567714",
"0.5564018",
"0.5546084",
"0.55418223",
"0.5533661",
"0.54613906",
"0.54562056",
"0.5452126",
"0.5428295",
"0.5416481",
"0.5403857",
"0.53887177",
"0.53742164",
"0.53742164",
"0.5371639",
"0.5362076"
] | 0.66640127 | 0 |
Return a dict of depot_path => user of any locked files. | def _find_locked_by(self):
fstat_flags = NTR('otherLock | otherOpen0 & headType=*+l')
any_locked_files = {} # depot_path : user
for branch_chunk in self.ctx.iter_writable_branch_chunks():
# Skip any newly defined branches: they're new, won't contain any
# files yet, and won't get a view definition until later at per-
# commit preflight time.
bvl = [b for b in branch_chunk if b.view_lines]
if not bvl:
continue
with self.ctx.switched_to_union(bvl):
r = self.ctx.p4run('fstat', '-F', fstat_flags, '-m1',
'//{}/...'.format(self.ctx.p4.client),
log_warnings=logging.DEBUG)
# Collect a dictionary of the locked files from the writable union of branch views
for lf in r:
user = lf['otherOpen'][0] if 'otherOpen' in lf else NTR('<unknown>')
any_locked_files[lf['depotFile']] = user
return any_locked_files | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getLockInfoOfNonDerivedFiles(self, ids, wspLockId):\n sql = \"\"\"\n SELECT cdb_file.cdb_lock,\n cdb_file.cdb_lock_id,\n cdb_file.cdbf_object_id,\n cdb_file.cdb_object_id,\n angestellter.name AS mapped_cdb_lock_name\n FROM\n cdb_file\n LEFT JOIN\n angestellter\n ON\n cdb_file.cdb_lock = angestellter.personalnummer\n WHERE\n cdb_file.cdb_classname = 'cdb_file'\n AND (cdb_file.cdb_belongsto='' OR cdb_file.cdb_belongsto IS NULL)\n \"\"\"\n records = partionedSqlQuery(sql, \"cdb_file.cdbf_object_id\", ids)\n res = defaultdict(dict)\n for r in records:\n status = u\"not\"\n lockerName = u\"\"\n locker = r.cdb_lock\n if locker:\n lockerName = r.mapped_cdb_lock_name\n if lockerName is None:\n misc.cdblogv(misc.kLogMsg, 0,\n \"WsObjectCache, warning: file '%s' of document '%s' is locked\"\n \" by unknown user '%s' (no matching name in 'angestellter')\"\n % (r.cdb_object_id, r.cdbf_object_id, locker))\n lockerName = u\"\"\n if locker == auth.persno:\n status = u\"self\"\n lockId = r.cdb_lock_id\n if lockId and wspLockId:\n if lockId != wspLockId:\n status = u\"other_ws\"\n else:\n status = u\"other\"\n res[r.cdbf_object_id][r.cdb_object_id] = {'status': status, 'locker': lockerName}\n return res",
"def renku_op(self):\n\n files = [f for f in self.cache.get_files(self.user) if f.exists()]\n return {\"files\": sorted(files, key=lambda rec: (rec.is_dir, rec.relative_path))}",
"def pipfile_lock_names(self):\n return ext_split(self.pipfile_locks, \"Pipfile.lock\")",
"def fs_files_private_used(self):\n return self._fs_files_private_used",
"def getuserrepos_keys(gh, user):\n repos = getuserrepos(gh, user)\n return repos[0].keys()",
"def locked(user):\n\n cmd = \"lsuser -a account_locked {}\".format(user)\n cmd += ' | grep \"account_locked=true\"'\n out = __salt__[\"cmd.run_all\"](cmd, output_loglevel=\"trace\", python_shell=True)\n\n ret = []\n\n lines = out[\"stdout\"].splitlines()\n for line in lines:\n ret.append(line.split()[0])\n\n return ret",
"def pip_lock_file() -> None:\n with open(\"requirements.freeze.all\", \"w\") as ff:\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"freeze\"], stdout=ff)\n with open(\"requirements.freeze.tmp\", \"w\") as ff:\n subprocess.check_call([\"grep\", \"inmanta\", \"requirements.freeze.all\"], stdout=ff)\n # pip freeze can produce lines with @ that refer to folders outside the container\n # see also https://github.com/pypa/pip/issues/8174\n # also ignore inmanta-dev-dependencies as this is pinned in the requirements.dev.txt\n with open(\"requirements.freeze\", \"w\") as ff:\n subprocess.check_call(\n [\n \"grep\",\n \"-v\",\n \"-e\",\n \"@\",\n \"-e\",\n \"inmanta-dev-dependencies\",\n \"-e\",\n \"inmanta-module-\",\n \"requirements.freeze.tmp\",\n ],\n stdout=ff,\n )\n yield",
"def protectedfiles(self):\n return self._protectedpaths",
"def list_users_in_pool():\n files = []\n USERS_DIR = os.path.join(UPLOAD_DIRECTORY, \"users\")\n for filename in os.listdir(USERS_DIR):\n path = os.path.join(USERS_DIR, filename)\n if os.path.isdir(path):\n files.append(filename)\n return jsonify(files)",
"def locked(self):\n return self._filelock.locked",
"def locked(self):\r\n for result in p4run('opened', '-a', self.__path):\r\n if '+l' in result['type'] or '+m' in result['type']:\r\n user = P4User(result['user'])\r\n client = P4Client(result['client'])\r\n return user, client",
"def getUsers(self):\n return [u[0] for u in pwd.getpwall()\n if (u[5].startswith('/home/') and u[6].endswith('sh'))]",
"def git_user_updates(self):\n\n suffixes = ['.pacnew', '.pacsave', '.pacorig']\n etc_files = {n: EtcPath(self.root_dir, n) for n in\n list_rpaths(self.root_dir, ROOT_SUBDIR,\n suffixes=suffixes)}\n etc_tracked = self.repo.tracked_files('etc-tmp')\n\n # Build the list of etc-tmp files that are different from their\n # counterpart in /etc.\n self.repo.checkout('etc-tmp')\n to_check_in_master = []\n for rpath in etc_files:\n if rpath in etc_tracked:\n # Issue #16. Do not add an /etc file that has been made not\n # readable after a pacman upgrade.\n if (etc_files[rpath].digest != b'' and\n etc_files[rpath] != etc_tracked[rpath]):\n to_check_in_master.append(rpath)\n\n master_tracked = self.repo.tracked_files('master-tmp')\n\n # Build the list of master-tmp files:\n # * To add when the file does not exist in master-tmp and its\n # counterpart in etc-tmp is different from the /etc file.\n # * To update when the file exists in master-tmp and is different\n # from the /etc file.\n for rpath in to_check_in_master:\n if rpath not in master_tracked:\n self.master_commits.user_updated.rpaths.append(rpath)\n self.repo.checkout('master-tmp')\n for rpath in etc_files:\n if (rpath in master_tracked and rpath not in\n self.master_commits.added.rpaths):\n if etc_files[rpath].digest == b'':\n warn('cannot read %s' % etc_files[rpath].path)\n elif etc_files[rpath] != master_tracked[rpath]:\n self.master_commits.user_updated.rpaths.append(rpath)\n\n for rpath in self.master_commits.user_updated.rpaths:\n copy_file(rpath, self.root_dir, self.repodir)\n self.master_commits.user_updated.commit()",
"def _update_lock_files(self, lock_files):\n _, _lock_file, _other_lock_files = _temporary_files[\n self._subarray._partition_file\n ]\n _other_lock_files.update(set(lock_files))\n if _lock_file in _other_lock_files:\n # If the lock file managed by this rank is in the list of\n # lock files managed by other ranks, remove it from there\n _other_lock_files.remove(_lock_file)",
"def get_config():\n\t# At the beginning, look for access token.\n\t# If token files do not exist, register the token first.\n\tif not os.path.exists(users_dir) or len(os.listdir(users_dir)) == 0:\n\t\tregister()\n\tfor user_dir in [x[0] for x in os.walk(users_dir)][1:]:\n\t\tuser_name = os.path.basename(user_dir)\n\t\tusers[user_name] = {}\n\t\tfor varname in conf_files:\n\t\t\tpath = user_dir + '/' + varname\n\t\t\tif os.path.exists(path):\n\t\t\t\tf = open(path, 'r')\n\t\t\t\tread = f.read();\n\t\t\t\tusers[user_name][varname] = read\n\t\t\t\tf.close()\n\t\t\telse:\n\t\t\t\tshutil.rmtree(user_dir)\n\t\t\t\tusers.pop(user_name)\n\t\t\t\tprint('Missing config file of @'+user_name+'.')\n\t\t\t\tprint('Type `register()` to relogin.')\n\t\t\t\tbreak",
"def get_user_folders_dict(user_id):\n return { folder['full_name'] : folder['id'] for folder in canvas_api.pull_folders(user_id) }",
"def get_lock_file(name):\n\n # Sanitize the global lock name by using URL-style quoting, which\n # keeps most ASCII characters (nice) and turns the rest into ASCII.\n name = urllib.parse.quote_plus(name)\n\n # Add a global thing for ourself.\n name = \"py_exclusivelock_\" + name\n\n if os.path.isdir(\"/var/lock\"):\n return \"/var/lock/%s.lock\" % name\n return os.path.join(tempfile.gettempdir(), name + \".pid\")",
"def get_dirty_paths_by_status(self) -> Dict[str, List[Path]]:\n output = zsplit(git.status(\"--porcelain\", \"-z\").stdout.decode())\n return bucketize(\n output,\n key=lambda line: line[0],\n value_transform=lambda line: Path(line[3:]),\n )",
"def listusers():\n allusers = []\n with open('/etc/passwd', 'r') as pw:\n for l in pw.readlines():\n allusers.append(l.split(':')[0])\n users = [ d for d in os.listdir(\"/home\") if d in allusers ]\n return(users)",
"def _unpack_owning_proxyfs(self, req):\n\n return (req.environ.get(utils.ENV_IS_BIMODAL),\n req.environ.get(utils.ENV_OWNING_PROXYFS))",
"def get_lock_entry(project_dir: Path, command: Dict[str, Any]) -> Dict[str, Any]:\n deps = get_fileinfo(project_dir, command.get(\"deps\", []))\n outs = get_fileinfo(project_dir, command.get(\"outputs\", []))\n outs_nc = get_fileinfo(project_dir, command.get(\"outputs_no_cache\", []))\n return {\n \"cmd\": f\"{COMMAND} run {command['name']}\",\n \"script\": command[\"script\"],\n \"deps\": deps,\n \"outs\": [*outs, *outs_nc],\n \"spacy_version\": about.__version__,\n \"spacy_git_version\": GIT_VERSION,\n }",
"def _read_lockfile(self):\n try:\n with open(self.lockfile) as f:\n return f.read()\n except EnvironmentError as e:\n if e.errno in self.NOT_EXIST_ERRORS:\n return None\n raise",
"def test_lock_missing_cache_entries_gets_all_hashes(PipenvInstance, tmpdir):\n\n with temp_environ():\n os.environ[\"PIPENV_CACHE_DIR\"] = str(tmpdir.strpath)\n with PipenvInstance(chdir=True) as p:\n p._pipfile.add(\"pathlib2\", \"*\")\n assert \"pathlib2\" in p.pipfile[\"packages\"]\n c = p.pipenv(\"install\")\n assert c.return_code == 0, (c.err, (\"\\n\".join([\"{0}: {1}\\n\".format(k, v) for k, v in os.environ.items()])))\n c = p.pipenv(\"lock --clear\")\n assert c.return_code == 0, c.err\n assert \"pathlib2\" in p.lockfile[\"default\"]\n assert \"scandir\" in p.lockfile[\"default\"]\n assert isinstance(p.lockfile[\"default\"][\"scandir\"][\"hashes\"], list)\n assert len(p.lockfile[\"default\"][\"scandir\"][\"hashes\"]) > 1",
"def locks(self):\r\n params = {'f' : 'json'}\r\n url = \"%s/lockInfos\" % self._url\r\n return self._con.post(url, params)['lockInfos']",
"def get_lock_file():\n if OPTIONS.pidfile:\n return expanduser(OPTIONS.pidfile)\n\n if os.name == 'posix':\n return '/var/run/pickup.pid'\n elif os.name == 'nt':\n lock_file = join(os.environ['APPDATA'], 'pickup', 'pickup.pid')\n os.makedirs(dirname(lock_file))\n return lock_file\n else:\n LOG.error('Unable to create the lock file on this OS (%r)' % os.name)\n sys.exit(9)",
"def lockfile(self):\n return op.join(self._basedir, self._lockfilename)",
"def getFileUsed():\n\n # output will be {} if the file passed into Configuration._readConfigFile\n # can not be found in the standard paths returned by\n # Configuration._getConfigPaths.\n output = Configuration._readConfigFile(USER_CONFIG) #pylint: disable=protected-access\n if output != {}:\n return USER_CONFIG\n return DEFAULT_CONFIG",
"def acquire(self):\r\n start_time = time.time()\r\n import getpass\r\n userName = getpass.getuser()\r\n import platform\r\n computerName = platform.uname()[1]\r\n while True:\r\n try:\r\n self.fd = os.open(self.lockfile, os.O_CREAT|os.O_EXCL|os.O_RDWR)\r\n os.write(self.fd, userName + '\\n')\r\n os.write(self.fd, computerName + '\\n')\r\n os.write(self.fd, time.ctime(time.time()))\r\n break;\r\n except OSError as e:\r\n if e.errno != errno.EEXIST and e.errno != errno.EACCES:\r\n raise \r\n if (time.time() - start_time) >= self.timeout:\r\n if e.errno == errno.EEXIST:\r\n raise FileLockException(\"Timeout occured.\")\r\n else:\r\n raise FileLockException(\"Access denied.\")\r\n time.sleep(self.delay)\r\n self.is_locked = True",
"def build_client_snapshot(self):\n self.client_snapshot = {}\n for dirpath, dirs, files in os.walk(self.cfg['sharing_path']):\n for filename in files:\n filepath = os.path.join(dirpath, filename)\n unwanted_file = False\n for r in Daemon.IGNORED_REGEX:\n if re.match(r, filepath) is not None:\n unwanted_file = True\n print 'Ignored Path:', filepath\n break\n if not unwanted_file:\n relative_path = self.relativize_path(filepath)\n with open(filepath, 'rb') as f:\n self.client_snapshot[relative_path] = ['', hashlib.md5(f.read()).hexdigest()]",
"def files():\n return get_cached(\"files.json\")"
] | [
"0.5967704",
"0.58132005",
"0.5756078",
"0.5530532",
"0.55011237",
"0.54566556",
"0.538601",
"0.5357067",
"0.5334322",
"0.5324496",
"0.53202266",
"0.5259777",
"0.5258062",
"0.5243739",
"0.5193406",
"0.5120801",
"0.50814766",
"0.5075854",
"0.5063202",
"0.50581175",
"0.5055285",
"0.50483",
"0.50286",
"0.5020283",
"0.50198704",
"0.50076884",
"0.49807033",
"0.49726886",
"0.49723318",
"0.49659762"
] | 0.7146783 | 0 |
Ensure the entire sequence of commits will (likely) go through without any errors related to permissions or locks. Raises an exception if anything goes wrong. | def check_commits(self, commits):
LOG.info('Checking Perforce permissions and locks')
self.ctx.checkpoint("copy_to_p4._preflight_check")
# Stop if files are opened in our repo client
# We expect this to be none, since we have the view lock
opened = self.ctx.p4.run(['opened', '-m1'])
if opened:
raise PreflightException(_('There are files opened by Git Fusion for this repo.'))
# fetch the repo setting only, without cascading to global config
is_read_only = self.ctx.repo_config.getboolean(p4gf_config.SECTION_REPO,
p4gf_config.KEY_READ_ONLY,
fallback=False)
if is_read_only:
raise PreflightException(_("Push to repo {repo_name} prohibited.")
.format(repo_name=self.ctx.config.repo_name))
# get a list of stream depots for later checks for read-only paths
depots = self.ctx.p4.run(['depots'])
self.stream_depots = set([d['name'] for d in depots if d['type'] == 'stream'])
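        # Files locked by other users/clients; commits touching any of these
        # are rejected later in check_commit_for_branch().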
any_locked_files = self._find_locked_by()
LOG.debug("any_locked_files {0}".format(any_locked_files))
case_conflict_checker = None
if not self.ctx.server_is_case_sensitive:
case_conflict_checker = CaseConflictChecker(self.ctx)
case_conflict_checker.read_perforce_paths()
ui_name = self._curr_ref_ui_name()
if ui_name:
progress_msg = _('Checking commits for {ref}...').format(ref=ui_name)
else:
progress_msg = _('Checking commits...')
with ProgressReporter.Determinate(len(commits)):
for commit in commits:
ProgressReporter.increment(progress_msg)
self.g2p_user.get_author_pusher_owner(commit)
rev = commit['sha1']
if not self.assigner.is_assigned(commit['sha1']):
continue
self.check_commit(commit)
for branch_id in self.assigner.branch_id_list(rev):
self.check_commit_for_branch(
commit
, branch_id
, any_locked_files
, case_conflict_checker )
if case_conflict_checker:
cc_text = case_conflict_checker.conflict_text()
if cc_text:
raise PreflightException(cc_text) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_commit(self):\n # TODO: Test errors while committing and recovery\n pass",
"def commit_unless_managed(self):\n if not self.is_managed():\n self.commit()",
"def check_commit(self, commit):\n # pylint: disable=too-many-branches\n if LOG.isEnabledFor(logging.DEBUG):\n LOG.debug('check_commit() Checking mark={} sha1={} file-ct={} -- {}'\n .format( commit['mark']\n , p4gf_util.abbrev(commit['sha1'])\n , len(commit['files'])\n , repr(commit['data'])[:20].splitlines()[0]))\n\n if not commit['author_p4user']:\n raise PreflightException(_(\"User '{user}' not permitted to commit\")\n .format(user=commit['author']['email'].strip('<>')))\n\n if 'merge' in commit:\n ref_is_review = (self.gsreview_coll and\n self.gsreview_coll.ref_in_review_list(self._current_prt.ref))\n if not ref_is_review and not self.ctx.merge_commits:\n raise PreflightException(_('Merge commits are not enabled for this repo.'))\n if (not ref_is_review and\n not self.ctx.branch_creation and self.assigner.have_anonymous_branches):\n msg = _('Git branch creation is prohibited for this repo.')\n p4_branch_names_non_lw = [b.git_branch_name for b in self.ctx.branch_dict().values()\n if b.git_branch_name and not b.is_lightweight]\n if len(p4_branch_names_non_lw) > 1:\n msg += _('\\nThis repo has more than one named branch.'\n '\\nTry altering the push order - '\n 'pushing branches with merge ancestors first.')\n raise PreflightException(msg)\n if LOG.isEnabledFor(logging.DEBUG):\n for parent_mark in commit['merge']:\n parent_sha1 = self.fast_export_marks.get_commit(parent_mark)[:7]\n LOG.debug(\"check_commit() merge mark={} sha1={}\"\n .format(parent_mark, parent_sha1))\n\n if not self.ctx.submodules and 'files' in commit:\n for f in commit['files']:\n if f.get('mode') == '160000':\n if 'first_commit' in commit and not self._path_added(f.get('path'), commit):\n LOG.debug2('check_commit() passed {} in {}'.format(\n f.get('path'), p4gf_util.abbrev(commit['sha1'])))\n continue\n raise PreflightException(\n _('Git submodules not permitted: path={path} commit={commit_sha1}')\n .format(path=f.get('path'), commit_sha1=p4gf_util.abbrev(commit['sha1'])))\n\n for f in commit['files']:\n LOG.debug3(\"check_commit : commit files: \" + _log_fe_file(f))\n err = check_valid_filename(f['path'], self.ctx)\n if err:\n raise PreflightException(err)\n if self.ctx.is_lfs_enabled:\n self._check_lfs(commit, f)\n\n # Warn user about any jobs that appear to not exist\n jobs = G2PJob.lookup_jobs(self.ctx, G2PJob.extract_jobs(commit['data']))\n if jobs:\n for job_id in jobs:\n r = self.ctx.p4run('jobs', '-e', 'job={}'.format(job_id))\n if not r:\n _print_error(_(\"Job '{job_id}' doesn't exist\").format(job_id=job_id))\n # Create pending changes for any Git-Swarm reviews",
"def commitchanges(self): # 3\n res = self.__obj.commitchanges()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)",
"def _auto_commit(self):\n\n # Check if we are supposed to do an auto-commit\n if not self.auto_commit or self.auto_commit_every_n is None:\n return\n\n if self.count_since_commit >= self.auto_commit_every_n:\n self.commit()",
"def commit(self) -> None:\n pass",
"def commit_check(ctx):\n result = ctx.run(f\"{VENV_PREFIX} cz check --rev-range master..\", warn=True)\n if result.exited == 3: # NO_COMMIT_FOUND\n exit(0)\n else:\n exit(result.exited)",
"def commit(self):\n pass",
"def commit(self):\n pass",
"def commit(self):\n pass",
"def commit(self):\n pass",
"def commit(self):\n pass",
"def ensure_full_commit(self):\n path = self._database_path('_ensure_full_commit')\n res = self._request_session.post(path, headers={\"Content-Type\": \"application/json\"})\n res.raise_for_status()\n return res.json()",
"def commit(self):\n # PEP 249\n pass",
"def commit(self):\n return",
"def commit(self):\n raise NotImplementedError",
"def _do_commit(self):",
"def clean_for_commit(self):",
"def resolve_conflicts(self, commit=True):\n pass # pragma: no cover",
"def commit(self) -> None:\n with self.lock:\n self.wait(self._commit_gen())",
"def manual_transaction():\r\n try:\r\n yield\r\n except Exception:\r\n transaction.rollback()\r\n log.exception('Due to an error, this transaction has been rolled back')\r\n raise\r\n else:\r\n transaction.commit()",
"def _check_write_consistency(self):\n self.logger.warning('Not checking write consistency')",
"def database_commit(connector):\n try:\n connector.commit()\n except Exception as e:\n raise Exception(\n \"An error occurred while committing the modifications in the database: %s\"\n % e\n )",
"def __commit(self):\n from sqlalchemy.exc import IntegrityError\n\n try:\n db.session.commit()\n except IntegrityError:\n db.session.rollback()",
"def prepare_for_commit(self):",
"def Commit(self):\n try:\n self.commit_changes([])\n return True\n except:\n return False",
"async def commit(self):\n if await self.is_valid():\n await self.update(committed=True).apply()",
"def check_commit_for_branch( self\n , commit\n , branch_id\n , any_locked_files\n , case_conflict_checker ):\n rev = commit['sha1']\n if LOG.isEnabledFor(logging.DEBUG):\n LOG.debug(\"check_commit_for_branch() \"\n \"Checking branch={} mark={} sha1={} file-ct={} -- {}\"\n .format( branch_id\n , commit['mark']\n , p4gf_util.abbrev(rev)\n , len(commit['files'])\n , repr(commit['data'])[:20].splitlines()[0]))\n\n if self._already_copied_commit(rev, branch_id):\n return\n\n # following checks assume client has been set for branch\n self.ensure_branch_preflight(commit, branch_id)\n with self.ctx.switched_to_branch(\n self._current_branch\n , set_client=self.set_client_on_branch_switch\n ):\n if case_conflict_checker:\n case_conflict_checker.read_fast_export_commit(\n commit, self._current_branch)\n\n # Empty commits require root-level .p4gf_placeholder to be mapped\n # in the current branch view.\n if not commit['files'] and not self._is_placeholder_mapped():\n raise PreflightException(\n _(\"Empty commit {sha1} not permitted. Git Fusion branch views\"\n \" must include root to permit empty commits.\")\n .format(sha1=p4gf_util.abbrev(rev)))\n\n with Timer(CHECK_PROTECTS):\n self._check_protects(commit['author_p4user'], commit['files'])\n\n with Timer(CHECK_OVERLAP):\n self._check_overlap(commit)\n\n # fetch the branch setting only, without cascading to repo/global config\n if self._current_branch.is_read_only:\n raise PreflightException(_(\"Push to branch {branch} prohibited.\")\n .format(branch=self._current_branch.git_branch_name))\n self._check_stream_writable(commit)\n self._check_stream_in_classic(commit)\n\n LOG.debug('checking locked files under //{}/...'.format(self.ctx.p4.client))\n if any_locked_files:\n # Convert the git commit paths to depotPaths\n files_in_commit = [self.ctx.gwt_path(f['path']).to_depot()\n for f in commit['files']]\n LOG.debug(\"files_in_commit {0}\".format(files_in_commit))\n for f in files_in_commit:\n if f in any_locked_files:\n # Collect the names (and clients) of users with locked files.\n # Report back to the pusher so they can take appropriate action.\n msg = _('{file} - locked by {user}').format(file=f,\n user=any_locked_files[f])\n LOG.info(msg)\n raise PreflightException(msg)\n\n # +++ Spend time extracting Jobs and P4Changelist owner\n # here if we actually do need to call\n # the preflight-commit hook.\n if self.ctx.preflight_hook.is_callable():\n jobs = G2PJob.extract_jobs(commit['data'])\n jobs2 = G2PJob.lookup_jobs(self.ctx, jobs)\n self.ctx.preflight_hook(\n ctx = self.ctx\n , fe_commit = commit\n , branch_id = branch_id\n , jobs = jobs2\n )",
"def begin_commit(self, _stacklevel=1):\n if self.commit_phase:\n warnings.warn('the transaction already is on commit phase; '\n 'begin_commit() method seems called twice or more' +\n self.format_commit_stack(),\n category=TransactionWarning,\n stacklevel=1 + _stacklevel)\n return\n self.commit_phase = True\n if self.session.verbose_transaction_error:\n self.commit_stack = traceback.format_stack()\n self.session.client.multi()",
"def commit(self):"
] | [
"0.70281065",
"0.62827456",
"0.61202234",
"0.6114499",
"0.6076478",
"0.6076178",
"0.60664564",
"0.6021291",
"0.6021291",
"0.6021291",
"0.6021291",
"0.6021291",
"0.6007565",
"0.59706897",
"0.596416",
"0.5880884",
"0.5870122",
"0.5866803",
"0.5857918",
"0.58138025",
"0.5733545",
"0.57154626",
"0.571265",
"0.5704343",
"0.5680234",
"0.56675124",
"0.5650749",
"0.5634057",
"0.5629054",
"0.5614872"
] | 0.6676914 | 1 |
Prior to copying a commit, perform a set of checks for a specific branch to ensure the commit will (likely) go through successfully. | def check_commit_for_branch( self
, commit
, branch_id
, any_locked_files
, case_conflict_checker ):
rev = commit['sha1']
if LOG.isEnabledFor(logging.DEBUG):
LOG.debug("check_commit_for_branch() "
"Checking branch={} mark={} sha1={} file-ct={} -- {}"
.format( branch_id
, commit['mark']
, p4gf_util.abbrev(rev)
, len(commit['files'])
, repr(commit['data'])[:20].splitlines()[0]))
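        # Commits already copied to this branch need no further checks.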
if self._already_copied_commit(rev, branch_id):
return
# following checks assume client has been set for branch
self.ensure_branch_preflight(commit, branch_id)
with self.ctx.switched_to_branch(
self._current_branch
, set_client=self.set_client_on_branch_switch
):
if case_conflict_checker:
case_conflict_checker.read_fast_export_commit(
commit, self._current_branch)
# Empty commits require root-level .p4gf_placeholder to be mapped
# in the current branch view.
if not commit['files'] and not self._is_placeholder_mapped():
raise PreflightException(
_("Empty commit {sha1} not permitted. Git Fusion branch views"
" must include root to permit empty commits.")
.format(sha1=p4gf_util.abbrev(rev)))
with Timer(CHECK_PROTECTS):
self._check_protects(commit['author_p4user'], commit['files'])
with Timer(CHECK_OVERLAP):
self._check_overlap(commit)
# fetch the branch setting only, without cascading to repo/global config
if self._current_branch.is_read_only:
raise PreflightException(_("Push to branch {branch} prohibited.")
.format(branch=self._current_branch.git_branch_name))
self._check_stream_writable(commit)
self._check_stream_in_classic(commit)
LOG.debug('checking locked files under //{}/...'.format(self.ctx.p4.client))
if any_locked_files:
# Convert the git commit paths to depotPaths
files_in_commit = [self.ctx.gwt_path(f['path']).to_depot()
for f in commit['files']]
LOG.debug("files_in_commit {0}".format(files_in_commit))
for f in files_in_commit:
if f in any_locked_files:
# Collect the names (and clients) of users with locked files.
# Report back to the pusher so they can take appropriate action.
msg = _('{file} - locked by {user}').format(file=f,
user=any_locked_files[f])
LOG.info(msg)
raise PreflightException(msg)
# +++ Spend time extracting Jobs and P4Changelist owner
# here if we actually do need to call
# the preflight-commit hook.
if self.ctx.preflight_hook.is_callable():
jobs = G2PJob.extract_jobs(commit['data'])
jobs2 = G2PJob.lookup_jobs(self.ctx, jobs)
self.ctx.preflight_hook(
ctx = self.ctx
, fe_commit = commit
, branch_id = branch_id
, jobs = jobs2
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_branch(opt, params):\n\n # Check the current branch and hash\n _get_branch(opt)\n\n if params.git_branch != opt.git_branch or params.git_hash != opt.git_hash:\n msg = 'You are not on the right branch or commit. Please run the following in the repository: \\n'\n msg += f'git checkout {params.git_branch}\\n'\n msg += f'git revert {params.git_hash}'\n sys.exit(msg)",
"def commit_check(ctx):\n result = ctx.run(f\"{VENV_PREFIX} cz check --rev-range master..\", warn=True)\n if result.exited == 3: # NO_COMMIT_FOUND\n exit(0)\n else:\n exit(result.exited)",
"def _already_copied_commit(self, commit_sha1, branch_id):\n if not self.already_copied_commit_runner:\n return False\n return self.already_copied_commit_runner.already_copied_commit(\n commit_sha1, branch_id)",
"def check_commit(self, commit):\n # pylint: disable=too-many-branches\n if LOG.isEnabledFor(logging.DEBUG):\n LOG.debug('check_commit() Checking mark={} sha1={} file-ct={} -- {}'\n .format( commit['mark']\n , p4gf_util.abbrev(commit['sha1'])\n , len(commit['files'])\n , repr(commit['data'])[:20].splitlines()[0]))\n\n if not commit['author_p4user']:\n raise PreflightException(_(\"User '{user}' not permitted to commit\")\n .format(user=commit['author']['email'].strip('<>')))\n\n if 'merge' in commit:\n ref_is_review = (self.gsreview_coll and\n self.gsreview_coll.ref_in_review_list(self._current_prt.ref))\n if not ref_is_review and not self.ctx.merge_commits:\n raise PreflightException(_('Merge commits are not enabled for this repo.'))\n if (not ref_is_review and\n not self.ctx.branch_creation and self.assigner.have_anonymous_branches):\n msg = _('Git branch creation is prohibited for this repo.')\n p4_branch_names_non_lw = [b.git_branch_name for b in self.ctx.branch_dict().values()\n if b.git_branch_name and not b.is_lightweight]\n if len(p4_branch_names_non_lw) > 1:\n msg += _('\\nThis repo has more than one named branch.'\n '\\nTry altering the push order - '\n 'pushing branches with merge ancestors first.')\n raise PreflightException(msg)\n if LOG.isEnabledFor(logging.DEBUG):\n for parent_mark in commit['merge']:\n parent_sha1 = self.fast_export_marks.get_commit(parent_mark)[:7]\n LOG.debug(\"check_commit() merge mark={} sha1={}\"\n .format(parent_mark, parent_sha1))\n\n if not self.ctx.submodules and 'files' in commit:\n for f in commit['files']:\n if f.get('mode') == '160000':\n if 'first_commit' in commit and not self._path_added(f.get('path'), commit):\n LOG.debug2('check_commit() passed {} in {}'.format(\n f.get('path'), p4gf_util.abbrev(commit['sha1'])))\n continue\n raise PreflightException(\n _('Git submodules not permitted: path={path} commit={commit_sha1}')\n .format(path=f.get('path'), commit_sha1=p4gf_util.abbrev(commit['sha1'])))\n\n for f in commit['files']:\n LOG.debug3(\"check_commit : commit files: \" + _log_fe_file(f))\n err = check_valid_filename(f['path'], self.ctx)\n if err:\n raise PreflightException(err)\n if self.ctx.is_lfs_enabled:\n self._check_lfs(commit, f)\n\n # Warn user about any jobs that appear to not exist\n jobs = G2PJob.lookup_jobs(self.ctx, G2PJob.extract_jobs(commit['data']))\n if jobs:\n for job_id in jobs:\n r = self.ctx.p4run('jobs', '-e', 'job={}'.format(job_id))\n if not r:\n _print_error(_(\"Job '{job_id}' doesn't exist\").format(job_id=job_id))\n # Create pending changes for any Git-Swarm reviews",
"def ensure_branch_preflight(self, commit, branch_id):\n log = LOG.getChild('ensure_branch_preflight')\n branch = self.ctx.branch_dict().get(branch_id)\n # branch should never be None here. p4gf_branch_id.Assigner() must\n # create Branch objects for each assignment.\n\n if self._current_branch \\\n and self._current_branch.branch_id == branch_id:\n log.debug(\"sha={} want branch_id={} curr branch_id={} NOP\"\n .format( commit['sha1'][:7]\n , branch_id[:7]\n , self._current_branch.branch_id[:7]))\n log.debug(\"staying on branch {}\"\n .format(self.ctx.branch_dict().get(branch_id)))\n\n return branch\n\n cbid = self._current_branch.branch_id if self._current_branch else 'None'\n log.debug(\"sha={} want branch_id={} curr branch_id={} switch\"\n .format(commit['sha1'][:7], branch_id[:7], cbid[:7]))\n\n if not branch.view_lines:\n self.finish_branch_definition(commit, branch)\n\n elif branch.view_p4map:\n # if this is a stream branch, check for mutation of the stream's\n # view by comparing with the original view saved in p4gf_config2\n if branch.original_view_lines:\n original_view_lines = '\\n'.join(branch.original_view_lines)\n view_lines = p4gf_path_convert.convert_view_to_no_client_name(branch.view_lines)\n if not view_lines == original_view_lines:\n raise PreflightException(\n _('Unable to push. Stream view changed from:\\n'\n '{old_view}\\nto:\\n{new_view}')\n .format(old_view=original_view_lines, new_view=view_lines))\n # Find existing depot branch for branch view's LHS.\n lhs = branch.view_p4map.lhs()\n branch.depot_branch = self.ctx.depot_branch_info_index() \\\n .find_depot_path(lhs[0])\n\n log.debug(\"switching to branch {}\".format(branch))\n\n # By now we should have a branch and a branch.view_lines.\n # First remove current branch's files from workspace\n # Client spec is set to normdir\n self._current_branch = branch\n return branch",
"def precommit(exit=True):\n tmpdir = tempfile.mkdtemp()\n\n try:\n copy_index(tmpdir)\n\n modified = check_output(['git', 'diff', '--cached', '--name-only',\n '--diff-filter=ACMRT'])\n modified = [name.strip() for name in modified.splitlines()]\n path = os.environ['PATH']\n with pushd(tmpdir) as prevdir:\n conf = load_conf()\n # Activate the virtualenv before running checks\n if 'env' in conf:\n binpath = os.path.abspath(os.path.join(prevdir,\n conf['env']['path'],\n 'bin'))\n if binpath not in path.split(os.pathsep):\n path = binpath + os.pathsep + path\n retcode = run_checks(conf.get('hooks_all', []),\n conf.get('hooks_modified', []), modified,\n path)\n\n if exit:\n sys.exit(retcode)\n else:\n return retcode\n finally:\n shutil.rmtree(tmpdir)",
"def protect_pr_branch_with_tests_if_any_exist(org: Organization, repo: Repository,\n branches: Dict[str, Branch]) -> List[Change[str]]:\n def execute_test_protection(change: Change[str], branch: Branch, existing_checks: Set[str],\n known_status_checks: Set[str], known_checkruns: Set[str]) -> Change[str]:\n\n all_known_checks = known_status_checks | known_checkruns # For convenience later to treat them as a single set\n\n print_debug(\"[%s] Changing status checks on branch '%s' to [%s]\" %\n (highlight(repo.name), highlight(branch.name),\n highlight(\", \".join(list(all_known_checks)))))\n try:\n if existing_checks:\n branch.edit_required_status_checks(strict=True, contexts=list(all_known_checks))\n else:\n safe_branch_edit_protection(\n branch,\n strict=True,\n contexts=list(all_known_checks),\n )\n except GithubException as e:\n print_error(\"Can't edit required status checks on repo %s branch %s: %s\" %\n (repo.name, branch.name, str(e)))\n return change.failure()\n return change.success()\n\n prb = get_pr_branch(repo, branches)\n if not prb:\n return []\n\n existing_checks = set() # type: Set[str]\n try:\n rqs = prb.get_required_status_checks()\n except GithubException:\n # the repository has currently no status checks\n pass\n else:\n if len(rqs.contexts) > 0:\n # The repository already has some status checks\n existing_checks = set(rqs.contexts)\n print_debug(\"Branch %s on repo %s already has status checks [%s]\" %\n (highlight(prb.name), highlight(repo.name), highlight(\", \".join(existing_checks))))\n\n # the repository currently has no status checks, let's see if any came in within the last 7 days\n sevendaysago = datetime.now() - timedelta(days=7)\n commits = repo.get_commits(prb.name, since=sevendaysago)\n known_status_checks = set() # type: Set[str]\n known_checkruns = set() # type: Set[str]\n for commit in commits:\n for status in commit.get_statuses(): # type: CommitStatus\n if status.context not in known_status_checks:\n print_debug(\"New status check [%s]: %s %s '%s'\" %\n (commit.sha, status.updated_at,\n status.context, status.description))\n known_status_checks.add(status.context)\n for checkrun in commit.get_check_runs(): # type: CheckRun\n if checkrun.name not in known_checkruns:\n print_debug(\"New check run [%s]: %s %s %s\" %\n (commit.sha, checkrun.completed_at, checkrun.name, checkrun.app))\n known_checkruns.add(checkrun.name)\n\n all_known_checks = known_status_checks | known_checkruns # For convenience later to treat them as a single set\n print_debug(\"Found status checks [%s]\" % \", \".join(all_known_checks))\n\n if all_known_checks and all_known_checks != existing_checks:\n # add all known checks as required checks\n print_debug('Adding checks [%s] to branch %s on repo %s' %\n (highlight(\", \".join((all_known_checks) - existing_checks)),\n highlight(prb.name), highlight(repo.name)))\n return [Change(\n meta=ChangeMetadata(\n executor=execute_test_protection,\n params=[prb, existing_checks, known_status_checks, known_checkruns]\n ),\n action=ChangeActions.REPLACE if existing_checks else ChangeActions.ADD,\n before=\"%s checks\" % len(existing_checks) if existing_checks else \"No checks\",\n after=\"%s checks\" % len(all_known_checks),\n )]\n return []",
"def test_branch_can_be_copied():\n\n setup_org()\n setup_repo()\n\n responses.add(responses.GET, \"https://api.github.com/repos/my-org/my-repo/branches/master\",\n body=my_repo_branch,\n content_type='text/json',\n status=200)\n\n responses.add(responses.POST, \"https://api.github.com/repos/my-org/my-repo/git/refs\",\n body=my_new_ref,\n content_type='text/json',\n status=201)\n\n responses.add(responses.GET, \"https://api.github.com/repos/my-org/my-repo/branches/main\",\n body=my_repo_branch,\n content_type='text/json',\n status=200)\n\n token = '__dummy__'\n org = \"my-org\"\n client = GithubRestClient(token)\n new_branch_name = \"main\"\n\n repo = get_repository(client, org, \"my-repo\")\n new_branch = copy_branch(repo, repo.default_branch, new_branch_name)\n assert None is not new_branch",
"def check_branch(subcommand, branch):\n if subcommand != \"checkout\":\n return\n # first make sure actual branch name was given\n if branch is None:\n return \"Branch name to checkout must be supplied with '-b' option\"\n # next check that the local repo is clean\n cmd = [\"git\", \"status\", \"--untracked-files=no\", \"--porcelain\"]\n p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True, universal_newlines=True)\n if p.stdout.strip():\n return \"Need to have clean working tree to checkout!\\n\\n\" + p.stdout\n # next check that the branch name doesn't already exist\n cmd = [\"git\", \"show-ref\", \"--verify\", \"--quiet\", \"refs/heads/\" + branch]\n p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False)\n if not p.returncode:\n return f\"Branch {branch!r} already exists\"",
"def check_fast_forward(self, branch):\n proc = run_cmd(self.git + ['rev-list', '%s-tmp..%s' %\n (branch, branch), '--'])\n if proc.stdout.strip():\n # Commits have been made on the main branch since the last update\n # command.\n raise EmtError('cannot fast-forward the %s branch, please '\n 'run again the update command' % branch)",
"def test_commit_on_unborn_branch(tmp_path: Path) -> None:\n repository = Repository.init(tmp_path / \"repository\")\n repository.commit(message=\"initial\")\n\n assert not repository.head.commit.parents",
"def verify_branch(path, expected_branch=\"master\"):\n\n sys.stdout.write(\" - Verifying your branch is %s:\" % expected_branch)\n branch = run_in_component(path, ['git', 'rev-parse', '--abbrev-ref', 'HEAD'])\n branch = branch.strip()\n\n if branch == expected_branch:\n print(\" OKAY\")\n return\n\n print(\" FAILED\")\n\n raise GenericError(\"You must be on branch %s to release, you are on %s\" % (expected_branch, branch))",
"def _ensure_commit(git_sha1):\n cmd = [\"git\", \"cat-file\", \"-e\", git_sha1 + \"^{commit}\"]\n p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False)\n if p.returncode == 0:\n # we have the commit locally\n return\n # we don't have the commit, must fetch\n cmd = [\"git\", \"fetch\", \"https://github.com/pytorch/pytorch.git\", git_sha1]\n p = subprocess.run(cmd, check=True)",
"def __gitStashBranch(self):\n pfile = self.project.getProjectFile()\n lastModified = QFileInfo(pfile).lastModified().toString()\n shouldReopen = (\n self.vcs.gitStashBranch(self.project.getProjectPath()) or\n QFileInfo(pfile).lastModified().toString() != lastModified\n )\n if shouldReopen:\n res = E5MessageBox.yesNo(\n self.parent(),\n self.tr(\"Create Branch\"),\n self.tr(\"\"\"The project should be reread. Do this now?\"\"\"),\n yesDefault=True)\n if res:\n self.project.reopenProject()",
"def _pushb2ctxcheckheads(pushop, bundler):\n # * 'force' do not check for push race,\n # * if we don't push anything, there are nothing to check.\n if not pushop.force and pushop.outgoing.ancestorsof:\n allowunrelated = b'related' in bundler.capabilities.get(\n b'checkheads', ()\n )\n emptyremote = pushop.pushbranchmap is None\n if not allowunrelated or emptyremote:\n bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))\n else:\n affected = set()\n for branch, heads in pycompat.iteritems(pushop.pushbranchmap):\n remoteheads, newheads, unsyncedheads, discardedheads = heads\n if remoteheads is not None:\n remote = set(remoteheads)\n affected |= set(discardedheads) & remote\n affected |= remote - set(newheads)\n if affected:\n data = iter(sorted(affected))\n bundler.newpart(b'check:updated-heads', data=data)",
"def ensure_tracking_branches(args):\n man = load_manifest()\n for (name, project) in man.projects.iteritems():\n repo = GitRepo(workdir_for_project(project))\n branch_missing = repo.command(\n [\"rev-parse\", \"--verify\", \"-q\", project.refspec],\n capture_stdout=True)\n \n if branch_missing:\n logging.warn(\"Branch %s does not exist in project %s. checking out.\" %\n (project.refspec, name))\n repo.command([\"branch\", \"--track\",\n project.tracking_branch, project.remote_refspec])",
"def _preflight_check(ctx, prl, gsreview_coll):\n LOG.debug('pre-receive preflight check for %s', ctx.config.repo_name)\n branch_dict = ctx.branch_dict()\n for prt in prl:\n branch = _is_gitref_in_gf(prt.ref, branch_dict, is_lightweight=False)\n ref_is_review = gsreview_coll and gsreview_coll.ref_in_review_list(prt.ref)\n if ref_is_review:\n if not ctx.swarm_reviews:\n raise RuntimeError(_(\n \"Swarm reviews are not authorized for this repo.\"\n \"\\nRejecting push of '{ref}'.\").format(ref=prt.ref))\n elif not ctx.branch_creation and not branch:\n raise RuntimeError(_(\n \"Branch creation is not authorized for this repo.\"\n \"\\nRejecting push of '{ref}'.\").format(ref=prt.ref))",
"def prepare_deploy(ticket=None, msg=None, branch=None):\n test()\n commit(ticket, msg)\n push(branch)\n pull(branch)",
"def lint_commit_base(commit):\n success = True\n # Merge commits have two parents, we maintain a linear history.\n if len(commit.parents) > 1:\n error(\n \"Please resolve merges by re-basing. Merge commits are not allowed.\",\n commit)\n success = False\n\n return success",
"def check_commit_problems(self, commit, diff):\n\n # Initialise\n self._license_problem = False\n self._commit_problem = False\n self._commit_notes = defaultdict(list)\n\n # Unsafe regex checks...\n unsafe_matches = list()\n unsafe_matches.append( r\"\\b(KRun::runCommand|K3?ShellProcess|setUseShell|setShellCommand)\\b\\s*[\\(\\r\\n]\" )\n unsafe_matches.append( r\"\\b(system|popen|mktemp|mkstemp|tmpnam|gets|syslog|strptime)\\b\\s*[\\(\\r\\n]\" )\n unsafe_matches.append( r\"(scanf)\\b\\s*[\\(\\r\\n]\" )\n valid_filename_regex = r\"\\.(cpp|cc|cxx|C|c\\+\\+|c|l|y||h|H|hh|hxx|hpp|h\\+\\+|qml)$\"\n\n # Retrieve the diff and do the problem checks...\n filename = unicode(\"\")\n filediff = list()\n for line in diff:\n file_change = re.match( \"^diff --(cc |git a\\/.+ b\\/)(.+)$\", line )\n if file_change:\n # Are we changing file? If so, we have the full diff, so do a license check....\n if filename != \"\" and commit.files_changed[ filename ][\"change\"] in ['A'] and re.search(valid_filename_regex, filename):\n self.check_commit_license(filename, ''.join(filediff))\n\n filediff = list()\n filename = file_change.group(2)\n continue\n\n # Diff headers are bogus\n if re.match(\"@@ -\\d+,\\d+ \\+\\d+ @@\", line):\n filediff = list()\n continue\n\n # Do an incremental check for *.desktop syntax errors....\n if re.search(\"\\.desktop$\", filename) and re.search(\"[^=]+=.*[ \\t]$\", line) and line.startswith(\"+\") and not re.match(\"^\\+#\", line):\n self._commit_notes[filename].append( \"[TRAILING SPACE] **\" )\n self._commit_problem = True\n\n # Check for things which are unsafe...\n for safety_match in unsafe_matches:\n match = re.match(safety_match, line)\n if match:\n note = \"[POSSIBLY UNSAFE: {0}] **\".format( match.group(1) )\n self._commit_notes[filename].append(note)\n self._commit_problem = True\n\n # Store the diff....\n filediff.append(line)\n\n if filename != \"\" and commit.files_changed[ filename ][\"change\"] in ['A'] and re.search(valid_filename_regex, filename):\n self.check_commit_license(filename, ''.join(filediff))",
"def _check_overlap(self, fe_commit):\n # +++ Avoid O(b branches * r rev) checks when\n # overlap is impossible because current branch\n # overlaps no other branch.\n if self._current_branch not in self._overlapping_branch_list():\n return\n\n for fe_file in fe_commit['files']:\n gwt_path = fe_file['path']\n depot_path = self.ctx.gwt_to_depot_path(gwt_path)\n\n for branch in self._overlapping_branch_list():\n if branch == self._current_branch:\n continue\n if not branch.intersects_depot_path(depot_path):\n continue\n\n LOG.debug(\"_check_overlap() branch {br1} <> {br2}\"\n \" gwt={gwt:<40} {dp}\\n{view}\"\n .format(\n br1 = p4gf_util.abbrev(self._current_branch.branch_id)\n , br2 = p4gf_util.abbrev(branch.branch_id)\n , gwt = gwt_path\n , dp = depot_path\n , view = \"\\n\".join(branch.view_p4map.as_array())\n ))\n\n if self._current_branch.is_new_fp_from_push or branch.is_new_fp_from_push:\n current_branch_name = self._current_branch.git_branch_name\n if self._current_branch.is_new_fp_from_push:\n current_branch_name += '(new)'\n other_branch_name = branch.git_branch_name\n if branch.is_new_fp_from_push:\n other_branch_name += '(new)'\n human_msg = (_(\n \"Perforce: Cannot commit {sha1} '{gwt_path}' to '{depot_path}'.\\n\"\n \" You are attempting to push and create a new fully populated branch\\n\"\n \" with paths which overlap another branch. Contact your admin\\n\"\n \" to configure non-conflicting destination branch paths.\\n\"\n \" Branches: '{b1}', '{b2}'\")\n .format( sha1 = p4gf_util.abbrev(fe_commit['sha1'])\n , gwt_path = gwt_path\n , depot_path = depot_path\n , b1 = current_branch_name\n , b2 = other_branch_name ))\n else:\n human_msg = (_(\n \"Cannot commit {sha1} '{gwt_path}' to '{depot_path}'.\"\n \" Paths that overlap multiple Git Fusion branches are read-only.\"\n \" Branches: '{b1}', '{b2}'\")\n .format( sha1 = p4gf_util.abbrev(fe_commit['sha1'])\n , gwt_path = gwt_path\n , depot_path = depot_path\n , b1 = self._current_branch.branch_id\n , b2 = branch.branch_id ))\n raise PreflightException(human_msg)",
"def test_branch_commit_set(repository: Repository) -> None:\n head, heads = repository.head, repository.heads\n heads[\"branch\"] = head.commit\n updatefile(repository.path / \"a\")\n branch = repository.branch(\"branch\")\n branch.commit = head.commit\n assert head.commit == branch.commit",
"def test_commit(self):\n # TODO: Test errors while committing and recovery\n pass",
"def test_branching(self):\r\n repo_dir = self.GIT_REPO_DIR\r\n # Test successful import from command\r\n if not os.path.isdir(repo_dir):\r\n os.mkdir(repo_dir)\r\n self.addCleanup(shutil.rmtree, repo_dir)\r\n\r\n # Checkout non existent branch\r\n with self.assertRaisesRegexp(GitImportError, GitImportError.REMOTE_BRANCH_MISSING):\r\n git_import.add_repo(self.TEST_REPO, repo_dir / 'edx4edx_lite', 'asdfasdfasdf')\r\n\r\n # Checkout new branch\r\n git_import.add_repo(self.TEST_REPO,\r\n repo_dir / 'edx4edx_lite',\r\n self.TEST_BRANCH)\r\n def_ms = modulestore()\r\n # Validate that it is different than master\r\n self.assertIsNotNone(def_ms.get_course(self.TEST_BRANCH_COURSE))\r\n\r\n # Attempt to check out the same branch again to validate branch choosing\r\n # works\r\n git_import.add_repo(self.TEST_REPO,\r\n repo_dir / 'edx4edx_lite',\r\n self.TEST_BRANCH)\r\n\r\n # Delete to test branching back to master\r\n delete_course(def_ms, contentstore(),\r\n self.TEST_BRANCH_COURSE,\r\n True)\r\n self.assertIsNone(def_ms.get_course(self.TEST_BRANCH_COURSE))\r\n git_import.add_repo(self.TEST_REPO,\r\n repo_dir / 'edx4edx_lite',\r\n 'master')\r\n self.assertIsNone(def_ms.get_course(self.TEST_BRANCH_COURSE))\r\n self.assertIsNotNone(def_ms.get_course(SlashSeparatedCourseKey.from_deprecated_string(self.TEST_COURSE)))",
"def verify_up_to_date(path, branch=\"master\"):\n\n sys.stdout.write(\" - Verifying your branch up to date:\")\n run_in_component(path, ['git', 'remote', 'update'])\n\n result = run_in_component(path, ['git', 'rev-list', 'HEAD...origin/%s' % branch, '--count'])\n count = int(result.strip())\n\n if count == 0:\n print(\" OKAY\")\n return\n\n print(\" FAILED\")\n\n raise GenericError(\"You branch is not up-to-date with remote branch: %d different commits\" % count)",
"def requires_branch(f):\n @functools.wraps(f)\n def check_branch(self, *args, **kwargs):\n if self.branch is None:\n raise error.ExpectationFailed(\n 'This operation requires an active release branch')\n return f(self, *args, **kwargs)\n return check_branch",
"def prepare_for_commit(self):",
"def check_commits(self, commits):\n LOG.info('Checking Perforce permissions and locks')\n self.ctx.checkpoint(\"copy_to_p4._preflight_check\")\n\n # Stop if files are opened in our repo client\n # We expect this to be none, since we have the view lock\n opened = self.ctx.p4.run(['opened', '-m1'])\n if opened:\n raise PreflightException(_('There are files opened by Git Fusion for this repo.'))\n\n # fetch the repo setting only, without cascading to global config\n is_read_only = self.ctx.repo_config.getboolean(p4gf_config.SECTION_REPO,\n p4gf_config.KEY_READ_ONLY,\n fallback=False)\n if is_read_only:\n raise PreflightException(_(\"Push to repo {repo_name} prohibited.\")\n .format(repo_name=self.ctx.config.repo_name))\n\n # get a list of stream depots for later checks for read-only paths\n depots = self.ctx.p4.run(['depots'])\n self.stream_depots = set([d['name'] for d in depots if d['type'] == 'stream'])\n any_locked_files = self._find_locked_by()\n LOG.debug(\"any_locked_files {0}\".format(any_locked_files))\n case_conflict_checker = None\n if not self.ctx.server_is_case_sensitive:\n case_conflict_checker = CaseConflictChecker(self.ctx)\n case_conflict_checker.read_perforce_paths()\n\n ui_name = self._curr_ref_ui_name()\n if ui_name:\n progress_msg = _('Checking commits for {ref}...').format(ref=ui_name)\n else:\n progress_msg = _('Checking commits...')\n\n with ProgressReporter.Determinate(len(commits)):\n for commit in commits:\n ProgressReporter.increment(progress_msg)\n\n self.g2p_user.get_author_pusher_owner(commit)\n\n rev = commit['sha1']\n if not self.assigner.is_assigned(commit['sha1']):\n continue\n\n self.check_commit(commit)\n\n for branch_id in self.assigner.branch_id_list(rev):\n self.check_commit_for_branch(\n commit\n , branch_id\n , any_locked_files\n , case_conflict_checker )\n\n if case_conflict_checker:\n cc_text = case_conflict_checker.conflict_text()\n if cc_text:\n raise PreflightException(cc_text)",
"def lint(session):\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"run\", \"--all-files\")",
"def process_throw(self):\n prl = self.prl\n ctx = self.context\n\n # Tell server_common about the refs that Git wants to move.\n PRLFile(ctx.config.repo_name).write(prl)\n\n # Delete the file that signals whether our hooks ran or not.\n fname = os.path.join(ctx.repo_dirs.repo_container, p4gf_const.P4GF_PRE_RECEIVE_FLAG)\n if os.path.exists(fname):\n os.unlink(fname)\n\n # reject pushes if not fast-forward\n _check_fast_forward(prl)\n\n # Swarm review creates new Git merge commits. Must occur before\n # branch assignment so that the review reference can be moved to\n # the new merge commit.\n with Timer('swarm pre-copy'):\n gsreview_coll = GSReviewCollection.from_prl(ctx, prl.set_heads)\n if gsreview_coll:\n gsreview_coll.pre_copy_to_p4(prl.set_heads)\n\n # New depot branches create new fully populated Branch definitions.\n # Must occur before branch assignment so that we can assign\n # incoming commits to these new branches.\n # Modifies PreReceiveTuple refs.\n with Timer('depot branch pre-copy'):\n ndb_coll = NDBCollection.from_prl(ctx, prl.set_heads, gsreview_coll)\n if ndb_coll:\n ndb_coll.pre_copy_to_p4()\n\n _preflight_check(ctx, prl.set_heads, gsreview_coll)\n self._preflight_tags()\n # do _not_ write changes to space consumption\n PushLimits(self.context).enforce(prl.set_heads)\n\n fast_push = FastPush.from_pre_receive(\n ctx = ctx\n , prl = prl\n , gsreview_coll = gsreview_coll\n , ndb = ndb_coll\n )\n if fast_push:\n fast_push.pre_receive()\n write_packet_fast_push(fast_push)\n else:\n self.prl = prl = _set_old_sha1_for_branch_adds(ctx, prl)\n assigner = _assign_branches(ctx, prl)\n export_data = None\n g2p = None\n if assigner:\n g2p = p4gf_copy_to_p4.G2P(ctx, assigner, gsreview_coll)\n export_data = self._preflight_heads(gsreview_coll, g2p)\n\n # Write background push packet to file as JSON for consumption in\n # background push processing (see CopyOnlyHook).\n extras = dict()\n if export_data:\n extras['fast-export'] = export_data\n if g2p and g2p.lfs_row_list:\n extras[\"lfs_row_list\"] = [row.to_dict() for row in g2p.lfs_row_list]\n if gsreview_coll:\n # reset the handled state, we will process the reviews again in copy phase\n reviews = gsreview_coll.to_dict()\n for dikt in reviews['reviews']:\n dikt['handled'] = False\n extras['gsreview'] = reviews\n if ndb_coll:\n extras['ndb'] = ndb_coll.to_dict()\n write_packet(ctx, assigner, prl, extras)\n\n # If receiving a push over SSH, or the push payload over HTTP,\n # report the push identifier to the user via standard error stream.\n # Any earlier in the process and HTTP will not deliver it, any\n # later and the connection will have already been closed.\n if p4gf_const.P4GF_FORK_PUSH in os.environ:\n sys.stderr.write(_(\"Commencing push {push_id} processing...\\n\")\n .format(push_id=self.context.push_id))\n sys.stderr.flush()\n\n return 0"
] | [
"0.66593283",
"0.653673",
"0.64837676",
"0.64365125",
"0.63583773",
"0.628439",
"0.6173651",
"0.6014642",
"0.6005059",
"0.5976242",
"0.5954787",
"0.58620733",
"0.5859521",
"0.5851176",
"0.58046544",
"0.57973",
"0.579227",
"0.57077634",
"0.56944895",
"0.56923765",
"0.56910425",
"0.5656167",
"0.5641875",
"0.5637214",
"0.5594983",
"0.5580973",
"0.55732477",
"0.55719966",
"0.5538973",
"0.551497"
] | 0.72327125 | 0 |
If not already switched to and synced to the correct branch for the given commit, do so. If this is a new lightweight branch, perform whatever creation we can do at preflight time. We don't have commits/marks for any not-yet-submitted parent commits, so the depot_branch_info will often lack a correct parent or fully populated basis. At preflight we create: the depot tree, along with a branch-info file; and the branch mapping, along with an entry in p4gf_config2 (if not anonymous). Return requested branch. | def ensure_branch_preflight(self, commit, branch_id):
log = LOG.getChild('ensure_branch_preflight')
branch = self.ctx.branch_dict().get(branch_id)
# branch should never be None here. p4gf_branch_id.Assigner() must
# create Branch objects for each assignment.
if self._current_branch \
and self._current_branch.branch_id == branch_id:
log.debug("sha={} want branch_id={} curr branch_id={} NOP"
.format( commit['sha1'][:7]
, branch_id[:7]
, self._current_branch.branch_id[:7]))
log.debug("staying on branch {}"
.format(self.ctx.branch_dict().get(branch_id)))
return branch
cbid = self._current_branch.branch_id if self._current_branch else 'None'
log.debug("sha={} want branch_id={} curr branch_id={} switch"
.format(commit['sha1'][:7], branch_id[:7], cbid[:7]))
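        # Branch has no view defined yet (e.g. a new lightweight branch):
        # finish defining it from this commit.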
if not branch.view_lines:
self.finish_branch_definition(commit, branch)
elif branch.view_p4map:
# if this is a stream branch, check for mutation of the stream's
# view by comparing with the original view saved in p4gf_config2
if branch.original_view_lines:
original_view_lines = '\n'.join(branch.original_view_lines)
view_lines = p4gf_path_convert.convert_view_to_no_client_name(branch.view_lines)
if not view_lines == original_view_lines:
raise PreflightException(
_('Unable to push. Stream view changed from:\n'
'{old_view}\nto:\n{new_view}')
.format(old_view=original_view_lines, new_view=view_lines))
# Find existing depot branch for branch view's LHS.
lhs = branch.view_p4map.lhs()
branch.depot_branch = self.ctx.depot_branch_info_index() \
.find_depot_path(lhs[0])
log.debug("switching to branch {}".format(branch))
# By now we should have a branch and a branch.view_lines.
# First remove current branch's files from workspace
# Client spec is set to normdir
self._current_branch = branch
return branch | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_commit_for_branch( self\n , commit\n , branch_id\n , any_locked_files\n , case_conflict_checker ):\n rev = commit['sha1']\n if LOG.isEnabledFor(logging.DEBUG):\n LOG.debug(\"check_commit_for_branch() \"\n \"Checking branch={} mark={} sha1={} file-ct={} -- {}\"\n .format( branch_id\n , commit['mark']\n , p4gf_util.abbrev(rev)\n , len(commit['files'])\n , repr(commit['data'])[:20].splitlines()[0]))\n\n if self._already_copied_commit(rev, branch_id):\n return\n\n # following checks assume client has been set for branch\n self.ensure_branch_preflight(commit, branch_id)\n with self.ctx.switched_to_branch(\n self._current_branch\n , set_client=self.set_client_on_branch_switch\n ):\n if case_conflict_checker:\n case_conflict_checker.read_fast_export_commit(\n commit, self._current_branch)\n\n # Empty commits require root-level .p4gf_placeholder to be mapped\n # in the current branch view.\n if not commit['files'] and not self._is_placeholder_mapped():\n raise PreflightException(\n _(\"Empty commit {sha1} not permitted. Git Fusion branch views\"\n \" must include root to permit empty commits.\")\n .format(sha1=p4gf_util.abbrev(rev)))\n\n with Timer(CHECK_PROTECTS):\n self._check_protects(commit['author_p4user'], commit['files'])\n\n with Timer(CHECK_OVERLAP):\n self._check_overlap(commit)\n\n # fetch the branch setting only, without cascading to repo/global config\n if self._current_branch.is_read_only:\n raise PreflightException(_(\"Push to branch {branch} prohibited.\")\n .format(branch=self._current_branch.git_branch_name))\n self._check_stream_writable(commit)\n self._check_stream_in_classic(commit)\n\n LOG.debug('checking locked files under //{}/...'.format(self.ctx.p4.client))\n if any_locked_files:\n # Convert the git commit paths to depotPaths\n files_in_commit = [self.ctx.gwt_path(f['path']).to_depot()\n for f in commit['files']]\n LOG.debug(\"files_in_commit {0}\".format(files_in_commit))\n for f in files_in_commit:\n if f in any_locked_files:\n # Collect the names (and clients) of users with locked files.\n # Report back to the pusher so they can take appropriate action.\n msg = _('{file} - locked by {user}').format(file=f,\n user=any_locked_files[f])\n LOG.info(msg)\n raise PreflightException(msg)\n\n # +++ Spend time extracting Jobs and P4Changelist owner\n # here if we actually do need to call\n # the preflight-commit hook.\n if self.ctx.preflight_hook.is_callable():\n jobs = G2PJob.extract_jobs(commit['data'])\n jobs2 = G2PJob.lookup_jobs(self.ctx, jobs)\n self.ctx.preflight_hook(\n ctx = self.ctx\n , fe_commit = commit\n , branch_id = branch_id\n , jobs = jobs2\n )",
"def git_branch():\n result, output = popen('git branch', False, False)\n branch = None\n for line in output:\n if line.startswith('*'):\n branch = line.split('*')[-1].strip()\n break\n return branch",
"def _set_tracking_branch_commit(self, branch, remote, depth):\n\n branch_output = fmt.ref_string(branch)\n origin = self._remote(remote)\n return_code = self.fetch(remote, depth=depth, ref=branch)\n if return_code != 0:\n raise ClowderGitError(msg=colored(' - Failed to fech', 'red'))\n if not self.existing_local_branch(branch):\n message = colored(' - No local branch ', 'red') + branch_output + '\\n'\n self._print(message)\n self._exit(message)\n if not self.existing_remote_branch(branch, remote):\n message = colored(' - No remote branch ', 'red') + branch_output + '\\n'\n self._print(message)\n self._exit(message)\n local_branch = self.repo.heads[branch]\n remote_branch = origin.refs[branch]\n if local_branch.commit != remote_branch.commit:\n message_1 = colored(' - Existing remote branch ', 'red')\n message_2 = colored(' on different commit', 'red')\n message = message_1 + branch_output + message_2 + '\\n'\n self._print(message)\n self._exit(message_1)\n return_code = self._set_tracking_branch(remote, branch)\n if return_code != 0:\n self._exit(colored(' - Failed to set tracking branch', 'red'))",
"def free_branch(allow_create=False, str_if_none=False):\n repo = git.repo()\n current_branch = repo.current_branch()\n result = None\n if current_branch:\n if not is_nice_branch(current_branch):\n result = current_branch\n else:\n proposed_branch = re.sub( nice_branch_regex, \"\", current_branch)\n if not repo.has_branch(proposed_branch):\n if allow_create:\n result = proposed_branch\n repo.create_branch(proposed_branch,startpoint=remote_branch())\n else:\n result = proposed_branch\n if not result and str_if_none:\n result = no_branch\n return result",
"def _get_rebasebranch(self):\n logging.info('--- Get Rebasebranch ---')\n local_branch_candidates = {\n branch for branch in self.local_branches\n if branch == self.options.rebasebranch}\n remote_branch_candidates = {\n branch for branch in self.remote_branches\n if self.options.rebasebranch in branch}\n try:\n found_local_branch = local_branch_candidates.pop()\n except KeyError:\n gitwrapper.exit_with_error(\n 'No local branches named %r found.',\n self.options.rebasebranch)\n #\n if local_branch_candidates:\n gitwrapper.exit_with_error(\n 'Too many matching local branches found: %s, %s.',\n found_local_branch,\n ', '.join(local_branch_candidates))\n #\n if not remote_branch_candidates:\n gitwrapper.exit_with_error(\n 'No remote branches named %r found.',\n self.options.rebasebranch)\n #\n if len(remote_branch_candidates) > 2:\n # 1 if remote is not pushed, 2 if its pushed to remote\n gitwrapper.exit_with_error(\n 'Too many matching remote branches found: %s.',\n ', '.join(remote_branch_candidates))\n #\n self.local_branches = {found_local_branch}\n self.remote_branches = remote_branch_candidates\n logging.info('Found local branch %r.', found_local_branch)\n logging.info(\n 'Found remote branches %s.'\n ' and '.join(repr(branch) for branch in self.remote_branches))\n # We only rebase the specified branch\n self.tags = set()",
"def create_branch(self, name: str, base_commit: str = None) -> heads.BranchHead:\n self.__verify_repo_initialized()\n if (not is_ascii(name)) or (not is_suitable_user_key(name)):\n err = ValueError(\n f'Branch name provided: {name} invalid. Must contain only alpha-numeric '\n f'or \".\" \"_\" \"-\" ascii characters. And be <= 64 Characters')\n raise err from None\n createdBranch = heads.create_branch(\n branchenv=self._env.branchenv,\n name=name,\n base_commit=base_commit)\n return createdBranch",
"def branch(self, current_path):\n p = subprocess.Popen(\n [\"git\", \"show-ref\"],\n stdout=PIPE,\n stderr=PIPE,\n cwd=os.path.join(self.root_dir, current_path),\n )\n output, error = p.communicate()\n if p.returncode == 0:\n results = []\n try:\n current_branch = self.get_current_branch(current_path)\n for line in output.decode(\"utf-8\").splitlines():\n # The format for git show-ref is '<SHA-1 ID> <space> <reference name>'\n # For this method we are only interested in reference name.\n # Reference : https://git-scm.com/docs/git-show-ref#_output\n commit_sha = line.strip().split()[0].strip()\n reference_name = line.strip().split()[1].strip()\n if self._is_branch(reference_name):\n branch_name = self._get_branch_name(reference_name)\n is_current_branch = self._is_current_branch(\n branch_name, current_branch\n )\n is_remote_branch = self._is_remote_branch(reference_name)\n upstream_branch_name = None\n if not is_remote_branch:\n upstream_branch_name = self.get_upstream_branch(\n current_path, branch_name\n )\n tag = self._get_tag(current_path, commit_sha)\n results.append(\n {\n \"is_current_branch\": is_current_branch,\n \"is_remote_branch\": is_remote_branch,\n \"name\": branch_name,\n \"upstream\": upstream_branch_name,\n \"top_commit\": commit_sha,\n \"tag\": tag,\n }\n )\n\n # Remote branch is seleted use 'git branch -a' as fallback machanism\n # to get add detached head on remote branch to preserve older functionality\n # TODO : Revisit this to checkout new local branch with same name as remote\n # when the remote branch is seleted, VS Code git does the same thing.\n if current_branch == \"HEAD\":\n results.append(\n {\n \"is_current_branch\": True,\n \"is_remote_branch\": False,\n \"name\": self._get_detached_head_name(current_path),\n \"upstream\": None,\n \"top_commit\": None,\n \"tag\": None,\n }\n )\n return {\"code\": p.returncode, \"branches\": results}\n except Exception as downstream_error:\n return {\n \"code\": p.returncode,\n \"command\": \"git show-ref\",\n \"message\": str(downstream_error),\n }\n else:\n return {\n \"code\": p.returncode,\n \"command\": \"git show-ref\",\n \"message\": error.decode(\"utf-8\"),\n }",
"def _get_git_branch_and_commit() -> Tuple[str, str]:\n branch_name = \"NO_BRANCH\"\n commit = \"NO_COMMIT\"\n try:\n repo = Repo(__file__, search_parent_directories=True)\n try:\n branch_name = str(repo.active_branch)\n except TypeError:\n pass # Keep current/default branch_name\n commit = str(repo.commit())\n if repo.is_dirty():\n commit += \" + uncomitted changes\"\n except InvalidGitRepositoryError:\n pass # Keep current/default branch_name and commit\n return branch_name, commit",
"def _check_branch(opt, params):\n\n # Check the current branch and hash\n _get_branch(opt)\n\n if params.git_branch != opt.git_branch or params.git_hash != opt.git_hash:\n msg = 'You are not on the right branch or commit. Please run the following in the repository: \\n'\n msg += f'git checkout {params.git_branch}\\n'\n msg += f'git revert {params.git_hash}'\n sys.exit(msg)",
"def create_branch(self):\n os.chdir(str(self.repository_path))\n sh.git.checkout('master')\n sh.git.checkout('-b', self.branch)\n logger.debug('Branch {} created', self.branch)",
"def get_repo_branch(self):\n # Load HEAD and find ref.\n with open('{path}HEAD'.format(path=self.workpath), 'rb') as fp:\n ref = fp.read().strip().decode().split(': ')[1]\n\n print('[+] Downloading {}'.format(ref))\n\n # Requests for head hash and save\n head_url = '{base_url}{ref}'.format(base_url=self.base_url, ref=ref)\n data = self._request(head_url).read().strip()\n\n # Save the hash inside the ref file into the target place.\n ref_path = '/'.join(ref.split('/')[:-1])\n if not os.path.exists('{path}{ref_path}'.format(path=self.workpath, ref_path=ref_path)):\n os.makedirs('{path}{ref_path}'.format(path=self.workpath, ref_path=ref_path))\n with open('{path}{ref}'.format(path=self.workpath, ref=ref), 'wb') as fp:\n fp.write(data)\n\n # After get ref->head_hash, why not share it.\n self.head_hash = data.decode()",
"def get_git_branch(path: str) -> Optional[str]:\n try:\n from git import Repo\n except ImportError as e:\n _logger.warning(\n \"Failed to import Git (the Git executable is probably not on your PATH),\"\n \" so Git SHA is not available. Error: %s\",\n e,\n )\n return None\n\n try:\n if os.path.isfile(path):\n path = os.path.dirname(path)\n repo = Repo(path, search_parent_directories=True)\n return repo.active_branch.name\n except Exception:\n return None",
"def get_git_branch():\n branch = \"\"\n try:\n # git > 2.22 could do 'git branch --show-current'\n branch = check_output(\n ['git', 'rev-parse', '--abbrev-ref', 'HEAD'])\n\n # No git installed or project downloaded as a .zip\n except Exception:\n pass\n\n return branch.strip()",
"def test_heads_create_new_branch_at_ancestor(repository: Repository) -> None:\n parent = repository.head.commit\n updatefile(repository.path / \"a\")\n branch = repository.heads.create(\"branch\", parent)\n assert parent == branch.commit",
"def test_heads_create_new_branch_commit(repository: Repository) -> None:\n branch = repository.heads.create(\"branch\", repository.head.commit)\n assert repository.head.commit == branch.commit",
"def _switchBranch(self, release):\n if release is None:\n self.branch = None\n self.branch_dir = None\n log.info('No release branch available')\n else:\n self.wc.update()\n assert self.wc.exists('branches/' + release)\n io.linesToFile(self.path(self.BRANCH_FILE), [release])\n self.branch = release\n self.branch_dir = 'branches/' + release\n self.wc.update(self.branch_dir, depth='infinity')\n log.info('Working on branch ' + self.branch)",
"def test_heads_create_new_branch_at_another_branch(repository: Repository) -> None:\n main = repository.head\n branch1 = repository.heads.create(\"branch1\")\n\n repository.checkout(branch1)\n repository.commit()\n\n repository.checkout(main)\n branch2 = repository.heads.create(\"branch2\", branch1.commit)\n\n assert branch1.commit == branch2.commit",
"def get_branch():\n command = [\"git\", \"branch\", \"--show-current\"]\n with subprocess.Popen(command, stdout=subprocess.PIPE) as proc:\n branch_str = proc.stdout.readline()\n return branch_str.decode(\"utf-8\").rstrip()",
"def __gitBranch(self):\n pfile = self.project.getProjectFile()\n lastModified = QFileInfo(pfile).lastModified().toString()\n shouldReopen = (\n self.vcs.gitBranch(self.project.getProjectPath())[1] or\n QFileInfo(pfile).lastModified().toString() != lastModified\n )\n if shouldReopen:\n res = E5MessageBox.yesNo(\n self.parent(),\n self.tr(\"Branch\"),\n self.tr(\"\"\"The project should be reread. Do this now?\"\"\"),\n yesDefault=True)\n if res:\n self.project.reopenProject()",
"def GetBranch():\n m = BRANCH_REGEX.match(RCS_FILE)\n if m:\n return m.group(2)\n return DEFAULT_BRANCH",
"def test_branching(self):\r\n repo_dir = self.GIT_REPO_DIR\r\n # Test successful import from command\r\n if not os.path.isdir(repo_dir):\r\n os.mkdir(repo_dir)\r\n self.addCleanup(shutil.rmtree, repo_dir)\r\n\r\n # Checkout non existent branch\r\n with self.assertRaisesRegexp(GitImportError, GitImportError.REMOTE_BRANCH_MISSING):\r\n git_import.add_repo(self.TEST_REPO, repo_dir / 'edx4edx_lite', 'asdfasdfasdf')\r\n\r\n # Checkout new branch\r\n git_import.add_repo(self.TEST_REPO,\r\n repo_dir / 'edx4edx_lite',\r\n self.TEST_BRANCH)\r\n def_ms = modulestore()\r\n # Validate that it is different than master\r\n self.assertIsNotNone(def_ms.get_course(self.TEST_BRANCH_COURSE))\r\n\r\n # Attempt to check out the same branch again to validate branch choosing\r\n # works\r\n git_import.add_repo(self.TEST_REPO,\r\n repo_dir / 'edx4edx_lite',\r\n self.TEST_BRANCH)\r\n\r\n # Delete to test branching back to master\r\n delete_course(def_ms, contentstore(),\r\n self.TEST_BRANCH_COURSE,\r\n True)\r\n self.assertIsNone(def_ms.get_course(self.TEST_BRANCH_COURSE))\r\n git_import.add_repo(self.TEST_REPO,\r\n repo_dir / 'edx4edx_lite',\r\n 'master')\r\n self.assertIsNone(def_ms.get_course(self.TEST_BRANCH_COURSE))\r\n self.assertIsNotNone(def_ms.get_course(SlashSeparatedCourseKey.from_deprecated_string(self.TEST_COURSE)))",
"def branch(self, name=None, clean=None, force=None):\n\n if name and clean:\n raise ValueError('Cannot use both name and clean')\n\n self._client.execute('branch', name, f=force, C=clean)\n\n if name:\n return name\n elif not clean:\n return out.strip()\n else:\n return out[len('reset working directory to branch '):]",
"def get_branch(self):\n if self._repository:\n return self._repository.dirstate.branch()",
"def _make_release_branch(self):\n user = getpass.getuser()\n if not user == self._user:\n raise Error('the command should only be run as user %s' % self._user)\n branch = self._branch\n # get the latest master updates\n subprocess.check_call('git remote update', shell=True)\n subprocess.check_call('git checkout master', shell=True)\n # does a git pull and updates the submodules\n GitUtil.update_submodules()\n # get the latest commit before the release is cut\n self._latest_commit = GitUtil.get_latest_commit()\n print 'Making release branch %s' % branch\n # create the new release branch\n GitUtil.create_branch(branch)\n print TermColor.ColorStr('Created remote branch %s' % branch, 'GREEN')",
"def switch_branch(branch, rdir):\r\n # Get the latest remote\r\n try:\r\n cmd_log(['git', 'fetch', ], rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Unable to fetch remote: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n\r\n # Check if the branch is available from the remote.\r\n cmd = ['git', 'ls-remote', 'origin', '-h', 'refs/heads/{0}'.format(branch), ]\r\n try:\r\n output = cmd_log(cmd, rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Getting a list of remote branches failed: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n if not branch in output:\r\n raise GitImportError(GitImportError.REMOTE_BRANCH_MISSING)\r\n # Check it the remote branch has already been made locally\r\n cmd = ['git', 'branch', '-a', ]\r\n try:\r\n output = cmd_log(cmd, rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Getting a list of local branches failed: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n branches = []\r\n for line in output.split('\\n'):\r\n branches.append(line.replace('*', '').strip())\r\n\r\n if branch not in branches:\r\n # Checkout with -b since it is remote only\r\n cmd = ['git', 'checkout', '--force', '--track',\r\n '-b', branch, 'origin/{0}'.format(branch), ]\r\n try:\r\n cmd_log(cmd, rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Unable to checkout remote branch: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n # Go ahead and reset hard to the newest version of the branch now that we know\r\n # it is local.\r\n try:\r\n cmd_log(['git', 'reset', '--hard', 'origin/{0}'.format(branch), ], rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Unable to reset to branch: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)",
"def branch(self, new_branch_id: str, empty: bool = False) -> None:\n self._check_connection()\n if empty:\n source = {}\n elif self._ref:\n source = {\n \"origin\": f\"{self._account}/{self._db}/{self._repo}/commit/{self._ref}\"\n }\n else:\n source = {\n \"origin\": f\"{self._account}/{self._db}/{self._repo}/branch/{self._branch}\"\n }\n\n self._dispatch(\"post\", self._branch_url(new_branch_id), source)",
"def get_branch(project_root: str) -> str:\n if os.path.isfile(os.path.join(os.path.abspath(project_root), os.pardir, os.pardir) + '/VERSION'):\n with open(os.path.join(os.path.abspath(project_root), os.pardir, os.pardir) + '/VERSION') as f:\n return f.read().replace('\\n', '')\n\n child = subprocess.Popen('cd {0} && git rev-parse --abbrev-ref HEAD'.format(project_root),\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.DEVNULL)\n exit_code = child.wait()\n branch = child.stdout.read().decode()\n if len(branch) != 0:\n branch = branch.replace('\\n', '')\n else:\n return 'unknown'\n if exit_code == 0 and branch != 'HEAD':\n return branch\n else:\n return 'unknown'",
"def test_branch_commit_get(repository: Repository) -> None:\n branch = repository.head\n assert repository.heads[branch.name] == branch.commit",
"def git_branch():\n git_branch = None\n branches = git(['branch']).split('\\n')\n pattern = re.compile(r'^\\*[ ]+(?P<branch>.*)$')\n for branch in branches:\n matches = pattern.match(branch)\n if matches:\n git_branch = matches.group('branch')\n return git_branch, '.', '.'",
"def get_branch(repo, ref='HEAD'):\n return subprocess.check_output(['git', 'rev-parse', '--verify',\n '--abbrev-ref', ref], cwd=repo).rstrip()"
] | [
"0.6567766",
"0.615474",
"0.6144223",
"0.6067799",
"0.5996831",
"0.5930555",
"0.5926467",
"0.58662117",
"0.58450073",
"0.5838003",
"0.58024377",
"0.57683945",
"0.5761088",
"0.574812",
"0.56977725",
"0.5657013",
"0.56563455",
"0.56526315",
"0.5631415",
"0.56144863",
"0.5603229",
"0.5601096",
"0.55965817",
"0.5592245",
"0.5590516",
"0.5570385",
"0.55598897",
"0.55356",
"0.55279696",
"0.5525877"
] | 0.6462633 | 1 |
Does this branch map our placeholder file? Returns non-False if mapped, None or empty string if not. | def _is_placeholder_mapped(self):
return self.ctx.gwt_path(
p4gf_const.P4GF_EMPTY_CHANGELIST_PLACEHOLDER).to_depot() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def isBasedInHiddenFile(self):\n #type: () -> Optional[bool]\n return (\n None if self.realFileName is None #if before\n else self.realFileName != self.fileName\n )",
"def fileProcessed(self,fileInstance):\n if hasattr(fileInstance,\"name\"): name=fileInstance.name\n elif hasattr(fileInstance,\"url\"): name=fileInstance.url\n if name in self.emptyFileFlag: return self.emptyFileFlag[name]\n else: return False",
"def is_local(self):\n if not \"COLLABORATIVE\" in self._file.upper():\n LOGGER.debug(['AIE4606', 'match_false'], {'file': self._file})\n return True\n else:\n LOGGER.debug(['AIE4607', 'match_true'], {'file': self._file})\n return False\n return self._is_local",
"def isPregenerated(self):\n return self.fileLocation is not None",
"def check_exist(name, map):\r\n f = open(PATH,mode='r')\r\n file = yaml.load(f)\r\n f.close()\r\n if file is None:\r\n return (False, -1, -9, -9, [])\r\n elif name in file:\r\n if \"CSV\" in file[name]:\r\n return (True, file[name][\"id\"], file[name][\"hash\"], file[name][\"csv_hash\"], file[name][\"children\"])\r\n else:\r\n return (True, file[name][\"id\"], file[name][\"hash\"], -9, file[name][\"children\"])\r\n elif name+\"_\"+map in file:\r\n n = name+\"_\"+map\r\n if \"CSV\" in file[n]:\r\n return (True, file[n][\"id\"], file[n][\"hash\"], file[n][\"csv_hash\"], file[n][\"children\"])\r\n else:\r\n return (True, file[n][\"id\"], file[n][\"hash\"], -9, file[n][\"children\"])\r\n return (False, -1, -9, -9, [])",
"def has_remap(self):\n return self.mapping1 is not None or self.mapping2 is not None",
"def check(self):\n return self.tile==\"\"",
"def init_place(self):\n if self._f == None:\n return False\n else:\n return True",
"def is_placeholder(self):\n return _child(self.__nvXxPr.nvPr, 'p:ph') is not None",
"def get_gt_map(raster_map, gt_maps):\n\n for gt_m in gt_maps:\n map_name = ntpath.basename(raster_map).split(\".\")[0]\n gt_map_name = ntpath.basename(gt_m).split(\".\")[0].replace(\"_y\", \"\")\n\n if map_name == gt_map_name:\n logger.info(\"X: %s Y: %s\", map_name, gt_map_name)\n\n return gt_m\n\n logger.warning(\"Unable to get ground truth image for %s\", raster_map)\n\n return None",
"def __nonzero__(self):\n return any(self.path)",
"def is_present(self):\n return self.file_is_present()",
"def has_merge(self) -> Optional[str]:\n return self.source_name is not None",
"def verifyShiftFile(self):\n if self['refimage'] and fu.findFile(self['refimage']):\n return True\n else: return False",
"def has_filename(self):\n if self.filename == \"untitled\":\n return False\n else:\n return True",
"def checkMap(self):\n return True",
"def __nonzero__(self):\n\n return not ipset.ipmap_is_empty(self.map)",
"def has_picture(self):\n try:\n first = self.picture_planets()[0]\n except IndexError:\n first = None\n\n return first is not None",
"def has_fileout(self):\n return self.fileout is not None",
"def _get_path_or_dummy(self, fuse_path):\n cache_path = self.converter.to_cache_path(fuse_path)\n dummy_cache_path = self.converter.add_dummy_ending(cache_path)\n if os.path.exists(cache_path):\n return cache_path\n elif os.path.exists(dummy_cache_path):\n return dummy_cache_path\n return None",
"def has_file(self) -> bool:\n return self._file is not None",
"def check_mapping_file(mapping_fp,\r\n output_dir=\".\",\r\n has_barcodes=True,\r\n char_replace=\"_\",\r\n verbose=True,\r\n variable_len_barcodes=False,\r\n disable_primer_check=False,\r\n added_demultiplex_field=None,\r\n suppress_html=False):\r\n\r\n header, mapping_data, run_description, errors, warnings =\\\r\n process_id_map(open(mapping_fp, 'U'), disable_primer_check,\r\n has_barcodes, char_replace, variable_len_barcodes,\r\n added_demultiplex_field, strip_quotes=False, suppress_stripping=True)\r\n\r\n if not suppress_html:\r\n formatted_html = format_mapping_html_data(header, mapping_data,\r\n errors, warnings)\r\n\r\n output_html = join(output_dir +\r\n basename(mapping_fp).replace('.txt', '') + \".html\")\r\n\r\n html_f = open(output_html, \"w\")\r\n html_f.write(formatted_html)\r\n\r\n # get QIIME directory\r\n qiime_dir = get_qiime_project_dir()\r\n\r\n # Write javascript file necessary for mouseover tooltips.\r\n # move javascript file to javascript output directory\r\n copyfile(join(qiime_dir, 'qiime', 'support_files',\r\n 'js/overlib.js'), join(output_dir, 'overlib.js'))\r\n\r\n corrected_mapping_data = correct_mapping_data(mapping_data,\r\n header, char_replace)\r\n\r\n output_corrected_fp = join(output_dir +\r\n basename(mapping_fp).replace('.txt', '') + \"_corrected.txt\")\r\n\r\n write_corrected_mapping(output_corrected_fp, header, run_description,\r\n corrected_mapping_data)\r\n\r\n output_log_fp = join(output_dir +\r\n basename(mapping_fp).replace('.txt', '') + \".log\")\r\n\r\n write_log_file(output_log_fp, errors, warnings)\r\n\r\n if verbose:\r\n if errors or warnings:\r\n print \"Errors and/or warnings detected in mapping file. Please \" +\\\r\n \"check the log and html file for details.\"\r\n else:\r\n print \"No errors or warnings were found in mapping file.\"",
"def check_for_map(self):\n try:\n map = self.global_dict['fsp_out_map']\n except AttributeError:\n out_msg = \"Not computing nearest distance. Need to have loaded an ESDF\"\n print(out_msg)\n return None\n\n if map is None:\n out_msg = \"Not computing nearest distance. Need to have loaded an ESDF\"\n print(out_msg)\n return None\n\n return map",
"def missing_mappings(self):\n return [ mapping for mapping in self.mapping_names() if not config.file_in_cache(self.name, self.observatory) ]",
"def check_tilename(self, tilename):\n\n check = False\n self.decode_tilename(tilename)\n check = True\n return check",
"def is_map(self, alias):\n maps = {\"Ensembl2Reactome_All_Levels\": False,\n \"ReactomePathways\": True,\n \"reactome.homo_sapiens.interactions.tab-delimited\": False,\n \"ReactomePathwaysRelation\": True}\n return maps[alias]",
"def is_prepared(self):\n return os.path.exists(os.path.join(self.location, INFO_NM))",
"def found_empty_file(self):\n self.is_empty = True",
"def expected_output(self):\n expected_output_file = path.splitext(self.source_name)[0] + \".expected\"\n if not path.exists(expected_output_file):\n return None\n else:\n with open(expected_output_file, \"r\", encoding=\"utf8\") as f:\n return f.read()",
"def mapping(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"mapping\")"
] | [
"0.56640106",
"0.5638936",
"0.55160695",
"0.5359276",
"0.5341279",
"0.5287153",
"0.5251435",
"0.52480775",
"0.521388",
"0.5208746",
"0.5203581",
"0.51782346",
"0.51517147",
"0.5145897",
"0.51427215",
"0.51353425",
"0.5104298",
"0.5096157",
"0.5081865",
"0.506784",
"0.5066481",
"0.5057402",
"0.5038275",
"0.503023",
"0.50233287",
"0.5019814",
"0.5016763",
"0.4975096",
"0.4967411",
"0.4949422"
] | 0.693119 | 0 |
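The row above pairs a placeholder-mapping docstring with its Git Fusion implementation, which treats a falsy mapping result as "not mapped" and uses that as the guard for empty commits. As a rough illustration only (not part of the dataset), the sketch below models the same guard with hypothetical stand-ins: `gwt_path_to_depot` replaces the repo's path converter (`ctx.gwt_path(...).to_depot()`) and `PLACEHOLDER` replaces `p4gf_const.P4GF_EMPTY_CHANGELIST_PLACEHOLDER`.

```python
# Rough sketch, assuming a callable that maps a work-tree path to a depot path
# (or returns None/'' when the path is not in the branch view). All names here
# are hypothetical stand-ins for the real Git Fusion helpers.
PLACEHOLDER = ".p4gf_placeholder"


def is_placeholder_mapped(gwt_path_to_depot):
    """Return the depot path when the placeholder is mapped, else None or ''."""
    return gwt_path_to_depot(PLACEHOLDER)


def reject_empty_commit(files, gwt_path_to_depot):
    # Empty commits are only allowed when the branch view maps the root
    # placeholder; otherwise the push is rejected.
    if not files and not is_placeholder_mapped(gwt_path_to_depot):
        raise ValueError("Empty commit not permitted: branch view must map root.")


# A view that does not map the placeholder rejects an empty commit.
try:
    reject_empty_commit([], lambda path: None)
except ValueError as exc:
    print(exc)
```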
If any of the files in this commit intersect any fully populated branch (other than the current branch), then reject this commit. Shared/common/overlapping paths in branch views must be read-only from Git. Otherwise you end up with a Git push of a commit on one Git branch inserting changes into other Git branches behind Git's back. To modify shared paths, either do so from Perforce, or create a Git Fusion repo with no more than one branch that maps that shared path. | def _check_overlap(self, fe_commit):
# +++ Avoid O(b branches * r rev) checks when
# overlap is impossible because current branch
# overlaps no other branch.
if self._current_branch not in self._overlapping_branch_list():
return
for fe_file in fe_commit['files']:
gwt_path = fe_file['path']
depot_path = self.ctx.gwt_to_depot_path(gwt_path)
for branch in self._overlapping_branch_list():
if branch == self._current_branch:
continue
if not branch.intersects_depot_path(depot_path):
continue
LOG.debug("_check_overlap() branch {br1} <> {br2}"
" gwt={gwt:<40} {dp}\n{view}"
.format(
br1 = p4gf_util.abbrev(self._current_branch.branch_id)
, br2 = p4gf_util.abbrev(branch.branch_id)
, gwt = gwt_path
, dp = depot_path
, view = "\n".join(branch.view_p4map.as_array())
))
if self._current_branch.is_new_fp_from_push or branch.is_new_fp_from_push:
current_branch_name = self._current_branch.git_branch_name
if self._current_branch.is_new_fp_from_push:
current_branch_name += '(new)'
other_branch_name = branch.git_branch_name
if branch.is_new_fp_from_push:
other_branch_name += '(new)'
human_msg = (_(
"Perforce: Cannot commit {sha1} '{gwt_path}' to '{depot_path}'.\n"
" You are attempting to push and create a new fully populated branch\n"
" with paths which overlap another branch. Contact your admin\n"
" to configure non-conflicting destination branch paths.\n"
" Branches: '{b1}', '{b2}'")
.format( sha1 = p4gf_util.abbrev(fe_commit['sha1'])
, gwt_path = gwt_path
, depot_path = depot_path
, b1 = current_branch_name
, b2 = other_branch_name ))
else:
human_msg = (_(
"Cannot commit {sha1} '{gwt_path}' to '{depot_path}'."
" Paths that overlap multiple Git Fusion branches are read-only."
" Branches: '{b1}', '{b2}'")
.format( sha1 = p4gf_util.abbrev(fe_commit['sha1'])
, gwt_path = gwt_path
, depot_path = depot_path
, b1 = self._current_branch.branch_id
, b2 = branch.branch_id ))
raise PreflightException(human_msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_commit_for_branch( self\n , commit\n , branch_id\n , any_locked_files\n , case_conflict_checker ):\n rev = commit['sha1']\n if LOG.isEnabledFor(logging.DEBUG):\n LOG.debug(\"check_commit_for_branch() \"\n \"Checking branch={} mark={} sha1={} file-ct={} -- {}\"\n .format( branch_id\n , commit['mark']\n , p4gf_util.abbrev(rev)\n , len(commit['files'])\n , repr(commit['data'])[:20].splitlines()[0]))\n\n if self._already_copied_commit(rev, branch_id):\n return\n\n # following checks assume client has been set for branch\n self.ensure_branch_preflight(commit, branch_id)\n with self.ctx.switched_to_branch(\n self._current_branch\n , set_client=self.set_client_on_branch_switch\n ):\n if case_conflict_checker:\n case_conflict_checker.read_fast_export_commit(\n commit, self._current_branch)\n\n # Empty commits require root-level .p4gf_placeholder to be mapped\n # in the current branch view.\n if not commit['files'] and not self._is_placeholder_mapped():\n raise PreflightException(\n _(\"Empty commit {sha1} not permitted. Git Fusion branch views\"\n \" must include root to permit empty commits.\")\n .format(sha1=p4gf_util.abbrev(rev)))\n\n with Timer(CHECK_PROTECTS):\n self._check_protects(commit['author_p4user'], commit['files'])\n\n with Timer(CHECK_OVERLAP):\n self._check_overlap(commit)\n\n # fetch the branch setting only, without cascading to repo/global config\n if self._current_branch.is_read_only:\n raise PreflightException(_(\"Push to branch {branch} prohibited.\")\n .format(branch=self._current_branch.git_branch_name))\n self._check_stream_writable(commit)\n self._check_stream_in_classic(commit)\n\n LOG.debug('checking locked files under //{}/...'.format(self.ctx.p4.client))\n if any_locked_files:\n # Convert the git commit paths to depotPaths\n files_in_commit = [self.ctx.gwt_path(f['path']).to_depot()\n for f in commit['files']]\n LOG.debug(\"files_in_commit {0}\".format(files_in_commit))\n for f in files_in_commit:\n if f in any_locked_files:\n # Collect the names (and clients) of users with locked files.\n # Report back to the pusher so they can take appropriate action.\n msg = _('{file} - locked by {user}').format(file=f,\n user=any_locked_files[f])\n LOG.info(msg)\n raise PreflightException(msg)\n\n # +++ Spend time extracting Jobs and P4Changelist owner\n # here if we actually do need to call\n # the preflight-commit hook.\n if self.ctx.preflight_hook.is_callable():\n jobs = G2PJob.extract_jobs(commit['data'])\n jobs2 = G2PJob.lookup_jobs(self.ctx, jobs)\n self.ctx.preflight_hook(\n ctx = self.ctx\n , fe_commit = commit\n , branch_id = branch_id\n , jobs = jobs2\n )",
"def check_commit(self, commit):\n # pylint: disable=too-many-branches\n if LOG.isEnabledFor(logging.DEBUG):\n LOG.debug('check_commit() Checking mark={} sha1={} file-ct={} -- {}'\n .format( commit['mark']\n , p4gf_util.abbrev(commit['sha1'])\n , len(commit['files'])\n , repr(commit['data'])[:20].splitlines()[0]))\n\n if not commit['author_p4user']:\n raise PreflightException(_(\"User '{user}' not permitted to commit\")\n .format(user=commit['author']['email'].strip('<>')))\n\n if 'merge' in commit:\n ref_is_review = (self.gsreview_coll and\n self.gsreview_coll.ref_in_review_list(self._current_prt.ref))\n if not ref_is_review and not self.ctx.merge_commits:\n raise PreflightException(_('Merge commits are not enabled for this repo.'))\n if (not ref_is_review and\n not self.ctx.branch_creation and self.assigner.have_anonymous_branches):\n msg = _('Git branch creation is prohibited for this repo.')\n p4_branch_names_non_lw = [b.git_branch_name for b in self.ctx.branch_dict().values()\n if b.git_branch_name and not b.is_lightweight]\n if len(p4_branch_names_non_lw) > 1:\n msg += _('\\nThis repo has more than one named branch.'\n '\\nTry altering the push order - '\n 'pushing branches with merge ancestors first.')\n raise PreflightException(msg)\n if LOG.isEnabledFor(logging.DEBUG):\n for parent_mark in commit['merge']:\n parent_sha1 = self.fast_export_marks.get_commit(parent_mark)[:7]\n LOG.debug(\"check_commit() merge mark={} sha1={}\"\n .format(parent_mark, parent_sha1))\n\n if not self.ctx.submodules and 'files' in commit:\n for f in commit['files']:\n if f.get('mode') == '160000':\n if 'first_commit' in commit and not self._path_added(f.get('path'), commit):\n LOG.debug2('check_commit() passed {} in {}'.format(\n f.get('path'), p4gf_util.abbrev(commit['sha1'])))\n continue\n raise PreflightException(\n _('Git submodules not permitted: path={path} commit={commit_sha1}')\n .format(path=f.get('path'), commit_sha1=p4gf_util.abbrev(commit['sha1'])))\n\n for f in commit['files']:\n LOG.debug3(\"check_commit : commit files: \" + _log_fe_file(f))\n err = check_valid_filename(f['path'], self.ctx)\n if err:\n raise PreflightException(err)\n if self.ctx.is_lfs_enabled:\n self._check_lfs(commit, f)\n\n # Warn user about any jobs that appear to not exist\n jobs = G2PJob.lookup_jobs(self.ctx, G2PJob.extract_jobs(commit['data']))\n if jobs:\n for job_id in jobs:\n r = self.ctx.p4run('jobs', '-e', 'job={}'.format(job_id))\n if not r:\n _print_error(_(\"Job '{job_id}' doesn't exist\").format(job_id=job_id))\n # Create pending changes for any Git-Swarm reviews",
"def _abort_on_conflicting_untracked_paths(self) -> None:\n repo = get_git_repo()\n\n if not repo or self._base_commit is None:\n return\n\n changed_paths = set(\n self._status.added\n + self._status.modified\n + self._status.removed\n + self._status.unmerged\n )\n untracked_paths = {\n self._fname_to_path(repo, str(path))\n for path in (self._dirty_paths_by_status.get(StatusCode.Untracked, []))\n }\n overlapping_paths = untracked_paths & changed_paths\n\n if overlapping_paths:\n raise ActionFailure(\n \"Some paths that changed since the baseline commit now show up as untracked files. \"\n f\"Please commit or stash your untracked changes in these paths: {overlapping_paths}.\"\n )",
"def ensure_branch_preflight(self, commit, branch_id):\n log = LOG.getChild('ensure_branch_preflight')\n branch = self.ctx.branch_dict().get(branch_id)\n # branch should never be None here. p4gf_branch_id.Assigner() must\n # create Branch objects for each assignment.\n\n if self._current_branch \\\n and self._current_branch.branch_id == branch_id:\n log.debug(\"sha={} want branch_id={} curr branch_id={} NOP\"\n .format( commit['sha1'][:7]\n , branch_id[:7]\n , self._current_branch.branch_id[:7]))\n log.debug(\"staying on branch {}\"\n .format(self.ctx.branch_dict().get(branch_id)))\n\n return branch\n\n cbid = self._current_branch.branch_id if self._current_branch else 'None'\n log.debug(\"sha={} want branch_id={} curr branch_id={} switch\"\n .format(commit['sha1'][:7], branch_id[:7], cbid[:7]))\n\n if not branch.view_lines:\n self.finish_branch_definition(commit, branch)\n\n elif branch.view_p4map:\n # if this is a stream branch, check for mutation of the stream's\n # view by comparing with the original view saved in p4gf_config2\n if branch.original_view_lines:\n original_view_lines = '\\n'.join(branch.original_view_lines)\n view_lines = p4gf_path_convert.convert_view_to_no_client_name(branch.view_lines)\n if not view_lines == original_view_lines:\n raise PreflightException(\n _('Unable to push. Stream view changed from:\\n'\n '{old_view}\\nto:\\n{new_view}')\n .format(old_view=original_view_lines, new_view=view_lines))\n # Find existing depot branch for branch view's LHS.\n lhs = branch.view_p4map.lhs()\n branch.depot_branch = self.ctx.depot_branch_info_index() \\\n .find_depot_path(lhs[0])\n\n log.debug(\"switching to branch {}\".format(branch))\n\n # By now we should have a branch and a branch.view_lines.\n # First remove current branch's files from workspace\n # Client spec is set to normdir\n self._current_branch = branch\n return branch",
"def _check_stream_in_classic(self, fe_commit):\n if self._current_branch.stream_name:\n return\n\n depot_re = re.compile(r'^//([^/]+)/([^/]+)/.*$')\n for fe_file in fe_commit['files']:\n gwt_path = fe_file['path']\n depot_path = self.ctx.gwt_path(gwt_path).to_depot()\n m = depot_re.match(depot_path)\n if m:\n depot = m.group(1)\n if depot in self.stream_depots:\n stream = '//{}/{}'.format(m.group(1), m.group(2))\n human_msg = (\n _(\"Cannot commit {sha1} '{gwt_path}' to '{depot_path}'.\"\n \" Paths in stream '{stream}' are read-only for branch '{b}'.\")\n .format( sha1 = p4gf_util.abbrev(fe_commit['sha1'])\n , gwt_path = gwt_path\n , depot_path = depot_path\n , stream = stream\n , b = self._current_branch.branch_id ))\n raise PreflightException(human_msg)",
"def test_set_commits_empty_exclude(self):\n\n temp = self.Temp(self.items, conds=[EmptyExclude()])\n empty_exclude = temp.conds[0]\n empty_exclude.set_commits(temp.df)\n\n commit = CommitGit(self.items, conds=[EmptyExclude()])\n self.assertEqual(empty_exclude.included, commit.conds[0].included)",
"def _check_stream_writable(self, fe_commit):\n if not self._current_branch.stream_name:\n return\n prefix = self._current_branch.writable_stream_name + '/'\n for fe_file in fe_commit['files']:\n gwt_path = fe_file['path']\n depot_path = self.ctx.gwt_path(gwt_path).to_depot()\n if depot_path.startswith(prefix):\n continue\n\n human_msg = (_(\n \"Cannot commit {sha1} '{gwt_path}' to '{depot_path}'.\"\n \" Paths not in stream '{stream}' are read-only for branch '{b}'.\")\n .format( sha1 = p4gf_util.abbrev(fe_commit['sha1'])\n , gwt_path = gwt_path\n , depot_path = depot_path\n , stream = self._current_branch.writable_stream_name\n , b = self._current_branch.branch_id ))\n raise PreflightException(human_msg)",
"def test_cherrypick_conflict_edit(repository: Repository, path: Path) -> None:\n main = repository.head\n branch = repository.heads.create(\"branch\")\n\n repository.checkout(branch)\n updatefile(path, \"a\")\n\n repository.checkout(main)\n updatefile(path, \"b\")\n\n with pytest.raises(MergeConflictError, match=path.name):\n repository.cherrypick(branch.commit)",
"def test_commit_on_unborn_branch(tmp_path: Path) -> None:\n repository = Repository.init(tmp_path / \"repository\")\n repository.commit(message=\"initial\")\n\n assert not repository.head.commit.parents",
"def _abort_on_pending_changes(self) -> None:\n if set(self._dirty_paths_by_status) - {StatusCode.Untracked}:\n raise ActionFailure(\n \"Found pending changes in tracked files. Diff-aware runs require a clean git state.\"\n )",
"def test_cherrypick_with_untracked_files(repository: Repository, path: Path) -> None:\n main = repository.head\n branch = repository.heads.create(\"branch\")\n\n repository.checkout(branch)\n updatefile(path)\n\n untracked = repository.path / \"untracked-file\"\n untracked.touch()\n\n repository.checkout(main)\n repository.cherrypick(branch.commit)\n\n assert untracked.name not in repository.head.commit.tree",
"def __gitStashBranch(self):\n pfile = self.project.getProjectFile()\n lastModified = QFileInfo(pfile).lastModified().toString()\n shouldReopen = (\n self.vcs.gitStashBranch(self.project.getProjectPath()) or\n QFileInfo(pfile).lastModified().toString() != lastModified\n )\n if shouldReopen:\n res = E5MessageBox.yesNo(\n self.parent(),\n self.tr(\"Create Branch\"),\n self.tr(\"\"\"The project should be reread. Do this now?\"\"\"),\n yesDefault=True)\n if res:\n self.project.reopenProject()",
"def test_set_commits_merge_exclude(self):\n\n temp = self.Temp(self.items, conds=[MergeExclude()])\n merge_exclude = temp.conds[0]\n merge_exclude.set_commits(temp.df)\n\n commit = CommitGit(self.items, conds=[MergeExclude()])\n self.assertEqual(merge_exclude.included, commit.conds[0].included)",
"def git_removed_files(self):\n\n etc_tracked = self.repo.tracked_files('etc-tmp')\n for rpath in etc_tracked:\n etc_file = os.path.join(self.root_dir, rpath)\n if not os.path.lexists(etc_file):\n self.etc_commits.removed.rpaths.append(rpath)\n self.etc_commits.removed.commit()\n\n master_tracked = self.repo.tracked_files('master-tmp')\n for rpath in master_tracked:\n etc_file = os.path.join(self.root_dir, rpath)\n if not os.path.lexists(etc_file):\n self.master_commits.removed.rpaths.append(rpath)\n self.master_commits.removed.commit()",
"def _already_copied_commit(self, commit_sha1, branch_id):\n if not self.already_copied_commit_runner:\n return False\n return self.already_copied_commit_runner.already_copied_commit(\n commit_sha1, branch_id)",
"def path_touched(*paths, commit_range):\n return check_output([\n 'git', 'diff', '--name-only', commit_range, '--', *paths\n ]).decode('utf-8').strip() != ''",
"def test_cherrypick_conflict_deletion(repository: Repository, path: Path) -> None:\n updatefile(path, \"a\")\n\n main = repository.head\n branch = repository.heads.create(\"branch\")\n\n repository.checkout(branch)\n updatefile(path, \"b\")\n\n repository.checkout(main)\n removefile(path)\n\n with pytest.raises(MergeConflictError, match=path.name):\n repository.cherrypick(branch.commit)",
"def lint_commit_base(commit):\n success = True\n # Merge commits have two parents, we maintain a linear history.\n if len(commit.parents) > 1:\n error(\n \"Please resolve merges by re-basing. Merge commits are not allowed.\",\n commit)\n success = False\n\n return success",
"def resolve_conflicts(self, commit=True):\n pass # pragma: no cover",
"def test_no_change(self):\r\n git_export_utils.export_to_git(\r\n self.course.id,\r\n 'file://{0}'.format(self.bare_repo_dir)\r\n )\r\n\r\n with self.assertRaisesRegexp(GitExportError,\r\n str(GitExportError.CANNOT_COMMIT)):\r\n git_export_utils.export_to_git(\r\n self.course.id, 'file://{0}'.format(self.bare_repo_dir))",
"def _check_branch(opt, params):\n\n # Check the current branch and hash\n _get_branch(opt)\n\n if params.git_branch != opt.git_branch or params.git_hash != opt.git_hash:\n msg = 'You are not on the right branch or commit. Please run the following in the repository: \\n'\n msg += f'git checkout {params.git_branch}\\n'\n msg += f'git revert {params.git_hash}'\n sys.exit(msg)",
"def check_commits(self, commits):\n LOG.info('Checking Perforce permissions and locks')\n self.ctx.checkpoint(\"copy_to_p4._preflight_check\")\n\n # Stop if files are opened in our repo client\n # We expect this to be none, since we have the view lock\n opened = self.ctx.p4.run(['opened', '-m1'])\n if opened:\n raise PreflightException(_('There are files opened by Git Fusion for this repo.'))\n\n # fetch the repo setting only, without cascading to global config\n is_read_only = self.ctx.repo_config.getboolean(p4gf_config.SECTION_REPO,\n p4gf_config.KEY_READ_ONLY,\n fallback=False)\n if is_read_only:\n raise PreflightException(_(\"Push to repo {repo_name} prohibited.\")\n .format(repo_name=self.ctx.config.repo_name))\n\n # get a list of stream depots for later checks for read-only paths\n depots = self.ctx.p4.run(['depots'])\n self.stream_depots = set([d['name'] for d in depots if d['type'] == 'stream'])\n any_locked_files = self._find_locked_by()\n LOG.debug(\"any_locked_files {0}\".format(any_locked_files))\n case_conflict_checker = None\n if not self.ctx.server_is_case_sensitive:\n case_conflict_checker = CaseConflictChecker(self.ctx)\n case_conflict_checker.read_perforce_paths()\n\n ui_name = self._curr_ref_ui_name()\n if ui_name:\n progress_msg = _('Checking commits for {ref}...').format(ref=ui_name)\n else:\n progress_msg = _('Checking commits...')\n\n with ProgressReporter.Determinate(len(commits)):\n for commit in commits:\n ProgressReporter.increment(progress_msg)\n\n self.g2p_user.get_author_pusher_owner(commit)\n\n rev = commit['sha1']\n if not self.assigner.is_assigned(commit['sha1']):\n continue\n\n self.check_commit(commit)\n\n for branch_id in self.assigner.branch_id_list(rev):\n self.check_commit_for_branch(\n commit\n , branch_id\n , any_locked_files\n , case_conflict_checker )\n\n if case_conflict_checker:\n cc_text = case_conflict_checker.conflict_text()\n if cc_text:\n raise PreflightException(cc_text)",
"def ensure_remote_branch_is_tracked(branch):\n if branch == MASTER_BRANCH:\n # We don't need to explicitly track the master branch, so we're done.\n return\n\n # Ensure the specified branch is in the local branch list.\n output = subprocess.check_output(['git', 'branch', '--list'])\n for line in output.split('\\n'):\n if line.strip() == branch:\n # We are already tracking the remote branch\n break\n else:\n # We are not tracking the remote branch, so track it.\n try:\n sys.stdout.write(subprocess.check_output(\n ['git', 'checkout', '--track', 'origin/%s' % branch]))\n except subprocess.CalledProcessError:\n # Bail gracefully.\n raise SystemExit(1)",
"def __gitCherryPick(self):\n pfile = self.project.getProjectFile()\n lastModified = QFileInfo(pfile).lastModified().toString()\n shouldReopen = (\n self.vcs.gitCherryPick(self.project.getProjectPath()) or\n QFileInfo(pfile).lastModified().toString() != lastModified\n )\n if shouldReopen:\n res = E5MessageBox.yesNo(\n None,\n self.tr(\"Copy Commits\"),\n self.tr(\"\"\"The project should be reread. Do this now?\"\"\"),\n yesDefault=True)\n if res:\n self.project.reopenProject()",
"def test_merge_not_fail_draftpath_intersection(self):\n path_a = PathFactory.create(name=\"A\", geom=LineString((0, 0), (10, 0)))\n path_b = PathFactory.create(name=\"B\", geom=LineString((10, 0), (20, 0)))\n PathFactory.create(name=\"C\", geom=LineString((10, 0), (10, 10)), draft=True)\n response = self.client.post(reverse('core:path-drf-merge-path'), {'path[]': [path_a.pk, path_b.pk]})\n self.assertIn('success', response.json())",
"def test_resetcherrypick_keeps_unrelated_deletions(\n repository: Repository, paths: Iterator[Path]\n) -> None:\n main = repository.head\n update, _ = createbranches(repository, \"update\", \"latest\")\n path1, path2 = next(paths), next(paths)\n\n repository.checkout(update)\n updatefile(path1, \"a\")\n\n repository.checkout(main)\n updatefile(path1, \"b\")\n updatefile(path2)\n\n path2.unlink()\n\n with pytest.raises(MergeConflictError, match=path1.name):\n repository.cherrypick(update.commit)\n\n repository.resetcherrypick()\n\n assert not path2.exists()",
"def __gitBranch(self):\n pfile = self.project.getProjectFile()\n lastModified = QFileInfo(pfile).lastModified().toString()\n shouldReopen = (\n self.vcs.gitBranch(self.project.getProjectPath())[1] or\n QFileInfo(pfile).lastModified().toString() != lastModified\n )\n if shouldReopen:\n res = E5MessageBox.yesNo(\n self.parent(),\n self.tr(\"Branch\"),\n self.tr(\"\"\"The project should be reread. Do this now?\"\"\"),\n yesDefault=True)\n if res:\n self.project.reopenProject()",
"def test_resetcherrypick_keeps_unrelated_changes(\n repository: Repository, paths: Iterator[Path]\n) -> None:\n main = repository.head\n update, _ = createbranches(repository, \"update\", \"latest\")\n path1, path2 = next(paths), next(paths)\n\n repository.checkout(update)\n updatefile(path1, \"a\")\n\n repository.checkout(main)\n updatefile(path1, \"b\")\n updatefile(path2)\n\n path2.write_text(\"c\")\n\n with pytest.raises(MergeConflictError, match=path1.name):\n repository.cherrypick(update.commit)\n\n repository.resetcherrypick()\n\n assert path2.read_text() == \"c\"",
"def ensure_sync_master_branch(self):\n # TODO(robertocn): Investigate what causes the states mentioned in the\n # docstring in the first place.\n self.api.m.git('update-ref', 'refs/heads/master',\n 'refs/remotes/origin/master')\n self.api.m.git('checkout', 'master', cwd=self.api.m.path['checkout'])",
"def try_push_special_refs(repo):\n # test pushing to the 'private' dev/arcyd/ area, where arcyd will store\n # it's tracker branches\n repo('push', 'origin', '--dry-run', 'HEAD:refs/heads/dev/arcyd/test')\n\n # test pushing to the refs/arcyd area, where the 'landed' and 'abandoned'\n # archive branches will live\n repo('push', 'origin', '--dry-run', 'HEAD:refs/arcyd/test')"
] | [
"0.70267016",
"0.6255848",
"0.6211556",
"0.6012206",
"0.5753158",
"0.57477814",
"0.574088",
"0.5717972",
"0.57146573",
"0.5703639",
"0.5662798",
"0.5624615",
"0.5610022",
"0.5576558",
"0.5571656",
"0.5534969",
"0.5534018",
"0.5515806",
"0.5472234",
"0.5436517",
"0.5434736",
"0.54254645",
"0.5401665",
"0.5388296",
"0.5330198",
"0.5310682",
"0.52652127",
"0.5262493",
"0.5255708",
"0.5246082"
] | 0.7310925 | 0 |
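As with the previous row, a minimal sketch follows to illustrate the overlap rule described above; it is illustrative only and not part of the dataset. Branch views are reduced here to hypothetical depot-path prefixes, whereas the real implementation consults full Perforce view maps via `intersects_depot_path`.

```python
# Minimal sketch of the overlap rule, assuming branch views can be reduced to
# depot-path prefixes. `Branch`, `prefix`, and the sample paths are hypothetical.
from dataclasses import dataclass
from typing import List


@dataclass
class Branch:
    branch_id: str
    prefix: str  # simplified stand-in for a branch view

    def intersects_depot_path(self, depot_path: str) -> bool:
        return depot_path.startswith(self.prefix)


def check_overlap(current: Branch, all_branches: List[Branch],
                  depot_paths: List[str]) -> None:
    # Reject any path that also falls inside another branch's view: such
    # shared paths are read-only from Git.
    for depot_path in depot_paths:
        for other in all_branches:
            if other.branch_id == current.branch_id:
                continue
            if other.intersects_depot_path(depot_path):
                raise ValueError(
                    "{!r} overlaps branch {!r}; overlapping paths are "
                    "read-only from Git.".format(depot_path, other.branch_id))


main = Branch("main", "//depot/shared/")
dev = Branch("dev", "//depot/dev/")
check_overlap(dev, [main, dev], ["//depot/dev/a.c"])          # accepted
try:
    check_overlap(dev, [main, dev], ["//depot/shared/a.c"])   # rejected
except ValueError as exc:
    print(exc)
```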
If this is a stream branch, check that all files in the commit are writable. If any of the files is not writable, reject this commit. | def _check_stream_writable(self, fe_commit):
if not self._current_branch.stream_name:
return
prefix = self._current_branch.writable_stream_name + '/'
for fe_file in fe_commit['files']:
gwt_path = fe_file['path']
depot_path = self.ctx.gwt_path(gwt_path).to_depot()
if depot_path.startswith(prefix):
continue
human_msg = (_(
"Cannot commit {sha1} '{gwt_path}' to '{depot_path}'."
" Paths not in stream '{stream}' are read-only for branch '{b}'.")
.format( sha1 = p4gf_util.abbrev(fe_commit['sha1'])
, gwt_path = gwt_path
, depot_path = depot_path
, stream = self._current_branch.writable_stream_name
, b = self._current_branch.branch_id ))
raise PreflightException(human_msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_stream_in_classic(self, fe_commit):\n if self._current_branch.stream_name:\n return\n\n depot_re = re.compile(r'^//([^/]+)/([^/]+)/.*$')\n for fe_file in fe_commit['files']:\n gwt_path = fe_file['path']\n depot_path = self.ctx.gwt_path(gwt_path).to_depot()\n m = depot_re.match(depot_path)\n if m:\n depot = m.group(1)\n if depot in self.stream_depots:\n stream = '//{}/{}'.format(m.group(1), m.group(2))\n human_msg = (\n _(\"Cannot commit {sha1} '{gwt_path}' to '{depot_path}'.\"\n \" Paths in stream '{stream}' are read-only for branch '{b}'.\")\n .format( sha1 = p4gf_util.abbrev(fe_commit['sha1'])\n , gwt_path = gwt_path\n , depot_path = depot_path\n , stream = stream\n , b = self._current_branch.branch_id ))\n raise PreflightException(human_msg)",
"def check_commits(self, commits):\n LOG.info('Checking Perforce permissions and locks')\n self.ctx.checkpoint(\"copy_to_p4._preflight_check\")\n\n # Stop if files are opened in our repo client\n # We expect this to be none, since we have the view lock\n opened = self.ctx.p4.run(['opened', '-m1'])\n if opened:\n raise PreflightException(_('There are files opened by Git Fusion for this repo.'))\n\n # fetch the repo setting only, without cascading to global config\n is_read_only = self.ctx.repo_config.getboolean(p4gf_config.SECTION_REPO,\n p4gf_config.KEY_READ_ONLY,\n fallback=False)\n if is_read_only:\n raise PreflightException(_(\"Push to repo {repo_name} prohibited.\")\n .format(repo_name=self.ctx.config.repo_name))\n\n # get a list of stream depots for later checks for read-only paths\n depots = self.ctx.p4.run(['depots'])\n self.stream_depots = set([d['name'] for d in depots if d['type'] == 'stream'])\n any_locked_files = self._find_locked_by()\n LOG.debug(\"any_locked_files {0}\".format(any_locked_files))\n case_conflict_checker = None\n if not self.ctx.server_is_case_sensitive:\n case_conflict_checker = CaseConflictChecker(self.ctx)\n case_conflict_checker.read_perforce_paths()\n\n ui_name = self._curr_ref_ui_name()\n if ui_name:\n progress_msg = _('Checking commits for {ref}...').format(ref=ui_name)\n else:\n progress_msg = _('Checking commits...')\n\n with ProgressReporter.Determinate(len(commits)):\n for commit in commits:\n ProgressReporter.increment(progress_msg)\n\n self.g2p_user.get_author_pusher_owner(commit)\n\n rev = commit['sha1']\n if not self.assigner.is_assigned(commit['sha1']):\n continue\n\n self.check_commit(commit)\n\n for branch_id in self.assigner.branch_id_list(rev):\n self.check_commit_for_branch(\n commit\n , branch_id\n , any_locked_files\n , case_conflict_checker )\n\n if case_conflict_checker:\n cc_text = case_conflict_checker.conflict_text()\n if cc_text:\n raise PreflightException(cc_text)",
"def check_commit_for_branch( self\n , commit\n , branch_id\n , any_locked_files\n , case_conflict_checker ):\n rev = commit['sha1']\n if LOG.isEnabledFor(logging.DEBUG):\n LOG.debug(\"check_commit_for_branch() \"\n \"Checking branch={} mark={} sha1={} file-ct={} -- {}\"\n .format( branch_id\n , commit['mark']\n , p4gf_util.abbrev(rev)\n , len(commit['files'])\n , repr(commit['data'])[:20].splitlines()[0]))\n\n if self._already_copied_commit(rev, branch_id):\n return\n\n # following checks assume client has been set for branch\n self.ensure_branch_preflight(commit, branch_id)\n with self.ctx.switched_to_branch(\n self._current_branch\n , set_client=self.set_client_on_branch_switch\n ):\n if case_conflict_checker:\n case_conflict_checker.read_fast_export_commit(\n commit, self._current_branch)\n\n # Empty commits require root-level .p4gf_placeholder to be mapped\n # in the current branch view.\n if not commit['files'] and not self._is_placeholder_mapped():\n raise PreflightException(\n _(\"Empty commit {sha1} not permitted. Git Fusion branch views\"\n \" must include root to permit empty commits.\")\n .format(sha1=p4gf_util.abbrev(rev)))\n\n with Timer(CHECK_PROTECTS):\n self._check_protects(commit['author_p4user'], commit['files'])\n\n with Timer(CHECK_OVERLAP):\n self._check_overlap(commit)\n\n # fetch the branch setting only, without cascading to repo/global config\n if self._current_branch.is_read_only:\n raise PreflightException(_(\"Push to branch {branch} prohibited.\")\n .format(branch=self._current_branch.git_branch_name))\n self._check_stream_writable(commit)\n self._check_stream_in_classic(commit)\n\n LOG.debug('checking locked files under //{}/...'.format(self.ctx.p4.client))\n if any_locked_files:\n # Convert the git commit paths to depotPaths\n files_in_commit = [self.ctx.gwt_path(f['path']).to_depot()\n for f in commit['files']]\n LOG.debug(\"files_in_commit {0}\".format(files_in_commit))\n for f in files_in_commit:\n if f in any_locked_files:\n # Collect the names (and clients) of users with locked files.\n # Report back to the pusher so they can take appropriate action.\n msg = _('{file} - locked by {user}').format(file=f,\n user=any_locked_files[f])\n LOG.info(msg)\n raise PreflightException(msg)\n\n # +++ Spend time extracting Jobs and P4Changelist owner\n # here if we actually do need to call\n # the preflight-commit hook.\n if self.ctx.preflight_hook.is_callable():\n jobs = G2PJob.extract_jobs(commit['data'])\n jobs2 = G2PJob.lookup_jobs(self.ctx, jobs)\n self.ctx.preflight_hook(\n ctx = self.ctx\n , fe_commit = commit\n , branch_id = branch_id\n , jobs = jobs2\n )",
"def _validate_can_write(self):\n if self._mode not in WRITE_MODES:\n raise IOError(\"File is not writable\")\n if self.Writable == 'no':\n raise IOError(\"'Writable' flag is 'no'\")",
"def check_commit(self, commit):\n # pylint: disable=too-many-branches\n if LOG.isEnabledFor(logging.DEBUG):\n LOG.debug('check_commit() Checking mark={} sha1={} file-ct={} -- {}'\n .format( commit['mark']\n , p4gf_util.abbrev(commit['sha1'])\n , len(commit['files'])\n , repr(commit['data'])[:20].splitlines()[0]))\n\n if not commit['author_p4user']:\n raise PreflightException(_(\"User '{user}' not permitted to commit\")\n .format(user=commit['author']['email'].strip('<>')))\n\n if 'merge' in commit:\n ref_is_review = (self.gsreview_coll and\n self.gsreview_coll.ref_in_review_list(self._current_prt.ref))\n if not ref_is_review and not self.ctx.merge_commits:\n raise PreflightException(_('Merge commits are not enabled for this repo.'))\n if (not ref_is_review and\n not self.ctx.branch_creation and self.assigner.have_anonymous_branches):\n msg = _('Git branch creation is prohibited for this repo.')\n p4_branch_names_non_lw = [b.git_branch_name for b in self.ctx.branch_dict().values()\n if b.git_branch_name and not b.is_lightweight]\n if len(p4_branch_names_non_lw) > 1:\n msg += _('\\nThis repo has more than one named branch.'\n '\\nTry altering the push order - '\n 'pushing branches with merge ancestors first.')\n raise PreflightException(msg)\n if LOG.isEnabledFor(logging.DEBUG):\n for parent_mark in commit['merge']:\n parent_sha1 = self.fast_export_marks.get_commit(parent_mark)[:7]\n LOG.debug(\"check_commit() merge mark={} sha1={}\"\n .format(parent_mark, parent_sha1))\n\n if not self.ctx.submodules and 'files' in commit:\n for f in commit['files']:\n if f.get('mode') == '160000':\n if 'first_commit' in commit and not self._path_added(f.get('path'), commit):\n LOG.debug2('check_commit() passed {} in {}'.format(\n f.get('path'), p4gf_util.abbrev(commit['sha1'])))\n continue\n raise PreflightException(\n _('Git submodules not permitted: path={path} commit={commit_sha1}')\n .format(path=f.get('path'), commit_sha1=p4gf_util.abbrev(commit['sha1'])))\n\n for f in commit['files']:\n LOG.debug3(\"check_commit : commit files: \" + _log_fe_file(f))\n err = check_valid_filename(f['path'], self.ctx)\n if err:\n raise PreflightException(err)\n if self.ctx.is_lfs_enabled:\n self._check_lfs(commit, f)\n\n # Warn user about any jobs that appear to not exist\n jobs = G2PJob.lookup_jobs(self.ctx, G2PJob.extract_jobs(commit['data']))\n if jobs:\n for job_id in jobs:\n r = self.ctx.p4run('jobs', '-e', 'job={}'.format(job_id))\n if not r:\n _print_error(_(\"Job '{job_id}' doesn't exist\").format(job_id=job_id))\n # Create pending changes for any Git-Swarm reviews",
"def _check_writable_(self):\n self._check_within_context_()\n if self._mode != 'w':\n raise Exception('Cannot update database: read only mode')",
"def verify_blob_permissions(self, blob):\n path = self.csum_to_path(blob)\n return is_readonly(path)",
"def check_commit_problems(self, commit, diff):\n\n # Initialise\n self._license_problem = False\n self._commit_problem = False\n self._commit_notes = defaultdict(list)\n\n # Unsafe regex checks...\n unsafe_matches = list()\n unsafe_matches.append( r\"\\b(KRun::runCommand|K3?ShellProcess|setUseShell|setShellCommand)\\b\\s*[\\(\\r\\n]\" )\n unsafe_matches.append( r\"\\b(system|popen|mktemp|mkstemp|tmpnam|gets|syslog|strptime)\\b\\s*[\\(\\r\\n]\" )\n unsafe_matches.append( r\"(scanf)\\b\\s*[\\(\\r\\n]\" )\n valid_filename_regex = r\"\\.(cpp|cc|cxx|C|c\\+\\+|c|l|y||h|H|hh|hxx|hpp|h\\+\\+|qml)$\"\n\n # Retrieve the diff and do the problem checks...\n filename = unicode(\"\")\n filediff = list()\n for line in diff:\n file_change = re.match( \"^diff --(cc |git a\\/.+ b\\/)(.+)$\", line )\n if file_change:\n # Are we changing file? If so, we have the full diff, so do a license check....\n if filename != \"\" and commit.files_changed[ filename ][\"change\"] in ['A'] and re.search(valid_filename_regex, filename):\n self.check_commit_license(filename, ''.join(filediff))\n\n filediff = list()\n filename = file_change.group(2)\n continue\n\n # Diff headers are bogus\n if re.match(\"@@ -\\d+,\\d+ \\+\\d+ @@\", line):\n filediff = list()\n continue\n\n # Do an incremental check for *.desktop syntax errors....\n if re.search(\"\\.desktop$\", filename) and re.search(\"[^=]+=.*[ \\t]$\", line) and line.startswith(\"+\") and not re.match(\"^\\+#\", line):\n self._commit_notes[filename].append( \"[TRAILING SPACE] **\" )\n self._commit_problem = True\n\n # Check for things which are unsafe...\n for safety_match in unsafe_matches:\n match = re.match(safety_match, line)\n if match:\n note = \"[POSSIBLY UNSAFE: {0}] **\".format( match.group(1) )\n self._commit_notes[filename].append(note)\n self._commit_problem = True\n\n # Store the diff....\n filediff.append(line)\n\n if filename != \"\" and commit.files_changed[ filename ][\"change\"] in ['A'] and re.search(valid_filename_regex, filename):\n self.check_commit_license(filename, ''.join(filediff))",
"def _check_overlap(self, fe_commit):\n # +++ Avoid O(b branches * r rev) checks when\n # overlap is impossible because current branch\n # overlaps no other branch.\n if self._current_branch not in self._overlapping_branch_list():\n return\n\n for fe_file in fe_commit['files']:\n gwt_path = fe_file['path']\n depot_path = self.ctx.gwt_to_depot_path(gwt_path)\n\n for branch in self._overlapping_branch_list():\n if branch == self._current_branch:\n continue\n if not branch.intersects_depot_path(depot_path):\n continue\n\n LOG.debug(\"_check_overlap() branch {br1} <> {br2}\"\n \" gwt={gwt:<40} {dp}\\n{view}\"\n .format(\n br1 = p4gf_util.abbrev(self._current_branch.branch_id)\n , br2 = p4gf_util.abbrev(branch.branch_id)\n , gwt = gwt_path\n , dp = depot_path\n , view = \"\\n\".join(branch.view_p4map.as_array())\n ))\n\n if self._current_branch.is_new_fp_from_push or branch.is_new_fp_from_push:\n current_branch_name = self._current_branch.git_branch_name\n if self._current_branch.is_new_fp_from_push:\n current_branch_name += '(new)'\n other_branch_name = branch.git_branch_name\n if branch.is_new_fp_from_push:\n other_branch_name += '(new)'\n human_msg = (_(\n \"Perforce: Cannot commit {sha1} '{gwt_path}' to '{depot_path}'.\\n\"\n \" You are attempting to push and create a new fully populated branch\\n\"\n \" with paths which overlap another branch. Contact your admin\\n\"\n \" to configure non-conflicting destination branch paths.\\n\"\n \" Branches: '{b1}', '{b2}'\")\n .format( sha1 = p4gf_util.abbrev(fe_commit['sha1'])\n , gwt_path = gwt_path\n , depot_path = depot_path\n , b1 = current_branch_name\n , b2 = other_branch_name ))\n else:\n human_msg = (_(\n \"Cannot commit {sha1} '{gwt_path}' to '{depot_path}'.\"\n \" Paths that overlap multiple Git Fusion branches are read-only.\"\n \" Branches: '{b1}', '{b2}'\")\n .format( sha1 = p4gf_util.abbrev(fe_commit['sha1'])\n , gwt_path = gwt_path\n , depot_path = depot_path\n , b1 = self._current_branch.branch_id\n , b2 = branch.branch_id ))\n raise PreflightException(human_msg)",
"def check_unstaged_changes(self):\n pass",
"def has_write_access():\n filepath = CURRENT_BUFFER.name\n if not os.path.exists(filepath):\n # file does not exist, so this is a new buffer, we shall check\n # whether we have write access to the directory.\n return os.access(os.path.split(filepath)[0], os.W_OK)\n else:\n # existing file, check whether we have write access to it.\n return os.access(filepath, os.W_OK)",
"def can_safely_release(*repo_paths):\n if repo_has_uncommitted():\n return False\n if repo_has_incoming(*repo_paths):\n return False\n if repo_has_outgoing():\n return continue_with_outgoing()\n return True",
"def _verify_descriptors(self, msg):\n self.assertTrue(is_writable_file(msg.chlderr))\n self.assertTrue(is_writable_file(msg.chldout))\n self.assertTrue(is_writable_file(msg.chldnul))",
"def repo_has_uncommitted():\n buff = subprocess.check_output(['hg', 'status'])\n\n if len(buff):\n print('Dirty / uncommitted changes in repository!')\n return True\n\n return False",
"def warn_uncommitted_changes(force):\n output = subprocess.run([\"git\", \"status\"], capture_output=True, text=True,)\n if \"modified\" in output.stdout or \"Untracked\" in output.stdout:\n print(\"Warning: repository has uncommitted changes:\\n\")\n print(\"-----------------------------------------------------------------------\")\n print(f\"{output.stdout}\")\n print(\"-----------------------------------------------------------------------\")\n if not force:\n print(\"\\nRun with -f to override\")\n sys.exit(1)",
"def copy_file_check(self):\n pass",
"def verify_git_clean(path):\n\n sys.stdout.write(\" - Checking for uncommitted changes:\")\n result = run_in_component(path, ['git', 'status', '--porcelain=v1'])\n\n lines = [x for x in result.splitlines() if len(x) > 0]\n\n if len(lines) == 0:\n print(\" OKAY\")\n return\n\n print(\" FAILED\")\n\n raise GenericError(\"There are uncommitted changes in the component, please commit or stash them\")",
"def check_dirty(args):\n man = load_manifest()\n any_dirty = False\n for (name, project) in man.projects.iteritems():\n repo = GitRepo(workdir_for_project(project))\n any_dirty = check_dirty_repo(repo) or any_dirty\n return any_dirty",
"def _already_copied_commit(self, commit_sha1, branch_id):\n if not self.already_copied_commit_runner:\n return False\n return self.already_copied_commit_runner.already_copied_commit(\n commit_sha1, branch_id)",
"def filter_paths(self, blobs):\n # check against one map for read, one for write\n # if check fails, figure out if it was the view map or the protects\n # that caused the problem and report accordingly\n self.author_denied = []\n self.pusher_denied = []\n self.foruser_denied = []\n self.fusion_denied = []\n self.unmapped = []\n c2d = P4.Map.RIGHT2LEFT\n\n LOG.debug('filter_paths() write_filter: %s', self.write_filter)\n for blob in blobs:\n gwt_path = self.ctx.gwt_path(blob['path'])\n topath_c = gwt_path.to_client()\n topath_d = gwt_path.to_depot()\n\n LOG.debug('filter_paths() topath_d: %s', topath_d)\n # for all actions, need to check write access for dest path\n result = \" \" # zum loggen\n if topath_d and P4GF_DEPOT_OBJECTS_RE.match(topath_d):\n LOG.debug('filter_paths() topath_d in //.git-fusion/objects')\n continue\n # do not require user write access to //.git-fusion/branches\n if topath_d and P4GF_DEPOT_BRANCHES_RE.match(topath_d):\n LOG.debug('filter_paths() topath_d in //.git-fusion/branches')\n continue\n if not self.write_filter.includes(topath_c, c2d):\n if not self.view_map.includes(topath_c, c2d):\n self.unmapped.append(topath_c)\n result = NTR('unmapped')\n elif not (self.ignore_author_perms or\n self.write_protect_author.includes(topath_d)):\n self.author_denied.append(topath_c)\n result = NTR('author denied')\n elif (self.write_protect_pusher and\n not self.write_protect_pusher.includes(topath_d)):\n self.pusher_denied.append(topath_c)\n result = NTR('pusher denied')\n elif (self.write_protect_foruser and\n not self.write_protect_foruser.includes(topath_d)):\n self.foruser_denied.append(topath_c)\n result = NTR('foruser denied')\n elif not self.write_protect_fusion.includes(topath_d):\n self.fusion_denied.append(topath_c)\n result = NTR('Git Fusion denied')\n else:\n result = \"?\"\n LOG.error('filter_paths() {:<13} {}, {}, {}'\n .format(result, blob['path'], topath_d, topath_c))\n elif LOG.isEnabledFor(logging.DEBUG):\n LOG.debug('filter_paths() topath_c in write_filter: %s', topath_c)",
"def something_to_commit():\n\n # Procelain returns nothing if there's nothing to commit\n ret = subprocess.check_output([\"git\", \"status\", \"--porcelain\"])\n\n if (len(ret) > 0):\n return True\n\n return False",
"def test_file_access_allowed_with_disabled_security(self):\n hooks = setup_hooks(disable_security=True)\n\n result = hooks.act_on_cloned_repo(UNAUTHORIZED_READ_FILE_REPO)\n\n assert result.status == Status.SUCCESS\n assert (\n _output.test_result_header(\n \"FiboTest\",\n NUM_FIBO_TESTS,\n NUM_FIBO_TESTS,\n _output.SUCCESS_COLOR,\n )\n in result.msg\n )",
"def _can_checkout(wit_path) -> bool:\n\n current_id = _get_head(wit_path)\n changes_to_be_committed = _return_as_string(_get_changes_to_be_committed, wit_path, current_id)\n changes_not_staged_for_commit = _return_as_string(_get_changes_not_staged_for_commit, wit_path)\n if changes_to_be_committed + changes_not_staged_for_commit == '':\n return True\n logging.error(FileNotSavedError('Some files are not saved. Try \"status\" command to view them.'))\n return False",
"def _check_write_consistency(self):\n self.logger.warning('Not checking write consistency')",
"def writable(self):\n self._check_not_closed()\n return False",
"def is_commit_affecting_directory(self, commit, directory):\n exit_code = self.run([\n 'git', 'diff-tree', '--quiet', '--no-commit-id', '-r', commit,\n '--', directory\n ],\n return_exit_code=True)\n return exit_code == 1",
"def should_do_write():\n if not suffix_is_supported():\n return False\n\n if not has_write_access():\n return False\n\n # Files under exclude_dir should be exempted from writing.\n filepath = CURRENT_BUFFER.name\n file_dir = filepath.rsplit('/', 1)[0]\n exclude_dirs = vim.eval(\"g:BHExcludeDir\")\n exclude_dirs = [os.path.realpath(os.path.expanduser(_dir)) for _dir in exclude_dirs]\n for dirname in exclude_dirs:\n if file_dir.startswith(dirname):\n debug(\"File in BHExcludeDir, do not write header.\")\n return False\n\n # whitelist: files directly inside BHIn will have a header.\n in_list = vim.eval(\"g:BHIn\")\n for dirname in in_list:\n dirname = os.path.realpath(os.path.expanduser(dirname))\n if file_dir == dirname:\n debug(\"File in BHIn, do write.\")\n return True\n\n # whitelist: files under BHUnder or its sub-dir will have a header.\n under_list = vim.eval(\"g:BHUnder\")\n for dirname in under_list:\n dirname = os.path.realpath(os.path.expanduser(dirname))\n if filepath.startswith(dirname):\n debug(\"File under BHUnder, do write.\")\n return True\n\n debug(\"default, do not write header.\")\n return False",
"def _RaiseIfNotWritable(self):\n if not self._storage_file:\n raise IOError('Unable to write to closed storage writer.')",
"def test_no_change(self):\r\n git_export_utils.export_to_git(\r\n self.course.id,\r\n 'file://{0}'.format(self.bare_repo_dir)\r\n )\r\n\r\n with self.assertRaisesRegexp(GitExportError,\r\n str(GitExportError.CANNOT_COMMIT)):\r\n git_export_utils.export_to_git(\r\n self.course.id, 'file://{0}'.format(self.bare_repo_dir))",
"def _warn_about_git_filters(files):\n repository = project_context.repository\n\n src_attrs = []\n dst_attrs = []\n\n for path, attrs in repository.get_attributes(*files).items():\n src = Path(path)\n dst = files[src].relative_to(project_context.path)\n src = src.relative_to(project_context.path)\n attrs_text = \"\"\n for name, value in attrs.items():\n if value == \"unset\":\n attrs_text += f\" -{name}\"\n elif value == \"set\":\n attrs_text += f\" {name}\"\n else:\n attrs_text += f\" {name}={value}\"\n\n src_attrs.append(f\"{str(src)}{attrs_text}\")\n dst_attrs.append(f\"{str(dst)}{attrs_text}\")\n\n if src_attrs:\n src_attrs_str = \"\\n\\t\".join(src_attrs)\n dst_attrs_str = \"\\n\\t\".join(dst_attrs)\n communication.warn(\n f\"There are custom git attributes for the following files:\\n\\t{src_attrs_str}\\n\"\n f\"You need to edit '.gitattributes' and add the following:\\n\\t{dst_attrs_str}\"\n )"
] | [
"0.730989",
"0.62108445",
"0.6002569",
"0.59505546",
"0.5941183",
"0.5897741",
"0.5511451",
"0.5413663",
"0.5360557",
"0.53083956",
"0.52989227",
"0.5220939",
"0.5215751",
"0.5145717",
"0.5142257",
"0.50900465",
"0.5081833",
"0.5081318",
"0.50811625",
"0.50787395",
"0.50735444",
"0.50679284",
"0.5057552",
"0.504842",
"0.504448",
"0.5037058",
"0.503033",
"0.50176835",
"0.5012819",
"0.49988663"
] | 0.8073145 | 0 |
If this is a classic branch, check that none of the files in the commit are in stream depots (and thus not writable). If any of the files is not writable, reject this commit. | def _check_stream_in_classic(self, fe_commit):
if self._current_branch.stream_name:
return
depot_re = re.compile(r'^//([^/]+)/([^/]+)/.*$')
for fe_file in fe_commit['files']:
gwt_path = fe_file['path']
depot_path = self.ctx.gwt_path(gwt_path).to_depot()
m = depot_re.match(depot_path)
if m:
depot = m.group(1)
if depot in self.stream_depots:
stream = '//{}/{}'.format(m.group(1), m.group(2))
human_msg = (
_("Cannot commit {sha1} '{gwt_path}' to '{depot_path}'."
" Paths in stream '{stream}' are read-only for branch '{b}'.")
.format( sha1 = p4gf_util.abbrev(fe_commit['sha1'])
, gwt_path = gwt_path
, depot_path = depot_path
, stream = stream
, b = self._current_branch.branch_id ))
raise PreflightException(human_msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_stream_writable(self, fe_commit):\n if not self._current_branch.stream_name:\n return\n prefix = self._current_branch.writable_stream_name + '/'\n for fe_file in fe_commit['files']:\n gwt_path = fe_file['path']\n depot_path = self.ctx.gwt_path(gwt_path).to_depot()\n if depot_path.startswith(prefix):\n continue\n\n human_msg = (_(\n \"Cannot commit {sha1} '{gwt_path}' to '{depot_path}'.\"\n \" Paths not in stream '{stream}' are read-only for branch '{b}'.\")\n .format( sha1 = p4gf_util.abbrev(fe_commit['sha1'])\n , gwt_path = gwt_path\n , depot_path = depot_path\n , stream = self._current_branch.writable_stream_name\n , b = self._current_branch.branch_id ))\n raise PreflightException(human_msg)",
"def check_commit_for_branch( self\n , commit\n , branch_id\n , any_locked_files\n , case_conflict_checker ):\n rev = commit['sha1']\n if LOG.isEnabledFor(logging.DEBUG):\n LOG.debug(\"check_commit_for_branch() \"\n \"Checking branch={} mark={} sha1={} file-ct={} -- {}\"\n .format( branch_id\n , commit['mark']\n , p4gf_util.abbrev(rev)\n , len(commit['files'])\n , repr(commit['data'])[:20].splitlines()[0]))\n\n if self._already_copied_commit(rev, branch_id):\n return\n\n # following checks assume client has been set for branch\n self.ensure_branch_preflight(commit, branch_id)\n with self.ctx.switched_to_branch(\n self._current_branch\n , set_client=self.set_client_on_branch_switch\n ):\n if case_conflict_checker:\n case_conflict_checker.read_fast_export_commit(\n commit, self._current_branch)\n\n # Empty commits require root-level .p4gf_placeholder to be mapped\n # in the current branch view.\n if not commit['files'] and not self._is_placeholder_mapped():\n raise PreflightException(\n _(\"Empty commit {sha1} not permitted. Git Fusion branch views\"\n \" must include root to permit empty commits.\")\n .format(sha1=p4gf_util.abbrev(rev)))\n\n with Timer(CHECK_PROTECTS):\n self._check_protects(commit['author_p4user'], commit['files'])\n\n with Timer(CHECK_OVERLAP):\n self._check_overlap(commit)\n\n # fetch the branch setting only, without cascading to repo/global config\n if self._current_branch.is_read_only:\n raise PreflightException(_(\"Push to branch {branch} prohibited.\")\n .format(branch=self._current_branch.git_branch_name))\n self._check_stream_writable(commit)\n self._check_stream_in_classic(commit)\n\n LOG.debug('checking locked files under //{}/...'.format(self.ctx.p4.client))\n if any_locked_files:\n # Convert the git commit paths to depotPaths\n files_in_commit = [self.ctx.gwt_path(f['path']).to_depot()\n for f in commit['files']]\n LOG.debug(\"files_in_commit {0}\".format(files_in_commit))\n for f in files_in_commit:\n if f in any_locked_files:\n # Collect the names (and clients) of users with locked files.\n # Report back to the pusher so they can take appropriate action.\n msg = _('{file} - locked by {user}').format(file=f,\n user=any_locked_files[f])\n LOG.info(msg)\n raise PreflightException(msg)\n\n # +++ Spend time extracting Jobs and P4Changelist owner\n # here if we actually do need to call\n # the preflight-commit hook.\n if self.ctx.preflight_hook.is_callable():\n jobs = G2PJob.extract_jobs(commit['data'])\n jobs2 = G2PJob.lookup_jobs(self.ctx, jobs)\n self.ctx.preflight_hook(\n ctx = self.ctx\n , fe_commit = commit\n , branch_id = branch_id\n , jobs = jobs2\n )",
"def check_commits(self, commits):\n LOG.info('Checking Perforce permissions and locks')\n self.ctx.checkpoint(\"copy_to_p4._preflight_check\")\n\n # Stop if files are opened in our repo client\n # We expect this to be none, since we have the view lock\n opened = self.ctx.p4.run(['opened', '-m1'])\n if opened:\n raise PreflightException(_('There are files opened by Git Fusion for this repo.'))\n\n # fetch the repo setting only, without cascading to global config\n is_read_only = self.ctx.repo_config.getboolean(p4gf_config.SECTION_REPO,\n p4gf_config.KEY_READ_ONLY,\n fallback=False)\n if is_read_only:\n raise PreflightException(_(\"Push to repo {repo_name} prohibited.\")\n .format(repo_name=self.ctx.config.repo_name))\n\n # get a list of stream depots for later checks for read-only paths\n depots = self.ctx.p4.run(['depots'])\n self.stream_depots = set([d['name'] for d in depots if d['type'] == 'stream'])\n any_locked_files = self._find_locked_by()\n LOG.debug(\"any_locked_files {0}\".format(any_locked_files))\n case_conflict_checker = None\n if not self.ctx.server_is_case_sensitive:\n case_conflict_checker = CaseConflictChecker(self.ctx)\n case_conflict_checker.read_perforce_paths()\n\n ui_name = self._curr_ref_ui_name()\n if ui_name:\n progress_msg = _('Checking commits for {ref}...').format(ref=ui_name)\n else:\n progress_msg = _('Checking commits...')\n\n with ProgressReporter.Determinate(len(commits)):\n for commit in commits:\n ProgressReporter.increment(progress_msg)\n\n self.g2p_user.get_author_pusher_owner(commit)\n\n rev = commit['sha1']\n if not self.assigner.is_assigned(commit['sha1']):\n continue\n\n self.check_commit(commit)\n\n for branch_id in self.assigner.branch_id_list(rev):\n self.check_commit_for_branch(\n commit\n , branch_id\n , any_locked_files\n , case_conflict_checker )\n\n if case_conflict_checker:\n cc_text = case_conflict_checker.conflict_text()\n if cc_text:\n raise PreflightException(cc_text)",
"def check_commit(self, commit):\n # pylint: disable=too-many-branches\n if LOG.isEnabledFor(logging.DEBUG):\n LOG.debug('check_commit() Checking mark={} sha1={} file-ct={} -- {}'\n .format( commit['mark']\n , p4gf_util.abbrev(commit['sha1'])\n , len(commit['files'])\n , repr(commit['data'])[:20].splitlines()[0]))\n\n if not commit['author_p4user']:\n raise PreflightException(_(\"User '{user}' not permitted to commit\")\n .format(user=commit['author']['email'].strip('<>')))\n\n if 'merge' in commit:\n ref_is_review = (self.gsreview_coll and\n self.gsreview_coll.ref_in_review_list(self._current_prt.ref))\n if not ref_is_review and not self.ctx.merge_commits:\n raise PreflightException(_('Merge commits are not enabled for this repo.'))\n if (not ref_is_review and\n not self.ctx.branch_creation and self.assigner.have_anonymous_branches):\n msg = _('Git branch creation is prohibited for this repo.')\n p4_branch_names_non_lw = [b.git_branch_name for b in self.ctx.branch_dict().values()\n if b.git_branch_name and not b.is_lightweight]\n if len(p4_branch_names_non_lw) > 1:\n msg += _('\\nThis repo has more than one named branch.'\n '\\nTry altering the push order - '\n 'pushing branches with merge ancestors first.')\n raise PreflightException(msg)\n if LOG.isEnabledFor(logging.DEBUG):\n for parent_mark in commit['merge']:\n parent_sha1 = self.fast_export_marks.get_commit(parent_mark)[:7]\n LOG.debug(\"check_commit() merge mark={} sha1={}\"\n .format(parent_mark, parent_sha1))\n\n if not self.ctx.submodules and 'files' in commit:\n for f in commit['files']:\n if f.get('mode') == '160000':\n if 'first_commit' in commit and not self._path_added(f.get('path'), commit):\n LOG.debug2('check_commit() passed {} in {}'.format(\n f.get('path'), p4gf_util.abbrev(commit['sha1'])))\n continue\n raise PreflightException(\n _('Git submodules not permitted: path={path} commit={commit_sha1}')\n .format(path=f.get('path'), commit_sha1=p4gf_util.abbrev(commit['sha1'])))\n\n for f in commit['files']:\n LOG.debug3(\"check_commit : commit files: \" + _log_fe_file(f))\n err = check_valid_filename(f['path'], self.ctx)\n if err:\n raise PreflightException(err)\n if self.ctx.is_lfs_enabled:\n self._check_lfs(commit, f)\n\n # Warn user about any jobs that appear to not exist\n jobs = G2PJob.lookup_jobs(self.ctx, G2PJob.extract_jobs(commit['data']))\n if jobs:\n for job_id in jobs:\n r = self.ctx.p4run('jobs', '-e', 'job={}'.format(job_id))\n if not r:\n _print_error(_(\"Job '{job_id}' doesn't exist\").format(job_id=job_id))\n # Create pending changes for any Git-Swarm reviews",
"def warn_uncommitted_changes(force):\n output = subprocess.run([\"git\", \"status\"], capture_output=True, text=True,)\n if \"modified\" in output.stdout or \"Untracked\" in output.stdout:\n print(\"Warning: repository has uncommitted changes:\\n\")\n print(\"-----------------------------------------------------------------------\")\n print(f\"{output.stdout}\")\n print(\"-----------------------------------------------------------------------\")\n if not force:\n print(\"\\nRun with -f to override\")\n sys.exit(1)",
"def verify_git_clean(path):\n\n sys.stdout.write(\" - Checking for uncommitted changes:\")\n result = run_in_component(path, ['git', 'status', '--porcelain=v1'])\n\n lines = [x for x in result.splitlines() if len(x) > 0]\n\n if len(lines) == 0:\n print(\" OKAY\")\n return\n\n print(\" FAILED\")\n\n raise GenericError(\"There are uncommitted changes in the component, please commit or stash them\")",
"def _check_overlap(self, fe_commit):\n # +++ Avoid O(b branches * r rev) checks when\n # overlap is impossible because current branch\n # overlaps no other branch.\n if self._current_branch not in self._overlapping_branch_list():\n return\n\n for fe_file in fe_commit['files']:\n gwt_path = fe_file['path']\n depot_path = self.ctx.gwt_to_depot_path(gwt_path)\n\n for branch in self._overlapping_branch_list():\n if branch == self._current_branch:\n continue\n if not branch.intersects_depot_path(depot_path):\n continue\n\n LOG.debug(\"_check_overlap() branch {br1} <> {br2}\"\n \" gwt={gwt:<40} {dp}\\n{view}\"\n .format(\n br1 = p4gf_util.abbrev(self._current_branch.branch_id)\n , br2 = p4gf_util.abbrev(branch.branch_id)\n , gwt = gwt_path\n , dp = depot_path\n , view = \"\\n\".join(branch.view_p4map.as_array())\n ))\n\n if self._current_branch.is_new_fp_from_push or branch.is_new_fp_from_push:\n current_branch_name = self._current_branch.git_branch_name\n if self._current_branch.is_new_fp_from_push:\n current_branch_name += '(new)'\n other_branch_name = branch.git_branch_name\n if branch.is_new_fp_from_push:\n other_branch_name += '(new)'\n human_msg = (_(\n \"Perforce: Cannot commit {sha1} '{gwt_path}' to '{depot_path}'.\\n\"\n \" You are attempting to push and create a new fully populated branch\\n\"\n \" with paths which overlap another branch. Contact your admin\\n\"\n \" to configure non-conflicting destination branch paths.\\n\"\n \" Branches: '{b1}', '{b2}'\")\n .format( sha1 = p4gf_util.abbrev(fe_commit['sha1'])\n , gwt_path = gwt_path\n , depot_path = depot_path\n , b1 = current_branch_name\n , b2 = other_branch_name ))\n else:\n human_msg = (_(\n \"Cannot commit {sha1} '{gwt_path}' to '{depot_path}'.\"\n \" Paths that overlap multiple Git Fusion branches are read-only.\"\n \" Branches: '{b1}', '{b2}'\")\n .format( sha1 = p4gf_util.abbrev(fe_commit['sha1'])\n , gwt_path = gwt_path\n , depot_path = depot_path\n , b1 = self._current_branch.branch_id\n , b2 = branch.branch_id ))\n raise PreflightException(human_msg)",
"def test_no_change(self):\r\n git_export_utils.export_to_git(\r\n self.course.id,\r\n 'file://{0}'.format(self.bare_repo_dir)\r\n )\r\n\r\n with self.assertRaisesRegexp(GitExportError,\r\n str(GitExportError.CANNOT_COMMIT)):\r\n git_export_utils.export_to_git(\r\n self.course.id, 'file://{0}'.format(self.bare_repo_dir))",
"def check_commit_problems(self, commit, diff):\n\n # Initialise\n self._license_problem = False\n self._commit_problem = False\n self._commit_notes = defaultdict(list)\n\n # Unsafe regex checks...\n unsafe_matches = list()\n unsafe_matches.append( r\"\\b(KRun::runCommand|K3?ShellProcess|setUseShell|setShellCommand)\\b\\s*[\\(\\r\\n]\" )\n unsafe_matches.append( r\"\\b(system|popen|mktemp|mkstemp|tmpnam|gets|syslog|strptime)\\b\\s*[\\(\\r\\n]\" )\n unsafe_matches.append( r\"(scanf)\\b\\s*[\\(\\r\\n]\" )\n valid_filename_regex = r\"\\.(cpp|cc|cxx|C|c\\+\\+|c|l|y||h|H|hh|hxx|hpp|h\\+\\+|qml)$\"\n\n # Retrieve the diff and do the problem checks...\n filename = unicode(\"\")\n filediff = list()\n for line in diff:\n file_change = re.match( \"^diff --(cc |git a\\/.+ b\\/)(.+)$\", line )\n if file_change:\n # Are we changing file? If so, we have the full diff, so do a license check....\n if filename != \"\" and commit.files_changed[ filename ][\"change\"] in ['A'] and re.search(valid_filename_regex, filename):\n self.check_commit_license(filename, ''.join(filediff))\n\n filediff = list()\n filename = file_change.group(2)\n continue\n\n # Diff headers are bogus\n if re.match(\"@@ -\\d+,\\d+ \\+\\d+ @@\", line):\n filediff = list()\n continue\n\n # Do an incremental check for *.desktop syntax errors....\n if re.search(\"\\.desktop$\", filename) and re.search(\"[^=]+=.*[ \\t]$\", line) and line.startswith(\"+\") and not re.match(\"^\\+#\", line):\n self._commit_notes[filename].append( \"[TRAILING SPACE] **\" )\n self._commit_problem = True\n\n # Check for things which are unsafe...\n for safety_match in unsafe_matches:\n match = re.match(safety_match, line)\n if match:\n note = \"[POSSIBLY UNSAFE: {0}] **\".format( match.group(1) )\n self._commit_notes[filename].append(note)\n self._commit_problem = True\n\n # Store the diff....\n filediff.append(line)\n\n if filename != \"\" and commit.files_changed[ filename ][\"change\"] in ['A'] and re.search(valid_filename_regex, filename):\n self.check_commit_license(filename, ''.join(filediff))",
"def check_unstaged_changes(self):\n pass",
"def _validate_can_write(self):\n if self._mode not in WRITE_MODES:\n raise IOError(\"File is not writable\")\n if self.Writable == 'no':\n raise IOError(\"'Writable' flag is 'no'\")",
"def can_safely_release(*repo_paths):\n if repo_has_uncommitted():\n return False\n if repo_has_incoming(*repo_paths):\n return False\n if repo_has_outgoing():\n return continue_with_outgoing()\n return True",
"def enforce_clean_option(args, run):\n repos = run.experiment_info[\"repositories\"]\n if not repos:\n raise RuntimeError(\n \"No version control detected. \"\n \"Cannot enforce clean repository.\\n\"\n \"Make sure that your sources under VCS and the \"\n \"corresponding python package is installed.\"\n )\n else:\n for repo in repos:\n if repo[\"dirty\"]:\n raise RuntimeError(\n \"EnforceClean: Uncommited changes in \"\n 'the \"{}\" repository.'.format(repo)\n )",
"def repo_has_uncommitted():\n buff = subprocess.check_output(['hg', 'status'])\n\n if len(buff):\n print('Dirty / uncommitted changes in repository!')\n return True\n\n return False",
"def test_file_access_allowed_with_disabled_security(self):\n hooks = setup_hooks(disable_security=True)\n\n result = hooks.act_on_cloned_repo(UNAUTHORIZED_READ_FILE_REPO)\n\n assert result.status == Status.SUCCESS\n assert (\n _output.test_result_header(\n \"FiboTest\",\n NUM_FIBO_TESTS,\n NUM_FIBO_TESTS,\n _output.SUCCESS_COLOR,\n )\n in result.msg\n )",
"def something_to_commit():\n\n # Procelain returns nothing if there's nothing to commit\n ret = subprocess.check_output([\"git\", \"status\", \"--porcelain\"])\n\n if (len(ret) > 0):\n return True\n\n return False",
"def test_set_commits_empty_exclude(self):\n\n temp = self.Temp(self.items, conds=[EmptyExclude()])\n empty_exclude = temp.conds[0]\n empty_exclude.set_commits(temp.df)\n\n commit = CommitGit(self.items, conds=[EmptyExclude()])\n self.assertEqual(empty_exclude.included, commit.conds[0].included)",
"def check_diff(src, dst):\n result = _subprocess(['git', '--no-pager', 'log', '--graph', '--abbrev-commit', '--pretty=oneline',\n '--no-merges', \"--\", f\"{src}\", f\"^{dst}\"])\n\n if result:\n print(f\"Warning: the following commits are present on {dst} but not on {src}: \\n{result}\")\n if args.force:\n print(f\"Warning: they will be overwritten on {dst} and discarded.\")\n else:\n print(f\"Warning: run with --force to overwrite and discard these commits from {dst}\")\n exit(1)",
"def _filter_committees_failing_weak_representation(self, profile: list[set[int]], committees: list[list[int]]) -> list[list[int]]:\n unique_approval_scores = self._compute_unique_approval_scores(profile)\n parties_deserving_representation = {party for party in self.parties if unique_approval_scores[party] >= self.n / self.k}\n possible_committees = [committee for committee in committees if parties_deserving_representation.issubset(set(committee))]\n return possible_committees",
"def _abort_on_pending_changes(self) -> None:\n if set(self._dirty_paths_by_status) - {StatusCode.Untracked}:\n raise ActionFailure(\n \"Found pending changes in tracked files. Diff-aware runs require a clean git state.\"\n )",
"def test_commit_on_unborn_branch(tmp_path: Path) -> None:\n repository = Repository.init(tmp_path / \"repository\")\n repository.commit(message=\"initial\")\n\n assert not repository.head.commit.parents",
"def _check_writable_(self):\n self._check_within_context_()\n if self._mode != 'w':\n raise Exception('Cannot update database: read only mode')",
"def is_staging_clean() -> bool:\n c = cmd.run(\"git diff --no-ext-diff --cached --name-only\")\n return not bool(c.out)",
"def commit_check(ctx):\n result = ctx.run(f\"{VENV_PREFIX} cz check --rev-range master..\", warn=True)\n if result.exited == 3: # NO_COMMIT_FOUND\n exit(0)\n else:\n exit(result.exited)",
"def _warn_about_git_filters(files):\n repository = project_context.repository\n\n src_attrs = []\n dst_attrs = []\n\n for path, attrs in repository.get_attributes(*files).items():\n src = Path(path)\n dst = files[src].relative_to(project_context.path)\n src = src.relative_to(project_context.path)\n attrs_text = \"\"\n for name, value in attrs.items():\n if value == \"unset\":\n attrs_text += f\" -{name}\"\n elif value == \"set\":\n attrs_text += f\" {name}\"\n else:\n attrs_text += f\" {name}={value}\"\n\n src_attrs.append(f\"{str(src)}{attrs_text}\")\n dst_attrs.append(f\"{str(dst)}{attrs_text}\")\n\n if src_attrs:\n src_attrs_str = \"\\n\\t\".join(src_attrs)\n dst_attrs_str = \"\\n\\t\".join(dst_attrs)\n communication.warn(\n f\"There are custom git attributes for the following files:\\n\\t{src_attrs_str}\\n\"\n f\"You need to edit '.gitattributes' and add the following:\\n\\t{dst_attrs_str}\"\n )",
"def has_unstaged_changes(repo):\n subprocess.check_call(['git', 'update-index', '-q', '--ignore-submodules',\n '--refresh'], cwd=repo)\n return subprocess.call(['git', 'diff-index', '--quiet', 'HEAD'],\n cwd=repo) != 0",
"def is_valid_commits(args):\n if args.commits is not None:\n return True\n return False",
"def check_heads(repo, their_heads, context):\n heads = repo.heads()\n heads_hash = hashutil.sha1(b''.join(sorted(heads))).digest()\n if not (\n their_heads == [b'force']\n or their_heads == heads\n or their_heads == [b'hashed', heads_hash]\n ):\n # someone else committed/pushed/unbundled while we\n # were transferring data\n raise error.PushRaced(\n b'repository changed while %s - please try again' % context\n )",
"def lint_commit_base(commit):\n success = True\n # Merge commits have two parents, we maintain a linear history.\n if len(commit.parents) > 1:\n error(\n \"Please resolve merges by re-basing. Merge commits are not allowed.\",\n commit)\n success = False\n\n return success",
"def test_check_contributing_state_ongoing_tasks_not_contributed(self):\r\n app = AppFactory.create()\r\n task = TaskFactory.create(app=app)\r\n user = UserFactory.create()\r\n\r\n contributing_state = helpers.check_contributing_state(app_id=app.id,\r\n user_id=user.id)\r\n\r\n assert contributing_state == 'can_contribute', contributing_state"
] | [
"0.73341",
"0.6207531",
"0.6191185",
"0.6037307",
"0.5875063",
"0.5831943",
"0.56964684",
"0.5686395",
"0.56552875",
"0.56157583",
"0.5488412",
"0.5477116",
"0.54372096",
"0.5435764",
"0.5399177",
"0.53548175",
"0.5332237",
"0.53187644",
"0.5318055",
"0.53075296",
"0.52779925",
"0.5258566",
"0.5231523",
"0.5215225",
"0.5206946",
"0.5195617",
"0.5192251",
"0.5180663",
"0.51744914",
"0.51710516"
] | 0.71837646 | 1 |
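A minimal runnable sketch of the classic-branch stream check in the row above; STREAM_DEPOTS, read_only_stream_paths and the sample paths are hypothetical stand-ins for the Perforce context, the gwt-to-depot mapping and the PreflightException used by the original.

import re

# Hypothetical stand-ins for the Perforce context in the original method.
STREAM_DEPOTS = {'streams'}                       # names of stream depots
DEPOT_RE = re.compile(r'^//([^/]+)/([^/]+)/.*$')  # //depot/stream-or-dir/...

def read_only_stream_paths(depot_paths):
    # Yield (depot_path, stream) pairs that fall inside a stream depot.
    for depot_path in depot_paths:
        m = DEPOT_RE.match(depot_path)
        if m and m.group(1) in STREAM_DEPOTS:
            yield depot_path, '//{}/{}'.format(m.group(1), m.group(2))

if __name__ == '__main__':
    paths = ['//depot/main/a.txt', '//streams/dev/b.txt']
    for depot_path, stream in read_only_stream_paths(paths):
        print('would reject: {} (read-only stream {})'.format(depot_path, stream))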
Return True if the named path was introduced in the HEAD commit. | def _path_added(self, path, fecommit):
# Because git-fast-export includes the entire tree in its output,
# regardless of whether the requested commit is the first in the
# branch or not, we need to check the repo itself to be certain if
# this path was truly introduced in this commit, or simply existed
# in the tree prior to the "first" commit.
commit = self.ctx.repo.get(fecommit['sha1'])
if commit is None:
# empty repository?
LOG.debug2("_path_added() commit {} is missing".format(fecommit['sha1']))
return True
for parent in commit.parents:
if p4gf_git.exists_in_tree(self.ctx.repo, path, parent.tree):
LOG.debug2("_path_added() {} exists in parent tree {}".format(
path, p4gf_util.abbrev(p4gf_pygit2.object_to_sha1(parent))))
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def git_has_object(project: Project, name: str) -> bool:\n ret = project.git(\"rev-parse\", \"--verify\", name, _ok_code=[0, 128])\n return ret.exit_code == 0",
"def test_heads_contains_true(repository: Repository) -> None:\n assert repository.head.name in repository.heads",
"def _is_branch(self, reference_name):\n return reference_name.startswith(\"refs/heads/\") or reference_name.startswith(\n \"refs/remotes/\"\n )",
"def is_versioned(target):\n\n assert os.path.exists(target), \"%s does not exist!\" % target\n git_tree = get_git_tree(target)\n\n versioned = False\n if git_tree is not None:\n output = gitopen([\"status\", \"--ignored\", \"--porcelain\", target], git_tree)\n if not (output.startswith(b\"!!\") or output.startswith(b\"??\")):\n versioned = True\n\n return versioned",
"def _is_tracked(filename, metadata):\n current_local_sha = local_metadata.get(filename, None)\n current_remote_sha = metadata.get(filename, None)\n return current_local_sha is not None \\\n and current_remote_sha is not None \\\n and current_local_sha == current_remote_sha",
"def local_changes():\n result, output = popen('git status', False, False)\n try:\n return not output[-1].startswith(\"nothing to commit\")\n except IndexError:\n return True",
"def _pre_commit_has_hallmark(pre_commit_file):\n with open(pre_commit_file) as fh:\n script = fh.read()\n if u'from jig' in script or u'jig init' in script:\n return True\n return False",
"def is_git_diff_header(diff_header):\n return any(l.startswith('diff --git') for l in diff_header.splitlines())",
"def is_branch(wit_path, branch):\n\n branches = _get_references_data(wit_path)\n del branches['HEAD']\n return branch in branches.keys()",
"def path_touched(*paths, commit_range):\n return check_output([\n 'git', 'diff', '--name-only', commit_range, '--', *paths\n ]).decode('utf-8').strip() != ''",
"def test_head_name(repository: Repository) -> None:\n head = repository._repository.references[\"HEAD\"]\n name = head.target.removeprefix(\"refs/heads/\")\n assert name == repository.head.name",
"def line_part_of_commit(file, line, commit):\n if line == '0': return False\n\n line_val = git(\"blame\", \"-l\", \"-L{0},{0}\".format(line), file)\n return line_val.split(\" \", 1)[0] == commit",
"def is_git_link():\n return islink('.git')",
"def _is_always_unsatisfied(self):\n # If this is a github sha tarball, then it is always unsatisfied\n # because the url has a commit sha in it and not the version\n # number.\n url = self._req.url\n if url:\n filename = filename_from_url(url)\n if filename.endswith(ARCHIVE_EXTENSIONS):\n filename, ext = splitext(filename)\n if is_git_sha(filename):\n return True\n return False",
"async def has(path: str) -> bool:\n _ = path.strip('/').split('/')\n bucket = _[0]\n key = '/'.join(_[1:])\n async with _create_client() as client:\n try:\n await client.head_object(Bucket=bucket, Key=key)\n return True\n except ClientError:\n return False",
"def test_heads_bool_empty(tmp_path: Path) -> None:\n repository = Repository.init(tmp_path / \"repository\")\n assert not repository.heads",
"def _can_checkout(wit_path) -> bool:\n\n current_id = _get_head(wit_path)\n changes_to_be_committed = _return_as_string(_get_changes_to_be_committed, wit_path, current_id)\n changes_not_staged_for_commit = _return_as_string(_get_changes_not_staged_for_commit, wit_path)\n if changes_to_be_committed + changes_not_staged_for_commit == '':\n return True\n logging.error(FileNotSavedError('Some files are not saved. Try \"status\" command to view them.'))\n return False",
"def is_git():\n return exists('.git') and not islink('.git')",
"def exists_ref(self, commit_id):\n pass",
"def repo_has_uncommitted():\n buff = subprocess.check_output(['hg', 'status'])\n\n if len(buff):\n print('Dirty / uncommitted changes in repository!')\n return True\n\n return False",
"def has_hash(self, h):\n rsp = h.hashlist(self.path)\n if re.search(\"\\n[0-9a-f]+\\smd5\\s%s\" % self.path, rsp):\n rval = True\n else:\n rval = False\n return rval",
"def exists(self):\n return self.islink() or exists(self._path)",
"def has_changes(self):\n if self.repo_is_empty:\n return True\n\n tree = self.repo.get(self.index.write_tree(self.repo))\n diff = tree.diff_to_tree(self.repo.get(self.repo.head.target).tree)\n return bool(diff)",
"def something_to_commit():\n\n # Procelain returns nothing if there's nothing to commit\n ret = subprocess.check_output([\"git\", \"status\", \"--porcelain\"])\n\n if (len(ret) > 0):\n return True\n\n return False",
"def exists(self):\r\n return os.path.exists(self.full_path)",
"def is_path(t, path):\n if label(t) != path[0]:\n return False\n if len(path) == 1:\n return True\n return any([is_path(b, path[1:]) for b in branches(t)])",
"def check_dependency(self, repo, minhash=None):\n try:\n p = Project.objects.get(repo_url=repo)\n except Project.DoesNotExist:\n return False\n j = p.last_successful_job()\n\n if j:\n if minhash:\n if p.commit_in_history(minhash, j.commit):\n # We already have a successful job that is new enough\n return True\n else:\n return True\n\n return False",
"def Exists(self, path: str) -> bool:\n ...",
"def exists(self, path):",
"def _git_exists_in_revision(path: Path, rev2: str) -> bool:\n # Surprise: On Windows, `git cat-file` doesn't work with backslash directory\n # separators in paths. We need to use Posix paths and forward slashes instead.\n cmd = [\"git\", \"cat-file\", \"-e\", f\"{rev2}:{path.as_posix()}\"]\n result = run(cmd, check=False, stderr=DEVNULL, env={\"LC_ALL\": \"C\"})\n return result.returncode == 0"
] | [
"0.6516155",
"0.63896835",
"0.6222142",
"0.6222135",
"0.60880154",
"0.60826945",
"0.6018103",
"0.5985125",
"0.59479386",
"0.5940511",
"0.59299994",
"0.59243816",
"0.59173447",
"0.5912473",
"0.5822935",
"0.58216715",
"0.5808256",
"0.5804621",
"0.5717881",
"0.56875396",
"0.56785035",
"0.5672228",
"0.5664909",
"0.5632297",
"0.5626049",
"0.561475",
"0.5596416",
"0.5569909",
"0.55685675",
"0.5555132"
] | 0.6642211 | 0 |
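The parent-tree test above can be sketched directly with pygit2, approximating the p4gf_git.exists_in_tree helper; repo_path, sha1 and path are caller-supplied assumptions, and a missing commit is treated as newly added, just as in the original.

import pygit2

def path_added_in_commit(repo_path, sha1, path):
    # True if `path` does not exist in any parent tree of the given commit.
    repo = pygit2.Repository(repo_path)
    commit = repo.get(sha1)
    if commit is None:
        return True                  # unknown commit: treat the path as new
    for parent in commit.parents:
        try:
            parent.tree[path]        # raises KeyError when the path is absent
            return False             # present in a parent, so not introduced here
        except KeyError:
            continue
    return True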
We have changed our branch_dict (or, more likely, called finish_branch_definition() on a branch within that dict) in a way that invalidates any cached calculations that consumed the branch dict. | def _invalidate_branch_cache(self):
self._cached_overlapping_branch_list = None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def finish_branch_definition(self, commit, branch):\n assert self._finish_branch_definition\n self._finish_branch_definition.finish_branch_definition(commit, branch)\n self._invalidate_branch_cache()",
"def tree_removeDeadBranches():\n nonlocal d_tree\n d_tree = { k : v for k, v in d_tree.items() if v}\n # By creating a new binding for 'd_tree', we have effectively\n # severed the connection back to the original dictionary.\n # We now need to copy this d_tree to the self.d_inputTree\n # self.d_outputTree structures\n self.d_inputTree = d_tree\n self.d_outputTree = self.d_inputTree.copy()",
"def _reset_changes(self):\r\n self._original = {}\r\n if self.last_updated is not None:\r\n self._original['last_updated'] = self.last_updated",
"def _refresh_cache(self, data_dict):\r\n pass",
"def scale_branch_capacity(self, zone_name=None, branch_id=None):\n anticipated_branch = self._get_df_with_new_elements(\"branch\")\n if bool(zone_name) or bool(branch_id) is True:\n if \"branch\" not in self.ct:\n self.ct[\"branch\"] = {}\n if zone_name is not None:\n try:\n self._check_zone(list(zone_name.keys()))\n except ValueError:\n self.ct.pop(\"branch\")\n return\n if \"zone_id\" not in self.ct[\"branch\"]:\n self.ct[\"branch\"][\"zone_id\"] = {}\n for z in zone_name.keys():\n self.ct[\"branch\"][\"zone_id\"][self.grid.zone2id[z]] = zone_name[z]\n if branch_id is not None:\n diff = set(branch_id.keys()) - set(anticipated_branch.index)\n if len(diff) != 0:\n print(\"No branch with the following id:\")\n for i in list(diff):\n print(i)\n self.ct.pop(\"branch\")\n return\n else:\n if \"branch_id\" not in self.ct[\"branch\"]:\n self.ct[\"branch\"][\"branch_id\"] = {}\n for i in branch_id.keys():\n self.ct[\"branch\"][\"branch_id\"][i] = branch_id[i]\n else:\n print(\"<zone> and/or <branch_id> must be set. Return.\")\n return",
"def _clear_caches(self):\n self._brushes = {}\n self._formats = {}",
"def _hard_update(self, active, target):\n\n target.load_state_dict(active.state_dict())",
"def visit_branch(self, node, children):\n branch = {k: v for d in children for k, v in d.items()}\n # Verify that this is either an interpolated, rut or graft branch and not an illegal mix\n # If a path is specified it is a rut branch or if there is a local graft it is a grafted branch\n # If both path and local graft are present in the same branch it is illegal\n if branch.get('path', None): # Path specified, so there should be no local grafts in this branch\n lf = branch['leaf_faces']\n local_graft = [lf[n]['graft'] for n in lf if lf[n]['graft'] == 'local']\n if local_graft:\n raise GraftRutBranchConflict(branch=set(lf.keys()))\n # Return dictionary of leaf faces and an optional path keyed to the local rule\n return { node.rule_name: branch }",
"def update_dict(new,old):",
"def _problem_handle_prev_evals(self):\n self._update_reward_values()",
"def __cleanState__(self, stateDict):\n for k in list(stateDict.keys()):\n if k.startswith('_'):\n stateDict.pop(k)\n return stateDict",
"def dummy_update( self ):\r\n pass",
"def flushCaches(self):\n self.rehabTreeCache = {} \n self.frailRehabTreeCache = {} \n self.frailTreeCache = {}",
"def _dirty(self, name):\n\n name = self._array_name_1D_to_ND(name) or name\n if name=='pos':\n for v in self.ancestor._persistent_objects.values():\n if 'kdtree' in v:\n del v['kdtree']\n\n if not self.auto_propagate_off:\n for d_ar in self._dependency_tracker.get_dependents(name):\n if d_ar in self or self.has_family_key(d_ar):\n if self.is_derived_array(d_ar):\n del self[d_ar]\n self._dirty(d_ar)",
"def testDirtyRefresh(self):\n \n pass",
"def __exit__(self, type, value, traceback):\n self.flush_cache()",
"def cleanup_state_dict_to_server(self) -> dict:\n clean_state_dict = copy_dict(self.model.state_dict()) # not deepcopy\n if self.is_sparse:\n for layer, prefix in zip(self.model.param_layers, self.model.param_layer_prefixes):\n key = prefix + \".bias\"\n if isinstance(layer, SparseLinear) and key in clean_state_dict.keys():\n clean_state_dict[key] = clean_state_dict[key].view(-1)\n\n del_list = []\n del_suffix = \"placeholder\"\n for key in clean_state_dict.keys():\n if key.endswith(del_suffix):\n del_list.append(key)\n\n for del_key in del_list:\n del clean_state_dict[del_key]\n\n return clean_state_dict",
"def _validate_branch_args(self) -> None:\n lk = set(self.branch_losses.keys())\n dk = set(self.model._get_inner_keys(self.model.heads))\n has_same_keys = lk == dk\n\n mk = None\n if self.branch_metrics is not None:\n mk = set(self.branch_metrics.keys())\n has_same_keys = dk == lk == mk\n\n ek = None\n if self.branch_loss_params is not None:\n ek = set(self.branch_loss_params.keys())\n has_same_keys = dk == lk == mk == ek\n\n if not has_same_keys:\n raise ValueError(\n \"Got mismatching keys for branch dict args. \"\n f\"Branch losses: {lk}. \"\n f\"Branch loss params: {ek}. \"\n f\"Decoder branches: {dk}. \"\n f\"Metrics: {mk}. \"\n f\"(`metrics`, and `branch_loss_params` can be None)\"\n )",
"def _localSetState(self,pdict):\n self.mapping = pdict.pop('mapping')\n self.values = pdict.pop('values')",
"def _update_loose (self, dict):\n self.__dict__.update(dict)",
"def _finish_init(self):\n\n # This is usually done in set_other(), but we already set it as part of\n # the constructor.\n self.this_branch.fetch(self.other_branch,\n last_revision=self.other_basis)",
"def correct_branching(self, data, rembranch, cluster):\n # Generate two sets - one containing all of the cluster_indices in rembranch\n # and another containing the cluster_indices in rembranch's descendants.\n\n # Note - a lot of the time the two sets will be identical. This is the case\n # where data points are not directly linked to rembranch. If data points\n # are linked to rembranch then generating these two sets will identify the\n # data that are linked exclusively to rembranch - it is these data that need\n # to be redistributed.\n\n set_rembranch = set(rembranch.cluster_indices)\n set_rembranch_descendants = set(rembranch.descendants[0].cluster_indices)\n lendescendants = len(rembranch.descendants)\n for j in range(1, lendescendants):\n set_rembranch_descendants = set_rembranch_descendants | set(rembranch.descendants[j].cluster_indices)\n\n # The difference between these sets are the data points that are unique to\n # the branch we are trying to get rid of - merge these with cluster, after\n # which we can delete the branch.\n branch_indices = list(set_rembranch-set_rembranch_descendants)\n branch_indices = np.array(branch_indices)\n branch_indices = branch_indices[np.where(branch_indices != rembranch.cluster_idx)]\n if np.size(branch_indices) != 0.0:\n for i in range(len(branch_indices)):\n idx = np.squeeze(np.where(rembranch.cluster_indices == branch_indices[i]))#\n if np.size(idx)==1:\n cluster = merge_data(cluster, rembranch.cluster_members[idx], rembranch.cluster_indices[idx], data)\n else:\n for j in range(np.size(idx)):\n cluster = merge_data(cluster, rembranch.cluster_members[idx[j]], rembranch.cluster_indices[idx[j]], data)\n\n # Remove the branch and set corresponding data in cluster_arr to the cluster\n # idx.\n self.clusters.pop(rembranch.cluster_idx)\n idx = np.squeeze(np.where(self.cluster_arr[1,:] == rembranch.cluster_idx))\n if np.size(idx) != 0.0:\n if np.size(idx) == 1.0:\n self.cluster_arr[1, idx] = cluster.cluster_idx\n else:\n for j in range(np.size(idx)):\n self.cluster_arr[1, idx[j]] = cluster.cluster_idx\n\n # reset the antecedent/antecessor of the rembranch descendants\n for descendant in rembranch.descendants:\n descendant.reset_antecedent()\n descendant.reset_antecessor()\n\n return",
"def _cache_good_incumbents(self, incumbents_df: pd.DataFrame):\r\n\r\n if self.optimizer_config.num_cached_good_params == 0:\r\n return\r\n\r\n incumbents_df = incumbents_df[self.parameter_dimension_names]\r\n unprojected_incumbents_df = self.parameter_adapter.unproject_dataframe(incumbents_df, in_place=False)\r\n\r\n if self._good_configs_from_the_past_invocations_df is None:\r\n self._good_configs_from_the_past_invocations_df = unprojected_incumbents_df\r\n else:\r\n self._good_configs_from_the_past_invocations_df = pd.concat([self._good_configs_from_the_past_invocations_df, unprojected_incumbents_df])\r\n\r\n if len(self._good_configs_from_the_past_invocations_df.index) > self.optimizer_config.num_cached_good_params:\r\n self._good_configs_from_the_past_invocations_df = self._good_configs_from_the_past_invocations_df.sample(\r\n n=self.optimizer_config.num_cached_good_params,\r\n replace=False\r\n )",
"def branching_factor(self,b_dict = {}):\n \t\tfor head in self.deps:\n \t\t\tb_factor = len(self.deps[head])\n \t\t\tb_dict[b_factor] = b_dict.get(b_factor,0)+1\n \t\treturn b_dict",
"def post_apply(self):\n # Release grid storage. Note: this *will not* cause deallocation, as these\n # grids are actually shared with the hook solution\n for i in self.grids.values():\n i.release_storage()\n # Release local grid storage. This *will* cause deallocation\n for i in self.local_grids.values():\n i.release_storage()\n # Dump performance data\n self.soln.get_stats()",
"def post_commit_pending(self) -> None:\n for entity_id, db_states_meta in self._pending.items():\n self._id_map[entity_id] = db_states_meta.metadata_id\n self._pending.clear()",
"def warmup_cache(self):\n self.get_whitespace_changes()\n self.get_cvsheader_changes()\n self.get_unmodified_changes()\n self.get_used_changes()\n self.get_zapped_changes()\n self.get_undecided_changes()",
"def mapper_updated(self):\n self.invalidate()\n return",
"def _empty_branch_object():\n return {LEGALCODES_KEY: []}",
"def _localSetState(self,pdict):\n self.base = pdict.pop('base')"
] | [
"0.60243195",
"0.56031704",
"0.548174",
"0.54290056",
"0.52304393",
"0.52058154",
"0.5188001",
"0.5129975",
"0.5117015",
"0.511483",
"0.51062316",
"0.5055685",
"0.5053761",
"0.50004244",
"0.49610597",
"0.4951123",
"0.49497518",
"0.4948754",
"0.49486953",
"0.49447733",
"0.49241784",
"0.4923457",
"0.4920427",
"0.49167985",
"0.4911735",
"0.49099317",
"0.49064362",
"0.49019763",
"0.49009266",
"0.48829722"
] | 0.6802069 | 0 |
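The one-liner above follows a common invalidate-on-write caching pattern: reset the memoized value whenever the underlying dict changes, and recompute it lazily on the next read. A self-contained toy version, with illustrative names that are not from the original code:

class BranchDict:
    def __init__(self):
        self._branches = {}
        self._cached_ids = None

    def finish_branch_definition(self, branch_id, branch):
        self._branches[branch_id] = branch
        self._invalidate_cache()             # derived data is now stale

    def _invalidate_cache(self):
        self._cached_ids = None

    def sorted_branch_ids(self):
        if self._cached_ids is None:         # recompute only when needed
            self._cached_ids = sorted(self._branches)
        return self._cached_ids

bd = BranchDict()
bd.finish_branch_definition('b2', object())
bd.finish_branch_definition('b1', object())
print(bd.sorted_branch_ids())                # ['b1', 'b2']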
Return the set of fully populated branches that overlap other fully populated branches. Caches the result because we check every file revision path for overlap, and for huge repos with thousands of non-overlapping lightweight (LW) branches, just iterating through the branch list starts to waste measurable CPU time. | def _overlapping_branch_list(self):
if self._cached_overlapping_branch_list is not None:
return self._cached_overlapping_branch_list
have_overlap = set()
for outer in p4gf_branch.iter_fp_non_deleted(self.ctx.branch_dict()):
outer_lhs = P4.Map()
outer_lhs.insert(outer.view_p4map.lhs())
for inner in p4gf_branch.iter_fp_non_deleted(self.ctx.branch_dict()):
if outer == inner:
continue
overlap = P4.Map.join(outer_lhs, inner.view_p4map)
# Any non-exclusionary lines shared between branches?
for line in overlap.as_array():
if line.startswith('-') or line.startswith('"-'):
continue
# Yep. Non-exclusionary line implies overlap
have_overlap.add(outer)
have_overlap.add(inner)
break
self._cached_overlapping_branch_list = have_overlap
return self._cached_overlapping_branch_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_branches_to_merge(branch):\n branches = [(branch, branch.subfolder or '')]\n for dependency in branch.branch_dependency_ids:\n branches.append((dependency.merge_with_branch_id, dependency.merge_subfolder or ''))\n return branches[::-1]",
"def branches_full(config, args):\n for b in config.repo.branches():\n yield config.repo.branch(b.name)",
"def __branch(self):\n\n if len(np.unique(self.__data[1][self.__indexes])) <= 1:\n return []\n\n branches = []\n disc_max = -np.inf\n disc_max_col = None\n\n for col in range(self.__data[0].shape[1]):\n if col in self.__cols_exclude:\n continue\n disc = self.disc(col)\n if disc > disc_max:\n disc_max = disc\n disc_max_col = col\n\n if disc_max_col == None:\n return branches\n \n uniques = np.unique(self.__data[0][self.__indexes, disc_max_col])\n cols_exclude = [col for col in self.__cols_exclude]\n cols_exclude.append(disc_max_col)\n for unique in uniques:\n indexes = (self.__data[0][:, disc_max_col] == unique)\n indexes = np.logical_and(self.__indexes, indexes)\n rule = self.__rule(disc_max_col, unique)\n branches.append(dtree(self.__data, self.__n_groups, self.__max_depth - 1, indexes, cols_exclude, rule, self.__groups))\n \n return branches",
"def _listBranches(self):\n assert self.wc.exists('branches')\n branches = self.wc.ls('branches')\n\n # Some early release branches used a different naming scheme\n # that doesn't sort properly with new-style release names. We\n # filter those out here, along with empty lines.\n branches = [b.strip('/') for b in branches\n if MELANGE_RELEASE_RE.match(b.strip('/'))]\n\n return sorted(branches)",
"def branches(self) -> list[str]:\n _args: list[Arg] = []\n _ctx = self._select(\"branches\", _args)\n return _ctx.execute_sync(list[str])",
"def find_branches(self, commit, repo):\n ref_dict = repo.repo.refs.as_dict()\n branches = []\n for branch, branch_id in [(b, ref_dict[b]) for b in repo.branches]:\n obj = repo.repo[branch_id]\n if commit.id == obj.id:\n branches.append((branch, obj))\n return branches",
"def branches(self):\n return sorted([\n br[20:] for br in self.repo.refs.keys() if (\n br.startswith('refs/remotes/origin/') and\n br[20:] != 'HEAD'\n )\n ])",
"def _get_branches(self):\n logging.info('--- Get Branches ---')\n self.local_branches = set(self.find_branches())\n self.remote_branches = set(self.find_branches(remote=True))\n # Tags are remote branches that start with \"tags/\".\n self.tags = {\n single_branch for single_branch in self.remote_branches\n if PRX_SVNTAGS_PREFIX.match(single_branch)}",
"def __gitMergedBranchList(self):\n self.vcs.gitListTagBranch(self.project.getProjectPath(), False,\n listAll=False, merged=True)",
"def get_branches( self ):\n\n branches = [ self ]\n\n for i in range( len( self.children ) ):\n branches.extend( self.children[i].get_branches() )\n\n return branches",
"def get_branches(self, *, refs=[\"refs/heads\", \"refs/remotes\"]):\n # type: (Sequence[str]) -> List[Branch]\n stdout = self.git(\n \"for-each-ref\",\n (\n \"--format=\"\n \"%(HEAD)%00\"\n \"%(refname)%00\"\n \"%(upstream)%00\"\n \"%(upstream:remotename)%00\"\n \"%(upstream:track,nobracket)%00\"\n \"%(committerdate:unix)%00\"\n \"%(objectname)%00\"\n \"%(contents:subject)\"\n ),\n *refs\n ) # type: str\n branches = [\n branch\n for branch in (\n self._parse_branch_line(line)\n for line in filter_(stdout.splitlines())\n )\n if branch.name != \"HEAD\"\n ]\n store.update_state(self.repo_path, {\"branches\": branches})\n return branches",
"def base_branches() -> list[str]:\n branches = []\n\n default = sh(\"git rev-parse --abbrev-ref origin/HEAD\").removeprefix(\"origin/\")\n branches.append(default)\n\n releases = sh(\n \"git branch --all --sort=-committerdate --list *release/* | head -10\"\n ).splitlines()\n releases = [b.removeprefix(\"*\").strip() for b in releases]\n branches.extend(releases)\n\n return branches",
"def list_branches(self) -> PagingList[Branch]:\n return PagingList(lambda offset, limit: self._generate_branches(None, offset, limit), 128)",
"def find_branches(self, remote=False):\n arguments = ['--no-color']\n if remote:\n arguments.append('-r')\n #\n for branch in self.git.branch(*arguments).splitlines():\n branch = branch.replace('*', '').strip()\n if branch:\n yield branch\n #\n #",
"def dirty_branches(self):\n # If no course index has been set, then no branches have changed\n if self.index is None:\n return []\n\n # If there was no index in the database to start with, then all branches\n # are dirty by definition\n if self.initial_index is None:\n return list(self.index.get('versions', {}).keys())\n\n # Return branches whose ids differ between self.index and self.initial_index\n return [\n branch\n for branch, _id\n in self.index.get('versions', {}).items()\n if self.initial_index.get('versions', {}).get(branch) != _id\n ]",
"def branches(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'branches')\r\n\r\n return http.Request('GET', url), parsers.parse_json",
"def list_branches(self) -> List[str]:\n self.__verify_repo_initialized()\n branches = heads.get_branch_names(self._env.branchenv)\n return branches",
"def branches(self):\r\n url = self.base_url + 'branches/'\r\n return json.loads(self.bb.load_url(url))",
"def _check_overlap(self, fe_commit):\n # +++ Avoid O(b branches * r rev) checks when\n # overlap is impossible because current branch\n # overlaps no other branch.\n if self._current_branch not in self._overlapping_branch_list():\n return\n\n for fe_file in fe_commit['files']:\n gwt_path = fe_file['path']\n depot_path = self.ctx.gwt_to_depot_path(gwt_path)\n\n for branch in self._overlapping_branch_list():\n if branch == self._current_branch:\n continue\n if not branch.intersects_depot_path(depot_path):\n continue\n\n LOG.debug(\"_check_overlap() branch {br1} <> {br2}\"\n \" gwt={gwt:<40} {dp}\\n{view}\"\n .format(\n br1 = p4gf_util.abbrev(self._current_branch.branch_id)\n , br2 = p4gf_util.abbrev(branch.branch_id)\n , gwt = gwt_path\n , dp = depot_path\n , view = \"\\n\".join(branch.view_p4map.as_array())\n ))\n\n if self._current_branch.is_new_fp_from_push or branch.is_new_fp_from_push:\n current_branch_name = self._current_branch.git_branch_name\n if self._current_branch.is_new_fp_from_push:\n current_branch_name += '(new)'\n other_branch_name = branch.git_branch_name\n if branch.is_new_fp_from_push:\n other_branch_name += '(new)'\n human_msg = (_(\n \"Perforce: Cannot commit {sha1} '{gwt_path}' to '{depot_path}'.\\n\"\n \" You are attempting to push and create a new fully populated branch\\n\"\n \" with paths which overlap another branch. Contact your admin\\n\"\n \" to configure non-conflicting destination branch paths.\\n\"\n \" Branches: '{b1}', '{b2}'\")\n .format( sha1 = p4gf_util.abbrev(fe_commit['sha1'])\n , gwt_path = gwt_path\n , depot_path = depot_path\n , b1 = current_branch_name\n , b2 = other_branch_name ))\n else:\n human_msg = (_(\n \"Cannot commit {sha1} '{gwt_path}' to '{depot_path}'.\"\n \" Paths that overlap multiple Git Fusion branches are read-only.\"\n \" Branches: '{b1}', '{b2}'\")\n .format( sha1 = p4gf_util.abbrev(fe_commit['sha1'])\n , gwt_path = gwt_path\n , depot_path = depot_path\n , b1 = self._current_branch.branch_id\n , b2 = branch.branch_id ))\n raise PreflightException(human_msg)",
"def branches(self):\r\n url = '{0}/branches/'.format(self.get_url())\r\n request = http.Request('GET', url)\r\n\r\n return request, parsers.parse_json",
"def stale_pr_branches(config, args):\n repo = config.repo\n for pr in repo.pull_requests(state=\"closed\"):\n if pr.head.repo == pr.base.repo and repo.branch(pr.head.ref):\n yield {\n \"html_url\": pr.html_url,\n \"base_branch\": pr.base.ref,\n \"head_branch\": pr.head.ref,\n }",
"def get_branches(self):\n\n # gets all branches in repository\n branches_endpoint = f'/repos/{self.repo}/branches'\n response = self._get_request(branches_endpoint)\n # guard condition\n if response.status_code != STATUS_CODE_OK:\n return None\n # deserialize\n branches_page = response.json()\n\n return [\n {\n 'name': branch['displayId']\n } for branch in branches_page['values']\n ]",
"def get_merged_prs(start_ref, end_ref):\r\n ensure_pr_fetch()\r\n start_unmerged_branches = set(\r\n branch.strip() for branch in\r\n git.branch(all=True, no_merged=start_ref).splitlines()\r\n )\r\n end_merged_branches = set(\r\n branch.strip() for branch in\r\n git.branch(all=True, merged=end_ref).splitlines()\r\n )\r\n merged_between_refs = start_unmerged_branches & end_merged_branches\r\n merged_prs = set()\r\n for branch in merged_between_refs:\r\n match = PR_BRANCH_RE.search(branch)\r\n if match:\r\n merged_prs.add(int(match.group(1)))\r\n return merged_prs",
"def branches(self):\n return self.in_lines + self.out_lines",
"def get_branch_list(request, project_id):\n if request.method == 'GET':\n project_entry = GitProjectEntry.objects.filter(id=project_id).first()\n if project_entry is None:\n return res.get_response(404, 'project not found', {})\n\n branch_entries = GitBranchEntry.objects.filter(project=project_entry)\n\n branches = []\n for entry in branch_entries:\n obj = entry.as_object()\n merge_target_entry = GitBranchMergeTargetEntry.objects.filter(\n project=project_entry,\n current_branch=entry\n ).first()\n\n if merge_target_entry is not None:\n obj['target_branch_name'] = merge_target_entry.target_branch.name\n\n branches.append(obj)\n\n return res.get_response(200, '', branches)\n\n return res.get_only_get_allowed({})",
"def __gitBranchList(self):\n self.vcs.gitListTagBranch(self.project.getProjectPath(), False)",
"def get_branches(self):\n\n # gets all branches in repository\n branches_endpoint = f'/repositories/{self.owner}/{self.repo}/refs/branches'\n filter_param = {'fields': 'values.name'}\n response = self._get_request(branches_endpoint, filter_param)\n # guard condition\n if response.status_code != STATUS_CODE_OK:\n raise BitbucketRequestSenderExc(\n f'Invalid parameter(s) in: owner: {self.owner},'\n f' repo: {self.repo}')\n # deserialize\n branches_page = response.json()\n\n return [\n {\n 'name': branch['name']\n } for branch in branches_page['values']\n ]",
"def missing_branches(self):\n upstream_tags = self.upstream_model.tags_from_semver_point(\n enums.K8S_STARTING_SEMVER\n )\n deb_branches = self.deb_model.base.branches_from_semver_point(\n enums.K8S_STARTING_SEMVER\n )\n return list(set(upstream_tags) - set(deb_branches))",
"def getmergesets (lblob,prct,areaop=min): \n sz = len(lblob)\n bmerged = [False for i in range(sz)]\n for i,blob in enumerate(lblob): blob.ID = i # make sure ID assigned\n lmergeset = [] # set of merged blobs (boxes)\n for i in range(sz):\n blob0 = lblob[i]\n for j in range(sz):\n if i == j: continue\n blob1 = lblob[j]\n # if blob0.band != blob1.band: continue # NB: this was only used when preventing frequency band crossing!! (2/18/21)\n # enough overlap between bboxes? \n if blob0.getintersection(blob1).area() >= prct * areaop(blob0.area(),blob1.area()):\n # merge them\n bmerged[i]=bmerged[j]=True\n found = False\n for k,mergeset in enumerate(lmergeset): # determine if either of these bboxes are in existing mergesets\n if i in mergeset or j in mergeset: # one of the bboxes in an existing mergeset?\n found = True\n if i not in mergeset: mergeset.add(i) # i not already there? add it in\n if j not in mergeset: mergeset.add(j) # j not already there? add it in\n if not found: # did not find either bbox in an existing mergeset? then create a new mergeset\n mergeset = set()\n mergeset.add(i)\n mergeset.add(j)\n lmergeset.append(mergeset)\n return lmergeset, bmerged",
"def _invalidate_branch_cache(self):\n self._cached_overlapping_branch_list = None"
] | [
"0.6201244",
"0.6164248",
"0.61058915",
"0.6089111",
"0.60666037",
"0.58286184",
"0.5817222",
"0.574742",
"0.57133055",
"0.570316",
"0.5672118",
"0.5664436",
"0.56305796",
"0.55732614",
"0.55578953",
"0.54982585",
"0.5476394",
"0.5456892",
"0.5438345",
"0.5427029",
"0.5400545",
"0.53685087",
"0.536286",
"0.530519",
"0.5284353",
"0.5283073",
"0.5263428",
"0.5243425",
"0.5238168",
"0.52377"
] | 0.7866057 | 0 |
If gfe_file is under Git LFS control, require that its large file content exist somewhere, either in our upload cache (it's new!) or in depot dedupe storage (already got it). | def _check_lfs(self, fe_commit, fe_file):
# Deleted files carry no LFS pointer.
if "sha1" not in fe_file:
return
# Symlinks and non-files carry no LFS pointer.
if fe_file.get("mode") not in [ FileModeStr.PLAIN
, FileModeStr.EXECUTABLE ]:
return
# Files not under Git LFS control should not carry LFS
# pointer information. While legal and permissible,
# this is usually a mistake (misconfigured Git client)
# and something most users want caught before the push
# gets into Helix.
is_tracked = self.ctx.lfs_tracker.is_tracked_git(
commit_sha1 = fe_commit["sha1"]
, gwt_path = fe_file["path"])
LOG.debug3("_check_lfs() tracked {lfs} commit {commit_sha1} gwt {gwt}"
.format( commit_sha1 = p4gf_util.abbrev(fe_commit["sha1"])
, lfs = 1 if is_tracked else 0
, gwt = fe_file["path"]
))
if not is_tracked:
lfs_row = LFSRow.from_gfe(self.ctx, fe_commit, fe_file)
if lfs_row:
raise PreflightException(
_("Push of Git LFS text pointer not tracked by LFS:"
"\ncommit {commit_sha1} path {gwt_path}")
.format( commit_sha1 = p4gf_util.abbrev(fe_commit["sha1"])
, gwt_path = fe_file["path"] ))
return
# Files under Git LFS control should carry LFS pointer
# information, but sometimes might not, and that's
# okay.
lfs_row = LFSRow.from_gfe(self.ctx, fe_commit, fe_file)
if not lfs_row:
return
# But if they DO carry LFS pointer information, that
# pointer needs to point to a valid LFS large file
# either already in Perforce or recently uploaded.
if not lfs_row.large_file_source:
LOG.error("LFS text pointer missing content.")
LOG.error("LFS commit {}".format(p4gf_util.abbrev(fe_commit["sha1"])))
LOG.error("LFS lfs oid {}".format(lfs_row.large_file_oid))
LOG.error("LFS ptr {blob_sha1} {blob_mode} {gwt}"
.format( blob_sha1 = p4gf_util.abbrev(fe_file["sha1"])
, blob_mode = p4gf_util.mode_str(fe_file["mode"])
, gwt = fe_file["path"]))
LOG.error("LFS upload {}".format(lfs_row.to_lfsfs().cache_path(self.ctx)))
LOG.error("LFS de-dupe {}".format(lfs_row.to_lfsfs().depot_path(self.ctx)))
raise PreflightException(_("Push of Git LFS text pointer missing content:"
"\ncommit {commit_sha1} path {gwt_path}")
.format( commit_sha1 = p4gf_util.abbrev(fe_commit["sha1"])
, gwt_path = fe_file["path"] ))
# We have an acceptable LFS text pointer.
# Remember it for later.
self.lfs_row_list.append(lfs_row) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_file_managed_keep_source_false_http(\n file, tmp_path, remote_grail_scene33, modules\n):\n name = str(tmp_path / \"testfile\")\n # Run the state\n ret = file.managed(\n name=name,\n source=remote_grail_scene33.url,\n source_hash=remote_grail_scene33.hash,\n keep_source=False,\n )\n assert ret.result is True\n\n # Now make sure that the file is not cached\n ret = modules.cp.is_cached(remote_grail_scene33.url)\n assert not ret, \"File is still cached at {}\".format(ret)",
"def test_file_managed_http_source_no_hash(file, tmp_path, remote_grail_scene33):\n name = str(tmp_path / \"testfile\")\n ret = file.managed(name=name, source=remote_grail_scene33.url, skip_verify=False)\n # This should fail because no hash was provided\n assert ret.result is False",
"def check_needs_upload(self, path):\n if self.upload_always:\n return True\n fn = '/'.join([self.hdfs_home, '.knitDeps', os.path.basename(path)])\n if self.hdfs and self.hdfs.exists(fn):\n st = os.stat(path)\n size = st.st_size\n t = st.st_mtime\n info = self.hdfs.info(fn)\n if info['size'] == size and t < info['last_mod']:\n return False\n else:\n return True\n else:\n return True",
"def check_file(self, path, approve_if_no_dbhash=False):\r\n if self.mod.filehash:\r\n h = create_filehash(path)\r\n return h == self.mod.filehash\r\n return approve_if_no_dbhash",
"def is_too_large(usr_file: str) -> bool:\n if usr_file.file_size >= MAX_FILESIZE_DOWNLOAD:\n return True\n else:\n return False",
"def test_managed_local_source_with_source_hash(\n file, tmp_path, grail_scene33_file, grail_scene33_file_hash, proto, dest_file_exists\n):\n name = tmp_path / \"local_source_with_source_hash\"\n\n if dest_file_exists:\n name.touch()\n\n # Test with wrong hash\n bad_hash = grail_scene33_file_hash[::-1]\n\n ret = file.managed(\n name=str(name),\n source=proto + str(grail_scene33_file),\n source_hash=\"sha256={}\".format(bad_hash),\n )\n assert ret.result is False\n assert not ret.changes\n assert \"does not match actual checksum\" in ret.comment\n\n # Now with the right hash\n ret = file.managed(\n name=str(name),\n source=proto + str(grail_scene33_file),\n source_hash=\"sha256={}\".format(grail_scene33_file_hash),\n )\n assert ret.result is True",
"def allowed_injected_file_path_bytes(context):\n return FLAGS.quota_max_injected_file_path_bytes",
"def file_populated(filepath):\n\n return file_exists(filepath) and os.stat(filepath).st_size > 0",
"def should_fix_git_symlinked():\n if sys.platform == 'win32':\n path = (os.path.dirname(__file__) +\n r'\\data\\samplesite\\stories\\theming.rst')\n try:\n if os.path.getsize(path) < 200:\n return True\n except Exception:\n pass\n return False",
"def test_file_managed_http_source_skip_verify(file, tmp_path, remote_grail_scene33):\n name = str(tmp_path / \"testfile\")\n ret = file.managed(name=name, source=remote_grail_scene33.url, skip_verify=True)\n assert ret.result is True",
"def check_file(filename, force, expected_file_size=1):\n if os.path.exists(filename):\n if force or os.path.getsize(filename) < expected_file_size:\n logger.debug(\" .. Removing old file '%s'.\", filename)\n os.remove(filename)\n return False\n else:\n return True\n return False",
"def _check_content_length(r: requests.Response):\n content_length = r.headers.get('Content-Length')\n if content_length is None:\n logger.debug('Cannot check length before downloading file')\n return\n\n if int(content_length) > MAX_DOWNLOAD_BYTES:\n raise FetchFileTooBigError(\n 'File length is {} bytes'.format(content_length)\n )",
"def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):\n if (algorithm == 'sha256') or (algorithm == 'auto' and len(file_hash) == 64):\n hasher = 'sha256'\n else:\n hasher = 'md5'\n\n if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash):\n return True\n else:\n return False",
"def testFetchGs(self):\n # pylint: disable=unused-argument\n def _Fetch(_ctx, cmd, capture_output):\n # Touch file we tried to copy too.\n osutils.Touch(cmd[-1])\n\n self.gs_mock.AddCmdResult(\n ['cp', '-v', '--', partial_mock.Ignore(), partial_mock.Ignore()],\n side_effect=_Fetch)\n\n key = ('gs',)\n url = 'gs://some.site.localdomain/file_go_boom'\n with self.cache.Lookup(key) as ref:\n self.assertFalse(ref.Exists())\n ref.Assign(url)\n self.assertTrue(ref.Exists())",
"def test_file_integrity_remove_file_in_case_of_fail():\n test_file = open('./testfile.tmp', 'a')\n test_file.close()\n test_file_path = os.path.realpath('./testfile.tmp')\n test_file_md5 = hashlib.md5(open(test_file_path, 'rb').read()).hexdigest()\n\n bad_md5 = 'some_noise_%s' % test_file_md5\n\n PackageDownloadHelper.check_file_integrity(test_file_path, bad_md5)\n\n assert not os.path.isfile(test_file_path)",
"def test_file_managed_http_source(file, tmp_path, remote_grail_scene33):\n name = str(tmp_path / \"testfile\")\n ret = file.managed(\n name=name,\n source=remote_grail_scene33.url,\n source_hash=remote_grail_scene33.hash,\n skip_verify=False,\n )\n assert ret.result is True",
"def test_file_integrity_return_error_in_case_of_bad_md5():\n test_file = open('./testfile.tmp', 'a')\n test_file.close()\n\n test_file_path = os.path.realpath('./testfile.tmp')\n test_file_md5 = hashlib.md5(open(test_file_path, 'rb').read()).hexdigest()\n\n bad_md5 = 'some_noise_%s' % test_file_md5\n\n result = PackageDownloadHelper.check_file_integrity(test_file_path, bad_md5)\n\n assert isinstance(result, ApiResponse)",
"def has_file(\n path_or_repo: Union[str, os.PathLike],\n filename: str,\n revision: Optional[str] = None,\n proxies: Optional[Dict[str, str]] = None,\n token: Optional[Union[bool, str]] = None,\n **deprecated_kwargs,\n):\n use_auth_token = deprecated_kwargs.pop(\"use_auth_token\", None)\n if use_auth_token is not None:\n warnings.warn(\n \"The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers.\", FutureWarning\n )\n if token is not None:\n raise ValueError(\"`token` and `use_auth_token` are both specified. Please set only the argument `token`.\")\n token = use_auth_token\n\n if os.path.isdir(path_or_repo):\n return os.path.isfile(os.path.join(path_or_repo, filename))\n\n url = hf_hub_url(path_or_repo, filename=filename, revision=revision)\n headers = build_hf_headers(token=token, user_agent=http_user_agent())\n\n r = requests.head(url, headers=headers, allow_redirects=False, proxies=proxies, timeout=10)\n try:\n hf_raise_for_status(r)\n return True\n except GatedRepoError as e:\n logger.error(e)\n raise EnvironmentError(\n f\"{path_or_repo} is a gated repository. Make sure to request access at \"\n f\"https://huggingface.co/{path_or_repo} and pass a token having permission to this repo either by \"\n \"logging in with `huggingface-cli login` or by passing `token=<your_token>`.\"\n ) from e\n except RepositoryNotFoundError as e:\n logger.error(e)\n raise EnvironmentError(f\"{path_or_repo} is not a local folder or a valid repository name on 'https://hf.co'.\")\n except RevisionNotFoundError as e:\n logger.error(e)\n raise EnvironmentError(\n f\"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for this \"\n f\"model name. Check the model page at 'https://huggingface.co/{path_or_repo}' for available revisions.\"\n )\n except requests.HTTPError:\n # We return false for EntryNotFoundError (logical) as well as any connection error.\n return False",
"def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):\n if ((algorithm is 'sha256') or\n (algorithm is 'auto' and len(file_hash) is 64)):\n hasher = 'sha256'\n else:\n hasher = 'md5'\n\n if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash):\n return True\n else:\n return False",
"def is_hash_locally_cached(self, ipfs_hash: str, ipfs_refs_local=None) -> bool:\n output = run([\"ipfs\", \"files\", \"stat\", \"--with-local\", \"--size\", f\"/ipfs/{ipfs_hash}\"])\n if \"(100.00%)\" in output:\n log(\"already fully cached\", \"green\")\n log(output)\n return True\n else:\n log(\"not fully cached\", \"red\")\n log(output)\n return False",
"def test_6_1_8_etc_gshadow_isfile(host):\n assert host.file(ETC_GSHADOW).is_file",
"def test_is_not_google_file(self):\r\n bad_file = StringIO.StringIO()\r\n bad_file.write('failing tests please')",
"def check_large_file(self, **kw):\n\n\tif not 'define_name' in kw:\n\t\tkw['define_name'] = 'HAVE_LARGEFILE'\n\tif not 'execute' in kw:\n\t\tkw['execute'] = True\n\n\tif not 'features' in kw:\n\t\tif self.env.CXX:\n\t\t\tkw['features'] = ['cxx', 'cxxprogram']\n\t\telse:\n\t\t\tkw['features'] = ['c', 'cprogram']\n\n\tkw['fragment'] = LARGE_FRAGMENT\n\tkw['msg'] = 'Checking for large file support'\n\ttry:\n\t\tself.check(**kw)\n\texcept self.errors.ConfigurationError:\n\t\tpass\n\telse:\n\t\treturn True\n\n\tkw['msg'] = 'Checking for -D_FILE_OFFSET_BITS=64'\n\tkw['defines'] = ['_FILE_OFFSET_BITS=64']\n\ttry:\n\t\tself.check(**kw)\n\texcept self.errors.ConfigurationError:\n\t\tpass\n\telse:\n\t\tself.define('_FILE_OFFSET_BITS', 64)\n\t\treturn True\n\n\tself.fatal('There is no support for large files')",
"def _verfiy_upload(self, file_local_path, file_id):\n local_sha1 = hash_utils.calc_file_sha1_hex_str(file_local_path)\n metadata = self._get_file_metadata(file_id)\n\n if 'sha1Hash' in metadata['file']['hashes']:\n if metadata['file']['hashes']['sha1Hash'].lower() != local_sha1:\n # Hashes don't match, delete the file on the server\n self.delete_item_by_id(file_id)\n logger.error('Checksums after upload of file {} to OneDrive didn\\'t match, '\n 'deleted the file on the server.'.format(file_local_path))\n return False\n\n return True",
"def test_get_path_not_exist(self):\n\n expected = False\n actual = PyFunceble.path.isfile(self.file)\n self.assertEqual(expected, actual)\n\n expected = None\n actual = Hash(self.file).get()\n self.assertEqual(expected, actual)",
"def test_source_package_exists(self):\n response = self.client.head(\n f'/filemanager/api/{self.upload_id}/content',\n headers={'Authorization': self.token}\n )\n self.assertEqual(response.status_code, status.OK)",
"def file_present(self,imagefile=None):\n import hashlib\n if self.filesize()==0:\n return False # empty files are never present\n if imagefile==None:\n imagefile=self.imagefile # use this one\n for hashname in ['md5','sha1']:\n oldhash = self.tag(hashname)\n if oldhash:\n newhash = hashlib.new(hashname,self.contents(imagefile=imagefile)).hexdigest()\n return oldhash==newhash\n raise ValueError,\"Cannot process file \"+self.filename()+\": no hash in \"+str(self)",
"def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):\n hasher = _resolve_hasher(algorithm, file_hash)\n\n if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash):\n return True\n else:\n return False",
"def upload_content(self, input_file_name, input_content, input_file_desc=\"\", input_content_type=DEFAULT_CTNT_TYPE_JSON, input_public_flag=True):\n\n # get gist list and generate gist file table\n list_gists_obj = self.list_gists()\n if list_gists_obj:\n gist_file_table_dict = self.generate_gist_file_table(list_gists_obj)\n else:\n logger.error(\n \"Cannot get gist list of user [%s], skip upload file to avoid file name duplicate!\" % self.user_name)\n return None\n\n # create new gist if file not exist, update gist if file exists\n if input_file_name in gist_file_table_dict:\n if GISTUtil.DEFAULT_GIST_MAX_LIMIT_FLAG and gist_file_table_dict[input_file_name][\"revision_count\"] >= GISTUtil.DEFAULT_GIST_MAX_LIMIT_COMMITS:\n delete_response_obj = self.delete_gist(gist_file_table_dict[input_file_name][\"id\"])\n query_single_gist_obj = self.list_single_gist(gist_file_table_dict[input_file_name][\"id\"])\n if delete_response_obj and not query_single_gist_obj:\n response_gist_obj = self.create_new_gist(input_file_name, input_content, input_file_desc,\n input_content_type,\n input_public_flag)\n else:\n response_gist_obj = self.update_existing_gist(gist_file_table_dict[input_file_name][\"id\"],\n input_file_name, input_content, input_file_desc,\n input_content_type)\n else:\n response_gist_obj = self.update_existing_gist(gist_file_table_dict[input_file_name][\"id\"],\n input_file_name, input_content, input_file_desc,\n input_content_type)\n else:\n response_gist_obj = self.create_new_gist(input_file_name, input_content, input_file_desc, input_content_type,\n input_public_flag)\n\n # get raw url of upload file\n if response_gist_obj:\n tmp_file_list = response_gist_obj.json().get(\"files\", {}).values()\n if len(tmp_file_list) == 1:\n file_download_url = tmp_file_list[0].get(\"raw_url\", None)\n else:\n logger.error(\"Gist upload failed, return obj format incorrect [%s]\" % response_gist_obj.json())\n return None\n return file_download_url\n else:\n return None",
"def test_local_uploader_upload_wrong_file(self, mock):\r\n mock.save.return_value = None\r\n u = LocalUploader()\r\n file = FileStorage(filename='test.txt')\r\n res = u.upload_file(file, container='user_3')\r\n err_msg = (\"Upload file should return False, \\\r\n as this extension is not allowed\")\r\n assert res is False, err_msg"
] | [
"0.5939489",
"0.5912179",
"0.58820075",
"0.5847848",
"0.58317304",
"0.5623303",
"0.5542459",
"0.5534037",
"0.5512348",
"0.5486995",
"0.54663646",
"0.54334575",
"0.5396594",
"0.5396052",
"0.5381335",
"0.53738886",
"0.53469056",
"0.5326318",
"0.530705",
"0.530241",
"0.52834505",
"0.52727556",
"0.5272555",
"0.5270352",
"0.52681243",
"0.5263224",
"0.52626044",
"0.52568734",
"0.5249778",
"0.5247285"
] | 0.6616472 | 0 |
Init view map for client. | def init_view(self):
self.view_map = self.ctx.clientmap | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def map_viewing_client():\n\n # Read configuration settings\n config = gis.get_config()\n if config.opt_gis_layout == 1:\n window = True\n else:\n window = False\n\n # @ToDo Make Configurable\n toolbar = True\n\n map = define_map(window=window, toolbar=toolbar, config=config)\n\n response.title = T(\"Map Viewing Client\")\n return dict(map=map)",
"def __init__(self, maps):\n self._maps = maps",
"def draw_map(self):\n self.vis.draw_map()",
"def create_map(self):\n self.map = MapContainer(\n parent=self,\n style={\n 'top': self.margin[0],\n 'right': self.margin[1],\n 'bottom': self.margin[2],\n 'left': self.margin[3],\n 'aspect': 1.0,\n 'align': 'center',\n 'vertical-align': 'center' \n },\n map_size=self.map_size\n )\n self.add_node(self.map)",
"def _set_folium_map(self):",
"def setupMap(self) :\n\t\tself.Dmap = OnscreenImage(image = 'models/mapTopView.png', \\\n\t\t\t\t\t #pos = (.8,0,.6), scale = .4)\n\t\t\t\t\t pos = (0.8,0,0.6), scale = .4)\n\t\tself.Dmap.setTransparency(TransparencyAttrib.MAlpha)\n\t\tself.dot = OnscreenImage(image = 'models/dot.png', \\\n\t\t\t\t\t pos = (1,0,1), scale = .01)\n\n\t\t# Set the dot's position in the 2d map\n\t\t#self.dot.setPos(0,0,0)\n#\t\t 0.0+self.Dmap.getX(),0, \\\n#\t\t 0.0+self.Dmap.getY())\n\t#\t self.avatarNP.getX()/(self.modelSizeX+0.0+self.Dmap.getX()),0, \\\n\t#\t self.avatarNP.getY()/(self.modelSizeY+0.0+self.Dmap.getY()))\n\t\tself.dot.setPos( \\\n\t\t (self.avatarNP.getX()/(self.modelSizeX))*0.79+0.4, 0, \\\n\t\t (self.avatarNP.getY()/(self.modelSizeY))*0.79+0.21)\n\t\tself.dotOrigin = self.dot.getPos()",
"def __init__(self):\n self._map = {}",
"def __init__(self):\n self.map = {}",
"def __init__(self, frame, mapWindow, Map):\n self.map = Map\n self.frame = frame\n self.mapWindow = mapWindow\n self.toolbar = None\n self.layerName = {}",
"def initView(self):\n return {}",
"def __init__(self, map_state):\n self.map_state = map_state\n self.image = map_prepare.GFX[\"misc\"][\"interface\"]\n self.make_widgets()",
"def _set_folium_map(self):\n m = Map(features=[self], width=self._width, height=self._height)\n self._folium_map = m.draw()",
"def new_map(self):\n self.wizard = NewMap(self)",
"def generate_map(\n self, console: Console, size: Size, viewport: Region, scroll: Offset\n ) -> LayoutMap:",
"def new_map(self):\n self.map = Map()\n self.player.roomId = 0\n return self.map",
"def SetupView(self):\r\n size = self.GetClientSizeTuple()\r\n height = self.maxtop - self.maxbottom\r\n width = self.maxright - self.maxleft\r\n \r\n #The ratio of the width to the height in the client-area\r\n screenratio = float(size[0]) / float(size[1])\r\n \r\n #The ratio of the world window. Because of divide-by-0, we have to make a special-case assignment\r\n if height == 0 or width == 0:\r\n ratio = screenratio\r\n else:\r\n ratio = width / height\r\n\r\n #Should seem familiar, since we did it in class...\r\n if ratio > screenratio:\r\n glViewport(0, (size[1] - (size[0] / ratio)) / 2, size[0], size[0] / ratio)\r\n if ratio < screenratio:\r\n glViewport((size[0] - size[1] * ratio) / 2, 0, size[1] * ratio, size[1])\r\n \r\n \r\n #I need to find an appropriate border value. It's scaled by the client-area because the world-window zooms, thus skewing any normal border given.\r\n if width == 0 or height == 0:\r\n xborder = 1\r\n yborder = 1\r\n else:\r\n xscale = size[0] / width\r\n xborder = 10 / xscale\r\n yscale = size[1] / height\r\n yborder = 10 / yscale\r\n \r\n glMatrixMode(GL_PROJECTION)\r\n glLoadIdentity()\r\n gluOrtho2D(self.maxleft - xborder, self.maxright + xborder, self.maxbottom - yborder, self.maxtop + yborder)",
"def __init__(self,\n coupling_map,\n initial_layout=None):\n super().__init__()\n self.coupling_map = coupling_map\n self.initial_layout = initial_layout",
"def setUp(self):\n self.response = self.client.get('/map/')",
"def __init__(self, island_map):\n self.island_map = island_map\n self.landscape_dict = {'M': Mountain,\n 'O': Ocean,\n 'J': Jungle,\n 'S': Savannah,\n 'D': Desert}",
"def basic_map(proj):\n fig = plt.figure(figsize=(15, 10))\n add_metpy_logo(fig, 0, 80, size='large')\n view = fig.add_axes([0, 0, 1, 1], projection=proj)\n view.set_extent([-120, -70, 20, 50])\n view.add_feature(cfeature.STATES.with_scale('50m'))\n view.add_feature(cfeature.OCEAN)\n view.add_feature(cfeature.COASTLINE)\n view.add_feature(cfeature.BORDERS, linestyle=':')\n return fig, view",
"def build_maps():\n return render_template(\"maps.html\")",
"def show_map(self):\n self.m1.display()",
"def from_map_view(cls, map_view, figures):\n renderers = [map_view.add_figure(figure) for figure in figures]\n return cls(renderers)",
"def map():\n\n return render_template(\"map.html\")",
"def setUp(self):\n SetUp.setUp()\n self.response = self.client.get('/map/')",
"def home(request):\n # Get list of sensors and create sensors MVLayer:\n sensors = get_all_sensors()\n features = []\n lat_list = []\n lng_list = []\n\n if sensors is not None:\n for sensor in sensors:\n lat_list.append(sensor.latitude)\n lng_list.append(sensor.longitude)\n\n sensor_feature = {\n 'type': 'Feature',\n 'geometry': {\n 'type': 'Point',\n 'coordinates': [sensor.longitude, sensor.latitude]\n },\n 'properties': {\n 'id': sensor.id,\n 'latitude': sensor.latitude,\n 'longitude': sensor.longitude\n }\n }\n features.append(sensor_feature)\n\n # Define GeoJSON FeatureCollection\n sensors_feature_collection = {\n 'type': 'FeatureCollection',\n 'crs': {\n 'type': 'name',\n 'properties': {\n 'name': 'EPSG:4326'\n }\n },\n 'features': features\n }\n\n # Create a Map View Layer\n sensors_layer = MVLayer(\n source='GeoJSON',\n options=sensors_feature_collection,\n legend_title='Sensors',\n layer_options={\n 'style': {\n 'image': {\n 'circle': {\n 'radius': 8,\n 'fill': {'color': '#d84e1f'},\n 'stroke': {'color': '#ffffff', 'width': 1},\n }\n }\n }\n },\n feature_selection=True\n )\n\n\n # Define view centered on sensor locations\n try:\n view_center = [sum(lng_list) / float(len(lng_list)), sum(lat_list) / float(len(lat_list))]\n except ZeroDivisionError:\n view_center = [-98.6, 39.8]\n\n view_options = MVView(\n projection='EPSG:4326',\n center=view_center,\n zoom=4.5,\n maxZoom=18,\n minZoom=2\n )\n\n sensor_map = MapView(\n height='100%',\n width='100%',\n layers=[sensors_layer],\n basemap='OpenStreetMap',\n view=view_options\n )\n\n context = {\n 'sensor_map': sensor_map,\n }\n\n return render(request, 'open_air/home.html', context)",
"def __init__(self):\n self.map = [None] * 103",
"def __init__(self, gameMap, initDirec=None, initBodies=None, initTypes=None):\n\t\tself._map = gameMap\n\t\tself._initDirec = initDirec\n\t\tself._initTypes = initTypes\n\t\tself._initBodies = initBodies\n\t\tself.reset(False)",
"def __init__(self, gdf_map):\n\n self.fig = plt.figure(figsize=(13,13))\n self.ax = self.fig.add_subplot(1,1,1)\n self.fontsize = 20\n\n self.city_markersize = 6\n self.city_marker = 'o'\n self.city_markercolor = 'k'\n\n self.map = gdf_map",
"def map():\n return render_template('map.html')"
] | [
"0.73483825",
"0.6393654",
"0.6328302",
"0.62651557",
"0.6215494",
"0.621435",
"0.6202144",
"0.61714244",
"0.60852766",
"0.60486645",
"0.5990444",
"0.595002",
"0.59376174",
"0.593404",
"0.5898968",
"0.58576566",
"0.5853716",
"0.5853698",
"0.5803742",
"0.5777687",
"0.576832",
"0.5748894",
"0.5746217",
"0.57306325",
"0.57243407",
"0.5702035",
"0.5701673",
"0.5695492",
"0.56954324",
"0.5694526"
] | 0.90606475 | 0 |
Run list of paths through filter and set list of paths that don't pass. | def filter_paths(self, blobs):
# check against one map for read, one for write
# if check fails, figure out if it was the view map or the protects
# that caused the problem and report accordingly
self.author_denied = []
self.pusher_denied = []
self.foruser_denied = []
self.fusion_denied = []
self.unmapped = []
c2d = P4.Map.RIGHT2LEFT
LOG.debug('filter_paths() write_filter: %s', self.write_filter)
for blob in blobs:
gwt_path = self.ctx.gwt_path(blob['path'])
topath_c = gwt_path.to_client()
topath_d = gwt_path.to_depot()
LOG.debug('filter_paths() topath_d: %s', topath_d)
# for all actions, need to check write access for dest path
result = " " # zum loggen
if topath_d and P4GF_DEPOT_OBJECTS_RE.match(topath_d):
LOG.debug('filter_paths() topath_d in //.git-fusion/objects')
continue
# do not require user write access to //.git-fusion/branches
if topath_d and P4GF_DEPOT_BRANCHES_RE.match(topath_d):
LOG.debug('filter_paths() topath_d in //.git-fusion/branches')
continue
if not self.write_filter.includes(topath_c, c2d):
if not self.view_map.includes(topath_c, c2d):
self.unmapped.append(topath_c)
result = NTR('unmapped')
elif not (self.ignore_author_perms or
self.write_protect_author.includes(topath_d)):
self.author_denied.append(topath_c)
result = NTR('author denied')
elif (self.write_protect_pusher and
not self.write_protect_pusher.includes(topath_d)):
self.pusher_denied.append(topath_c)
result = NTR('pusher denied')
elif (self.write_protect_foruser and
not self.write_protect_foruser.includes(topath_d)):
self.foruser_denied.append(topath_c)
result = NTR('foruser denied')
elif not self.write_protect_fusion.includes(topath_d):
self.fusion_denied.append(topath_c)
result = NTR('Git Fusion denied')
else:
result = "?"
LOG.error('filter_paths() {:<13} {}, {}, {}'
.format(result, blob['path'], topath_d, topath_c))
elif LOG.isEnabledFor(logging.DEBUG):
LOG.debug('filter_paths() topath_c in write_filter: %s', topath_c) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def exclude_filter(excl_filter, paths):\n misses = set()\n for p in paths:\n if re.search(excl_filter, p) is None:\n misses.add(p)\n\n return misses",
"def clean_dir_filtered(dr, filters):\n # type: (path, List[str]) -> None\n for f in os.listdir(dr):\n for fltr in filters:\n if fltr in f:\n os.remove(f)\n continue",
"def remove_paths(self, test):\n ii = 0\n while ii < len(self.paths):\n if test(self.paths[ii]):\n self.paths.pop(ii)\n else:\n ii += 1\n return self",
"def test_filter_files(self):\n expected = [\n (\"/subdir1/fichier1\", False),\n (\"/subdir1/fichier4\", False),\n (\"/subdir1/subsubdir1\", False),\n ]\n files = [\n (\"/subdir1/fichier1\", False),\n (\"/subdir2/fichier2\", False),\n (\"/subdir2/fichier3\", False),\n (\"/subdir1/fichier4\", False),\n (\"/subdir1/subsubdir1/fichier1\", False),\n (\"/subdir1/subsubdir1/\", False),\n ]\n self.assertEqual(\n list(self.path_translator.filter_files(files, \"/subdir1\")),\n expected)",
"def run_filters(incl_filters, excl_filters, paths):\n # Run include filters\n if incl_filters:\n incl_paths = set()\n for incl_filt in incl_filters:\n ps = include_filter(incl_filt, paths)\n incl_paths = incl_paths.union(ps)\n else:\n incl_paths = paths\n\n # Run exclude filters\n if excl_filters:\n for excl_filt in excl_filters:\n incl_paths = exclude_filter(excl_filt, incl_paths)\n\n return incl_paths",
"def _filter_return_url_from_list(self, paths, har=None):\r\n if not har:\r\n har = self.har\r\n \r\n matches = []\r\n for entry in har[\"log\"][\"entries\"]:\r\n for path in paths:\r\n if path in entry[\"request\"][\"url\"]:\r\n temp = entry[\"request\"][\"url\"].encode('ascii', 'ignore')\r\n matches.append(temp)\r\n return matches",
"def pclean(self):\n path_list_pruned = []\n for p in self.path_list:\n if not os.path.exists(p):\n print(\"Does not exist! \", p)\n elif p in path_list_pruned:\n print(\"Duplicate found \", p)\n else:\n p = os.path.normpath(p) # remove double slashes and stuff\n path_list_pruned.append(p)\n\n self.path_list = path_list_pruned\n self.pupdate()",
"def filter_trip_list_to_not_arrived(trip_list_df, pathset_paths_df):\n FastTripsLogger.debug(\"filter_trip_list_to_not_arrived(): trip_list_df len=%d head()=\\n%s\" % (len(trip_list_df), trip_list_df.head().to_string()))\n FastTripsLogger.debug(\"filter_trip_list_to_not_arrived(): pathset_paths_df len=%d head()=\\n%s\" % (len(pathset_paths_df), pathset_paths_df.head().to_string()))\n\n # filter to only the chosen paths\n chosen_paths_df = pathset_paths_df.loc[pathset_paths_df[Assignment.SIM_COL_PAX_CHOSEN] >= 0, [Passenger.TRIP_LIST_COLUMN_TRIP_LIST_ID_NUM, Assignment.SIM_COL_PAX_CHOSEN]]\n\n # add chosen index\n trip_list_df_to_return = pandas.merge(left =trip_list_df,\n right =chosen_paths_df,\n how =\"left\")\n # use it to filter to null chosen\n trip_list_df_to_return = trip_list_df_to_return.loc[pandas.isnull(trip_list_df_to_return[Assignment.SIM_COL_PAX_CHOSEN])]\n # remove chosen column\n trip_list_df_to_return.drop([Assignment.SIM_COL_PAX_CHOSEN], axis=1, inplace=True)\n\n FastTripsLogger.debug(\"filter_trip_list_to_not_arrived(): trip_list_df_to_return len=%d head()=\\n%s\" % (len(trip_list_df_to_return), trip_list_df_to_return.head().to_string()))\n return trip_list_df_to_return",
"def filter(self):\n self._printer('Standard Walk')\n count = Counter(length=3)\n for directory in self.directory:\n self._printer('Searching ' + directory)\n for root, directories, files in os.walk(directory, topdown=self.topdown):\n root = root[len(str(directory)) + 1:]\n self._printer(str(count.up) + \": Explored path - \" + str(root), stream=True)\n if self.filters.validate(root):\n # Check that non-empty folders flag is on and we're at the max directory level\n if self.filters.non_empty_folders and self.filters.get_level(root) == self.filters.max_level:\n # Check that the path is not an empty folder\n if os.path.isdir(directory + os.sep + root):\n # Get paths in folder without walking directory\n paths = os.listdir(directory + os.sep + root)\n\n # Check that any of the paths are files and not just directories\n if paths and any(os.path.isfile(os.path.join(directory, p)) for p in paths):\n self.add_path(directory, root)\n\n else:\n for filename in files:\n fullname = os.path.join(root, filename)\n if self.filters.validate(fullname):\n # Join the two strings in order to form the full filepath.\n self.add_path(directory, fullname)",
"def filter_paths(self, paths):\n formatted_paths = set()\n for path in paths:\n formatted_path = []\n if self.include_entity:\n if len(path) == 3:\n continue\n formatted_path.append(self.idx_to_node[path[0]].get_name())\n for rdx in range(0, (len(path)-1)/2):\n formatted_path.append(self.idx_to_relation[path[rdx*2+1]])\n formatted_path.append(self.idx_to_node[path[rdx*2+2]].get_name())\n else:\n if len(path) == 1:\n continue\n for rel_idx in path:\n formatted_path.append(self.idx_to_relation[rel_idx])\n formatted_paths.add(tuple(formatted_path))\n return formatted_paths",
"def test_FilterPaths(self):\n diff_file_chromium1_h = ['some diff']\n diff_web_tests_html = ['more diff']\n diff_presubmit = ['morer diff']\n diff_test_expectations = ['morest diff']\n mock_input_api = MockInputApi()\n mock_input_api.files = [\n MockAffectedFile('file_chromium1.h', diff_file_chromium1_h),\n MockAffectedFile(\n mock_input_api.os_path.join('web_tests', 'some_tests.html'),\n diff_web_tests_html),\n MockAffectedFile(\n mock_input_api.os_path.join('web_tests', 'TestExpectations'),\n diff_test_expectations),\n # Note that this path must have a slash, whereas most other paths\n # must have os-standard path separators.\n MockAffectedFile('blink/PRESUBMIT', diff_presubmit),\n ]\n # Access to a protected member _FilterPaths\n # pylint: disable=W0212\n filtered = PRESUBMIT._FilterPaths(mock_input_api)\n self.assertEqual(['file_chromium1.h'], filtered)",
"def without_paths(self, paths: List[Tuple[str, ...]]) -> \"Sample\":\n without_paths = copy(self)\n without_paths.kwargs = {\n key: value\n for key, value in self.kwargs.items()\n if not any(\n all(first == second for first, second in zip(key, path))\n for path in paths\n )\n }\n return without_paths",
"def filter_excluded_paths(self, filter_excluded_paths: ConfigNodePropertyArray):\n\n self._filter_excluded_paths = filter_excluded_paths",
"def empty_paths(self):\n self.paths[:]",
"def filter_files(self, path):\n excludes = r'|'.join([fnmatch.translate(x) for x in self.project.EXCLUDES]) or r'$.'\n for root, dirs, files in os.walk(path, topdown=True):\n dirs[:] = [d for d in dirs if not re.match(excludes, d)]\n dirs[:] = [os.path.join(root, d) for d in dirs]\n rel_path = os.path.relpath(root, path)\n\n paths = []\n for f in files:\n if rel_path == '.':\n file_path = f\n else:\n file_path = os.path.join(rel_path, f)\n if not re.match(excludes, file_path):\n paths.append(f)\n\n files[:] = paths\n yield root, dirs, files",
"def CleanPaths(pathlist):\n for path1 in pathlist:\n for path2 in pathlist[::-1]:\n if path2[::-1] == path1:\n pathlist.remove(path2)\n break",
"def include_filter(incl_filter, paths):\n hits = set()\n for p in paths:\n if re.search(incl_filter, p):\n hits.add(p)\n\n return hits",
"def get_only_paths(coll, pred, prefix_path=(), stop_at=None, stop_below=None):\n all_paths = get_all_paths(coll, prefix_path=prefix_path, stop_at=stop_at, stop_below=stop_below)\n return list(filter(pred, all_paths))",
"def filter(self, filters):",
"def run(self):\n # Look through the path names.\n for path in self.paths:\n\n # If the path name doesn't match, do nothing.\n if not self.regex.search(path): return\n\n # Execute the child actions.\n self.context.tokens['Path'] = path\n exitcode = super(FilterPathList, self).run()\n\n # If only looking for the first, or an error is reported,\n # bail out early.\n if self.matchfirst or exitcode != 0: return exitcode\n\n # None of the path names matched.\n return 0",
"def test_prune_ignore_list(self, ignore_list, expected_ignored_dirs,\n expected_ignored_files):\n all_dirs = []\n all_files = []\n\n for path in self.paths:\n rel_dir_root = path[\"root\"]\n dirs = path[\"dirs\"]\n files = path[\"files\"]\n self.file_scanner._prune(\n rel_dir_root, dirs, files, ignore_list=ignore_list)\n all_dirs.extend(dirs)\n all_files.extend(files)\n\n [self.assertNotIn(d, all_dirs) for d in expected_ignored_dirs]\n [self.assertNotIn(f, all_files) for f in expected_ignored_files]",
"def filter_paths(movement, paths, time_threshold):\r\n\r\n # check if all inputs are positive integers\r\n conditions_value = time_threshold <= 0\r\n if conditions_value:\r\n raise ValueError(\"Input values need to be positive\")\r\n\r\n # Variable that store paths equal to or larger than time threshold\r\n pass_paths = []\r\n\r\n # Pull out time variable\r\n T = movement['t']\r\n\r\n # Run through each path and check whether the time spending\r\n # on the path is equal to or larger than the time threshold\r\n for path in paths:\r\n start_time, end_time = T[path].ravel()\r\n if (end_time - start_time) >= time_threshold:\r\n pass_paths.append(path)\r\n\r\n return(pass_paths)",
"def filter_paths(pathnames, patterns=None, ignore_patterns=None):\n result = []\n if patterns is None:\n patterns = ['*']\n if ignore_patterns is None:\n ignore_patterns = []\n for pathname in pathnames:\n if match_patterns(pathname, patterns) and not match_patterns(pathname,\n ignore_patterns):\n result.append(pathname)\n return result",
"def optimize_path_filter():\n print(\"optimize_path_filter...\")\n if len(gCodeBlocks) == 0:\n print(\"no gcode loaded: cannot apply filter\")\n return\n block_to_filter = gCodeBlocks[-1]\n\n g01blocks = block_to_filter.g01blocks\n ng01 = len(g01blocks)\n\n print(block_to_filter)\n\n for ri in range(ng01-1):\n if ri % 10 == 0:\n print(ri, end='\\r')\n next_block_index = ri + 1\n idx_shortest = g01blocks[ri].shortestPathToStart2(g01blocks, next_block_index)\n if idx_shortest is not None:\n if idx_shortest != next_block_index:\n g01blocks[next_block_index], g01blocks[idx_shortest] = \\\n g01blocks[idx_shortest], g01blocks[next_block_index]\n\n print()\n # rearrange original lines\n block_to_filter.lines = []\n for g01block in block_to_filter.g01blocks:\n for line in g01block.lines:\n block_to_filter.lines.append(line)\n\n print(\"optimize_path_filter done.\")",
"def _filter_file_list(files, local_metadata, remote_metadata):\n def _is_tracked(filename, metadata):\n \"\"\"\n Is the filename tracked in the remote metadata dict.\n The file may be not even locally tracked yet\n \"\"\"\n current_local_sha = local_metadata.get(filename, None)\n current_remote_sha = metadata.get(filename, None)\n return current_local_sha is not None \\\n and current_remote_sha is not None \\\n and current_local_sha == current_remote_sha\n\n def _is_inside_ignored_dir(filename):\n \"\"\" Is the filename inside any of the IGNORE_DIRS list \"\"\"\n ignore_dirs = ['./' + x for x in IGNORE_DIRS]\n return any([filename.startswith(x) for x in ignore_dirs])\n\n def _has_ignored_extension(filename):\n return any([ext in IGNORE_EXTENSIONS\n for ext in filename.split('.')[1:]])\n\n files = [f for f in files\n if not _is_inside_ignored_dir(f)\n and not _has_ignored_extension(f)\n and not _is_tracked(f, remote_metadata)]\n return files",
"def filter_paths(paths : dict, spec : str) -> dict:\n all_paths = defaultdict(dict)\n for mag in paths.keys():\n specs = get_specs(mag, spec)\n paths_restructured = defaultdict(list)\n for path in paths[mag]:\n for s in specs:\n if s in path:\n paths_restructured[s].append(path)\n all_paths[mag] = paths_restructured\n return all_paths",
"def list_cleanup(self, data):\n for data_value in list(data):\n # TODO: Add DEBUG logging (?)\n for filter_key, filter_value in self.required.items():\n if filter_key in data_value.keys():\n if isinstance(filter_value, str) and self.exact_match:\n if data_value[filter_key] != filter_value:\n data.remove(data_value)\n break\n elif isinstance(filter_value, str) and (not self.exact_match):\n if data_value[filter_key] is None:\n data.remove(data_value)\n break\n if filter_value not in data_value[filter_key]:\n data.remove(data_value)\n break\n elif isinstance(filter_value, list) and self.exact_match:\n if data_value[filter_key] not in filter_value:\n data.remove(data_value)\n break\n elif isinstance(filter_value, list) and (not self.exact_match):\n if data_value[filter_key] is None:\n data.remove(data_value)\n break\n found_match = False\n for filter_value_item in filter_value:\n if filter_value_item in data_value[filter_key]:\n found_match = True\n if not found_match:\n data.remove(data_value)\n break\n else:\n self.logger.warning(msg=\"List_Cleanup: None of the cases matched. Data: %s Filter: %s\" % (data_value, self.filter))\n # TODO: Handle other possible cases\n else:\n self.logger.warning(msg=\"List_Cleanup: Filter key: %s not present in Data: %s\" % (filter_key, data_value))\n continue\n\n for data_value in list(data):\n # TODO: Add DEBUG logging (?)\n for filter_key, filter_value in self.excluded.items():\n if filter_key in data_value.keys():\n if isinstance(filter_value, str) and self.exact_match:\n if data_value[filter_key] == filter_value:\n data.remove(data_value)\n break\n elif isinstance(filter_value, str) and (not self.exact_match):\n if data_value[filter_key] is None:\n continue\n if filter_value in data_value[filter_key]:\n data.remove(data_value)\n break\n elif isinstance(filter_value, list) and self.exact_match:\n if data_value[filter_key] in filter_value:\n data.remove(data_value)\n break\n elif isinstance(filter_value, list) and (not self.exact_match):\n if data_value[filter_key] is None:\n continue\n found_match = False\n for filter_value_item in filter_value:\n if filter_value_item in data_value[filter_key]:\n found_match = True\n if found_match:\n data.remove(data_value)\n break\n else:\n self.logger.warning(msg=\"List_Cleanup: None of the cases matched. Data: %s Filter: %s\" % (data_value, self.filter))\n # TODO: Handle other possible cases\n else:\n self.logger.warning(msg=\"List_Cleanup: Filter key: %s not present in Data: %s\" % (filter_key, data_value))\n continue\n\n return data",
"def _filter_samples(sample_dirs, changed_files):\n result = []\n for sample_dir in sample_dirs:\n for changed_file in changed_files:\n if changed_file.startswith(sample_dir):\n result.append(sample_dir)\n\n return list(set(result))",
"def some_run_path(experiment_path, filters=None):\n must_be = [\"cfg.yaml\", \".__leaf\"]\n must_not_be = [\".__lock\", \".__crash\", \".__end\", \".__start\"]\n with os.scandir(experiment_path) as fit:\n for entry in fit:\n if not entry.name.startswith(\".\") and entry.is_dir():\n subexp_path = os.path.join(experiment_path, entry.name)\n with os.scandir(subexp_path) as fit2:\n for entry2 in fit2:\n if not entry2.name.startswith(\".\") and entry2.is_dir():\n run_path = os.path.join(subexp_path, entry2.name)\n done_before = False\n mandatory_files = []\n with os.scandir(run_path) as fit3:\n for entry3 in fit3:\n if entry3.name in must_not_be:\n done_before = True\n break\n if entry3.name in must_be:\n mandatory_files.append(entry3.name)\n if done_before or set(mandatory_files) != set(must_be):\n continue\n if filters and not experiment_matches(run_path, filters):\n print(f\"Skipping {run_path:s} as it was filtered out.\")\n continue\n yield run_path",
"def filter_paths(path):\n return [\"{}/{}\".format(path, f) for f in os.listdir(path) if\n f.endswith(FILE_EXTENSION_VM)]"
] | [
"0.6571253",
"0.6168529",
"0.6118564",
"0.60856956",
"0.6034449",
"0.6013353",
"0.5978687",
"0.58510804",
"0.58142954",
"0.58128434",
"0.57699585",
"0.5687041",
"0.5659206",
"0.56480026",
"0.5642498",
"0.5623489",
"0.5614679",
"0.5568684",
"0.556686",
"0.550684",
"0.5481009",
"0.547893",
"0.54509985",
"0.54235226",
"0.5411147",
"0.53807765",
"0.5379591",
"0.53677636",
"0.53603035",
"0.53562605"
] | 0.6176403 | 1 |
Print the given message to the error stream, as well as to the log. | def _print_error(msg):
sys.stderr.write(msg + '\n')
LOG.error(msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def error(message):\n print(message, file=sys.stderr)",
"def log_error(message):\n sys.stderr.write(message)\n sys.stderr.flush()",
"def log_error(self, message):\n u = six.text_type\n log_line = (\n u('{0:%Y-%m-%d %H:%M:%S} [FALCON] [ERROR] {1} {2}?{3} => {4}\\n').\n format(datetime.now(), self.method, self.path, self.query_string,\n message)\n )\n\n self._wsgierrors.write(log_line)",
"def _print_error(message):\n sys.stderr.write(str(message) + \"\\n\")\n sys.stderr.flush()",
"def print_error(self, message: str=\"\", src_file: str=\"\") -> None:\n if self._verbosity_level >= int(VerbosityLevel.VERBOSITY_LEVEL1):\n _mes = src_file + \": \" + message\n if self._print_statements_enabled:\n print(\"ERROR \\t\\t- \", src_file + \": \\t\" + message)\n logging.error(_mes)",
"def error(msg):\n if logger.level <= logging.ERROR:\n print('\\n~ ' + msg)\n logger.info(msg)",
"def errorPrint(msg, file=errorOutput):\n global errorLogger\n # print(\"----------------> errorLogger=%s\" % str(errorLogger))\n if errorLogger is not None:\n errorLogger.error(msg)\n else:\n taggedPrint(\"ERROR\", msg, file=file)",
"def err(*message, **kwargs):\n print(*message, file=sys.stderr, **kwargs)",
"def msg_err(message):\n to_stdout(\" !!! {message}\".format(message=message), colorf=red, bold=True)\n if _logger:\n _logger.error(message)",
"def print_stderr(message):\r\n if LogOptions.stderr_log_level() != LogOptions.LOG_LEVEL_NONE:\r\n print(message, file=sys.stderr)",
"def print_error(msg):\n print(\"[{}] {}\".format(datetime.now(), msg), file=sys.stderr)",
"def log_error(self, msg):\n self.log(msg, level=LOG_ERROR)",
"def log_error(self, msg):\n self.logger.error(msg)",
"def logerror(self, msg):\n self.logger.error(msg)",
"def logerror(msg):\n sys.stderr.write(str(msg) + '\\n')\n sys.stderr.flush()",
"def log_error(err):\n print(err)",
"def error(error_message: str):\n logger.error(error_message)",
"def err(message):\n\n timestamp = format_time(get_time())\n message = '{} - [ERROR] - {}'.format(timestamp, message)\n _log_status(message)",
"def log_error(self, message):\n # log the datetime+message to error_log.txt\n curr_time = datetime.datetime.now().strftime(\"%H:%M:%S \"\n \"%Y-%m-%d\")\n with open(ERROR_FILE_PATH, \"a+\") as error_file:\n error_file.write(\"{} $ {}\\n\".format(curr_time, message))",
"def print_err(msg):\n print(msg, file=sys.stderr)",
"def log_err(msg):\n msg = 'ERROR: {0}\\n'.format(msg)\n sys.stderr.write(msg)",
"def error(self, msg, stderr=True):\n self.log(msg, level=self.ERROR, stderr=stderr)",
"def error(self, message: str):\n self.log(Level.ERROR, message)",
"def error():\n logging.error(\"ERROR\")\n print('ERROR')",
"def error(cls, message):\n print('[ERROR] {0}'.format(message))",
"def log_error(self, message):\n self.logger.error(RED_RESET.format(thing=message))\n return",
"def error(message):\n print str(message)",
"def error(message):\n global LAST_LOG\n LAST_LOG = message\n cprint('\\r[ERR] {0}'.format(message), 'red', file=sys.stderr)",
"def printerr(msg):\n print(msg, file=sys.stderr)",
"def print_error_message(message):\r\n return print('ERROR:',message)"
] | [
"0.7898539",
"0.7735587",
"0.76756036",
"0.75126797",
"0.74449044",
"0.7419306",
"0.7378443",
"0.7376303",
"0.73306",
"0.73223484",
"0.7314951",
"0.7312553",
"0.72963977",
"0.7293763",
"0.72898746",
"0.7254633",
"0.7225247",
"0.7214274",
"0.72096974",
"0.7171879",
"0.7138568",
"0.7101563",
"0.7088874",
"0.7072581",
"0.7065365",
"0.7064435",
"0.7033326",
"0.70261234",
"0.7013357",
"0.6983994"
] | 0.7820772 | 1 |
Check if c will be rejected by P4D as nonprintable. P4D rejects "nonprintable" characters with | def is_p4d_printable(c):
if ord(c) < 0x20:
return False
if ord(c) == 0x7F:
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_printable(c):\n return ord(c)>=32 or c in ['\\r','\\n', '\\t']",
"def is_printable(s):\n for c in s:\n if c not in PRINTABLE_CHARACTERS:\n return False\n return True",
"def is_printable(b):\n return b in e(string.printable)",
"def is_string_printable(string_):\n return set(string_) - set(string.printable)",
"def ascii_printable(s: str) -> bool:\n return frozenset(s).issubset(_ascii_pa)",
"def __contains_nonascii_characters(string):\n for c in string:\n if not ord(c) < 128:\n return True\n return False",
"def test_contains_nonprintable_characters(self):\n result = attributeAsLDIF(b\"key\", b\"val\\xFFue\")\n self.assertEqual(result, b\"key:: %s\\n\" % encode(b\"val\\xFFue\"))",
"def _has_non_ascii_characters(data_string):\r\n try:\r\n data_string.encode('ascii')\r\n except UnicodeEncodeError:\r\n return True\r\n\r\n return False",
"def _validate_ascii(message):\n return all(ord(c) < 128 for c in message)",
"def RemoveNonUtf8BadChars(line):\n return \"\".join([ch for ch in line if ch in printable])",
"def check_ascii_compliance(plaintext: bytes) -> bool:\n return all(c < 128 for c in plaintext)",
"def keep_chr(char):\n return (unicodedata.category(char).startswith('P') and\n (char != \"#\" and char != \"@\" and char != \"&\"))",
"def replace_nonprintables(string):\n\tnew_string = \"\"\n\tmodified = 0\n\tfor c in string:\n\t\to = ord(c)\n\t\tif (o <= 31):\n\t\t\tnew_string += \"^\" + chr(ord('@') + o)\n\t\t\tmodified += 1\n\t\telif (o == 127):\n\t\t\tnew_string += \"^?\"\n\t\t\tmodified += 1\n\t\telse:\n\t\t\tnew_string += c\n\tif modified and Config.Config().urlencoding_mode != \"fixbucket\":\n\t\twarning(\"%d non-printable characters replaced in: %s\" % (modified, new_string))\n\treturn new_string",
"def non_secret_char(c):\n return c",
"def is_ascii(token):\n\n printable = set(string.printable)\n\n for char in token:\n if char not in printable:\n return False\n\n return True",
"def string_to_onlyascii(string):\n valids = [item for item in string if item.isascii() and item.isprintable()]\n return \"\".join(valids)",
"def test_value_special_chars(self):\n raw = [\n 0x48,\n 0x65,\n 0x79,\n 0x21,\n 0x3F,\n 0x24,\n 0x20,\n 0xC4,\n 0xD6,\n 0xDC,\n 0xE4,\n 0xF6,\n 0xFC,\n 0xDF,\n ]\n string = \"Hey!?$ ÄÖÜäöüß\"\n self.assertEqual(DPTString.to_knx(string), raw)\n self.assertEqual(DPTString.from_knx(raw), string)",
"def has_invalid_characters(filen=None,text=None):\n if filen is not None:\n with open(filen,'r') as fp:\n for line in fp:\n for c in set(line.replace('\\n','').replace('\\t','')):\n if ord(c) > 127 or ord(c) < 32:\n return True\n else:\n for c in set(text.replace('\\n','').replace('\\t','')):\n if ord(c) > 127 or ord(c) < 32:\n return True\n return False",
"def what_in_string(printable_string):\n if SCCS_ID in printable_string:\n content = re.sub(r\"^.*\" + re.escape(SCCS_ID), \"\", printable_string)\n content = re.sub(r'(\"|>|\\n|\\\\).*', \"\", content)\n if parameters[\"No formatting\"]:\n print(content)\n else:\n print(\"\\t\" + content)\n\n return True\n\n return False",
"def _maybe_show_implicit_non_ascii_error(self, node):\n if six.PY3:\n return\n if not isinstance(node.s, bytes):\n return\n if not any(ord(c) > 127 for c in node.s):\n return\n if any(\n self.filename.endswith(suffix)\n for suffix in self.config.IGNORED_FILES_FOR_EXPLICIT_STRING_LITERALS\n ):\n return\n # for multiline strings, the lineno is the last line and the col_offset is -1\n # there appears to be no simple way to get to the beginning of the string, and therefore no\n # way to determine whether there is a b prefix, so just ignore these strings\n if node.col_offset == -1:\n return\n line = self._lines()[node.lineno - 1]\n char = line[node.col_offset]\n if char in (\"b\", \"u\"):\n return\n self._show_error_if_checking(\n node,\n \"string containing non-ASCII characters should be explicitly marked as bytes or \"\n \"unicode\",\n error_code=ErrorCode.implicit_non_ascii_string,\n )",
"def test_removeIllegalCharacters(self):\n data = \"Contains\\x03 control\\x06 characters\\x12 some\\x0a\\x09allowed\\x0d\"\n after, changed = removeIllegalCharacters(data)\n self.assertEquals(after, \"Contains control characters some\\x0a\\x09allowed\\x0d\")\n self.assertTrue(changed)\n\n data = \"Contains\\x09only\\x0a legal\\x0d\"\n after, changed = removeIllegalCharacters(data)\n self.assertEquals(after, \"Contains\\x09only\\x0a legal\\x0d\")\n self.assertFalse(changed)",
"def printable(a):\n\treturn \"\".join([\n\t\tchr(c).isprintable() and chr(c) or \"\\\\x{0:02x}\".format(c)\n\t\tfor c in a\n\t])",
"def isPrintableKey(event_string):\n\n if event_string == \"space\":\n reply = True\n else:\n unicodeString = event_string.decode(\"UTF-8\")\n reply = (len(unicodeString) == 1) \\\n and (unicodeString.isalnum() or unicodeString.isspace()\n or unicodedata.category(unicodeString)[0] in ('P', 'S'))\n debug.println(debug.LEVEL_FINEST,\n \"orca.isPrintableKey: returning: %s\" % reply)\n return reply",
"def has_invalid_characters(self):\n return has_invalid_characters(text=self._sample_sheet.show())",
"def only_silence(string):\n for character in string:\n if not character in ('', ' ', '\\t', '\\n'):\n return False\n return True",
"def no_bad_uni_chars(x, _bad_chars=bad_uni_chars):\n return not any(y in _bad_chars for y in x)",
"def validate(data, badchars):\n assert(all(b not in data for b in badchars))",
"def search_bad_chars() -> str:\n\n lines = get_input()\n bad_chars = \"\\\\\"+hex(0) # x00 is always a badchar\n \n for i in range(1,255,8):\n for i in range(i,i+7):\n lines[i] = int(lines[i],16)\n if(hex(i) != hex(lines[i])):\n bad_chars += \"\\\\\"+hex(i)\n \n print(\"Found these bad characters:\",bad_chars)\n\n return bad_chars",
"def test_bad_chars_from_threshold(self):\r\n exp1 = [\r\n '\\t',\r\n '\\n',\r\n '\\r',\r\n ' ',\r\n '!',\r\n '\"',\r\n '#',\r\n '$',\r\n '%',\r\n '&',\r\n \"'\",\r\n '(',\r\n ')',\r\n '*',\r\n '+',\r\n ',',\r\n '-',\r\n '.',\r\n '/',\r\n '0',\r\n '1',\r\n '2',\r\n '3',\r\n '4',\r\n '5',\r\n '6',\r\n '7',\r\n '8',\r\n '9',\r\n ':',\r\n ';',\r\n '<',\r\n '=',\r\n '>',\r\n '?',\r\n '@',\r\n 'A',\r\n 'B']\r\n exp2 = ['\\t',\r\n '\\n',\r\n '\\r',\r\n ' ',\r\n '!',\r\n '\"',\r\n '#',\r\n '$',\r\n '%',\r\n '&',\r\n \"'\",\r\n '(',\r\n ')',\r\n '*',\r\n '+',\r\n ',',\r\n '-',\r\n '.',\r\n '/',\r\n '0',\r\n '1',\r\n '2',\r\n '3',\r\n '4',\r\n '5',\r\n '6',\r\n '7',\r\n '8',\r\n '9',\r\n ':',\r\n ';',\r\n '<',\r\n '=',\r\n '>',\r\n '?',\r\n '@',\r\n 'A',\r\n 'B',\r\n 'C',\r\n 'D',\r\n 'E',\r\n 'F',\r\n 'G',\r\n 'H',\r\n 'I',\r\n 'J',\r\n 'K',\r\n 'L',\r\n 'M',\r\n 'N',\r\n 'O',\r\n 'P',\r\n 'Q',\r\n 'R',\r\n 'S',\r\n 'T',\r\n 'U',\r\n 'V',\r\n 'W',\r\n 'X',\r\n 'Y',\r\n 'Z',\r\n '[',\r\n '\\\\',\r\n ']',\r\n '^',\r\n '_',\r\n '`',\r\n 'a',\r\n 'b',\r\n 'c',\r\n 'd',\r\n 'e',\r\n 'f',\r\n 'g',\r\n 'h',\r\n 'i',\r\n 'j',\r\n 'k',\r\n 'l',\r\n 'm',\r\n 'n',\r\n 'o',\r\n 'p',\r\n 'q',\r\n 'r',\r\n 's',\r\n 't',\r\n 'u',\r\n 'v',\r\n 'w',\r\n 'x',\r\n 'y',\r\n 'z',\r\n '{',\r\n '|',\r\n '}',\r\n '~']\r\n exp3 = [\r\n '\\t',\r\n '\\n',\r\n '\\r',\r\n ' ',\r\n '!',\r\n '\"',\r\n '#',\r\n '$',\r\n '%',\r\n '&',\r\n \"'\",\r\n '(',\r\n ')',\r\n '*',\r\n '+',\r\n ',',\r\n '-',\r\n '.',\r\n '/',\r\n '0',\r\n '1',\r\n '2',\r\n '3',\r\n '4',\r\n '5',\r\n '6',\r\n '7',\r\n '8',\r\n '9',\r\n ':',\r\n ';',\r\n '<',\r\n '=',\r\n '>',\r\n '?',\r\n '@']\r\n self.assertEqual(bad_chars_from_threshold('B'),\r\n {}.fromkeys(exp1))\r\n self.assertEqual(bad_chars_from_threshold(''), {})\r\n self.assertEqual(bad_chars_from_threshold('~'),\r\n {}.fromkeys(exp2))\r\n self.assertEqual(bad_chars_from_threshold('@'),\r\n {}.fromkeys(exp3))",
"def barcode_is_valid(s):\n return (bool(re.match(r'^[ATGC]*$',s))\n or barcode_is_10xgenomics(s))"
] | [
"0.8065967",
"0.7819546",
"0.7561507",
"0.70877075",
"0.6687797",
"0.66823083",
"0.66677797",
"0.66154623",
"0.6585473",
"0.65472776",
"0.6435375",
"0.64282465",
"0.6415623",
"0.63944894",
"0.6227201",
"0.6072948",
"0.60558593",
"0.60486054",
"0.6011463",
"0.59352165",
"0.59224325",
"0.5907709",
"0.589274",
"0.5839199",
"0.58294207",
"0.5802999",
"0.58029133",
"0.58011556",
"0.5794504",
"0.57834697"
] | 0.7970663 | 1 |