| body (string, 26–98.2k chars) | body_hash (int64) | docstring (string, 1–16.8k chars) | path (string, 5–230 chars) | name (string, 1–96 chars) | repository_name (string, 7–89 chars) | lang (string, 1 class) | body_without_docstring (string, 20–98.2k chars) |
|---|---|---|---|---|---|---|---|
def get_lbs_for_center_crop(crop_size, data_shape):
'\n :param crop_size:\n :param data_shape: (b,c,x,y(,z)) must be the whole thing!\n :return:\n '
lbs = []
for i in range((len(data_shape) - 2)):
lbs.append(((data_shape[(i + 2)] - crop_size[i]) // 2))
return lbs
| 5,384,268,792,373,000,000
|
:param crop_size:
:param data_shape: (b,c,x,y(,z)) must be the whole thing!
:return:
|
data/crop_and_pad_augmentations.py
|
get_lbs_for_center_crop
|
bowang-lab/shape-attentive-unet
|
python
|
def get_lbs_for_center_crop(crop_size, data_shape):
'\n :param crop_size:\n :param data_shape: (b,c,x,y(,z)) must be the whole thing!\n :return:\n '
lbs = []
for i in range((len(data_shape) - 2)):
lbs.append(((data_shape[(i + 2)] - crop_size[i]) // 2))
return lbs
|
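A quick worked sketch of the arithmetic in `get_lbs_for_center_crop` (the concrete shapes below are illustrative): for each spatial axis the lower bound is half of the leftover length after the crop.

```python
# centre-crop lower bounds for data of shape (b, c, x, y) = (1, 1, 256, 200)
# cropped to (128, 128): one lower bound per spatial axis
data_shape = (1, 1, 256, 200)
crop_size = (128, 128)
lbs = [(data_shape[i + 2] - crop_size[i]) // 2 for i in range(len(data_shape) - 2)]
print(lbs)  # [64, 36] -> the crop covers x = 64:192 and y = 36:164
```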
def crop(data, seg=None, crop_size=128, margins=(0, 0, 0), crop_type='center', pad_mode='constant', pad_kwargs={'constant_values': 0}, pad_mode_seg='constant', pad_kwargs_seg={'constant_values': 0}):
'\n crops data and seg (seg may be None) to crop_size. Whether this will be achieved via center or random crop is\n determined by crop_type. Margin will be respected only for random_crop and will prevent the crops form being closer\n than margin to the respective image border. crop_size can be larger than data_shape - margin -> data/seg will be\n padded with zeros in that case. margins can be negative -> results in padding of data/seg followed by cropping with\n margin=0 for the appropriate axes\n :param data: b, c, x, y(, z)\n :param seg:\n :param crop_size:\n :param margins: distance from each border, can be int or list/tuple of ints (one element for each dimension).\n Can be negative (data/seg will be padded if needed)\n :param crop_type: random or center\n :return:\n '
if (not isinstance(data, (list, tuple, np.ndarray))):
raise TypeError('data has to be either a numpy array or a list')
data_shape = tuple(([len(data)] + list(data[0].shape)))
data_dtype = data[0].dtype
dim = (len(data_shape) - 2)
if (seg is not None):
seg_shape = tuple(([len(seg)] + list(seg[0].shape)))
seg_dtype = seg[0].dtype
if (not isinstance(seg, (list, tuple, np.ndarray))):
raise TypeError('data has to be either a numpy array or a list')
assert all([(i == j) for (i, j) in zip(seg_shape[2:], data_shape[2:])]), ('data and seg must have the same spatial dimensions. Data: %s, seg: %s' % (str(data_shape), str(seg_shape)))
if (type(crop_size) not in (tuple, list, np.ndarray)):
crop_size = ([crop_size] * dim)
else:
assert (len(crop_size) == (len(data_shape) - 2)), 'If you provide a list/tuple as center crop make sure it has the same dimension as your data (2d/3d)'
if (not isinstance(margins, (np.ndarray, tuple, list))):
margins = ([margins] * dim)
data_return = np.zeros(([data_shape[0], data_shape[1]] + list(crop_size)), dtype=data_dtype)
if (seg is not None):
seg_return = np.zeros(([seg_shape[0], seg_shape[1]] + list(crop_size)), dtype=seg_dtype)
else:
seg_return = None
for b in range(data_shape[0]):
data_shape_here = ([data_shape[0]] + list(data[b].shape))
if (seg is not None):
seg_shape_here = ([seg_shape[0]] + list(seg[b].shape))
if (crop_type == 'center'):
lbs = get_lbs_for_center_crop(crop_size, data_shape_here)
elif (crop_type == 'random'):
lbs = get_lbs_for_random_crop(crop_size, data_shape_here, margins)
else:
raise NotImplementedError('crop_type must be either center or random')
need_to_pad = ([[0, 0]] + [[abs(min(0, lbs[d])), abs(min(0, (data_shape_here[(d + 2)] - (lbs[d] + crop_size[d]))))] for d in range(dim)])
ubs = [min((lbs[d] + crop_size[d]), data_shape_here[(d + 2)]) for d in range(dim)]
lbs = [max(0, lbs[d]) for d in range(dim)]
slicer_data = ([slice(0, data_shape_here[1])] + [slice(lbs[d], ubs[d]) for d in range(dim)])
data_cropped = data[b][tuple(slicer_data)]
if (seg_return is not None):
slicer_seg = ([slice(0, seg_shape_here[1])] + [slice(lbs[d], ubs[d]) for d in range(dim)])
seg_cropped = seg[b][tuple(slicer_seg)]
if any([(i > 0) for j in need_to_pad for i in j]):
data_return[b] = np.pad(data_cropped, need_to_pad, pad_mode, **pad_kwargs)
if (seg_return is not None):
seg_return[b] = np.pad(seg_cropped, need_to_pad, pad_mode_seg, **pad_kwargs_seg)
else:
data_return[b] = data_cropped
if (seg_return is not None):
seg_return[b] = seg_cropped
return (data_return, seg_return)
| -4,820,768,818,868,650,000
|
Crops data and seg (seg may be None) to crop_size. Whether this is achieved via a center or random crop is
determined by crop_type. Margin is respected only for random_crop and prevents the crops from being closer
than margin to the respective image border. crop_size can be larger than data_shape - margin -> data/seg will be
padded with zeros in that case. margins can be negative -> results in padding of data/seg followed by cropping with
margin=0 for the appropriate axes
:param data: b, c, x, y(, z)
:param seg:
:param crop_size:
:param margins: distance from each border, can be int or list/tuple of ints (one element for each dimension).
Can be negative (data/seg will be padded if needed)
:param crop_type: random or center
:return:
|
data/crop_and_pad_augmentations.py
|
crop
|
bowang-lab/shape-attentive-unet
|
python
|
def crop(data, seg=None, crop_size=128, margins=(0, 0, 0), crop_type='center', pad_mode='constant', pad_kwargs={'constant_values': 0}, pad_mode_seg='constant', pad_kwargs_seg={'constant_values': 0}):
'\n crops data and seg (seg may be None) to crop_size. Whether this will be achieved via center or random crop is\n determined by crop_type. Margin will be respected only for random_crop and will prevent the crops form being closer\n than margin to the respective image border. crop_size can be larger than data_shape - margin -> data/seg will be\n padded with zeros in that case. margins can be negative -> results in padding of data/seg followed by cropping with\n margin=0 for the appropriate axes\n :param data: b, c, x, y(, z)\n :param seg:\n :param crop_size:\n :param margins: distance from each border, can be int or list/tuple of ints (one element for each dimension).\n Can be negative (data/seg will be padded if needed)\n :param crop_type: random or center\n :return:\n '
if (not isinstance(data, (list, tuple, np.ndarray))):
raise TypeError('data has to be either a numpy array or a list')
data_shape = tuple(([len(data)] + list(data[0].shape)))
data_dtype = data[0].dtype
dim = (len(data_shape) - 2)
if (seg is not None):
seg_shape = tuple(([len(seg)] + list(seg[0].shape)))
seg_dtype = seg[0].dtype
if (not isinstance(seg, (list, tuple, np.ndarray))):
raise TypeError('data has to be either a numpy array or a list')
assert all([(i == j) for (i, j) in zip(seg_shape[2:], data_shape[2:])]), ('data and seg must have the same spatial dimensions. Data: %s, seg: %s' % (str(data_shape), str(seg_shape)))
if (type(crop_size) not in (tuple, list, np.ndarray)):
crop_size = ([crop_size] * dim)
else:
assert (len(crop_size) == (len(data_shape) - 2)), 'If you provide a list/tuple as center crop make sure it has the same dimension as your data (2d/3d)'
if (not isinstance(margins, (np.ndarray, tuple, list))):
margins = ([margins] * dim)
data_return = np.zeros(([data_shape[0], data_shape[1]] + list(crop_size)), dtype=data_dtype)
if (seg is not None):
seg_return = np.zeros(([seg_shape[0], seg_shape[1]] + list(crop_size)), dtype=seg_dtype)
else:
seg_return = None
for b in range(data_shape[0]):
data_shape_here = ([data_shape[0]] + list(data[b].shape))
if (seg is not None):
seg_shape_here = ([seg_shape[0]] + list(seg[b].shape))
if (crop_type == 'center'):
lbs = get_lbs_for_center_crop(crop_size, data_shape_here)
elif (crop_type == 'random'):
lbs = get_lbs_for_random_crop(crop_size, data_shape_here, margins)
else:
raise NotImplementedError('crop_type must be either center or random')
need_to_pad = ([[0, 0]] + [[abs(min(0, lbs[d])), abs(min(0, (data_shape_here[(d + 2)] - (lbs[d] + crop_size[d]))))] for d in range(dim)])
ubs = [min((lbs[d] + crop_size[d]), data_shape_here[(d + 2)]) for d in range(dim)]
lbs = [max(0, lbs[d]) for d in range(dim)]
slicer_data = ([slice(0, data_shape_here[1])] + [slice(lbs[d], ubs[d]) for d in range(dim)])
data_cropped = data[b][tuple(slicer_data)]
if (seg_return is not None):
slicer_seg = ([slice(0, seg_shape_here[1])] + [slice(lbs[d], ubs[d]) for d in range(dim)])
seg_cropped = seg[b][tuple(slicer_seg)]
if any([(i > 0) for j in need_to_pad for i in j]):
data_return[b] = np.pad(data_cropped, need_to_pad, pad_mode, **pad_kwargs)
if (seg_return is not None):
seg_return[b] = np.pad(seg_cropped, need_to_pad, pad_mode_seg, **pad_kwargs_seg)
else:
data_return[b] = data_cropped
if (seg_return is not None):
seg_return[b] = seg_cropped
return (data_return, seg_return)
|
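A hedged usage sketch of the `crop` function above. It assumes NumPy is installed and that `data/crop_and_pad_augmentations.py` is importable (the import path is an assumption); it simply centre-crops a random batch.

```python
import numpy as np

# assuming data/ is on the Python path so the module above can be imported
from crop_and_pad_augmentations import crop

data = np.random.rand(2, 1, 200, 200).astype(np.float32)   # b, c, x, y
seg = np.zeros((2, 1, 200, 200), dtype=np.int16)

# centre crop to 128x128; a crop_size larger than the image would be zero-padded
data_c, seg_c = crop(data, seg, crop_size=128, crop_type='center')
print(data_c.shape, seg_c.shape)  # (2, 1, 128, 128) (2, 1, 128, 128)
```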
def pad_nd_image_and_seg(data, seg, new_shape=None, must_be_divisible_by=None, pad_mode_data='constant', np_pad_kwargs_data=None, pad_mode_seg='constant', np_pad_kwargs_seg=None):
'\n Pads data and seg to new_shape. new_shape is thereby understood as min_shape (if data/seg is already larger then\n new_shape the shape stays the same for the dimensions this applies)\n :param data:\n :param seg:\n :param new_shape: if none then only must_be_divisible_by is applied\n :param must_be_divisible_by: UNet like architectures sometimes require the input to be divisibly by some number. This\n will modify new_shape if new_shape is not divisibly by this (by increasing it accordingly).\n must_be_divisible_by should be a list of int (one for each spatial dimension) and this list must have the same\n length as new_shape\n :param pad_mode_data: see np.pad\n :param np_pad_kwargs_data:see np.pad\n :param pad_mode_seg:see np.pad\n :param np_pad_kwargs_seg:see np.pad\n :return:\n '
sample_data = pad_nd_image(data, new_shape, mode=pad_mode_data, kwargs=np_pad_kwargs_data, return_slicer=False, shape_must_be_divisible_by=must_be_divisible_by)
if (seg is not None):
sample_seg = pad_nd_image(seg, new_shape, mode=pad_mode_seg, kwargs=np_pad_kwargs_seg, return_slicer=False, shape_must_be_divisible_by=must_be_divisible_by)
else:
sample_seg = None
return (sample_data, sample_seg)
| -694,869,453,499,894,400
|
Pads data and seg to new_shape. new_shape is thereby understood as min_shape (if data/seg is already larger than
new_shape, the shape stays the same for the dimensions to which this applies)
:param data:
:param seg:
:param new_shape: if None then only must_be_divisible_by is applied
:param must_be_divisible_by: UNet-like architectures sometimes require the input to be divisible by some number. This
will modify new_shape if new_shape is not divisible by it (by increasing it accordingly).
must_be_divisible_by should be a list of int (one for each spatial dimension) and this list must have the same
length as new_shape
:param pad_mode_data: see np.pad
:param np_pad_kwargs_data:see np.pad
:param pad_mode_seg:see np.pad
:param np_pad_kwargs_seg:see np.pad
:return:
|
data/crop_and_pad_augmentations.py
|
pad_nd_image_and_seg
|
bowang-lab/shape-attentive-unet
|
python
|
def pad_nd_image_and_seg(data, seg, new_shape=None, must_be_divisible_by=None, pad_mode_data='constant', np_pad_kwargs_data=None, pad_mode_seg='constant', np_pad_kwargs_seg=None):
'\n Pads data and seg to new_shape. new_shape is thereby understood as min_shape (if data/seg is already larger then\n new_shape the shape stays the same for the dimensions this applies)\n :param data:\n :param seg:\n :param new_shape: if none then only must_be_divisible_by is applied\n :param must_be_divisible_by: UNet like architectures sometimes require the input to be divisibly by some number. This\n will modify new_shape if new_shape is not divisibly by this (by increasing it accordingly).\n must_be_divisible_by should be a list of int (one for each spatial dimension) and this list must have the same\n length as new_shape\n :param pad_mode_data: see np.pad\n :param np_pad_kwargs_data:see np.pad\n :param pad_mode_seg:see np.pad\n :param np_pad_kwargs_seg:see np.pad\n :return:\n '
sample_data = pad_nd_image(data, new_shape, mode=pad_mode_data, kwargs=np_pad_kwargs_data, return_slicer=False, shape_must_be_divisible_by=must_be_divisible_by)
if (seg is not None):
sample_seg = pad_nd_image(seg, new_shape, mode=pad_mode_seg, kwargs=np_pad_kwargs_seg, return_slicer=False, shape_must_be_divisible_by=must_be_divisible_by)
else:
sample_seg = None
return (sample_data, sample_seg)
|
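The padding target described in the docstring can be summarised by a small shape calculation. The helper below is hypothetical (it is not `pad_nd_image` itself) and only illustrates the "minimum shape, rounded up to a multiple" rule.

```python
import numpy as np

def round_up_shape(spatial_shape, new_shape=None, must_be_divisible_by=None):
    # hypothetical helper: take the element-wise maximum with new_shape,
    # then round each axis up to the nearest multiple of must_be_divisible_by
    shape = np.array(spatial_shape)
    if new_shape is not None:
        shape = np.maximum(shape, np.array(new_shape))
    if must_be_divisible_by is not None:
        div = np.array(must_be_divisible_by)
        shape = ((shape + div - 1) // div) * div
    return tuple(int(s) for s in shape)

print(round_up_shape((97, 120), new_shape=(128, 128), must_be_divisible_by=(16, 16)))  # (128, 128)
print(round_up_shape((200, 130), must_be_divisible_by=(16, 16)))                        # (208, 144)
```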
def extract_leegstand(self):
'Create a column indicating leegstand (no inhabitants on the address).'
self.data['leegstand'] = (~ self.data.inwnrs.notnull())
self.version += '_leegstand'
self.save()
| -4,992,713,222,237,245,000
|
Create a column indicating leegstand (no inhabitants on the address).
|
codebase/datasets/adres_dataset.py
|
extract_leegstand
|
petercuret/woonfraude
|
python
|
def extract_leegstand(self):
self.data['leegstand'] = (~ self.data.inwnrs.notnull())
self.version += '_leegstand'
self.save()
|
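A minimal pandas sketch of the flag computed above, on a toy frame (reading `inwnrs` as an inhabitant count is an assumption): `~notnull()` is simply `isnull()`.

```python
import pandas as pd

df = pd.DataFrame({'adres_id': [1, 2, 3], 'inwnrs': [2.0, None, 1.0]})
df['leegstand'] = ~df.inwnrs.notnull()   # equivalent to df.inwnrs.isnull()
print(df['leegstand'].tolist())          # [False, True, False]
```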
def enrich_with_woning_id(self):
'Add woning ids to the adres dataframe.'
adres_periodes = datasets.download_dataset('bwv_adres_periodes', 'bwv_adres_periodes')
self.data = self.data.merge(adres_periodes[['ads_id', 'wng_id']], how='left', left_on='adres_id', right_on='ads_id')
self.version += '_woningId'
self.save()
| 9,146,979,939,905,093,000
|
Add woning ids to the adres dataframe.
|
codebase/datasets/adres_dataset.py
|
enrich_with_woning_id
|
petercuret/woonfraude
|
python
|
def enrich_with_woning_id(self):
adres_periodes = datasets.download_dataset('bwv_adres_periodes', 'bwv_adres_periodes')
self.data = self.data.merge(adres_periodes[['ads_id', 'wng_id']], how='left', left_on='adres_id', right_on='ads_id')
self.version += '_woningId'
self.save()
|
def impute_values_for_bagless_addresses(self, adres):
'Impute values for adresses where no BAG-match could be found.'
clean.impute_missing_values(adres)
adres.fillna(value={'huisnummer_nummeraanduiding': 0, 'huisletter_nummeraanduiding': 'None', '_openbare_ruimte_naam_nummeraanduiding': 'None', 'huisnummer_toevoeging_nummeraanduiding': 'None', 'type_woonobject_omschrijving': 'None', 'eigendomsverhouding_id': 'None', 'financieringswijze_id': (- 1), 'gebruik_id': (- 1), 'reden_opvoer_id': (- 1), 'status_id_verblijfsobject': (- 1), 'toegang_id': 'None'}, inplace=True)
return adres
| -5,799,213,507,536,765,000
|
Impute values for addresses where no BAG match could be found.
|
codebase/datasets/adres_dataset.py
|
impute_values_for_bagless_addresses
|
petercuret/woonfraude
|
python
|
def impute_values_for_bagless_addresses(self, adres):
clean.impute_missing_values(adres)
adres.fillna(value={'huisnummer_nummeraanduiding': 0, 'huisletter_nummeraanduiding': 'None', '_openbare_ruimte_naam_nummeraanduiding': 'None', 'huisnummer_toevoeging_nummeraanduiding': 'None', 'type_woonobject_omschrijving': 'None', 'eigendomsverhouding_id': 'None', 'financieringswijze_id': (- 1), 'gebruik_id': (- 1), 'reden_opvoer_id': (- 1), 'status_id_verblijfsobject': (- 1), 'toegang_id': 'None'}, inplace=True)
return adres
|
def enrich_with_bag(self, bag):
'Enrich the adres data with information from the BAG data. Uses the bag dataframe as input.'
bag = self.prepare_bag(bag)
self.data = self.prepare_adres(self.data)
self.data = self.match_bwv_bag(self.data, bag)
self.data = self.replace_string_nan_adres(self.data)
self.data = self.impute_values_for_bagless_addresses(self.data)
self.version += '_bag'
self.save()
print('The adres dataset is now enriched with BAG data.')
| 2,526,807,197,943,869,400
|
Enrich the adres data with information from the BAG data. Uses the bag dataframe as input.
|
codebase/datasets/adres_dataset.py
|
enrich_with_bag
|
petercuret/woonfraude
|
python
|
def enrich_with_bag(self, bag):
bag = self.prepare_bag(bag)
self.data = self.prepare_adres(self.data)
self.data = self.match_bwv_bag(self.data, bag)
self.data = self.replace_string_nan_adres(self.data)
self.data = self.impute_values_for_bagless_addresses(self.data)
self.version += '_bag'
self.save()
print('The adres dataset is now enriched with BAG data.')
|
def enrich_with_personen_features(self, personen):
'Add aggregated features relating to persons to the address dataframe. Uses the personen dataframe as input.'
adres = self.data
today = pd.to_datetime('today')
personen['geboortedatum'] = pd.to_datetime(personen['geboortedatum'], errors='coerce')
geboortedatum_mode = personen['geboortedatum'].mode()[0]
personen['leeftijd'] = (today - personen['geboortedatum'])
personen['leeftijd'] = personen['leeftijd'].apply((lambda x: (x.days / 365.25)))
adres_ids = adres.adres_id
personen_adres_ids = personen.ads_id_wa
intersect = set(adres_ids).intersection(set(personen_adres_ids))
inhabitant_locs = {}
print('Now looping over all address ids that have a link with one or more inhabitants...')
for (i, adres_id) in enumerate(intersect):
if ((i % 1000) == 0):
print(i)
inhabitant_locs[adres_id] = personen_adres_ids[(personen_adres_ids == adres_id)]
adres['aantal_personen'] = 0
adres['aantal_vertrokken_personen'] = (- 1)
adres['aantal_overleden_personen'] = (- 1)
adres['aantal_niet_uitgeschrevenen'] = (- 1)
adres['leegstand'] = True
adres['leeftijd_jongste_persoon'] = (- 1.0)
adres['leeftijd_oudste_persoon'] = (- 1.0)
adres['aantal_kinderen'] = 0
adres['percentage_kinderen'] = (- 1.0)
adres['aantal_mannen'] = 0
adres['percentage_mannen'] = (- 1.0)
adres['gemiddelde_leeftijd'] = (- 1.0)
adres['stdev_leeftijd'] = (- 1.0)
adres['aantal_achternamen'] = 0
adres['percentage_achternamen'] = (- 1.0)
for i in range(1, 8):
adres[f'gezinsverhouding_{i}'] = 0
adres[f'percentage_gezinsverhouding_{i}'] = 0.0
print('Now looping over all rows in the adres dataframe in order to add person information...')
for i in adres.index:
if ((i % 1000) == 0):
print(i)
row = adres.iloc[i]
adres_id = row['adres_id']
try:
inhab_locs = inhabitant_locs[adres_id].keys()
inhab = personen.loc[inhab_locs]
aantal_vertrokken_personen = sum(inhab['vertrekdatum_adam'].notnull())
aantal_overleden_personen = sum(inhab['overlijdensdatum'].notnull())
aantal_niet_uitgeschrevenen = len(inhab[(inhab['vertrekdatum_adam'].notnull() | inhab['overlijdensdatum'].notnull())])
adres['aantal_vertrokken_personen'] = aantal_vertrokken_personen
adres['aantal_overleden_personen'] = aantal_overleden_personen
adres['aantal_niet_uitgeschrevenen'] = aantal_niet_uitgeschrevenen
if (len(inhab) > aantal_niet_uitgeschrevenen):
adres['leegstand'] = False
aantal_personen = len(inhab)
adres.at[(i, 'aantal_personen')] = aantal_personen
leeftijd_jongste_persoon = min(inhab['leeftijd'])
adres.at[(i, 'leeftijd_jongste_persoon')] = leeftijd_jongste_persoon
leeftijd_oudste_persoon = max(inhab['leeftijd'])
adres.at[(i, 'leeftijd_oudste_persoon')] = leeftijd_oudste_persoon
aantal_kinderen = sum((inhab['leeftijd'] < 18))
adres.at[(i, 'aantal_kinderen')] = aantal_kinderen
adres.at[(i, 'percentage_kinderen')] = (aantal_kinderen / aantal_personen)
aantal_mannen = sum((inhab.geslacht == 'M'))
adres.at[(i, 'aantal_mannen')] = aantal_mannen
adres.at[(i, 'percentage_mannen')] = (aantal_mannen / aantal_personen)
gemiddelde_leeftijd = inhab.leeftijd.mean()
adres.at[(i, 'gemiddelde_leeftijd')] = gemiddelde_leeftijd
stdev_leeftijd = inhab.leeftijd.std()
adres.at[(i, 'stdev_leeftijd')] = (stdev_leeftijd if (aantal_personen > 1) else 0)
aantal_achternamen = inhab.naam.nunique()
adres.at[(i, 'aantal_achternamen')] = aantal_achternamen
adres.at[(i, 'percentage_achternamen')] = (aantal_achternamen / aantal_personen)
gezinsverhouding = inhab.gezinsverhouding.value_counts()
for key in gezinsverhouding.keys():
val = gezinsverhouding[key]
adres.at[(i, f'gezinsverhouding_{key}')] = val
adres.at[(i, f'percentage_gezinsverhouding_{key}')] = (val / aantal_personen)
except (KeyError, ValueError) as e:
pass
print('...done!')
self.data = adres
self.version += '_personen'
self.save()
print('The adres dataset is now enriched with personen data.')
| -7,579,026,273,688,002,000
|
Add aggregated features relating to persons to the address dataframe. Uses the personen dataframe as input.
|
codebase/datasets/adres_dataset.py
|
enrich_with_personen_features
|
petercuret/woonfraude
|
python
|
def enrich_with_personen_features(self, personen):
adres = self.data
today = pd.to_datetime('today')
personen['geboortedatum'] = pd.to_datetime(personen['geboortedatum'], errors='coerce')
geboortedatum_mode = personen['geboortedatum'].mode()[0]
personen['leeftijd'] = (today - personen['geboortedatum'])
personen['leeftijd'] = personen['leeftijd'].apply((lambda x: (x.days / 365.25)))
adres_ids = adres.adres_id
personen_adres_ids = personen.ads_id_wa
intersect = set(adres_ids).intersection(set(personen_adres_ids))
inhabitant_locs = {}
print('Now looping over all address ids that have a link with one or more inhabitants...')
for (i, adres_id) in enumerate(intersect):
if ((i % 1000) == 0):
print(i)
inhabitant_locs[adres_id] = personen_adres_ids[(personen_adres_ids == adres_id)]
adres['aantal_personen'] = 0
adres['aantal_vertrokken_personen'] = (- 1)
adres['aantal_overleden_personen'] = (- 1)
adres['aantal_niet_uitgeschrevenen'] = (- 1)
adres['leegstand'] = True
adres['leeftijd_jongste_persoon'] = (- 1.0)
adres['leeftijd_oudste_persoon'] = (- 1.0)
adres['aantal_kinderen'] = 0
adres['percentage_kinderen'] = (- 1.0)
adres['aantal_mannen'] = 0
adres['percentage_mannen'] = (- 1.0)
adres['gemiddelde_leeftijd'] = (- 1.0)
adres['stdev_leeftijd'] = (- 1.0)
adres['aantal_achternamen'] = 0
adres['percentage_achternamen'] = (- 1.0)
for i in range(1, 8):
adres[f'gezinsverhouding_{i}'] = 0
adres[f'percentage_gezinsverhouding_{i}'] = 0.0
print('Now looping over all rows in the adres dataframe in order to add person information...')
for i in adres.index:
if ((i % 1000) == 0):
print(i)
row = adres.iloc[i]
adres_id = row['adres_id']
try:
inhab_locs = inhabitant_locs[adres_id].keys()
inhab = personen.loc[inhab_locs]
aantal_vertrokken_personen = sum(inhab['vertrekdatum_adam'].notnull())
aantal_overleden_personen = sum(inhab['overlijdensdatum'].notnull())
aantal_niet_uitgeschrevenen = len(inhab[(inhab['vertrekdatum_adam'].notnull() | inhab['overlijdensdatum'].notnull())])
adres['aantal_vertrokken_personen'] = aantal_vertrokken_personen
adres['aantal_overleden_personen'] = aantal_overleden_personen
adres['aantal_niet_uitgeschrevenen'] = aantal_niet_uitgeschrevenen
if (len(inhab) > aantal_niet_uitgeschrevenen):
adres['leegstand'] = False
aantal_personen = len(inhab)
adres.at[(i, 'aantal_personen')] = aantal_personen
leeftijd_jongste_persoon = min(inhab['leeftijd'])
adres.at[(i, 'leeftijd_jongste_persoon')] = leeftijd_jongste_persoon
leeftijd_oudste_persoon = max(inhab['leeftijd'])
adres.at[(i, 'leeftijd_oudste_persoon')] = leeftijd_oudste_persoon
aantal_kinderen = sum((inhab['leeftijd'] < 18))
adres.at[(i, 'aantal_kinderen')] = aantal_kinderen
adres.at[(i, 'percentage_kinderen')] = (aantal_kinderen / aantal_personen)
aantal_mannen = sum((inhab.geslacht == 'M'))
adres.at[(i, 'aantal_mannen')] = aantal_mannen
adres.at[(i, 'percentage_mannen')] = (aantal_mannen / aantal_personen)
gemiddelde_leeftijd = inhab.leeftijd.mean()
adres.at[(i, 'gemiddelde_leeftijd')] = gemiddelde_leeftijd
stdev_leeftijd = inhab.leeftijd.std()
adres.at[(i, 'stdev_leeftijd')] = (stdev_leeftijd if (aantal_personen > 1) else 0)
aantal_achternamen = inhab.naam.nunique()
adres.at[(i, 'aantal_achternamen')] = aantal_achternamen
adres.at[(i, 'percentage_achternamen')] = (aantal_achternamen / aantal_personen)
gezinsverhouding = inhab.gezinsverhouding.value_counts()
for key in gezinsverhouding.keys():
val = gezinsverhouding[key]
adres.at[(i, f'gezinsverhouding_{key}')] = val
adres.at[(i, f'percentage_gezinsverhouding_{key}')] = (val / aantal_personen)
except (KeyError, ValueError) as e:
pass
print('...done!')
self.data = adres
self.version += '_personen'
self.save()
print('The adres dataset is now enriched with personen data.')
|
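As a hedged aside, a few of the per-address aggregates computed in the loop above can also be expressed as a single vectorized groupby; the toy frame and column names below mirror the ones assumed in the method.

```python
import pandas as pd

personen = pd.DataFrame({
    'ads_id_wa': [1, 1, 2],
    'leeftijd': [34.0, 8.0, 61.0],
    'geslacht': ['M', 'V', 'M'],
})

# per-address aggregates, equivalent to a handful of the loop's features
agg = personen.groupby('ads_id_wa').agg(
    aantal_personen=('leeftijd', 'size'),
    leeftijd_jongste_persoon=('leeftijd', 'min'),
    leeftijd_oudste_persoon=('leeftijd', 'max'),
    gemiddelde_leeftijd=('leeftijd', 'mean'),
    aantal_mannen=('geslacht', lambda s: (s == 'M').sum()),
)
print(agg)
```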
def add_hotline_features(self, hotline):
'Add the hotline features to the adres dataframe.'
merge = self.data.merge(hotline, on='wng_id', how='left')
adres_groups = merge.groupby(by='adres_id')
hotline_counts = adres_groups['id'].agg(['count'])
hotline_counts.columns = ['aantal_hotline_meldingen']
self.data = self.data.merge(hotline_counts, on='adres_id', how='left')
self.version += '_hotline'
self.save()
print('The adres dataset is now enriched with hotline data.')
| 4,715,285,952,275,173,000
|
Add the hotline features to the adres dataframe.
|
codebase/datasets/adres_dataset.py
|
add_hotline_features
|
petercuret/woonfraude
|
python
|
def add_hotline_features(self, hotline):
merge = self.data.merge(hotline, on='wng_id', how='left')
adres_groups = merge.groupby(by='adres_id')
hotline_counts = adres_groups['id'].agg(['count'])
hotline_counts.columns = ['aantal_hotline_meldingen']
self.data = self.data.merge(hotline_counts, on='adres_id', how='left')
self.version += '_hotline'
self.save()
print('The adres dataset is now enriched with hotline data.')
|
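A toy sketch of the join-then-count performed above (the frame contents are made up): counting the hotline `id` per `adres_id` after the left merge yields the `aantal_hotline_meldingen` feature.

```python
import pandas as pd

adres = pd.DataFrame({'adres_id': [10, 11], 'wng_id': [1, 2]})
hotline = pd.DataFrame({'id': [100, 101, 102], 'wng_id': [1, 1, 2]})

merge = adres.merge(hotline, on='wng_id', how='left')
hotline_counts = merge.groupby('adres_id')['id'].agg(['count'])
hotline_counts.columns = ['aantal_hotline_meldingen']
print(adres.merge(hotline_counts, on='adres_id', how='left'))
# adres_id 10 -> 2 meldingen, adres_id 11 -> 1 melding
```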
def SubPixel1D_v2(I, r):
'One-dimensional subpixel upsampling layer\n\n Based on https://github.com/Tetrachrome/subpixel/blob/master/subpixel.py\n '
with tf.compat.v1.name_scope('subpixel'):
(bsize, a, r) = I.get_shape().as_list()
bsize = tf.shape(input=I)[0]
X = tf.split(1, a, I)
if ('axis' in tf.squeeze.__code__.co_varnames):
X = tf.concat(1, [tf.squeeze(x, axis=1) for x in X])
elif ('squeeze_dims' in tf.squeeze.__code__.co_varnames):
X = tf.concat(1, [tf.squeeze(x, axis=[1]) for x in X])
else:
raise Exception('Unsupported version of tensorflow')
return tf.reshape(X, (bsize, (a * r), 1))
| 1,428,587,690,402,081,500
|
One-dimensional subpixel upsampling layer
Based on https://github.com/Tetrachrome/subpixel/blob/master/subpixel.py
|
src/models/layers/subpixel.py
|
SubPixel1D_v2
|
Lootwig/audio-super-res
|
python
|
def SubPixel1D_v2(I, r):
'One-dimensional subpixel upsampling layer\n\n Based on https://github.com/Tetrachrome/subpixel/blob/master/subpixel.py\n '
with tf.compat.v1.name_scope('subpixel'):
(bsize, a, r) = I.get_shape().as_list()
bsize = tf.shape(input=I)[0]
X = tf.split(1, a, I)
if ('axis' in tf.squeeze.__code__.co_varnames):
X = tf.concat(1, [tf.squeeze(x, axis=1) for x in X])
elif ('squeeze_dims' in tf.squeeze.__code__.co_varnames):
X = tf.concat(1, [tf.squeeze(x, axis=[1]) for x in X])
else:
raise Exception('Unsupported version of tensorflow')
return tf.reshape(X, (bsize, (a * r), 1))
|
def SubPixel1D(I, r):
'One-dimensional subpixel upsampling layer\n\n Calls a tensorflow function that directly implements this functionality.\n We assume input has dim (batch, width, r)\n '
with tf.compat.v1.name_scope('subpixel'):
X = tf.transpose(a=I, perm=[2, 1, 0])
X = tf.batch_to_space(X, [r], [[0, 0]])
X = tf.transpose(a=X, perm=[2, 1, 0])
return X
| 6,580,163,009,517,961,000
|
One-dimensional subpixel upsampling layer
Calls a tensorflow function that directly implements this functionality.
We assume input has dim (batch, width, r)
|
src/models/layers/subpixel.py
|
SubPixel1D
|
Lootwig/audio-super-res
|
python
|
def SubPixel1D(I, r):
'One-dimensional subpixel upsampling layer\n\n Calls a tensorflow function that directly implements this functionality.\n We assume input has dim (batch, width, r)\n '
with tf.compat.v1.name_scope('subpixel'):
X = tf.transpose(a=I, perm=[2, 1, 0])
X = tf.batch_to_space(X, [r], [[0, 0]])
X = tf.transpose(a=X, perm=[2, 1, 0])
return X
|
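A plain-NumPy sketch of what the transpose/batch_to_space composition above is intended to compute: a one-dimensional periodic shuffle (batch, width, r) -> (batch, width*r, 1), which for a C-ordered array is just a reshape.

```python
import numpy as np

batch, width, r = 2, 4, 3
I = np.arange(batch * width * r).reshape(batch, width, r)

# interleave the r channels along the width axis
X = I.reshape(batch, width * r, 1)

assert X[0, 5, 0] == I[0, 1, 2]   # output position 5 == 1 * r + 2
print(X.shape)                    # (2, 12, 1)
```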
def SubPixel1D_multichan(I, r):
'One-dimensional subpixel upsampling layer\n\n Calls a tensorflow function that directly implements this functionality.\n We assume input has dim (batch, width, r).\n\n Works with multiple channels: (B,L,rC) -> (B,rL,C)\n '
with tf.compat.v1.name_scope('subpixel'):
(_, w, rc) = I.get_shape()
assert ((rc % r) == 0)
c = (rc / r)
X = tf.transpose(a=I, perm=[2, 1, 0])
X = tf.batch_to_space(X, [r], [[0, 0]])
X = tf.transpose(a=X, perm=[2, 1, 0])
return X
| -7,981,073,372,711,496,000
|
One-dimensional subpixel upsampling layer
Calls a tensorflow function that directly implements this functionality.
We assume input has dim (batch, width, r).
Works with multiple channels: (B,L,rC) -> (B,rL,C)
|
src/models/layers/subpixel.py
|
SubPixel1D_multichan
|
Lootwig/audio-super-res
|
python
|
def SubPixel1D_multichan(I, r):
'One-dimensional subpixel upsampling layer\n\n Calls a tensorflow function that directly implements this functionality.\n We assume input has dim (batch, width, r).\n\n Works with multiple channels: (B,L,rC) -> (B,rL,C)\n '
with tf.compat.v1.name_scope('subpixel'):
(_, w, rc) = I.get_shape()
assert ((rc % r) == 0)
c = (rc / r)
X = tf.transpose(a=I, perm=[2, 1, 0])
X = tf.batch_to_space(X, [r], [[0, 0]])
X = tf.transpose(a=X, perm=[2, 1, 0])
return X
|
def dkim_sign(message, dkim_domain=None, dkim_key=None, dkim_selector=None, dkim_headers=None):
'Return signed email message if dkim package and settings are available.'
try:
import dkim
except ImportError:
pass
else:
if (dkim_domain and dkim_key):
sig = dkim.sign(message, dkim_selector, dkim_domain, dkim_key, include_headers=dkim_headers)
message = (sig + message)
return message
| -6,159,254,177,365,536,000
|
Return signed email message if dkim package and settings are available.
|
django_ses/__init__.py
|
dkim_sign
|
mlissner/django-ses
|
python
|
def dkim_sign(message, dkim_domain=None, dkim_key=None, dkim_selector=None, dkim_headers=None):
try:
import dkim
except ImportError:
pass
else:
if (dkim_domain and dkim_key):
sig = dkim.sign(message, dkim_selector, dkim_domain, dkim_key, include_headers=dkim_headers)
message = (sig + message)
return message
|
def cast_nonzero_to_float(val):
'Cast nonzero number to float; on zero or None, return None'
if (not val):
return None
return float(val)
| 6,612,048,108,139,969,000
|
Cast nonzero number to float; on zero or None, return None
|
django_ses/__init__.py
|
cast_nonzero_to_float
|
mlissner/django-ses
|
python
|
def cast_nonzero_to_float(val):
if (not val):
return None
return float(val)
|
def open(self):
'Create a connection to the AWS API server. This can be reused for\n sending multiple emails.\n '
if self.connection:
return False
try:
self.connection = boto3.client('ses', aws_access_key_id=self._access_key_id, aws_secret_access_key=self._access_key, region_name=self._region_name, endpoint_url=self._endpoint_url, config=self._config)
except Exception:
if (not self.fail_silently):
raise
| -3,722,438,059,502,486,000
|
Create a connection to the AWS API server. This can be reused for
sending multiple emails.
|
django_ses/__init__.py
|
open
|
mlissner/django-ses
|
python
|
def open(self):
'Create a connection to the AWS API server. This can be reused for\n sending multiple emails.\n '
if self.connection:
return False
try:
self.connection = boto3.client('ses', aws_access_key_id=self._access_key_id, aws_secret_access_key=self._access_key, region_name=self._region_name, endpoint_url=self._endpoint_url, config=self._config)
except Exception:
if (not self.fail_silently):
raise
|
def close(self):
'Close any open HTTP connections to the API server.\n '
self.connection = None
| 3,509,590,564,129,190,400
|
Close any open HTTP connections to the API server.
|
django_ses/__init__.py
|
close
|
mlissner/django-ses
|
python
|
def close(self):
'\n '
self.connection = None
|
def send_messages(self, email_messages):
'Sends one or more EmailMessage objects and returns the number of\n email messages sent.\n '
if (not email_messages):
return
new_conn_created = self.open()
if (not self.connection):
return
num_sent = 0
source = settings.AWS_SES_RETURN_PATH
for message in email_messages:
if (settings.AWS_SES_CONFIGURATION_SET and ('X-SES-CONFIGURATION-SET' not in message.extra_headers)):
if callable(settings.AWS_SES_CONFIGURATION_SET):
message.extra_headers['X-SES-CONFIGURATION-SET'] = settings.AWS_SES_CONFIGURATION_SET(message, dkim_domain=self.dkim_domain, dkim_key=self.dkim_key, dkim_selector=self.dkim_selector, dkim_headers=self.dkim_headers)
else:
message.extra_headers['X-SES-CONFIGURATION-SET'] = settings.AWS_SES_CONFIGURATION_SET
if self._throttle:
global recent_send_times
now = datetime.now()
rate_limit = self.get_rate_limit()
logger.debug("send_messages.throttle rate_limit='{}'".format(rate_limit))
window = 2.0
window_start = (now - timedelta(seconds=window))
new_send_times = []
for time in recent_send_times:
if (time > window_start):
new_send_times.append(time)
recent_send_times = new_send_times
if (len(new_send_times) > ((rate_limit * window) * self._throttle)):
delta = (now - new_send_times[0])
total_seconds = ((delta.microseconds + ((delta.seconds + ((delta.days * 24) * 3600)) * (10 ** 6))) / (10 ** 6))
delay = (window - total_seconds)
if (delay > 0):
sleep(delay)
recent_send_times.append(now)
kwargs = dict(Source=(source or message.from_email), Destinations=message.recipients(), RawMessage={'Data': dkim_sign(message.message().as_string(), dkim_key=self.dkim_key, dkim_domain=self.dkim_domain, dkim_selector=self.dkim_selector, dkim_headers=self.dkim_headers)})
if self.ses_source_arn:
kwargs['SourceArn'] = self.ses_source_arn
if self.ses_from_arn:
kwargs['FromArn'] = self.ses_from_arn
if self.ses_return_path_arn:
kwargs['ReturnPathArn'] = self.ses_return_path_arn
try:
response = self.connection.send_raw_email(**kwargs)
message.extra_headers['status'] = 200
message.extra_headers['message_id'] = response['MessageId']
message.extra_headers['request_id'] = response['ResponseMetadata']['RequestId']
num_sent += 1
if ('X-SES-CONFIGURATION-SET' in message.extra_headers):
logger.debug("send_messages.sent from='{}' recipients='{}' message_id='{}' request_id='{}' ses-configuration-set='{}'".format(message.from_email, ', '.join(message.recipients()), message.extra_headers['message_id'], message.extra_headers['request_id'], message.extra_headers['X-SES-CONFIGURATION-SET']))
else:
logger.debug("send_messages.sent from='{}' recipients='{}' message_id='{}' request_id='{}'".format(message.from_email, ', '.join(message.recipients()), message.extra_headers['message_id'], message.extra_headers['request_id']))
except ResponseError as err:
error_keys = ['status', 'reason', 'body', 'request_id', 'error_code', 'error_message']
for key in error_keys:
message.extra_headers[key] = getattr(err, key, None)
if (not self.fail_silently):
raise
if new_conn_created:
self.close()
return num_sent
| -3,148,640,440,429,157,000
|
Sends one or more EmailMessage objects and returns the number of
email messages sent.
|
django_ses/__init__.py
|
send_messages
|
mlissner/django-ses
|
python
|
def send_messages(self, email_messages):
'Sends one or more EmailMessage objects and returns the number of\n email messages sent.\n '
if (not email_messages):
return
new_conn_created = self.open()
if (not self.connection):
return
num_sent = 0
source = settings.AWS_SES_RETURN_PATH
for message in email_messages:
if (settings.AWS_SES_CONFIGURATION_SET and ('X-SES-CONFIGURATION-SET' not in message.extra_headers)):
if callable(settings.AWS_SES_CONFIGURATION_SET):
message.extra_headers['X-SES-CONFIGURATION-SET'] = settings.AWS_SES_CONFIGURATION_SET(message, dkim_domain=self.dkim_domain, dkim_key=self.dkim_key, dkim_selector=self.dkim_selector, dkim_headers=self.dkim_headers)
else:
message.extra_headers['X-SES-CONFIGURATION-SET'] = settings.AWS_SES_CONFIGURATION_SET
if self._throttle:
global recent_send_times
now = datetime.now()
rate_limit = self.get_rate_limit()
logger.debug("send_messages.throttle rate_limit='{}'".format(rate_limit))
window = 2.0
window_start = (now - timedelta(seconds=window))
new_send_times = []
for time in recent_send_times:
if (time > window_start):
new_send_times.append(time)
recent_send_times = new_send_times
if (len(new_send_times) > ((rate_limit * window) * self._throttle)):
delta = (now - new_send_times[0])
total_seconds = ((delta.microseconds + ((delta.seconds + ((delta.days * 24) * 3600)) * (10 ** 6))) / (10 ** 6))
delay = (window - total_seconds)
if (delay > 0):
sleep(delay)
recent_send_times.append(now)
kwargs = dict(Source=(source or message.from_email), Destinations=message.recipients(), RawMessage={'Data': dkim_sign(message.message().as_string(), dkim_key=self.dkim_key, dkim_domain=self.dkim_domain, dkim_selector=self.dkim_selector, dkim_headers=self.dkim_headers)})
if self.ses_source_arn:
kwargs['SourceArn'] = self.ses_source_arn
if self.ses_from_arn:
kwargs['FromArn'] = self.ses_from_arn
if self.ses_return_path_arn:
kwargs['ReturnPathArn'] = self.ses_return_path_arn
try:
response = self.connection.send_raw_email(**kwargs)
message.extra_headers['status'] = 200
message.extra_headers['message_id'] = response['MessageId']
message.extra_headers['request_id'] = response['ResponseMetadata']['RequestId']
num_sent += 1
if ('X-SES-CONFIGURATION-SET' in message.extra_headers):
logger.debug("send_messages.sent from='{}' recipients='{}' message_id='{}' request_id='{}' ses-configuration-set='{}'".format(message.from_email, ', '.join(message.recipients()), message.extra_headers['message_id'], message.extra_headers['request_id'], message.extra_headers['X-SES-CONFIGURATION-SET']))
else:
logger.debug("send_messages.sent from='{}' recipients='{}' message_id='{}' request_id='{}'".format(message.from_email, ', '.join(message.recipients()), message.extra_headers['message_id'], message.extra_headers['request_id']))
except ResponseError as err:
error_keys = ['status', 'reason', 'body', 'request_id', 'error_code', 'error_message']
for key in error_keys:
message.extra_headers[key] = getattr(err, key, None)
if (not self.fail_silently):
raise
if new_conn_created:
self.close()
return num_sent
|
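The throttling branch above keeps a 2-second sliding window of recent send timestamps. A standalone sketch of just that logic follows (`rate_limit` and `throttle` are assumed inputs, and `total_seconds()` stands in for the manual microsecond arithmetic).

```python
from datetime import datetime, timedelta
from time import sleep

recent_send_times = []

def throttle_send(rate_limit, throttle=1.0, window=2.0):
    """Sleep just long enough to stay under rate_limit * throttle sends per second."""
    global recent_send_times
    now = datetime.now()
    window_start = now - timedelta(seconds=window)
    # keep only the sends that happened inside the window
    recent_send_times = [t for t in recent_send_times if t > window_start]
    if len(recent_send_times) > rate_limit * window * throttle:
        elapsed = (now - recent_send_times[0]).total_seconds()
        delay = window - elapsed
        if delay > 0:
            sleep(delay)
    recent_send_times.append(now)
```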
def run_python_tests():
' Runs the Python tests.\n Returns:\n True if the tests all succeed, False if there are failures. '
print('Starting tests...')
loader = unittest.TestLoader()
dir_path = os.path.dirname(os.path.realpath(__file__))
suite = loader.discover('rhodopsin/tests', top_level_dir=dir_path)
test_result = unittest.TextTestRunner(verbosity=2).run(suite)
if (not test_result.wasSuccessful()):
return False
return True
| 6,912,438,203,725,193,000
|
Runs the Python tests.
Returns:
True if the tests all succeed, False if there are failures.
|
run_tests.py
|
run_python_tests
|
djpetti/rhodopsin
|
python
|
def run_python_tests():
' Runs the Python tests.\n Returns:\n True if the tests all succeed, False if there are failures. '
print('Starting tests...')
loader = unittest.TestLoader()
dir_path = os.path.dirname(os.path.realpath(__file__))
suite = loader.discover('rhodopsin/tests', top_level_dir=dir_path)
test_result = unittest.TextTestRunner(verbosity=2).run(suite)
if (not test_result.wasSuccessful()):
return False
return True
|
def upgrade():
'Migrations for the upgrade.'
op.execute("\n UPDATE db_dbnode SET type = 'data.bool.Bool.' WHERE type = 'data.base.Bool.';\n UPDATE db_dbnode SET type = 'data.float.Float.' WHERE type = 'data.base.Float.';\n UPDATE db_dbnode SET type = 'data.int.Int.' WHERE type = 'data.base.Int.';\n UPDATE db_dbnode SET type = 'data.str.Str.' WHERE type = 'data.base.Str.';\n UPDATE db_dbnode SET type = 'data.list.List.' WHERE type = 'data.base.List.';\n ")
| -5,629,107,005,712,645,000
|
Migrations for the upgrade.
|
aiida/storage/psql_dos/migrations/versions/django_0009_base_data_plugin_type_string.py
|
upgrade
|
mkrack/aiida-core
|
python
|
def upgrade():
op.execute("\n UPDATE db_dbnode SET type = 'data.bool.Bool.' WHERE type = 'data.base.Bool.';\n UPDATE db_dbnode SET type = 'data.float.Float.' WHERE type = 'data.base.Float.';\n UPDATE db_dbnode SET type = 'data.int.Int.' WHERE type = 'data.base.Int.';\n UPDATE db_dbnode SET type = 'data.str.Str.' WHERE type = 'data.base.Str.';\n UPDATE db_dbnode SET type = 'data.list.List.' WHERE type = 'data.base.List.';\n ")
|
def downgrade():
'Migrations for the downgrade.'
op.execute("\n UPDATE db_dbnode SET type = 'data.base.Bool.' WHERE type = 'data.bool.Bool.';\n UPDATE db_dbnode SET type = 'data.base.Float.' WHERE type = 'data.float.Float.';\n UPDATE db_dbnode SET type = 'data.base.Int.' WHERE type = 'data.int.Int.';\n UPDATE db_dbnode SET type = 'data.base.Str.' WHERE type = 'data.str.Str.';\n UPDATE db_dbnode SET type = 'data.base.List.' WHERE type = 'data.list.List.';\n ")
| 3,713,483,839,730,805,000
|
Migrations for the downgrade.
|
aiida/storage/psql_dos/migrations/versions/django_0009_base_data_plugin_type_string.py
|
downgrade
|
mkrack/aiida-core
|
python
|
def downgrade():
op.execute("\n UPDATE db_dbnode SET type = 'data.base.Bool.' WHERE type = 'data.bool.Bool.';\n UPDATE db_dbnode SET type = 'data.base.Float.' WHERE type = 'data.float.Float.';\n UPDATE db_dbnode SET type = 'data.base.Int.' WHERE type = 'data.int.Int.';\n UPDATE db_dbnode SET type = 'data.base.Str.' WHERE type = 'data.str.Str.';\n UPDATE db_dbnode SET type = 'data.base.List.' WHERE type = 'data.list.List.';\n ")
|
def VirtualMachineRuntimeInfo(vim, *args, **kwargs):
'The RuntimeInfo data object type provides information about the execution state\n and history of a virtual machine.'
obj = vim.client.factory.create('{urn:vim25}VirtualMachineRuntimeInfo')
if ((len(args) + len(kwargs)) < 7):
raise IndexError(('Expected at least 8 arguments got: %d' % len(args)))
required = ['connectionState', 'consolidationNeeded', 'faultToleranceState', 'numMksConnections', 'powerState', 'recordReplayState', 'toolsInstallerMounted']
optional = ['bootTime', 'cleanPowerOff', 'dasVmProtection', 'device', 'host', 'maxCpuUsage', 'maxMemoryUsage', 'memoryOverhead', 'minRequiredEVCModeKey', 'needSecondaryReason', 'question', 'suspendInterval', 'suspendTime', 'dynamicProperty', 'dynamicType']
for (name, arg) in zip((required + optional), args):
setattr(obj, name, arg)
for (name, value) in kwargs.items():
if (name in (required + optional)):
setattr(obj, name, value)
else:
raise InvalidArgumentError(('Invalid argument: %s. Expected one of %s' % (name, ', '.join((required + optional)))))
return obj
| -7,396,303,371,140,011,000
|
The RuntimeInfo data object type provides information about the execution state
and history of a virtual machine.
|
pyvisdk/do/virtual_machine_runtime_info.py
|
VirtualMachineRuntimeInfo
|
Infinidat/pyvisdk
|
python
|
def VirtualMachineRuntimeInfo(vim, *args, **kwargs):
'The RuntimeInfo data object type provides information about the execution state\n and history of a virtual machine.'
obj = vim.client.factory.create('{urn:vim25}VirtualMachineRuntimeInfo')
if ((len(args) + len(kwargs)) < 7):
raise IndexError(('Expected at least 8 arguments got: %d' % len(args)))
required = ['connectionState', 'consolidationNeeded', 'faultToleranceState', 'numMksConnections', 'powerState', 'recordReplayState', 'toolsInstallerMounted']
optional = ['bootTime', 'cleanPowerOff', 'dasVmProtection', 'device', 'host', 'maxCpuUsage', 'maxMemoryUsage', 'memoryOverhead', 'minRequiredEVCModeKey', 'needSecondaryReason', 'question', 'suspendInterval', 'suspendTime', 'dynamicProperty', 'dynamicType']
for (name, arg) in zip((required + optional), args):
setattr(obj, name, arg)
for (name, value) in kwargs.items():
if (name in (required + optional)):
setattr(obj, name, value)
else:
raise InvalidArgumentError(('Invalid argument: %s. Expected one of %s' % (name, ', '.join((required + optional)))))
return obj
|
def __init__(self, list=None):
' A list of particle ids and names can be given to the constructor.\n '
self._list = []
if (list != None):
self._list = list
| -4,374,892,717,127,874,000
|
A list of particle ids and names can be given to the constructor.
|
FWCore/GuiBrowsers/python/Vispa/Plugins/EdmBrowser/ParticleDataList.py
|
__init__
|
7quantumphysics/cmssw
|
python
|
def __init__(self, list=None):
' \n '
self._list = []
if (list != None):
self._list = list
|
def addParticle(self, ids, names, particleData):
' Add a paricle with (multiple) ids and names to the list.\n '
if (not (isinstance(ids, list) and isinstance(names, list))):
raise TypeError("addParticle needs to lists as input: e.g. [1,-1],['d','dbar']")
self._list += [(ids, names, particleData)]
| -4,326,403,141,763,996,700
|
Add a particle with (multiple) ids and names to the list.
|
FWCore/GuiBrowsers/python/Vispa/Plugins/EdmBrowser/ParticleDataList.py
|
addParticle
|
7quantumphysics/cmssw
|
python
|
def addParticle(self, ids, names, particleData):
' \n '
if (not (isinstance(ids, list) and isinstance(names, list))):
raise TypeError("addParticle needs to lists as input: e.g. [1,-1],['d','dbar']")
self._list += [(ids, names, particleData)]
|
def getDefaultName(self, name):
" Return the default (first in list) name given any of the particle's names.\n "
for items in self._list:
if (name in items[1]):
return items[1][0]
return name
| -5,270,448,408,394,749,000
|
Return the default (first in list) name given any of the particle's names.
|
FWCore/GuiBrowsers/python/Vispa/Plugins/EdmBrowser/ParticleDataList.py
|
getDefaultName
|
7quantumphysics/cmssw
|
python
|
def getDefaultName(self, name):
" \n "
for items in self._list:
if (name in items[1]):
return items[1][0]
return name
|
def getDefaultId(self, id):
" Return the default (first in list) id given any of the particle's ids.\n "
for items in self._list:
if (id in items[0]):
return items[0][0]
return id
| -8,276,303,927,237,939,000
|
Return the default (first in list) id given any of the particle's ids.
|
FWCore/GuiBrowsers/python/Vispa/Plugins/EdmBrowser/ParticleDataList.py
|
getDefaultId
|
7quantumphysics/cmssw
|
python
|
def getDefaultId(self, id):
" \n "
for items in self._list:
if (id in items[0]):
return items[0][0]
return id
|
def getIdFromName(self, name):
" Return the default (first in list) id given any of the particle's names.\n "
for items in self._list:
if (name in items[1]):
return items[0][0]
return 0
| 344,722,637,239,240,640
|
Return the default (first in list) id given any of the particle's names.
|
FWCore/GuiBrowsers/python/Vispa/Plugins/EdmBrowser/ParticleDataList.py
|
getIdFromName
|
7quantumphysics/cmssw
|
python
|
def getIdFromName(self, name):
" \n "
for items in self._list:
if (name in items[1]):
return items[0][0]
return 0
|
def getNameFromId(self, id):
" Return the default (first in list) name given any of the particle's ids.\n "
for items in self._list:
if (id in items[0]):
return items[1][0]
return 'unknown'
| 7,222,615,436,292,470,000
|
Return the default (first in list) name given any of the particle's ids.
|
FWCore/GuiBrowsers/python/Vispa/Plugins/EdmBrowser/ParticleDataList.py
|
getNameFromId
|
7quantumphysics/cmssw
|
python
|
def getNameFromId(self, id):
" \n "
for items in self._list:
if (id in items[0]):
return items[1][0]
return 'unknown'
|
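A hedged usage sketch of the methods above, assuming they belong to a `ParticleDataList` class (as the file name suggests); `particleData` is left as None purely for illustration.

```python
particles = ParticleDataList()
particles.addParticle([1, -1], ['d', 'dbar'], None)
particles.addParticle([11, -11], ['e-', 'e+'], None)

print(particles.getDefaultName('dbar'))   # 'd'
print(particles.getIdFromName('e+'))      # 11
print(particles.getNameFromId(-11))       # 'e-'
print(particles.getNameFromId(999))       # 'unknown'
```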
def __init__(self, channel):
'Constructor.\n\n Args:\n channel: A grpc.Channel.\n '
self.Predict = channel.unary_unary('/onnxruntime.server.PredictionService/Predict', request_serializer=predict__pb2.PredictRequest.SerializeToString, response_deserializer=predict__pb2.PredictResponse.FromString)
| -8,563,973,921,117,573,000
|
Constructor.
Args:
channel: A grpc.Channel.
|
chapter2_training/cifar10/evaluate/src/proto/prediction_service_pb2_grpc.py
|
__init__
|
akiueno/ml-system-in-actions
|
python
|
def __init__(self, channel):
'Constructor.\n\n Args:\n channel: A grpc.Channel.\n '
self.Predict = channel.unary_unary('/onnxruntime.server.PredictionService/Predict', request_serializer=predict__pb2.PredictRequest.SerializeToString, response_deserializer=predict__pb2.PredictResponse.FromString)
|
def Predict(self, request, context):
'Missing associated documentation comment in .proto file.'
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
| 3,231,770,545,470,701,600
|
Missing associated documentation comment in .proto file.
|
chapter2_training/cifar10/evaluate/src/proto/prediction_service_pb2_grpc.py
|
Predict
|
akiueno/ml-system-in-actions
|
python
|
def Predict(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
|
def test_valid_distribution(self):
'Test for a valid distribution.'
plugin = Plugin(distribution='norm')
self.assertEqual(plugin.distribution, stats.norm)
self.assertEqual(plugin.shape_parameters, [])
| 6,153,720,595,872,060,000
|
Test for a valid distribution.
|
improver_tests/ensemble_copula_coupling/ensemble_copula_coupling/test_ConvertLocationAndScaleParameters.py
|
test_valid_distribution
|
LaurenceBeard/improver
|
python
|
def test_valid_distribution(self):
plugin = Plugin(distribution='norm')
self.assertEqual(plugin.distribution, stats.norm)
self.assertEqual(plugin.shape_parameters, [])
|
def test_valid_distribution_with_shape_parameters(self):
'Test for a valid distribution with shape parameters.'
plugin = Plugin(distribution='truncnorm', shape_parameters=[0, np.inf])
self.assertEqual(plugin.distribution, stats.truncnorm)
self.assertEqual(plugin.shape_parameters, [0, np.inf])
| 8,800,657,711,953,296,000
|
Test for a valid distribution with shape parameters.
|
improver_tests/ensemble_copula_coupling/ensemble_copula_coupling/test_ConvertLocationAndScaleParameters.py
|
test_valid_distribution_with_shape_parameters
|
LaurenceBeard/improver
|
python
|
def test_valid_distribution_with_shape_parameters(self):
plugin = Plugin(distribution='truncnorm', shape_parameters=[0, np.inf])
self.assertEqual(plugin.distribution, stats.truncnorm)
self.assertEqual(plugin.shape_parameters, [0, np.inf])
|
def test_invalid_distribution(self):
'Test for an invalid distribution.'
msg = 'The distribution requested'
with self.assertRaisesRegex(AttributeError, msg):
Plugin(distribution='elephant')
| -2,428,895,901,677,872,600
|
Test for an invalid distribution.
|
improver_tests/ensemble_copula_coupling/ensemble_copula_coupling/test_ConvertLocationAndScaleParameters.py
|
test_invalid_distribution
|
LaurenceBeard/improver
|
python
|
def test_invalid_distribution(self):
msg = 'The distribution requested'
with self.assertRaisesRegex(AttributeError, msg):
Plugin(distribution='elephant')
|
def test_basic(self):
'Test string representation'
expected_string = '<ConvertLocationAndScaleParameters: distribution: norm; shape_parameters: []>'
result = str(Plugin())
self.assertEqual(result, expected_string)
| -7,172,860,046,943,809,000
|
Test string representation
|
improver_tests/ensemble_copula_coupling/ensemble_copula_coupling/test_ConvertLocationAndScaleParameters.py
|
test_basic
|
LaurenceBeard/improver
|
python
|
def test_basic(self):
expected_string = '<ConvertLocationAndScaleParameters: distribution: norm; shape_parameters: []>'
result = str(Plugin())
self.assertEqual(result, expected_string)
|
def setUp(self):
'Set up values for testing.'
self.location_parameter = np.array([(- 1), 0, 1])
self.scale_parameter = np.array([1, 1.5, 2])
| -2,617,334,872,451,485,000
|
Set up values for testing.
|
improver_tests/ensemble_copula_coupling/ensemble_copula_coupling/test_ConvertLocationAndScaleParameters.py
|
setUp
|
LaurenceBeard/improver
|
python
|
def setUp(self):
self.location_parameter = np.array([(- 1), 0, 1])
self.scale_parameter = np.array([1, 1.5, 2])
|
def test_truncated_at_zero(self):
'Test scaling shape parameters implying a truncation at zero.'
expected = [np.array([1.0, 0, (- 0.5)]), np.array([np.inf, np.inf, np.inf])]
shape_parameters = [0, np.inf]
plugin = Plugin(distribution='truncnorm', shape_parameters=shape_parameters)
plugin._rescale_shape_parameters(self.location_parameter, self.scale_parameter)
self.assertArrayAlmostEqual(plugin.shape_parameters, expected)
| 5,019,037,578,939,496,000
|
Test scaling shape parameters implying a truncation at zero.
|
improver_tests/ensemble_copula_coupling/ensemble_copula_coupling/test_ConvertLocationAndScaleParameters.py
|
test_truncated_at_zero
|
LaurenceBeard/improver
|
python
|
def test_truncated_at_zero(self):
expected = [np.array([1.0, 0, (- 0.5)]), np.array([np.inf, np.inf, np.inf])]
shape_parameters = [0, np.inf]
plugin = Plugin(distribution='truncnorm', shape_parameters=shape_parameters)
plugin._rescale_shape_parameters(self.location_parameter, self.scale_parameter)
self.assertArrayAlmostEqual(plugin.shape_parameters, expected)
|
def test_discrete_shape_parameters(self):
'Test scaling discrete shape parameters.'
expected = [np.array([(- 3), (- 2.666667), (- 2.5)]), np.array([7, 4, 2.5])]
shape_parameters = [(- 4), 6]
plugin = Plugin(distribution='truncnorm', shape_parameters=shape_parameters)
plugin._rescale_shape_parameters(self.location_parameter, self.scale_parameter)
self.assertArrayAlmostEqual(plugin.shape_parameters, expected)
| -8,064,486,279,954,355,000
|
Test scaling discrete shape parameters.
|
improver_tests/ensemble_copula_coupling/ensemble_copula_coupling/test_ConvertLocationAndScaleParameters.py
|
test_discrete_shape_parameters
|
LaurenceBeard/improver
|
python
|
def test_discrete_shape_parameters(self):
expected = [np.array([(- 3), (- 2.666667), (- 2.5)]), np.array([7, 4, 2.5])]
shape_parameters = [(- 4), 6]
plugin = Plugin(distribution='truncnorm', shape_parameters=shape_parameters)
plugin._rescale_shape_parameters(self.location_parameter, self.scale_parameter)
self.assertArrayAlmostEqual(plugin.shape_parameters, expected)
|
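The expected arrays in the two tests above follow from the standard-score conversion that scipy's truncnorm uses for its clip points: a' = (a - loc) / scale and b' = (b - loc) / scale.

```python
import numpy as np

loc = np.array([-1.0, 0.0, 1.0])
scale = np.array([1.0, 1.5, 2.0])

a, b = 0.0, np.inf                 # truncation at zero
print((a - loc) / scale)           # [1.0, 0.0, -0.5]
print((b - loc) / scale)           # [inf, inf, inf]

a, b = -4.0, 6.0                   # discrete shape parameters
print((a - loc) / scale)           # [-3.0, -2.666667, -2.5]
print((b - loc) / scale)           # [7.0, 4.0, 2.5]
```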
def test_alternative_distribution(self):
'Test specifying a distribution other than truncated normal. In\n this instance, no rescaling is applied.'
shape_parameters = [0, np.inf]
plugin = Plugin(distribution='norm', shape_parameters=shape_parameters)
plugin._rescale_shape_parameters(self.location_parameter, self.scale_parameter)
self.assertArrayEqual(plugin.shape_parameters, shape_parameters)
| -6,880,325,163,840,395,000
|
Test specifying a distribution other than truncated normal. In
this instance, no rescaling is applied.
|
improver_tests/ensemble_copula_coupling/ensemble_copula_coupling/test_ConvertLocationAndScaleParameters.py
|
test_alternative_distribution
|
LaurenceBeard/improver
|
python
|
def test_alternative_distribution(self):
'Test specifying a distribution other than truncated normal. In\n this instance, no rescaling is applied.'
shape_parameters = [0, np.inf]
plugin = Plugin(distribution='norm', shape_parameters=shape_parameters)
plugin._rescale_shape_parameters(self.location_parameter, self.scale_parameter)
self.assertArrayEqual(plugin.shape_parameters, shape_parameters)
|
def test_no_shape_parameters_exception(self):
'Test raising an exception when shape parameters are not specified\n for the truncated normal distribution.'
plugin = Plugin(distribution='truncnorm')
msg = 'For the truncated normal distribution'
with self.assertRaisesRegex(ValueError, msg):
plugin._rescale_shape_parameters(self.location_parameter, self.scale_parameter)
| -1,700,685,026,259,694,600
|
Test raising an exception when shape parameters are not specified
for the truncated normal distribution.
|
improver_tests/ensemble_copula_coupling/ensemble_copula_coupling/test_ConvertLocationAndScaleParameters.py
|
test_no_shape_parameters_exception
|
LaurenceBeard/improver
|
python
|
def test_no_shape_parameters_exception(self):
'Test raising an exception when shape parameters are not specified\n for the truncated normal distribution.'
plugin = Plugin(distribution='truncnorm')
msg = 'For the truncated normal distribution'
with self.assertRaisesRegex(ValueError, msg):
plugin._rescale_shape_parameters(self.location_parameter, self.scale_parameter)
|
def harmonic_mean(x):
'\n The `harmonic mean`_ is a kind of average that is calculated as\n the reciprocal_ of the arithmetic mean of the reciprocals.\n It is appropriate when calculating averages of rates_.\n\n .. _`harmonic mean`: https://en.wikipedia.org/wiki/Harmonic_mean\n .. _reciprocal: https://en.wikipedia.org/wiki/Multiplicative_inverse\n .. _rates: https://en.wikipedia.org/wiki/Rate_(mathematics)\n\n Equation:\n .. math::\n H = \\frac{n}{\\frac{1}{x_1}+\\frac{1}{x_2}+\\ldots+\\frac{1}{x_n}} =\n \\frac{n}{\\sum\\limits_{i=1}^n \\frac{1}{x_i}}\n\n Args:\n x: A list or tuple of numerical objects.\n\n Returns:\n A numerical object.\n\n Raises:\n TypeError: If the user passes something other than list or tuple.\n\n Examples:\n >>> harmonic_mean([1, 2, 4])\n 1.7142857142857142\n >>> harmonic_mean(7)\n Traceback (most recent call last):\n ...\n TypeError: harmonic_mean() expects a list or a tuple.\n '
if (type(x) not in [list, tuple]):
raise TypeError('harmonic_mean() expects a list or a tuple.')
reciprocals = [(1 / float(num)) for num in x]
return (1 / mean(reciprocals))
| 591,122,178,774,666,200
|
The `harmonic mean`_ is a kind of average that is calculated as
the reciprocal_ of the arithmetic mean of the reciprocals.
It is appropriate when calculating averages of rates_.
.. _`harmonic mean`: https://en.wikipedia.org/wiki/Harmonic_mean
.. _reciprocal: https://en.wikipedia.org/wiki/Multiplicative_inverse
.. _rates: https://en.wikipedia.org/wiki/Rate_(mathematics)
Equation:
.. math::
H = \frac{n}{\frac{1}{x_1}+\frac{1}{x_2}+\ldots+\frac{1}{x_n}} =
\frac{n}{\sum\limits_{i=1}^n \frac{1}{x_i}}
Args:
x: A list or tuple of numerical objects.
Returns:
A numerical object.
Raises:
TypeError: If the user passes something other than list or tuple.
Examples:
>>> harmonic_mean([1, 2, 4])
1.7142857142857142
>>> harmonic_mean(7)
Traceback (most recent call last):
...
TypeError: harmonic_mean() expects a list or a tuple.
|
simplestatistics/statistics/harmonic_mean.py
|
harmonic_mean
|
sheriferson/simple-statistics-py
|
python
|
def harmonic_mean(x):
'\n The `harmonic mean`_ is a kind of average that is calculated as\n the reciprocal_ of the arithmetic mean of the reciprocals.\n It is appropriate when calculating averages of rates_.\n\n .. _`harmonic mean`: https://en.wikipedia.org/wiki/Harmonic_mean\n .. _reciprocal: https://en.wikipedia.org/wiki/Multiplicative_inverse\n .. _rates: https://en.wikipedia.org/wiki/Rate_(mathematics)\n\n Equation:\n .. math::\n H = \\frac{n}{\\frac{1}{x_1}+\\frac{1}{x_2}+\\ldots+\\frac{1}{x_n}} =\n \\frac{n}{\\sum\\limits_{i=1}^n \\frac{1}{x_i}}\n\n Args:\n x: A list or tuple of numerical objects.\n\n Returns:\n A numerical object.\n\n Raises:\n TypeError: If the user passes something other than list or tuple.\n\n Examples:\n >>> harmonic_mean([1, 2, 4])\n 1.7142857142857142\n >>> harmonic_mean(7)\n Traceback (most recent call last):\n ...\n TypeError: harmonic_mean() expects a list or a tuple.\n '
if (type(x) not in [list, tuple]):
raise TypeError('harmonic_mean() expects a list or a tuple.')
reciprocals = [(1 / float(num)) for num in x]
return (1 / mean(reciprocals))
|
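A minimal usage sketch of the same harmonic-mean idea, assuming only the standard library (statistics.mean stands in for the package's own mean); the classic case is average speed over legs of equal length.

from statistics import mean

speeds = [60.0, 40.0]  # km/h over two legs of equal length

# Harmonic mean = reciprocal of the arithmetic mean of the reciprocals.
harmonic = 1 / mean(1 / s for s in speeds)

print(harmonic)      # ~48.0 -- the true average speed for the round trip
print(mean(speeds))  # 50.0  -- the plain arithmetic mean overstates it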
@pytest.mark.regions(['ap-southeast-1'])
@pytest.mark.instances(['c5.xlarge'])
@pytest.mark.oss(['alinux2'])
@pytest.mark.schedulers(['slurm', 'awsbatch'])
@pytest.mark.usefixtures('region', 'instance')
def test_tag_propagation(pcluster_config_reader, clusters_factory, scheduler, os):
"\n Verify tags from various sources are propagated to the expected resources.\n\n The following resources are checked for tags:\n - main CFN stack\n - head node\n - head node's root EBS volume\n - compute node (traditional schedulers)\n - compute node's root EBS volume (traditional schedulers)\n - shared EBS volume\n "
config_file_tags = {'ConfigFileTag': 'ConfigFileTagValue'}
version_tags = {'parallelcluster:version': get_pcluster_version()}
cluster_config = pcluster_config_reader()
cluster = clusters_factory(cluster_config)
cluster_name_tags = {'parallelcluster:cluster-name': cluster.name}
test_cases = [{'resource': 'Main CloudFormation Stack', 'tag_getter': get_main_stack_tags, 'expected_tags': (version_tags, config_file_tags)}, {'resource': 'Head Node', 'tag_getter': get_head_node_tags, 'expected_tags': (cluster_name_tags, {'Name': 'HeadNode', 'parallelcluster:node-type': 'HeadNode'})}, {'resource': 'Head Node Root Volume', 'tag_getter': get_head_node_root_volume_tags, 'expected_tags': (cluster_name_tags, {'parallelcluster:node-type': 'HeadNode'}), 'tag_getter_kwargs': {'cluster': cluster, 'os': os}}, {'resource': 'Compute Node', 'tag_getter': get_compute_node_tags, 'expected_tags': (cluster_name_tags, {'Name': 'Compute', 'parallelcluster:node-type': 'Compute'}, config_file_tags), 'skip': (scheduler == 'awsbatch')}, {'resource': 'Compute Node Root Volume', 'tag_getter': get_compute_node_root_volume_tags, 'expected_tags': (cluster_name_tags, {'parallelcluster:node-type': 'Compute'}, (config_file_tags if (scheduler == 'slurm') else {})), 'tag_getter_kwargs': {'cluster': cluster, 'os': os}, 'skip': (scheduler == 'awsbatch')}, {'resource': 'Shared EBS Volume', 'tag_getter': get_shared_volume_tags, 'expected_tags': (version_tags, config_file_tags)}]
for test_case in test_cases:
if test_case.get('skip'):
continue
logging.info('Verifying tags were propagated to %s', test_case.get('resource'))
tag_getter = test_case.get('tag_getter')
tag_getter_args = test_case.get('tag_getter_kwargs', {'cluster': cluster})
observed_tags = tag_getter(**tag_getter_args)
expected_tags = test_case['expected_tags']
assert_that(observed_tags).contains(*convert_tags_dicts_to_tags_list(expected_tags))
| -7,428,828,917,190,505,000
|
Verify tags from various sources are propagated to the expected resources.
The following resources are checked for tags:
- main CFN stack
- head node
- head node's root EBS volume
- compute node (traditional schedulers)
- compute node's root EBS volume (traditional schedulers)
- shared EBS volume
|
tests/integration-tests/tests/tags/test_tag_propagation.py
|
test_tag_propagation
|
eshpc/aws-parallelcluster
|
python
|
@pytest.mark.regions(['ap-southeast-1'])
@pytest.mark.instances(['c5.xlarge'])
@pytest.mark.oss(['alinux2'])
@pytest.mark.schedulers(['slurm', 'awsbatch'])
@pytest.mark.usefixtures('region', 'instance')
def test_tag_propagation(pcluster_config_reader, clusters_factory, scheduler, os):
"\n Verify tags from various sources are propagated to the expected resources.\n\n The following resources are checked for tags:\n - main CFN stack\n - head node\n - head node's root EBS volume\n - compute node (traditional schedulers)\n - compute node's root EBS volume (traditional schedulers)\n - shared EBS volume\n "
config_file_tags = {'ConfigFileTag': 'ConfigFileTagValue'}
version_tags = {'parallelcluster:version': get_pcluster_version()}
cluster_config = pcluster_config_reader()
cluster = clusters_factory(cluster_config)
cluster_name_tags = {'parallelcluster:cluster-name': cluster.name}
test_cases = [{'resource': 'Main CloudFormation Stack', 'tag_getter': get_main_stack_tags, 'expected_tags': (version_tags, config_file_tags)}, {'resource': 'Head Node', 'tag_getter': get_head_node_tags, 'expected_tags': (cluster_name_tags, {'Name': 'HeadNode', 'parallelcluster:node-type': 'HeadNode'})}, {'resource': 'Head Node Root Volume', 'tag_getter': get_head_node_root_volume_tags, 'expected_tags': (cluster_name_tags, {'parallelcluster:node-type': 'HeadNode'}), 'tag_getter_kwargs': {'cluster': cluster, 'os': os}}, {'resource': 'Compute Node', 'tag_getter': get_compute_node_tags, 'expected_tags': (cluster_name_tags, {'Name': 'Compute', 'parallelcluster:node-type': 'Compute'}, config_file_tags), 'skip': (scheduler == 'awsbatch')}, {'resource': 'Compute Node Root Volume', 'tag_getter': get_compute_node_root_volume_tags, 'expected_tags': (cluster_name_tags, {'parallelcluster:node-type': 'Compute'}, (config_file_tags if (scheduler == 'slurm') else {})), 'tag_getter_kwargs': {'cluster': cluster, 'os': os}, 'skip': (scheduler == 'awsbatch')}, {'resource': 'Shared EBS Volume', 'tag_getter': get_shared_volume_tags, 'expected_tags': (version_tags, config_file_tags)}]
for test_case in test_cases:
if test_case.get('skip'):
continue
logging.info('Verifying tags were propagated to %s', test_case.get('resource'))
tag_getter = test_case.get('tag_getter')
tag_getter_args = test_case.get('tag_getter_kwargs', {'cluster': cluster})
observed_tags = tag_getter(**tag_getter_args)
expected_tags = test_case['expected_tags']
assert_that(observed_tags).contains(*convert_tags_dicts_to_tags_list(expected_tags))
|
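A minimal, self-contained sketch of the table-driven pattern used in test_tag_propagation: a list of case dicts carrying a tag getter, expected tags and an optional skip flag. The getter and tag values below are hypothetical stand-ins for the AWS calls.

def get_stack_tags():
    # Stand-in for a real getter such as get_main_stack_tags(cluster).
    return [{"Key": "parallelcluster:version", "Value": "3.0.0"}]

test_cases = [
    {"resource": "Main Stack", "tag_getter": get_stack_tags,
     "expected_tags": ({"parallelcluster:version": "3.0.0"},)},
    {"resource": "Skipped Resource", "tag_getter": get_stack_tags,
     "expected_tags": (), "skip": True},
]

for case in test_cases:
    if case.get("skip"):
        continue
    observed = case["tag_getter"]()
    expected = [{"Key": k, "Value": v}
                for tags in case["expected_tags"] for k, v in tags.items()]
    assert all(tag in observed for tag in expected), case["resource"]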
def convert_tags_dicts_to_tags_list(tags_dicts):
'Convert dicts of the form {key: value} to a list like [{"Key": key, "Value": value}].'
tags_list = []
for tags_dict in tags_dicts:
tags_list.extend([{'Key': key, 'Value': value} for (key, value) in tags_dict.items()])
return tags_list
| -4,554,017,946,200,980,000
|
Convert dicts of the form {key: value} to a list like [{"Key": key, "Value": value}].
|
tests/integration-tests/tests/tags/test_tag_propagation.py
|
convert_tags_dicts_to_tags_list
|
eshpc/aws-parallelcluster
|
python
|
def convert_tags_dicts_to_tags_list(tags_dicts):
tags_list = []
for tags_dict in tags_dicts:
tags_list.extend([{'Key': key, 'Value': value} for (key, value) in tags_dict.items()])
return tags_list
|
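A quick usage sketch of the same dict-to-tag-list conversion, showing the [{"Key": ..., "Value": ...}] shape the AWS APIs return; the input dicts are made up.

def convert_tags_dicts_to_tags_list(tags_dicts):
    # Flatten several {key: value} dicts into AWS-style tag entries.
    return [{"Key": k, "Value": v} for d in tags_dicts for k, v in d.items()]

print(convert_tags_dicts_to_tags_list((
    {"parallelcluster:cluster-name": "demo"},
    {"ConfigFileTag": "ConfigFileTagValue"},
)))
# [{'Key': 'parallelcluster:cluster-name', 'Value': 'demo'},
#  {'Key': 'ConfigFileTag', 'Value': 'ConfigFileTagValue'}]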
def get_cloudformation_tags(region, stack_name):
"\n Return the tags for the CFN stack with the given name\n\n The returned values is a list like the following:\n [\n {'Key': 'Key2', 'Value': 'Value2'},\n {'Key': 'Key1', 'Value': 'Value1'},\n ]\n "
cfn_client = boto3.client('cloudformation', region_name=region)
response = cfn_client.describe_stacks(StackName=stack_name)
return response['Stacks'][0]['Tags']
| -6,683,868,679,622,842,000
|
Return the tags for the CFN stack with the given name
The returned value is a list like the following:
[
{'Key': 'Key2', 'Value': 'Value2'},
{'Key': 'Key1', 'Value': 'Value1'},
]
|
tests/integration-tests/tests/tags/test_tag_propagation.py
|
get_cloudformation_tags
|
eshpc/aws-parallelcluster
|
python
|
def get_cloudformation_tags(region, stack_name):
"\n Return the tags for the CFN stack with the given name\n\n The returned values is a list like the following:\n [\n {'Key': 'Key2', 'Value': 'Value2'},\n {'Key': 'Key1', 'Value': 'Value1'},\n ]\n "
cfn_client = boto3.client('cloudformation', region_name=region)
response = cfn_client.describe_stacks(StackName=stack_name)
return response['Stacks'][0]['Tags']
|
def get_main_stack_tags(cluster):
"Return the tags for the cluster's main CFN stack."
return get_cloudformation_tags(cluster.region, cluster.cfn_name)
| -7,796,513,982,243,440,000
|
Return the tags for the cluster's main CFN stack.
|
tests/integration-tests/tests/tags/test_tag_propagation.py
|
get_main_stack_tags
|
eshpc/aws-parallelcluster
|
python
|
def get_main_stack_tags(cluster):
return get_cloudformation_tags(cluster.region, cluster.cfn_name)
|
def get_head_node_instance_id(cluster):
"Return the given cluster's head node's instance ID."
return cluster.cfn_resources.get('HeadNode')
| -6,527,855,288,032,906,000
|
Return the given cluster's head node's instance ID.
|
tests/integration-tests/tests/tags/test_tag_propagation.py
|
get_head_node_instance_id
|
eshpc/aws-parallelcluster
|
python
|
def get_head_node_instance_id(cluster):
return cluster.cfn_resources.get('HeadNode')
|
def get_ec2_instance_tags(instance_id, region):
'Return a list of tags associated with the given EC2 instance.'
logging.info('Getting tags for instance %s', instance_id)
return boto3.client('ec2', region_name=region).describe_instances(InstanceIds=[instance_id]).get('Reservations')[0].get('Instances')[0].get('Tags')
| 9,049,807,806,296,432,000
|
Return a list of tags associated with the given EC2 instance.
|
tests/integration-tests/tests/tags/test_tag_propagation.py
|
get_ec2_instance_tags
|
eshpc/aws-parallelcluster
|
python
|
def get_ec2_instance_tags(instance_id, region):
logging.info('Getting tags for instance %s', instance_id)
return boto3.client('ec2', region_name=region).describe_instances(InstanceIds=[instance_id]).get('Reservations')[0].get('Instances')[0].get('Tags')
|
def get_tags_for_volume(volume_id, region):
'Return the tags attached to the given EBS volume.'
logging.info('Getting tags for volume %s', volume_id)
return boto3.client('ec2', region_name=region).describe_volumes(VolumeIds=[volume_id]).get('Volumes')[0].get('Tags')
| -1,241,565,648,266,099,700
|
Return the tags attached to the given EBS volume.
|
tests/integration-tests/tests/tags/test_tag_propagation.py
|
get_tags_for_volume
|
eshpc/aws-parallelcluster
|
python
|
def get_tags_for_volume(volume_id, region):
logging.info('Getting tags for volume %s', volume_id)
return boto3.client('ec2', region_name=region).describe_volumes(VolumeIds=[volume_id]).get('Volumes')[0].get('Tags')
|
def get_head_node_root_volume_tags(cluster, os):
"Return the given cluster's head node's root volume's tags."
head_node_instance_id = get_head_node_instance_id(cluster)
root_volume_id = get_root_volume_id(head_node_instance_id, cluster.region, os)
return get_tags_for_volume(root_volume_id, cluster.region)
| 1,240,287,457,644,547,000
|
Return the given cluster's head node's root volume's tags.
|
tests/integration-tests/tests/tags/test_tag_propagation.py
|
get_head_node_root_volume_tags
|
eshpc/aws-parallelcluster
|
python
|
def get_head_node_root_volume_tags(cluster, os):
head_node_instance_id = get_head_node_instance_id(cluster)
root_volume_id = get_root_volume_id(head_node_instance_id, cluster.region, os)
return get_tags_for_volume(root_volume_id, cluster.region)
|
def get_head_node_tags(cluster):
"Return the given cluster's head node's tags."
head_node_instance_id = get_head_node_instance_id(cluster)
return get_ec2_instance_tags(head_node_instance_id, cluster.region)
| -2,295,178,007,714,998,800
|
Return the given cluster's head node's tags.
|
tests/integration-tests/tests/tags/test_tag_propagation.py
|
get_head_node_tags
|
eshpc/aws-parallelcluster
|
python
|
def get_head_node_tags(cluster):
head_node_instance_id = get_head_node_instance_id(cluster)
return get_ec2_instance_tags(head_node_instance_id, cluster.region)
|
def get_compute_node_root_volume_tags(cluster, os):
"Return the given cluster's compute node's root volume's tags."
compute_nodes = cluster.get_cluster_instance_ids(node_type='Compute')
assert_that(compute_nodes).is_length(1)
root_volume_id = get_root_volume_id(compute_nodes[0], cluster.region, os)
return get_tags_for_volume(root_volume_id, cluster.region)
| -3,110,508,624,131,773,400
|
Return the given cluster's compute node's root volume's tags.
|
tests/integration-tests/tests/tags/test_tag_propagation.py
|
get_compute_node_root_volume_tags
|
eshpc/aws-parallelcluster
|
python
|
def get_compute_node_root_volume_tags(cluster, os):
compute_nodes = cluster.get_cluster_instance_ids(node_type='Compute')
assert_that(compute_nodes).is_length(1)
root_volume_id = get_root_volume_id(compute_nodes[0], cluster.region, os)
return get_tags_for_volume(root_volume_id, cluster.region)
|
def get_compute_node_tags(cluster):
"Return the given cluster's compute node's tags."
compute_nodes = cluster.get_cluster_instance_ids(node_type='Compute')
assert_that(compute_nodes).is_length(1)
return get_ec2_instance_tags(compute_nodes[0], cluster.region)
| -1,093,552,564,996,228,600
|
Return the given cluster's compute node's tags.
|
tests/integration-tests/tests/tags/test_tag_propagation.py
|
get_compute_node_tags
|
eshpc/aws-parallelcluster
|
python
|
def get_compute_node_tags(cluster):
compute_nodes = cluster.get_cluster_instance_ids(node_type='Compute')
assert_that(compute_nodes).is_length(1)
return get_ec2_instance_tags(compute_nodes[0], cluster.region)
|
def get_ebs_volume_tags(volume_id, region):
'Return the tags associated with the given EBS volume.'
return boto3.client('ec2', region_name=region).describe_volumes(VolumeIds=[volume_id]).get('Volumes')[0].get('Tags')
| -2,903,476,295,029,446,000
|
Return the tags associated with the given EBS volume.
|
tests/integration-tests/tests/tags/test_tag_propagation.py
|
get_ebs_volume_tags
|
eshpc/aws-parallelcluster
|
python
|
def get_ebs_volume_tags(volume_id, region):
return boto3.client('ec2', region_name=region).describe_volumes(VolumeIds=[volume_id]).get('Volumes')[0].get('Tags')
|
def get_shared_volume_tags(cluster):
"Return the given cluster's EBS volume's tags."
shared_volume = cluster.cfn_resources.get('EBS0')
return get_ebs_volume_tags(shared_volume, cluster.region)
| -29,601,883,307,549,850
|
Return the given cluster's EBS volume's tags.
|
tests/integration-tests/tests/tags/test_tag_propagation.py
|
get_shared_volume_tags
|
eshpc/aws-parallelcluster
|
python
|
def get_shared_volume_tags(cluster):
shared_volume = cluster.cfn_resources.get('EBS0')
return get_ebs_volume_tags(shared_volume, cluster.region)
|
def get_pcluster_version():
'Return the installed version of the pcluster CLI.'
return json.loads(sp.check_output('pcluster version'.split()).decode().strip()).get('version')
| -6,709,317,349,835,332,000
|
Return the installed version of the pcluster CLI.
|
tests/integration-tests/tests/tags/test_tag_propagation.py
|
get_pcluster_version
|
eshpc/aws-parallelcluster
|
python
|
def get_pcluster_version():
return json.loads(sp.check_output('pcluster version'.split()).decode().strip()).get('version')
|
def make_deterministic(seed=0):
"Make results deterministic. If seed == -1, do not make deterministic.\n Running your script in a deterministic way might slow it down.\n Note that for some packages (eg: sklearn's PCA) this function is not enough.\n "
seed = int(seed)
if (seed == (- 1)):
return
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
| 2,571,610,496,660,509,700
|
Make results deterministic. If seed == -1, do not make deterministic.
Running your script in a deterministic way might slow it down.
Note that for some packages (eg: sklearn's PCA) this function is not enough.
|
commons.py
|
make_deterministic
|
gmberton/CosPlace
|
python
|
def make_deterministic(seed=0):
"Make results deterministic. If seed == -1, do not make deterministic.\n Running your script in a deterministic way might slow it down.\n Note that for some packages (eg: sklearn's PCA) this function is not enough.\n "
seed = int(seed)
if (seed == (- 1)):
return
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
|
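A minimal sketch of the same seeding idea limited to random and NumPy (the torch.manual_seed and cudnn flags are analogous), verifying that two seeded draws agree.

import random

import numpy as np

def make_deterministic(seed=0):
    # Seed every RNG the script relies on; -1 means "leave it nondeterministic".
    if seed == -1:
        return
    random.seed(seed)
    np.random.seed(seed)

make_deterministic(0)
first = (random.random(), float(np.random.rand()))
make_deterministic(0)
second = (random.random(), float(np.random.rand()))
assert first == second  # identical draws after re-seeding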
def setup_logging(output_folder, exist_ok=False, console='debug', info_filename='info.log', debug_filename='debug.log'):
'Set up logging files and console output.\n Creates one file for INFO logs and one for DEBUG logs.\n Args:\n output_folder (str): creates the folder where to save the files.\n exist_ok (boolean): if False throw a FileExistsError if output_folder already exists\n debug (str):\n if == "debug" prints on console debug messages and higher\n if == "info" prints on console info messages and higher\n if == None does not use console (useful when a logger has already been set)\n info_filename (str): the name of the info file. if None, don\'t create info file\n debug_filename (str): the name of the debug file. if None, don\'t create debug file\n '
import os
import sys
import logging
import traceback
if ((not exist_ok) and os.path.exists(output_folder)):
raise FileExistsError(f'{output_folder} already exists!')
os.makedirs(output_folder, exist_ok=True)
base_formatter = logging.Formatter('%(asctime)s %(message)s', '%Y-%m-%d %H:%M:%S')
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
if (info_filename != None):
info_file_handler = logging.FileHandler(f'{output_folder}/{info_filename}')
info_file_handler.setLevel(logging.INFO)
info_file_handler.setFormatter(base_formatter)
logger.addHandler(info_file_handler)
if (debug_filename != None):
debug_file_handler = logging.FileHandler(f'{output_folder}/{debug_filename}')
debug_file_handler.setLevel(logging.DEBUG)
debug_file_handler.setFormatter(base_formatter)
logger.addHandler(debug_file_handler)
if (console != None):
console_handler = logging.StreamHandler()
if (console == 'debug'):
console_handler.setLevel(logging.DEBUG)
if (console == 'info'):
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(base_formatter)
logger.addHandler(console_handler)
def my_handler(type_, value, tb):
logger.info(('\n' + ''.join(traceback.format_exception(type_, value, tb))))
logging.info('Experiment finished (with some errors)')
sys.excepthook = my_handler
| 3,354,185,008,153,865,000
|
Set up logging files and console output.
Creates one file for INFO logs and one for DEBUG logs.
Args:
output_folder (str): creates the folder where to save the files.
exist_ok (boolean): if False throw a FileExistsError if output_folder already exists
debug (str):
if == "debug" prints on console debug messages and higher
if == "info" prints on console info messages and higher
if == None does not use console (useful when a logger has already been set)
info_filename (str): the name of the info file. if None, don't create info file
debug_filename (str): the name of the debug file. if None, don't create debug file
|
commons.py
|
setup_logging
|
gmberton/CosPlace
|
python
|
def setup_logging(output_folder, exist_ok=False, console='debug', info_filename='info.log', debug_filename='debug.log'):
'Set up logging files and console output.\n Creates one file for INFO logs and one for DEBUG logs.\n Args:\n output_folder (str): creates the folder where to save the files.\n exist_ok (boolean): if False throw a FileExistsError if output_folder already exists\n debug (str):\n if == "debug" prints on console debug messages and higher\n if == "info" prints on console info messages and higher\n if == None does not use console (useful when a logger has already been set)\n info_filename (str): the name of the info file. if None, don\'t create info file\n debug_filename (str): the name of the debug file. if None, don\'t create debug file\n '
import os
import sys
import logging
import traceback
if ((not exist_ok) and os.path.exists(output_folder)):
raise FileExistsError(f'{output_folder} already exists!')
os.makedirs(output_folder, exist_ok=True)
base_formatter = logging.Formatter('%(asctime)s %(message)s', '%Y-%m-%d %H:%M:%S')
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
if (info_filename != None):
info_file_handler = logging.FileHandler(f'{output_folder}/{info_filename}')
info_file_handler.setLevel(logging.INFO)
info_file_handler.setFormatter(base_formatter)
logger.addHandler(info_file_handler)
if (debug_filename != None):
debug_file_handler = logging.FileHandler(f'{output_folder}/{debug_filename}')
debug_file_handler.setLevel(logging.DEBUG)
debug_file_handler.setFormatter(base_formatter)
logger.addHandler(debug_file_handler)
if (console != None):
console_handler = logging.StreamHandler()
if (console == 'debug'):
console_handler.setLevel(logging.DEBUG)
if (console == 'info'):
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(base_formatter)
logger.addHandler(console_handler)
def my_handler(type_, value, tb):
logger.info(('\n' + ''.join(traceback.format_exception(type_, value, tb))))
logging.info('Experiment finished (with some errors)')
sys.excepthook = my_handler
|
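A minimal sketch of the same handler layout using only the standard logging module: a file handler that keeps INFO and above, a console handler that shows DEBUG and above, both sharing one formatter. File names are placeholders.

import logging

formatter = logging.Formatter("%(asctime)s %(message)s", "%Y-%m-%d %H:%M:%S")

logger = logging.getLogger("demo")
logger.setLevel(logging.DEBUG)

file_handler = logging.FileHandler("info.log")  # INFO and higher end up in the file
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)

console_handler = logging.StreamHandler()       # DEBUG and higher go to the console
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)

logger.debug("console only")
logger.info("console and info.log")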
def donchian(high, low, lower_length=None, upper_length=None, offset=None, **kwargs):
'Indicator: Donchian Channels (DC)'
high = verify_series(high)
low = verify_series(low)
lower_length = (int(lower_length) if (lower_length and (lower_length > 0)) else 20)
upper_length = (int(upper_length) if (upper_length and (upper_length > 0)) else 20)
lower_min_periods = (int(kwargs['lower_min_periods']) if (('lower_min_periods' in kwargs) and (kwargs['lower_min_periods'] is not None)) else lower_length)
upper_min_periods = (int(kwargs['upper_min_periods']) if (('upper_min_periods' in kwargs) and (kwargs['upper_min_periods'] is not None)) else upper_length)
offset = get_offset(offset)
lower = low.rolling(lower_length, min_periods=lower_min_periods).min()
upper = high.rolling(upper_length, min_periods=upper_min_periods).max()
mid = (0.5 * (lower + upper))
if ('fillna' in kwargs):
lower.fillna(kwargs['fillna'], inplace=True)
mid.fillna(kwargs['fillna'], inplace=True)
upper.fillna(kwargs['fillna'], inplace=True)
if ('fill_method' in kwargs):
lower.fillna(method=kwargs['fill_method'], inplace=True)
mid.fillna(method=kwargs['fill_method'], inplace=True)
upper.fillna(method=kwargs['fill_method'], inplace=True)
if (offset != 0):
lower = lower.shift(offset)
mid = mid.shift(offset)
upper = upper.shift(offset)
lower.name = f'DCL_{lower_length}_{upper_length}'
mid.name = f'DCM_{lower_length}_{upper_length}'
upper.name = f'DCU_{lower_length}_{upper_length}'
mid.category = upper.category = lower.category = 'volatility'
data = {lower.name: lower, mid.name: mid, upper.name: upper}
dcdf = DataFrame(data)
dcdf.name = f'DC_{lower_length}_{upper_length}'
dcdf.category = mid.category
return dcdf
| -6,520,702,824,064,578,000
|
Indicator: Donchian Channels (DC)
|
pandas_ta/volatility/donchian.py
|
donchian
|
MyBourse/pandas-ta
|
python
|
def donchian(high, low, lower_length=None, upper_length=None, offset=None, **kwargs):
high = verify_series(high)
low = verify_series(low)
lower_length = (int(lower_length) if (lower_length and (lower_length > 0)) else 20)
upper_length = (int(upper_length) if (upper_length and (upper_length > 0)) else 20)
lower_min_periods = (int(kwargs['lower_min_periods']) if (('lower_min_periods' in kwargs) and (kwargs['lower_min_periods'] is not None)) else lower_length)
upper_min_periods = (int(kwargs['upper_min_periods']) if (('upper_min_periods' in kwargs) and (kwargs['upper_min_periods'] is not None)) else upper_length)
offset = get_offset(offset)
lower = low.rolling(lower_length, min_periods=lower_min_periods).min()
upper = high.rolling(upper_length, min_periods=upper_min_periods).max()
mid = (0.5 * (lower + upper))
if ('fillna' in kwargs):
lower.fillna(kwargs['fillna'], inplace=True)
mid.fillna(kwargs['fillna'], inplace=True)
upper.fillna(kwargs['fillna'], inplace=True)
if ('fill_method' in kwargs):
lower.fillna(method=kwargs['fill_method'], inplace=True)
mid.fillna(method=kwargs['fill_method'], inplace=True)
upper.fillna(method=kwargs['fill_method'], inplace=True)
if (offset != 0):
lower = lower.shift(offset)
mid = mid.shift(offset)
upper = upper.shift(offset)
lower.name = f'DCL_{lower_length}_{upper_length}'
mid.name = f'DCM_{lower_length}_{upper_length}'
upper.name = f'DCU_{lower_length}_{upper_length}'
mid.category = upper.category = lower.category = 'volatility'
data = {lower.name: lower, mid.name: mid, upper.name: upper}
dcdf = DataFrame(data)
dcdf.name = f'DC_{lower_length}_{upper_length}'
dcdf.category = mid.category
return dcdf
|
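A minimal sketch of the Donchian computation on synthetic data, assuming only pandas: the lower band is a rolling minimum of the lows, the upper band a rolling maximum of the highs, and the middle band their average.

import pandas as pd

high = pd.Series([10, 12, 11, 14, 13, 15, 16, 14, 13, 17], dtype=float)
low = pd.Series([8, 9, 9, 11, 10, 12, 13, 11, 10, 14], dtype=float)
length = 5

lower = low.rolling(length, min_periods=length).min()   # DCL
upper = high.rolling(length, min_periods=length).max()  # DCU
mid = 0.5 * (lower + upper)                             # DCM

dc = pd.DataFrame({"DCL_5_5": lower, "DCM_5_5": mid, "DCU_5_5": upper})
print(dc.tail())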
def as_create_table(self, table_name, overwrite=False):
'Reformats the query into the create table as query.\n\n Works only for the single select SQL statements, in all other cases\n the sql query is not modified.\n :param superset_query: string, sql query that will be executed\n :param table_name: string, will contain the results of the\n query execution\n :param overwrite, boolean, table table_name will be dropped if true\n :return: string, create table as query\n '
exec_sql = ''
sql = self.stripped()
if overwrite:
exec_sql = 'DROP TABLE IF EXISTS {table_name};\n'
exec_sql += 'CREATE TABLE {table_name} AS \n{sql}'
return exec_sql.format(**locals())
| 5,869,634,862,788,180,000
|
Reformats the query into the create table as query.
Works only for the single select SQL statements, in all other cases
the sql query is not modified.
:param superset_query: string, sql query that will be executed
:param table_name: string, will contain the results of the
query execution
:param overwrite, boolean, table table_name will be dropped if true
:return: string, create table as query
|
superset/sql_parse.py
|
as_create_table
|
AmberCa/incubator-superset
|
python
|
def as_create_table(self, table_name, overwrite=False):
'Reformats the query into the create table as query.\n\n Works only for the single select SQL statements, in all other cases\n the sql query is not modified.\n :param superset_query: string, sql query that will be executed\n :param table_name: string, will contain the results of the\n query execution\n :param overwrite, boolean, table table_name will be dropped if true\n :return: string, create table as query\n '
exec_sql = ''
sql = self.stripped()
if overwrite:
exec_sql = 'DROP TABLE IF EXISTS {table_name};\n'
exec_sql += 'CREATE TABLE {table_name} AS \n{sql}'
return exec_sql.format(**locals())
|
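A self-contained sketch of the same string assembly without the Superset Query object, showing how the optional DROP prefix and the CREATE TABLE ... AS wrapper are combined around a single SELECT; names are placeholders.

def as_create_table(sql, table_name, overwrite=False):
    # Wrap a single SELECT in CREATE TABLE AS, optionally dropping the target first.
    exec_sql = ''
    if overwrite:
        exec_sql = 'DROP TABLE IF EXISTS {table_name};\n'
    exec_sql += 'CREATE TABLE {table_name} AS \n{sql}'
    return exec_sql.format(table_name=table_name, sql=sql)

print(as_create_table('SELECT 1 AS a', 'tmp_table', overwrite=True))
# DROP TABLE IF EXISTS tmp_table;
# CREATE TABLE tmp_table AS
# SELECT 1 AS a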
def display(request):
'Function view to display form in the standard manner.'
if (request.method == 'POST'):
form = FiboForm(request.POST)
if form.is_valid():
fibo = form.save(commit=False)
evensum = fibo.evenFiboSum()
fibo.save()
return render(request, 'problem2/solution2.html', {'evensum': evensum, 'form': form})
else:
form = FiboForm()
return render(request, 'problem2/solution2.html', {'form': form})
| 4,477,814,417,209,123,300
|
Function view to display form in the standard manner.
|
problem2/views.py
|
display
|
byteknacker/eulerapps
|
python
|
def display(request):
if (request.method == 'POST'):
form = FiboForm(request.POST)
if form.is_valid():
fibo = form.save(commit=False)
evensum = fibo.evenFiboSum()
fibo.save()
return render(request, 'problem2/solution2.html', {'evensum': evensum, 'form': form})
else:
form = FiboForm()
return render(request, 'problem2/solution2.html', {'form': form})
|
@staticmethod
def _get_series(i=0):
'\n\n :return:\n '
config = configparser.ConfigParser()
config.read('config.ini')
fourier_folder = config['Folder']['Output']
first_file = os.path.join(fourier_folder, os.listdir(fourier_folder)[i])
with open(first_file, 'r') as b:
j = json.load(b)
name = list(j.keys())[0]
song = j[name]
return (song, name)
| 8,986,104,419,332,724,000
|
:return:
|
test/test_b_plot.py
|
_get_series
|
cperales/Fourier-Clustering-song
|
python
|
@staticmethod
def _get_series(i=0):
'\n\n \n '
config = configparser.ConfigParser()
config.read('config.ini')
fourier_folder = config['Folder']['Output']
first_file = os.path.join(fourier_folder, os.listdir(fourier_folder)[i])
with open(first_file, 'r') as b:
j = json.load(b)
name = list(j.keys())[0]
song = j[name]
return (song, name)
|
@staticmethod
def _get_song(i=0):
'\n\n :return:\n '
config = configparser.ConfigParser()
config.read('config.ini')
song_folder = config['Folder']['Temp']
first_song = os.listdir(song_folder)[i]
(rate, aud_data) = read(os.path.join(song_folder, first_song))
if (len(aud_data) != len(aud_data.ravel())):
aud_data = np.mean(aud_data, axis=1)
return (aud_data, first_song)
| -8,418,300,708,451,570,000
|
:return:
|
test/test_b_plot.py
|
_get_song
|
cperales/Fourier-Clustering-song
|
python
|
@staticmethod
def _get_song(i=0):
'\n\n \n '
config = configparser.ConfigParser()
config.read('config.ini')
song_folder = config['Folder']['Temp']
first_song = os.listdir(song_folder)[i]
(rate, aud_data) = read(os.path.join(song_folder, first_song))
if (len(aud_data) != len(aud_data.ravel())):
aud_data = np.mean(aud_data, axis=1)
return (aud_data, first_song)
|
def test_diff(self):
'\n\n :return:\n '
config = configparser.ConfigParser()
config.read('config.ini')
image_folder = config['Folder']['Image']
(song_1, name_1) = self._get_series(i=0)
(song_2, name_2) = self._get_series(i=1)
diff_plot(song_1, song_2, filename=(name_1.split()[2].split('.')[0] + name_2.split()[2].split('.')[0]), folder=image_folder)
| 1,652,916,689,913,852,700
|
:return:
|
test/test_b_plot.py
|
test_diff
|
cperales/Fourier-Clustering-song
|
python
|
def test_diff(self):
'\n\n \n '
config = configparser.ConfigParser()
config.read('config.ini')
image_folder = config['Folder']['Image']
(song_1, name_1) = self._get_series(i=0)
(song_2, name_2) = self._get_series(i=1)
diff_plot(song_1, song_2, filename=(name_1.split()[2].split('.')[0] + name_2.split()[2].split('.')[0]), folder=image_folder)
|
def test_song(self):
'\n\n :return:\n '
config = configparser.ConfigParser()
config.read('config.ini')
image_folder = config['Folder']['Image']
(aud_data, name) = self._get_song()
song_plot(aud_data, filename=name.split('.')[0], folder=image_folder)
| -8,779,366,337,030,944,000
|
:return:
|
test/test_b_plot.py
|
test_song
|
cperales/Fourier-Clustering-song
|
python
|
def test_song(self):
'\n\n \n '
config = configparser.ConfigParser()
config.read('config.ini')
image_folder = config['Folder']['Image']
(aud_data, name) = self._get_song()
song_plot(aud_data, filename=name.split('.')[0], folder=image_folder)
|
@staticmethod
def get_supported_channels() -> list:
'List of supported channels.'
return list(ChannelMap.channel_map.keys())
| 313,114,182,041,640,400
|
List of supported channels.
|
scripts/channel_map.py
|
get_supported_channels
|
artelk/performance
|
python
|
@staticmethod
def get_supported_channels() -> list:
return list(ChannelMap.channel_map.keys())
|
@staticmethod
def get_supported_frameworks() -> list:
'List of supported frameworks'
frameworks = [ChannelMap.channel_map[channel]['tfm'] for channel in ChannelMap.channel_map]
return set(frameworks)
| 4,910,586,788,561,729,000
|
List of supported frameworks
|
scripts/channel_map.py
|
get_supported_frameworks
|
artelk/performance
|
python
|
@staticmethod
def get_supported_frameworks() -> list:
frameworks = [ChannelMap.channel_map[channel]['tfm'] for channel in ChannelMap.channel_map]
return set(frameworks)
|
@staticmethod
def get_target_framework_monikers(channels: list) -> list:
'\n Translates channel names to Target Framework Monikers (TFMs).\n '
monikers = [ChannelMap.get_target_framework_moniker(channel) for channel in channels]
return list(set(monikers))
| -8,264,586,632,849,845,000
|
Translates channel names to Target Framework Monikers (TFMs).
|
scripts/channel_map.py
|
get_target_framework_monikers
|
artelk/performance
|
python
|
@staticmethod
def get_target_framework_monikers(channels: list) -> list:
'\n \n '
monikers = [ChannelMap.get_target_framework_moniker(channel) for channel in channels]
return list(set(monikers))
|
@staticmethod
def get_target_framework_moniker(channel: str) -> str:
'\n Translate channel name to Target Framework Moniker (TFM)\n '
if (channel in ChannelMap.channel_map):
return ChannelMap.channel_map[channel]['tfm']
else:
raise Exception(('Channel %s is not supported. Supported channels %s' % (channel, ChannelMap.get_supported_channels())))
| 9,109,701,814,379,510,000
|
Translate channel name to Target Framework Moniker (TFM)
|
scripts/channel_map.py
|
get_target_framework_moniker
|
artelk/performance
|
python
|
@staticmethod
def get_target_framework_moniker(channel: str) -> str:
'\n \n '
if (channel in ChannelMap.channel_map):
return ChannelMap.channel_map[channel]['tfm']
else:
raise Exception(('Channel %s is not supported. Supported channels %s' % (channel, ChannelMap.get_supported_channels())))
|
@staticmethod
def get_channel_from_target_framework_moniker(target_framework_moniker: str) -> str:
'Translate Target Framework Moniker (TFM) to channel name'
for channel in ChannelMap.channel_map:
if (ChannelMap.channel_map[channel]['tfm'] == target_framework_moniker):
return channel
raise Exception(('Framework %s is not supported. Supported frameworks: %s' % (target_framework_moniker, ChannelMap.get_supported_frameworks())))
| 6,853,412,562,388,000,000
|
Translate Target Framework Moniker (TFM) to channel name
|
scripts/channel_map.py
|
get_channel_from_target_framework_moniker
|
artelk/performance
|
python
|
@staticmethod
def get_channel_from_target_framework_moniker(target_framework_moniker: str) -> str:
for channel in ChannelMap.channel_map:
if (ChannelMap.channel_map[channel]['tfm'] == target_framework_moniker):
return channel
raise Exception(('Framework %s is not supported. Supported frameworks: %s' % (target_framework_moniker, ChannelMap.get_supported_frameworks())))
|
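A minimal sketch of the two-way channel/TFM lookup with a made-up two-entry map; the real ChannelMap.channel_map is not reproduced here.

channel_map = {
    'main': {'tfm': 'net7.0'},  # hypothetical entries, for illustration only
    '6.0': {'tfm': 'net6.0'},
}

def get_target_framework_moniker(channel):
    # Forward lookup: channel name -> TFM.
    if channel in channel_map:
        return channel_map[channel]['tfm']
    raise Exception('Channel %s is not supported. Supported channels %s'
                    % (channel, list(channel_map)))

def get_channel_from_target_framework_moniker(tfm):
    # Reverse lookup: TFM -> channel name.
    for channel, info in channel_map.items():
        if info['tfm'] == tfm:
            return channel
    raise Exception('Framework %s is not supported.' % tfm)

print(get_target_framework_moniker('main'))                 # net7.0
print(get_channel_from_target_framework_moniker('net6.0'))  # 6.0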
def normalize_imagenet(x):
' Normalize input images according to ImageNet standards.\n Args:\n x (tensor): input images\n '
x = x.clone()
x[:, 0] = ((x[:, 0] - 0.485) / 0.229)
x[:, 1] = ((x[:, 1] - 0.456) / 0.224)
x[:, 2] = ((x[:, 2] - 0.406) / 0.225)
return x
| -5,227,346,449,647,160,000
|
Normalize input images according to ImageNet standards.
Args:
x (tensor): input images
|
examples/ImageRecon/OccNet/architectures.py
|
normalize_imagenet
|
AOE-khkhan/kaolin
|
python
|
def normalize_imagenet(x):
' Normalize input images according to ImageNet standards.\n Args:\n x (tensor): input images\n '
x = x.clone()
x[:, 0] = ((x[:, 0] - 0.485) / 0.229)
x[:, 1] = ((x[:, 1] - 0.456) / 0.224)
x[:, 2] = ((x[:, 2] - 0.406) / 0.225)
return x
|
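An equivalent, minimal sketch of per-channel ImageNet normalization done with broadcasting instead of indexing each channel, assuming a batch in (N, 3, H, W) layout with values in [0, 1].

import torch

IMAGENET_MEAN = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)
IMAGENET_STD = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)

def normalize_imagenet(x):
    # Subtract the per-channel mean and divide by the per-channel std.
    return (x - IMAGENET_MEAN) / IMAGENET_STD

x = torch.rand(2, 3, 4, 4)
print(normalize_imagenet(x).shape)  # torch.Size([2, 3, 4, 4])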
def get_prior_z(device):
' Returns prior distribution for latent code z.\n Args:\n cfg (dict): imported yaml config\n device (device): pytorch device\n '
z_dim = 0
p0_z = dist.Normal(torch.zeros(z_dim, device=device), torch.ones(z_dim, device=device))
return p0_z
| 8,228,995,010,554,023,000
|
Returns prior distribution for latent code z.
Args:
cfg (dict): imported yaml config
device (device): pytorch device
|
examples/ImageRecon/OccNet/architectures.py
|
get_prior_z
|
AOE-khkhan/kaolin
|
python
|
def get_prior_z(device):
' Returns prior distribution for latent code z.\n Args:\n cfg (dict): imported yaml config\n device (device): pytorch device\n '
z_dim = 0
p0_z = dist.Normal(torch.zeros(z_dim, device=device), torch.ones(z_dim, device=device))
return p0_z
|
def forward(self, p, inputs, sample=True, **kwargs):
' Performs a forward pass through the network.\n Args:\n p (tensor): sampled points\n inputs (tensor): conditioning input\n sample (bool): whether to sample for z\n '
batch_size = p.size(0)
c = self.encode_inputs(inputs)
z = self.get_z_from_prior((batch_size,), sample=sample)
p_r = self.decode(p, z, c, **kwargs)
return p_r
| -8,092,593,553,562,814,000
|
Performs a forward pass through the network.
Args:
p (tensor): sampled points
inputs (tensor): conditioning input
sample (bool): whether to sample for z
|
examples/ImageRecon/OccNet/architectures.py
|
forward
|
AOE-khkhan/kaolin
|
python
|
def forward(self, p, inputs, sample=True, **kwargs):
' Performs a forward pass through the network.\n Args:\n p (tensor): sampled points\n inputs (tensor): conditioning input\n sample (bool): whether to sample for z\n '
batch_size = p.size(0)
c = self.encode_inputs(inputs)
z = self.get_z_from_prior((batch_size,), sample=sample)
p_r = self.decode(p, z, c, **kwargs)
return p_r
|
def compute_elbo(self, p, occ, inputs, **kwargs):
' Computes the expectation lower bound.\n Args:\n p (tensor): sampled points\n occ (tensor): occupancy values for p\n inputs (tensor): conditioning input\n '
c = self.encode_inputs(inputs)
q_z = self.infer_z(p, occ, c, **kwargs)
z = q_z.rsample()
p_r = self.decode(p, z, c, **kwargs)
rec_error = (- p_r.log_prob(occ).sum(dim=(- 1)))
kl = dist.kl_divergence(q_z, self.p0_z).sum(dim=(- 1))
elbo = ((- rec_error) - kl)
return (elbo, rec_error, kl)
| -2,864,902,931,423,070,000
|
Computes the expectation lower bound.
Args:
p (tensor): sampled points
occ (tensor): occupancy values for p
inputs (tensor): conditioning input
|
examples/ImageRecon/OccNet/architectures.py
|
compute_elbo
|
AOE-khkhan/kaolin
|
python
|
def compute_elbo(self, p, occ, inputs, **kwargs):
' Computes the expectation lower bound.\n Args:\n p (tensor): sampled points\n occ (tensor): occupancy values for p\n inputs (tensor): conditioning input\n '
c = self.encode_inputs(inputs)
q_z = self.infer_z(p, occ, c, **kwargs)
z = q_z.rsample()
p_r = self.decode(p, z, c, **kwargs)
rec_error = (- p_r.log_prob(occ).sum(dim=(- 1)))
kl = dist.kl_divergence(q_z, self.p0_z).sum(dim=(- 1))
elbo = ((- rec_error) - kl)
return (elbo, rec_error, kl)
|
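A minimal, self-contained sketch of the ELBO terms with torch.distributions, assuming a 2-dimensional Normal posterior/prior and a Bernoulli likelihood over a handful of points; the logits and occupancy labels are random stand-ins for the decoder output and ground truth.

import torch
import torch.distributions as dist

batch, n_points, z_dim = 4, 16, 2

p0_z = dist.Normal(torch.zeros(z_dim), torch.ones(z_dim))               # prior p(z)
q_z = dist.Normal(torch.zeros(batch, z_dim), torch.ones(batch, z_dim))  # posterior q(z|x)

z = q_z.rsample()                                     # reparameterized latent sample
logits = torch.randn(batch, n_points)                 # stand-in for decoder logits
occ = torch.randint(0, 2, (batch, n_points)).float()  # occupancy labels in {0, 1}

p_r = dist.Bernoulli(logits=logits)
rec_error = -p_r.log_prob(occ).sum(dim=-1)            # reconstruction term
kl = dist.kl_divergence(q_z, p0_z).sum(dim=-1)        # KL(q || p) per batch element
elbo = -rec_error - kl
print(elbo.shape)  # torch.Size([4])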
def encode_inputs(self, inputs):
' Encodes the input.\n Args:\n input (tensor): the input\n '
c = self.encoder(inputs)
return c
| 5,463,329,561,843,520,000
|
Encodes the input.
Args:
input (tensor): the input
|
examples/ImageRecon/OccNet/architectures.py
|
encode_inputs
|
AOE-khkhan/kaolin
|
python
|
def encode_inputs(self, inputs):
' Encodes the input.\n Args:\n input (tensor): the input\n '
c = self.encoder(inputs)
return c
|
def decode(self, p, z, c, **kwargs):
' Returns occupancy probabilities for the sampled points.\n Args:\n p (tensor): points\n z (tensor): latent code z\n c (tensor): latent conditioned code c\n '
logits = self.decoder(p, z, c, **kwargs)
p_r = dist.Bernoulli(logits=logits)
return p_r
| -400,121,044,428,680,000
|
Returns occupancy probabilities for the sampled points.
Args:
p (tensor): points
z (tensor): latent code z
c (tensor): latent conditioned code c
|
examples/ImageRecon/OccNet/architectures.py
|
decode
|
AOE-khkhan/kaolin
|
python
|
def decode(self, p, z, c, **kwargs):
' Returns occupancy probabilities for the sampled points.\n Args:\n p (tensor): points\n z (tensor): latent code z\n c (tensor): latent conditioned code c\n '
logits = self.decoder(p, z, c, **kwargs)
p_r = dist.Bernoulli(logits=logits)
return p_r
|
def infer_z(self, p, occ, c, **kwargs):
' Infers z.\n Args:\n p (tensor): points tensor\n occ (tensor): occupancy values for occ\n c (tensor): latent conditioned code c\n '
batch_size = p.size(0)
mean_z = torch.empty(batch_size, 0).to(self.device)
logstd_z = torch.empty(batch_size, 0).to(self.device)
q_z = dist.Normal(mean_z, torch.exp(logstd_z))
return q_z
| 6,820,978,492,670,022,000
|
Infers z.
Args:
p (tensor): points tensor
occ (tensor): occupancy values for occ
c (tensor): latent conditioned code c
|
examples/ImageRecon/OccNet/architectures.py
|
infer_z
|
AOE-khkhan/kaolin
|
python
|
def infer_z(self, p, occ, c, **kwargs):
' Infers z.\n Args:\n p (tensor): points tensor\n occ (tensor): occupancy values for occ\n c (tensor): latent conditioned code c\n '
batch_size = p.size(0)
mean_z = torch.empty(batch_size, 0).to(self.device)
logstd_z = torch.empty(batch_size, 0).to(self.device)
q_z = dist.Normal(mean_z, torch.exp(logstd_z))
return q_z
|
def get_z_from_prior(self, size=torch.Size([]), sample=True):
' Returns z from prior distribution.\n Args:\n size (Size): size of z\n sample (bool): whether to sample\n '
if sample:
z = self.p0_z.sample(size).to(self.device)
else:
z = self.p0_z.mean.to(self.device)
z = z.expand(*size, *z.size())
return z
| -7,939,061,773,836,317,000
|
Returns z from prior distribution.
Args:
size (Size): size of z
sample (bool): whether to sample
|
examples/ImageRecon/OccNet/architectures.py
|
get_z_from_prior
|
AOE-khkhan/kaolin
|
python
|
def get_z_from_prior(self, size=torch.Size([]), sample=True):
' Returns z from prior distribution.\n Args:\n size (Size): size of z\n sample (bool): whether to sample\n '
if sample:
z = self.p0_z.sample(size).to(self.device)
else:
z = self.p0_z.mean.to(self.device)
z = z.expand(*size, *z.size())
return z
|
def register(self, model, model_admin=None, **kwargs):
'\n Registers the given model with the given admin class. Once a model is\n registered in self.registry, we also add it to app registries in\n self.apps.\n\n If no model_admin is passed, it will use ModelAdmin2. If keyword\n arguments are given they will be passed to the admin class on\n instantiation.\n\n If a model is already registered, this will raise ImproperlyConfigured.\n '
if (model in self.registry):
raise ImproperlyConfigured(('%s is already registered in django-admin2' % model))
if (not model_admin):
model_admin = types.ModelAdmin2
self.registry[model] = model_admin(model, admin=self, **kwargs)
app_label = utils.model_options(model).app_label
if (app_label in self.apps.keys()):
self.apps[app_label][model] = self.registry[model]
else:
self.apps[app_label] = {model: self.registry[model]}
| 1,695,026,397,503,695,600
|
Registers the given model with the given admin class. Once a model is
registered in self.registry, we also add it to app registries in
self.apps.
If no model_admin is passed, it will use ModelAdmin2. If keyword
arguments are given they will be passed to the admin class on
instantiation.
If a model is already registered, this will raise ImproperlyConfigured.
|
djadmin2/core.py
|
register
|
PowerOlive/django-admin2
|
python
|
def register(self, model, model_admin=None, **kwargs):
'\n Registers the given model with the given admin class. Once a model is\n registered in self.registry, we also add it to app registries in\n self.apps.\n\n If no model_admin is passed, it will use ModelAdmin2. If keyword\n arguments are given they will be passed to the admin class on\n instantiation.\n\n If a model is already registered, this will raise ImproperlyConfigured.\n '
if (model in self.registry):
raise ImproperlyConfigured(('%s is already registered in django-admin2' % model))
if (not model_admin):
model_admin = types.ModelAdmin2
self.registry[model] = model_admin(model, admin=self, **kwargs)
app_label = utils.model_options(model).app_label
if (app_label in self.apps.keys()):
self.apps[app_label][model] = self.registry[model]
else:
self.apps[app_label] = {model: self.registry[model]}
|
def deregister(self, model):
'\n Deregisters the given model. Remove the model from the self.app as well\n\n If the model is not already registered, this will raise\n ImproperlyConfigured.\n '
try:
del self.registry[model]
except KeyError:
raise ImproperlyConfigured(('%s was never registered in django-admin2' % model))
app_label = utils.model_options(model).app_label
del self.apps[app_label][model]
if (not self.apps[app_label]):
del self.apps[app_label]
| -226,734,680,756,163,400
|
Deregisters the given model. Remove the model from the self.app as well
If the model is not already registered, this will raise
ImproperlyConfigured.
|
djadmin2/core.py
|
deregister
|
PowerOlive/django-admin2
|
python
|
def deregister(self, model):
'\n Deregisters the given model. Remove the model from the self.app as well\n\n If the model is not already registered, this will raise\n ImproperlyConfigured.\n '
try:
del self.registry[model]
except KeyError:
raise ImproperlyConfigured(('%s was never registered in django-admin2' % model))
app_label = utils.model_options(model).app_label
del self.apps[app_label][model]
if (not self.apps[app_label]):
del self.apps[app_label]
|
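A minimal sketch of the same register/deregister bookkeeping using plain dicts and ValueError in place of Django's ImproperlyConfigured; the model is just a string key here.

registry = {}
apps = {}

def register(app_label, model, model_admin):
    if model in registry:
        raise ValueError('%s is already registered' % model)
    registry[model] = model_admin
    apps.setdefault(app_label, {})[model] = model_admin

def deregister(app_label, model):
    try:
        del registry[model]
    except KeyError:
        raise ValueError('%s was never registered' % model)
    del apps[app_label][model]
    if not apps[app_label]:  # drop the app entry once it has no models left
        del apps[app_label]

register('blog', 'Post', object())
deregister('blog', 'Post')
print(registry, apps)  # {} {}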
def register_app_verbose_name(self, app_label, app_verbose_name):
'\n Registers the given app label with the given app verbose name.\n\n If an app_label is already registered, this will raise\n ImproperlyConfigured.\n '
if (app_label in self.app_verbose_names):
raise ImproperlyConfigured(('%s is already registered in django-admin2' % app_label))
self.app_verbose_names[app_label] = app_verbose_name
| 8,412,480,849,148,175,000
|
Registers the given app label with the given app verbose name.
If an app_label is already registered, this will raise
ImproperlyConfigured.
|
djadmin2/core.py
|
register_app_verbose_name
|
PowerOlive/django-admin2
|
python
|
def register_app_verbose_name(self, app_label, app_verbose_name):
'\n Registers the given app label with the given app verbose name.\n\n If an app_label is already registered, this will raise\n ImproperlyConfigured.\n '
if (app_label in self.app_verbose_names):
raise ImproperlyConfigured(('%s is already registered in django-admin2' % app_label))
self.app_verbose_names[app_label] = app_verbose_name
|
def deregister_app_verbose_name(self, app_label):
'\n Deregisters the given app label. Remove the app label from the\n self.app_verbose_names as well.\n\n If the app label is not already registered, this will raise\n ImproperlyConfigured.\n '
try:
del self.app_verbose_names[app_label]
except KeyError:
raise ImproperlyConfigured(('%s app label was never registered in django-admin2' % app_label))
| -2,633,586,113,666,253,300
|
Deregisters the given app label. Remove the app label from the
self.app_verbose_names as well.
If the app label is not already registered, this will raise
ImproperlyConfigured.
|
djadmin2/core.py
|
deregister_app_verbose_name
|
PowerOlive/django-admin2
|
python
|
def deregister_app_verbose_name(self, app_label):
'\n Deregisters the given app label. Remove the app label from the\n self.app_verbose_names as well.\n\n If the app label is not already registered, this will raise\n ImproperlyConfigured.\n '
try:
del self.app_verbose_names[app_label]
except KeyError:
raise ImproperlyConfigured(('%s app label was never registered in django-admin2' % app_label))
|
def autodiscover(self):
'\n Autodiscovers all admin2.py modules for apps in INSTALLED_APPS by\n trying to import them.\n '
for app_name in settings.INSTALLED_APPS:
try:
import_module(('%s.admin2' % app_name))
except ImportError as e:
if (str(e).startswith('No module named') and ('admin2' in str(e))):
continue
raise e
| 4,519,707,043,250,492,400
|
Autodiscovers all admin2.py modules for apps in INSTALLED_APPS by
trying to import them.
|
djadmin2/core.py
|
autodiscover
|
PowerOlive/django-admin2
|
python
|
def autodiscover(self):
'\n Autodiscovers all admin2.py modules for apps in INSTALLED_APPS by\n trying to import them.\n '
for app_name in settings.INSTALLED_APPS:
try:
import_module(('%s.admin2' % app_name))
except ImportError as e:
if (str(e).startswith('No module named') and ('admin2' in str(e))):
continue
raise e
|
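A minimal sketch of the autodiscovery loop with importlib, iterating over a hypothetical app list; a missing admin2 submodule is skipped, anything else re-raises. Standard-library package names keep the sketch runnable.

from importlib import import_module

installed_apps = ['collections', 'email']  # hypothetical INSTALLED_APPS stand-in

for app_name in installed_apps:
    try:
        import_module('%s.admin2' % app_name)
    except ImportError as e:
        # Only swallow the "no admin2 module" case; propagate real import errors.
        if str(e).startswith('No module named') and 'admin2' in str(e):
            continue
        raise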
def get_admin_by_name(self, name):
'\n Returns the admin instance that was registered with the passed in\n name.\n '
for object_admin in self.registry.values():
if (object_admin.name == name):
return object_admin
raise ValueError(u'No object admin found with name {}'.format(repr(name)))
| 1,111,493,410,733,876,500
|
Returns the admin instance that was registered with the passed in
name.
|
djadmin2/core.py
|
get_admin_by_name
|
PowerOlive/django-admin2
|
python
|
def get_admin_by_name(self, name):
'\n Returns the admin instance that was registered with the passed in\n name.\n '
for object_admin in self.registry.values():
if (object_admin.name == name):
return object_admin
raise ValueError(u'No object admin found with name {}'.format(repr(name)))
|
def save(query: List[str], save_path: str, downloader, m3u_file: Optional[str]=None) -> None:
'\n Save metadata from spotify to the disk.\n\n ### Arguments\n - query: list of strings to search for.\n - save_path: Path to the file to save the metadata to.\n - threads: Number of threads to use.\n\n ### Notes\n - This function is multi-threaded.\n '
songs = parse_query(query, downloader.threads)
save_data = [song.json for song in songs]
with open(save_path, 'w', encoding='utf-8') as save_file:
json.dump(save_data, save_file, indent=4, ensure_ascii=False)
if m3u_file:
create_m3u_file(m3u_file, songs, downloader.output, downloader.output_format, False)
downloader.progress_handler.log(f"Saved {len(save_data)} song{('s' if (len(save_data) > 1) else '')} to {save_path}")
| 1,037,826,605,912,516,600
|
Save metadata from spotify to the disk.
### Arguments
- query: list of strings to search for.
- save_path: Path to the file to save the metadata to.
- threads: Number of threads to use.
### Notes
- This function is multi-threaded.
|
spotdl/console/save.py
|
save
|
phcreery/spotdl-v4
|
python
|
def save(query: List[str], save_path: str, downloader, m3u_file: Optional[str]=None) -> None:
'\n Save metadata from spotify to the disk.\n\n ### Arguments\n - query: list of strings to search for.\n - save_path: Path to the file to save the metadata to.\n - threads: Number of threads to use.\n\n ### Notes\n - This function is multi-threaded.\n '
songs = parse_query(query, downloader.threads)
save_data = [song.json for song in songs]
with open(save_path, 'w', encoding='utf-8') as save_file:
json.dump(save_data, save_file, indent=4, ensure_ascii=False)
if m3u_file:
create_m3u_file(m3u_file, songs, downloader.output, downloader.output_format, False)
downloader.progress_handler.log(f"Saved {len(save_data)} song{('s' if (len(save_data) > 1) else )} to {save_path}")
|
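A minimal sketch of the serialization step alone: dumping a list of song-like dicts with json.dump, keeping non-ASCII text readable via ensure_ascii=False. The file name and records are placeholders.

import json

save_data = [
    {'name': 'Song One', 'artist': 'Ärtist'},
    {'name': 'Song Two', 'artist': 'Artist'},
]

with open('saved_songs.json', 'w', encoding='utf-8') as save_file:
    json.dump(save_data, save_file, indent=4, ensure_ascii=False)

print('Saved %d song%s to saved_songs.json'
      % (len(save_data), 's' if len(save_data) > 1 else ''))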
def get_arguments():
'Parse all the arguments provided from the CLI.\n\n Returns:\n A list of parsed arguments.\n '
parser = argparse.ArgumentParser(description='DeepLab-ResNet Network')
parser.add_argument('--model', type=str, default=MODEL, help='Model Choice (DeeplabMulti/DeeplabVGG/Oracle).')
parser.add_argument('--data-dir', type=str, default=DATA_DIRECTORY, help='Path to the directory containing the Cityscapes dataset.')
parser.add_argument('--data-list', type=str, default=DATA_LIST_PATH, help='Path to the file listing the images in the dataset.')
parser.add_argument('--ignore-label', type=int, default=IGNORE_LABEL, help='The index of the label to ignore during the training.')
parser.add_argument('--num-classes', type=int, default=NUM_CLASSES, help='Number of classes to predict (including background).')
parser.add_argument('--restore-from', type=str, default=RESTORE_FROM, help='Where restore model parameters from.')
parser.add_argument('--gpu', type=int, default=0, help='choose gpu device.')
parser.add_argument('--batchsize', type=int, default=4, help='batch size.')
parser.add_argument('--set', type=str, default=SET, help='choose evaluation set.')
parser.add_argument('--save', type=str, default=SAVE_PATH, help='Path to save result.')
parser.add_argument('--input-size', type=str, default=INPUT_SIZE, help='Comma-separated string with height and width of source images.')
return parser.parse_args()
| -3,601,046,404,071,038,000
|
Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
|
generate_plabel_dark_zurich.py
|
get_arguments
|
qimw/UACDA
|
python
|
def get_arguments():
'Parse all the arguments provided from the CLI.\n\n Returns:\n A list of parsed arguments.\n '
parser = argparse.ArgumentParser(description='DeepLab-ResNet Network')
parser.add_argument('--model', type=str, default=MODEL, help='Model Choice (DeeplabMulti/DeeplabVGG/Oracle).')
parser.add_argument('--data-dir', type=str, default=DATA_DIRECTORY, help='Path to the directory containing the Cityscapes dataset.')
parser.add_argument('--data-list', type=str, default=DATA_LIST_PATH, help='Path to the file listing the images in the dataset.')
parser.add_argument('--ignore-label', type=int, default=IGNORE_LABEL, help='The index of the label to ignore during the training.')
parser.add_argument('--num-classes', type=int, default=NUM_CLASSES, help='Number of classes to predict (including background).')
parser.add_argument('--restore-from', type=str, default=RESTORE_FROM, help='Where restore model parameters from.')
parser.add_argument('--gpu', type=int, default=0, help='choose gpu device.')
parser.add_argument('--batchsize', type=int, default=4, help='batch size.')
parser.add_argument('--set', type=str, default=SET, help='choose evaluation set.')
parser.add_argument('--save', type=str, default=SAVE_PATH, help='Path to save result.')
parser.add_argument('--input-size', type=str, default=INPUT_SIZE, help='Comma-separated string with height and width of source images.')
return parser.parse_args()
|
def main():
'Create the model and start the evaluation process.'
args = get_arguments()
(w, h) = map(int, args.input_size.split(','))
config_path = os.path.join(os.path.dirname(args.restore_from), 'opts.yaml')
with open(config_path, 'r') as stream:
config = yaml.load(stream)
args.model = config['model']
print(('ModelType:%s' % args.model))
print(('NormType:%s' % config['norm_style']))
gpu0 = args.gpu
batchsize = args.batchsize
model_name = os.path.basename(os.path.dirname(args.restore_from))
if (not os.path.exists(args.save)):
os.makedirs(args.save)
confidence_path = os.path.join(args.save, 'submit/confidence')
label_path = os.path.join(args.save, 'submit/labelTrainIds')
label_invalid_path = os.path.join(args.save, 'submit/labelTrainIds_invalid')
for path in [confidence_path, label_path, label_invalid_path]:
if (not os.path.exists(path)):
os.makedirs(path)
if (args.model == 'DeepLab'):
model = DeeplabMulti(num_classes=args.num_classes, use_se=config['use_se'], train_bn=False, norm_style=config['norm_style'])
elif (args.model == 'Oracle'):
model = Res_Deeplab(num_classes=args.num_classes)
if (args.restore_from == RESTORE_FROM):
args.restore_from = RESTORE_FROM_ORC
elif (args.model == 'DeeplabVGG'):
model = DeeplabVGG(num_classes=args.num_classes)
if (args.restore_from == RESTORE_FROM):
args.restore_from = RESTORE_FROM_VGG
if (args.restore_from[:4] == 'http'):
saved_state_dict = model_zoo.load_url(args.restore_from)
else:
saved_state_dict = torch.load(args.restore_from)
try:
model.load_state_dict(saved_state_dict)
except:
model = torch.nn.DataParallel(model)
model.load_state_dict(saved_state_dict)
model.eval()
model.cuda(gpu0)
testloader = data.DataLoader(DarkZurichDataSet(args.data_dir, args.data_list, crop_size=(h, w), resize_size=(w, h), mean=IMG_MEAN, scale=False, mirror=False, set=args.set), batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)
scale = 1.25
testloader2 = data.DataLoader(DarkZurichDataSet(args.data_dir, args.data_list, crop_size=(round((h * scale)), round((w * scale))), resize_size=(round((w * scale)), round((h * scale))), mean=IMG_MEAN, scale=False, mirror=False, set=args.set), batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)
if (version.parse(torch.__version__) >= version.parse('0.4.0')):
interp = nn.Upsample(size=(1080, 1920), mode='bilinear', align_corners=True)
else:
interp = nn.Upsample(size=(1080, 1920), mode='bilinear')
sm = torch.nn.Softmax(dim=1)
log_sm = torch.nn.LogSoftmax(dim=1)
kl_distance = nn.KLDivLoss(reduction='none')
prior = np.load('./utils/prior_all.npy').transpose((2, 0, 1))[np.newaxis, :, :, :]
prior = torch.from_numpy(prior)
for (index, img_data) in enumerate(zip(testloader, testloader2)):
(batch, batch2) = img_data
(image, _, name) = batch
(image2, _, name2) = batch2
inputs = image.cuda()
inputs2 = image2.cuda()
print(('\r>>>>Extracting feature...%04d/%04d' % ((index * batchsize), (args.batchsize * len(testloader)))), end='')
if (args.model == 'DeepLab'):
with torch.no_grad():
(output1, output2) = model(inputs)
output_batch = interp(sm(((0.5 * output1) + output2)))
heatmap_batch = torch.sum(kl_distance(log_sm(output1), sm(output2)), dim=1)
(output1, output2) = model(fliplr(inputs))
(output1, output2) = (fliplr(output1), fliplr(output2))
output_batch += interp(sm(((0.5 * output1) + output2)))
del output1, output2, inputs
(output1, output2) = model(inputs2)
output_batch += interp(sm(((0.5 * output1) + output2)))
(output1, output2) = model(fliplr(inputs2))
(output1, output2) = (fliplr(output1), fliplr(output2))
output_batch += interp(sm(((0.5 * output1) + output2)))
del output1, output2, inputs2
ratio = 0.95
output_batch = (output_batch.cpu() / 4)
output_batch = output_batch.data.numpy()
heatmap_batch = heatmap_batch.cpu().data.numpy()
elif ((args.model == 'DeeplabVGG') or (args.model == 'Oracle')):
output_batch = model(Variable(image).cuda())
output_batch = interp(output_batch).cpu().data.numpy()
output_batch = output_batch.transpose(0, 2, 3, 1)
score_batch = np.max(output_batch, axis=3)
output_batch = np.asarray(np.argmax(output_batch, axis=3), dtype=np.uint8)
threshold = 0.3274
for i in range(output_batch.shape[0]):
output_single = output_batch[i, :, :]
output_col = colorize_mask(output_single)
output = Image.fromarray(output_single)
name_tmp = name[i].split('/')[(- 1)]
dir_name = name[i].split('/')[(- 2)]
save_path = ((args.save + '/') + dir_name)
if (not os.path.isdir(save_path)):
os.mkdir(save_path)
output.save(('%s/%s' % (save_path, name_tmp)))
print(('%s/%s' % (save_path, name_tmp)))
output_col.save(('%s/%s_color.png' % (save_path, name_tmp.split('.')[0])))
if ((args.set == 'test') or (args.set == 'val')):
output.save(('%s/%s' % (label_path, name_tmp)))
output_single[(score_batch[i, :, :] < threshold)] = 255
output = Image.fromarray(output_single)
output.save(('%s/%s' % (label_invalid_path, name_tmp)))
confidence = (score_batch[i, :, :] * 65535)
confidence = np.asarray(confidence, dtype=np.uint16)
print(confidence.min(), confidence.max())
iio.imwrite(('%s/%s' % (confidence_path, name_tmp)), confidence)
return args.save
| -2,165,387,849,207,418,400
|
Create the model and start the evaluation process.
|
generate_plabel_dark_zurich.py
|
main
|
qimw/UACDA
|
python
|
def main():
args = get_arguments()
(w, h) = map(int, args.input_size.split(','))
config_path = os.path.join(os.path.dirname(args.restore_from), 'opts.yaml')
with open(config_path, 'r') as stream:
config = yaml.load(stream)
args.model = config['model']
print(('ModelType:%s' % args.model))
print(('NormType:%s' % config['norm_style']))
gpu0 = args.gpu
batchsize = args.batchsize
model_name = os.path.basename(os.path.dirname(args.restore_from))
if (not os.path.exists(args.save)):
os.makedirs(args.save)
confidence_path = os.path.join(args.save, 'submit/confidence')
label_path = os.path.join(args.save, 'submit/labelTrainIds')
label_invalid_path = os.path.join(args.save, 'submit/labelTrainIds_invalid')
for path in [confidence_path, label_path, label_invalid_path]:
if (not os.path.exists(path)):
os.makedirs(path)
if (args.model == 'DeepLab'):
model = DeeplabMulti(num_classes=args.num_classes, use_se=config['use_se'], train_bn=False, norm_style=config['norm_style'])
elif (args.model == 'Oracle'):
model = Res_Deeplab(num_classes=args.num_classes)
if (args.restore_from == RESTORE_FROM):
args.restore_from = RESTORE_FROM_ORC
elif (args.model == 'DeeplabVGG'):
model = DeeplabVGG(num_classes=args.num_classes)
if (args.restore_from == RESTORE_FROM):
args.restore_from = RESTORE_FROM_VGG
if (args.restore_from[:4] == 'http'):
saved_state_dict = model_zoo.load_url(args.restore_from)
else:
saved_state_dict = torch.load(args.restore_from)
try:
model.load_state_dict(saved_state_dict)
except:
model = torch.nn.DataParallel(model)
model.load_state_dict(saved_state_dict)
model.eval()
model.cuda(gpu0)
testloader = data.DataLoader(DarkZurichDataSet(args.data_dir, args.data_list, crop_size=(h, w), resize_size=(w, h), mean=IMG_MEAN, scale=False, mirror=False, set=args.set), batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)
scale = 1.25
testloader2 = data.DataLoader(DarkZurichDataSet(args.data_dir, args.data_list, crop_size=(round((h * scale)), round((w * scale))), resize_size=(round((w * scale)), round((h * scale))), mean=IMG_MEAN, scale=False, mirror=False, set=args.set), batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)
if (version.parse(torch.__version__) >= version.parse('0.4.0')):
interp = nn.Upsample(size=(1080, 1920), mode='bilinear', align_corners=True)
else:
interp = nn.Upsample(size=(1080, 1920), mode='bilinear')
sm = torch.nn.Softmax(dim=1)
log_sm = torch.nn.LogSoftmax(dim=1)
kl_distance = nn.KLDivLoss(reduction='none')
prior = np.load('./utils/prior_all.npy').transpose((2, 0, 1))[np.newaxis, :, :, :]
prior = torch.from_numpy(prior)
for (index, img_data) in enumerate(zip(testloader, testloader2)):
(batch, batch2) = img_data
(image, _, name) = batch
(image2, _, name2) = batch2
inputs = image.cuda()
inputs2 = image2.cuda()
print(('\r>>>>Extracting feature...%04d/%04d' % ((index * batchsize), (args.batchsize * len(testloader)))), end='')
if (args.model == 'DeepLab'):
with torch.no_grad():
(output1, output2) = model(inputs)
output_batch = interp(sm(((0.5 * output1) + output2)))
heatmap_batch = torch.sum(kl_distance(log_sm(output1), sm(output2)), dim=1)
(output1, output2) = model(fliplr(inputs))
(output1, output2) = (fliplr(output1), fliplr(output2))
output_batch += interp(sm(((0.5 * output1) + output2)))
del output1, output2, inputs
(output1, output2) = model(inputs2)
output_batch += interp(sm(((0.5 * output1) + output2)))
(output1, output2) = model(fliplr(inputs2))
(output1, output2) = (fliplr(output1), fliplr(output2))
output_batch += interp(sm(((0.5 * output1) + output2)))
del output1, output2, inputs2
ratio = 0.95
output_batch = (output_batch.cpu() / 4)
output_batch = output_batch.data.numpy()
heatmap_batch = heatmap_batch.cpu().data.numpy()
elif ((args.model == 'DeeplabVGG') or (args.model == 'Oracle')):
output_batch = model(Variable(image).cuda())
output_batch = interp(output_batch).cpu().data.numpy()
output_batch = output_batch.transpose(0, 2, 3, 1)
score_batch = np.max(output_batch, axis=3)
output_batch = np.asarray(np.argmax(output_batch, axis=3), dtype=np.uint8)
threshold = 0.3274
for i in range(output_batch.shape[0]):
output_single = output_batch[i, :, :]
output_col = colorize_mask(output_single)
output = Image.fromarray(output_single)
name_tmp = name[i].split('/')[(- 1)]
dir_name = name[i].split('/')[(- 2)]
save_path = ((args.save + '/') + dir_name)
if (not os.path.isdir(save_path)):
os.mkdir(save_path)
output.save(('%s/%s' % (save_path, name_tmp)))
print(('%s/%s' % (save_path, name_tmp)))
output_col.save(('%s/%s_color.png' % (save_path, name_tmp.split('.')[0])))
if ((args.set == 'test') or (args.set == 'val')):
output.save(('%s/%s' % (label_path, name_tmp)))
output_single[(score_batch[i, :, :] < threshold)] = 255
output = Image.fromarray(output_single)
output.save(('%s/%s' % (label_invalid_path, name_tmp)))
confidence = (score_batch[i, :, :] * 65535)
confidence = np.asarray(confidence, dtype=np.uint16)
print(confidence.min(), confidence.max())
iio.imwrite(('%s/%s' % (confidence_path, name_tmp)), confidence)
return args.save
|
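The DeepLab branch of main() above builds each pseudo-label by averaging softmax outputs over four forward passes: the image, its horizontal flip, a 1.25x-scaled copy, and that copy's flip. Below is a minimal sketch of this test-time-augmentation scheme, assuming a model with two prediction heads; fliplr_nchw and averaged_probs are illustrative helpers, not functions from the repository.

import torch
import torch.nn as nn

def fliplr_nchw(x):
    # Flip the width (last) dimension of an (N, C, H, W) tensor.
    return torch.flip(x, dims=[3])

@torch.no_grad()
def averaged_probs(model, inputs, inputs_scaled, out_size=(1080, 1920)):
    # 4-way test-time augmentation: {1.0x, 1.25x} scale x {original, flipped}.
    sm = nn.Softmax(dim=1)
    interp = nn.Upsample(size=out_size, mode='bilinear', align_corners=True)
    total = 0
    for x in (inputs, inputs_scaled):
        for flipped in (False, True):
            xin = fliplr_nchw(x) if flipped else x
            out1, out2 = model(xin)  # two prediction heads, as in DeeplabMulti
            if flipped:
                out1, out2 = fliplr_nchw(out1), fliplr_nchw(out2)
            total = total + interp(sm(0.5 * out1 + out2))
    return total / 4  # averaged class probabilities, shape (N, C, *out_size)

Pixels whose maximum averaged probability falls below the 0.3274 threshold are then set to 255 (ignore) in the labelTrainIds_invalid output, matching the per-image loop above.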
def __init__(self, runscontainer, marginal_threshold=0.05):
'Wrapper for parameter_importance to save the importance-object/ extract the results. We want to show the\n top X most important parameter-fanova-plots.\n\n Parameters\n ----------\n runscontainer: RunsContainer\n contains all important information about the configurator runs\n marginal_threshold: float\n parameter/s must be at least this important to be mentioned\n '
super().__init__(runscontainer)
self.marginal_threshold = marginal_threshold
self.parameter_importance('fanova')
| -2,845,748,282,511,785,500
|
Wrapper for parameter_importance to save the importance-object/ extract the results. We want to show the
top X most important parameter-fanova-plots.
Parameters
----------
runscontainer: RunsContainer
contains all important information about the configurator runs
marginal_threshold: float
parameter/s must be at least this important to be mentioned
|
cave/analyzer/parameter_importance/fanova.py
|
__init__
|
automl/CAVE
|
python
|
def __init__(self, runscontainer, marginal_threshold=0.05):
'Wrapper for parameter_importance to save the importance-object/ extract the results. We want to show the\n top X most important parameter-fanova-plots.\n\n Parameters\n ----------\n runscontainer: RunsContainer\n contains all important information about the configurator runs\n marginal_threshold: float\n parameter/s must be at least this important to be mentioned\n '
super().__init__(runscontainer)
self.marginal_threshold = marginal_threshold
self.parameter_importance('fanova')
|
def parse_pairwise(p):
"parse pimp's way of having pairwise parameters as key as str and return list of individuals"
res = [tmp.strip("' ") for tmp in p.strip('[]').split(',')]
return res
| 8,489,956,221,889,464,000
|
parse pimp's way of having pairwise parameters as key as str and return list of individuals
|
cave/analyzer/parameter_importance/fanova.py
|
parse_pairwise
|
automl/CAVE
|
python
|
def parse_pairwise(p):
res = [tmp.strip("' ") for tmp in p.strip('[]').split(',')]
return res
|
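An illustrative call for parse_pairwise above, with made-up parameter names: pimp stores a pairwise importance key as the string form of a list, and the helper recovers the individual names.

key = "['learning_rate', 'batch_size']"  # hypothetical pimp pairwise key
parse_pairwise(key)  # -> ['learning_rate', 'batch_size']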
def test_create_failure_recovery(self):
'Check that rollback still works with dynamic metadata.\n\n This test fails the second instance.\n '
tmpl = {'HeatTemplateFormatVersion': '2012-12-12', 'Resources': {'AResource': {'Type': 'OverwrittenFnGetRefIdType', 'Properties': {'Foo': 'abc'}}, 'BResource': {'Type': 'ResourceWithPropsType', 'Properties': {'Foo': {'Ref': 'AResource'}}}}}
self.stack = stack.Stack(self.ctx, 'update_test_stack', template.Template(tmpl), disable_rollback=True)
class FakeException(Exception):
pass
mock_create = self.patchobject(generic_rsrc.ResourceWithFnGetRefIdType, 'handle_create', side_effect=[FakeException, None])
mock_delete = self.patchobject(generic_rsrc.ResourceWithFnGetRefIdType, 'handle_delete', return_value=None)
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.FAILED), self.stack.state)
self.assertEqual('abc', self.stack['AResource'].properties['Foo'])
updated_stack = stack.Stack(self.ctx, 'updated_stack', template.Template(tmpl), disable_rollback=True)
self.stack.update(updated_stack)
self.assertEqual((stack.Stack.UPDATE, stack.Stack.COMPLETE), self.stack.state)
self.assertEqual('abc', self.stack['AResource']._stored_properties_data['Foo'])
self.assertEqual('ID-AResource', self.stack['BResource']._stored_properties_data['Foo'])
mock_delete.assert_called_once_with()
self.assertEqual(2, mock_create.call_count)
| 8,971,451,799,159,772,000
|
Check that rollback still works with dynamic metadata.
This test fails the second instance.
|
heat/tests/test_stack.py
|
test_create_failure_recovery
|
openstack/heat
|
python
|
def test_create_failure_recovery(self):
'Check that rollback still works with dynamic metadata.\n\n This test fails the second instance.\n '
tmpl = {'HeatTemplateFormatVersion': '2012-12-12', 'Resources': {'AResource': {'Type': 'OverwrittenFnGetRefIdType', 'Properties': {'Foo': 'abc'}}, 'BResource': {'Type': 'ResourceWithPropsType', 'Properties': {'Foo': {'Ref': 'AResource'}}}}}
self.stack = stack.Stack(self.ctx, 'update_test_stack', template.Template(tmpl), disable_rollback=True)
class FakeException(Exception):
pass
mock_create = self.patchobject(generic_rsrc.ResourceWithFnGetRefIdType, 'handle_create', side_effect=[FakeException, None])
mock_delete = self.patchobject(generic_rsrc.ResourceWithFnGetRefIdType, 'handle_delete', return_value=None)
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.FAILED), self.stack.state)
self.assertEqual('abc', self.stack['AResource'].properties['Foo'])
updated_stack = stack.Stack(self.ctx, 'updated_stack', template.Template(tmpl), disable_rollback=True)
self.stack.update(updated_stack)
self.assertEqual((stack.Stack.UPDATE, stack.Stack.COMPLETE), self.stack.state)
self.assertEqual('abc', self.stack['AResource']._stored_properties_data['Foo'])
self.assertEqual('ID-AResource', self.stack['BResource']._stored_properties_data['Foo'])
mock_delete.assert_called_once_with()
self.assertEqual(2, mock_create.call_count)
|
def test_store_saves_owner(self):
'owner_id attribute of Store is saved to the database when stored.'
self.stack = stack.Stack(self.ctx, 'owner_stack', self.tmpl)
stack_ownee = stack.Stack(self.ctx, 'ownee_stack', self.tmpl, owner_id=self.stack.id)
stack_ownee.store()
db_stack = stack_object.Stack.get_by_id(self.ctx, stack_ownee.id)
self.assertEqual(self.stack.id, db_stack.owner_id)
| -2,445,248,347,015,333,400
|
owner_id attribute of Store is saved to the database when stored.
|
heat/tests/test_stack.py
|
test_store_saves_owner
|
openstack/heat
|
python
|
def test_store_saves_owner(self):
self.stack = stack.Stack(self.ctx, 'owner_stack', self.tmpl)
stack_ownee = stack.Stack(self.ctx, 'ownee_stack', self.tmpl, owner_id=self.stack.id)
stack_ownee.store()
db_stack = stack_object.Stack.get_by_id(self.ctx, stack_ownee.id)
self.assertEqual(self.stack.id, db_stack.owner_id)
|
def test_store_saves_creds(self):
'A user_creds entry is created on first stack store.'
cfg.CONF.set_default('deferred_auth_method', 'password')
self.stack = stack.Stack(self.ctx, 'creds_stack', self.tmpl)
self.stack.store()
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
user_creds_id = db_stack.user_creds_id
self.assertIsNotNone(user_creds_id)
user_creds = ucreds_object.UserCreds.get_by_id(self.ctx, user_creds_id)
self.assertEqual(self.ctx.username, user_creds.get('username'))
self.assertEqual(self.ctx.password, user_creds.get('password'))
self.assertIsNone(user_creds.get('trust_id'))
self.assertIsNone(user_creds.get('trustor_user_id'))
expected_context = context.RequestContext.from_dict(self.ctx.to_dict())
expected_context.auth_token = None
stored_context = self.stack.stored_context().to_dict()
self.assertEqual(expected_context.to_dict(), stored_context)
self.stack.store()
self.assertEqual(user_creds_id, db_stack.user_creds_id)
| -9,213,545,049,745,668,000
|
A user_creds entry is created on first stack store.
|
heat/tests/test_stack.py
|
test_store_saves_creds
|
openstack/heat
|
python
|
def test_store_saves_creds(self):
cfg.CONF.set_default('deferred_auth_method', 'password')
self.stack = stack.Stack(self.ctx, 'creds_stack', self.tmpl)
self.stack.store()
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
user_creds_id = db_stack.user_creds_id
self.assertIsNotNone(user_creds_id)
user_creds = ucreds_object.UserCreds.get_by_id(self.ctx, user_creds_id)
self.assertEqual(self.ctx.username, user_creds.get('username'))
self.assertEqual(self.ctx.password, user_creds.get('password'))
self.assertIsNone(user_creds.get('trust_id'))
self.assertIsNone(user_creds.get('trustor_user_id'))
expected_context = context.RequestContext.from_dict(self.ctx.to_dict())
expected_context.auth_token = None
stored_context = self.stack.stored_context().to_dict()
self.assertEqual(expected_context.to_dict(), stored_context)
self.stack.store()
self.assertEqual(user_creds_id, db_stack.user_creds_id)
|
def test_store_saves_creds_trust(self):
'A user_creds entry is created on first stack store.'
cfg.CONF.set_override('deferred_auth_method', 'trusts')
self.patchobject(keystone.KeystoneClientPlugin, '_create', return_value=fake_ks.FakeKeystoneClient(user_id='auser123'))
self.stack = stack.Stack(self.ctx, 'creds_stack', self.tmpl)
self.stack.store()
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
user_creds_id = db_stack.user_creds_id
self.assertIsNotNone(user_creds_id)
user_creds = ucreds_object.UserCreds.get_by_id(self.ctx, user_creds_id)
self.assertIsNone(user_creds.get('username'))
self.assertIsNone(user_creds.get('password'))
self.assertEqual('atrust', user_creds.get('trust_id'))
self.assertEqual('auser123', user_creds.get('trustor_user_id'))
auth = self.patchobject(context.RequestContext, 'trusts_auth_plugin')
self.patchobject(auth, 'get_access', return_value=fakes.FakeAccessInfo([], None, None))
expected_context = context.RequestContext(trust_id='atrust', trustor_user_id='auser123', request_id=self.ctx.request_id, is_admin=False).to_dict()
stored_context = self.stack.stored_context().to_dict()
self.assertEqual(expected_context, stored_context)
self.stack.store()
self.assertEqual(user_creds_id, db_stack.user_creds_id)
keystone.KeystoneClientPlugin._create.assert_called_with()
| -2,844,117,463,037,988,400
|
A user_creds entry is created on first stack store.
|
heat/tests/test_stack.py
|
test_store_saves_creds_trust
|
openstack/heat
|
python
|
def test_store_saves_creds_trust(self):
cfg.CONF.set_override('deferred_auth_method', 'trusts')
self.patchobject(keystone.KeystoneClientPlugin, '_create', return_value=fake_ks.FakeKeystoneClient(user_id='auser123'))
self.stack = stack.Stack(self.ctx, 'creds_stack', self.tmpl)
self.stack.store()
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
user_creds_id = db_stack.user_creds_id
self.assertIsNotNone(user_creds_id)
user_creds = ucreds_object.UserCreds.get_by_id(self.ctx, user_creds_id)
self.assertIsNone(user_creds.get('username'))
self.assertIsNone(user_creds.get('password'))
self.assertEqual('atrust', user_creds.get('trust_id'))
self.assertEqual('auser123', user_creds.get('trustor_user_id'))
auth = self.patchobject(context.RequestContext, 'trusts_auth_plugin')
self.patchobject(auth, 'get_access', return_value=fakes.FakeAccessInfo([], None, None))
expected_context = context.RequestContext(trust_id='atrust', trustor_user_id='auser123', request_id=self.ctx.request_id, is_admin=False).to_dict()
stored_context = self.stack.stored_context().to_dict()
self.assertEqual(expected_context, stored_context)
self.stack.store()
self.assertEqual(user_creds_id, db_stack.user_creds_id)
keystone.KeystoneClientPlugin._create.assert_called_with()
|
def test_stored_context_err(self):
'Test stored_context error path.'
self.stack = stack.Stack(self.ctx, 'creds_stack', self.tmpl)
ex = self.assertRaises(exception.Error, self.stack.stored_context)
expected_err = 'Attempt to use stored_context with no user_creds'
self.assertEqual(expected_err, str(ex))
| 4,702,206,411,824,754,000
|
Test stored_context error path.
|
heat/tests/test_stack.py
|
test_stored_context_err
|
openstack/heat
|
python
|
def test_stored_context_err(self):
self.stack = stack.Stack(self.ctx, 'creds_stack', self.tmpl)
ex = self.assertRaises(exception.Error, self.stack.stored_context)
expected_err = 'Attempt to use stored_context with no user_creds'
self.assertEqual(expected_err, str(ex))
|
def test_load_honors_owner(self):
'Loading a stack from the database will set the owner_id.\n\n Loading a stack from the database will set the owner_id of the\n resultant stack appropriately.\n '
self.stack = stack.Stack(self.ctx, 'owner_stack', self.tmpl)
stack_ownee = stack.Stack(self.ctx, 'ownee_stack', self.tmpl, owner_id=self.stack.id)
stack_ownee.store()
saved_stack = stack.Stack.load(self.ctx, stack_id=stack_ownee.id)
self.assertEqual(self.stack.id, saved_stack.owner_id)
| 7,915,637,699,835,126,000
|
Loading a stack from the database will set the owner_id.
Loading a stack from the database will set the owner_id of the
resultant stack appropriately.
|
heat/tests/test_stack.py
|
test_load_honors_owner
|
openstack/heat
|
python
|
def test_load_honors_owner(self):
'Loading a stack from the database will set the owner_id.\n\n Loading a stack from the database will set the owner_id of the\n resultant stack appropriately.\n '
self.stack = stack.Stack(self.ctx, 'owner_stack', self.tmpl)
stack_ownee = stack.Stack(self.ctx, 'ownee_stack', self.tmpl, owner_id=self.stack.id)
stack_ownee.store()
saved_stack = stack.Stack.load(self.ctx, stack_id=stack_ownee.id)
self.assertEqual(self.stack.id, saved_stack.owner_id)
|
def test_stack_load_no_param_value_validation(self):
'Test stack loading with disabled parameter value validation.'
tmpl = template_format.parse('\n heat_template_version: 2013-05-23\n parameters:\n flavor:\n type: string\n description: A flavor.\n constraints:\n - custom_constraint: nova.flavor\n resources:\n a_resource:\n type: GenericResourceType\n ')
fc = fakes.FakeClient()
self.patchobject(nova.NovaClientPlugin, 'client', return_value=fc)
fc.flavors = mock.Mock()
flavor = collections.namedtuple('Flavor', ['id', 'name'])
flavor.id = '1234'
flavor.name = 'dummy'
fc.flavors.get.return_value = flavor
test_env = environment.Environment({'flavor': '1234'})
self.stack = stack.Stack(self.ctx, 'stack_with_custom_constraint', template.Template(tmpl, env=test_env))
self.stack.validate()
self.stack.store()
self.stack.create()
stack_id = self.stack.id
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE), self.stack.state)
loaded_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id)
self.assertEqual(stack_id, loaded_stack.parameters['OS::stack_id'])
fc.flavors.get.assert_called_once_with('1234')
| 3,320,778,543,385,687,000
|
Test stack loading with disabled parameter value validation.
|
heat/tests/test_stack.py
|
test_stack_load_no_param_value_validation
|
openstack/heat
|
python
|
def test_stack_load_no_param_value_validation(self):
tmpl = template_format.parse('\n heat_template_version: 2013-05-23\n parameters:\n flavor:\n type: string\n description: A flavor.\n constraints:\n - custom_constraint: nova.flavor\n resources:\n a_resource:\n type: GenericResourceType\n ')
fc = fakes.FakeClient()
self.patchobject(nova.NovaClientPlugin, 'client', return_value=fc)
fc.flavors = mock.Mock()
flavor = collections.namedtuple('Flavor', ['id', 'name'])
flavor.id = '1234'
flavor.name = 'dummy'
fc.flavors.get.return_value = flavor
test_env = environment.Environment({'flavor': '1234'})
self.stack = stack.Stack(self.ctx, 'stack_with_custom_constraint', template.Template(tmpl, env=test_env))
self.stack.validate()
self.stack.store()
self.stack.create()
stack_id = self.stack.id
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE), self.stack.state)
loaded_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id)
self.assertEqual(stack_id, loaded_stack.parameters['OS::stack_id'])
fc.flavors.get.assert_called_once_with('1234')
|
def test_encrypt_parameters_false_parameters_stored_plaintext(self):
'Test stack loading with disabled parameter value validation.'
tmpl = template_format.parse('\n heat_template_version: 2013-05-23\n parameters:\n param1:\n type: string\n description: value1.\n param2:\n type: string\n description: value2.\n hidden: true\n resources:\n a_resource:\n type: GenericResourceType\n ')
env1 = environment.Environment({'param1': 'foo', 'param2': 'bar'})
self.stack = stack.Stack(self.ctx, 'test', template.Template(tmpl, env=env1))
cfg.CONF.set_override('encrypt_parameters_and_properties', False)
self.stack.store()
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
params = db_stack.raw_template.environment['parameters']
self.assertEqual('foo', params['param1'])
self.assertEqual('bar', params['param2'])
| 1,876,205,515,915,799,000
|
Test stack loading with disabled parameter value validation.
|
heat/tests/test_stack.py
|
test_encrypt_parameters_false_parameters_stored_plaintext
|
openstack/heat
|
python
|
def test_encrypt_parameters_false_parameters_stored_plaintext(self):
tmpl = template_format.parse('\n heat_template_version: 2013-05-23\n parameters:\n param1:\n type: string\n description: value1.\n param2:\n type: string\n description: value2.\n hidden: true\n resources:\n a_resource:\n type: GenericResourceType\n ')
env1 = environment.Environment({'param1': 'foo', 'param2': 'bar'})
self.stack = stack.Stack(self.ctx, 'test', template.Template(tmpl, env=env1))
cfg.CONF.set_override('encrypt_parameters_and_properties', False)
self.stack.store()
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
params = db_stack.raw_template.environment['parameters']
self.assertEqual('foo', params['param1'])
self.assertEqual('bar', params['param2'])
|
def test_parameters_stored_encrypted_decrypted_on_load(self):
'Test stack loading with disabled parameter value validation.'
tmpl = template_format.parse('\n heat_template_version: 2013-05-23\n parameters:\n param1:\n type: string\n description: value1.\n param2:\n type: string\n description: value2.\n hidden: true\n resources:\n a_resource:\n type: GenericResourceType\n ')
env1 = environment.Environment({'param1': 'foo', 'param2': 'bar'})
self.stack = stack.Stack(self.ctx, 'test', template.Template(tmpl, env=env1))
cfg.CONF.set_override('encrypt_parameters_and_properties', True)
self.stack.store()
db_tpl = db_api.raw_template_get(self.ctx, self.stack.t.id)
db_params = db_tpl.environment['parameters']
self.assertEqual('foo', db_params['param1'])
self.assertEqual('cryptography_decrypt_v1', db_params['param2'][0])
self.assertIsNotNone(db_params['param2'][1])
loaded_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id)
params = loaded_stack.t.env.params
self.assertEqual('foo', params.get('param1'))
self.assertEqual('bar', params.get('param2'))
loaded_stack.state_set(self.stack.CREATE, self.stack.COMPLETE, 'for_update')
env2 = environment.Environment({'param1': 'foo', 'param2': 'new_bar'})
new_stack = stack.Stack(self.ctx, 'test_update', template.Template(tmpl, env=env2))
loaded_stack.update(new_stack)
self.assertEqual((loaded_stack.UPDATE, loaded_stack.COMPLETE), loaded_stack.state)
db_tpl = db_api.raw_template_get(self.ctx, loaded_stack.t.id)
db_params = db_tpl.environment['parameters']
self.assertEqual('foo', db_params['param1'])
self.assertEqual('cryptography_decrypt_v1', db_params['param2'][0])
self.assertIsNotNone(db_params['param2'][1])
loaded_stack1 = stack.Stack.load(self.ctx, stack_id=self.stack.id)
params = loaded_stack1.t.env.params
self.assertEqual('foo', params.get('param1'))
self.assertEqual('new_bar', params.get('param2'))
| -1,018,863,454,073,504,000
|
Test stack loading with disabled parameter value validation.
|
heat/tests/test_stack.py
|
test_parameters_stored_encrypted_decrypted_on_load
|
openstack/heat
|
python
|
def test_parameters_stored_encrypted_decrypted_on_load(self):
tmpl = template_format.parse('\n heat_template_version: 2013-05-23\n parameters:\n param1:\n type: string\n description: value1.\n param2:\n type: string\n description: value2.\n hidden: true\n resources:\n a_resource:\n type: GenericResourceType\n ')
env1 = environment.Environment({'param1': 'foo', 'param2': 'bar'})
self.stack = stack.Stack(self.ctx, 'test', template.Template(tmpl, env=env1))
cfg.CONF.set_override('encrypt_parameters_and_properties', True)
self.stack.store()
db_tpl = db_api.raw_template_get(self.ctx, self.stack.t.id)
db_params = db_tpl.environment['parameters']
self.assertEqual('foo', db_params['param1'])
self.assertEqual('cryptography_decrypt_v1', db_params['param2'][0])
self.assertIsNotNone(db_params['param2'][1])
loaded_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id)
params = loaded_stack.t.env.params
self.assertEqual('foo', params.get('param1'))
self.assertEqual('bar', params.get('param2'))
loaded_stack.state_set(self.stack.CREATE, self.stack.COMPLETE, 'for_update')
env2 = environment.Environment({'param1': 'foo', 'param2': 'new_bar'})
new_stack = stack.Stack(self.ctx, 'test_update', template.Template(tmpl, env=env2))
loaded_stack.update(new_stack)
self.assertEqual((loaded_stack.UPDATE, loaded_stack.COMPLETE), loaded_stack.state)
db_tpl = db_api.raw_template_get(self.ctx, loaded_stack.t.id)
db_params = db_tpl.environment['parameters']
self.assertEqual('foo', db_params['param1'])
self.assertEqual('cryptography_decrypt_v1', db_params['param2'][0])
self.assertIsNotNone(db_params['param2'][1])
loaded_stack1 = stack.Stack.load(self.ctx, stack_id=self.stack.id)
params = loaded_stack1.t.env.params
self.assertEqual('foo', params.get('param1'))
self.assertEqual('new_bar', params.get('param2'))
|
def test_parameters_created_encrypted_updated_decrypted(self):
'Test stack loading with disabled parameter value validation.'
tmpl = template_format.parse('\n heat_template_version: 2013-05-23\n parameters:\n param1:\n type: string\n description: value1.\n param2:\n type: string\n description: value2.\n hidden: true\n resources:\n a_resource:\n type: GenericResourceType\n ')
cfg.CONF.set_override('encrypt_parameters_and_properties', True)
env1 = environment.Environment({'param1': 'foo', 'param2': 'bar'})
self.stack = stack.Stack(self.ctx, 'test', template.Template(tmpl, env=env1))
self.stack.store()
cfg.CONF.set_override('encrypt_parameters_and_properties', False)
loaded_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id)
loaded_stack.state_set(self.stack.CREATE, self.stack.COMPLETE, 'for_update')
env2 = environment.Environment({'param1': 'foo', 'param2': 'new_bar'})
new_stack = stack.Stack(self.ctx, 'test_update', template.Template(tmpl, env=env2))
self.assertEqual(['param2'], loaded_stack.env.encrypted_param_names)
loaded_stack.update(new_stack)
self.assertEqual([], loaded_stack.env.encrypted_param_names)
| -1,445,745,818,561,343,700
|
Test stack loading with disabled parameter value validation.
|
heat/tests/test_stack.py
|
test_parameters_created_encrypted_updated_decrypted
|
openstack/heat
|
python
|
def test_parameters_created_encrypted_updated_decrypted(self):
tmpl = template_format.parse('\n heat_template_version: 2013-05-23\n parameters:\n param1:\n type: string\n description: value1.\n param2:\n type: string\n description: value2.\n hidden: true\n resources:\n a_resource:\n type: GenericResourceType\n ')
cfg.CONF.set_override('encrypt_parameters_and_properties', True)
env1 = environment.Environment({'param1': 'foo', 'param2': 'bar'})
self.stack = stack.Stack(self.ctx, 'test', template.Template(tmpl, env=env1))
self.stack.store()
cfg.CONF.set_override('encrypt_parameters_and_properties', False)
loaded_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id)
loaded_stack.state_set(self.stack.CREATE, self.stack.COMPLETE, 'for_update')
env2 = environment.Environment({'param1': 'foo', 'param2': 'new_bar'})
new_stack = stack.Stack(self.ctx, 'test_update', template.Template(tmpl, env=env2))
self.assertEqual(['param2'], loaded_stack.env.encrypted_param_names)
loaded_stack.update(new_stack)
self.assertEqual([], loaded_stack.env.encrypted_param_names)
|
def test_parameters_stored_decrypted_successful_load(self):
'Test stack loading with disabled parameter value validation.'
tmpl = template_format.parse('\n heat_template_version: 2013-05-23\n parameters:\n param1:\n type: string\n description: value1.\n param2:\n type: string\n description: value2.\n hidden: true\n resources:\n a_resource:\n type: GenericResourceType\n ')
env1 = environment.Environment({'param1': 'foo', 'param2': 'bar'})
self.stack = stack.Stack(self.ctx, 'test', template.Template(tmpl, env=env1))
cfg.CONF.set_override('encrypt_parameters_and_properties', False)
self.stack.store()
db_tpl = db_api.raw_template_get(self.ctx, self.stack.t.id)
db_params = db_tpl.environment['parameters']
self.assertEqual('foo', db_params['param1'])
self.assertEqual('bar', db_params['param2'])
loaded_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id)
params = loaded_stack.t.env.params
self.assertEqual('foo', params.get('param1'))
self.assertEqual('bar', params.get('param2'))
| 8,764,875,422,488,754,000
|
Test stack loading with disabled parameter value validation.
|
heat/tests/test_stack.py
|
test_parameters_stored_decrypted_successful_load
|
openstack/heat
|
python
|
def test_parameters_stored_decrypted_successful_load(self):
tmpl = template_format.parse('\n heat_template_version: 2013-05-23\n parameters:\n param1:\n type: string\n description: value1.\n param2:\n type: string\n description: value2.\n hidden: true\n resources:\n a_resource:\n type: GenericResourceType\n ')
env1 = environment.Environment({'param1': 'foo', 'param2': 'bar'})
self.stack = stack.Stack(self.ctx, 'test', template.Template(tmpl, env=env1))
cfg.CONF.set_override('encrypt_parameters_and_properties', False)
self.stack.store()
db_tpl = db_api.raw_template_get(self.ctx, self.stack.t.id)
db_params = db_tpl.environment['parameters']
self.assertEqual('foo', db_params['param1'])
self.assertEqual('bar', db_params['param2'])
loaded_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id)
params = loaded_stack.t.env.params
self.assertEqual('foo', params.get('param1'))
self.assertEqual('bar', params.get('param2'))
|
def serve_paste(app, global_conf, **kw):
'pserve / paster serve / waitress replacement / integration\n\n You can pass as parameters:\n\n transports = websockets, xhr-multipart, xhr-longpolling, etc...\n policy_server = True\n '
serve(app, **kw)
return 0
| -5,353,821,925,766,461,000
|
pserve / paster serve / waitress replacement / integration
You can pass as parameters:
transports = websockets, xhr-multipart, xhr-longpolling, etc...
policy_server = True
|
socketio/server.py
|
serve_paste
|
jykim16/gevent-socketio
|
python
|
def serve_paste(app, global_conf, **kw):
'pserve / paster serve / waitress replacement / integration\n\n You can pass as parameters:\n\n transports = websockets, xhr-multipart, xhr-longpolling, etc...\n policy_server = True\n '
serve(app, **kw)
return 0
|
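A hedged usage sketch for serve_paste above: the WSGI app is a placeholder, and host/port are assumed to be accepted by serve() the same way a PasteDeploy ini section would supply them; only transports and policy_server are documented in the docstring.

def app(environ, start_response):
    # Placeholder WSGI application.
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello']

# global_conf is unused here; all remaining kwargs are forwarded to serve().
serve_paste(app, {}, host='0.0.0.0', port=6543,
            transports=['websocket', 'xhr-polling'], policy_server=False)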
def __init__(self, *args, **kwargs):
'This is just like the standard WSGIServer __init__, except with a\n few additional ``kwargs``:\n\n :param resource: The URL which has to be identified as a\n socket.io request. Defaults to the /socket.io/ URL.\n\n :param transports: Optional list of transports to allow. List of\n strings, each string should be one of\n handler.SocketIOHandler.handler_types.\n\n :param policy_server: Boolean describing whether or not to use the\n Flash policy server. Default True.\n\n :param policy_listener: A tuple containing (host, port) for the\n policy server. This is optional and used only if policy server\n is set to true. The default value is 0.0.0.0:843\n\n :param heartbeat_interval: int The timeout for the server, we\n should receive a heartbeat from the client within this\n interval. This should be less than the\n ``heartbeat_timeout``.\n\n :param heartbeat_timeout: int The timeout for the client when\n it should send a new heartbeat to the server. This value\n is sent to the client after a successful handshake.\n\n :param close_timeout: int The timeout for the client, when it\n closes the connection it still X amounts of seconds to do\n re open of the connection. This value is sent to the\n client after a successful handshake.\n\n :param log_file: str The file in which you want the PyWSGI\n server to write its access log. If not specified, it\n is sent to `stderr` (with gevent 0.13).\n\n '
self.sockets = {}
if ('namespace' in kwargs):
print('DEPRECATION WARNING: use resource instead of namespace')
self.resource = kwargs.pop('namespace', 'socket.io')
else:
self.resource = kwargs.pop('resource', 'socket.io')
self.transports = kwargs.pop('transports', None)
if kwargs.pop('policy_server', True):
wsock = args[0]
try:
(address, port) = wsock.getsockname()
except AttributeError:
try:
address = wsock[0]
except TypeError:
try:
address = wsock.address[0]
except AttributeError:
address = wsock.cfg_addr[0]
policylistener = kwargs.pop('policy_listener', (address, 10843))
self.policy_server = FlashPolicyServer(policylistener)
else:
self.policy_server = None
self.config = {'heartbeat_timeout': 60, 'close_timeout': 60, 'heartbeat_interval': 25}
for f in ('heartbeat_timeout', 'heartbeat_interval', 'close_timeout'):
if (f in kwargs):
self.config[f] = int(kwargs.pop(f))
if (not ('handler_class' in kwargs)):
kwargs['handler_class'] = SocketIOHandler
if (not ('ws_handler_class' in kwargs)):
self.ws_handler_class = WebSocketHandler
else:
self.ws_handler_class = kwargs.pop('ws_handler_class')
log_file = kwargs.pop('log_file', None)
if log_file:
kwargs['log'] = open(log_file, 'a')
super(SocketIOServer, self).__init__(*args, **kwargs)
| 2,082,469,375,877,520,000
|
This is just like the standard WSGIServer __init__, except with a
few additional ``kwargs``:
:param resource: The URL which has to be identified as a
socket.io request. Defaults to the /socket.io/ URL.
:param transports: Optional list of transports to allow. List of
strings, each string should be one of
handler.SocketIOHandler.handler_types.
:param policy_server: Boolean describing whether or not to use the
Flash policy server. Default True.
:param policy_listener: A tuple containing (host, port) for the
policy server. This is optional and used only if policy server
is set to true. The default value is 0.0.0.0:843
:param heartbeat_interval: int The timeout for the server, we
should receive a heartbeat from the client within this
interval. This should be less than the
``heartbeat_timeout``.
:param heartbeat_timeout: int The timeout for the client when
it should send a new heartbeat to the server. This value
is sent to the client after a successful handshake.
:param close_timeout: int The timeout for the client, when it
closes the connection it still X amounts of seconds to do
re open of the connection. This value is sent to the
client after a successful handshake.
:param log_file: str The file in which you want the PyWSGI
server to write its access log. If not specified, it
is sent to `stderr` (with gevent 0.13).
|
socketio/server.py
|
__init__
|
jykim16/gevent-socketio
|
python
|
def __init__(self, *args, **kwargs):
'This is just like the standard WSGIServer __init__, except with a\n few additional ``kwargs``:\n\n :param resource: The URL which has to be identified as a\n socket.io request. Defaults to the /socket.io/ URL.\n\n :param transports: Optional list of transports to allow. List of\n strings, each string should be one of\n handler.SocketIOHandler.handler_types.\n\n :param policy_server: Boolean describing whether or not to use the\n Flash policy server. Default True.\n\n :param policy_listener: A tuple containing (host, port) for the\n policy server. This is optional and used only if policy server\n is set to true. The default value is 0.0.0.0:843\n\n :param heartbeat_interval: int The timeout for the server, we\n should receive a heartbeat from the client within this\n interval. This should be less than the\n ``heartbeat_timeout``.\n\n :param heartbeat_timeout: int The timeout for the client when\n it should send a new heartbeat to the server. This value\n is sent to the client after a successful handshake.\n\n :param close_timeout: int The timeout for the client, when it\n closes the connection it still X amounts of seconds to do\n re open of the connection. This value is sent to the\n client after a successful handshake.\n\n :param log_file: str The file in which you want the PyWSGI\n server to write its access log. If not specified, it\n is sent to `stderr` (with gevent 0.13).\n\n '
self.sockets = {}
if ('namespace' in kwargs):
print('DEPRECATION WARNING: use resource instead of namespace')
self.resource = kwargs.pop('namespace', 'socket.io')
else:
self.resource = kwargs.pop('resource', 'socket.io')
self.transports = kwargs.pop('transports', None)
if kwargs.pop('policy_server', True):
wsock = args[0]
try:
(address, port) = wsock.getsockname()
except AttributeError:
try:
address = wsock[0]
except TypeError:
try:
address = wsock.address[0]
except AttributeError:
address = wsock.cfg_addr[0]
policylistener = kwargs.pop('policy_listener', (address, 10843))
self.policy_server = FlashPolicyServer(policylistener)
else:
self.policy_server = None
self.config = {'heartbeat_timeout': 60, 'close_timeout': 60, 'heartbeat_interval': 25}
for f in ('heartbeat_timeout', 'heartbeat_interval', 'close_timeout'):
if (f in kwargs):
self.config[f] = int(kwargs.pop(f))
if (not ('handler_class' in kwargs)):
kwargs['handler_class'] = SocketIOHandler
if (not ('ws_handler_class' in kwargs)):
self.ws_handler_class = WebSocketHandler
else:
self.ws_handler_class = kwargs.pop('ws_handler_class')
log_file = kwargs.pop('log_file', None)
if log_file:
kwargs['log'] = open(log_file, 'a')
super(SocketIOServer, self).__init__(*args, **kwargs)
|
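A hedged instantiation sketch using the kwargs documented in the SocketIOServer constructor above; the WSGI application and bind address are placeholders.

from socketio.server import SocketIOServer

def app(environ, start_response):
    # Placeholder WSGI application.
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'ok']

server = SocketIOServer(
    ('0.0.0.0', 8080), app,
    resource='socket.io',                     # URL prefix for socket.io traffic
    transports=['websocket', 'xhr-polling'],  # restrict allowed transports
    policy_server=False,                      # skip the Flash policy server
    heartbeat_interval=25, heartbeat_timeout=60, close_timeout=60,
)
server.serve_forever()  # inherited from gevent's WSGIServer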
def get_socket(self, sessid=''):
'Return an existing or new client Socket.'
socket = self.sockets.get(sessid)
if (sessid and (not socket)):
return None
if (socket is None):
socket = Socket(self, self.config)
self.sockets[socket.sessid] = socket
else:
socket.incr_hits()
return socket
| 537,170,999,850,106,900
|
Return an existing or new client Socket.
|
socketio/server.py
|
get_socket
|
jykim16/gevent-socketio
|
python
|
def get_socket(self, sessid=''):
socket = self.sockets.get(sessid)
if (sessid and (not socket)):
return None
if (socket is None):
socket = Socket(self, self.config)
self.sockets[socket.sessid] = socket
else:
socket.incr_hits()
return socket
|