hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
237cf432cb2a9b8f4203ddc700aff3c83f06d064 | 1,302 | py | Python | deploy/run.py | dato-code/Strata-Now | 995ed8851275996ea82131bf6e3d05131c620f60 | [
"BSD-3-Clause"
] | 4 | 2015-05-05T16:05:35.000Z | 2016-05-19T21:02:25.000Z | deploy/run.py | turi-code/Strata-Now | 995ed8851275996ea82131bf6e3d05131c620f60 | [
"BSD-3-Clause"
] | 1 | 2015-10-07T18:59:39.000Z | 2015-10-07T18:59:39.000Z | deploy/run.py | dato-code/Strata-Now | 995ed8851275996ea82131bf6e3d05131c620f60 | [
"BSD-3-Clause"
] | 6 | 2015-05-05T02:11:07.000Z | 2016-05-16T19:24:22.000Z | import graphlab as gl
from models import *
path = "s3://gl-demo-usw2/predictive_service/demolab/ps-1.6"
ps = gl.deploy.predictive_service.load(path)
# Define dependencies
state = {'details_filename': '../data/talks.json',
'speakers_filename': '../data/speakers.json',
'details_sf': '../data/talks.gl',
'speakers_sf': '../data/speakers.gl'}
# Data carpentry
details = parse_details(state['details_sf'])
speakers_dict, speakers = parse_speakers(state['speakers_sf'])
details = clean_timing(details)
details, talks_per_speaker = join_speaker_data_into_details(details, speakers)
details_dict, trimmed = create_details_dict(details)
# Create nearest neighbor model and get nearest items
nn_model, nearest = build_nn_model(details)
# Deploy models as a predictive service
upload_list_page(ps, trimmed)
upload_speaker(ps, talks_per_speaker)
upload_item_sim(ps, details, nn_model, nearest)
#########################################################
# Ad hoc testing
# Via Python client
print ps.query('stratanow_item_sim', input={'item_ids': ['43169'], 'how_many':5})
# Via Curl
# !curl -X POST -d '{"api_key": "b9b8dd75-a6d3-4903-b6a7-2dc691d060d8", "data":{"input": {"item_ids":["43750"], "how_many": 5}}}' stratanow-175425062.us-west-2.elb.amazonaws.com/data/item_sim
| 33.384615 | 191 | 0.704301 | import graphlab as gl
from models import *
path = "s3://gl-demo-usw2/predictive_service/demolab/ps-1.6"
ps = gl.deploy.predictive_service.load(path)
# Define dependencies
state = {'details_filename': '../data/talks.json',
'speakers_filename': '../data/speakers.json',
'details_sf': '../data/talks.gl',
'speakers_sf': '../data/speakers.gl'}
# Data carpentry
details = parse_details(state['details_sf'])
speakers_dict, speakers = parse_speakers(state['speakers_sf'])
details = clean_timing(details)
details, talks_per_speaker = join_speaker_data_into_details(details, speakers)
details_dict, trimmed = create_details_dict(details)
# Create nearest neighbor model and get nearest items
nn_model, nearest = build_nn_model(details)
# Deploy models as a predictive service
upload_list_page(ps, trimmed)
upload_speaker(ps, talks_per_speaker)
upload_item_sim(ps, details, nn_model, nearest)
#########################################################
# Ad hoc testing
# Via Python client
print ps.query('stratanow_item_sim', input={'item_ids': ['43169'], 'how_many':5})
# Via Curl
# !curl -X POST -d '{"api_key": "b9b8dd75-a6d3-4903-b6a7-2dc691d060d8", "data":{"input": {"item_ids":["43750"], "how_many": 5}}}' stratanow-175425062.us-west-2.elb.amazonaws.com/data/item_sim
| 0 | 0 | 0 |
602796473f89d9acc225140c9078f5ec9c51d423 | 7,892 | py | Python | fuzzylogic.py | bydzen/fuzzy-logic-sc | ccbff1a34bb19048f540244aafd38197cc12318d | [
"MIT"
] | 1 | 2021-08-22T13:52:52.000Z | 2021-08-22T13:52:52.000Z | fuzzylogic.py | bydzen/fuzzy-logic-sc | ccbff1a34bb19048f540244aafd38197cc12318d | [
"MIT"
] | null | null | null | fuzzylogic.py | bydzen/fuzzy-logic-sc | ccbff1a34bb19048f540244aafd38197cc12318d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import xlwt
#!gdown --id 1bb2irg5nFZhoFkpjWPQHJPBBr8FiK8l7
dataRestoran = pd.read_excel('restoran.xlsx')
print(dataRestoran)
# akan menghasilkan nilai kelayakan yang lebih bervariasi
titikPelayanan = [25, 37, 58, 65, 78, 89, 101]
titikMakanan = [3, 5, 8, 11]
grafikPelayanan()
grafikMakanan()
# print(fuzzificationPelayanan(dataRestoran))
# print(fuzzificationMakanan(dataRestoran))
dataFuzzyPelayanan = fuzzificationPelayanan(dataRestoran)
dataFuzzyMakanan = fuzzificationMakanan(dataRestoran)
# print(inference(dataFuzzyPelayanan, dataFuzzyMakanan))
arrx = [0, 30, 60, 99]
arry = [0, 1, 1, 1]
fig, ax = plt.subplots(nrows=1, figsize=(10, 4))
plt.xticks([30, 60, 99])
plt.yticks([0, 1])
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.xlabel("Nilai Kelayakan skala [0,100]")
plt.ylabel("u")
plt.margins(y=0.17)
plt.title("FK singleton untuk Nilai Kelayakan")
plt.bar(arrx, arry, color=['red', 'red', 'orange', 'green'], width=[
0.4, 0.4, 0.4, 0.4], label="Runtime CycleSort")
rects = ax.patches
labels = ["", "rendah", "sedang", "tinggi"]
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2, height+0.0088, label,
ha='center', va='bottom')
plt.show()
dataFuzzyRules = inference(dataFuzzyPelayanan, dataFuzzyMakanan)
hasilDefuzz = defuzzyfication(dataFuzzyRules)
dataRestoran["Result"] = hasilDefuzz
hasilAkhir = dataRestoran.sort_values(by="Result", ascending=False)[:10]
hasilAkhir
print("\nHasil Akhir:\n", hasilAkhir)
# Write Peringkat ke file xls.
# peringkat = xlwt.Workbook()
# ws = peringkat.add_sheet('Output')
# ws.write(0, 0, 'Record id')
# i = 1
# for j in hasilAkhir["id"]:
# ws.write(i, 0, j)
# i += 1
# peringkat.save('peringkat.xls')
| 30.94902 | 86 | 0.593512 | # -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import xlwt
#!gdown --id 1bb2irg5nFZhoFkpjWPQHJPBBr8FiK8l7
dataRestoran = pd.read_excel('restoran.xlsx')
print(dataRestoran)
# akan menghasilkan nilai kelayakan yang lebih bervariasi
titikPelayanan = [25, 37, 58, 65, 78, 89, 101]
titikMakanan = [3, 5, 8, 11]
def trapesium(x, a, b, c, d):
    """Trapezoidal membership degree of x for corner points a <= b <= c <= d."""
    if x <= a or x >= d:
        return 0
    if a < x < b:
        # rising edge
        return (x-a)/(b-a)
    if b <= x < c:
        # plateau
        return 1
    if c <= x < d:
        # falling edge
        return -(x-d)/(d-c)
    return 0
def segitiga(x, a, b, c):
    """Triangular membership degree of x with feet at a and c and peak at b."""
    if x <= a or x >= c:
        return 0
    # past the first guard, x is strictly inside (a, c)
    if x <= b:
        return (x-a)/(b-a)
    return -(x-c)/(c-b)
def keanggotaanPelayanan(x):
    """Fuzzify a service score into (kurang, cukup, bagus, sangatBagus) degrees."""
    t = titikPelayanan
    return (
        trapesium(x, -1, 0, t[0], t[1]),
        trapesium(x, t[0], t[1], t[2], t[3]),
        trapesium(x, t[2], t[3], t[4], t[5]),
        # right shoulder: c == d collapses the falling edge
        trapesium(x, t[4], t[5], t[6], t[6]),
    )
def keanggotaanMakanan(x):
    """Fuzzify a food score into (kurangEnak, enak, sangatEnak) degrees."""
    t = titikMakanan
    return (
        trapesium(x, -1, 0, t[0], t[1]),
        segitiga(x, t[0], t[1], t[2]),
        # right shoulder: c == d collapses the falling edge
        trapesium(x, t[1], t[2], t[3], t[3]),
    )
def grafikPelayanan():
    """Plot the four service ('pelayanan') membership functions over 0..100."""
    x_axis = np.arange(0, 101, 1)
    # Evaluation bound = last breakpoint (101), so list lengths match x_axis.
    len_x = titikPelayanan[len(titikPelayanan)-1]
    fig, ax = plt.subplots(nrows=1, figsize=(15, 5))
    kurang, cukup, bagus, sangatBagus = [], [], [], []
    # Sample each membership function at every integer score.
    for x in range(len_x):
        kurang.append(keanggotaanPelayanan(x)[0])
        cukup.append(keanggotaanPelayanan(x)[1])
        bagus.append(keanggotaanPelayanan(x)[2])
        sangatBagus.append(keanggotaanPelayanan(x)[3])
    ax.plot(x_axis, kurang, 'g', linewidth=1.5, label='kurang')
    ax.plot(x_axis, cukup, 'r', linewidth=1.5, label='cukup')
    ax.plot(x_axis, bagus, 'b', linewidth=1.5, label='bagus')
    ax.plot(x_axis, sangatBagus, 'c', linewidth=1.5, label='sangat bagus')
    ax.set_title("Pelayanan")
    ax.legend()
    # Cosmetic: hide the top/right box and keep ticks on the outer edges.
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    plt.tight_layout()
    plt.show()
def grafikMakanan():
    """Plot the three food ('makanan') membership functions over 0..10."""
    x_axis = np.arange(0, 11, 1)
    # Evaluation bound = last breakpoint (11), so list lengths match x_axis.
    len_x = titikMakanan[len(titikMakanan)-1]
    fig, ax = plt.subplots(nrows=1, figsize=(15, 5))
    kurangEnak, enak, sangatEnak = [], [], []
    # Sample each membership function at every integer score.
    for x in range(len_x):
        kurangEnak.append(keanggotaanMakanan(x)[0])
        enak.append(keanggotaanMakanan(x)[1])
        sangatEnak.append(keanggotaanMakanan(x)[2])
    ax.plot(x_axis, kurangEnak, 'g', linewidth=1.5, label='kurang enak')
    ax.plot(x_axis, enak, 'r', linewidth=1.5, label='enak')
    ax.plot(x_axis, sangatEnak, 'b', linewidth=1.5, label='sangat enak')
    ax.set_title("Makanan")
    ax.legend()
    # Cosmetic: hide the top/right box and keep ticks on the outer edges.
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    plt.tight_layout()
    plt.show()
grafikPelayanan()
grafikMakanan()
def fuzzificationPelayanan(dataRestoran):
    """Fuzzify the 'pelayanan' score of every restaurant row."""
    kolom = dataRestoran["pelayanan"]
    return [keanggotaanPelayanan(kolom[row]) for row in range(len(dataRestoran))]
def fuzzificationMakanan(dataRestoran):
    """Fuzzify the 'makanan' score of every restaurant row."""
    kolom = dataRestoran["makanan"]
    return [keanggotaanMakanan(kolom[row]) for row in range(len(dataRestoran))]
# print(fuzzificationPelayanan(dataRestoran))
# print(fuzzificationMakanan(dataRestoran))
def inference(dataFuzzyPelayanan, dataFuzzyMakanan):
    """Apply the Mamdani rule base and return [rendah, sedang, tinggi] per restaurant.

    Each restaurant contributes a 4-tuple of service degrees and a 3-tuple of
    food degrees.  Every (service, food) pair fires one rule with strength
    min(service_degree, food_degree); rules firing into the same output class
    are combined with max.

    The original 12-branch if/elif cascade is replaced by an explicit rule
    table, which encodes exactly the same (j, k) -> class mapping.
    """
    # Rule base: (service index j, food index k) -> output class index
    # 0 = rendah, 1 = sedang, 2 = tinggi
    # j: 0 kurang, 1 cukup, 2 bagus, 3 sangat bagus; k: 0 kurang enak, 1 enak, 2 sangat enak
    rules = {
        (0, 0): 0, (0, 1): 0, (0, 2): 0,
        (1, 0): 0, (1, 1): 1, (1, 2): 1,
        (2, 0): 0, (2, 1): 1, (2, 2): 2,
        (3, 0): 1, (3, 1): 2, (3, 2): 2,
    }
    nilaiKelayakan = []
    for pelayanan, makanan in zip(dataFuzzyPelayanan, dataFuzzyMakanan):
        # One bucket of fired rule strengths per output class.
        strengths = ([], [], [])
        for j in range(len(pelayanan)):
            for k in range(len(makanan)):
                strengths[rules[(j, k)]].append(min(pelayanan[j], makanan[k]))
        nilaiKelayakan.append([max(strengths[0]), max(strengths[1]), max(strengths[2])])
    return nilaiKelayakan
dataFuzzyPelayanan = fuzzificationPelayanan(dataRestoran)
dataFuzzyMakanan = fuzzificationMakanan(dataRestoran)
# print(inference(dataFuzzyPelayanan, dataFuzzyMakanan))
def defuzzyfication(dataFuzzyRules):
    """Defuzzify rule strengths via weighted average over singletons (30, 60, 99)."""
    singletons = (30, 60, 99)
    hasilDefuzz = []
    for weights in dataFuzzyRules:
        numer = weights[0]*singletons[0] + weights[1]*singletons[1] + weights[2]*singletons[2]
        denom = weights[0] + weights[1] + weights[2]
        hasilDefuzz.append(numer / denom)
    return hasilDefuzz
arrx = [0, 30, 60, 99]
arry = [0, 1, 1, 1]
fig, ax = plt.subplots(nrows=1, figsize=(10, 4))
plt.xticks([30, 60, 99])
plt.yticks([0, 1])
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.xlabel("Nilai Kelayakan skala [0,100]")
plt.ylabel("u")
plt.margins(y=0.17)
plt.title("FK singleton untuk Nilai Kelayakan")
plt.bar(arrx, arry, color=['red', 'red', 'orange', 'green'], width=[
0.4, 0.4, 0.4, 0.4], label="Runtime CycleSort")
rects = ax.patches
labels = ["", "rendah", "sedang", "tinggi"]
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2, height+0.0088, label,
ha='center', va='bottom')
plt.show()
dataFuzzyRules = inference(dataFuzzyPelayanan, dataFuzzyMakanan)
hasilDefuzz = defuzzyfication(dataFuzzyRules)
dataRestoran["Result"] = hasilDefuzz
hasilAkhir = dataRestoran.sort_values(by="Result", ascending=False)[:10]
hasilAkhir
print("\nHasil Akhir:\n", hasilAkhir)
# Write Peringkat ke file xls.
# peringkat = xlwt.Workbook()
# ws = peringkat.add_sheet('Output')
# ws.write(0, 0, 'Record id')
# i = 1
# for j in hasilAkhir["id"]:
# ws.write(i, 0, j)
# i += 1
# peringkat.save('peringkat.xls')
| 5,765 | 0 | 230 |
deb1cc2b3769ab3932370a34f4aedc4d58995c77 | 866 | py | Python | src/circuitsascode/__init__.py | devbisme/circuitsascode | 497ac4650b0da8d65f4b22eceefe918b66f915b4 | [
"MIT"
] | 3 | 2021-08-28T15:02:53.000Z | 2022-01-17T21:43:31.000Z | src/circuitsascode/__init__.py | devbisme/circuitsascode | 497ac4650b0da8d65f4b22eceefe918b66f915b4 | [
"MIT"
] | null | null | null | src/circuitsascode/__init__.py | devbisme/circuitsascode | 497ac4650b0da8d65f4b22eceefe918b66f915b4 | [
"MIT"
] | null | null | null | # The MIT License (MIT) - Copyright (c) 2021 xesscorp
"""
Categorized collections of circuits.
"""
import sys
import pint
# Create a shortcut name for "circuitsascode".
sys.modules["casc"] = sys.modules["circuitsascode"]
# For electrical units like ohms, volts, etc.
units = pint.UnitRegistry()
if sys.version_info[:2] >= (3, 8):
# TODO: Import directly (no need for conditional) when `python_requires = >= 3.8`
from importlib.metadata import PackageNotFoundError, version # pragma: no cover
else:
from importlib_metadata import PackageNotFoundError, version # pragma: no cover
try:
# Change here if project is renamed and does not equal the package name
dist_name = __name__
__version__ = version(dist_name)
except PackageNotFoundError: # pragma: no cover
__version__ = "unknown"
finally:
del version, PackageNotFoundError
| 27.935484 | 85 | 0.732102 | # The MIT License (MIT) - Copyright (c) 2021 xesscorp
"""
Categorized collections of circuits.
"""
import sys
import pint
# Create a shortcut name for "circuitsascode".
sys.modules["casc"] = sys.modules["circuitsascode"]
# For electrical units like ohms, volts, etc.
units = pint.UnitRegistry()
if sys.version_info[:2] >= (3, 8):
# TODO: Import directly (no need for conditional) when `python_requires = >= 3.8`
from importlib.metadata import PackageNotFoundError, version # pragma: no cover
else:
from importlib_metadata import PackageNotFoundError, version # pragma: no cover
try:
# Change here if project is renamed and does not equal the package name
dist_name = __name__
__version__ = version(dist_name)
except PackageNotFoundError: # pragma: no cover
__version__ = "unknown"
finally:
del version, PackageNotFoundError
| 0 | 0 | 0 |
59e413e90db4fea7b7d53b3bfbe74dbdf3fef42a | 8,095 | py | Python | electrical_simulation/versuch_versuch.py | architecture-building-systems/bipv-tool | 3ae73a541754e5215dfd39ef837d72f4b80ef967 | [
"MIT"
] | 4 | 2019-02-18T14:10:49.000Z | 2021-04-23T09:03:13.000Z | electrical_simulation/versuch_versuch.py | architecture-building-systems/bipv-tool | 3ae73a541754e5215dfd39ef837d72f4b80ef967 | [
"MIT"
] | 1 | 2018-08-16T13:27:15.000Z | 2018-08-16T13:27:16.000Z | electrical_simulation/versuch_versuch.py | architecture-building-systems/bipv-tool | 3ae73a541754e5215dfd39ef837d72f4b80ef967 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import json
import datetime
import miasole_module_two as ps
import pvlib.pvsystem as pvsyst
#import shaded_miasole as ps
import interconnection as connect
import matplotlib.pyplot as plt
def align_yaxis(ax1, v1, ax2, v2):
"""adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1"""
_, y1 = ax1.transData.transform((0, v1))
_, y2 = ax2.transData.transform((0, v2))
inv = ax2.transData.inverted()
_, dy = inv.transform((0, 0)) - inv.transform((0, y1-y2))
miny, maxy = ax2.get_ylim()
ax2.set_ylim(miny+dy, maxy+dy)
#This function finds the MPP for the measured data using the lists of I and V of the object
if __name__ == "__main__":
module_lookup_table_path = 'C:\Users\walkerl\Documents\MA_Local\Electrical_simulation\lookup\MIA_lookup.pkl'
lookup_table = pd.read_pickle(module_lookup_table_path)
lookup_table = lookup_table.astype('object')
number_of_subcells = 5
shading_string = 'completely shaded' #This variable does not change calculations but will app
irradiation_path = 'C:\Users\walkerl\Documents\MA_Local\Versuche\Messungen_17_08_15\meas_irrad.xlsx'
time = datetime.datetime(2017,8,15,11,40,16)
temp_sensor_name = 'RTD3'
ambient_temperature = get_temperature(irradiation_path, time, temp_sensor_name)
if time.minute < 10:
measurement_path = r'C:\Users\walkerl\Documents\MA_Local\Versuche\Messungen_17_08_15\15-08-2017 ' + \
str(time.hour)+ '_0' + str(time.minute) + '_' + str(time.second)
measurement_data_path = measurement_path + '.XLS'
shading_pattern_path = measurement_path + "_shading.json"
elif time.second < 10:
measurement_path = r'C:\Users\walkerl\Documents\MA_Local\Versuche\Messungen_17_08_15\15-08-2017 ' + \
str(time.hour) + '_' + str(time.minute) + '_0' + str(time.second)
measurement_data_path = measurement_path + '.XLS'
shading_pattern_path = measurement_path + "_shading.json"
elif time.second < 10 and time.minute < 10:
measurement_path = r'C:\Users\walkerl\Documents\MA_Local\Versuche\Messungen_17_08_15\15-08-2017 ' + \
str(time.hour) + '_0' + str(time.minute) + '_0' + str(time.second)
measurement_data_path = measurement_path + '.XLS'
shading_pattern_path = measurement_path + "_shading.json"
else:
measurement_path = r'C:\Users\walkerl\Documents\MA_Local\Versuche\Messungen_17_08_15\15-08-2017 ' + \
str(time.hour)+ '_' + str(time.minute) + '_' + str(time.second)
measurement_data_path = measurement_path + '.XLS'
shading_pattern_path = measurement_path + "_shading.json"
shading_pattern1 = get_shading_pattern(shading_pattern_path)
sensor_name1 = "Pyranometer 2 (W/m2)"
# sensor_name1 = "DNI (W/m2)"
database_path = r'C:\Users\walkerl\Documents\BIPV-planning-tool\BIPV-planning-tool\electrical_simulation\data\sam-library-cec-modules-2015-6-30.csv'
module_df = pvsyst.retrieve_sam(path=database_path)
irrad_value1 = get_irradiation_value(irradiation_path, time, sensor_name1)
irrad1 = create_irradiation_list(irrad_value1, shading_pattern1, partially_shaded_irrad=None)
irrad_on_sub_cells_ordered1 = rearrange_shading_pattern(irrad1,number_of_subcells)
i_module_sim1, v_module_sim1, lookup_table = ps.partial_shading(irrad_on_sub_cells_ordered1, temperature=ambient_temperature,
irrad_temp_lookup_df=lookup_table, module_df=module_df)
i_module_meas, v_module_meas = get_measured_iv_curves_from_excel(measurement_data_path)
mpp_measured = max(i_module_meas * v_module_meas)
mpp_simulated = max(i_module_sim1 * v_module_sim1)
print mpp_measured
print mpp_simulated
plt.plot(v_module_sim1, i_module_sim1, color='blue', linestyle='--')
plt.plot(v_module_meas, i_module_meas, color='blue' )
ax = plt.gca()
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels=['simulated IV ' , 'measured IV'],loc='upper left')
ax.set_title('Irradiation: ' + str(int(irrad_value1)) + ", T = " + str(ambient_temperature)+ u"\u00b0" + "C" + '\n Shaded cells: ' + shading_string)
ax.set_ylabel('Current I [A]')
ax.set_xlabel('Voltage V [V]')
ax.set_ylim(0,4)
ax.set_xlim(0,105)
ax2 = ax.twinx()
ax2.set_ylim(0, 50)
ax2.set_xlim(0,40)
ax2.set_ylabel("Power P [W]")
ax2.plot(v_module_sim1, v_module_sim1 * i_module_sim1, color='green', label='PV simulated', linestyle='--')
ax2.plot(v_module_meas, i_module_meas*v_module_meas, color='green', label='PV measured' )
handles, labels = ax2.get_legend_handles_labels()
ax2.legend(handles, labels=['simulated PV ', 'measured PV'])
align_yaxis(ax, 0, ax2, 0)
# plt.savefig("F:\Validation_final\Plots_MIA\single_module/" + shading_string + str(int(irrad_value1)) + '.png')
plt.show()
| 43.994565 | 152 | 0.664484 | import pandas as pd
import numpy as np
import json
import datetime
import miasole_module_two as ps
import pvlib.pvsystem as pvsyst
#import shaded_miasole as ps
import interconnection as connect
import matplotlib.pyplot as plt
def get_irradiation_value(path, time, sensor_name):
    """Return the sensor reading closest in time from the irradiance log, as int."""
    # The log is minute-resolved; drop seconds before the nearest lookup.
    ts = time.replace(second=0)
    log = pd.read_excel(path)
    log['Time'] = pd.to_datetime(log['Time'])
    log.set_index('Time', inplace=True)
    nearest_row = log.iloc[log.index.get_loc(ts, method='nearest')]
    return int(round(nearest_row[sensor_name], 0))
def get_temperature(path, time, sensor_name):
    """Return the temperature reading closest in time from the log, rounded."""
    # The log is minute-resolved; drop seconds before the nearest lookup.
    ts = time.replace(second=0)
    log = pd.read_excel(path)
    log['Time'] = pd.to_datetime(log['Time'])
    log.set_index('Time', inplace=True)
    nearest_row = log.iloc[log.index.get_loc(ts, method='nearest')]
    return round(nearest_row[sensor_name], 0)
def align_yaxis(ax1, v1, ax2, v2):
    """adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1"""
    # Pixel (display) positions of the two anchor values on their own axes.
    _, y1 = ax1.transData.transform((0, v1))
    _, y2 = ax2.transData.transform((0, v2))
    # Convert the pixel offset between the anchors back into ax2 data units.
    inv = ax2.transData.inverted()
    _, dy = inv.transform((0, 0)) - inv.transform((0, y1-y2))
    # Shift ax2's limits by that offset so the anchors line up on screen.
    miny, maxy = ax2.get_ylim()
    ax2.set_ylim(miny+dy, maxy+dy)
def create_irradiation_list(irradiation, shade_pattern, partially_shaded_irrad):
    """Expand a per-cell shading pattern into per-cell irradiance values.

    Pattern codes: 1 = unshaded (full irradiation), 0 = fully shaded
    (mapped to 1 W/m2 because the IV-curve simulator cannot handle 0),
    2 = partially shaded (uses partially_shaded_irrad).  Unknown codes are
    skipped with a warning printed.

    Fix: the Python-2-only ``print`` statement is replaced with the call
    form, which behaves identically for a single argument on Python 2 and 3.
    """
    irradiation_on_cells = []
    for code in shade_pattern:
        if code == 1:
            irradiation_on_cells.append(irradiation)
        elif code == 0:
            irradiation_on_cells.append(1)  # not 0: simulator cannot handle it
        elif code == 2:
            irradiation_on_cells.append(partially_shaded_irrad)
        else:
            print("The shading pattern hasn't been defined correctly")
    return irradiation_on_cells
def get_measured_iv_curves_from_excel(filepath):
    """Load the measured IV curve from a tracer export; returns (I, V) arrays."""
    # The export carries 17 preamble rows before the column headers.
    measurement = pd.read_excel(filepath, header=17)
    voltages = np.array(measurement['U in V'].tolist())
    currents = np.array(measurement['I in A'].tolist())
    return currents, voltages
def get_shading_pattern(filepath):
    """Load the recorded shading pattern (a JSON list) for one measurement."""
    with open(filepath) as handle:
        pattern = json.load(handle)
    return pattern
def rearrange_shading_pattern(irradiation_pattern, number_of_subcells):
    """Reorder row-major cell irradiances into subcell-first (column-major) order.

    The Miasole module connects subcells in parallel first, so a row-major
    pattern such as::

        [1, 2, 3, 4,                  [1, 5, 9,
         5, 6, 7, 8,    becomes        2, 6, 10,
         9, 10, 11, 12]                3, 7, 11,
                                       4, 8, 12]

    Fix: use floor division (``//``).  Under Python 3 the original ``/``
    yields a float, so both ``range()`` and list indexing raise TypeError;
    ``//`` is floor division on Python 2 as well, so behaviour is unchanged.
    """
    cells_per_row = len(irradiation_pattern) // number_of_subcells
    new_pattern = []
    for i in range(cells_per_row):
        for y in range(number_of_subcells):
            new_pattern.append(irradiation_pattern[y * cells_per_row + i])
    return new_pattern
#This function finds the MPP for the measured data using the lists of I and V of the object
def get_mpp(self):
    """Scan the measured IV points and store the maximum power point on *self*.

    Sets ``self.mpp`` and, whenever a positive-power point exists, the
    voltage/current at which it occurs (``self.mpp_voltage``,
    ``self.mpp_current``).
    """
    best = 0
    for idx in range(len(self.v_data)):
        power = self.v_data[idx] * self.i_data[idx]
        if power > best:
            best = power
            self.mpp_voltage = self.v_data[idx]
            self.mpp_current = self.i_data[idx]
    self.mpp = best
if __name__ == "__main__":
module_lookup_table_path = 'C:\Users\walkerl\Documents\MA_Local\Electrical_simulation\lookup\MIA_lookup.pkl'
lookup_table = pd.read_pickle(module_lookup_table_path)
lookup_table = lookup_table.astype('object')
number_of_subcells = 5
shading_string = 'completely shaded' #This variable does not change calculations but will app
irradiation_path = 'C:\Users\walkerl\Documents\MA_Local\Versuche\Messungen_17_08_15\meas_irrad.xlsx'
time = datetime.datetime(2017,8,15,11,40,16)
temp_sensor_name = 'RTD3'
ambient_temperature = get_temperature(irradiation_path, time, temp_sensor_name)
if time.minute < 10:
measurement_path = r'C:\Users\walkerl\Documents\MA_Local\Versuche\Messungen_17_08_15\15-08-2017 ' + \
str(time.hour)+ '_0' + str(time.minute) + '_' + str(time.second)
measurement_data_path = measurement_path + '.XLS'
shading_pattern_path = measurement_path + "_shading.json"
elif time.second < 10:
measurement_path = r'C:\Users\walkerl\Documents\MA_Local\Versuche\Messungen_17_08_15\15-08-2017 ' + \
str(time.hour) + '_' + str(time.minute) + '_0' + str(time.second)
measurement_data_path = measurement_path + '.XLS'
shading_pattern_path = measurement_path + "_shading.json"
elif time.second < 10 and time.minute < 10:
measurement_path = r'C:\Users\walkerl\Documents\MA_Local\Versuche\Messungen_17_08_15\15-08-2017 ' + \
str(time.hour) + '_0' + str(time.minute) + '_0' + str(time.second)
measurement_data_path = measurement_path + '.XLS'
shading_pattern_path = measurement_path + "_shading.json"
else:
measurement_path = r'C:\Users\walkerl\Documents\MA_Local\Versuche\Messungen_17_08_15\15-08-2017 ' + \
str(time.hour)+ '_' + str(time.minute) + '_' + str(time.second)
measurement_data_path = measurement_path + '.XLS'
shading_pattern_path = measurement_path + "_shading.json"
shading_pattern1 = get_shading_pattern(shading_pattern_path)
sensor_name1 = "Pyranometer 2 (W/m2)"
# sensor_name1 = "DNI (W/m2)"
database_path = r'C:\Users\walkerl\Documents\BIPV-planning-tool\BIPV-planning-tool\electrical_simulation\data\sam-library-cec-modules-2015-6-30.csv'
module_df = pvsyst.retrieve_sam(path=database_path)
irrad_value1 = get_irradiation_value(irradiation_path, time, sensor_name1)
irrad1 = create_irradiation_list(irrad_value1, shading_pattern1, partially_shaded_irrad=None)
irrad_on_sub_cells_ordered1 = rearrange_shading_pattern(irrad1,number_of_subcells)
i_module_sim1, v_module_sim1, lookup_table = ps.partial_shading(irrad_on_sub_cells_ordered1, temperature=ambient_temperature,
irrad_temp_lookup_df=lookup_table, module_df=module_df)
i_module_meas, v_module_meas = get_measured_iv_curves_from_excel(measurement_data_path)
mpp_measured = max(i_module_meas * v_module_meas)
mpp_simulated = max(i_module_sim1 * v_module_sim1)
print mpp_measured
print mpp_simulated
plt.plot(v_module_sim1, i_module_sim1, color='blue', linestyle='--')
plt.plot(v_module_meas, i_module_meas, color='blue' )
ax = plt.gca()
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels=['simulated IV ' , 'measured IV'],loc='upper left')
ax.set_title('Irradiation: ' + str(int(irrad_value1)) + ", T = " + str(ambient_temperature)+ u"\u00b0" + "C" + '\n Shaded cells: ' + shading_string)
ax.set_ylabel('Current I [A]')
ax.set_xlabel('Voltage V [V]')
ax.set_ylim(0,4)
ax.set_xlim(0,105)
ax2 = ax.twinx()
ax2.set_ylim(0, 50)
ax2.set_xlim(0,40)
ax2.set_ylabel("Power P [W]")
ax2.plot(v_module_sim1, v_module_sim1 * i_module_sim1, color='green', label='PV simulated', linestyle='--')
ax2.plot(v_module_meas, i_module_meas*v_module_meas, color='green', label='PV measured' )
handles, labels = ax2.get_legend_handles_labels()
ax2.legend(handles, labels=['simulated PV ', 'measured PV'])
align_yaxis(ax, 0, ax2, 0)
# plt.savefig("F:\Validation_final\Plots_MIA\single_module/" + shading_string + str(int(irrad_value1)) + '.png')
plt.show()
| 2,901 | 0 | 160 |
d49cc9d9d750d1b39deda00590557e916ce04222 | 1,231 | py | Python | venv/lib/python3.6/site-packages/zmq/sugar/version.py | paxthree/life-dashboard | 8c65857d1102331b1e913d8800c61af1f61d9532 | [
"CC0-1.0"
] | null | null | null | venv/lib/python3.6/site-packages/zmq/sugar/version.py | paxthree/life-dashboard | 8c65857d1102331b1e913d8800c61af1f61d9532 | [
"CC0-1.0"
] | 19 | 2020-01-28T21:41:50.000Z | 2022-03-11T23:17:39.000Z | thunau/lib/python3.6/site-packages/zmq/sugar/version.py | karlburkhart/thunau | 900dca0259b30a229cd84f6315a7da9be94cb355 | [
"MIT"
] | 1 | 2018-09-19T05:55:27.000Z | 2018-09-19T05:55:27.000Z | """PyZMQ and 0MQ version functions."""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
from zmq.backend import zmq_version_info
VERSION_MAJOR = 16
VERSION_MINOR = 0
VERSION_PATCH = 4
VERSION_EXTRA = ""
__version__ = '%i.%i.%i' % (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
if VERSION_EXTRA:
__version__ = "%s.%s" % (__version__, VERSION_EXTRA)
version_info = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH, float('inf'))
else:
version_info = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
__revision__ = ''
def pyzmq_version():
"""return the version of pyzmq as a string"""
if __revision__:
return '@'.join([__version__,__revision__[:6]])
else:
return __version__
def pyzmq_version_info():
"""return the pyzmq version as a tuple of at least three numbers
If pyzmq is a development version, `inf` will be appended after the third integer.
"""
return version_info
def zmq_version():
"""return the version of libzmq as a string"""
return "%i.%i.%i" % zmq_version_info()
__all__ = ['zmq_version', 'zmq_version_info',
'pyzmq_version','pyzmq_version_info',
'__version__', '__revision__'
]
| 25.122449 | 86 | 0.692933 | """PyZMQ and 0MQ version functions."""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
from zmq.backend import zmq_version_info
VERSION_MAJOR = 16
VERSION_MINOR = 0
VERSION_PATCH = 4
VERSION_EXTRA = ""
__version__ = '%i.%i.%i' % (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
if VERSION_EXTRA:
__version__ = "%s.%s" % (__version__, VERSION_EXTRA)
version_info = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH, float('inf'))
else:
version_info = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
__revision__ = ''
def pyzmq_version():
    """Return the pyzmq version string, with the revision appended for dev builds."""
    if not __revision__:
        return __version__
    return '@'.join([__version__, __revision__[:6]])
def pyzmq_version_info():
    """return the pyzmq version as a tuple of at least three numbers
    If pyzmq is a development version, `inf` will be appended after the third integer.
    """
    # version_info is assembled at import time from the VERSION_* constants above.
    return version_info
def zmq_version():
    """return the version of libzmq as a string"""
    # zmq_version_info() comes from the compiled backend (the libzmq runtime).
    return "%i.%i.%i" % zmq_version_info()
__all__ = ['zmq_version', 'zmq_version_info',
'pyzmq_version','pyzmq_version_info',
'__version__', '__revision__'
]
| 0 | 0 | 0 |
4300d28866dc3941f3120411372fade12a422e39 | 3,846 | py | Python | penguin_random_house/parse_scripts/books.py | EvgenyTsydenov/book_recommender | bfbbed52907f979a6ba404f9d1f2ff8cfe142aff | [
"MIT"
] | null | null | null | penguin_random_house/parse_scripts/books.py | EvgenyTsydenov/book_recommender | bfbbed52907f979a6ba404f9d1f2ff8cfe142aff | [
"MIT"
] | null | null | null | penguin_random_house/parse_scripts/books.py | EvgenyTsydenov/book_recommender | bfbbed52907f979a6ba404f9d1f2ff8cfe142aff | [
"MIT"
] | null | null | null | import json
import os
from tqdm import tqdm
def parse_book(book_data: dict) -> dict:
    """Parse book core data."""
    # Fields copied through under the same name.
    direct_fields = ('isbn', 'title', 'onsale', 'price',
                     'language', 'pages', 'publisher')
    info = {field: book_data.get(field) for field in direct_fields}
    info['cover'] = f'https://images.randomhouse.com/cover/{info["isbn"]}'
    # Fields renamed from the API's camelCase to snake_case.
    renamed = (('format_family', 'formatFamily'),
               ('projected_minutes', 'projectedMinutes'),
               ('series_number', 'seriesNumber'))
    for target, source in renamed:
        info[target] = book_data.get(source)
    return info
def parse_authors(authors_data: list) -> list:
    """Extract information about contributors."""
    # Rename each source (camelCase) key to the output (snake_case) key.
    return [
        {
            'author_id': author.get('authorId'),
            'first_name': author.get('first'),
            'last_name': author.get('last'),
            'company': author.get('company'),
            'client_source_id': author.get('clientSourceId'),
            'role': author.get('contribRoleCode'),
        }
        for author in authors_data
    ]
def parse_categories(category_data: list) -> list:
    """Extract information about categories.

    Since we downloaded data about categories separately,
    keep here only category_id and the sequence.
    """
    # Read PRH docs about sequencing: only entries with a positive
    # sequence number are kept.
    return [
        {'category_id': cat.get('catId'), 'seq': cat.get('seq')}
        for cat in category_data
        if cat.get('seq', 0) > 0
    ]
def parse_series(series_data: list) -> list:
    """Extract information about series."""
    return [
        {
            'series_id': item.get('seriesCode'),
            'name': item.get('seriesName'),
            'description': item.get('description'),
            'series_count': item.get('seriesCount'),
            'is_numbered': item.get('isNumbered'),
            'is_kids': item.get('isKids'),
        }
        for item in series_data
    ]
def parse_works(works_data: list) -> list:
    """Extract information about works."""
    return [
        {
            'work_id': work.get('workId'),
            'title': work.get('title'),
            'author': work.get('author'),
            'onsale': work.get('onsale'),
            'language': work.get('language'),
            'series_number': work.get('seriesNumber'),
        }
        for work in works_data
    ]
def parse_content(content_data: dict) -> dict:
    """Extract long text data."""
    return {key: content_data.get(key) for key in ('flapcopy', 'excerpt')}
if __name__ == '__main__':
    # Paths: raw dump in, one-JSON-document-per-line file out.
    path_raw_books = os.path.join('..', 'data_raw', 'books.txt')
    path_parsed_books = os.path.join('..', 'data_interm', 'books.txt')
    # Parse the file line by line; each raw line is one JSON document
    # describing a single book.
    with open(path_raw_books, 'r') as books_raw:
        with open(path_parsed_books, 'w') as books_parsed:
            for book in tqdm(books_raw):
                book_data = json.loads(book)
                # Get core book data from the first title record.
                info = parse_book(book_data['titles'][0])
                # Parse relative info: flatten the list of embedded
                # sub-documents into one dict keyed by section name.
                embeds = {}
                for embed in book_data['_embeds']:
                    embeds.update(embed)
                info['authors'] = parse_authors(embeds['authors'])
                info['categories'] = parse_categories(embeds['categories'])
                info['series'] = parse_series(embeds['series'])
                info['works'] = parse_works(embeds['works'])
                info.update(parse_content(embeds['content']))
                # Save as JSON Lines (one document per line).
                data_string = json.dumps(info)
                books_parsed.write(data_string)
                books_parsed.write('\n')
| 32.05 | 75 | 0.568903 | import json
import os
from tqdm import tqdm
def parse_book(book_data: dict) -> dict:
    """Parse book core data."""
    core_fields = ('isbn', 'title', 'onsale', 'price',
                   'language', 'pages', 'publisher')
    info = {field: book_data.get(field) for field in core_fields}
    # The cover image URL is derived from the ISBN.
    info['cover'] = f'https://images.randomhouse.com/cover/{info["isbn"]}'
    # Remaining fields use camelCase keys in the source payload.
    info['format_family'] = book_data.get('formatFamily')
    info['projected_minutes'] = book_data.get('projectedMinutes')
    info['series_number'] = book_data.get('seriesNumber')
    return info
def parse_authors(authors_data: list) -> list:
    """Extract information about contributors."""
    # Rename each source (camelCase) key to the output (snake_case) key.
    return [
        {
            'author_id': author.get('authorId'),
            'first_name': author.get('first'),
            'last_name': author.get('last'),
            'company': author.get('company'),
            'client_source_id': author.get('clientSourceId'),
            'role': author.get('contribRoleCode'),
        }
        for author in authors_data
    ]
def parse_categories(category_data: list) -> list:
    """Extract information about categories.

    Since we downloaded data about categories separately,
    keep here only category_id and the sequence.
    """
    # Read PRH docs about sequencing: only entries with a positive
    # sequence number are kept.
    return [
        {'category_id': cat.get('catId'), 'seq': cat.get('seq')}
        for cat in category_data
        if cat.get('seq', 0) > 0
    ]
def parse_series(series_data: list) -> list:
    """Extract information about series."""
    return [
        {
            'series_id': item.get('seriesCode'),
            'name': item.get('seriesName'),
            'description': item.get('description'),
            'series_count': item.get('seriesCount'),
            'is_numbered': item.get('isNumbered'),
            'is_kids': item.get('isKids'),
        }
        for item in series_data
    ]
def parse_works(works_data: list) -> list:
    """Extract information about works."""
    return [
        {
            'work_id': work.get('workId'),
            'title': work.get('title'),
            'author': work.get('author'),
            'onsale': work.get('onsale'),
            'language': work.get('language'),
            'series_number': work.get('seriesNumber'),
        }
        for work in works_data
    ]
def parse_content(content_data: dict) -> dict:
    """Extract long text data."""
    return {key: content_data.get(key) for key in ('flapcopy', 'excerpt')}
if __name__ == '__main__':
    # Paths: raw dump in, one-JSON-document-per-line file out.
    path_raw_books = os.path.join('..', 'data_raw', 'books.txt')
    path_parsed_books = os.path.join('..', 'data_interm', 'books.txt')
    # Parse the file line by line; each raw line is one JSON document
    # describing a single book.
    with open(path_raw_books, 'r') as books_raw:
        with open(path_parsed_books, 'w') as books_parsed:
            for book in tqdm(books_raw):
                book_data = json.loads(book)
                # Get core book data from the first title record.
                info = parse_book(book_data['titles'][0])
                # Parse relative info: flatten the list of embedded
                # sub-documents into one dict keyed by section name.
                embeds = {}
                for embed in book_data['_embeds']:
                    embeds.update(embed)
                info['authors'] = parse_authors(embeds['authors'])
                info['categories'] = parse_categories(embeds['categories'])
                info['series'] = parse_series(embeds['series'])
                info['works'] = parse_works(embeds['works'])
                info.update(parse_content(embeds['content']))
                # Save as JSON Lines (one document per line).
                data_string = json.dumps(info)
                books_parsed.write(data_string)
                books_parsed.write('\n')
| 0 | 0 | 0 |
b7d7274c982532facf17dec2970f658f11cf429a | 3,502 | py | Python | scripts/parse_proto.py | JustinGanzer/j2objc | feb43475d70a027c96d5e94874003ef65e6ae34b | [
"Apache-2.0"
] | null | null | null | scripts/parse_proto.py | JustinGanzer/j2objc | feb43475d70a027c96d5e94874003ef65e6ae34b | [
"Apache-2.0"
] | null | null | null | scripts/parse_proto.py | JustinGanzer/j2objc | feb43475d70a027c96d5e94874003ef65e6ae34b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parses metadata from a .proto file
Parses metadata from a .proto file including various options and a list of top
level messages and enums declared within the proto file.
"""
import itertools
import re
import string
class ProtoMetadata:
    """Parses a proto file to extract options and other metadata."""
    # Class-level defaults, overwritten per instance as options are
    # matched in the .proto file. NOTE(review): this copy of the file
    # appears truncated; confirm against the full source.
    multiple_files = False
    package = ''
    java_package = ''
    java_api_version = 2
    java_alt_api_package = ''
    outer_class = ''
    optimize_for = 'SPEED'
| 25.75 | 78 | 0.658195 | #!/usr/bin/python3
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parses metadata from a .proto file
Parses metadata from a .proto file including various options and a list of top
level messages and enums declared within the proto file.
"""
import itertools
import re
import string
def CamelCase(name):
    """Convert a snake_case (or dotted) name to CamelCase.

    Alphanumeric characters are kept, other characters are dropped,
    and every letter that follows a non-alphabetic character is
    upper-cased.
    """
    pieces = []
    capitalize = True
    for ch in name:
        if capitalize and ch.islower():
            pieces.append(ch.upper())
        elif ch.isalnum():
            pieces.append(ch)
        capitalize = not ch.isalpha()
    return ''.join(pieces)
class ProtoMetadata:
    """Holds the options and top-level type names parsed from a .proto file."""

    # Option defaults, used when the .proto file does not set them.
    multiple_files = False
    package = ''
    java_package = ''
    java_api_version = 2
    java_alt_api_package = ''
    outer_class = ''
    optimize_for = 'SPEED'

    def __init__(self):
        # Top-level message and enum names, in declaration order.
        self.messages = []
        self.enums = []
def MatchOptions(line, data):
    """Update *data* with any file-level option declared on *line*."""
    # String-valued options: (regex with one capture group, attribute).
    simple_options = (
        (r'package\s*([\w\.]+)\s*;', 'package'),
        (r'option\s+java_package\s*=\s*"([^"]+)', 'java_package'),
        (r'option\s+java_outer_classname\s*=\s*"([^"]+)"', 'outer_class'),
        (r'option\s+optimize_for\s*=\s*(\S+)\s*;', 'optimize_for'),
    )
    for pattern, attr in simple_options:
        found = re.match(pattern, line)
        if found:
            setattr(data, attr, found.group(1))
    # java_multiple_files is boolean-valued rather than a raw string.
    found = re.match(r'option\s+java_multiple_files\s*=\s*(\S+)\s*;', line)
    if found:
        data.multiple_files = found.group(1).lower() == 'true'
def MatchTypes(line, data):
    """Record a message or enum whose declaration opens on *line*."""
    found = re.match(r'\s*(message|enum)\s+(\S+)\s+{$', line)
    if not found:
        return
    kind, type_name = found.groups()
    # Messages and enums are collected in separate lists.
    target = data.messages if kind == 'message' else data.enums
    target.append(type_name)
def MatchGroups(line, data):
    """Record a group field whose declaration opens on *line*."""
    group_re = r'\s*(required|optional|repeated)\s+group\s+(\S+)\s+=\s+\d+\s+{$'
    found = re.match(group_re, line)
    if found is not None:
        # A group field implicitly declares a message type of the same name.
        data.messages.append(found.group(2))
def SetOuterClass(filename, data):
    """Choose a default outer class name if none was set explicitly.

    The default is the CamelCased basename of *filename*; when that
    collides with a declared message or enum name, 'OuterClass' is
    appended.
    """
    if data.outer_class:
        return
    basename = filename.rsplit('/', 1)[-1].split('.', 1)[0]
    candidate = CamelCase(basename)
    if candidate in itertools.chain(data.messages, data.enums):
        candidate += 'OuterClass'
    data.outer_class = candidate
def ParseProto(filename):
    """Parse *filename* and return a populated ProtoMetadata."""
    data = ProtoMetadata()
    with open(filename, 'r') as fh:
        # Track brace nesting so only top-level declarations are recorded.
        brace_depth = 0
        inside_extend = False
        for line in fh:
            line = line.rstrip()
            MatchOptions(line, data)
            if brace_depth == 0 and re.match(r'\s*extend\s+\S+\s+{$', line):
                inside_extend = True
                brace_depth += 1
                continue
            # never emit anything for nested definitions
            if brace_depth == 0:
                MatchTypes(line, data)
            elif brace_depth == 1 and inside_extend:
                # Group fields directly inside an extend block still
                # declare message types.
                MatchGroups(line, data)
            # Count braces per line so depth stays correct even when a
            # declaration opens and closes on the same line.
            brace_depth += line.count('{')
            brace_depth -= line.count('}')
            if brace_depth == 0:
                inside_extend = False
    SetOuterClass(filename, data)
    return data
| 2,303 | 0 | 163 |
859f22430c4589c36e57592748a354e17b82f62b | 33,061 | py | Python | tests/test_factors.py | mscarey/AuthoritySpoke | 047c30a5f8af8dcfae6d6cd4ec6d80bf646ff33b | [
"FTL",
"CNRI-Python",
"Apache-1.1"
] | 18 | 2019-05-13T23:04:48.000Z | 2022-03-23T00:48:30.000Z | tests/test_factors.py | mscarey/AuthoritySpoke | 047c30a5f8af8dcfae6d6cd4ec6d80bf646ff33b | [
"FTL",
"CNRI-Python",
"Apache-1.1"
] | 98 | 2019-05-07T10:38:03.000Z | 2021-10-17T01:54:27.000Z | tests/test_factors.py | mscarey/AuthoritySpoke | 047c30a5f8af8dcfae6d6cd4ec6d80bf646ff33b | [
"FTL",
"CNRI-Python",
"Apache-1.1"
] | 1 | 2020-06-15T17:31:57.000Z | 2020-06-15T17:31:57.000Z | import operator
import pytest
from nettlesome.terms import ContextRegister, DuplicateTermError
from nettlesome.terms import Explanation, TermSequence, means
from nettlesome.entities import Entity
from nettlesome.groups import FactorGroup
from nettlesome.predicates import Predicate
from nettlesome.quantities import Comparison, Q_
from authorityspoke.facts import Fact, build_fact
| 42.169643 | 91 | 0.669641 | import operator
import pytest
from nettlesome.terms import ContextRegister, DuplicateTermError
from nettlesome.terms import Explanation, TermSequence, means
from nettlesome.entities import Entity
from nettlesome.groups import FactorGroup
from nettlesome.predicates import Predicate
from nettlesome.quantities import Comparison, Q_
from authorityspoke.facts import Fact, build_fact
class TestFacts:
    """Tests for constructing Facts and for their string representations.

    Fixtures such as make_entity, make_predicate, watt_factor, and
    watt_mentioned are defined in the test suite's conftest module.
    """

    def test_default_terms_for_fact(self, make_entity, make_predicate, watt_mentioned):
        e = make_entity
        f1 = build_fact(make_predicate["p1"], case_factors=watt_mentioned)
        assert f1.terms == [e["motel"]]

    def test_no_terms_for_fact(self):
        predicate = Predicate(content="context was included", truth=False)
        fact = Fact(predicate=predicate, terms=[])
        assert str(fact) == "the fact it was false that context was included"

    def test_build_fact(self, make_predicate, watt_mentioned):
        """
        Check that terms is stored as a list.
        """
        shooting = build_fact(
            make_predicate["p_shooting"],
            (2, 3),
            case_factors=watt_mentioned,
            standard_of_proof="preponderance of evidence",
        )
        assert isinstance(shooting.terms, list)

    def test_terms_from_case_factor_indices(
        self, make_entity, make_predicate, watt_mentioned
    ):
        """
        If you pass in integers instead of Factor objects to fill the blanks
        in the Predicate (which was the only way to do things in the first
        version of the Fact class's __init__ method), then the integers
        you pass in should be used as indices to select Factor objects
        from case_factors.
        """
        e = make_entity
        f2 = build_fact(
            make_predicate["p2"], indices=(1, 0), case_factors=watt_mentioned
        )
        assert f2.terms == [e["watt"], e["motel"]]

    def test_correct_factors_from_indices_in_build_fact(
        self, make_entity, make_predicate, watt_mentioned
    ):
        e = make_entity
        f2 = build_fact(
            make_predicate["p2"],
            indices=(1, 2),
            case_factors=watt_mentioned,
        )
        assert f2.terms == [e["watt"], e["trees"]]

    def test_wrong_type_in_terms_in_init(
        self, make_entity, make_predicate, watt_mentioned
    ):
        e = make_entity
        with pytest.raises(TypeError):
            f2 = build_fact(
                make_predicate["p1"],
                indices=("nonsense"),
                case_factors=watt_mentioned,
            )

    def test_invalid_index_for_case_factors_in_init(self, make_predicate, make_entity):
        with pytest.raises(IndexError):
            _ = build_fact(
                make_predicate["p1"],
                indices=2,
                case_factors=make_entity["watt"],
            )

    def test_convert_int_terms_to_tuple(self, make_predicate, watt_mentioned):
        f = build_fact(make_predicate["p_irrelevant_1"], 3, case_factors=watt_mentioned)
        assert f.terms == [watt_mentioned[3]]

    def test_string_representation_of_factor(self, watt_factor):
        assert "<Hideaway Lodge> was a motel" in str(watt_factor["f1"])
        assert "absence of the fact" in str(watt_factor["f3_absent"]).lower()

    def test_string_no_truth_value(self, watt_factor):
        factor = watt_factor["f2_no_truth"]
        assert "whether" in str(factor)

    def test_repeating_entity_string(self, make_predicate):
        """I'm not convinced that a model of a Fact ever needs to include
        multiple references to the same Entity just because the name of the
        Entity appears more than once in the Predicate."""
        with pytest.raises(DuplicateTermError):
            Fact(
                predicate=make_predicate["p_three_entities"],
                terms=[Entity(name="Al"), Entity(name="Bob"), Entity(name="Al")],
            )

    def test_string_representation_with_concrete_entities(self, watt_factor):
        """
        "Hideaway Lodge" is still a string representation of an Entity
        object, but it's not in angle brackets because it can't be
        replaced by another Entity object without changing the meaning
        of the Fact.
        """
        assert "Hideaway Lodge was a motel" in str(watt_factor["f1_specific"])

    def test_string_for_fact_with_identical_terms(self):
        devon = Entity(name="Devon", generic=True)
        elaine = Entity(name="Elaine", generic=True)
        opened_account = Fact(
            predicate=Predicate(
                content="$applicant opened a bank account for $applicant and $cosigner"
            ),
            terms=(devon, elaine),
        )
        assert "<Devon> opened a bank account for <Devon> and <Elaine>" in str(
            opened_account
        )

    def test_str_with_concrete_context(self, make_opinion_with_holding):
        holding = list(make_opinion_with_holding["cardenas_majority"].holdings)[1]
        longer_str = holding.inputs[0].str_with_concrete_context
        assert "the exhibit in the form testimony" in longer_str.lower()
        assert "the exhibit in the form testimony" not in str(holding.inputs[0]).lower()

    def test_complex_fact_no_line_break_in_predicate(self, make_opinion_with_holding):
        """
        Tests that the string representation of this Holding's only input
        Fact does not contain indented new lines, except in the "SPECIFIC
        CONTEXT" part, if present.
        The representation of the Exhibit mentioned in the Fact should
        not introduce any indented lines inside the Fact's string.
        """
        holding = list(make_opinion_with_holding["cardenas_majority"].holdings)[1]
        fact_text = str(holding.inputs[0])
        if "SPECIFIC CONTEXT" in fact_text:
            fact_text = fact_text.split("SPECIFIC CONTEXT")[0].strip()
        assert "\n  " not in fact_text

    def test_new_context_replace_fact(self, make_entity, watt_factor):
        changes = ContextRegister.from_lists(
            [make_entity["watt"], watt_factor["f2"]],
            [Entity(name="Darth Vader"), watt_factor["f10"]],
        )
        assert "was within the curtilage of <Hideaway Lodge>" in (
            watt_factor["f2"].new_context(changes).short_string
        )

    def test_get_factor_from_recursive_search(self, make_opinion_with_holding):
        holding_list = list(make_opinion_with_holding["cardenas_majority"].holdings)
        factor_list = list(holding_list[0].recursive_terms.values())
        assert any(
            factor.compare_keys(Entity(name="parole officer"))
            and factor.name == "parole officer"
            for factor in factor_list
        )

    def test_no_duplicate_comma_in_evidence(self, make_opinion_with_holding):
        holdings = list(make_opinion_with_holding["cardenas_majority"].holdings)
        assert "addicted to heroin, in showing the fact" in str(holdings[0].outputs[0])

    def test_new_concrete_context(self, make_entity, watt_factor):
        register = ContextRegister.from_lists(
            to_replace=[make_entity["watt"], make_entity["motel"]],
            replacements=[Entity(name="Darth Vader"), Entity(name="Death Star")],
        )
        different = watt_factor["f2"].new_context(register)
        assert "<Darth Vader> operated" in str(different)

    def test_type_of_terms(self, watt_factor):
        assert isinstance(watt_factor["f1"].term_sequence, TermSequence)

    def test_concrete_to_abstract(self, make_entity, make_predicate):
        motel = make_entity["motel_specific"]
        d = make_entity["watt"]
        fact = Fact(predicate=make_predicate["p2"], terms=(d, motel))
        assert "<Wattenburg> operated and lived at Hideaway Lodge" in str(fact)
        assert "<Wattenburg> operated and lived at Hideaway Lodge>" in str(
            fact.make_generic()
        )

    def test_entity_slots_as_length_of_factor(self, watt_factor):
        assert len(watt_factor["f1"].predicate) == 1
        assert len(watt_factor["f1"]) == 1

    def test_predicate_with_entities(self, make_entity, watt_factor):
        assert "<Hideaway Lodge> was a motel" in str(watt_factor["f1"])

    def test_factor_terms_do_not_match_predicate(self, make_predicate, watt_mentioned):
        """
        make_predicate["p1"] has only one slot for context factors, but
        this tells it to look for three.
        """
        with pytest.raises(ValueError):
            _ = build_fact(make_predicate["p1"], (0, 1, 2), case_factors=watt_mentioned)

    def test_reciprocal_with_wrong_number_of_entities(self, make_entity, watt_factor):
        with pytest.raises(ValueError):
            watt_factor["f1"].predicate._content_with_terms(
                (make_entity["motel"], make_entity["watt"])
            )

    def test_entity_and_human_in_predicate(self, make_entity, watt_factor):
        assert "<Wattenburg> operated and lived at <Hideaway Lodge>" in watt_factor[
            "f2"
        ].predicate._content_with_terms((make_entity["watt"], make_entity["motel"]))

    def test_standard_of_proof_must_be_listed(self, make_predicate, watt_mentioned):
        with pytest.raises(ValueError):
            _ = build_fact(
                make_predicate["p2"],
                case_factors=watt_mentioned,
                standard_of_proof="probably so",
            )

    def test_standard_of_proof_in_str(self, watt_factor):
        factor = watt_factor["f2_preponderance_of_evidence"]
        assert factor.standard_of_proof in factor.short_string

    def test_case_factors_deleted_from_fact(self, watt_factor):
        """This attribute should have been deleted during Fact.__post_init__"""
        predicate = Predicate(content="some things happened")
        factor = build_fact(predicate)
        assert not hasattr(factor, "case_factors")

    def test_repeated_placeholder_in_fact(self, make_opinion_with_holding):
        holding = make_opinion_with_holding["lotus_majority"].holdings[9]
        fact = holding.inputs[1]
        assert fact.short_string == (
            "the fact it was false that the precise formulation "
            "of <Lotus 1-2-3>'s code was necessary for <Lotus 1-2-3> to work"
        )
        assert len(fact.terms) == 1
class TestSameMeaning:
    """Tests for the Fact.means() same-meaning comparison."""

    def test_equality_factor_from_same_predicate(self, watt_factor):
        assert watt_factor["f1"].means(watt_factor["f1b"])

    def test_equality_factor_from_equal_predicate(self, watt_factor):
        assert watt_factor["f1"].means(watt_factor["f1c"])

    def test_equality_because_factors_are_generic_entities(self, watt_factor):
        assert watt_factor["f1"].means(watt_factor["f1_different_entity"])

    def test_unequal_because_a_factor_is_not_generic(self, watt_factor):
        assert not watt_factor["f9_swap_entities_4"].means(watt_factor["f9"])

    def test_generic_terms_equal(self, watt_factor):
        assert watt_factor["f2_generic"].means(watt_factor["f2_false_generic"])
        assert watt_factor["f2_generic"].means(watt_factor["f3_generic"])

    def test_equal_referencing_diffent_generic_terms(self, make_factor):
        assert make_factor["f_murder"].means(make_factor["f_murder_craig"])

    def test_generic_and_specific_factors_unequal(self, watt_factor):
        assert not watt_factor["f2"].means(watt_factor["f2_generic"])

    def test_factor_unequal_due_to_repeated_term(self, watt_factor):
        assert not watt_factor["f2"].means(watt_factor["f2_reflexive"])

    def test_factor_different_predicate_truth_unequal(self, watt_factor):
        assert not watt_factor["f7"].means(watt_factor["f7_opposite"])

    def test_unequal_because_one_factor_is_absent(self, watt_factor):
        assert not watt_factor["f8"].means(watt_factor["f8_absent"])

    def test_copies_of_identical_factor(self, make_factor):
        """
        Even if the two factors have different entity markers in self.terms,
        I expect them to evaluate equal because the choice of entity markers is
        arbitrary.
        """
        f = make_factor
        assert f["f_irrelevant_3"].means(f["f_irrelevant_3"])
        assert f["f_irrelevant_3"].means(f["f_irrelevant_3_new_context"])

    def test_equal_with_different_generic_subfactors(self, make_complex_fact):
        assert make_complex_fact["f_relevant_murder"].means(
            make_complex_fact["f_relevant_murder_craig"]
        )

    def test_reciprocal_context_register(self, watt_factor):
        """
        This test describes two objects with the same meaning that have been
        made in two different ways, each with a different id and repr.
        """
        assert watt_factor["f7"].means(watt_factor["f7_swap_entities"])

    def test_interchangeable_concrete_terms(self):
        """Detect that placeholders differing only by a final digit are interchangeable."""
        ann = Entity(name="Ann", generic=False)
        bob = Entity(name="Bob", generic=False)
        ann_and_bob_were_family = Fact(
            predicate=Predicate(
                content="$relative1 and $relative2 both were members of the same family"
            ),
            terms=(ann, bob),
        )
        bob_and_ann_were_family = Fact(
            predicate=Predicate(
                content="$relative1 and $relative2 both were members of the same family"
            ),
            terms=(bob, ann),
        )
        assert ann_and_bob_were_family.means(bob_and_ann_were_family)

    def test_unequal_to_enactment(self, watt_factor, e_copyright):
        # Comparing a Fact to an Enactment is not a same-meaning match,
        # and the reverse comparison raises TypeError.
        assert not watt_factor["f1"].means(e_copyright)
        with pytest.raises(TypeError):
            e_copyright.means(watt_factor["f1"])

    def test_standard_of_proof_inequality(self, watt_factor):
        f = watt_factor
        assert not f["f2_clear_and_convincing"].means(f["f2_preponderance_of_evidence"])
        assert not f["f2_clear_and_convincing"].means(f["f2"])
        assert f["f2_clear_and_convincing"].wrapped_string.endswith(
            "\n  by the STANDARD clear and convincing"
        )

    def test_means_despite_plural(self):
        directory = Entity(name="Rural's telephone directory", plural=False)
        listings = Entity(name="Rural's telephone listings", plural=True)
        directory_original = Fact(
            predicate=Predicate(content="$thing was original"), terms=directory
        )
        listings_original = Fact(
            predicate=Predicate(content="$thing were original"), terms=listings
        )
        assert directory_original.means(listings_original)

    def test_same_meaning_no_terms(self, make_predicate):
        no_context = Fact(predicate=make_predicate["p_no_context"], terms=[])
        assert no_context.means(no_context)
class TestImplication:
    """Tests for implication comparisons between Facts (>, >=, implies)."""

    def test_fact_implies_none(self, watt_factor):
        assert watt_factor["f1"].implies(None)

    def test_no_implication_of_rule(self, watt_factor, make_rule):
        assert not watt_factor["f1"].implies(make_rule["h1"])

    def test_fact_does_not_imply_holding(self, watt_factor, make_holding):
        assert not watt_factor["f1"].implies(make_holding["h1"])

    def test_specific_factor_implies_generic(self, watt_factor):
        assert watt_factor["f2"] > watt_factor["f2_generic"]
        assert not watt_factor["f2_generic"] > watt_factor["f2"]

    def test_specific_factor_implies_generic_explain(self, watt_factor):
        answer = watt_factor["f2"].explain_implication(watt_factor["f2_generic"])
        assert (
            str(watt_factor["f2"]),
            watt_factor["f2_generic"],
        ) in answer.context.items()

    def test_specific_implies_generic_form_of_another_fact(self, watt_factor):
        assert watt_factor["f2"] > watt_factor["f3_generic"]

    def test_specific_fact_does_not_imply_generic_entity(
        self, make_entity, watt_factor
    ):
        assert not watt_factor["f2"] > make_entity["motel"]

    def test_factor_does_not_imply_predicate(self, make_predicate, watt_factor):
        with pytest.raises(TypeError):
            assert not watt_factor["f8_meters"] > make_predicate["p8"]

    def test_factor_implies_because_of_quantity(self, watt_factor):
        assert watt_factor["f8_meters"] > watt_factor["f8"]
        assert watt_factor["f8_higher_int"] > watt_factor["f8_float"]
        assert watt_factor["f8_int"] < watt_factor["f8_higher_int"]

    def test_factor_implies_no_truth_value(self, watt_factor):
        assert watt_factor["f2"] > watt_factor["f2_no_truth"]
        assert not watt_factor["f2_no_truth"] > watt_factor["f2"]

    def test_comparison_implies_no_truth_value(self, watt_factor):
        assert watt_factor["f8"] > watt_factor["f8_no_truth"]
        assert not watt_factor["f8_no_truth"] > watt_factor["f8"]

    def test_implication_standard_of_proof(self, make_factor):
        assert (
            not make_factor["f_shooting_craig_poe"]
            > make_factor["f_shooting_craig_brd"]
        )
        assert make_factor["f_shooting_craig_brd"] > make_factor["f_shooting_craig_poe"]

    def test_factor_implies_because_of_exact_quantity(self, watt_factor):
        assert watt_factor["f8_exact"] > watt_factor["f7"]
        assert watt_factor["f8_exact"] >= watt_factor["f8"]

    def test_no_implication_pint_quantity_and_int(self, watt_factor):
        assert not watt_factor["f8"] > watt_factor["f8_int"]
        assert not watt_factor["f8"] < watt_factor["f8_int"]

    def test_absent_factor_implies_absent_factor_with_lesser_quantity(
        self, watt_factor
    ):
        assert watt_factor["f9_absent_miles"] > watt_factor["f9_absent"]

    def test_equal_factors_not_gt(self, watt_factor):
        f = watt_factor
        assert f["f7"] >= f["f7"]
        assert f["f7"] <= f["f7"]
        assert not f["f7"] > f["f7"]

    def test_standard_of_proof_comparison(self, watt_factor):
        f = watt_factor
        assert f["f2_clear_and_convincing"].implies(f["f2_preponderance_of_evidence"])
        assert f["f2_beyond_reasonable_doubt"] >= f["f2_clear_and_convincing"]

    def test_no_implication_between_factors_with_and_without_standards(
        self, watt_factor
    ):
        f = watt_factor
        assert not f["f2_clear_and_convincing"] > f["f2"]
        assert not f["f2"] > f["f2_preponderance_of_evidence"]

    def test_implication_complex(self, make_complex_fact):
        assert (
            make_complex_fact["f_relevant_murder"]
            > make_complex_fact["f_relevant_murder_whether"]
        )

    def test_context_register_text(self, make_context_register):
        assert str(make_context_register) == (
            "ContextRegister(<Alice> is like <Craig>, and <Bob> is like <Dan>)"
        )

    def test_implication_complex_explain(
        self, make_complex_fact, make_context_register
    ):
        complex_true = make_complex_fact["f_relevant_murder"]
        complex_whether = make_complex_fact["f_relevant_murder_whether"].new_context(
            make_context_register
        )
        explanation = complex_true.explain_implication(complex_whether)
        assert explanation.context[Entity(name="Alice").key].compare_keys(
            Entity(name="Craig")
        )

    def test_implication_explain_keys_only_from_left(
        self, make_complex_fact, make_context_register
    ):
        """
        Check that when implies provides a ContextRegister as an "explanation",
        it uses elements only from the left as keys and from the right as values.
        """
        complex_true = make_complex_fact["f_relevant_murder"]
        complex_whether = make_complex_fact["f_relevant_murder_whether"]
        new = complex_whether.new_context(make_context_register)
        explanations = list(complex_true.explanations_implication(new))
        explanation = explanations.pop()
        assert explanation.context.get("<Craig>") != Entity(name="Alice")
        assert explanation.context.get("<Alice>").compare_keys(Entity(name="Craig"))

    def test_context_registers_for_complex_comparison(self, make_complex_fact):
        gen = make_complex_fact["f_relevant_murder_nested_swap"]._context_registers(
            make_complex_fact["f_relevant_murder"], operator.ge
        )
        register = next(gen)
        assert register.matches.get("<Alice>").compare_keys(Entity(name="Bob"))

    def test_no_implication_complex(self, make_complex_fact):
        left = make_complex_fact["f_relevant_murder"]
        right = make_complex_fact["f_relevant_murder_alice_craig"]
        assert not left >= right
        assert left.explain_implication(right) is None

    def test_implied_by(self, make_complex_fact):
        assert make_complex_fact["f_relevant_murder_whether"].implied_by(
            make_complex_fact["f_relevant_murder"]
        )

    def test_explanation_implied_by(self, make_complex_fact):
        explanation = make_complex_fact["f_relevant_murder_whether"].explain_implied_by(
            make_complex_fact["f_relevant_murder"]
        )
        assert explanation

    def test_explain_not_implied_by(self, make_complex_fact):
        left = make_complex_fact["f_relevant_murder"]
        right = make_complex_fact["f_relevant_murder_whether"]
        assert left.explain_implied_by(right) is None

    def test_not_implied_by_none(self, make_complex_fact):
        left = make_complex_fact["f_relevant_murder"]
        assert not left.implied_by(None)

    def test_some_interchangeable_entities(self):
        hit = Fact(
            predicate="$person1 hit $target1 and $target2",
            terms=[Entity(name="Moe"), Entity(name="Curly"), Entity(name="Larry")],
        )
        hit2 = Fact(
            predicate="$person1 hit $target1 and $target2",
            terms=[
                Entity(name="Joker"),
                Entity(name="Batman"),
                Entity(name="Superman"),
            ],
        )
        assert hit.means(hit2)

    def test_interchangeable_entities_in_group(self):
        fought = Fact(
            predicate="$person1, $person2, and $person3 fought each other",
            terms=[Entity(name="Larry"), Entity(name="Moe"), Entity(name="Curly")],
        )
        hit = Fact(
            predicate="$person1 hit $target1 and $target2",
            terms=[Entity(name="Moe"), Entity(name="Curly"), Entity(name="Larry")],
        )
        fought2 = Fact(
            predicate="$person1, $person2, and $person3 fought each other",
            terms=[
                Entity(name="Superman"),
                Entity(name="Batman"),
                Entity(name="Joker"),
            ],
        )
        hit2 = Fact(
            predicate="$person1 hit $target1 and $target2",
            terms=[
                Entity(name="Joker"),
                Entity(name="Batman"),
                Entity(name="Superman"),
            ],
        )
        assert FactorGroup([fought, hit]).implies(FactorGroup([fought2, hit2]))
class TestContradiction:
    """Tests for contradiction relationships between Factors (mostly Facts)."""
    def test_factor_different_predicate_truth_contradicts(self, watt_factor):
        assert watt_factor["f7"].contradicts(watt_factor["f7_opposite"])
        assert watt_factor["f7_opposite"].contradicts(watt_factor["f7"])
    def test_same_predicate_true_vs_false(self, watt_factor):
        assert watt_factor["f10"].contradicts(watt_factor["f10_false"])
        assert watt_factor["f10"].truth != watt_factor["f10_false"].truth
    def test_factor_does_not_contradict_predicate(self, make_predicate, watt_factor):
        # A Fact can only be compared against comparable Factor types,
        # not against a bare Predicate.
        with pytest.raises(TypeError):
            _ = watt_factor["f7"].contradicts(make_predicate["p7_true"])
    def test_factor_contradiction_absent_predicate(self, watt_factor):
        assert watt_factor["f3"].contradicts(watt_factor["f3_absent"])
        assert watt_factor["f3_absent"].contradicts(watt_factor["f3"])
    def test_absences_of_contradictory_facts_consistent(self, watt_factor):
        # The absence of each of two mutually contradictory facts can coexist.
        assert not watt_factor["f8_absent"].contradicts(watt_factor["f8_less_absent"])
    def test_factor_no_contradiction_no_truth_value(self, watt_factor):
        # A Fact with no truth value cannot contradict anything.
        assert not watt_factor["f2"].contradicts(watt_factor["f2_no_truth"])
        assert not watt_factor["f2_no_truth"].contradicts(watt_factor["f2_false"])
    def test_absent_factor_contradicts_broader_quantity_statement(self, watt_factor):
        assert watt_factor["f8_absent"].contradicts(watt_factor["f8_meters"])
        assert watt_factor["f8_meters"].contradicts(watt_factor["f8_absent"])
    def test_less_specific_absent_contradicts_more_specific(self, watt_factor):
        assert watt_factor["f9_absent_miles"].contradicts(watt_factor["f9"])
        assert watt_factor["f9"].contradicts(watt_factor["f9_absent_miles"])
    def test_no_contradiction_with_more_specific_absent(self, watt_factor):
        assert not watt_factor["f9_absent"].contradicts(watt_factor["f9_miles"])
        assert not watt_factor["f9_miles"].contradicts(watt_factor["f9_absent"])
    def test_contradiction_complex(self, make_complex_fact):
        assert make_complex_fact["f_irrelevant_murder"].contradicts(
            make_complex_fact["f_relevant_murder_craig"]
        )
    def test_no_contradiction_complex(self, make_complex_fact):
        assert not make_complex_fact["f_irrelevant_murder"].contradicts(
            make_complex_fact["f_relevant_murder_alice_craig"]
        )
    def test_no_contradiction_of_None(self, watt_factor):
        assert not watt_factor["f1"].contradicts(None)
    def test_contradicts_if_present_both_present(self, watt_factor):
        """
        Test a helper function that checks whether there would
        be a contradiction if neither Factor was "absent".
        """
        assert watt_factor["f2"]._contradicts_if_present(
            watt_factor["f2_false"], explanation=Explanation.from_context()
        )
    def test_contradicts_if_present_one_absent(self, watt_factor):
        assert watt_factor["f2"]._contradicts_if_present(
            watt_factor["f2_false_absent"], explanation=Explanation.from_context()
        )
    def test_false_does_not_contradict_absent(self):
        # NOTE(review): the parentheses around the Entity args below do not
        # create tuples, so `terms` is a single Entity in each Fact --
        # presumably the Fact constructor normalizes that; confirm.
        absent_fact = Fact(
            predicate=Predicate(
                content="${rural_s_telephone_directory} was copyrightable", truth=True
            ),
            terms=(Entity(name="Rural's telephone directory")),
            absent=True,
        )
        false_fact = Fact(
            predicate=Predicate(
                content="${the_java_api} was copyrightable", truth=False
            ),
            terms=(Entity(name="the Java API", generic=True, plural=False)),
            absent=False,
        )
        assert not false_fact.contradicts(absent_fact)
        assert not absent_fact.contradicts(false_fact)
    def test_inconsistent_statements_about_different_entities(self):
        """
        Alice and Bob are both generics. So it's possible to reach a
        contradiction if you assume they correspond to one another.
        """
        p_small_weight = Comparison(
            content="the amount of gold $person possessed was",
            sign="<",
            expression=Q_("1 gram"),
        )
        p_large_weight = Comparison(
            content="the amount of gold $person possessed was",
            sign=">=",
            expression=Q_("100 kilograms"),
        )
        alice = Entity(name="Alice")
        bob = Entity(name="Bob")
        alice_rich = Fact(predicate=p_large_weight, terms=alice)
        bob_poor = Fact(predicate=p_small_weight, terms=bob)
        assert alice_rich.contradicts(bob_poor)
    def test_inconsistent_statements_about_corresponding_entities(self):
        """
        Even though Alice and Bob are both generics, it's known that
        Alice in the first context corresponds with Alice in the second.
        So there's no contradiction.
        """
        p_small_weight = Comparison(
            content="the amount of gold $person possessed was",
            sign="<",
            expression=Q_("1 gram"),
        )
        p_large_weight = Comparison(
            content="the amount of gold $person possessed was",
            sign=">=",
            expression=Q_("100 kilograms"),
        )
        alice = Entity(name="Alice")
        bob = Entity(name="Bob")
        alice_rich = Fact(predicate=p_large_weight, terms=alice)
        bob_poor = Fact(predicate=p_small_weight, terms=bob)
        register = ContextRegister()
        register.insert_pair(alice, alice)
        assert not alice_rich.contradicts(bob_poor, context=register)
    def test_copy_with_foreign_context(self, watt_mentioned, watt_factor):
        # new_context() swaps generic terms; the result should mean the same
        # as the fixture built directly with the substituted entity.
        w = watt_mentioned
        assert (
            watt_factor["f1"]
            .new_context(ContextRegister.from_lists([w[0]], [w[2]]))
            .means(watt_factor["f1_different_entity"])
        )
    def test_check_entity_consistency_true(self, make_entity, make_factor):
        left = make_factor["f_irrelevant_3"]
        right = make_factor["f_irrelevant_3_new_context"]
        e = make_entity
        easy_register = ContextRegister.from_lists([e["dan"]], [e["craig"]])
        easy_update = left.update_context_register(
            right, easy_register, comparison=means
        )
        harder_register = ContextRegister.from_lists(
            to_replace=[e["alice"], e["bob"], e["craig"], e["dan"], e["circus"]],
            replacements=[e["bob"], e["alice"], e["dan"], e["craig"], e["circus"]],
        )
        harder_update = left.update_context_register(
            right,
            context=harder_register,
            comparison=means,
        )
        # At least one viable context mapping must survive each update.
        assert any(register is not None for register in easy_update)
        assert any(register is not None for register in harder_update)
    def test_check_entity_consistency_false(self, make_entity, make_factor):
        context = ContextRegister()
        context.insert_pair(make_entity["circus"], make_entity["alice"])
        update = make_factor["f_irrelevant_3"].update_context_register(
            make_factor["f_irrelevant_3_new_context"], comparison=means, context=context
        )
        assert not any(register is not None for register in update)
    def test_entity_consistency_identity_not_equality(self, make_entity, make_factor):
        register = ContextRegister()
        register.insert_pair(make_entity["dan"], make_entity["dan"])
        update = make_factor["f_irrelevant_3"].update_context_register(
            make_factor["f_irrelevant_3_new_context"],
            context=register,
            comparison=means,
        )
        assert not any(register is not None for register in update)
    def test_check_entity_consistency_type_error(
        self, make_entity, make_factor, make_predicate
    ):
        """
        There would be no TypeError if it used "means"
        instead of .gt. The comparison would just return False.
        """
        update = make_factor["f_irrelevant_3"].update_context_register(
            make_predicate["p2"],
            {str(make_entity["dan"]): make_entity["dan"]},
            operator.gt,
        )
        with pytest.raises(TypeError):
            any(register is not None for register in update)
class TestConsistent:
    """Tests for consistent_with(): whether two Factors can both be true."""
    def test_contradictory_facts_about_same_entity(self, watt_factor):
        left = watt_factor["f8_less"]
        right = watt_factor["f8_meters"]
        register = ContextRegister()
        # Pin the two generic terms to each other so no alternate mapping
        # can rescue consistency.
        register.insert_pair(left.generic_terms()[0], right.generic_terms()[0])
        assert not left.consistent_with(right, register)
        assert left.explain_consistent_with(right, register) is None
    def test_explanations_consistent_with(self, watt_factor):
        left = watt_factor["f8_less"]
        right = watt_factor["f8_meters"]
        register = ContextRegister()
        register.insert_pair(left.generic_terms()[0], right.generic_terms()[0])
        explanations = list(left.explanations_consistent_with(right, context=register))
        assert not explanations
    def test_factor_consistent_with_none(self, make_exhibit):
        assert make_exhibit["no_shooting_testimony"].consistent_with(
            make_exhibit["no_shooting_witness_unknown_testimony"]
        )
class TestAddition:
    """Tests for combining Factors with the + operator."""
    @pytest.mark.parametrize(
        "left, right, expected",
        [
            ("f_shooting_craig_poe", "f_shooting_craig_brd", "f_shooting_craig_brd"),
            ("f_irrelevant_3", "f_irrelevant_3_new_context", "f_irrelevant_3"),
            (
                "f_irrelevant_3_new_context",
                "f_irrelevant_3",
                "f_irrelevant_3_new_context",
            ),
        ],
    )
    def test_addition(self, make_factor, left, right, expected):
        # The sum means the same as one of the operands (presumably the one
        # that implies the other -- confirm against Factor.__add__).
        answer = make_factor[left] + make_factor[right]
        assert answer.means(make_factor[expected])
    def test_add_unrelated_factors(self, make_factor):
        # Unrelated Facts don't combine; addition yields None.
        assert make_factor["f_murder"] + make_factor["f_crime"] is None
    def test_cant_add_enactment_to_fact(self, watt_factor, e_search_clause):
        with pytest.raises(TypeError):
            print(watt_factor["f3"] + e_search_clause)
| 21,389 | 11,064 | 218 |
74d5c07e6a219370de5020bdd900eea843a2ba4f | 165,553 | py | Python | icbd/compiler/compiler_types.py | kmod/icbd | 9636564eb3993afa07c6220d589bbd1991923d74 | [
"MIT"
] | 7 | 2015-04-06T15:17:13.000Z | 2020-10-21T04:57:00.000Z | icbd/compiler/compiler_types.py | kmod/icbd | 9636564eb3993afa07c6220d589bbd1991923d74 | [
"MIT"
] | null | null | null | icbd/compiler/compiler_types.py | kmod/icbd | 9636564eb3993afa07c6220d589bbd1991923d74 | [
"MIT"
] | 4 | 2016-05-16T17:53:08.000Z | 2020-11-28T17:18:50.000Z | import _ast
import inspect
import re
import sys
import traceback
from . import closure_analyzer
from .code_emitter import CodeEmitter
DEBUG_CHECKS = True
BINOP_MAP = {
_ast.Add:"__add__",
_ast.Sub:"__sub__",
_ast.Mult:"__mul__",
_ast.BitOr:"__or__",
_ast.BitXor:"__xor__",
_ast.BitAnd:'__and__',
_ast.LShift:'__lshift__',
_ast.RShift:'__rshift__',
_ast.Mod:'__mod__',
_ast.Div:'__div__',
_ast.Pow:'__pow__',
}
COMPARE_MAP = {
_ast.Lt:"__lt__",
_ast.Gt:"__gt__",
_ast.LtE:"__le__",
_ast.GtE:"__ge__",
_ast.Eq:"__eq__",
_ast.NotEq:"__ne__",
_ast.In:"__contains__",
}
COMPARE_REFLECTIONS = {
_ast.Lt:_ast.Gt,
_ast.Gt:_ast.Lt,
_ast.LtE:_ast.GtE,
_ast.GtE:_ast.LtE,
_ast.Eq:_ast.Eq,
_ast.NotEq:_ast.NotEq,
}
# The same thing as an AttributeError, but for the compiled code rather than the compiler code
# The same thing as a TypeError, but for the compiled code rather than the compiler code
# An error raised when you try to instantiate an object that has no first-class representation (such as a polymorphic function, or a module)
class AttributeAccessType(object):
    """ An enum of the possible ways that an object attribute will
    be generated. """
    # Values are plain strings; they are only ever compared, never interpreted.
    # A member variable of the object, that has a persistent memory location
    FIELD = "field"
    # A field that the object has implicitly, and is generated on access (such as __class__)
    IMPLICIT_FIELD = "implicit_field"
    # A class-level method of the object that doesn't change, which is instantiated into an instancemethod on access
    CONST_METHOD = "attr_const_method"
# convert everything to IEEE754 format to make sure we don't lose any precision in serialization
# Memoized template file contents, keyed by template name.
_cached_templates = {}
_cached_ctemplates = {}
# Snapshot the code objects of Variable's own methods: the DEBUG_CHECKS
# __getattribute__ guard lets only these operate on a Variable with 0 nrefs.
Variable._ok_code = list(func.func_code for name, func in inspect.getmembers(Variable, inspect.ismethod))
# All types default to raised types
# --- Shared singleton instances of the special-purpose meta-types ---
Slice = singleton(SliceMT)
Slice.initialized = ("attrs", "write")
StrConstant = singleton(StrConstantMT)
StrConstant.initialized = ("attrs", "write")
None_ = singleton(NoneMT)
None_.initialized = ("attrs", "write")
# Builtin-function meta-types:
Len = singleton(LenMT)
StrFunc = singleton(StrFuncMT)
ReprFunc = singleton(ReprFuncMT)
Nref = singleton(NrefMT)
TypeFunc = singleton(TypeFuncMT)
BoolFunc = singleton(BoolFuncMT)
Isinstance = singleton(IsinstanceMT)
Cast = singleton(CastMT)
StrFormat = singleton(StrFormatMT)
MapFunc = singleton(MapFuncMT)
ReduceFunc = singleton(ReduceFuncMT)
Enumerate = singleton(EnumerateMT)
# NOTE(review): MinFuncMT is parameterized by name and apparently implements
# both min and max -- confirm.
MinFunc = MinFuncMT("min")
MaxFunc = MinFuncMT("max")
# Container constructors, parameterized by their element-adding method:
ListFunc = Parametric1ArgCtorFuncMT(ListMT.make_list, "append")
SetFunc = Parametric1ArgCtorFuncMT(SetMT.make_set, "add")
DequeFunc = Parametric1ArgCtorFuncMT(DequeMT.make_deque, "append")
DictFunc = singleton(DictFuncMT)
# --- Core class objects and their instance types ---
ObjectClass = ClassMT(None, "object", "object")
Object = ObjectClass._instance
IntClass = ClassMT(ObjectClass, "int", "int", llvm_type="i64")
Int = IntClass._instance
FloatClass = ClassMT(ObjectClass, "float", "float", llvm_type="double")
Float = FloatClass._instance
StrClass = ClassMT(ObjectClass, "str", "str", llvm_type="%string*")
Str = StrClass._instance
BoolClass = ClassMT(ObjectClass, "bool", "bool", "i1")
Bool = BoolClass._instance
TypeClass = ClassMT(ObjectClass, "type", "type")
Type = TypeClass._instance
FileClass = ClassMT(ObjectClass, "file", "file")
File = FileClass._instance
# TODO there is a lot of duplication between this and stuff like closures
class PtrMT(MT):
    """Meta-type of a stored pointer to an object.

    Instances should only exist as a compiler implementation detail, never
    as a user-visible value.
    """
Underlying = singleton(_UnderlyingMT)
_made_supertypes = {}
# Some type classes for stdlib stuff:
STDLIB_TYPES = []
_IntIterator, _IntIterable = _make_iterable(Int)
_FloatIterator, _FloatIterable = _make_iterable(Float)
# Structural ("duck") type: anything with refcounting hooks and __nonzero__.
_Boolable = BoxedMT([_FakeMT({
    "__class__": (Type, AttributeAccessType.IMPLICIT_FIELD),
    "__incref__": (CallableMT.make_callable([], 0, None_), AttributeAccessType.CONST_METHOD),
    "__decref__": (CallableMT.make_callable([], 0, None_), AttributeAccessType.CONST_METHOD),
    "__nonzero__": (CallableMT.make_callable([], 0, Bool), AttributeAccessType.CONST_METHOD),
})])
STDLIB_TYPES.append(_Boolable)
_BoolableIterator, _BoolableIterable = _make_iterable(_Boolable)
# Name -> Variable bindings for the Python builtins the compiler understands.
# Each Variable pairs a compile-time type (MT) with its runtime representation;
# UnboxedFunctionMT values name the runtime symbol implementing the builtin.
BUILTINS = {
    "True": Variable(Bool, 1, 1, False),
    "False": Variable(Bool, 0, 1, False),
    "len": Variable(Len, (), 1, False),
    "str": Variable(StrClass, (), 1, False),
    "repr": Variable(ReprFunc, (), 1, False),
    "type": Variable(TypeClass, (), 1, False),
    "map": Variable(MapFunc, (), 1, False),
    "reduce": Variable(ReduceFunc, (), 1, False),
    "nrefs": Variable(Nref, (), 1, False),
    "bool": Variable(BoolClass, (), 1, False),
    "list": Variable(ListFunc, (), 1, False),
    "dict": Variable(DictFunc, (), 1, False),
    "set": Variable(SetFunc, (), 1, False),
    "isinstance": Variable(Isinstance, (), 1, False),
    "__cast__": Variable(Cast, (), 1, False),
    "enumerate": Variable(Enumerate, (), 1, False),
    "chr": Variable(UnboxedFunctionMT(None, None, [Int], Str), ("@chr", [], None), 1, False),
    "ord": Variable(UnboxedFunctionMT(None, None, [Str], Int), ("@ord", [], None), 1, False),
    # "open":Variable(UnboxedFunctionMT(None, None, [Str], File), ("@file_open", [], None), 1, True),
    # open() takes an optional mode defaulting to the "r" string constant:
    "open": Variable(UnboxedFunctionMT(None, None, [Str, Str], File, ndefaults=1), ("@file_open2", [Variable(Str, "@.str_r", 1, True)], None), 1, False),
    "int": Variable(IntClass, (), 1, False),
    # min/max/abs/sum are overloaded across int and float variants:
    "min": PolymorphicFunctionMT.make([
        Variable(UnboxedFunctionMT(None, None, [Int, Int], Int), ("@int_min", [], None), 1, False),
        Variable(UnboxedFunctionMT(None, None, [Float, Float], Float), ("@float_min", [], None), 1, False),
        Variable(MinFunc, (), 1, False),
    ]),
    "max": PolymorphicFunctionMT.make([
        Variable(UnboxedFunctionMT(None, None, [Int, Int], Int), ("@int_max", [], None), 1, False),
        Variable(UnboxedFunctionMT(None, None, [Float, Float], Float), ("@float_max", [], None), 1, False),
        Variable(MaxFunc, (), 1, False),
    ]),
    "float": Variable(FloatClass, (), 1, False),
    "file": Variable(FileClass, (), 1, False),
    "abs": PolymorphicFunctionMT.make([
        Variable(UnboxedFunctionMT(None, None, [Int], Int), ("@int_abs", [], None), 1, False),
        Variable(UnboxedFunctionMT(None, None, [Float], Float), ("@float_abs", [], None), 1, False),
    ]),
    "None": Variable(None_, "null", 1, False),
    "object": Variable(ObjectClass, (), 1, False),
    "sum": PolymorphicFunctionMT.make([
        Variable(UnboxedFunctionMT(None, None, [_IntIterable], Int), ("@sum_int", [], None), 1, False),
        Variable(UnboxedFunctionMT(None, None, [_FloatIterable], Float), ("@sum_float", [], None), 1, False),
    ]),
    "any": Variable(UnboxedFunctionMT(None, None, [_BoolableIterable], Bool), ("@any", [], None), 1, False),
}
# Importable modules the compiler provides natively; each entry maps a module
# name to a ModuleMT whose members name their runtime implementations.
BUILTIN_MODULES = {
    "time": Variable(ModuleMT({
        'time': Variable(UnboxedFunctionMT(None, None, [], Float), ("@time_time", [], None), 1, False),
        'clock': Variable(UnboxedFunctionMT(None, None, [], Float), ("@time_clock", [], None), 1, False),
        'sleep': Variable(UnboxedFunctionMT(None, None, [Float], None_), ("@time_sleep", [], None), 1, False),
    }), 1, 1, False),
    "sys": Variable(ModuleMT({
        'stdin': Variable(File, "@sys_stdin", 1, False),
        'stdout': Variable(File, "@sys_stdout", 1, False),
        'stderr': Variable(File, "@sys_stderr", 1, False),
    }), 1, 1, False),
    "math": Variable(ModuleMT({
        'sqrt': Variable(UnboxedFunctionMT(None, None, [Float], Float), ("@sqrt", [], None), 1, False),
        'tan': Variable(UnboxedFunctionMT(None, None, [Float], Float), ("@tan", [], None), 1, False),
        'sin': Variable(UnboxedFunctionMT(None, None, [Float], Float), ("@sin", [], None), 1, False),
        'cos': Variable(UnboxedFunctionMT(None, None, [Float], Float), ("@cos", [], None), 1, False),
        'ceil': Variable(UnboxedFunctionMT(None, None, [Float], Float), ("@ceil", [], None), 1, False),
        'pi': Variable(Float, format_float(3.141592653589793), 1, False),
    }), 1, 1, False),
    "collections": Variable(ModuleMT({
        'deque': Variable(DequeFunc, (), 1, False),
    }), 1, 1, False),
    # Interopability library:
    "hax": Variable(ModuleMT({
        "ftoi": Variable(UnboxedFunctionMT(None, None, [Float], Int), ("@hax_ftoi", [], None), 1, False),
        "itof": Variable(UnboxedFunctionMT(None, None, [Int], Float), ("@hax_itof", [], None), 1, False),
        "min": Variable(UnboxedFunctionMT(None, None, [Int, Int], Int), ("@int_min", [], None), 1, False),
        "max": Variable(UnboxedFunctionMT(None, None, [Int, Int], Int), ("@int_max", [], None), 1, False),
        "fmin": Variable(UnboxedFunctionMT(None, None, [Float, Float], Float), ("@float_min", [], None), 1, False),
        "abs": Variable(UnboxedFunctionMT(None, None, [Float], Float), ("@float_abs", [], None), 1, False),
        "initvideo": Variable(UnboxedFunctionMT(None, None, [Int, Int], None_), ("@hax_initvideo", [], None), 1, False),
        "plot": Variable(UnboxedFunctionMT(None, None, [Int, Int, Int, Int, Int], None_), ("@hax_plot", [], None), 1, False),
    }), 1, 1, False),
}
# Late initialization: install the class_methods tables now that the singleton
# type objects referenced by their signatures exist.
SliceMT.setup_class_methods()
NoneMT.setup_class_methods()
setup_int()
setup_float()
setup_string()
setup_bool()
setup_type()
setup_file()
| 41.669519 | 331 | 0.570228 | import _ast
import inspect
import re
import sys
import traceback
from . import closure_analyzer
from .code_emitter import CodeEmitter
DEBUG_CHECKS = True
BINOP_MAP = {
_ast.Add:"__add__",
_ast.Sub:"__sub__",
_ast.Mult:"__mul__",
_ast.BitOr:"__or__",
_ast.BitXor:"__xor__",
_ast.BitAnd:'__and__',
_ast.LShift:'__lshift__',
_ast.RShift:'__rshift__',
_ast.Mod:'__mod__',
_ast.Div:'__div__',
_ast.Pow:'__pow__',
}
COMPARE_MAP = {
_ast.Lt:"__lt__",
_ast.Gt:"__gt__",
_ast.LtE:"__le__",
_ast.GtE:"__ge__",
_ast.Eq:"__eq__",
_ast.NotEq:"__ne__",
_ast.In:"__contains__",
}
COMPARE_REFLECTIONS = {
_ast.Lt:_ast.Gt,
_ast.Gt:_ast.Lt,
_ast.LtE:_ast.GtE,
_ast.GtE:_ast.LtE,
_ast.Eq:_ast.Eq,
_ast.NotEq:_ast.NotEq,
}
# The same thing as an AttributeError, but for the compiled code rather than the compiler code
class UserAttributeError(Exception):
    """AttributeError raised on behalf of the compiled (user) code."""
    pass
# The same thing as a TypeError, but for the compiled code rather than the compiler code
class UserTypeError(Exception):
    """TypeError raised on behalf of the compiled (user) code."""
    pass
# An error raised when you try to instantiate an object that has no first-class representation (such as a polymorphic function, or a module)
class CantInstantiateException(Exception):
    """Raised on attempts to instantiate a type with no first-class representation."""
    pass
class AttributeAccessType(object):
    """ An enum of the possible ways that an object attribute will
    be generated. """
    # Values are plain strings; they are only ever compared, never interpreted.
    # A member variable of the object, that has a persistent memory location
    FIELD = "field"
    # A field that the object has implicitly, and is generated on access (such as __class__)
    IMPLICIT_FIELD = "implicit_field"
    # A class-level method of the object that doesn't change, which is instantiated into an instancemethod on access
    CONST_METHOD = "attr_const_method"
# convert everything to IEEE754 format to make sure we don't lose any precision in serialization
def format_float(f):
    """Return *f* as a big-endian IEEE-754 hex literal, e.g. "0x3ff0000000000000".

    Used so float constants round-trip through the emitted code without any
    loss of precision from decimal formatting.
    """
    import struct
    import binascii
    # Pack big-endian directly with '>d'.  The previous version packed with
    # the native byte order and reversed the bytes, which only produces
    # big-endian output on little-endian hosts.
    return "0x" + binascii.hexlify(struct.pack('>d', f)).decode('ascii')
def is_emitter(e):
    """Return True if *e* is None or a codegen CodeEmitter instance."""
    from .codegen import CodeEmitter
    if e is None:
        return True
    return isinstance(e, CodeEmitter)
# Memoized contents of templates/*.tll, keyed by template name (see get_template).
_cached_templates = {}
def get_template(name):
    """Return the contents of templates/<name>.tll, memoized in _cached_templates."""
    try:
        return _cached_templates[name]
    except KeyError:
        text = open("templates/%s.tll" % name).read()
        _cached_templates[name] = text
        return text
def convert_none_to_void_ll(code):
    """Rewrite LLVM IR so functions that return %none* return void instead.

    Handles define/declare headers, ret instructions, call sites (the result
    register is materialized as a null %none* via inttoptr), and
    function-pointer types.
    """
    for needle, replacement in (("define %none* ", "define void "),
                                ("declare %none* ", "declare void ")):
        code = code.replace(needle, replacement)
    code = re.sub("ret (%none\*|void) [^ \n]*", "ret void", code)
    def _rewrite_call(m):
        # Keep the old result register valid by binding it to a null %none*.
        return "%s = inttoptr i64 0 to %%none*\n call void " % (m.group(1),)
    code = re.sub("([^ ]*) = call (%none\*|void) ", _rewrite_call, code)
    return code.replace("%none* (", "void (") # function pointers
# Memoized contents of templates/*.tc, keyed by template name (see get_ctemplate).
_cached_ctemplates = {}
def get_ctemplate(name):
    """Return the contents of templates/<name>.tc, memoized in _cached_ctemplates."""
    try:
        return _cached_ctemplates[name]
    except KeyError:
        text = open("templates/%s.tc" % name).read()
        _cached_ctemplates[name] = text
        return text
def eval_ctemplate(name, em, locals_):
    """Expand the C template *name* using *locals_*; "em" is supplied separately."""
    assert "em" not in locals_
    return _eval_template(name, get_ctemplate(name), em, locals_, output="c")
def eval_template(name, em, locals_):
    """Expand the LLVM template *name* using *locals_*; "em" is supplied separately."""
    assert "em" not in locals_
    return _eval_template(name, get_template(name), em, locals_, output="llvm")
def _make_typeobj(em, llvm_name, display_name=None):
display_name = display_name or llvm_name
nameptr = em.get_str_ptr(display_name)
return ("""@%(n)s_typename = global %%string {i64 1, i64 %(len)s, i8* %(nameptr)s, [0 x i8] zeroinitializer}\n"""
"""@%(n)s_typeobj = global %%type {i64 1, %%string* @%(n)s_typename, %%type* null}""") % {
'n': llvm_name,
'nameptr': nameptr,
'len': len(display_name)
}
def _eval_template(name, template, _em, locals_, output):
    """Expand a code template into LLVM IR or C source (per *output*).

    Template mini-language, handled line by line in convert():
      - ``$(expr)``  -- substitute ``str(eval(expr))`` inline.
      - ``$$(stmts)`` / ``$$$( ... )$$$`` -- exec the statements with ``em``
        bound to a fresh CodeEmitter and splice in whatever it emitted.
      - ``$(if cond)`` ... ``$(endif)`` -- conditionally include lines.
      - ``///`` -- template comment, stripped before processing.
    """
    lines = template.split('\n')
    newlines = []
    locals_['make_typeobj'] = _make_typeobj
    assert isinstance(_em, CodeEmitter)
    # Stack of booleans for nested $(if)/$(endif); a line is live only if all are true.
    if_state = []
    def convert(em, lineno, l):
        # Expand one (possibly joined $$$-section) template line recursively.
        ignore = not all(if_state)
        locals_['em'] = em
        get_written = lambda: em.get_llvm().replace('\n', '\n ') if output == "llvm" else em.get_c()
        assert not get_written(), get_written()
        if '///' in l:
            return convert(em, lineno, l[:l.find('///')])
        m = re.search(r"\$+\(", l)
        if not m:
            if ignore:
                return ""
            return l
        ndollar = len(m.group()) - 1
        assert ndollar in (1, 2, 3)
        # Scan for the matching close-paren, skipping over quoted strings.
        nopen = 1
        quote = None
        for i in xrange(m.end(), len(l)):
            if l[i] == '\\':
                assert l[i+1] in "nt", l[i+1]
            if quote:
                if l[i] == quote:
                    quote = None
                continue
            if l[i] in '\'\"':
                quote = l[i]
                continue
            if l[i] == '(':
                nopen += 1
            elif l[i] == ')':
                nopen -= 1
                if nopen == 0:
                    end = i
                    break
        else:
            raise Exception("Unclosed parethesis at line %d" % (lineno+1,))
        to_eval = l[m.end():end]
        if to_eval.startswith("if "):
            if ignore:
                conditional = False
            else:
                conditional = eval(to_eval[3:], globals(), locals_)
            if_state.append(conditional)
            evaled = ""
        elif to_eval == "endif":
            if_state.pop()
            evaled = ""
        elif ndollar == 1:
            if ignore:
                evaled = ""
            else:
                # Pure expression substitution; must not emit any code.
                evaled = str(eval(to_eval, globals(), locals_))
                assert not get_written()
        else:
            if not ignore:
                exec to_eval in globals(), locals_
                evaled = get_written()
            em = CodeEmitter(_em)
            if ndollar == 3:
                end += 3
#            assert m.start() == 0, l
#            evaled = "    " + evaled
        assert not get_written(), get_written()
        if ignore:
            return convert(em, lineno, l[end + 1:])
        else:
            return l[:m.start()] + evaled + convert(em, lineno, l[end + 1:])
    # Join $$$( ... )$$$ sections into a single logical line before converting.
    in_3dollar = False
    for_3dollar = []
    for lineno, l in enumerate(lines):
        if "$$$(" in l:
            assert not in_3dollar, "can't nest $$$ sections"
            in_3dollar = True
        if in_3dollar:
            for_3dollar.append(l)
            if ")$$$" in l:
                # Strip off leading whitespace:
                for i, c in enumerate(for_3dollar[0]):
                    if c != ' ':
                        break
                for_3dollar = [s[i:] for s in for_3dollar]
                l = '\n'.join(for_3dollar)
                for_3dollar = []
                in_3dollar = False
            else:
                continue
        try:
            newlines.append(convert(CodeEmitter(_em), lineno, l))
            # Collapse runs of blank output lines.
            if len(newlines) >= 2 and newlines[-1] == newlines[-2] == '':
                newlines.pop()
        except Exception:
            print >>sys.stderr, "failed to convert line %d of %s template '%s', args %s" % (lineno + 1, output, name, locals_)
            # print l
            raise
    assert not in_3dollar, "unterminated $$$ section"
    return '\n'.join(newlines)
def raw_func_name(llvm_func_name):
    """Strip the leading '@' from an LLVM function name.

    '#!'-prefixed pseudo-names pass through unchanged; anything else is an error.
    """
    if llvm_func_name.startswith("#!"):
        return llvm_func_name
    if llvm_func_name.startswith('@'):
        return llvm_func_name[1:]
    raise Exception(llvm_func_name)
class Variable(object):
    """A typed value tracked during code generation.

    Pairs a compile-time type ``t`` (an MT) with its runtime representation
    ``v`` (an LLVM register/global name, a constant, or a tuple for unboxed
    aggregates), plus a compiler-side virtual refcount ``nrefs`` and a
    ``marked`` flag recording whether this value owns a runtime reference
    that must be released (via __decref__) when the last virtual reference
    is dropped.
    """
    def __init__(self, t, v, nrefs, marked):
        # Heavy representation-invariant checking: each MT family constrains
        # what shape of runtime value ``v`` may take.
        assert isinstance(t, MT)
        if isinstance(t, UnboxedFunctionMT):
            assert t.ndefaults == len(v[1])
        if t is Float:
            # Make sure that the compiler doesn't use any potentially-lossy float representations
            assert not isinstance(v, float), v
            if isinstance(v, str):
                assert not re.match("\d*\.?\d*$", v), v
        if isinstance(t, (StrConstantMT, UnboxedInstanceMethod, _SpecialFuncMT, UnboxedFunctionMT, ClassMT, UnboxedTupleMT, PolymorphicFunctionMT)):
            assert isinstance(v, tuple), (t, v)
            if isinstance(t, UnboxedFunctionMT):
                assert len(v) == 3
            assert not marked
        else:
            if isinstance(t, ClosureMT):
                assert isinstance(v, str) or v is None
            else:
                assert isinstance(v, (int, float, str)), (t, v)
        assert isinstance(nrefs, int)
        assert isinstance(marked, bool)
        self.t = t
        self.v = v
        self.nrefs = nrefs
        self.marked = marked
    # def mkwrap(n):
    # _f = getattr(self.t, n)
    # def inner(c, *args):
    # return _f(c, self.v, *args)
    # return inner
    # TODO shouldnt be forgetting about owned references?
    # def __del__(self):
    # if self.owned:
    # import traceback
    # traceback.print_stack()
    if DEBUG_CHECKS:
        def __getattribute__(self, n):
            # Allow Variable methods, but only those, to operate on things with 0 nrefs
            if sys._getframe(1).f_code not in Variable._ok_code:
                assert object.__getattribute__(self, "nrefs") > 0, n
            return object.__getattribute__(self, n)
    def equiv(self, rhs):
        """Structural equality, recursing into Variables nested inside tuple values."""
        assert self.nrefs > 0
        # hax:
        d1 = dict(self.__dict__)
        d2 = dict(rhs.__dict__)
        v1 = d1.pop('v')
        v2 = d2.pop('v')
        if isinstance(v1, tuple):
            assert isinstance(v2, tuple)
            assert len(v1) == len(v2)
            for i in xrange(len(v1)):
                if isinstance(v1[i], Variable):
                    if not v1[i].equiv(v2[i]):
                        return False
                elif isinstance(v1[i], list):
                    assert isinstance(v2[i], list)
                    l1 = v1[i]
                    l2 = v2[i]
                    if len(l1) != len(l2):
                        return False
                    for j in xrange(len(l1)):
                        if not l1[j].equiv(l2[j]):
                            return False
                elif v1[i] != v2[i]:
                    return False
        else:
            if v1 != v2:
                return False
        return d1 == d2
        # return self.t == rhs.t and self.v == rhs.v and self.nrefs == rhs.nrefs and
    def split(self, em):
        """
        Split off a copy of this variable, and ensure it's marked
        """
        assert is_emitter(em), em
        """
        assert self.nrefs > 0
        if not self.marked:
            r = self.t.incref_llvm(em, self.v)
            if r:
                em.pl(r)
        else:
            # Optimization: if this was marked, just "transfer" the mark
            self.marked = False
        return Variable(self.t, self.v, 1, True)
        """
        # Emit a runtime incref in both backends; the new copy owns a reference.
        assert self.nrefs > 0
        r = self.t.incref_llvm(em, self.v)
        if r:
            em.pl(r + " ; split")
        r = self.t.incref_c(em, self.v)
        if r:
            em.pc(r + "; // split")
        return Variable(self.t, self.v, 1, True)
    def dup(self, dup_cache):
        """
        Makes a copy of this variable object.
        Only makes sense at a meta level, ie we want to distribute copies of this variable
        to all successor nodes.
        """
        assert self.nrefs > 0
        if self not in dup_cache:
            dup_cache[self] = Variable(self.t, self.t.dup(self.v, dup_cache), self.nrefs, self.marked)
        return dup_cache[self]
    def incvref(self, em):
        # Add one compiler-side (virtual) reference; no runtime code emitted.
        assert is_emitter(em), em
        assert self.nrefs > 0
        self.nrefs += 1
        # print "incvref %s" % self, self.nrefs
        # em.pl("; inc: %s now has %d vrefs" % (self.v, self.nrefs))
    def decvref(self, em, note=None):
        # Drop one virtual reference; on reaching zero, release the runtime
        # value (free unboxed aggregates, or emit __decref__ if marked).
        assert is_emitter(em)
        # print; import traceback; traceback.print_stack()
        assert self.nrefs > 0, (self, self.t)
        self.nrefs -= 1
        # print "decvref %s" % self, self.nrefs
        # em.pl("; dec: %s now has %d vrefs" % (self.v, self.nrefs))
        if self.nrefs == 0:
            if isinstance(self.v, tuple):
                assert not self.marked
                self.t.free(em, self.v)
            elif self.marked:
                self.nrefs += 2 # for the getattr; add 2 since we don't want it to hit zero again
                f = self.getattr(em, "__decref__")
                f.call(em, [])
                self.nrefs -= 1 # for the extra +1 nrefs
                assert self.nrefs == 0, self.nrefs
            # This shouldn't matter but it's safer:
            self.marked = False
    def getattr(self, em, attr, clsonly=False, decvref=True, **kw):
        # Attribute access consumes one virtual reference on this Variable
        # (unless decvref=False).
        assert is_emitter(em)
        assert self.nrefs > 0
        try:
            r = self.t.getattr(em, self, attr, clsonly=clsonly, **kw)
        finally:
            if decvref:
                self.decvref(em, "getattr")
        return r
    def getattrptr(self, em, attr):
        assert is_emitter(em)
        assert self.nrefs > 0
        try:
            r = self.t.getattrptr(em, self, attr)
        finally:
            self.decvref(em, "getattrptr")
        return r
    def setattr(self, em, attr, var):
        assert is_emitter(em)
        assert self.nrefs > 0
        assert isinstance(var, Variable)
        r = self.t.setattr(em, self.v, attr, var)
        self.decvref(em, "setattr")
        return r
    def call(self, em, args, expected_type=None):
        # Calling consumes the callee's virtual reference.
        assert is_emitter(em)
        assert self.nrefs > 0
        try:
            r = self.t.call(em, self.v, args, expected_type=expected_type)
        finally:
            self.decvref(em, "call [was func]")
        return r
    def convert_to(self, em, t):
        # Delegates to the type object; conversion semantics live on the MTs.
        assert is_emitter(em)
        assert self.nrefs > 0
        assert isinstance(t, MT)
        return self.t.convert_to(em, self, t)
# Snapshot the code objects of Variable's own methods: the DEBUG_CHECKS
# __getattribute__ guard lets only these operate on a Variable with 0 nrefs.
Variable._ok_code = list(func.func_code for name, func in inspect.getmembers(Variable, inspect.ismethod))
class MT(object):
    """Base class for all compile-time ("meta") types.

    Subclasses describe how values of the type are represented in LLVM/C,
    how attributes are resolved, and how values convert to other types.
    Initialization is staged ("attrs", then "write") via initialize().
    """
    def __init__(self):
        self.initialized = set()
        self.initializing = set()
        self.__st = ''.join(traceback.format_stack()) # TODO take this out once I'm done debugging
    def __check_initialized(self, stage):
        # Dump the creation stack trace to help find who forgot to initialize.
        if stage not in self.initialized:
            print self.__st
            raise Exception(self)
    def c_type(self):
        # Default: derive the C type name from the LLVM type ("%foo" -> "foo").
        l = self.llvm_type()
        if l.startswith('%'):
            return l[1:]
        assert l in ('i1', 'i64', "double")
        return l
    def incref_llvm(self, em, v):
        # Emit a __incref__ call into a scratch emitter and return the LLVM text.
        assert is_emitter(em)
        emitter = CodeEmitter(em)
        Variable(self, v, 1, False).getattr(emitter, "__incref__").call(emitter, [])
        return emitter.get_llvm()
    def incref_c(self, em, v):
        assert is_emitter(em)
        emitter = CodeEmitter(em)
        Variable(self, v, 1, False).getattr(emitter, "__incref__").call(emitter, [])
        return emitter.get_c()
    def decref_llvm(self, em, v):
        assert is_emitter(em)
        emitter = CodeEmitter(em)
        Variable(self, v, 1, False).getattr(emitter, "__decref__").call(emitter, [])
        return emitter.get_llvm()
    def decref_c(self, em, v):
        assert is_emitter(em)
        emitter = CodeEmitter(em)
        Variable(self, v, 1, False).getattr(emitter, "__decref__").call(emitter, [])
        return emitter.get_c()
    # All types default to raised types
    def get_instantiated(self):
        return self
    def dup(self, v, dup_cache):
        # Scalar runtime values need no deep copy.
        assert isinstance(v, (float, int, str)), (self, repr(v))
        return v
    def initialize(self, em, stage):
        # Run _initialize() once per stage; "write" implies "attrs" first.
        assert stage in ("attrs", "write")
        if stage == "write":
            self.initialize(em, "attrs")
        assert stage not in self.initializing
        if stage not in self.initialized:
            self.initializing.add(stage)
            self._initialize(em, stage)
            self.initializing.remove(stage)
            self.initialized.add(stage)
    def get_attr_types(self):
        self.__check_initialized("attrs")
        if hasattr(self, "class_methods"):
            r = {}
            for name, v in self.class_methods.iteritems():
                if name in r:
                    continue
                r[name] = (UnboxedInstanceMethod(self, v.t), AttributeAccessType.CONST_METHOD)
            if hasattr(self, "typeobj_name"):
                r["__class__"] = (Type, AttributeAccessType.IMPLICIT_FIELD)
            return r
        raise NotImplementedError(type(self))
    def getattr(self, em, v, attr, clsonly):
        # Default resolution: __class__ as an implicit field, otherwise bind
        # a class_methods entry into an instancemethod.
        self.__check_initialized("attrs")
        if attr == "__class__" and hasattr(self, "typeobj_name"):
            return Variable(Type, self.typeobj_name, 1, False)
        if hasattr(self, "class_methods"):
            if attr not in self.class_methods:
                raise UserAttributeError((self, attr))
            r = self.class_methods[attr]
            r.incvref(em)
            return UnboxedInstanceMethod.make(em, v, r)
        raise NotImplementedError((type(self), attr))
    def hasattr(self, attr):
        self.__check_initialized("attrs")
        if hasattr(self, "class_methods"):
            return attr in self.class_methods
        raise NotImplementedError(type(self))
    def get_method_name(self, em, name):
        """Return the raw runtime symbol name implementing method *name*."""
        self.__check_initialized("attrs")
        fake_var = Variable(None_, "null", 1, False)
        attr = self.getattr(em, fake_var, name, clsonly=True)
        assert isinstance(attr.t, UnboxedInstanceMethod)
        f = attr.v[1]
        assert isinstance(f, Variable)
        if isinstance(f.t, PolymorphicFunctionMT):
            f = f.v[0][0]
        assert isinstance(f.t, UnboxedFunctionMT), f.t
        assert len(f.v) == 3
        assert f.v[2] is None
        r = f.v[0]
        attr.decvref(em)
        return raw_func_name(r)
    def _can_convert_to(self, t):
        return False
    def can_convert_to(self, t):
        # Identity always converts; boxed targets decide for themselves.
        if t is self:
            return True
        if isinstance(t, BoxedMT):
            return t.can_convert_from(self)
        return self._can_convert_to(t)
    def _convert_to(self, em, var, t):
        if isinstance(t, BoxedMT):
            return t.convert_from(em, var)
        raise UserTypeError("Don't know how to convert %s to %s" % (self, t))
    def convert_to(self, em, var, t):
        self.initialize(em, "write")
        assert var.t is self
        if t is self:
            return var
        r = self._convert_to(em, var, t)
        assert r.t is t, (self, t, r.t.__dict__, t.__dict__)
        return r
    def call(self, em, v, args, expected_type=None):
        # Calling a value means calling its __call__ attribute.
        self.initialize(em, "write")
        return self.getattr(em, Variable(self, v, 1, False), "__call__", clsonly=True).call(em, args, expected_type=expected_type)
def singleton(cls):
    """Instantiate *cls* once and disable further construction.

    Setting __init__ to None makes any later attempt to instantiate the
    class blow up, enforcing the single shared instance.
    """
    instance = cls()
    cls.__init__ = None
    return instance
class SliceMT(MT):
    """Meta-type for slice objects (start/end/step triples)."""
    def llvm_type(self):
        return "%slice*"
    @staticmethod
    def setup_class_methods():
        # Deferred until the Slice/None_ singletons exist (see module bottom).
        SliceMT.class_methods = {
            "__incref__": Variable(UnboxedFunctionMT(None, None, [Slice], None_), ("@slice_incref", [], None), 1, False),
            "__decref__": Variable(UnboxedFunctionMT(None, None, [Slice], None_), ("@slice_decref", [], None), 1, False),
        }
    def _can_convert_to(self, t):
        return False
    @staticmethod
    def create(em, start, end, step):
        """Emit a call to @slice_alloc; start/end/step are Variables or None.

        The flags word tells the runtime which bounds were given explicitly
        (bit 0 = start, bit 1 = end).
        """
        assert start is None or isinstance(start, Variable)
        assert end is None or isinstance(end, Variable)
        assert step is None or isinstance(step, Variable)
        flags = 0
        if start:
            flags |= 1
        if end:
            flags |= 2
        name = "%" + em.mkname()
        start_s = start.v if start else 0
        end_s = end.v if end else 0
        step_s = step.v if step else 1
        em.pl("%s = call %%slice* @slice_alloc(i64 %d, i64 %s, i64 %s, i64 %s)" % (name, flags, start_s, end_s, step_s))
        # C backend not implemented for slices yet:
        em.pc("#error unimplemented 0")
        for arg in start, end, step:
            if arg:
                arg.decvref(em)
        return Variable(Slice, name, 1, True)
# Shared singleton; marked fully initialized up front (class_methods are
# installed later by SliceMT.setup_class_methods()).
# NOTE(review): `initialized` becomes a tuple here but a set in MT.__init__;
# only membership tests run against it, so both work.
Slice = singleton(SliceMT)
Slice.initialized = ("attrs", "write")
class StrConstantMT(MT):
    """Meta-type for compile-time-known string constants.

    Runtime value is a 1-tuple holding the Python string; conversion to the
    real Str type materializes (and caches) a global %string.
    """
    def dup(self, v, dup_cache):
        return v
    def free(self, em, v):
        # Nothing to release: the constant has no runtime allocation.
        pass
    def getattr(self, em, v, attr, clsonly):
        if attr == "__mod__":
            return UnboxedInstanceMethod.make(em, v, Variable(StrFormat, (), 1, False))
        # Returning a method that takes a str will cause this StrConstant
        # to get upconverted on every call to the method; not most optimized
        # but it works out:
        return Str.getattr(em, v, attr, clsonly)
    def get_instantiated(self):
        return Str
    def _can_convert_to(self, t):
        return t is Str or Str._can_convert_to(t)
    # Cache of already-materialized string globals, keyed by constant value.
    __converted = {}
    def _convert_to(self, em, var, t):
        # Convert to anything Str can reach by going through Str first.
        if t is not Str:
            str_v = self._convert_to(em, var, Str)
            return Str.convert_to(em, str_v, t)
        assert t is Str, t
        # Unpack the 1-tuple runtime value.
        [s] = var.v
        if s not in StrConstantMT.__converted:
            r = "@" + em.mkname("str")
            ptr = em.get_str_ptr(s)
            em.llvm_tail.write("%s = global %%string {i64 1, i64 %d, i8* %s, [0 x i8] zeroinitializer}\n" % (r, len(s), ptr))
            StrConstantMT.__converted[s] = r
        var.decvref(em)
        return Variable(Str, StrConstantMT.__converted[s], 1, False)
StrConstant = singleton(StrConstantMT)
StrConstant.initialized = ("attrs", "write")
class NoneMT(MT):
    """Type of the None singleton, represented at runtime as a %none* (always null)."""
    def llvm_type(self):
        return "%none*"
    @staticmethod
    def setup_class_methods():
        em = None
        NoneMT.class_methods = {
            "__incref__": Variable(UnboxedFunctionMT(em, None, [None_], None_), ("@none_incref", [], None), 1, False),
            "__decref__": Variable(UnboxedFunctionMT(em, None, [None_], None_), ("@none_decref", [], None), 1, False),
            "__repr__": Variable(UnboxedFunctionMT(em, None, [None_], Str), ("@none_repr", [], None), 1, False),
            "__eq__": Variable(UnboxedFunctionMT(em, None, [None_, None_], Bool), ("@none_eq", [], None), 1, False),
        }
        # str(None) and repr(None) share the same runtime function.
        NoneMT.class_methods["__str__"] = NoneMT.class_methods["__repr__"]
    def c_type(self):
        return "void*"
    def dup(self, v, dup_cache):
        return v
    def _can_convert_to(self, t):
        # NOTE(review): this tuple mixes type *instances* (Int, Float, Bool) with
        # type *classes* (StrConstantMT, UnboxedTupleMT, ...).  Membership against
        # the classes can never match an MT instance, so those entries look
        # ineffective -- possibly isinstance() checks were intended.  Confirm
        # before relying on this.
        return t not in (Int, Float, Bool, StrConstantMT, UnboxedTupleMT, UnboxedInstanceMethod, UnboxedFunctionMT)
    def _convert_to(self, em, var, t):
        # None converts to any pointer-like target as a null value.
        assert t is not None_
        var.decvref(em)
        return Variable(t, "null", 1, True)
None_ = singleton(NoneMT)
None_.initialized = ("attrs", "write")
class _SpecialFuncMT(MT):
    """Shared base for the compiler's builtin pseudo-functions (len, str, map, ...).

    Instances carry no runtime payload: the value is always the empty tuple,
    no support code needs emitting, and they never convert to anything else.
    Subclasses supply the call() behavior.
    """
    def __init__(self):
        super(_SpecialFuncMT, self).__init__()
        # Builtins need no codegen, so both initialization stages are pre-completed.
        self.initialized = ("attrs", "write")
    def _can_convert_to(self, t):
        return False
    def dup(self, v, dup_cache):
        # The value must be empty; there is nothing to copy.
        assert not v
        return v
    def free(self, em, v):
        # Nothing is held, so freeing is a no-op.
        assert v == ()
        return None
    def call(self, em, v, args, expected_type=None):
        """Subclasses must implement the actual call semantics."""
        raise NotImplementedError()
class LenMT(_SpecialFuncMT):
    """The builtin len(): delegates to the argument's __len__ method."""
    def call(self, em, v, args, expected_type=None):
        assert not v
        assert len(args) == 1
        target, = args
        bound = target.getattr(em, "__len__", clsonly=True)
        return bound.call(em, [])
Len = singleton(LenMT)
class StrFuncMT(_SpecialFuncMT):
    """The builtin str(): delegates to the argument's __str__ method."""
    def call(self, em, v, args, expected_type=None):
        assert not v
        assert len(args) == 1
        target, = args
        bound = target.getattr(em, "__str__", clsonly=True)
        return bound.call(em, [])
StrFunc = singleton(StrFuncMT)
class ReprFuncMT(_SpecialFuncMT):
    """The builtin repr(): delegates to the argument's __repr__ method."""
    def call(self, em, v, args, expected_type=None):
        assert not v
        assert len(args) == 1
        target, = args
        bound = target.getattr(em, "__repr__", clsonly=True)
        return bound.call(em, [])
ReprFunc = singleton(ReprFuncMT)
class NrefMT(_SpecialFuncMT):
    """Debugging builtin: reports an object's refcount via its __nrefs__ method."""
    def call(self, em, v, args, expected_type=None):
        assert not v
        assert len(args) == 1
        target, = args
        bound = target.getattr(em, "__nrefs__", clsonly=True)
        return bound.call(em, [])
Nref = singleton(NrefMT)
class TypeFuncMT(_SpecialFuncMT):
    """The builtin type(): reads the argument's __class__ attribute."""
    def call(self, em, v, args, expected_type=None):
        assert not v
        assert len(args) == 1
        target, = args
        return target.getattr(em, "__class__")
TypeFunc = singleton(TypeFuncMT)
class BoolFuncMT(_SpecialFuncMT):
    """The builtin bool(): calls __nonzero__ and narrows an int result to a Bool."""
    def call(self, em, v, args, expected_type=None):
        assert not v
        assert len(args) == 1
        [_v] = args
        r = _v.getattr(em, "__nonzero__", clsonly=True).call(em, [])
        assert r.t in (Int, Bool), "__nonzero__ should return bool or int, returned %s" % (r.t,)
        if r.t is Int:
            # Truncate the i64 down to an i1 so the result is a proper Bool.
            new_name = "%" + em.mkname()
            em.pl("%s = trunc i64 %s to i1" % (new_name, r.v))
            em.pc("#error unimplemented 1")
            r.decvref(em)
            r = Variable(Bool, new_name, 1, False)
        return r
BoolFunc = singleton(BoolFuncMT)
class IsinstanceMT(_SpecialFuncMT):
    """The builtin isinstance(obj, cls) for user classes.

    For unboxed instances the answer is known at compile time and folds to a
    constant; for boxed values the class object pointer is compared at runtime.
    """
    def call(self, em, v, args, expected_type=None):
        assert not v
        assert len(args) == 2
        obj, cls = args
        assert isinstance(cls.t, ClassMT)
        if isinstance(obj.t, InstanceMT):
            # Statically-known type: fold to a constant 0/1 Bool.
            r = obj.t.cls is cls.t
            obj.decvref(em)
            cls.decvref(em)
            return Variable(Bool, str(int(r)), 1, False)
        elif isinstance(obj.t, BoxedMT):
            # Runtime check: compare the boxed object's class pointer to cls's.
            realclass = obj.getattr(em, "__class__")
            r = "%" + em.mkname()
            thisclass = cls.t.get_typeobj(em)
            assert realclass.t is Type
            assert thisclass.t is Type
            em.pl("%s = icmp eq %s %s, %s" % (r, Type.llvm_type(), realclass.v, thisclass.v))
            em.pc("#error unimplemented 2")
            cls.decvref(em)
            return Variable(Bool, r, 1, True)
        else:
            raise Exception(obj.t)
Isinstance = singleton(IsinstanceMT)
class CastMT(_SpecialFuncMT):
    """Unchecked cast(obj, cls): strip a boxed value down to cls's unboxed form.

    No runtime type check is performed; the caller is trusted.
    NOTE(review): obj's reference does not appear to be released here -- confirm
    whether the underlying-field extraction is meant to consume it.
    """
    def call(self, em, v, args, expected_type=None):
        assert not v
        assert len(args) == 2
        obj, cls = args
        assert isinstance(cls.t, ClassMT)
        instance_t = cls.t._instance
        if isinstance(obj.t, BoxedMT):
            underlying = obj.t._struct.get(em, obj.v, BoxedMT.UNDERLYING_FIELD_NAME, skip_incref=True)
            name = "%" + em.mkname()
            if instance_t is Float:
                # Floats are recovered through a pointer-punning helper, not a bitcast.
                em.pl("%s = call %s @ptr_to_float(%s %s)" % (name, instance_t.llvm_type(), underlying.t.llvm_type(), underlying.v))
            else:
                # Pointer-sized payloads: ints/bools need ptrtoint, everything else bitcast.
                llvm_cmd = "bitcast"
                if instance_t is Int or instance_t is Bool:
                    llvm_cmd = "ptrtoint"
                em.pl("%s = %s %s %s to %s" % (name, llvm_cmd, underlying.t.llvm_type(), underlying.v, instance_t.llvm_type()))
            em.pc("#error unimplemented 3")
            return Variable(instance_t, name, 1, False)
        else:
            raise Exception(obj.t)
Cast = singleton(CastMT)
class StrFormatMT(_SpecialFuncMT):
    """Compile-time '%' formatting of a string literal against its arguments."""
    def _extract_format_chars(self, s):
        """Return the conversion characters of *s* in order, e.g. "%d %s" -> ['d', 's'].

        Flag/width/precision characters are skipped and '%%' is ignored.
        """
        cur = 0
        rtn = []
        while cur < len(s):
            if s[cur] != '%':
                cur += 1
                continue
            cur += 1
            # Skip flags, width and precision between '%' and the conversion char.
            while s[cur] in " 0123456789+-.#*":
                cur += 1
            c = s[cur]
            cur += 1
            if c == '%':
                continue
            assert c.isalpha()
            rtn.append(c)
        return rtn
    def can_call(self, args):
        return True
    def call(self, em, v, args, expected_type=None):
        """Type-check the format against its operands and emit a @str_format call."""
        assert len(args) == 2
        assert args[0].t is StrConstant, args[0].t
        [fmt] = args[0].v
        # The right-hand side may be an unboxed tuple of values or a single value.
        if isinstance(args[1].t, UnboxedTupleMT):
            data = list(args[1].v)
        elif isinstance(args[1].t, TupleMT):
            raise NotImplementedError()
        else:
            data = [args[1]]
        chars = self._extract_format_chars(fmt)
        assert len(chars) == len(data), "Wrong number of format arguments specified (need %d but got %d)" % (len(chars), len(data))
        # Check each conversion's operand type; %s upconverts non-strings via str().
        for i in xrange(len(chars)):
            ch = chars[i]
            if ch == 'd':
                assert data[i].t is Int
            elif ch == 's':
                if data[i].t is not Str:
                    data[i] = StrFunc.call(em, (), [data[i]])
                assert data[i].t is Str
            elif ch == 'f':
                assert data[i].t is Float
            else:
                raise Exception("Unsupported format character: '%s'" % ch)
        s = args[0].convert_to(em, Str)
        name = '%' + em.mkname()
        em.pl("%s = call %%string* (%%string*, ...)* @str_format(%s)" % (name, ', '.join(["%s %s" % (v.t.llvm_type(), v.v) for v in [s] + data])))
        em.pc("#error unimplemented 4")
        for d in [s] + data:
            d.decvref(em)
        return Variable(Str, name, 1, True)
StrFormat = singleton(StrFormatMT)
class MapFuncMT(_SpecialFuncMT):
    """The builtin map(f, list): emits (and caches) a specialized C map routine."""
    def call(self, em, v, args, expected_type=None):
        assert len(args) == 2
        args[0] = args[0].convert_to(em, args[0].t.get_instantiated())
        assert isinstance(args[0].t, CallableMT), (args[0].t,)
        assert isinstance(args[1].t, ListMT)
        f = args[0]
        l = args[1]
        # f must be callable with exactly one non-defaulted argument.
        assert len(f.t.arg_types) >= 1 and len(f.t.arg_types) - f.t.ndefaults <= 1, (len(f.t.arg_types), f.t.ndefaults)
        assert f.t.arg_types[0] is l.t.elt_type
        name = MapFuncMT.get_name(em, f.t.arg_types[0], f.t.rtn_type)
        # Normalize f to the single-argument callable shape the generated map expects.
        callable_type = CallableMT.make_callable([f.t.arg_types[0]], 0, f.t.rtn_type)
        f = args[0] = f.convert_to(em, callable_type)
        r = '%' + em.mkname()
        r_type = ListMT.make_list(f.t.rtn_type)
        r_type.initialize(em, "write")
        em.pl('%s = call %s @%s(%s %s, %s %s)' % (r, r_type.llvm_type(), name, f.t.llvm_type(), f.v, l.t.llvm_type(), l.v))
        em.pc("#error unimplemented 5")
        for a in args:
            a.decvref(em)
        return Variable(r_type, r, 1, True)
    # (arg_type, ret_type) -> name of the already-emitted map specialization.
    __made_maps = {}
    @staticmethod
    def get_name(em, arg_type, ret_type):
        """Emit (once) and return the name of the map specialization for these types."""
        mem_key = (arg_type, ret_type)
        if mem_key not in MapFuncMT.__made_maps:
            name = "map%d" % len(MapFuncMT.__made_maps)
            template = eval_ctemplate("map", em, {
                'input_type':arg_type,
                'output_type':ret_type,
                'name':name,
            })
            em.c_head.write(template)
            MapFuncMT.__made_maps[mem_key] = name
            # Ensure every involved type has its runtime support emitted.
            ret_list = ListMT.make_list(ret_type)
            ret_list.initialize(em, "write")
            arg_list = ListMT.make_list(arg_type)
            arg_list.initialize(em, "write")
            callable_type = CallableMT.make_callable([arg_type], 0, ret_type)
            callable_type.initialize(em, "write")
            em.llvm_head.write("declare %s @%s(%s, %s)\n" % (ListMT.make_list(ret_type).llvm_type(), name, callable_type.llvm_type(), ListMT.make_list(arg_type).llvm_type()))
        return MapFuncMT.__made_maps[mem_key]
MapFunc = singleton(MapFuncMT)
class ReduceFuncMT(_SpecialFuncMT):
    """The builtin reduce(f, list, initial): emits (and caches) a specialized C routine.

    The accumulator type is the common supertype of f's return type and the
    initial value's type, so mixed reductions (e.g. int seeded with float)
    still typecheck.
    """
    def call(self, em, v, args, expected_type=None):
        assert len(args) == 3
        f, l, initial = args
        f = f.convert_to(em, f.t.get_instantiated())
        assert isinstance(f.t, CallableMT), (f.t,)
        assert isinstance(l.t, ListMT)
        accum_type = make_common_supertype([f.t.rtn_type, initial.t])
        # (Removed a leftover debug `print accum_type, ...` that polluted compiler stdout.)
        elt_type = l.t.elt_type
        # f must be a two-argument function: (accumulator, element).
        assert len(f.t.arg_types) == 2
        assert accum_type.can_convert_to(f.t.arg_types[0]), (accum_type, f.t.arg_types[0])
        assert elt_type.can_convert_to(f.t.arg_types[1])
        name = ReduceFuncMT.get_name(em, accum_type, elt_type)
        callable_type = CallableMT.make_callable([accum_type, elt_type], 0, accum_type)
        converted_f = f.convert_to(em, callable_type)
        converted_initial = initial.convert_to(em, accum_type)
        r = '%' + em.mkname()
        em.pl('%s = call %s @%s(%s %s, %s %s, %s %s)' % (r, accum_type.llvm_type(), name, converted_f.t.llvm_type(), converted_f.v, l.t.llvm_type(), l.v, accum_type.llvm_type(), converted_initial.v))
        em.pc("#error unimplemented 5")
        for a in (converted_f, l, converted_initial):
            a.decvref(em)
        return Variable(accum_type, r, 1, True)
    # (accum_type, elt_type) -> name of the already-emitted reduce specialization.
    __made_reduces = {}
    @staticmethod
    def get_name(em, accum_type, elt_type):
        """Emit (once) and return the name of the reduce specialization for these types."""
        mem_key = (accum_type, elt_type)
        if mem_key not in ReduceFuncMT.__made_reduces:
            name = "reduce%d" % len(ReduceFuncMT.__made_reduces)
            arg_list = ListMT.make_list(elt_type)
            arg_list.initialize(em, "write")
            callable_type = CallableMT.make_callable([accum_type, elt_type], 0, accum_type)
            callable_type.initialize(em, "write")
            template = eval_ctemplate("reduce", em, {
                'accum_type':accum_type,
                'elt_type':elt_type,
                'list_type':arg_list,
                'callable_type':callable_type,
                'name':name,
            })
            em.c_head.write(template)
            ReduceFuncMT.__made_reduces[mem_key] = name
            em.llvm_head.write("declare %s @%s(%s, %s, %s)\n" % (accum_type.llvm_type(), name, callable_type.llvm_type(), ListMT.make_list(elt_type).llvm_type(), accum_type.llvm_type()))
        return ReduceFuncMT.__made_reduces[mem_key]
ReduceFunc = singleton(ReduceFuncMT)
class EnumerateMT(_SpecialFuncMT):
    """The builtin enumerate(list): produces a list of (index, element) tuples."""
    def call(self, em, v, args, expected_type=None):
        assert len(args) == 1
        args[0] = args[0].convert_to(em, args[0].t.get_instantiated())
        assert isinstance(args[0].t, ListMT), (args[0].t,)
        l = args[0]
        name = EnumerateMT.get_name(em, l.t)
        rtn_type = ListMT.make_list(TupleMT.make_tuple([Int, l.t.elt_type]))
        r = "%" + em.mkname()
        em.pl("%s = call %s @%s(%s %s)" % (r, rtn_type.llvm_type(), name, l.t.llvm_type(), l.v))
        em.pc("#error unimplemented 6")
        l.decvref(em)
        return Variable(rtn_type, r, 1, True)
    # (list_type,) -> name of the already-emitted enumerate specialization.
    __made_names = {}
    @staticmethod
    def get_name(em, l):
        """Emit (once) and return the enumerate specialization name for list type *l*."""
        assert isinstance(l, ListMT)
        mem_key = (l,)
        if mem_key not in EnumerateMT.__made_names:
            name = "enumerate%d" % len(EnumerateMT.__made_names)
            rtn_elt_type = TupleMT.make_tuple([Int, l.elt_type])
            rtn_elt_type.initialize(em, "write")
            rtn_type = ListMT.make_list(rtn_elt_type)
            rtn_type.initialize(em, "write")
            template = eval_ctemplate("enumerate", em, {
                'input_type':l,
                'return_type':rtn_type,
                'name':name,
            })
            em.c_head.write(template)
            EnumerateMT.__made_names[mem_key] = name
            em.llvm_head.write("declare %s @%s(%s)\n" % (rtn_type.llvm_type(), name, l.llvm_type()))
        return EnumerateMT.__made_names[mem_key]
Enumerate = singleton(EnumerateMT)
class MinFuncMT(_SpecialFuncMT):
    """The builtins min()/max() over a single iterable argument.

    One instance exists per builtin; self._type ("min" or "max") selects which
    of the pair of emitted runtime specializations is called.
    """
    def __init__(self, type):
        # Fix: the base __init__ was never invoked, so the _SpecialFuncMT setup
        # (marking both init stages complete) was silently skipped for
        # MinFunc/MaxFunc while every sibling builtin received it.
        super(MinFuncMT, self).__init__()
        self._type = type
    def can_call(self, args):
        return len(args) == 1
    def call(self, em, v, args, expected_type=None):
        assert len(args) == 1
        args[0] = args[0].convert_to(em, args[0].t.get_instantiated())
        l = args[0]
        name = self._type + str(MinFuncMT.get_num(em, l.t))
        # Element type is recovered through the iterator protocol:
        # the return type of l.__iter__().next().
        rtn_type = l.t.get_attr_types()['__iter__'][0].get_instantiated().rtn_type.get_attr_types()['next'][0].get_instantiated().rtn_type
        r = "%" + em.mkname()
        em.pl("%s = call %s @%s(%s %s)" % (r, rtn_type.llvm_type(), name, l.t.llvm_type(), l.v))
        em.pc("#error unimplemented 21")
        l.decvref(em)
        return Variable(rtn_type, r, 1, True)
    # (iterable_type,) -> numeric suffix of the emitted min/max pair.
    __made_names = {}
    @staticmethod
    def get_num(em, t):
        """Emit (once) the min/max specializations for iterable type *t*; return their suffix."""
        mem_key = (t,)
        if mem_key not in MinFuncMT.__made_names:
            num = len(MinFuncMT.__made_names)
            it_type = t.get_attr_types()['__iter__'][0].get_instantiated().rtn_type
            elt_type = it_type.get_attr_types()['next'][0].get_instantiated().rtn_type
            template = eval_ctemplate("min", em, {
                't':t,
                'it':it_type,
                'et':elt_type,
                'num':num,
            })
            em.c_head.write(template)
            MinFuncMT.__made_names[mem_key] = num
            # One template emits both directions; declare both entry points.
            em.llvm_head.write("declare %s @min%s(%s)\n" % (elt_type.llvm_type(), num, t.llvm_type()))
            em.llvm_head.write("declare %s @max%s(%s)\n" % (elt_type.llvm_type(), num, t.llvm_type()))
        return MinFuncMT.__made_names[mem_key]
MinFunc = MinFuncMT("min")
MaxFunc = MinFuncMT("max")
class UnboxedFunctionMT(MT):
    """Type of a statically-known function: raw LLVM symbol + defaults + optional closure.

    The value is the triple (func_name, defaults, closure).  Boxing into a
    CallableMT goes through SimpleFunction.make.
    """
    def __init__(self, em, closure_type, arg_types, rtn_type, ndefaults=0):
        super(UnboxedFunctionMT, self).__init__()
        self.closure_type = closure_type
        self.arg_types = [a.get_instantiated() for a in arg_types]
        self.rtn_type = rtn_type.get_instantiated()
        self.ndefaults = ndefaults
    def _initialize(self, em, stage):
        self.get_instantiated().initialize(em, stage)
    def dup(self, v, dup_cache):
        func_name, defaults, closure = v
        assert len(defaults) == self.ndefaults
        return func_name, [d.dup(dup_cache) for d in defaults], closure.dup(dup_cache) if closure else None
    def free(self, em, v):
        func_name, defaults, closure = v
        assert len(defaults) == self.ndefaults
        for d in defaults:
            d.decvref(em)
        return None
    def _argtype_str(self):
        # The closure pointer (if any) is passed as a leading hidden argument.
        return ", ".join(([self.closure_type.llvm_type()] if self.closure_type else []) + [a.llvm_type() for a in self.arg_types])
    def llvm_type(self):
        ret_type = self.rtn_type.llvm_type() if self.rtn_type is not None_ else "void"
        return "%s (%s)*" % (ret_type, self._argtype_str())
    def can_call(self, args):
        # TODO should pass in defaults_types to this type
        if len(args) != len(self.arg_types):
            return False
        for i in xrange(len(args)):
            if not args[i].can_convert_to(self.arg_types[i]):
                return False
        return True
    def call(self, em, v, args, expected_type=None):
        """Emit a direct call; missing trailing arguments are filled from defaults."""
        func_name, defaults, closure = v
        assert len(defaults) == self.ndefaults
        assert len(defaults) <= len(self.arg_types)
        for d in defaults:
            assert isinstance(d, Variable)
        assert len(self.arg_types) - len(defaults) <= len(args) <= len(self.arg_types), (args, self.arg_types, defaults)
        args = list(args) # we're going to modify it
        for n in xrange(len(args), len(self.arg_types)):
            # Negative index selects the default matching argument slot n.
            arg = defaults[n - len(self.arg_types)]
            assert isinstance(arg, Variable)
            arg.incvref(em)
            args.append(arg)
            # args.append(Variable(Int, arg, 1, False))
        assert len(self.arg_types) == len(args)
        for i in xrange(len(args)):
            args[i] = args[i].convert_to(em, self.arg_types[i])
        if [_v.t for _v in args] != self.arg_types:
            raise UserTypeError([_v.t for _v in args], self.arg_types)
        prologue = ""
        if self.rtn_type is not None_:
            name = "%" + em.mkname()
            prologue = "%s = " % name
        args_plus_closure = ([closure] + args) if closure else args
        rtn_type = self.rtn_type.llvm_type() if self.rtn_type is not None_ else "void"
        em.pl("%scall %s (%s)* %s(%s)" % (prologue, rtn_type, self._argtype_str(), func_name, ", ".join("%s %s" % (_v.t.llvm_type(), _v.v) for _v in args_plus_closure)))
        if self.rtn_type is not None_:
            assert prologue.startswith("%")
            # Re-render the assignment prologue in C style for the C-side output.
            prologue = "%s " % self.rtn_type.c_type() + prologue[1:]
        if func_name.startswith('%'):
            em.pc("#error unimplemented 20")
        else:
            def local(n):
                # Strip the LLVM local-name sigil for the C rendering.
                n = str(n)
                if n.startswith('%'):
                    return n[1:]
                return n
            em.pc("%s %s(%s);" % (prologue, raw_func_name(func_name), ", ".join(local(_v.v) for _v in args_plus_closure)))
        for _v in args:
            _v.decvref(em)
        if self.rtn_type is not None_:
            return Variable(self.rtn_type, name, 1, True)
        else:
            return Variable(self.rtn_type, "null", 1, False)
    def get_instantiated(self):
        return CallableMT.make_callable(self.arg_types, self.ndefaults, self.rtn_type)
    def _can_convert_to(self, t):
        if not isinstance(t, CallableMT):
            return False
        return t.arg_types == self.arg_types and t.rtn_type == self.rtn_type
    def _convert_to(self, em, var, t):
        """Box into a CallableMT with a matching signature, possibly dropping defaults."""
        (func_name, defaults, closure) = var.v
        assert len(defaults) == self.ndefaults
        if isinstance(t, CallableMT):
            if t.arg_types == self.arg_types and t.rtn_type == self.rtn_type:
                assert (closure is None) == (self.closure_type is None)
                assert t.ndefaults <= len(defaults)
                defaults = defaults[:t.ndefaults]
                r = SimpleFunction.make(em, self.arg_types, defaults, self.rtn_type, closure, func_name)
                var.decvref(em)
                return r
            raise UserTypeError((self.arg_types, self.rtn_type, var.v[1], t.arg_types, t.rtn_type))
        raise UserTypeError(t)
class PolymorphicFunctionMT(MT):
    """An overload set: a bundle of function Variables behind one name.

    Calls and conversions pick the first member whose signature accepts the
    arguments / target type.
    """
    def __init__(self, func_types):
        super(PolymorphicFunctionMT, self).__init__()
        assert isinstance(func_types, (list, tuple))
        assert len(func_types) > 0
        self.func_types = func_types
        self.initialized = ("attrs", "write")
    def dup(self, v, dup_cache):
        funcs, = v
        return ([f.dup(dup_cache) for f in funcs],)
    def free(self, em, v):
        funcs, = v
        assert len(funcs) == len(self.func_types)
        for i in xrange(len(funcs)):
            assert funcs[i].t is self.func_types[i]
        for f in funcs:
            f.decvref(em)
    def call(self, em, v, args, expected_type=None):
        """Dispatch to the first overload that can accept the argument types."""
        funcs, = v
        assert len(funcs) == len(self.func_types)
        for i in xrange(len(funcs)):
            assert funcs[i].t is self.func_types[i]
        for f in funcs:
            if f.t.can_call([a.t for a in args]):
                f.incvref(em)
                return f.call(em, args)
        raise UserTypeError("Can't call with args %s" % (args,))
    def convert_to(self, em, var, t):
        """Convert via the first overload that supports conversion to *t*."""
        funcs, = var.v
        assert len(funcs) == len(self.func_types)
        for i in xrange(len(funcs)):
            assert funcs[i].t is self.func_types[i]
        for f in funcs:
            if f.t.can_convert_to(t):
                f.incvref(em)
                var.decvref(em)
                return f.convert_to(em, t)
        raise UserTypeError()
    def get_instantiated(self):
        # TODO this just heuristically picks the first function, but it'd be nice to be able to pick
        # the "right" one (is that the same thing as supporting rank-2 polymorphism?)
        return self.func_types[0].get_instantiated()
    @staticmethod
    def make(funcs):
        """Wrap a list of function Variables into a single polymorphic Variable."""
        for f in funcs:
            assert isinstance(f, Variable)
            # This isn't completely necessary but I haven't thought through how it would work
            assert isinstance(f.t, (UnboxedFunctionMT, _SpecialFuncMT))
        t = PolymorphicFunctionMT([f.t for f in funcs])
        return Variable(t, (funcs,), 1, False)
class UnboxedInstanceMethod(MT):
    """A bound method known at compile time: an (object, function) Variable pair.

    The value is the tuple (o, f); calling prepends o to the argument list.
    """
    def __init__(self, obj_type, func_type):
        super(UnboxedInstanceMethod, self).__init__()
        assert isinstance(obj_type, MT)
        assert isinstance(func_type, MT)
        self._obj_type = obj_type
        self._func_type = func_type
    def _initialize(self, em, stage):
        try:
            self.get_instantiated().initialize(em, stage)
        except CantInstantiateException:
            # Some wrapped function kinds (e.g. builtins) have no boxed form.
            pass
    @staticmethod
    def make(em, o, f):
        """Bind function Variable *f* to object Variable *o*, taking a ref on each."""
        assert is_emitter(em)
        assert isinstance(o, Variable)
        assert isinstance(f, Variable)
        assert isinstance(f.t, (UnboxedFunctionMT, PolymorphicFunctionMT, _SpecialFuncMT)), f.t
        o.incvref(em)
        f.incvref(em)
        t = UnboxedInstanceMethod(o.t, f.t)
        t.initialize(em, "write")
        return Variable(t, (o, f), 1, False)
    def dup(self, v, dup_cache):
        (o, f) = v
        return o.dup(dup_cache), f.dup(dup_cache)
    def can_call(self, args):
        return self._func_type.can_call([self._obj_type] + args)
    def call(self, em, v, args, expected_type=None):
        (o, f) = v
        o.incvref(em) # being passed into the function,
        f.incvref(em) # which consumes one vref
        return f.call(em, [o] + args)
    def free(self, em, v):
        (o, f) = v
        # em.pl("; UIM.decref")
        o.decvref(em)
        f.decvref(em)
        # em.pl("; UIM.decref, done")
    def get_instantiated(self):
        # The boxed form drops the self parameter from the signature.
        if isinstance(self._func_type, UnboxedFunctionMT):
            return CallableMT.make_callable(self._func_type.arg_types[1:], 0, self._func_type.rtn_type)
        elif isinstance(self._func_type, CallableMT):
            return CallableMT.make_callable(self._func_type.arg_types[1:], 0, self._func_type.rtn_type)
        else:
            raise CantInstantiateException(self._func_type)
    def _can_convert_to(self, t):
        return self.get_instantiated().can_convert_to(t)
    def _convert_to(self, em, var, t):
        """Box into a CallableMT via InstanceMethod, instantiating the receiver if needed."""
        if isinstance(t, CallableMT):
            assert isinstance(self._func_type, UnboxedFunctionMT), "unimplemented"
            if t.arg_types == self._func_type.arg_types[1:] and t.rtn_type == self._func_type.rtn_type:
                assert t == self.get_instantiated()
                (o, f) = var.v
                o.incvref(em)
                if o.t != o.t.get_instantiated():
                    o = o.convert_to(em, o.t.get_instantiated())
                r = InstanceMethod.make(em, o, f)
                o.decvref(em, "raised for UIM->IM")
                var.decvref(em, "UIM raised to IM")
                return r
        # Fallback: go through the fully-instantiated callable form.
        return var.convert_to(em, self.get_instantiated()).convert_to(em, t)
class InstanceMethod(object):
    """Factory emitting boxed bound-method objects (never instantiated itself).

    Per (object type, function type) pair it emits an im_N struct + ctor that
    closes a function over its receiver and exposes the CallableMT ABI.
    """
    # (obj_type, arg_types, rtn_type) -> name of the emitted im_N support code.
    __made_funcs = {}
    def __init__(self):
        assert 0
    @staticmethod
    def make(em, o, f):
        """Emit code constructing a boxed bound method from receiver *o* and function *f*."""
        assert o.t is o.t.get_instantiated()
        assert isinstance(f.t, UnboxedFunctionMT)
        name = InstanceMethod.get_name(em, o.t, f.t)
        args = f.t.arg_types[1:]
        ret = f.t.rtn_type
        func = CallableMT.make_callable(args, 0, ret)
        func.initialize(em, "write")
        r = "%" + em.mkname()
        o = o.split(em)
        if not isinstance(f.t, UnboxedFunctionMT):
            # Unreachable given the assert above; kept as a guard for future kinds.
            f = f.split(em)
            raise Exception("test this")
        em.pl("%s = call %s (%s, %s)* @%s_ctor(%s %s, %s %s)" % (r, func.llvm_type(), o.t.llvm_type(), f.t.llvm_type(), name, o.t.llvm_type(), o.v, f.t.llvm_type(), f.v[0]))
        em.pc("#error unimplemented 7")
        return Variable(func, r, 1, True)
    @staticmethod
    def get_name(em, ot, ft):
        """Emit (once) the im_N struct/ctor/call glue for this pairing; return its name."""
        assert isinstance(ot, MT)
        assert isinstance(ft, UnboxedFunctionMT)
        assert ot is ot.get_instantiated()
        mem_key = (ot, tuple(ft.arg_types), ft.rtn_type)
        ft.initialize(em, "write")
        ot.initialize(em, "attrs")
        if mem_key not in InstanceMethod.__made_funcs:
            name = "im_%d" % (len(InstanceMethod.__made_funcs),)
            InstanceMethod.__made_funcs[mem_key] = name
            bound_args = ft.arg_types[1:]
            ret = ft.rtn_type
            ret_type = ret.llvm_type()
            callable_mt = CallableMT.make_callable(bound_args, 0, ret)
            callable_mt.initialize(em, "write")
            callable_type = callable_mt.llvm_type()
            o_decref = ot.decref_llvm(em, "%o") or ""
            real_call_type = "%s (%s)*" % (ret_type, ", ".join(["%" + name + "*"] + [a.llvm_type() for a in bound_args]))
            call_type = "%s (%s)*" % (ret_type, ", ".join([callable_type] + [a.llvm_type() for a in bound_args]))
            func_type = ft.llvm_type()
            arg_string = ", ".join(["%s %%o" % ot.llvm_type()] + ["%s %%v%d" % (a.llvm_type(), i) for i, a in enumerate(bound_args)])
            def_args = ", ".join(["%%%s* %%self" % name] + ["%s %%v%d" % (a.llvm_type(), i) for i, a in enumerate(bound_args)])
            evaluated = eval_template("im", em, {
                'n':name,
                'r':ret_type,
                'ft':ft,
                'ot':ot,
                'bound_args':bound_args,
                "callable_type":callable_type,
                "call_type":call_type,
                "real_call_type":real_call_type,
                "arg_string":arg_string,
                "func_type":func_type,
                "def_args":def_args,
                "obj_type":ot.llvm_type(),
                "f_decref":"", # Only supporting UnboxedFunctionMT for now
                "o_decref":o_decref,
                "vtable_t":callable_type[:-1] + "_vtable",
                "alloc_name":em.get_str_ptr(name),
            })
            evaluated = convert_none_to_void_ll(evaluated)
            em.llvm_tail.write(evaluated)
        return InstanceMethod.__made_funcs[mem_key]
class CallableMT(MT):
    """Boxed function-object type for one (arg_types, ndefaults, rtn_type) signature.

    Instances are memoized in make_callable, so signature equality implies
    identity; each one owns an emitted %c_N runtime struct with a vtable of
    call entry points, one per allowed argument count.
    """
    # signature -> memoized CallableMT instance.
    __made_funcs = {}
    def __init__(self, args, ndefaults, ret, name):
        super(CallableMT, self).__init__()
        for a in args:
            assert isinstance(a, MT)
        assert isinstance(ret, MT)
        assert isinstance(ndefaults, (int, long))
        assert ret.get_instantiated() is ret
        for a in args:
            assert a.get_instantiated() is a, (a, a.get_instantiated())
        self.arg_types = args
        self.ndefaults = ndefaults
        self.rtn_type = ret
        self.name = name
    def _initialize(self, em, stage):
        """Two-stage setup: "attrs" binds class methods, "write" emits runtime support."""
        if stage == "attrs":
            self.class_methods = {
                "__repr__": Variable(UnboxedFunctionMT(em, None, [self], Str), ("@%s_repr" % self.name, [], None), 1, False),
                "__eq__": Variable(UnboxedFunctionMT(em, None, [self, self], Bool), ("@%s_eq" % self.name, [], None), 1, False),
                # NOTE(review): __nonzero__ binds the same "@%s_eq" symbol as __eq__
                # despite a one-argument signature -- looks like a copy/paste slip;
                # confirm against the callable template before changing.
                "__nonzero__": Variable(UnboxedFunctionMT(em, None, [self], Bool), ("@%s_eq" % self.name, [], None), 1, False),
                "__decref__": Variable(UnboxedFunctionMT(em, None, [self], None_), ("@%s_decref" % self.name, [], None), 1, False),
                "__incref__": Variable(UnboxedFunctionMT(em, None, [self], None_), ("@%s_incref" % self.name, [], None), 1, False),
            }
            self.class_methods["__str__"] = self.class_methods["__repr__"]
        elif stage == "write":
            for a in self.arg_types:
                a.initialize(em, "attrs")
            self.rtn_type.initialize(em, "attrs")
            ret_type = self.rtn_type.llvm_type()
            arg_type_strings = []
            arg_strings = []
            call_types = []
            # One call signature per legal argument count (0..ndefaults defaults applied).
            for i in xrange(len(self.arg_types) - self.ndefaults, len(self.arg_types) + 1):
                arg_type_string = "%s" % (", ".join(["%" + self.name + "*"] + [a.llvm_type() for a in self.arg_types[:i]]))
                arg_string = ", ".join(["%%%s* %%f" % self.name] + ["%s %%v%d" % (a.llvm_type(), i) for i, a in enumerate(self.arg_types[:i])])
                call_type = "%s (%s)" % (ret_type, arg_type_string)
                arg_type_strings.append(arg_type_string)
                arg_strings.append(arg_string)
                call_types.append(call_type)
            del arg_type_string, arg_string, call_type
            evaluated = eval_template("callable", em, {
                'n':self.name,
                'call_types': ", ".join(["%s*" % ct for ct in call_types]),
                "alloc_name":em.get_str_ptr(self.name),
            })
            em.llvm_tail.write(convert_none_to_void_ll(evaluated))
            for i in xrange(self.ndefaults + 1):
                this_nargs = len(self.arg_types) - self.ndefaults + i
                evaluated = eval_template("callable_call", em, {
                    'CHECK_NONE':'',
                    'n':self.name,
                    'this_nargs': this_nargs,
                    'call_ptr_idx': i + 1,
                    'r':ret_type,
                    "this_call_type":call_types[i],
                    "this_arg_string":arg_strings[i],
                })
                evaluated = convert_none_to_void_ll(evaluated)
                em.llvm_tail.write(evaluated)
            ctemplate = eval_ctemplate("callable", em, {
                'n':self.name,
                'nargs': len(self.arg_types),
                'ndefaults': self.ndefaults,
            })
            em.c_head.write(ctemplate)
        else:
            raise Exception(stage)
    def llvm_type(self):
        return "%%%s*" % (self.name,)
    def can_call(self, args):
        if not (len(self.arg_types) - self.ndefaults <= len(args) <= len(self.arg_types)):
            return False
        for i in xrange(len(args)):
            if not args[i].can_convert_to(self.arg_types[i]):
                return False
        return True
    def call(self, em, v, args, expected_type=None):
        """Emit an indirect call through the %name_callN dispatcher for len(args)."""
        assert len(self.arg_types) - self.ndefaults <= len(args) <= len(self.arg_types), (args, self.arg_types, self.ndefaults)
        args = list(args)
        for i in xrange(len(args)):
            args[i] = args[i].convert_to(em, self.arg_types[i])
        assert [_v.t for _v in args] == self.arg_types[:len(args)], ([_v.t for _v in args], self.arg_types)
        prologue = ""
        if self.rtn_type is not None_:
            rtn_name = "%" + em.mkname()
            prologue = "%s = " % rtn_name
            ret_type = self.rtn_type.llvm_type()
            # TODO duplication
        else:
            ret_type = "void"
        type_string = "%s (%s)" % (ret_type, ", ".join([self.llvm_type()] + [a.llvm_type() for a in self.arg_types[:len(args)]]))
        arg_string = ", ".join(["%s %s" % (self.llvm_type(), v)] + ["%s %s" % (a.t.llvm_type(), a.v) for a in args])
        em.pl("%scall %s* @%s_call%d(%s)" % (prologue, type_string, self.name, len(args), arg_string))
        em.pc("#error unimplemented 8")
        for _v in args:
            _v.decvref(em)
        if self.rtn_type is not None_:
            return Variable(self.rtn_type, rtn_name, 1, True)
        else:
            return Variable(self.rtn_type, "null", 1, False)
    def _can_convert_to(self, t):
        if not isinstance(t, CallableMT):
            return False
        if t.arg_types != self.arg_types:
            return False
        if not self.rtn_type.can_convert_to(t.rtn_type):
            return False
        return True
    @staticmethod
    def make_callable(args, ndefaults, ret):
        """Return the memoized CallableMT for this signature, creating it on first use."""
        for a in args:
            assert isinstance(a, MT)
        assert isinstance(ret, MT)
        mem_key = (tuple(args), ndefaults, ret)
        if mem_key not in CallableMT.__made_funcs:
            name = "c_%d" % (len(CallableMT.__made_funcs),)
            CallableMT.__made_funcs[mem_key] = CallableMT(args, ndefaults, ret, name)
        return CallableMT.__made_funcs[mem_key]
    def _convert_to(self, em, var, t):
        """Convert between callables: a bitcast when prefix-compatible, else a reboxer."""
        assert isinstance(t, CallableMT)
        if t.ndefaults <= self.ndefaults and len(t.arg_types) - t.ndefaults == len(self.arg_types) - self.ndefaults:
            if self.arg_types[:len(t.arg_types)] == t.arg_types and self.rtn_type == t.rtn_type:
                name = "%" + em.mkname()
                em.pl("%s = bitcast %s %s to %s" % (name, self.llvm_type(), var.v, t.llvm_type()))
                rtn = Variable(t, name, 1, True)
                rtn.incvref(em)
                rtn.getattr(em, "__incref__").call(em, [])
                var.decvref(em)
                return rtn
        return _Reboxer.make(em, var, t)
    def vtable_type(self):
        return self.llvm_type()[:-1] + "_vtable"
class _Reboxer(object):
    """Emits adapters re-wrapping one CallableMT as another (never instantiated)."""
    # (orig_type, new_type) -> name of the emitted reboxer support code.
    __made_reboxers = {}
    __init__ = None
    @staticmethod
    def make(em, var, new_type):
        """Emit code wrapping callable *var* into an adapter of type *new_type*."""
        assert is_emitter(em)
        assert isinstance(var.t, CallableMT)
        assert isinstance(new_type, CallableMT)
        key = (var.t, new_type)
        if key not in _Reboxer.__made_reboxers:
            name = em.mkname(prefix="reboxer_")
            call_type = UnboxedFunctionMT(em, None, [new_type] + new_type.arg_types, new_type.rtn_type)
            _Reboxer.__made_reboxers[key] = name
            evaluated = eval_template("reboxer", em, {
                'n':name,
                'orig_type':var.t,
                'new_type':new_type,
                'call_type':call_type,
            })
            em.llvm_tail.write(evaluated)
        name = _Reboxer.__made_reboxers[key]
        em.pc("#error unimplemented 9")
        r = "%" + em.mkname()
        em.pl("%s = call %s @%s_ctor(%s %s)" % (r, new_type.llvm_type(), name, var.t.llvm_type(), var.v))
        var.decvref(em)
        return Variable(new_type, r, 1, True)
class SimpleFunction(object):
    """Factory boxing unboxed functions into CallableMT objects (never instantiated).

    Per (arg_types, ndefaults, rtn_type, closure_type) signature it emits an
    f_N struct + ctor + per-arity call wrappers, then constructs an instance
    holding the raw function pointer, the closure and the bound defaults.
    """
    # signature -> name of the already-emitted f_N support code.
    __made_funcs = {}
    __init__ = None
    @staticmethod
    def make(em, arg_types, defaults, ret_type, closure, func_name):
        """Emit (once) the boxing glue, then construct and return the boxed function."""
        ndefaults = len(defaults)
        # Convert each default to its declared parameter type up front.
        for i in xrange(ndefaults):
            arg_idx = len(arg_types) - ndefaults + i
            defaults[i].incvref(em)
            defaults[i] = defaults[i].convert_to(em, arg_types[arg_idx])
        callable_type = CallableMT.make_callable(arg_types, ndefaults, ret_type)
        callable_type.initialize(em, "write")
        unboxed = UnboxedFunctionMT(em, closure.t if closure else None, arg_types, ret_type)
        func_type = unboxed.llvm_type()
        default_types = ''.join(", %s" % (d.t.llvm_type(),) for d in defaults)
        mem_key = (tuple(arg_types), ndefaults, ret_type, closure.t if closure else None)
        closure_def_type = ", %s" % closure.t.llvm_type() if closure else ""
        if mem_key not in SimpleFunction.__made_funcs:
            name = "f_%d" % (len(SimpleFunction.__made_funcs),)
            vtable_t = callable_type.llvm_type()[:-1] + "_vtable"
            ret_name = ret_type.llvm_type() if ret_type is not None_ else "void"
            call_types = []
            # One call signature per legal argument count.
            for i in xrange(len(arg_types) - ndefaults, len(arg_types) + 1):
                arg_type_string = "%s" % (", ".join([callable_type.llvm_type()] + [a.llvm_type() for a in arg_types[:i]]))
                call_type = "%s (%s)" % (ret_name, arg_type_string)
                call_types.append(call_type)
            del arg_type_string, call_type
            defaults_argstr = ''.join(", %s %%default%d" % (d.t.llvm_type(), i) for (i, d) in enumerate(defaults))
            call_funcs = ", ".join("%s* @%s_call%d" % (call_types[i], name, len(arg_types) - ndefaults + i) for i in xrange(ndefaults + 1))
            # Struct layout: default slots start after the standard header fields
            # (plus one for the closure pointer when present).
            defaults_start = 4 if closure else 3
            store_defaults = "\n ".join("%%dptr%d = getelementptr inbounds %%%s* %%made, i32 0, i32 %d\n store %s %%default%d, %s* %%dptr%d\n %s" % (i, name, i + defaults_start, defaults[i].t.llvm_type(), i, defaults[i].t.llvm_type(), i, defaults[i].t.incref_llvm(em, "%%default%d" % (i,))) for i in xrange(len(defaults)))
            decref_defaults = "\n ".join("%%dptr%d = getelementptr inbounds %%%s* %%self, i32 0, i32 %d\n %%default%d = load %s* %%dptr%d\n %s" % (i, name, i + defaults_start, i, defaults[i].t.llvm_type(), i, defaults[i].t.decref_llvm(em, "%%default%d" % (i,))) for i in xrange(len(defaults)))
            evaluated = eval_template("function", em, {
                'n': name,
                'vtable_t': vtable_t,
                'callable_type': callable_type.llvm_type(),
                'call_types': ', '.join(call_types),
                'closure_type': closure.t.llvm_type() if closure else "X",
                'closure_def_type': closure_def_type,
                'default_types': default_types,
                'func_type': func_type,
                'IFCLOSURE': '' if closure else ';',
                'defaults_argstr': defaults_argstr,
                'closure_incref': closure.t.incref_llvm(em, "%closure") if closure else 'closure_incref',
                'closure_decref': closure.t.decref_llvm(em, "%closure") if closure else 'closure_decref',
                'call_funcs': call_funcs,
                'store_defaults': store_defaults,
                'decref_defaults': decref_defaults,
                'alloc_name': em.get_str_ptr(name),
            })
            em.llvm_tail.write(evaluated)
            # Emit one call wrapper per supported arity.
            for i in xrange(ndefaults + 1):
                this_nargs = len(arg_types) - ndefaults + i
                this_defaults = len(arg_types) - this_nargs
                call_args = ", ".join(["%s %%self" % callable_type.llvm_type()] + ["%s %%v%d" % (a.llvm_type(), i) for i, a in enumerate(arg_types[:this_nargs])])
                func_args = ", ".join(["%s %%v%d" % (a.llvm_type(), i) for i, a in enumerate(arg_types[:this_nargs])])
                # Comma bookkeeping for the template's argument splice points.
                if func_args:
                    if closure:
                        func_args = ", " + func_args
                    if this_defaults:
                        func_args += ", "
                elif closure and this_defaults:
                    func_args = ","
                load_defaults = "\n ".join("%%dptr%d = getelementptr inbounds %%%s* %%f, i32 0, i32 %d\n %%default%d = load %s* %%dptr%d\n ;%s" % (i, name, i + defaults_start, i, defaults[i].t.llvm_type(), i, defaults[i].t.incref_llvm(em, "%%default%d" % (i,))) for i in xrange(ndefaults - this_defaults, ndefaults))
                decref_defaults = "\n ".join(";%s" % (defaults[i].t.decref_llvm(em, "%%default%d" % (i,)), ) for i in xrange(ndefaults - this_defaults, ndefaults))
                defaults_argstr = ', '.join("%s %%default%d" % (defaults[i].t.llvm_type(), i) for i in xrange(ndefaults - this_defaults, ndefaults))
                evaluated = eval_template("function_call", em, {
                    'n':name,
                    'this_nargs': this_nargs,
                    'r':ret_name,
                    'func_type': func_type,
                    "call_args": call_args,
                    "func_args": func_args,
                    'closure_type': closure.t.llvm_type() if closure else "X",
                    'IFCLOSURE': '' if closure else ';',
                    "load_defaults": load_defaults,
                    "decref_defaults": decref_defaults,
                    "defaults_args": defaults_argstr,
                    'callable_type': callable_type.llvm_type(),
                })
                em.llvm_tail.write(convert_none_to_void_ll(evaluated))
            SimpleFunction.__made_funcs[mem_key] = name
        name = SimpleFunction.__made_funcs[mem_key]
        r = '%' + em.mkname()
        # TODO upconvert here
        closure_args = ", %s %s" % (closure.t.llvm_type(), closure.v) if closure else ""
        defaults_args = "".join(", %s %s" % (defaults[i].t.llvm_type(), defaults[i].v) for i in xrange(ndefaults))
        em.pl("%s = call %s (%s %s %s)* @%s_ctor(%s %s %s %s)" % (r, callable_type.llvm_type(), func_type, closure_def_type, default_types, name, func_type, func_name, closure_args, defaults_args))
        em.pc("#error unimplemented 10")
        for i in xrange(ndefaults):
            defaults[i].decvref(em)
        return Variable(callable_type, r, 1, True)
class UnboxedTupleMT(MT):
    """Tuple type whose elements exist only at compile time.

    The "value" of an unboxed tuple is a Python tuple of element Variables
    held by the compiler; no LLVM aggregate is materialized unless the
    value is converted to the boxed TupleMT form via get_instantiated().
    """
    def __init__(self, elt_types):
        # elt_types: per-element MTs; order is significant.
        super(UnboxedTupleMT, self).__init__()
        for e in elt_types:
            assert isinstance(e, MT)
        self.elt_types = tuple(elt_types)
        # No codegen is required for this type, so both init stages are
        # marked done up front.
        self.initialized = ("attrs", "write")
    def dup(self, v, dup_cache):
        # Duplicate element-wise; v is a tuple of element Variables.
        return tuple([e.dup(dup_cache) for e in v])
    def getattr(self, em, v, attr, clsonly):
        # __getitem__ is special-cased so constant-index access can stay
        # unboxed; any other attribute falls back to the boxed tuple.
        if attr == "__getitem__":
            return UnboxedInstanceMethod.make(em, v, Variable(UnboxedTupleMT.Getitem, (), 1, False))
        return self.get_instantiated().getattr(em, v, attr, clsonly)
    def free(self, em, v):
        # Releasing the tuple releases each element's vref.
        assert isinstance(v, tuple)
        for e in v:
            e.decvref(em)
    class GetitemMT(_SpecialFuncMT):
        """Compile-time tuple[constant-int] lookup; emits no LLVM code."""
        def call(self, em, v, args, expected_type=None):
            # args = (tuple, index); the index must be a compile-time int
            # within bounds.
            assert len(args) == 2
            assert isinstance(args[0].t, UnboxedTupleMT)
            assert args[1].t is Int
            idx = args[1].v
            assert isinstance(idx, int)
            assert 0 <= idx < len(args[0].t.elt_types)
            assert isinstance(args[0].v, tuple)
            assert len(args[0].t.elt_types) == len(args[0].v)
            r = args[0].v[idx]
            # Consume the call's references, then hand out a fresh one on
            # the selected element.
            args[0].decvref(em)
            args[1].decvref(em)
            r.incvref(em)
            return r
    Getitem = GetitemMT()
    def get_instantiated(self):
        # The boxed equivalent: a TupleMT over instantiated element types.
        return TupleMT.make_tuple([e.get_instantiated() for e in self.elt_types])
    def _can_convert_to(self, t):
        # Convertible to a boxed tuple of equal arity whose elements are
        # pairwise convertible.
        if not isinstance(t, TupleMT) or len(t.elt_types) != len(self.elt_types):
            return False
        for i in xrange(len(self.elt_types)):
            if not self.elt_types[i].can_convert_to(t.elt_types[i]):
                return False
        return True
    def _convert_to(self, em, var, t):
        # Box directly into a matching TupleMT when arities line up;
        # otherwise box into the default instantiated tuple first and
        # convert onward from there.
        if isinstance(t, TupleMT) and len(t.elt_types) == len(self.elt_types):
            elts = []
            for i, e in enumerate(var.v):
                e.incvref(em)
                elts.append(e.convert_to(em, t.elt_types[i]))
            r = t.alloc(em, elts)
            for e in elts:
                e.decvref(em)
            var.decvref(em)
            return r
        var.incvref(em)
        return var.convert_to(em, self.get_instantiated()).convert_to(em, t)
class TupleMT(MT):
    """Boxed, reference-counted tuple type backed by a generated LLVM struct.

    Struct layout is {i64 refcount, elt0, elt1, ...}.  All support
    routines (ctor, repr, len, eq/lt, incref/decref) are emitted from the
    "tuple" template during the "write" stage.  Use make_tuple() to obtain
    the memoized instance for a given element-type signature.
    """
    def __init__(self, name, elt_types):
        super(TupleMT, self).__init__()
        for e in elt_types:
            assert isinstance(e, MT)
            # Boxed tuples hold only fully instantiated element types.
            assert e == e.get_instantiated()
        self.elt_types = tuple(elt_types)
        self.name = name
    def _initialize(self, em, stage):
        # "attrs" registers the method table; "write" emits the LLVM/C code.
        if stage == "attrs":
            for e in self.elt_types:
                e.initialize(em, "attrs")
            self.class_methods = {
                "__getitem__": Variable(TupleMT.Getitem, (), 1, False),
                "__repr__": Variable(UnboxedFunctionMT(em, None, [self], Str), ("@%s_repr" % (self.name,), [], None), 1, False),
                "__len__": Variable(UnboxedFunctionMT(em, None, [self], Int), ("@%s_len" % (self.name,), [], None), 1, False),
                "__eq__": Variable(UnboxedFunctionMT(em, None, [self, self], Bool), ("@%s_eq" % (self.name,), [], None), 1, False),
                "__lt__": Variable(UnboxedFunctionMT(em, None, [self, self], Bool), ("@%s_lt" % (self.name,), [], None), 1, False),
                "__incref__": Variable(UnboxedFunctionMT(em, None, [self], None_), ("@%s_incref" % (self.name,), [], None), 1, False),
                "__decref__": Variable(UnboxedFunctionMT(em, None, [self], None_), ("@%s_decref" % (self.name,), [], None), 1, False),
            }
            self.class_methods["__str__"] = self.class_methods["__repr__"]
            self.typeobj_name = "@%s_typeobj" % self.name
            # Comparisons are offered only when every element supports
            # them; without element __eq__, __lt__ is dropped as well.
            if not all(e.hasattr("__eq__") for e in self.elt_types):
                del self.class_methods["__eq__"]
                del self.class_methods["__lt__"]
            elif not all(e.hasattr("__lt__") for e in self.elt_types):
                del self.class_methods["__lt__"]
        elif stage == "write":
            # Slot 0 of the struct is the refcount, so element i lives at
            # struct index i + 1 throughout the generated code below.
            type_str = ", ".join(["i64"] + [e.llvm_type() for e in self.elt_types])
            arg_str = ", ".join(["%s %%arg%d" % (e.llvm_type(), i) for i, e in enumerate(self.elt_types)])
            carg_str = ", ".join(["%s" % (e.c_type(),) for i, e in enumerate(self.elt_types)])
            # Ctor body: store each (increfed) argument into its slot.
            assign_all = ""
            for i in xrange(len(self.elt_types)):
                e = self.elt_types[i]
                assign_all += "%%ptr%d = getelementptr inbounds %s %s, i64 0, i32 %d\n" % (i, self.llvm_type(), "%rtn", i + 1)
                inc = self.elt_types[i].incref_llvm(em, "%%arg%d" % i)
                if inc:
                    assign_all += inc + '\n'
                assign_all += "store %s %%arg%d, %s* %%ptr%d\n" % (e.llvm_type(), i, e.llvm_type(), i)
            assign_all = assign_all.replace('\n', '\n ')
            # Dtor body: load and decref each slot whose type needs it.
            decref_all = ""
            for i in xrange(len(self.elt_types)):
                e = self.elt_types[i]
                d = e.decref_llvm(em, "%%elt%d" % (i,))
                if not d:
                    continue
                decref_all += "%%ptr%d = getelementptr inbounds %s %%self, i64 0, i32 %d\n" % (i, self.llvm_type(), i + 1)
                decref_all += "%%elt%d = load %s* %%ptr%d\n" % (i, e.llvm_type(), i)
                decref_all += d + '\n'
            decref_all = decref_all.replace('\n', '\n ')
            # __repr__ body: append each element's repr joined by ", "; a
            # 1-tuple gets a trailing comma, matching Python notation.
            add_all_str = ""
            for i in xrange(len(self.elt_types)):
                e = self.elt_types[i]
                assert e == e.get_instantiated()
                if i > 0:
                    add_all_str += "call void @list_string_append(%%list_string* %%list, %%string* %%commaspace)\n" % ()
                add_all_str += "%%ptr%d = getelementptr inbounds %s %%self, i64 0, i32 %d\n" % (i, self.llvm_type(), i + 1)
                add_all_str += "%%elt%d = load %s* %%ptr%d\n" % (i, e.llvm_type(), i)
                # Pre-generate the element's __repr__ call into a scratch
                # emitter and splice the resulting LLVM text in.
                emitter = CodeEmitter(em)
                elt_repr_func = Variable(e, "%%elt%d" % i, 1, False).getattr(emitter, "__repr__", clsonly=True)
                repr_r = elt_repr_func.call(emitter, [])
                assert repr_r.t is Str
                add_all_str += emitter.get_llvm() + '\n'
                add_all_str += "call void @list_string_append(%%list_string* %%list, %%string* %s)\n" % (repr_r.v,)
                add_all_str += "call void @str_decref(%%string* %s)\n" % (repr_r.v,)
            if len(self.elt_types) == 1:
                add_all_str += "call void @list_string_append(%%list_string* %%list, %%string* %%comma)\n" % ()
            add_all_str = add_all_str.replace('\n', '\n ')
            em.llvm_tail.write(eval_template("tuple", em, {
                'n':self.name,
                'type_str':type_str,
                'arg_str':arg_str,
                'assign_all':assign_all,
                'decref_all':decref_all,
                'add_all_str':add_all_str,
                'DEBUG_CHECKS':' ' if DEBUG_CHECKS else ';',
                'NO_DEBUG_CHECKS':' ' if not DEBUG_CHECKS else ';',
                'len':len(self.elt_types),
                'alloc_name':em.get_str_ptr(self.name),
            }))
            ctemplate = eval_ctemplate("tuple", em, {
                't':self,
                'n':self.name,
                'args':carg_str,
            })
            em.c_head.write(ctemplate)
        else:
            raise Exception(stage)
    def llvm_type(self):
        return "%%%s*" % self.name
    def get_ctor_name(self):
        return "@%s_ctor" % (self.name,)
    def alloc(self, em, elts):
        # Emit a ctor call; elts must match this tuple's element types
        # exactly (no implicit conversion here).
        assert self.elt_types == tuple([e.t for e in elts]), (self.elt_types, tuple([e.t for e in elts]))
        name = '%' + em.mkname()
        em.pl("%s = call %s %s(%s)" % (name, self.llvm_type(), self.get_ctor_name(), ", ".join(["%s %s" % (v.t.llvm_type(), v.v) for v in elts])))
        em.pc("#error unimplemented 11")
        return Variable(self, name, 1, True)
    def _can_convert_to(self, t):
        # Boxed tuples do not convert to anything.
        return False
    # TODO merge this with the Unboxed version
    class GetitemMT(_SpecialFuncMT):
        """Runtime tuple[constant-int] lookup: emits a GEP + load."""
        def call(self, em, v, args, expected_type=None):
            # args = (tuple, index); the index must still be a
            # compile-time int so the slot type is known statically.
            assert len(args) == 2
            assert isinstance(args[0].t, TupleMT)
            assert args[1].t is Int
            idx = args[1].v
            assert isinstance(idx, int)
            assert 0 <= idx < len(args[0].t.elt_types)
            assert isinstance(args[0].v, str)
            t = args[0].t.elt_types[idx]
            pl = '%' + em.mkname()
            r = '%' + em.mkname()
            # Slot 0 is the refcount, hence idx + 1.
            em.pl("%s = getelementptr inbounds %s %s, i64 0, i32 %d" % (pl, args[0].t.llvm_type(), args[0].v, idx + 1))
            em.pl("%s = load %s* %s" % (r, t.llvm_type(), pl))
            inc = t.incref_llvm(em, r)
            if inc:
                em.pl(inc + " ; tuple getitem")
            em.pc("#error unimplemented 12")
            args[0].decvref(em)
            args[1].decvref(em)
            return Variable(t, r, 1, True)
    Getitem = GetitemMT()
    # Memo table: element-type signature -> TupleMT instance.
    __tuples = {}
    @staticmethod
    def make_tuple(elt_types):
        """Return the memoized TupleMT for this element-type signature."""
        elt_types = tuple(elt_types)
        for e in elt_types:
            assert isinstance(e, MT)
        if elt_types not in TupleMT.__tuples:
            name = "_".join(["tuple%d" % len(elt_types)] + [e.llvm_type().replace('%', '').replace('*', '') for e in elt_types])
            # Overly long mangled names fall back to a counter-based name.
            if len(name) > 40:
                name = "tuple_%d" % len(TupleMT.__tuples)
            TupleMT.__tuples[elt_types] = TupleMT(name, elt_types)
        t = TupleMT.__tuples[elt_types]
        return t
class ListMT(MT):
    """Growable, reference-counted list type specialized per element type.

    The runtime implementation comes from the "list" template; this class
    registers the method table in the "attrs" stage and emits the LLVM/C
    definitions in the "write" stage.  Use make_list() to obtain the
    memoized instance for a given element type.
    """
    def __init__(self, elt_type):
        super(ListMT, self).__init__()
        assert isinstance(elt_type, MT)
        self.elt_type = elt_type
        self.name = ListMT.get_name(elt_type)
        self.iter_type = ListMT.ListIteratorMT(self)
    def _initialize(self, em, stage):
        if stage == "attrs":
            self.elt_type.initialize(em, "attrs")
            self.class_methods = {
                "__add__": Variable(UnboxedFunctionMT(em, None, [self, self], self), ("@%s_add" % (self.name,), [], None), 1, False),
                "append": Variable(UnboxedFunctionMT(em, None, [self, self.elt_type], None_), ("@%s_append" % (self.name,), [], None), 1, False),
                "__contains__": Variable(UnboxedFunctionMT(em, None, [self, self.elt_type], Bool), ("@%s_contains" % (self.name,), [], None), 1, False),
                "__decref__": Variable(UnboxedFunctionMT(em, None, [self], None_), ("@%s_decref" % (self.name,), [], None), 1, False),
                "__eq__": Variable(UnboxedFunctionMT(em, None, [self, self], Bool), ("@%s_eq" % (self.name,), [], None), 1, False),
                "extend": Variable(UnboxedFunctionMT(em, None, [self, self], None_), ("@%s_extend" % (self.name,), [], None), 1, False),
                "__getitem__": PolymorphicFunctionMT.make([
                    Variable(UnboxedFunctionMT(em, None, [self, Int], self.elt_type), ("@%s_getitem" % (self.name,), [], None), 1, False),
                    Variable(UnboxedFunctionMT(em, None, [self, Slice], self), ("@%s_getitem_slice" % (self.name,), [], None), 1, False)]),
                "__iadd__": Variable(UnboxedFunctionMT(em, None, [self, self], self), ("@%s_iadd" % (self.name,), [], None), 1, False),
                "__incref__": Variable(UnboxedFunctionMT(em, None, [self], None_), ("@%s_incref" % (self.name,), [], None), 1, False),
                "insert": Variable(UnboxedFunctionMT(em, None, [self, Int, self.elt_type], None_), ("@%s_insert" % (self.name,), [], None), 1, False),
                "__iter__": Variable(UnboxedFunctionMT(em, None, [self], self.iter_type), ("@%s_iter" % (self.name,), [], None), 1, False),
                "__len__": Variable(UnboxedFunctionMT(em, None, [self], Int), ("@%s_len" % (self.name,), [], None), 1, False),
                "__mul__": Variable(UnboxedFunctionMT(em, None, [self, Int], self), ("@%s_mul" % (self.name,), [], None), 1, False),
                "__nonzero__": Variable(UnboxedFunctionMT(em, None, [self], Bool), ("@%s_nonzero" % (self.name,), [], None), 1, False),
                "__nrefs__": Variable(UnboxedFunctionMT(em, None, [self], Int), ("@%s_nrefs" % (self.name,), [], None), 1, False),
                "pop": Variable(UnboxedFunctionMT(em, None, [self, Int], self.elt_type), ("@%s_pop" % (self.name,), [], None), 1, False),
                "__repr__": Variable(UnboxedFunctionMT(em, None, [self], Str), ("@%s_repr" % (self.name,), [], None), 1, False),
                "reverse": Variable(UnboxedFunctionMT(em, None, [self], None_), ("@%s_reverse" % (self.name,), [], None), 1, False),
                "sort": Variable(UnboxedFunctionMT(em, None, [self], None_), ("@%s_sort" % (self.name,), [], None), 1, False),
                "__setitem__": PolymorphicFunctionMT.make([
                    Variable(UnboxedFunctionMT(em, None, [self, Int, self.elt_type], None_), ("@%s_setitem" % (self.name,), [], None), 1, False),
                    Variable(UnboxedFunctionMT(em, None, [self, Slice, self], None_), ("@%s_setitem_slice" % (self.name,), [], None), 1, False)]),
            }
            self.class_methods["__str__"] = self.class_methods["__repr__"]
            self.typeobj_name = "@%s_typeobj" % self.name
            # Equality/containment require element __eq__; sort requires
            # element __lt__.
            if not self.elt_type.hasattr("__eq__"):
                del self.class_methods["__eq__"]
                del self.class_methods["__contains__"]
            if not self.elt_type.hasattr("__lt__"):
                del self.class_methods["sort"]
        elif stage == "write":
            # Initial backing-array capacity handed to the list template.
            initial_size = 10
            elt_incref = self.elt_type.incref_llvm(em, "%elt")
            elt_decref = self.elt_type.decref_llvm(em, "%gone_elt")
            # Pre-generate the LLVM that reprs a single element; the list
            # template splices it into the list's __repr__.
            emitter = CodeEmitter(em)
            elt_repr_func = Variable(self.elt_type, "%gone_elt", 1, False).getattr(emitter, "__repr__", clsonly=True)
            repr_r = elt_repr_func.call(emitter, [])
            elt_repr = emitter.get_llvm()
            evaluated = eval_template("list", em, {
                'n':self.name,
                'in_':self.iter_type.name,
                'e':self.elt_type.llvm_type(),
                'elt_repr':elt_repr,
                'repr_r':repr_r.v,
                'initial_size':initial_size,
                'elt_incref':elt_incref,
                'elt_decref':elt_decref,
                'DEBUG_CHECKS':' ' if DEBUG_CHECKS else ';',
                'NO_DEBUG_CHECKS':' ' if not DEBUG_CHECKS else ';',
                'alloc_name':em.get_str_ptr(self.name),
            })
            em.llvm_tail.write(convert_none_to_void_ll(evaluated))
            ctemplate = eval_ctemplate("list", em, {
                'et':self.elt_type,
                'n':self.name,
                'in_':self.iter_type.name,
            })
            em.c_head.write(ctemplate)
        else:
            raise Exception(stage)
    def llvm_type(self):
        return "%%%s*" % (self.name,)
    def get_ctor_name(self):
        return "@%s_ctor" % (self.name,)
    def _can_convert_to(self, t):
        # list[A] -> list[B] exactly when A -> B.
        if isinstance(t, ListMT):
            return self.elt_type.can_convert_to(t.elt_type)
        return False
    def __make_conversion(self, em, t):
        """Emit an LLVM function that converts %l of this list type into a
        new list of type t, converting element by element; a null input
        yields a null result (see the phi in the done block).  Returns an
        unboxed-function Variable wrapping the emitted function.
        """
        assert isinstance(t, ListMT)
        em2 = CodeEmitter(em)
        func_name = "@" + em.mkname("_convert_list")
        em2.pl("define %s %s(%s %%l) {" % (t.llvm_type(), func_name, self.llvm_type()))
        em2.pl("start:")
        starting_block = "start"
        # Pre-reserve SSA names so the phi nodes below can reference
        # values produced in later blocks.
        isnull_name = "%" + em2.mkname()
        idx_name = "%" + em2.mkname()
        next_idx_name = "%" + em2.mkname()
        done_name = "%" + em2.mkname()
        newlist_name = "%" + em2.mkname()
        rtn_name = "%" + em2.mkname()
        start_label = em2.mkname(prefix="label")
        cond_label = em2.mkname(prefix="label")
        loop_label = em2.mkname(prefix="label")
        back_label = em2.mkname(prefix="backedge")
        done_label = em2.mkname(prefix="label")
        em2.indent(4)
        em2.pl("; Starting conversion from %s to %s" % (self.llvm_type(), t.llvm_type()))
        em2.pl("%s = icmp eq %s %%l, null" % (isnull_name, self.llvm_type()))
        em2.pl("br i1 %s, label %%%s, label %%%s" % (isnull_name, done_label, start_label))
        em2.indent(-4)
        em2.pl()
        em2.pl("%s:" % (start_label,))
        em2.indent(4)
        em2.blockname = start_label
        var = Variable(self, "%l", 1, False)
        len_v = var.getattr(em2, "__len__", clsonly=True).call(em2, [])
        del var
        ctor_name = t.get_ctor_name()
        em2.pl("%s = call %s %s()" % (newlist_name, t.llvm_type(), ctor_name))
        rtn = Variable(t, newlist_name, 1, True)
        em2.pl("br label %%%s" % (cond_label,))
        em2.indent(-4)
        em2.pl()
        em2.pl("%s:" % (cond_label,))
        em2.indent(4)
        em2.blockname = cond_label
        # Loop counter: 0 on entry from start, incremented via back edge.
        em2.pl("%s = phi i64 [0, %%%s], [%s, %%%s]" % (idx_name, start_label, next_idx_name, back_label))
        em2.pl("%s = icmp sge i64 %s, %s" % (done_name, idx_name, len_v.v))
        em2.pl("br i1 %s, label %%%s, label %%%s" % (done_name, done_label, loop_label))
        em2.indent(-4)
        em2.pl()
        em2.pl("%s:" % (loop_label,))
        em2.indent(4)
        em2.blockname = loop_label
        # Body: fetch element i, convert it, and append to the new list.
        var = Variable(self, "%l", 1, False)
        gotten = var.getattr(em2, "__getitem__", clsonly=True).call(em2, [Variable(Int, idx_name, 1, False)])
        del var
        gotten = gotten.convert_to(em2, t.elt_type)
        rtn.incvref(em2) # for the following call
        rtn.getattr(em2, "append").call(em2, [gotten])
        em2.pl("%s = add i64 %s, 1" % (next_idx_name, idx_name))
        em2.pl("br label %%%s" % (back_label,))
        # We use this trampoline to give the backedge a predictable name,
        # so that we can generate the phi instruction beforehand
        em2.indent(-4)
        em2.pl()
        em2.pl("%s:" % (back_label,))
        em2.indent(4)
        em2.blockname = back_label
        em2.pl("br label %%%s" % (cond_label,))
        em2.indent(-4)
        em2.pl()
        em2.pl("%s:" % (done_label,))
        em2.indent(4)
        em2.blockname = done_label
        # null in -> null out; otherwise the freshly built list.
        em2.pl("%s = phi %s [null, %%%s], [%s, %%%s]" % (rtn_name, t.llvm_type(), starting_block, newlist_name, cond_label))
        em2.pl("; Done with conversion from %s to %s" % (self.llvm_type(), t.llvm_type()))
        em2.pl("ret %s %s" % (t.llvm_type(), rtn_name))
        em2.indent(-4)
        em2.pl("}")
        em.llvm_tail.write(em2.get_llvm() + '\n')
        return Variable(UnboxedFunctionMT(em, None, [self], t), (func_name, [], None), 1, False)
        # return Variable(t, rtn_name, 1, True)
    def _convert_to(self, em, var, t):
        if isinstance(t, BoxedMT):
            return t.convert_from(em, var)
        if not isinstance(t, ListMT) or not self.elt_type.can_convert_to(t.elt_type):
            raise UserTypeError(t)
        # TODO don't write out a new function if we already did for this type of conversion
        f = self.__make_conversion(em, t)
        return f.call(em, [var])
    @staticmethod
    def get_name(elt_type):
        # e.g. list_int for list-of-int.
        return "list_%s" % elt_type.llvm_type().replace('%', '').replace('*', '')
    # Memo table: element type -> ListMT instance.
    __lists = {}
    @staticmethod
    def make_list(elt_type):
        """Return the memoized ListMT for this element type."""
        assert isinstance(elt_type, MT)
        if elt_type not in ListMT.__lists:
            ListMT.__lists[elt_type] = ListMT(elt_type)
        return ListMT.__lists[elt_type]
    class ListIteratorMT(MT):
        """Iterator type over a ListMT; implemented by the list template."""
        def __init__(self, lst):
            super(ListMT.ListIteratorMT, self).__init__()
            assert isinstance(lst, ListMT)
            self._lst = lst
            # Strip the "list" prefix: list_int -> listiterator_int.
            assert lst.name.startswith("list")
            self.name = "listiterator" + lst.name[4:]
            self.class_methods = {
                "__decref__": Variable(UnboxedFunctionMT(None, None, [self], None_), ("@%s_decref" % (self.name,), [], None), 1, False),
                "hasnext": Variable(UnboxedFunctionMT(None, None, [self], Bool), ("@%s_hasnext" % (self.name,), [], None), 1, False),
                "__incref__": Variable(UnboxedFunctionMT(None, None, [self], None_), ("@%s_incref" % (self.name,), [], None), 1, False),
                "next": Variable(UnboxedFunctionMT(None, None, [self], self._lst.elt_type), ("@%s_next" % (self.name,), [], None), 1, False),
            }
            self.typeobj_name = "@%s_typeobj" % self.name
            # Codegen for the iterator is emitted with the list itself.
            self.initialized = ("attrs", "write")
        def llvm_type(self):
            return "%%%s*" % (self.name)
class SetMT(MT):
    """Hash-set type specialized per element type.

    The runtime implementation comes from the "set" templates; "attrs"
    registers the method table and "write" emits the LLVM/C definitions.
    Use make_set() to obtain the memoized instance for an element type.
    """
    def __init__(self, elt_type):
        super(SetMT, self).__init__()
        assert isinstance(elt_type, MT)
        self.elt_type = elt_type
        self.name = SetMT.get_name(elt_type)
        self.iter_type = SetMT.SetIteratorMT(self)
    def _initialize(self, em, stage):
        # NOTE(review): unlike ListMT, the element type's "attrs" stage is
        # not initialized here and there is no hasattr("__eq__") guard --
        # confirm set element types are always pre-initialized/comparable.
        if stage == "attrs":
            self.class_methods = {
                "add": Variable(UnboxedFunctionMT(em, None, [self, self.elt_type], None_), ("@%s_add" % (self.name,), [], None), 1, False),
                "__contains__": Variable(UnboxedFunctionMT(em, None, [self, self.elt_type], Bool), ("@%s_contains" % (self.name,), [], None), 1, False),
                "__decref__": Variable(UnboxedFunctionMT(em, None, [self], None_), ("@%s_decref" % (self.name,), [], None), 1, False),
                "__eq__": Variable(UnboxedFunctionMT(em, None, [self, self], Bool), ("@%s_eq" % (self.name,), [], None), 1, False),
                "__incref__": Variable(UnboxedFunctionMT(em, None, [self], None_), ("@%s_incref" % (self.name,), [], None), 1, False),
                "__iter__": Variable(UnboxedFunctionMT(em, None, [self], self.iter_type), ("@%s_iter" % (self.name,), [], None), 1, False),
                "__len__": Variable(UnboxedFunctionMT(em, None, [self], Int), ("@%s_len" % (self.name,), [], None), 1, False),
                "__nonzero__": Variable(UnboxedFunctionMT(em, None, [self], Bool), ("@%s_nonzero" % (self.name,), [], None), 1, False),
                "__repr__": Variable(UnboxedFunctionMT(em, None, [self], Str), ("@%s_repr" % (self.name,), [], None), 1, False),
            }
            self.class_methods["__str__"] = self.class_methods["__repr__"]
            self.typeobj_name = "@%s_typeobj" % self.name
        elif stage == "write":
            name = SetMT.get_name(self.elt_type)
            # NOTE(review): eval_template is handed a fresh CodeEmitter
            # here, whereas ListMT passes em directly -- confirm intended.
            emitter = CodeEmitter(em)
            evaluated = eval_template("set", emitter, {
                'n':name,
                'elt_type':self.elt_type,
                'e':self.elt_type.llvm_type(),
                'iter_name':self.iter_type.name,
            })
            em.llvm_tail.write(convert_none_to_void_ll(evaluated))
            ctemplate = eval_ctemplate("set", em, {
                'et':self.elt_type,
                'n':name,
                'i':self.iter_type.name,
            })
            em.c_head.write(ctemplate)
        else:
            raise Exception(stage)
    def __repr__(self):
        return "<SetMT %r>" % (self.elt_type,)
    @staticmethod
    def get_name(elt_type):
        # e.g. set_int for set-of-int.
        return "set_%s" % elt_type.llvm_type().replace('%', '').replace('*', '')
    def llvm_type(self):
        return "%%%s*" % (self.name,)
    def get_ctor_name(self):
        return "@%s_ctor" % (self.name,)
    # Memo table: element type -> SetMT instance.
    __sets = {}
    @staticmethod
    def make_set(elt_type):
        """Return the memoized SetMT for this element type."""
        assert isinstance(elt_type, MT)
        if elt_type not in SetMT.__sets:
            SetMT.__sets[elt_type] = SetMT(elt_type)
        return SetMT.__sets[elt_type]
    class SetIteratorMT(MT):
        """Iterator type over a SetMT; implemented by the set template."""
        def __init__(self, st):
            super(SetMT.SetIteratorMT, self).__init__()
            assert isinstance(st, SetMT)
            self._st = st
            # "set_" is 4 characters, so [4:] strips prefix and underscore:
            # set_int -> setiteratorint.
            assert st.name.startswith("set")
            self.name = "setiterator" + st.name[4:]
            self.class_methods = {
                "__decref__": Variable(UnboxedFunctionMT(None, None, [self], None_), ("@%s_decref" % (self.name,), [], None), 1, False),
                "hasnext": Variable(UnboxedFunctionMT(None, None, [self], Bool), ("@%s_hasnext" % (self.name,), [], None), 1, False),
                "__incref__": Variable(UnboxedFunctionMT(None, None, [self], None_), ("@%s_incref" % (self.name,), [], None), 1, False),
                "next": Variable(UnboxedFunctionMT(None, None, [self], self._st.elt_type), ("@%s_next" % (self.name,), [], None), 1, False),
            }
            self.initialized = ("attrs", "write")
        def llvm_type(self):
            return "%%%s*" % (self.name)
class DequeMT(MT):
    """Double-ended queue type specialized per element type.

    The runtime implementation comes from the "deque" templates; "attrs"
    registers the method table and "write" emits the LLVM/C definitions.
    Use make_deque() to obtain the memoized instance for an element type.
    """
    def __init__(self, elt_type):
        super(DequeMT, self).__init__()
        assert isinstance(elt_type, MT)
        self.elt_type = elt_type
        self.name = DequeMT.get_name(elt_type)
        self.iter_type = DequeMT.DequeIteratorMT(self)
    def _initialize(self, em, stage):
        if stage == "attrs":
            self.class_methods = {
                "append": Variable(UnboxedFunctionMT(em, None, [self, self.elt_type], None_), ("@%s_append" % (self.name,), [], None), 1, False),
                "appendleft": Variable(UnboxedFunctionMT(em, None, [self, self.elt_type], None_), ("@%s_appendleft" % (self.name,), [], None), 1, False),
                "__contains__": Variable(UnboxedFunctionMT(em, None, [self, self.elt_type], Bool), ("@%s_contains" % (self.name,), [], None), 1, False),
                "__decref__": Variable(UnboxedFunctionMT(em, None, [self], None_), ("@%s_decref" % (self.name,), [], None), 1, False),
                "__eq__": Variable(UnboxedFunctionMT(em, None, [self, self], Bool), ("@%s_eq" % (self.name,), [], None), 1, False),
                "__incref__": Variable(UnboxedFunctionMT(em, None, [self], None_), ("@%s_incref" % (self.name,), [], None), 1, False),
                "__iter__": Variable(UnboxedFunctionMT(em, None, [self], self.iter_type), ("@%s_iter" % (self.name,), [], None), 1, False),
                "__len__": Variable(UnboxedFunctionMT(em, None, [self], Int), ("@%s_len" % (self.name,), [], None), 1, False),
                "__nonzero__": Variable(UnboxedFunctionMT(em, None, [self], Bool), ("@%s_nonzero" % (self.name,), [], None), 1, False),
                "pop": Variable(UnboxedFunctionMT(em, None, [self], self.elt_type), ("@%s_pop" % (self.name,), [], None), 1, False),
                "popleft": Variable(UnboxedFunctionMT(em, None, [self], self.elt_type), ("@%s_popleft" % (self.name,), [], None), 1, False),
                "__repr__": Variable(UnboxedFunctionMT(em, None, [self], Str), ("@%s_repr" % (self.name,), [], None), 1, False),
            }
            self.class_methods["__str__"] = self.class_methods["__repr__"]
            self.typeobj_name = "@%s_typeobj" % self.name
        elif stage == "write":
            name = DequeMT.get_name(self.elt_type)
            emitter = CodeEmitter(em)
            evaluated = eval_template("deque", emitter, {
                'n':self.name,
                'elt_type':self.elt_type,
                'e':self.elt_type.llvm_type(),
                'iter_name':self.iter_type.name,
            })
            em.llvm_tail.write(convert_none_to_void_ll(evaluated))
            ctemplate = eval_ctemplate("deque", em, {
                'et':self.elt_type,
                'n':name,
                'i':self.iter_type.name,
            })
            em.c_head.write(ctemplate)
        else:
            raise Exception(stage)
    @staticmethod
    def get_name(elt_type):
        # e.g. deque_int for deque-of-int.
        return "deque_%s" % elt_type.llvm_type().replace('%', '').replace('*', '')
    def llvm_type(self):
        return "%%%s*" % (self.name,)
    def get_ctor_name(self):
        return "@%s_ctor" % (self.name,)
    # Memo table: element type -> DequeMT instance.
    __deques = {}
    @staticmethod
    def make_deque(elt_type):
        """Return the memoized DequeMT for this element type."""
        assert isinstance(elt_type, MT)
        if elt_type not in DequeMT.__deques:
            DequeMT.__deques[elt_type] = DequeMT(elt_type)
        return DequeMT.__deques[elt_type]
    class DequeIteratorMT(MT):
        """Iterator type over a DequeMT; implemented by the deque template."""
        def __init__(self, st):
            super(DequeMT.DequeIteratorMT, self).__init__()
            assert isinstance(st, DequeMT)
            self._st = st
            # NOTE(review): "deque" is 5 characters but only 4 are stripped,
            # giving names like "dequeiteratore_int".  The name is used
            # consistently so codegen still links, but this looks like an
            # off-by-one carried over from the list iterator -- confirm.
            assert st.name.startswith("deque")
            self.name = "dequeiterator" + st.name[4:]
            self.class_methods = {
                "__decref__": Variable(UnboxedFunctionMT(None, None, [self], None_), ("@%s_decref" % (self.name,), [], None), 1, False),
                "hasnext": Variable(UnboxedFunctionMT(None, None, [self], Bool), ("@%s_hasnext" % (self.name,), [], None), 1, False),
                "__incref__": Variable(UnboxedFunctionMT(None, None, [self], None_), ("@%s_incref" % (self.name,), [], None), 1, False),
                "next": Variable(UnboxedFunctionMT(None, None, [self], self._st.elt_type), ("@%s_next" % (self.name,), [], None), 1, False),
            }
            self.initialized = ("attrs", "write")
        def llvm_type(self):
            return "%%%s*" % (self.name)
class Parametric1ArgCtorFuncMT(_SpecialFuncMT):
    """Callable type behind the one-argument container constructors.

    Parameterized by a factory (make_type) mapping an element type to a
    container type, and by the name of the container's "add one element"
    method.  A zero-argument call emits a bare ctor call for the expected
    container type; a one-argument call emits (and memoizes per iterator
    type) a generated conversion function that drains the iterable.
    """
    def __init__(self, make_type, add_func_name):
        super(Parametric1ArgCtorFuncMT, self).__init__()
        self._make_type = make_type
        self._add_func_name = add_func_name
        self.__made = {}
    def _get_converter(self, em, iter_type):
        # One generated converter per iterator type, cached for reuse.
        cached = self.__made.get(iter_type)
        if cached is not None:
            return cached
        # Element type comes from the iterator's next() return type.
        elt_type = iter_type.get_attr_types()['next'][0].get_instantiated().rtn_type
        container_type = self._make_type(elt_type)
        container_type.initialize(em, "write")
        func_name = "@" + em.mkname(prefix="%s_convert" % (container_type.name,))
        em.llvm_tail.write(eval_template("list_convert", em, {
            "iter_type":iter_type,
            "elt_type":elt_type,
            "rtn_type":container_type,
            "func_name":func_name,
            "add_func_name":self._add_func_name,
        }))
        converter = Variable(UnboxedFunctionMT(em, None, [iter_type], container_type), (func_name, [], None), 1, False)
        self.__made[iter_type] = converter
        return converter
    def call(self, em, v, args, expected_type=None):
        if not args:
            # Zero-argument form: construct an empty container of the type
            # the caller expects.
            assert isinstance(expected_type, MT), repr(expected_type)
            rtn = "%" + em.mkname()
            em.pl("%s = call %s %s()" % (rtn, expected_type.llvm_type(), expected_type.get_ctor_name()))
            return Variable(expected_type, rtn, 1, True)
        if len(args) != 1:
            raise Exception(len(args))
        (source,) = args
        # One-argument form: iterate the argument and drain it through the
        # (memoized) generated conversion function.
        iterator = source.getattr(em, "__iter__").call(em, [])
        converter = self._get_converter(em, iterator.t)
        converter.incvref(em)
        return converter.call(em, [iterator])
# Singleton callables implementing the builtin constructors list(), set(),
# and deque(): each builds its container type via the memoizing factory and
# fills it through the named element-insertion method.
ListFunc = Parametric1ArgCtorFuncMT(ListMT.make_list, "append")
SetFunc = Parametric1ArgCtorFuncMT(SetMT.make_set, "add")
DequeFunc = Parametric1ArgCtorFuncMT(DequeMT.make_deque, "append")
class DictMT(MT):
    """Hash-map type specialized per (key, value) type pair.

    "attrs" registers the method table; "write" emits the LLVM/C
    implementation from the "dict" templates, together with iterator
    types over keys, values, and boxed (key, value) item tuples.  Use
    make_dict() to obtain the memoized instance for a type pair.
    """
    def __init__(self, key_type, value_type):
        super(DictMT, self).__init__()
        self.key_type = key_type
        self.value_type = value_type
        # Items are boxed (key, value) tuples.
        self.item_type = TupleMT.make_tuple([self.key_type, self.value_type])
        self.name = DictMT.get_name(key_type, value_type)
        self.key_iter_type = DictMT.DictIteratorMT(self, "key", self.key_type)
        self.value_iter_type = DictMT.DictIteratorMT(self, "value", self.value_type)
        self.item_iter_type = DictMT.DictIteratorMT(self, "item", self.item_type)
    def _initialize(self, em, stage):
        if stage == "attrs":
            # NOTE(review): some entries pass em to UnboxedFunctionMT while
            # others pass None -- confirm whether the distinction matters.
            self.class_methods = {
                "__contains__": Variable(UnboxedFunctionMT(em, None, [self, self.key_type], Bool), ("@%s_contains" % (self.name,), [], None), 1, False),
                "__decref__": Variable(UnboxedFunctionMT(None, None, [self], None_), ("@%s_decref" % (self.name,), [], None), 1, False),
                "__eq__": Variable(UnboxedFunctionMT(None, None, [self, self], Bool), ("@%s_eq" % (self.name,), [], None), 1, False),
                "get": Variable(UnboxedFunctionMT(None, None, [self, self.key_type, self.value_type], self.value_type), ("@%s_get" % self.name, [], None), 1, False),
                "__getitem__": Variable(UnboxedFunctionMT(em, None, [self, self.key_type], self.value_type), ("@%s_getitem" % (self.name,), [], None), 1, False),
                "__incref__": Variable(UnboxedFunctionMT(None, None, [self], None_), ("@%s_incref" % (self.name,), [], None), 1, False),
                "items": Variable(UnboxedFunctionMT(em, None, [self], ListMT.make_list(self.item_type)), ("@%s_items" % (self.name,), [], None), 1, False),
                # BUG FIX: "__iter__" previously appeared twice in this
                # literal; the second identical entry silently overwrote
                # the first and built a wasted Variable.  Listed once now.
                "__iter__": Variable(UnboxedFunctionMT(em, None, [self], self.key_iter_type), ("@%s_iter" % (self.name,), [], None), 1, False),
                "iteritems": Variable(UnboxedFunctionMT(em, None, [self], self.item_iter_type), ("@%s_iteritems" % (self.name,), [], None), 1, False),
                "itervalues": Variable(UnboxedFunctionMT(em, None, [self], self.value_iter_type), ("@%s_itervalues" % (self.name,), [], None), 1, False),
                "__len__": Variable(UnboxedFunctionMT(em, None, [self], Int), ("@%s_len" % (self.name,), [], None), 1, False),
                "__nonzero__": Variable(UnboxedFunctionMT(em, None, [self], Bool), ("@%s_nonzero" % (self.name,), [], None), 1, False),
                "__repr__": Variable(UnboxedFunctionMT(em, None, [self], Str), ("@%s_repr" % (self.name,), [], None), 1, False),
                "setdefault": Variable(UnboxedFunctionMT(em, None, [self, self.key_type, self.value_type], self.value_type), ("@%s_setdefault" % (self.name,), [], None), 1, False),
                "__setitem__": Variable(UnboxedFunctionMT(em, None, [self, self.key_type, self.value_type], None_), ("@%s_set" % (self.name,), [], None), 1, False),
                "values": Variable(UnboxedFunctionMT(em, None, [self], ListMT.make_list(self.value_type)), ("@%s_values" % (self.name,), [], None), 1, False),
            }
            self.class_methods["__str__"] = self.class_methods["__repr__"]
            self.class_methods["iterkeys"] = self.class_methods["__iter__"]
            self.typeobj_name = "@%s_typeobj" % self.name
        elif stage == "write":
            # The generated dict code leans on the key/value/item types and
            # their list forms, so write all of them out first.
            ListMT.make_list(self.key_type).initialize(em, "write")
            ListMT.make_list(self.value_type).initialize(em, "write")
            ListMT.make_list(self.item_type).initialize(em, "write")
            self.key_type.initialize(em, "write")
            self.value_type.initialize(em, "write")
            self.item_type.initialize(em, "write")
            template = eval_ctemplate("dict", em, {
                't':self,
                'n':self.name,
            })
            em.c_head.write(template)
            evaluated = eval_template("dict", em, {
                'n':self.name,
                'k':self.key_type.llvm_type(),
                'v':self.value_type.llvm_type(),
                'i':self.item_type.llvm_type(),
                'lv':ListMT.make_list(self.value_type).llvm_type(),
                'li':ListMT.make_list(self.item_type).llvm_type(),
                'key_iter_name':self.key_iter_type.name,
                'key_iter':self.key_iter_type.llvm_type(),
                'value_iter_name':self.value_iter_type.name,
                'value_iter':self.value_iter_type.llvm_type(),
                'item_iter_name':self.item_iter_type.name,
                'item_iter':self.item_iter_type.llvm_type(),
            })
            em.llvm_tail.write(convert_none_to_void_ll(evaluated))
        else:
            raise Exception(stage)
    def llvm_type(self):
        return "%%%s*" % (self.name,)
    def get_ctor_name(self):
        return "@%s_ctor" % (self.name,)
    @staticmethod
    def get_name(key_type, value_type):
        # e.g. dict_int_string for {int: str}.
        return "dict_%s_%s" % (key_type.llvm_type().replace('%', '').replace('*', ''), value_type.llvm_type().replace('%', '').replace('*', ''))
    # Memo table: (key type, value type) -> DictMT instance.
    __made_dicts = {}
    @staticmethod
    def make_dict(key_type, value_type):
        """Return the memoized DictMT for this (key, value) type pair."""
        assert isinstance(key_type, MT)
        assert isinstance(value_type, MT)
        mem_key = (key_type, value_type)
        if mem_key not in DictMT.__made_dicts:
            d = DictMT(key_type, value_type)
            DictMT.__made_dicts[mem_key] = d
        return DictMT.__made_dicts[mem_key]
    class DictIteratorMT(MT):
        """Iterator over a dict's keys, values, or items (per type_name)."""
        def __init__(self, d, type_name, rtn_type):
            super(DictMT.DictIteratorMT, self).__init__()
            assert isinstance(d, DictMT)
            self._d = d
            # Strip the "dict" prefix: dict_k_v -> dict<kind>iterator_k_v.
            assert d.name.startswith("dict")
            self.name = "dict%siterator%s" % (type_name, d.name[4:])
            self.class_methods = {
                "__decref__": Variable(UnboxedFunctionMT(None, None, [self], None_), ("@%s_decref" % (self.name,), [], None), 1, False),
                "hasnext": Variable(UnboxedFunctionMT(None, None, [self], Bool), ("@%s_hasnext" % (self.name,), [], None), 1, False),
                "__incref__": Variable(UnboxedFunctionMT(None, None, [self], None_), ("@%s_incref" % (self.name,), [], None), 1, False),
                "__iter__": Variable(UnboxedFunctionMT(None, None, [self], self), ("@%s_iter" % (self.name,), [], None), 1, False),
                "next": Variable(UnboxedFunctionMT(None, None, [self], rtn_type), ("@%s_next" % (self.name,), [], None), 1, False),
            }
            self.typeobj_name = "@%s_typeobj" % self.name
            self.initialized = ("attrs", "write")
        def llvm_type(self):
            return "%%%s*" % (self.name)
class DictFuncMT(_SpecialFuncMT):
    """Callable type behind the one-argument dict() constructor.

    The argument must yield 2-tuples (a dict argument is iterated via
    iteritems); a generated conversion function, memoized per iterator
    type, drains the pair iterator into a freshly written DictMT.
    """
    # Cache: pair-iterator type -> conversion-function Variable.
    __made = {}
    @staticmethod
    def get_converter(em, iter_type):
        cached = DictFuncMT.__made.get(iter_type)
        if cached is not None:
            return cached
        # The iterator must produce boxed (key, value) 2-tuples.
        pair_type = iter_type.get_attr_types()['next'][0].get_instantiated().rtn_type
        assert isinstance(pair_type, TupleMT), pair_type
        assert len(pair_type.elt_types) == 2
        key_type, value_type = pair_type.elt_types
        dict_type = DictMT.make_dict(key_type, value_type)
        dict_type.initialize(em, "write")
        func_name = "@" + em.mkname(prefix="%s_convert" % (dict_type.name,))
        em.llvm_tail.write(eval_template("dict_convert", em, {
            "iter_type":iter_type,
            "elt_type":pair_type,
            "key_type":key_type,
            "value_type":value_type,
            "rtn_type":dict_type,
            "func_name":func_name,
        }))
        converter = Variable(UnboxedFunctionMT(em, None, [iter_type], dict_type), (func_name, [], None), 1, False)
        DictFuncMT.__made[iter_type] = converter
        return converter
    def call(self, em, v, args, expected_type=None):
        assert len(args) == 1
        (source,) = args
        # A dict iterates over its keys by default; we need (key, value)
        # pairs, so dict arguments go through iteritems instead.
        attr = "iteritems" if isinstance(source.t, DictMT) else "__iter__"
        pair_iter = source.getattr(em, attr).call(em, [])
        converter = DictFuncMT.get_converter(em, pair_iter.t)
        converter.incvref(em)
        return converter.call(em, [pair_iter])
# Singleton implementing the builtin dict() constructor.
DictFunc = singleton(DictFuncMT)
class _StructWriter(object):
    """Emits an LLVM struct type (refcount slot + typed fields) together with
    its alloc/dealloc helpers, and mediates member access for closures,
    modules, and boxed objects.

    Two kinds of members:
      * ``fields``    -- [(name, MT)] stored in the struct; reads/writes go
                         through getelementptr/load/store with refcounting.
      * ``constants`` -- name -> Variable resolved at compile time (functions,
                         classes, modules); no storage is allocated for them.

    A ``None`` constant whose name appears in ``function_types`` is a
    forward-declared function: ``get`` hands out a placeholder symbol which is
    later patched to the real function name via em.register_replacement when
    ``set`` supplies it.
    """
    def __init__(self, em, type_name, fields, constants, dealloc_maker=None, function_types=None):
        self.type_name = type_name
        self.fields = fields
        self.constants = constants
        self._function_types = function_types or {}
        self._function_placeholders = {}
        assert all(isinstance(v, UnboxedFunctionMT) for v in self._function_types.values())
        # Callers (e.g. BoxedMT) may override how the dealloc body is built.
        self.dealloc_maker = dealloc_maker or _StructWriter.make_struct_dealloc
        if not type_name:
            # Nameless struct: constants only; write() will emit nothing.
            assert not fields
            return
    def write(self, em):
        """Emit the LLVM struct definition plus alloc/dealloc to em.llvm_tail."""
        if not self.type_name:
            assert not self.fields
            return
        field_positions = {}
        # Slot 0 is the refcount; real fields start at struct index 1.
        for i, (n, t) in enumerate(self.fields):
            t.initialize(em, "attrs")
            field_positions[n] = (i+1, t)
        dealloc = self.dealloc_maker(em, self.type_name, field_positions)
        em.llvm_tail.write(eval_template("struct", em, {
            'n':self.type_name,
            'type_str':", ".join(['i64'] + [type.llvm_type() for (name, type) in self.fields]),
            'DEBUG_CHECKS':' ' if DEBUG_CHECKS else ';',
            'NO_DEBUG_CHECKS':' ' if not DEBUG_CHECKS else ';',
            'dealloc':dealloc,
            'alloc_name':em.get_str_ptr(self.type_name),
            }))
    @staticmethod
    def make_struct_dealloc(em, type_name, field_positions):
        """Default dealloc body: load each refcounted field and decref it."""
        deallocs = []
        for n, (i, t) in sorted(field_positions.items()):
            ptr_name = "%" + em.mkname()
            name = "%" + em.mkname()
            d = t.decref_llvm(em, name)
            if d is None:
                # Field type is not refcounted; nothing to emit for it.
                continue
            deallocs.append("%s = getelementptr %%%s* %%self, i64 0, i32 %d\n %s = load %s* %s\n %s" % (ptr_name, type_name, i, name, t.llvm_type(), ptr_name, d))
        dealloc = '\n'.join(s for s in deallocs if s)
        return dealloc
    def get(self, em, v, name, skip_incref=False):
        """Read member ``name``.

        ``v`` is the LLVM value holding the struct pointer (ignored for
        constants).  Returns a Variable, incref'd unless skip_incref.
        """
        if name in self.constants:
            if name in self._function_types and self.constants[name] is None:
                # First access to a not-yet-compiled function: hand out a
                # placeholder symbol; set() patches it to the real name later.
                assert name not in self._function_placeholders
                placeholder = em.get_placeholder()
                self.constants[name] = Variable(self._function_types[name], (placeholder, [], None), 1, False)
                self._function_placeholders[name] = placeholder
                del self._function_types[name]
            assert self.constants[name], name
            self.constants[name].incvref(em)
            return self.constants[name]
            # return self.constants[name].dup({})
        assert isinstance(v, str), v
        offset = 1
        for i, (n, t) in enumerate(self.fields):
            if n == name:
                assert v
                assert em
                p_name = '%' + em.mkname()
                rtn_name = '%' + em.mkname()
                em.pl("%s = getelementptr inbounds %%%s* %s, i64 0, i32 %d" % (p_name, self.type_name, v, i + offset))
                em.pl("%s = load %s* %s" % (rtn_name, t.llvm_type(), p_name))
                em.pc("#error unimplemented 13")
                if not skip_incref:
                    inc = t.incref_llvm(em, rtn_name)
                    if inc:
                        em.pl(inc + " ; struct get")
                marked = not skip_incref
                return Variable(t, rtn_name, 1, marked)
        raise UserAttributeError("struct doesn't have field %r" % (name,))
    def set(self, em, v, name, val, skip_decref_prev=False, skip_incref=False):
        """Write member ``name``.

        For constants this records the value (patching a previously handed-out
        function placeholder if needed); for fields it emits a store,
        decref'ing the old value unless skip_decref_prev.
        """
        assert v is None or isinstance(v, str), v
        if name in self.constants:
            if self.constants[name] is not None:
                orig = self.constants[name]
                if isinstance(val.t, (UserModuleMT, ClassMT, ModuleMT)):
                    assert val.t is orig.t
                elif isinstance(val.t, UnboxedFunctionMT):
                    # Patch the placeholder symbol that get() handed out.
                    assert name in self._function_placeholders
                    assert name not in self._function_types, "should have been deleted by the get that created the placeholder"
                    em.register_replacement(self._function_placeholders[name], val.v[0])
                    del self._function_placeholders[name]
                else:
                    raise Exception(name, orig.t)
            self.constants[name] = val
            return
        assert is_emitter(em) and em, em
        assert v
        offset = 1
        for i, (n, t) in enumerate(self.fields):
            if n == name:
                p_name = '%' + em.mkname()
                em.pl("%s = getelementptr inbounds %%%s* %s, i64 0, i32 %d" % (p_name, self.type_name, v, i + offset))
                val = val.convert_to(em, t)
                assert val.t == t
                if not skip_decref_prev:
                    # Drop the reference held by the value being overwritten.
                    old_val = '%' + em.mkname()
                    d = t.decref_llvm(em, old_val)
                    if d:
                        em.pl("%s = load %s* %s" % (old_val, t.llvm_type(), p_name))
                        em.pl(d)
                em.pl("store %s %s, %s* %s" % (t.llvm_type(), val.v, t.llvm_type(), p_name))
                if not skip_incref:
                    em.pl(t.incref_llvm(em, val.v))
                val.decvref(em)
                em.pc("#error unimplemented 14")
                return
        raise UserAttributeError("struct doesn't have field %r" % (name,))
    def has(self, name):
        """True if ``name`` is either a constant or a stored field."""
        return self.has_constant(name) or any(name == n for (n, t) in self.fields)
    def has_constant(self, name):
        return name in self.constants
    def alloc(self, em):
        """Emit a call to the generated @<type>_alloc; returns the raw LLVM name."""
        name = "%" + em.mkname()
        em.pl("%s = call %%%s* @%s_alloc()" % (name, self.type_name, self.type_name))
        em.pc("#error unimplemented 15")
        return name
class ClosureMT(MT):
    """Compile-time type of a closure frame.

    Variables captured by nested scopes (``cells``) become struct fields;
    nested functions/classes/modules are compile-time constants.  A closure
    with no cells is "inlined": no struct is emitted and lookups fall through
    to the parent frame.
    """
    # Field name of the parent-frame pointer.  The leading space keeps it from
    # colliding with any real variable name.
    PARENT_FIELD_NAME = " __parent__"
    def __init__(self, em, name, parent_type, parent_obj, cells, functions, classes, modules):
        super(ClosureMT, self).__init__()
        assert (parent_type is None) or isinstance(parent_type, ClosureMT)
        assert isinstance(parent_obj, bool)
        if not parent_type:
            assert not parent_obj
        self.inlined = not cells
        if self.inlined:
            # Just to make sure the name is invalid and we don't try to use it:
            assert name == ''
        if parent_obj:
            # Ensure that chains of just inlined closures dont get objects
            assert (not parent_type.inlined) or (parent_type.inlined and parent_type.parent_obj)
        self.name = name
        self.parent_type = parent_type # ClosureMT of the parent (or None)
        self.parent_obj = parent_obj # Whether this closure has a reference to the parent object
        # self.cells = cells # list of [(name, type)]
        # self.functions = functions # mapping of name -> UnboxedFunctionMT
        # self.classes = classes # mapping of name -> ClassMT
        # self.modules = modules # mapping of name -> ModuleMT
        struct_fields = cells
        if cells and parent_obj:
            # Non-inlined child frame keeps a pointer to its parent frame.
            struct_fields.insert(0, (ClosureMT.PARENT_FIELD_NAME, parent_type.instantiated_type()))
        assert not set(functions).intersection(classes)
        assert not set(classes).intersection(modules)
        assert not set(modules).intersection(functions)
        constants = {}
        constants.update(functions)
        constants.update(classes)
        constants.update(modules)
        self._struct = _StructWriter(em, name, struct_fields, constants)
        self._struct.write(em)
        self.initialized = ("attrs", "write")
    def instantiated_type(self):
        # An inlined closure has no struct of its own; the nearest non-inlined
        # ancestor's type stands in for it.
        if self.inlined:
            return self.parent_type.instantiated_type()
        return self
    def llvm_type(self):
        if self.inlined:
            assert self.parent_type, "shouldnt want the llvm type of this because it's not going to be instantiated"
            return self.parent_type.llvm_type()
        return "%%%s*" % (self.name,)
    def dup(self, v, dup_cache):
        assert v is None or isinstance(v, str)
        return v
    def alloc(self, em, parent=None):
        """Allocate a frame; ``parent`` must be supplied iff this closure keeps
        a reference to its parent frame (self.parent_obj)."""
        assert (parent is None) == (not self.parent_obj), (parent, self.parent_obj)
        if self.inlined:
            # No struct of our own: reuse (a split of) the parent's frame,
            # or nothing at all when there's no parent object.
            if parent:
                r = parent.split(em)
                r.t = self
                return r
            else:
                return Variable(self, None, 1, False)
        if parent:
            assert parent.t == self.parent_type
        name = self._struct.alloc(em)
        assert isinstance(name, str)
        if parent:
            self._struct.set(em, name, ClosureMT.PARENT_FIELD_NAME, parent, skip_decref_prev=True)
        return Variable(self, name, 1, True)
    def getattr(self, em, v, attr, clsonly):
        """Only the refcounting helpers are exposed; they belong to the nearest
        non-inlined ancestor, which owns the actual struct."""
        first_real = self
        while first_real.inlined:
            first_real = first_real.parent_type
            assert first_real
        if attr == "__incref__":
            return UnboxedInstanceMethod.make(em, v, Variable(UnboxedFunctionMT(em, None, [self], None_), ("@%s_incref" % first_real.name, [], None), 1, False))
        if attr == "__decref__":
            return UnboxedInstanceMethod.make(em, v, Variable(UnboxedFunctionMT(em, None, [self], None_), ("@%s_decref" % first_real.name, [], None), 1, False))
        raise Exception("couldn't find '%s' attribute on %s" % (attr, self))
    def has(self, name, include_parents=False):
        if self._struct.has(name):
            return True
        if not include_parents:
            return False
        if self.parent_type:
            if self.parent_obj:
                return self.parent_type.has(name)
            else:
                # Without a parent pointer, only compile-time constants of the
                # parent chain are reachable.
                return self.parent_type.has_constant(name)
        return False
    def has_constant(self, name, include_parents=False):
        if self._struct.has_constant(name):
            return True
        if not include_parents:
            return False
        return self.parent_type and self.parent_type.has_constant(name)
    def get(self, em, v, name):
        """Resolve ``name`` in this frame, walking up parent frames as needed.

        ``v`` is the LLVM value of this frame; None restricts the lookup to
        compile-time constants.
        """
        assert isinstance(name, str)
        assert (v is None) or isinstance(v, str)
        if self._struct.has_constant(name):
            return self._struct.get(em, None, name)
        if v is None:
            return self.parent_type.get(None, None, name)
        if self._struct.has(name):
            return self._struct.get(em, v, name)
        assert self.parent_type
        if self.inlined:
            # Inlined frame: our value *is* the parent frame's value.
            return self.parent_type.get(em, v, name)
        if self.parent_obj:
            parent = self._struct.get(em, v, ClosureMT.PARENT_FIELD_NAME)
            r = self.parent_type.get(em, parent.v, name)
            parent.decvref(em)
            return r
        else:
            return self.parent_type.get(None, None, name)
    def set(self, em, v, name, val):
        assert isinstance(val, Variable)
        self._struct.set(em, v, name, val)
    def _can_convert_to(self, t):
        return False
    @staticmethod
    def create(em, node, parent_type, parent_obj, closure_info, type_info):
        """Build a ClosureMT for an AST scope from closure-analysis results.

        Scopes with no captured cells get an inlined (struct-less) closure;
        nested functions/classes/modules start as None constants, filled later.
        """
        assert is_emitter(em)
        assert (parent_type is None) or (isinstance(parent_type, ClosureMT)), parent_type
        assert isinstance(closure_info, closure_analyzer.ClosureResults)
        assert isinstance(parent_obj, bool)
        if parent_obj:
            assert parent_type
        cells = []
        for name in closure_info.used_in_nested:
            try:
                cells.append((name, type_info.get_scope_type(em, node, name)))
            except:
                print "failed on", name
                raise
        # Probably don't need to always create the global closure?
        # if not cells and not isinstance(node, _ast.Module):
        if not cells:
            return ClosureMT(em, '', parent_type, parent_obj, cells,
                    dict([(n, None) for n in closure_info.functions]),
                    dict([(n, None) for n in closure_info.classes]),
                    dict([(n, None) for n in closure_info.modules]))
        type_name = em.mkname(prefix="closure")
        return ClosureMT(
                em,
                type_name,
                parent_type,
                parent_obj,
                cells,
                dict([(n, None) for n in closure_info.functions]),
                dict([(n, None) for n in closure_info.classes]),
                dict([(n, None) for n in closure_info.modules]),
                )
class ClassMT(MT):
    """Compile-time type of a class object (user-defined or builtin).

    Tracks class-level attributes (methods, held as compile-time constants)
    and the ordered instance fields.  During the "write" stage it emits the
    LLVM instance struct, dealloc body, constructor (@<name>_new) and type
    object.  Calling the class Variable invokes the generated constructor.
    """
    def __init__(self, base, type_name, displayname, llvm_type=None):
        super(ClassMT, self).__init__()
        # Every class other than the root "object" needs a ClassMT base.
        if type_name != "object":
            assert base
            assert isinstance(base, ClassMT), base
        self.base = base
        self._name = type_name
        self._displayname = displayname
        if llvm_type is None:
            llvm_type = "%%%s*" % type_name
        self._llvm_type = llvm_type
        self._ctor = None  # Variable for @<name>_new; built during "write"
        self._instance = InstanceMT(self)
        self._clsattr_types = {}   # attr name -> declared MT of the class attr
        self._clsattrs = {}        # attr name -> constant Variable (or placeholder)
        self._instattr_types = []  # ordered [(name, MT)] of instance fields
    def _initialize(self, em, stage):
        """Stage "attrs" is a no-op; stage "write" emits the instance struct,
        dealloc body, constructor wrapper, and type object."""
        if stage == "attrs":
            pass
        elif stage == "write":
            # Struct layout: refcount (i64) followed by the instance fields.
            type_string = ", ".join(['i64'] + [t.llvm_type() for (n, t) in self._instattr_types])
            # Build the dealloc body: decref every refcounted attribute.
            decref_all = ""
            for i in xrange(len(self._instattr_types)):
                attr_type = self._instattr_types[i][1]
                attr_type.initialize(em, "attrs")
                d = attr_type.decref_llvm(em, "%%attr%d" % (i,))
                if not d:
                    continue
                decref_all += "%%ptr%d = getelementptr inbounds %s %%self, i64 0, i32 %d\n" % (i, self._instance.llvm_type(), i + 1)
                decref_all += "%%attr%d = load %s* %%ptr%d\n" % (i, attr_type.llvm_type(), i)
                decref_all += d + '\n'
            decref_all = decref_all.replace('\n', '\n ')
            if self._ctor:
                # TODO Hacky that it still writes out the __new__ function but doesn't use it
                new_args = ""
                init = ""
            elif not self.has_classattr("__init__"):
                # No user __init__: default zero-argument constructor.
                new_args = ""
                init = ""
                self._ctor = Variable(UnboxedFunctionMT(None, None, [], self._instance), ("@%s_new" % self._name, [], None), 1, False)
            else:
                # Wrap the user __init__: @<name>_new allocates the instance
                # (bound to %rtn) and forwards the constructor arguments.
                init_fn = self.getattr(em, Variable(self, (), 1, False), "__init__", False)
                init_fn.t.initialize(em, "write")
                init_fn.incvref(em)
                new_em = CodeEmitter(em)
                args = [Variable(self._instance, "%rtn", 1, False)]
                for i, a in enumerate(init_fn.t.arg_types[1:]):
                    args.append(Variable(a, "%%v%d" % i, 1, False))
                init_fn.call(new_em, args)
                init = new_em.get_llvm()
                new_t = UnboxedFunctionMT(None, None, init_fn.t.arg_types[1:], self._instance, ndefaults=init_fn.t.ndefaults)
                assert isinstance(init_fn.t, UnboxedFunctionMT)
                defaults = init_fn.v[1]
                self._ctor = Variable(new_t, ("@%s_new" % self._name, defaults, None), 1, False)
                new_args = ", ".join(["%s %%v%d" % (a.llvm_type(), i) for i, a in enumerate(init_fn.t.arg_types[1:])])
            # Emit the class-name string constant and the type object literal.
            strname_ptr = em.get_str_ptr(self._name)
            strname_str = "@" + em.mkname("str")
            em.llvm_tail.write("%s = global %%string {i64 1, i64 %d, i8* %s, [0 x i8] zeroinitializer}\n" % (strname_str, len(self._name), strname_ptr))
            typeobj = "{i64 1, %%string* %s, %%type* null}" % (strname_str,)
            em.llvm_tail.write(eval_template("instance", em, {
                'n':self._name,
                'type_str':type_string,
                'DEBUG_CHECKS':' ' if DEBUG_CHECKS else ';',
                'NO_DEBUG_CHECKS':' ' if not DEBUG_CHECKS else ';',
                'decref_all':decref_all,
                'new_args':new_args,
                'init':init,
                'alloc_name':em.get_str_ptr(self._name),
                'typeobj': typeobj,
                'displayname': self._displayname,
                }))
            em.c_head.write(eval_ctemplate("instance", em, {'n':self._name}))
        else:
            raise Exception(stage)
    def get_typeobj(self, em):
        """Return a Variable referencing the emitted @<name>_typeobj global."""
        return Variable(Type, "@%s_typeobj" % self._name, 1, False)
    def set_clsattr_type(self, name, t):
        """Declare the type of a class attribute (before initialization)."""
        assert not self.initialized
        assert name not in ("__nrefs__", "__incref__", "__decref__")
        self._clsattr_types[name] = t
    def setattr(self, em, v, attr, val):
        """Assign a function-valued class attribute; ``v`` must be empty."""
        assert not v
        assert isinstance(val.t, UnboxedFunctionMT)
        self.set_clsattr_value(attr, val, em=em)
    def set_clsattr_value(self, name, v, _init=False, em=None, force=False):
        """Bind the constant value of class attribute ``name``.

        With _init=True (or on the first binding of a declared attr) the value
        is recorded directly.  Otherwise getattr previously handed out a
        placeholder for this attribute; the placeholder symbol and its
        default-argument placeholders are patched to the real function via
        em.register_replacement.
        """
        if not force:
            assert name not in ("__nrefs__", "__incref__", "__decref__")
        assert isinstance(v.t, (UnboxedFunctionMT, PolymorphicFunctionMT))
        assert isinstance(v.v, tuple), (v.v, "probably not a constant and we don't allocate actual storage for class attrs")
        assert _init or em
        if _init or (em and name in self._clsattr_types and name not in self._clsattrs):
            if _init:
                assert name not in self._clsattr_types
                assert name not in self._clsattrs
            self._clsattr_types[name] = v.t
            self._clsattrs[name] = v.dup({})
        else:
            assert name in self._clsattr_types
            assert name in self._clsattrs
            placeholder = self._clsattrs[name]
            assert v.t.can_convert_to(self._clsattr_types[name]), (v.t, self._clsattr_types[name])
            assert isinstance(v.t, UnboxedFunctionMT)
            placeholder_defaults = placeholder.v[1]
            new_defaults = v.v[1]
            assert len(new_defaults) == len(placeholder_defaults)
            for i in xrange(len(placeholder_defaults)):
                assert new_defaults[i].t == placeholder_defaults[i].t
                # (removed leftover debug print of the placeholder/new values)
                em.register_replacement(placeholder_defaults[i].v, str(new_defaults[i].v))
            em.register_replacement(placeholder.v[0], v.v[0])
    def set_instattr_type(self, name, t):
        """Declare an instance field and its type (before initialization)."""
        assert not self.initialized
        assert name not in ("__nrefs__", "__incref__", "__decref__")
        assert not any(name == _n for (_n, _t) in self._instattr_types)
        self._instattr_types.append((name, t))
    def has_classattr(self, attr):
        """True if this class (or any base) declares class attribute ``attr``."""
        return attr in self._clsattr_types or (self.base and self.base.has_classattr(attr))
    def getattr(self, em, var, attr, clsonly):
        """Look up class attribute ``attr``, delegating to the base class when
        it isn't declared here.  A declared-but-not-yet-bound function gets a
        placeholder Variable that set_clsattr_value patches later."""
        # TODO this should just use _StructWriter; there's some compilcation with builtin classes though
        assert not clsonly
        if attr not in self._clsattr_types:
            assert self.base.has_classattr(attr)
            return self.base.getattr(em, var, attr, clsonly)
        if attr in self._clsattr_types and attr not in self._clsattrs:
            t = self._clsattr_types[attr]
            # bugfix: the assert message previously referenced the undefined
            # name ``name`` (NameError on assertion failure); use ``attr``.
            assert isinstance(t, CallableMT), (attr, t)
            new_t = UnboxedFunctionMT(None, None, t.arg_types, t.rtn_type, ndefaults=t.ndefaults)
            defaults = [Variable(t.arg_types[len(t.arg_types) - t.ndefaults + i], em.get_placeholder(), 1, False) for i in xrange(t.ndefaults)]
            self._clsattrs[attr] = Variable(new_t, (em.get_placeholder(), defaults, None), 1, False)
        assert attr in self._clsattrs
        v = self._clsattrs[attr].dup({})
        assert isinstance(v.v, tuple), (v.v, "probably not a constant and we don't allocate actual storage for class attrs")
        assert not v.marked, "should probably have incref'd in this case?"
        return v
    def free(self, em, v):
        """Release the class object's references to its constant attributes."""
        assert v == ()
        for v in self._clsattrs.itervalues():
            # (removed leftover debug print of each attribute's __dict__)
            v.decvref(em)
    def dup(self, v, dup_cache):
        assert v == ()
        return v
    def call(self, em, v, args, expected_type=None):
        """Calling the class invokes the generated constructor."""
        assert v == ()
        self._ctor.incvref(em)
        return self._ctor.call(em, args)
    def get_instance(self):
        """Return the InstanceMT describing instances of this class."""
        return self._instance
    # Counter used by create() to mint unique internal type names.
    __nclasses = 0
    @staticmethod
    def create(base, displayname):
        """Mint a new user class with a unique internal "clsN" type name."""
        type_name = "cls%d" % ClassMT.__nclasses
        ClassMT.__nclasses += 1
        return ClassMT(base, type_name, displayname)
    def llvm_type(self):
        assert 0, "shouldnt do this"
    def get_instantiated(self):
        return self._ctor.t.get_instantiated()
    def _can_convert_to(self, t):
        return self._ctor.t.can_convert_to(t)
    def _convert_to(self, em, var, t):
        # Converting the class to Type yields its type object; any other
        # target converts the constructor function instead.
        if t is Type:
            return self.get_typeobj(em)
        self._ctor.incvref(em)
        return self._ctor.convert_to(em, t)
class InstanceMT(MT):
    """Compile-time type of an instance of a ClassMT.

    Field reads/writes go through getelementptr on the emitted struct (slot 0
    is the refcount); method lookups fall back to the class, with compiler
    defaults for __repr__/__eq__/__nonzero__ and the refcounting helpers.
    """
    def __init__(self, cls):
        super(InstanceMT, self).__init__()
        assert isinstance(cls, ClassMT)
        self.cls = cls
        self._name = cls._name
        self._llvm_type = cls._llvm_type
    def _initialize(self, em, stage):
        # Careful: calling initialize with the same stage has the potential to
        # introduce cyclic dependencies, but the class should only call instance.initialize
        # with an earlier stage, so it should be ok:
        self.cls.initialize(em, stage)
    def llvm_type(self):
        return self._llvm_type
    def getattrptr(self, em, var, attr):
        """Return a PtrMT Variable pointing at instance field ``attr``."""
        assert isinstance(var, Variable)
        offset = 1
        for i, (name, t) in enumerate(self.cls._instattr_types):
            if name == attr:
                pl = '%' + em.mkname()
                em.pl("%s = getelementptr %s %s, i64 0, i32 %d" % (pl, self.llvm_type(), var.v, i + offset))
                em.pc("#error unimplemented 15")
                return Variable(PtrMT(t), pl, 1, False)
        raise UserAttributeError(attr)
    def hasattr(self, attr):
        return attr in self.cls._clsattr_types or attr in [name for (name, t) in self.cls._instattr_types]
    def getattr(self, em, var, attr, clsonly):
        """Load instance field ``attr`` (incref'ing the result), or fall back
        to a class-level method bound to this instance."""
        self.cls._MT__check_initialized("attrs")
        assert isinstance(var, Variable)
        if not clsonly:
            if attr == "__class__":
                return self.cls.get_typeobj(em)
            offset = 1
            for i, (name, t) in enumerate(self.cls._instattr_types):
                if name == attr:
                    pl = '%' + em.mkname()
                    rtn = '%' + em.mkname()
                    em.pl("%s = getelementptr %s %s, i64 0, i32 %d" % (pl, self.llvm_type(), var.v, i + offset))
                    em.pl("%s = load %s* %s" % (rtn, t.llvm_type(), pl))
                    em.pc("#error unimplemented 16")
                    i = t.incref_llvm(em, rtn)
                    if i:
                        em.pl(i)
                    return Variable(t, rtn, 1, True)
        m = self._get_clsmethod(em, attr)
        return UnboxedInstanceMethod.make(em, var, m)
    def get_attr_types(self):
        """Map attr name -> (MT, AttributeAccessType), including inherited
        attrs plus compiler-provided __incref__/__decref__/__repr__/__class__."""
        if self.cls.base:
            r = self.cls.base._instance.get_attr_types()
        else:
            r = {}
        for name, t in self.cls._instattr_types:
            r[name] = (t, AttributeAccessType.FIELD)
        for name, t in self.cls._clsattr_types.iteritems():
            if name in r:
                continue
            r[name] = (UnboxedInstanceMethod(self, t), AttributeAccessType.CONST_METHOD)
        # TODO default functions like __repr__
        r["__incref__"] = (UnboxedInstanceMethod(self, UnboxedFunctionMT(None, None, [self], None_)), AttributeAccessType.CONST_METHOD)
        r["__decref__"] = (UnboxedInstanceMethod(self, UnboxedFunctionMT(None, None, [self], None_)), AttributeAccessType.CONST_METHOD)
        if "__repr__" not in r:
            r["__repr__"] = (UnboxedInstanceMethod(self, UnboxedFunctionMT(None, None, [self], Str)), AttributeAccessType.CONST_METHOD)
        r["__class__"] = (Type, AttributeAccessType.IMPLICIT_FIELD)
        return r
    def _get_clsmethod(self, em, attr):
        """Look up a class method, falling back to the compiler-generated
        defaults (@<name>_repr, _nonzero, _eq, _incref, _decref)."""
        self.cls._MT__check_initialized("attrs")
        if not self.cls.has_classattr(attr):
            if attr == "__str__":
                return self._get_clsmethod(em, "__repr__")
            if attr == "__repr__":
                return Variable(UnboxedFunctionMT(em, None, [self], Str), ("@%s_repr" % self._name, [], None), 1, False)
            if attr == "__nonzero__":
                return Variable(UnboxedFunctionMT(em, None, [self], Bool), ("@%s_nonzero" % self._name, [], None), 1, False)
            if attr == "__eq__":
                return Variable(UnboxedFunctionMT(em, None, [self, self], Bool), ("@%s_eq" % self._name, [], None), 1, False)
            if attr == "__incref__":
                return Variable(UnboxedFunctionMT(em, None, [self], None_), ("@%s_incref" % self._name, [], None), 1, False)
            if attr == "__decref__":
                return Variable(UnboxedFunctionMT(em, None, [self], None_), ("@%s_decref" % self._name, [], None), 1, False)
            raise UserAttributeError("no such attribute '%s' on %s" % (attr, self))
        return self.cls.getattr(em, Variable(self.cls, (), 1, False), attr, False)
    def setattr(self, em, v, attr, val):
        """Store ``val`` into instance field ``attr``, converting it to the
        declared field type, decref'ing the old value and incref'ing the new."""
        assert isinstance(v, str)
        assert isinstance(val, Variable)
        offset = 1
        for i, (name, t) in enumerate(self.cls._instattr_types):
            if name == attr:
                val = val.convert_to(em, t)
                assert t == val.t, (t, val.t)
                inc = val.t.incref_llvm(em, val.v)
                if inc:
                    em.pl(inc)
                pl = '%' + em.mkname()
                old_val = '%' + em.mkname()
                em.pl("%s = getelementptr %s %s, i64 0, i32 %d" % (pl, self.llvm_type(), v, i + offset))
                d = t.decref_llvm(em, old_val)
                if d:
                    em.pl("%s = load %s* %s" % (old_val, t.llvm_type(), pl))
                    em.pl(d)
                em.pl("store %s %s, %s* %s" % (t.llvm_type(), val.v, t.llvm_type(), pl))
                em.pc("#error unimplemented 17")
                val.decvref(em)
                return
        raise Exception("doesn't have attribute '%s'" % (attr,))
    def convert_to(self, em, var, t):
        # Instances only convert to themselves or into a boxed union.
        if t is self:
            return var
        assert isinstance(t, BoxedMT), (self, t)
        return t.convert_from(em, var)
    def _can_convert_to(self, t):
        # TODO this is wrong
        return False
    def __repr__(self):
        return "<InstanceMT %r (%s)>" % (self.llvm_type(), self.cls._displayname)
# Builtin classes and their corresponding instance types.  Primitive instances
# use unboxed LLVM representations (i64, double, i1, %string*) rather than a
# generated pointer-to-struct type.
ObjectClass = ClassMT(None, "object", "object")
Object = ObjectClass._instance
IntClass = ClassMT(ObjectClass, "int", "int", llvm_type="i64")
Int = IntClass._instance
FloatClass = ClassMT(ObjectClass, "float", "float", llvm_type="double")
Float = FloatClass._instance
StrClass = ClassMT(ObjectClass, "str", "str", llvm_type="%string*")
Str = StrClass._instance
BoolClass = ClassMT(ObjectClass, "bool", "bool", "i1")
Bool = BoolClass._instance
TypeClass = ClassMT(ObjectClass, "type", "type")
Type = TypeClass._instance
FileClass = ClassMT(ObjectClass, "file", "file")
File = FileClass._instance
# TODO there is a lot of duplication between this and stuff like closures
class UserModuleMT(MT):
    """Compile-time type of a user (compiled) module object.

    Module-level variables used by nested scopes live as struct fields, while
    functions, classes, and submodules are compile-time constants.  The
    " __loaded__" field (added in make()) supports one-time module loading.
    """
    # Leading space avoids collision with a real module attribute name.
    LOADED_FIELD_NAME = " __loaded__"
    def __init__(self, em, module_name, module_fn, type_name, vars, constants, function_types):
        super(UserModuleMT, self).__init__()
        self.module_name = module_name
        self._type_name = type_name
        self.fn = module_fn
        self._struct = _StructWriter(em, type_name, vars, constants, function_types=function_types)
        self._struct.write(em)
        self.initialized = ("attrs", "write")
    def llvm_type(self):
        return "%%%s*" % (self._type_name)
    def has(self, name):
        return self._struct.has(name)
    def has_constant(self, name):
        return self._struct.has_constant(name)
    # Have this to mimic closure objects, since this can be a closure
    def set(self, em, v, name, val):
        return self.setattr(em, v, name, val)
    def getattr(self, em, v, name, clsonly=False, skip_incref=False):
        assert not clsonly
        return self._struct.get(em, v.v, name, skip_incref=skip_incref)
    def setattr(self, em, v, attr, val):
        return self._struct.set(em, v, attr, val)
    def load(self, em):
        """Emit a call to the module's generated init function."""
        em.pc("#error unimplemented 18")
        em.pl("call void @%s_init()" % self.module_name)
    def load_modules(self, em, cg, closure_info, ts_module, type_info):
        """Fill in the submodule constants (left as None by make()) by
        importing each referenced module via the code generator."""
        for n in closure_info.modules:
            assert self._struct.constants[n] is None
            # TODO this is pretty hacky
            submodule = ts_module.get_name(n)
            assert len(submodule.types()) == 1
            submodule, = submodule.types()
            m = cg.import_module(em, submodule.name)
            self._struct.constants[n] = m
    @staticmethod
    def make(em, module_name, module, module_fn, closure_info, ts_module, type_info):
        """Build the UserModuleMT for an AST module and emit its module object.

        Functions and submodules start as None constants (placeholders filled
        in later, which allows cycles in the import graph); classes are bound
        immediately.
        """
        assert is_emitter(em)
        assert isinstance(module, _ast.Module)
        assert isinstance(closure_info, closure_analyzer.ClosureResults)
        vars = []
        vars.append((UserModuleMT.LOADED_FIELD_NAME, Bool))
        for name in closure_info.used_in_nested:
            try:
                vars.append((name, type_info.get_scope_type(em, module, name)))
            except:
                print "failed on", name
                raise
        constants = {}
        function_types = {}
        for n in closure_info.functions:
            u = ts_module.get_name(n)
            assert len(u.types()) == 1
            if u.types()[0].is_dead():
                print "Not adding %s since it's dead" % (n,)
                continue
            try:
                function_types[n] = type_info._convert_type(em, u)
            except Exception:
                print >>sys.stderr, "Failed when converting attribute %s of module %s [%s]" % (n, module_name, ts_module.get_name(n).display())
                raise
        for n, c in function_types.iteritems():
            assert n not in constants
            function_types[n] = UnboxedFunctionMT(em, None, c.arg_types, c.rtn_type)
            constants[n] = None
        for n in closure_info.classes:
            assert n not in constants
            cls = type_info._convert_type(em, ts_module.get_name(n))
            assert isinstance(cls, ClassMT)
            constants[n] = Variable(cls, (), 1, False)
        for n in closure_info.modules:
            assert n not in constants
            constants[n] = None
            # We will fill this in later, to allow for
            # cycles in the import graph
        # Strip digits so the generated type name stays a valid identifier stem.
        type_name = em.mkname(prefix="mod_%s" % filter(lambda c: not c.isdigit(), module_name))
        t = UserModuleMT(
                em,
                module_name,
                module_fn,
                type_name,
                vars,
                constants,
                function_types,
                )
        module_obj_name = "@" + em.mkname(prefix="module")
        module_obj = Variable(t, module_obj_name, 1, False)
        em.llvm_tail.write(eval_template("module", em, {
            'module_obj':module_obj,
            'type_name':type_name,
            'module_name':module_name,
            't':t,
            }))
        return module_obj
class ModuleMT(MT):
    """Type of a builtin module: a fixed, read-only bag of constant attribute
    Variables supplied at construction time."""
    def __init__(self, attrs):
        super(ModuleMT, self).__init__()
        self._attrs = attrs
        self.initialized = ("attrs", "write")
    def getattr(self, em, var, attr, clsonly):
        """Return a duplicate of the constant bound to ``attr``."""
        assert not clsonly
        try:
            stored = self._attrs[attr]
        except KeyError:
            raise UserAttributeError(attr)
        copy = stored.dup({})
        # TODO to support setting things, this should assert marked, but then we need to put it somewhere
        assert not copy.marked, "should probably have incref'd in this case?"
        return copy
    def llvm_type(self):
        assert 0, "shouldnt do this"
    def get_instantiated(self):
        assert 0, "dont support this yet"
        # Don't support raising these yet
        return None
    def _can_convert_to(self, t):
        # Builtin modules never convert to anything else.
        return False
class PtrMT(MT):
    """Compiler-internal MT for a raw pointer to a stored value of
    ``referent_type``; never exposed as a user-visible type."""
    def __init__(self, referent_type):
        super(PtrMT, self).__init__()
        self.referent_type = referent_type
        self.initialized = ("attrs", "write")
    def llvm_type(self):
        # The referent's LLVM type with one more level of indirection.
        return "%s*" % (self.referent_type.llvm_type(),)
    def incref_llvm(self, em, name):
        # Raw pointers themselves are not refcounted.
        return None
    def decref_llvm(self, em, name):
        return None
class FuncPtrMT(MT):
    """Compiler-internal MT wrapping a stored function pointer of type
    ``func_type``."""
    def __init__(self, func_type):
        super(FuncPtrMT, self).__init__()
        self.func_type = func_type
        self.initialized = ("attrs", "write")
    def llvm_type(self):
        # Same representation as the wrapped function type.
        return self.func_type.llvm_type()
    def incref_llvm(self, em, name):
        # Function pointers carry no refcount.
        return None
    def decref_llvm(self, em, name):
        return None
    def getattr(self, em, var, attr, clsonly=False):
        # Function pointers expose no attributes at all.
        raise UserAttributeError(attr)
class _UnderlyingMT(MT):
    """MT for the opaque %underlying payload pointer stored inside boxed
    objects; refcounting goes through the box, never through this type."""
    def __init__(self):
        super(_UnderlyingMT, self).__init__()
        self.initialized = ("attrs", "write")
    def llvm_type(self):
        return "%underlying*"
    def incref_llvm(self, em, v):
        assert False, "shouldn't be calling this"
    def decref_llvm(self, em, v):
        assert False, "shouldn't be calling this"
Underlying = singleton(_UnderlyingMT)
class BoxedMT(MT):
class StorageStrategy(object):
CONST_METHOD = "method" # store a reference to the method, and create the instancemethod on access
PTR = "ptr" # store a pointer to the field in the object
VALUE = "value" # store the value of the field in the boxed object
UNDERLYING_FIELD_NAME = " __underlying"
@staticmethod
def make_struct_dealloc(em, type_name, field_positions):
deallocs = []
decref_name = None
underlying_name = None
for n, (i, t) in sorted(field_positions.items()):
ptr_name = "%" + em.mkname()
name = "%" + em.mkname()
deallocs.append("%s = getelementptr %%%s* %%self, i64 0, i32 %d\n %s = load %s* %s" % (ptr_name, type_name, i, name, t.llvm_type(), ptr_name))
if n == BoxedMT.UNDERLYING_FIELD_NAME:
underlying_name = name
continue
if n == "__decref__":
decref_name = name
d = t.decref_llvm(em, name)
if d is None:
continue
deallocs.append(d)
assert decref_name
assert underlying_name
deallocs.append("call void %s(%%underlying* %s)" % (decref_name, underlying_name))
dealloc = '\n '.join(s for s in deallocs if s)
return dealloc
__nboxes = 0
def __init__(self, types):
super(BoxedMT, self).__init__()
self._name = "boxed%d" % (BoxedMT.__nboxes,)
BoxedMT.__nboxes += 1
self.types = types
def _initialize(self, em, stage):
if stage == "attrs":
for t in self.types:
t.initialize(em, "attrs")
all_attrs = [t.get_attr_types() for t in self.types]
all_attr_names = set()
for i in xrange(len(self.types)):
d = all_attrs[i]
all_attr_names.update(d)
assert "__class__" in d, (d, self.types[i])
for n, (t, at) in d.items():
try:
d[n] = (t.get_instantiated(), at)
except CantInstantiateException:
del d[n]
attrs = {}
for n in all_attr_names:
if any(n not in d for d in all_attrs):
continue
attr_types = [d[n] for d in all_attrs]
if any(at[1] != attr_types[0][1] for at in attr_types):
continue
types = [at[0] for at in attr_types]
if attr_types[0][1] == AttributeAccessType.FIELD and len(set(types)) > 1:
continue
sup = make_common_supertype(types)
if sup is not None:
# TODO this next initialize should somehow be taken care of before the
# enclosing _initialize is even started
# sup.initialize(em)
attrs[n] = sup, attr_types[0][1]
assert "__incref__" in attrs
assert "__decref__" in attrs
assert "__class__" in attrs
attrs.pop("__init__", None)
converted_attrs = []
for n, (t, at) in attrs.iteritems():
ss = BoxedMT._storage_strategy(n, t, at)
converted_attrs.append((n, t, ss))
self.attrs = {}
struct_fields = []
struct_fields.append((BoxedMT.UNDERLYING_FIELD_NAME, Underlying))
for n, t, ss in converted_attrs:
st = self._storage_type(em, t, ss)
# print n, t, st
struct_fields.append((n, st))
self.attrs[n] = (t, ss, st)
self._struct = _StructWriter(em, self._name, struct_fields, {}, dealloc_maker=BoxedMT.make_struct_dealloc)
self.__converters = {}
em.c_head.write("struct %s;" % self._name)
elif stage == "write":
self._struct.write(em)
else:
raise Exception(stage)
def llvm_type(self):
return "%" + self._name + "*"
def c_type(self):
return "struct " + self._name + "*"
def hasattr(self, attr):
return self._struct.has(attr)
def setattr(self, em, v, attr, val):
assert self.initialized
if attr not in self.attrs:
raise UserAttributeError("boxed object does not contain settable %s" % attr)
t, ss, st = self.attrs[attr]
assert ss == BoxedMT.StorageStrategy.PTR, ss
ptr = self._struct.get(em, v, attr)
assert isinstance(ptr.t, PtrMT)
assert ptr.t.referent_type is val.t
prev = "%" + em.mkname()
dec = val.t.decref_llvm(em, prev)
if dec:
em.pl("%s = load %s* %s" % (prev, val.t.llvm_type(), ptr.v))
em.pl(dec)
em.pl("store %s %s, %s* %s" % (val.t.llvm_type(), val.v, val.t.llvm_type(), ptr.v))
def getattr(self, em, v, attr, clsonly):
assert self.initialized
# Handle these specially, since they hit the boxed object, not the underlying:
if attr == "__incref__":
return UnboxedInstanceMethod.make(em, v, Variable(UnboxedFunctionMT(em, None, [self], None_), ("@%s_incref" % self._name, [], None), 1, False))
if attr == "__decref__":
return UnboxedInstanceMethod.make(em, v, Variable(UnboxedFunctionMT(em, None, [self], None_), ("@%s_decref" % self._name, [], None), 1, False))
assert attr not in ("__incref__", "__decref__")
stored = self._struct.get(em, v.v, attr)
t, ss, st = self.attrs[attr]
if ss == BoxedMT.StorageStrategy.CONST_METHOD:
raise Exception("Pretty sure this isn't right, since we should incref the underlying as well")
o = self._struct.get(em, v, BoxedMT.UNDERLYING_FIELD_NAME)
assert isinstance(stored.t, FuncPtrMT)
return UnboxedInstanceMethod.make(em, o, stored)
elif ss == BoxedMT.StorageStrategy.VALUE:
return stored
elif ss == BoxedMT.StorageStrategy.PTR:
assert isinstance(stored.t, PtrMT)
rtn_type = st.referent_type
r = '%' + em.mkname()
em.pl("%s = load %s* %s" % (r, rtn_type.llvm_type(), stored.v))
inc = rtn_type.incref_llvm(em, r)
if inc:
em.pl(inc + " ; getting boxed attr by ptr")
em.pc("#error unimplemented 19")
return Variable(rtn_type, r, 1, True)
raise Exception(ss)
def _storage_type(self, em, attr_type, storage_strategy):
if storage_strategy == BoxedMT.StorageStrategy.CONST_METHOD:
return FuncPtrMT(UnboxedFunctionMT(em, None, [Underlying] + attr_type.arg_types, attr_type.rtn_type))
elif storage_strategy == BoxedMT.StorageStrategy.VALUE:
return attr_type
elif storage_strategy == BoxedMT.StorageStrategy.PTR:
return PtrMT(attr_type)
else:
raise Exception(storage_strategy)
def can_convert_from(self, t):
if t is None_:
return True
attr_types = t.get_attr_types()
for attr_name, (_t, ss, st) in self.attrs.iteritems():
if attr_name not in attr_types or not attr_types[attr_name][0].can_convert_to(_t):
return False
return True
def convert_from(self, em, var):
self._MT__check_initialized("attrs")
# TODO assert size of var object is 64 bits
# TODO return None if the input was None
if var.t not in self.__converters:
converter_name = "@%s_from_%s" % (self._name, var.t.llvm_type().replace("*", "_").replace("%", ""))
self.__converters[var.t] = Variable(UnboxedFunctionMT(None, None, [var.t], self), (converter_name, [], None), 1, False)
evaluated = eval_template("boxer", em, {
'bt': self,
'ot': var.t,
'converter_name':converter_name,
})
em.llvm_tail.write(evaluated)
converter = self.__converters[var.t]
converter.incvref(em)
return converter.call(em, [var])
@staticmethod
def _storage_strategy(name, t, at):
    """Decide how attribute ``name`` (accessed via access type ``at``) is stored."""
    # Refcounting hooks are always stored as const methods, regardless of
    # how the source type exposes them.
    if name in ("__incref__", "__decref__"):
        return BoxedMT.StorageStrategy.CONST_METHOD
    # Mutable fields are stored by pointer; const methods and implicit
    # fields are stored by value.  (The two checks below are over disjoint
    # access types, so their order does not matter.)
    if at == AttributeAccessType.FIELD:
        return BoxedMT.StorageStrategy.PTR
    if at in (AttributeAccessType.CONST_METHOD, AttributeAccessType.IMPLICIT_FIELD):
        return BoxedMT.StorageStrategy.VALUE
    raise Exception(at)
# Cache of previously constructed supertypes, keyed by the sorted, deduped
# tuple of member types, so repeated queries return the identical object.
_made_supertypes = {}
def make_common_supertype(types):
    """Return a single type that every type in ``types`` unifies to, or None.

    Rules, in order: a singleton set is its own supertype; {Int, Float}
    unifies to Float; results are memoized; class objects unify to Type;
    callables unify only when their argument lists match exactly and their
    return types themselves unify; callables never mix with non-callables;
    anything else falls back to constructing a BoxedMT over the member types.
    """
    assert all(isinstance(t, MT) for t in types)
    types = tuple(sorted(set(types)))
    if len(types) == 1:
        return types[0]
    if types == tuple(sorted([Int, Float])):
        return Float
    if types in _made_supertypes:
        return _made_supertypes[types]
    if all(isinstance(t, ClassMT) for t in types):
        return Type
    if all(isinstance(t, CallableMT) for t in types):
        # Callables only unify when every argument list agrees exactly.
        for t in types[1:]:
            if t.arg_types != types[0].arg_types:
                return None
        ret_type = make_common_supertype([t.rtn_type for t in types])
        if ret_type is None:
            return None
        return CallableMT.make_callable(types[0].arg_types, 0, ret_type)
    if any(isinstance(t, CallableMT) for t in types):
        # Callables aren't mixable with non-callables, at least for now.
        # Probably can/should return the common_supertype of all the __call__ attributes?
        return None
    rtn = BoxedMT(types)
    _made_supertypes[types] = rtn
    return rtn
def common_subtype(em, types):
    """Placeholder: computing a common subtype is not supported yet."""
    raise NotImplementedError()
class _FakeMT(MT):
    """Minimal stand-in type that exposes a fixed attribute table and nothing else.

    Used below to describe structural (duck-typed) stdlib interfaces without
    defining a full class.
    """
    def __init__(self, attrs):
        super(_FakeMT, self).__init__()
        self._attrs = attrs
    def _initialize(self, em, stage):
        # Nothing to emit: the attribute table is static.
        pass
    def get_attr_types(self):
        return self._attrs
# Some type classes for stdlib stuff:
STDLIB_TYPES = []
def _make_iterable(elt_type):
    """Build and register (iterator, iterable) interface types over ``elt_type``.

    The iterator exposes hasnext()/next(); the iterable exposes __iter__
    returning such an iterator.  Both include the refcounting hooks and are
    appended to STDLIB_TYPES.
    """
    iterator = BoxedMT([_FakeMT({
        "__class__": (Type, AttributeAccessType.IMPLICIT_FIELD),
        "__incref__": (CallableMT.make_callable([], 0, None_), AttributeAccessType.CONST_METHOD),
        "__decref__": (CallableMT.make_callable([], 0, None_), AttributeAccessType.CONST_METHOD),
        "hasnext": (CallableMT.make_callable([], 0, Bool), AttributeAccessType.CONST_METHOD),
        "next": (CallableMT.make_callable([], 0, elt_type), AttributeAccessType.CONST_METHOD),
    })])
    iterable = BoxedMT([_FakeMT({
        "__class__": (Type, AttributeAccessType.IMPLICIT_FIELD),
        "__incref__": (CallableMT.make_callable([], 0, None_), AttributeAccessType.CONST_METHOD),
        "__decref__": (CallableMT.make_callable([], 0, None_), AttributeAccessType.CONST_METHOD),
        "__iter__": (CallableMT.make_callable([], 0, iterator), AttributeAccessType.CONST_METHOD),
    })])
    STDLIB_TYPES.append(iterator)
    STDLIB_TYPES.append(iterable)
    return iterator, iterable
# Pre-built structural interface types used by the builtins below
# (e.g. sum() accepts an iterable of ints or floats, any() an iterable
# of boolables).
_IntIterator, _IntIterable = _make_iterable(Int)
_FloatIterator, _FloatIterable = _make_iterable(Float)
# Anything exposing __nonzero__ (plus the refcounting hooks).
_Boolable = BoxedMT([_FakeMT({
    "__class__": (Type, AttributeAccessType.IMPLICIT_FIELD),
    "__incref__": (CallableMT.make_callable([], 0, None_), AttributeAccessType.CONST_METHOD),
    "__decref__": (CallableMT.make_callable([], 0, None_), AttributeAccessType.CONST_METHOD),
    "__nonzero__": (CallableMT.make_callable([], 0, Bool), AttributeAccessType.CONST_METHOD),
})])
STDLIB_TYPES.append(_Boolable)
_BoolableIterator, _BoolableIterable = _make_iterable(_Boolable)
BUILTINS = {
"True":Variable(Bool, 1, 1, False),
"False":Variable(Bool, 0, 1, False),
"len":Variable(Len, (), 1, False),
"str":Variable(StrClass, (), 1, False),
"repr":Variable(ReprFunc, (), 1, False),
"type":Variable(TypeClass, (), 1, False),
"map":Variable(MapFunc, (), 1, False),
"reduce":Variable(ReduceFunc, (), 1, False),
"nrefs":Variable(Nref, (), 1, False),
"bool":Variable(BoolClass, (), 1, False),
"list":Variable(ListFunc, (), 1, False),
"dict":Variable(DictFunc, (), 1, False),
"set":Variable(SetFunc, (), 1, False),
"isinstance":Variable(Isinstance, (), 1, False),
"__cast__":Variable(Cast, (), 1, False),
"enumerate":Variable(Enumerate, (), 1, False),
"chr":Variable(UnboxedFunctionMT(None, None, [Int], Str), ("@chr", [], None), 1, False),
"ord":Variable(UnboxedFunctionMT(None, None, [Str], Int), ("@ord", [], None), 1, False),
# "open":Variable(UnboxedFunctionMT(None, None, [Str], File), ("@file_open", [], None), 1, True),
"open":Variable(UnboxedFunctionMT(None, None, [Str, Str], File, ndefaults=1), ("@file_open2", [Variable(Str, "@.str_r", 1, True)], None), 1, False),
"int":Variable(IntClass, (), 1, False),
"min":PolymorphicFunctionMT.make([
Variable(UnboxedFunctionMT(None, None, [Int, Int], Int), ("@int_min", [], None), 1, False),
Variable(UnboxedFunctionMT(None, None, [Float, Float], Float), ("@float_min", [], None), 1, False),
Variable(MinFunc, (), 1, False),
]),
"max":PolymorphicFunctionMT.make([
Variable(UnboxedFunctionMT(None, None, [Int, Int], Int), ("@int_max", [], None), 1, False),
Variable(UnboxedFunctionMT(None, None, [Float, Float], Float), ("@float_max", [], None), 1, False),
Variable(MaxFunc, (), 1, False),
]),
"float":Variable(FloatClass, (), 1, False),
"file":Variable(FileClass, (), 1, False),
"abs":PolymorphicFunctionMT.make([
Variable(UnboxedFunctionMT(None, None, [Int], Int), ("@int_abs", [], None), 1, False),
Variable(UnboxedFunctionMT(None, None, [Float], Float), ("@float_abs", [], None), 1, False)]),
"None":Variable(None_, "null", 1, False),
"object":Variable(ObjectClass, (), 1, False),
"sum":PolymorphicFunctionMT.make([
Variable(UnboxedFunctionMT(None, None, [_IntIterable], Int), ("@sum_int", [], None), 1, False),
Variable(UnboxedFunctionMT(None, None, [_FloatIterable], Float), ("@sum_float", [], None), 1, False),
]),
"any":Variable(UnboxedFunctionMT(None, None, [_BoolableIterable], Bool), ("@any", [], None), 1, False),
}
BUILTIN_MODULES = {
"time":Variable(ModuleMT({
'time':Variable(UnboxedFunctionMT(None, None, [], Float), ("@time_time", [], None), 1, False),
'clock':Variable(UnboxedFunctionMT(None, None, [], Float), ("@time_clock", [], None), 1, False),
'sleep':Variable(UnboxedFunctionMT(None, None, [Float], None_), ("@time_sleep", [], None), 1, False),
}), 1, 1, False),
"sys":Variable(ModuleMT({
'stdin':Variable(File, "@sys_stdin", 1, False),
'stdout':Variable(File, "@sys_stdout", 1, False),
'stderr':Variable(File, "@sys_stderr", 1, False),
}), 1, 1, False),
"math":Variable(ModuleMT({
'sqrt':Variable(UnboxedFunctionMT(None, None, [Float], Float), ("@sqrt", [], None), 1, False),
'tan':Variable(UnboxedFunctionMT(None, None, [Float], Float), ("@tan", [], None), 1, False),
'sin':Variable(UnboxedFunctionMT(None, None, [Float], Float), ("@sin", [], None), 1, False),
'cos':Variable(UnboxedFunctionMT(None, None, [Float], Float), ("@cos", [], None), 1, False),
'ceil':Variable(UnboxedFunctionMT(None, None, [Float], Float), ("@ceil", [], None), 1, False),
'pi':Variable(Float, format_float(3.141592653589793), 1, False),
}), 1, 1, False),
"collections":Variable(ModuleMT({
'deque':Variable(DequeFunc, (), 1, False),
}), 1, 1, False),
# Interopability library:
"hax":Variable(ModuleMT({
"ftoi":Variable(UnboxedFunctionMT(None, None, [Float], Int), ("@hax_ftoi", [], None), 1, False),
"itof":Variable(UnboxedFunctionMT(None, None, [Int], Float), ("@hax_itof", [], None), 1, False),
"min":Variable(UnboxedFunctionMT(None, None, [Int, Int], Int), ("@int_min", [], None), 1, False),
"max":Variable(UnboxedFunctionMT(None, None, [Int, Int], Int), ("@int_max", [], None), 1, False),
"fmin":Variable(UnboxedFunctionMT(None, None, [Float, Float], Float), ("@float_min", [], None), 1, False),
"abs":Variable(UnboxedFunctionMT(None, None, [Float], Float), ("@float_abs", [], None), 1, False),
"initvideo":Variable(UnboxedFunctionMT(None, None, [Int, Int], None_), ("@hax_initvideo", [], None), 1, False),
"plot":Variable(UnboxedFunctionMT(None, None, [Int, Int, Int, Int, Int], None_), ("@hax_plot", [], None), 1, False),
}), 1, 1, False),
}
# Register the built-in class methods for the slice and None runtime types.
SliceMT.setup_class_methods()
NoneMT.setup_class_methods()
def setup_int():
IntClass._ctor = Variable(UnboxedFunctionMT(None, None, [Str, Int], Int, ndefaults=1), ("@int_", [Variable(Int, 10, 1, False)], None), 1, False)
def _int_can_convert_to(t):
return t is Float
def _int_convert_to(em, var, t):
if t is Int:
return var
if t is Float:
name = '%' + em.mkname()
em.pl('%s = sitofp i64 %s to double' % (name, var.v))
em.pc("#error unimplemented")
var.decvref(em)
return Variable(Float, name, 1, True)
if isinstance(t, BoxedMT):
return t.convert_from(em, var)
raise UserTypeError(t)
Int._can_convert_to = _int_can_convert_to
Int.convert_to = _int_convert_to
int_class_methods = {
"__add__": Variable(UnboxedFunctionMT(None, None, [Int, Int], Int), ("@int_add", [], None), 1, False),
"__and__": Variable(UnboxedFunctionMT(None, None, [Int, Int], Int), ("@int_and", [], None), 1, False),
"__or__": Variable(UnboxedFunctionMT(None, None, [Int, Int], Int), ("@int_or", [], None), 1, False),
"__div__": Variable(UnboxedFunctionMT(None, None, [Int, Int], Int), ("@int_div", [], None), 1, False),
"__lshift__": Variable(UnboxedFunctionMT(None, None, [Int, Int], Int), ("@int_lshift", [], None), 1, False),
"__mod__": Variable(UnboxedFunctionMT(None, None, [Int, Int], Int), ("@int_mod", [], None), 1, False),
"__mul__": Variable(UnboxedFunctionMT(None, None, [Int, Int], Int), ("@int_mul", [], None), 1, False),
"__neg__": Variable(UnboxedFunctionMT(None, None, [Int], Int), ("@int_neg", [], None), 1, False),
"__nonzero__": Variable(UnboxedFunctionMT(None, None, [Int], Bool), ("@int_nonzero", [], None), 1, False),
"__pow__": Variable(UnboxedFunctionMT(None, None, [Int, Int], Int), ("@int_pow", [], None), 1, False),
"__repr__": Variable(UnboxedFunctionMT(None, None, [Int], Str), ("@int_repr", [], None), 1, False),
"__rshift__": Variable(UnboxedFunctionMT(None, None, [Int, Int], Int), ("@int_rshift", [], None), 1, False),
"__sub__": Variable(UnboxedFunctionMT(None, None, [Int, Int], Int), ("@int_sub", [], None), 1, False),
# "__incref__": Variable(UnboxedFunctionMT(None, None, [Int], None_), ("@int_incref", [], None), 1, False),
# "__decref__": Variable(UnboxedFunctionMT(None, None, [Int], None_), ("@int_decref", [], None), 1, False),
}
int_class_methods["__str__"] = int_class_methods["__repr__"]
for attr in COMPARE_MAP.values():
int_class_methods[attr] = Variable(UnboxedFunctionMT(None, None, [Int, Int], Bool), ("@int_" + attr[2:-2], [], None), 1, False)
for n, v in int_class_methods.iteritems():
IntClass.set_clsattr_value(n, v, _init=True)
IntClass.initialized = ("attrs", "write")
Int.initialized = ("attrs", "write")
setup_int()
def setup_float():
FloatClass._ctor = Variable(UnboxedFunctionMT(None, None, [Int], Float), ("@float_", [], None), 1, False)
float_class_methods = {
"__add__": Variable(UnboxedFunctionMT(None, None, [Float, Float], Float), ("@float_add", [], None), 1, False),
"__div__": Variable(UnboxedFunctionMT(None, None, [Float, Float], Float), ("@float_div", [], None), 1, False),
"__mod__": Variable(UnboxedFunctionMT(None, None, [Float, Float], Float), ("@float_mod", [], None), 1, False),
"__mul__": Variable(UnboxedFunctionMT(None, None, [Float, Float], Float), ("@float_mul", [], None), 1, False),
"__neg__": Variable(UnboxedFunctionMT(None, None, [Float], Float), ("@float_neg", [], None), 1, False),
"__nonzero__": Variable(UnboxedFunctionMT(None, None, [Float], Bool), ("@float_nonzero", [], None), 1, False),
"__pow__": Variable(UnboxedFunctionMT(None, None, [Float, Float], Float), ("@float_pow", [], None), 1, False),
"__rdiv__": Variable(UnboxedFunctionMT(None, None, [Float, Float], Float), ("@float_rdiv", [], None), 1, False),
"__repr__": Variable(UnboxedFunctionMT(None, None, [Float], Str), ("@float_repr", [], None), 1, False),
"__rsub__": Variable(UnboxedFunctionMT(None, None, [Float, Float], Float), ("@float_rsub", [], None), 1, False),
"__str__": Variable(UnboxedFunctionMT(None, None, [Float], Str), ("@float_str", [], None), 1, False),
"__sub__": Variable(UnboxedFunctionMT(None, None, [Float, Float], Float), ("@float_sub", [], None), 1, False),
# "__incref__": Variable(UnboxedFunctionMT(None, None, [Float], None_), ("@float_incref", [], None), 1, False),
# "__decref__": Variable(UnboxedFunctionMT(None, None, [Float], None_), ("@float_decref", [], None), 1, False),
}
float_class_methods["__radd__"] = float_class_methods["__add__"]
float_class_methods["__rmul__"] = float_class_methods["__mul__"]
for attr in COMPARE_MAP.values():
float_class_methods[attr] = Variable(UnboxedFunctionMT(None, None, [Float, Float], Bool), ("@float_" + attr[2:-2], [], None), 1, False)
for n, v in float_class_methods.iteritems():
FloatClass.set_clsattr_value(n, v, _init=True)
FloatClass.initialized = ("attrs", "write")
Float.initialized = ("attrs", "write")
setup_float()
def setup_string():
StrIteratorClass = ClassMT(ObjectClass, "striterator", "striterator")
StrIterator = StrIteratorClass._instance
StrClass._ctor = Variable(StrFunc, (), 1, False)
em = None
string_class_methods = {
"__add__": Variable(UnboxedFunctionMT(em, None, [Str, Str], Str), ("@str_add", [], None), 1, False),
"__eq__": Variable(UnboxedFunctionMT(em, None, [Str, Str], Bool), ("@str_eq", [], None), 1, False),
"__getitem__": PolymorphicFunctionMT.make([
Variable(UnboxedFunctionMT(em, None, [Str, Int], Str), ("@str_getitem", [], None), 1, False),
Variable(UnboxedFunctionMT(em, None, [Str, Slice], Str), ("@str_getitem_slice", [], None), 1, False)]),
"join": Variable(UnboxedFunctionMT(em, None, [Str, ListMT.make_list(Str)], Str), ("@str_join", [], None), 1, False),
"__len__": Variable(UnboxedFunctionMT(em, None, [Str], Int), ("@str_len", [], None), 1, False),
"__le__": Variable(UnboxedFunctionMT(em, None, [Str, Str], Bool), ("@str_le", [], None), 1, False),
"__lt__": Variable(UnboxedFunctionMT(em, None, [Str, Str], Bool), ("@str_lt", [], None), 1, False),
"__mul__": Variable(UnboxedFunctionMT(em, None, [Str, Int], Str), ("@str_mul", [], None), 1, False),
"__ne__": Variable(UnboxedFunctionMT(em, None, [Str, Str], Bool), ("@str_ne", [], None), 1, False),
"__nonzero__": Variable(UnboxedFunctionMT(em, None, [Str], Bool), ("@str_nonzero", [], None), 1, False),
"__repr__": Variable(UnboxedFunctionMT(em, None, [Str], Str), ("@str_repr", [], None), 1, False),
"split": Variable(UnboxedFunctionMT(em, None, [Str, Str], ListMT.make_list(Str), ndefaults=1), ("@str_split", [Variable(Str, "null", 1, False)], None), 1, False),
"__str__": Variable(UnboxedFunctionMT(em, None, [Str], Str), ("@str_str", [], None), 1, False),
# "__incref__": Variable(UnboxedFunctionMT(em, None, [Str], None_), ("@str_incref", [], None), 1, False),
# "__decref__": Variable(UnboxedFunctionMT(em, None, [Str], None_), ("@str_decref", [], None), 1, False),
"strip": Variable(UnboxedFunctionMT(em, None, [Str], Str), ("@str_strip", [], None), 1, False),
"lstrip": Variable(UnboxedFunctionMT(em, None, [Str], Str), ("@str_lstrip", [], None), 1, False),
"rstrip": Variable(UnboxedFunctionMT(em, None, [Str], Str), ("@str_rstrip", [], None), 1, False),
"__iter__": Variable(UnboxedFunctionMT(em, None, [Str], StrIterator), ("@str_iter", [], None), 1, False),
}
for n, v in string_class_methods.iteritems():
StrClass.set_clsattr_value(n, v, _init=True)
StrClass.initialized = ("attrs", "write")
Str.initialized = ("attrs", "write")
StrIteratorClass.set_clsattr_value("hasnext", Variable(UnboxedFunctionMT(em, None, [StrIterator], Bool), ("@striterator_hasnext", [], None), 1, False), _init=True)
StrIteratorClass.set_clsattr_value("next", Variable(UnboxedFunctionMT(em, None, [StrIterator], Str), ("@striterator_next", [], None), 1, False), _init=True)
StrIteratorClass.set_instattr_type(" str", Str)
StrIteratorClass.set_instattr_type(" pos", Int)
STDLIB_TYPES.append(StrIteratorClass)
setup_string()
def setup_bool():
    """Register the built-in methods of the runtime bool class."""
    BoolClass._ctor = Variable(BoolFunc, (), 1, False)
    em = None
    bool_class_methods = {
        "__nonzero__": Variable(UnboxedFunctionMT(em, None, [Bool], Bool), ("@bool_nonzero", [], None), 1, False),
        "__repr__": Variable(UnboxedFunctionMT(em, None, [Bool], Str), ("@bool_repr", [], None), 1, False),
        "__eq__": Variable(UnboxedFunctionMT(em, None, [Bool, Bool], Bool), ("@bool_eq", [], None), 1, False),
    }
    # str(b) behaves exactly like repr(b).
    bool_class_methods["__str__"] = bool_class_methods["__repr__"]
    def _bool_can_convert_to(self, t):
        # NOTE(review): defined but never attached to Bool (compare setup_int,
        # which assigns Int._can_convert_to) -- looks like dead code; confirm
        # intent before wiring it up or deleting it.
        return t in (Int,)
    for n, v in bool_class_methods.iteritems():
        BoolClass.set_clsattr_value(n, v, _init=True)
    BoolClass.initialized = ("attrs", "write")
    Bool.initialized = ("attrs", "write")
setup_bool()
def setup_type():
    """Register the built-in methods and instance attributes of the type class."""
    TypeClass._ctor = Variable(TypeFunc, (), 1, False)
    type_class_methods = {
        "__repr__": Variable(UnboxedFunctionMT(None, None, [Type], Str), ("@type_repr_", [], None), 1, False),
    }
    # str(t) behaves exactly like repr(t).
    type_class_methods["__str__"] = type_class_methods["__repr__"]
    for n, v in type_class_methods.iteritems():
        TypeClass.set_clsattr_value(n, v, _init=True)
    # force=True: presumably overrides already-present refcount hooks -- confirm.
    TypeClass.set_clsattr_value("__incref__", Variable(UnboxedFunctionMT(None, None, [Type], None_), ("@type_incref_", [], None), 1, False), _init=True, force=True)
    TypeClass.set_clsattr_value("__decref__", Variable(UnboxedFunctionMT(None, None, [Type], None_), ("@type_decref_", [], None), 1, False), _init=True, force=True)
    TypeClass.set_instattr_type("__name__", Str)
    TypeClass.set_instattr_type("__base__", Type)
    # TypeClass.initialized = ("attrs", "write")
    # Type.initialized = ("attrs", "write")
setup_type()
def setup_file():
    """Register the built-in methods and fields of the runtime file class.

    Includes the context-manager protocol (__enter__/__exit__) so files can
    be used in ``with`` statements.
    """
    FileClass.set_clsattr_value("__init__", Variable(UnboxedFunctionMT(None, None, [File, Str], None_), ("@file_init", [], None), 1, False), _init=True)
    FileClass.set_clsattr_value("read", Variable(UnboxedFunctionMT(None, None, [File, Int], Str), ("@file_read", [], None), 1, False), _init=True)
    FileClass.set_clsattr_value("write", Variable(UnboxedFunctionMT(None, None, [File, Str], None_), ("@file_write", [], None), 1, False), _init=True)
    FileClass.set_clsattr_value("readline", Variable(UnboxedFunctionMT(None, None, [File], Str), ("@file_readline", [], None), 1, False), _init=True)
    FileClass.set_clsattr_value("close", Variable(UnboxedFunctionMT(None, None, [File], None_), ("@file_close", [], None), 1, False), _init=True)
    FileClass.set_clsattr_value("flush", Variable(UnboxedFunctionMT(None, None, [File], None_), ("@file_flush", [], None), 1, False), _init=True)
    FileClass.set_clsattr_value("__enter__", Variable(UnboxedFunctionMT(None, None, [File], File), ("@file_enter", [], None), 1, False), _init=True)
    FileClass.set_clsattr_value("__exit__", Variable(UnboxedFunctionMT(None, None, [File, None_, None_, None_], None_), ("@file_exit", [], None), 1, False), _init=True)
    FileClass.set_instattr_type("closed", Bool)
    FileClass.set_instattr_type("fd", Int)
    FileClass.initialized = ("attrs", "write")
    File.initialized = ("attrs", "write")
setup_file()
| 143,686 | 8,264 | 3,719 |
cb9d202d11aad5d50b66f4a9f03ebef480d92465 | 435 | py | Python | app/models/item.py | yudjinn/backend-starlite-postgres | 2cb42a9f85f902e2bdfb6cb99d306401e6b23e7f | [
"MIT"
] | null | null | null | app/models/item.py | yudjinn/backend-starlite-postgres | 2cb42a9f85f902e2bdfb6cb99d306401e6b23e7f | [
"MIT"
] | null | null | null | app/models/item.py | yudjinn/backend-starlite-postgres | 2cb42a9f85f902e2bdfb6cb99d306401e6b23e7f | [
"MIT"
] | 2 | 2022-02-06T17:29:15.000Z | 2022-03-04T12:46:39.000Z | import uuid
from sqlalchemy import Column, ForeignKey, String
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import relationship
from .base import Base
from .mixins import DateFieldsMixins
| 27.1875 | 75 | 0.770115 | import uuid
from sqlalchemy import Column, ForeignKey, String
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import relationship
from .base import Base
from .mixins import DateFieldsMixins
class Item(DateFieldsMixins, Base):
    """An item owned by a user (the many side of User.items)."""
    # Display name; required, at most 64 characters.
    name: str = Column(String(64), nullable=False)
    # Foreign key to the owning user's primary key.
    owner_id: uuid.UUID = Column(UUID(as_uuid=True), ForeignKey("user.id"))
    owner = relationship("User", back_populates="items")
| 0 | 199 | 23 |
1df90d6b5a21d880a74025dd9aa1c5f1ebfced00 | 7,715 | py | Python | addons/io_scene_gltf2/io/exp/gltf2_io_get.py | dtysky/glTF-Blender-IO | 5488adce3d496c6db7b2fff121d4fb46962a02b2 | [
"Apache-2.0"
] | null | null | null | addons/io_scene_gltf2/io/exp/gltf2_io_get.py | dtysky/glTF-Blender-IO | 5488adce3d496c6db7b2fff121d4fb46962a02b2 | [
"Apache-2.0"
] | null | null | null | addons/io_scene_gltf2/io/exp/gltf2_io_get.py | dtysky/glTF-Blender-IO | 5488adce3d496c6db7b2fff121d4fb46962a02b2 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Imports
#
import os
from ...io.com.gltf2_io_debug import *
#
# Globals
#
#
# Functions
#
def get_material_requires_texcoords(glTF, index):
    """
    Query function, if a material "needs" texture coordinates. This is the case, if a texture is present and used.
    """

    if glTF.get('materials') is None:
        return False

    materials = glTF['materials']

    if index < 0 or index >= len(materials):
        return False

    material = materials[index]

    # Any present texture slot implies the mesh needs texture coordinates.
    # Covers general, metallic-roughness, specular-glossiness, unlit and
    # displacement slots; the original code listed 'baseColorTexture' and
    # 'diffuseTexture' twice, the duplicates are dropped here.
    texture_keys = (
        'emissiveTexture',
        'normalTexture',
        'occlusionTexture',
        'baseColorTexture',
        'metallicRoughnessTexture',
        'diffuseTexture',
        'specularGlossinessTexture',
        'displacementTexture',
    )
    return any(material.get(key) is not None for key in texture_keys)
def get_material_requires_normals(glTF, index):
    """
    Query function, if a material "needs" normals. This is the case, if a texture is present and used.
    At point of writing, same function as for texture coordinates.
    """
    # Pure delegate for now; kept separate so the normals criterion can
    # diverge from the texcoord criterion later without touching callers.
    return get_material_requires_texcoords(glTF, index)
def get_material_index(glTF, name):
    """
    Return the material index in the glTF array.

    Returns -1 when the name is None, when there is no materials array, or
    when no material carries the given name.
    """
    if name is None:
        return -1

    materials = glTF.get('materials')
    if materials is None:
        return -1

    for position, material in enumerate(materials):
        if material['name'] == name:
            return position

    return -1
def get_mesh_index(glTF, name):
    """
    Return the mesh index in the glTF array.

    Returns -1 when there is no meshes array or no mesh carries the name.
    """
    meshes = glTF.get('meshes')
    if meshes is None:
        return -1

    return next(
        (position for position, mesh in enumerate(meshes) if mesh['name'] == name),
        -1,
    )
def get_skin_index(glTF, name, index_offset):
    """
    Return the skin index in the glTF array.

    The skin is matched by its 'skeleton' root node (looked up by node name);
    ``index_offset`` is added to the found position. Returns -1 when there is
    no skins array or no skin matches.
    """
    skins = glTF.get('skins')
    if skins is None:
        return -1

    skeleton = get_node_index(glTF, name)

    for position, skin in enumerate(skins):
        if skin['skeleton'] == skeleton:
            return position + index_offset

    return -1
def get_camera_index(glTF, name):
    """
    Return the camera index in the glTF array.

    Returns -1 when there is no cameras array or no camera carries the name.
    """
    cameras = glTF.get('cameras')
    if cameras is None:
        return -1

    for position, camera in enumerate(cameras):
        if camera['name'] == name:
            return position

    return -1
def get_light_index(glTF, name):
    """
    Return the light index in the glTF array.

    Lights live under the KHR_lights_punctual extension; returns -1 when any
    level of that nesting is missing or no light carries the name.
    """
    extensions = glTF.get('extensions')
    if extensions is None:
        return -1

    punctual = extensions.get('KHR_lights_punctual')
    if punctual is None:
        return -1

    lights = punctual.get('lights')
    if lights is None:
        return -1

    for position, light in enumerate(lights):
        if light['name'] == name:
            return position

    return -1
def get_node_index(glTF, name):
    """
    Return the node index in the glTF array.

    Returns -1 when there is no nodes array or no node carries the name.
    """
    nodes = glTF.get('nodes')
    if nodes is None:
        return -1

    return next(
        (position for position, node in enumerate(nodes) if node['name'] == name),
        -1,
    )
def get_scene_index(glTF, name):
    """
    Return the scene index in the glTF array.

    Returns -1 when there is no scenes array or no scene carries the name.
    """
    scenes = glTF.get('scenes')
    if scenes is None:
        return -1

    for position, scene in enumerate(scenes):
        if scene['name'] == name:
            return position

    return -1
def get_texture_index(glTF, filename):
    """
    Return the texture index in the glTF array by a given filepath.

    Resolves the filename to an image index first, then finds the texture
    whose 'source' references that image; -1 when either lookup fails.
    """
    textures = glTF.get('textures')
    if textures is None:
        return -1

    image_index = get_image_index(glTF, filename)
    if image_index == -1:
        return -1

    for position, texture in enumerate(textures):
        if texture['source'] == image_index:
            return position

    return -1
def get_image_index(glTF, filename):
    """
    Return the image index in the glTF array.

    Images are matched by their extension-agnostic name; -1 when there is no
    images array or no image matches.
    """
    images = glTF.get('images')
    if images is None:
        return -1

    wanted = get_image_name(filename)

    return next(
        (position for position, image in enumerate(images) if image['name'] == wanted),
        -1,
    )
def get_image_name(filename):
    """
    Return user-facing, extension-agnostic name for image.
    """
    root, _extension = os.path.splitext(filename)
    return root
def get_scalar(default_value, init_value = 0.0):
    """
    Return scalar with a given default/fallback value.

    ``default_value`` wins whenever it is not None (including 0 / 0.0);
    otherwise the fallback ``init_value`` is returned.
    """
    return init_value if default_value is None else default_value
def get_vec2(default_value, init_value=None):
    """
    Return vec2 with a given default/fallback value.

    Copies the first two components of ``default_value`` when it has at
    least two; otherwise returns the fallback. Bug fix: the fallback used to
    be a mutable default argument that was written to in place, so one call
    could corrupt the fallback of every later call; it is now created fresh
    per call when not supplied.
    """
    fallback = [0.0, 0.0] if init_value is None else init_value

    if default_value is None or len(default_value) < 2:
        return fallback

    for i, number in enumerate(default_value):
        if i == 2:
            break
        fallback[i] = number

    return fallback
def get_vec3(default_value, init_value=None):
    """
    Return vec3 with a given default/fallback value.

    Copies the first three components of ``default_value`` when it has at
    least three; otherwise returns the fallback. Bug fix: the fallback used
    to be a mutable default argument that was written to in place, so one
    call could corrupt the fallback of every later call; it is now created
    fresh per call when not supplied.
    """
    fallback = [0.0, 0.0, 0.0] if init_value is None else init_value

    if default_value is None or len(default_value) < 3:
        return fallback

    for i, number in enumerate(default_value):
        if i == 3:
            break
        fallback[i] = number

    return fallback
def get_vec4(default_value, init_value=None):
    """
    Return vec4 with a given default/fallback value.

    Copies the first four components of ``default_value`` when it has at
    least four; otherwise returns the fallback (alpha defaults to 1.0).
    Bug fix: the fallback used to be a mutable default argument that was
    written to in place, so one call could corrupt the fallback of every
    later call; it is now created fresh per call when not supplied.
    """
    fallback = [0.0, 0.0, 0.0, 1.0] if init_value is None else init_value

    if default_value is None or len(default_value) < 4:
        return fallback

    for i, number in enumerate(default_value):
        if i == 4:
            break
        fallback[i] = number

    return fallback
def get_index(elements, name):
    """
    Return index of a glTF element by a given name.

    Mirrors the original behavior of giving up (returning -1) as soon as an
    element without a 'name' entry is encountered, rather than skipping it.
    """
    if elements is None or name is None:
        return -1

    for position, element in enumerate(elements):
        if element.get('name') is None:
            return -1
        if element['name'] == name:
            return position

    return -1
| 20.091146 | 114 | 0.6 | # Copyright (c) 2017 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Imports
#
import os
from ...io.com.gltf2_io_debug import *
#
# Globals
#
#
# Functions
#
def get_material_requires_texcoords(glTF, index):
"""
Query function, if a material "needs" texture coordinates. This is the case, if a texture is present and used.
"""
if glTF.get('materials') is None:
return False
materials = glTF['materials']
if index < 0 or index >= len(materials):
return False
material = materials[index]
# General
if material.get('emissiveTexture') is not None:
return True
if material.get('normalTexture') is not None:
return True
if material.get('occlusionTexture') is not None:
return True
# Metallic roughness
if material.get('baseColorTexture') is not None:
return True
if material.get('metallicRoughnessTexture') is not None:
return True
# Specular glossiness
if material.get('diffuseTexture') is not None:
return True
if material.get('specularGlossinessTexture') is not None:
return True
# Unlit Material
if material.get('baseColorTexture') is not None:
return True
if material.get('diffuseTexture') is not None:
return True
# Displacement
if material.get('displacementTexture') is not None:
return True
return False
def get_material_requires_normals(glTF, index):
"""
Query function, if a material "needs" normals. This is the case, if a texture is present and used.
At point of writing, same function as for texture coordinates.
"""
return get_material_requires_texcoords(glTF, index)
def get_material_index(glTF, name):
"""
Return the material index in the glTF array.
"""
if name is None:
return -1
if glTF.get('materials') is None:
return -1
index = 0
for material in glTF['materials']:
if material['name'] == name:
return index
index += 1
return -1
def get_mesh_index(glTF, name):
"""
Return the mesh index in the glTF array.
"""
if glTF.get('meshes') is None:
return -1
index = 0
for mesh in glTF['meshes']:
if mesh['name'] == name:
return index
index += 1
return -1
def get_skin_index(glTF, name, index_offset):
"""
Return the skin index in the glTF array.
"""
if glTF.get('skins') is None:
return -1
skeleton = get_node_index(glTF, name)
index = 0
for skin in glTF['skins']:
if skin['skeleton'] == skeleton:
return index + index_offset
index += 1
return -1
def get_camera_index(glTF, name):
"""
Return the camera index in the glTF array.
"""
if glTF.get('cameras') is None:
return -1
index = 0
for camera in glTF['cameras']:
if camera['name'] == name:
return index
index += 1
return -1
def get_light_index(glTF, name):
"""
Return the light index in the glTF array.
"""
if glTF.get('extensions') is None:
return -1
extensions = glTF['extensions']
if extensions.get('KHR_lights_punctual') is None:
return -1
khr_lights_punctual = extensions['KHR_lights_punctual']
if khr_lights_punctual.get('lights') is None:
return -1
lights = khr_lights_punctual['lights']
index = 0
for light in lights:
if light['name'] == name:
return index
index += 1
return -1
def get_node_index(glTF, name):
"""
Return the node index in the glTF array.
"""
if glTF.get('nodes') is None:
return -1
index = 0
for node in glTF['nodes']:
if node['name'] == name:
return index
index += 1
return -1
def get_scene_index(glTF, name):
"""
Return the scene index in the glTF array.
"""
if glTF.get('scenes') is None:
return -1
index = 0
for scene in glTF['scenes']:
if scene['name'] == name:
return index
index += 1
return -1
def get_texture_index(glTF, filename):
"""
Return the texture index in the glTF array by a given filepath.
"""
if glTF.get('textures') is None:
return -1
image_index = get_image_index(glTF, filename)
if image_index == -1:
return -1
for texture_index, texture in enumerate(glTF['textures']):
if image_index == texture['source']:
return texture_index
return -1
def get_image_index(glTF, filename):
"""
Return the image index in the glTF array.
"""
if glTF.get('images') is None:
return -1
image_name = get_image_name(filename)
for index, current_image in enumerate(glTF['images']):
if image_name == current_image['name']:
return index
return -1
def get_image_name(filename):
"""
Return user-facing, extension-agnostic name for image.
"""
return os.path.splitext(filename)[0]
def get_scalar(default_value, init_value = 0.0):
"""
Return scalar with a given default/fallback value.
"""
return_value = init_value
if default_value is None:
return return_value
return_value = default_value
return return_value
def get_vec2(default_value, init_value = [0.0, 0.0]):
"""
Return vec2 with a given default/fallback value.
"""
return_value = init_value
if default_value is None or len(default_value) < 2:
return return_value
index = 0
for number in default_value:
return_value[index] = number
index += 1
if index == 2:
return return_value
return return_value
def get_vec3(default_value, init_value = [0.0, 0.0, 0.0]):
"""
Return vec3 with a given default/fallback value.
"""
return_value = init_value
if default_value is None or len(default_value) < 3:
return return_value
index = 0
for number in default_value:
return_value[index] = number
index += 1
if index == 3:
return return_value
return return_value
def get_vec4(default_value, init_value = [0.0, 0.0, 0.0, 1.0]):
    """
    Return a 4-component list taken from *default_value*, falling back to
    *init_value* when fewer than four components are supplied.

    Bug fix: the result is now a copy. The previous implementation wrote
    into the shared mutable default argument, so one successful call
    polluted the fallback returned by every later call.
    """
    result = list(init_value)
    if default_value is None or len(default_value) < 4:
        return result
    for index, number in enumerate(default_value):
        if index == 4:
            break
        # Only the first four components are copied.
        result[index] = number
    return result
def get_index(elements, name):
    """
    Return index of a glTF element by a given name, or -1.

    NOTE(review): the scan aborts with -1 as soon as an element without a
    'name' is met, even if a later element would match — preserved as-is.
    """
    if elements is None or name is None:
        return -1
    for position, element in enumerate(elements):
        current_name = element.get('name')
        if current_name is None:
            return -1
        if current_name == name:
            return position
    return -1
| 0 | 0 | 0 |
527d5a647225fe1451d9322370f1d08e468c34af | 696 | py | Python | Src/Web/config.py | 23233/sproxy | b40d6593e3a60ba089484c527de1bc4afc3a2d24 | [
"MIT"
] | 167 | 2019-07-05T13:25:39.000Z | 2020-04-25T11:31:19.000Z | Src/Web/config.py | 23233/sproxy | b40d6593e3a60ba089484c527de1bc4afc3a2d24 | [
"MIT"
] | 10 | 2018-12-11T06:07:37.000Z | 2019-07-03T11:14:36.000Z | Src/Web/config.py | 23233/sproxy | b40d6593e3a60ba089484c527de1bc4afc3a2d24 | [
"MIT"
] | 38 | 2019-07-04T07:50:08.000Z | 2020-04-17T21:05:46.000Z | # Create dummy secret key so we can use sessions
SECRET_KEY = '1234567890'
# Flask-Security config
SECURITY_URL_PREFIX = "/admin"
SECURITY_PASSWORD_HASH = "pbkdf2_sha256"
# NOTE(review): secret key and salt are hard-coded in source control —
# acceptable for a demo, never for production.
SECURITY_PASSWORD_SALT = "ATGUOHAELKiubahiughaerGOJAEGj"
# Users log in with the "name" attribute rather than an e-mail address.
SECURITY_USER_IDENTITY_ATTRIBUTES = ["name"]
# Flask-Security URLs, overridden because they don't put a / at the end
SECURITY_LOGIN_URL = "/login/"
SECURITY_LOGOUT_URL = "/logout/"
SECURITY_REGISTER_URL = "/register/"
# After login/logout/registration, redirect back to the admin interface.
SECURITY_POST_LOGIN_VIEW = "/admin/"
SECURITY_POST_LOGOUT_VIEW = "/admin/"
SECURITY_POST_REGISTER_VIEW = "/admin/"
# Flask-Security features
SECURITY_REGISTERABLE = True
SECURITY_SEND_REGISTER_EMAIL = False
# Disable SQLAlchemy's modification-tracking event overhead.
SQLALCHEMY_TRACK_MODIFICATIONS = False
SECRET_KEY = '1234567890'
# Flask-Security config
SECURITY_URL_PREFIX = "/admin"
SECURITY_PASSWORD_HASH = "pbkdf2_sha256"
# NOTE(review): hard-coded salt committed to source — demo-only.
SECURITY_PASSWORD_SALT = "ATGUOHAELKiubahiughaerGOJAEGj"
# Users log in with the "name" attribute rather than an e-mail address.
SECURITY_USER_IDENTITY_ATTRIBUTES = ["name"]
# Flask-Security URLs, overridden because they don't put a / at the end
SECURITY_LOGIN_URL = "/login/"
SECURITY_LOGOUT_URL = "/logout/"
SECURITY_REGISTER_URL = "/register/"
# After login/logout/registration, redirect back to the admin interface.
SECURITY_POST_LOGIN_VIEW = "/admin/"
SECURITY_POST_LOGOUT_VIEW = "/admin/"
SECURITY_POST_REGISTER_VIEW = "/admin/"
# Flask-Security features
SECURITY_REGISTERABLE = True
SECURITY_SEND_REGISTER_EMAIL = False
SQLALCHEMY_TRACK_MODIFICATIONS = False | 0 | 0 | 0 |
813d0a9cf8930b06ac9932bbfc5b272617c5c4ac | 3,785 | py | Python | homeassistant/components/mobile_app/websocket_api.py | jeanfpoulin/home-assistant | 04dbe5bc841e1a429873efbd850c35b823ef26ce | [
"Apache-2.0"
] | 3 | 2020-05-18T10:18:16.000Z | 2020-12-08T11:27:55.000Z | homeassistant/components/mobile_app/websocket_api.py | jeanfpoulin/home-assistant | 04dbe5bc841e1a429873efbd850c35b823ef26ce | [
"Apache-2.0"
] | 3 | 2021-02-08T20:54:46.000Z | 2021-09-08T02:30:04.000Z | homeassistant/components/mobile_app/websocket_api.py | jeanfpoulin/home-assistant | 04dbe5bc841e1a429873efbd850c35b823ef26ce | [
"Apache-2.0"
] | 6 | 2020-04-10T06:21:11.000Z | 2021-07-01T08:53:38.000Z | """Websocket API for mobile_app."""
import voluptuous as vol
from homeassistant.components.cloud import async_delete_cloudhook
from homeassistant.components.websocket_api import (
ActiveConnection,
async_register_command,
async_response,
error_message,
result_message,
websocket_command,
ws_require_user,
)
from homeassistant.components.websocket_api.const import (
ERR_INVALID_FORMAT,
ERR_NOT_FOUND,
ERR_UNAUTHORIZED,
)
from homeassistant.const import CONF_WEBHOOK_ID
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.typing import HomeAssistantType
from .const import (
CONF_CLOUDHOOK_URL,
CONF_USER_ID,
DATA_CONFIG_ENTRIES,
DATA_DELETED_IDS,
DATA_STORE,
DOMAIN,
)
from .helpers import safe_registration, savable_state
def register_websocket_handlers(hass: HomeAssistantType) -> bool:
    """Register the websocket handlers."""
    for handler in (websocket_get_user_registrations, websocket_delete_registration):
        async_register_command(hass, handler)
    return True
@ws_require_user()
@async_response
@websocket_command(
    {
        vol.Required("type"): "mobile_app/get_user_registrations",
        vol.Optional(CONF_USER_ID): cv.string,
    }
)
async def websocket_get_user_registrations(
    hass: HomeAssistantType, connection: ActiveConnection, msg: dict
) -> None:
    """Return all registrations or just registrations for given user ID.

    Non-admin users may only list their own registrations; asking for
    another user's registrations yields an unauthorized error.
    """
    user_id = msg.get(CONF_USER_ID, connection.user.id)

    if user_id != connection.user.id and not connection.user.is_admin:
        # If user ID is provided and is not current user ID and current user
        # isn't an admin user
        connection.send_error(msg["id"], ERR_UNAUTHORIZED, "Unauthorized")
        return

    user_registrations = []

    for config_entry in hass.config_entries.async_entries(domain=DOMAIN):
        registration = config_entry.data
        # Bug fix: compare user IDs by value — "is" on two equal strings is
        # identity comparison and is not guaranteed to be True.
        if connection.user.is_admin or registration[CONF_USER_ID] == user_id:
            user_registrations.append(safe_registration(registration))

    connection.send_message(result_message(msg["id"], user_registrations))
@ws_require_user()
@async_response
@websocket_command(
    {
        vol.Required("type"): "mobile_app/delete_registration",
        vol.Required(CONF_WEBHOOK_ID): cv.string,
    }
)
async def websocket_delete_registration(
    hass: HomeAssistantType, connection: ActiveConnection, msg: dict
) -> None:
    """Delete the registration for the given webhook_id.

    Sends an error when the webhook ID is missing or unknown, when the
    caller is neither the owner nor an admin, or when persisting the updated
    state fails; otherwise removes the config entry, records the deleted
    webhook ID, and deletes the cloudhook when one is configured.
    """
    user = connection.user

    webhook_id = msg.get(CONF_WEBHOOK_ID)
    if webhook_id is None:
        connection.send_error(msg["id"], ERR_INVALID_FORMAT, "Webhook ID not provided")
        return

    # Bug fix: use .get() — a direct index raised KeyError for an unknown
    # webhook ID before the "not found" error below could ever be sent.
    config_entry = hass.data[DOMAIN][DATA_CONFIG_ENTRIES].get(webhook_id)
    if config_entry is None:
        connection.send_error(
            msg["id"], ERR_NOT_FOUND, "Webhook ID not found in storage"
        )
        return
    registration = config_entry.data

    if registration[CONF_USER_ID] != user.id and not user.is_admin:
        # Bug fix: the original returned error_message() from an
        # @async_response handler, so the client never received the error;
        # send it over the connection like the other branches do.
        connection.send_error(
            msg["id"], ERR_UNAUTHORIZED, "User is not registration owner"
        )
        return

    await hass.config_entries.async_remove(config_entry.entry_id)

    hass.data[DOMAIN][DATA_DELETED_IDS].append(webhook_id)
    store = hass.data[DOMAIN][DATA_STORE]

    try:
        await store.async_save(savable_state(hass))
    except HomeAssistantError:
        # Bug fix: actually deliver the failure to the client (see above).
        connection.send_error(msg["id"], "internal_error", "Error deleting registration")
        return

    if CONF_CLOUDHOOK_URL in registration and "cloud" in hass.config.components:
        await async_delete_cloudhook(hass, webhook_id)

    connection.send_message(result_message(msg["id"], "ok"))
| 30.772358 | 88 | 0.732893 | """Websocket API for mobile_app."""
import voluptuous as vol
from homeassistant.components.cloud import async_delete_cloudhook
from homeassistant.components.websocket_api import (
ActiveConnection,
async_register_command,
async_response,
error_message,
result_message,
websocket_command,
ws_require_user,
)
from homeassistant.components.websocket_api.const import (
ERR_INVALID_FORMAT,
ERR_NOT_FOUND,
ERR_UNAUTHORIZED,
)
from homeassistant.const import CONF_WEBHOOK_ID
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.typing import HomeAssistantType
from .const import (
CONF_CLOUDHOOK_URL,
CONF_USER_ID,
DATA_CONFIG_ENTRIES,
DATA_DELETED_IDS,
DATA_STORE,
DOMAIN,
)
from .helpers import safe_registration, savable_state
def register_websocket_handlers(hass: HomeAssistantType) -> bool:
    """Register the websocket handlers."""
    for handler in (websocket_get_user_registrations, websocket_delete_registration):
        async_register_command(hass, handler)
    return True
@ws_require_user()
@async_response
@websocket_command(
    {
        vol.Required("type"): "mobile_app/get_user_registrations",
        vol.Optional(CONF_USER_ID): cv.string,
    }
)
async def websocket_get_user_registrations(
    hass: HomeAssistantType, connection: ActiveConnection, msg: dict
) -> None:
    """Return all registrations or just registrations for given user ID.

    Non-admin users may only list their own registrations; asking for
    another user's registrations yields an unauthorized error.
    """
    user_id = msg.get(CONF_USER_ID, connection.user.id)

    if user_id != connection.user.id and not connection.user.is_admin:
        # If user ID is provided and is not current user ID and current user
        # isn't an admin user
        connection.send_error(msg["id"], ERR_UNAUTHORIZED, "Unauthorized")
        return

    user_registrations = []

    for config_entry in hass.config_entries.async_entries(domain=DOMAIN):
        registration = config_entry.data
        # Bug fix: compare user IDs by value — "is" on two equal strings is
        # identity comparison and is not guaranteed to be True.
        if connection.user.is_admin or registration[CONF_USER_ID] == user_id:
            user_registrations.append(safe_registration(registration))

    connection.send_message(result_message(msg["id"], user_registrations))
@ws_require_user()
@async_response
@websocket_command(
    {
        vol.Required("type"): "mobile_app/delete_registration",
        vol.Required(CONF_WEBHOOK_ID): cv.string,
    }
)
async def websocket_delete_registration(
    hass: HomeAssistantType, connection: ActiveConnection, msg: dict
) -> None:
    """Delete the registration for the given webhook_id.

    Sends an error when the webhook ID is missing or unknown, when the
    caller is neither the owner nor an admin, or when persisting the updated
    state fails; otherwise removes the config entry, records the deleted
    webhook ID, and deletes the cloudhook when one is configured.
    """
    user = connection.user

    webhook_id = msg.get(CONF_WEBHOOK_ID)
    if webhook_id is None:
        connection.send_error(msg["id"], ERR_INVALID_FORMAT, "Webhook ID not provided")
        return

    # Bug fix: use .get() — a direct index raised KeyError for an unknown
    # webhook ID before the "not found" error below could ever be sent.
    config_entry = hass.data[DOMAIN][DATA_CONFIG_ENTRIES].get(webhook_id)
    if config_entry is None:
        connection.send_error(
            msg["id"], ERR_NOT_FOUND, "Webhook ID not found in storage"
        )
        return
    registration = config_entry.data

    if registration[CONF_USER_ID] != user.id and not user.is_admin:
        # Bug fix: the original returned error_message() from an
        # @async_response handler, so the client never received the error;
        # send it over the connection like the other branches do.
        connection.send_error(
            msg["id"], ERR_UNAUTHORIZED, "User is not registration owner"
        )
        return

    await hass.config_entries.async_remove(config_entry.entry_id)

    hass.data[DOMAIN][DATA_DELETED_IDS].append(webhook_id)
    store = hass.data[DOMAIN][DATA_STORE]

    try:
        await store.async_save(savable_state(hass))
    except HomeAssistantError:
        # Bug fix: actually deliver the failure to the client (see above).
        connection.send_error(msg["id"], "internal_error", "Error deleting registration")
        return

    if CONF_CLOUDHOOK_URL in registration and "cloud" in hass.config.components:
        await async_delete_cloudhook(hass, webhook_id)

    connection.send_message(result_message(msg["id"], "ok"))
fba6df107748d61010973df91014413655aef5fb | 4,000 | py | Python | test_locks.py | zbentley/amqp-locks-python | 7da8e941fff752bde1f1fd56caa51810fdac7577 | [
"MIT"
] | null | null | null | test_locks.py | zbentley/amqp-locks-python | 7da8e941fff752bde1f1fd56caa51810fdac7577 | [
"MIT"
] | null | null | null | test_locks.py | zbentley/amqp-locks-python | 7da8e941fff752bde1f1fd56caa51810fdac7577 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import pika
import time
import sys
import argparse
sys.path.append("lib")
from rabbitlock.mutex import Mutex
from rabbitlock.semaphore import Semaphore
# http://www.huyng.com/posts/python-performance-analysis/
parse_and_dispatch(sys.argv[1:])
| 29.411765 | 101 | 0.62375 | #!/usr/bin/env python3
import pika
import time
import sys
import argparse
sys.path.append("lib")
from rabbitlock.mutex import Mutex
from rabbitlock.semaphore import Semaphore
def positive_float(value):
    """argparse type check: a non-negative float parsed from *value*."""
    number = float(value)
    if number < 0:
        raise argparse.ArgumentTypeError("%s not a positive float" % value)
    return number
# http://www.huyng.com/posts/python-performance-analysis/
class Timer(object):
    """Wall-clock stopwatch usable as a context manager.

    On exit, elapsed time is exposed via ``secs``/``msecs``; when *verbose*
    is true the elapsed milliseconds are printed as well.
    """
    def __init__(self, verbose=True):
        self.verbose = verbose
    def __enter__(self):
        self.start = time.time()
        return self
    def __exit__(self, *exc_info):
        self.end = time.time()
        elapsed = self.end - self.start
        self.secs = elapsed
        self.msecs = elapsed * 1000  # milliseconds
        if self.verbose:
            print('elapsed time: %f ms' % self.msecs)
def get_connection_parameters():
    """Connection parameters for a local RabbitMQ broker (guest/guest)."""
    credentials = pika.PlainCredentials("guest", "guest")
    return pika.ConnectionParameters("localhost", 5672, "/", credentials)
def true_mutex_operations(args):
    """Repeatedly (re)acquire the mutex and report hold/loss each pass.

    Runs a single pass unless --loop was given.
    """
    print(args)
    lock = Mutex("foo", get_connection_parameters())
    while True:
        # NOTE(review): Timer's parameter is ``verbose``; passing the float
        # args.sleep makes the timing print unconditional — looks like it
        # was meant to be args.verbose. Confirm before changing.
        with Timer(args.sleep):
            acquired = lock.ensure_acquired()
        if acquired:
            print("Got lock")
        else:
            print("Lost lock")
        time.sleep(args.sleep)
        if not args.loop:
            break
def get_held_semaphore(greedy, verbose=False):
    """Try to acquire one semaphore slot; with *greedy*, recurse to grab
    every remaining slot too.

    Returns the list of held Semaphore objects, or [] when no slot could
    be acquired.
    """
    sems = [Semaphore("foo", get_connection_parameters())]
    with Timer(verbose):
        acquired = sems[0].ensure_semaphore_held()
    if acquired:
        if verbose:
            print("Got lock: %d" % acquired)
    else:
        return []
    if greedy and acquired:
        # Greedy mode: keep acquiring until acquisition fails.
        success = get_held_semaphore(greedy, verbose)
        if success:
            sems.extend(success)
    return sems
def semaphore_operations(args):
    """Dispatch the semaphore subcommand: --destroy, --acquire, or --change N."""
    lock = Semaphore("foo", get_connection_parameters())
    if args.destroy:
        with Timer(args.verbose):
            lock.ensure_semaphore_destroyed()
    elif args.acquire:
        sems = []
        while True:
            sems.extend(get_held_semaphore(args.greedy, args.verbose))
            # Bug fix: iterate over a snapshot — the original removed items
            # from ``sems`` while iterating it, which silently skips the
            # element following each removed one.
            for sem in list(sems):
                with Timer(args.verbose):
                    num = sem.ensure_semaphore_held()
                if num:
                    if args.loop:
                        print("Held: %d" % num)
                else:
                    # Lost this slot; stop tracking it.
                    sems.remove(sem)
            if args.verbose:
                print("")
            if not args.loop:
                if not sems:
                    print("Could not get lock")
                break
            time.sleep(args.sleep)
    elif args.change:
        if lock.adjust_semaphore(args.change):
            print("Adjustment success")
        else:
            print("Adjustment failure")
def parse_and_dispatch(args):
    """Parse CLI arguments and invoke the selected subcommand handler."""
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()
    # Global options shared by both subcommands.
    parser.add_argument("--sleep", type=positive_float, default=0.5)
    parser.add_argument("--verbose", action="store_true")
    semparser = subparsers.add_parser("semaphore")
    semparser.set_defaults(func=semaphore_operations)
    semparser.add_argument("--greedy", action="store_true")
    semparser.add_argument("--loop", action="store_true")
    # Exactly one semaphore operation may be chosen.
    semops = semparser.add_mutually_exclusive_group()
    semops.add_argument("--change", type=int)
    semops.add_argument("--acquire", action="store_true")
    semops.add_argument("--destroy", action="store_true")
    truemutexparser = subparsers.add_parser("true_mutex")
    truemutexparser.set_defaults(func=true_mutex_operations)
    truemutexparser.add_argument("--acquire", action="store_true", default=True)
    truemutexparser.add_argument("--loop", action="store_true")
    result = parser.parse_args(args)
    if hasattr(result, "func"):
        # TODO if semaphore, fail if greedy or loop are specified in anything other but acquire mode.
        result.func(result)
    else:
        parser.error("No lock mode specified")
parse_and_dispatch(sys.argv[1:])
| 3,483 | -1 | 240 |
5b3f69f42eae7163ac1ea43116b8a85cdb9c27b7 | 1,068 | py | Python | django_sourcebook/sourcebook/migrations/0009_auto_20200101_2140.py | maxblee/django_sourcebook | f90ca62cfe43c875a485f783ca1a06be40d9bbc5 | [
"MIT"
] | null | null | null | django_sourcebook/sourcebook/migrations/0009_auto_20200101_2140.py | maxblee/django_sourcebook | f90ca62cfe43c875a485f783ca1a06be40d9bbc5 | [
"MIT"
] | null | null | null | django_sourcebook/sourcebook/migrations/0009_auto_20200101_2140.py | maxblee/django_sourcebook | f90ca62cfe43c875a485f783ca1a06be40d9bbc5 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.1 on 2020-01-02 02:40
import datetime
from django.db import migrations, models
| 29.666667 | 78 | 0.495318 | # Generated by Django 3.0.1 on 2020-01-02 02:40
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Story.publication_date; re-declare Source.source_type choices."""
    dependencies = [
        ("sourcebook", "0008_foiarequest_time_completed"),
    ]
    operations = [
        migrations.AddField(
            model_name="story",
            name="publication_date",
            # NOTE(review): fixed date default (the migration's generation
            # date) baked in by makemigrations — normal for migration files.
            field=models.DateField(default=datetime.date(2020, 1, 2)),
        ),
        migrations.AlterField(
            model_name="source",
            name="source_type",
            field=models.CharField(
                # 2-letter codes mapped to human-readable source types.
                choices=[
                    ("db", "Database administrator"),
                    ("rp", "Person affected (anecdotal source)"),
                    ("e", "Expert"),
                    ("pr", "Spokesperson, PR-Rep"),
                    ("f", "Public Records officer"),
                    ("o", "Public/Company official"),
                    ("i", "Company or business employee (current or former)"),
                ],
                max_length=2,
            ),
        ),
    ]
| 0 | 938 | 23 |
0655f1c66e00a9e31b9d920ea6a88703dd4f5793 | 3,688 | py | Python | src/exp_num_query.py | moseslichten/TAFSSL | b236dee4936fe933293ebdd65e48a332a3aacdea | [
"Apache-2.0"
] | 10 | 2020-09-26T07:07:52.000Z | 2021-10-11T12:11:33.000Z | src/exp_num_query.py | moseslichten/TAFSSL | b236dee4936fe933293ebdd65e48a332a3aacdea | [
"Apache-2.0"
] | 1 | 2021-04-16T14:36:28.000Z | 2021-04-16T14:36:28.000Z | src/exp_num_query.py | moseslichten/TAFSSL | b236dee4936fe933293ebdd65e48a332a3aacdea | [
"Apache-2.0"
] | 2 | 2020-09-26T07:00:21.000Z | 2021-08-14T14:12:50.000Z | # --------------------------------------------------------
# TAFSSL
# Copyright (c) 2019 IBM Corp
# Licensed under The Apache-2.0 License [see LICENSE for details]
# --------------------------------------------------------
import numpy as np
from utils.proto_msp import ProtoMSP
import time
import pickle
from utils.misc import print_params
from utils.misc import load_features
from utils.misc import print_msg
from utils.misc import avg, ci_95, parse_args
from utils.misc import create_episode, calc_acc
from utils.misc import get_color
from utils.misc import get_features
if __name__ == '__main__':
get_features()
n_query_exp()
n_query_exp_fig()
| 32.637168 | 116 | 0.608731 | # --------------------------------------------------------
# TAFSSL
# Copyright (c) 2019 IBM Corp
# Licensed under The Apache-2.0 License [see LICENSE for details]
# --------------------------------------------------------
import numpy as np
from utils.proto_msp import ProtoMSP
import time
import pickle
from utils.misc import print_params
from utils.misc import load_features
from utils.misc import print_msg
from utils.misc import avg, ci_95, parse_args
from utils.misc import create_episode, calc_acc
from utils.misc import get_color
from utils.misc import get_features
def run_episode(train_mean, cl_data_file, model, n_way=5, n_support=5, n_query=15):
    """Run one few-shot episode and score several TAFSSL variants on it.

    Builds an episode from the cached features, then evaluates five scoring
    methods (sub, sub+norm, ICA projection, ICA+clustering, ICA+mean-shift)
    and returns their accuracies via calc_acc.
    """
    z_all, y = create_episode(cl_data_file, n_way, n_support, n_query)
    model.train_mean = train_mean
    # First three methods run with a 4-dimensional reduced space.
    model.opt.reduced_dim = 4
    scores = [model.method_sub(z_all, model.sub_train_mean),
              model.method_sub(z_all, model.mean_and_norm_ber),
              model.method_project(z_all, model.mean_and_norm_ber, model.calc_ica),
              ]
    # Clustering/mean-shift variants use a larger 10-dimensional space.
    model.opt.reduced_dim = 10
    scores += [
        model.method_proj_and_cluster(z_all, model.mean_and_norm_ber, model.calc_ica),
    ]
    scores += [
        model.method_project_and_mean_shift(z_all, model.mean_and_norm_ber, model.calc_ica),
    ]
    return calc_acc(scores, y)
def run_exp(params, verbose):
    """Run 10000 episodes and return per-method (avg accuracy, ci_95) lists.

    ``verbose`` is the progress-print interval in episodes (it is used as a
    modulus, so it must be a positive integer).
    """
    print_params(params)
    n_episodes = 10000
    few_shot_params = dict(n_way=params.n_way, n_support=params.n_shot)
    model = ProtoMSP(opt=params)
    model = model.cuda()
    train_mean, cl_data_file = load_features(params)
    acc_list = []
    start_time = time.perf_counter()
    for i in range(1, n_episodes + 1):
        acc = run_episode(train_mean, cl_data_file, model, n_query=params.n_query, **few_shot_params)
        acc_list += acc
        if i % verbose == 0:
            print_msg(i, n_episodes, start_time, acc_list, acc)
    # acc_list interleaves one score per method each episode; the strided
    # slices below separate them back out per method.
    res = [avg(acc_list[ind::len(acc)]) for ind in range(len(acc))]
    ci = [ci_95(acc_list[ind::len(acc)]) for ind in range(len(acc))]
    return res, ci
def n_query_exp():
    """Sweep the number of query samples and pickle results per dataset.

    For each dataset, runs the experiment at several query counts and stores
    {n_query: (averages, ci_95s)} in exp_n_queries_<dataset>.pickle.
    """
    params = parse_args('test')
    for ds in ['mini', 'tiered']:
        params.dataset = ds
        exp_dict = {}
        for q in [2, 5, 10, 15, 30, 40, 50]:
            params.n_query = q
            res, ci = run_exp(params, verbose=1000)
            exp_dict[q] = (res, ci)
        with open(f'exp_n_queries_{ds}.pickle', 'wb') as handle:
            pickle.dump(exp_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
def n_query_exp_fig():
    """Plot accuracy vs. query count from the pickles written by n_query_exp."""
    # Import locally with the Agg backend so this works without a display.
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    for ds in ['mini', 'tiered']:
        with open(f'exp_n_queries_{ds}.pickle', 'rb') as handle:
            exp_dict = pickle.load(handle)
        x = np.array(list(exp_dict.keys()))
        values = np.array(list(exp_dict.values()))
        # values[:, 0, :] selects the averages (index 0 of each (res, ci)).
        y = values[:, 0, :]
        ax = plt.subplot(111)
        exp_name = ['simple', 'sub', 'ica', 'ica & bkm', 'ica & msp']
        y_lim = None
        for n in [*range(y.shape[1])]:
            # plt.plot(x, y[:, n].reshape(-1, ), label=exp_name[n])
            # The first query count is excluded from the plotted range.
            plt.plot(x[1:], y[:, n].reshape(-1, )[1:], label=exp_name[n], color=get_color(exp_name[n]), linewidth=2)
            if exp_name[n] == 'simple':
                # Anchor the y-axis to the weakest ('simple') baseline.
                y_lim = np.min(y[:, n])
        ax.legend(loc='lower center', fancybox=True, shadow=True, ncol=3)
        plt.ylim(bottom=y_lim - 3)
        plt.grid()
        # NOTE(review): figure(1) is fetched after plotting; pyplot reuses
        # the current figure, so this refers to the plot above — verify.
        fig = plt.figure(1)
        plt.xlabel("queries")
        plt.ylabel("accuracy")
        plt.savefig(f"num-query-exp-{ds}.png")
        plt.close(fig)
# Script entry point: extract features, run the query-count sweep, then plot.
if __name__ == '__main__':
    get_features()
    n_query_exp()
    n_query_exp_fig()
| 2,929 | 0 | 92 |
0276265483623e630c7767433a7cadbaf7c7e997 | 25 | py | Python | src/scripts/__init__.py | arcosin/ANP_TrackDriver | d1705bb77959cb3653490e126a40f8086ba084bd | [
"MIT"
] | 4 | 2019-05-22T00:17:28.000Z | 2020-08-26T02:03:33.000Z | preprocess/__init__.py | ryanwongsa/open-images-2019-challenge | b49e0933451c4bf9b31a9a8faf1bd8ba3dee1cc5 | [
"Apache-2.0"
] | 3 | 2019-07-14T09:17:11.000Z | 2022-01-13T01:15:10.000Z | preprocess/__init__.py | ryanwongsa/open-images-2019-challenge | b49e0933451c4bf9b31a9a8faf1bd8ba3dee1cc5 | [
"Apache-2.0"
] | 5 | 2021-03-24T01:02:06.000Z | 2021-05-21T16:57:05.000Z | from .preprocess import * | 25 | 25 | 0.8 | from .preprocess import * | 0 | 0 | 0 |
5e9233f492e89e51840f8454bc6abde3f18cbc0a | 3,259 | py | Python | qcommunity/optimization/obj.py | rsln-s/QCommunity | 926c64efc7add34dae5fbab20359ef5b9b4aa420 | [
"Apache-2.0"
] | 2 | 2019-09-05T23:21:49.000Z | 2020-03-10T13:54:32.000Z | qcommunity/optimization/obj.py | rsln-s/QCommunity | 926c64efc7add34dae5fbab20359ef5b9b4aa420 | [
"Apache-2.0"
] | null | null | null | qcommunity/optimization/obj.py | rsln-s/QCommunity | 926c64efc7add34dae5fbab20359ef5b9b4aa420 | [
"Apache-2.0"
] | 1 | 2020-03-10T13:54:34.000Z | 2020-03-10T13:54:34.000Z | #!/usr/bin/env python
# Returns obj_val function to be used in an optimizer
# A better and updated version of qaoa_obj.py
import networkx as nx
import numpy as np
# import matplotlib.pyplot as plt
from networkx.generators.classic import barbell_graph
import copy
import sys
import warnings
import qcommunity.modularity.graphs as gm
from qcommunity.utils.import_graph import generate_graph
from ibmqxbackend.ansatz import IBMQXVarForm
def get_obj(n_nodes,
B,
C=None,
obj_params='ndarray',
sign=1,
backend='IBMQX',
backend_params={'depth': 3},
return_x=False):
"""
:param obj_params: defines the signature of obj_val function. 'beta gamma' or 'ndarray' (added to support arbitrary number of steps and scipy.optimize.minimize.)
:return: obj_val function, number of variational parameters
:rtype: tuple
"""
if return_x:
all_x = []
all_vals = []
# TODO refactor, remove code duplication
if backend == 'IBMQX':
var_form = IBMQXVarForm(
num_qubits=n_nodes, depth=backend_params['depth'])
num_parameters = var_form.num_parameters
if obj_params == 'ndarray':
else:
raise ValueError(
"obj_params '{}' not compatible with backend '{}'".format(
obj_params, backend))
else:
raise ValueError("Unsupported backend: {}".format(backend))
if return_x:
return obj_val, num_parameters, all_x, all_vals
else:
return obj_val, num_parameters
if __name__ == "__main__":
x = np.array([2.1578616206475347, 0.1903995547630178])
obj_val, _ = get_obj_val("get_barbell_graph", 3, 3)
print(obj_val(x[0], x[1]))
obj_val, num_parameters = get_obj_val(
"get_barbell_graph", 3, 3, obj_params='ndarray', backend='IBMQX')
y = np.random.uniform(-np.pi, np.pi, num_parameters)
print(obj_val(y))
obj_val, num_parameters = get_obj_val(
"get_barbell_graph", 3, 3, obj_params='ndarray')
z = np.random.uniform(-np.pi, np.pi, num_parameters)
print(obj_val(z))
| 32.267327 | 166 | 0.59865 | #!/usr/bin/env python
# Returns obj_val function to be used in an optimizer
# A better and updated version of qaoa_obj.py
import networkx as nx
import numpy as np
# import matplotlib.pyplot as plt
from networkx.generators.classic import barbell_graph
import copy
import sys
import warnings
import qcommunity.modularity.graphs as gm
from qcommunity.utils.import_graph import generate_graph
from ibmqxbackend.ansatz import IBMQXVarForm
def get_obj(n_nodes,
B,
C=None,
obj_params='ndarray',
sign=1,
backend='IBMQX',
backend_params={'depth': 3},
return_x=False):
"""
:param obj_params: defines the signature of obj_val function. 'beta gamma' or 'ndarray' (added to support arbitrary number of steps and scipy.optimize.minimize.)
:return: obj_val function, number of variational parameters
:rtype: tuple
"""
if return_x:
all_x = []
all_vals = []
# TODO refactor, remove code duplication
if backend == 'IBMQX':
var_form = IBMQXVarForm(
num_qubits=n_nodes, depth=backend_params['depth'])
num_parameters = var_form.num_parameters
if obj_params == 'ndarray':
def obj_val(x):
resstrs = var_form.run(x)
modularities = [
gm.compute_modularity(n_nodes, B, x, C=C) for x in resstrs
]
y = np.mean(modularities)
if return_x:
all_x.append(copy.deepcopy(x))
all_vals.append({'max': max(modularities), 'mean': y})
print("Actual modularity (to be maximized): {}".format(y))
return sign * y
else:
raise ValueError(
"obj_params '{}' not compatible with backend '{}'".format(
obj_params, backend))
else:
raise ValueError("Unsupported backend: {}".format(backend))
if return_x:
return obj_val, num_parameters, all_x, all_vals
else:
return obj_val, num_parameters
def get_obj_val(graph_generator_name,
left,
right,
seed=None,
obj_params='ndarray',
sign=1,
backend='IBMQX',
backend_params={'depth': 3},
return_x=False):
# Generate the graph
G, _ = generate_graph(graph_generator_name, left, right, seed=seed)
B = nx.modularity_matrix(G).A
return get_obj(
G.number_of_nodes(),
B,
obj_params=obj_params,
sign=sign,
backend=backend,
backend_params=backend_params,
return_x=return_x)
if __name__ == "__main__":
x = np.array([2.1578616206475347, 0.1903995547630178])
obj_val, _ = get_obj_val("get_barbell_graph", 3, 3)
print(obj_val(x[0], x[1]))
obj_val, num_parameters = get_obj_val(
"get_barbell_graph", 3, 3, obj_params='ndarray', backend='IBMQX')
y = np.random.uniform(-np.pi, np.pi, num_parameters)
print(obj_val(y))
obj_val, num_parameters = get_obj_val(
"get_barbell_graph", 3, 3, obj_params='ndarray')
z = np.random.uniform(-np.pi, np.pi, num_parameters)
print(obj_val(z))
| 1,063 | 0 | 58 |
5d32eec903f269edde168438b4d81ba0b9cc8d71 | 1,274 | py | Python | tests/test_write.py | reshalfahsi/GGBColorSpace | f56994ffcd6a83762d67705116e690c7a64c9093 | [
"MIT"
] | 1 | 2020-12-06T07:38:08.000Z | 2020-12-06T07:38:08.000Z | tests/test_write.py | reshalfahsi/GGBColorSpace | f56994ffcd6a83762d67705116e690c7a64c9093 | [
"MIT"
] | 1 | 2021-06-19T02:20:02.000Z | 2021-06-19T02:20:02.000Z | tests/test_write.py | reshalfahsi/GGB | f56994ffcd6a83762d67705116e690c7a64c9093 | [
"MIT"
] | 1 | 2021-05-10T04:14:24.000Z | 2021-05-10T04:14:24.000Z | import cv2
from PIL import Image
import numpy as np
import random
import pytest
from ggb import GGB, CVLib
from ggb.testing import ggb_test
from ggb.testing import get_random_image, get_filled_image
@ggb_test
@ggb_test
if __name__ == '__main__':
pytest.main([__file__])
| 25.48 | 58 | 0.689168 | import cv2
from PIL import Image
import numpy as np
import random
import pytest
from ggb import GGB, CVLib
from ggb.testing import ggb_test
from ggb.testing import get_random_image, get_filled_image
@ggb_test
def test_opencv_write():
w = random.randint(16, 2048)
h = random.randint(16, 2048)
image = get_random_image(w, h, 3, CVLib.OPENCV)
ggb_image = GGB(image=image).process()
assert(isinstance(ggb_image.write(), np.ndarray))
w = random.randint(16, 2048)
h = random.randint(16, 2048)
value = random.randint(0, 255)
image = get_filled_image(w, h, 3, value, CVLib.OPENCV)
ggb_image = GGB(image=image).process()
assert(isinstance(ggb_image.write(), np.ndarray))
@ggb_test
def test_pil_write():
w = random.randint(16, 2048)
h = random.randint(16, 2048)
image = get_random_image(w, h, 3, CVLib.PIL)
ggb_image = GGB(image=image).process()
assert(isinstance(ggb_image.write(), Image.Image))
w = random.randint(16, 2048)
h = random.randint(16, 2048)
value = random.randint(0, 255)
image = get_filled_image(w, h, 3, value, CVLib.PIL)
ggb_image = GGB(image=image).process()
assert(isinstance(ggb_image.write(), Image.Image))
if __name__ == '__main__':
pytest.main([__file__])
| 949 | 0 | 44 |
092bdfd9880f8cd15ba0d55680a5b605d55352b5 | 9,518 | py | Python | tests/unit/test_marc_writer.py | BookOps-CAT/NightShift | dc106b55cee3ec60c790f71f20e0754506385d59 | [
"MIT"
] | null | null | null | tests/unit/test_marc_writer.py | BookOps-CAT/NightShift | dc106b55cee3ec60c790f71f20e0754506385d59 | [
"MIT"
] | 17 | 2020-12-07T04:43:48.000Z | 2022-03-30T12:44:41.000Z | tests/unit/test_marc_writer.py | BookOps-CAT/NightShift | dc106b55cee3ec60c790f71f20e0754506385d59 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Tests `marc.marc_writer.py` module
"""
from contextlib import nullcontext as does_not_raise
import logging
import os
import pickle
from pymarc import Field, MARCReader, Record
import pytest
from nightshift import __title__, __version__
from nightshift.datastore import Resource
from nightshift.marc.marc_writer import BibEnhancer
| 35.251852 | 109 | 0.560937 | # -*- coding: utf-8 -*-
"""
Tests `marc.marc_writer.py` module
"""
from contextlib import nullcontext as does_not_raise
import logging
import os
import pickle
from pymarc import Field, MARCReader, Record
import pytest
from nightshift import __title__, __version__
from nightshift.datastore import Resource
from nightshift.marc.marc_writer import BibEnhancer
class TestBibEnhancer:
    def test_init(self, caplog, stub_resource):
        """Constructor succeeds, parses the bib, and logs the enhancement."""
        with does_not_raise():
            with caplog.at_level(logging.INFO):
                result = BibEnhancer(stub_resource)
        assert result.library == "NYP"
        assert isinstance(result.resource, Resource)
        assert isinstance(result.bib, Record)
        assert "Enhancing NYP Sierra bib # b11111111a." in caplog.text
    def test_missing_full_bib(self, stub_resource):
        """A resource without a serialized full bib cannot be enhanced."""
        stub_resource.fullBib = None
        with pytest.raises(TypeError):
            BibEnhancer(stub_resource)
    @pytest.mark.parametrize(
        "resourceId,libraryId,tag, expectation",
        [
            (1, 1, "091", "eNYPL Book"),
            (2, 1, "091", "eNYPL Audio"),
            (3, 1, "091", "eNYPL Video"),
            (1, 2, "099", "eBOOK"),
            (2, 2, "099", "eAUDIO"),
            (3, 2, "099", "eVIDEO"),
        ],
    )
    def test_add_call_number_supported_resource_categories(
        self, caplog, stub_resource, resourceId, libraryId, tag, expectation
    ):
        """Call numbers go to 091 (NYP) or 099 (BPL) with per-category text."""
        stub_resource.resourceCategoryId = resourceId
        stub_resource.libraryId = libraryId
        # libraryId 1 maps to NYP, 2 to BPL.
        if libraryId == 1:
            library = "NYP"
        elif libraryId == 2:
            library = "BPL"
        be = BibEnhancer(stub_resource)
        with caplog.at_level(logging.DEBUG):
            be._add_call_number()
        assert f"Added {expectation} to {library} b11111111a." in caplog.text
        bib = be.bib
        assert str(bib[tag]) == f"={tag} \\\\$a{expectation}"
    @pytest.mark.parametrize("library,resourceId", [("QPL", 1), ("NYP", 4), ("BPL", 4)])
    def test_add_call_number_unsupported_resources(
        self, caplog, stub_resource, library, resourceId
    ):
        """Unsupported library/category adds no call number field and warns."""
        stub_resource.resourceCategoryId = resourceId
        be = BibEnhancer(stub_resource)
        be.library = library
        with caplog.at_level(logging.WARN):
            be._add_call_number()
        assert "091" not in be.bib
        assert "099" not in be.bib
        assert (
            f"Attempting to create a call number for unsupported resource category for {library} b11111111a."
            in caplog.text
        )
    @pytest.mark.parametrize(
        "resourceId,suppressed,libraryId,expectation",
        [
            pytest.param(1, False, 1, "*ov=b11111111a;b2=z;", id="nyp-ebook"),
            pytest.param(1, False, 2, "*ov=b11111111a;b2=x;", id="bpl-ebook"),
            pytest.param(1, True, 1, "*ov=b11111111a;b2=z;b3=n;", id="nyp-ebook-supp"),
            pytest.param(1, True, 2, "*ov=b11111111a;b2=x;b3=n;", id="bpl-ebook-supp"),
            pytest.param(2, False, 1, "*ov=b11111111a;b2=n;", id="nyp-eaudio"),
            pytest.param(2, False, 2, "*ov=b11111111a;b2=z;", id="bpl-eaudio"),
            pytest.param(3, True, 1, "*ov=b11111111a;b2=3;b3=n;", id="nyp-evideo-supp"),
            pytest.param(3, True, 2, "*ov=b11111111a;b2=v;b3=n;", id="bpl-evideo-supp"),
            pytest.param(4, False, 1, "*ov=b11111111a;b2=a;", id="nyp-print"),
            pytest.param(4, False, 2, "*ov=b11111111a;b2=a;", id="bpl-print"),
        ],
    )
    def test_add_command_tag(
        self, caplog, resourceId, suppressed, libraryId, expectation, stub_resource
    ):
        """949 command tag encodes overlay target, b2 code, and b3=n when suppressed."""
        stub_resource.resourceCategoryId = resourceId
        stub_resource.suppressed = suppressed
        stub_resource.libraryId = libraryId
        # libraryId 1 maps to NYP, 2 to BPL.
        if libraryId == 1:
            library = "NYP"
        elif libraryId == 2:
            library = "BPL"
        be = BibEnhancer(stub_resource)
        with caplog.at_level(logging.DEBUG):
            be._add_command_tag()
        assert (
            f"Added 949 command tag: {expectation} to {library} b11111111a."
            in caplog.text
        )
        bib = be.bib
        assert str(bib["949"]) == f"=949 \\\\$a{expectation}"
    def test_add_local_tags(self, caplog, stub_resource):
        """Pickled source fields stored on the resource (srcFieldsToKeep)
        should be unpickled and added verbatim to the bib."""
        fields = [
            Field(tag="020", indicators=[" ", " "], subfields=["a", "978123456789x"]),
            Field(
                tag="037",
                indicators=[" ", " "],
                subfields=["a", "123", "b", "Overdrive Inc."],
            ),
            Field(
                tag="856",
                indicators=["0", "4"],
                subfields=["u", "url_here", "2", "opac msg"],
            ),
        ]
        # the app stores fields to keep as a pickled blob on the resource row
        pickled_fields = pickle.dumps(fields)
        stub_resource.srcFieldsToKeep = pickled_fields
        be = BibEnhancer(stub_resource)
        with caplog.at_level(logging.DEBUG):
            be._add_local_tags()
        assert (
            "Added following local fields ['020', '037', '856'] to NYP b11111111a."
            in caplog.text
        )
        bib = be.bib
        assert str(bib["020"]) == "=020 \\\\$a978123456789x"
        assert str(bib["037"]) == "=037 \\\\$a123$bOverdrive Inc."
        assert str(bib["856"]) == "=856 04$uurl_here$2opac msg"
    def test_add_local_tags_missing_tags(self, caplog, stub_resource):
        """With no srcFieldsToKeep on the resource, _add_local_tags only logs."""
        be = BibEnhancer(stub_resource)
        with caplog.at_level(logging.DEBUG):
            be._add_local_tags()
        assert "No local tags to keep were found for NYP b11111111a." in caplog.text
    @pytest.mark.parametrize(
        "library,tag",
        [
            ("NYP", "901"),
            ("BPL", "947"),
        ],
    )
    def test_add_initials_tag(self, caplog, library, tag, stub_resource):
        """Initials tag goes to 901 for NYPL and 947 for BPL, carrying the
        app name/version."""
        be = BibEnhancer(stub_resource)
        be.library = library
        with caplog.at_level(logging.DEBUG):
            be._add_initials_tag()
        assert f"Added initials tag {tag} to {library} b11111111a." in caplog.text
        assert str(be.bib[tag]) == f"={tag} \\\\$a{__title__}/{__version__}"
    def test_add_initials_tag_invalid_library(self, stub_resource):
        """An unknown library code must leave the bib unchanged."""
        be = BibEnhancer(stub_resource)
        bib_before = str(be.bib)
        be.library = "foo"
        be._add_initials_tag()
        assert str(be.bib) == bib_before
    def test_purge_tags(self, caplog, stub_resource):
        """_purge_tags must strip the configured deletion list (020, 029, 037,
        090, 856, 910, 938) from the bib."""
        be = BibEnhancer(stub_resource)
        fields = [
            Field(tag="020", indicators=[" ", " "], subfields=["a", "978123456789x"]),
            Field(
                tag="037",
                indicators=[" ", " "],
                subfields=["a", "123", "b", "Overdrive Inc."],
            ),
            Field(
                tag="856",
                indicators=["0", "4"],
                subfields=["u", "url_here", "2", "opac msg"],
            ),
        ]
        for field in fields:
            be.bib.add_field(field)
        # make sure added tags are present
        for field in fields:
            assert field.tag in be.bib
        with caplog.at_level(logging.DEBUG):
            be._purge_tags()
        assert (
            "Removed ['020', '029', '037', '090', '856', '910', '938'] from NYP b11111111a."
            in caplog.text
        )
        # test if they were removed
        for field in fields:
            assert field.tag not in be.bib
    def test_purge_tags_non_existent(self, stub_resource):
        """Purging tags that are absent from the bib must not raise."""
        be = BibEnhancer(stub_resource)
        with does_not_raise():
            be._purge_tags()
    def test_manipulate(self, stub_resource):
        """End-to-end manipulate(): local tags kept, call number, initials tag,
        and 949 command tag all present for an NYPL ebook."""
        stub_resource.resourceCategoryId = 1
        stub_resource.libraryId = 1
        fields = [
            Field(tag="020", indicators=[" ", " "], subfields=["a", "978123456789x"]),
            Field(
                tag="037",
                indicators=[" ", " "],
                subfields=["a", "123", "b", "Overdrive Inc."],
            ),
            Field(
                tag="856",
                indicators=["0", "4"],
                subfields=["u", "url_here", "2", "opac msg"],
            ),
        ]
        pickled_fields = pickle.dumps(fields)
        stub_resource.srcFieldsToKeep = pickled_fields
        be = BibEnhancer(stub_resource)
        with does_not_raise():
            be.manipulate()
        assert str(be.bib["020"]) == "=020 \\\\$a978123456789x"
        assert str(be.bib["037"]) == "=037 \\\\$a123$bOverdrive Inc."
        assert str(be.bib["856"]) == "=856 04$uurl_here$2opac msg"
        assert str(be.bib["091"]) == "=091 \\\\$aeNYPL Book"
        assert str(be.bib["901"]) == f"=901 \\\\$a{__title__}/{__version__}"
        assert str(be.bib["949"]) == "=949 \\\\$a*ov=b11111111a;b2=z;"
    def test_save2file(self, caplog, stub_resource):
        """save2file() writes a parseable MARC record to temp.mrc."""
        be = BibEnhancer(stub_resource)
        with caplog.at_level(logging.DEBUG):
            be.save2file()
        assert "Saving to file NYP record b11111111a." in caplog.text
        assert os.path.exists("temp.mrc")
        # round-trip: the file must parse back into a pymarc Record
        with open("temp.mrc", "rb") as f:
            reader = MARCReader(f)
            bib = next(reader)
            assert isinstance(bib, Record)
        # cleanup
        os.remove("temp.mrc")
    def test_save2file_os_error(self, caplog, stub_resource, mock_os_error):
        """An OSError while writing must propagate and be logged as an error."""
        be = BibEnhancer(stub_resource)
        with caplog.at_level(logging.ERROR):
            with pytest.raises(OSError):
                be.save2file()
        assert "Unable to save record to a temp file. Error" in caplog.text
| 7,247 | 1,886 | 23 |
db40eccb4e3a9805eb8362b1213257f77b448646 | 590 | py | Python | schedule/migrations/0014_event_status.py | itmagistr/django-scheduler | 14d403b581d6ec5c195f7cd4f1567f6f636d1efb | [
"BSD-3-Clause"
] | null | null | null | schedule/migrations/0014_event_status.py | itmagistr/django-scheduler | 14d403b581d6ec5c195f7cd4f1567f6f636d1efb | [
"BSD-3-Clause"
] | null | null | null | schedule/migrations/0014_event_status.py | itmagistr/django-scheduler | 14d403b581d6ec5c195f7cd4f1567f6f636d1efb | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-02 18:17
from __future__ import unicode_literals
from django.db import migrations, models
| 28.095238 | 190 | 0.640678 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-02 18:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an integer ``status`` field (0=Open, 1=Closed, default Open) to the
    ``event`` model."""

    dependencies = [
        ('schedule', '0013_auto_20180424_1145'),
    ]
    operations = [
        migrations.AddField(
            model_name='event',
            name='status',
            field=models.IntegerField(choices=[(0, 'Open'), (1, 'Closed')], default=0, help_text='The Status codes behavior should be defined into application.', verbose_name='Status code'),
        ),
    ]
| 0 | 410 | 23 |
5f18738c3a2c1e5ce066a636d50938a7b409ce4a | 939 | py | Python | test/wien2k/w2kutils_test.py | minyez/mykit | 911413120c081be2cfcaef06d62dc40b2abd2747 | [
"MIT"
] | 4 | 2019-01-02T09:17:54.000Z | 2019-12-26T07:15:59.000Z | test/wien2k/w2kutils_test.py | minyez/mykit | 911413120c081be2cfcaef06d62dc40b2abd2747 | [
"MIT"
] | 6 | 2019-03-06T03:16:12.000Z | 2019-03-14T14:36:01.000Z | test/wien2k/w2kutils_test.py | minyez/mykit | 911413120c081be2cfcaef06d62dc40b2abd2747 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# coding = utf-8
import os
import unittest as ut
from mykit.wien2k.utils import get_casename, find_complex_file, get_default_r0, get_default_rmt, get_z
if __name__ == '__main__':
ut.main() | 27.617647 | 102 | 0.683706 | #!/usr/bin/env python3
# coding = utf-8
import os
import unittest as ut
from mykit.wien2k.utils import get_casename, find_complex_file, get_default_r0, get_default_rmt, get_z
class test_get_elements_info(ut.TestCase):
    """Tests for element-data helpers: default r0/RMT values and atomic numbers."""

    def test_r0(self):
        # the numeric suffix (site index) must not affect the default r0
        self.assertEqual(get_default_r0('Ne1'), get_default_r0('Ne10'))
    def test_rmt(self):
        self.assertEqual(get_default_rmt('Mg2'), get_default_rmt('Mg'))
    def test_z(self):
        # 'X' is the dummy/empty-sphere symbol with Z = 0
        self.assertEqual(get_z('X'), 0.0)
        self.assertEqual(get_z('Pb'), 82.0)
        self.assertEqual(get_z('Cl'), 17.0)
        self.assertEqual(get_z('Cu'), 29.0)
        self.assertEqual(get_z('Ag'), 47.0)
class test_search_file(ut.TestCase):
    """Tests for case-file discovery helpers."""

    def test_find_complex_file(self):
        self.assertRaises(FileNotFoundError, find_complex_file, 'fake_case', 'in')
        # casename defaults to the containing directory name ('wien2k' here)
        self.assertEqual(get_casename(os.path.dirname(__file__)), 'wien2k')
if __name__ == '__main__':
ut.main() | 525 | 36 | 158 |
646ad04a6e28b43086a847390064d260e25ae021 | 849 | py | Python | tests/test_nbp_api.py | woj-i/degiro-p | d6905b240fa558ba4affe835da7274272fe38dcf | [
"MIT"
] | 5 | 2021-02-21T08:16:15.000Z | 2021-05-26T14:14:10.000Z | tests/test_nbp_api.py | woj-i/degiro-pit | d6905b240fa558ba4affe835da7274272fe38dcf | [
"MIT"
] | null | null | null | tests/test_nbp_api.py | woj-i/degiro-pit | d6905b240fa558ba4affe835da7274272fe38dcf | [
"MIT"
] | null | null | null | from unittest import TestCase
from degiro_pit.config import Currency
from degiro_pit.nbp_api import NbpApi
| 28.3 | 61 | 0.664311 | from unittest import TestCase
from degiro_pit.config import Currency
from degiro_pit.nbp_api import NbpApi
class TestNbpApi(TestCase):
    """Tests for NbpApi PLN exchange-rate lookups.

    NOTE(review): expected rates are pinned to 2021-01-22; these tests
    presumably hit the live NBP service — confirm before running offline.
    """

    # fixed business day with known published rates
    test_date = "2021-01-22"
    def test_get_eur_pln(self):
        nbp = NbpApi()
        res = nbp.get_pln(TestNbpApi.test_date, Currency.EUR)
        self.assertEqual(4.5354, res)
    def test_get_usd_pln(self):
        nbp = NbpApi()
        res = nbp.get_pln(TestNbpApi.test_date, Currency.USD)
        self.assertEqual(3.7255, res)
    def test_all_currencies_are_supported(self):
        nbp = NbpApi()
        for currency in Currency:
            res = nbp.get_pln(TestNbpApi.test_date, currency)
            self.assertIsInstance(res, float)
    def test_get_none_for_holiday_date(self):
        # 2020-11-11 is a Polish public holiday: no rate table published
        nbp = NbpApi()
        res = nbp.get_pln("2020-11-11", Currency.EUR)
        self.assertIsNone(res)
| 574 | 143 | 23 |
6e87c4e4158b3902ea8dc3803de751b0eaa24e67 | 11,586 | py | Python | annotate/annotation/tasks.py | dojo-modeling/dojo | 8abc71790cbb8639a56c89791535b3df59c725ac | [
"MIT"
] | 3 | 2022-01-28T01:43:30.000Z | 2022-02-24T21:42:13.000Z | annotate/annotation/tasks.py | dojo-modeling/dojo | 8abc71790cbb8639a56c89791535b3df59c725ac | [
"MIT"
] | 8 | 2022-01-12T16:39:57.000Z | 2022-01-31T19:05:45.000Z | annotate/annotation/tasks.py | dojo-modeling/dojo | 8abc71790cbb8639a56c89791535b3df59c725ac | [
"MIT"
] | null | null | null | import copy
import json
import logging
import os
import time
from .rename import rename as rename_function
from django.apps import apps
from django.conf import settings
from django_rq import job
from mixmasta import mixmasta as mix
from utils.cache_helper import cache_get
# Load GADM3 from gadm app
if settings.CACHE_GADM:
gadm3 = apps.get_app_config("gadm").gadm3()
gadm2 = apps.get_app_config("gadm").gadm2()
else:
gadm3 = None
gadm2 = None
def dupe(annotations, rename_list, new_names):
"""annotations is a list of dictionaries, if entry["name"] is in rename_list copy over an entry for every name in new_names and rename the entry["name"] to the new name"""
added = []
new_list = []
rename_count = 0
for entry in annotations:
# if entry["name"] in primary_geo_renames: # RTK
if entry["name"] in rename_list:
# if not primary_geo_renamed: # RTK
if rename_count < len(rename_list):
rename_count += 1
for new_name in new_names:
# Don't add again, although duplicates are removed below.
if new_name in added:
continue
e = entry.copy()
e["name"] = new_name
e["display_name"] = new_name
e["type"] = new_name
new_list.append(e)
added.append(e["name"])
else:
if entry["name"] not in added:
new_list.append(entry)
added.append(entry["name"])
return new_list
def build_mapper(uuid):
"""
Description
-----------
Performs two functions:
(1) Build and return the mixmasta mapper.json from annotations.json.
(2) Return geo_select if "Geo_Select_Form" is annotated.
Returns
-------
ret: dictionary
geo, date, and feature keys for mixmasta process.
geo_select: string, default None
admin_level if set during annotation: country, admin1, admin2, admin3
"""
# Set default return value (None) for geo_select.
geo_select = None
fp = f"data/{uuid}/annotations.json"
with open(fp, "r") as f:
annotations = json.load(f)
conversion_names = {
"name": "display_name",
"geo": "geo_type",
"time": "date_type",
"format": "time_format",
"data_type": "feature_type",
"unit_description": "units_description",
"coord_pair_form": "is_geo_pair",
"qualifycolumn": "qualifies",
"string": "str",
}
ret = {"geo": [], "date": [], "feature": []}
for orig_name in annotations:
entry = {}
entry["name"] = orig_name
for x in annotations[orig_name].keys():
if x in ["redir_col"]:
continue
# Set geo_select if annotated.
if str(x).lower() == "geo_select_form":
geo_select = annotations[orig_name][x]
# Mixmasta expects "admin0" not "country".
if geo_select.lower() == "country":
geo_select = "admin0"
if x.lower() in conversion_names.keys():
new_col_name = conversion_names[x.lower()]
else:
new_col_name = x.lower()
if new_col_name != "display_name":
if new_col_name == "qualifies":
if type(annotations[orig_name][x]) == str:
annotations[orig_name][x] = [annotations[orig_name][x]]
if type(annotations[orig_name][x]) == str and new_col_name not in [
"is_geo_pair",
"qualifies",
"dateformat",
"time_format",
"description",
]:
entry[new_col_name] = annotations[orig_name][x].lower()
else:
entry[new_col_name] = annotations[orig_name][x]
else:
entry[new_col_name] = annotations[orig_name][x]
for x in ["dateassociate", "isgeopair", "qualify"]:
if x in entry.keys():
del entry[x]
ret[entry["type"]].append(entry)
for x in range(len(ret["date"])):
if "dateformat" in ret["date"][x]:
ret["date"][x]["time_format"] = ret["date"][x]["dateformat"]
del ret["date"][x]["dateformat"]
if ret["date"][x].get("primary_time", False):
ret["date"][x]["primary_date"] = True
del ret["date"][x]["primary_time"]
return ret, geo_select
@job("default", timeout=-1)
def post_mixmasta_annotation_processing(rename, context):
"""change annotations to reflect mixmasta's output"""
uuid = context["uuid"]
with open(context["mapper_fp"], "r") as f:
mixmasta_ready_annotations = json.load(f)
to_rename = {}
for k, x in rename.items():
for y in x:
to_rename[y] = k
mixmasta_ready_annotations = rename_function(mixmasta_ready_annotations, to_rename)
primary_date_renames = [
x["name"]
for x in mixmasta_ready_annotations["date"]
if x.get("primary_geo", False)
]
primary_geo_renames = [
x["name"]
for x in mixmasta_ready_annotations["geo"]
if x.get("primary_geo", False)
]
primary_geo_rename_count = 0 # RTK
mixmasta_ready_annotations["geo"] = dupe(
mixmasta_ready_annotations["geo"],
primary_geo_renames,
["admin1", "admin2", "admin3", "country", "lat", "lng"],
)
mixmasta_ready_annotations["date"] = dupe(
mixmasta_ready_annotations["date"], primary_date_renames, ["timestamp"]
)
json.dump(
mixmasta_ready_annotations,
open(f"data/{uuid}/mixmasta_ready_annotations.json", "w"),
)
| 33.102857 | 175 | 0.566718 | import copy
import json
import logging
import os
import time
from .rename import rename as rename_function
from django.apps import apps
from django.conf import settings
from django_rq import job
from mixmasta import mixmasta as mix
from utils.cache_helper import cache_get
# Load GADM3 from gadm app
if settings.CACHE_GADM:
gadm3 = apps.get_app_config("gadm").gadm3()
gadm2 = apps.get_app_config("gadm").gadm2()
else:
gadm3 = None
gadm2 = None
def dupe(annotations, rename_list, new_names):
    """Expand renamed annotation entries.

    For each entry whose "name" appears in ``rename_list`` (up to
    ``len(rename_list)`` such entries), emit one copy per name in
    ``new_names`` with "name", "display_name", and "type" set to that new
    name.  Entries not in ``rename_list`` pass through once.  Any name that
    was already emitted is skipped, so the result contains no duplicates.
    """
    emitted_names = []
    result = []
    expansions_done = 0
    for annotation in annotations:
        if annotation["name"] in rename_list:
            # Only the first len(rename_list) matching entries are expanded;
            # any further matches are dropped outright.
            if expansions_done < len(rename_list):
                expansions_done += 1
                for replacement in new_names:
                    # Don't add again, although duplicates are removed below.
                    if replacement in emitted_names:
                        continue
                    clone = annotation.copy()
                    clone["name"] = replacement
                    clone["display_name"] = replacement
                    clone["type"] = replacement
                    result.append(clone)
                    emitted_names.append(replacement)
        elif annotation["name"] not in emitted_names:
            result.append(annotation)
            emitted_names.append(annotation["name"])
    return result
def build_mapper(uuid):
    """
    Description
    -----------
    Performs two functions:
    (1) Build and return the mixmasta mapper.json from annotations.json.
    (2) Return geo_select if "Geo_Select_Form" is annotated.

    Returns
    -------
    ret: dictionary
        geo, date, and feature keys for mixmasta process.
    geo_select: string, default None
        admin_level if set during annotation: country, admin1, admin2, admin3
    """
    # Set default return value (None) for geo_select.
    geo_select = None
    fp = f"data/{uuid}/annotations.json"
    with open(fp, "r") as f:
        annotations = json.load(f)
    # map UI annotation keys -> the key names mixmasta expects
    conversion_names = {
        "name": "display_name",
        "geo": "geo_type",
        "time": "date_type",
        "format": "time_format",
        "data_type": "feature_type",
        "unit_description": "units_description",
        "coord_pair_form": "is_geo_pair",
        "qualifycolumn": "qualifies",
        "string": "str",
    }
    ret = {"geo": [], "date": [], "feature": []}
    for orig_name in annotations:
        entry = {}
        entry["name"] = orig_name
        for x in annotations[orig_name].keys():
            if x in ["redir_col"]:
                continue
            # Set geo_select if annotated.
            if str(x).lower() == "geo_select_form":
                geo_select = annotations[orig_name][x]
                # Mixmasta expects "admin0" not "country".
                if geo_select.lower() == "country":
                    geo_select = "admin0"
            if x.lower() in conversion_names.keys():
                new_col_name = conversion_names[x.lower()]
            else:
                new_col_name = x.lower()
            if new_col_name != "display_name":
                # "qualifies" must always be a list, even for a single column
                if new_col_name == "qualifies":
                    if type(annotations[orig_name][x]) == str:
                        annotations[orig_name][x] = [annotations[orig_name][x]]
                # lowercase plain string values, except keys whose case matters
                if type(annotations[orig_name][x]) == str and new_col_name not in [
                    "is_geo_pair",
                    "qualifies",
                    "dateformat",
                    "time_format",
                    "description",
                ]:
                    entry[new_col_name] = annotations[orig_name][x].lower()
                else:
                    entry[new_col_name] = annotations[orig_name][x]
            else:
                entry[new_col_name] = annotations[orig_name][x]
        # drop UI-only bookkeeping keys before handing the entry to mixmasta
        for x in ["dateassociate", "isgeopair", "qualify"]:
            if x in entry.keys():
                del entry[x]
        ret[entry["type"]].append(entry)
    # normalize date entries: dateformat -> time_format, primary_time -> primary_date
    for x in range(len(ret["date"])):
        if "dateformat" in ret["date"][x]:
            ret["date"][x]["time_format"] = ret["date"][x]["dateformat"]
            del ret["date"][x]["dateformat"]
        if ret["date"][x].get("primary_time", False):
            ret["date"][x]["primary_date"] = True
            del ret["date"][x]["primary_time"]
    return ret, geo_select
def valid_qualifier_target(entry):
    """Return True if the annotation entry may be qualified by another column.

    An entry is a valid target only when none of its qualifier/primary flags
    ("qualify", "primary_geo", "primary_time") is set to a true-ish value
    equal to True (note: ``== True`` is intentional — it matches 1 as well,
    preserving historical behavior).
    """
    flags = ("qualify", "primary_geo", "primary_time")
    return not any(entry.get(flag) == True for flag in flags)
def is_qualifier(entry):
    """Return True if the annotation entry carries any qualifier-related key.

    Only key presence is checked — the key's value is ignored.
    """
    return any(key in entry for key in ("qualify", "qualifies", "qualifyColumn"))
def clear_invalid_qualifiers(uuid):
    """Rewrite data/<uuid>/annotations.json, pruning qualifier annotations
    that point at invalid targets.

    A qualifier column may only qualify columns that exist in the annotations
    and are themselves valid targets (see ``valid_qualifier_target``).  Any
    invalid target is removed from the qualifier's "qualifyColumn" list; a
    qualifier left with an empty list is deleted entirely.  The cleaned
    annotations are written back to the same file.
    """
    fp = f"data/{uuid}/annotations.json"
    with open(fp, "r") as f:
        annotations = json.load(f)
    # map: qualifier column -> list of its targets that must be removed
    to_del = {}
    for x in annotations.keys():
        if "qualify" in annotations[x].keys():
            if annotations[x]["qualify"] == True:
                to_del[x] = []
                # normalize a single-column string to a one-element list
                if type(annotations[x]["qualifyColumn"]) == str:
                    annotations[x]["qualifyColumn"] = [annotations[x]["qualifyColumn"]]
                for y in annotations[x]["qualifyColumn"]:
                    if y in annotations.keys():
                        if not valid_qualifier_target(annotations[y]):
                            to_del[x].append(y)
                    else:
                        # target column is not annotated at all
                        to_del[x].append(y)
    to_drop = []
    for x in to_del.keys():
        for y in to_del[x]:
            annotations[x]["qualifyColumn"].remove(y)
        # qualifier with no remaining targets is dropped completely
        if annotations[x]["qualifyColumn"] == []:
            to_drop.append(x)
    for x in to_drop:
        if x in annotations.keys():
            del annotations[x]
    with open(fp, "w") as f:
        json.dump(annotations, f)
def build_meta(uuid, d, geo_select, context):
    """Assemble the mixmasta ``meta`` dict for a dataset.

    Cleaned up: removed a no-op dict literal, an unused ``fnames`` listing of
    ``d``, and shadowed duplicate assignments to ``meta`` ("date",
    "null_val", "bands") — only the last (effective) assignment of each is
    kept, so behavior is unchanged.

    Parameters
    ----------
    uuid : str
        Dataset identifier; used to locate ``data/<uuid>/...`` side files.
    d : str
        Dataset directory (kept for interface compatibility; no longer read).
    geo_select : str or None
        Geocoding admin level chosen during annotation, if any.
    context : dict
        Job context; may carry 'ft' (file type), 'uploaded_file_fp', and
        geotiff band information.

    Returns
    -------
    tuple
        ``(meta, filename, filepath)`` where filename is the basename of
        filepath.
    """
    ft = context.get('ft', 'csv')
    fp = context.get('uploaded_file_fp', f'data/{uuid}/raw_data.csv')
    meta = {"ftype": ft}
    logging.info(f"context is: {context}")
    if ft == "geotiff":
        with open(f"data/{uuid}/geotiff_info.json", "r") as f:
            tif = json.load(f)
        if 'bands' in context:
            # Multi-band geotiff: band info comes from the job context.
            meta["ftype"] = context.get('ft', 'csv')
            meta["date"] = context.get('date', '01/01/2001')
            meta["feature_name"] = context.get('Feature_name', tif.get('geotiff_Feature_Name', 'feature'))
            meta['band_name'] = context.get('Feature_name', tif.get('geotiff_Feature_Name', 'feature'))
            meta["band"] = 0
            meta["null_val"] = -9999
            meta["bands"] = context.get('bands', {})
            meta["band_type"] = context.get('band_type', 'category')
        else:
            # Single-band geotiff: band info comes from the annotation UI.
            meta["feature_name"] = tif["geotiff_Feature_Name"]
            meta["band_name"] = tif["geotiff_Feature_Name"]
            meta["null_val"] = tif["geotiff_Null_Val"]
            meta["date"] = tif["geotiff_Date"]
    if ft == "excel":
        xl = json.load(open(f"data/{uuid}/excel_info.json", "r"))
        meta["sheet"] = xl["excel_Sheet"]
    # Update meta with geocode_level if set as geo_select above.
    # If present mixmasta will override its admin param with this value.
    # Meant for use with DMC model runs.
    if geo_select is not None:
        meta["geocode_level"] = geo_select
    return meta, fp.split('/')[-1], fp
@job("default", timeout=-1)
def generate_mixmasta_files(context):
    """RQ job: prepare all inputs for a mixmasta run for dataset context["uuid"].

    Side effects: prunes invalid qualifiers, writes the mapper json file(s)
    under data/<uuid>/, and mutates ``context`` in place with gadm_level,
    output_directory, mapper_fp, raw_data_fp, and admin_level for the next
    stage of the pipeline.
    """
    uuid = context["uuid"]
    # NOTE(review): `email` is fetched but unused here — presumably kept for
    # side-effect-free cache warm-up or a later notification step; confirm.
    email = cache_get(uuid, "email", None)
    clear_invalid_qualifiers(uuid)
    # Build the mapper.json annotations, and get geo_select for geo_coding
    # admin level if set annotating lat/lng pairs.
    mixmasta_ready_annotations, geo_select = build_mapper(uuid)
    logging_preface = cache_get(uuid, "logging_preface", None)
    d = f"data/{uuid}"
    fp = ""
    meta = {}
    fn = None
    mixmasta_ready_annotations["meta"], fn, fp = build_meta(uuid, d, geo_select, context)
    logging.info(f"{logging_preface} - Began mixmasta process")
    # BYOM handling
    if context.get("mode") == "byom":
        # Default to admin2 if geo_select is not set or too precise.
        if geo_select in (None, "admin3"):
            admin_level = "admin2"
        else:
            admin_level = geo_select
        logging.info(f"{logging_preface} - set admin_level to {admin_level}")
        # BYOM runs always consume the raw csv with a minimal meta block
        byom_annotations = copy.deepcopy(mixmasta_ready_annotations)
        fn = f"{d}/raw_data.csv"
        fp = fn
        byom_annotations["meta"] = {"ftype": "csv"}
        with open(f"data/{uuid}/byom_annotations.json", "w") as f:
            json.dump(
                byom_annotations,
                f,
            )
        mapper = "byom_annotations"
    # BYOD handling
    else:
        # Default to admin3 if geo_select is not set.
        if geo_select == None:
            admin_level = "admin3"
        else:
            admin_level = geo_select
        logging.info(f"{logging_preface} - set admin_level to {admin_level}")
        mapper = "mixmasta_ready_annotations"
    # Set gadm level based on geocoding level; still using gadm2 for gadm0/1.
    with open(f"data/{uuid}/mixmasta_ready_annotations.json", "w") as f:
        json.dump(
            mixmasta_ready_annotations,
            f,
        )
    gadm_level = gadm3 if admin_level == "admin3" else gadm2
    context["gadm_level"] = gadm_level
    context["output_directory"] = d
    context["mapper_fp"] = f"data/{uuid}/{mapper}.json"
    context["raw_data_fp"] = fp
    context["admin_level"] = admin_level
    # audit copy of exactly what was handed to mixmasta
    json.dump(
        mixmasta_ready_annotations,
        open(f"data/{uuid}/sent_to_mixmasta.json", "w"),
    )
def post_mixmasta_annotation_processing(rename, context):
    """change annotations to reflect mixmasta's output

    ``rename`` maps a new column name to the list of old names it replaces;
    this is inverted into old->new and applied to the mapper file, then the
    primary geo/date annotations are fanned out to the normalized mixmasta
    output columns (admin1/2/3, country, lat, lng, timestamp) via ``dupe``.
    The result is written to data/<uuid>/mixmasta_ready_annotations.json.
    """
    uuid = context["uuid"]
    with open(context["mapper_fp"], "r") as f:
        mixmasta_ready_annotations = json.load(f)
    # invert {new_name: [old_names]} into {old_name: new_name}
    to_rename = {}
    for k, x in rename.items():
        for y in x:
            to_rename[y] = k
    mixmasta_ready_annotations = rename_function(mixmasta_ready_annotations, to_rename)
    # NOTE(review): this filters date entries on "primary_geo" — build_mapper
    # marks date entries with "primary_date"; this looks like a copy-paste
    # slip and should probably be x.get("primary_date", False). Confirm
    # against mixmasta output before changing.
    primary_date_renames = [
        x["name"]
        for x in mixmasta_ready_annotations["date"]
        if x.get("primary_geo", False)
    ]
    primary_geo_renames = [
        x["name"]
        for x in mixmasta_ready_annotations["geo"]
        if x.get("primary_geo", False)
    ]
    primary_geo_rename_count = 0  # RTK
    # fan the primary geo out to every normalized geo column mixmasta emits
    mixmasta_ready_annotations["geo"] = dupe(
        mixmasta_ready_annotations["geo"],
        primary_geo_renames,
        ["admin1", "admin2", "admin3", "country", "lat", "lng"],
    )
    mixmasta_ready_annotations["date"] = dupe(
        mixmasta_ready_annotations["date"], primary_date_renames, ["timestamp"]
    )
    json.dump(
        mixmasta_ready_annotations,
        open(f"data/{uuid}/mixmasta_ready_annotations.json", "w"),
    )
90a6a38783bd1c23407224dbe76204ca852dc6a5 | 59 | py | Python | _Sensation0/__init__.py | Geson-anko/JARVIS3 | bc599a352401a7e135ebaabead4d8e6d8835747e | [
"MIT"
] | null | null | null | _Sensation0/__init__.py | Geson-anko/JARVIS3 | bc599a352401a7e135ebaabead4d8e6d8835747e | [
"MIT"
] | null | null | null | _Sensation0/__init__.py | Geson-anko/JARVIS3 | bc599a352401a7e135ebaabead4d8e6d8835747e | [
"MIT"
] | null | null | null | from .sensation import Sensation
from .train import Train
| 19.666667 | 32 | 0.813559 | from .sensation import Sensation
from .train import Train
| 0 | 0 | 0 |
c590a9496079ddf0947998d4b4dfc4b5a7470b6f | 17,967 | py | Python | trec2014/python/cuttsum/pipeline/representation.py | kedz/cuttsum | 992c21192af03fd2ef863f5ab7d10752f75580fa | [
"Apache-2.0"
] | 6 | 2015-09-10T02:22:21.000Z | 2021-10-01T16:36:46.000Z | trec2014/python/cuttsum/pipeline/representation.py | kedz/cuttsum | 992c21192af03fd2ef863f5ab7d10752f75580fa | [
"Apache-2.0"
] | null | null | null | trec2014/python/cuttsum/pipeline/representation.py | kedz/cuttsum | 992c21192af03fd2ef863f5ab7d10752f75580fa | [
"Apache-2.0"
] | 2 | 2018-04-04T10:44:32.000Z | 2021-10-01T16:37:26.000Z | import marisa_trie
import os
import gzip
from collections import defaultdict
from nltk.tokenize import RegexpTokenizer
from cuttsum.srilm import Client
from itertools import izip
import string
from ..geo import GeoQuery
import numpy as np
import pandas as pd
from nltk.corpus import wordnet as wn
import re
| 36.969136 | 79 | 0.554294 | import marisa_trie
import os
import gzip
from collections import defaultdict
from nltk.tokenize import RegexpTokenizer
from cuttsum.srilm import Client
from itertools import izip
import string
from ..geo import GeoQuery
import numpy as np
import pandas as pd
from nltk.corpus import wordnet as wn
import re
class SalienceFeatureSet(object):
    """Toggle set for the five salience feature families (character, language
    model, frequency, geographic, query).

    Python 2 code: uses ``unicode`` and u-literals.
    """
    def __init__(self, features=None):
        # all families default to off; `features` is an iterable of family
        # names (or u'all') understood by activate_features
        self.character_features = False
        self.language_model_features = False
        self.frequency_features = False
        self.geographic_features = False
        self.query_features = False
        if features is not None:
            self.activate_features(features)
    def fs_name(self):
        """Short, filesystem-friendly name encoding the active families."""
        buf = 'fs'
        if self.character_features:
            buf += '-char'
        if self.language_model_features:
            buf += '-lm'
        if self.frequency_features:
            buf += '-tf'
        if self.geographic_features:
            buf += '-geo'
        if self.query_features:
            buf += '-query'
        return buf
    def get_feature_regex(self):
        """Alternation pattern matching the column-name prefixes of the
        active families (e.g. 'BASIC_|LM_')."""
        patts = []
        if self.character_features:
            patts.append('BASIC_')
        if self.language_model_features:
            patts.append('LM_')
        if self.frequency_features:
            patts.append('TFIDF_')
        if self.geographic_features:
            patts.append('GEO_')
        if self.query_features:
            patts.append('QUERY_')
        return '|'.join(patts)
    def __unicode__(self):
        return u'SalienceFeatureSet: ' \
            u'char[{}] lm[{}] freq[{}] geo[{}] query[{}]'.format(
                u'X' if self.character_features is True else u' ',
                u'X' if self.language_model_features is True else u' ',
                u'X' if self.frequency_features is True else u' ',
                u'X' if self.geographic_features is True else u' ',
                u'X' if self.query_features is True else u' ')
    def __str__(self):
        # Python 2 idiom: byte-string form of the unicode repr
        return unicode(self).decode(u'utf-8')
    def activate_features(self, features):
        """Turn on the families named in `features`; u'all' enables every
        family and stops processing."""
        for fset in features:
            if fset == u'all':
                self.character_features = True
                self.language_model_features = True
                self.frequency_features = True
                self.geographic_features = True
                self.query_features = True
                break
            elif fset == u'character':
                self.character_features = True
            elif fset == u'language model':
                self.language_model_features = True
            elif fset == u'frequency':
                self.frequency_features = True
            elif fset == u'geographic':
                self.geographic_features = True
            elif fset == u'query':
                self.query_features = True
    def as_list(self):
        """Names of the active families, in a fixed order."""
        features = []
        if self.character_features is True:
            features.append(u'character')
        if self.language_model_features is True:
            features.append(u'language model')
        if self.frequency_features is True:
            features.append(u'frequency')
        if self.geographic_features is True:
            features.append(u'geographic')
        if self.query_features is True:
            features.append(u'query')
        return features
    def as_set(self):
        """Active family names as a set."""
        return set(self.as_list())
class TfIdfExtractor(object):
    """Computes average tf-idf features per sentence against a current IDF
    trie and a history of previous-hour tries (marisa_trie RecordTrie files,
    gzip-compressed).  Python 2 code (xrange)."""
    def __init__(self, current_idf_path, prev_idf_paths):
        # load each idf trie if its file exists; keep None placeholders so
        # index i always lines up with time step t-i
        paths = [current_idf_path] + prev_idf_paths
        tries = []
        for path in paths:
            if os.path.exists(path):
                with gzip.open(path, u'r') as f:
                    trie = marisa_trie.RecordTrie("<dd")
                    trie.read(f)
                    tries.append(trie)
            else:
                tries.append(None)
        self.current_trie_ = tries[0]
        self.prev_tries_ = tries[1:]
        n_paths = len(paths)
        # one delta feature per previous trie
        self.features = \
            [u"TFIDF_FEATS: time since start",
             'TFIDF_FEATS: avg tfidf, t0'] \
            + [u'TFIDF_FEATS: tfidf-delta, tm-{}'.format(i)
               for i in xrange(1, n_paths)]
    def tokenize(self, strings):
        """Lowercased whitespace tokenization, one token list per string."""
        sentences = []
        for string in strings:
            tokens = [token.lower() for token in string.split(' ')]
            sentences.append(tokens)
        return sentences
    def process_streamcorpus_strings(self, strings, secs_since_start):
        """Return one feature dict per sentence: time offset, current avg
        tf-idf, and deltas against each previous trie."""
        sentences = self.tokenize(strings)
        tf_counts = self.make_tf_counts(sentences)
        feats = []
        for sentence in sentences:
            sent_feat = {"TFIDF_FEATS: time since start": secs_since_start}
            avg_tfidf_t0 = self.avg_tfidf(
                self.current_trie_, sentence, tf_counts)
            sent_feat[u'TFIDF_FEATS: avg tfidf, t0'] = avg_tfidf_t0
            for i, trie in enumerate(self.prev_tries_, 1):
                delta_idf = avg_tfidf_t0 - self.avg_tfidf(
                    trie, sentence, tf_counts)
                label = u'TFIDF_FEATS: tfidf-delta, tm-{}'.format(i)
                sent_feat[label] = delta_idf
            feats.append(sent_feat)
        return feats
    def make_tf_counts(self, sentences):
        """Document-level term frequency counts over all sentences."""
        tf_counts = defaultdict(int)
        for sentence in sentences:
            for token in sentence:
                tf_counts[token] += 1
        return tf_counts
    def process_article(self, si, corpus):
        # NOTE(review): avg_tfidf is called here with only (sentence,
        # tf_counts) but its signature is (trie, sentence, tf_counts) —
        # this would raise TypeError if ever executed; likely dead code.
        if u'article-clf' not in si.body.sentences:
            return list()
        avg_tfidfs = list()
        sents = si.body.sentences[u'article-clf']
        tf_counts = self.make_tf_counts(sents)
        for sentence in sents:
            avg_tfidf = self.avg_tfidf(sentence, tf_counts)
            avg_tfidfs.append(avg_tfidf)
        features = []
        for avg_tfidf in avg_tfidfs:
            features.append({'avg_tfidf_t0': avg_tfidf})
        return features
    def avg_tfidf(self, trie, sentence, tf_counts):
        """Mean tf*idf over the sentence's unique tokens; 0 for a missing
        trie or an empty sentence."""
        if trie is None:
            return 0 #float('nan')
        total_tfidf = 0
        n_terms = 0
        unique_words = set()
        for token in sentence:
            unique_words.add(token)
        n_terms = len(unique_words)
        if n_terms == 0:
            return 0
        for word in unique_words:
            idf = trie.get(word, None)
            if idf is None:
                idf = 0
            else:
                # trie packs single items as a list of tuple, so we need to
                # pull the actual data out.
                idf = idf[0][0]
            #print word, idf, tf_counts[word] * idf
            total_tfidf += tf_counts[word] * idf
        return total_tfidf / float(n_terms)
class BasicFeaturesExtractor(object):
    """Surface/character features per sentence: position, length, punctuation
    and case ratios, and named-entity tag ratios.  Python 2 code
    (string.maketrans on byte strings)."""
    def __init__(self, ):
        self.features = [
            u'BASIC_FEATS: doc position',
            u'BASIC_FEATS: sentence length',
            u'BASIC_FEATS: punc ratio',
            u'BASIC_FEATS: lower ratio',
            u'BASIC_FEATS: upper ratio',
            u'BASIC_FEATS: all caps ratio',
            u'BASIC_FEATS: person ratio',
            u'BASIC_FEATS: location ratio',
            u'BASIC_FEATS: organization ratio',
            u'BASIC_FEATS: date ratio',
            u'BASIC_FEATS: time ratio',
            u'BASIC_FEATS: duration ratio',
            u'BASIC_FEATS: number ratio',
            u'BASIC_FEATS: ordinal ratio',
            u'BASIC_FEATS: percent ratio',
            u'BASIC_FEATS: money ratio',
            u'BASIC_FEATS: set ratio',
            u'BASIC_FEATS: misc ratio']
        # subset of features that are named-entity tag ratios
        self.ne_features = [
            u'BASIC_FEATS: person ratio',
            u'BASIC_FEATS: location ratio',
            u'BASIC_FEATS: organization ratio',
            u'BASIC_FEATS: date ratio',
            u'BASIC_FEATS: time ratio',
            u'BASIC_FEATS: duration ratio',
            u'BASIC_FEATS: number ratio',
            u'BASIC_FEATS: ordinal ratio',
            u'BASIC_FEATS: percent ratio',
            u'BASIC_FEATS: money ratio',
            u'BASIC_FEATS: set ratio',
            u'BASIC_FEATS: misc ratio']
    def process_sentences(self, sc_strings, cnlp_strings):
        """Per-sentence features from paired raw (sc) and CoreNLP (cnlp)
        strings, plus the sentence's relative document position."""
        feats = [self.process_sentence(sc_string, cnlp_string)
                 for sc_string, cnlp_string
                 in izip(sc_strings, cnlp_strings)]
        n_sents = float(len(feats))
        for i, feat in enumerate(feats):
            feat[u'BASIC_FEATS: doc position'] = i / n_sents
        return feats
    def process_sentence(self, sc_string, cnlp_string):
        """Feature dict for one sentence (doc position is filled in later by
        process_sentences)."""
        feats = {}
        cnlp_tokens = cnlp_string.split(' ')
        sc_tokens = sc_string.split(' ')
        feats[u'BASIC_FEATS: sentence length'] = len(cnlp_tokens)
        # punctuation ratio over non-space characters
        sc_no_space = sc_string.replace(' ', '')
        sc_no_space_punc = sc_no_space.translate(
            string.maketrans("",""), string.punctuation)
        punc_ratio = 1 - float(len(sc_no_space_punc)) / float(len(sc_no_space))
        feats[u'BASIC_FEATS: punc ratio'] = punc_ratio
        # case statistics over word-initial letters / whole words
        n_lower = len(re.findall(r'\b[a-z]', sc_string))
        n_upper = len(re.findall(r'\b[A-Z]', sc_string))
        n_all_caps = len(re.findall(r'\b[A-Z]+\b', sc_string))
        n_total = float(len(re.findall(r'\b[A-Za-z]', sc_string)))
        if n_total == 0:
            n_total = 1
        feats[u'BASIC_FEATS: lower ratio'] = n_lower / n_total
        feats[u'BASIC_FEATS: upper ratio'] = n_upper / n_total
        feats[u'BASIC_FEATS: all caps ratio'] = n_all_caps / n_total
        # NE tokens are marked like __person__ in the CoreNLP string
        # NOTE(review): ne_counts.get(label) returns None for a tag outside
        # ne_features, which would make `+ 1` raise TypeError — presumably
        # tags are always in the known set; confirm.
        n_tokens = len(cnlp_tokens)
        ne_counts = {ne_feature: 0 for ne_feature in self.ne_features}
        for token in cnlp_tokens:
            if token.startswith('__') and token.endswith('__'):
                label = u'BASIC_FEATS: {} ratio'.format(token[2:-2])
                ne_counts[label] = ne_counts.get(label) + 1
        for token, count in ne_counts.iteritems():
            #label = u'BASIC_FEATS: {} ratio'.format(token[2:-2])
            feats[token] = \
                float(count) / n_tokens
        return feats
class LMProbExtractor(object):
    """Average sentence log-probability features from two SRILM server
    clients: a domain-specific LM and a Gigaword background LM."""
    def __init__(self, domain_port, domain_order,
                 gigaword_port, gigaword_order):
        # word-only tokenizer used to normalize sentences before scoring
        self.tok_ = RegexpTokenizer(r'\w+')
        self.domain_lm_ = Client(domain_port, domain_order, True)
        self.gigaword_lm_ = Client(gigaword_port, gigaword_order, True)
        self.features = [u"LM_FEATS: domain avg lp",
                         u"LM_FEATS: gigaword avg lp"]
    def process_corenlp_strings(self, strings):
        """One feature dict per input string."""
        return [self.process_corenlp_string(string) for string in strings]
    def process_corenlp_string(self, string):
        """Average log-prob of `string` under both LMs (total log-prob is
        discarded)."""
        dmn_lp, dmn_avg_lp = self.domain_lm_.sentence_log_prob(string)
        gw_lp, gw_avg_lp = self.gigaword_lm_.sentence_log_prob(string)
        return {u"LM_FEATS: domain avg lp": dmn_avg_lp,
                u"LM_FEATS: gigaword avg lp": gw_avg_lp}
    def process_article(self, si):
        """Score every sentence of the article-clf annotation of a stream
        item; returns [] if the annotation is missing.  Note: returned keys
        here lack the 'LM_FEATS: ' prefix."""
        if u'article-clf' not in si.body.sentences:
            return list()
        lm_scores = []
        for sentence in si.body.sentences[u'article-clf']:
            # normalize: join token bytes, lowercase, strip non-word chars
            bytes_string = ' '.join(token.token for token in sentence.tokens)
            uni_string = bytes_string.decode(u'utf-8')
            uni_string = uni_string.lower()
            uni_tokens = self.tok_.tokenize(uni_string)
            uni_string = u' '.join(uni_tokens)
            bytes_string = uni_string.encode(u'utf-8')
            dmn_lp, dmn_avg_lp = self.domain_lm_.sentence_log_prob(
                bytes_string)
            gw_lp, gw_avg_lp = self.gigaword_lm_.sentence_log_prob(
                bytes_string)
            lm_scores.append(
                {u"domain avg lp": dmn_avg_lp,
                 u"gigaword avg lp": gw_avg_lp})
        return lm_scores
class QueryFeaturesExtractor(object):
    """Query-match features: counts/coverage of an event's query terms and
    their WordNet synonyms, hypernyms and hyponyms in a text. (Python 2.)"""
    def __init__(self, event):
        self.event_query_ = event.query
        self.synonyms_ = []
        self.hypernyms_ = []
        self.hyponyms_ = []
        print event.type.split(' ')[0]
        # Expand the first word of the event type via WordNet
        for synset in wn.synsets(event.type.split(' ')[0]):
            synonyms = \
                [lemma.name().lower().replace(u'_', u' ').encode(u'utf-8')
                 for lemma in synset.lemmas()]
            self.synonyms_.extend(synonyms)
            # NOTE(review): the comprehension variable `synset` shadows the
            # outer loop variable here and below (works, but fragile)
            hypernyms = \
                [lemma.name().lower().replace(u'_', u' ').encode(u'utf-8')
                 for synset in synset.hypernyms()
                 for lemma in synset.lemmas()]
            self.hypernyms_.extend(hypernyms)
            hyponyms = \
                [lemma.name().lower().replace(u'_', u' ').encode(u'utf-8')
                 for synset in synset.hyponyms()
                 for lemma in synset.lemmas()]
            self.hyponyms_.extend(hyponyms)
        self.query_size_ = float(len(event.query))
        # Labels of the features this extractor produces
        self.features = [u'QUERY_FEATS: query coverage',
                         u'QUERY_FEATS: total query matches',
                         u'QUERY_FEATS: synonyms coverage',
                         u'QUERY_FEATS: total synonyms matches',
                         u'QUERY_FEATS: hypernyms coverage',
                         u'QUERY_FEATS: total hypernyms matches',
                         u'QUERY_FEATS: hyponyms coverage',
                         u'QUERY_FEATS: total hyponyms matches',]
    def process_streamcorpus_strings(self, strings):
        """Compute query features for each string."""
        return [self.process_streamcorpus_string(string)
                for string in strings]
    def process_streamcorpus_string(self, string):
        """Merge match features for query terms, synonyms, hyper/hyponyms."""
        query_feats = self.search_(string, self.event_query_, 'query')
        syn_feats = self.search_(string, self.synonyms_ , 'synonyms')
        hyper_feats = self.search_(string, self.hypernyms_ , 'hypernyms')
        hypo_feats = self.search_(string, self.hyponyms_ , 'hyponyms')
        query_feats.update(syn_feats.iteritems())
        query_feats.update(hyper_feats.iteritems())
        query_feats.update(hypo_feats.iteritems())
        return query_feats
    def search_(self, string, queries, feature_name):
        """Count case-insensitive occurrences of each query in `string`.

        Returns {coverage: fraction of queries with >=1 hit,
                 total matches: summed hit count}.
        NOTE(review): each query is used as a raw regex pattern -- terms
        containing regex metacharacters are not escaped (re.escape).
        """
        queries_covered = 0
        total_covered = 0
        for query in queries:
            hits = re.findall(query, string, re.I)
            if len(hits) > 0:
                queries_covered += 1
                total_covered += len(hits)
        if len(queries) > 0:
            qt_coverage = queries_covered / float(len(queries))
        else:
            qt_coverage = 0
        return {u'QUERY_FEATS: {} coverage'.format(feature_name): qt_coverage,
                u'QUERY_FEATS: total {} matches'.format(feature_name): \
                    total_covered}
class GeoFeaturesExtractor(object):
    """Distance-to-cluster geographic features.

    Resolves location strings to lat/lon via an external ``GeoQuery`` cache
    and measures distances to per-hour cluster centres loaded from gzipped
    TSV files.  (Python 2 code: uses xrange, list .sort on dict.items().)
    """
    def __init__(self, geo_cache_tsv_path, cluster_paths):
        self.gq_ = GeoQuery(geo_cache_tsv_path)
        self.hourly_clusters_ = self.load_cluster_paths_(cluster_paths)
        # NOTE(review): xrange vs range used inconsistently below (same
        # result, Python 2 only)
        self.features = \
            [u'GEO_FEATS: median dist to clust_tm{}'.format(t)
             for t in xrange(len(cluster_paths))] \
            + [u'GEO_FEATS: first loc min dist to clust_tm{}'.format(t)
               for t in range(len(cluster_paths))]
    def process_geo_strings(self, strings):
        """Compute geo features for a list of comma-separated location
        strings (floats, e.g. NaN, stand for "no locations").

        Returns one feature dict per input string; all entries are the SAME
        document-level dict (see aliasing note at the end).
        """
        first_loc = None  # lat/lon of the first resolvable location seen
        counts = {}       # location string -> occurrence count
        str2ll = {}       # location string -> lat/lon
        locations = []    # per-string lists of lat/lon (currently unused)
        for string in strings:
            if isinstance(string, float):
                locations.append(list())
            else:
                slocs = list()
                for loc in string.split(','):
                    ll = self.gq_.lookup_location(loc)
                    if ll is None:
                        continue  # not in the geo cache
                    if first_loc is None:
                        first_loc = np.array(ll)
                    counts[loc] = counts.get(loc, 0) + 1
                    str2ll[loc] = ll
                    slocs.append(ll)
                locations.append(slocs)
        # Most frequent locations first (Python 2: items() returns a list)
        loc_counts = counts.items()
        loc_counts.sort(key=lambda x: x[1], reverse=True)
        lls = np.array([str2ll[loc] for loc, count in loc_counts])
        feats = {}
        for t, clusters in enumerate(self.hourly_clusters_):
            if clusters is None or len(lls) == 0:
                # Missing cluster file or no resolvable locations: fall back
                # to a large sentinel distance instead of NaN
                label = u'GEO_FEATS: median dist to clust_tm{}'.format(
                    t)
                feats[label] = 12451.0 #float('nan')
                label = \
                    u'GEO_FEATS: first loc min dist to clust_tm{}'.format(t)
                feats[label] = 12451.0 #float('nan')
            else:
                # Distances of every location to every cluster centre
                D = self.gq_.compute_distances(lls[:,None], clusters)
                label = u'GEO_FEATS: median dist to clust_tm{}'.format(
                    t)
                med_dist = np.min(np.median(D, axis=0))
                #if np.isnan(med_dist):
                #    med_dist = 12451.0
                feats[label] = med_dist
                d = self.gq_.compute_distances(first_loc, clusters)
                label = \
                    u'GEO_FEATS: first loc min dist to clust_tm{}'.format(t)
                feats[label] = \
                    np.min(d)
        # NOTE(review): every list entry aliases the SAME dict object, so
        # the commented-out per-string mutation below would have written to
        # all entries at once.
        feats = [feats] * len(strings)
        # for i, locs in enumerate(locations):
        #     if len(locs) > 0:
        #         feats[i][u'GEO_FEATS: contains loc str'] = 1
        #     else:
        #         feats[i][u'GEO_FEATS: contains loc str'] = 0
        return feats
    def load_cluster_paths_(self, cluster_paths):
        """Load each gzipped TSV cluster file into a matrix; None if the
        file does not exist (handled by the sentinel branch above)."""
        clusters = []
        for path in cluster_paths:
            if os.path.exists(path):
                with gzip.open(path, u'r') as f:
                    clusters_df = pd.io.parsers.read_csv(
                        f, sep='\t', quoting=3, header=0)
                clusters.append(clusters_df.as_matrix())
            else:
                clusters.append(None)
        return clusters
| 16,656 | 75 | 907 |
f0ff86078deb2aa6845135b2c376566c8a81691f | 1,880 | py | Python | dags/utils/scale_features.py | tiendatscorpy/ETL-pipeline | e3f800596e17e0ac2b32f52ea06a196e477c3414 | [
"MIT"
] | null | null | null | dags/utils/scale_features.py | tiendatscorpy/ETL-pipeline | e3f800596e17e0ac2b32f52ea06a196e477c3414 | [
"MIT"
] | null | null | null | dags/utils/scale_features.py | tiendatscorpy/ETL-pipeline | e3f800596e17e0ac2b32f52ea06a196e477c3414 | [
"MIT"
] | null | null | null | import json
import os, errno
from typing import Dict
import numpy as np
import os
from sklearn.preprocessing import scale, minmax_scale
import logging
LOGGER = logging.getLogger(__name__)
def scale_features(input_folder: str, output_folder: str, op_conf: str, **kwargs):
    """
    Standard-scale and min-max-scale every ``.npy`` feature file found under
    ``input_folder`` (one sub-folder per genre), mirroring the genre layout
    in ``output_folder``.

    input_folder: folder which contains input audio feature files
    output_folder: folder to store output numpy files (created on demand)
    op_conf: string representation of a dict of optional parameters
    """
    # SECURITY: eval() on an externally supplied string executes arbitrary
    # code -- prefer ast.literal_eval / json.loads if op_conf is untrusted.
    optional_params = eval(op_conf)
    # Lazy %-style formatting; the original call passed optional_params as a
    # positional arg with no placeholder, which made logging emit a
    # "not all arguments converted" formatting error instead of the message.
    LOGGER.info("kwargs %s", optional_params)
    for genre in os.listdir(input_folder):
        if not os.path.isdir(f"{input_folder}/{genre}"):
            continue
        genre_input_folder = f"{input_folder}/{genre}/"
        genre_output_folder = f"{output_folder}/{genre}/"
        # exist_ok replaces the old try/except-errno.EEXIST idiom
        os.makedirs(genre_output_folder, exist_ok=True)
        for file_name in os.listdir(genre_input_folder):
            input_file_abs_path = f"{genre_input_folder}/{file_name}"
            if not (os.path.isfile(input_file_abs_path)
                    and file_name.endswith(".npy")):
                continue
            LOGGER.info(
                f"scale_features.task >>> INFO current file: {file_name}"
            )
            file_name_wo_ex = file_name[:-4]  # strip the ".npy" suffix
            # Load the array once and write both scaled variants
            y = np.load(input_file_abs_path)
            np.save(
                f"{genre_output_folder}/{file_name_wo_ex}_standardcaler.npy",
                scale(y),
            )
            np.save(
                f"{genre_output_folder}/{file_name_wo_ex}_minmaxnormalizer.npy",
                minmax_scale(y),
            )
| 35.471698 | 88 | 0.545745 | import json
import os, errno
from typing import Dict
import numpy as np
import os
from sklearn.preprocessing import scale, minmax_scale
import logging
LOGGER = logging.getLogger(__name__)
def scale_features(input_folder: str, output_folder: str, op_conf: str, **kwargs):
"""
input_folder: folder which contains input audio files
output_folder: folder to store output numpy files
"""
optional_params = eval(op_conf)
LOGGER.info("kwargs ", optional_params)
for genre in list(os.listdir(input_folder)):
if os.path.isdir(f"{input_folder}/{genre}"):
genre_input_folder = f"{input_folder}/{genre}/"
genre_output_folder = f"{output_folder}/{genre}/"
try:
os.makedirs(genre_output_folder)
except OSError as e:
if e.errno != errno.EEXIST:
raise
for file_name in list(os.listdir(genre_input_folder)):
input_file_abs_path = f"{genre_input_folder}/{file_name}"
if os.path.isfile(f"{input_file_abs_path}") and file_name.endswith(
".npy"
):
LOGGER.info(
f"scale_features.task >>> INFO current file: {file_name}"
)
file_name_wo_ex = file_name[:-4]
# load np array
y = np.load(f"{input_file_abs_path}")
y_std_scaled = scale(y)
np.save(
f"{genre_output_folder}/{file_name_wo_ex}_standardcaler.npy",
y_std_scaled,
)
y_mm_scaled = minmax_scale(y)
np.save(
f"{genre_output_folder}/{file_name_wo_ex}_minmaxnormalizer.npy",
y_mm_scaled,
)
| 0 | 0 | 0 |
e54f3fcf098da3812b7d4a0e9f85780e8a12a31b | 934 | py | Python | benches/bench_merge.py | crkrenn/distogram | 9701233e46bd9e7528071c3c90600516d06c7f5a | [
"MIT"
] | 16 | 2020-07-12T17:27:46.000Z | 2022-03-22T23:42:24.000Z | benches/bench_merge.py | crkrenn/distogram | 9701233e46bd9e7528071c3c90600516d06c7f5a | [
"MIT"
] | 7 | 2020-06-20T01:43:34.000Z | 2022-02-02T17:41:36.000Z | benches/bench_merge.py | crkrenn/distogram | 9701233e46bd9e7528071c3c90600516d06c7f5a | [
"MIT"
] | 3 | 2020-12-27T07:36:48.000Z | 2021-11-04T18:56:46.000Z | import time
from functools import reduce
import distogram
import utils
if __name__ == '__main__':
bench_merge()
| 21.227273 | 56 | 0.591006 | import time
from functools import reduce
import distogram
import utils
def bench_merge():
    """Benchmark distogram.merge across doubling bin counts (32..1024).

    Builds ``num_samples`` distograms from generated sample values (one
    value set per shifted mean; see utils.create_values), folds them into a
    single Distogram with reduce(), records the average wall-clock time per
    merged histogram, and plots the timings.
    """
    num_samples = 10
    num_points = 100_000
    # One set of sample values per histogram, mean varied per sample
    values_list = [
        utils.create_values(mean, 0.3, num_points)
        for mean in range(num_samples)
    ]
    times_dict: utils.TimesDict = {num_points: dict()}
    for n in range(6):
        bin_count = 32 * (2 ** n)  # 32, 64, 128, 256, 512, 1024
        histograms = [
            utils.create_distogram(bin_count, values)
            for values in values_list
        ]
        start = time.time()
        # Merge all histograms into one, starting from an empty Distogram
        _ = reduce(
            lambda res, val: distogram.merge(res, val),
            histograms,
            distogram.Distogram(bin_count=bin_count)
        )
        # num_samples merges were performed: report the per-merge average
        time_taken = (time.time() - start) / num_samples
        times_dict[num_points][bin_count] = time_taken
    utils.plot_times_dict(
        times_dict,
        title='merge',
    )
    return
if __name__ == '__main__':
bench_merge()
| 791 | 0 | 23 |
094ba497e8b98cc0b00f380b8d7c8ff4348b9eb1 | 1,210 | py | Python | Chapter14/r2_paths_auth/my_library/models/sample_auth_http.py | a17juanbl/exercicios | 8df4bdf232207fcbc8f1308d1937ee599c1b39d4 | [
"MIT"
] | 125 | 2020-11-28T18:00:34.000Z | 2022-03-07T17:53:22.000Z | Chapter14/r2_paths_auth/my_library/models/sample_auth_http.py | a17juanbl/exercicios | 8df4bdf232207fcbc8f1308d1937ee599c1b39d4 | [
"MIT"
] | 5 | 2021-02-02T10:03:29.000Z | 2022-03-16T07:32:28.000Z | Chapter14/r2_paths_auth/my_library/models/sample_auth_http.py | a17juanbl/exercicios | 8df4bdf232207fcbc8f1308d1937ee599c1b39d4 | [
"MIT"
] | 182 | 2020-11-29T12:07:07.000Z | 2022-03-22T04:27:51.000Z | # -*- coding: utf-8 -*-
# /!\/!\/!\/!\/!\/!\/!\/!\
# Note that this is just a sample code
# You need to add this file in __init__.py
# /!\/!\/!\/!\/!\/!\/!\/!\
from odoo import exceptions, models
from odoo.http import request
| 28.139535 | 72 | 0.623967 | # -*- coding: utf-8 -*-
# /!\/!\/!\/!\/!\/!\/!\/!\
# Note that this is just a sample code
# You need to add this file in __init__.py
# /!\/!\/!\/!\/!\/!\/!\/!\
from odoo import exceptions, models
from odoo.http import request
class IrHttp(models.AbstractModel):
    """Sample Odoo ir.http extension adding custom route auth methods.

    A route declared with auth='base_group_user' (or auth='groups(...)')
    dispatches to the matching _auth_method_* classmethod below.
    """
    _inherit = 'ir.http'
    @classmethod
    def _auth_method_base_group_user(cls):
        """Authenticate the user, then require the base.group_user group."""
        cls._auth_method_user()
        if not request.env.user.has_group('base.group_user'):
            raise exceptions.AccessDenied()
    # this is for the exercise
    @classmethod
    def _auth_method_groups(cls, group_xmlids=None):
        """Authenticate, then require membership in ANY of the given groups."""
        cls._auth_method_user()
        if not any(map(request.env.user.has_group, group_xmlids or [])):
            raise exceptions.AccessDenied()
    # the controller will be like this add this in main.py
    # NOTE(review): the routes below are illustrative only -- `http` is not
    # imported in this file and controllers belong on http.Controller, not
    # on this model (the file header says this is sample code).
    @http.route('/my_module/all-books/group_user', type='http',
                auth='base_group_user')
    def all_books_mine_base_group_user(self):
        # your code
        return ...
    # this is for the exercise
    @http.route('/my_module/all-books/groups', type='http',
                auth='groups(base.group_no_one)')
    def all_books_mine_groups(self):
        # your code
        return ...
6d5634d1807e48c38dd5968a7d8b0e5bd2f87b3d | 817 | py | Python | pypy/iterator_generator_coroutine/first_of_all/2_generator.py | DowsonJones/test_test | 9f941b20de42090e6ec9b449953ce1dae405ef73 | [
"MIT"
] | 1 | 2020-03-30T07:13:08.000Z | 2020-03-30T07:13:08.000Z | pypy/iterator_generator_coroutine/first_of_all/2_generator.py | DowsonJones/test_test | 9f941b20de42090e6ec9b449953ce1dae405ef73 | [
"MIT"
] | null | null | null | pypy/iterator_generator_coroutine/first_of_all/2_generator.py | DowsonJones/test_test | 9f941b20de42090e6ec9b449953ce1dae405ef73 | [
"MIT"
] | null | null | null |
F = fib(10) # 运行到这里没有任何反映
# print(next(F))
for i in F:
print(i)
"""
yield:
1. 保存运行状态-断点, 暂停执行将生成器挂起
2. 将yield后面表达式的值, 作为返回值返回
"""
# 使用yield实现协程
import time
if __name__ == '__main__':
main()
def fib(n):
    """Generate the first n Fibonacci numbers: 0, 1, 1, 2, 3, ...

    The generator's StopIteration carries the value 'done' when exhausted.
    """
    print('进入')
    previous, following = 0, 1
    count = 0
    while count < n:
        yield previous
        previous, following = following, previous + following
        count += 1
    return 'done'
F = fib(10) # 运行到这里没有任何反映
# print(next(F))
for i in F:
print(i)
"""
yield:
1. 保存运行状态-断点, 暂停执行将生成器挂起
2. 将yield后面表达式的值, 作为返回值返回
"""
# 使用yield实现协程
import time
def work1():
    """Infinite generator: reports how many times it has been resumed."""
    call_count = 0
    while True:
        yield f'work1 {call_count}th called'
        call_count += 1
def work2():
    """Infinite generator: reports how many times it has been resumed."""
    call_count = 0
    while True:
        yield f'work2 {call_count}th called'
        call_count += 1
def main():
    """Run the two generators as cooperative "coroutines" forever.

    Each next() call resumes a generator at its yield point, so the two
    tasks take turns on a single thread, one message each per second.
    """
    w1 = work1()
    w2 = work2()
    while True:
        print(next(w1))
        print(next(w2))
        time.sleep(1)
main()
| 493 | 0 | 98 |
5783c68228392e653ee096d49b29e66e2ac5a022 | 1,563 | py | Python | lib/opticalflow.py | hanebarla/CrowdCounting-using-PedestrianFlow | 0f7c196a8b396d92901172a334ecf0256f30ff8a | [
"MIT"
] | null | null | null | lib/opticalflow.py | hanebarla/CrowdCounting-using-PedestrianFlow | 0f7c196a8b396d92901172a334ecf0256f30ff8a | [
"MIT"
] | null | null | null | lib/opticalflow.py | hanebarla/CrowdCounting-using-PedestrianFlow | 0f7c196a8b396d92901172a334ecf0256f30ff8a | [
"MIT"
] | null | null | null | import os
import json
import cv2
import numpy as np
import matplotlib.pyplot as plt
import torch
if __name__ == "__main__":
# json file contains the test images
test_json_path = './test.json'
# the folder to output density map and flow maps
output_folder = './plot'
with open(test_json_path, 'r') as outfile:
img_paths = json.load(outfile)
for i in range(2):
img_path = img_paths[i]
img_folder = os.path.dirname(img_path)
img_name = os.path.basename(img_path)
index = int(img_name.split('.')[0])
prev_index = int(max(1,index-5))
prev_img_path = os.path.join(img_folder,'%03d.jpg'%(prev_index))
c_img = cv2.imread(img_path)
c_img = cv2.resize(c_img, (640, 360))
c_prev_img = cv2.imread(prev_img_path)
c_prev_img = cv2.resize(c_prev_img, (640, 360))
hsv = OptFlow(c_prev_img, c_img)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
save_img = np.concatenate([c_prev_img, rgb], 0)
cv2.imwrite('opticalflow/opticalflow_{}.png'.format(i), save_img)
| 27.910714 | 86 | 0.632118 | import os
import json
import cv2
import numpy as np
import matplotlib.pyplot as plt
import torch
def OptFlow(c_prev_img, c_img):
    """Dense Farneback optical flow between two BGR frames, as an HSV image.

    Hue encodes flow direction, value encodes flow magnitude (normalised to
    0-255) and saturation is fixed at 255.  Returns an array shaped like
    c_img, ready for cv2.cvtColor(..., cv2.COLOR_HSV2BGR).
    """
    prev_img = cv2.cvtColor(c_prev_img, cv2.COLOR_BGR2GRAY)
    img = cv2.cvtColor(c_img, cv2.COLOR_BGR2GRAY)
    hsv = np.zeros_like(c_img)
    hsv[..., 1] = 255  # full saturation
    # Farneback params: pyr_scale=0.5, levels=3, winsize=15, iterations=3,
    # poly_n=5, poly_sigma=1.2, flags=0
    flow = cv2.calcOpticalFlowFarneback(prev_img, img, None, 0.5, 3, 15, 3, 5, 1.2, 0)
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv[..., 0] = ang * 180 / np.pi / 2  # radians -> OpenCV hue range [0, 180)
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    return hsv
if __name__ == "__main__":
    # json file contains the test images
    test_json_path = './test.json'
    # the folder to output density map and flow maps
    output_folder = './plot'
    with open(test_json_path, 'r') as outfile:
        img_paths = json.load(outfile)
    # Visualise optical flow for the first two test images
    for i in range(2):
        img_path = img_paths[i]
        img_folder = os.path.dirname(img_path)
        img_name = os.path.basename(img_path)
        index = int(img_name.split('.')[0])
        # The "previous" frame is 5 frames earlier (clamped at frame 1)
        prev_index = int(max(1,index-5))
        prev_img_path = os.path.join(img_folder,'%03d.jpg'%(prev_index))
        c_img = cv2.imread(img_path)
        c_img = cv2.resize(c_img, (640, 360))
        c_prev_img = cv2.imread(prev_img_path)
        c_prev_img = cv2.resize(c_prev_img, (640, 360))
        hsv = OptFlow(c_prev_img, c_img)
        rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        # Stack previous frame above the flow visualisation and save
        save_img = np.concatenate([c_prev_img, rgb], 0)
        cv2.imwrite('opticalflow/opticalflow_{}.png'.format(i), save_img)
| 445 | 0 | 23 |
d367c19a7c2a7b23b387887269c0b45058aff294 | 6,978 | py | Python | fairseq/models/nat/old/latent/predictor_module.py | wangqi1996/cmlm | 78472e7edfecd8e36eaf5ae77c674033e1498735 | [
"MIT"
] | null | null | null | fairseq/models/nat/old/latent/predictor_module.py | wangqi1996/cmlm | 78472e7edfecd8e36eaf5ae77c674033e1498735 | [
"MIT"
] | null | null | null | fairseq/models/nat/old/latent/predictor_module.py | wangqi1996/cmlm | 78472e7edfecd8e36eaf5ae77c674033e1498735 | [
"MIT"
] | null | null | null | from torch import nn
from fairseq.modules import TransformerEncoderLayer, TransformerDecoderLayer
class Perceptron(nn.Module):
"""
1. 是否激活 通过是否有激活层来控制,最后一层都没有激活层
"""
class LogisticModel(nn.Module):
""" 两层感知机 """
def __init__(self, args, activation=None, dropout=0.1, contain_normalize=False, **unused):
""" 如果Logistic是模型的最后一层,contain_normalize=True; 否则,设置为False"""
super().__init__()
self.layers = nn.Sequential(
Perceptron(args.encoder_embed_dim, int(args.encoder_embed_dim / 2), drouput=dropout,
activation=activation),
Perceptron(int(args.encoder_embed_dim / 2), 1, drouput=dropout, activation=None)
)
self.activation = None
if contain_normalize:
self.activation = nn.Sigmoid()
"""
TODO: AT Decoder
"""
| 32.915094 | 120 | 0.633276 | from torch import nn
from fairseq.modules import TransformerEncoderLayer, TransformerDecoderLayer
def mean_ds(x, dim=None):
    """Mean of ``x`` computed in float32, cast back to ``x``'s dtype.

    Reduces over all elements when ``dim`` is None, otherwise over ``dim``.
    """
    as_float = x.float()
    reduced = as_float.mean() if dim is None else as_float.mean(dim)
    return reduced.type_as(x)
class Perceptron(nn.Module):
    """Single linear layer with optional activation and dropout.

    Whether the layer is activated is controlled by supplying an activation
    class (e.g. ``nn.Sigmoid``); final layers are built with no activation.
    The misspelled ``drouput`` keyword is kept for backward compatibility
    with existing callers.
    """
    def __init__(self, input_dim, output_dim, activation=None, drouput=0.1, **unused):
        super(Perceptron, self).__init__()
        self.linear = nn.Linear(input_dim, output_dim)
        # Instantiate the activation class, if one was provided
        self.activate = activation() if activation is not None else None
        # A rate of 0 (or less) disables dropout entirely
        self.dropout = nn.Dropout(p=drouput) if drouput > 0 else None
    def forward(self, x, dropout=True):
        # Dropout is always disabled outside training mode
        if not self.training:
            dropout = False
        out = self.linear(x)
        if self.activate is not None:
            out = self.activate(out)
        # Guard against self.dropout being None (drouput <= 0): the original
        # crashed with "TypeError: 'NoneType' object is not callable" when
        # training with dropout disabled at construction time.
        if dropout and self.dropout is not None:
            out = self.dropout(out)
        return out
class LogisticModel(nn.Module):
    """Two-layer perceptron producing one scalar score per position."""
    def __init__(self, args, activation=None, dropout=0.1, contain_normalize=False, **unused):
        """If this logistic model is the final layer of the network, pass
        contain_normalize=True; otherwise set it to False."""
        super().__init__()
        # Hidden layer halves the embedding dim; output layer has no
        # activation (sigmoid is applied separately below, if requested)
        self.layers = nn.Sequential(
            Perceptron(args.encoder_embed_dim, int(args.encoder_embed_dim / 2), drouput=dropout,
                       activation=activation),
            Perceptron(int(args.encoder_embed_dim / 2), 1, drouput=dropout, activation=None)
        )
        self.activation = None
        if contain_normalize:
            self.activation = nn.Sigmoid()
    def forward(self, decoder_out=None, normalize=False, **unused):
        # decoder_out: (..., encoder_embed_dim) -> scalar score per position
        out = self.layers(decoder_out)
        # Sigmoid only when both constructed with contain_normalize=True and
        # the caller asks for normalized output
        if self.activation is not None and normalize:
            out = self.activation(out)
        return out.squeeze(-1)
"""
TODO: AT Decoder
"""
class DecoderLayer(nn.Module):
    """Stack of Transformer decoder layers topped by a logistic predictor,
    producing one score per target position."""
    def __init__(self, args, pad, num_layers, contain_normalize=True, **unused):
        super().__init__()
        self.padding_idx = pad
        self.layers = nn.ModuleList(
            [TransformerDecoderLayer(args, no_encoder_attn=False) for _ in range(num_layers)]
        )
        self.predictor = LogisticModel(args, activation=nn.Sigmoid, dropout=0.3, contain_normalize=contain_normalize,
                                       **unused)
    def forward(self, encoder_output, encoder_padding_mask, decoder_out, decoder_input, normalize=False, **unused):
        # NAT (non-autoregressive) decoder
        decoder_padding_mask = decoder_input.eq(self.padding_idx)
        batch_size, _ = encoder_padding_mask.shape
        # fairseq transformer layers expect time-first (T, B, C) tensors;
        # transpose when the inputs arrive batch-first.
        # NOTE(review): this heuristic misfires if batch size == seq length.
        b, _, _ = decoder_out.shape
        if b == batch_size:
            decoder_out = decoder_out.transpose(0, 1)
        b, _, _ = encoder_output.shape
        if b == batch_size:
            encoder_output = encoder_output.transpose(0, 1)
        input = decoder_out
        for layer in self.layers:
            input, _, _ = layer(
                input,
                encoder_output,
                encoder_padding_mask,
                incremental_state=None,
                self_attn_mask=None,
                self_attn_padding_mask=decoder_padding_mask)
        # Back to batch-first before scoring each position
        out = self.predictor(decoder_out=input.transpose(0, 1), normalize=normalize)
        # Normalization (if any) has already been applied by the logistic
        # predictor above
        return out
class EncoderLayer(nn.Module):
    """Stack of Transformer encoder layers with an optional logistic scorer."""
    def __init__(self, args, num_layers=1, contain_logistic=True, contain_normalize=True, pad=0, **unused):
        """contain_logistic and contain_normalize are effectively coupled:
        normalization only matters when the logistic scorer is present."""
        super().__init__()
        self.padding_idx = pad
        self.layers = nn.ModuleList(
            [TransformerEncoderLayer(args) for _ in range(num_layers)]
        )
        self.predictor = None
        if contain_logistic:
            self.predictor = LogisticModel(args, activation=nn.Sigmoid, dropout=0.3,
                                           contain_normalize=contain_normalize, **unused)
    def forward(self, input, mask=None, decoder_input=None, normalize=False):
        """
        Two usage modes:
        1. On the encoder side: pass mask=encoder padding mask, decoder_input=None
        2. On the decoder side: pass mask=None, decoder_input=the decoder input
           tokens (the padding mask is derived from them)
        """
        if mask is None:
            mask = decoder_input.eq(self.padding_idx)
        for layer in self.layers:
            input = layer(input, encoder_padding_mask=mask)
        # With a predictor: return per-position scores (batch-first);
        # without: return the transformed features unchanged
        if self.predictor is not None:
            out = self.predictor(input.transpose(0, 1), normalize=normalize)
            return out
        return input
class LSTMLayer(nn.Module):
    """Bidirectional GRU over padded sequences with a two-layer scorer.

    (Named LSTMLayer but actually built on nn.GRU.)
    """
    def __init__(self, args, num_layers=1, activation=nn.Sigmoid, dropout=0.2, pad=0, contain_logistic=True):
        super().__init__()
        self.padding_idx = pad
        # Bidirectional, so the output feature size is 2 * 300 = 600
        self.lstm = nn.GRU(input_size=args.encoder_embed_dim, hidden_size=300, num_layers=num_layers,
                           bidirectional=True, dropout=dropout, batch_first=True)
        self.predictor = None
        if contain_logistic:
            self.predictor = nn.Sequential(
                Perceptron(input_dim=600, output_dim=300, activation=activation, drouput=0.2),
                Perceptron(input_dim=300, output_dim=1, activation=None, drouput=0.2)  # the last layer has no activation
            )
        # NOTE(review): self.activate is always assigned, so the
        # "if self.activate:" branch in forward() is always taken (an
        # nn.Module instance is always truthy) and the predictor is always
        # applied -- even when contain_logistic=False (predictor is None,
        # which would crash).
        self.activate = activation()
    def forward(self, decoder_out=None, normalize=False, decoder_input=None, h_0=None, **unused):
        # True sequence lengths from the non-pad positions of decoder_input
        x_length = decoder_input.ne(self.padding_idx).sum(-1)
        packed_x = nn.utils.rnn.pack_padded_sequence(decoder_out, x_length, batch_first=True, enforce_sorted=False)
        lstm_output, _ = self.lstm(packed_x, h_0)
        lstm_output, _ = nn.utils.rnn.pad_packed_sequence(lstm_output, padding_value=self.padding_idx, batch_first=True)
        if self.activate:
            out = self.predictor(lstm_output).squeeze(-1)
        else:
            out = lstm_output
        return self.activate(out) if normalize else out
class EncoderDecoder(nn.Module):
    """Encoder/decoder predictor: transformer encoder stack (features only)
    feeding a decoder stack that scores each target position."""
    def __init__(self, args, encoder_layers=3, decoder_layers=3, pad=0):
        # Three layers in each block by default
        super().__init__()
        # contain_logistic=False: the encoder returns features, not scores
        self.encoder = EncoderLayer(args, num_layers=encoder_layers, contain_logistic=False)
        self.decoder = DecoderLayer(args, num_layers=decoder_layers, pad=pad)
    def forward(self, encoder_out, decoder_out, decoder_input, normalize=False, **unused):
        # encoder_out is a fairseq EncoderOut-style namedtuple
        encoder_output = self.encoder(encoder_out.encoder_out, encoder_out.encoder_padding_mask, normalize=False)
        decoder_output = self.decoder(encoder_output, encoder_out.encoder_padding_mask, decoder_out,
                                      decoder_input, normalize=normalize)
        return decoder_output
class BCELoss(nn.Module):
    """Element-wise binary cross-entropy on logits with inverted targets.

    Targets use 1 to mean "needs masking", while a higher output score means
    "does not need masking", so targets are flipped before the loss.
    """
    def __init__(self):
        super().__init__()
        self.loss_class = nn.BCEWithLogitsLoss(reduction="none")
    def forward(self, outputs, targets):
        # target: 1 means the position needs masking
        # output: a higher score means the position does NOT need masking
        flipped_targets = (~targets).float()
        return self.loss_class(outputs, flipped_targets)
| 4,602 | 1,193 | 432 |
5f0496938b0764b39076e5b64d229dd8946afff8 | 598 | py | Python | python-experiments/lambda_example.py | MichaelCurrin/python-2016 | 00af4e61175fc15aebfab208c76fe9db845d2f4c | [
"MIT"
] | null | null | null | python-experiments/lambda_example.py | MichaelCurrin/python-2016 | 00af4e61175fc15aebfab208c76fe9db845d2f4c | [
"MIT"
] | null | null | null | python-experiments/lambda_example.py | MichaelCurrin/python-2016 | 00af4e61175fc15aebfab208c76fe9db845d2f4c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
""" flatten a 2 dimensional list into a 1 dimension list
by joining column items in a row into one item as single comma-separated string
This is useful for preparing data for a CSV writer function which requires a 1-dimensional list of rows with no columns
Alternatively, the join functional can be moved into the CSV writer so that the function can accept 2 dimensional lists
"""
list = [("A", "B", "C"), ("34", "32647", "43"), ("4556", "35235", "23623")]
str = map(lambda x: ",".join(x), list)
print str
"""
#output
['A,B,C',
'34,32647,43',
'4556,35235,23623']
"""
| 27.181818 | 119 | 0.680602 | # -*- coding: utf-8 -*-
""" flatten a 2 dimensional list into a 1 dimension list
by joining column items in a row into one item as single comma-separated string
This is useful for preparing data for a CSV writer function which requires a 1-dimensional list of rows with no columns
Alternatively, the join functional can be moved into the CSV writer so that the function can accept 2 dimensional lists
"""
list = [("A", "B", "C"), ("34", "32647", "43"), ("4556", "35235", "23623")]
str = map(lambda x: ",".join(x), list)
print str
"""
#output
['A,B,C',
'34,32647,43',
'4556,35235,23623']
"""
| 0 | 0 | 0 |
afcd895d40b418f27e030dda81dde145aeb07ff0 | 49,651 | py | Python | src/NebulaBayes/tests/test_NB.py | ADThomas-astro/NebulaBayes | bda65809f43bc336914ce39ae59123e8be1abcbc | [
"MIT"
] | 2 | 2018-08-13T13:50:38.000Z | 2019-09-12T01:45:21.000Z | src/NebulaBayes/tests/test_NB.py | ADThomas-astro/NebulaBayes | bda65809f43bc336914ce39ae59123e8be1abcbc | [
"MIT"
] | null | null | null | src/NebulaBayes/tests/test_NB.py | ADThomas-astro/NebulaBayes | bda65809f43bc336914ce39ae59123e8be1abcbc | [
"MIT"
] | 1 | 2018-09-17T19:10:07.000Z | 2018-09-17T19:10:07.000Z | from __future__ import print_function, division
from collections import OrderedDict as OD
import itertools
import os
import unittest
from astropy.io import fits # FITS file I/O
from astropy.table import Table # Used in converting to pandas DataFrame
import numpy as np
import pandas as pd
import NebulaBayes
from NebulaBayes import NB_Model, __version__
from NebulaBayes.NB1_Process_grids import RegularGridResampler
from NebulaBayes.NB3_Bayes import NB_nd_pdf
"""
Test suite to test NebulaBayes. Mostly functional and regression tests, with
some unit tests as well.
Works with Python 2 and Python 3.
To run only a particular test, type (e.g.):
python3 test_NB.py Test_real_data_with_dereddening
This test suite can be run in-place in NebulaBayes/tests under the NebulaBayes
installation directory (but use the correct python version for the
installation location).
Adam D. Thomas 2017
"""
clean_up = True # Delete test output files after running?
# Save test outputs in NebulaBayes/tests/test_outputs
THIS_FILE_DIR = os.path.dirname(os.path.realpath(__file__))
TEST_DIR = os.path.join(THIS_FILE_DIR, "test_outputs")
###############################################################################
# Helper functions
def build_grid(param_range_dict, line_peaks_dict, n_gridpts_list, std_frac=0.25):
    """
    Initialise a grid - create a pandas DataFrame table.  Fluxes for each
    emission line form a Gaussian ball around a specified point.
    param_range_dict: Ordered dict mapping parameter names to a tuple giving
                      the parameter minimum and maximum
    line_peaks_dict: Ordered dict mapping line names to the location
                     (as a tuple) of the peak of the line flux in the grid, in
                     gridpoint index coordinates (from zero)
    std_frac: Fraction of the range in each dimension used for the std
    n_gridpts_list is a list of the number of gridpoints in each dimension.
    """
    param_names = list(param_range_dict.keys())
    param_val_arrs = [np.linspace(r[0], r[1], n) for r, n in zip(
                                param_range_dict.values(), n_gridpts_list)]
    line_names = list(line_peaks_dict.keys())
    # Gaussian width per dimension: a fixed fraction of each parameter range
    std = np.array([(r[1] - r[0]) * std_frac for r in param_range_dict.values()])
    # Convert each line's peak gridpoint indices to parameter-space coords
    line_peak_vals = {}
    for line, peak_inds in line_peaks_dict.items():
        line_peak = []
        for p, peak_ind, val_arr in zip(param_names, peak_inds, param_val_arrs):
            p_min, dp = val_arr[0], np.diff(val_arr)[0]
            line_peak.append(p_min + peak_ind * dp)
        line_peak_vals[line] = line_peak  # An ND list corresponding to peak_inds
    flux_fns = {}
    for l, peak_tuple in line_peaks_dict.items():
        peak = np.array(line_peak_vals[l])  # ND vector
        # Unnormalised ND Gaussian centred on this line's peak (flux == 1 at
        # the exact peak).  The peak is bound as a default argument so each
        # line keeps its own centre - a bare closure would late-bind to the
        # final loop value.  (Reconstructed: the nested def was missing.)
        def gaussian(x, peak=peak):
            return np.exp(-0.5 * np.sum(((x - peak) / std)**2))
        flux_fns[l] = gaussian
    # Make DataFrame table:
    columns = param_names + line_names
    # np.prod, not np.product: the latter was removed in NumPy 2.0
    n_gridpts = np.prod(n_gridpts_list)
    OD_for_DF = OD([(c, np.full(n_gridpts, np.nan)) for c in columns])
    DF_grid = pd.DataFrame(OD_for_DF)
    # Iterate over rows, filling in the table
    for i, p_tuple in enumerate(itertools.product(*param_val_arrs)):
        # Add parameter values into their columns:
        for p, n in zip(p_tuple, param_names):
            DF_grid.loc[i, n] = p
        # Add "model" line fluxes into their columns:
        for l in line_names:
            DF_grid.loc[i, l] = flux_fns[l](np.array(p_tuple))
    return DF_grid
def extract_grid_fluxes_i(DF, p_name_ind_map, line_names):
    """
    Extract emission line fluxes from a grid (represented as a DataFrame) by
    inputting gridpoint indices and taking the fluxes at the nearest gridpoint

    DF:             One row per gridpoint; parameter columns plus one flux
                    column per emission line.
    p_name_ind_map: Maps each parameter name to the index (from zero) of the
                    desired value in that parameter's sorted unique values.
    line_names:     Names of the flux columns to extract.
    Returns a list with the flux of each line at the selected gridpoint.
    """
    val_arrs = {p: np.unique(DF[p].values) for p in p_name_ind_map}
    # The table must be a full regular grid (one row per value combination).
    # np.prod, not np.product: the latter was removed in NumPy 2.0.
    assert len(DF) == np.prod([len(v) for v in val_arrs.values()])
    where = np.full(len(DF), True, dtype=bool)
    for p, ind in p_name_ind_map.items():
        where &= (DF.loc[:, p] == val_arrs[p][ind])
    assert np.sum(where) == 1  # The indices must select exactly one gridpoint
    return [DF[line].values[where][0] for line in line_names]
###############################################################################
# Helper class
class Base_2D_Grid_2_Lines(unittest.TestCase):
    """
    Base class holding setup and cleanup methods to make a 2D grid with only 2
    emission lines, and using a 2D Gaussian to make the grid.  There are only
    two lines, but one has fluxes set to all 1 and is just for normalisation.
    """
    params = ["p1", "p2"]  # Grid parameter names
    param_range_dict = OD( [("p1", (-5, 3)), ("p2", (1.2e6, 15e6))] )
    n_gridpts_list = (11, 9) # Number of gridpoints in each dimension
    interpd_shape = (50, 45)  # Interpolated grid shape requested from NB
    lines = ["L1", "L2"] # Line names
    line_peaks = [8, 5] # Gridpoint indices from zero
    @classmethod
    def setUpClass(cls):
        """ Make grid and run NebulaBayes to obtain the result object """
        # NOTE(review): only the NB_Model is constructed here; cls.Result
        # (used by subclasses) is presumably created elsewhere -- confirm.
        line_peaks_dict = OD([(l,cls.line_peaks) for l in cls.lines])
        cls.DF = build_grid(cls.param_range_dict, line_peaks_dict, cls.n_gridpts_list)
        cls.val_arrs = OD([(p,np.unique(cls.DF[p].values)) for p in cls.params])
        cls.DF.loc[:,"L1"] = 1. # We'll normalise by this line
        # Write the grid to CSV so NB_Model can read it back in
        cls.grid_file = os.path.join(TEST_DIR, cls.__name__ + "_grid.csv")
        cls.DF.to_csv(cls.grid_file, index=False)
        cls.NB_Model_1 = NB_Model(cls.grid_file, cls.params, cls.lines,
                                  interpd_grid_shape=cls.interpd_shape)
    @classmethod
    def tearDownClass(cls):
        """ Remove the output when tests in this class have finished """
        if clean_up:
            os.remove(cls.grid_file)
            if hasattr(cls, "posterior_plot"):
                os.remove(cls.posterior_plot)
###############################################################################
class Test_Obs_from_Peak_Gridpoint_2D_Grid_2_Lines(Base_2D_Grid_2_Lines):
    """
    Test for a grid from Base_2D_Grid_2_Lines: Take a gridpoint that is at
    the peak of the Gaussian ball of emission line fluxes, and check that
    treating these fluxes as observations leads to correct estimates from
    NebulaBayes.
    """
    test_gridpoint = [8, 5] # From zero. [11, 9] total gridpoints in each dim
    # NOTE(review): the @classmethod below decorates an instance-style test
    # method ("self" is actually the class when called) -- it only works
    # because all attributes used are class-level; likely leftover from a
    # removed setUpClass.  Confirm against the original file.
    @classmethod
    def test_parameter_estimates(self):
        """ Ensure the parameter estimates are as expected """
        DF_est = self.Result.Posterior.DF_estimates
        self.assertTrue(all(p in DF_est.index for p in self.params))
        # Tolerance for distance between gridpoint we chose and the estimate:
        grid_sep_frac = 0.1  # Allowed fraction of distance between gridpoints
        for p, test_ind in zip(self.params, self.test_gridpoint):
            tol = np.diff(self.val_arrs[p])[0] * grid_sep_frac
            value = self.val_arrs[p][test_ind]  # Expected parameter value
            est = DF_est.loc[p, "Estimate"]  # NebulaBayes estimate
            self.assertTrue(np.isclose(est, value, atol=tol))
    def test_raw_Grid_spec(self):
        """ Ensure the raw grid spec is as expected """
        RGrid_spec = self.NB_Model_1.Raw_grids
        self.assertEqual(RGrid_spec.param_names, self.params)
        self.assertEqual(RGrid_spec.ndim, len(self.params))
        self.assertEqual(RGrid_spec.shape, self.n_gridpts_list)
        self.assertEqual(RGrid_spec.n_gridpoints, np.product(self.n_gridpts_list))
        for a1, a2 in zip(RGrid_spec.param_values_arrs, self.val_arrs.values()):
            self.assertTrue(np.allclose(np.asarray(a1), np.asarray(a2)))
    def test_interpolated_Grid_spec(self):
        """ Ensure the interpolated grid spec is as expected """
        IGrid_spec = self.Result.Grid_spec
        self.assertEqual(IGrid_spec.param_names, self.params)
        self.assertEqual(IGrid_spec.param_display_names, self.params)
        self.assertEqual(IGrid_spec.shape, tuple(self.interpd_shape))
        self.assertEqual(IGrid_spec.n_gridpoints, np.product(self.interpd_shape))
    @classmethod
    def tearDownClass(cls):
        """ Remove the output files when tests in this class have finished """
        super(Test_Obs_from_Peak_Gridpoint_2D_Grid_2_Lines,cls).tearDownClass()
        if clean_up:
            # Per-line likelihood plots written by the NB run
            files = [os.path.join(TEST_DIR, l +
                          "_PDF_contributes_to_likelihood.pdf") for l in ["L1", "L2"]]
            for file_i in files:
                os.remove(file_i)
###############################################################################
class Test_Obs_from_nonPeak_Gridpoint_2D_Grid_2_Lines(Base_2D_Grid_2_Lines):
    """
    Test for a grid from Base_2D_Grid_2_Lines: Take a gridpoint that is NOT at
    the peak of the Gaussian ball of emission line fluxes.
    Note that we don't check the values in the posterior or parameter
    estimates - there isn't an obvious way to do this here.
    We also test that a numpy array prior is accepted.
    """
    longMessage = True  # Append messages to existing message
    test_gridpoint = [6, 4]  # From zero. [11, 9] total gridpoints in each dim,
    # the line peak is at line_peaks = [8, 5]

    @classmethod
    def setUpClass(cls):
        """ Run NebulaBayes on fluxes from an off-peak gridpoint, using a
        flat numpy-array prior. (Restored: this setUpClass was missing,
        which left a stray @classmethod decorator on a test method.) """
        super(Test_Obs_from_nonPeak_Gridpoint_2D_Grid_2_Lines, cls).setUpClass()
        test_pt = OD(zip(cls.params, cls.test_gridpoint))  # param -> gridpt index
        obs_fluxes = extract_grid_fluxes_i(cls.DF, test_pt, ["L1", "L2"])
        obs_errors = [f / 7. for f in obs_fluxes]
        cls.posterior_plot = os.path.join(TEST_DIR, cls.__name__ + "_posterior.pdf")
        kwargs = {"posterior_plot": cls.posterior_plot, "norm_line": "L1",
                  "prior": np.ones(cls.NB_Model_1.Interpd_grids.shape)
                  }
        cls.Result = cls.NB_Model_1(obs_fluxes, obs_errors, cls.lines, **kwargs)

    def test_output_deredden_flag(self):
        """ Dereddening should be off by default """
        self.assertTrue(self.Result.deredden is False)

    def test_output_chi2_positive(self):
        """ The best-model chi2 should be positive """
        chi2 = self.Result.Posterior.best_model["chi2"]
        self.assertTrue(chi2 > 0, msg="chi2 is " + str(chi2))

    def test_output_extinction_is_NA(self):  # Since we didn't deredden
        """ Extinction should be flagged "NA" when deredden is False """
        Av = self.Result.Posterior.best_model["extinction_Av_mag"]
        self.assertTrue(Av == "NA (deredden is False)")

    def test_parameters_in_output(self):
        """ Check all parameters are found in output """
        DF_est = self.Result.Posterior.DF_estimates
        self.assertTrue(all(p in DF_est.index for p in self.params))

    # Posterior is shaped like a donut. Check for a single local min?
###############################################################################
# Test the NebulaBayes ND linear interpolation
###############################################################################
class Test_1D_grid_and_public_attributes(unittest.TestCase):
    """
    Test that a 1D grid works and gives expected results.
    We use a gaussian 1D "grid", and input a point at the peak into NB to
    ensure NB finds the correct point.
    We also test that a DataFrame grid table is accepted.
    """
    longMessage = True  # Append messages to existing message

    @classmethod
    def setUpClass(cls):
        """ Build a 1D Gaussian "grid" as a DataFrame and run NebulaBayes on
        fluxes taken from a chosen gridpoint. (Restored: this setUpClass
        was missing, leaving a stray @classmethod on a test method.) """
        # Make a 1D grid:
        test_gridpoint = 45  # From zero
        cls.test_gridpoint = test_gridpoint
        p_vals = np.linspace(-2, 8, 100)
        cls.p_vals = p_vals
        peak1, peak2, peak3 = p_vals[8], p_vals[60], p_vals[83]
        std1, std2, std3 = 1.1, 1.8, 4.3
        flux_0 = 3.0 * np.ones_like(p_vals)  # Flat line, used as norm_line
        flux_1 = 13. * np.exp(-np.sqrt(((p_vals - peak1) / std1)**2) / 2)
        flux_2 = 13. * np.exp(-np.sqrt(((p_vals - peak2) / std2)**2) / 2)
        flux_3 = 21. * np.exp(-np.sqrt(((p_vals - peak3) / std3)**2) / 2)
        cls.lines = ["l0", "l1", "l2", "l3"]
        DF_grid1D = pd.DataFrame({"P0": p_vals, "l0": flux_0, "l1": flux_1,
                                  "l2": flux_2, "l3": flux_3})
        obs_fluxes = [x[test_gridpoint] for x in [flux_0, flux_1, flux_2, flux_3]]
        obs_errors = [f / 7. for f in obs_fluxes]
        cls.posterior_plot = os.path.join(TEST_DIR,
                                          cls.__name__ + "_posterior.pdf")
        cls.best_model_table = os.path.join(TEST_DIR,
                                            cls.__name__ + "_best_model.csv")
        cls.NB_Model_1 = NB_Model(DF_grid1D, ["P0"], cls.lines, interp_order=1,
                                  interpd_grid_shape=[300])
        # Also test cubic interpolation for the 1D case:
        cls.NB_Model_2 = NB_Model(DF_grid1D, ["P0"], cls.lines, interp_order=3,
                                  interpd_grid_shape=[300])
        # We test the case-insensitivity of the norm_line, by writing
        # "L0" instead of "l0" here:
        kwargs = {"posterior_plot": cls.posterior_plot, "norm_line": "L0",
                  "best_model_table": cls.best_model_table}
        cls.Result = cls.NB_Model_1(obs_fluxes, obs_errors, cls.lines, **kwargs)

    def test_output_deredden_flag(self):
        """ Dereddening should be off by default """
        self.assertTrue(self.Result.deredden is False)

    def test_output_chi2_positive(self):
        """ The best-model chi2 should be positive """
        chi2 = self.Result.Posterior.best_model["chi2"]
        self.assertTrue(chi2 > 0, msg="chi2 is " + str(chi2))

    def test_output_extinction_is_NA(self):  # Since we didn't deredden
        """ Extinction should be flagged "NA" when deredden is False """
        Av = self.Result.Posterior.best_model["extinction_Av_mag"]
        self.assertTrue(Av == "NA (deredden is False)")

    def test_parameter_estimate(self):
        """ Ensure the single parameter estimate is as expected """
        DF_est = self.Result.Posterior.DF_estimates
        self.assertTrue("P0" in DF_est.index)
        lower = self.p_vals[self.test_gridpoint - 1]
        upper = self.p_vals[self.test_gridpoint + 1]
        est = DF_est.loc["P0", "Estimate"]
        self.assertTrue(lower < est < upper, msg="{0}, {1}, {2}".format(
            lower, est, upper))

    def test_NB_Model_attributes(self):
        """ Check that the list of public attributes is what is documented """
        public_attrs = sorted([a for a in dir(self.NB_Model_1)
                               if not a.startswith("_")])
        expected_attrs = ["Interpd_grids", "Raw_grids"]
        self.assertTrue(public_attrs == expected_attrs, msg=str(public_attrs))

    def test_NB_Result_attributes(self):
        """ Check that the list of public attributes is what is documented """
        public_attrs = sorted([a for a in dir(self.Result)
                               if not a.startswith("_")])
        expected_attrs = [
            "DF_obs", "Grid_spec", "Likelihood", "Plot_Config", "Plotter",
            "Posterior", "Prior", "deredden", "obs_flux_arrs",
            "obs_flux_err_arrs", "propagate_dered_errors"]
        self.assertTrue(public_attrs == expected_attrs, msg=str(public_attrs))

    def test_NB_nd_pdf_attributes(self):
        """ Check that the list of public attributes is what is documented """
        public_attrs = sorted([a for a in dir(self.Result.Posterior)
                               if not a.startswith("_")])
        expected_attrs = sorted(["DF_estimates", "Grid_spec", "best_model",
            "marginalised_1D", "marginalised_2D", "name", "nd_pdf", "show"])
        self.assertTrue(public_attrs == expected_attrs, msg=str(public_attrs))

    def test_best_model_dict_keys(self):
        """ Check that the list of best model keys is what is documented """
        expected_keys = sorted(["table", "chi2", "extinction_Av_mag",
                                "grid_location"])
        key_list = sorted(list(self.Result.Posterior.best_model.keys()))
        self.assertEqual(key_list, expected_keys)

    @classmethod
    def tearDownClass(cls):
        """ Remove the output when tests in this class have finished """
        if clean_up:
            if hasattr(cls, "posterior_plot"):
                os.remove(cls.posterior_plot)
            if hasattr(cls, "best_model_table"):
                os.remove(cls.best_model_table)
###############################################################################
class Test_default_initialisation(unittest.TestCase):
    """
    Test that we can initialise fully default HII and NLR NB models
    """
    # NOTE(review): this class currently has no setUp or test methods, so it
    # exercises nothing - confirm whether the body was lost (e.g. a
    # setUpClass constructing NB_Model("HII") and NB_Model("NLR")).
###############################################################################
class Test_real_data_with_dereddening(unittest.TestCase):
    """
    Test some real data, from the S7 nuclear spectrum for NGC4691, a star-
    forming galaxy. Include a line ratio prior and dereddening in NebulaBayes.
    Test saving plots for all 3 Bayes Theorem PDFs.
    """
    longMessage = True  # Append messages to existing message

    lines = ["OII3726_29", "Hgamma", "OIII4363", "Hbeta", "OIII5007",
             "NI5200", "OI6300", "Halpha", "NII6583", "SII6716", "SII6731"]
    obs_fluxes = [1.22496, 0.3991, 0.00298, 1.0, 0.44942,
                  0.00766, 0.02923, 4.25103, 1.65312, 0.45598, 0.41482]
    obs_errs = [0.00303, 0.00142, 0.00078, 0.0017, 0.0012,
                0.00059, 0.00052, 0.00268, 0.00173, 0.00102, 0.00099]
    obs_wavelengths = [3727.3, 4340.5, 4363.2, 4861.3, 5006.8,
                       5200.3, 6300.3, 6562.8, 6583.2, 6716.4, 6730.8]

    # NOTE(review): the setUpClass that should create cls.NB_Model_1,
    # cls.Result and the cls.prior_plot / likelihood_plot / posterior_plot /
    # estimate_table output paths used below is missing - restore it from
    # version control.  A stray @classmethod decorator left on
    # test_parameter_estimates (apparently from the removed setUpClass) has
    # been deleted: unittest test methods must be plain instance methods.

    def test_parameter_estimates(self):
        """
        Regression check on parameter estimates.
        """
        ests = self.Result.Posterior.DF_estimates["Estimate"]  # pandas Series
        self.assertTrue(np.isclose(ests["12 + log O/H"], 8.73615, atol=0.0001),
                        msg=str(ests["12 + log O/H"]))
        self.assertTrue(np.isclose(ests["log P/k"], 6.82636, atol=0.0001),
                        msg=str(ests["log P/k"]))
        self.assertTrue(np.isclose(ests["log U"], -2.84848, atol=0.0001),
                        msg=str(ests["log U"]))

    def test_estimate_bounds_checks(self):
        """
        Ensure that the "checking columns" in the estimate table are all
        showing that the estimates are good.
        """
        DF = self.Result.Posterior.DF_estimates  # Parameter estimate table
        for p in ["12 + log O/H", "log P/k", "log U"]:
            for col in ["Est_in_CI68?", "Est_in_CI95?"]:
                self.assertTrue(DF.loc[p, col] == "Y")
            for col in ["Est_at_lower?", "Est_at_upper?", "P(lower)>50%?",
                        "P(upper)>50%?"]:
                self.assertTrue(DF.loc[p, col] == "N")
            self.assertTrue(DF.loc[p, "n_local_maxima"] == 1)

    def test_chi2(self):
        """
        Regression check that chi2 doesn't change
        """
        chi2 = self.Result.Posterior.best_model["chi2"]
        self.assertTrue(np.isclose(chi2, 2568.7, atol=0.2), msg=str(chi2))

    def test_interp_order(self):
        """
        Ensure the correct interpolation order (linear) is preserved
        """
        self.assertTrue(self.NB_Model_1.Interpd_grids.interp_order == 1)

    def test_all_zero_prior(self):
        """
        We permit an all-zero prior - check that it works (a warning should
        be printed).
        """
        shape = self.NB_Model_1.Interpd_grids.shape
        self.Result1 = self.NB_Model_1(self.obs_fluxes, self.obs_errs,
                                       self.lines, prior=np.zeros(shape))

    @classmethod
    def tearDownClass(cls):
        """ Remove the output files when tests in this class have finished """
        if clean_up:
            files = [cls.prior_plot, cls.likelihood_plot, cls.posterior_plot,
                     cls.estimate_table]
            for file_i in files:
                os.remove(file_i)
###############################################################################
class Test_real_data_with_cubic_interpolation(unittest.TestCase):
    """
    Very similar to the previous test class, but we use cubic interpolation
    instead of linear interpolation when interpolating model flux grids.
    We also test resetting the logging level after using the "verbosity" kwarg.
    """
    longMessage = True  # Append messages to existing message

    lines = ["OII3726_29", "Hgamma", "OIII4363", "Hbeta", "OIII5007",
             "NI5200", "OI6300", "Halpha", "NII6583", "SII6716", "SII6731"]
    obs_fluxes = [1.22496, 0.3991, 0.00298, 1.0, 0.44942,
                  0.00766, 0.02923, 4.25103, 1.65312, 0.45598, 0.41482]
    obs_errs = [0.00303, 0.00142, 0.00078, 0.0017, 0.0012,
                0.00059, 0.00052, 0.00268, 0.00173, 0.00102, 0.00099]
    obs_wavelengths = [3727.3, 4340.5, 4363.2, 4861.3, 5006.8,
                       5200.3, 6300.3, 6562.8, 6583.2, 6716.4, 6730.8]

    # NOTE(review): the setUpClass that should create cls.NB_Model_1,
    # cls.Result, cls.old_log_level / cls.test_log_level and the output
    # paths used below is missing - restore it from version control.  A
    # stray @classmethod decorator left on test_parameter_estimates has
    # been deleted: unittest test methods must be plain instance methods.

    def test_parameter_estimates(self):
        """
        Regression check on parameter estimates. Estimates for P and U are
        slightly different with the cubic interpolation.
        """
        ests = self.Result.Posterior.DF_estimates["Estimate"]  # pandas Series
        self.assertTrue(np.isclose(ests["12 + log O/H"], 8.73615, atol=0.0001),
                        msg=str(ests["12 + log O/H"]))
        self.assertTrue(np.isclose(ests["log P/k"], 6.86047, atol=0.0001),
                        msg=str(ests["log P/k"]))
        self.assertTrue(np.isclose(ests["log U"], -2.82828, atol=0.0001),
                        msg=str(ests["log U"]))

    def test_chi2(self):
        """
        Regression check that chi2 doesn't change
        """
        chi2 = self.Result.Posterior.best_model["chi2"]
        self.assertTrue(np.isclose(chi2, 2522.7, atol=0.2), msg=str(chi2))

    def test_interp_order(self):
        """
        Ensure the correct interpolation order (cubic) is preserved
        """
        self.assertTrue(self.NB_Model_1.Interpd_grids.interp_order == 3)

    def test_resetting_log_level(self):
        """
        Ensure that after using the verbosity keyword, the NB_logger
        level is unchanged (i.e. was reset to its previous value)
        """
        self.assertEqual(NebulaBayes.NB_logger.level, self.test_log_level)

    def test_dereddening_result_attributes(self):
        """Ensure dereddening attributes added to Result object."""
        self.assertTrue(self.Result.deredden)
        self.assertTrue(self.Result.propagate_dered_errors)

    @classmethod
    def tearDownClass(cls):
        """ Remove output files when tests in this class have finished,
        and undo change to logging level. """
        NebulaBayes.NB_logger.setLevel(cls.old_log_level)
        if clean_up:
            files = [cls.prior_plot, cls.likelihood_plot, cls.posterior_plot,
                     cls.estimate_table]
            for file_i in files:
                os.remove(file_i)
###############################################################################
class Test_upper_bounds_1D(unittest.TestCase):
    """
    Test the treatment of upper bounds. We use a 1D grid.
    """
    longMessage = True  # Append messages to existing message
    lines = ["line1", "line2", "line3", "line4", "line5", "line6"]
    # The -np.inf fluxes appear to mark upper-bound measurements, with the
    # bound carried in obs_errs - confirm against the NebulaBayes docs.
    obs_fluxes = [ 1.0,  8.0, 10.2, -np.inf, -np.inf, -np.inf]
    obs_errs =   [0.05,  0.3,  3.1,     0.3,     0.4,     0.2]
    pred_fluxes = [1.0,  5.0, 10.2,     0.1,     0.4,     0.4]
    # The pred_fluxes are at the "peak" of the grid, that we'll input to NB.

    # NOTE(review): the setUpClass that should build the 1D grid and create
    # cls.Result and cls.expected_p0 is missing - restore it from version
    # control.  A stray @classmethod decorator left on
    # test_parameter_estimates has been deleted: unittest test methods must
    # be plain instance methods.

    def test_parameter_estimates(self):
        """
        Regression test - check the parameter estimate is as expected.
        """
        DF_est = self.Result.Posterior.DF_estimates  # DataFrame
        p0_est = DF_est.loc["p0", "Estimate"]
        self.assertTrue(np.isclose(p0_est, self.expected_p0, atol=1))

    @classmethod
    def tearDownClass(cls):
        """ Remove the output files when tests in this class have finished """
        if clean_up:
            files = [os.path.join(TEST_DIR, l +
                     "_PDF_contributes_to_likelihood.pdf") for l in cls.lines]
            for file_i in files:
                os.remove(file_i)
###############################################################################
class Test_all_zero_likelihood(unittest.TestCase):
    """
    Test forcing a log_likelihood of all -inf, so the likelihood is all zero.
    """
    longMessage = True  # Append messages to existing message
    lines = ["Halpha", "Hbeta", "OIII4363", "OIII5007", "NII6583"]
    # Enormous fluxes (~1e250) drive the log-likelihood to -inf everywhere
    obs_fluxes = [1e250, 1, 1.2e250, 1.2e250, 1e250]
    obs_errs = [0.004, 1, 0.005, 0.003, 0.002]

    # NOTE(review): the setUpClass that should run NebulaBayes and set
    # cls.Result is missing - restore it from version control.  A stray
    # @classmethod decorator left on test_likelihood_all_zero has been
    # deleted: unittest test methods must be plain instance methods.

    def test_likelihood_all_zero(self):
        """
        Regression test - check likelihood is all zero.
        """
        likelihood = self.Result.Likelihood.nd_pdf
        self.assertTrue(np.all(likelihood == 0))

    def test_posterior_all_zero(self):
        """
        Regression test - check posterior is all zero.
        """
        posterior = self.Result.Posterior.nd_pdf
        self.assertTrue(np.all(posterior == 0))
###############################################################################
class Test_data_that_matches_models_poorly(unittest.TestCase):
    """
    Test inputting fluxes and errors that are very poorly fit by the entire
    model grid. In this case most of the likelihood is zero, and using a
    reasonable-ish prior gives a posterior that is zero everywhere.
    """
    longMessage = True  # Append messages to existing message
    lines = ["Halpha", "Hbeta", "OIII4363", "OIII5007", "NII6583"]
    obs_fluxes = [3.1, 1, 1.8, 5.1, 1.2]
    obs_errs = [0.01, 1, 0.01, 0.01, 0.01]
    # Note the very small errors

    # NOTE(review): the setUpClass that should run NebulaBayes and set
    # cls.Result is missing - restore it from version control.  A stray
    # @classmethod decorator left on test_likelihood_mostly_zero has been
    # deleted: unittest test methods must be plain instance methods.

    def test_likelihood_mostly_zero(self):
        """
        Regression test - check likelihood is mostly zero.
        """
        likelihood = self.Result.Likelihood.nd_pdf
        self.assertTrue(np.sum(likelihood != 0) < 65)

    def test_posterior_all_zero(self):
        """
        Regression test - check posterior is all zero.
        """
        posterior = self.Result.Posterior.nd_pdf
        self.assertTrue(np.all(posterior == 0))
###############################################################################
class Test_NB_nd_pdf(unittest.TestCase):
    """
    Test the methods in the NB_nd_pdf class
    """
    # We want to test NB_nd_pdf attributes; some of "DF_estimates", "Grid_spec",
    # "best_model", marginalised_1D", "marginalised_2D", "name", "nd_pdf"
    # "best_model" keys: "table", "chi2", "extinction_Av_mag", "grid_location"

    # NOTE(review): the setUpClass that should build cls.NB_Model_1,
    # cls.NB_nd_pdf_1, cls.raw_pdf, cls.marginalised_x / marginalised_y and
    # cls.peak_ind_x / peak_ind_y is missing - restore it from version
    # control.  A stray @classmethod decorator left on test_best_model_table
    # has been deleted: unittest test methods must be plain instance methods.

    def test_best_model_table(self):
        """ Check that a single table value matches the desired gridpoint.
        This test would fail on NebulaBayes 0.9.7 and earlier """
        best_coords = (self.peak_ind_y, self.peak_ind_x)
        normed_grids = self.NB_Model_1.Interpd_grids.grids["Hbeta_norm"]
        model_OIII = normed_grids["OIII5007"][best_coords]
        DF_best = self.NB_nd_pdf_1.best_model["table"]
        table_model_OIII = DF_best.loc["OIII5007", "Model"]
        self.assertEqual(table_model_OIII, model_OIII)

    def test_marginalised_1D_pdf(self):
        """ Check that the marginalised 1D pdfs are as expected """
        m_1D = self.NB_nd_pdf_1.marginalised_1D
        self.assertEqual(len(m_1D), 2)
        # Scale the pdfs to compare despite the m_1D PDFs being normalised
        m_1D["log U"] /= m_1D["log U"].max()
        m_1D["12 + log O/H"] /= m_1D["12 + log O/H"].max()
        expected_x_pdf = self.marginalised_x / self.marginalised_x.max()
        expected_y_pdf = self.marginalised_y / self.marginalised_y.max()
        self.assertTrue(np.allclose(m_1D["log U"], expected_x_pdf,
                                    atol=1e-12, rtol=0))
        self.assertTrue(np.allclose(m_1D["12 + log O/H"], expected_y_pdf,
                                    atol=1e-12, rtol=0))
        # May have swapped x and y, but it's all symmetric anyway...

    def test_nd_pdf(self):
        """
        Check that the normalised nd_pdf matches the input raw nd_pdf. We
        avoid doing a proper normalisation by comparing with a simple scaling.
        """
        pdf = self.NB_nd_pdf_1.nd_pdf
        scaled_raw_nd_pdf = self.raw_pdf / self.raw_pdf.max()
        self.assertTrue(np.array_equal(pdf / pdf.max(), scaled_raw_nd_pdf))
###############################################################################
class Test_dereddening_changes_results(unittest.TestCase):
    """
    Test that using dereddening changes all three PDFs (when obs data are used
    in the prior). There previously was a bug where the obs data in the line
    ratio priors weren't dereddened.
    Also test that PDFs change when errors from the Balmer decrement are
    propagated into the dereddened line fluxes.
    """
    # NOTE(review): the setUpClass that should create cls.Result_dered1,
    # cls.Result_dered2 and cls.Result_nodered is missing - restore it from
    # version control.  A stray @classmethod decorator left on
    # test_priors_differ has been deleted: unittest test methods must be
    # plain instance methods.

    def test_priors_differ(self):
        """ Check that dereddened data was used in line ratio prior, when
        requested. This test fails on NebulaBayes 0.9.7 """
        pdf_dered1 = self.Result_dered1.Prior.nd_pdf
        pdf_nodered = self.Result_nodered.Prior.nd_pdf
        max_diff1 = np.max(np.abs(pdf_dered1 - pdf_nodered))
        self.assertTrue(max_diff1 > 0.01, str(max_diff1))
        # Test uncertainty propagation has an effect
        pdf_dered2 = self.Result_dered2.Prior.nd_pdf
        max_diff_u = np.max(np.abs(pdf_dered1 - pdf_dered2))
        self.assertTrue(max_diff_u > 0.01, str(max_diff_u))

    def test_propagate_dered_errors(self):
        """Check propagate_dered_errors values on Result object"""
        # Checks default value of False
        self.assertFalse(self.Result_dered1.propagate_dered_errors)
        self.assertTrue(self.Result_dered2.propagate_dered_errors)
###############################################################################
class Test_likelihood_lines_keyword(unittest.TestCase):
    """
    Test inputting fluxes and errors that aren't used in the likelihood, and
    test that these lines may be used in a prior.
    """
    longMessage = True  # Append messages to existing message
    lines = ["Halpha", "Hbeta", "OIII4363", "OIII5007", "NII6583"]
    obs_fluxes = [3.1, 1, 1.8, 5.1, 1.2]
    obs_errs = [0.01, 1, 0.01, 0.01, 0.01]
    exclude_lines = ["Halpha", "OIII5007"]  # Excluded from the likelihood

    # NOTE(review): the setUpClass that should define cls.NB_Model_1,
    # cls.DF_best, cls.likelihood_lines, cls.kwargs and cls.estimate_Z is
    # missing - restore it from version control.  A stray @classmethod
    # decorator left on the first test method has been deleted: unittest
    # test methods must be plain instance methods.

    def test_non_likelihood_lines_in_best_model_table(self):
        """
        Regression test - lines not included in likelihood calculation should
        still appear in the "best model" table.
        """
        self.assertTrue(all(l in self.DF_best.index for l in self.exclude_lines))

    def test_best_model_table_fields(self):
        """
        Regression test - check fields of best model table (we test for the
        case of no dereddening; field names are different with dereddening).
        """
        correct_fields = ["In_lhood?", "Obs", "Model", "Resid_Stds", "Obs_S/N"]
        t_fields = self.DF_best.columns.tolist()
        self.assertTrue(t_fields == correct_fields, t_fields)

    def test_In_lhood_field_in_best_model_table(self):
        """
        Regression test - the "In_lhood?" field in the best model table should
        correctly identify if a line was used in the likelihood.
        """
        correct = [("N" if l in self.exclude_lines else "Y") for l in self.lines]
        self.assertTrue(self.DF_best["In_lhood?"].values.tolist() == correct)

    def test_permuting_input_line_order(self):
        """
        Regression test - the order of the input lines should not affect the
        results. There was a real bug introduced with the "likelihood_lines"
        feature - this test fails on NB version 0.9.6 and 0.9.7!
        """
        n = len(self.lines)
        for i, ind_tuple in enumerate(itertools.permutations(range(n))):
            # There are 5! = 120 permutations, so only check one in five:
            if i % 5 != 2:
                continue
            obs_fluxes = [self.obs_fluxes[j] for j in ind_tuple]
            obs_errs = [self.obs_errs[j] for j in ind_tuple]
            lines = [self.lines[j] for j in ind_tuple]
            Result_i = self.NB_Model_1(obs_fluxes, obs_errs, lines,
                                       likelihood_lines=self.likelihood_lines,
                                       **self.kwargs)
            P_i = Result_i.Posterior
            estimate_Z_i = P_i.DF_estimates.loc["12 + log O/H", "Estimate"]
            self.assertEqual(estimate_Z_i, self.estimate_Z)
###############################################################################
class Test_raising_errors(unittest.TestCase):
    """
    Test raising errors on bad inputs
    """
    # (A stray @classmethod decorator on the test method was removed:
    # unittest test methods must be plain instance methods.)
    def test_bad_grid_parameter_with_too_few_unique_values(self):
        """
        Test correct error is raised if there are too few unique values for
        a grid parameter.
        """
        DF = pd.DataFrame({"p1": [4, 4, 4, 4], "p2": [1, 2, 3, 4],
                           "l2": [5, 6, 7, 8]})
        # unittest.TestCase has no "assertRaisesRE" method; the standard
        # name is assertRaisesRegex (assertRaisesRegexp on Python 2)
        self.assertRaisesRegex(ValueError, "3 unique values are required",
                               NB_Model, DF, ["p1", "p2"])
###############################################################################
def interactive_plot_tests():
    """
    Manual check of NebulaBayes' interactive plotting - not run by the
    automated suite.  Invoke it yourself with:
        from test_NB import interactive_plot_tests
        interactive_plot_tests()
    """
    line_names = ["OII3726_29", "Hgamma", "OIII4363", "Hbeta", "OIII5007",
                  "NI5200", "OI6300", "Halpha", "NII6583", "SII6716", "SII6731"]
    fluxes = [1.22496, 0.3991, 0.00298, 1.0, 0.44942,
              0.00766, 0.02923, 4.25103, 1.65312, 0.45598, 0.41482]
    errors = [0.00303, 0.00142, 0.00078, 0.0017, 0.0012,
              0.00059, 0.00052, 0.00268, 0.00173, 0.00102, 0.00099]
    wavelengths = [3727.3, 4340.5, 4363.2, 4861.3, 5006.8,
                   5200.3, 6300.3, 6562.8, 6583.2, 6716.4, 6730.8]
    Model = NB_Model("HII", grid_params=None, line_list=line_names,
                     interpd_grid_shape=[50, 70, 50], grid_error=0.35)
    plot_config = {"table_on_plot": True, "legend_fontsize": 5}
    Result = Model(fluxes, errors, line_names,
                   deredden=True, propagate_dered_errors=True,
                   obs_wavelengths=wavelengths,
                   prior=[("SII6716", "SII6731")],
                   plot_configs=[plot_config] * 4)
    # Exercise both entry points for producing an interactive plot
    Result.Plotter.interactive(Result.Posterior)
    Result.Prior.show(Result.Plotter)
###############################################################################
# Ideas for more tests:
# Check that parameter estimates are inside the CIs, and check the flags for this
# Test normalising to different lines repeatedly, and checking that the
# unnecessary interpolated grids are deleted.
# Check coverage of the code, to see what isn't being run?
if __name__ == "__main__":
    # Run the full suite; a single test class may be selected on the command
    # line (see the module docstring), e.g.
    #     python3 test_NB.py Test_real_data_with_dereddening
    print("\nTesting NebulaBayes version {0} ...\n".format(__version__))
    unittest.main(verbosity=2)
| 44.65018 | 88 | 0.583372 | from __future__ import print_function, division
from collections import OrderedDict as OD
import itertools
import os
import unittest
from astropy.io import fits # FITS file I/O
from astropy.table import Table # Used in converting to pandas DataFrame
import numpy as np
import pandas as pd
import NebulaBayes
from NebulaBayes import NB_Model, __version__
from NebulaBayes.NB1_Process_grids import RegularGridResampler
from NebulaBayes.NB3_Bayes import NB_nd_pdf
"""
Test suite to test NebulaBayes. Mostly functional and regression tests, with
some unit tests as well.
Works with Python 2 and Python 3.
To run only a particular test, type (e.g.):
python3 test_NB.py Test_real_data_with_dereddening
This test suite can be run in-place in NebulaBayes/tests under the NebulaBayes
installation directory (but use the correct python version for the
installation location).
Adam D. Thomas 2017
"""
# Global test configuration:
clean_up = True # Delete test output files after running?
# Save test outputs in NebulaBayes/tests/test_outputs
THIS_FILE_DIR = os.path.dirname(os.path.realpath(__file__))
TEST_DIR = os.path.join(THIS_FILE_DIR, "test_outputs")
###############################################################################
# Helper functions
def build_grid(param_range_dict, line_peaks_dict, n_gridpts_list, std_frac=0.25):
    """
    Initialise a grid - create a pandas DataFrame table.  Fluxes for each
    emission line form a Gaussian ball around a specified point.
    param_range_dict: Ordered dict mapping parameter names to a tuple giving
                      the parameter minimum and maximum
    line_peaks_dict:  Ordered dict mapping line names to the location
                      (as a tuple) of the peak of the line flux in the grid,
                      in gridpoint index coordinates (from zero)
    std_frac:         Fraction of the range in each dimension used for the std
    n_gridpts_list is a list of the number of gridpoints in each dimension.
    Returns a DataFrame with one column per parameter and per emission line.
    """
    param_names = list(param_range_dict.keys())
    param_val_arrs = [np.linspace(r[0], r[1], n) for r, n in zip(
                      param_range_dict.values(), n_gridpts_list)]
    line_names = list(line_peaks_dict.keys())
    std = np.array([(r[1] - r[0]) * std_frac for r in param_range_dict.values()])

    # Convert each line's peak gridpoint indices into parameter values
    line_peak_vals = {}
    for line, peak_inds in line_peaks_dict.items():
        line_peak = []
        for p, peak_ind, val_arr in zip(param_names, peak_inds, param_val_arrs):
            p_min, dp = val_arr[0], np.diff(val_arr)[0]
            line_peak.append(p_min + peak_ind * dp)
        line_peak_vals[line] = line_peak  # An ND list corresponding to peak_inds

    # One Gaussian flux function per line.  The peak is bound as a default
    # argument: a plain closure would late-bind the loop variable, making
    # every line incorrectly share the *last* line's peak.
    flux_fns = {}
    for l in line_peaks_dict:
        def gaussian(x, peak=np.array(line_peak_vals[l])):
            # N.B. x, peak and std are all ND vectors
            distance = np.sqrt(np.sum(((x - peak) / std)**2))
            return np.exp(-distance / 2)
        flux_fns[l] = gaussian

    # Make DataFrame table:
    columns = param_names + line_names
    n_gridpts = np.prod(n_gridpts_list)  # np.product was removed in NumPy 2.0
    OD_for_DF = OD([(c, np.full(n_gridpts, np.nan)) for c in columns])
    DF_grid = pd.DataFrame(OD_for_DF)

    # Iterate over rows, filling in the table
    for i, p_tuple in enumerate(itertools.product(*param_val_arrs)):
        # Add parameter values into their columns:
        for p, n in zip(p_tuple, param_names):
            DF_grid.loc[i, n] = p
        # Add "model" line fluxes into their columns:
        for l in line_names:
            DF_grid.loc[i, l] = flux_fns[l](np.array(p_tuple))

    return DF_grid
def extract_grid_fluxes_i(DF, p_name_ind_map, line_names):
    """
    Extract emission line fluxes from a grid (represented as a DataFrame) by
    inputting gridpoint indices and taking the fluxes at the exactly matching
    gridpoint.
    DF:             Grid table, one row per gridpoint
    p_name_ind_map: Mapping of parameter name -> gridpoint index (from zero)
    line_names:     Flux columns to extract
    Returns a list of fluxes, one per requested line.
    """
    val_arrs = {p: np.unique(DF[p].values) for p in p_name_ind_map}
    # Sanity check: the table must hold a full regular grid
    # (np.prod, not np.product: the latter was removed in NumPy 2.0)
    assert len(DF) == np.prod([len(v) for v in val_arrs.values()])
    where = np.ones(len(DF), dtype=bool)
    for p, ind in p_name_ind_map.items():
        where &= (DF.loc[:, p] == val_arrs[p][ind])
    assert np.sum(where) == 1  # Exactly one gridpoint must match
    return [DF[line].values[where][0] for line in line_names]
###############################################################################
# Helper class
class Base_2D_Grid_2_Lines(unittest.TestCase):
    """
    Base class holding setup and cleanup methods to make a 2D grid with only 2
    emission lines, and using a 2D Gaussian to make the grid. There are only
    two lines, but one has fluxes set to all 1 and is just for normalisation.
    """
    # Grid configuration shared by all subclasses:
    params = ["p1", "p2"]
    param_range_dict = OD( [("p1", (-5, 3)), ("p2", (1.2e6, 15e6))] )
    n_gridpts_list = (11, 9) # Number of gridpoints in each dimension
    interpd_shape = (50, 45)
    lines = ["L1", "L2"] # Line names
    line_peaks = [8, 5] # Gridpoint indices from zero

    @classmethod
    def setUpClass(cls):
        """ Make grid and run NebulaBayes to obtain the result object """
        line_peaks_dict = OD([(l,cls.line_peaks) for l in cls.lines])
        cls.DF = build_grid(cls.param_range_dict, line_peaks_dict, cls.n_gridpts_list)
        cls.val_arrs = OD([(p,np.unique(cls.DF[p].values)) for p in cls.params])
        cls.DF.loc[:,"L1"] = 1. # We'll normalise by this line
        # Write the grid to a CSV so NB_Model is constructed from a file path
        cls.grid_file = os.path.join(TEST_DIR, cls.__name__ + "_grid.csv")
        cls.DF.to_csv(cls.grid_file, index=False)
        cls.NB_Model_1 = NB_Model(cls.grid_file, cls.params, cls.lines,
                                  interpd_grid_shape=cls.interpd_shape)

    @classmethod
    def tearDownClass(cls):
        """ Remove the output when tests in this class have finished """
        if clean_up:
            os.remove(cls.grid_file)
            # posterior_plot is only created by some subclasses
            if hasattr(cls, "posterior_plot"):
                os.remove(cls.posterior_plot)
###############################################################################
class Test_Obs_from_Peak_Gridpoint_2D_Grid_2_Lines(Base_2D_Grid_2_Lines):
    """
    Test for a grid from Base_2D_Grid_2_Lines: Take a gridpoint that is at
    the peak of the Gaussian ball of emission line fluxes, and check that
    treating these fluxes as observations leads to correct estimates from
    NebulaBayes.
    """
    test_gridpoint = [8, 5] # From zero. [11, 9] total gridpoints in each dim

    @classmethod
    def setUpClass(cls):
        """ Run NebulaBayes on fluxes taken from the chosen peak gridpoint """
        super(Test_Obs_from_Peak_Gridpoint_2D_Grid_2_Lines, cls).setUpClass()
        test_pt = OD(zip(cls.params, cls.test_gridpoint)) # Map params to gridpt indices
        obs_fluxes = extract_grid_fluxes_i(cls.DF, test_pt, ["L1", "L2"])
        obs_errors = [f / 7. for f in obs_fluxes]
        cls.posterior_plot = os.path.join(TEST_DIR, cls.__name__ + "_posterior.pdf")
        kwargs = {"posterior_plot": cls.posterior_plot,
                  "line_plot_dir": TEST_DIR, "norm_line": "L1"}
        cls.Result = cls.NB_Model_1(obs_fluxes, obs_errors, cls.lines, **kwargs)

    def test_output_deredden_flag(self):
        """ Dereddening should be off by default """
        self.assertTrue(self.Result.deredden is False)

    def test_parameter_estimates(self):
        """ Ensure the parameter estimates are as expected """
        DF_est = self.Result.Posterior.DF_estimates
        self.assertTrue(all(p in DF_est.index for p in self.params))
        # Tolerance for distance between gridpoint we chose and the estimate:
        grid_sep_frac = 0.1 # Allowed fraction of distance between gridpoints
        for p, test_ind in zip(self.params, self.test_gridpoint):
            tol = np.diff(self.val_arrs[p])[0] * grid_sep_frac
            value = self.val_arrs[p][test_ind] # Expected parameter value
            est = DF_est.loc[p, "Estimate"] # NebulaBayes estimate
            self.assertTrue(np.isclose(est, value, atol=tol))

    def test_raw_Grid_spec(self):
        """ Ensure the raw grid spec is as expected """
        RGrid_spec = self.NB_Model_1.Raw_grids
        self.assertEqual(RGrid_spec.param_names, self.params)
        self.assertEqual(RGrid_spec.ndim, len(self.params))
        self.assertEqual(RGrid_spec.shape, self.n_gridpts_list)
        self.assertEqual(RGrid_spec.n_gridpoints, np.product(self.n_gridpts_list))
        for a1, a2 in zip(RGrid_spec.param_values_arrs, self.val_arrs.values()):
            self.assertTrue(np.allclose(np.asarray(a1), np.asarray(a2)))

    def test_interpolated_Grid_spec(self):
        """ Ensure the interpolated grid spec is as expected """
        IGrid_spec = self.Result.Grid_spec
        self.assertEqual(IGrid_spec.param_names, self.params)
        self.assertEqual(IGrid_spec.param_display_names, self.params)
        self.assertEqual(IGrid_spec.shape, tuple(self.interpd_shape))
        self.assertEqual(IGrid_spec.n_gridpoints, np.product(self.interpd_shape))

    @classmethod
    def tearDownClass(cls):
        """ Remove the output files when tests in this class have finished """
        super(Test_Obs_from_Peak_Gridpoint_2D_Grid_2_Lines,cls).tearDownClass()
        if clean_up:
            # Per-line PDF plots written because line_plot_dir was given
            files = [os.path.join(TEST_DIR, l +
                     "_PDF_contributes_to_likelihood.pdf") for l in ["L1", "L2"]]
            for file_i in files:
                os.remove(file_i)
###############################################################################
class Test_Obs_from_nonPeak_Gridpoint_2D_Grid_2_Lines(Base_2D_Grid_2_Lines):
    """
    Test for a grid from Base_2D_Grid_2_Lines: Take a gridpoint that is NOT at
    the peak of the Gaussian ball of emission line fluxes.
    Note that we don't check the values in the posterior or parameter
    estimates - there isn't an obvious way to do this here.
    We also test that a numpy array prior is accepted.
    """
    longMessage = True # Append messages to existing message
    test_gridpoint = [6, 4] # From zero. [11, 9] total gridpoints in each dim,
    # the line peak is at line_peaks = [8, 5]

    @classmethod
    def setUpClass(cls):
        """ Run NebulaBayes on fluxes from an off-peak gridpoint, with a
        flat numpy-array prior """
        super(Test_Obs_from_nonPeak_Gridpoint_2D_Grid_2_Lines, cls).setUpClass()
        test_pt = OD(zip(cls.params, cls.test_gridpoint)) # Map params to gridpt indices
        obs_fluxes = extract_grid_fluxes_i(cls.DF, test_pt, ["L1", "L2"])
        obs_errors = [f / 7. for f in obs_fluxes]
        cls.posterior_plot = os.path.join(TEST_DIR, cls.__name__ + "_posterior.pdf")
        kwargs = {"posterior_plot": cls.posterior_plot, "norm_line": "L1",
                  "prior": np.ones(cls.NB_Model_1.Interpd_grids.shape)
                  }
        cls.Result = cls.NB_Model_1(obs_fluxes, obs_errors, cls.lines, **kwargs)

    def test_output_deredden_flag(self):
        """ Dereddening should be off by default """
        self.assertTrue(self.Result.deredden is False)

    def test_output_chi2_positive(self):
        """ The best-model chi2 should be positive """
        chi2 = self.Result.Posterior.best_model["chi2"]
        self.assertTrue(chi2 > 0, msg="chi2 is " + str(chi2))

    def test_output_extinction_is_NA(self): # Since we didn't deredden
        """ Extinction should be flagged "NA" when deredden is False """
        Av = self.Result.Posterior.best_model["extinction_Av_mag"]
        self.assertTrue(Av == "NA (deredden is False)")

    def test_parameters_in_output(self):
        """ Check all parameters are found in output """
        DF_est = self.Result.Posterior.DF_estimates
        self.assertTrue(all(p in DF_est.index for p in self.params))

    # Posterior is shaped like a donut. Check for a single local min?
###############################################################################
# Test the NebulaBayes ND linear interpolation
class test_linear_interpolation_1D(unittest.TestCase):
    """ Check RegularGridResampler output for a simple 1D grid """
    def test_linear_interpolation_simple_1D(self):
        """ Resample 3 gridpoints to 5 and check the parameter values and
        the linearly interpolated array """
        R1 = RegularGridResampler([[10, 20, 30]], [5])
        R1_pout, R1_arr = R1(np.array([-100, -200, -300]))
        # Use unittest assertions: bare "assert" statements are silently
        # stripped when Python runs with optimisation ("python -O")
        self.assertTrue(np.array_equal(
            R1_pout[0], np.array([10., 15., 20., 25., 30.])))
        self.assertTrue(np.array_equal(
            R1_arr, np.array([-100., -150., -200., -250., -300.])))
class test_linear_interpolation_2D(unittest.TestCase):
    """ Check RegularGridResampler output for a simple 2D grid """
    def test_linear_interpolation_simple_2D(self):
        """ Resample a 3x2 grid to 5x2; only the first axis is refined """
        R2 = RegularGridResampler([[10, 20, 30], [8e4, 9e4]], [5, 2])
        R2_pout, R2_arr = R2(np.array([[-100, -2000],
                                       [-300, -4000],
                                       [-500, -6000]]))
        # Use unittest assertions: bare "assert" is stripped under "python -O"
        self.assertTrue(np.array_equal(
            R2_pout[0], np.array([10., 15., 20., 25., 30.])))
        self.assertTrue(np.array_equal(R2_pout[1], np.array([8e4, 9e4])))
        self.assertTrue(np.array_equal(R2_arr, np.array([[-100., -2000.],
                                                         [-200., -3000.],
                                                         [-300., -4000.],
                                                         [-400., -5000.],
                                                         [-500., -6000.]])))
class test_linear_interpolation_3D(unittest.TestCase):
    """ Check RegularGridResampler output for a simple 3D grid """
    def test_linear_interpolation_simple_3D(self):
        """ Resample a 2x3x2 grid to 2x5x2; only the middle axis is refined,
        so both slices along the first axis are identical """
        R3 = RegularGridResampler([[-2, -1], [10, 20, 30], [8e4, 9e4]],
                                  [2, 5, 2])
        R3_pout, R3_arr = R3(np.array([[[-100, -2000],
                                        [-300, -4000],
                                        [-500, -6000]],
                                       [[-100, -2000],
                                        [-300, -4000],
                                        [-500, -6000]]]))
        # Use unittest assertions: bare "assert" is stripped under "python -O"
        self.assertTrue(np.array_equal(R3_pout[0], np.array([-2, -1])))
        self.assertTrue(np.array_equal(
            R3_pout[1], np.array([10., 15., 20., 25., 30.])))
        self.assertTrue(np.array_equal(R3_pout[2], np.array([8e4, 9e4])))
        expected_slice = np.array([[-100., -2000.],
                                   [-200., -3000.],
                                   [-300., -4000.],
                                   [-400., -5000.],
                                   [-500., -6000.]])
        self.assertTrue(np.array_equal(
            R3_arr, np.array([expected_slice, expected_slice])))
###############################################################################
class Test_1D_grid_and_public_attributes(unittest.TestCase):
    """
    Test that a 1D grid works and gives expected results.
    We use a gaussian 1D "grid", and input a point at the peak into NB to
    ensure NB finds the correct point.
    We also test that a DataFrame grid table is accepted.
    """
    longMessage = True # Append messages to existing message
    @classmethod
    def setUpClass(cls):
        # Make a 1D grid:
        test_gridpoint = 45 # From zero
        cls.test_gridpoint = test_gridpoint
        p_vals = np.linspace(-2, 8, 100)
        cls.p_vals = p_vals
        peak1, peak2, peak3 = p_vals[8], p_vals[60], p_vals[83]
        std1, std2, std3 = 1.1, 1.8, 4.3
        flux_0 = 3.0 * np.ones_like(p_vals)
        # NOTE(review): sqrt(x**2) == |x|, so these profiles are exp(-|x|/2)
        # (Laplacian-shaped) rather than Gaussian as the class docstring
        # says; harmless for the test, but worth confirming the intent.
        flux_1 = 13. * np.exp(-np.sqrt(((p_vals - peak1) / std1)**2) / 2)
        flux_2 = 13. * np.exp(-np.sqrt(((p_vals - peak2) / std2)**2) / 2)
        flux_3 = 21. * np.exp(-np.sqrt(((p_vals - peak3) / std3)**2) / 2)
        cls.lines = ["l0", "l1", "l2", "l3"]
        DF_grid1D = pd.DataFrame({"P0":p_vals, "l0":flux_0, "l1":flux_1,
                                  "l2":flux_2, "l3":flux_3})
        # The "observed" fluxes are the model fluxes at index test_gridpoint,
        # so NB should recover p_vals[test_gridpoint] as its estimate
        obs_fluxes = [x[test_gridpoint] for x in [flux_0,flux_1,flux_2,flux_3]]
        obs_errors = [f / 7. for f in obs_fluxes]
        cls.posterior_plot = os.path.join(TEST_DIR,
                                          cls.__name__ + "_posterior.pdf")
        cls.best_model_table = os.path.join(TEST_DIR,
                                            cls.__name__ + "_best_model.csv")
        cls.NB_Model_1 = NB_Model(DF_grid1D, ["P0"], cls.lines, interp_order=1,
                                  interpd_grid_shape=[300])
        # Also test cubic interpolation for the 1D case:
        cls.NB_Model_2 = NB_Model(DF_grid1D, ["P0"], cls.lines, interp_order=3,
                                  interpd_grid_shape=[300])
        # We test the case-insensitivity of the norm_line, by writing
        # "L0" instead of "l0" here:
        kwargs = {"posterior_plot":cls.posterior_plot, "norm_line":"L0",
                  "best_model_table":cls.best_model_table}
        cls.Result = cls.NB_Model_1(obs_fluxes, obs_errors, cls.lines, **kwargs)
    def test_output_deredden_flag(self):
        # deredden was not requested, so the flag must default to False
        self.assertTrue(self.Result.deredden is False)
    def test_output_chi2_positive(self):
        chi2 = self.Result.Posterior.best_model["chi2"]
        self.assertTrue(chi2 > 0, msg="chi2 is " + str(chi2))
    def test_output_extinction_is_NA(self): # Since we didn't deredden
        Av = self.Result.Posterior.best_model["extinction_Av_mag"]
        self.assertTrue(Av == "NA (deredden is False)")
    def test_parameter_estimate(self):
        """ Ensure the single parameter estimate is as expected """
        DF_est = self.Result.Posterior.DF_estimates
        self.assertTrue("P0" in DF_est.index)
        # Allow the estimate to be within one raw grid step of the true point
        lower = self.p_vals[self.test_gridpoint - 1]
        upper = self.p_vals[self.test_gridpoint + 1]
        est = DF_est.loc["P0", "Estimate"]
        self.assertTrue(lower < est < upper, msg="{0}, {1}, {2}".format(
                                                        lower, est, upper))
    def test_NB_Model_attributes(self):
        """ Check that the list of public attributes is what is documented """
        public_attrs = sorted([a for a in dir(self.NB_Model_1)
                               if not a.startswith("_")])
        expected_attrs = ["Interpd_grids", "Raw_grids"]
        self.assertTrue(public_attrs == expected_attrs, msg=str(public_attrs))
    def test_NB_Result_attributes(self):
        """ Check that the list of public attributes is what is documented """
        public_attrs = sorted([a for a in dir(self.Result)
                               if not a.startswith("_")])
        expected_attrs = [
            "DF_obs", "Grid_spec", "Likelihood", "Plot_Config", "Plotter",
            "Posterior", "Prior", "deredden", "obs_flux_arrs",
            "obs_flux_err_arrs", "propagate_dered_errors"]
        self.assertTrue(public_attrs == expected_attrs, msg=str(public_attrs))
    def test_NB_nd_pdf_attributes(self):
        """ Check that the list of public attributes is what is documented """
        public_attrs = sorted([a for a in dir(self.Result.Posterior)
                               if not a.startswith("_")])
        expected_attrs = sorted(["DF_estimates", "Grid_spec", "best_model",
            "marginalised_1D", "marginalised_2D", "name", "nd_pdf", "show"])
        self.assertTrue(public_attrs == expected_attrs, msg=str(public_attrs))
    def test_best_model_dict_keys(self):
        """ Check that the list of best model keys is what is documented """
        expected_keys = sorted(["table", "chi2", "extinction_Av_mag",
                                "grid_location"])
        key_list = sorted(list(self.Result.Posterior.best_model.keys()))
        self.assertEqual(key_list, expected_keys)
    @classmethod
    def tearDownClass(cls):
        """ Remove the output when tests in this class have finished """
        # clean_up is a module-level switch (defined outside this chunk)
        if clean_up:
            if hasattr(cls, "posterior_plot"):
                os.remove(cls.posterior_plot)
            if hasattr(cls, "best_model_table"):
                os.remove(cls.best_model_table)
###############################################################################
class Test_default_initialisation(unittest.TestCase):
    """
    Test that we can initialise fully default HII and NLR NB models
    """
    def test_default_HII_initialisation(self):
        # Simply constructing the built-in grid model must not raise
        NB_Model("HII")
    def test_default_NLR_initialisation(self):
        NB_Model("NLR")
###############################################################################
class Test_real_data_with_dereddening(unittest.TestCase):
    """
    Test some real data, from the S7 nuclear spectrum for NGC4691, a star-
    forming galaxy. Include a line ratio prior and dereddening in NebulaBayes.
    Test saving plots for all 3 Bayes Theorem PDFs.
    """
    longMessage = True # Append messages to existing message
    # Observed fluxes are normalised to Hbeta == 1 (see obs_fluxes[3])
    lines = ["OII3726_29", "Hgamma", "OIII4363", "Hbeta", "OIII5007",
             "NI5200", "OI6300", "Halpha", "NII6583", "SII6716", "SII6731"]
    obs_fluxes = [1.22496, 0.3991, 0.00298, 1.0, 0.44942,
                  0.00766, 0.02923, 4.25103, 1.65312, 0.45598, 0.41482]
    obs_errs = [0.00303, 0.00142, 0.00078, 0.0017, 0.0012,
                0.00059, 0.00052, 0.00268, 0.00173, 0.00102, 0.00099]
    obs_wavelengths = [3727.3, 4340.5, 4363.2, 4861.3, 5006.8,
                       5200.3, 6300.3, 6562.8, 6583.2, 6716.4, 6730.8]
    @classmethod
    def setUpClass(cls):
        # Output files are written into TEST_DIR and removed in tearDownClass
        cls.prior_plot = os.path.join(TEST_DIR,
                                        cls.__name__ + "_prior.pdf")
        cls.likelihood_plot = os.path.join(TEST_DIR,
                                        cls.__name__ + "_likelihood.pdf")
        # Test saving as a png
        cls.posterior_plot = os.path.join(TEST_DIR,
                                        cls.__name__ + "_posterior.png")
        cls.estimate_table = os.path.join(TEST_DIR,
                                       cls.__name__ + "_parameter_estimates.csv")
        # Test different values along each dimension in interpd_grid_shape
        cls.NB_Model_1 = NB_Model("HII", grid_params=None, line_list=cls.lines,
                                  interpd_grid_shape=[100, 130, 80],
                                  grid_error=0.35)
        kwargs = {"prior_plot": cls.prior_plot,
                  "likelihood_plot": cls.likelihood_plot,
                  "posterior_plot": cls.posterior_plot,
                  "estimate_table": cls.estimate_table,
                  "deredden": True, "propagate_dered_errors": True,
                  "obs_wavelengths": cls.obs_wavelengths,
                  "prior":[("SII6716","SII6731")],
                  "plot_configs": [{"table_on_plot": True,
                                    "legend_fontsize": 5}]*4,
                  }
        cls.Result = cls.NB_Model_1(cls.obs_fluxes, cls.obs_errs, cls.lines,
                                    **kwargs)
    def test_parameter_estimates(self):
        """
        Regression check on parameter estimates.
        """
        ests = self.Result.Posterior.DF_estimates["Estimate"] # pandas Series
        self.assertTrue(np.isclose(ests["12 + log O/H"], 8.73615, atol=0.0001),
                        msg=str(ests["12 + log O/H"]))
        self.assertTrue(np.isclose(ests["log P/k"], 6.82636, atol=0.0001),
                        msg=str(ests["log P/k"]))
        self.assertTrue(np.isclose(ests["log U"], -2.84848, atol=0.0001),
                        msg=str(ests["log U"]))
    def test_estimate_bounds_checks(self):
        """
        Ensure that the "checking columns" in the estimate table are all
        showing that the estimates are good.
        """
        DF = self.Result.Posterior.DF_estimates # Parameter estimate table
        for p in ["12 + log O/H", "log P/k", "log U"]:
            for col in ["Est_in_CI68?", "Est_in_CI95?"]:
                self.assertTrue(DF.loc[p,col] == "Y")
            for col in ["Est_at_lower?", "Est_at_upper?", "P(lower)>50%?",
                        "P(upper)>50%?"]:
                self.assertTrue(DF.loc[p,col] == "N")
            self.assertTrue(DF.loc[p,"n_local_maxima"] == 1)
    def test_chi2(self):
        """
        Regression check that chi2 doesn't change
        """
        chi2 = self.Result.Posterior.best_model["chi2"]
        self.assertTrue(np.isclose(chi2, 2568.7, atol=0.2), msg=str(chi2))
    def test_interp_order(self):
        """
        Ensure the correct interpolation order (linear) is preserved
        """
        self.assertTrue(self.NB_Model_1.Interpd_grids.interp_order == 1)
    def test_all_zero_prior(self):
        """
        We permit an all-zero prior - check that it works (a warning should
        be printed).
        """
        shape = self.NB_Model_1.Interpd_grids.shape
        self.Result1 = self.NB_Model_1(self.obs_fluxes, self.obs_errs,
                                       self.lines, prior=np.zeros(shape))
    @classmethod
    def tearDownClass(cls):
        """ Remove the output files when tests in this class have finished """
        if clean_up:
            files = [cls.prior_plot, cls.likelihood_plot, cls.posterior_plot,
                     cls.estimate_table]
            for file_i in files:
                os.remove(file_i)
###############################################################################
class Test_real_data_with_cubic_interpolation(unittest.TestCase):
    """
    Very similar to the previous test class, but we use cubic interpolation
    instead of linear interpolation when interpolating model flux grids.
    We also test resetting the logging level after using the "verbosity" kwarg.
    """
    longMessage = True # Append messages to existing message
    lines = ["OII3726_29", "Hgamma", "OIII4363", "Hbeta", "OIII5007",
             "NI5200", "OI6300", "Halpha", "NII6583", "SII6716", "SII6731"]
    obs_fluxes = [1.22496, 0.3991, 0.00298, 1.0, 0.44942,
                  0.00766, 0.02923, 4.25103, 1.65312, 0.45598, 0.41482]
    obs_errs = [0.00303, 0.00142, 0.00078, 0.0017, 0.0012,
                0.00059, 0.00052, 0.00268, 0.00173, 0.00102, 0.00099]
    obs_wavelengths = [3727.3, 4340.5, 4363.2, 4861.3, 5006.8,
                       5200.3, 6300.3, 6562.8, 6583.2, 6716.4, 6730.8]
    @classmethod
    def setUpClass(cls):
        cls.prior_plot = os.path.join(TEST_DIR,
                                        cls.__name__ + "_prior.pdf")
        cls.likelihood_plot = os.path.join(TEST_DIR,
                                        cls.__name__ + "_likelihood.pdf")
        cls.posterior_plot = os.path.join(TEST_DIR,
                                        cls.__name__ + "_posterior.pdf")
        cls.estimate_table = os.path.join(TEST_DIR,
                                       cls.__name__ + "_parameter_estimates.csv")
        # Test different values along each dimension in interpd_grid_shape
        cls.NB_Model_1 = NB_Model("HII", line_list=cls.lines, interp_order=3,
                                  interpd_grid_shape=[100, 130, 80],
                                  grid_error=0.35)
        # Remember the logger level so it can be restored in tearDownClass.
        # Level 0 is logging.NOTSET.
        cls.old_log_level = NebulaBayes.NB_logger.level
        cls.test_log_level = 0 # A low number different to default
        assert cls.old_log_level != cls.test_log_level # For test to work
        NebulaBayes.NB_logger.setLevel(cls.test_log_level)
        kwargs = {"prior_plot": cls.prior_plot,
                  "likelihood_plot": cls.likelihood_plot,
                  "posterior_plot": cls.posterior_plot,
                  "estimate_table": cls.estimate_table,
                  "deredden": True, "propagate_dered_errors": True,
                  "obs_wavelengths": cls.obs_wavelengths,
                  "prior":[("SII6716","SII6731")],
                  "plot_configs": [{"table_on_plot": True,
                                    "legend_fontsize": 5}]*4,
                  "verbosity": "DEBUG",  # Test that level change is temporary
                  }
        cls.Result = cls.NB_Model_1(cls.obs_fluxes, cls.obs_errs, cls.lines,
                                    **kwargs)
    def test_parameter_estimates(self):
        """
        Regression check on parameter estimates. Estimates for P and U are
        slightly different with the cubic interpolation.
        """
        ests = self.Result.Posterior.DF_estimates["Estimate"] # pandas Series
        self.assertTrue(np.isclose(ests["12 + log O/H"], 8.73615, atol=0.0001),
                        msg=str(ests["12 + log O/H"]))
        self.assertTrue(np.isclose(ests["log P/k"], 6.86047, atol=0.0001),
                        msg=str(ests["log P/k"]))
        self.assertTrue(np.isclose(ests["log U"], -2.82828, atol=0.0001),
                        msg=str(ests["log U"]))
    def test_chi2(self):
        """
        Regression check that chi2 doesn't change
        """
        chi2 = self.Result.Posterior.best_model["chi2"]
        self.assertTrue(np.isclose(chi2, 2522.7, atol=0.2), msg=str(chi2))
    def test_interp_order(self):
        """
        Ensure the correct interpolation order (cubic) is preserved
        """
        self.assertTrue(self.NB_Model_1.Interpd_grids.interp_order == 3)
    def test_resetting_log_level(self):
        """
        Ensure that after using the verbosity keyword, the NB_logger
        level is unchanged (i.e. was reset to its previous value)
        """
        self.assertEqual(NebulaBayes.NB_logger.level, self.test_log_level)
    def test_dereddening_result_attributes(self):
        """Ensure dereddening attributes added to Result object."""
        self.assertTrue(self.Result.deredden)
        self.assertTrue(self.Result.propagate_dered_errors)
    @classmethod
    def tearDownClass(cls):
        """ Remove output files when tests in this class have finished,
        and undo change to logging level. """
        NebulaBayes.NB_logger.setLevel(cls.old_log_level)
        if clean_up:
            files = [cls.prior_plot, cls.likelihood_plot, cls.posterior_plot,
                     cls.estimate_table]
            for file_i in files:
                os.remove(file_i)
###############################################################################
class Test_upper_bounds_1D(unittest.TestCase):
    """
    Test the treatment of upper bounds. We use a 1D grid.
    """
    longMessage = True # Append messages to existing message
    lines = ["line1", "line2", "line3", "line4", "line5", "line6"]
    # A flux of -inf marks the line as an upper bound; presumably the error
    # column then supplies the bound value - confirm against the NB docs.
    obs_fluxes = [ 1.0, 8.0, 10.2, -np.inf, -np.inf, -np.inf]
    obs_errs = [ 0.05, 0.3, 3.1, 0.3, 0.4, 0.2]
    pred_fluxes = [ 1.0, 5.0, 10.2, 0.1, 0.4, 0.4]
    # The pred_fluxes are at the "peak" of the grid, that we'll input to NB.
    @classmethod
    def setUpClass(cls):
        n = 100 # Length of grid
        best_i = 65
        DF_grid1D = pd.DataFrame()
        DF_grid1D["p0"] = np.arange(n) - 572.3 # Linear
        # Gaussian profile peaking at index best_i, normalised to max 1
        DF_grid1D["dummy"] = np.exp(-((DF_grid1D["p0"] -
                                       DF_grid1D["p0"].values[best_i])/17.2)**2)
        DF_grid1D["dummy"] = DF_grid1D["dummy"].values / DF_grid1D["dummy"].max()
        for line, pred_flux in zip(cls.lines, cls.pred_fluxes):
            DF_grid1D[line] = DF_grid1D["dummy"].values * pred_flux
        # All of the fluxes peak at the point we'll input to NB
        DF_grid1D["line1"] = np.ones_like(DF_grid1D["line1"].values)
        cls.expected_p0 = DF_grid1D["p0"].values[best_i]
        # Note that we set grid_error to zero!
        cls.NB_Model_1 = NB_Model(DF_grid1D, grid_params=["p0"], grid_error=0,
                                  line_list=cls.lines, interpd_grid_shape=[500])
        kwargs = {"deredden": False, "norm_line": "line1",
                  "line_plot_dir": TEST_DIR}
        cls.Result = cls.NB_Model_1(cls.obs_fluxes, cls.obs_errs, cls.lines,
                                    **kwargs)
    def test_parameter_estimates(self):
        """
        Regression test - check the parameter estimate is as expected.
        """
        DF_est = self.Result.Posterior.DF_estimates # DataFrame
        p0_est = DF_est.loc["p0", "Estimate"]
        # atol=1 is approximately one raw grid step (p0 spacing is 1.0)
        self.assertTrue(np.isclose(p0_est, self.expected_p0, atol=1))
    @classmethod
    def tearDownClass(cls):
        """ Remove the output files when tests in this class have finished """
        if clean_up:
            files = [os.path.join(TEST_DIR, l +
                     "_PDF_contributes_to_likelihood.pdf") for l in cls.lines]
            for file_i in files:
                os.remove(file_i)
###############################################################################
class Test_all_zero_likelihood(unittest.TestCase):
    """
    Test forcing a log_likelihood of all -inf, so the likelihood is all zero.
    """
    longMessage = True  # Append messages to existing message
    lines = ["Halpha", "Hbeta", "OIII4363", "OIII5007", "NII6583"]
    # Enormous fluxes relative to Hbeta (the norm line) force the
    # log-likelihood to -inf everywhere on the grid
    obs_fluxes = [1e250, 1, 1.2e250, 1.2e250, 1e250]
    obs_errs = [0.004, 1, 0.005, 0.003, 0.002]
    @classmethod
    def setUpClass(cls):
        model = NB_Model("HII", line_list=cls.lines, grid_error=0.01,
                         interpd_grid_shape=[30, 30, 30])
        cls.NB_Model_1 = model
        cls.Result = model(cls.obs_fluxes, cls.obs_errs, cls.lines,
                           deredden=False, norm_line="Hbeta",
                           prior=[("NII6583", "Halpha")])
    def test_likelihood_all_zero(self):
        """Regression test - check likelihood is all zero."""
        self.assertTrue(not np.any(self.Result.Likelihood.nd_pdf))
    def test_posterior_all_zero(self):
        """Regression test - check posterior is all zero."""
        self.assertTrue(not np.any(self.Result.Posterior.nd_pdf))
###############################################################################
class Test_data_that_matches_models_poorly(unittest.TestCase):
    """
    Test inputting fluxes and errors that are very poorly fit by the entire
    model grid. In this case most of the likelihood is zero, and using a
    reasonable-ish prior gives a posterior that is zero everywhere.
    """
    longMessage = True  # Append messages to existing message
    lines = ["Halpha", "Hbeta", "OIII4363", "OIII5007", "NII6583"]
    obs_fluxes = [3.1, 1, 1.8, 5.1, 1.2]
    # Note the very small errors - they make the data inconsistent with
    # every point of the model grid
    obs_errs = [0.01, 1, 0.01, 0.01, 0.01]
    @classmethod
    def setUpClass(cls):
        model = NB_Model("HII", line_list=cls.lines, grid_error=0.01,
                         interpd_grid_shape=[30, 30, 30])
        cls.NB_Model_1 = model
        cls.Result = model(cls.obs_fluxes, cls.obs_errs, cls.lines,
                           deredden=False, norm_line="Hbeta",
                           prior=[("NII6583", "OIII4363")])
    def test_likelihood_mostly_zero(self):
        """Regression test - check likelihood is mostly zero."""
        n_nonzero = np.count_nonzero(self.Result.Likelihood.nd_pdf)
        self.assertTrue(n_nonzero < 65)
    def test_posterior_all_zero(self):
        """Regression test - check posterior is all zero."""
        self.assertTrue(not np.any(self.Result.Posterior.nd_pdf))
###############################################################################
class Test_NB_nd_pdf(unittest.TestCase):
    """
    Test the methods in the NB_nd_pdf class
    """
    @classmethod
    def setUpClass(cls):
        # Run NB in 2D to obtain an NB_Result object
        lines = ["Hbeta", "OIII5007", "Halpha"]
        fluxes = [1.0, 1.3, 2.8]
        errors = [0.2, 0.2, 0.2]
        # Load the bundled HII grid table directly and slice it to 2D
        NB_dir = os.path.dirname(os.path.realpath(NebulaBayes.__file__))
        grid_table_file = os.path.join(NB_dir, "grids", "NB_HII_grid.fits.gz")
        BinTableHDU_0 = fits.getdata(grid_table_file, 0)
        DF_grid = Table(BinTableHDU_0).to_pandas()
        DF_grid = DF_grid[DF_grid["log P/k"] == 6.6] # Reduce to 2D grid
        nx, ny = 30, 30 # Square interpolated grid
        cls.NB_Model_1 = NB_Model(DF_grid, line_list=lines,
                                  grid_params=["12 + log O/H", "log U"],
                                  interpd_grid_shape=[ny, nx])
        cls.Result_1 = cls.NB_Model_1(fluxes,errors, lines, deredden=False)
        # Now make a 2D pdf which is a single line of pixels along a quarter-
        # circle around the origin. This ensures the point defined by the peaks
        # of the 1D marginalised PDFs is very different from the 2D pdf peak.
        x, y = np.arange(nx), np.arange(ny)
        xx, yy = np.meshgrid(x, y)
        raw_pdf = np.zeros((ny, nx), dtype=float)
        rr2 = xx**2 + yy**2
        where_arc = (rr2 > (20 - 0.5)**2) & (rr2 < (20 + 0.5)**2)
        raw_pdf[where_arc] = 1.0 # Value 1 along the circular arc; 0 elsewhere
        # Make a 2D PDF peak at the intersection of the circular arc and y = x
        raw_pdf[np.abs(xx - yy) < 1.5] *= 2
        # print(np.unravel_index(raw_pdf.argmax(), raw_pdf.shape))  # (14, 14)
        cls.raw_pdf = raw_pdf
        # Marginalise the raw pdf by hand (trapezoid rule) for comparison
        # against the values NB computes internally
        val_arrs = cls.NB_Model_1.Interpd_grids.param_values_arrs
        dx, dy = np.diff(val_arrs[1])[0], np.diff(val_arrs[0])[0]
        cls.marginalised_x = np.trapz(raw_pdf, dx=dy, axis=0)
        cls.peak_ind_x = np.argmax(cls.marginalised_x)
        cls.marginalised_y = np.trapz(raw_pdf, dx=dx, axis=1)
        cls.peak_ind_y = np.argmax(cls.marginalised_y)
        # print(cls.peak_ind_y, cls.peak_ind_x)  # (20, 20) as desired
        # plt.imshow(raw_pdf, origin="lower")
        # plt.scatter([14, 20], [14, 20], marker="*")
        # Make a new NB_nd_pdf object with the custom raw 2D PDF
        cls.NB_nd_pdf_1 = NB_nd_pdf(raw_pdf, cls.Result_1,
                               cls.NB_Model_1.Interpd_grids, name="Posterior")
    # We want to test NB_nd_pdf attributes; some of "DF_estimates", "Grid_spec",
    # "best_model", marginalised_1D", "marginalised_2D", "name", "nd_pdf"
    # "best_model" keys: "table", "chi2", "extinction_Av_mag", "grid_location"
    def test_best_model_grid_location(self):
        """ The "best model" is at the peaks of the 1D marginalised PDFs """
        grid_location = self.NB_nd_pdf_1.best_model["grid_location"]
        self.assertEqual(grid_location, (self.peak_ind_y, self.peak_ind_x))
    def test_DF_estimates_Index_of_peak(self):
        """ Index_of_peak in the estimate table matches the hand-computed
        marginalised-PDF peaks """
        DF_estimates = self.NB_nd_pdf_1.DF_estimates
        x_Index_of_peak = DF_estimates.loc["log U", "Index_of_peak"]
        self.assertEqual(x_Index_of_peak, self.peak_ind_x)
        y_Index_of_peak = DF_estimates.loc["12 + log O/H", "Index_of_peak"]
        self.assertEqual(y_Index_of_peak, self.peak_ind_y)
    def test_best_model_table(self):
        """ Check that a single table value matches the desired gridpoint.
        This test would fail on NebulaBayes 0.9.7 and earlier """
        best_coords = (self.peak_ind_y, self.peak_ind_x)
        normed_grids = self.NB_Model_1.Interpd_grids.grids["Hbeta_norm"]
        model_OIII = normed_grids["OIII5007"][best_coords]
        DF_best = self.NB_nd_pdf_1.best_model["table"]
        table_model_OIII = DF_best.loc["OIII5007", "Model"]
        self.assertEqual(table_model_OIII, model_OIII)
    def test_marginalised_1D_pdf(self):
        """ Check that the marginalised 1D pdfs are as expected """
        m_1D = self.NB_nd_pdf_1.marginalised_1D
        self.assertEqual(len(m_1D), 2)
        # Scale the pdfs to compare despite the m_1D PDFs being normalised
        m_1D["log U"] /= m_1D["log U"].max()
        m_1D["12 + log O/H"] /= m_1D["12 + log O/H"].max()
        expected_x_pdf = self.marginalised_x / self.marginalised_x.max()
        expected_y_pdf = self.marginalised_y / self.marginalised_y.max()
        self.assertTrue(np.allclose(m_1D["log U"], expected_x_pdf,
                                    atol=1e-12, rtol=0))
        self.assertTrue(np.allclose(m_1D["12 + log O/H"], expected_y_pdf,
                                    atol=1e-12, rtol=0))
        # May have swapped x and y, but it's all symmetric anyway...
    def test_nd_pdf(self):
        """
        Check that the normalised nd_pdf matches the input raw nd_pdf. We
        avoid doing a proper normalisation by comparing with a simple scaling.
        """
        pdf = self.NB_nd_pdf_1.nd_pdf
        scaled_raw_nd_pdf = self.raw_pdf / self.raw_pdf.max()
        self.assertTrue(np.array_equal(pdf / pdf.max(), scaled_raw_nd_pdf))
###############################################################################
class Test_dereddening_changes_results(unittest.TestCase):
    """
    Test that using dereddening changes all three PDFs (when obs data are used
    in the prior). There previously was a bug where the obs data in the line
    ratio priors weren't dereddened.
    Also test that PDFs change when errors from the Balmer decrement are
    propagated into the dereddened line fluxes.
    """
    @classmethod
    def setUpClass(cls):
        # Run NB in 2D to obtain an NB_Result object
        lines = ["Hbeta", "OIII5007", "Halpha"]
        waves = [4861., 5007., 6563.]
        fluxes = [1.0, 1.3, 5.1]
        errors = [0.2, 0.2, 0.2]
        NB_Model_1 = NB_Model("HII", line_list=lines, grid_error=0.35,
                              interpd_grid_shape=(15, 15, 15))
        prior = [("OIII5007", "Hbeta")] # Need obs data in prior
        # Three runs: dereddened, dereddened with error propagation, and
        # not dereddened - each pair should give different PDFs
        cls.Result_dered1 = NB_Model_1(fluxes, errors, lines, prior=prior,
                                   obs_wavelengths=waves, deredden=True)
        cls.Result_dered2 = NB_Model_1(fluxes, errors, lines, prior=prior,
            obs_wavelengths=waves, deredden=True, propagate_dered_errors=True)
        cls.Result_nodered = NB_Model_1(fluxes, errors, lines, prior=prior,
                                        deredden=False)
    def test_priors_differ(self):
        """ Check that dereddened data was used in line ratio prior, when
        requested. This test fails on NebulaBayes 0.9.7 """
        pdf_dered1 = self.Result_dered1.Prior.nd_pdf
        pdf_nodered = self.Result_nodered.Prior.nd_pdf
        max_diff1 = np.max(np.abs(pdf_dered1 - pdf_nodered))
        self.assertTrue(max_diff1 > 0.01, str(max_diff1))
        # Test uncertainty propagation has an effect
        pdf_dered2 = self.Result_dered2.Prior.nd_pdf
        max_diff_u = np.max(np.abs(pdf_dered1 - pdf_dered2))
        self.assertTrue(max_diff_u > 0.01, str(max_diff_u))
    def test_likelihoods_differ(self):
        """ As above, but for the likelihood PDFs """
        pdf_dered1 = self.Result_dered1.Likelihood.nd_pdf
        pdf_nodered = self.Result_nodered.Likelihood.nd_pdf
        max_diff1 = np.max(np.abs(pdf_dered1 - pdf_nodered))
        self.assertTrue(max_diff1 > 0.01, str(max_diff1))
        pdf_dered2 = self.Result_dered2.Likelihood.nd_pdf
        max_diff_u = np.max(np.abs(pdf_dered1 - pdf_dered2))
        self.assertTrue(max_diff_u > 0.01, str(max_diff_u))
    def test_posteriors_differ(self):
        """ As above, but for the posterior PDFs """
        pdf_dered1 = self.Result_dered1.Posterior.nd_pdf
        pdf_nodered = self.Result_nodered.Posterior.nd_pdf
        max_diff1 = np.max(np.abs(pdf_dered1 - pdf_nodered))
        self.assertTrue(max_diff1 > 0.01, str(max_diff1))
        pdf_dered2 = self.Result_dered2.Posterior.nd_pdf
        max_diff_u = np.max(np.abs(pdf_dered1 - pdf_dered2))
        self.assertTrue(max_diff_u > 0.01, str(max_diff_u))
    def test_propagate_dered_errors(self):
        """Check propagate_dered_errors values on Result object"""
        # Checks default value of False
        self.assertFalse(self.Result_dered1.propagate_dered_errors)
        self.assertTrue(self.Result_dered2.propagate_dered_errors)
###############################################################################
class Test_likelihood_lines_keyword(unittest.TestCase):
    """
    Test inputting fluxes and errors that aren't used in the likelihood, and
    test that these lines may be used in a prior.
    """
    longMessage = True # Append messages to existing message
    lines = ["Halpha", "Hbeta", "OIII4363", "OIII5007", "NII6583"]
    obs_fluxes = [ 3.1, 1, 1.8, 5.1, 1.2]
    obs_errs = [ 0.01, 1, 0.01, 0.01, 0.01]
    # These two lines are excluded from the likelihood but still supplied,
    # and Halpha is used in the prior
    exclude_lines = ["Halpha", "OIII5007"]
    @classmethod
    def setUpClass(cls):
        cls.NB_Model_1 = NB_Model("HII", line_list=cls.lines,
                                  interpd_grid_shape=[30,30,30])
        cls.kwargs = {"deredden": False, "norm_line": "Hbeta",
            "prior": [("NII6583", cls.exclude_lines[0])], "verbosity": "WARNING",
        }
        cls.likelihood_lines = [l for l in cls.lines if l not in cls.exclude_lines]
        cls.Result = cls.NB_Model_1(cls.obs_fluxes, cls.obs_errs, cls.lines,
                           likelihood_lines=cls.likelihood_lines, **cls.kwargs)
        P = cls.Result.Posterior
        cls.DF_best = P.best_model["table"]
        cls.estimate_Z = P.DF_estimates.loc["12 + log O/H", "Estimate"]
    def test_non_likelihood_lines_in_best_model_table(self):
        """
        Regression test - lines not included in likelihood calculation should
        still appear in the "best model" table.
        """
        self.assertTrue(all(l in self.DF_best.index for l in self.exclude_lines))
    def test_best_model_table_fields(self):
        """
        Regression test - check fields of best model table (we test for the
        case of no dereddening; field names are different with dereddening).
        """
        correct_fields = ["In_lhood?", "Obs", "Model", "Resid_Stds", "Obs_S/N"]
        t_fields = self.DF_best.columns.tolist()
        self.assertTrue(t_fields == correct_fields, t_fields)
    def test_In_lhood_field_in_best_model_table(self):
        """
        Regression test - the "In_lhood?" field in the best model table should
        correctly identify if a line was used in the likelihood.
        """
        correct = [("N" if l in self.exclude_lines else "Y") for l in self.lines]
        self.assertTrue(self.DF_best["In_lhood?"].values.tolist() == correct)
    def test_permuting_input_line_order(self):
        """
        Regression test - the order of the input lines should not affect the
        results. There was a real bug introduced with the "likelihood_lines"
        feature - this test fails on NB version 0.9.6 and 0.9.7!
        """
        n = len(self.lines)
        for i, ind_tuple in enumerate(itertools.permutations(range(n))):
            # There are 5! = 120 permutations, so only check one in five:
            if i % 5 != 2:
                continue
            # Permute fluxes, errors and names consistently; the metallicity
            # estimate must be identical to the unpermuted run
            obs_fluxes = [self.obs_fluxes[j] for j in ind_tuple]
            obs_errs = [self.obs_errs[j] for j in ind_tuple]
            lines = [self.lines[j] for j in ind_tuple]
            Result_i = self.NB_Model_1(obs_fluxes, obs_errs, lines,
                            likelihood_lines=self.likelihood_lines, **self.kwargs)
            P_i = Result_i.Posterior
            estimate_Z_i = P_i.DF_estimates.loc["12 + log O/H", "Estimate"]
            self.assertEqual(estimate_Z_i, self.estimate_Z)
###############################################################################
class Test_raising_errors(unittest.TestCase):
    """
    Test raising errors on bad inputs
    """
    @classmethod
    def setUpClass(cls):
        # assertRaisesRegex (Python 3) was named assertRaisesRegexp on
        # Python 2; alias whichever exists to a common name.
        if hasattr(cls, "assertRaisesRegex"):
            cls.assertRaisesRE = cls.assertRaisesRegex
        else:
            cls.assertRaisesRE = cls.assertRaisesRegexp
    def test_bad_grid_parameter_with_too_few_unique_values(self):
        """
        Test correct error is raised if there are too few unique values for
        a grid parameter.
        """
        # "p1" is constant, so it cannot define a grid dimension
        bad_grid = pd.DataFrame({"p1": [4, 4, 4, 4], "p2": [1, 2, 3, 4],
                                 "l2": [5, 6, 7, 8]})
        self.assertRaisesRE(ValueError, "3 unique values are required",
                            NB_Model, bad_grid, ["p1", "p2"])
###############################################################################
def interactive_plot_tests():
    """
    This function needs to be called manually to test the interactive plotting.
    from test_NB import interactive_plot_tests
    interactive_plot_tests()
    """
    line_names = ["OII3726_29", "Hgamma", "OIII4363", "Hbeta", "OIII5007",
                  "NI5200", "OI6300", "Halpha", "NII6583", "SII6716",
                  "SII6731"]
    fluxes = [1.22496, 0.3991, 0.00298, 1.0, 0.44942, 0.00766, 0.02923,
              4.25103, 1.65312, 0.45598, 0.41482]
    errors = [0.00303, 0.00142, 0.00078, 0.0017, 0.0012, 0.00059, 0.00052,
              0.00268, 0.00173, 0.00102, 0.00099]
    wavelengths = [3727.3, 4340.5, 4363.2, 4861.3, 5006.8, 5200.3, 6300.3,
                   6562.8, 6583.2, 6716.4, 6730.8]
    model = NB_Model("HII", grid_params=None, line_list=line_names,
                     interpd_grid_shape=[50, 70, 50], grid_error=0.35)
    plot_config = {"table_on_plot": True, "legend_fontsize": 5}
    result = model(fluxes, errors, line_names, deredden=True,
                   propagate_dered_errors=True,
                   obs_wavelengths=wavelengths,
                   prior=[("SII6716", "SII6731")],
                   plot_configs=[plot_config] * 4)
    # Test both ways to make an interactive plot
    result.Plotter.interactive(result.Posterior)
    result.Prior.show(result.Plotter)
###############################################################################
# Ideas for more tests:
# Check that parameter estimates are inside the CIs, and check the flags for this
# Test normalising to different lines repeatedly, and checking that the
# unnecessary interpolated grids are deleted.
# Check coverage of the code, to see what isn't being run?
# Run the whole test suite when this file is executed directly
if __name__ == "__main__":
    print("\nTesting NebulaBayes version {0} ...\n".format(__version__))
    unittest.main(verbosity=2)
| 17,028 | 99 | 838 |
7f5c849a1922a15d5a237861a7d432656ec07858 | 224 | py | Python | spider/spider/distance/rfd/__init__.py | Rosna/P4ML-UI | edf0dd830588f03b197e4d6532830a5aedd88424 | [
"Apache-2.0"
] | 1 | 2021-11-05T17:42:47.000Z | 2021-11-05T17:42:47.000Z | spider/spider/distance/rfd/__init__.py | Rosna/P4ML-UI | edf0dd830588f03b197e4d6532830a5aedd88424 | [
"Apache-2.0"
] | null | null | null | spider/spider/distance/rfd/__init__.py | Rosna/P4ML-UI | edf0dd830588f03b197e4d6532830a5aedd88424 | [
"Apache-2.0"
] | 2 | 2019-02-21T18:29:51.000Z | 2019-09-02T21:21:26.000Z | """
spider.distance.metricl.rfd sub-package
__init.py__
@author: david johnson
Primitive that learns and applies random-forest-based distance metric.
defines the module index
"""
from .rfd import RFD
| 17.230769 | 74 | 0.709821 | """
spider.distance.metricl.rfd sub-package
__init.py__
@author: david johnson
Primitive that learns and applies random-forest-based distance metric.
defines the module index
"""
from .rfd import RFD
| 0 | 0 | 0 |
e6cd940742010007d0946f002755532450bf69fa | 4,842 | py | Python | prission.py | mandypepe/pythontets | 5bf1d84e5c7c769b27c5cda221ab08cc6625daa4 | [
"Apache-2.0"
] | null | null | null | prission.py | mandypepe/pythontets | 5bf1d84e5c7c769b27c5cda221ab08cc6625daa4 | [
"Apache-2.0"
] | null | null | null | prission.py | mandypepe/pythontets | 5bf1d84e5c7c769b27c5cda221ab08cc6625daa4 | [
"Apache-2.0"
] | null | null | null | import pygame, sys
from pygame.locals import *
# Aclaraciones
# Requiere "pygame" para las graficas
#
# Se grafican las figuras para una mejor comprencion pero como son coordenadas tan pequenas no se muestran bien
# (aumentando las proporciones pude verse mejo)
# pero la orden del problema no lo permite.
#
# La solucion trate de buscarla matematicamente
# 1- para saber si es ciudadano o prisionero :
# comprobamos si el punto esta fuera o dentro de un poligono o en uno de sus vertices
#
# definiendo colores
NEGRO = (0, 0, 0)
ROJO = (255, 0, 0)
CAFE = (90, 50, 15)
BLANCO = (255, 255, 255)
AZUL = (0, 0, 255)
# Abriendo Fichero
infile = open('texto.txt', 'r')
for line in infile:
lista = line
pygame.init()
# Asignando dimenciones a la ventana
dimensiones = (500, 500)
pantalla = pygame.display.set_mode(dimensiones)
# asignando nombre de la ventana
pantalla.fill(BLANCO) # rellenando ventana
terminar = False
reloj = pygame.time.Clock()
while not terminar:
for Evento in pygame.event.get():
if Evento.type == pygame.QUIT:
terminar = True
# limpiando lista y declarando variables
lista = lista.replace(" ", ",").replace("|", ",")
lista_limpa = lista.split(",")
lista_x = []
lista_y = []
longitud = len(lista_limpa)
poligono = []
#separando las cordenadas X,Y y conformando el Poligono
i = 0
while i < longitud - 2:
cordenada_x = int(lista_limpa[i])
if (cordenada_x >= 0 and cordenada_x <= 10):
temp_x = int(lista_limpa[i]) # aca se puede aumentar las proporciones
lista_x.append(temp_x)
j = i + 1
cordenada_y = int(lista_limpa[j])
if (cordenada_y >= 0 and cordenada_y <= 10):
temp_y = int(lista_limpa[j]) # aca se puede aumentar las proporciones
lista_y.append(temp_y)
poligono.append((temp_x, temp_y))
i = i + 2
# Preparando las cordenadas para dibujar P (puntos de rectas ) D para las diagonales
# o rectas de cierre de la figura
i = 0
while i < len(lista_x):
px = int(lista_x[i])
py = int(lista_y[i])
pxx = int(lista_x[i + 1])
pyy = int(lista_y[i + 1])
if i == 0:
dx = int(lista_x[i])
dy = int(lista_y[i])
dxx = int(lista_x[i + 3])
dyy = int(lista_y[i + 3])
if i == 2:
dx = int(lista_x[i - 1])
dy = int(lista_y[i - 1])
dxx = int(lista_x[i])
dyy = int(lista_y[i])
#dibujando la figura
pygame.draw.line(pantalla, ROJO, [px, py], [pxx, pyy], 2)
pygame.draw.aaline(pantalla, ROJO, [dx, dy], [dxx, dyy], True)
i = i + 2
#campturando los puntos del usuario
punto_x = int(lista_limpa[len(lista_limpa) - 2])
punto_y = int(lista_limpa[len(lista_limpa) - 1])
#aplicando restricciones
if punto_y >= 3 and punto_y <= 12 and punto_x >= 3 and punto_x <= 12:
punto_x = punto_x
punto_y = punto_y
pygame.draw.circle(pantalla, CAFE, [punto_x, punto_y], 1) #dibujando el .
#metodo para definir si el punto esta dentro o fuera del poligono
if punto_en_poligono(punto_x, punto_y, poligono) == 2:
pygame.display.set_caption("Prisionero Estas en unode los vertice")
elif punto_en_poligono(punto_x, punto_y, poligono) == 1:
pygame.display.set_caption("Prisionero")
else:
pygame.display.set_caption("Ciudadano")
pygame.display.flip()
reloj.tick(20)
# Cerramos el fichero.
infile.close()
pygame.quit()
| 37.828125 | 117 | 0.522305 | import pygame, sys
from pygame.locals import *
# Aclaraciones
# Requiere "pygame" para las graficas
#
# Se grafican las figuras para una mejor comprencion pero como son coordenadas tan pequenas no se muestran bien
# (aumentando las proporciones pude verse mejo)
# pero la orden del problema no lo permite.
#
# La solucion trate de buscarla matematicamente
# 1- para saber si es ciudadano o prisionero :
# comprobamos si el punto esta fuera o dentro de un poligono o en uno de sus vertices
#
# definiendo colores
NEGRO = (0, 0, 0)
ROJO = (255, 0, 0)
CAFE = (90, 50, 15)
BLANCO = (255, 255, 255)
AZUL = (0, 0, 255)
# Abriendo Fichero
infile = open('texto.txt', 'r')
for line in infile:
lista = line
pygame.init()
# Asignando dimenciones a la ventana
dimensiones = (500, 500)
pantalla = pygame.display.set_mode(dimensiones)
# asignando nombre de la ventana
pantalla.fill(BLANCO) # rellenando ventana
terminar = False
reloj = pygame.time.Clock()
while not terminar:
for Evento in pygame.event.get():
if Evento.type == pygame.QUIT:
terminar = True
# limpiando lista y declarando variables
lista = lista.replace(" ", ",").replace("|", ",")
lista_limpa = lista.split(",")
lista_x = []
lista_y = []
longitud = len(lista_limpa)
poligono = []
#separando las cordenadas X,Y y conformando el Poligono
i = 0
while i < longitud - 2:
cordenada_x = int(lista_limpa[i])
if (cordenada_x >= 0 and cordenada_x <= 10):
temp_x = int(lista_limpa[i]) # aca se puede aumentar las proporciones
lista_x.append(temp_x)
j = i + 1
cordenada_y = int(lista_limpa[j])
if (cordenada_y >= 0 and cordenada_y <= 10):
temp_y = int(lista_limpa[j]) # aca se puede aumentar las proporciones
lista_y.append(temp_y)
poligono.append((temp_x, temp_y))
i = i + 2
# Preparando las cordenadas para dibujar P (puntos de rectas ) D para las diagonales
# o rectas de cierre de la figura
i = 0
while i < len(lista_x):
px = int(lista_x[i])
py = int(lista_y[i])
pxx = int(lista_x[i + 1])
pyy = int(lista_y[i + 1])
if i == 0:
dx = int(lista_x[i])
dy = int(lista_y[i])
dxx = int(lista_x[i + 3])
dyy = int(lista_y[i + 3])
if i == 2:
dx = int(lista_x[i - 1])
dy = int(lista_y[i - 1])
dxx = int(lista_x[i])
dyy = int(lista_y[i])
#dibujando la figura
pygame.draw.line(pantalla, ROJO, [px, py], [pxx, pyy], 2)
pygame.draw.aaline(pantalla, ROJO, [dx, dy], [dxx, dyy], True)
i = i + 2
#campturando los puntos del usuario
punto_x = int(lista_limpa[len(lista_limpa) - 2])
punto_y = int(lista_limpa[len(lista_limpa) - 1])
#aplicando restricciones
if punto_y >= 3 and punto_y <= 12 and punto_x >= 3 and punto_x <= 12:
punto_x = punto_x
punto_y = punto_y
pygame.draw.circle(pantalla, CAFE, [punto_x, punto_y], 1) #dibujando el .
#metodo para definir si el punto esta dentro o fuera del poligono
def punto_en_poligono(x, y, poligono):
i = 0
j = len(poligono) - 1
salida = 0
for i in range(len(poligono)):
# condicion para saber si el punto esta en uno de los vertices de la figura
if poligono[i][0] == x and poligono[i][1] == y:
salida = 2
break
if (poligono[i][1] < y and poligono[j][1] >= y) or (poligono[j][1] < y and poligono[i][1] >= y):
if poligono[i][0] + (y - poligono[i][1]) / (poligono[j][1] - poligono[i][1]) * (
poligono[j][0] - poligono[i][0]) < x:
salida = 1
j = i
return salida
if punto_en_poligono(punto_x, punto_y, poligono) == 2:
pygame.display.set_caption("Prisionero Estas en unode los vertice")
elif punto_en_poligono(punto_x, punto_y, poligono) == 1:
pygame.display.set_caption("Prisionero")
else:
pygame.display.set_caption("Ciudadano")
pygame.display.flip()
reloj.tick(20)
# Cerramos el fichero.
infile.close()
pygame.quit()
| 777 | 0 | 35 |
1dc936213106f9d125c70a922c89b50eee90b068 | 4,148 | py | Python | scripts/mg-compare-heatmap.py | natalie-robinson/MG-RAST-Tools | ed28ffaeb17a156a1d249e4104b042a9ba9cb8d5 | [
"BSD-2-Clause"
] | 21 | 2015-01-18T01:43:12.000Z | 2021-09-09T03:26:59.000Z | scripts/mg-compare-heatmap.py | natalie-robinson/MG-RAST-Tools | ed28ffaeb17a156a1d249e4104b042a9ba9cb8d5 | [
"BSD-2-Clause"
] | 26 | 2015-01-22T21:23:09.000Z | 2021-11-13T17:55:37.000Z | scripts/mg-compare-heatmap.py | natalie-robinson/MG-RAST-Tools | ed28ffaeb17a156a1d249e4104b042a9ba9cb8d5 | [
"BSD-2-Clause"
] | 22 | 2015-01-18T01:44:36.000Z | 2021-09-18T09:29:06.000Z | #!/usr/bin/env python
import os
import sys
import json
from argparse import ArgumentParser
from mglib import obj_from_url, tab_to_matrix, AUTH_LIST, API_URL, biom_to_matrix, VERSION
prehelp = """
NAME
mg-compare-heatmap
VERSION
%s
SYNOPSIS
mg-compare-heatmap [ --help, --input <input file or stdin>, --output <output file or stdout>, --format <cv: 'text' or 'biom'>, --cluster <cv: ward, single, complete, mcquitty, median, centroid>, --distance <cv: bray-curtis, euclidean, maximum, manhattan, canberra, minkowski, difference>, --name <boolean>, --normalize <boolean> ]
DESCRIPTION
Retrieve Dendogram Heatmap from abundance profiles for multiple metagenomes.
"""
posthelp = """
Input
Tab-delimited table of abundance profiles, metagenomes in columns and annotation in rows.
OR
BIOM format of abundance profiles.
Output
JSON struct containing ordered distances for metagenomes and annotations, along with dendogram data.
EXAMPLES
mg-compare-taxa --ids "mgm4441679.3,mgm4441680.3,mgm4441681.3,mgm4441682.3" --level class --source RefSeq --format text | mg-compare-heatmap --input - --format text --cluster median --distance manhattan
SEE ALSO
-
AUTHORS
%s
"""
if __name__ == "__main__":
sys.exit(main(sys.argv))
| 41.48 | 334 | 0.662729 | #!/usr/bin/env python
import os
import sys
import json
from argparse import ArgumentParser
from mglib import obj_from_url, tab_to_matrix, AUTH_LIST, API_URL, biom_to_matrix, VERSION
prehelp = """
NAME
mg-compare-heatmap
VERSION
%s
SYNOPSIS
mg-compare-heatmap [ --help, --input <input file or stdin>, --output <output file or stdout>, --format <cv: 'text' or 'biom'>, --cluster <cv: ward, single, complete, mcquitty, median, centroid>, --distance <cv: bray-curtis, euclidean, maximum, manhattan, canberra, minkowski, difference>, --name <boolean>, --normalize <boolean> ]
DESCRIPTION
Retrieve Dendogram Heatmap from abundance profiles for multiple metagenomes.
"""
posthelp = """
Input
Tab-delimited table of abundance profiles, metagenomes in columns and annotation in rows.
OR
BIOM format of abundance profiles.
Output
JSON struct containing ordered distances for metagenomes and annotations, along with dendogram data.
EXAMPLES
mg-compare-taxa --ids "mgm4441679.3,mgm4441680.3,mgm4441681.3,mgm4441682.3" --level class --source RefSeq --format text | mg-compare-heatmap --input - --format text --cluster median --distance manhattan
SEE ALSO
-
AUTHORS
%s
"""
def main(args):
    """Parse options, read an abundance profile, request a dendrogram
    heatmap from the communities API and write the JSON result.

    :param args: argv-style list. Kept for interface compatibility;
        argparse reads sys.argv directly (NOTE(review): 'args' is unused --
        confirm callers always pass sys.argv before changing this).
    :returns: 0 on success, 1 on invalid input.
    """
    # Show the raw pre/post help text instead of argparse's re-wrapped form.
    ArgumentParser.format_description = lambda self, formatter: self.description
    ArgumentParser.format_epilog = lambda self, formatter: self.epilog
    parser = ArgumentParser(usage='', description=prehelp%VERSION, epilog=posthelp%AUTH_LIST)
    parser.add_argument("--url", dest="url", default=API_URL, help="communities API url")
    parser.add_argument("--input", dest="input", default='-', help="input: filename or stdin (-), default is stdin")
    parser.add_argument("--output", dest="output", default='-', help="output: filename or stdout (-), default is stdout")
    parser.add_argument("--format", dest="format", default='biom', help="input format: 'text' for tabbed table, 'biom' for BIOM format, default is biom")
    parser.add_argument("--cluster", dest="cluster", default='ward', help="cluster function, one of: ward, single, complete, mcquitty, median, centroid, default is ward")
    parser.add_argument("--distance", dest="distance", default='bray-curtis', help="distance function, one of: bray-curtis, euclidean, maximum, manhattan, canberra, minkowski, difference, default is bray-curtis")
    parser.add_argument("--name", dest="name", type=int, default=0, help="label columns by name, default is by id: 1=true, 0=false")
    parser.add_argument("--normalize", dest="normalize", type=int, default=0, help="normalize the input data, default is off: 1=true, 0=false")
    # get inputs
    opts = parser.parse_args()
    if (opts.input != '-') and (not os.path.isfile(opts.input)):
        sys.stderr.write("ERROR: input data missing\n")
        return 1
    if opts.format not in ['text', 'biom']:
        sys.stderr.write("ERROR: invalid input format\n")
        return 1
    # parse inputs
    rows = []
    cols = []
    data = []
    try:
        if opts.input == '-':
            indata = sys.stdin.read()
        else:
            # 'with' closes the handle (the original left the file open).
            with open(opts.input, 'r') as in_hdl:
                indata = in_hdl.read()
        if opts.format == 'biom':
            try:
                biom = json.loads(indata)
                rows, cols, data = biom_to_matrix(biom, col_name=opts.name)
            # 'except Exception' instead of a bare except so that
            # KeyboardInterrupt/SystemExit are not swallowed.
            except Exception:
                sys.stderr.write("ERROR: input BIOM data not correct format\n")
                return 1
        else:
            rows, cols, data = tab_to_matrix(indata)
    except Exception:
        sys.stderr.write("ERROR: unable to load input data\n")
        return 1
    # retrieve data
    raw = '0' if opts.normalize else '1'
    post = {"raw": raw, "cluster": opts.cluster, "distance": opts.distance, "columns": cols, "rows": rows, "data": data}
    hmap = obj_from_url(opts.url+'/compute/heatmap', data=json.dumps(post, separators=(',',':')))
    # output data
    if (not opts.output) or (opts.output == '-'):
        out_hdl = sys.stdout
    else:
        out_hdl = open(opts.output, 'w')
    out_hdl.write(json.dumps(hmap, separators=(', ',': '), indent=4)+"\n")
    # Only close handles we opened ourselves; never close sys.stdout.
    if out_hdl is not sys.stdout:
        out_hdl.close()
    return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
| 2,855 | 0 | 23 |
3bf02f939d9fa44b09bbbe9fe168ef56750390f0 | 5,733 | py | Python | Youtube Bot Video Generator/videoscript.py | wasimakh2/Automatic-Youtube-Reddit-Text-To-Speech-Video-Generator-and-Uploader | 8f6e1c7fcbd93f7da8f437cb4bf2d9c67c2932c2 | [
"MIT"
] | 1 | 2020-08-24T07:02:32.000Z | 2020-08-24T07:02:32.000Z | Youtube Bot Video Generator/videoscript.py | wasimakh2/Automatic-Youtube-Reddit-Text-To-Speech-Video-Generator-and-Uploader | 8f6e1c7fcbd93f7da8f437cb4bf2d9c67c2932c2 | [
"MIT"
] | null | null | null | Youtube Bot Video Generator/videoscript.py | wasimakh2/Automatic-Youtube-Reddit-Text-To-Speech-Video-Generator-and-Uploader | 8f6e1c7fcbd93f7da8f437cb4bf2d9c67c2932c2 | [
"MIT"
] | 1 | 2021-01-25T17:09:35.000Z | 2021-01-25T17:09:35.000Z | import settings
import cv2
from VideoTypes import imageframe, standardredditformat
import generatemovie
import generatorclient
import datetime
import os
import shutil
import videouploader
import random
import pickle
from time import sleep
videoscripts = []
| 41.846715 | 117 | 0.631083 | import settings
import cv2
from VideoTypes import imageframe, standardredditformat
import generatemovie
import generatorclient
import datetime
import os
import shutil
import videouploader
import random
import pickle
from time import sleep
videoscripts = []
class VideoScriptEngine():
    """Hold one video script and drive its render/save/upload lifecycle.

    Instances register themselves in the module-level 'videoscripts' list
    and pickle themselves to disk on every state change so scripts survive
    a process restart.
    """
    def __init__(self, scriptno=None, scripttitle=None, author=None, ups=None, final_script=None, videotype=None,
                 video_settings=None, music_type=None, thumbnail=None, characters_amount=None, youtube_title=None,
                 youtube_description=None, youtube_tags=None):
        """Store the script data, derive output file paths and persist.

        NOTE(review): 'thumbnail' appears to be expected as an RGB image
        array (it is passed to cv2.cvtColor in renderVideo) -- confirm.
        """
        self.scriptno = scriptno
        self.final_script = final_script
        self.scripttitle = scripttitle
        self.author = author
        self.ups = ups
        self.videotype = videotype
        self.video_settings = video_settings
        self.music_type = music_type
        self.thumbnail = thumbnail
        self.characters_amount = characters_amount
        self.youtube_title = youtube_title
        self.youtube_description = youtube_description
        self.youtube_tags = youtube_tags
        # All output files for this script live under .../vid<scriptno>/.
        standard_path = settings.finishedvideosdirectory + "/vid%s/" % self.scriptno
        self.vid_path = standard_path + "vid%s.mp4" % self.scriptno
        self.vid_description = standard_path + "description.txt"
        self.vid_thumbnail = standard_path + "thumbnail.png"
        self.vid_tags = standard_path + "youtubetags.txt"
        self.vid_title = standard_path + "youtubetitle.txt"
        self.isRendered = False
        # Register globally and persist immediately so the script is not
        # lost if the process dies before rendering.
        videoscripts.append(self)
        self.save()
    def renderVideo(self):
        """Render the video for this script and write its metadata files.

        Marks the script as rendered even when rendering fails (failed
        scripts are skipped rather than retried). If 'settings.exportOffline'
        is set, the upload details are reported, the raw save deleted and
        the instance deregistered.
        """
        if not self.isRendered:
            print("Started Rendering Script %s" % self.scriptno)
            # Clear temporary frames left over from a previous render.
            imageframe.deleteAllFilesInPath(settings.tempPath)
            try:
                video_format = standardredditformat.StandardReddit("shit", self.video_settings, self.music_type)
                formatted_script = imageframe.parseScript(self.final_script)
                newMovie = generatemovie.Movie(video_format, formatted_script,
                                               (self.author, self.scripttitle, self.ups), self.scriptno)
                export_location = newMovie.renderVideo()
                # Thumbnail write is best effort; a missing or invalid
                # thumbnail must not abort the render.
                try:
                    cv2.imwrite(export_location + "/thumbnail.png", cv2.cvtColor(self.thumbnail, cv2.COLOR_RGB2BGR))
                except Exception:
                    pass
                writeTextToFile(export_location + "/description.txt", self.youtube_description)
                writeTextToFile(export_location + "/youtubetitle.txt", self.youtube_title)
                writeTextToFile(export_location + "/youtubetags.txt", self.youtube_tags)
            except Exception as e:
                # Deliberate best-effort handling: log and fall through so
                # the script is still marked rendered and skipped.
                print(e)
                print("Sorry, a error occured rendering this video. Skipping it")
            self.isRendered = True
            self.save()
            if settings.exportOffline:
                generatorclient.updateUploadDetails(self.scriptno, None, None)
                deleteRawSave(self.scriptno)
                videoscripts.remove(self)
                print("Video Successfully exported offline")
        else:
            print("VID GEN script %s already rendered" % self.scriptno)
    def save(self):
        """Pickle this instance to the raw-save directory.

        NOTE(review): the path uses a backslash separator while the rest of
        the module builds paths with '/' -- Windows-only as written; confirm
        the intended platform.
        """
        path_name = settings.rawvideosaves + "\\rawvideo%s.save" % self.scriptno
        with open(path_name, 'wb') as pickle_file:
            pickle.dump(self, pickle_file)
        print("VID GEN Saved vid %s to %s" % (self.scriptno, path_name))
    def uploadVideo(self):
        """Upload the rendered video with a scheduled release time.

        On success the instance is deregistered and its rendered folder
        deleted; returns None. Returns False when the upload fails.
        """
        description = (loadTextFile(self.vid_description)).encode("utf8")
        title = (loadTextFile(self.vid_title)).encode("utf8")
        tags = loadTextFile(self.vid_tags)
        # title, description, tags, thumbnailpath, filepath
        time_to_upload = calculateUploadTime()
        print("Uploading video %s, sceduled release %s" % (self.scriptno, time_to_upload))
        # Convert 'YYYY-MM-DD HH:MM:SS' into the ISO-8601 form expected for
        # scheduled publishing.
        success = videouploader.upload(title, description, tags, self.vid_thumbnail, self.vid_path,
                                       time_to_upload.replace(" ", "T") + ".0Z")
        if success:
            print("Successfully Uploaded video %s" % self.scriptno)
            now = datetime.datetime.now()
            time_uploaded = now.strftime('%Y-%m-%d %H:%M:%S')
            generatorclient.updateUploadDetails(self.scriptno, time_uploaded, time_to_upload)
            print("Done Uploading Video %s" % self.scriptno)
            videoscripts.remove(self)
            deleteRenderedVideoFolder(self.scriptno)
        else:
            return False
def loadTextFile(file):
    """Read and return the entire contents of a UTF-8 text file.

    :param file: Path of the file to read.
    :returns: File contents as a single string.
    """
    # 'with' guarantees the handle is closed even if read() raises
    # (the original leaked the handle on error).
    with open(file, "r", encoding="utf8") as text_file:
        return text_file.read()
def writeTextToFile(location, text):
    """Write 'text' to the file at 'location' as UTF-8, replacing any
    existing contents."""
    with open(location, "w", encoding="utf8") as out_file:
        out_file.write(text)
def deleteRawSave(scriptno):
    """Remove the pickled raw save for script number 'scriptno', if any."""
    save_path = settings.rawvideosaves + "/rawvideo%s.save" % scriptno
    try:
        os.remove(save_path)
    except FileNotFoundError:
        print("Couldn't find save and delete for %s" % scriptno)
    else:
        print("Removed raw save %s at %s" % (scriptno, save_path))
def deleteRenderedVideoFolder(scriptno):
    """Delete the finished-video folder for 'scriptno' and its raw save."""
    folder_path = settings.finishedvideosdirectory + "/vid%s" % scriptno
    shutil.rmtree(folder_path)
    deleteRawSave(scriptno)
def calculateUploadTime():
    """Return a scheduled upload time as a 'YYYY-MM-DD HH:MM:SS' string.

    Picks a random hour between 16 and 18 today; if that time has already
    passed, schedules two hours from now instead. Minutes and seconds are
    zeroed so uploads land on the hour.
    """
    now = datetime.datetime.now()
    random_hour = random.randint(16, 18)
    suggested = now.replace(hour=random_hour)
    if suggested < now:
        # timedelta is safe across midnight; the original
        # now.replace(hour=now.hour + 2) raised ValueError after 21:59.
        suggested = now + datetime.timedelta(hours=2)
    suggested = suggested.replace(minute=0, second=0, microsecond=0)
    return suggested.strftime('%Y-%m-%d %H:%M:%S')
| 5,184 | 5 | 266 |
c7cec01c629502ed181bf15521238a3423e46732 | 258 | py | Python | start.py | ddpaimon/treedbclient | ff03343556090c0398ee1130a91b386e9e2f4cec | [
"MIT"
] | null | null | null | start.py | ddpaimon/treedbclient | ff03343556090c0398ee1130a91b386e9e2f4cec | [
"MIT"
] | null | null | null | start.py | ddpaimon/treedbclient | ff03343556090c0398ee1130a91b386e9e2f4cec | [
"MIT"
] | null | null | null | from tkinter import *
from main_window import MainWindow
if __name__ == "__main__":
root = Tk()
root.columnconfigure(0, weight=1)
root.columnconfigure(2, weight=1)
root.rowconfigure(0, weight=1)
m = MainWindow(root)
root.mainloop()
| 21.5 | 37 | 0.686047 | from tkinter import *
from main_window import MainWindow
# Entry point: build the Tk root window, let its grid stretch with the
# window, then hand control to MainWindow and the Tk event loop.
if __name__ == "__main__":
    root = Tk()
    # weight=1 lets these grid cells grow when the window is resized.
    root.columnconfigure(0, weight=1)
    root.columnconfigure(2, weight=1)
    root.rowconfigure(0, weight=1)
    m = MainWindow(root)
    root.mainloop()
| 0 | 0 | 0 |
a927846d4e9a598f9eff23e33973c320f1038c9d | 28,220 | py | Python | sim_db/sim_db_lib.py | task123/sim_db | 892934693fc166640f6cbf7af905a2c0d48ea99b | [
"MIT"
] | null | null | null | sim_db/sim_db_lib.py | task123/sim_db | 892934693fc166640f6cbf7af905a2c0d48ea99b | [
"MIT"
] | null | null | null | sim_db/sim_db_lib.py | task123/sim_db | 892934693fc166640f6cbf7af905a2c0d48ea99b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Read and write parameters, results and metadata to the 'sim_db' database."""
# Copyright (C) 2017-2019 Håkon Austlid Taskén <hakon.tasken@gmail.com>
# Licenced under the MIT License.
import sim_db.src_command_line_tool.commands.helpers as helpers
import sqlite3
import argparse
import subprocess
import time
import hashlib
import threading
import os
import sys
class SimDB:
    """To interact with the **sim_db** database.

    For an actual simulation it should be initialised at the very start of the
    simulation (with 'store_metadata' set to True) and closed with
    :func:`~SimDB.close` at the very end of the simulation. This must be done
    to add the correct metadata.

    For multithreading/multiprocessing each thread/process MUST have its
    own connection (instance of this class) and MUST provide it with its rank.

    NOTE(review): several private helpers used below
    (__read_from_command_line_arguments, __is_a_git_project, __check_type,
    __add_column_if_not_exists_and_check_type, __convert_to_value_string,
    __escape_quote_with_two_quotes) and the ColumnError exception are not
    shown in this excerpt and are assumed to be defined elsewhere in the
    file -- confirm before refactoring further.
    """

    def __init__(self, store_metadata=True, db_id=None, rank=None,
                 only_write_on_rank=0):
        """Connect to the **sim_db** database.

        :param store_metadata: If False, no metadata is added to the database.
            Typically used when postprocessing (visualizing) data from a
            simulation.
        :type store_metadata: bool
        :param db_id: ID number of the simulation parameters in the **sim_db**
            database. If it is 'None', then it is read from the argument passed
            to the program after option '--id'.
        :type db_id: int
        :param rank: Number identifing the calling process and/or thread.
            (Typically the MPI or OpenMP rank.) If provided, only the 'rank'
            matching 'only_write_on_rank' will write to the database to avoid
            too much concurrent writing to the database. Single process and
            threaded programs may ignore this, while
            multithreading/multiprocessing programs need to provide it.
        :type rank: int
        :param only_write_on_rank: Number identifing the only process/thread
            that will write to the database. Only used if 'rank' is provided.
        :type only_write_on_rank: int
        """
        self.rank = rank
        self.only_write_on_rank = only_write_on_rank
        self.start_time = time.time()
        self.store_metadata = store_metadata
        self.id, self.path_proj_root = self.__read_from_command_line_arguments(
                db_id)
        self.db = helpers.connect_sim_db()
        self.db_cursor = self.db.cursor()
        self.column_names = []
        self.column_types = []
        if (self.store_metadata
                and (self.rank == None or self.rank == self.only_write_on_rank)):
            self.write('status', 'running')
            self.write('time_started', self.__get_date_and_time_as_string())
        if (self.store_metadata and self.__is_a_git_project()
                and (self.rank == None or self.rank == self.only_write_on_rank)):
            # Record the exact state of the code the simulation runs with.
            self.write(column="git_hash",
                       value=self.__get_git_output('git rev-parse HEAD'))
            self.write(column="commit_message",
                       value=self.__get_git_output(
                               'git log -n 1 --format=%B HEAD'))
            self.write(column="git_diff_stat",
                       value=self.__get_git_output('git diff HEAD --stat'))
            git_diff = self.__get_git_output('git diff HEAD')
            if len(git_diff) > 3000:
                warning = "WARNING: Diff limited to first 3000 characters.\n"
                git_diff = warning + '\n' + git_diff[0:3000] + '\n\n' + warning
            self.write(column="git_diff", value=git_diff)

    def __get_git_output(self, git_command):
        """Run 'git_command' in the project's root directory and return its
        standard output decoded as ascii (undecodable bytes replaced)."""
        # 'with' closes the devnull handle; the original duplicated this
        # Popen boilerplate four times and leaked one handle per call.
        with open(os.devnull, 'w') as devnull:
            proc = subprocess.Popen(
                    ['cd "{0}"; {1}'.format(self.path_proj_root, git_command)],
                    stdout=subprocess.PIPE,
                    stderr=devnull,
                    shell=True)
            (out, err) = proc.communicate()
        return out.decode('ascii', 'replace')

    def read(self, column, check_type_is=''):
        """Read parameter in 'column' from the database.

        Return None if parameter is empty.

        :param column: Name of the column the parameter is read from.
        :type column: str
        :param check_type_is: Throws ValueError if type does not match
            'check_type_is'. The valid types are the strings 'int', 'float',
            'bool', 'string' and 'int/float/bool/string array' or the types
            int, float, bool, str and list.
        :raises ColumnError: If column does not exist.
        :raises ValueError: If return type does not match 'check_type_is'.
        :raises sqlite3.OperationalError: Waited more than 5 seconds to read
            from the database, because other threads/processes are busy writing
            to it. Way too much concurrent writing is done and it indicates an
            design error in the user program.
        """
        if column not in self.column_names:
            # Refresh the cached column list; a column may have been added
            # since the cache was filled.
            self.column_names, self.column_types = (
                    helpers.get_db_column_names_and_types(self.db_cursor))
            if column not in self.column_names:
                raise ColumnError("Column, {0}, is NOT a column in the "
                                  "database.".format(column))
        self.db_cursor.execute("SELECT {0} FROM runs WHERE id={1}".format(
                column, self.id))
        value = self.db_cursor.fetchone()
        if value != None:
            value = value[0]
        value = self.__check_type(check_type_is, column, self.column_names,
                                  self.column_types, value)
        return value

    def write(self, column, value, type_of_value='', only_if_empty=False):
        """Write value to 'column' in the database.

        If 'column' does not exist, a new one is added.

        If value is None and type_of_value is not set, the entry under 'column'
        is set to empty.

        For multithreaded and multiprocess programs only a single
        process/thread will write to the database to avoid too much concurrent
        writing to the database. This is as long as the 'rank' was passed to
        SimDB under initialisation.

        :param column: Name of the column the parameter is written to.
        :type column: str
        :param value: New value of the specified entry in the database.
        :param type_of_value: Needed if column does not exist or if
            value is an empty list. The valid types are the strings 'int',
            'float', 'bool', 'string' and 'int/float/bool/string array' or the
            types int, float, bool and str.
        :type type_of_value: str or type
        :param only_if_empty: If True, it will only write to the database if the
            simulation's entry under 'column' is empty.
        :type only_if_empty: bool
        :raises ValueError: If column exists, but type does not match, or
            empty list is passed without type_of_value given.
        """
        # For multithreaded/multiprocess programs only a single
        # process/thread does any writing.
        if self.rank != None and self.rank != self.only_write_on_rank:
            return
        self.__add_column_if_not_exists_and_check_type(column, type_of_value,
                                                       value)
        value_string = self.__convert_to_value_string(value, type_of_value)
        value_string = self.__escape_quote_with_two_quotes(value_string)
        type_dict = dict(zip(self.column_names, self.column_types))
        # 'and type(value != None) != bool' allow numpy arrays to be check
        # without importing numpy and thereby relying on it being availble.
        if (type_dict[column] == 'TEXT'
                and (type(value != None) != bool or value != None)):
            value_string = "'{0}'".format(value_string)
        if only_if_empty:
            # The 'IS NULL' guard in the SQL makes this a no-op when the
            # entry is non-empty, even under concurrent writers. (The
            # original additionally required self.is_empty(column) here and
            # otherwise fell through to the unconditional UPDATE below,
            # silently overwriting non-empty entries despite
            # only_if_empty=True.)
            self.db_cursor.execute("UPDATE runs SET \"{0}\" = {1} WHERE \"id\" "
                "= {2} AND {0} IS NULL".format(column, value_string, self.id))
            self.db.commit()
        else:
            self.db_cursor.execute(
                    "UPDATE runs SET \"{0}\" = {1} WHERE id = {2}".format(
                            column, value_string, self.id))
            self.db.commit()

    def unique_results_dir(self, path_directory):
        """Get path to subdirectory in 'path_directory' unique to simulation.

        The subdirectory will be named 'date_time_name_id' and is intended to
        store results in. If 'results_dir' in the database is empty, a new and
        unique directory is created and the path stored in 'results_dir'.
        Otherwise the path in 'results_dir' is just returned.

        :param path_directory: Path to directory of which to make a
            subdirectory. If 'path_directory' starts with 'root/', that part
            will be replaced by the full path of the root directory of the
            project.
        :type path_directory: str
        :returns: Full path to new subdirectory.
        :rtype: str
        """
        results_dir = self.read("results_dir")
        if results_dir == None:
            if self.rank == None or self.rank == self.only_write_on_rank:
                if (len(path_directory) >= 5
                        and path_directory[0:5] == 'root/'):
                    path_directory = os.path.join(self.path_proj_root,
                                                  path_directory[5:])
                results_dir = os.path.join(path_directory,
                        self.__get_date_and_time_as_string())
                results_dir += '_' + str(self.read('name')) + '_' + str(self.id)
                results_dir = os.path.abspath(os.path.realpath(results_dir))
                os.mkdir(results_dir)
                self.write(column="results_dir", value=results_dir,
                           only_if_empty=False)
            else:
                while results_dir == None:
                    # The writing rank is still creating the directory; back
                    # off briefly so polling does not hammer the database.
                    time.sleep(0.1)
                    results_dir = self.read("results_dir")
        return results_dir

    def column_exists(self, column):
        """Return True if column is a column in the database.

        :raises sqlite3.OperationalError: Waited more than 5 seconds to read
            from the database, because other threads/processes are busy writing
            to it. Way too much concurrent writing is done and it indicates an
            design error in the user program.
        """
        if column in self.column_names:
            return True
        # Refresh the cached column list before concluding it is missing.
        self.column_names, self.column_types = (
                helpers.get_db_column_names_and_types(self.db_cursor))
        return column in self.column_names

    def is_empty(self, column):
        """Return True if entry in the database under 'column' is empty.

        :raises sqlite3.OperationalError: Waited more than 5 seconds to read
            from the database, because other threads/processes are busy writing
            to it. Way too much concurrent writing is done and it indicates an
            design error in the user program.
        """
        return self.read(column) == None

    def set_empty(self, column):
        """Set entry under 'column' in the database to empty."""
        self.write(column, None)

    def get_id(self):
        """Return 'ID' of the connected simulation."""
        return self.id

    def get_path_proj_root(self):
        """Return the path to the root directory of the project.

        The project's root directory is assumed to be where the '.sim_db/'
        directory is located.
        """
        return self.path_proj_root

    def update_sha1_executables(self, paths_executables):
        """Update the 'sha1_executables' column in the database.

        Sets the entry to the sha1 of all the executables. The order will
        affect the value.

        :param paths_executables: List of full paths to executables.
        :type paths_executables: [str]
        :raises sqlite3.OperationalError: Waited more than 5 seconds to write
            to the database, because other threads/processes are busy writing
            to it. Way too much concurrent writing is done and it indicates an
            design error in the user program.
        """
        sha1 = hashlib.sha1()
        # Fixed: the original iterated an undefined name ('executables'),
        # read in text mode (hashlib requires bytes) and stored the hash
        # object itself instead of its hex digest.
        for executable in paths_executables:
            with open(executable, 'rb') as executable_file:
                sha1.update(executable_file.read())
        self.write('sha1_executables', sha1.hexdigest())

    def delete_from_database(self):
        """Delete simulation from database.

        :raises sqlite3.OperationalError: Waited more than 5 seconds to write
            to the database, because other threads/processes are busy writing
            to it. Way too much concurrent writing is done and it indicates an
            design error in the user program.
        """
        self.db_cursor.execute("DELETE FROM runs WHERE id = {0}".format(
                self.id))
        self.db.commit()
        # There is no entry left to attach metadata to on close().
        self.store_metadata = False

    def close(self):
        """Closes connection to **sim_db** database and add metadata."""
        if (self.store_metadata
                and (self.rank == None or self.rank == self.only_write_on_rank)):
            used_time = time.time() - self.start_time
            # Minutes are the remainder after whole hours. (The original
            # wrote the total number of minutes, e.g. '2h 120m ...'.)
            used_walltime = "{0}h {1}m {2}s".format(
                    int(used_time / 3600), int((used_time % 3600) / 60),
                    used_time % 60)
            self.write('used_walltime', used_walltime)
            self.write('status', 'finished')
        self.db_cursor.close()
        self.db.close()

    def __get_date_and_time_as_string(self):
        """Return data and time as 'Year-Month-Date_Hours-Minutes-Seconds'."""
        return time.strftime("%Y-%b-%d_%H-%M-%S")
def add_empty_sim(store_metadata=False):
    """Add an empty entry into the database and SimDB connected to it.

    :param store_metadata: If False, no metadata is added to the database.
        Typically used when postprocessing (visualizing) data from a simulation.
    :type store_metadata: bool
    """
    db = helpers.connect_sim_db()
    db_cursor = db.cursor()
    # Build the comma separated "name type" specification for the default
    # schema, preserving the column order of helpers.default_db_columns.
    column_spec = ", ".join(
            "{0} {1}".format(key, str(helpers.default_db_columns[key]))
            for key in helpers.default_db_columns)
    db_cursor.execute("CREATE TABLE IF NOT EXISTS runs ({0});".format(
        column_spec))
    db_cursor.execute("INSERT INTO runs DEFAULT VALUES")
    db_id = db_cursor.lastrowid
    db.commit()
    db_cursor.close()
    db.close()
    return SimDB(db_id=db_id, store_metadata=store_metadata)
| 43.684211 | 80 | 0.545216 | # -*- coding: utf-8 -*-
"""Read and write parameters, results and metadata to the 'sim_db' database."""
# Copyright (C) 2017-2019 Håkon Austlid Taskén <hakon.tasken@gmail.com>
# Licenced under the MIT License.
import sim_db.src_command_line_tool.commands.helpers as helpers
import sqlite3
import argparse
import subprocess
import time
import hashlib
import threading
import os
import sys
class SimDB:
"""To interact with the **sim_db** database.
For an actuall simulation it should be initialised at the very start of the
simulation (with 'store_metadata' set to True) and closed with
:func:`~SimDB.close` at the very end of the simulation. This must be done
to add the corrrect metadata.
For multithreading/multiprocessing each thread/process MUST have its
own connection (instance of this class) and MUST provide it with its rank.
"""
def __init__(self, store_metadata=True, db_id=None, rank=None,
only_write_on_rank=0):
"""Connect to the **sim_db** database.
:param store_metadata: If False, no metadata is added to the database.
Typically used when postprocessing (visualizing) data from a
simulation.
:type store_metadata: bool
:param db_id: ID number of the simulation parameters in the **sim_db**
database. If it is 'None', then it is read from the argument passed
to the program after option '--id'.
:type db_id: int
:param rank: Number identifing the calling process and/or thread.
(Typically the MPI or OpenMP rank.) If provided, only the 'rank'
matching 'only_write_on_rank' will write to the database to avoid
too much concurrent writing to the database. Single process and
threaded programs may ignore this, while
multithreading/multiprocessing programs need to provide it.
:type rank: int
:param only_write_on_rank: Number identifing the only process/thread
that will write to the database. Only used if 'rank' is provided.
:type only_write_on_rank: int
"""
self.rank = rank
self.only_write_on_rank = only_write_on_rank
self.start_time = time.time()
self.store_metadata = store_metadata
self.id, self.path_proj_root = self.__read_from_command_line_arguments(
db_id)
self.db = helpers.connect_sim_db()
self.db_cursor = self.db.cursor()
self.column_names = []
self.column_types = []
if (self.store_metadata
and (self.rank == None or self.rank == self.only_write_on_rank)):
self.write('status', 'running')
self.write('time_started', self.__get_date_and_time_as_string())
if (self.store_metadata and self.__is_a_git_project()
and (self.rank == None or self.rank == self.only_write_on_rank)):
proc = subprocess.Popen(
[
'cd "{0}"; git rev-parse HEAD'.format(
self.path_proj_root)
],
stdout=subprocess.PIPE,
stderr=open(os.devnull, 'w'),
shell=True)
(out, err) = proc.communicate()
self.write(
column="git_hash",
value=out.decode('ascii', 'replace'))
proc = subprocess.Popen(
[
'cd "{0}"; git log -n 1 --format=%B HEAD'.format(
self.path_proj_root)
],
stdout=subprocess.PIPE,
stderr=open(os.devnull, 'w'),
shell=True)
(out, err) = proc.communicate()
self.write(
column="commit_message",
value=out.decode('ascii', 'replace'))
proc = subprocess.Popen(
[
'cd "{0}"; git diff HEAD --stat'.format(
self.path_proj_root)
],
stdout=subprocess.PIPE,
stderr=open(os.devnull, 'w'),
shell=True)
(out, err) = proc.communicate()
self.write(
column="git_diff_stat",
value=out.decode('ascii', 'replace'))
proc = subprocess.Popen(
['cd "{0}"; git diff HEAD'.format(self.path_proj_root)],
stdout=subprocess.PIPE,
stderr=open(os.devnull, 'w'),
shell=True)
(out, err) = proc.communicate()
out = out.decode('ascii', 'replace')
if len(out) > 3000:
warning = "WARNING: Diff limited to first 3000 characters.\n"
out = warning + '\n' + out[0:3000] + '\n\n' + warning
self.write(column="git_diff", value=out)
def read(self, column, check_type_is=''):
"""Read parameter in 'column' from the database.
Return None if parameter is empty.
:param column: Name of the column the parameter is read from.
:type column: str
:param check_type_is: Throws ValueError if type does not match
'check_type_is'.The valid types the strings 'int', 'float', 'bool',
'string' and 'int/float/bool/string array' or the types int, float,
bool, str and list.
:raises ColumnError: If column do not exists.
:raises ValueError: If return type does not match 'check_type_is'.
:raises sqlite3.OperationalError: Waited more than 5 seconds to read
from the database, because other threads/processes are busy writing
to it. Way too much concurrent writing is done and it indicates an
design error in the user program.
"""
if column not in self.column_names:
self.column_names, self.column_types = (
helpers.get_db_column_names_and_types(self.db_cursor))
if column not in self.column_names:
raise ColumnError("Column, {0}, is NOT a column in the "
"database.".format(column))
self.db_cursor.execute("SELECT {0} FROM runs WHERE id={1}".format(
column, self.id))
value = self.db_cursor.fetchone()
if value != None:
value = value[0]
value = self.__check_type(check_type_is, column, self.column_names,
self.column_types, value)
return value
def write(self, column, value, type_of_value='', only_if_empty=False):
"""Write value to 'column' in the database.
If 'column' does not exists, a new is added.
If value is None and type_of_value is not set, the entry under 'column'
is set to empty.
For multithreaded and multiprocess programs only a single will
process/thread write to the database to avoid too much concurrent
writing to the database. This is as long as the 'rank' was passed to
SimDB under initialisation.
:param column: Name of the column the parameter is read from.
:type column: str
:param value: New value of the specified entry in the database.
:param type_of_value: Needed if column does note exists or if
value is empty list. The valid types the strings 'int', 'float',
'bool', 'string' and 'int/float/bool/string array' or the types int,
float, bool and str.
:type type_of_value: str or type
:param only_if_empty: If True, it will only write to the database if the
simulation's entry under 'column' is empty.
:type only_if_empty: bool
:raises ValueError: If column exists, but type does not match, or
empty list is passed without type_of_value given.
"""
# For multithreaded/multiprocess programs only a single process/thread
# does any writing.
if self.rank != None and self.rank != self.only_write_on_rank:
return
self.__add_column_if_not_exists_and_check_type(column, type_of_value,
value)
value_string = self.__convert_to_value_string(value, type_of_value)
value_string = self.__escape_quote_with_two_quotes(value_string)
type_dict = dict(zip(self.column_names, self.column_types))
# 'and type(value != None) != bool' allow numpy arrays to be check
# without importing numpy and thereby relying on it being availble.
if (type_dict[column] == 'TEXT'
and (type(value != None) != bool or value != None)):
value_string = "'{0}'".format(value_string)
if only_if_empty and self.is_empty(column):
self.db_cursor.execute("UPDATE runs SET \"{0}\" = {1} WHERE \"id\" "
"= {2} AND {0} IS NULL".format(column, value_string, self.id))
self.db.commit()
else:
self.db_cursor.execute(
"UPDATE runs SET \"{0}\" = {1} WHERE id = {2}".format(
column, value_string, self.id))
self.db.commit()
    def unique_results_dir(self, path_directory):
        """Get path to subdirectory in 'path_directory' unique to simulation.
        The subdirectory will be named 'date_time_name_id' and is intended to
        store results in. If 'results_dir' in the database is empty, a new and
        unique directory is created and the path stored in 'results_dir'.
        Otherwise the path in 'results_dir' is just returned.
        :param path_directory: Path to directory of which to make a
            subdirectory. If 'path_directory' starts with 'root/', that part
            will be replaced by the full path of the root directory of the
            project.
        :type path_directory: str
        :returns: Full path to new subdirectory.
        :rtype: str
        """
        results_dir = self.read("results_dir")
        if results_dir == None:
            # Only the designated writer rank creates the directory; the
            # other ranks wait below until the path appears in the database.
            if self.rank == None or self.rank == self.only_write_on_rank:
                # Expand a leading 'root/' into the project's root directory.
                if (len(path_directory) >= 5
                        and path_directory[0:5] == 'root/'):
                    path_directory = os.path.join(self.path_proj_root,
                                                  path_directory[5:])
                results_dir = os.path.join(path_directory,
                        self.__get_date_and_time_as_string())
                results_dir += '_' + str(self.read('name')) + '_' + str(self.id)
                results_dir = os.path.abspath(os.path.realpath(results_dir))
                os.mkdir(results_dir)
                self.write(column="results_dir", value=results_dir,
                           only_if_empty=False)
            else:
                # Busy-wait on database reads until the writer rank has
                # stored the path. NOTE(review): there is no timeout here —
                # confirm spinning is acceptable if the writer rank fails.
                while results_dir == None:
                    results_dir = self.read("results_dir")
        return results_dir
def column_exists(self, column):
"""Return True if column is a column in the database.
:raises sqlite3.OperationalError: Waited more than 5 seconds to read
from the database, because other threads/processes are busy writing
to it. Way too much concurrent writing is done and it indicates an
design error in the user program.
"""
if column in self.column_names:
return True
else:
self.column_names, self.column_types = (
helpers.get_db_column_names_and_types(self.db_cursor))
if column in self.column_names:
return True
else:
return False
def is_empty(self, column):
"""Return True if entry in the database under 'column' is empty.
:raises sqlite3.OperationalError: Waited more than 5 seconds to read
from the database, because other threads/processes are busy writing
to it. Way too much concurrent writing is done and it indicates an
design error in the user program.
"""
value = self.read(column)
if value == None:
return True
else:
return False
def set_empty(self, column):
"""Set entry under 'column' in the database to empty."""
self.write(column, None)
    def get_id(self):
        """Return 'ID' of the connected simulation.

        The ID identifies the simulation's row in the 'runs' table.
        """
        return self.id
    def get_path_proj_root(self):
        """Return the path to the root directory of the project.
        The project's root directory is assumed to be where the '.sim_db/'
        directory is located. The path is resolved once during
        initialisation and cached on the instance.
        """
        return self.path_proj_root
def update_sha1_executables(self, paths_executables):
"""Update the 'sha1_executable' column in the database.
Sets the entry to the sha1 of all the executables. The order will
affect the value.
:param paths_executables: List of full paths to executables.
:type paths_executables: [str]
:raises sqlite3.OperationalError: Waited more than 5 seconds to write
to the database, because other threads/processes are busy writing
to it. Way too much concurrent writing is done and it indicates an
design error in the user program.
"""
sha1 = hashlib.sha1()
for executable in executables:
with open(executable, 'r') as executable_file:
sha1.update(executable_file.read())
self.write('sha1_executables', sha1)
    def delete_from_database(self):
        """Delete simulation from database.
        :raises sqlite3.OperationalError: Waited more than 5 seconds to write
            to the database, because other threads/processes are busy writing
            to it. Way too much concurrent writing is done and it indicates an
            design error in the user program.
        """
        self.db_cursor.execute("DELETE FROM runs WHERE id = {0}".format(
            self.id))
        self.db.commit()
        # Prevent close() from trying to write metadata for the deleted row.
        self.store_metadata = False
def close(self):
"""Closes connection to **sim_db** database and add metadata."""
if (self.store_metadata
and (self.rank == None or self.rank == self.only_write_on_rank)):
used_time = time.time() - self.start_time
used_walltime = "{0}h {1}m {2}s".format(
int(used_time / 3600), int(used_time / 60), used_time % 60)
self.write('used_walltime', used_walltime)
self.write('status', 'finished')
self.db_cursor.close()
self.db.close()
def __read_from_command_line_arguments(self, db_id):
path_proj_root = None
if db_id == None:
parser = argparse.ArgumentParser()
parser.add_argument(
'--id',
'-i',
type=int,
default=None,
required=True,
help=("<Required> ID of parameters in the database used "
"to run the simulation."))
parser.add_argument(
'--path_proj_root',
'-p',
type=str,
default=None,
help="Path to the root directory of the project.")
args, unknowns = parser.parse_known_args()
db_id = args.id
if args.path_proj_root != None:
path_proj_root = os.path.abspath(args.path_proj_root)
if (path_proj_root == None):
path_proj_root = os.path.dirname(helpers.get_dot_sim_db_dir_path())
else:
if path_proj_root[-1] == '/':
path_proj_root = path_proj_root[:-1]
if (db_id == None):
ValueError("'db_id' is NOT provided to SimDB(db_id=None). If not "
"passed as function parameters, then '--id ID' must be "
"passed to program as command line arguments.")
return (db_id, path_proj_root)
    def __check_type(self,
                     check_type_is,
                     column,
                     column_names,
                     column_types,
                     value=None):
        """Validate 'value' against the column's SQL type and 'check_type_is'.

        TEXT entries are parsed back into bool/str/list via
        __convert_text_to_correct_type. Returns the (possibly converted)
        value; raises ValueError when a non-empty 'check_type_is' does not
        match. An empty 'check_type_is' disables the check.
        """
        type_dict = dict(zip(column_names, column_types))
        type_of_value = type_dict[column]
        if ((check_type_is == 'int' or check_type_is == int)
                and type_of_value == 'INTEGER'):
            correct_type = True
            if value != None:
                try:
                    int(value)
                except:
                    correct_type = False
        elif ((check_type_is == 'float' or check_type_is == float)
              and type_of_value == 'REAL'):
            correct_type = True
            if value != None:
                try:
                    float(value)
                except:
                    correct_type = False
        # 'type(value != None) != bool' allow numpy arrays to be check
        # without importing numpy and thereby relying on it being availble.
        elif (type_of_value == 'TEXT'
              and (type(value != None) != bool or value != None)):
            # Strings from the database must be parsed into their real type.
            if (type(value) == str
                    or (sys.version_info[0] < 3 and type(value) == unicode)):
                value, correct_type = self.__convert_text_to_correct_type(
                    value, check_type_is)
            elif type(value) == bool:
                correct_type = True
            elif check_type_is == 'bool' or check_type_is == bool:
                try:
                    value = bool(value)
                    correct_type = True
                except ValueError:
                    correct_type = False
            else:
                # Anything iterable is accepted as a list value.
                try:
                    value = list(value)
                    correct_type = True
                except TypeError:
                    correct_type = False
        # 'and type(value != None) == bool' allow numpy arrays to be check
        # without importing numpy and thereby relying on it being availble.
        elif (type_of_value == 'TEXT'
              and (type(value == None) == bool and value == None)
              and (check_type_is == 'string' or check_type_is == str
                   or check_type_is == 'bool' or check_type_is == bool
                   or check_type_is == list or check_type_is == 'int array'
                   or check_type_is == 'float array'
                   or check_type_is == 'bool array'
                   or check_type_is == 'string array')):
            # An empty TEXT entry is valid for any TEXT-backed type.
            correct_type = True
        else:
            correct_type = False
        if not correct_type and check_type_is != '':
            raise ValueError("The type is NOT {0}.".format(check_type_is))
        return value
    def __convert_text_to_correct_type(self, value, check_type_is):
        """Parse a TEXT database entry into bool, str or a typed list.

        Lists are stored as e.g. "int[1, 2, 3]" (also 'float', 'string',
        'bool' prefixes). Returns (parsed_value, correct_type) where
        correct_type tells whether the parsed value matches 'check_type_is'.
        """
        correct_type = False
        # A '[' marks the serialised-array format "type[elem, elem, ...]".
        value_split = value.split('[')
        if value == "True":
            value = True
            if (check_type_is == 'bool' or check_type_is == bool):
                correct_type = True
        elif value == "False":
            value = False
            if (check_type_is == 'bool' or check_type_is == bool):
                correct_type = True
        elif len(value_split) == 1:
            # No '[' present: plain string value.
            if (check_type_is == 'string' or check_type_is == str):
                correct_type = True
        else:
            value = []
            if value_split[0].strip() == 'int':
                if (check_type_is == 'int array' or check_type_is == list):
                    correct_type = True
                for element in value_split[1].split(']')[0].split(','):
                    value.append(int(element))
            elif value_split[0].strip() == 'float':
                if (check_type_is == 'float array' or check_type_is == list):
                    correct_type = True
                for element in value_split[1].split(']')[0].split(','):
                    value.append(float(element))
            elif value_split[0].strip() == 'string':
                if (check_type_is == 'string array' or check_type_is == list):
                    correct_type = True
                for i, element in enumerate(
                        value_split[1].split(']')[0].split(',')):
                    # Drop the single space inserted after each ',' when the
                    # array was serialised.
                    if i > 0 and len(element) > 0 and element[0] == ' ':
                        element = element[1:]
                    value.append(str(element))
            elif value_split[0].strip() == 'bool':
                if (check_type_is == 'bool array' or check_type_is == list):
                    correct_type = True
                for i, element in enumerate(
                        value_split[1].split(']')[0].split(',')):
                    if i > 0 and len(element) > 0 and element[0] == ' ':
                        element = element[1:]
                    if element == 'True':
                        element = True
                    elif element == 'False':
                        element = False
                    else:
                        correct_type = False
                    value.append(element)
            else:
                # Unknown prefix before '[': treat as a plain string.
                if (check_type_is == 'string' or check_type_is == str):
                    correct_type = True
                else:
                    correct_type = False
        return value, correct_type
    def __convert_to_value_string(self, value, type_of_value):
        """Serialise 'value' into the string stored in the database.

        Scalars become str(value), None becomes "NULL" and lists become
        "type[elem, elem, ...]". Raises ValueError for unsupported types or
        for an empty list without an array 'type_of_value'.
        """
        if sys.version_info[0] < 3 and type(value) == unicode:
            return value.encode('ascii', 'replace')
        elif (type(value) == int or type(value) == float or type(value) == str
              or type_of_value == 'int' or type_of_value == int
              or type_of_value == 'float' or type_of_value == float
              or type_of_value == 'string' or type_of_value == str):
            return str(value)
        elif type(value) == bool:
            if value:
                return "True"
            else:
                return "False"
        # 'type(value == None) == bool' allow numpy arrays to be check
        # without importing numpy and thereby relying on it being availble.
        elif (type(value == None) == bool and value == None):
            return "NULL"
        else:
            try:
                value = list(value)
            except TypeError:
                raise ValueError("'value' have invalid type.")
            if len(value) > 0:
                # The first element decides the serialised array type.
                # NOTE(review): if the first element's type matches none of
                # the branches below, 'value_string' is never assigned and an
                # UnboundLocalError would be raised — confirm intended.
                if type_of_value == 'int array' or type(value[0]) == int:
                    value_string = "int["
                elif type_of_value == 'float array' or type(value[0]) == float:
                    value_string = "float["
                elif type_of_value == 'string array' or type(value[0]) == str:
                    value_string = "string["
                elif type_of_value == 'bool array' or type(value[0]) == bool:
                    value_string = "bool["
                for element in value:
                    if type(value[0]) == bool:
                        if element:
                            element = "True"
                        else:
                            element = "False"
                    value_string += str(element) + ", "
                # Replace the trailing ", " with the closing bracket.
                value_string = value_string[:-2] + "]"
                return value_string
            else:
                # Empty lists carry no element to infer the type from, so
                # 'type_of_value' must state it explicitly.
                if type_of_value == 'int array':
                    value_string = "int[]"
                elif type_of_value == 'float array':
                    value_string = "float[]"
                elif type_of_value == 'string array':
                    value_string = "string[]"
                elif type_of_value == 'bool array':
                    value_string = "bool[]"
                else:
                    raise ValueError(
                        "The type_of_value must be set to 'int array', "
                        "'float array', 'string array' or 'bool array' "
                        "when a empty list is passed to SimDB.write().")
                return value_string
    def __add_column(self, column, type_of_value):
        """Add 'column' to the 'runs' table.

        The SQL type is INTEGER for int, REAL for float and TEXT for
        everything else. Exits the program when 'type_of_value' is missing.
        """
        if type_of_value == '':
            print("ERROR: Column {0} does not exists in database and "
                  "'type_of_value' must be provided for it to be added."
                  .format(column))
            exit(1)
        # Only the designated writer rank is allowed to alter the schema.
        if self.rank == None or self.rank == self.only_write_on_rank:
            if type_of_value == 'int' or type_of_value == int:
                self.db_cursor.execute(
                    "ALTER TABLE runs ADD COLUMN {0} INTEGER".format(
                        column))
            elif type_of_value == 'float' or type_of_value == float:
                self.db_cursor.execute(
                    "ALTER TABLE runs ADD COLUMN {0} REAL".format(
                        column))
            else:
                self.db_cursor.execute(
                    "ALTER TABLE runs ADD COLUMN {0} TEXT".format(
                        column))
            self.db.commit()
    def __add_column_if_not_exists_and_check_type(self, column, type_of_value,
                                                  value):
        """Ensure 'column' exists and that 'value' matches its type.

        Checks the cached schema first, refreshes it from the database on a
        miss, and only then adds the column (refreshing the cache again).
        """
        if column in self.column_names:
            self.__check_type(type_of_value, column, self.column_names,
                              self.column_types, value=value)
        else:
            self.column_names, self.column_types = (
                helpers.get_db_column_names_and_types(self.db_cursor))
            if column in self.column_names:
                self.__check_type(type_of_value, column, self.column_names,
                                  self.column_types, value=value)
            else:
                # Column really is new: derive its SQL type, type-check the
                # value against it, then create the column.
                if (type_of_value == 'int' or type_of_value == int):
                    column_type = 'INTEGER'
                elif (type_of_value == 'float' or type_of_value == float):
                    column_type = 'REAL'
                else:
                    column_type = 'TEXT'
                self.__check_type(
                    type_of_value,
                    column, [column], [column_type],
                    value=value)
                self.__add_column(column, type_of_value)
                self.column_names, self.column_types = (
                    helpers.get_db_column_names_and_types(self.db_cursor))
def __is_a_git_project(self):
directory = self.path_proj_root
prev_dir = ""
while directory != prev_dir:
if os.path.exists(os.path.join(directory, ".git")):
return True
prev_dir = directory
directory = os.path.dirname(directory)
return False
def __escape_quote_with_two_quotes(self, string):
escaped_string = ""
for letter in string:
if letter == "'":
escaped_string += "''"
else:
escaped_string += letter
return escaped_string
    def __get_date_and_time_as_string(self):
        """Return date and time as 'Year-Month-Date_Hours-Minutes-Seconds'."""
        return time.strftime("%Y-%b-%d_%H-%M-%S")
class ColumnError(Exception):
    """Raised when a requested column does not exist in the database."""
    pass
def add_empty_sim(store_metadata=False):
    """Insert a blank row in the 'runs' table and return a SimDB bound to it.

    :param store_metadata: If False, no metadata is added to the database.
        Typically used when postprocessing (visualizing) data from a
        simulation.
    :type store_metadata: bool
    :returns: SimDB connected to the new, empty simulation entry.
    """
    db = helpers.connect_sim_db()
    db_cursor = db.cursor()
    # Build "name TYPE, name TYPE, ..." from the default column mapping.
    columns_spec = ", ".join(
        key + " " + str(helpers.default_db_columns[key])
        for key in helpers.default_db_columns)
    db_cursor.execute("CREATE TABLE IF NOT EXISTS runs ({0});".format(
        columns_spec))
    db_cursor.execute("INSERT INTO runs DEFAULT VALUES")
    db_id = db_cursor.lastrowid
    db.commit()
    db_cursor.close()
    db.close()
    return SimDB(db_id=db_id, store_metadata=store_metadata)
| 12,088 | 17 | 239 |
36ac6b6d184184fbc0b05f1ff60528053b819d9d | 14,351 | py | Python | SBW_GAN_TF/generator.py | huangleiBuaa/StochasticityBW | 11db7ed0238f0c7cd5f6e336a087fc1d0427b1e6 | [
"BSD-2-Clause"
] | 8 | 2020-03-23T15:46:13.000Z | 2022-03-25T03:11:17.000Z | SBW_GAN_TF/generator.py | huangleiBuaa/StochasticityBW | 11db7ed0238f0c7cd5f6e336a087fc1d0427b1e6 | [
"BSD-2-Clause"
] | null | null | null | SBW_GAN_TF/generator.py | huangleiBuaa/StochasticityBW | 11db7ed0238f0c7cd5f6e336a087fc1d0427b1e6 | [
"BSD-2-Clause"
] | 1 | 2022-03-25T03:11:20.000Z | 2022-03-25T03:11:20.000Z | from tensorflow.python.keras.models import Input, Model
from tensorflow.python.keras.layers import Dense, Reshape, Activation, Conv2D, Conv2DTranspose
from tensorflow.python.keras.layers import BatchNormalization, Add, Embedding, Concatenate
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import backend as K
from gan.utils import glorot_init, resblock, dcblock, get_m_group
from gan.layers.coloring import ConditionalConv11, ConditionalCenterScale, CenterScale, FactorizedConv11
from gan.layers.normalization import DecorelationNormalization
from gan.layers.misc import Split
from layers.spectral_normalized_layers import SNConv2D, SNConditionalConv11, SNDense, SNEmbeding, SNFactorizedConv11
from functools import partial
| 54.774809 | 181 | 0.548464 | from tensorflow.python.keras.models import Input, Model
from tensorflow.python.keras.layers import Dense, Reshape, Activation, Conv2D, Conv2DTranspose
from tensorflow.python.keras.layers import BatchNormalization, Add, Embedding, Concatenate
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import backend as K
from gan.utils import glorot_init, resblock, dcblock, get_m_group
from gan.layers.coloring import ConditionalConv11, ConditionalCenterScale, CenterScale, FactorizedConv11
from gan.layers.normalization import DecorelationNormalization
from gan.layers.misc import Split
from layers.spectral_normalized_layers import SNConv2D, SNConditionalConv11, SNDense, SNEmbeding, SNFactorizedConv11
from functools import partial
def create_norm(norm, coloring,
                decomposition='zca', iter_num=5, whitten_m=0, coloring_m=0, instance_norm=0, device='cpu',
                cls=None, number_of_classes=None, filters_emb=10,
                uncoditional_conv_layer=Conv2D, conditional_conv_layer=ConditionalConv11,
                factor_conv_layer=FactorizedConv11):
    """Build a factory for a normalization + re-coloring layer stack.

    'norm' selects the normalization part: 'n' (none), 'b' (batch norm
    without affine), 'd' (decorrelation/whitening), 'dr' (decorrelation with
    renorm). 'coloring' selects the re-parameterisation applied afterwards,
    combining (u)nconditional / (c)onditional / (f)actorized center-scale
    ('cs') or 1x1 conv ('conv') variants, or 'n' for none.

    Returns a callable (axis, name) -> (tensor -> tensor).
    """
    assert norm in ['n', 'b', 'd', 'dr']
    assert coloring in ['ucs', 'ccs', 'uccs', 'uconv', 'fconv', 'ufconv', 'cconv', 'ucconv', 'ccsuconv', 'n']
    if norm == 'n':
        norm_layer = lambda axis, name: (lambda inp: inp)
    elif norm == 'b':
        norm_layer = lambda axis, name: BatchNormalization(axis=axis, center=False, scale=False, name=name)
    elif norm == 'd':
        norm_layer = lambda axis, name: DecorelationNormalization(name=name,
                                                                  m_per_group=whitten_m,
                                                                  decomposition=decomposition,
                                                                  iter_num=iter_num,
                                                                  instance_norm=instance_norm,
                                                                  device=device)
    elif norm == 'dr':
        norm_layer = lambda axis, name: DecorelationNormalization(name=name,
                                                                  m_per_group=whitten_m,
                                                                  decomposition=decomposition,
                                                                  iter_num=iter_num,
                                                                  instance_norm=instance_norm,
                                                                  renorm=True)
    if coloring == 'ccs':
        # Conditional center/scale, selected by the class label 'cls'.
        after_norm_layer = lambda axis, name: lambda x: ConditionalCenterScale(number_of_classes=number_of_classes,
                                                                               axis=axis, name=name)([x, cls])
    elif coloring == 'ucs':
        after_norm_layer = lambda axis, name: lambda x: CenterScale(axis=axis, name=name)(x)
    elif coloring == 'uccs':
        # Sum of a conditional and an unconditional center/scale.
        def after_norm_layer(axis, name):
            def f(x):
                c = ConditionalCenterScale(number_of_classes=number_of_classes, axis=axis, name=name + '_c')([x, cls])
                u = CenterScale(axis=axis, name=name + '_u')(x)
                out = Add(name=name + '_a')([c, u])
                return out
            return f
    elif coloring == 'cconv':
        # Conditional 1x1 conv; channels are optionally split into groups of
        # size 'coloring_m' handled independently and re-concatenated.
        def after_norm_layer(axis, name):
            def f(x):
                coloring_group, m = get_m_group(x, coloring_m, axis)
                if coloring_group > 1:
                    splits = Split(coloring_group, axis)(x)
                    outs = []
                    for i, split in enumerate(splits):
                        split_out = conditional_conv_layer(filters=m, number_of_classes=number_of_classes, name=name+str(i))([split, cls])
                        outs.append(split_out)
                    out = tf.keras.layers.Concatenate(axis)(outs)
                else:
                    out = conditional_conv_layer(filters=K.int_shape(x)[axis], number_of_classes=number_of_classes,
                                                 name=name)([x, cls])
                return out
            return f
    elif coloring == 'fconv':
        # Factorized (embedding-based) conditional 1x1 conv.
        def after_norm_layer(axis, name):
            def f(x):
                coloring_group, m = get_m_group(x, coloring_m, axis)
                if coloring_group > 1:
                    splits = Split(coloring_group, axis)(x)
                    outs = []
                    for i, split in enumerate(splits):
                        split_out = factor_conv_layer(filters=m, number_of_classes=number_of_classes, name=name + '_c'+str(i), filters_emb=filters_emb, use_bias=False)([split, cls])
                        outs.append(split_out)
                    out = tf.keras.layers.Concatenate(axis)(outs)
                else:
                    out = factor_conv_layer(filters=K.int_shape(x)[axis], number_of_classes=number_of_classes, name=name + '_c', filters_emb=filters_emb, use_bias=False)([x, cls])
                return out
            return f
    elif coloring == 'uconv':
        # Unconditional 1x1 conv (optionally grouped).
        def after_norm_layer(axis, name):
            def f(x):
                coloring_group, m = get_m_group(x, coloring_m, axis)
                if coloring_group > 1:
                    splits = Split(coloring_group, axis)(x)
                    outs = []
                    for i, split in enumerate(splits):
                        split_out = uncoditional_conv_layer(filters=m, kernel_size=(1, 1), name=name+str(i))(split)
                        outs.append(split_out)
                    out = tf.keras.layers.Concatenate(axis)(outs)
                else:
                    out = uncoditional_conv_layer(filters=K.int_shape(x)[axis], kernel_size=(1, 1), name=name)(x)
                return out
            return f
    elif coloring == 'ucconv':
        # Sum of conditional and unconditional 1x1 convs.
        def after_norm_layer(axis, name):
            def f(x):
                coloring_group, m = get_m_group(x, coloring_m, axis)
                if coloring_group > 1:
                    splits = Split(coloring_group, axis)(x)
                    cs = []
                    us = []
                    for i, split in enumerate(splits):
                        split_c = conditional_conv_layer(filters=m, number_of_classes=number_of_classes, name=name + '_c'+str(i))([split, cls])
                        # NOTE(review): unlike the 'uconv'/'ufconv' branches
                        # this uses the full channel count instead of the
                        # per-group 'm', so the concatenated 'u' would not
                        # match 'c' in the Add below when coloring_group > 1
                        # — confirm whether 'filters=m' was intended.
                        split_u = uncoditional_conv_layer(kernel_size=(1, 1), filters=K.int_shape(x)[axis], name=name + '_u'+str(i))(split)
                        cs.append(split_c)
                        us.append(split_u)
                    c = tf.keras.layers.Concatenate(axis)(cs)
                    u = tf.keras.layers.Concatenate(axis)(us)
                else:
                    c = conditional_conv_layer(filters=K.int_shape(x)[axis],
                                               number_of_classes=number_of_classes, name=name + '_c')([x, cls])
                    u = uncoditional_conv_layer(kernel_size=(1, 1), filters=K.int_shape(x)[axis], name=name + '_u')(x)
                out = Add(name=name + '_a')([c, u])
                return out
            return f
    elif coloring == 'ccsuconv':
        # Conditional center/scale plus unconditional 1x1 conv.
        def after_norm_layer(axis, name):
            def f(x):
                coloring_group, m = get_m_group(x, coloring_m, axis)
                c = ConditionalCenterScale(number_of_classes=number_of_classes, axis=axis, name=name + '_c')([x, cls])
                if coloring_group > 1:
                    splits = Split(coloring_group, axis)(x)
                    us = []
                    for i, split in enumerate(splits):
                        split_u = uncoditional_conv_layer(kernel_size=(1, 1), filters=m, name=name + '_u'+str(i))(split)
                        us.append(split_u)
                    u = tf.keras.layers.Concatenate(axis)(us)
                else:
                    u = uncoditional_conv_layer(kernel_size=(1, 1), filters=K.int_shape(x)[axis], name=name + '_u')(x)
                out = Add(name=name + '_a')([c, u])
                return out
            return f
    elif coloring == 'n':
        after_norm_layer = lambda axis, name: lambda x: x
    def result_norm(axis, name):
        # Compose: normalization part ('_npart') then re-coloring ('_repart').
        def stack(inp):
            out = inp
            out = norm_layer(axis=axis, name=name + '_npart')(out)
            out = after_norm_layer(axis=axis, name=name + '_repart')(out)
            return out
        return stack
    return result_norm
def make_generator(input_noise_shape=(128,), output_channels=3, input_cls_shape=(1, ),
                   block_sizes=(128, 128, 128), resamples=("UP", "UP", "UP"),
                   first_block_shape=(4, 4, 128), number_of_classes=10, concat_cls=False,
                   block_norm='u', block_coloring='cs', filters_emb=10,
                   last_norm='u', last_coloring='cs',
                   decomposition='cholesky', whitten_m=0, coloring_m=0, iter_num=5, instance_norm=0, device='cpu',
                   gan_type=None, arch='res', spectral=False,
                   before_conv=0,
                   fully_diff_spectral=False, spectral_iterations=1, conv_singular=True,):
    """Build the (optionally class-conditional, spectrally normalised)
    GAN generator and return it as a Keras Model.

    NOTE(review): the defaults block_norm='u'/last_norm='u' and
    block_coloring='cs'/last_coloring='cs' are not accepted by
    create_norm()'s asserts ('u' is not in ['n', 'b', 'd', 'dr']), so
    callers must always pass valid values explicitly — confirm whether the
    defaults should be changed.
    """
    assert arch in ['res', 'dcgan']
    inp = Input(input_noise_shape, name='GInputImage')
    cls = Input(input_cls_shape, dtype='int32', name='GLabel')
    if spectral:
        # Swap every layer constructor for its spectrally-normalised variant.
        conv_layer = partial(SNConv2D, conv_singular=conv_singular,
                             fully_diff_spectral=fully_diff_spectral, spectral_iterations=spectral_iterations)
        cond_conv_layer = partial(SNConditionalConv11,
                                  fully_diff_spectral=fully_diff_spectral, spectral_iterations=spectral_iterations)
        dense_layer = partial(SNDense,
                              fully_diff_spectral=fully_diff_spectral, spectral_iterations=spectral_iterations)
        emb_layer = partial(SNEmbeding, fully_diff_spectral=fully_diff_spectral, spectral_iterations=spectral_iterations)
        factor_conv_layer = partial(SNFactorizedConv11,
                                    fully_diff_spectral=fully_diff_spectral, spectral_iterations=spectral_iterations)
    else:
        conv_layer = Conv2D
        cond_conv_layer = ConditionalConv11
        dense_layer = Dense
        emb_layer = Embedding
        factor_conv_layer = FactorizedConv11
    if concat_cls:
        # Condition by concatenating a class embedding onto the noise vector.
        y = emb_layer(input_dim=number_of_classes, output_dim=first_block_shape[-1])(cls)
        y = Reshape((first_block_shape[-1], ))(y)
        y = Concatenate(axis=-1)([y, inp])
    else:
        y = inp
    # Project the (possibly augmented) noise to the first spatial block.
    y = dense_layer(units=np.prod(first_block_shape), kernel_initializer=glorot_init)(y)
    y = Reshape(first_block_shape)(y)
    block_norm_layer = create_norm(block_norm, block_coloring,
                                   decomposition=decomposition,
                                   whitten_m=whitten_m, coloring_m=coloring_m,
                                   iter_num=iter_num, instance_norm=instance_norm, device=device,
                                   cls=cls, number_of_classes=number_of_classes, filters_emb=filters_emb,
                                   uncoditional_conv_layer=conv_layer, conditional_conv_layer=cond_conv_layer,
                                   factor_conv_layer=factor_conv_layer)
    last_norm_layer = create_norm(last_norm, last_coloring,
                                  decomposition=decomposition,
                                  whitten_m=whitten_m, coloring_m=coloring_m,
                                  iter_num=iter_num, instance_norm=instance_norm, device=device,
                                  cls=cls, number_of_classes=number_of_classes, filters_emb=filters_emb,
                                  uncoditional_conv_layer=conv_layer, conditional_conv_layer=cond_conv_layer,
                                  factor_conv_layer=factor_conv_layer)
    i = 0
    for block_size, resample in zip(block_sizes, resamples):
        if arch == 'res':
            y = resblock(y, kernel_size=(3, 3), resample=resample,
                         nfilters=block_size, name='Generator.' + str(i),
                         norm=block_norm_layer, is_first=False, conv_layer=conv_layer)
        else:
            # TODO: SN DECONV
            y = dcblock(y, kernel_size=(4, 4), resample=resample,
                        nfilters=block_size, name='Generator.' + str(i),
                        norm=block_norm_layer, is_first=False, conv_layer=Conv2DTranspose, before_conv=before_conv)
        i += 1
    y = last_norm_layer(axis=-1, name='Generator.BN.Final')(y)
    y = Activation('relu')(y)
    # Map to the requested number of image channels in [-1, 1] via tanh.
    output = conv_layer(filters=output_channels, kernel_size=(3, 3), name='Generator.Final',
                        kernel_initializer=glorot_init, use_bias=True, padding='same')(y)
    output = Activation('tanh')(output)
    if gan_type is None:
        return Model(inputs=[inp], outputs=output)
    else:
        # Conditional GAN: the label input is part of the model interface.
        return Model(inputs=[inp, cls], outputs=output)
| 13,551 | 0 | 46 |
154902bea517a92105fa6ca2dc639b1aa8ad881c | 1,591 | py | Python | fossor/checks/loadavg.py | NeolithEra/fossor | d8dbdc40f2f16da601c317dfa74b83e3932c9bb3 | [
"BSD-2-Clause"
] | 165 | 2017-12-14T18:44:25.000Z | 2020-12-09T01:48:57.000Z | fossor/checks/loadavg.py | NeolithEra/fossor | d8dbdc40f2f16da601c317dfa74b83e3932c9bb3 | [
"BSD-2-Clause"
] | 12 | 2017-12-14T23:42:45.000Z | 2020-05-29T15:11:02.000Z | fossor/checks/loadavg.py | NeolithEra/fossor | d8dbdc40f2f16da601c317dfa74b83e3932c9bb3 | [
"BSD-2-Clause"
] | 32 | 2017-12-14T17:51:57.000Z | 2020-06-12T13:11:47.000Z | # Copyright 2017 LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license.
# See LICENSE in the project root for license information.
import os
from fossor.checks.check import Check
class LoadAvg(Check):
    '''this Check will compare the current load average summaries against the count of CPU cores
    in play, and will alert the user if there are more processes waiting'''
    # NOTE(review): this copy of the class defines no run() method; it relies
    # on the base Check class (or a fuller copy of this file) providing it.
if __name__ == '__main__':
    # Ad-hoc manual check: instantiate and run once with no variables.
    l = LoadAvg()
    print(l.run({}))
| 43 | 122 | 0.614079 | # Copyright 2017 LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license.
# See LICENSE in the project root for license information.
import os
from fossor.checks.check import Check
class LoadAvg(Check):
    '''this Check will compare the current load average summaries against the count of CPU cores
    in play, and will alert the user if there are more processes waiting'''
    def run(self, variables):
        # A '/proc' directory distinguishes Linux from the BSD/macOS branch.
        ON_LINUX = os.path.isdir('/proc')
        if ON_LINUX:
            # First three fields of /proc/loadavg are the 1/5/15-minute
            # load averages.
            with open('/proc/loadavg') as f:
                contents = f.read().strip()
            load_summaries = [float(i) for i in contents.split()[:3]]
            # Count 'processor' stanzas to get the logical CPU count.
            with open('/proc/cpuinfo') as f:
                cpu_count = len([c for c in f.read().splitlines() if c.startswith('processor')])
        else:
            # Non-Linux fallback: parse the trailing three load averages
            # from 'uptime' (commas stripped) and the CPU count from sysctl.
            uptime, err, return_code = self.shell_call('uptime')
            contents = uptime.strip()
            load_summaries = [float(i.replace(',', '')) for i in contents.split()[-3:]]
            cpu, err, return_code = self.shell_call('sysctl -n hw.ncpu')
            cpu_count = int(cpu.strip())
        # Alert the user if any of the 1, 5, or 15-minute load averages is greater than the processor count to handle them
        if any(c / cpu_count > 1 for c in load_summaries):
            return 'Load average shows processes queued beyond CPU count!\nCPU Count: {0}\nLoad averages: {1}'.format(
                cpu_count,
                ' '.join(str(x) for x in load_summaries)
            )
if __name__ == '__main__':
    # Ad-hoc manual check: instantiate and run once with no variables.
    l = LoadAvg()
    print(l.run({}))
| 1,091 | 0 | 26 |
db7b405dd77b26c6f3e2362687a6b0991b3bd6de | 5,048 | py | Python | tests/app/clients/test_firetext.py | LouisStAmour/notifications-api | 16734595e70113d85fb10689017b2c30bab61fb3 | [
"MIT"
] | 10 | 2020-05-04T14:11:06.000Z | 2022-02-22T19:06:36.000Z | tests/app/clients/test_firetext.py | GouvQC/notification-api | e865b8b92a9a45c7cee006f427dcf77d71b09d6d | [
"MIT"
] | 554 | 2020-05-07T21:56:24.000Z | 2022-03-31T23:04:51.000Z | tests/app/clients/test_firetext.py | LouisStAmour/notifications-api | 16734595e70113d85fb10689017b2c30bab61fb3 | [
"MIT"
] | 4 | 2020-08-27T16:43:29.000Z | 2021-02-17T22:17:27.000Z | from requests import HTTPError
from urllib.parse import parse_qs
from requests.exceptions import ConnectTimeout, ReadTimeout
import pytest
import requests_mock
from app.clients.sms.firetext import get_firetext_responses, SmsClientResponseException, FiretextClientResponseException
| 36.057143 | 120 | 0.724643 | from requests import HTTPError
from urllib.parse import parse_qs
from requests.exceptions import ConnectTimeout, ReadTimeout
import pytest
import requests_mock
from app.clients.sms.firetext import get_firetext_responses, SmsClientResponseException, FiretextClientResponseException
def test_should_return_correct_details_for_delivery():
    # Bug fix: the comparison result was discarded (no 'assert'), so this
    # test could never fail.
    assert get_firetext_responses('0') == 'delivered'
def test_should_return_correct_details_for_bounced():
    """Firetext status '1' must map to 'permanent-failure'."""
    # Bug fix: the original line was a bare comparison whose result was
    # discarded, so this test could never fail.  It must be asserted.
    assert get_firetext_responses('1') == 'permanent-failure'
def test_should_return_correct_details_for_complaint():
    """Firetext status '2' must map to 'pending'."""
    # Bug fix: the original line was a bare comparison whose result was
    # discarded, so this test could never fail.  It must be asserted.
    assert get_firetext_responses('2') == 'pending'
def test_should_be_none_if_unrecognised_status_code():
    """An unknown Firetext status code raises KeyError naming the code."""
    with pytest.raises(KeyError) as exc_info:
        get_firetext_responses('99')
    assert '99' in str(exc_info.value)
def test_send_sms_successful_returns_firetext_response(mocker, mock_firetext_client):
    """A 200 reply whose body reports code 0 is returned to the caller."""
    to = content = reference = 'foo'
    firetext_reply = {
        'data': [],
        'description': 'SMS successfully queued',
        'code': 0,
        'responseData': 1
    }
    with requests_mock.Mocker() as request_mock:
        request_mock.post('https://example.com/firetext', json=firetext_reply, status_code=200)
        response = mock_firetext_client.send_sms(to, content, reference)

    body = response.json()
    assert response.status_code == 200
    assert body['code'] == 0
    assert body['description'] == 'SMS successfully queued'
def test_send_sms_calls_firetext_correctly(mocker, mock_firetext_client):
    """The client POSTs every expected form field to the Firetext URL."""
    recipient = '+447234567890'
    message = 'my message'
    reference = 'my reference'
    with requests_mock.Mocker() as request_mock:
        request_mock.post('https://example.com/firetext', json={'code': 0}, status_code=200)
        mock_firetext_client.send_sms(recipient, message, reference)

    assert request_mock.call_count == 1
    sent_request = request_mock.request_history[0]
    assert sent_request.url == 'https://example.com/firetext'
    assert sent_request.method == 'POST'

    sent_fields = parse_qs(sent_request.text)
    assert sent_fields['apiKey'][0] == 'foo'
    assert sent_fields['from'][0] == 'bar'
    # The leading '+' is stripped from the recipient number.
    assert sent_fields['to'][0] == '447234567890'
    assert sent_fields['message'][0] == message
    assert sent_fields['reference'][0] == reference
def test_send_sms_raises_if_firetext_rejects(mocker, mock_firetext_client):
    """HTTP 200 with a failure body (code 1) raises SmsClientResponseException."""
    to = content = reference = 'foo'
    failure_body = {
        'data': [],
        'description': 'Some kind of error',
        'code': 1,
        'responseData': ''
    }
    with pytest.raises(SmsClientResponseException) as exc, requests_mock.Mocker() as request_mock:
        request_mock.post('https://example.com/firetext', json=failure_body, status_code=200)
        mock_firetext_client.send_sms(to, content, reference)

    assert exc.value.status_code == 200
    assert '"description": "Some kind of error"' in exc.value.text
    assert '"code": 1' in exc.value.text
def test_send_sms_raises_if_firetext_rejects_with_unexpected_data(mocker, mock_firetext_client):
    """A 400 reply with an unrecognised body raises, wrapping the HTTPError."""
    to = content = reference = 'foo'
    with pytest.raises(SmsClientResponseException) as exc, requests_mock.Mocker() as request_mock:
        request_mock.post('https://example.com/firetext', json={"something": "gone bad"}, status_code=400)
        mock_firetext_client.send_sms(to, content, reference)

    assert exc.value.status_code == 400
    assert exc.value.text == '{"something": "gone bad"}'
    assert type(exc.value.exception) == HTTPError
def test_send_sms_override_configured_shortcode_with_sender(mocker, mock_firetext_client):
    """Passing sender= replaces the configured 'from' number in the POST body."""
    recipient = '+447234567890'
    message = 'my message'
    reference = 'my reference'
    with requests_mock.Mocker() as request_mock:
        request_mock.post('https://example.com/firetext', json={'code': 0}, status_code=200)
        mock_firetext_client.send_sms(recipient, message, reference, sender='fromservice')

    sent_fields = parse_qs(request_mock.request_history[0].text)
    assert sent_fields['from'][0] == 'fromservice'
def test_send_sms_raises_if_firetext_rejects_with_connect_timeout(rmock, mock_firetext_client):
    """A connect timeout surfaces as a 504 FiretextClientResponseException."""
    to = content = reference = 'foo'
    with pytest.raises(FiretextClientResponseException) as exc:
        rmock.register_uri('POST', 'https://example.com/firetext', exc=ConnectTimeout)
        mock_firetext_client.send_sms(to, content, reference)

    assert exc.value.status_code == 504
    assert exc.value.text == 'Gateway Time-out'
def test_send_sms_raises_if_firetext_rejects_with_read_timeout(rmock, mock_firetext_client):
    """A read timeout surfaces as a 504 FiretextClientResponseException."""
    to = content = reference = 'foo'
    with pytest.raises(FiretextClientResponseException) as exc:
        rmock.register_uri('POST', 'https://example.com/firetext', exc=ReadTimeout)
        mock_firetext_client.send_sms(to, content, reference)

    assert exc.value.status_code == 504
    assert exc.value.text == 'Gateway Time-out'
| 4,501 | 0 | 253 |
f4b37dcb02cb91970f72659ca8869f6a3bbb5643 | 1,532 | py | Python | day26_lists_NATO_alphabet/main.py | frnkvsk/python100days | 70d607ca58a526f0d66544ed65405b2425718108 | [
"Unlicense"
] | null | null | null | day26_lists_NATO_alphabet/main.py | frnkvsk/python100days | 70d607ca58a526f0d66544ed65405b2425718108 | [
"Unlicense"
] | null | null | null | day26_lists_NATO_alphabet/main.py | frnkvsk/python100days | 70d607ca58a526f0d66544ed65405b2425718108 | [
"Unlicense"
] | null | null | null | import random
# names = ['Alex', 'Beth', 'Carol', 'Dave', 'Kim', 'Sam', 'Heather', 'Hank']
# students_scores = {student:random.randint(1, 100) for student in names}
# passed_students = {student:score for (student, score) in students_scores.items() if score > 59}
# print(students_scores)
# print(passed_students)
# sentence = "What is the Airspeed Velocity of an Unladden Swallow?"
# result = {word:len(word) for word in sentence.split(' ')}
# print(result)
# weather_c = {
# 'Monday': 12,
# 'Tuesday': 14,
# 'Wednesday': 15,
# 'Thursday': 14,
# 'Friday': 21,
# 'Saturday': 22,
# 'Sunday': 24
# }
# weather_f = {day: temp * 9 / 5 + 32 for (day, temp) in weather_c.items()}
# print(weather_f)
# import pandas
#
# student_dict = {
# 'student': ['Mary', 'Andy', 'Peter'],
# 'score': [56, 76, 98]
# }
# student_data_frame = pandas.DataFrame(student_dict)
# print(student_data_frame)
# for (index, row) in student_data_frame.iterrows():
# if row.student == 'Mary':
# print(row.score)
import pandas
letter_dict = {r.letter:r.code for (i, r) in pandas.read_csv('nato_phonetic_alphabet.csv').iterrows()}
generate_phonetic() | 30.039216 | 103 | 0.622063 | import random
# names = ['Alex', 'Beth', 'Carol', 'Dave', 'Kim', 'Sam', 'Heather', 'Hank']
# students_scores = {student:random.randint(1, 100) for student in names}
# passed_students = {student:score for (student, score) in students_scores.items() if score > 59}
# print(students_scores)
# print(passed_students)
# sentence = "What is the Airspeed Velocity of an Unladden Swallow?"
# result = {word:len(word) for word in sentence.split(' ')}
# print(result)
# weather_c = {
# 'Monday': 12,
# 'Tuesday': 14,
# 'Wednesday': 15,
# 'Thursday': 14,
# 'Friday': 21,
# 'Saturday': 22,
# 'Sunday': 24
# }
# weather_f = {day: temp * 9 / 5 + 32 for (day, temp) in weather_c.items()}
# print(weather_f)
# import pandas
#
# student_dict = {
# 'student': ['Mary', 'Andy', 'Peter'],
# 'score': [56, 76, 98]
# }
# student_data_frame = pandas.DataFrame(student_dict)
# print(student_data_frame)
# for (index, row) in student_data_frame.iterrows():
# if row.student == 'Mary':
# print(row.score)
import pandas
letter_dict = {r.letter:r.code for (i, r) in pandas.read_csv('nato_phonetic_alphabet.csv').iterrows()}
def generate_phonetic():
    """Prompt for a word and print its NATO phonetic spelling.

    Looks each letter up in the module-level ``letter_dict`` (letter ->
    NATO code word, loaded from the CSV at import time).  On any
    character that is not in the alphabet the user is shown an error
    and prompted again.

    Fix: retries now loop iteratively instead of recursing, so repeated
    bad input cannot exhaust the call stack (the original called itself
    on every failure).
    """
    while True:
        word = input('Enter a word ').upper().strip()
        try:
            # A str is already iterable; no intermediate list() needed.
            phonetic_list = [letter_dict[letter] for letter in word]
        except KeyError:
            print("Sorry, only letters in the alphabet please.")
        else:
            print(phonetic_list)
            return
generate_phonetic() | 297 | 0 | 25 |
41ee8bdcdc4e5cfd537d50225bfde851709ef2eb | 30,500 | py | Python | pyeccodes/defs/grib2/localConcepts/kwbc/shortName_def.py | ecmwf/pyeccodes | dce2c72d3adcc0cb801731366be53327ce13a00b | [
"Apache-2.0"
] | 7 | 2020-04-14T09:41:17.000Z | 2021-08-06T09:38:19.000Z | pyeccodes/defs/grib2/localConcepts/kwbc/shortName_def.py | ecmwf/pyeccodes | dce2c72d3adcc0cb801731366be53327ce13a00b | [
"Apache-2.0"
] | null | null | null | pyeccodes/defs/grib2/localConcepts/kwbc/shortName_def.py | ecmwf/pyeccodes | dce2c72d3adcc0cb801731366be53327ce13a00b | [
"Apache-2.0"
] | 3 | 2020-04-30T12:44:48.000Z | 2020-12-15T08:40:26.000Z | import pyeccodes.accessors as _
| 36.396181 | 85 | 0.600557 | import pyeccodes.accessors as _
# (discipline, parameterCategory, parameterNumber) -> GRIB2 shortName for
# NCEP (kwbc) local concepts.  Built once at import time; replaces a
# ~275-branch if-chain with a single O(1) dict lookup.  Every triplet is
# unique, so dict semantics match the original first-match chain exactly.
_SHORT_NAMES = {
    # discipline 0, category 0 (temperature)
    (0, 0, 192): 'snohf', (0, 0, 193): 'ttrad', (0, 0, 194): 'rev',
    (0, 0, 195): 'lrghr', (0, 0, 196): 'cnvhr', (0, 0, 197): 'thflx',
    (0, 0, 198): 'ttdia', (0, 0, 199): 'ttphy', (0, 0, 200): 'tsd1d',
    (0, 0, 201): 'shahr', (0, 0, 202): 'vdfhr', (0, 0, 203): 'thz0',
    (0, 0, 204): 'tchp',
    # discipline 0, category 1 (moisture)
    (0, 1, 8): 'tp', (0, 1, 11): 'sdwe',
    (0, 1, 192): 'crain', (0, 1, 193): 'cfrzr', (0, 1, 194): 'cicep',
    (0, 1, 195): 'csnow', (0, 1, 196): 'cprat', (0, 1, 197): 'mconv',
    (0, 1, 198): 'minrh', (0, 1, 199): 'pevap', (0, 1, 200): 'pevpr',
    (0, 1, 201): 'snowc', (0, 1, 202): 'frain', (0, 1, 203): 'rime',
    (0, 1, 204): 'tcolr', (0, 1, 205): 'tcols', (0, 1, 206): 'tipd',
    (0, 1, 207): 'ncip', (0, 1, 208): 'snot', (0, 1, 209): 'tclsw',
    (0, 1, 210): 'tcolm', (0, 1, 211): 'emnp', (0, 1, 212): 'sbsno',
    (0, 1, 213): 'cnvmr', (0, 1, 214): 'shamr', (0, 1, 215): 'vdfmr',
    (0, 1, 216): 'condp', (0, 1, 217): 'lrgmr', (0, 1, 218): 'qz0',
    (0, 1, 219): 'qmax', (0, 1, 220): 'qmin', (0, 1, 221): 'arain',
    (0, 1, 222): 'snowt', (0, 1, 223): 'apcpn', (0, 1, 224): 'acpcpn',
    (0, 1, 225): 'frzr',
    # discipline 0, category 2 (momentum)
    (0, 2, 192): 'vwsh', (0, 2, 193): 'mflx', (0, 2, 194): 'ustm',
    (0, 2, 195): 'vstm', (0, 2, 196): 'cd', (0, 2, 197): 'fricv',
    (0, 2, 198): 'lauv', (0, 2, 199): 'louv', (0, 2, 200): 'lavv',
    (0, 2, 201): 'lovv', (0, 2, 202): 'lapp', (0, 2, 203): 'lopp',
    (0, 2, 204): 'vedh', (0, 2, 205): 'covmz', (0, 2, 206): 'covtz',
    (0, 2, 207): 'covtm', (0, 2, 208): 'vdfua', (0, 2, 209): 'vdfva',
    (0, 2, 210): 'gwdu', (0, 2, 211): 'gwdv', (0, 2, 212): 'cnvu',
    (0, 2, 213): 'cnvv', (0, 2, 214): 'wtend', (0, 2, 215): 'omgalf',
    (0, 2, 216): 'cngwdu', (0, 2, 217): 'cngwdv', (0, 2, 218): 'lmv',
    (0, 2, 219): 'pvmww', (0, 2, 224): 'VRATE',
    # discipline 0, category 3 (mass)
    (0, 3, 192): 'mslet', (0, 3, 193): '5wavh', (0, 3, 194): 'u-gwd',
    (0, 3, 195): 'v-gwd', (0, 3, 196): 'hpbl', (0, 3, 197): '5wava',
    (0, 3, 198): 'mslma', (0, 3, 199): 'tslsa', (0, 3, 200): 'plpl',
    (0, 3, 201): 'lpsx', (0, 3, 202): 'lpsy', (0, 3, 203): 'hgtx',
    (0, 3, 204): 'hgty', (0, 3, 205): 'layth', (0, 3, 206): 'nlgsp',
    (0, 3, 207): 'cnvumf', (0, 3, 208): 'cnvdmf', (0, 3, 209): 'cnvdemf',
    (0, 3, 210): 'lmh', (0, 3, 211): 'hgtn', (0, 3, 212): 'presn',
    # discipline 0, category 4 (short-wave radiation)
    (0, 4, 192): 'dswrf', (0, 4, 193): 'uswrf', (0, 4, 194): 'duvb',
    (0, 4, 195): 'cduvb', (0, 4, 196): 'csdsf', (0, 4, 197): 'swhr',
    (0, 4, 198): 'csusf', (0, 4, 199): 'cfnsf', (0, 4, 200): 'vbdsf',
    (0, 4, 201): 'vddsf', (0, 4, 202): 'nbdsf', (0, 4, 203): 'nddsf',
    (0, 4, 204): 'dtrf', (0, 4, 205): 'utrf',
    # discipline 0, category 5 (long-wave radiation)
    (0, 5, 192): 'dlwrf', (0, 5, 193): 'ulwrf', (0, 5, 194): 'lwhr',
    (0, 5, 195): 'csulf', (0, 5, 196): 'csdlf', (0, 5, 197): 'cfnlf',
    # discipline 0, category 6 (cloud)
    (0, 6, 1): 'tcc', (0, 6, 192): 'cdlyr', (0, 6, 193): 'cwork',
    (0, 6, 194): 'cuefi', (0, 6, 195): 'tcond', (0, 6, 196): 'tcolw',
    (0, 6, 197): 'tcoli', (0, 6, 198): 'tcolc', (0, 6, 199): 'fice',
    (0, 6, 200): 'mflux', (0, 6, 201): 'SUNSD',
    # discipline 0, category 7 (thermodynamic stability)
    (0, 7, 6): 'cape', (0, 7, 7): 'cin', (0, 7, 192): 'lftx',
    (0, 7, 193): '4lftx', (0, 7, 194): 'ri', (0, 7, 195): 'cwdi',
    (0, 7, 196): 'uvi', (0, 7, 197): 'uphl', (0, 7, 198): 'lai',
    # discipline 0, category 13 (aerosols)
    (0, 13, 192): 'pmtc', (0, 13, 193): 'pmtf', (0, 13, 194): 'lpmtf',
    (0, 13, 195): 'lipmf',
    # discipline 0, category 14 (trace gases)
    (0, 14, 192): 'o3mr', (0, 14, 193): 'ozcon', (0, 14, 194): 'ozcat',
    (0, 14, 195): 'vdfoz', (0, 14, 196): 'poz', (0, 14, 197): 'toz',
    (0, 14, 198): 'pozt', (0, 14, 199): 'pozo',
    # discipline 0, category 16 (radar)
    (0, 16, 192): 'refzr', (0, 16, 193): 'refzi', (0, 16, 194): 'refzc',
    (0, 16, 195): 'refd', (0, 16, 196): 'refc',
    # discipline 0, category 17 (electrodynamics)
    (0, 17, 192): 'ltng',
    # discipline 0, category 19 (physical atmospheric properties)
    (0, 19, 192): 'mxsalb', (0, 19, 193): 'snfalb', (0, 19, 194): 'srcono',
    (0, 19, 195): 'mrcono', (0, 19, 196): 'hrcono', (0, 19, 197): 'torprob',
    (0, 19, 198): 'hailprob', (0, 19, 199): 'windprob',
    (0, 19, 200): 'storprob', (0, 19, 201): 'shailpro',
    (0, 19, 202): 'swindpro', (0, 19, 203): 'tstmc', (0, 19, 204): 'mixly',
    (0, 19, 205): 'flght', (0, 19, 206): 'cicel', (0, 19, 207): 'civis',
    (0, 19, 208): 'ciflt', (0, 19, 209): 'lavni', (0, 19, 210): 'havni',
    (0, 19, 211): 'sbsalb', (0, 19, 212): 'swsalb', (0, 19, 213): 'nbsalb',
    (0, 19, 214): 'nwsalb', (0, 19, 215): 'prsvr', (0, 19, 216): 'prsigsvr',
    (0, 19, 217): 'sipd', (0, 19, 218): 'epsr', (0, 19, 219): 'tpfi',
    (0, 19, 232): 'vaftd', (0, 19, 234): 'ICSEV',
    # discipline 0, category 191 (miscellaneous)
    (0, 191, 192): 'nlat', (0, 191, 193): 'elon', (0, 191, 194): 'tsec',
    (0, 191, 195): 'mlyno', (0, 191, 196): 'nlatn', (0, 191, 197): 'elonn',
    # discipline 1 (hydrological)
    (1, 0, 192): 'bgrun', (1, 0, 193): 'ssrun',
    (1, 1, 192): 'cpozp', (1, 1, 193): 'cpofp', (1, 1, 194): 'ppffg',
    (1, 1, 195): 'cwr',
    # discipline 2 (land surface), category 0
    (2, 0, 192): 'soilw', (2, 0, 193): 'gflux', (2, 0, 194): 'mstav',
    (2, 0, 195): 'sfexc', (2, 0, 196): 'cnwat', (2, 0, 197): 'bmixl',
    (2, 0, 198): 'vgtyp', (2, 0, 199): 'ccond', (2, 0, 200): 'rsmin',
    (2, 0, 201): 'wilt', (2, 0, 202): 'rcs', (2, 0, 203): 'rct',
    (2, 0, 204): 'rcq', (2, 0, 205): 'rcsol', (2, 0, 206): 'rdrip',
    (2, 0, 207): 'icwat', (2, 0, 208): 'akhs', (2, 0, 209): 'akms',
    (2, 0, 210): 'vegt', (2, 0, 211): 'sstor', (2, 0, 212): 'lsoil',
    (2, 0, 213): 'ewatr', (2, 0, 214): 'gwrec', (2, 0, 215): 'qrec',
    (2, 0, 216): 'sfcrh', (2, 0, 217): 'ndvi', (2, 0, 218): 'landn',
    (2, 0, 219): 'amixl', (2, 0, 220): 'wvinc', (2, 0, 221): 'wcinc',
    (2, 0, 222): 'wvconv', (2, 0, 223): 'wcconv', (2, 0, 224): 'wvuflx',
    (2, 0, 225): 'wvvflx', (2, 0, 226): 'wcuflx', (2, 0, 227): 'wcvflx',
    (2, 0, 228): 'acond', (2, 0, 229): 'evcw', (2, 0, 230): 'trans',
    # discipline 2 (land surface), category 3 (soil)
    (2, 3, 192): 'soill', (2, 3, 193): 'rlyrs', (2, 3, 194): 'sltyp',
    (2, 3, 195): 'smref', (2, 3, 196): 'smdry', (2, 3, 197): 'poros',
    (2, 3, 198): 'evbs', (2, 3, 199): 'lspa', (2, 3, 200): 'baret',
    (2, 3, 201): 'avsft', (2, 3, 202): 'radt', (2, 3, 203): 'fldcp',
    # discipline 3 (space products)
    (3, 1, 192): 'usct', (3, 1, 193): 'vsct',
    # discipline 10 (oceanographic)
    (10, 0, 192): 'wstp',
    (10, 1, 192): 'omlu', (10, 1, 193): 'omlv', (10, 1, 194): 'ubaro',
    (10, 1, 195): 'vbaro',
    (10, 3, 192): 'surge', (10, 3, 193): 'etsrg', (10, 3, 194): 'elevhtml',
    (10, 3, 195): 'sshg', (10, 3, 196): 'p2omlt', (10, 3, 197): 'aohflx',
    (10, 3, 198): 'ashfl', (10, 3, 199): 'sstt', (10, 3, 200): 'ssst',
    (10, 3, 201): 'keng', (10, 3, 202): 'sltfl',
    (10, 4, 192): 'wtmpc', (10, 4, 193): 'salin', (10, 4, 194): 'bkeng',
    (10, 4, 195): 'dbss', (10, 4, 196): 'intfd', (10, 4, 197): 'ohc',
    # image data
    (255, 255, 255): 'imgd',
}


def load(h):
    """Return an accessor resolving the GRIB2 shortName for a handle.

    The returned callable reads the ``discipline``, ``parameterCategory``
    and ``parameterNumber`` integer keys from its argument (which is
    expected to expose ``get_l(key)``, as pyeccodes accessors do — the
    original implementation relied on the same interface) and returns
    the matching kwbc shortName, or None when the triplet is unknown
    (mirroring the original if-chain's fall-through).
    """
    def wrapped(h):
        key = (
            h.get_l('discipline'),
            h.get_l('parameterCategory'),
            h.get_l('parameterNumber'),
        )
        return _SHORT_NAMES.get(key)

    return wrapped
| 30,444 | 0 | 23 |
f855524a5a17dbd4474da10c04f38088efe0a849 | 4,279 | py | Python | gladiator/prepare/command.py | hellcat17/gladiator | 4f1ea65112dab04f584b148d23a150f28a94c434 | [
"Apache-2.0"
] | null | null | null | gladiator/prepare/command.py | hellcat17/gladiator | 4f1ea65112dab04f584b148d23a150f28a94c434 | [
"Apache-2.0"
] | null | null | null | gladiator/prepare/command.py | hellcat17/gladiator | 4f1ea65112dab04f584b148d23a150f28a94c434 | [
"Apache-2.0"
] | null | null | null | """Prepare OpenGL commands for use in templates."""
from enum import auto, Enum
from typing import Iterable, Mapping, Optional, Union
import attr
from gladiator.parse.command import Command, Type
from gladiator.prepare.enum import PreparedEnum
from gladiator.prepare.style import transform_symbol
from gladiator.optional import OptionalValue
from gladiator.options import Options
from gladiator.resources import read_resource_file
@attr.s(auto_attribs=True, kw_only=True, slots=True, frozen=True)
@attr.s(auto_attribs=True, kw_only=True, slots=True, frozen=True)
@attr.s(auto_attribs=True, kw_only=True, slots=True, frozen=True)
@attr.s(auto_attribs=True, kw_only=True, slots=True, frozen=True)
@attr.s(auto_attribs=True, kw_only=True, slots=True, frozen=True)
@attr.s(auto_attribs=True, kw_only=True, slots=True, frozen=True)
@attr.s(auto_attribs=True, kw_only=True, slots=True, frozen=True)
_TYPE_TRANSLATIONS = dict(
t.split(",") for t in read_resource_file("data/type_translations").split("\n") if t
)
# TODO: take options such as casing, style and namespace
# TODO: generate special wrappers for generators and deleters
def prepare_commands(
commands: Iterable[Command],
prepared_enums: Mapping[str, PreparedEnum],
options: Options,
):
"""Prepare the given commands for use as references and in templates. The
given enums are used to construct type references. Yields tuples mapping the
original command name to the prepared command.
"""
for command in commands:
yield command.name, PreparedCommand(
original=command,
type_=CommandType.DEFAULT,
implementation=_make_default_implementation(command, prepared_enums),
name=transform_symbol(
command.name, options.function_case, options.omit_prefix
),
)
| 28.526667 | 87 | 0.714419 | """Prepare OpenGL commands for use in templates."""
from enum import auto, Enum
from typing import Iterable, Mapping, Optional, Union
import attr
from gladiator.parse.command import Command, Type
from gladiator.prepare.enum import PreparedEnum
from gladiator.prepare.style import transform_symbol
from gladiator.optional import OptionalValue
from gladiator.options import Options
from gladiator.resources import read_resource_file
class CommandType(Enum):
    """Classification of a wrapped OpenGL command.

    Currently every command is prepared as DEFAULT; see the module-level
    TODO about special wrappers for generators and deleters.
    """
    DEFAULT = auto()
    GENERATOR = auto()
    DELETER = auto()
class ConversionType(Enum):
    """How a value is converted between low-level and wrapper types."""
    CAST = auto()  # plain cast, payload is a CastAction
    METHOD_CALL = auto()  # conversion via method call, payload is a MethodCallAction
@attr.s(auto_attribs=True, kw_only=True, slots=True, frozen=True)
class TypeReference:
    """Reference to an OpenGL type, optionally linked to a prepared enum."""
    low_level: str  # translated low-level type name (via the type_translations table)
    high_level: Optional[PreparedEnum]  # prepared enum this type maps to, if any
    front_modifiers: Optional[str]  # carried over verbatim from the parsed Type
    back_modifiers: Optional[str]  # carried over verbatim from the parsed Type
@attr.s(auto_attribs=True, kw_only=True, slots=True, frozen=True)
class PreparedParameter:
    """A command parameter together with its resolved type reference."""
    type_: TypeReference
    name: str
@attr.s(auto_attribs=True, kw_only=True, slots=True, frozen=True)
class CastAction:
    """Conversion action: cast the value named ``param`` to type ``to``."""
    param: str
    to: TypeReference
@attr.s(auto_attribs=True, kw_only=True, slots=True, frozen=True)
class MethodCallAction:
    """Conversion action expressed as a method call (rendered by the templates)."""
    param: str
    method: str
    args: Iterable[str]
@attr.s(auto_attribs=True, kw_only=True, slots=True, frozen=True)
class PreparedConversion:
    """A single conversion step: its kind plus the matching action payload."""
    type_: ConversionType
    action: Union[CastAction, MethodCallAction]
@attr.s(auto_attribs=True, kw_only=True, slots=True, frozen=True)
class PreparedImplementation:
    """Everything a template needs to emit a command wrapper body."""
    return_type: TypeReference
    params: Iterable[PreparedParameter]
    retval_conversion: PreparedConversion  # converts the raw return value back
    param_conversions: Iterable[PreparedConversion]  # one cast per parameter
    # Name of the local variable holding the raw return value before conversion.
    retval_temporary = "retval"
@attr.s(auto_attribs=True, kw_only=True, slots=True, frozen=True)
class PreparedCommand:
    """A fully prepared OpenGL command, ready for template rendering."""
    original: Command  # the parsed command this was derived from
    name: str  # style-transformed wrapper name
    type_: CommandType
    implementation: PreparedImplementation
_TYPE_TRANSLATIONS = dict(
t.split(",") for t in read_resource_file("data/type_translations").split("\n") if t
)
def _translate_low_level_type(low_level: str):
    """Return the translated type name for *low_level*, or the name itself
    when no translation is registered in the type_translations table."""
    try:
        return _TYPE_TRANSLATIONS[low_level]
    except KeyError:
        return low_level
def _make_type_reference(target: Type, prepared_enums: Mapping[str, PreparedEnum]):
    """Build a TypeReference for the parsed *target* type.

    The low-level name is run through the translation table; the modifiers
    are copied verbatim from the parsed type.
    """
    return TypeReference(
        low_level=_translate_low_level_type(target.low_level),
        front_modifiers=target.front_modifiers,
        back_modifiers=target.back_modifiers,
        # OptionalValue chains the enum lookup; value_or_none presumably
        # yields None when high_level is unset/unknown — see gladiator.optional.
        high_level=OptionalValue(target.high_level)
        .map(prepared_enums.get)
        .value_or_none,
    )
def _make_default_implementation(
    command: Command, prepared_enums: Mapping[str, PreparedEnum]
):
    """Build the PreparedImplementation for a plain (DEFAULT) command.

    Every parameter and the return value are converted with a simple cast.
    """
    retval_typeref = _make_type_reference(command.return_type, prepared_enums)
    # Pair each parameter with its resolved type reference once, so the same
    # reference object is reused for the parameter list and its cast.
    params_with_refs = [
        (param, _make_type_reference(param.type_, prepared_enums))
        for param in command.params
    ]
    return PreparedImplementation(
        return_type=retval_typeref,
        params=[
            PreparedParameter(type_=ref, name=param.name)
            for param, ref in params_with_refs
        ],
        # The raw return value (held in the `retval` temporary) is cast back
        # to the prepared return type.
        retval_conversion=PreparedConversion(
            type_=ConversionType.CAST,
            action=CastAction(
                param=PreparedImplementation.retval_temporary, to=retval_typeref
            ),
        ),
        param_conversions=[
            PreparedConversion(
                type_=ConversionType.CAST, action=CastAction(param=param.name, to=ref)
            )
            for param, ref in params_with_refs
        ],
    )
# TODO: take options such as casing, style and namespace
# TODO: generate special wrappers for generators and deleters
def prepare_commands(
    commands: Iterable[Command],
    prepared_enums: Mapping[str, PreparedEnum],
    options: Options,
):
    """Prepare the given commands for use as references and in templates. The
    given enums are used to construct type references. Yields tuples mapping the
    original command name to the prepared command.

    :param commands: parsed OpenGL commands
    :param prepared_enums: prepared enums keyed by original enum name
    :param options: controls the casing/prefix transform of the wrapper name
    """
    for command in commands:
        yield command.name, PreparedCommand(
            original=command,
            type_=CommandType.DEFAULT,  # see TODO: generators/deleters not special-cased yet
            implementation=_make_default_implementation(command, prepared_enums),
            name=transform_symbol(
                command.name, options.function_case, options.omit_prefix
            ),
        )
| 1,387 | 761 | 269 |
c7393ff14c969a96e0698f982393eac0fd77f625 | 351 | py | Python | notebooks/bqm_functions.py | ozlemsalehi/qa-edu | 4b74a1d06820df36b710919c31a82a71b4e85d84 | [
"MIT"
] | 1 | 2021-10-10T03:35:03.000Z | 2021-10-10T03:35:03.000Z | notebooks/bqm_functions.py | ozlemsalehi/qa-edu | 4b74a1d06820df36b710919c31a82a71b4e85d84 | [
"MIT"
] | null | null | null | notebooks/bqm_functions.py | ozlemsalehi/qa-edu | 4b74a1d06820df36b710919c31a82a71b4e85d84 | [
"MIT"
] | null | null | null | """
This file contains all the functions used in the notebooks
under the Binary Quadratic Model section.
Prepared by Akash Narayanan B
"""
from dimod import BinaryQuadraticModel
# Task 3
linear = {'x1': 3, 'x2': -1, 'x3': 10, 'x4': 7}
quadratic = {('x1', 'x2'): 2, ('x1', 'x3'): -5, ('x2', 'x3'): 3, ('x3', 'x4'): 11}
offset = 8
vartype = 'BINARY' | 25.071429 | 82 | 0.621083 | """
This file contains all the functions used in the notebooks
under the Binary Quadratic Model section.
Prepared by Akash Narayanan B
"""
from dimod import BinaryQuadraticModel
# Task 3
linear = {'x1': 3, 'x2': -1, 'x3': 10, 'x4': 7}
quadratic = {('x1', 'x2'): 2, ('x1', 'x3'): -5, ('x2', 'x3'): 3, ('x3', 'x4'): 11}
offset = 8
vartype = 'BINARY' | 0 | 0 | 0 |
21724846cd453a1ea089631982f272aa7f74cddf | 754 | py | Python | project/delibere/migrations/0036_auto_20170922_1319.py | guglielmo/mosic2-db-delibere | a7c92adf42ad53023af47776c32baa2ee525f5e9 | [
"BSD-3-Clause"
] | null | null | null | project/delibere/migrations/0036_auto_20170922_1319.py | guglielmo/mosic2-db-delibere | a7c92adf42ad53023af47776c32baa2ee525f5e9 | [
"BSD-3-Clause"
] | null | null | null | project/delibere/migrations/0036_auto_20170922_1319.py | guglielmo/mosic2-db-delibere | a7c92adf42ad53023af47776c32baa2ee525f5e9 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-09-22 13:19
from __future__ import unicode_literals
from django.db import migrations, models
| 27.925926 | 124 | 0.619363 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-09-22 13:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: makes `delibera.codice` required and unique
    # (preserve_default=False because the None default was only used to
    # backfill existing rows) and makes `documento.nome` optional but unique.
    dependencies = [
        ('delibere', '0035_auto_20170621_1621'),
    ]
    operations = [
        migrations.AlterField(
            model_name='delibera',
            name='codice',
            field=models.CharField(default=None, help_text='Codice identificativo anno/seduta.', max_length=8, unique=True),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='documento',
            name='nome',
            field=models.CharField(blank=True, max_length=32, null=True, unique=True),
        ),
    ]
| 0 | 575 | 23 |
b4791417864a20c51bb22fe093517c0cb17c5969 | 2,605 | py | Python | classification_ScanObjectNN/ScanObjectNN.py | ma-xu/pointMLP-pytorch | 205669826d269972e2616b13198dd52f41c9992e | [
"Apache-2.0"
] | 95 | 2022-02-15T10:09:31.000Z | 2022-03-30T04:29:12.000Z | classification_ScanObjectNN/ScanObjectNN.py | ma-xu/pointMLP-pytorch | 205669826d269972e2616b13198dd52f41c9992e | [
"Apache-2.0"
] | 17 | 2022-02-16T06:41:09.000Z | 2022-03-31T13:00:09.000Z | classification_ScanObjectNN/ScanObjectNN.py | 13952522076/pointMLP-pytorch | 205669826d269972e2616b13198dd52f41c9992e | [
"Apache-2.0"
] | 13 | 2022-02-16T10:06:11.000Z | 2022-03-31T12:55:44.000Z | """
ScanObjectNN download: http://103.24.77.34/scanobjectnn/h5_files.zip
"""
import os
import sys
import glob
import h5py
import numpy as np
from torch.utils.data import Dataset
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
if __name__ == '__main__':
train = ScanObjectNN(1024)
test = ScanObjectNN(1024, 'test')
for data, label in train:
print(data.shape)
print(label)
| 32.160494 | 108 | 0.66142 | """
ScanObjectNN download: http://103.24.77.34/scanobjectnn/h5_files.zip
"""
import os
import sys
import glob
import h5py
import numpy as np
from torch.utils.data import Dataset
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
def download():
    """Download and unpack the ScanObjectNN h5 files into ./data.

    No-op when the extracted ``h5_files`` directory already exists.

    NOTE(review): the archive is fetched with certificate checking disabled
    and unpacked via ``os.system`` string commands; acceptable for a trusted
    mirror, but worth hardening (subprocess with list args, checksum
    verification) if the URL can ever be untrusted.
    """
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    DATA_DIR = os.path.join(BASE_DIR, 'data')
    # exist_ok avoids the check-then-create race of exists() + mkdir().
    os.makedirs(DATA_DIR, exist_ok=True)
    if not os.path.exists(os.path.join(DATA_DIR, 'h5_files')):
        # note that this link only contains the hardest perturbed variant (PB_T50_RS).
        # for full versions, consider the following link.
        www = 'https://web.northeastern.edu/smilelab/xuma/datasets/h5_files.zip'
        # www = 'http://103.24.77.34/scanobjectnn/h5_files.zip'
        zipfile = os.path.basename(www)
        os.system('wget %s --no-check-certificate; unzip %s' % (www, zipfile))
        os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
        os.system('rm %s' % (zipfile))
def load_scanobjectnn_data(partition):
    """Load the ScanObjectNN split named *partition* ('training' or 'test').

    Returns ``(points, labels)`` as float32 / int64 numpy arrays.
    """
    download()
    base_dir = os.path.dirname(os.path.abspath(__file__))
    h5_name = base_dir + '/data/h5_files/main_split/' + partition + '_objectdataset_augmentedrot_scale75.h5'
    f = h5py.File(h5_name, mode="r")
    data = f['data'][:].astype('float32')
    label = f['label'][:].astype('int64')
    f.close()
    all_data = np.concatenate([data], axis=0)
    all_label = np.concatenate([label], axis=0)
    return all_data, all_label
def translate_pointcloud(pointcloud):
    """Augment a point cloud with a random per-axis scale in [2/3, 3/2]
    and a random per-axis shift in [-0.2, 0.2]; returns float32."""
    scale = np.random.uniform(low=2. / 3., high=3. / 2., size=[3])
    shift = np.random.uniform(low=-0.2, high=0.2, size=[3])
    return (pointcloud * scale + shift).astype('float32')
class ScanObjectNN(Dataset):
    """PyTorch dataset over the hardest ScanObjectNN variant (PB_T50_RS).

    Yields ``(pointcloud, label)``; training samples are randomly
    scaled/shifted and point-shuffled as augmentation.
    """
    def __init__(self, num_points, partition='training'):
        self.data, self.label = load_scanobjectnn_data(partition)
        self.num_points = num_points  # points kept per sample
        self.partition = partition  # 'training' or 'test'
    def __getitem__(self, item):
        pointcloud = self.data[item][:self.num_points]
        label = self.label[item]
        if self.partition == 'training':
            pointcloud = translate_pointcloud(pointcloud)
            np.random.shuffle(pointcloud)  # permute point order in-place
        return pointcloud, label
    def __len__(self):
        return self.data.shape[0]
if __name__ == '__main__':
    # Smoke test: triggers download/extraction of both splits and iterates
    # the (augmented) training set.
    train = ScanObjectNN(1024)
    test = ScanObjectNN(1024, 'test')
    for data, label in train:
        print(data.shape)
        print(label)
a4a4356ef17d15a32870aceafc1ab8a92b4c9303 | 71 | py | Python | sgi/recursos_humanos/__init__.py | jorgevilaca82/SGI | c3f13d9e3e8f04377d9e23636dc8e35ed5ace35a | [
"MIT"
] | null | null | null | sgi/recursos_humanos/__init__.py | jorgevilaca82/SGI | c3f13d9e3e8f04377d9e23636dc8e35ed5ace35a | [
"MIT"
] | 8 | 2019-12-07T13:13:34.000Z | 2021-09-02T03:07:25.000Z | sgi/recursos_humanos/__init__.py | jorgevilaca82/SGI | c3f13d9e3e8f04377d9e23636dc8e35ed5ace35a | [
"MIT"
] | null | null | null | default_app_config = "sgi.recursos_humanos.apps.RecursosHumanosConfig"
| 35.5 | 70 | 0.873239 | default_app_config = "sgi.recursos_humanos.apps.RecursosHumanosConfig"
| 0 | 0 | 0 |
9903afe6a1227ea53c957090155382ddf95e9914 | 13,824 | py | Python | gntp/__init__.py | pcjacobse/sabnzbd | 494e72a9963a1810e69f4e0f69df7c9dfb9256b0 | [
"0BSD",
"PSF-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null | gntp/__init__.py | pcjacobse/sabnzbd | 494e72a9963a1810e69f4e0f69df7c9dfb9256b0 | [
"0BSD",
"PSF-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null | gntp/__init__.py | pcjacobse/sabnzbd | 494e72a9963a1810e69f4e0f69df7c9dfb9256b0 | [
"0BSD",
"PSF-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null | import re
import hashlib
import time
import StringIO
__version__ = '0.8'
#GNTP/<version> <messagetype> <encryptionAlgorithmID>[:<ivValue>][ <keyHashAlgorithmID>:<keyHash>.<salt>]
GNTP_INFO_LINE = re.compile(
'GNTP/(?P<version>\d+\.\d+) (?P<messagetype>REGISTER|NOTIFY|SUBSCRIBE|\-OK|\-ERROR)' +
' (?P<encryptionAlgorithmID>[A-Z0-9]+(:(?P<ivValue>[A-F0-9]+))?) ?' +
'((?P<keyHashAlgorithmID>[A-Z0-9]+):(?P<keyHash>[A-F0-9]+).(?P<salt>[A-F0-9]+))?\r\n',
re.IGNORECASE
)
GNTP_INFO_LINE_SHORT = re.compile(
'GNTP/(?P<version>\d+\.\d+) (?P<messagetype>REGISTER|NOTIFY|SUBSCRIBE|\-OK|\-ERROR)',
re.IGNORECASE
)
GNTP_HEADER = re.compile('([\w-]+):(.+)')
GNTP_EOL = '\r\n'
class _GNTPBuffer(StringIO.StringIO):
"""GNTP Buffer class"""
def writefmt(self, message="", *args):
"""Shortcut function for writing GNTP Headers"""
self.write((message % args).encode('utf8', 'replace'))
self.write(GNTP_EOL)
class _GNTPBase(object):
"""Base initilization
:param string messagetype: GNTP Message type
:param string version: GNTP Protocol version
:param string encription: Encryption protocol
"""
def _parse_info(self, data):
"""Parse the first line of a GNTP message to get security and other info values
:param string data: GNTP Message
:return dict: Parsed GNTP Info line
"""
match = GNTP_INFO_LINE.match(data)
if not match:
raise ParseError('ERROR_PARSING_INFO_LINE')
info = match.groupdict()
if info['encryptionAlgorithmID'] == 'NONE':
info['encryptionAlgorithmID'] = None
return info
def set_password(self, password, encryptAlgo='MD5'):
"""Set a password for a GNTP Message
:param string password: Null to clear password
:param string encryptAlgo: Supports MD5, SHA1, SHA256, SHA512
"""
hash = {
'MD5': hashlib.md5,
'SHA1': hashlib.sha1,
'SHA256': hashlib.sha256,
'SHA512': hashlib.sha512,
}
self.password = password
self.encryptAlgo = encryptAlgo.upper()
if not password:
self.info['encryptionAlgorithmID'] = None
self.info['keyHashAlgorithm'] = None
return
if not self.encryptAlgo in hash.keys():
raise UnsupportedError('INVALID HASH "%s"' % self.encryptAlgo)
hashfunction = hash.get(self.encryptAlgo)
password = password.encode('utf8')
seed = time.ctime()
salt = hashfunction(seed).hexdigest()
saltHash = hashfunction(seed).digest()
keyBasis = password + saltHash
key = hashfunction(keyBasis).digest()
keyHash = hashfunction(key).hexdigest()
self.info['keyHashAlgorithmID'] = self.encryptAlgo
self.info['keyHash'] = keyHash.upper()
self.info['salt'] = salt.upper()
def _decode_hex(self, value):
"""Helper function to decode hex string to `proper` hex string
:param string value: Human readable hex string
:return string: Hex string
"""
result = ''
for i in range(0, len(value), 2):
tmp = int(value[i:i + 2], 16)
result += chr(tmp)
return result
def _validate_password(self, password):
"""Validate GNTP Message against stored password"""
self.password = password
if password == None:
raise AuthError('Missing password')
keyHash = self.info.get('keyHash', None)
if keyHash is None and self.password is None:
return True
if keyHash is None:
raise AuthError('Invalid keyHash')
if self.password is None:
raise AuthError('Missing password')
password = self.password.encode('utf8')
saltHash = self._decode_hex(self.info['salt'])
keyBasis = password + saltHash
key = hashlib.md5(keyBasis).digest()
keyHash = hashlib.md5(key).hexdigest()
if not keyHash.upper() == self.info['keyHash'].upper():
raise AuthError('Invalid Hash')
return True
def validate(self):
"""Verify required headers"""
for header in self._requiredHeaders:
if not self.headers.get(header, False):
raise ParseError('Missing Notification Header: ' + header)
def _format_info(self):
"""Generate info line for GNTP Message
:return string:
"""
info = u'GNTP/%s %s' % (
self.info.get('version'),
self.info.get('messagetype'),
)
if self.info.get('encryptionAlgorithmID', None):
info += ' %s:%s' % (
self.info.get('encryptionAlgorithmID'),
self.info.get('ivValue'),
)
else:
info += ' NONE'
if self.info.get('keyHashAlgorithmID', None):
info += ' %s:%s.%s' % (
self.info.get('keyHashAlgorithmID'),
self.info.get('keyHash'),
self.info.get('salt')
)
return info
def _parse_dict(self, data):
"""Helper function to parse blocks of GNTP headers into a dictionary
:param string data:
:return dict:
"""
dict = {}
for line in data.split('\r\n'):
match = GNTP_HEADER.match(line)
if not match:
continue
key = unicode(match.group(1).strip(), 'utf8', 'replace')
val = unicode(match.group(2).strip(), 'utf8', 'replace')
dict[key] = val
return dict
def add_resource(self, data):
"""Add binary resource
:param string data: Binary Data
"""
identifier = hashlib.md5(data).hexdigest()
self.resources[identifier] = data
return 'x-growl-resource://%s' % identifier
def decode(self, data, password=None):
"""Decode GNTP Message
:param string data:
"""
self.password = password
self.raw = data
parts = self.raw.split('\r\n\r\n')
self.info = self._parse_info(data)
self.headers = self._parse_dict(parts[0])
def encode(self):
"""Encode a generic GNTP Message
:return string: GNTP Message ready to be sent
"""
buffer = _GNTPBuffer()
buffer.writefmt(self._format_info())
#Headers
for k, v in self.headers.iteritems():
buffer.writefmt('%s: %s', k, v)
buffer.writefmt()
#Resources
for resource, data in self.resources.iteritems():
buffer.writefmt('Identifier: %s', resource)
buffer.writefmt('Length: %d', len(data))
buffer.writefmt()
buffer.write(data)
buffer.writefmt()
buffer.writefmt()
return buffer.getvalue()
class GNTPRegister(_GNTPBase):
"""Represents a GNTP Registration Command
:param string data: (Optional) See decode()
:param string password: (Optional) Password to use while encoding/decoding messages
"""
_requiredHeaders = [
'Application-Name',
'Notifications-Count'
]
_requiredNotificationHeaders = ['Notification-Name']
def validate(self):
'''Validate required headers and validate notification headers'''
for header in self._requiredHeaders:
if not self.headers.get(header, False):
raise ParseError('Missing Registration Header: ' + header)
for notice in self.notifications:
for header in self._requiredNotificationHeaders:
if not notice.get(header, False):
raise ParseError('Missing Notification Header: ' + header)
def decode(self, data, password):
"""Decode existing GNTP Registration message
:param string data: Message to decode
"""
self.raw = data
parts = self.raw.split('\r\n\r\n')
self.info = self._parse_info(data)
self._validate_password(password)
self.headers = self._parse_dict(parts[0])
for i, part in enumerate(parts):
if i == 0:
continue # Skip Header
if part.strip() == '':
continue
notice = self._parse_dict(part)
if notice.get('Notification-Name', False):
self.notifications.append(notice)
elif notice.get('Identifier', False):
notice['Data'] = self._decode_binary(part, notice)
#open('register.png','wblol').write(notice['Data'])
self.resources[notice.get('Identifier')] = notice
def add_notification(self, name, enabled=True):
"""Add new Notification to Registration message
:param string name: Notification Name
:param boolean enabled: Enable this notification by default
"""
notice = {}
notice['Notification-Name'] = u'%s' % name
notice['Notification-Enabled'] = u'%s' % enabled
self.notifications.append(notice)
self.add_header('Notifications-Count', len(self.notifications))
def encode(self):
"""Encode a GNTP Registration Message
:return string: Encoded GNTP Registration message
"""
buffer = _GNTPBuffer()
buffer.writefmt(self._format_info())
#Headers
for k, v in self.headers.iteritems():
buffer.writefmt('%s: %s', k, v)
buffer.writefmt()
#Notifications
if len(self.notifications) > 0:
for notice in self.notifications:
for k, v in notice.iteritems():
buffer.writefmt('%s: %s', k, v)
buffer.writefmt()
#Resources
for resource, data in self.resources.iteritems():
buffer.writefmt('Identifier: %s', resource)
buffer.writefmt('Length: %d', len(data))
buffer.writefmt()
buffer.write(data)
buffer.writefmt()
buffer.writefmt()
return buffer.getvalue()
class GNTPNotice(_GNTPBase):
"""Represents a GNTP Notification Command
:param string data: (Optional) See decode()
:param string app: (Optional) Set Application-Name
:param string name: (Optional) Set Notification-Name
:param string title: (Optional) Set Notification Title
:param string password: (Optional) Password to use while encoding/decoding messages
"""
_requiredHeaders = [
'Application-Name',
'Notification-Name',
'Notification-Title'
]
def decode(self, data, password):
"""Decode existing GNTP Notification message
:param string data: Message to decode.
"""
self.raw = data
parts = self.raw.split('\r\n\r\n')
self.info = self._parse_info(data)
self._validate_password(password)
self.headers = self._parse_dict(parts[0])
for i, part in enumerate(parts):
if i == 0:
continue # Skip Header
if part.strip() == '':
continue
notice = self._parse_dict(part)
if notice.get('Identifier', False):
notice['Data'] = self._decode_binary(part, notice)
#open('notice.png','wblol').write(notice['Data'])
self.resources[notice.get('Identifier')] = notice
class GNTPSubscribe(_GNTPBase):
"""Represents a GNTP Subscribe Command
:param string data: (Optional) See decode()
:param string password: (Optional) Password to use while encoding/decoding messages
"""
_requiredHeaders = [
'Subscriber-ID',
'Subscriber-Name',
]
class GNTPOK(_GNTPBase):
"""Represents a GNTP OK Response
:param string data: (Optional) See _GNTPResponse.decode()
:param string action: (Optional) Set type of action the OK Response is for
"""
_requiredHeaders = ['Response-Action']
class GNTPError(_GNTPBase):
"""Represents a GNTP Error response
:param string data: (Optional) See _GNTPResponse.decode()
:param string errorcode: (Optional) Error code
:param string errordesc: (Optional) Error Description
"""
_requiredHeaders = ['Error-Code', 'Error-Description']
def parse_gntp(data, password=None):
"""Attempt to parse a message as a GNTP message
:param string data: Message to be parsed
:param string password: Optional password to be used to verify the message
"""
match = GNTP_INFO_LINE_SHORT.match(data)
if not match:
raise ParseError('INVALID_GNTP_INFO')
info = match.groupdict()
if info['messagetype'] == 'REGISTER':
return GNTPRegister(data, password=password)
elif info['messagetype'] == 'NOTIFY':
return GNTPNotice(data, password=password)
elif info['messagetype'] == 'SUBSCRIBE':
return GNTPSubscribe(data, password=password)
elif info['messagetype'] == '-OK':
return GNTPOK(data)
elif info['messagetype'] == '-ERROR':
return GNTPError(data)
raise ParseError('INVALID_GNTP_MESSAGE')
| 27.105882 | 105 | 0.697844 | import re
import hashlib
import time
import StringIO
__version__ = '0.8'
#GNTP/<version> <messagetype> <encryptionAlgorithmID>[:<ivValue>][ <keyHashAlgorithmID>:<keyHash>.<salt>]
GNTP_INFO_LINE = re.compile(
'GNTP/(?P<version>\d+\.\d+) (?P<messagetype>REGISTER|NOTIFY|SUBSCRIBE|\-OK|\-ERROR)' +
' (?P<encryptionAlgorithmID>[A-Z0-9]+(:(?P<ivValue>[A-F0-9]+))?) ?' +
'((?P<keyHashAlgorithmID>[A-Z0-9]+):(?P<keyHash>[A-F0-9]+).(?P<salt>[A-F0-9]+))?\r\n',
re.IGNORECASE
)
GNTP_INFO_LINE_SHORT = re.compile(
'GNTP/(?P<version>\d+\.\d+) (?P<messagetype>REGISTER|NOTIFY|SUBSCRIBE|\-OK|\-ERROR)',
re.IGNORECASE
)
GNTP_HEADER = re.compile('([\w-]+):(.+)')
GNTP_EOL = '\r\n'
class BaseError(Exception):
	"""Common base for GNTP errors; can render itself as an encoded
	GNTP -ERROR response using the subclass's errorcode/errordesc."""
	def gntp_error(self):
		"""Return this error encoded as a GNTP error response message."""
		return GNTPError(self.errorcode, self.errordesc).encode()
class ParseError(BaseError):
	"""Raised when a GNTP message cannot be parsed."""
	errorcode = 500
	errordesc = 'Error parsing the message'
class AuthError(BaseError):
	"""Raised when password/key-hash validation of a message fails."""
	errorcode = 400
	errordesc = 'Error with authorization'
class UnsupportedError(BaseError):
	"""Raised for features (e.g. hash algorithms) gntp.py does not implement."""
	errorcode = 500
	errordesc = 'Currently unsupported by gntp.py'
class _GNTPBuffer(StringIO.StringIO):
	"""GNTP buffer: a StringIO whose formatted writes are CRLF-terminated."""
	def writefmt(self, message="", *args):
		"""Write ``message % args`` UTF-8 encoded, followed by GNTP's CRLF."""
		line = message % args
		self.write(line.encode('utf8', 'replace'))
		self.write(GNTP_EOL)
class _GNTPBase(object):
	"""Base GNTP message (Python 2 code).

	:param string messagetype: GNTP Message type
	:param string version: GNTP Protocol version
	:param string encription: Encryption protocol
	"""
	def __init__(self, messagetype=None, version='1.0', encryption=None):
		# `info` mirrors the GNTP information line; headers/resources hold
		# the message body and attached binary sections.
		self.info = {
			'version': version,
			'messagetype': messagetype,
			'encryptionAlgorithmID': encryption
		}
		self.headers = {}
		self.resources = {}
	def __str__(self):
		return self.encode()
	def _parse_info(self, data):
		"""Parse the first line of a GNTP message to get security and other info values

		:param string data: GNTP Message
		:return dict: Parsed GNTP Info line
		"""
		match = GNTP_INFO_LINE.match(data)
		if not match:
			raise ParseError('ERROR_PARSING_INFO_LINE')
		info = match.groupdict()
		# 'NONE' on the wire means no encryption.
		if info['encryptionAlgorithmID'] == 'NONE':
			info['encryptionAlgorithmID'] = None
		return info
	def set_password(self, password, encryptAlgo='MD5'):
		"""Set a password for a GNTP Message

		:param string password: Null to clear password
		:param string encryptAlgo: Supports MD5, SHA1, SHA256, SHA512
		"""
		hash = {
			'MD5': hashlib.md5,
			'SHA1': hashlib.sha1,
			'SHA256': hashlib.sha256,
			'SHA512': hashlib.sha512,
		}
		self.password = password
		self.encryptAlgo = encryptAlgo.upper()
		if not password:
			self.info['encryptionAlgorithmID'] = None
			self.info['keyHashAlgorithm'] = None
			return
		if not self.encryptAlgo in hash.keys():
			raise UnsupportedError('INVALID HASH "%s"' % self.encryptAlgo)
		hashfunction = hash.get(self.encryptAlgo)
		password = password.encode('utf8')
		# The current time is used as the salt seed; GNTP keys are derived
		# as hash(password + hash(seed)) per the spec's key-hash scheme.
		seed = time.ctime()
		salt = hashfunction(seed).hexdigest()
		saltHash = hashfunction(seed).digest()
		keyBasis = password + saltHash
		key = hashfunction(keyBasis).digest()
		keyHash = hashfunction(key).hexdigest()
		self.info['keyHashAlgorithmID'] = self.encryptAlgo
		self.info['keyHash'] = keyHash.upper()
		self.info['salt'] = salt.upper()
	def _decode_hex(self, value):
		"""Helper function to decode hex string to `proper` hex string

		:param string value: Human readable hex string
		:return string: Hex string
		"""
		result = ''
		for i in range(0, len(value), 2):
			tmp = int(value[i:i + 2], 16)
			result += chr(tmp)
		return result
	def _decode_binary(self, rawIdentifier, identifier):
		"""Extract the binary payload for a resource section.

		Locates the raw section in the original message text and slices out
		exactly ``identifier['Length']`` bytes following the blank line.
		"""
		rawIdentifier += '\r\n\r\n'
		dataLength = int(identifier['Length'])
		pointerStart = self.raw.find(rawIdentifier) + len(rawIdentifier)
		pointerEnd = pointerStart + dataLength
		data = self.raw[pointerStart:pointerEnd]
		if not len(data) == dataLength:
			raise ParseError('INVALID_DATA_LENGTH Expected: %s Recieved %s' % (dataLength, len(data)))
		return data
	def _validate_password(self, password):
		"""Validate GNTP Message against stored password"""
		self.password = password
		if password == None:
			raise AuthError('Missing password')
		keyHash = self.info.get('keyHash', None)
		# No hash on the wire and no local password: nothing to verify.
		if keyHash is None and self.password is None:
			return True
		if keyHash is None:
			raise AuthError('Invalid keyHash')
		if self.password is None:
			raise AuthError('Missing password')
		# Recompute the key hash (MD5 only here) and compare case-insensitively.
		password = self.password.encode('utf8')
		saltHash = self._decode_hex(self.info['salt'])
		keyBasis = password + saltHash
		key = hashlib.md5(keyBasis).digest()
		keyHash = hashlib.md5(key).hexdigest()
		if not keyHash.upper() == self.info['keyHash'].upper():
			raise AuthError('Invalid Hash')
		return True
	def validate(self):
		"""Verify required headers"""
		for header in self._requiredHeaders:
			if not self.headers.get(header, False):
				raise ParseError('Missing Notification Header: ' + header)
	def _format_info(self):
		"""Generate info line for GNTP Message

		:return string:
		"""
		info = u'GNTP/%s %s' % (
			self.info.get('version'),
			self.info.get('messagetype'),
		)
		if self.info.get('encryptionAlgorithmID', None):
			info += ' %s:%s' % (
				self.info.get('encryptionAlgorithmID'),
				self.info.get('ivValue'),
			)
		else:
			info += ' NONE'
		if self.info.get('keyHashAlgorithmID', None):
			info += ' %s:%s.%s' % (
				self.info.get('keyHashAlgorithmID'),
				self.info.get('keyHash'),
				self.info.get('salt')
			)
		return info
	def _parse_dict(self, data):
		"""Helper function to parse blocks of GNTP headers into a dictionary

		:param string data:
		:return dict:
		"""
		dict = {}
		for line in data.split('\r\n'):
			match = GNTP_HEADER.match(line)
			if not match:
				continue
			key = unicode(match.group(1).strip(), 'utf8', 'replace')
			val = unicode(match.group(2).strip(), 'utf8', 'replace')
			dict[key] = val
		return dict
	def add_header(self, key, value):
		# Coerce non-unicode values (ints, bytes, ...) to unicode (Python 2).
		if isinstance(value, unicode):
			self.headers[key] = value
		else:
			self.headers[key] = unicode('%s' % value, 'utf8', 'replace')
	def add_resource(self, data):
		"""Add binary resource

		:param string data: Binary Data
		"""
		# Resources are keyed by the MD5 of their content, referenced from
		# headers via the x-growl-resource:// URI returned here.
		identifier = hashlib.md5(data).hexdigest()
		self.resources[identifier] = data
		return 'x-growl-resource://%s' % identifier
	def decode(self, data, password=None):
		"""Decode GNTP Message

		:param string data:
		"""
		self.password = password
		self.raw = data
		parts = self.raw.split('\r\n\r\n')
		self.info = self._parse_info(data)
		self.headers = self._parse_dict(parts[0])
	def encode(self):
		"""Encode a generic GNTP Message

		:return string: GNTP Message ready to be sent
		"""
		buffer = _GNTPBuffer()
		buffer.writefmt(self._format_info())
		#Headers
		for k, v in self.headers.iteritems():
			buffer.writefmt('%s: %s', k, v)
		buffer.writefmt()
		#Resources
		for resource, data in self.resources.iteritems():
			buffer.writefmt('Identifier: %s', resource)
			buffer.writefmt('Length: %d', len(data))
			buffer.writefmt()
			buffer.write(data)
			buffer.writefmt()
			buffer.writefmt()
		return buffer.getvalue()
class GNTPRegister(_GNTPBase):
	"""Represents a GNTP Registration Command

	:param string data: (Optional) See decode()
	:param string password: (Optional) Password to use while encoding/decoding messages
	"""
	_requiredHeaders = [
		'Application-Name',
		'Notifications-Count'
	]
	_requiredNotificationHeaders = ['Notification-Name']
	def __init__(self, data=None, password=None):
		"""Decode *data* if given, otherwise start an empty registration
		with placeholder headers."""
		_GNTPBase.__init__(self, 'REGISTER')
		self.notifications = []
		if data:
			self.decode(data, password)
		else:
			self.set_password(password)
			self.add_header('Application-Name', 'pygntp')
			self.add_header('Notifications-Count', 0)
	def validate(self):
		'''Validate required headers and validate notification headers'''
		for header in self._requiredHeaders:
			if not self.headers.get(header, False):
				raise ParseError('Missing Registration Header: ' + header)
		for notice in self.notifications:
			for header in self._requiredNotificationHeaders:
				if not notice.get(header, False):
					raise ParseError('Missing Notification Header: ' + header)
	def decode(self, data, password):
		"""Decode existing GNTP Registration message

		:param string data: Message to decode
		"""
		self.raw = data
		parts = self.raw.split('\r\n\r\n')
		self.info = self._parse_info(data)
		self._validate_password(password)
		self.headers = self._parse_dict(parts[0])
		# Remaining sections are either notification definitions or
		# binary resources (distinguished by their headers).
		for i, part in enumerate(parts):
			if i == 0:
				continue  # Skip Header
			if part.strip() == '':
				continue
			notice = self._parse_dict(part)
			if notice.get('Notification-Name', False):
				self.notifications.append(notice)
			elif notice.get('Identifier', False):
				notice['Data'] = self._decode_binary(part, notice)
				#open('register.png','wblol').write(notice['Data'])
				self.resources[notice.get('Identifier')] = notice
	def add_notification(self, name, enabled=True):
		"""Add new Notification to Registration message

		:param string name: Notification Name
		:param boolean enabled: Enable this notification by default
		"""
		notice = {}
		notice['Notification-Name'] = u'%s' % name
		notice['Notification-Enabled'] = u'%s' % enabled
		self.notifications.append(notice)
		self.add_header('Notifications-Count', len(self.notifications))
	def encode(self):
		"""Encode a GNTP Registration Message

		:return string: Encoded GNTP Registration message
		"""
		buffer = _GNTPBuffer()
		buffer.writefmt(self._format_info())
		#Headers
		for k, v in self.headers.iteritems():
			buffer.writefmt('%s: %s', k, v)
		buffer.writefmt()
		#Notifications
		if len(self.notifications) > 0:
			for notice in self.notifications:
				for k, v in notice.iteritems():
					buffer.writefmt('%s: %s', k, v)
				buffer.writefmt()
		#Resources
		for resource, data in self.resources.iteritems():
			buffer.writefmt('Identifier: %s', resource)
			buffer.writefmt('Length: %d', len(data))
			buffer.writefmt()
			buffer.write(data)
			buffer.writefmt()
			buffer.writefmt()
		return buffer.getvalue()
class GNTPNotice(_GNTPBase):
    """GNTP NOTIFY command message.

    :param string data: (Optional) Raw message to decode via :meth:`decode`
    :param string app: (Optional) Application-Name header value
    :param string name: (Optional) Notification-Name header value
    :param string title: (Optional) Notification-Title header value
    :param string password: (Optional) Password used while encoding/decoding
    """
    _requiredHeaders = [
        'Application-Name',
        'Notification-Name',
        'Notification-Title'
    ]

    def __init__(self, data=None, app=None, name=None, title=None, password=None):
        _GNTPBase.__init__(self, 'NOTIFY')
        if not data:
            self.set_password(password)
            # Only add the headers the caller actually supplied.
            if app:
                self.add_header('Application-Name', app)
            if name:
                self.add_header('Notification-Name', name)
            if title:
                self.add_header('Notification-Title', title)
        else:
            self.decode(data, password)

    def decode(self, data, password):
        """Decode an existing GNTP Notification message.

        :param string data: Message to decode.
        """
        self.raw = data
        self.info = self._parse_info(data)
        self._validate_password(password)
        # Sections are separated by blank lines; the first holds headers.
        sections = self.raw.split('\r\n\r\n')
        self.headers = self._parse_dict(sections[0])
        for section in sections[1:]:
            if not section.strip():
                continue
            notice = self._parse_dict(section)
            if notice.get('Identifier', False):
                # Binary resource section (e.g. an icon).
                notice['Data'] = self._decode_binary(section, notice)
                self.resources[notice.get('Identifier')] = notice
class GNTPSubscribe(_GNTPBase):
    """GNTP SUBSCRIBE command message.

    :param string data: (Optional) Raw message to decode
    :param string password: (Optional) Password used while encoding/decoding
    """
    _requiredHeaders = [
        'Subscriber-ID',
        'Subscriber-Name',
    ]

    def __init__(self, data=None, password=None):
        _GNTPBase.__init__(self, 'SUBSCRIBE')
        if not data:
            self.set_password(password)
        else:
            self.decode(data, password)
class GNTPOK(_GNTPBase):
    """GNTP -OK response message.

    :param string data: (Optional) Raw response to decode
    :param string action: (Optional) Type of action this OK response is for
    """
    _requiredHeaders = ['Response-Action']

    def __init__(self, data=None, action=None):
        _GNTPBase.__init__(self, '-OK')
        if data:
            self.decode(data)
        if action:
            self.add_header('Response-Action', action)
class GNTPError(_GNTPBase):
    """GNTP -ERROR response message.

    :param string data: (Optional) Raw response to decode
    :param string errorcode: (Optional) Error code header value
    :param string errordesc: (Optional) Error description header value
    """
    _requiredHeaders = ['Error-Code', 'Error-Description']

    def __init__(self, data=None, errorcode=None, errordesc=None):
        _GNTPBase.__init__(self, '-ERROR')
        if data:
            self.decode(data)
        if errorcode:
            # The description is only attached when a code was given.
            self.add_header('Error-Code', errorcode)
            self.add_header('Error-Description', errordesc)

    def error(self):
        """Return the (code, description) pair from the parsed headers."""
        code = self.headers.get('Error-Code', None)
        description = self.headers.get('Error-Description', None)
        return (code, description)
def parse_gntp(data, password=None):
    """Attempt to parse a message as a GNTP message.

    :param string data: Message to be parsed
    :param string password: Optional password used to verify the message
    :raises ParseError: when the info line or message type is not valid GNTP
    """
    match = GNTP_INFO_LINE_SHORT.match(data)
    if not match:
        raise ParseError('INVALID_GNTP_INFO')
    messagetype = match.groupdict()['messagetype']
    # Dispatch to the matching message class; commands also get the password.
    if messagetype == 'REGISTER':
        return GNTPRegister(data, password=password)
    if messagetype == 'NOTIFY':
        return GNTPNotice(data, password=password)
    if messagetype == 'SUBSCRIBE':
        return GNTPSubscribe(data, password=password)
    if messagetype == '-OK':
        return GNTPOK(data)
    if messagetype == '-ERROR':
        return GNTPError(data)
    raise ParseError('INVALID_GNTP_MESSAGE')
| 2,062 | 212 | 354 |
838d7c175e3f9e7de18126370cf750f1cc4ae14d | 124 | py | Python | wsm/__init__.py | Rayologist/windows-sshd-manager | 4f78a0cdaa12fe3c2a785aca31066c3be886878b | [
"Apache-2.0"
] | 9 | 2022-02-09T09:09:43.000Z | 2022-02-09T09:10:06.000Z | wsm/__init__.py | Rayologist/windows-sshd-manager | 4f78a0cdaa12fe3c2a785aca31066c3be886878b | [
"Apache-2.0"
] | null | null | null | wsm/__init__.py | Rayologist/windows-sshd-manager | 4f78a0cdaa12fe3c2a785aca31066c3be886878b | [
"Apache-2.0"
] | null | null | null | from .backend import *
from .commands import *
check_config()
if not DB_PATH.is_file():
init_db()
init_firewall()
| 13.777778 | 25 | 0.693548 | from .backend import *
from .commands import *
check_config()
if not DB_PATH.is_file():
init_db()
init_firewall()
| 0 | 0 | 0 |
7c28df5688401e05150d6b2da6280bdaa82a847b | 3,088 | py | Python | drive/snippets/drive-v3/team_drive_snippets/recover_team_drives.py | himanshupr2627/python-samples | 4a04e3aee1068dc1f1402e9e9c90044ff101a6c8 | [
"Apache-2.0"
] | 479 | 2018-03-16T16:45:11.000Z | 2020-10-13T11:32:02.000Z | drive/snippets/drive-v3/team_drive_snippets/recover_team_drives.py | himanshupr2627/python-samples | 4a04e3aee1068dc1f1402e9e9c90044ff101a6c8 | [
"Apache-2.0"
] | 159 | 2018-03-28T20:03:56.000Z | 2020-10-13T06:00:08.000Z | drive/snippets/drive-v3/team_drive_snippets/recover_team_drives.py | himanshupr2627/python-samples | 4a04e3aee1068dc1f1402e9e9c90044ff101a6c8 | [
"Apache-2.0"
] | 493 | 2018-03-21T01:07:21.000Z | 2020-10-14T10:31:00.000Z | """Copyright 2022 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# [START drive_recover_team_drives]
from __future__ import print_function
import google.auth
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
def recover_team_drives(real_user):
    """Find all Team Drives without an organizer and add one.

    Loads pre-authorized user credentials from the environment.
    TODO(developer) - See https://developers.google.com/identity
    for guides on implementing OAuth2 for the application.

    Args:
        real_user: Email address of the user to add as the new organizer.
    Returns:
        None. Prints the recovered Team Drives (or None if an error occurred).
    """
    creds, _ = google.auth.default()

    try:
        # call drive api client
        service = build('drive', 'v3', credentials=creds)
        # pylint: disable=maybe-no-member
        team_drives = []
        page_token = None
        new_organizer_permission = {'type': 'user',
                                    'role': 'organizer',
                                    'value': 'user@example.com'}
        new_organizer_permission['emailAddress'] = real_user

        while True:
            response = service.teamdrives().list(q='organizerCount = 0',
                                                 fields='nextPageToken, '
                                                        'teamDrives(id, '
                                                        'name)',
                                                 useDomainAdminAccess=True,
                                                 pageToken=page_token
                                                 ).execute()
            for team_drive in response.get('teamDrives', []):
                # Bug fix: this print was a plain string (missing the F
                # prefix) and read 'title', which is not among the requested
                # fields -- the list call asks for 'id' and 'name'.
                print(F'Found Team Drive without organizer: '
                      F'{team_drive.get("name")},{team_drive.get("id")}')
                permission = service.permissions().create(
                    fileId=team_drive.get('id'),
                    body=new_organizer_permission, useDomainAdminAccess=True,
                    supportsTeamDrives=True, fields='id').execute()
                print(F'Added organizer permission:{permission.get("id")}')

            team_drives.extend(response.get('teamDrives', []))
            page_token = response.get('nextPageToken', None)
            if page_token is None:
                break

    except HttpError as error:
        print(F'An error occurred: {error}')
        team_drives = None

    print(team_drives)
if __name__ == '__main__':
    # Sample invocation; replace with a real organizer's email address.
    recover_team_drives(real_user='gduser1@workspacesamples.dev')
# [END drive_recover_team_drives]
| 36.329412 | 77 | 0.591645 | """Copyright 2022 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# [START drive_recover_team_drives]
from __future__ import print_function
import google.auth
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
def recover_team_drives(real_user):
"""Finds all Team Drives without an organizer and add one
Args:
real_user:User ID for the new organizer.
Returns:
team drives_object.
Load pre-authorized user credentials from the environment.
TODO(developer) - See https://developers.google.com/identity
for guides on implementing OAuth2 for the application.
"""
creds, _ = google.auth.default()
try:
# call drive api client
service = build('drive', 'v3', credentials=creds)
# pylint: disable=maybe-no-member
team_drives = []
page_token = None
new_organizer_permission = {'type': 'user',
'role': 'organizer',
'value': 'user@example.com'}
new_organizer_permission['emailAddress'] = real_user
while True:
response = service.teamdrives().list(q='organizerCount = 0',
fields='nextPageToken, '
'teamDrives(id, '
'name)',
useDomainAdminAccess=True,
pageToken=page_token
).execute()
for team_drive in response.get('teamDrives', []):
print('Found Team Drive without organizer: {team_drive.get('
'"title")},{team_drive.get("id")}')
permission = service.permissions().create(
fileId=team_drive.get('id'),
body=new_organizer_permission, useDomainAdminAccess=True,
supportsTeamDrives=True, fields='id').execute()
print(F'Added organizer permission:{permission.get("id")}')
team_drives.extend(response.get('teamDrives', []))
page_token = response.get('nextPageToken', None)
if page_token is None:
break
except HttpError as error:
print(F'An error occurred: {error}')
team_drives = None
print(team_drives)
if __name__ == '__main__':
recover_team_drives(real_user='gduser1@workspacesamples.dev')
# [END drive_recover_team_drives]
| 0 | 0 | 0 |
5686d915b3397f6683c4fad42732286540a3a962 | 3,136 | py | Python | src/data_preparation/prepare_loc.py | tobiasraabe/locus-of-control | fe249bf8c85d163527d82e0c018e86bd37eff345 | [
"BSD-3-Clause"
] | null | null | null | src/data_preparation/prepare_loc.py | tobiasraabe/locus-of-control | fe249bf8c85d163527d82e0c018e86bd37eff345 | [
"BSD-3-Clause"
] | 56 | 2019-01-27T14:39:33.000Z | 2020-06-22T20:42:32.000Z | src/data_preparation/prepare_loc.py | tobiasraabe/locus-of-control | fe249bf8c85d163527d82e0c018e86bd37eff345 | [
"BSD-3-Clause"
] | 1 | 2018-03-01T09:19:00.000Z | 2018-03-01T09:19:00.000Z | import json
import numpy as np
import pandas as pd
from sklearn.externals import joblib
from bld.project_paths import project_paths_join as ppj
# This list is ordered according to the item table in our paper.
PERCEIVED_CONTROL = [
"LOC_LIFES_COURSE",
"LOC_ACHIEVED_DESERVE",
"LOC_LUCK",
"LOC_OTHERS",
"LOC_DOUBT",
"LOC_POSSIBILITIES",
"LOC_LITTLE_CONTROL",
]
LOC_VALUES = {
"[1] Trifft ueberhaupt nicht zu": 1,
"[2] [2/10]": 2,
"[3] [3/10]": 3,
"[4] [4/10]": 4,
"[5] [5/10]": 5,
"[6] [6/10]": 6,
"[7] Trifft voll zu": 7,
}
def invert_items(df):
    """Reverse the 1-7 scale of the negatively worded LoC items so that,
    for every item, higher numbers reflect greater feelings of control."""
    reverse_coded = (
        "LOC_ACHIEVED_DESERVE",
        "LOC_LUCK",
        "LOC_OTHERS",
        "LOC_DOUBT",
        "LOC_POSSIBILITIES",
        "LOC_ABILITIES",
        "LOC_LITTLE_CONTROL",
    )
    # Map 1<->7, 2<->6, ... ; values outside 1-7 (e.g. NaN) are untouched.
    recode = {old: 8 - old for old in range(1, 8)}
    for column in reverse_coded:
        df[column].replace(to_replace=recode, inplace=True)
    return df
def create_index(df):
    """Add a LOC_INDEX column holding the row-wise mean of the LoC items."""
    loc_items = df[PERCEIVED_CONTROL]
    df["LOC_INDEX"] = loc_items.mean(axis="columns")
    return df
if __name__ == "__main__":
# Load dataset
df = pd.read_pickle(ppj("OUT_DATA", "loc_raw.pkl"))
# Clean the data
df = clean_variables(df)
# Invert items so that higher numbers indicate greater feelings of control
df = invert_items(df)
# Calculate Cronbach's alpha for the whole scale
data = df[[i for i in df if "LOC" in i]].values.T
cronbachs_alpha_ten = calculate_cronbachs_alpha(data)
# Restrict to seven item scale proposed by Specht et al (2013)
df = df[["ID", "YEAR"] + PERCEIVED_CONTROL]
# Create an index as the average of LoC items
df = create_index(df)
# Calculate Cronbach's Alpha for seven item scale. First, reshape the data
# to n (items) * p (observations)
data = df[PERCEIVED_CONTROL].values.T
cronbachs_alpha_seven = calculate_cronbachs_alpha(data)
# Create container
container = {}
container["data"] = df
# Save numbers to json
with open(ppj("OUT_TABLES", "cronbachs_alphas.json"), "w") as file:
file.write(
json.dumps(
{"ca_seven": cronbachs_alpha_seven, "ca_ten": cronbachs_alpha_ten}
)
)
# Save data for PCA and FA
joblib.dump(container, ppj("OUT_DATA", "loc_container.pkl"))
| 29.037037 | 87 | 0.642219 | import json
import numpy as np
import pandas as pd
from sklearn.externals import joblib
from bld.project_paths import project_paths_join as ppj
# This list is ordered according to the item table in our paper.
PERCEIVED_CONTROL = [
"LOC_LIFES_COURSE",
"LOC_ACHIEVED_DESERVE",
"LOC_LUCK",
"LOC_OTHERS",
"LOC_DOUBT",
"LOC_POSSIBILITIES",
"LOC_LITTLE_CONTROL",
]
LOC_VALUES = {
"[1] Trifft ueberhaupt nicht zu": 1,
"[2] [2/10]": 2,
"[3] [3/10]": 3,
"[4] [4/10]": 4,
"[5] [5/10]": 5,
"[6] [6/10]": 6,
"[7] Trifft voll zu": 7,
}
def calculate_cronbachs_alpha(itemscores):
    """Compute Cronbach's alpha for an (n_items x n_observations) matrix.

    Uses sample variances (ddof=1) of the individual items and of the
    per-observation total scores.
    """
    scores = np.asarray(itemscores)
    n_items = len(scores)
    item_variances = scores.var(axis=1, ddof=1)
    total_variance = scores.sum(axis=0).var(ddof=1)
    return n_items / (n_items - 1.0) * (1 - item_variances.sum() / total_variance)
def clean_variables(df):
    """Map the categorical answer labels onto integer codes 1-7."""
    # Replace values and cast to integers
    for variable in df.select_dtypes("category"):
        # LOC_VALUES maps each answer label to its 1-7 code.
        # NOTE(review): rename_categories(..., inplace=True) is removed in
        # newer pandas versions -- confirm the pinned pandas release.
        df[variable].cat.rename_categories(LOC_VALUES, inplace=True)
        df[variable] = pd.to_numeric(df[variable], errors="raise", downcast="integer")
    return df
def invert_items(df):
"""This function inverts the scale of some items of LoC so that for all
items higher numbers reflect greater feelings of control."""
inverted_items = [
"LOC_ACHIEVED_DESERVE",
"LOC_LUCK",
"LOC_OTHERS",
"LOC_DOUBT",
"LOC_POSSIBILITIES",
"LOC_ABILITIES",
"LOC_LITTLE_CONTROL",
]
for item in inverted_items:
df[item].replace(
to_replace=[1, 2, 3, 4, 5, 6, 7], value=[7, 6, 5, 4, 3, 2, 1], inplace=True
)
return df
def create_index(df):
"""This function creates and index which is the average over all LoC
items."""
df["LOC_INDEX"] = df[PERCEIVED_CONTROL].mean(axis="columns")
return df
if __name__ == "__main__":
# Load dataset
df = pd.read_pickle(ppj("OUT_DATA", "loc_raw.pkl"))
# Clean the data
df = clean_variables(df)
# Invert items so that higher numbers indicate greater feelings of control
df = invert_items(df)
# Calculate Cronbach's alpha for the whole scale
data = df[[i for i in df if "LOC" in i]].values.T
cronbachs_alpha_ten = calculate_cronbachs_alpha(data)
# Restrict to seven item scale proposed by Specht et al (2013)
df = df[["ID", "YEAR"] + PERCEIVED_CONTROL]
# Create an index as the average of LoC items
df = create_index(df)
# Calculate Cronbach's Alpha for seven item scale. First, reshape the data
# to n (items) * p (observations)
data = df[PERCEIVED_CONTROL].values.T
cronbachs_alpha_seven = calculate_cronbachs_alpha(data)
# Create container
container = {}
container["data"] = df
# Save numbers to json
with open(ppj("OUT_TABLES", "cronbachs_alphas.json"), "w") as file:
file.write(
json.dumps(
{"ca_seven": cronbachs_alpha_seven, "ca_ten": cronbachs_alpha_ten}
)
)
# Save data for PCA and FA
joblib.dump(container, ppj("OUT_DATA", "loc_container.pkl"))
| 519 | 0 | 46 |
e99f8216a81c1cdba2df8dcfdf8089339385f03b | 5,265 | py | Python | misctools/archit.py | H4CKY54CK/misctools | e6f1f944046f07b808d19bb4e4c8fae6264eb428 | [
"MIT"
] | 3 | 2020-08-23T21:18:09.000Z | 2021-12-08T15:48:38.000Z | misctools/archit.py | H4CKY54CK/misctools | e6f1f944046f07b808d19bb4e4c8fae6264eb428 | [
"MIT"
] | 2 | 2020-04-14T09:18:54.000Z | 2020-07-13T06:09:22.000Z | misctools/archit.py | H4CKY54CK/misctools | e6f1f944046f07b808d19bb4e4c8fae6264eb428 | [
"MIT"
] | null | null | null | import os
import sys
import time
import shutil
import argparse
import traceback
import subprocess
from . import __version__
| 47.863636 | 225 | 0.605128 | import os
import sys
import time
import shutil
import argparse
import traceback
import subprocess
from . import __version__
def unarchit(args=None):
    """Extract the archive named by ``args.source``.

    The destination is ``args.output`` when given, otherwise the archive
    path with its suffix removed. Compound suffixes such as .tar.gz,
    .tar.bz2 and .tar.xz are handled.
    """
    source = os.path.abspath(args.source)
    stem, suffix = os.path.splitext(source)
    if suffix.lower() in ('.gz', '.bz2', '.xz'):
        # Peel off the inner extension too (e.g. ".tar" of ".tar.gz").
        stem, inner = os.path.splitext(stem)
        suffix = inner + '.' + suffix.strip('.2')
    destination = args.output if args.output else str(source).split(suffix)[0]
    shutil.unpack_archive(source, destination)
    if not args.quiet:
        print('Finished extracting.')
    if args.delete:
        os.remove(source)
def umain(argv=None):
    """Command-line entry point for extraction; dispatches to unarchit."""
    argv = (argv or sys.argv)[1:]
    parser = argparse.ArgumentParser()
    parser.add_argument('source', help='zip file to extract')
    parser.add_argument('output', default=None, nargs='?', help='output destination directory')
    parser.add_argument('-D', '--delete', dest='delete', action='store_true', help="delete zip file after extraction")
    parser.add_argument('-q', '--quiet', action='store_true')
    parser.set_defaults(func=unarchit)
    parsed = parser.parse_args(argv)
    parsed.func(parsed)
def archit(args, options=None):
    """Create one archive per requested format and optionally pip-install it.

    Extra unparsed CLI options (``options``) are forwarded to pip.
    NOTE(review): nesting reconstructed from a whitespace-stripped dump --
    verify the install/cleanup block is scoped under ``args.install``.
    """
    args.output = args.output or args.source
    base_out = os.path.basename(args.output)
    errors = False
    if not args.quiet:
        # Colored banner (24-bit ANSI escapes).
        print(f"\n\033[38;2;30;144;255m  \033[4mMisctools v{__version__}\033[24m\033[38;2;123;104;238m\n")
    try:
        for item in args.format:
            try:
                # Map shutil format names to file extensions
                # (e.g. "gztar" -> "tar.gz", "bztar" -> "tar.bz2").
                ext = f"tar.{item.split('tar')[0]}" if item != 'tar' and 'tar' in item else item
                ext = 'tar.bz2' if item == 'bztar' else ext
                if not args.quiet:
                    print(f"  Creating archive {base_out}.{ext}...")
                shutil.make_archive(args.output, item, args.source)
                if not args.quiet:
                    print(f"  Archive `{base_out}.{ext}` created...\n")
            except Exception as e:
                # Log and continue with the remaining formats.
                with open('archiving_errors.log', 'a') as f:
                    f.write(traceback.format_exc()+'\n')
                errors = True
    finally:
        if args.install:
            # Install the platform-default archive format via pip.
            ext = f"tar.{args.sys.split('tar')[0]}" if args.sys != 'tar' and 'tar' in args.sys else args.sys
            if not args.quiet:
                print(f"  \033[38;2;255;165;0mPreparing to install `{base_out}.{ext}` via pip...\033[0m\n")
            pkg = f"{args.output}.{ext}"
            subprocess.check_call([sys.executable, '-m', 'pip', 'install', pkg, *options])
            # os.system(f"{sys.executable} -m pip install {' '.join(options)} {args.output}.{ext}")
            if not args.quiet:
                print(f"\n  \033[38;2;255;165;0mFinished installing `{base_out}.{ext}`...\033[0m\n")
            if not args.quiet:
                print(f"  \033[38;2;255;165;0mCleaning up...\033[0m\n")
            # Remove the archive once it has been installed.
            outpath = os.path.abspath(f'{args.output}.{ext}')
            os.remove(outpath)
        if errors:
            if not args.quiet:
                print("  \033[38;2;50;252;50mFinished with errors. See `archiving_errors.log` for a more detailed report.\033[0m")
        else:
            if not args.quiet:
                print("  \033[38;2;50;252;50mFinished.\033[0m")
def main(argv=None):
    """Command-line entry point for archive creation; dispatches to archit."""
    argv = (argv or sys.argv)[1:]
    # Map user-facing format aliases to shutil.make_archive format names.
    dformats = {"zip": "zip", "next": "xztar", "tar": "tar", "bz": "bztar", "bztar": "bztar", "bz2": "bztar", "gz": "gztar", "gztar": "gztar", "xz": "xztar", "xztar": "xztar", "all": ["bztar", "gztar", "zip", "tar", "xztar"]}
    parser = argparse.ArgumentParser()
    parser.add_argument('source', type=str, help='package name in current directory you wish to compress')
    parser.add_argument('output', type=str, nargs='?', help='optionally specify an output name for the compressed file')
    parser.add_argument('-f', '--format', dest='format', type=str, choices=dformats, nargs='+', help="archive compression format(s) desired")
    # parser.add_argument('-z', '--zip', dest='zip', action='store_true', help='choose this archive format')
    # parser.add_argument('-t', '--tar', dest='tar', action='store_true', help='choose this archive format')
    # parser.add_argument('-g', '--gztar', dest='gztar', action='store_true', help='choose this archive format')
    # parser.add_argument('-b', '--bztar', dest='bztar', action='store_true', help='choose this archive format')
    # parser.add_argument('-x', '--xztar', dest='xztar', action='store_true', help='choose this archive format')
    parser.add_argument('-i', '--install', dest='install', action='store_true', help='after creating the archive, install it via pip')
    parser.add_argument('-d', '--debug', dest='debug', action='store_true')
    parser.add_argument('-q', '--quiet', dest='quiet', action='store_true', help='keep quiet')
    parser.set_defaults(func=archit)
    # parse_known_args: leftover options are forwarded to pip by archit().
    args, options = parser.parse_known_args(argv)
    # Platform-default format: zip on Windows, gzipped tar elsewhere.
    args.sys = None
    if sys.platform == 'win32':
        args.sys = 'zip'
    else:
        args.sys = 'gztar'
    if not args.format:
        args.format = [args.sys]
    elif 'all' in args.format:
        args.format = dformats['all']
    else:
        # NOTE(review): nesting reconstructed from a whitespace-stripped
        # dump -- the append is assumed to belong to this branch.
        args.format = [dformats[item] for item in args.format]
        args.format.append(args.sys)
    args.func(args, options)
| 5,049 | 0 | 91 |
62566eb6cd30c2061bec5af80d2812e59322c27e | 860 | py | Python | open_bus_pipelines/operators/api_bash_operator.py | OriHoch/open-bus-pipelines | ad8a5bcb2a84ebbae373c6ecc94fbcd666b81cde | [
"MIT"
] | 2 | 2021-11-14T00:06:18.000Z | 2022-02-15T13:50:37.000Z | open_bus_pipelines/operators/api_bash_operator.py | OriHoch/open-bus-pipelines | ad8a5bcb2a84ebbae373c6ecc94fbcd666b81cde | [
"MIT"
] | null | null | null | open_bus_pipelines/operators/api_bash_operator.py | OriHoch/open-bus-pipelines | ad8a5bcb2a84ebbae373c6ecc94fbcd666b81cde | [
"MIT"
] | 1 | 2021-11-23T07:53:22.000Z | 2021-11-23T07:53:22.000Z | import os
import json
import shlex
from .cli_bash_operator import CliBashOperator
from ..config import OPEN_BUS_PIPELINES_ROOTDIR
| 34.4 | 131 | 0.70814 | import os
import json
import shlex
from .cli_bash_operator import CliBashOperator
from ..config import OPEN_BUS_PIPELINES_ROOTDIR
class ApiBashOperator(CliBashOperator):
    """Bash operator that runs the API operator script with a JSON config.

    The literal placeholder ``__airflow_dag_run_conf__`` in the command is
    substituted with the dag_run conf (as quoted JSON) at execution time.
    """

    def __init__(self, config, **kwargs):
        command = self._get_cli_bash_operator_cmd(config)
        super(ApiBashOperator, self).__init__(command, **kwargs)

    def _get_cli_bash_operator_cmd(self, config):
        # Build: python -u <script> '<config json>' __airflow_dag_run_conf__
        script = os.path.join(OPEN_BUS_PIPELINES_ROOTDIR, 'open_bus_pipelines',
                              'operators', '_api_bash_operator_script.py')
        parts = ['python', '-u', script,
                 shlex.quote(json.dumps(config)),
                 '__airflow_dag_run_conf__']
        return ' '.join(parts)

    def execute(self, context):
        conf_json = shlex.quote(json.dumps(context['dag_run'].conf))
        self.bash_command = self.bash_command.replace('__airflow_dag_run_conf__', conf_json)
        return super(ApiBashOperator, self).execute(context)
75723ed8882576506d24cd1e306148f4fe9a583e | 341 | py | Python | main.py | kadin008/ArticleSpider | 51c9c79d7aec2d23333cd96b5e0e83c3ee677374 | [
"Apache-2.0"
] | null | null | null | main.py | kadin008/ArticleSpider | 51c9c79d7aec2d23333cd96b5e0e83c3ee677374 | [
"Apache-2.0"
] | null | null | null | main.py | kadin008/ArticleSpider | 51c9c79d7aec2d23333cd96b5e0e83c3ee677374 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# _*_ coding: utf-8 _*_
__author__: 'Patrick Wang'
__date__: '2019/2/28 14:52'
from scrapy.cmdline import execute
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# execute(["scrapy", "crawl", "jobbole"])
execute(["scrapy", "crawl", "zhihu"])
# execute(["scrapy", "crawl", "lagou"])
| 21.3125 | 59 | 0.686217 | #!/usr/bin/env python
# _*_ coding: utf-8 _*_
__author__: 'Patrick Wang'
__date__: '2019/2/28 14:52'
from scrapy.cmdline import execute
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# execute(["scrapy", "crawl", "jobbole"])
execute(["scrapy", "crawl", "zhihu"])
# execute(["scrapy", "crawl", "lagou"])
| 0 | 0 | 0 |
7cd2c0657c5d91ac0480863e7350e58fe1fcdc05 | 4,517 | py | Python | prestodb/exceptions.py | highker/presto-python-client | 4f5be9c81950cbddd4579a9e55dfce6db54fb8b2 | [
"Apache-2.0"
] | null | null | null | prestodb/exceptions.py | highker/presto-python-client | 4f5be9c81950cbddd4579a9e55dfce6db54fb8b2 | [
"Apache-2.0"
] | null | null | null | prestodb/exceptions.py | highker/presto-python-client | 4f5be9c81950cbddd4579a9e55dfce6db54fb8b2 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module defines exceptions for Presto operations. It follows the structure
defined in pep-0249.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import random
import time
import prestodb.logging
logger = prestodb.logging.get_logger(__name__)
| 25.811429 | 81 | 0.615231 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module defines exceptions for Presto operations. It follows the structure
defined in pep-0249.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import random
import time
import prestodb.logging
logger = prestodb.logging.get_logger(__name__)
class HttpError(Exception):
    """Error raised for HTTP-level failures."""
    pass
class Http503Error(HttpError):
    """HTTP 503 (Service Unavailable) error."""
    pass
class PrestoError(Exception):
    """Generic Presto client error."""
    pass
class DatabaseError(Exception):
    """Database error, per the PEP 249 exception layout."""
    pass
class PrestoQueryError(Exception):
    """Error reported by Presto for a submitted query.

    Wraps the ``error`` dict from a Presto response and exposes its fields
    as read-only properties.

    :param error: error dict as returned by Presto
    :param query_id: (Optional) ID of the failed query
    """
    def __init__(self, error, query_id=None):
        self._error = error
        self._query_id = query_id

    @property
    def error_code(self):
        return self._error.get('errorCode', None)

    @property
    def error_name(self):
        return self._error.get('errorName', None)

    @property
    def error_type(self):
        return self._error.get('errorType', None)

    @property
    def error_exception(self):
        # Exception type string from the failure info, when present.
        return self.failure_info.get('type', None) if self.failure_info else None

    @property
    def failure_info(self):
        return self._error.get('failureInfo', None)

    @property
    def message(self):
        # Bug fix: the fallback message read "Presto did no return ...".
        return self._error.get(
            'message',
            'Presto did not return an error message',
        )

    @property
    def error_location(self):
        """Return (line, column) of the error in the query text.

        NOTE(review): raises KeyError when 'errorLocation' is absent from
        the error dict -- confirm callers check for the key first.
        """
        location = self._error['errorLocation']
        return (location['lineNumber'], location['columnNumber'])

    @property
    def query_id(self):
        return self._query_id

    def __repr__(self):
        return '{}(type={}, name={}, message="{}", query_id={})'.format(
            self.__class__.__name__,
            self.error_type,
            self.error_name,
            self.message,
            self.query_id,
        )

    def __str__(self):
        return repr(self)
class PrestoExternalError(PrestoQueryError):
    """PrestoQueryError subclass for EXTERNAL-type errors."""
    pass
class PrestoInternalError(PrestoQueryError):
    """PrestoQueryError subclass for INTERNAL-type errors."""
    pass
class PrestoUserError(PrestoQueryError):
    """PrestoQueryError subclass for USER-type errors."""
    pass
def retry_with(handle_retry, exceptions, conditions, max_attempts):
    """Decorator factory that retries the wrapped callable.

    :param handle_retry: object with a ``retry(func, args, kwargs, err,
        attempt)`` hook invoked before each new attempt (e.g. to back off)
    :param exceptions: exception classes that trigger a retry
    :param conditions: predicates on the result; a truthy guard retries
    :param max_attempts: maximum number of calls before giving up
    """
    def wrapper(func):
        @functools.wraps(func)
        def decorated(*args, **kwargs):
            error = None
            result = None
            for attempt in range(1, max_attempts + 1):
                try:
                    result = func(*args, **kwargs)
                    if any(guard(result) for guard in conditions):
                        # Result matched a retry condition; try again.
                        handle_retry.retry(func, args, kwargs, None, attempt)
                        continue
                    return result
                except Exception as err:
                    error = err
                    if any(isinstance(err, exc) for exc in exceptions):
                        handle_retry.retry(func, args, kwargs, err, attempt)
                        continue
                    # Non-retryable exception: stop and re-raise below.
                    break
            # Reached only when no attempt returned successfully.
            logger.info('failed after {} attempts'.format(attempt))
            if error is not None:
                raise error
            return result
        return decorated
    return wrapper
class DelayExponential(object):
    """Callable computing an exponential-backoff delay for an attempt.

    delay = base * exponent ** attempt, optionally scaled by a uniform
    random jitter factor in [0, 1), and capped at max_delay.
    """
    def __init__(
        self,
        base=0.1,  # 100ms
        exponent=2,
        jitter=True,
        max_delay=2 * 3600,  # 2 hours
    ):
        self._base = base
        self._exponent = exponent
        self._jitter = jitter
        self._max_delay = max_delay

    def __call__(self, attempt):
        delay = float(self._base) * (self._exponent ** attempt)
        if self._jitter:
            delay *= random.random()
        return min(float(self._max_delay), delay)
class RetryWithExponentialBackoff(object):
    """Retry hook that sleeps an exponentially increasing delay per attempt."""
    def __init__(
        self,
        base=0.1,  # 100ms
        exponent=2,
        jitter=True,
        max_delay=2 * 3600  # 2 hours
    ):
        self._get_delay = DelayExponential(
            base, exponent, jitter, max_delay)

    def retry(self, func, args, kwargs, err, attempt):
        # The call details are ignored; only the attempt number matters.
        time.sleep(self._get_delay(attempt))
| 2,654 | 613 | 359 |
b186eb72fe48e7225c8c7512994e3ef73f7156df | 2,699 | py | Python | prody/tests/apps/test_prody_pca.py | grandevelia/ProDy | 7c725640a94c16543423c0756388998cb86a97ae | [
"MIT"
] | 210 | 2015-01-26T08:17:56.000Z | 2022-03-30T01:40:34.000Z | prody/tests/apps/test_prody_pca.py | grandevelia/ProDy | 7c725640a94c16543423c0756388998cb86a97ae | [
"MIT"
] | 555 | 2015-01-05T21:51:54.000Z | 2022-03-31T16:51:41.000Z | prody/tests/apps/test_prody_pca.py | grandevelia/ProDy | 7c725640a94c16543423c0756388998cb86a97ae | [
"MIT"
] | 99 | 2015-02-09T18:00:39.000Z | 2022-03-07T12:52:51.000Z | from os import remove
import shlex
from os.path import isfile, join, split, splitext
from prody.tests import TestCase, skipIf, skipUnless
from numpy.testing import *
try:
import numpy.testing.decorators as dec
except ImportError:
from numpy.testing import dec
from prody.tests.datafiles import TEMPDIR, pathDatafile
from prody.apps import prody_parser
from prody.tests import MATPLOTLIB, NOPRODYCMD, WINDOWS
| 31.022989 | 74 | 0.586514 | from os import remove
import shlex
from os.path import isfile, join, split, splitext
from prody.tests import TestCase, skipIf, skipUnless
from numpy.testing import *
try:
import numpy.testing.decorators as dec
except ImportError:
from numpy.testing import dec
from prody.tests.datafiles import TEMPDIR, pathDatafile
from prody.apps import prody_parser
from prody.tests import MATPLOTLIB, NOPRODYCMD, WINDOWS
class TestPCACommand(TestCase):
    """End-to-end tests for the ``prody pca`` command-line application."""

    def setUp(self):
        # Base command: run PCA writing every supported output (eigenvalues,
        # eigenvectors, projections, figures, NMD files) into TEMPDIR; each
        # test appends the coordinate/trajectory file to analyze.
        self.command = ('pca --pdb {pdb} '
                        '-e -r -o {outdir} -v -z -t all -j '
                        '-f %8g -d , -x .dat '
                        '-R -Q -J 1,2 '
                        '-F png -D 120 -W 5 -H 4 ').format(outdir=TEMPDIR,
                        pdb=pathDatafile('multi_model_truncated'))
        # Filename suffixes the command is expected to produce.
        self.suffixes = [
            '_pca_cc.png',
            '_pca.pca.npz',
            '_pca_covariance.dat',
            '_pca_cross-correlations.dat',
            '_pca_proj_1_2.png',
            '_pca_evalues.dat',
            '_pca_proj.dat',
            '_pca_evectors.dat',
            '_pca_sf.png',
            '_pca_extended_all.nmd',
            '_pca.nmd',
        ]
        # Start from a clean TEMPDIR in case a previous run left outputs.
        self.tearDown()

    @dec.slow
    @skipIf(NOPRODYCMD, 'prody command not found')
    @skipUnless(MATPLOTLIB, 'matplotlib not found')
    @skipIf(WINDOWS, 'command tests are not run on Windows')
    def testPCACommandDCD(self):
        # Run PCA on a DCD trajectory and check every output file exists.
        dcd = pathDatafile('dcd')
        command = self.command + dcd
        prefix = splitext(split(dcd)[1])[0]
        namespace = prody_parser.parse_args(shlex.split(command))
        namespace.func(namespace)
        for suffix in self.suffixes:
            fn = join(TEMPDIR, prefix + suffix)
            self.assertTrue(isfile(fn), msg=fn+' not found')

    @dec.slow
    @skipIf(NOPRODYCMD, 'prody command not found')
    @skipUnless(MATPLOTLIB, 'matplotlib not found')
    @skipIf(WINDOWS, 'command tests are not run on Windows')
    def testPCACommandPDB(self):
        # Same run using a multi-model PDB as the "trajectory" input.
        dcd = pathDatafile('multi_model_truncated')
        command = self.command + dcd
        prefix = splitext(split(dcd)[1])[0]
        namespace = prody_parser.parse_args(shlex.split(command))
        namespace.func(namespace)
        for suffix in self.suffixes:
            fn = join(TEMPDIR, prefix + suffix)
            self.assertTrue(isfile(fn), msg=fn+' not found')

    def tearDown(self):
        # Remove every expected output for both input prefixes.
        for dcd in [pathDatafile('multi_model_truncated'),
                    pathDatafile('dcd')]:
            prefix = splitext(split(dcd)[1])[0]
            for suffix in self.suffixes:
                fn = join(TEMPDIR, prefix + suffix)
                if isfile(fn): remove(fn)
7725ec364d6620e651e05ce578e7d17391e0e6ff | 1,414 | py | Python | talks/events/factories.py | alan-turing-institute/talks.ox | 5e172b7bb7296fcfc2d5c1b5978ec98a6643d90a | [
"Apache-2.0"
] | 5 | 2015-09-03T11:46:07.000Z | 2022-01-12T10:15:50.000Z | talks/events/factories.py | alan-turing-institute/talks.ox | 5e172b7bb7296fcfc2d5c1b5978ec98a6643d90a | [
"Apache-2.0"
] | 306 | 2015-01-05T10:16:56.000Z | 2021-06-10T08:00:31.000Z | talks/events/factories.py | alan-turing-institute/talks.ox | 5e172b7bb7296fcfc2d5c1b5978ec98a6643d90a | [
"Apache-2.0"
] | 5 | 2016-04-21T10:40:20.000Z | 2021-01-05T09:15:23.000Z | from __future__ import absolute_import
from datetime import datetime
import factory
from . import models
from talks.users.models import Collection, CollectionItem, CollectedDepartment
| 23.966102 | 78 | 0.73338 | from __future__ import absolute_import
from datetime import datetime
import factory
from . import models
from talks.users.models import Collection, CollectionItem, CollectedDepartment
class EventGroupFactory(factory.django.DjangoModelFactory):
    """Builds ``models.EventGroup`` instances for tests."""
    class Meta:
        model = models.EventGroup
class EventCollectionFactory(factory.django.DjangoModelFactory):
    """Builds user ``Collection`` instances for tests."""
    class Meta:
        model = Collection
class EventFactory(factory.django.DjangoModelFactory):
    """Builds ``models.Event`` instances with a fixed start/end window."""
    start = datetime(2015, 10, 23, 12, 18)
    end = datetime(2015, 10, 30, 20, 25)
    class Meta:
        model = models.Event
class PersonFactory(factory.django.DjangoModelFactory):
    """Builds ``models.Person`` instances for tests."""
    class Meta:
        model = models.Person
class PersonEventFactory(factory.django.DjangoModelFactory):
    """Builds ``models.PersonEvent`` link instances for tests."""
    class Meta:
        model = models.PersonEvent
class TopicItemFactory(factory.django.DjangoModelFactory):
    """Builds ``models.TopicItem`` linked to a freshly created Event."""
    item = factory.SubFactory(EventFactory)
    @factory.sequence
    def uri(n):
        # Unique topic URI per generated instance (n auto-increments).
        return "http://example.com/%s" % n
    class Meta:
        model = models.TopicItem
class TopicItemFactory_noSubFactory(factory.django.DjangoModelFactory):
    """Builds ``models.TopicItem``; caller must supply item/uri explicitly."""
    class Meta:
        model = models.TopicItem
class CollectedDepartmentFactory(factory.django.DjangoModelFactory):
    """Builds ``CollectedDepartment`` instances for tests."""
    class Meta:
        model = CollectedDepartment
class CollectionItemFactory(factory.django.DjangoModelFactory):
    """Builds ``CollectionItem`` instances for tests."""
    class Meta:
        model = CollectionItem
| 33 | 974 | 215 |
7873ab8bfeb4ee79e6303fa1dcf5e53421732720 | 126 | py | Python | edm_web1/app/track/apps.py | zhouli121018/nodejsgm | 0ccbc8acf61badc812f684dd39253d55c99f08eb | [
"MIT"
] | null | null | null | edm_web1/app/track/apps.py | zhouli121018/nodejsgm | 0ccbc8acf61badc812f684dd39253d55c99f08eb | [
"MIT"
] | 18 | 2020-06-05T18:17:40.000Z | 2022-03-11T23:25:21.000Z | edm_web1/app/track/apps.py | zhouli121018/nodejsgm | 0ccbc8acf61badc812f684dd39253d55c99f08eb | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django.apps import AppConfig
| 15.75 | 39 | 0.785714 | from __future__ import unicode_literals
from django.apps import AppConfig
class TrackConfig(AppConfig):
    """Django application configuration for the ``track`` app."""
    name = 'track'
| 0 | 27 | 23 |
2c4404e3104ac6f6cfb652d20db04410a3d2b584 | 2,585 | py | Python | setup.py | ufzq/stagger | e88123e465f0f720f334b7f0960af52ffffd069d | [
"BSD-2-Clause"
] | 4 | 2015-01-25T18:58:10.000Z | 2016-09-21T04:10:31.000Z | setup.py | ufzq/stagger | e88123e465f0f720f334b7f0960af52ffffd069d | [
"BSD-2-Clause"
] | 1 | 2016-01-12T18:56:56.000Z | 2016-01-13T15:01:57.000Z | setup.py | ufzq/stagger | e88123e465f0f720f334b7f0960af52ffffd069d | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
#
# setup.py
# From the stagger project: http://code.google.com/p/stagger/
#
# Copyright (c) 2009-2011 Karoly Lorentey <karoly@lorentey.hu>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import distribute_setup
distribute_setup.use_setuptools()
from setuptools import setup;
setup(
name="stagger",
version="0.4.2",
url="http://code.google.com/p/stagger",
author="Karoly Lorentey",
author_email="karoly@lorentey.hu",
packages=["stagger"],
entry_points = {
'console_scripts': ['stagger = stagger.commandline:main']
},
test_suite = "test.alltests.suite",
license="BSD",
description="ID3v1/ID3v2 tag manipulation package in pure Python 3",
long_description="""
The ID3v2 tag format is notorious for its useless specification
documents and its quirky, mutually incompatible
part-implementations. Stagger is to provide a robust tagging package
that is able to handle all the various badly formatted tags out there
and allow you to convert them to a consensus format.
""",
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Topic :: Multimedia :: Sound/Audio"
],
)
| 38.58209 | 72 | 0.733075 | #!/usr/bin/env python3
#
# setup.py
# From the stagger project: http://code.google.com/p/stagger/
#
# Copyright (c) 2009-2011 Karoly Lorentey <karoly@lorentey.hu>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import distribute_setup
distribute_setup.use_setuptools()
from setuptools import setup;
setup(
name="stagger",
version="0.4.2",
url="http://code.google.com/p/stagger",
author="Karoly Lorentey",
author_email="karoly@lorentey.hu",
packages=["stagger"],
entry_points = {
'console_scripts': ['stagger = stagger.commandline:main']
},
test_suite = "test.alltests.suite",
license="BSD",
description="ID3v1/ID3v2 tag manipulation package in pure Python 3",
long_description="""
The ID3v2 tag format is notorious for its useless specification
documents and its quirky, mutually incompatible
part-implementations. Stagger is to provide a robust tagging package
that is able to handle all the various badly formatted tags out there
and allow you to convert them to a consensus format.
""",
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Topic :: Multimedia :: Sound/Audio"
],
)
| 0 | 0 | 0 |
04eff52184b988d1cc793b12b3a99112a4eafd58 | 1,243 | py | Python | python/multiprocessing/main.py | marvinklimke/pi | 5ecd16175c39f9e7a70e6e7ceb26155d4948b86f | [
"MIT"
] | null | null | null | python/multiprocessing/main.py | marvinklimke/pi | 5ecd16175c39f9e7a70e6e7ceb26155d4948b86f | [
"MIT"
] | null | null | null | python/multiprocessing/main.py | marvinklimke/pi | 5ecd16175c39f9e7a70e6e7ceb26155d4948b86f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Calculate the value of pi using multiprocessing in Python"""
from datetime import datetime
from multiprocessing import Pool
import os
from sys import argv
if __name__ == "__main__":
main()
| 24.86 | 94 | 0.608206 | #!/usr/bin/env python3
"""Calculate the value of pi using multiprocessing in Python"""
from datetime import datetime
from multiprocessing import Pool
import os
from sys import argv
def compute(args):
    """Midpoint-rule partial sum for pi = integral of 4/(1+x^2) over [0, 1].

    *args* is a tuple ``(begin, end, num_steps)``: this worker evaluates
    steps ``begin`` (inclusive) through ``end`` (exclusive) out of
    ``num_steps`` total and returns its share of the estimate.
    """
    begin, end, num_steps = args
    return sum(
        4.0 / (1.0 + ((k + 0.5) / num_steps) ** 2)
        for k in range(begin, end)
    ) / num_steps
def main():
    """Estimate pi by splitting the integration across a process pool.

    Usage: ``main.py [num_steps] [num_processes]`` -- note argv[1] is the
    step count and argv[2] (optional) overrides the process count.
    """
    num_processes = os.cpu_count()
    num_steps = 1000000
    if len(argv) > 2:
        num_processes = int(argv[2])
    if len(argv) > 1:
        num_steps = int(argv[1])
    print("Calculating pi using {} processes in {} steps...".format(num_processes, num_steps))
    start = datetime.now()
    # Carve [0, num_steps) into one contiguous (begin, end, num_steps)
    # chunk per worker process.
    args = []
    for i in range(num_processes):
        args.append((int(i * num_steps / num_processes),
                     int((i + 1) * num_steps / num_processes),
                     num_steps))
    with Pool(num_processes) as pool:
        subtotals = pool.map(compute, args)
    # Each subtotal is already divided by num_steps, so a plain sum
    # yields the final estimate.
    total = sum(subtotals)
    finish = datetime.now()
    duration = finish - start
    print("==> pi = {}".format(total))
    print("Calculation took {} seconds.".format(duration.total_seconds()))
if __name__ == "__main__":
    main()
| 973 | 0 | 46 |
d8dc135da44f05d9d3ca90831a0906ba17d225d9 | 3,533 | py | Python | tensorflow_datasets/text/squad_question_generation/squad_question_generation.py | vanshhhhh/datasets | aee32f95273ca3bfe83e09fb9b00ba4bf23597a5 | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/text/squad_question_generation/squad_question_generation.py | vanshhhhh/datasets | aee32f95273ca3bfe83e09fb9b00ba4bf23597a5 | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/text/squad_question_generation/squad_question_generation.py | vanshhhhh/datasets | aee32f95273ca3bfe83e09fb9b00ba4bf23597a5 | [
"Apache-2.0"
] | 1 | 2021-11-22T18:07:30.000Z | 2021-11-22T18:07:30.000Z | # coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""squad_question_generation dataset."""
import os
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.question_answering import qa_utils
_CITATION = """\
@article{zhou2017neural,
title={Neural Question Generation from Text: A Preliminary Study},
author={Zhou, Qingyu and Yang, Nan and Wei, Furu and Tan, Chuanqi and Bao, Hangbo and Zhou, Ming},
journal={arXiv preprint arXiv:1704.01792},
year={2017}
}
@article{2016arXiv160605250R,
author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev},
Konstantin and {Liang}, Percy},
title = "{SQuAD: 100,000+ Questions for Machine Comprehension of Text}",
journal = {arXiv e-prints},
year = 2016,
eid = {arXiv:1606.05250},
pages = {arXiv:1606.05250},
archivePrefix = {arXiv},
eprint = {1606.05250},
}
"""
_DESCRIPTION = """\
Question generation using squad dataset and data split described in
'Neural Question Generation from Text: A Preliminary Study'.
"""
_URLS = {
"train":
"https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json",
"dev":
"https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json",
"mapping":
"https://res.qyzhou.me/qas_id_in_squad.zip",
}
_HOMEPAGE_URL = "https://github.com/magic282/NQG"
class SquadQuestionGeneration(tfds.core.GeneratorBasedBuilder):
"""DatasetBuilder for squad_question_generation dataset."""
VERSION = tfds.core.Version("1.0.0")
RELEASE_NOTES = {
"1.0.0": "Initial build.",
}
def _info(self):
"""Returns the dataset metadata."""
features_dict = qa_utils.SQUADLIKE_FEATURES
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=features_dict,
supervised_keys=("context", "question"),
homepage=_HOMEPAGE_URL,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_paths = dl_manager.download_and_extract(_URLS)
mapping_dir = os.path.join(dl_paths["mapping"], "qas_id_in_squad")
return {
tfds.Split.TRAIN:
self._generate_examples(dl_paths["train"],
os.path.join(mapping_dir, "train.txt.id")),
tfds.Split.VALIDATION:
self._generate_examples(
dl_paths["dev"],
os.path.join(mapping_dir, "dev.txt.shuffle.dev.id")),
tfds.Split.TEST:
self._generate_examples(
dl_paths["dev"],
os.path.join(mapping_dir, "dev.txt.shuffle.test.id")),
}
def _generate_examples(self, data_path: str, mapping_path: str):
"""Yields examples."""
with tf.io.gfile.GFile(mapping_path, "r") as f:
ids = set(f.read().splitlines())
for k, ex in qa_utils.generate_squadlike_examples(data_path):
if k in ids:
yield k, ex
| 33.647619 | 100 | 0.669403 | # coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""squad_question_generation dataset."""
import os
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.question_answering import qa_utils
_CITATION = """\
@article{zhou2017neural,
title={Neural Question Generation from Text: A Preliminary Study},
author={Zhou, Qingyu and Yang, Nan and Wei, Furu and Tan, Chuanqi and Bao, Hangbo and Zhou, Ming},
journal={arXiv preprint arXiv:1704.01792},
year={2017}
}
@article{2016arXiv160605250R,
author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev},
Konstantin and {Liang}, Percy},
title = "{SQuAD: 100,000+ Questions for Machine Comprehension of Text}",
journal = {arXiv e-prints},
year = 2016,
eid = {arXiv:1606.05250},
pages = {arXiv:1606.05250},
archivePrefix = {arXiv},
eprint = {1606.05250},
}
"""
_DESCRIPTION = """\
Question generation using squad dataset and data split described in
'Neural Question Generation from Text: A Preliminary Study'.
"""
_URLS = {
"train":
"https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json",
"dev":
"https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json",
"mapping":
"https://res.qyzhou.me/qas_id_in_squad.zip",
}
_HOMEPAGE_URL = "https://github.com/magic282/NQG"
class SquadQuestionGeneration(tfds.core.GeneratorBasedBuilder):
  """DatasetBuilder for squad_question_generation dataset."""

  VERSION = tfds.core.Version("1.0.0")
  RELEASE_NOTES = {
      "1.0.0": "Initial build.",
  }

  def _info(self):
    """Returns the dataset metadata."""
    features_dict = qa_utils.SQUADLIKE_FEATURES
    return tfds.core.DatasetInfo(
        builder=self,
        description=_DESCRIPTION,
        features=features_dict,
        # (context -> question), i.e. question generation, not answering.
        supervised_keys=("context", "question"),
        homepage=_HOMEPAGE_URL,
        citation=_CITATION,
    )

  def _split_generators(self, dl_manager):
    """Returns SplitGenerators."""
    dl_paths = dl_manager.download_and_extract(_URLS)
    # ID lists from Zhou et al. that re-split SQuAD's dev set into
    # validation and test portions.
    mapping_dir = os.path.join(dl_paths["mapping"], "qas_id_in_squad")
    return {
        tfds.Split.TRAIN:
            self._generate_examples(dl_paths["train"],
                                    os.path.join(mapping_dir, "train.txt.id")),
        tfds.Split.VALIDATION:
            self._generate_examples(
                dl_paths["dev"],
                os.path.join(mapping_dir, "dev.txt.shuffle.dev.id")),
        tfds.Split.TEST:
            self._generate_examples(
                dl_paths["dev"],
                os.path.join(mapping_dir, "dev.txt.shuffle.test.id")),
    }

  def _generate_examples(self, data_path: str, mapping_path: str):
    """Yields examples."""
    # Keep only the question IDs listed in this split's mapping file.
    with tf.io.gfile.GFile(mapping_path, "r") as f:
      ids = set(f.read().splitlines())
    for k, ex in qa_utils.generate_squadlike_examples(data_path):
      if k in ids:
        yield k, ex
| 0 | 0 | 0 |
0bd306ed8f7194f27a682ebda70b085b53ee2b23 | 159 | py | Python | json-load-transform-save/ujsonproc.py | rolando/python-benchmarks | 9ef4dc320983ef7ca3ff689b33f3b0c633388061 | [
"MIT"
] | 1 | 2016-06-23T11:29:01.000Z | 2016-06-23T11:29:01.000Z | json-load-transform-save/ujsonproc.py | rolando/python-benchmarks | 9ef4dc320983ef7ca3ff689b33f3b0c633388061 | [
"MIT"
] | null | null | null | json-load-transform-save/ujsonproc.py | rolando/python-benchmarks | 9ef4dc320983ef7ca3ff689b33f3b0c633388061 | [
"MIT"
] | null | null | null | import sys
from ujson import loads, dumps
for line in sys.stdin:
obj = loads(line)
sys.stdout.write(dumps(obj['actor']))
sys.stdout.write('\n')
| 15.9 | 41 | 0.660377 | import sys
from ujson import loads, dumps
# Stream newline-delimited JSON from stdin; for each record, re-serialize
# its "actor" field as one JSON document per output line.
for line in sys.stdin:
    obj = loads(line)
    sys.stdout.write(dumps(obj['actor']))
    sys.stdout.write('\n')
| 0 | 0 | 0 |
6de6758fa523ec1489447da0b79cbce0ee433ec0 | 2,120 | py | Python | code/insert_sensordata_from_azure_queue.py | jurjanbrust/wsl2_mysql_grafana | df5e236897738f47d2f84d0cbff35e0f7ec6d364 | [
"MIT"
] | null | null | null | code/insert_sensordata_from_azure_queue.py | jurjanbrust/wsl2_mysql_grafana | df5e236897738f47d2f84d0cbff35e0f7ec6d364 | [
"MIT"
] | null | null | null | code/insert_sensordata_from_azure_queue.py | jurjanbrust/wsl2_mysql_grafana | df5e236897738f47d2f84d0cbff35e0f7ec6d364 | [
"MIT"
] | null | null | null | from azure.storage.queue import (
QueueClient,
TextBase64EncodePolicy,
TextBase64DecodePolicy
)
import os, uuid, time, json
import mysql.connector
from datetime import datetime
connect_str = "DefaultEndpointsProtocol=https;AccountName=replace;AccountKey=replacewithyours;EndpointSuffix=core.windows.net"
queue_name = "name of queue"
mySql_dbName = "sensordata"
mySql_tableName = "temperature"
queue_client = QueueClient.from_connection_string(conn_str=connect_str, queue_name=queue_name, message_decode_policy=TextBase64DecodePolicy())
messages = queue_client.receive_messages(messages_per_page=5)
db = mysql.connector.connect(
host="db",
user="root",
passwd="example",
database=mySql_dbName
)
cursor = db.cursor()
for message in messages:
processMessage()
queue_client.delete_message(message.id, message.pop_receipt)
time.sleep(0.1)
print("All Done")
| 36.551724 | 180 | 0.734906 | from azure.storage.queue import (
QueueClient,
TextBase64EncodePolicy,
TextBase64DecodePolicy
)
import os, uuid, time, json
import mysql.connector
from datetime import datetime
connect_str = "DefaultEndpointsProtocol=https;AccountName=replace;AccountKey=replacewithyours;EndpointSuffix=core.windows.net"
queue_name = "name of queue"
mySql_dbName = "sensordata"
mySql_tableName = "temperature"
queue_client = QueueClient.from_connection_string(conn_str=connect_str, queue_name=queue_name, message_decode_policy=TextBase64DecodePolicy())
messages = queue_client.receive_messages(messages_per_page=5)
db = mysql.connector.connect(
host="db",
user="root",
passwd="example",
database=mySql_dbName
)
cursor = db.cursor()
def processMessage():
    """Decode one queue message and insert a sensor reading row.

    NOTE(review): relies on module-level state (`message` from the consume
    loop below, plus `cursor`, `db`, `mySql_tableName`) rather than
    parameters.
    """
    message_json = json.loads(message.content)
    payload_raw = message_json["payload_raw"]
    # NOTE(review): treats payload_raw as raw ascii bytes; TTN-style
    # payload_raw is often base64-encoded -- confirm which form is queued.
    payload_bytes = bytes(payload_raw, 'ascii')
    # Bytes 0-1: little-endian 16-bit counter; bytes 2-3: temp as int.frac.
    sensor_counter = payload_bytes[0] + 256 * payload_bytes[1]
    sensor_temperature = payload_bytes[2] + (payload_bytes[3] / 100)
    # Truncate ISO timestamp to "YYYY-MM-DDTHH:MM:SS" for MySQL.
    sensor_time = message_json["metadata"]["time"][0: 19]
    sensor_latitude = message_json["metadata"]["latitude"]
    sensor_longitude = message_json["metadata"]["longitude"]
    # RSSI taken from the first reporting gateway only.
    sensor_rssi = message_json["metadata"]["gateways"][0]["rssi"]
    sensor_dev_id = message_json["dev_id"]
    sensor_app_id = message_json["app_id"]
    sensor_hardware_serial = message_json["hardware_serial"]
    print("counter: " + str(sensor_counter) + " temp: " + str(sensor_temperature))
    # Parameterized insert; table name is interpolated but comes from a
    # module constant, not user input.
    sql = "INSERT INTO " + mySql_tableName + " (counter, temperature, time, latitude, longitude, rssi, dev_id, app_id, hardware_serial) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)"
    val = (sensor_counter, sensor_temperature, sensor_time, sensor_latitude, sensor_longitude, sensor_rssi, sensor_dev_id, sensor_app_id, sensor_hardware_serial)
    try:
        cursor.execute(sql, val)
        db.commit()
    except Exception as ex:
        # Best-effort: a failed insert is logged and the message is still
        # deleted by the loop below.
        print(ex)
# Consume the fetched page of messages, deleting each after processing;
# short sleep throttles queue requests.
for message in messages:
    processMessage()
    queue_client.delete_message(message.id, message.pop_receipt)
    time.sleep(0.1)
print("All Done")
| 1,193 | 0 | 23 |
65d1ca74db776761cd8564119bbc577e67c51c88 | 200 | py | Python | app/__init__.py | twitbox/candidate-sentiment-api | c06141963756a4c6251fbd53f3ddced422bd10b4 | [
"Unlicense",
"MIT"
] | 1 | 2017-09-12T12:18:13.000Z | 2017-09-12T12:18:13.000Z | app/__init__.py | twitbox/candidate-sentiment-api | c06141963756a4c6251fbd53f3ddced422bd10b4 | [
"Unlicense",
"MIT"
] | null | null | null | app/__init__.py | twitbox/candidate-sentiment-api | c06141963756a4c6251fbd53f3ddced422bd10b4 | [
"Unlicense",
"MIT"
] | null | null | null | from .controllers.twitter import search | 20 | 39 | 0.675 | from .controllers.twitter import search
def router(app):
    """Register HTTP routes on the given Flask-style *app*."""
    @app.route('/')
    def index():
        # Simple health-check endpoint.
        return 'OK'
    @app.route('/twitter/<hashtag>')
    def search_twitter(hashtag):
        # Delegates to controllers.twitter.search for the given hashtag.
        return search(hashtag)
79d19cfb9e26ce72a74fab548e234650eae67dbb | 57 | py | Python | python/testData/highlighting/continueInFinallyBlock.py | tgodzik/intellij-community | f5ef4191fc30b69db945633951fb160c1cfb7b6f | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/highlighting/continueInFinallyBlock.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 2 | 2022-02-19T09:45:05.000Z | 2022-02-27T20:32:55.000Z | python/testData/highlighting/continueInFinallyBlock.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | while True:
try:
print("a")
finally:
continue | 11.4 | 14 | 0.578947 | while True:
try:
print("a")
finally:
continue | 0 | 0 | 0 |
9fce0a533c0967a19b5da21b4115ccd074cab091 | 582 | py | Python | studies/airfoil_optimizer/gradient_check.py | peterdsharpe/DawnDesignTool | 7c8b87db33e151cf50e07da982f51494ef84d80a | [
"MIT"
] | 3 | 2021-09-28T16:05:51.000Z | 2021-12-29T01:08:24.000Z | studies/airfoil_optimizer/gradient_check.py | peterdsharpe/DawnDesignTool | 7c8b87db33e151cf50e07da982f51494ef84d80a | [
"MIT"
] | 1 | 2021-12-08T17:35:41.000Z | 2021-12-08T17:35:41.000Z | studies/airfoil_optimizer/gradient_check.py | peterdsharpe/DawnDesignTool | 7c8b87db33e151cf50e07da982f51494ef84d80a | [
"MIT"
] | null | null | null | import copy
if __name__ == '__main__':
epss = np.logspace(-10, -1, 30)
baseline_objective = augmented_objective(x0)
xis = []
for eps in epss:
xi = copy.copy(x0)
xi[4] += eps
xis.append(xi)
objs = [augmented_objective(xi) for xi in xis]
# pool = mp.Pool(mp.cpu_count())
# objs = pool.map(augmented_objective, xis)
# pool.close()
objs = np.array(objs)
derivs = (objs - baseline_objective) / epss
fig, ax = plt.subplots(1, 1, figsize=(6.4, 4.8), dpi=200)
plt.loglog(epss, np.abs(derivs), ".-")
plt.show() | 25.304348 | 61 | 0.585911 | import copy
if __name__ == '__main__':
    # Finite-difference check of d(objective)/d(x[4]) across step sizes.
    # NOTE(review): np, plt, augmented_objective and x0 are not defined in
    # this file's visible imports -- presumably provided elsewhere; confirm.
    epss = np.logspace(-10, -1, 30)
    baseline_objective = augmented_objective(x0)
    # Perturb only component 4 of the design vector, once per step size.
    xis = []
    for eps in epss:
        xi = copy.copy(x0)
        xi[4] += eps
        xis.append(xi)
    objs = [augmented_objective(xi) for xi in xis]
    # pool = mp.Pool(mp.cpu_count())
    # objs = pool.map(augmented_objective, xis)
    # pool.close()
    objs = np.array(objs)
    # Forward-difference derivative estimate for each step size; a plateau
    # in |deriv| vs eps indicates a trustworthy gradient.
    derivs = (objs - baseline_objective) / epss
    fig, ax = plt.subplots(1, 1, figsize=(6.4, 4.8), dpi=200)
    plt.loglog(epss, np.abs(derivs), ".-")
    plt.show()
984cd9e9856e57b6b0cd4778d04824c76290e492 | 2,190 | py | Python | awx/main/models/fact.py | gitEdouble/awx | 5885654405ccaf465f08df4db998a6dafebd9b4d | [
"Apache-2.0"
] | 2 | 2018-11-12T18:52:24.000Z | 2020-05-22T18:41:21.000Z | awx/main/models/fact.py | gitEdouble/awx | 5885654405ccaf465f08df4db998a6dafebd9b4d | [
"Apache-2.0"
] | 4 | 2020-04-29T23:03:16.000Z | 2022-03-01T23:56:09.000Z | awx/main/models/fact.py | gitEdouble/awx | 5885654405ccaf465f08df4db998a6dafebd9b4d | [
"Apache-2.0"
] | 9 | 2019-05-11T00:03:30.000Z | 2021-07-07T16:09:17.000Z | # Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved.
import logging
from django.db import models
from django.utils.translation import ugettext_lazy as _
from awx.main.fields import JSONBField
__all__ = ('Fact',)
logger = logging.getLogger('awx.main.models.fact')
class Fact(models.Model):
"""A model representing a fact returned from Ansible.
Facts are stored as JSON dictionaries.
"""
host = models.ForeignKey(
'Host',
related_name='facts',
db_index=True,
on_delete=models.CASCADE,
help_text=_('Host for the facts that the fact scan captured.'),
)
timestamp = models.DateTimeField(
default=None,
editable=False,
help_text=_('Date and time of the corresponding fact scan gathering time.')
)
module = models.CharField(max_length=128)
facts = JSONBField(blank=True, default={}, help_text=_('Arbitrary JSON structure of module facts captured at timestamp for a single host.'))
@staticmethod
@staticmethod
@staticmethod
| 30.84507 | 144 | 0.629224 | # Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved.
import logging
from django.db import models
from django.utils.translation import ugettext_lazy as _
from awx.main.fields import JSONBField
__all__ = ('Fact',)
logger = logging.getLogger('awx.main.models.fact')
class Fact(models.Model):
    """A model representing a fact returned from Ansible.
    Facts are stored as JSON dictionaries.
    """
    host = models.ForeignKey(
        'Host',
        related_name='facts',
        db_index=True,
        on_delete=models.CASCADE,
        help_text=_('Host for the facts that the fact scan captured.'),
    )
    timestamp = models.DateTimeField(
        default=None,
        editable=False,
        help_text=_('Date and time of the corresponding fact scan gathering time.')
    )
    # Name of the Ansible module that produced the facts (e.g. "setup").
    module = models.CharField(max_length=128)
    # NOTE(review): mutable default {} is shared across instances per the
    # usual Django field-default caveat -- a callable default is preferred.
    facts = JSONBField(blank=True, default={}, help_text=_('Arbitrary JSON structure of module facts captured at timestamp for a single host.'))

    class Meta:
        app_label = 'main'
        # Composite index supporting the timestamp/module/host lookups below.
        index_together = [
            ["timestamp", "module", "host"],
        ]

    @staticmethod
    def get_host_fact(host_id, module, timestamp):
        """Return the newest Fact for host/module at or before timestamp, or None."""
        qs = Fact.objects.filter(host__id=host_id, module=module, timestamp__lte=timestamp).order_by('-timestamp')
        if qs:
            return qs[0]
        else:
            return None
    @staticmethod
    def get_timeline(host_id, module=None, ts_from=None, ts_to=None):
        """Return (timestamp, module) rows for a host, optionally filtered.

        Equal ts_from/ts_to selects that exact timestamp; otherwise the
        window is (ts_from, ts_to] with either bound optional.
        """
        kwargs = {
            'host__id': host_id,
        }
        if module:
            kwargs['module'] = module
        if ts_from and ts_to and ts_from == ts_to:
            kwargs['timestamp'] = ts_from
        else:
            if ts_from:
                kwargs['timestamp__gt'] = ts_from
            if ts_to:
                kwargs['timestamp__lte'] = ts_to
        # NOTE(review): the first order_by is superseded by the second;
        # only ('-timestamp', 'module') takes effect.
        return Fact.objects.filter(**kwargs).order_by('-timestamp').only('timestamp', 'module').order_by('-timestamp', 'module')
    @staticmethod
    def add_fact(host_id, module, timestamp, facts):
        """Create, persist, and return a new Fact row."""
        fact_obj = Fact.objects.create(host_id=host_id, module=module, timestamp=timestamp, facts=facts)
        # NOTE(review): create() already saves; this save() is redundant.
        fact_obj.save()
        return fact_obj
| 942 | 99 | 105 |
92d5e207f5dc0651947ac8ccca6c9f69f1ca937e | 293 | py | Python | securex/SecureX-3/scripts/gunicorn.conf.py | CiscoDevNet/security-api-workshop--resources | ee7dbdb7909ee9689c4b061d74cdd6dbb4a8cd20 | [
"Apache-2.0"
] | 4 | 2020-10-13T16:08:11.000Z | 2021-11-28T11:44:45.000Z | securex/SecureX-3/scripts/gunicorn.conf.py | CiscoDevNet/security-api-workshop--resources | ee7dbdb7909ee9689c4b061d74cdd6dbb4a8cd20 | [
"Apache-2.0"
] | 1 | 2021-02-16T18:07:24.000Z | 2021-02-16T18:07:24.000Z | securex/SecureX-3/scripts/gunicorn.conf.py | CiscoDevNet/security-api-workshop--resources | ee7dbdb7909ee9689c4b061d74cdd6dbb4a8cd20 | [
"Apache-2.0"
] | 8 | 2020-10-16T15:30:28.000Z | 2021-03-25T15:28:49.000Z | # import multiprocessing
pidfile = 'flask_app.pid'
workers = 2
# workers = multiprocessing.cpu_count() * 2 + 1
bind = '0.0.0.0:80'
accesslog = './logs/access.log'
errorlog = './logs/error.log'
#certfile = './certs/local.cer'
#keyfile = './certs/local.key'
# user = 'ubuntu'
# group = 'ubuntu' | 24.416667 | 47 | 0.665529 | # import multiprocessing
pidfile = 'flask_app.pid'
workers = 2
# workers = multiprocessing.cpu_count() * 2 + 1
bind = '0.0.0.0:80'
accesslog = './logs/access.log'
errorlog = './logs/error.log'
#certfile = './certs/local.cer'
#keyfile = './certs/local.key'
# user = 'ubuntu'
# group = 'ubuntu' | 0 | 0 | 0 |
ca9a3657148f07444639079c4bf6b45a6d685933 | 1,164 | py | Python | src/discussion/management/commands/remove_low_quality_content.py | ResearchHub/ResearchHub-Backend-Open | d36dca33afae2d442690694bb2ab17180d84bcd3 | [
"MIT"
] | 18 | 2021-05-20T13:20:16.000Z | 2022-02-11T02:40:18.000Z | src/discussion/management/commands/remove_low_quality_content.py | ResearchHub/ResearchHub-Backend-Open | d36dca33afae2d442690694bb2ab17180d84bcd3 | [
"MIT"
] | 109 | 2021-05-21T20:14:23.000Z | 2022-03-31T20:56:10.000Z | src/discussion/management/commands/remove_low_quality_content.py | ResearchHub/ResearchHub-Backend-Open | d36dca33afae2d442690694bb2ab17180d84bcd3 | [
"MIT"
] | 4 | 2021-05-17T13:47:53.000Z | 2022-02-12T10:48:21.000Z | from django.core.management.base import BaseCommand
from django.contrib.contenttypes.models import ContentType
from django.db.models.functions import Length
from user.models import User
from discussion.models import Thread
import uuid
from utils.siftscience import decisions_api, events_api
| 34.235294 | 119 | 0.620275 | from django.core.management.base import BaseCommand
from django.contrib.contenttypes.models import ContentType
from django.db.models.functions import Length
from user.models import User
from discussion.models import Thread
import uuid
from utils.siftscience import decisions_api, events_api
class Command(BaseCommand):
    """Soft-remove discussion threads whose plain text is at most 25
    characters and report their authors to Sift Science as bad content."""
    def handle(self, *args, **options):
        low_threads = Thread.objects.annotate(text_len=Length('plain_text')).filter(text_len__lte=25, is_removed=False)
        thread_count = low_threads.count()
        for i, thread in enumerate(low_threads):
            print('{} / {}'.format(i, thread_count))
            thread.is_removed = True
            content_id = f'{type(thread).__name__}_{thread.id}'
            # NOTE(review): threads with no created_by are skipped before
            # save(), so their is_removed flag is never persisted -- confirm
            # this is intended.
            if not thread.created_by:
                continue
            try:
                decisions_api.apply_bad_content_decision(thread.created_by, content_id)
                events_api.track_flag_content(
                    thread.created_by,
                    content_id,
                    1,
                )
            except Exception as e:
                # Best-effort reporting: log the Sift failure and still
                # persist the removal below.
                print(e)
                pass
            thread.save()
| 815 | 6 | 50 |
72c693fbfd1853a2f9b44a4dafbf326095a24308 | 23,027 | py | Python | pyzoo/zoo/automl/search/ray_tune_search_engine.py | ankitdobhal/analytics-zoo | b8374bcd6c73bba49fe0b0ab075528cdd94cf2af | [
"Apache-2.0"
] | null | null | null | pyzoo/zoo/automl/search/ray_tune_search_engine.py | ankitdobhal/analytics-zoo | b8374bcd6c73bba49fe0b0ab075528cdd94cf2af | [
"Apache-2.0"
] | null | null | null | pyzoo/zoo/automl/search/ray_tune_search_engine.py | ankitdobhal/analytics-zoo | b8374bcd6c73bba49fe0b0ab075528cdd94cf2af | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ray
from ray import tune
from copy import deepcopy
import os
from zoo.automl.search.abstract import *
from zoo.automl.common.util import *
from zoo.automl.common.metrics import Evaluator
from zoo.automl.impute.impute import *
from ray.tune import Trainable
import ray.tune.track
from zoo.automl.logger import TensorboardXLogger
from zoo.automl.model.model_builder import ModelBuilder
from zoo.automl.feature.identity_transformer import IdentityTransformer
from zoo.automl.search.tune_utils import (create_searcher,
create_scheduler)
SEARCH_ALG_ALLOWED = ("variant_generator", "skopt", "bayesopt")
class RayTuneSearchEngine(SearchEngine):
"""
Tune driver
"""
def __init__(self,
logs_dir="",
resources_per_trial=None,
name="",
remote_dir=None,
):
"""
Constructor
:param resources_per_trial: resources for each trial
"""
self.pipeline = None
self.train_func = None
self.trainable_class = None
self.resources_per_trail = resources_per_trial
self.trials = None
self.remote_dir = remote_dir
self.name = name
self.logs_dir = os.path.abspath(os.path.expanduser(logs_dir))
def compile(self,
data,
model_create_func,
recipe,
search_space=None,
search_alg=None,
search_alg_params=None,
scheduler=None,
scheduler_params=None,
feature_transformers=None,
mc=False,
metric="mse"):
"""
Do necessary preparations for the engine
:param input_df:
:param search_space:
:param num_samples:
:param stop:
:param search_algorithm:
:param search_algorithm_params:
:param fixed_params:
:param feature_transformers:
:param model:
:param validation_df:
:param metric:
:return:
"""
# data mode detection
assert isinstance(data, dict), 'ERROR: Argument \'data\' should be a dictionary.'
data_mode = None # data_mode can only be 'dataframe' or 'ndarray'
data_schema = set(data.keys())
if set(["df"]).issubset(data_schema):
data_mode = 'dataframe'
if set(["x", "y"]).issubset(data_schema):
data_mode = 'ndarray'
assert data_mode in ['dataframe', 'ndarray'],\
'ERROR: Argument \'data\' should fit either \
dataframe schema (include \'df\' in keys) or\
ndarray (include \'x\' and \'y\' in keys) schema.'
# data extract
if data_mode == 'dataframe':
input_df = data['df']
feature_cols = data.get("feature_cols", None)
target_col = data.get("target_col", None)
validation_df = data.get("val_df", None)
else:
if data["x"].ndim == 1:
data["x"] = data["x"].reshape(-1, 1)
if data["y"].ndim == 1:
data["y"] = data["y"].reshape(-1, 1)
if "val_x" in data.keys() and data["val_x"].ndim == 1:
data["val_x"] = data["val_x"].reshape(-1, 1)
if "val_y" in data.keys() and data["val_y"].ndim == 1:
data["val_y"] = data["val_y"].reshape(-1, 1)
input_data = {"x": data["x"], "y": data["y"]}
if 'val_x' in data.keys():
validation_data = {"x": data["val_x"], "y": data["val_y"]}
else:
validation_data = None
# prepare parameters for search engine
runtime_params = recipe.runtime_params()
self.num_samples = runtime_params['num_samples']
stop = dict(runtime_params)
del stop['num_samples']
self.stop_criteria = stop
if search_space is None:
search_space = recipe.search_space(all_available_features=None)
self._search_alg = RayTuneSearchEngine._set_search_alg(search_alg, search_alg_params,
recipe, search_space)
self._scheduler = RayTuneSearchEngine._set_scheduler(scheduler, scheduler_params)
self.search_space = self._prepare_tune_config(search_space)
if feature_transformers is None and data_mode == 'dataframe':
feature_transformers = IdentityTransformer(feature_cols, target_col)
if data_mode == 'dataframe':
self.train_func = self._prepare_train_func(input_data=input_df,
model_create_func=model_create_func,
feature_transformers=feature_transformers,
validation_data=validation_df,
metric=metric,
mc=mc,
remote_dir=self.remote_dir,
numpy_format=False
)
else:
self.train_func = self._prepare_train_func(input_data=input_data,
model_create_func=model_create_func,
feature_transformers=None,
validation_data=validation_data,
metric=metric,
mc=mc,
remote_dir=self.remote_dir,
numpy_format=True
)
# self.trainable_class = self._prepare_trainable_class(input_df,
# feature_transformers,
# # model,
# future_seq_len,
# validation_df,
# metric_op,
# self.remote_dir)
@staticmethod
@staticmethod
def run(self):
"""
Run trials
:return: trials result
"""
analysis = tune.run(
self.train_func,
local_dir=self.logs_dir,
name=self.name,
stop=self.stop_criteria,
config=self.search_space,
search_alg=self._search_alg,
num_samples=self.num_samples,
scheduler=self._scheduler,
resources_per_trial=self.resources_per_trail,
verbose=1,
reuse_actors=True
)
self.trials = analysis.trials
# Visualization code for ray (leaderboard)
# catch the ImportError Since it has been processed in TensorboardXLogger
tf_config, tf_metric = self._log_adapt(analysis)
self.logger = TensorboardXLogger(os.path.join(self.logs_dir, self.name+"_leaderboard"))
self.logger.run(tf_config, tf_metric)
self.logger.close()
return analysis
@staticmethod
def _get_best_trial(trial_list, metric):
"""Retrieve the best trial."""
return max(trial_list, key=lambda trial: trial.last_result.get(metric, 0))
@staticmethod
@staticmethod
def _get_best_result(trial_list, metric):
"""Retrieve the last result from the best trial."""
return {metric: RayTuneSearchEngine._get_best_trial(trial_list, metric).last_result[metric]}
@staticmethod
@staticmethod
def _prepare_train_func(input_data,
model_create_func,
feature_transformers,
metric,
validation_data=None,
mc=False,
remote_dir=None,
numpy_format=False,
):
"""
Prepare the train function for ray tune
:param input_df: input dataframe
:param feature_transformers: feature transformers
:param model: model or model selector
:param validation_df: validation dataframe
:param metric: the rewarding metric
:return: the train function
"""
numpy_format_id = ray.put(numpy_format)
input_data_id = ray.put(input_data)
ft_id = ray.put(feature_transformers)
# model_id = ray.put(model)
# validation data processing
df_not_empty = isinstance(validation_data, dict) or\
(isinstance(validation_data, pd.DataFrame) and not validation_data.empty)
df_list_not_empty = isinstance(validation_data, dict) or\
(isinstance(validation_data, list) and validation_data
and not all([d.empty for d in validation_data]))
if validation_data is not None and (df_not_empty or df_list_not_empty):
validation_data_id = ray.put(validation_data)
is_val_valid = True
else:
is_val_valid = False
return train_func
@staticmethod
def _prepare_trainable_class(input_df,
feature_transformers,
future_seq_len,
metric,
validation_df=None,
mc=False,
remote_dir=None
):
"""
Prepare the train function for ray tune
:param input_df: input dataframe
:param feature_transformers: feature transformers
:param model: model or model selector
:param validation_df: validation dataframe
:param metric: the rewarding metric
:return: the train function
"""
input_df_id = ray.put(input_df)
ft_id = ray.put(feature_transformers)
# model_id = ray.put(model)
df_not_empty = isinstance(validation_df, pd.DataFrame) and not validation_df.empty
df_list_not_empty = isinstance(validation_df, list) and validation_df \
and not all([d.empty for d in validation_df])
if validation_df is not None and (df_not_empty or df_list_not_empty):
validation_df_id = ray.put(validation_df)
is_val_df_valid = True
else:
is_val_df_valid = False
return TrainableClass
| 43.365348 | 100 | 0.537065 | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ray
from ray import tune
from copy import deepcopy
import os
from zoo.automl.search.abstract import *
from zoo.automl.common.util import *
from zoo.automl.common.metrics import Evaluator
from zoo.automl.impute.impute import *
from ray.tune import Trainable
import ray.tune.track
from zoo.automl.logger import TensorboardXLogger
from zoo.automl.model.model_builder import ModelBuilder
from zoo.automl.feature.identity_transformer import IdentityTransformer
from zoo.automl.search.tune_utils import (create_searcher,
create_scheduler)
SEARCH_ALG_ALLOWED = ("variant_generator", "skopt", "bayesopt")
class RayTuneSearchEngine(SearchEngine):
"""
Tune driver
"""
def __init__(self,
logs_dir="",
resources_per_trial=None,
name="",
remote_dir=None,
):
"""
Constructor
:param resources_per_trial: resources for each trial
"""
self.pipeline = None
self.train_func = None
self.trainable_class = None
self.resources_per_trail = resources_per_trial
self.trials = None
self.remote_dir = remote_dir
self.name = name
self.logs_dir = os.path.abspath(os.path.expanduser(logs_dir))
def compile(self,
data,
model_create_func,
recipe,
search_space=None,
search_alg=None,
search_alg_params=None,
scheduler=None,
scheduler_params=None,
feature_transformers=None,
mc=False,
metric="mse"):
"""
Do necessary preparations for the engine
:param input_df:
:param search_space:
:param num_samples:
:param stop:
:param search_algorithm:
:param search_algorithm_params:
:param fixed_params:
:param feature_transformers:
:param model:
:param validation_df:
:param metric:
:return:
"""
# data mode detection
assert isinstance(data, dict), 'ERROR: Argument \'data\' should be a dictionary.'
data_mode = None # data_mode can only be 'dataframe' or 'ndarray'
data_schema = set(data.keys())
if set(["df"]).issubset(data_schema):
data_mode = 'dataframe'
if set(["x", "y"]).issubset(data_schema):
data_mode = 'ndarray'
assert data_mode in ['dataframe', 'ndarray'],\
'ERROR: Argument \'data\' should fit either \
dataframe schema (include \'df\' in keys) or\
ndarray (include \'x\' and \'y\' in keys) schema.'
# data extract
if data_mode == 'dataframe':
input_df = data['df']
feature_cols = data.get("feature_cols", None)
target_col = data.get("target_col", None)
validation_df = data.get("val_df", None)
else:
if data["x"].ndim == 1:
data["x"] = data["x"].reshape(-1, 1)
if data["y"].ndim == 1:
data["y"] = data["y"].reshape(-1, 1)
if "val_x" in data.keys() and data["val_x"].ndim == 1:
data["val_x"] = data["val_x"].reshape(-1, 1)
if "val_y" in data.keys() and data["val_y"].ndim == 1:
data["val_y"] = data["val_y"].reshape(-1, 1)
input_data = {"x": data["x"], "y": data["y"]}
if 'val_x' in data.keys():
validation_data = {"x": data["val_x"], "y": data["val_y"]}
else:
validation_data = None
# prepare parameters for search engine
runtime_params = recipe.runtime_params()
self.num_samples = runtime_params['num_samples']
stop = dict(runtime_params)
del stop['num_samples']
self.stop_criteria = stop
if search_space is None:
search_space = recipe.search_space(all_available_features=None)
self._search_alg = RayTuneSearchEngine._set_search_alg(search_alg, search_alg_params,
recipe, search_space)
self._scheduler = RayTuneSearchEngine._set_scheduler(scheduler, scheduler_params)
self.search_space = self._prepare_tune_config(search_space)
if feature_transformers is None and data_mode == 'dataframe':
feature_transformers = IdentityTransformer(feature_cols, target_col)
if data_mode == 'dataframe':
self.train_func = self._prepare_train_func(input_data=input_df,
model_create_func=model_create_func,
feature_transformers=feature_transformers,
validation_data=validation_df,
metric=metric,
mc=mc,
remote_dir=self.remote_dir,
numpy_format=False
)
else:
self.train_func = self._prepare_train_func(input_data=input_data,
model_create_func=model_create_func,
feature_transformers=None,
validation_data=validation_data,
metric=metric,
mc=mc,
remote_dir=self.remote_dir,
numpy_format=True
)
# self.trainable_class = self._prepare_trainable_class(input_df,
# feature_transformers,
# # model,
# future_seq_len,
# validation_df,
# metric_op,
# self.remote_dir)
    @staticmethod
    def _set_search_alg(search_alg, search_alg_params, recipe, search_space):
        """Resolve a search-algorithm name into a configured ray-tune searcher.

        :param search_alg: algorithm name (must be in SEARCH_ALG_ALLOWED),
            or a falsy value to skip searcher creation.
        :param search_alg_params: extra keyword arguments for the searcher,
            or None for defaults.
        :param recipe: recipe object providing opt_params() (skopt) and
            manual_search_space() (bayesopt).
        :param search_space: dict mapping hyper-parameter names to spaces;
            only its keys are used (for skopt's parameter_names).
        :return: a searcher built by create_searcher(), or the original
            falsy value unchanged.
        """
        if search_alg:
            if not isinstance(search_alg, str):
                raise ValueError(f"search_alg should be of type str."
                                 f" Got {search_alg.__class__.__name__}")
            search_alg = search_alg.lower()
            if search_alg_params is None:
                search_alg_params = dict()
            if search_alg not in SEARCH_ALG_ALLOWED:
                raise ValueError(f"search_alg must be one of {SEARCH_ALG_ALLOWED}. "
                                 f"Got: {search_alg}")
            if search_alg == "skopt":
                # skopt needs a pre-built Optimizer over the recipe's space.
                from skopt import Optimizer
                opt_params = recipe.opt_params()
                optimizer = Optimizer(opt_params)
                search_alg_params.update(dict(
                    optimizer=optimizer,
                    parameter_names=list(search_space.keys()),
                ))
            elif search_alg == "bayesopt":
                search_alg_params.update({"space": recipe.manual_search_space()})
            # All searchers maximize the trial-reported "reward_metric".
            search_alg_params.update(dict(
                metric="reward_metric",
                mode="max",
            ))
            search_alg = create_searcher(search_alg,
                                         **search_alg_params)
        return search_alg
    @staticmethod
    def _set_scheduler(scheduler, scheduler_params):
        """Resolve a scheduler name into a configured ray-tune trial scheduler.

        :param scheduler: scheduler name, or a falsy value for no scheduling.
        :param scheduler_params: extra keyword arguments, or None for defaults.
        :return: a scheduler built by create_scheduler(), or the original
            falsy value unchanged.
        """
        if scheduler:
            if not isinstance(scheduler, str):
                raise ValueError(f"Scheduler should be of type str. "
                                 f"Got {scheduler.__class__.__name__}")
            if scheduler_params is None:
                scheduler_params = dict()
            # Schedulers compare trials by the maximized "reward_metric"
            # measured per training iteration.
            scheduler_params.update(dict(
                time_attr="training_iteration",
                metric="reward_metric",
                mode="max",
            ))
            scheduler = create_scheduler(scheduler, **scheduler_params)
        return scheduler
def run(self):
"""
Run trials
:return: trials result
"""
analysis = tune.run(
self.train_func,
local_dir=self.logs_dir,
name=self.name,
stop=self.stop_criteria,
config=self.search_space,
search_alg=self._search_alg,
num_samples=self.num_samples,
scheduler=self._scheduler,
resources_per_trial=self.resources_per_trail,
verbose=1,
reuse_actors=True
)
self.trials = analysis.trials
# Visualization code for ray (leaderboard)
# catch the ImportError Since it has been processed in TensorboardXLogger
tf_config, tf_metric = self._log_adapt(analysis)
self.logger = TensorboardXLogger(os.path.join(self.logs_dir, self.name+"_leaderboard"))
self.logger.run(tf_config, tf_metric)
self.logger.close()
return analysis
def get_best_trials(self, k=1):
sorted_trials = RayTuneSearchEngine._get_sorted_trials(self.trials, metric="reward_metric")
best_trials = sorted_trials[:k]
return [self._make_trial_output(t) for t in best_trials]
def _make_trial_output(self, trial):
return TrialOutput(config=trial.config,
model_path=os.path.join(trial.logdir, trial.last_result["checkpoint"]))
    @staticmethod
    def _get_best_trial(trial_list, metric):
        """Return the trial whose last result maximizes *metric*.

        Trials missing the metric in their last result are scored as 0.
        """
        return max(trial_list, key=lambda trial: trial.last_result.get(metric, 0))
    @staticmethod
    def _get_sorted_trials(trial_list, metric):
        """Return trials sorted best-first by their last reported *metric*.

        Trials missing the metric in their last result are scored as 0.
        """
        return sorted(
            trial_list,
            key=lambda trial: trial.last_result.get(metric, 0),
            reverse=True)
@staticmethod
def _get_best_result(trial_list, metric):
"""Retrieve the last result from the best trial."""
return {metric: RayTuneSearchEngine._get_best_trial(trial_list, metric).last_result[metric]}
def test_run(self):
def mock_reporter(**kwargs):
assert "reward_metric" in kwargs, "Did not report proper metric"
assert "checkpoint" in kwargs, "Accidentally removed `checkpoint`?"
raise GoodError("This works.")
try:
self.train_func({'out_units': 1,
'selected_features': ["MONTH(datetime)", "WEEKDAY(datetime)"]},
mock_reporter)
# self.train_func(self.search_space, mock_reporter)
except TypeError as e:
print("Forgot to modify function signature?")
raise e
except GoodError:
print("Works!")
return 1
raise Exception("Didn't call reporter...")
@staticmethod
def _is_validation_df_valid(validation_df):
df_not_empty = isinstance(validation_df, pd.DataFrame) and not validation_df.empty
df_list_not_empty = isinstance(validation_df, list) and validation_df \
and not all([d.empty for d in validation_df])
return validation_df is not None and not (df_not_empty or df_list_not_empty)
@staticmethod
def _prepare_train_func(input_data,
model_create_func,
feature_transformers,
metric,
validation_data=None,
mc=False,
remote_dir=None,
numpy_format=False,
):
"""
Prepare the train function for ray tune
:param input_df: input dataframe
:param feature_transformers: feature transformers
:param model: model or model selector
:param validation_df: validation dataframe
:param metric: the rewarding metric
:return: the train function
"""
numpy_format_id = ray.put(numpy_format)
input_data_id = ray.put(input_data)
ft_id = ray.put(feature_transformers)
# model_id = ray.put(model)
# validation data processing
df_not_empty = isinstance(validation_data, dict) or\
(isinstance(validation_data, pd.DataFrame) and not validation_data.empty)
df_list_not_empty = isinstance(validation_data, dict) or\
(isinstance(validation_data, list) and validation_data
and not all([d.empty for d in validation_data]))
if validation_data is not None and (df_not_empty or df_list_not_empty):
validation_data_id = ray.put(validation_data)
is_val_valid = True
else:
is_val_valid = False
def train_func(config):
numpy_format = ray.get(numpy_format_id)
if isinstance(model_create_func, ModelBuilder):
trial_model = model_create_func.build(config)
else:
trial_model = model_create_func()
if not numpy_format:
global_ft = ray.get(ft_id)
trial_ft = deepcopy(global_ft)
imputer = None
if "imputation" in config:
if config["imputation"] == "LastFillImpute":
imputer = LastFillImpute()
elif config["imputation"] == "FillZeroImpute":
imputer = FillZeroImpute()
# handling input
global_input_df = ray.get(input_data_id)
trial_input_df = deepcopy(global_input_df)
if imputer:
trial_input_df = imputer.impute(trial_input_df)
config = convert_bayes_configs(config).copy()
# print("config is ", config)
(x_train, y_train) = trial_ft.fit_transform(trial_input_df, **config)
# trial_ft.fit(trial_input_df, **config)
# handling validation data
validation_data = None
if is_val_valid:
global_validation_df = ray.get(validation_data_id)
trial_validation_df = deepcopy(global_validation_df)
validation_data = trial_ft.transform(trial_validation_df)
else:
train_data = ray.get(input_data_id)
x_train, y_train = (train_data["x"], train_data["y"])
validation_data = None
if is_val_valid:
validation_data = ray.get(validation_data_id)
validation_data = (validation_data["x"], validation_data["y"])
trial_ft = None
# no need to call build since it is called the first time fit_eval is called.
# callbacks = [TuneCallback(tune_reporter)]
# fit model
best_reward_m = None
# print("config:", config)
for i in range(1, 101):
result = trial_model.fit_eval(x_train,
y_train,
validation_data=validation_data,
mc=mc,
metric=metric,
# verbose=1,
**config)
reward_m = result if Evaluator.get_metric_mode(metric) == "max" else -result
ckpt_name = "best.ckpt"
if best_reward_m is None or reward_m > best_reward_m:
best_reward_m = reward_m
save_zip(ckpt_name, trial_ft, trial_model, config)
if remote_dir is not None:
upload_ppl_hdfs(remote_dir, ckpt_name)
tune.track.log(training_iteration=i,
reward_metric=reward_m,
checkpoint="best.ckpt")
return train_func
@staticmethod
def _prepare_trainable_class(input_df,
feature_transformers,
future_seq_len,
metric,
validation_df=None,
mc=False,
remote_dir=None
):
"""
Prepare the train function for ray tune
:param input_df: input dataframe
:param feature_transformers: feature transformers
:param model: model or model selector
:param validation_df: validation dataframe
:param metric: the rewarding metric
:return: the train function
"""
input_df_id = ray.put(input_df)
ft_id = ray.put(feature_transformers)
# model_id = ray.put(model)
df_not_empty = isinstance(validation_df, pd.DataFrame) and not validation_df.empty
df_list_not_empty = isinstance(validation_df, list) and validation_df \
and not all([d.empty for d in validation_df])
if validation_df is not None and (df_not_empty or df_list_not_empty):
validation_df_id = ray.put(validation_df)
is_val_df_valid = True
else:
is_val_df_valid = False
class TrainableClass(Trainable):
def _setup(self, config):
# print("config in set up is", config)
global_ft = ray.get(ft_id)
# global_model = ray.get(model_id)
self.trial_ft = deepcopy(global_ft)
self.trial_model = TimeSequenceModel(check_optional_config=False,
future_seq_len=future_seq_len)
# handling input
global_input_df = ray.get(input_df_id)
trial_input_df = deepcopy(global_input_df)
self.config = convert_bayes_configs(config).copy()
(self.x_train, self.y_train) = self.trial_ft.fit_transform(trial_input_df,
**self.config)
# trial_ft.fit(trial_input_df, **config)
# handling validation data
self.validation_data = None
if is_val_df_valid:
global_validation_df = ray.get(validation_df_id)
trial_validation_df = deepcopy(global_validation_df)
self.validation_data = self.trial_ft.transform(trial_validation_df)
# no need to call build since it is called the first time fit_eval is called.
# callbacks = [TuneCallback(tune_reporter)]
# fit model
self.best_reward_m = -999
self.reward_m = -999
self.ckpt_name = "pipeline.ckpt"
self.metric_op = 1 if metric_mode is "max" else -1
def _train(self):
# print("self.config in train is ", self.config)
result = self.trial_model.fit_eval(self.x_train, self.y_train,
validation_data=self.validation_data,
# verbose=1,
**self.config)
self.reward_m = result if Evaluator.get_metric_mode(metric) == "max" else -result
# if metric == "mean_squared_error":
# self.reward_m = (-1) * result
# # print("running iteration: ",i)
# elif metric == "r_square":
# self.reward_m = result
# else:
# raise ValueError("metric can only be \"mean_squared_error\" or \"r_square\"")
return {"reward_metric": self.reward_m, "checkpoint": self.ckpt_name}
def _save(self, checkpoint_dir):
# print("checkpoint dir is ", checkpoint_dir)
ckpt_name = self.ckpt_name
# save in the working dir (without "checkpoint_{}".format(training_iteration))
path = os.path.join(checkpoint_dir, "..", ckpt_name)
# path = os.path.join(checkpoint_dir, ckpt_name)
# print("checkpoint save path is ", checkpoint_dir)
if self.reward_m > self.best_reward_m:
self.best_reward_m = self.reward_m
print("****this reward is", self.reward_m)
print("*********saving checkpoint")
save_zip(ckpt_name, self.trial_ft, self.trial_model, self.config)
if remote_dir is not None:
upload_ppl_hdfs(remote_dir, ckpt_name)
return path
def _restore(self, checkpoint_path):
# print("checkpoint path in restore is ", checkpoint_path)
if remote_dir is not None:
restore_hdfs(checkpoint_path, remote_dir, self.trial_ft, self.trial_model)
else:
restore_zip(checkpoint_path, self.trial_ft, self.trial_model)
return TrainableClass
def _prepare_tune_config(self, space):
tune_config = {}
for k, v in space.items():
if isinstance(v, RandomSample):
tune_config[k] = tune.sample_from(v.func)
elif isinstance(v, GridSearch):
tune_config[k] = tune.grid_search(v.values)
else:
tune_config[k] = v
return tune_config
def _log_adapt(self, analysis):
# config
config = analysis.get_all_configs()
# metric
metric_raw = analysis.fetch_trial_dataframes()
metric = {}
for key, value in metric_raw.items():
metric[key] = dict(zip(list(value.columns), list(map(list, value.values.T))))
config[key]["address"] = key
return config, metric
| 11,125 | 11 | 441 |
48c5f6e971f56e8aa3465449742bc2aac20738cb | 2,409 | py | Python | mmseg/datasets/waymo_depth_datset.py | anhvth/Swin-Transformer-Semantic-Segmentation | 31f69fe98de90cbf51bc9910f6fb777b0561bd4e | [
"Apache-2.0"
] | null | null | null | mmseg/datasets/waymo_depth_datset.py | anhvth/Swin-Transformer-Semantic-Segmentation | 31f69fe98de90cbf51bc9910f6fb777b0561bd4e | [
"Apache-2.0"
] | null | null | null | mmseg/datasets/waymo_depth_datset.py | anhvth/Swin-Transformer-Semantic-Segmentation | 31f69fe98de90cbf51bc9910f6fb777b0561bd4e | [
"Apache-2.0"
] | null | null | null | from .custom import *
@DATASETS.register_module(force=True) | 43.017857 | 81 | 0.579909 | from .custom import *
@DATASETS.register_module(force=True)
class WaymoDepthDataset(CustomDataset):
    """Waymo segmentation dataset variant that also carries depth-map paths.

    Besides the usual image/annotation directories, a ``depth_dir`` holding
    per-image depth maps (``.npy`` by default) may be supplied; its path is
    exposed to the data pipeline via ``results['depth_prefix']``.
    """

    def __init__(self, *args, depth_dir=None, depth_map_suffix='.npy',**kwargs):
        # Directory holding depth maps, and the suffix used to derive a
        # depth file name from an image file name.
        self.depth_dir = depth_dir
        self.depth_map_suffix = depth_map_suffix
        super(WaymoDepthDataset, self).__init__(*args,**kwargs)
    def pre_pipeline(self, results):
        """Inject the depth prefix and force flipping off before the pipeline."""
        super().pre_pipeline(results)
        results['depth_prefix'] = self.depth_dir
        # Flip augmentation is disabled unconditionally — presumably because
        # the depth maps are not flipped alongside the images; TODO confirm.
        results['flip'] = False
        results['flip_direction'] = None
    def load_annotations(self, img_dir, img_suffix, ann_dir, seg_map_suffix,
                         split):
        """Load annotation from directory.
        Args:
            img_dir (str): Path to image directory
            img_suffix (str): Suffix of images.
            ann_dir (str|None): Path to annotation directory.
            seg_map_suffix (str|None): Suffix of segmentation maps.
            split (str|None): Split txt file. If split is specified, only file
                with suffix in the splits will be loaded. Otherwise, all images
                in img_dir/ann_dir will be loaded. Default: None
        Returns:
            list[dict]: All image info of dataset.
        """
        depth_dir = self.depth_dir
        depth_map_suffix = self.depth_map_suffix
        img_infos = []
        if split is not None:
            # NOTE(review): this branch ignores depth_dir entirely — depth
            # maps are only attached in the full-scan branch below. Confirm
            # whether split-based loading should also attach them.
            with open(split) as f:
                for line in f:
                    img_name = line.strip()
                    img_info = dict(filename=img_name + img_suffix)
                    if ann_dir is not None:
                        seg_map = img_name + seg_map_suffix
                        img_info['ann'] = dict(seg_map=seg_map)
                    img_infos.append(img_info)
        else:
            for img in mmcv.scandir(img_dir, img_suffix, recursive=True):
                img_info = dict(filename=img)
                if ann_dir is not None:
                    seg_map = img.replace(img_suffix, seg_map_suffix)
                    img_info['ann'] = dict(seg_map=seg_map)
                if depth_dir is not None:
                    depth_map = img.replace(img_suffix, depth_map_suffix)
                    # NOTE(review): this REPLACES the 'ann' dict set just
                    # above, discarding seg_map whenever depth_dir is given.
                    # Looks like it should merge (ann['depth_map'] = ...);
                    # confirm intended behavior before changing.
                    img_info['ann'] = dict(depth_map=depth_map)
                img_infos.append(img_info)
        print_log(f'Loaded {len(img_infos)} images', logger=get_root_logger())
        return img_infos
996a8a1128b5be33b3b9ae21ba65e8dc91142069 | 1,167 | py | Python | index.py | samuel14luis/Proyecto-en-Python-criptograf-a-II | 2c1812299f46aa7fb8f289d78ca9004fe334b618 | [
"MIT"
] | null | null | null | index.py | samuel14luis/Proyecto-en-Python-criptograf-a-II | 2c1812299f46aa7fb8f289d78ca9004fe334b618 | [
"MIT"
] | null | null | null | index.py | samuel14luis/Proyecto-en-Python-criptograf-a-II | 2c1812299f46aa7fb8f289d78ca9004fe334b618 | [
"MIT"
] | null | null | null | import os
from utilidades.consola import *
from CriptografiaModerna.menuCM import menuCM
from CriptografiaClasica.menuCC import menuCC
#DEFINICIÓN DE VARIABLES
#DEFINICIÓN DE FUNCIONES
limpiarPantalla()
iniciarMenu()
despedida()
input('')
limpiarPantalla() | 22.442308 | 81 | 0.521851 | import os
from utilidades.consola import *
from CriptografiaModerna.menuCM import menuCM
from CriptografiaClasica.menuCC import menuCC
#DEFINICIÓN DE VARIABLES
#DEFINICIÓN DE FUNCIONES
def iniciarMenu():
    """Run the top-level interactive menu loop until the user exits."""
    error_count = 0
    state = 'seguir'
    output_config = 5
    options = [
        '[1] Criptografía Clásica',
        '[2] Criptografía Moderna',
        '[0] Salir'
    ]
    # Both submenus share the same call/return contract (result[0] is the
    # next loop state, result[1] the updated output config), so dispatch
    # through a table instead of duplicating the branch bodies.
    submenus = {'1': menuCC, '2': menuCM}
    print('Bienvenido')
    while state == 'seguir':
        mostrarMenu(options)
        choice = input('Elija una opción> ')
        limpiarPantalla()
        if choice in submenus:
            result = submenus[choice](output_config)
            state = result[0]
            output_config = result[1]
            error_count = 0
        elif choice == '0':
            state = salir()
        else:
            error_count += 1
            mostrarError('Debe ingresar una opción válida. {' + str(error_count) + '}')
            state = 'seguir'
eleccion = 'seguir'
limpiarPantalla()
iniciarMenu()
despedida()
input('')
limpiarPantalla() | 874 | 0 | 23 |
3e8649d6ffb3892810d5e3b3d26d82ac9734c0ff | 3,284 | py | Python | index_cli/main.py | lishnih/index_cli | 57f23d5df5168bcc73e23e0eeabbb8317014585b | [
"MIT"
] | null | null | null | index_cli/main.py | lishnih/index_cli | 57f23d5df5168bcc73e23e0eeabbb8317014585b | [
"MIT"
] | null | null | null | index_cli/main.py | lishnih/index_cli | 57f23d5df5168bcc73e23e0eeabbb8317014585b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
# Stan 2012-03-12
from __future__ import (division, absolute_import,
print_function, unicode_literals)
import sys
import os
import logging
from importlib import import_module
from .core.types23 import *
from .core.db import getDbUri, openDbUri
from .core.recorder import Recorder
from . import __version__ as index_version
from .base import proceed
from .base.parse import parse_files
from .base.models import Base, Error
| 30.981132 | 80 | 0.678136 | #!/usr/bin/env python
# coding=utf-8
# Stan 2012-03-12
from __future__ import (division, absolute_import,
print_function, unicode_literals)
import sys
import os
import logging
from importlib import import_module
from .core.types23 import *
from .core.db import getDbUri, openDbUri
from .core.recorder import Recorder
from . import __version__ as index_version
from .base import proceed
from .base.parse import parse_files
from .base.models import Base, Error
def main(files=None, profile=None, options={}, recorder=None, **kargs):
    """Scan *files* into the database and optionally run a file parser on them.

    :param files: path to process; falls back to options['files'].
    :param profile: parser profile name; falls back to options['profile'],
        defaulting to 'default'.
    :param options: configuration dict. NOTE(review): mutable default
        argument — shared across calls; only read here, so safe as long as
        callees do not mutate it.
    :param recorder: Recorder used for status/error reporting; a fresh one
        is created when omitted.
    :return: -11 (no database), -1 (no files), -2 (no parser), otherwise the
        result of parse_files() or 0.
    """
    files = files or options.get('files')
    profile = profile or options.get('profile', 'default')
    # NOTE(review): by operator precedence this parses as
    #   (options.get('parser') or '.file_parsers.<profile>') if profile else None
    # i.e. an explicit options['parser'] is ignored whenever profile is
    # falsy. profile defaults to 'default' so this rarely triggers — confirm
    # whether that is the intended reading.
    parser = options.get('parser') or \
        '.file_parsers.{0}'.format(profile) if profile else None
    # Initialize the recorder and set its debug level.
    recorder = recorder or Recorder()
    recorder.logging_class = Error
    recorder.set_debug_level(options.get('debug_level', 1))
    # Configure the logging level and dump the effective parameters.
    logging_level = options.get('logging_level', 'WARNING')
    logging.basicConfig(level=logging.getLevelName(logging_level))
    logging.debug((index_version, logging_level, logging.root.level, \
                   recorder.get_debug_level()))
    # Determine the database URI.
    dburi = getDbUri(options)
    if not dburi:
        recorder.warning("Database not specified!")
        return -11
    # Open the database connection and create the schema if needed.
    engine, session = openDbUri(dburi)
    Base.metadata.create_all(session.bind)
    # Attach the session used for recording system messages.
    recorder.session = session
    if not files:
        recorder.warning("Files not specified!")
        return -1
    # Scan the files
    # (creates records for directories, files and file items).
    files = os.path.expanduser(files)
    try:
        source_type = proceed(files, options, recorder)
    except Exception as e:
        recorder.exception("Exception during scanning!", target=str(files))
        raise
    if not parser:
        recorder.debug("Parser not specified, exiting!")
        return -2
    # Load the parser module.
    package = options.get('package', __package__)
    recorder.debug(('Loading of parser', package, parser))
    try:
        mod = import_module(parser, package)
    except Exception as e:
        recorder.exception("Unable to load the parser, exiting!",
                           target="{0}{1}".format(package, parser))
        raise
    # Resolve the parser's entry point and optional opening/closing hooks.
    entry = options.get('entry')
    opening = options.get('opening')
    closing = options.get('closing')
    recorder.debug("Parser loaded with entry '{0}' and auxilaries '{1}'/'{2}'".\
                   format(entry, opening, closing),
                   target="{0}.{1}".format(package, parser))
    recorder.func = getattr(mod, entry) if entry else None
    opening_func = getattr(mod, opening) if opening else None
    closing_func = getattr(mod, closing) if closing else None
    er = 0
    try:
        if opening_func:
            opening_func(options, recorder)
        if recorder.func:
            er = parse_files(files, source_type, options, recorder)
        if closing_func:
            closing_func(options, recorder)
    except Exception as e:
        # Parsing failures are recorded but not re-raised; er keeps its value.
        recorder.exception("Exception during parsing!", target=str(files))
    return er
| 3,004 | 0 | 23 |
a786d47f5e861416affdf90f0ab36292e84ae8ee | 4,447 | py | Python | mri_works/NodeEditor/modules/File_IO/Save_image_nii_gz.py | montigno/mri_works | 8ec6ff1500aa34d3540e44e4b0148023cf821f61 | [
"CECILL-B"
] | 2 | 2020-08-20T21:00:53.000Z | 2021-08-16T15:28:51.000Z | mri_works/NodeEditor/modules/File_IO/Save_image_nii_gz.py | montigno/mri_works | 8ec6ff1500aa34d3540e44e4b0148023cf821f61 | [
"CECILL-B"
] | 3 | 2020-09-24T06:50:43.000Z | 2020-12-15T11:02:04.000Z | mri_works/NodeEditor/modules/File_IO/Save_image_nii_gz.py | montigno/mri_works | 8ec6ff1500aa34d3540e44e4b0148023cf821f61 | [
"CECILL-B"
] | 1 | 2020-08-20T21:00:59.000Z | 2020-08-20T21:00:59.000Z |
##############################################################################
##############################################################################
##############################################################################
##############################################################################
| 35.015748 | 115 | 0.506634 | class Save_NiiGz:
def __init__(self, image=[[0.0]], filepath='path', **options):
    """Save `image` as a gzipped NIfTI (.nii.gz) file with an identity affine.

    Parameters
    ----------
    image : list or numpy.ndarray
        Image data; nested lists are converted to a numpy array first.
    filepath : str
        Output path.  The literal default 'path' opens a tkinter "save as"
        dialog instead; otherwise a '.nii.gz' suffix is appended when
        missing (a trailing '.nii' becomes '.nii.gz').
    **options
        Extra keyword arguments forwarded to
        `tkinter.filedialog.asksaveasfile` when the dialog is used.
    """
    import nibabel as nib
    import numpy as np
    import tkinter as tk
    from tkinter import filedialog
    root = tk.Tk()
    root.withdraw()  # hide the empty tk root window behind the dialog
    if type(image).__name__ == 'list':
        image = np.array(image)
    if filepath == 'path':
        self.fileSaved = filedialog.asksaveasfile(**options).name
    else:
        # Use suffix checks (endswith) rather than substring checks so a
        # '.nii' occurring in a directory name does not suppress the
        # extension -- consistent with Save_NiiGz_header.
        if filepath.endswith('.nii.gz'):
            self.fileSaved = filepath
        elif filepath.endswith('.nii'):
            self.fileSaved = filepath + '.gz'
        else:
            self.fileSaved = filepath + '.nii.gz'
    img = nib.Nifti1Image(image, np.eye(4))
    nib.save(img, self.fileSaved)
def pathFile(self: 'path'):
    # Return the path of the .nii.gz file written by __init__.
    return self.fileSaved
##############################################################################
class Save_NiiGz_header:
    """Save an image as a gzipped NIfTI (.nii.gz) file, reusing an existing
    NIfTI header (`new_header`) instead of building a default one."""

    def __init__(self,
                 image=[[0.0]], filepath='path', new_header='', **options):
        import nibabel as nib
        # numpy was missing here: `np.array` below raised NameError
        # whenever `image` was passed as a plain list.
        import numpy as np
        import tkinter as tk
        from tkinter import filedialog
        root = tk.Tk()
        root.withdraw()  # hide the empty tk root window behind the dialog
        if type(image).__name__ == 'list':
            image = np.array(image)
        if filepath == 'path':
            # No explicit path: ask the user through a save dialog.
            self.fileSaved = filedialog.asksaveasfile(**options).name
        else:
            if filepath.endswith('.nii.gz'):
                self.fileSaved = filepath
            elif filepath.endswith('.nii'):
                self.fileSaved = filepath + '.gz'
            else:
                self.fileSaved = filepath + '.nii.gz'
        # affine=None: the spatial transform comes from `new_header`.
        img = nib.Nifti1Image(image, None, header=new_header)
        nib.save(img, self.fileSaved)

    def pathFile(self: 'path'):
        """Return the path of the saved file."""
        return self.fileSaved
##############################################################################
class Save_Nifti:
    """Save an image as a NIfTI file ('nii' or 'nii.gz', selected through
    `out_type`) with an identity affine."""

    def __init__(self, image=[[0.0]], filepath='path', out_type="enumerate(('nii','nii.gz'))", **options):
        import nibabel as nib
        import numpy as np
        import tkinter as tk
        from tkinter import filedialog
        hidden_root = tk.Tk()
        hidden_root.withdraw()  # keep the tk root window invisible
        if type(image).__name__ == 'list':
            image = np.array(image)
        if filepath == 'path':
            # No explicit path given: ask the user through a save dialog.
            self.fileSaved = filedialog.asksaveasfile(**options).name
        elif filepath.endswith(('.nii', '.nii.gz')):
            # Strip the existing NIfTI suffix and apply the requested one.
            self.fileSaved = filepath[:filepath.index('.nii')] + '.' + out_type
        else:
            self.fileSaved = filepath + '.' + out_type
        nib.save(nib.Nifti1Image(image, np.eye(4)), self.fileSaved)

    def pathFile(self: 'path'):
        """Return the path of the saved file."""
        return self.fileSaved
##############################################################################
class Save_Nifti_header:
    """Save an image as a NIfTI file ('nii' or 'nii.gz', selected through
    `out_type`), reusing an existing NIfTI header (`new_header`)."""

    def __init__(self,
                 image=[[0.0]], filepath='path', new_header='', out_type="enumerate(('nii','nii.gz'))", **options):
        import nibabel as nib
        # numpy was missing here: `np.array` below raised NameError
        # whenever `image` was passed as a plain list.
        import numpy as np
        import tkinter as tk
        from tkinter import filedialog
        root = tk.Tk()
        root.withdraw()  # hide the empty tk root window behind the dialog
        if type(image).__name__ == 'list':
            image = np.array(image)
        if filepath == 'path':
            # No explicit path: ask the user through a save dialog.
            self.fileSaved = filedialog.asksaveasfile(**options).name
        else:
            if filepath.endswith('.nii') or filepath.endswith('.nii.gz'):
                # Strip the existing NIfTI suffix, apply the requested one.
                self.fileSaved = filepath[0:filepath.index('.nii')] + '.' + out_type
            else:
                self.fileSaved = filepath + '.' + out_type
        # affine=None: the spatial transform comes from `new_header`.
        img = nib.Nifti1Image(image, None, header=new_header)
        nib.save(img, self.fileSaved)

    def pathFile(self: 'path'):
        """Return the path of the saved file."""
        return self.fileSaved
##############################################################################
class Create_gzip:
    """Gzip-compress `file_in` to `file_in + '.gz'`, optionally deleting
    the source file afterwards."""

    def __init__(self, file_in='path', delete_file=False):
        import gzip
        import shutil
        import os
        self.out_gz = ''
        with open(file_in, 'rb') as f_in:
            with gzip.open(file_in + '.gz', 'wb') as f_out:
                # Stream-copy so large files are not read into memory at once.
                shutil.copyfileobj(f_in, f_out)
            self.out_gz = file_in + '.gz'
        if delete_file:
            try:
                os.remove(file_in)
            except OSError as e:
                # Best-effort cleanup: report but do not raise.
                # (Previously referenced the undefined name `input_file`,
                # which itself raised NameError inside this handler.)
                print("Error : %s : %s" % (file_in, e.strerror))

    def out_file(self: 'path'):
        """Return the path of the created .gz file."""
        return self.out_gz
| 3,721 | -5 | 383 |
bc5269769868f479d59e7a9477cb82de75d75760 | 19,744 | py | Python | dataloader.py | gregersn/Super-SloMo | 11f3b562ca44c068b847da33e09e4087adc1c032 | [
"MIT"
] | null | null | null | dataloader.py | gregersn/Super-SloMo | 11f3b562ca44c068b847da33e09e4087adc1c032 | [
"MIT"
] | null | null | null | dataloader.py | gregersn/Super-SloMo | 11f3b562ca44c068b847da33e09e4087adc1c032 | [
"MIT"
] | null | null | null | import torch.utils.data as data
from PIL import Image
import os
import os.path
import random
def _make_dataset(dir):
"""
Creates a 2D list of all the frames in N clips containing
M frames each.
2D List Structure:
[[frame00, frame01,...frameM] <-- clip0
[frame00, frame01,...frameM] <-- clip0
:
[frame00, frame01,...frameM]] <-- clipN
Parameters
----------
dir : string
root directory containing clips.
Returns
-------
list
2D list described above.
"""
framesPath = []
# Find and loop over all the clips in root `dir`.
for index, folder in enumerate(os.listdir(dir)):
clipsFolderPath = os.path.join(dir, folder)
# Skip items which are not folders.
if not (os.path.isdir(clipsFolderPath)):
continue
framesPath.append([])
# Find and loop over all the frames inside the clip.
for image in sorted(os.listdir(clipsFolderPath)):
# Add path to list.
framesPath[index].append(os.path.join(clipsFolderPath, image))
return framesPath
def _make_video_dataset(dir):
"""
Creates a 1D list of all the frames.
1D List Structure:
[frame0, frame1,...frameN]
Parameters
----------
dir : string
root directory containing frames.
Returns
-------
list
1D list described above.
"""
framesPath = []
# Find and loop over all the frames in root `dir`.
for image in sorted(os.listdir(dir)):
# Add path to list.
framesPath.append(os.path.join(dir, image))
return framesPath
def _pil_loader(path, cropArea=None, resizeDim=None, frameFlip=0):
    """
    Opens image at `path` using pil and applies data augmentation.
    Parameters
    ----------
        path : string
            path of the image.
        cropArea : tuple, optional
            coordinates for cropping image. Default: None
        resizeDim : tuple, optional
            dimensions for resizing image. Default: None
        frameFlip : int, optional
            Non zero to flip image horizontally. Default: 0
    Returns
    -------
        PIL.Image.Image
            loaded RGB image with resize, crop and flip applied in that order.
    """
    # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
    with open(path, 'rb') as f:
        img = Image.open(f)
        # Resize image if specified.
        resized_img = img.resize(resizeDim, Image.ANTIALIAS) if (resizeDim != None) else img
        # Crop image if crop area specified.  Crop the *resized* image so a
        # resize request is not silently discarded when both options are
        # given (the previous version cropped the original `img`).
        cropped_img = resized_img.crop(cropArea) if (cropArea != None) else resized_img
        # Flip image horizontally if specified.
        flipped_img = cropped_img.transpose(Image.FLIP_LEFT_RIGHT) if frameFlip else cropped_img
        return flipped_img.convert('RGB')
class SuperSloMo(data.Dataset):
"""
A dataloader for loading N samples arranged in this way:
|-- clip0
|-- frame00
|-- frame01
:
|-- frame11
|-- frame12
|-- clip1
|-- frame00
|-- frame01
:
|-- frame11
|-- frame12
:
:
|-- clipN
|-- frame00
|-- frame01
:
|-- frame11
|-- frame12
...
Attributes
----------
framesPath : list
List of frames' path in the dataset.
Methods
-------
__getitem__(index)
Returns the sample corresponding to `index` from dataset.
__len__()
Returns the size of dataset. Invoked as len(datasetObj).
__repr__()
Returns printable representation of the dataset object.
"""
def __init__(self, root, transform=None, dim=(640, 360), randomCropSize=(352, 352), train=True):
"""
Parameters
----------
root : string
Root directory path.
transform : callable, optional
A function/transform that takes in
a sample and returns a transformed version.
E.g, ``transforms.RandomCrop`` for images.
dim : tuple, optional
Dimensions of images in dataset. Default: (640, 360)
randomCropSize : tuple, optional
Dimensions of random crop to be applied. Default: (352, 352)
train : boolean, optional
Specifies if the dataset is for training or testing/validation.
`True` returns samples with data augmentation like random
flipping, random cropping, etc. while `False` returns the
samples without randomization. Default: True
"""
# Populate the list with image paths for all the
# frame in `root`.
framesPath = _make_dataset(root)
# Raise error if no images found in root.
if len(framesPath) == 0:
raise(RuntimeError("Found 0 files in subfolders of: " + root + "\n"))
self.randomCropSize = randomCropSize
self.cropX0 = dim[0] - randomCropSize[0]
self.cropY0 = dim[1] - randomCropSize[1]
self.root = root
self.transform = transform
self.train = train
self.framesPath = framesPath
def __getitem__(self, index):
"""
Returns the sample corresponding to `index` from dataset.
The sample consists of two reference frames - I0 and I1 -
and a random frame chosen from the 7 intermediate frames
available between I0 and I1 along with it's relative index.
Parameters
----------
index : int
Index
Returns
-------
tuple
(sample, returnIndex) where sample is
[I0, intermediate_frame, I1] and returnIndex is
the position of `random_intermediate_frame`.
e.g.- `returnIndex` of frame next to I0 would be 0 and
frame before I1 would be 6.
"""
sample = []
if (self.train):
### Data Augmentation ###
# To select random 9 frames from 12 frames in a clip
firstFrame = random.randint(0, 3)
# Apply random crop on the 9 input frames
cropX = random.randint(0, self.cropX0)
cropY = random.randint(0, self.cropY0)
cropArea = (cropX, cropY, cropX + self.randomCropSize[0], cropY + self.randomCropSize[1])
# Random reverse frame
#frameRange = range(firstFrame, firstFrame + 9) if (random.randint(0, 1)) else range(firstFrame + 8, firstFrame - 1, -1)
IFrameIndex = random.randint(firstFrame + 1, firstFrame + 7)
if (random.randint(0, 1)):
frameRange = [firstFrame, IFrameIndex, firstFrame + 8]
returnIndex = IFrameIndex - firstFrame - 1
else:
frameRange = [firstFrame + 8, IFrameIndex, firstFrame]
returnIndex = firstFrame - IFrameIndex + 7
# Random flip frame
randomFrameFlip = random.randint(0, 1)
else:
# Fixed settings to return same samples every epoch.
# For validation/test sets.
firstFrame = 0
cropArea = (0, 0, self.randomCropSize[0], self.randomCropSize[1])
IFrameIndex = ((index) % 7 + 1)
returnIndex = IFrameIndex - 1
frameRange = [0, IFrameIndex, 8]
randomFrameFlip = 0
# Loop over for all frames corresponding to the `index`.
for frameIndex in frameRange:
# Open image using pil and augment the image.
image = _pil_loader(self.framesPath[index][frameIndex], cropArea=cropArea, frameFlip=randomFrameFlip)
# Apply transformation if specified.
if self.transform is not None:
image = self.transform(image)
sample.append(image)
return sample, returnIndex
def __len__(self):
"""
Returns the size of dataset. Invoked as len(datasetObj).
Returns
-------
int
number of samples.
"""
return len(self.framesPath)
def __repr__(self):
"""
Returns printable representation of the dataset object.
Returns
-------
string
info.
"""
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
class UCI101Test(data.Dataset):
"""
A dataloader for loading N samples arranged in this way:
|-- clip0
|-- frame00
|-- frame01
|-- frame02
|-- clip1
|-- frame00
|-- frame01
|-- frame02
:
:
|-- clipN
|-- frame00
|-- frame01
|-- frame02
...
Attributes
----------
framesPath : list
List of frames' path in the dataset.
Methods
-------
__getitem__(index)
Returns the sample corresponding to `index` from dataset.
__len__()
Returns the size of dataset. Invoked as len(datasetObj).
__repr__()
Returns printable representation of the dataset object.
"""
def __init__(self, root, transform=None):
"""
Parameters
----------
root : string
Root directory path.
transform : callable, optional
A function/transform that takes in
a sample and returns a transformed version.
E.g, ``transforms.RandomCrop`` for images.
"""
# Populate the list with image paths for all the
# frame in `root`.
framesPath = _make_dataset(root)
# Raise error if no images found in root.
if len(framesPath) == 0:
raise(RuntimeError("Found 0 files in subfolders of: " + root + "\n"))
self.root = root
self.framesPath = framesPath
self.transform = transform
def __getitem__(self, index):
"""
Returns the sample corresponding to `index` from dataset.
The sample consists of two reference frames - I0 and I1 -
and a intermediate frame between I0 and I1.
Parameters
----------
index : int
Index
Returns
-------
tuple
(sample, returnIndex) where sample is
[I0, intermediate_frame, I1] and returnIndex is
the position of `intermediate_frame`.
The returnIndex is always 3 and is being returned
to maintain compatibility with the `SuperSloMo`
dataloader where 3 corresponds to the middle frame.
"""
sample = []
# Loop over for all frames corresponding to the `index`.
for framePath in self.framesPath[index]:
# Open image using pil.
image = _pil_loader(framePath)
# Apply transformation if specified.
if self.transform is not None:
image = self.transform(image)
sample.append(image)
return sample, 3
def __len__(self):
"""
Returns the size of dataset. Invoked as len(datasetObj).
Returns
-------
int
number of samples.
"""
return len(self.framesPath)
def __repr__(self):
"""
Returns printable representation of the dataset object.
Returns
-------
string
info.
"""
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
class Video(data.Dataset):
"""
A dataloader for loading all video frames in a folder:
|-- frame0
|-- frame1
:
:
|-- frameN
...
Attributes
----------
framesPath : list
List of frames' path in the dataset.
origDim : tuple
original dimensions of the video.
dim : tuple
resized dimensions of the video (for CNN).
Methods
-------
__getitem__(index)
Returns the sample corresponding to `index` from dataset.
__len__()
Returns the size of dataset. Invoked as len(datasetObj).
__repr__()
Returns printable representation of the dataset object.
"""
def __init__(self, root, transform=None):
    """
    Parameters
    ----------
        root : string
            Root directory path.
        transform : callable, optional
            A function/transform that takes in
            a sample and returns a transformed version.
            E.g, ``transforms.RandomCrop`` for images.
    """
    # Populate the list with image paths for all the
    # frame in `root`.
    framesPath = _make_video_dataset(root)
    # Raise error if no images found in root.  Checked *before* touching
    # framesPath[0], so an empty folder raises this RuntimeError instead
    # of an opaque IndexError (as the previous ordering did).
    if len(framesPath) == 0:
        raise(RuntimeError("Found 0 files in: " + root + "\n"))
    # Get dimensions of frames from the first one.
    frame = _pil_loader(framesPath[0])
    self.origDim = frame.size
    # Round both dimensions down to a multiple of 32 (network input constraint).
    self.dim = int(self.origDim[0] / 32) * 32, int(self.origDim[1] / 32) * 32
    self.root = root
    self.framesPath = framesPath
    self.transform = transform
def __getitem__(self, index):
"""
Returns the sample corresponding to `index` from dataset.
The sample consists of two reference frames - I0 and I1.
Parameters
----------
index : int
Index
Returns
-------
list
sample is [I0, I1] where I0 is the frame with index
`index` and I1 is the next frame.
"""
sample = []
# Loop over for all frames corresponding to the `index`.
for framePath in [self.framesPath[index], self.framesPath[index + 1]]:
# Open image using pil.
image = _pil_loader(framePath, resizeDim=self.dim)
# Apply transformation if specified.
if self.transform is not None:
image = self.transform(image)
sample.append(image)
return sample
def __len__(self):
"""
Returns the size of dataset. Invoked as len(datasetObj).
Returns
-------
int
number of samples.
"""
# Using `-1` so that dataloader accesses only upto
# frames [N-1, N] and not [N, N+1] which because frame
# N+1 doesn't exist.
return len(self.framesPath) - 1
def __repr__(self):
"""
Returns printable representation of the dataset object.
Returns
-------
string
info.
"""
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
class Images(data.Dataset):
"""
A dataloader for loading all video frames in a folder:
|-- frame0
|-- frame1
:
:
|-- frameN
...
Attributes
----------
framesPath : list
List of frames' path in the dataset.
origDim : tuple
original dimensions of the video.
dim : tuple
resized dimensions of the video (for CNN).
Methods
-------
__getitem__(index)
Returns the sample corresponding to `index` from dataset.
__len__()
Returns the size of dataset. Invoked as len(datasetObj).
__repr__()
Returns printable representation of the dataset object.
"""
def __init__(self, frame0, frame1, transform=None):
    """
    Parameters
    ----------
        frame0 : string
            Input image 1
        frame1: string
            Input image 2
        transform : callable, optional
            A function/transform that takes in
            a sample and returns a transformed version.
            E.g, ``transforms.RandomCrop`` for images.
    """
    # The "dataset" here is simply the ordered pair of input frames.
    framesPath = [frame0, frame1]
    # Get dimensions of frames from the first one.
    frame = _pil_loader(frame0)
    self.origDim = frame.size
    # Round both dimensions down to a multiple of 32 (network input constraint).
    self.dim = int(self.origDim[0] / 32) * 32, int(self.origDim[1] / 32) * 32
    # Defensive check kept for parity with the folder-based loaders.
    # (The previous message referenced an undefined name `root`, so this
    # branch raised NameError instead of the intended RuntimeError.)
    if len(framesPath) == 0:
        raise(RuntimeError("Found 0 files\n"))
    self.framesPath = framesPath
    self.transform = transform
def __getitem__(self, index):
"""
Returns the sample corresponding to `index` from dataset.
The sample consists of two reference frames - I0 and I1.
Parameters
----------
index : int
Index
Returns
-------
list
sample is [I0, I1] where I0 is the frame with index
`index` and I1 is the next frame.
"""
sample = []
# Loop over for all frames corresponding to the `index`.
for framePath in [self.framesPath[index], self.framesPath[index + 1]]:
# Open image using pil.
image = _pil_loader(framePath, resizeDim=self.dim)
# Apply transformation if specified.
if self.transform is not None:
image = self.transform(image)
sample.append(image)
return sample
def __len__(self):
"""
Returns the size of dataset. Invoked as len(datasetObj).
Returns
-------
int
number of samples.
"""
# Using `-1` so that dataloader accesses only upto
# frames [N-1, N] and not [N, N+1] which because frame
# N+1 doesn't exist.
return len(self.framesPath) - 1
def __repr__(self):
"""
Returns printable representation of the dataset object.
Returns
-------
string
info.
"""
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str | 29.293769 | 132 | 0.538341 | import torch.utils.data as data
from PIL import Image
import os
import os.path
import random
def _make_dataset(dir):
"""
Creates a 2D list of all the frames in N clips containing
M frames each.
2D List Structure:
[[frame00, frame01,...frameM] <-- clip0
[frame00, frame01,...frameM] <-- clip0
:
[frame00, frame01,...frameM]] <-- clipN
Parameters
----------
dir : string
root directory containing clips.
Returns
-------
list
2D list described above.
"""
framesPath = []
# Find and loop over all the clips in root `dir`.
for index, folder in enumerate(os.listdir(dir)):
clipsFolderPath = os.path.join(dir, folder)
# Skip items which are not folders.
if not (os.path.isdir(clipsFolderPath)):
continue
framesPath.append([])
# Find and loop over all the frames inside the clip.
for image in sorted(os.listdir(clipsFolderPath)):
# Add path to list.
framesPath[index].append(os.path.join(clipsFolderPath, image))
return framesPath
def _make_video_dataset(dir):
"""
Creates a 1D list of all the frames.
1D List Structure:
[frame0, frame1,...frameN]
Parameters
----------
dir : string
root directory containing frames.
Returns
-------
list
1D list described above.
"""
framesPath = []
# Find and loop over all the frames in root `dir`.
for image in sorted(os.listdir(dir)):
# Add path to list.
framesPath.append(os.path.join(dir, image))
return framesPath
def _pil_loader(path, cropArea=None, resizeDim=None, frameFlip=0):
"""
Opens image at `path` using pil and applies data augmentation.
Parameters
----------
path : string
path of the image.
cropArea : tuple, optional
coordinates for cropping image. Default: None
resizeDim : tuple, optional
dimensions for resizing image. Default: None
frameFlip : int, optional
Non zero to flip image horizontally. Default: 0
Returns
-------
list
2D list described above.
"""
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
# Resize image if specified.
resized_img = img.resize(resizeDim, Image.ANTIALIAS) if (resizeDim != None) else img
# Crop image if crop area specified.
cropped_img = img.crop(cropArea) if (cropArea != None) else resized_img
# Flip image horizontally if specified.
flipped_img = cropped_img.transpose(Image.FLIP_LEFT_RIGHT) if frameFlip else cropped_img
return flipped_img.convert('RGB')
class SuperSloMo(data.Dataset):
"""
A dataloader for loading N samples arranged in this way:
|-- clip0
|-- frame00
|-- frame01
:
|-- frame11
|-- frame12
|-- clip1
|-- frame00
|-- frame01
:
|-- frame11
|-- frame12
:
:
|-- clipN
|-- frame00
|-- frame01
:
|-- frame11
|-- frame12
...
Attributes
----------
framesPath : list
List of frames' path in the dataset.
Methods
-------
__getitem__(index)
Returns the sample corresponding to `index` from dataset.
__len__()
Returns the size of dataset. Invoked as len(datasetObj).
__repr__()
Returns printable representation of the dataset object.
"""
def __init__(self, root, transform=None, dim=(640, 360), randomCropSize=(352, 352), train=True):
"""
Parameters
----------
root : string
Root directory path.
transform : callable, optional
A function/transform that takes in
a sample and returns a transformed version.
E.g, ``transforms.RandomCrop`` for images.
dim : tuple, optional
Dimensions of images in dataset. Default: (640, 360)
randomCropSize : tuple, optional
Dimensions of random crop to be applied. Default: (352, 352)
train : boolean, optional
Specifies if the dataset is for training or testing/validation.
`True` returns samples with data augmentation like random
flipping, random cropping, etc. while `False` returns the
samples without randomization. Default: True
"""
# Populate the list with image paths for all the
# frame in `root`.
framesPath = _make_dataset(root)
# Raise error if no images found in root.
if len(framesPath) == 0:
raise(RuntimeError("Found 0 files in subfolders of: " + root + "\n"))
self.randomCropSize = randomCropSize
self.cropX0 = dim[0] - randomCropSize[0]
self.cropY0 = dim[1] - randomCropSize[1]
self.root = root
self.transform = transform
self.train = train
self.framesPath = framesPath
def __getitem__(self, index):
"""
Returns the sample corresponding to `index` from dataset.
The sample consists of two reference frames - I0 and I1 -
and a random frame chosen from the 7 intermediate frames
available between I0 and I1 along with it's relative index.
Parameters
----------
index : int
Index
Returns
-------
tuple
(sample, returnIndex) where sample is
[I0, intermediate_frame, I1] and returnIndex is
the position of `random_intermediate_frame`.
e.g.- `returnIndex` of frame next to I0 would be 0 and
frame before I1 would be 6.
"""
sample = []
if (self.train):
### Data Augmentation ###
# To select random 9 frames from 12 frames in a clip
firstFrame = random.randint(0, 3)
# Apply random crop on the 9 input frames
cropX = random.randint(0, self.cropX0)
cropY = random.randint(0, self.cropY0)
cropArea = (cropX, cropY, cropX + self.randomCropSize[0], cropY + self.randomCropSize[1])
# Random reverse frame
#frameRange = range(firstFrame, firstFrame + 9) if (random.randint(0, 1)) else range(firstFrame + 8, firstFrame - 1, -1)
IFrameIndex = random.randint(firstFrame + 1, firstFrame + 7)
if (random.randint(0, 1)):
frameRange = [firstFrame, IFrameIndex, firstFrame + 8]
returnIndex = IFrameIndex - firstFrame - 1
else:
frameRange = [firstFrame + 8, IFrameIndex, firstFrame]
returnIndex = firstFrame - IFrameIndex + 7
# Random flip frame
randomFrameFlip = random.randint(0, 1)
else:
# Fixed settings to return same samples every epoch.
# For validation/test sets.
firstFrame = 0
cropArea = (0, 0, self.randomCropSize[0], self.randomCropSize[1])
IFrameIndex = ((index) % 7 + 1)
returnIndex = IFrameIndex - 1
frameRange = [0, IFrameIndex, 8]
randomFrameFlip = 0
# Loop over for all frames corresponding to the `index`.
for frameIndex in frameRange:
# Open image using pil and augment the image.
image = _pil_loader(self.framesPath[index][frameIndex], cropArea=cropArea, frameFlip=randomFrameFlip)
# Apply transformation if specified.
if self.transform is not None:
image = self.transform(image)
sample.append(image)
return sample, returnIndex
def __len__(self):
"""
Returns the size of dataset. Invoked as len(datasetObj).
Returns
-------
int
number of samples.
"""
return len(self.framesPath)
def __repr__(self):
"""
Returns printable representation of the dataset object.
Returns
-------
string
info.
"""
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
class UCI101Test(data.Dataset):
"""
A dataloader for loading N samples arranged in this way:
|-- clip0
|-- frame00
|-- frame01
|-- frame02
|-- clip1
|-- frame00
|-- frame01
|-- frame02
:
:
|-- clipN
|-- frame00
|-- frame01
|-- frame02
...
Attributes
----------
framesPath : list
List of frames' path in the dataset.
Methods
-------
__getitem__(index)
Returns the sample corresponding to `index` from dataset.
__len__()
Returns the size of dataset. Invoked as len(datasetObj).
__repr__()
Returns printable representation of the dataset object.
"""
def __init__(self, root, transform=None):
"""
Parameters
----------
root : string
Root directory path.
transform : callable, optional
A function/transform that takes in
a sample and returns a transformed version.
E.g, ``transforms.RandomCrop`` for images.
"""
# Populate the list with image paths for all the
# frame in `root`.
framesPath = _make_dataset(root)
# Raise error if no images found in root.
if len(framesPath) == 0:
raise(RuntimeError("Found 0 files in subfolders of: " + root + "\n"))
self.root = root
self.framesPath = framesPath
self.transform = transform
def __getitem__(self, index):
"""
Returns the sample corresponding to `index` from dataset.
The sample consists of two reference frames - I0 and I1 -
and a intermediate frame between I0 and I1.
Parameters
----------
index : int
Index
Returns
-------
tuple
(sample, returnIndex) where sample is
[I0, intermediate_frame, I1] and returnIndex is
the position of `intermediate_frame`.
The returnIndex is always 3 and is being returned
to maintain compatibility with the `SuperSloMo`
dataloader where 3 corresponds to the middle frame.
"""
sample = []
# Loop over for all frames corresponding to the `index`.
for framePath in self.framesPath[index]:
# Open image using pil.
image = _pil_loader(framePath)
# Apply transformation if specified.
if self.transform is not None:
image = self.transform(image)
sample.append(image)
return sample, 3
def __len__(self):
"""
Returns the size of dataset. Invoked as len(datasetObj).
Returns
-------
int
number of samples.
"""
return len(self.framesPath)
def __repr__(self):
"""
Returns printable representation of the dataset object.
Returns
-------
string
info.
"""
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
class Video(data.Dataset):
"""
A dataloader for loading all video frames in a folder:
|-- frame0
|-- frame1
:
:
|-- frameN
...
Attributes
----------
framesPath : list
List of frames' path in the dataset.
origDim : tuple
original dimensions of the video.
dim : tuple
resized dimensions of the video (for CNN).
Methods
-------
__getitem__(index)
Returns the sample corresponding to `index` from dataset.
__len__()
Returns the size of dataset. Invoked as len(datasetObj).
__repr__()
Returns printable representation of the dataset object.
"""
def __init__(self, root, transform=None):
"""
Parameters
----------
root : string
Root directory path.
transform : callable, optional
A function/transform that takes in
a sample and returns a transformed version.
E.g, ``transforms.RandomCrop`` for images.
"""
# Populate the list with image paths for all the
# frame in `root`.
framesPath = _make_video_dataset(root)
# Get dimensions of frames
frame = _pil_loader(framesPath[0])
self.origDim = frame.size
self.dim = int(self.origDim[0] / 32) * 32, int(self.origDim[1] / 32) * 32
# Raise error if no images found in root.
if len(framesPath) == 0:
raise(RuntimeError("Found 0 files in: " + root + "\n"))
self.root = root
self.framesPath = framesPath
self.transform = transform
def __getitem__(self, index):
"""
Returns the sample corresponding to `index` from dataset.
The sample consists of two reference frames - I0 and I1.
Parameters
----------
index : int
Index
Returns
-------
list
sample is [I0, I1] where I0 is the frame with index
`index` and I1 is the next frame.
"""
sample = []
# Loop over for all frames corresponding to the `index`.
for framePath in [self.framesPath[index], self.framesPath[index + 1]]:
# Open image using pil.
image = _pil_loader(framePath, resizeDim=self.dim)
# Apply transformation if specified.
if self.transform is not None:
image = self.transform(image)
sample.append(image)
return sample
def __len__(self):
"""
Returns the size of dataset. Invoked as len(datasetObj).
Returns
-------
int
number of samples.
"""
# Using `-1` so that dataloader accesses only upto
# frames [N-1, N] and not [N, N+1] which because frame
# N+1 doesn't exist.
return len(self.framesPath) - 1
def __repr__(self):
"""
Returns printable representation of the dataset object.
Returns
-------
string
info.
"""
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
class Images(data.Dataset):
    """
    A dataset holding exactly one pair of consecutive video frames:
    [frame0, frame1].
    Attributes
    ----------
    framesPath : list
        Paths of the two frames in the dataset.
    origDim : tuple
        Original (width, height) of the first frame.
    dim : tuple
        Frame dimensions rounded down to multiples of 32 (for the CNN).
    Methods
    -------
    __getitem__(index)
        Returns the sample corresponding to `index` from dataset.
    get_single(index)
        Returns a single frame at its original resolution.
    __len__()
        Returns the size of dataset. Invoked as len(datasetObj).
    __repr__()
        Returns printable representation of the dataset object.
    """
    def __init__(self, frame0, frame1, transform=None):
        """
        Parameters
        ----------
        frame0 : string
            Path to input image 1.
        frame1 : string
            Path to input image 2.
        transform : callable, optional
            A function/transform that takes in
            a sample and returns a transformed version.
            E.g, ``transforms.RandomCrop`` for images.
        Raises
        ------
        RuntimeError
            If either frame path is empty.
        """
        # This dataset always consists of exactly two reference frames.
        framesPath = [frame0, frame1]
        # Validate inputs up front. (Fixes the original dead-code guard,
        # whose error message referenced an undefined name `root`.)
        if not frame0 or not frame1:
            raise RuntimeError(
                "Expected two frame paths, got: {!r}, {!r}\n".format(frame0, frame1))
        # Probe the first frame for the video's dimensions.
        frame = _pil_loader(frame0)
        self.origDim = frame.size
        # Round each dimension down to the nearest multiple of 32,
        # as required by the downstream CNN.
        self.dim = int(self.origDim[0] / 32) * 32, int(self.origDim[1] / 32) * 32
        self.framesPath = framesPath
        self.transform = transform
    def __getitem__(self, index):
        """
        Returns the sample corresponding to `index` from dataset.
        The sample consists of two reference frames - I0 and I1.
        Parameters
        ----------
        index : int
            Index
        Returns
        -------
        list
            sample is [I0, I1] where I0 is the frame with index
            `index` and I1 is the next frame.
        """
        sample = []
        # Loop over the frame at `index` and the one that follows it.
        for framePath in [self.framesPath[index], self.framesPath[index + 1]]:
            # Open image using pil, resized to CNN-friendly dimensions.
            image = _pil_loader(framePath, resizeDim=self.dim)
            # Apply transformation if specified.
            if self.transform is not None:
                image = self.transform(image)
            sample.append(image)
        return sample
    def get_single(self, index):
        """
        Returns the frame at `index` at its original resolution,
        transformed if a transform was supplied.
        Parameters
        ----------
        index : int
            Index
        Returns
        -------
        PIL.Image.Image or transformed sample
            the single loaded frame.
        """
        # Fix: the attribute is `framesPath`; the original read the
        # nonexistent `self.framePath`, raising AttributeError.
        image = _pil_loader(self.framesPath[index])
        if self.transform is not None:
            image = self.transform(image)
        return image
    def __len__(self):
        """
        Returns the size of dataset. Invoked as len(datasetObj).
        Returns
        -------
        int
            number of samples.
        """
        # Using `-1` so that dataloader accesses only upto
        # frames [N-1, N] and not [N, N+1] which because frame
        # N+1 doesn't exist.
        return len(self.framesPath) - 1
    def __repr__(self):
        """
        Returns printable representation of the dataset object.
        Returns
        -------
        string
            info.
        """
        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
        fmt_str += '    Number of datapoints: {}\n'.format(self.__len__())
        # Fix: the original printed `self.root`, an attribute that is
        # never set on this class; report the frame paths instead.
        fmt_str += '    Frame paths: {}\n'.format(self.framesPath)
        tmp = '    Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        return fmt_str
40f10ba9cba8e68dff7b7eb5342f4e0f3d77da91 | 215,751 | py | Python | boto3_type_annotations_with_docs/boto3_type_annotations/greengrass/client.py | cowboygneox/boto3_type_annotations | 450dce1de4e066b939de7eac2ec560ed1a7ddaa2 | [
"MIT"
] | 119 | 2018-12-01T18:20:57.000Z | 2022-02-02T10:31:29.000Z | boto3_type_annotations_with_docs/boto3_type_annotations/greengrass/client.py | cowboygneox/boto3_type_annotations | 450dce1de4e066b939de7eac2ec560ed1a7ddaa2 | [
"MIT"
] | 15 | 2018-11-16T00:16:44.000Z | 2021-11-13T03:44:18.000Z | boto3_type_annotations_with_docs/boto3_type_annotations/greengrass/client.py | cowboygneox/boto3_type_annotations | 450dce1de4e066b939de7eac2ec560ed1a7ddaa2 | [
"MIT"
] | 11 | 2019-05-06T05:26:51.000Z | 2021-09-28T15:27:59.000Z | from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import Union
from typing import List
| 49.850046 | 525 | 0.544276 | from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import Union
from typing import List
class Client(BaseClient):
def associate_role_to_group(self, GroupId: str, RoleArn: str = None) -> Dict:
"""
Associates a role with a group. Your Greengrass core will use the role to access AWS cloud services. The role's permissions should allow Greengrass core Lambda functions to perform actions against the cloud.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/AssociateRoleToGroup>`_
**Request Syntax**
::
response = client.associate_role_to_group(
GroupId='string',
RoleArn='string'
)
**Response Syntax**
::
{
'AssociatedAt': 'string'
}
**Response Structure**
- *(dict) --* success
- **AssociatedAt** *(string) --* The time, in milliseconds since the epoch, when the role ARN was associated with the group.
:type GroupId: string
:param GroupId: **[REQUIRED]** The ID of the Greengrass group.
:type RoleArn: string
:param RoleArn: The ARN of the role you wish to associate with this group.
:rtype: dict
:returns:
"""
pass
def associate_service_role_to_account(self, RoleArn: str = None) -> Dict:
"""
Associates a role with your account. AWS IoT Greengrass will use the role to access your Lambda functions and AWS IoT resources. This is necessary for deployments to succeed. The role must have at least minimum permissions in the policy ''AWSGreengrassResourceAccessRolePolicy''.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/AssociateServiceRoleToAccount>`_
**Request Syntax**
::
response = client.associate_service_role_to_account(
RoleArn='string'
)
**Response Syntax**
::
{
'AssociatedAt': 'string'
}
**Response Structure**
- *(dict) --* success
- **AssociatedAt** *(string) --* The time when the service role was associated with the account.
:type RoleArn: string
:param RoleArn: The ARN of the service role you wish to associate with your account.
:rtype: dict
:returns:
"""
pass
def can_paginate(self, operation_name: str = None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:return: ``True`` if the operation can be paginated,
``False`` otherwise.
"""
pass
def create_connector_definition(self, AmznClientToken: str = None, InitialVersion: Dict = None, Name: str = None, tags: Dict = None) -> Dict:
"""
Creates a connector definition. You may provide the initial version of the connector definition now or use ''CreateConnectorDefinitionVersion'' at a later time.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/CreateConnectorDefinition>`_
**Request Syntax**
::
response = client.create_connector_definition(
AmznClientToken='string',
InitialVersion={
'Connectors': [
{
'ConnectorArn': 'string',
'Id': 'string',
'Parameters': {
'string': 'string'
}
},
]
},
Name='string',
tags={
'string': 'string'
}
)
**Response Syntax**
::
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Id': 'string',
'LastUpdatedTimestamp': 'string',
'LatestVersion': 'string',
'LatestVersionArn': 'string',
'Name': 'string'
}
**Response Structure**
- *(dict) --*
- **Arn** *(string) --* The ARN of the definition.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was created.
- **Id** *(string) --* The ID of the definition.
- **LastUpdatedTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was last updated.
- **LatestVersion** *(string) --* The latest version of the definition.
- **LatestVersionArn** *(string) --* The ARN of the latest version of the definition.
- **Name** *(string) --* The name of the definition.
:type AmznClientToken: string
:param AmznClientToken: A client token used to correlate requests and responses.
:type InitialVersion: dict
:param InitialVersion: Information about the initial version of the connector definition.
- **Connectors** *(list) --* A list of references to connectors in this version, with their corresponding configuration settings.
- *(dict) --* Information about a connector. Connectors run on the Greengrass core and contain built-in integration with local infrastructure, device protocols, AWS, and other cloud services.
- **ConnectorArn** *(string) --* The ARN of the connector.
- **Id** *(string) --* A descriptive or arbitrary ID for the connector. This value must be unique within the connector definition version. Max length is 128 characters with pattern [a-zA-Z0-9:_-]+.
- **Parameters** *(dict) --* The parameters or configuration that the connector uses.
- *(string) --*
- *(string) --*
:type Name: string
:param Name: The name of the connector definition.
:type tags: dict
:param tags: Tag(s) to add to the new resource
- *(string) --*
- *(string) --*
:rtype: dict
:returns:
"""
pass
def create_connector_definition_version(self, ConnectorDefinitionId: str, AmznClientToken: str = None, Connectors: List = None) -> Dict:
"""
Creates a version of a connector definition which has already been defined.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/CreateConnectorDefinitionVersion>`_
**Request Syntax**
::
response = client.create_connector_definition_version(
AmznClientToken='string',
ConnectorDefinitionId='string',
Connectors=[
{
'ConnectorArn': 'string',
'Id': 'string',
'Parameters': {
'string': 'string'
}
},
]
)
**Response Syntax**
::
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Id': 'string',
'Version': 'string'
}
**Response Structure**
- *(dict) --*
- **Arn** *(string) --* The ARN of the version.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the version was created.
- **Id** *(string) --* The ID of the version.
- **Version** *(string) --* The unique ID of the version.
:type AmznClientToken: string
:param AmznClientToken: A client token used to correlate requests and responses.
:type ConnectorDefinitionId: string
:param ConnectorDefinitionId: **[REQUIRED]** The ID of the connector definition.
:type Connectors: list
:param Connectors: A list of references to connectors in this version, with their corresponding configuration settings.
- *(dict) --* Information about a connector. Connectors run on the Greengrass core and contain built-in integration with local infrastructure, device protocols, AWS, and other cloud services.
- **ConnectorArn** *(string) --* The ARN of the connector.
- **Id** *(string) --* A descriptive or arbitrary ID for the connector. This value must be unique within the connector definition version. Max length is 128 characters with pattern [a-zA-Z0-9:_-]+.
- **Parameters** *(dict) --* The parameters or configuration that the connector uses.
- *(string) --*
- *(string) --*
:rtype: dict
:returns:
"""
pass
def create_core_definition(self, AmznClientToken: str = None, InitialVersion: Dict = None, Name: str = None, tags: Dict = None) -> Dict:
"""
Creates a core definition. You may provide the initial version of the core definition now or use ''CreateCoreDefinitionVersion'' at a later time. Greengrass groups must each contain exactly one Greengrass core.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/CreateCoreDefinition>`_
**Request Syntax**
::
response = client.create_core_definition(
AmznClientToken='string',
InitialVersion={
'Cores': [
{
'CertificateArn': 'string',
'Id': 'string',
'SyncShadow': True|False,
'ThingArn': 'string'
},
]
},
Name='string',
tags={
'string': 'string'
}
)
**Response Syntax**
::
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Id': 'string',
'LastUpdatedTimestamp': 'string',
'LatestVersion': 'string',
'LatestVersionArn': 'string',
'Name': 'string'
}
**Response Structure**
- *(dict) --*
- **Arn** *(string) --* The ARN of the definition.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was created.
- **Id** *(string) --* The ID of the definition.
- **LastUpdatedTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was last updated.
- **LatestVersion** *(string) --* The latest version of the definition.
- **LatestVersionArn** *(string) --* The ARN of the latest version of the definition.
- **Name** *(string) --* The name of the definition.
:type AmznClientToken: string
:param AmznClientToken: A client token used to correlate requests and responses.
:type InitialVersion: dict
:param InitialVersion: Information about the initial version of the core definition.
- **Cores** *(list) --* A list of cores in the core definition version.
- *(dict) --* Information about a core.
- **CertificateArn** *(string) --* The ARN of the certificate associated with the core.
- **Id** *(string) --* A descriptive or arbitrary ID for the core. This value must be unique within the core definition version. Max length is 128 characters with pattern \'\'[a-zA-Z0-9:_-]+\'\'.
- **SyncShadow** *(boolean) --* If true, the core\'s local shadow is automatically synced with the cloud.
- **ThingArn** *(string) --* The ARN of the thing which is the core.
:type Name: string
:param Name: The name of the core definition.
:type tags: dict
:param tags: Tag(s) to add to the new resource
- *(string) --*
- *(string) --*
:rtype: dict
:returns:
"""
pass
def create_core_definition_version(self, CoreDefinitionId: str, AmznClientToken: str = None, Cores: List = None) -> Dict:
"""
Creates a version of a core definition that has already been defined. Greengrass groups must each contain exactly one Greengrass core.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/CreateCoreDefinitionVersion>`_
**Request Syntax**
::
response = client.create_core_definition_version(
AmznClientToken='string',
CoreDefinitionId='string',
Cores=[
{
'CertificateArn': 'string',
'Id': 'string',
'SyncShadow': True|False,
'ThingArn': 'string'
},
]
)
**Response Syntax**
::
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Id': 'string',
'Version': 'string'
}
**Response Structure**
- *(dict) --*
- **Arn** *(string) --* The ARN of the version.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the version was created.
- **Id** *(string) --* The ID of the version.
- **Version** *(string) --* The unique ID of the version.
:type AmznClientToken: string
:param AmznClientToken: A client token used to correlate requests and responses.
:type CoreDefinitionId: string
:param CoreDefinitionId: **[REQUIRED]** The ID of the core definition.
:type Cores: list
:param Cores: A list of cores in the core definition version.
- *(dict) --* Information about a core.
- **CertificateArn** *(string) --* The ARN of the certificate associated with the core.
- **Id** *(string) --* A descriptive or arbitrary ID for the core. This value must be unique within the core definition version. Max length is 128 characters with pattern \'\'[a-zA-Z0-9:_-]+\'\'.
- **SyncShadow** *(boolean) --* If true, the core\'s local shadow is automatically synced with the cloud.
- **ThingArn** *(string) --* The ARN of the thing which is the core.
:rtype: dict
:returns:
"""
pass
def create_deployment(self, GroupId: str, AmznClientToken: str = None, DeploymentId: str = None, DeploymentType: str = None, GroupVersionId: str = None) -> Dict:
"""
Creates a deployment. ''CreateDeployment'' requests are idempotent with respect to the ''X-Amzn-Client-Token'' token and the request parameters.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/CreateDeployment>`_
**Request Syntax**
::
response = client.create_deployment(
AmznClientToken='string',
DeploymentId='string',
DeploymentType='NewDeployment'|'Redeployment'|'ResetDeployment'|'ForceResetDeployment',
GroupId='string',
GroupVersionId='string'
)
**Response Syntax**
::
{
'DeploymentArn': 'string',
'DeploymentId': 'string'
}
**Response Structure**
- *(dict) --* Success. The group was deployed.
- **DeploymentArn** *(string) --* The ARN of the deployment.
- **DeploymentId** *(string) --* The ID of the deployment.
:type AmznClientToken: string
:param AmznClientToken: A client token used to correlate requests and responses.
:type DeploymentId: string
:param DeploymentId: The ID of the deployment if you wish to redeploy a previous deployment.
:type DeploymentType: string
:param DeploymentType: The type of deployment. When used for \'\'CreateDeployment\'\', only \'\'NewDeployment\'\' and \'\'Redeployment\'\' are valid.
:type GroupId: string
:param GroupId: **[REQUIRED]** The ID of the Greengrass group.
:type GroupVersionId: string
:param GroupVersionId: The ID of the group version to be deployed.
:rtype: dict
:returns:
"""
pass
def create_device_definition(self, AmznClientToken: str = None, InitialVersion: Dict = None, Name: str = None, tags: Dict = None) -> Dict:
"""
Creates a device definition. You may provide the initial version of the device definition now or use ''CreateDeviceDefinitionVersion'' at a later time.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/CreateDeviceDefinition>`_
**Request Syntax**
::
response = client.create_device_definition(
AmznClientToken='string',
InitialVersion={
'Devices': [
{
'CertificateArn': 'string',
'Id': 'string',
'SyncShadow': True|False,
'ThingArn': 'string'
},
]
},
Name='string',
tags={
'string': 'string'
}
)
**Response Syntax**
::
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Id': 'string',
'LastUpdatedTimestamp': 'string',
'LatestVersion': 'string',
'LatestVersionArn': 'string',
'Name': 'string'
}
**Response Structure**
- *(dict) --*
- **Arn** *(string) --* The ARN of the definition.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was created.
- **Id** *(string) --* The ID of the definition.
- **LastUpdatedTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was last updated.
- **LatestVersion** *(string) --* The latest version of the definition.
- **LatestVersionArn** *(string) --* The ARN of the latest version of the definition.
- **Name** *(string) --* The name of the definition.
:type AmznClientToken: string
:param AmznClientToken: A client token used to correlate requests and responses.
:type InitialVersion: dict
:param InitialVersion: Information about the initial version of the device definition.
- **Devices** *(list) --* A list of devices in the definition version.
- *(dict) --* Information about a device.
- **CertificateArn** *(string) --* The ARN of the certificate associated with the device.
- **Id** *(string) --* A descriptive or arbitrary ID for the device. This value must be unique within the device definition version. Max length is 128 characters with pattern \'\'[a-zA-Z0-9:_-]+\'\'.
- **SyncShadow** *(boolean) --* If true, the device\'s local shadow will be automatically synced with the cloud.
- **ThingArn** *(string) --* The thing ARN of the device.
:type Name: string
:param Name: The name of the device definition.
:type tags: dict
:param tags: Tag(s) to add to the new resource
- *(string) --*
- *(string) --*
:rtype: dict
:returns:
"""
pass
def create_device_definition_version(self, DeviceDefinitionId: str, AmznClientToken: str = None, Devices: List = None) -> Dict:
"""
Creates a version of a device definition that has already been defined.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/CreateDeviceDefinitionVersion>`_
**Request Syntax**
::
response = client.create_device_definition_version(
AmznClientToken='string',
DeviceDefinitionId='string',
Devices=[
{
'CertificateArn': 'string',
'Id': 'string',
'SyncShadow': True|False,
'ThingArn': 'string'
},
]
)
**Response Syntax**
::
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Id': 'string',
'Version': 'string'
}
**Response Structure**
- *(dict) --*
- **Arn** *(string) --* The ARN of the version.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the version was created.
- **Id** *(string) --* The ID of the version.
- **Version** *(string) --* The unique ID of the version.
:type AmznClientToken: string
:param AmznClientToken: A client token used to correlate requests and responses.
:type DeviceDefinitionId: string
:param DeviceDefinitionId: **[REQUIRED]** The ID of the device definition.
:type Devices: list
:param Devices: A list of devices in the definition version.
- *(dict) --* Information about a device.
- **CertificateArn** *(string) --* The ARN of the certificate associated with the device.
- **Id** *(string) --* A descriptive or arbitrary ID for the device. This value must be unique within the device definition version. Max length is 128 characters with pattern \'\'[a-zA-Z0-9:_-]+\'\'.
- **SyncShadow** *(boolean) --* If true, the device\'s local shadow will be automatically synced with the cloud.
- **ThingArn** *(string) --* The thing ARN of the device.
:rtype: dict
:returns:
"""
pass
def create_function_definition(self, AmznClientToken: str = None, InitialVersion: Dict = None, Name: str = None, tags: Dict = None) -> Dict:
"""
Creates a Lambda function definition which contains a list of Lambda functions and their configurations to be used in a group. You can create an initial version of the definition by providing a list of Lambda functions and their configurations now, or use ''CreateFunctionDefinitionVersion'' later.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/CreateFunctionDefinition>`_
**Request Syntax**
::
response = client.create_function_definition(
AmznClientToken='string',
InitialVersion={
'DefaultConfig': {
'Execution': {
'IsolationMode': 'GreengrassContainer'|'NoContainer',
'RunAs': {
'Gid': 123,
'Uid': 123
}
}
},
'Functions': [
{
'FunctionArn': 'string',
'FunctionConfiguration': {
'EncodingType': 'binary'|'json',
'Environment': {
'AccessSysfs': True|False,
'Execution': {
'IsolationMode': 'GreengrassContainer'|'NoContainer',
'RunAs': {
'Gid': 123,
'Uid': 123
}
},
'ResourceAccessPolicies': [
{
'Permission': 'ro'|'rw',
'ResourceId': 'string'
},
],
'Variables': {
'string': 'string'
}
},
'ExecArgs': 'string',
'Executable': 'string',
'MemorySize': 123,
'Pinned': True|False,
'Timeout': 123
},
'Id': 'string'
},
]
},
Name='string',
tags={
'string': 'string'
}
)
**Response Syntax**
::
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Id': 'string',
'LastUpdatedTimestamp': 'string',
'LatestVersion': 'string',
'LatestVersionArn': 'string',
'Name': 'string'
}
**Response Structure**
- *(dict) --*
- **Arn** *(string) --* The ARN of the definition.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was created.
- **Id** *(string) --* The ID of the definition.
- **LastUpdatedTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was last updated.
- **LatestVersion** *(string) --* The latest version of the definition.
- **LatestVersionArn** *(string) --* The ARN of the latest version of the definition.
- **Name** *(string) --* The name of the definition.
:type AmznClientToken: string
:param AmznClientToken: A client token used to correlate requests and responses.
:type InitialVersion: dict
:param InitialVersion: Information about the initial version of the function definition.
- **DefaultConfig** *(dict) --* The default configuration that applies to all Lambda functions in this function definition version. Individual Lambda functions can override these settings.
- **Execution** *(dict) --* Configuration information that specifies how a Lambda function runs.
- **IsolationMode** *(string) --* Specifies whether the Lambda function runs in a Greengrass container (default) or without containerization. Unless your scenario requires that you run without containerization, we recommend that you run in a Greengrass container. Omit this value to run the Lambda function with the default containerization for the group.
- **RunAs** *(dict) --* Specifies the user and group whose permissions are used when running the Lambda function. You can specify one or both values to override the default values. We recommend that you avoid running as root unless absolutely necessary to minimize the risk of unintended changes or malicious attacks. To run as root, you must set \'\'IsolationMode\'\' to \'\'NoContainer\'\' and update config.json in \'\'greengrass-root/config\'\' to set \'\'allowFunctionsToRunAsRoot\'\' to \'\'yes\'\'.
- **Gid** *(integer) --* The group ID whose permissions are used to run a Lambda function.
- **Uid** *(integer) --* The user ID whose permissions are used to run a Lambda function.
- **Functions** *(list) --* A list of Lambda functions in this function definition version.
- *(dict) --* Information about a Lambda function.
- **FunctionArn** *(string) --* The ARN of the Lambda function.
- **FunctionConfiguration** *(dict) --* The configuration of the Lambda function.
- **EncodingType** *(string) --* The expected encoding type of the input payload for the function. The default is \'\'json\'\'.
- **Environment** *(dict) --* The environment configuration of the function.
- **AccessSysfs** *(boolean) --* If true, the Lambda function is allowed to access the host\'s /sys folder. Use this when the Lambda function needs to read device information from /sys. This setting applies only when you run the Lambda function in a Greengrass container.
- **Execution** *(dict) --* Configuration related to executing the Lambda function
- **IsolationMode** *(string) --* Specifies whether the Lambda function runs in a Greengrass container (default) or without containerization. Unless your scenario requires that you run without containerization, we recommend that you run in a Greengrass container. Omit this value to run the Lambda function with the default containerization for the group.
- **RunAs** *(dict) --* Specifies the user and group whose permissions are used when running the Lambda function. You can specify one or both values to override the default values. We recommend that you avoid running as root unless absolutely necessary to minimize the risk of unintended changes or malicious attacks. To run as root, you must set \'\'IsolationMode\'\' to \'\'NoContainer\'\' and update config.json in \'\'greengrass-root/config\'\' to set \'\'allowFunctionsToRunAsRoot\'\' to \'\'yes\'\'.
- **Gid** *(integer) --* The group ID whose permissions are used to run a Lambda function.
- **Uid** *(integer) --* The user ID whose permissions are used to run a Lambda function.
- **ResourceAccessPolicies** *(list) --* A list of the resources, with their permissions, to which the Lambda function will be granted access. A Lambda function can have at most 10 resources. ResourceAccessPolicies apply only when you run the Lambda function in a Greengrass container.
- *(dict) --* A policy used by the function to access a resource.
- **Permission** *(string) --* The permissions that the Lambda function has to the resource. Can be one of \'\'rw\'\' (read/write) or \'\'ro\'\' (read-only).
- **ResourceId** *(string) --* The ID of the resource. (This ID is assigned to the resource when you create the resource definiton.)
- **Variables** *(dict) --* Environment variables for the Lambda function\'s configuration.
- *(string) --*
- *(string) --*
- **ExecArgs** *(string) --* The execution arguments.
- **Executable** *(string) --* The name of the function executable.
- **MemorySize** *(integer) --* The memory size, in KB, which the function requires. This setting is not applicable and should be cleared when you run the Lambda function without containerization.
- **Pinned** *(boolean) --* True if the function is pinned. Pinned means the function is long-lived and starts when the core starts.
- **Timeout** *(integer) --* The allowed function execution time, after which Lambda should terminate the function. This timeout still applies to pinned Lambda functions for each request.
- **Id** *(string) --* A descriptive or arbitrary ID for the function. This value must be unique within the function definition version. Max length is 128 characters with pattern \'\'[a-zA-Z0-9:_-]+\'\'.
:type Name: string
:param Name: The name of the function definition.
:type tags: dict
:param tags: Tag(s) to add to the new resource
- *(string) --*
- *(string) --*
:rtype: dict
:returns:
"""
pass
def create_function_definition_version(self, FunctionDefinitionId: str, AmznClientToken: str = None, DefaultConfig: Dict = None, Functions: List = None) -> Dict:
"""
Creates a version of a Lambda function definition that has already been defined.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/CreateFunctionDefinitionVersion>`_
**Request Syntax**
::
response = client.create_function_definition_version(
AmznClientToken='string',
DefaultConfig={
'Execution': {
'IsolationMode': 'GreengrassContainer'|'NoContainer',
'RunAs': {
'Gid': 123,
'Uid': 123
}
}
},
FunctionDefinitionId='string',
Functions=[
{
'FunctionArn': 'string',
'FunctionConfiguration': {
'EncodingType': 'binary'|'json',
'Environment': {
'AccessSysfs': True|False,
'Execution': {
'IsolationMode': 'GreengrassContainer'|'NoContainer',
'RunAs': {
'Gid': 123,
'Uid': 123
}
},
'ResourceAccessPolicies': [
{
'Permission': 'ro'|'rw',
'ResourceId': 'string'
},
],
'Variables': {
'string': 'string'
}
},
'ExecArgs': 'string',
'Executable': 'string',
'MemorySize': 123,
'Pinned': True|False,
'Timeout': 123
},
'Id': 'string'
},
]
)
**Response Syntax**
::
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Id': 'string',
'Version': 'string'
}
**Response Structure**
- *(dict) --*
- **Arn** *(string) --* The ARN of the version.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the version was created.
- **Id** *(string) --* The ID of the version.
- **Version** *(string) --* The unique ID of the version.
:type AmznClientToken: string
:param AmznClientToken: A client token used to correlate requests and responses.
:type DefaultConfig: dict
:param DefaultConfig: The default configuration that applies to all Lambda functions in this function definition version. Individual Lambda functions can override these settings.
- **Execution** *(dict) --* Configuration information that specifies how a Lambda function runs.
- **IsolationMode** *(string) --* Specifies whether the Lambda function runs in a Greengrass container (default) or without containerization. Unless your scenario requires that you run without containerization, we recommend that you run in a Greengrass container. Omit this value to run the Lambda function with the default containerization for the group.
- **RunAs** *(dict) --* Specifies the user and group whose permissions are used when running the Lambda function. You can specify one or both values to override the default values. We recommend that you avoid running as root unless absolutely necessary to minimize the risk of unintended changes or malicious attacks. To run as root, you must set \'\'IsolationMode\'\' to \'\'NoContainer\'\' and update config.json in \'\'greengrass-root/config\'\' to set \'\'allowFunctionsToRunAsRoot\'\' to \'\'yes\'\'.
- **Gid** *(integer) --* The group ID whose permissions are used to run a Lambda function.
- **Uid** *(integer) --* The user ID whose permissions are used to run a Lambda function.
:type FunctionDefinitionId: string
:param FunctionDefinitionId: **[REQUIRED]** The ID of the Lambda function definition.
:type Functions: list
:param Functions: A list of Lambda functions in this function definition version.
- *(dict) --* Information about a Lambda function.
- **FunctionArn** *(string) --* The ARN of the Lambda function.
- **FunctionConfiguration** *(dict) --* The configuration of the Lambda function.
- **EncodingType** *(string) --* The expected encoding type of the input payload for the function. The default is \'\'json\'\'.
- **Environment** *(dict) --* The environment configuration of the function.
- **AccessSysfs** *(boolean) --* If true, the Lambda function is allowed to access the host\'s /sys folder. Use this when the Lambda function needs to read device information from /sys. This setting applies only when you run the Lambda function in a Greengrass container.
- **Execution** *(dict) --* Configuration related to executing the Lambda function
- **IsolationMode** *(string) --* Specifies whether the Lambda function runs in a Greengrass container (default) or without containerization. Unless your scenario requires that you run without containerization, we recommend that you run in a Greengrass container. Omit this value to run the Lambda function with the default containerization for the group.
- **RunAs** *(dict) --* Specifies the user and group whose permissions are used when running the Lambda function. You can specify one or both values to override the default values. We recommend that you avoid running as root unless absolutely necessary to minimize the risk of unintended changes or malicious attacks. To run as root, you must set \'\'IsolationMode\'\' to \'\'NoContainer\'\' and update config.json in \'\'greengrass-root/config\'\' to set \'\'allowFunctionsToRunAsRoot\'\' to \'\'yes\'\'.
- **Gid** *(integer) --* The group ID whose permissions are used to run a Lambda function.
- **Uid** *(integer) --* The user ID whose permissions are used to run a Lambda function.
- **ResourceAccessPolicies** *(list) --* A list of the resources, with their permissions, to which the Lambda function will be granted access. A Lambda function can have at most 10 resources. ResourceAccessPolicies apply only when you run the Lambda function in a Greengrass container.
- *(dict) --* A policy used by the function to access a resource.
- **Permission** *(string) --* The permissions that the Lambda function has to the resource. Can be one of \'\'rw\'\' (read/write) or \'\'ro\'\' (read-only).
- **ResourceId** *(string) --* The ID of the resource. (This ID is assigned to the resource when you create the resource definiton.)
- **Variables** *(dict) --* Environment variables for the Lambda function\'s configuration.
- *(string) --*
- *(string) --*
- **ExecArgs** *(string) --* The execution arguments.
- **Executable** *(string) --* The name of the function executable.
- **MemorySize** *(integer) --* The memory size, in KB, which the function requires. This setting is not applicable and should be cleared when you run the Lambda function without containerization.
- **Pinned** *(boolean) --* True if the function is pinned. Pinned means the function is long-lived and starts when the core starts.
- **Timeout** *(integer) --* The allowed function execution time, after which Lambda should terminate the function. This timeout still applies to pinned Lambda functions for each request.
- **Id** *(string) --* A descriptive or arbitrary ID for the function. This value must be unique within the function definition version. Max length is 128 characters with pattern \'\'[a-zA-Z0-9:_-]+\'\'.
:rtype: dict
:returns:
"""
pass
def create_group(self, AmznClientToken: str = None, InitialVersion: Dict = None, Name: str = None, tags: Dict = None) -> Dict:
"""
Creates a group. You may provide the initial version of the group or use ''CreateGroupVersion'' at a later time. Tip: You can use the ''gg_group_setup'' package (https://github.com/awslabs/aws-greengrass-group-setup) as a library or command-line application to create and deploy Greengrass groups.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/CreateGroup>`_
**Request Syntax**
::
response = client.create_group(
AmznClientToken='string',
InitialVersion={
'ConnectorDefinitionVersionArn': 'string',
'CoreDefinitionVersionArn': 'string',
'DeviceDefinitionVersionArn': 'string',
'FunctionDefinitionVersionArn': 'string',
'LoggerDefinitionVersionArn': 'string',
'ResourceDefinitionVersionArn': 'string',
'SubscriptionDefinitionVersionArn': 'string'
},
Name='string',
tags={
'string': 'string'
}
)
**Response Syntax**
::
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Id': 'string',
'LastUpdatedTimestamp': 'string',
'LatestVersion': 'string',
'LatestVersionArn': 'string',
'Name': 'string'
}
**Response Structure**
- *(dict) --* Success. The group was created.
- **Arn** *(string) --* The ARN of the definition.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was created.
- **Id** *(string) --* The ID of the definition.
- **LastUpdatedTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was last updated.
- **LatestVersion** *(string) --* The latest version of the definition.
- **LatestVersionArn** *(string) --* The ARN of the latest version of the definition.
- **Name** *(string) --* The name of the definition.
:type AmznClientToken: string
:param AmznClientToken: A client token used to correlate requests and responses.
:type InitialVersion: dict
:param InitialVersion: Information about the initial version of the group.
- **ConnectorDefinitionVersionArn** *(string) --* The ARN of the connector definition version for this group.
- **CoreDefinitionVersionArn** *(string) --* The ARN of the core definition version for this group.
- **DeviceDefinitionVersionArn** *(string) --* The ARN of the device definition version for this group.
- **FunctionDefinitionVersionArn** *(string) --* The ARN of the function definition version for this group.
- **LoggerDefinitionVersionArn** *(string) --* The ARN of the logger definition version for this group.
- **ResourceDefinitionVersionArn** *(string) --* The ARN of the resource definition version for this group.
- **SubscriptionDefinitionVersionArn** *(string) --* The ARN of the subscription definition version for this group.
:type Name: string
:param Name: The name of the group.
:type tags: dict
:param tags: Tag(s) to add to the new resource
- *(string) --*
- *(string) --*
:rtype: dict
:returns:
"""
pass
def create_group_certificate_authority(self, GroupId: str, AmznClientToken: str = None) -> Dict:
"""
Creates a CA for the group. If a CA already exists, it will rotate the existing CA.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/CreateGroupCertificateAuthority>`_
**Request Syntax**
::
response = client.create_group_certificate_authority(
AmznClientToken='string',
GroupId='string'
)
**Response Syntax**
::
{
'GroupCertificateAuthorityArn': 'string'
}
**Response Structure**
- *(dict) --* Success. The response body contains the new active CA ARN.
- **GroupCertificateAuthorityArn** *(string) --* The ARN of the group certificate authority.
:type AmznClientToken: string
:param AmznClientToken: A client token used to correlate requests and responses.
:type GroupId: string
:param GroupId: **[REQUIRED]** The ID of the Greengrass group.
:rtype: dict
:returns:
"""
pass
def create_group_version(self, GroupId: str, AmznClientToken: str = None, ConnectorDefinitionVersionArn: str = None, CoreDefinitionVersionArn: str = None, DeviceDefinitionVersionArn: str = None, FunctionDefinitionVersionArn: str = None, LoggerDefinitionVersionArn: str = None, ResourceDefinitionVersionArn: str = None, SubscriptionDefinitionVersionArn: str = None) -> Dict:
"""
Creates a version of a group which has already been defined.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/CreateGroupVersion>`_
**Request Syntax**
::
response = client.create_group_version(
AmznClientToken='string',
ConnectorDefinitionVersionArn='string',
CoreDefinitionVersionArn='string',
DeviceDefinitionVersionArn='string',
FunctionDefinitionVersionArn='string',
GroupId='string',
LoggerDefinitionVersionArn='string',
ResourceDefinitionVersionArn='string',
SubscriptionDefinitionVersionArn='string'
)
**Response Syntax**
::
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Id': 'string',
'Version': 'string'
}
**Response Structure**
- *(dict) --* Success. The response contains information about the group version.
- **Arn** *(string) --* The ARN of the version.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the version was created.
- **Id** *(string) --* The ID of the version.
- **Version** *(string) --* The unique ID of the version.
:type AmznClientToken: string
:param AmznClientToken: A client token used to correlate requests and responses.
:type ConnectorDefinitionVersionArn: string
:param ConnectorDefinitionVersionArn: The ARN of the connector definition version for this group.
:type CoreDefinitionVersionArn: string
:param CoreDefinitionVersionArn: The ARN of the core definition version for this group.
:type DeviceDefinitionVersionArn: string
:param DeviceDefinitionVersionArn: The ARN of the device definition version for this group.
:type FunctionDefinitionVersionArn: string
:param FunctionDefinitionVersionArn: The ARN of the function definition version for this group.
:type GroupId: string
:param GroupId: **[REQUIRED]** The ID of the Greengrass group.
:type LoggerDefinitionVersionArn: string
:param LoggerDefinitionVersionArn: The ARN of the logger definition version for this group.
:type ResourceDefinitionVersionArn: string
:param ResourceDefinitionVersionArn: The ARN of the resource definition version for this group.
:type SubscriptionDefinitionVersionArn: string
:param SubscriptionDefinitionVersionArn: The ARN of the subscription definition version for this group.
:rtype: dict
:returns:
"""
pass
def create_logger_definition(self, AmznClientToken: str = None, InitialVersion: Dict = None, Name: str = None, tags: Dict = None) -> Dict:
"""
Creates a logger definition. You may provide the initial version of the logger definition now or use ''CreateLoggerDefinitionVersion'' at a later time.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/CreateLoggerDefinition>`_
**Request Syntax**
::
response = client.create_logger_definition(
AmznClientToken='string',
InitialVersion={
'Loggers': [
{
'Component': 'GreengrassSystem'|'Lambda',
'Id': 'string',
'Level': 'DEBUG'|'INFO'|'WARN'|'ERROR'|'FATAL',
'Space': 123,
'Type': 'FileSystem'|'AWSCloudWatch'
},
]
},
Name='string',
tags={
'string': 'string'
}
)
**Response Syntax**
::
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Id': 'string',
'LastUpdatedTimestamp': 'string',
'LatestVersion': 'string',
'LatestVersionArn': 'string',
'Name': 'string'
}
**Response Structure**
- *(dict) --*
- **Arn** *(string) --* The ARN of the definition.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was created.
- **Id** *(string) --* The ID of the definition.
- **LastUpdatedTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was last updated.
- **LatestVersion** *(string) --* The latest version of the definition.
- **LatestVersionArn** *(string) --* The ARN of the latest version of the definition.
- **Name** *(string) --* The name of the definition.
:type AmznClientToken: string
:param AmznClientToken: A client token used to correlate requests and responses.
:type InitialVersion: dict
:param InitialVersion: Information about the initial version of the logger definition.
- **Loggers** *(list) --* A list of loggers.
- *(dict) --* Information about a logger
- **Component** *(string) --* The component that will be subject to logging.
- **Id** *(string) --* A descriptive or arbitrary ID for the logger. This value must be unique within the logger definition version. Max length is 128 characters with pattern \'\'[a-zA-Z0-9:_-]+\'\'.
- **Level** *(string) --* The level of the logs.
- **Space** *(integer) --* The amount of file space, in KB, to use if the local file system is used for logging purposes.
- **Type** *(string) --* The type of log output which will be used.
:type Name: string
:param Name: The name of the logger definition.
:type tags: dict
:param tags: Tag(s) to add to the new resource
- *(string) --*
- *(string) --*
:rtype: dict
:returns:
"""
pass
def create_logger_definition_version(self, LoggerDefinitionId: str, AmznClientToken: str = None, Loggers: List = None) -> Dict:
"""
Creates a version of a logger definition that has already been defined.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/CreateLoggerDefinitionVersion>`_
**Request Syntax**
::
response = client.create_logger_definition_version(
AmznClientToken='string',
LoggerDefinitionId='string',
Loggers=[
{
'Component': 'GreengrassSystem'|'Lambda',
'Id': 'string',
'Level': 'DEBUG'|'INFO'|'WARN'|'ERROR'|'FATAL',
'Space': 123,
'Type': 'FileSystem'|'AWSCloudWatch'
},
]
)
**Response Syntax**
::
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Id': 'string',
'Version': 'string'
}
**Response Structure**
- *(dict) --*
- **Arn** *(string) --* The ARN of the version.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the version was created.
- **Id** *(string) --* The ID of the version.
- **Version** *(string) --* The unique ID of the version.
:type AmznClientToken: string
:param AmznClientToken: A client token used to correlate requests and responses.
:type LoggerDefinitionId: string
:param LoggerDefinitionId: **[REQUIRED]** The ID of the logger definition.
:type Loggers: list
:param Loggers: A list of loggers.
- *(dict) --* Information about a logger
- **Component** *(string) --* The component that will be subject to logging.
- **Id** *(string) --* A descriptive or arbitrary ID for the logger. This value must be unique within the logger definition version. Max length is 128 characters with pattern \'\'[a-zA-Z0-9:_-]+\'\'.
- **Level** *(string) --* The level of the logs.
- **Space** *(integer) --* The amount of file space, in KB, to use if the local file system is used for logging purposes.
- **Type** *(string) --* The type of log output which will be used.
:rtype: dict
:returns:
"""
pass
def create_resource_definition(self, AmznClientToken: str = None, InitialVersion: Dict = None, Name: str = None, tags: Dict = None) -> Dict:
"""
Creates a resource definition which contains a list of resources to be used in a group. You can create an initial version of the definition by providing a list of resources now, or use ''CreateResourceDefinitionVersion'' later.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/CreateResourceDefinition>`_
**Request Syntax**
::
response = client.create_resource_definition(
AmznClientToken='string',
InitialVersion={
'Resources': [
{
'Id': 'string',
'Name': 'string',
'ResourceDataContainer': {
'LocalDeviceResourceData': {
'GroupOwnerSetting': {
'AutoAddGroupOwner': True|False,
'GroupOwner': 'string'
},
'SourcePath': 'string'
},
'LocalVolumeResourceData': {
'DestinationPath': 'string',
'GroupOwnerSetting': {
'AutoAddGroupOwner': True|False,
'GroupOwner': 'string'
},
'SourcePath': 'string'
},
'S3MachineLearningModelResourceData': {
'DestinationPath': 'string',
'S3Uri': 'string'
},
'SageMakerMachineLearningModelResourceData': {
'DestinationPath': 'string',
'SageMakerJobArn': 'string'
},
'SecretsManagerSecretResourceData': {
'ARN': 'string',
'AdditionalStagingLabelsToDownload': [
'string',
]
}
}
},
]
},
Name='string',
tags={
'string': 'string'
}
)
**Response Syntax**
::
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Id': 'string',
'LastUpdatedTimestamp': 'string',
'LatestVersion': 'string',
'LatestVersionArn': 'string',
'Name': 'string'
}
**Response Structure**
- *(dict) --*
- **Arn** *(string) --* The ARN of the definition.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was created.
- **Id** *(string) --* The ID of the definition.
- **LastUpdatedTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was last updated.
- **LatestVersion** *(string) --* The latest version of the definition.
- **LatestVersionArn** *(string) --* The ARN of the latest version of the definition.
- **Name** *(string) --* The name of the definition.
:type AmznClientToken: string
:param AmznClientToken: A client token used to correlate requests and responses.
:type InitialVersion: dict
:param InitialVersion: Information about the initial version of the resource definition.
- **Resources** *(list) --* A list of resources.
- *(dict) --* Information about a resource.
- **Id** *(string) --* The resource ID, used to refer to a resource in the Lambda function configuration. Max length is 128 characters with pattern \'\'[a-zA-Z0-9:_-]+\'\'. This must be unique within a Greengrass group.
- **Name** *(string) --* The descriptive resource name, which is displayed on the AWS IoT Greengrass console. Max length 128 characters with pattern \'\'[a-zA-Z0-9:_-]+\'\'. This must be unique within a Greengrass group.
- **ResourceDataContainer** *(dict) --* A container of data for all resource types.
- **LocalDeviceResourceData** *(dict) --* Attributes that define the local device resource.
- **GroupOwnerSetting** *(dict) --* Group/owner related settings for local resources.
- **AutoAddGroupOwner** *(boolean) --* If true, AWS IoT Greengrass automatically adds the specified Linux OS group owner of the resource to the Lambda process privileges. Thus the Lambda process will have the file access permissions of the added Linux group.
- **GroupOwner** *(string) --* The name of the Linux OS group whose privileges will be added to the Lambda process. This field is optional.
- **SourcePath** *(string) --* The local absolute path of the device resource. The source path for a device resource can refer only to a character device or block device under \'\'/dev\'\'.
- **LocalVolumeResourceData** *(dict) --* Attributes that define the local volume resource.
- **DestinationPath** *(string) --* The absolute local path of the resource inside the Lambda environment.
- **GroupOwnerSetting** *(dict) --* Allows you to configure additional group privileges for the Lambda process. This field is optional.
- **AutoAddGroupOwner** *(boolean) --* If true, AWS IoT Greengrass automatically adds the specified Linux OS group owner of the resource to the Lambda process privileges. Thus the Lambda process will have the file access permissions of the added Linux group.
- **GroupOwner** *(string) --* The name of the Linux OS group whose privileges will be added to the Lambda process. This field is optional.
- **SourcePath** *(string) --* The local absolute path of the volume resource on the host. The source path for a volume resource type cannot start with \'\'/sys\'\'.
- **S3MachineLearningModelResourceData** *(dict) --* Attributes that define an Amazon S3 machine learning resource.
- **DestinationPath** *(string) --* The absolute local path of the resource inside the Lambda environment.
- **S3Uri** *(string) --* The URI of the source model in an S3 bucket. The model package must be in tar.gz or .zip format.
- **SageMakerMachineLearningModelResourceData** *(dict) --* Attributes that define an Amazon SageMaker machine learning resource.
- **DestinationPath** *(string) --* The absolute local path of the resource inside the Lambda environment.
- **SageMakerJobArn** *(string) --* The ARN of the Amazon SageMaker training job that represents the source model.
- **SecretsManagerSecretResourceData** *(dict) --* Attributes that define a secret resource, which references a secret from AWS Secrets Manager.
- **ARN** *(string) --* The ARN of the Secrets Manager secret to make available on the core. The value of the secret\'s latest version (represented by the \'\'AWSCURRENT\'\' staging label) is included by default.
- **AdditionalStagingLabelsToDownload** *(list) --* Optional. The staging labels whose values you want to make available on the core, in addition to \'\'AWSCURRENT\'\'.
- *(string) --*
:type Name: string
:param Name: The name of the resource definition.
:type tags: dict
:param tags: Tag(s) to add to the new resource
- *(string) --*
- *(string) --*
:rtype: dict
:returns:
"""
pass
def create_resource_definition_version(self, ResourceDefinitionId: str, AmznClientToken: str = None, Resources: List = None) -> Dict:
"""
Creates a version of a resource definition that has already been defined.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/CreateResourceDefinitionVersion>`_
**Request Syntax**
::
response = client.create_resource_definition_version(
AmznClientToken='string',
ResourceDefinitionId='string',
Resources=[
{
'Id': 'string',
'Name': 'string',
'ResourceDataContainer': {
'LocalDeviceResourceData': {
'GroupOwnerSetting': {
'AutoAddGroupOwner': True|False,
'GroupOwner': 'string'
},
'SourcePath': 'string'
},
'LocalVolumeResourceData': {
'DestinationPath': 'string',
'GroupOwnerSetting': {
'AutoAddGroupOwner': True|False,
'GroupOwner': 'string'
},
'SourcePath': 'string'
},
'S3MachineLearningModelResourceData': {
'DestinationPath': 'string',
'S3Uri': 'string'
},
'SageMakerMachineLearningModelResourceData': {
'DestinationPath': 'string',
'SageMakerJobArn': 'string'
},
'SecretsManagerSecretResourceData': {
'ARN': 'string',
'AdditionalStagingLabelsToDownload': [
'string',
]
}
}
},
]
)
**Response Syntax**
::
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Id': 'string',
'Version': 'string'
}
**Response Structure**
- *(dict) --*
- **Arn** *(string) --* The ARN of the version.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the version was created.
- **Id** *(string) --* The ID of the version.
- **Version** *(string) --* The unique ID of the version.
:type AmznClientToken: string
:param AmznClientToken: A client token used to correlate requests and responses.
:type ResourceDefinitionId: string
:param ResourceDefinitionId: **[REQUIRED]** The ID of the resource definition.
:type Resources: list
:param Resources: A list of resources.
- *(dict) --* Information about a resource.
- **Id** *(string) --* The resource ID, used to refer to a resource in the Lambda function configuration. Max length is 128 characters with pattern \'\'[a-zA-Z0-9:_-]+\'\'. This must be unique within a Greengrass group.
- **Name** *(string) --* The descriptive resource name, which is displayed on the AWS IoT Greengrass console. Max length 128 characters with pattern \'\'[a-zA-Z0-9:_-]+\'\'. This must be unique within a Greengrass group.
- **ResourceDataContainer** *(dict) --* A container of data for all resource types.
- **LocalDeviceResourceData** *(dict) --* Attributes that define the local device resource.
- **GroupOwnerSetting** *(dict) --* Group/owner related settings for local resources.
- **AutoAddGroupOwner** *(boolean) --* If true, AWS IoT Greengrass automatically adds the specified Linux OS group owner of the resource to the Lambda process privileges. Thus the Lambda process will have the file access permissions of the added Linux group.
- **GroupOwner** *(string) --* The name of the Linux OS group whose privileges will be added to the Lambda process. This field is optional.
- **SourcePath** *(string) --* The local absolute path of the device resource. The source path for a device resource can refer only to a character device or block device under \'\'/dev\'\'.
- **LocalVolumeResourceData** *(dict) --* Attributes that define the local volume resource.
- **DestinationPath** *(string) --* The absolute local path of the resource inside the Lambda environment.
- **GroupOwnerSetting** *(dict) --* Allows you to configure additional group privileges for the Lambda process. This field is optional.
- **AutoAddGroupOwner** *(boolean) --* If true, AWS IoT Greengrass automatically adds the specified Linux OS group owner of the resource to the Lambda process privileges. Thus the Lambda process will have the file access permissions of the added Linux group.
- **GroupOwner** *(string) --* The name of the Linux OS group whose privileges will be added to the Lambda process. This field is optional.
- **SourcePath** *(string) --* The local absolute path of the volume resource on the host. The source path for a volume resource type cannot start with \'\'/sys\'\'.
- **S3MachineLearningModelResourceData** *(dict) --* Attributes that define an Amazon S3 machine learning resource.
- **DestinationPath** *(string) --* The absolute local path of the resource inside the Lambda environment.
- **S3Uri** *(string) --* The URI of the source model in an S3 bucket. The model package must be in tar.gz or .zip format.
- **SageMakerMachineLearningModelResourceData** *(dict) --* Attributes that define an Amazon SageMaker machine learning resource.
- **DestinationPath** *(string) --* The absolute local path of the resource inside the Lambda environment.
- **SageMakerJobArn** *(string) --* The ARN of the Amazon SageMaker training job that represents the source model.
- **SecretsManagerSecretResourceData** *(dict) --* Attributes that define a secret resource, which references a secret from AWS Secrets Manager.
- **ARN** *(string) --* The ARN of the Secrets Manager secret to make available on the core. The value of the secret\'s latest version (represented by the \'\'AWSCURRENT\'\' staging label) is included by default.
- **AdditionalStagingLabelsToDownload** *(list) --* Optional. The staging labels whose values you want to make available on the core, in addition to \'\'AWSCURRENT\'\'.
- *(string) --*
:rtype: dict
:returns:
"""
pass
def create_software_update_job(self, AmznClientToken: str = None, S3UrlSignerRole: str = None, SoftwareToUpdate: str = None, UpdateAgentLogLevel: str = None, UpdateTargets: List = None, UpdateTargetsArchitecture: str = None, UpdateTargetsOperatingSystem: str = None) -> Dict:
"""
Creates a software update for a core or group of cores (specified as an IoT thing group.) Use this to update the OTA Agent as well as the Greengrass core software. It makes use of the IoT Jobs feature which provides additional commands to manage a Greengrass core software update job.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/CreateSoftwareUpdateJob>`_
**Request Syntax**
::
response = client.create_software_update_job(
AmznClientToken='string',
S3UrlSignerRole='string',
SoftwareToUpdate='core'|'ota_agent',
UpdateAgentLogLevel='NONE'|'TRACE'|'DEBUG'|'VERBOSE'|'INFO'|'WARN'|'ERROR'|'FATAL',
UpdateTargets=[
'string',
],
UpdateTargetsArchitecture='armv7l'|'x86_64'|'aarch64',
UpdateTargetsOperatingSystem='ubuntu'|'raspbian'|'amazon_linux'
)
**Response Syntax**
::
{
'IotJobArn': 'string',
'IotJobId': 'string'
}
**Response Structure**
- *(dict) --* success
- **IotJobArn** *(string) --* The IoT Job ARN corresponding to this update.
- **IotJobId** *(string) --* The IoT Job Id corresponding to this update.
:type AmznClientToken: string
:param AmznClientToken: A client token used to correlate requests and responses.
:type S3UrlSignerRole: string
:param S3UrlSignerRole: The IAM Role that Greengrass will use to create pre-signed URLs pointing towards the update artifact.
:type SoftwareToUpdate: string
:param SoftwareToUpdate: The piece of software on the Greengrass core that will be updated.
:type UpdateAgentLogLevel: string
:param UpdateAgentLogLevel: The minimum level of log statements that should be logged by the OTA Agent during an update.
:type UpdateTargets: list
:param UpdateTargets: The ARNs of the targets (IoT things or IoT thing groups) that this update will be applied to.
- *(string) --*
:type UpdateTargetsArchitecture: string
:param UpdateTargetsArchitecture: The architecture of the cores which are the targets of an update.
:type UpdateTargetsOperatingSystem: string
:param UpdateTargetsOperatingSystem: The operating system of the cores which are the targets of an update.
:rtype: dict
:returns:
"""
pass
def create_subscription_definition(self, AmznClientToken: str = None, InitialVersion: Dict = None, Name: str = None, tags: Dict = None) -> Dict:
"""
Creates a subscription definition. You may provide the initial version of the subscription definition now or use ''CreateSubscriptionDefinitionVersion'' at a later time.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/CreateSubscriptionDefinition>`_
**Request Syntax**
::
response = client.create_subscription_definition(
AmznClientToken='string',
InitialVersion={
'Subscriptions': [
{
'Id': 'string',
'Source': 'string',
'Subject': 'string',
'Target': 'string'
},
]
},
Name='string',
tags={
'string': 'string'
}
)
**Response Syntax**
::
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Id': 'string',
'LastUpdatedTimestamp': 'string',
'LatestVersion': 'string',
'LatestVersionArn': 'string',
'Name': 'string'
}
**Response Structure**
- *(dict) --*
- **Arn** *(string) --* The ARN of the definition.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was created.
- **Id** *(string) --* The ID of the definition.
- **LastUpdatedTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was last updated.
- **LatestVersion** *(string) --* The latest version of the definition.
- **LatestVersionArn** *(string) --* The ARN of the latest version of the definition.
- **Name** *(string) --* The name of the definition.
:type AmznClientToken: string
:param AmznClientToken: A client token used to correlate requests and responses.
:type InitialVersion: dict
:param InitialVersion: Information about the initial version of the subscription definition.
- **Subscriptions** *(list) --* A list of subscriptions.
- *(dict) --* Information about a subscription.
- **Id** *(string) --* A descriptive or arbitrary ID for the subscription. This value must be unique within the subscription definition version. Max length is 128 characters with pattern \'\'[a-zA-Z0-9:_-]+\'\'.
- **Source** *(string) --* The source of the subscription. Can be a thing ARN, a Lambda function ARN, a connector ARN, \'cloud\' (which represents the AWS IoT cloud), or \'GGShadowService\'.
- **Subject** *(string) --* The MQTT topic used to route the message.
- **Target** *(string) --* Where the message is sent to. Can be a thing ARN, a Lambda function ARN, a connector ARN, \'cloud\' (which represents the AWS IoT cloud), or \'GGShadowService\'.
:type Name: string
:param Name: The name of the subscription definition.
:type tags: dict
:param tags: Tag(s) to add to the new resource
- *(string) --*
- *(string) --*
:rtype: dict
:returns:
"""
pass
def create_subscription_definition_version(self, SubscriptionDefinitionId: str, AmznClientToken: str = None, Subscriptions: List = None) -> Dict:
"""
Creates a version of a subscription definition which has already been defined.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/CreateSubscriptionDefinitionVersion>`_
**Request Syntax**
::
response = client.create_subscription_definition_version(
AmznClientToken='string',
SubscriptionDefinitionId='string',
Subscriptions=[
{
'Id': 'string',
'Source': 'string',
'Subject': 'string',
'Target': 'string'
},
]
)
**Response Syntax**
::
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Id': 'string',
'Version': 'string'
}
**Response Structure**
- *(dict) --*
- **Arn** *(string) --* The ARN of the version.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the version was created.
- **Id** *(string) --* The ID of the version.
- **Version** *(string) --* The unique ID of the version.
:type AmznClientToken: string
:param AmznClientToken: A client token used to correlate requests and responses.
:type SubscriptionDefinitionId: string
:param SubscriptionDefinitionId: **[REQUIRED]** The ID of the subscription definition.
:type Subscriptions: list
:param Subscriptions: A list of subscriptions.
- *(dict) --* Information about a subscription.
- **Id** *(string) --* A descriptive or arbitrary ID for the subscription. This value must be unique within the subscription definition version. Max length is 128 characters with pattern \'\'[a-zA-Z0-9:_-]+\'\'.
- **Source** *(string) --* The source of the subscription. Can be a thing ARN, a Lambda function ARN, a connector ARN, \'cloud\' (which represents the AWS IoT cloud), or \'GGShadowService\'.
- **Subject** *(string) --* The MQTT topic used to route the message.
- **Target** *(string) --* Where the message is sent to. Can be a thing ARN, a Lambda function ARN, a connector ARN, \'cloud\' (which represents the AWS IoT cloud), or \'GGShadowService\'.
:rtype: dict
:returns:
"""
pass
def delete_connector_definition(self, ConnectorDefinitionId: str) -> Dict:
"""
Deletes a connector definition.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/DeleteConnectorDefinition>`_
**Request Syntax**
::
response = client.delete_connector_definition(
ConnectorDefinitionId='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --* success
:type ConnectorDefinitionId: string
:param ConnectorDefinitionId: **[REQUIRED]** The ID of the connector definition.
:rtype: dict
:returns:
"""
pass
def delete_core_definition(self, CoreDefinitionId: str) -> Dict:
"""
Deletes a core definition.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/DeleteCoreDefinition>`_
**Request Syntax**
::
response = client.delete_core_definition(
CoreDefinitionId='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --* success
:type CoreDefinitionId: string
:param CoreDefinitionId: **[REQUIRED]** The ID of the core definition.
:rtype: dict
:returns:
"""
pass
def delete_device_definition(self, DeviceDefinitionId: str) -> Dict:
"""
Deletes a device definition.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/DeleteDeviceDefinition>`_
**Request Syntax**
::
response = client.delete_device_definition(
DeviceDefinitionId='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --* success
:type DeviceDefinitionId: string
:param DeviceDefinitionId: **[REQUIRED]** The ID of the device definition.
:rtype: dict
:returns:
"""
pass
def delete_function_definition(self, FunctionDefinitionId: str) -> Dict:
"""
Deletes a Lambda function definition.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/DeleteFunctionDefinition>`_
**Request Syntax**
::
response = client.delete_function_definition(
FunctionDefinitionId='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --* success
:type FunctionDefinitionId: string
:param FunctionDefinitionId: **[REQUIRED]** The ID of the Lambda function definition.
:rtype: dict
:returns:
"""
pass
def delete_group(self, GroupId: str) -> Dict:
"""
Deletes a group.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/DeleteGroup>`_
**Request Syntax**
::
response = client.delete_group(
GroupId='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --* success
:type GroupId: string
:param GroupId: **[REQUIRED]** The ID of the Greengrass group.
:rtype: dict
:returns:
"""
pass
def delete_logger_definition(self, LoggerDefinitionId: str) -> Dict:
    """Delete a logger definition.

    AWS API: https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/DeleteLoggerDefinition

    :type LoggerDefinitionId: string
    :param LoggerDefinitionId: **[REQUIRED]** The ID of the logger definition.
    :rtype: dict
    :returns: An empty dict (``{}``) on success.
    """
    # Documentation-only stub: botocore injects the real implementation at runtime.
    return None
def delete_resource_definition(self, ResourceDefinitionId: str) -> Dict:
    """Delete a resource definition.

    AWS API: https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/DeleteResourceDefinition

    :type ResourceDefinitionId: string
    :param ResourceDefinitionId: **[REQUIRED]** The ID of the resource definition.
    :rtype: dict
    :returns: An empty dict (``{}``) on success.
    """
    # Documentation-only stub: botocore injects the real implementation at runtime.
    return None
def delete_subscription_definition(self, SubscriptionDefinitionId: str) -> Dict:
    """Delete a subscription definition.

    AWS API: https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/DeleteSubscriptionDefinition

    :type SubscriptionDefinitionId: string
    :param SubscriptionDefinitionId: **[REQUIRED]** The ID of the subscription definition.
    :rtype: dict
    :returns: An empty dict (``{}``) on success.
    """
    # Documentation-only stub: botocore injects the real implementation at runtime.
    return None
def disassociate_role_from_group(self, GroupId: str) -> Dict:
    """Disassociate the role from a group.

    AWS API: https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/DisassociateRoleFromGroup

    :type GroupId: string
    :param GroupId: **[REQUIRED]** The ID of the Greengrass group.
    :rtype: dict
    :returns: A dict with key ``'DisassociatedAt'`` (string): the time, in
        milliseconds since the epoch, when the role was disassociated from
        the group.
    """
    # Documentation-only stub: botocore injects the real implementation at runtime.
    return None
def disassociate_service_role_from_account(self) -> Dict:
    """Disassociate the service role from your account.

    Without a service role, deployments will not work.

    AWS API: https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/DisassociateServiceRoleFromAccount

    :rtype: dict
    :returns: A dict with key ``'DisassociatedAt'`` (string): the time when
        the service role was disassociated from the account.
    """
    # Documentation-only stub: botocore injects the real implementation at runtime.
    return None
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
    """Generate a presigned URL for a client method.

    :type ClientMethod: string
    :param ClientMethod: The client method to presign for.
    :type Params: dict
    :param Params: The parameters normally passed to ``ClientMethod``.
    :type ExpiresIn: int
    :param ExpiresIn: The number of seconds the presigned URL is valid for.
        By default it expires in an hour (3600 seconds).
    :type HttpMethod: string
    :param HttpMethod: The HTTP method to use on the generated URL. By
        default, the HTTP method is whatever is used in the method's model.
    :returns: The presigned URL.
    """
    # Documentation-only stub: botocore injects the real implementation at runtime.
    return None
def get_associated_role(self, GroupId: str) -> Dict:
    """Retrieve the role associated with a particular group.

    AWS API: https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/GetAssociatedRole

    :type GroupId: string
    :param GroupId: **[REQUIRED]** The ID of the Greengrass group.
    :rtype: dict
    :returns: A dict with keys:
        ``'AssociatedAt'`` (string) -- the time when the role was associated
        with the group;
        ``'RoleArn'`` (string) -- the ARN of the role that is associated
        with the group.
    """
    # Documentation-only stub: botocore injects the real implementation at runtime.
    return None
def get_bulk_deployment_status(self, BulkDeploymentId: str) -> Dict:
    """Return the status of a bulk deployment.

    AWS API: https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/GetBulkDeploymentStatus

    :type BulkDeploymentId: string
    :param BulkDeploymentId: **[REQUIRED]** The ID of the bulk deployment.
    :rtype: dict
    :returns: A dict describing the bulk deployment, with keys:

        - ``'BulkDeploymentMetrics'`` (dict) -- input-record metrics:
          ``'InvalidInputRecords'`` (int, records that hit a non-retryable
          error, e.g. bad format or missing permissions),
          ``'RecordsProcessed'`` (int, records processed or attempted so
          far), and ``'RetryAttempts'`` (int, deployment attempts that hit
          a retryable error; ``StartBulkDeployment`` retries a group
          deployment up to five times).
        - ``'BulkDeploymentStatus'`` (string) -- one of ``'Initializing'``,
          ``'Running'``, ``'Completed'``, ``'Stopping'``, ``'Stopped'``,
          ``'Failed'``.
        - ``'CreatedAt'`` (string) -- ISO time the deployment was created.
        - ``'ErrorDetails'`` (list of dicts with ``'DetailedErrorCode'``
          and ``'DetailedErrorMessage'`` strings).
        - ``'ErrorMessage'`` (string) -- error message.
        - ``'tags'`` (dict of string to string) -- tags for the definition.
    """
    # Documentation-only stub: botocore injects the real implementation at runtime.
    return None
def get_connectivity_info(self, ThingName: str) -> Dict:
    """Retrieve the connectivity information for a core.

    AWS API: https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/GetConnectivityInfo

    :type ThingName: string
    :param ThingName: **[REQUIRED]** The thing name.
    :rtype: dict
    :returns: A dict with keys:

        - ``'ConnectivityInfo'`` (list) -- one dict per endpoint with:
          ``'HostAddress'`` (string, IP address or DNS endpoint of the
          Greengrass core), ``'Id'`` (string, ID of the connectivity
          information), ``'Metadata'`` (string, metadata for the endpoint),
          and ``'PortNumber'`` (int, port of the core, usually 8883).
        - ``'Message'`` (string) -- a message about the connectivity info
          request.
    """
    # Documentation-only stub: botocore injects the real implementation at runtime.
    return None
def get_connector_definition(self, ConnectorDefinitionId: str) -> Dict:
    """Retrieve information about a connector definition.

    AWS API: https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/GetConnectorDefinition

    :type ConnectorDefinitionId: string
    :param ConnectorDefinitionId: **[REQUIRED]** The ID of the connector definition.
    :rtype: dict
    :returns: A dict describing the definition, with string keys ``'Arn'``,
        ``'CreationTimestamp'`` (ms since epoch), ``'Id'``,
        ``'LastUpdatedTimestamp'`` (ms since epoch), ``'LatestVersion'``,
        ``'LatestVersionArn'``, ``'Name'``, and ``'tags'`` (dict of string
        to string).
    """
    # Documentation-only stub: botocore injects the real implementation at runtime.
    return None
def get_connector_definition_version(self, ConnectorDefinitionId: str, ConnectorDefinitionVersionId: str, NextToken: str = None) -> Dict:
    """Retrieve information about a connector definition version.

    The result includes the connectors that the version contains.
    Connectors are prebuilt modules that interact with local
    infrastructure, device protocols, AWS, and other cloud services.

    AWS API: https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/GetConnectorDefinitionVersion

    :type ConnectorDefinitionId: string
    :param ConnectorDefinitionId: **[REQUIRED]** The ID of the connector definition.
    :type ConnectorDefinitionVersionId: string
    :param ConnectorDefinitionVersionId: **[REQUIRED]** The ID of the connector definition version.
    :type NextToken: string
    :param NextToken: The token for the next set of results, or ``'null'``
        if there are no additional results.
    :rtype: dict
    :returns: A dict with keys ``'Arn'``, ``'CreationTimestamp'`` (ms since
        epoch), ``'Id'``, ``'NextToken'``, ``'Version'`` (all strings), and
        ``'Definition'`` (dict) whose ``'Connectors'`` list holds one dict
        per connector with ``'ConnectorArn'`` (string), ``'Id'`` (string,
        unique within the version; max length 128, pattern
        ``[a-zA-Z0-9:_-]+``), and ``'Parameters'`` (dict of string to
        string configuration).
    """
    # Documentation-only stub: botocore injects the real implementation at runtime.
    return None
def get_core_definition(self, CoreDefinitionId: str) -> Dict:
    """Retrieve information about a core definition version.

    AWS API: https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/GetCoreDefinition

    :type CoreDefinitionId: string
    :param CoreDefinitionId: **[REQUIRED]** The ID of the core definition.
    :rtype: dict
    :returns: A dict describing the definition, with string keys ``'Arn'``,
        ``'CreationTimestamp'`` (ms since epoch), ``'Id'``,
        ``'LastUpdatedTimestamp'`` (ms since epoch), ``'LatestVersion'``,
        ``'LatestVersionArn'``, ``'Name'``, and ``'tags'`` (dict of string
        to string).
    """
    # Documentation-only stub: botocore injects the real implementation at runtime.
    return None
def get_core_definition_version(self, CoreDefinitionId: str, CoreDefinitionVersionId: str) -> Dict:
    """Retrieve information about a core definition version.

    AWS API: https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/GetCoreDefinitionVersion

    :type CoreDefinitionId: string
    :param CoreDefinitionId: **[REQUIRED]** The ID of the core definition.
    :type CoreDefinitionVersionId: string
    :param CoreDefinitionVersionId: **[REQUIRED]** The ID of the core definition version.
    :rtype: dict
    :returns: A dict with keys ``'Arn'``, ``'CreationTimestamp'`` (ms since
        epoch), ``'Id'``, ``'NextToken'`` (``'null'`` when no more
        results), ``'Version'`` (all strings), and ``'Definition'`` (dict)
        whose ``'Cores'`` list holds one dict per core with
        ``'CertificateArn'`` (string), ``'Id'`` (string, unique within the
        version; max length 128, pattern ``[a-zA-Z0-9:_-]+``),
        ``'SyncShadow'`` (bool, whether the core's local shadow is
        automatically synced with the cloud), and ``'ThingArn'`` (string,
        ARN of the thing which is the core).
    """
    # Documentation-only stub: botocore injects the real implementation at runtime.
    return None
def get_deployment_status(self, DeploymentId: str, GroupId: str) -> Dict:
    """Return the status of a deployment.

    AWS API: https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/GetDeploymentStatus

    :type DeploymentId: string
    :param DeploymentId: **[REQUIRED]** The ID of the deployment.
    :type GroupId: string
    :param GroupId: **[REQUIRED]** The ID of the Greengrass group.
    :rtype: dict
    :returns: A dict with keys:

        - ``'DeploymentStatus'`` (string) -- ``'InProgress'``,
          ``'Building'``, ``'Success'``, or ``'Failure'``.
        - ``'DeploymentType'`` (string) -- ``'NewDeployment'``,
          ``'Redeployment'``, ``'ResetDeployment'``, or
          ``'ForceResetDeployment'``.
        - ``'ErrorDetails'`` (list of dicts with ``'DetailedErrorCode'``
          and ``'DetailedErrorMessage'`` strings).
        - ``'ErrorMessage'`` (string) -- error message.
        - ``'UpdatedAt'`` (string) -- time, in ms since the epoch, when the
          deployment status was updated.
    """
    # Documentation-only stub: botocore injects the real implementation at runtime.
    return None
def get_device_definition(self, DeviceDefinitionId: str) -> Dict:
    """Retrieve information about a device definition.

    AWS API: https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/GetDeviceDefinition

    :type DeviceDefinitionId: string
    :param DeviceDefinitionId: **[REQUIRED]** The ID of the device definition.
    :rtype: dict
    :returns: A dict describing the definition, with string keys ``'Arn'``,
        ``'CreationTimestamp'`` (ms since epoch), ``'Id'``,
        ``'LastUpdatedTimestamp'`` (ms since epoch), ``'LatestVersion'``,
        ``'LatestVersionArn'``, ``'Name'``, and ``'tags'`` (dict of string
        to string).
    """
    # Documentation-only stub: botocore injects the real implementation at runtime.
    return None
def get_device_definition_version(self, DeviceDefinitionId: str, DeviceDefinitionVersionId: str, NextToken: str = None) -> Dict:
    """Retrieve information about a device definition version.

    AWS API: https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/GetDeviceDefinitionVersion

    :type DeviceDefinitionId: string
    :param DeviceDefinitionId: **[REQUIRED]** The ID of the device definition.
    :type DeviceDefinitionVersionId: string
    :param DeviceDefinitionVersionId: **[REQUIRED]** The ID of the device definition version.
    :type NextToken: string
    :param NextToken: The token for the next set of results, or ``'null'``
        if there are no additional results.
    :rtype: dict
    :returns: A dict with keys ``'Arn'``, ``'CreationTimestamp'`` (ms since
        epoch), ``'Id'``, ``'NextToken'``, ``'Version'`` (all strings), and
        ``'Definition'`` (dict) whose ``'Devices'`` list holds one dict per
        device with ``'CertificateArn'`` (string), ``'Id'`` (string, unique
        within the version; max length 128, pattern ``[a-zA-Z0-9:_-]+``),
        ``'SyncShadow'`` (bool, whether the device's local shadow is
        automatically synced with the cloud), and ``'ThingArn'`` (string,
        thing ARN of the device).
    """
    # Documentation-only stub: botocore injects the real implementation at runtime.
    return None
def get_function_definition(self, FunctionDefinitionId: str) -> Dict:
    """Retrieve information about a Lambda function definition.

    Includes the definition's creation time and latest version.

    AWS API: https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/GetFunctionDefinition

    :type FunctionDefinitionId: string
    :param FunctionDefinitionId: **[REQUIRED]** The ID of the Lambda function definition.
    :rtype: dict
    :returns: A dict describing the definition, with string keys ``'Arn'``,
        ``'CreationTimestamp'`` (ms since epoch), ``'Id'``,
        ``'LastUpdatedTimestamp'`` (ms since epoch), ``'LatestVersion'``,
        ``'LatestVersionArn'``, ``'Name'``, and ``'tags'`` (dict of string
        to string).
    """
    # Documentation-only stub: botocore injects the real implementation at runtime.
    return None
def get_function_definition_version(self, FunctionDefinitionId: str, FunctionDefinitionVersionId: str, NextToken: str = None) -> Dict:
    """Retrieve information about a Lambda function definition version.

    The result lists which Lambda functions are included in the version
    and their configurations.

    AWS API: https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/GetFunctionDefinitionVersion

    :type FunctionDefinitionId: string
    :param FunctionDefinitionId: **[REQUIRED]** The ID of the Lambda function definition.
    :type FunctionDefinitionVersionId: string
    :param FunctionDefinitionVersionId: **[REQUIRED]** The ID of the function definition version.
    :type NextToken: string
    :param NextToken: The token for the next set of results, or ``'null'``
        if there are no additional results.
    :rtype: dict
    :returns: A dict with keys ``'Arn'``, ``'CreationTimestamp'`` (ms since
        epoch), ``'Id'``, ``'NextToken'``, ``'Version'`` (all strings), and
        ``'Definition'`` (dict) containing:

        - ``'DefaultConfig'`` (dict) -- defaults applied to all Lambda
          functions in this version (individual functions can override).
          Its ``'Execution'`` dict holds ``'IsolationMode'``
          (``'GreengrassContainer'`` or ``'NoContainer'``) and ``'RunAs'``
          (dict with integer ``'Gid'``/``'Uid'`` whose permissions are used
          to run a function; running as root requires ``'NoContainer'``
          and ``allowFunctionsToRunAsRoot`` set to ``yes`` in config.json).
        - ``'Functions'`` (list) -- one dict per Lambda function with
          ``'FunctionArn'`` (string), ``'Id'`` (string, unique within the
          version; max length 128, pattern ``[a-zA-Z0-9:_-]+``), and
          ``'FunctionConfiguration'`` (dict) holding:
          ``'EncodingType'`` (``'binary'`` or ``'json'``; default
          ``'json'``), ``'ExecArgs'`` (string), ``'Executable'`` (string),
          ``'MemorySize'`` (int KB; not applicable without
          containerization), ``'Pinned'`` (bool; pinned functions are
          long-lived and start when the core starts), ``'Timeout'`` (int
          seconds allowed per request), and ``'Environment'`` (dict) with
          ``'AccessSysfs'`` (bool, allow reading the host's /sys in a
          Greengrass container), ``'Execution'`` (same shape as the
          default config's), ``'ResourceAccessPolicies'`` (list of dicts
          with ``'Permission'`` ``'ro'``/``'rw'`` and ``'ResourceId'``;
          at most 10 resources, container mode only), and ``'Variables'``
          (dict of string to string environment variables).
    """
    # Documentation-only stub: botocore injects the real implementation at runtime.
    return None
def get_group(self, GroupId: str) -> Dict:
    """Retrieve information about a group.

    AWS API: https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/GetGroup

    :type GroupId: string
    :param GroupId: **[REQUIRED]** The ID of the Greengrass group.
    :rtype: dict
    :returns: A dict describing the group, with string keys ``'Arn'``,
        ``'CreationTimestamp'`` (ms since epoch), ``'Id'``,
        ``'LastUpdatedTimestamp'`` (ms since epoch), ``'LatestVersion'``,
        ``'LatestVersionArn'``, ``'Name'``, and ``'tags'`` (dict of string
        to string).
    """
    # Documentation-only stub: botocore injects the real implementation at runtime.
    return None
def get_group_certificate_authority(self, CertificateAuthorityId: str, GroupId: str) -> Dict:
    """Retrieve the CA associated with a group.

    Returns the public key of the CA.

    AWS API: https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/GetGroupCertificateAuthority

    :type CertificateAuthorityId: string
    :param CertificateAuthorityId: **[REQUIRED]** The ID of the certificate authority.
    :type GroupId: string
    :param GroupId: **[REQUIRED]** The ID of the Greengrass group.
    :rtype: dict
    :returns: A dict with string keys ``'GroupCertificateAuthorityArn'``
        (ARN of the group's certificate authority),
        ``'GroupCertificateAuthorityId'`` (ID of the group's certificate
        authority), and ``'PemEncodedCertificate'`` (PEM encoded
        certificate for the group).
    """
    # Documentation-only stub: botocore injects the real implementation at runtime.
    return None
def get_group_certificate_configuration(self, GroupId: str) -> Dict:
    """Retrieve the current configuration for the CA used by the group.

    AWS API: https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/GetGroupCertificateConfiguration

    :type GroupId: string
    :param GroupId: **[REQUIRED]** The ID of the Greengrass group.
    :rtype: dict
    :returns: A dict with string keys
        ``'CertificateAuthorityExpiryInMilliseconds'`` (time remaining
        before the certificate authority expires, in ms),
        ``'CertificateExpiryInMilliseconds'`` (time remaining before the
        certificate expires, in ms), and ``'GroupId'`` (ID of the group
        certificate configuration).
    """
    # Documentation-only stub: botocore injects the real implementation at runtime.
    return None
def get_group_version(self, GroupId: str, GroupVersionId: str) -> Dict:
"""
Retrieves information about a group version.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/GetGroupVersion>`_
**Request Syntax**
::
response = client.get_group_version(
GroupId='string',
GroupVersionId='string'
)
**Response Syntax**
::
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Definition': {
'ConnectorDefinitionVersionArn': 'string',
'CoreDefinitionVersionArn': 'string',
'DeviceDefinitionVersionArn': 'string',
'FunctionDefinitionVersionArn': 'string',
'LoggerDefinitionVersionArn': 'string',
'ResourceDefinitionVersionArn': 'string',
'SubscriptionDefinitionVersionArn': 'string'
},
'Id': 'string',
'Version': 'string'
}
**Response Structure**
- *(dict) --* success
- **Arn** *(string) --* The ARN of the group version.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the group version was created.
- **Definition** *(dict) --* Information about the group version definition.
- **ConnectorDefinitionVersionArn** *(string) --* The ARN of the connector definition version for this group.
- **CoreDefinitionVersionArn** *(string) --* The ARN of the core definition version for this group.
- **DeviceDefinitionVersionArn** *(string) --* The ARN of the device definition version for this group.
- **FunctionDefinitionVersionArn** *(string) --* The ARN of the function definition version for this group.
- **LoggerDefinitionVersionArn** *(string) --* The ARN of the logger definition version for this group.
- **ResourceDefinitionVersionArn** *(string) --* The ARN of the resource definition version for this group.
- **SubscriptionDefinitionVersionArn** *(string) --* The ARN of the subscription definition version for this group.
- **Id** *(string) --* The ID of the group version.
- **Version** *(string) --* The unique ID for the version of the group.
:type GroupId: string
:param GroupId: **[REQUIRED]** The ID of the Greengrass group.
:type GroupVersionId: string
:param GroupVersionId: **[REQUIRED]** The ID of the group version.
:rtype: dict
:returns:
"""
pass
def get_logger_definition(self, LoggerDefinitionId: str) -> Dict:
"""
Retrieves information about a logger definition.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/GetLoggerDefinition>`_
**Request Syntax**
::
response = client.get_logger_definition(
LoggerDefinitionId='string'
)
**Response Syntax**
::
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Id': 'string',
'LastUpdatedTimestamp': 'string',
'LatestVersion': 'string',
'LatestVersionArn': 'string',
'Name': 'string',
'tags': {
'string': 'string'
}
}
**Response Structure**
- *(dict) --*
- **Arn** *(string) --* The ARN of the definition.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was created.
- **Id** *(string) --* The ID of the definition.
- **LastUpdatedTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was last updated.
- **LatestVersion** *(string) --* The latest version of the definition.
- **LatestVersionArn** *(string) --* The ARN of the latest version of the definition.
- **Name** *(string) --* The name of the definition.
- **tags** *(dict) --* The tags for the definition.
- *(string) --*
- *(string) --*
:type LoggerDefinitionId: string
:param LoggerDefinitionId: **[REQUIRED]** The ID of the logger definition.
:rtype: dict
:returns:
"""
pass
def get_logger_definition_version(self, LoggerDefinitionId: str, LoggerDefinitionVersionId: str, NextToken: str = None) -> Dict:
"""
Retrieves information about a logger definition version.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/GetLoggerDefinitionVersion>`_
**Request Syntax**
::
response = client.get_logger_definition_version(
LoggerDefinitionId='string',
LoggerDefinitionVersionId='string',
NextToken='string'
)
**Response Syntax**
::
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Definition': {
'Loggers': [
{
'Component': 'GreengrassSystem'|'Lambda',
'Id': 'string',
'Level': 'DEBUG'|'INFO'|'WARN'|'ERROR'|'FATAL',
'Space': 123,
'Type': 'FileSystem'|'AWSCloudWatch'
},
]
},
'Id': 'string',
'Version': 'string'
}
**Response Structure**
- *(dict) --* success
- **Arn** *(string) --* The ARN of the logger definition version.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the logger definition version was created.
- **Definition** *(dict) --* Information about the logger definition version.
- **Loggers** *(list) --* A list of loggers.
- *(dict) --* Information about a logger
- **Component** *(string) --* The component that will be subject to logging.
- **Id** *(string) --* A descriptive or arbitrary ID for the logger. This value must be unique within the logger definition version. Max length is 128 characters with pattern ''[a-zA-Z0-9:_-]+''.
- **Level** *(string) --* The level of the logs.
- **Space** *(integer) --* The amount of file space, in KB, to use if the local file system is used for logging purposes.
- **Type** *(string) --* The type of log output which will be used.
- **Id** *(string) --* The ID of the logger definition version.
- **Version** *(string) --* The version of the logger definition version.
:type LoggerDefinitionId: string
:param LoggerDefinitionId: **[REQUIRED]** The ID of the logger definition.
:type LoggerDefinitionVersionId: string
:param LoggerDefinitionVersionId: **[REQUIRED]** The ID of the logger definition version.
:type NextToken: string
:param NextToken: The token for the next set of results, or \'\'null\'\' if there are no additional results.
:rtype: dict
:returns:
"""
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:raise OperationNotPageableError: Raised if the operation is not
pageable. You can use the ``client.can_paginate`` method to
check if an operation is pageable.
:rtype: L{botocore.paginate.Paginator}
:return: A paginator object.
"""
pass
def get_resource_definition(self, ResourceDefinitionId: str) -> Dict:
"""
Retrieves information about a resource definition, including its creation time and latest version.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/GetResourceDefinition>`_
**Request Syntax**
::
response = client.get_resource_definition(
ResourceDefinitionId='string'
)
**Response Syntax**
::
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Id': 'string',
'LastUpdatedTimestamp': 'string',
'LatestVersion': 'string',
'LatestVersionArn': 'string',
'Name': 'string',
'tags': {
'string': 'string'
}
}
**Response Structure**
- *(dict) --* success
- **Arn** *(string) --* The ARN of the definition.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was created.
- **Id** *(string) --* The ID of the definition.
- **LastUpdatedTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was last updated.
- **LatestVersion** *(string) --* The latest version of the definition.
- **LatestVersionArn** *(string) --* The ARN of the latest version of the definition.
- **Name** *(string) --* The name of the definition.
- **tags** *(dict) --* The tags for the definition.
- *(string) --*
- *(string) --*
:type ResourceDefinitionId: string
:param ResourceDefinitionId: **[REQUIRED]** The ID of the resource definition.
:rtype: dict
:returns:
"""
pass
def get_resource_definition_version(self, ResourceDefinitionId: str, ResourceDefinitionVersionId: str) -> Dict:
"""
Retrieves information about a resource definition version, including which resources are included in the version.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/GetResourceDefinitionVersion>`_
**Request Syntax**
::
response = client.get_resource_definition_version(
ResourceDefinitionId='string',
ResourceDefinitionVersionId='string'
)
**Response Syntax**
::
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Definition': {
'Resources': [
{
'Id': 'string',
'Name': 'string',
'ResourceDataContainer': {
'LocalDeviceResourceData': {
'GroupOwnerSetting': {
'AutoAddGroupOwner': True|False,
'GroupOwner': 'string'
},
'SourcePath': 'string'
},
'LocalVolumeResourceData': {
'DestinationPath': 'string',
'GroupOwnerSetting': {
'AutoAddGroupOwner': True|False,
'GroupOwner': 'string'
},
'SourcePath': 'string'
},
'S3MachineLearningModelResourceData': {
'DestinationPath': 'string',
'S3Uri': 'string'
},
'SageMakerMachineLearningModelResourceData': {
'DestinationPath': 'string',
'SageMakerJobArn': 'string'
},
'SecretsManagerSecretResourceData': {
'ARN': 'string',
'AdditionalStagingLabelsToDownload': [
'string',
]
}
}
},
]
},
'Id': 'string',
'Version': 'string'
}
**Response Structure**
- *(dict) --* success
- **Arn** *(string) --* Arn of the resource definition version.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the resource definition version was created.
- **Definition** *(dict) --* Information about the definition.
- **Resources** *(list) --* A list of resources.
- *(dict) --* Information about a resource.
- **Id** *(string) --* The resource ID, used to refer to a resource in the Lambda function configuration. Max length is 128 characters with pattern ''[a-zA-Z0-9:_-]+''. This must be unique within a Greengrass group.
- **Name** *(string) --* The descriptive resource name, which is displayed on the AWS IoT Greengrass console. Max length 128 characters with pattern ''[a-zA-Z0-9:_-]+''. This must be unique within a Greengrass group.
- **ResourceDataContainer** *(dict) --* A container of data for all resource types.
- **LocalDeviceResourceData** *(dict) --* Attributes that define the local device resource.
- **GroupOwnerSetting** *(dict) --* Group/owner related settings for local resources.
- **AutoAddGroupOwner** *(boolean) --* If true, AWS IoT Greengrass automatically adds the specified Linux OS group owner of the resource to the Lambda process privileges. Thus the Lambda process will have the file access permissions of the added Linux group.
- **GroupOwner** *(string) --* The name of the Linux OS group whose privileges will be added to the Lambda process. This field is optional.
- **SourcePath** *(string) --* The local absolute path of the device resource. The source path for a device resource can refer only to a character device or block device under ''/dev''.
- **LocalVolumeResourceData** *(dict) --* Attributes that define the local volume resource.
- **DestinationPath** *(string) --* The absolute local path of the resource inside the Lambda environment.
- **GroupOwnerSetting** *(dict) --* Allows you to configure additional group privileges for the Lambda process. This field is optional.
- **AutoAddGroupOwner** *(boolean) --* If true, AWS IoT Greengrass automatically adds the specified Linux OS group owner of the resource to the Lambda process privileges. Thus the Lambda process will have the file access permissions of the added Linux group.
- **GroupOwner** *(string) --* The name of the Linux OS group whose privileges will be added to the Lambda process. This field is optional.
- **SourcePath** *(string) --* The local absolute path of the volume resource on the host. The source path for a volume resource type cannot start with ''/sys''.
- **S3MachineLearningModelResourceData** *(dict) --* Attributes that define an Amazon S3 machine learning resource.
- **DestinationPath** *(string) --* The absolute local path of the resource inside the Lambda environment.
- **S3Uri** *(string) --* The URI of the source model in an S3 bucket. The model package must be in tar.gz or .zip format.
- **SageMakerMachineLearningModelResourceData** *(dict) --* Attributes that define an Amazon SageMaker machine learning resource.
- **DestinationPath** *(string) --* The absolute local path of the resource inside the Lambda environment.
- **SageMakerJobArn** *(string) --* The ARN of the Amazon SageMaker training job that represents the source model.
- **SecretsManagerSecretResourceData** *(dict) --* Attributes that define a secret resource, which references a secret from AWS Secrets Manager.
- **ARN** *(string) --* The ARN of the Secrets Manager secret to make available on the core. The value of the secret's latest version (represented by the ''AWSCURRENT'' staging label) is included by default.
- **AdditionalStagingLabelsToDownload** *(list) --* Optional. The staging labels whose values you want to make available on the core, in addition to ''AWSCURRENT''.
- *(string) --*
- **Id** *(string) --* The ID of the resource definition version.
- **Version** *(string) --* The version of the resource definition version.
:type ResourceDefinitionId: string
:param ResourceDefinitionId: **[REQUIRED]** The ID of the resource definition.
:type ResourceDefinitionVersionId: string
:param ResourceDefinitionVersionId: **[REQUIRED]** The ID of the resource definition version.
:rtype: dict
:returns:
"""
pass
def get_service_role_for_account(self) -> Dict:
"""
Retrieves the service role that is attached to your account.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/GetServiceRoleForAccount>`_
**Request Syntax**
::
response = client.get_service_role_for_account()
**Response Syntax**
::
{
'AssociatedAt': 'string',
'RoleArn': 'string'
}
**Response Structure**
- *(dict) --* success
- **AssociatedAt** *(string) --* The time when the service role was associated with the account.
- **RoleArn** *(string) --* The ARN of the role which is associated with the account.
:rtype: dict
:returns:
"""
pass
def get_subscription_definition(self, SubscriptionDefinitionId: str) -> Dict:
"""
Retrieves information about a subscription definition.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/GetSubscriptionDefinition>`_
**Request Syntax**
::
response = client.get_subscription_definition(
SubscriptionDefinitionId='string'
)
**Response Syntax**
::
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Id': 'string',
'LastUpdatedTimestamp': 'string',
'LatestVersion': 'string',
'LatestVersionArn': 'string',
'Name': 'string',
'tags': {
'string': 'string'
}
}
**Response Structure**
- *(dict) --*
- **Arn** *(string) --* The ARN of the definition.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was created.
- **Id** *(string) --* The ID of the definition.
- **LastUpdatedTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was last updated.
- **LatestVersion** *(string) --* The latest version of the definition.
- **LatestVersionArn** *(string) --* The ARN of the latest version of the definition.
- **Name** *(string) --* The name of the definition.
- **tags** *(dict) --* The tags for the definition.
- *(string) --*
- *(string) --*
:type SubscriptionDefinitionId: string
:param SubscriptionDefinitionId: **[REQUIRED]** The ID of the subscription definition.
:rtype: dict
:returns:
"""
pass
def get_subscription_definition_version(self, SubscriptionDefinitionId: str, SubscriptionDefinitionVersionId: str, NextToken: str = None) -> Dict:
"""
Retrieves information about a subscription definition version.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/GetSubscriptionDefinitionVersion>`_
**Request Syntax**
::
response = client.get_subscription_definition_version(
NextToken='string',
SubscriptionDefinitionId='string',
SubscriptionDefinitionVersionId='string'
)
**Response Syntax**
::
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Definition': {
'Subscriptions': [
{
'Id': 'string',
'Source': 'string',
'Subject': 'string',
'Target': 'string'
},
]
},
'Id': 'string',
'NextToken': 'string',
'Version': 'string'
}
**Response Structure**
- *(dict) --*
- **Arn** *(string) --* The ARN of the subscription definition version.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the subscription definition version was created.
- **Definition** *(dict) --* Information about the subscription definition version.
- **Subscriptions** *(list) --* A list of subscriptions.
- *(dict) --* Information about a subscription.
- **Id** *(string) --* A descriptive or arbitrary ID for the subscription. This value must be unique within the subscription definition version. Max length is 128 characters with pattern ''[a-zA-Z0-9:_-]+''.
- **Source** *(string) --* The source of the subscription. Can be a thing ARN, a Lambda function ARN, a connector ARN, 'cloud' (which represents the AWS IoT cloud), or 'GGShadowService'.
- **Subject** *(string) --* The MQTT topic used to route the message.
- **Target** *(string) --* Where the message is sent to. Can be a thing ARN, a Lambda function ARN, a connector ARN, 'cloud' (which represents the AWS IoT cloud), or 'GGShadowService'.
- **Id** *(string) --* The ID of the subscription definition version.
- **NextToken** *(string) --* The token for the next set of results, or ''null'' if there are no additional results.
- **Version** *(string) --* The version of the subscription definition version.
:type NextToken: string
:param NextToken: The token for the next set of results, or \'\'null\'\' if there are no additional results.
:type SubscriptionDefinitionId: string
:param SubscriptionDefinitionId: **[REQUIRED]** The ID of the subscription definition.
:type SubscriptionDefinitionVersionId: string
:param SubscriptionDefinitionVersionId: **[REQUIRED]** The ID of the subscription definition version.
:rtype: dict
:returns:
"""
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
"""
Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters
section of the service docs for a list of available waiters.
:returns: The specified waiter object.
:rtype: botocore.waiter.Waiter
"""
pass
def list_bulk_deployment_detailed_reports(self, BulkDeploymentId: str, MaxResults: str = None, NextToken: str = None) -> Dict:
"""
Gets a paginated list of the deployments that have been started in a bulk deployment operation, and their current deployment status.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/ListBulkDeploymentDetailedReports>`_
**Request Syntax**
::
response = client.list_bulk_deployment_detailed_reports(
BulkDeploymentId='string',
MaxResults='string',
NextToken='string'
)
**Response Syntax**
::
{
'Deployments': [
{
'CreatedAt': 'string',
'DeploymentArn': 'string',
'DeploymentId': 'string',
'DeploymentStatus': 'string',
'DeploymentType': 'NewDeployment'|'Redeployment'|'ResetDeployment'|'ForceResetDeployment',
'ErrorDetails': [
{
'DetailedErrorCode': 'string',
'DetailedErrorMessage': 'string'
},
],
'ErrorMessage': 'string',
'GroupArn': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --* Success. The response body contains the list of deployments for the given group.
- **Deployments** *(list) --* A list of the individual group deployments in the bulk deployment operation.
- *(dict) --* Information about an individual group deployment in a bulk deployment operation.
- **CreatedAt** *(string) --* The time, in ISO format, when the deployment was created.
- **DeploymentArn** *(string) --* The ARN of the group deployment.
- **DeploymentId** *(string) --* The ID of the group deployment.
- **DeploymentStatus** *(string) --* The current status of the group deployment: ''InProgress'', ''Building'', ''Success'', or ''Failure''.
- **DeploymentType** *(string) --* The type of the deployment.
- **ErrorDetails** *(list) --* Details about the error.
- *(dict) --* Details about the error.
- **DetailedErrorCode** *(string) --* A detailed error code.
- **DetailedErrorMessage** *(string) --* A detailed error message.
- **ErrorMessage** *(string) --* The error message for a failed deployment
- **GroupArn** *(string) --* The ARN of the Greengrass group.
- **NextToken** *(string) --* The token for the next set of results, or ''null'' if there are no additional results.
:type BulkDeploymentId: string
:param BulkDeploymentId: **[REQUIRED]** The ID of the bulk deployment.
:type MaxResults: string
:param MaxResults: The maximum number of results to be returned per request.
:type NextToken: string
:param NextToken: The token for the next set of results, or \'\'null\'\' if there are no additional results.
:rtype: dict
:returns:
"""
pass
def list_bulk_deployments(self, MaxResults: str = None, NextToken: str = None) -> Dict:
"""
Returns a list of bulk deployments.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/ListBulkDeployments>`_
**Request Syntax**
::
response = client.list_bulk_deployments(
MaxResults='string',
NextToken='string'
)
**Response Syntax**
::
{
'BulkDeployments': [
{
'BulkDeploymentArn': 'string',
'BulkDeploymentId': 'string',
'CreatedAt': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --* Success. The response body contains the list of bulk deployments.
- **BulkDeployments** *(list) --* A list of bulk deployments.
- *(dict) --* Information about a bulk deployment. You cannot start a new bulk deployment while another one is still running or in a non-terminal state.
- **BulkDeploymentArn** *(string) --* The ARN of the bulk deployment.
- **BulkDeploymentId** *(string) --* The ID of the bulk deployment.
- **CreatedAt** *(string) --* The time, in ISO format, when the deployment was created.
- **NextToken** *(string) --* The token for the next set of results, or ''null'' if there are no additional results.
:type MaxResults: string
:param MaxResults: The maximum number of results to be returned per request.
:type NextToken: string
:param NextToken: The token for the next set of results, or \'\'null\'\' if there are no additional results.
:rtype: dict
:returns:
"""
pass
def list_connector_definition_versions(self, ConnectorDefinitionId: str, MaxResults: str = None, NextToken: str = None) -> Dict:
"""
Lists the versions of a connector definition, which are containers for connectors. Connectors run on the Greengrass core and contain built-in integration with local infrastructure, device protocols, AWS, and other cloud services.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/ListConnectorDefinitionVersions>`_
**Request Syntax**
::
response = client.list_connector_definition_versions(
ConnectorDefinitionId='string',
MaxResults='string',
NextToken='string'
)
**Response Syntax**
::
{
'NextToken': 'string',
'Versions': [
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Id': 'string',
'Version': 'string'
},
]
}
**Response Structure**
- *(dict) --*
- **NextToken** *(string) --* The token for the next set of results, or ''null'' if there are no additional results.
- **Versions** *(list) --* Information about a version.
- *(dict) --* Information about a version.
- **Arn** *(string) --* The ARN of the version.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the version was created.
- **Id** *(string) --* The ID of the version.
- **Version** *(string) --* The unique ID of the version.
:type ConnectorDefinitionId: string
:param ConnectorDefinitionId: **[REQUIRED]** The ID of the connector definition.
:type MaxResults: string
:param MaxResults: The maximum number of results to be returned per request.
:type NextToken: string
:param NextToken: The token for the next set of results, or \'\'null\'\' if there are no additional results.
:rtype: dict
:returns:
"""
pass
def list_connector_definitions(self, MaxResults: str = None, NextToken: str = None) -> Dict:
"""
Retrieves a list of connector definitions.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/ListConnectorDefinitions>`_
**Request Syntax**
::
response = client.list_connector_definitions(
MaxResults='string',
NextToken='string'
)
**Response Syntax**
::
{
'Definitions': [
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Id': 'string',
'LastUpdatedTimestamp': 'string',
'LatestVersion': 'string',
'LatestVersionArn': 'string',
'Name': 'string',
'Tags': {
'string': 'string'
}
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **Definitions** *(list) --* Information about a definition.
- *(dict) --* Information about a definition.
- **Arn** *(string) --* The ARN of the definition.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was created.
- **Id** *(string) --* The ID of the definition.
- **LastUpdatedTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was last updated.
- **LatestVersion** *(string) --* The latest version of the definition.
- **LatestVersionArn** *(string) --* The ARN of the latest version of the definition.
- **Name** *(string) --* The name of the definition.
- **Tags** *(dict) --* The tags for the definition.
- *(string) --*
- *(string) --*
- **NextToken** *(string) --* The token for the next set of results, or ''null'' if there are no additional results.
:type MaxResults: string
:param MaxResults: The maximum number of results to be returned per request.
:type NextToken: string
:param NextToken: The token for the next set of results, or \'\'null\'\' if there are no additional results.
:rtype: dict
:returns:
"""
pass
def list_core_definition_versions(self, CoreDefinitionId: str, MaxResults: str = None, NextToken: str = None) -> Dict:
"""
Lists the versions of a core definition.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/ListCoreDefinitionVersions>`_
**Request Syntax**
::
response = client.list_core_definition_versions(
CoreDefinitionId='string',
MaxResults='string',
NextToken='string'
)
**Response Syntax**
::
{
'NextToken': 'string',
'Versions': [
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Id': 'string',
'Version': 'string'
},
]
}
**Response Structure**
- *(dict) --*
- **NextToken** *(string) --* The token for the next set of results, or ''null'' if there are no additional results.
- **Versions** *(list) --* Information about a version.
- *(dict) --* Information about a version.
- **Arn** *(string) --* The ARN of the version.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the version was created.
- **Id** *(string) --* The ID of the version.
- **Version** *(string) --* The unique ID of the version.
:type CoreDefinitionId: string
:param CoreDefinitionId: **[REQUIRED]** The ID of the core definition.
:type MaxResults: string
:param MaxResults: The maximum number of results to be returned per request.
:type NextToken: string
:param NextToken: The token for the next set of results, or \'\'null\'\' if there are no additional results.
:rtype: dict
:returns:
"""
pass
def list_core_definitions(self, MaxResults: str = None, NextToken: str = None) -> Dict:
"""
Retrieves a list of core definitions.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/ListCoreDefinitions>`_
**Request Syntax**
::
response = client.list_core_definitions(
MaxResults='string',
NextToken='string'
)
**Response Syntax**
::
{
'Definitions': [
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Id': 'string',
'LastUpdatedTimestamp': 'string',
'LatestVersion': 'string',
'LatestVersionArn': 'string',
'Name': 'string',
'Tags': {
'string': 'string'
}
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **Definitions** *(list) --* Information about a definition.
- *(dict) --* Information about a definition.
- **Arn** *(string) --* The ARN of the definition.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was created.
- **Id** *(string) --* The ID of the definition.
- **LastUpdatedTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was last updated.
- **LatestVersion** *(string) --* The latest version of the definition.
- **LatestVersionArn** *(string) --* The ARN of the latest version of the definition.
- **Name** *(string) --* The name of the definition.
- **Tags** *(dict) --* The tags for the definition.
- *(string) --*
- *(string) --*
- **NextToken** *(string) --* The token for the next set of results, or ''null'' if there are no additional results.
:type MaxResults: string
:param MaxResults: The maximum number of results to be returned per request.
:type NextToken: string
:param NextToken: The token for the next set of results, or \'\'null\'\' if there are no additional results.
:rtype: dict
:returns:
"""
pass
def list_deployments(self, GroupId: str, MaxResults: str = None, NextToken: str = None) -> Dict:
"""
Returns a history of deployments for the group.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/ListDeployments>`_
**Request Syntax**
::
response = client.list_deployments(
GroupId='string',
MaxResults='string',
NextToken='string'
)
**Response Syntax**
::
{
'Deployments': [
{
'CreatedAt': 'string',
'DeploymentArn': 'string',
'DeploymentId': 'string',
'DeploymentType': 'NewDeployment'|'Redeployment'|'ResetDeployment'|'ForceResetDeployment',
'GroupArn': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --* Success. The response body contains the list of deployments for the given group.
- **Deployments** *(list) --* A list of deployments for the requested groups.
- *(dict) --* Information about a deployment.
- **CreatedAt** *(string) --* The time, in milliseconds since the epoch, when the deployment was created.
- **DeploymentArn** *(string) --* The ARN of the deployment.
- **DeploymentId** *(string) --* The ID of the deployment.
- **DeploymentType** *(string) --* The type of the deployment.
- **GroupArn** *(string) --* The ARN of the group for this deployment.
- **NextToken** *(string) --* The token for the next set of results, or ''null'' if there are no additional results.
:type GroupId: string
:param GroupId: **[REQUIRED]** The ID of the Greengrass group.
:type MaxResults: string
:param MaxResults: The maximum number of results to be returned per request.
:type NextToken: string
:param NextToken: The token for the next set of results, or \'\'null\'\' if there are no additional results.
:rtype: dict
:returns:
"""
pass
def list_device_definition_versions(self, DeviceDefinitionId: str, MaxResults: str = None, NextToken: str = None) -> Dict:
"""
Lists the versions of a device definition.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/ListDeviceDefinitionVersions>`_
**Request Syntax**
::
response = client.list_device_definition_versions(
DeviceDefinitionId='string',
MaxResults='string',
NextToken='string'
)
**Response Syntax**
::
{
'NextToken': 'string',
'Versions': [
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Id': 'string',
'Version': 'string'
},
]
}
**Response Structure**
- *(dict) --*
- **NextToken** *(string) --* The token for the next set of results, or ''null'' if there are no additional results.
- **Versions** *(list) --* Information about a version.
- *(dict) --* Information about a version.
- **Arn** *(string) --* The ARN of the version.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the version was created.
- **Id** *(string) --* The ID of the version.
- **Version** *(string) --* The unique ID of the version.
:type DeviceDefinitionId: string
:param DeviceDefinitionId: **[REQUIRED]** The ID of the device definition.
:type MaxResults: string
:param MaxResults: The maximum number of results to be returned per request.
:type NextToken: string
:param NextToken: The token for the next set of results, or \'\'null\'\' if there are no additional results.
:rtype: dict
:returns:
"""
pass
def list_device_definitions(self, MaxResults: str = None, NextToken: str = None) -> Dict:
    """Retrieve a paginated list of Greengrass device definitions.

    AWS API: ``greengrass-2017-06-07/ListDeviceDefinitions``

    Args:
        MaxResults: The maximum number of results to return per request.
        NextToken: Pagination token from a previous call, or ``None`` when
            there are no additional results.

    Returns:
        dict: ``{'Definitions': [{'Arn', 'CreationTimestamp', 'Id',
        'LastUpdatedTimestamp', 'LatestVersion', 'LatestVersionArn', 'Name',
        'Tags'}, ...], 'NextToken': str}``.
    """
    pass
def list_function_definition_versions(self, FunctionDefinitionId: str, MaxResults: str = None, NextToken: str = None) -> Dict:
    """List the versions of a Greengrass Lambda function definition.

    AWS API: ``greengrass-2017-06-07/ListFunctionDefinitionVersions``

    Args:
        FunctionDefinitionId: **[REQUIRED]** The ID of the Lambda function
            definition.
        MaxResults: The maximum number of results to return per request.
        NextToken: Pagination token from a previous call, or ``None`` when
            there are no additional results.

    Returns:
        dict: ``{'NextToken': str, 'Versions': [{'Arn', 'CreationTimestamp',
        'Id', 'Version'}, ...]}``.
    """
    pass
def list_function_definitions(self, MaxResults: str = None, NextToken: str = None) -> Dict:
    """Retrieve a paginated list of Greengrass Lambda function definitions.

    AWS API: ``greengrass-2017-06-07/ListFunctionDefinitions``

    Args:
        MaxResults: The maximum number of results to return per request.
        NextToken: Pagination token from a previous call, or ``None`` when
            there are no additional results.

    Returns:
        dict: ``{'Definitions': [{'Arn', 'CreationTimestamp', 'Id',
        'LastUpdatedTimestamp', 'LatestVersion', 'LatestVersionArn', 'Name',
        'Tags'}, ...], 'NextToken': str}`` — the IDs of all Greengrass
        Lambda function definitions in this account.
    """
    pass
def list_group_certificate_authorities(self, GroupId: str) -> Dict:
    """Retrieve the current certificate authorities (CAs) for a group.

    AWS API: ``greengrass-2017-06-07/ListGroupCertificateAuthorities``

    Args:
        GroupId: **[REQUIRED]** The ID of the Greengrass group.

    Returns:
        dict: ``{'GroupCertificateAuthorities':
        [{'GroupCertificateAuthorityArn', 'GroupCertificateAuthorityId'},
        ...]}`` — the PKI configuration for the group.
    """
    pass
def list_group_versions(self, GroupId: str, MaxResults: str = None, NextToken: str = None) -> Dict:
    """List the versions of a Greengrass group.

    AWS API: ``greengrass-2017-06-07/ListGroupVersions``

    Args:
        GroupId: **[REQUIRED]** The ID of the Greengrass group.
        MaxResults: The maximum number of results to return per request.
        NextToken: Pagination token from a previous call, or ``None`` when
            there are no additional results.

    Returns:
        dict: ``{'NextToken': str, 'Versions': [{'Arn', 'CreationTimestamp',
        'Id', 'Version'}, ...]}`` — versions and metadata for the group.
    """
    pass
def list_groups(self, MaxResults: str = None, NextToken: str = None) -> Dict:
    """Retrieve a paginated list of Greengrass groups.

    AWS API: ``greengrass-2017-06-07/ListGroups``

    Args:
        MaxResults: The maximum number of results to return per request.
        NextToken: Pagination token from a previous call, or ``None`` when
            there are no additional results.

    Returns:
        dict: ``{'Groups': [{'Arn', 'CreationTimestamp', 'Id',
        'LastUpdatedTimestamp', 'LatestVersion', 'LatestVersionArn', 'Name'},
        ...], 'NextToken': str}``.
    """
    pass
def list_logger_definition_versions(self, LoggerDefinitionId: str, MaxResults: str = None, NextToken: str = None) -> Dict:
    """List the versions of a Greengrass logger definition.

    AWS API: ``greengrass-2017-06-07/ListLoggerDefinitionVersions``

    Args:
        LoggerDefinitionId: **[REQUIRED]** The ID of the logger definition.
        MaxResults: The maximum number of results to return per request.
        NextToken: Pagination token from a previous call, or ``None`` when
            there are no additional results.

    Returns:
        dict: ``{'NextToken': str, 'Versions': [{'Arn', 'CreationTimestamp',
        'Id', 'Version'}, ...]}``.
    """
    pass
def list_logger_definitions(self, MaxResults: str = None, NextToken: str = None) -> Dict:
    """Retrieve a paginated list of Greengrass logger definitions.

    AWS API: ``greengrass-2017-06-07/ListLoggerDefinitions``

    Args:
        MaxResults: The maximum number of results to return per request.
        NextToken: Pagination token from a previous call, or ``None`` when
            there are no additional results.

    Returns:
        dict: ``{'Definitions': [{'Arn', 'CreationTimestamp', 'Id',
        'LastUpdatedTimestamp', 'LatestVersion', 'LatestVersionArn', 'Name',
        'Tags'}, ...], 'NextToken': str}``.
    """
    pass
def list_resource_definition_versions(self, ResourceDefinitionId: str, MaxResults: str = None, NextToken: str = None) -> Dict:
    """List the versions of a Greengrass resource definition.

    AWS API: ``greengrass-2017-06-07/ListResourceDefinitionVersions``

    Args:
        ResourceDefinitionId: **[REQUIRED]** The ID of the resource
            definition.
        MaxResults: The maximum number of results to return per request.
        NextToken: Pagination token from a previous call, or ``None`` when
            there are no additional results.

    Returns:
        dict: ``{'NextToken': str, 'Versions': [{'Arn', 'CreationTimestamp',
        'Id', 'Version'}, ...]}``.
    """
    pass
def list_resource_definitions(self, MaxResults: str = None, NextToken: str = None) -> Dict:
    """Retrieve a paginated list of Greengrass resource definitions.

    AWS API: ``greengrass-2017-06-07/ListResourceDefinitions``

    Args:
        MaxResults: The maximum number of results to return per request.
        NextToken: Pagination token from a previous call, or ``None`` when
            there are no additional results.

    Returns:
        dict: ``{'Definitions': [{'Arn', 'CreationTimestamp', 'Id',
        'LastUpdatedTimestamp', 'LatestVersion', 'LatestVersionArn', 'Name',
        'Tags'}, ...], 'NextToken': str}`` — the IDs of all Greengrass
        resource definitions in this account.
    """
    pass
def list_subscription_definition_versions(self, SubscriptionDefinitionId: str, MaxResults: str = None, NextToken: str = None) -> Dict:
    """List the versions of a Greengrass subscription definition.

    AWS API: ``greengrass-2017-06-07/ListSubscriptionDefinitionVersions``

    Args:
        SubscriptionDefinitionId: **[REQUIRED]** The ID of the subscription
            definition.
        MaxResults: The maximum number of results to return per request.
        NextToken: Pagination token from a previous call, or ``None`` when
            there are no additional results.

    Returns:
        dict: ``{'NextToken': str, 'Versions': [{'Arn', 'CreationTimestamp',
        'Id', 'Version'}, ...]}``.
    """
    pass
def list_subscription_definitions(self, MaxResults: str = None, NextToken: str = None) -> Dict:
    """Retrieve a paginated list of Greengrass subscription definitions.

    AWS API: ``greengrass-2017-06-07/ListSubscriptionDefinitions``

    Args:
        MaxResults: The maximum number of results to return per request.
        NextToken: Pagination token from a previous call, or ``None`` when
            there are no additional results.

    Returns:
        dict: ``{'Definitions': [{'Arn', 'CreationTimestamp', 'Id',
        'LastUpdatedTimestamp', 'LatestVersion', 'LatestVersionArn', 'Name',
        'Tags'}, ...], 'NextToken': str}``.
    """
    pass
def list_tags_for_resource(self, ResourceArn: str) -> Dict:
    """Retrieve the tags attached to a resource.

    AWS API: ``greengrass-2017-06-07/ListTagsForResource``

    Args:
        ResourceArn: **[REQUIRED]** The Amazon Resource Name (ARN) of the
            resource.

    Returns:
        dict: ``{'tags': {str: str}}`` — a map of the resource's key-value
        tag pairs.
    """
    pass
def reset_deployments(self, GroupId: str, AmznClientToken: str = None, Force: bool = None) -> Dict:
    """Reset a group's deployments.

    AWS API: ``greengrass-2017-06-07/ResetDeployments``

    Args:
        GroupId: **[REQUIRED]** The ID of the Greengrass group.
        AmznClientToken: A client token used to correlate requests and
            responses.
        Force: If ``True``, performs a best-effort only core reset.

    Returns:
        dict: ``{'DeploymentArn': str, 'DeploymentId': str}`` on success —
        the group's deployments were reset.
    """
    pass
def start_bulk_deployment(self, AmznClientToken: str = None, ExecutionRoleArn: str = None, InputFileUri: str = None, tags: Dict = None) -> Dict:
    """Start a bulk deployment of a specified set of group versions.

    Each group version deployment is triggered with an adaptive rate that
    has a fixed upper limit. Include an ``X-Amzn-Client-Token`` in every
    request; requests are idempotent with respect to the token and the
    request parameters.

    AWS API: ``greengrass-2017-06-07/StartBulkDeployment``

    Args:
        AmznClientToken: A client token used to correlate requests and
            responses.
        ExecutionRoleArn: The ARN of the IAM execution role for the bulk
            deployment; it must allow ``greengrass:CreateDeployment`` for
            all listed group versions and have access to the S3 bucket
            containing the input file.
        InputFileUri: The URI of the JSON-serialized, line-delimited,
            UTF-8 input file in S3 (< 100 MB) listing group/version IDs and
            the deployment type (currently only ``NewDeployment``); the
            execution role needs ``getObject`` permission on the bucket.
        tags: Tag(s) to add to the new resource, as ``{str: str}``.

    Returns:
        dict: ``{'BulkDeploymentArn': str, 'BulkDeploymentId': str}``.
    """
    pass
def stop_bulk_deployment(self, BulkDeploymentId: str) -> Dict:
    """Stop the execution of a bulk deployment.

    Returns a status of ``Stopping`` until the deployment is stopped; a new
    bulk deployment cannot be started while a previous one is ``Stopping``.
    Completed deployments are not rolled back and pending deployments are
    not cancelled.

    AWS API: ``greengrass-2017-06-07/StopBulkDeployment``

    Args:
        BulkDeploymentId: **[REQUIRED]** The ID of the bulk deployment.

    Returns:
        dict: Empty dict on success — the bulk deployment is being stopped.
    """
    pass
def tag_resource(self, ResourceArn: str, tags: Dict):
    """Add tags to a resource.

    AWS API: ``greengrass-2017-06-07/TagResource``

    Args:
        ResourceArn: **[REQUIRED]** The Amazon Resource Name (ARN) of the
            resource.
        tags: **[REQUIRED]** A map of the key-value pairs for the resource
            tag, as ``{str: str}``.

    Returns:
        None
    """
    pass
def untag_resource(self, ResourceArn: str, TagKeys: List):
    """Remove tags with the specified keys from a resource.

    AWS API: ``greengrass-2017-06-07/UntagResource``

    Args:
        ResourceArn: **[REQUIRED]** The Amazon Resource Name (ARN) of the
            resource.
        TagKeys: **[REQUIRED]** A list of the keys to remove from the
            resource tags.

    Returns:
        None
    """
    pass
def update_connectivity_info(self, ThingName: str, ConnectivityInfo: List = None) -> Dict:
    """Update the connectivity information for a core.

    Devices in the group that contains this core receive the information
    in order to locate the core and connect to it.

    AWS API: ``greengrass-2017-06-07/UpdateConnectivityInfo``

    Args:
        ThingName: **[REQUIRED]** The thing name.
        ConnectivityInfo: A list of connectivity info dicts, each with
            ``HostAddress`` (IP address or DNS endpoint of the core),
            ``Id`` (ID of the connectivity information), ``Metadata``
            (endpoint metadata) and ``PortNumber`` (usually 8883).

    Returns:
        dict: ``{'Message': str, 'Version': str}`` — a message about the
        update request and the new version of the connectivity info.
    """
    pass
def update_connector_definition(self, ConnectorDefinitionId: str, Name: str = None) -> Dict:
    """Update a connector definition.

    AWS API: ``greengrass-2017-06-07/UpdateConnectorDefinition``

    Args:
        ConnectorDefinitionId: **[REQUIRED]** The ID of the connector
            definition.
        Name: The name of the definition.

    Returns:
        dict: Empty dict on success.
    """
    pass
def update_core_definition(self, CoreDefinitionId: str, Name: str = None) -> Dict:
    """Update a core definition.

    AWS API: ``greengrass-2017-06-07/UpdateCoreDefinition``

    Args:
        CoreDefinitionId: **[REQUIRED]** The ID of the core definition.
        Name: The name of the definition.

    Returns:
        dict: Empty dict on success.
    """
    pass
def update_device_definition(self, DeviceDefinitionId: str, Name: str = None) -> Dict:
    """Update a device definition.

    AWS API: ``greengrass-2017-06-07/UpdateDeviceDefinition``

    Args:
        DeviceDefinitionId: **[REQUIRED]** The ID of the device definition.
        Name: The name of the definition.

    Returns:
        dict: Empty dict on success.
    """
    pass
def update_function_definition(self, FunctionDefinitionId: str, Name: str = None) -> Dict:
    """Update a Lambda function definition.

    AWS API: ``greengrass-2017-06-07/UpdateFunctionDefinition``

    Args:
        FunctionDefinitionId: **[REQUIRED]** The ID of the Lambda function
            definition.
        Name: The name of the definition.

    Returns:
        dict: Empty dict on success.
    """
    pass
def update_group(self, GroupId: str, Name: str = None) -> Dict:
    """Update a Greengrass group.

    AWS API: ``greengrass-2017-06-07/UpdateGroup``

    Args:
        GroupId: **[REQUIRED]** The ID of the Greengrass group.
        Name: The name of the definition.

    Returns:
        dict: Empty dict on success.
    """
    pass
def update_group_certificate_configuration(self, GroupId: str, CertificateExpiryInMilliseconds: str = None) -> Dict:
    """Update the certificate expiry time for a group.

    AWS API: ``greengrass-2017-06-07/UpdateGroupCertificateConfiguration``

    Args:
        GroupId: **[REQUIRED]** The ID of the Greengrass group.
        CertificateExpiryInMilliseconds: The amount of time remaining
            before the certificate expires, in milliseconds.

    Returns:
        dict: ``{'CertificateAuthorityExpiryInMilliseconds': str,
        'CertificateExpiryInMilliseconds': str, 'GroupId': str}`` — the
        group's PKI configuration.
    """
    pass
def update_logger_definition(self, LoggerDefinitionId: str, Name: str = None) -> Dict:
    """Update a logger definition.

    AWS API: ``greengrass-2017-06-07/UpdateLoggerDefinition``

    Args:
        LoggerDefinitionId: **[REQUIRED]** The ID of the logger definition.
        Name: The name of the definition.

    Returns:
        dict: Empty dict on success.
    """
    pass
def update_resource_definition(self, ResourceDefinitionId: str, Name: str = None) -> Dict:
    """Update a resource definition.

    AWS API: ``greengrass-2017-06-07/UpdateResourceDefinition``

    Args:
        ResourceDefinitionId: **[REQUIRED]** The ID of the resource
            definition.
        Name: The name of the definition.

    Returns:
        dict: Empty dict on success.
    """
    pass
    def update_subscription_definition(self, SubscriptionDefinitionId: str, Name: str = None) -> Dict:
        """
        Updates a subscription definition.

        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/UpdateSubscriptionDefinition>`_

        **Request Syntax**
        ::
          response = client.update_subscription_definition(
              Name='string',
              SubscriptionDefinitionId='string'
          )
        **Response Syntax**
        ::
          {}
        **Response Structure**
          - *(dict) --* success
        :type Name: string
        :param Name: The name of the definition.
        :type SubscriptionDefinitionId: string
        :param SubscriptionDefinitionId: **[REQUIRED]** The ID of the subscription definition.
        :rtype: dict
        :returns: An empty dict on success.
        """
        # Stub signature only; the real implementation is generated by botocore at runtime.
        pass
| 0 | 215,512 | 23 |
d4d0c054bd8c6ab251cce470a6372d0eed7bc869 | 38,962 | py | Python | app/ariadicom.py | MePyDo/pygqa | 61cde42ee815968fdd029cc5056ede3badea3d91 | [
"MIT"
] | 3 | 2021-02-25T13:19:52.000Z | 2021-03-03T03:46:46.000Z | app/ariadicom.py | MedPhyDO/pygqa | 580b2c6028d2299790a38262b795b8409cbfcc37 | [
"MIT"
] | null | null | null | app/ariadicom.py | MedPhyDO/pygqa | 580b2c6028d2299790a38262b795b8409cbfcc37 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
__author__ = "R. Bauer"
__copyright__ = "MedPhyDO - Machbarkeitsstudien des Instituts für Medizinische Strahlenphysik und Strahlenschutz am Klinikum Dortmund im Rahmen von Bachelor und Masterarbeiten an der TU-Dortmund / FH-Dortmund"
__credits__ = ["R.Bauer", "K.Loot"]
__license__ = "MIT"
__version__ = "0.1.2"
__status__ = "Prototype"
from dotmap import DotMap
import os.path as osp
from pathlib import Path
import pandas as pd
import numpy as np
import json
from datetime import date
from isp.dicom import ispDicom
from isp.config import dict_merge
from app.config import infoFields
from app.aria import ariaClass
#from app.dicom import dicomClass
from app.results import ispResults
from app.qa.mlc import checkMlc
from app.qa.field import checkField
from app.qa.wl import checkWL
from app.qa.vmat import checkVMAT
import logging
logger = logging.getLogger( "MQTT" )
class ariaDicomClass( ariaClass, ispDicom ):
'''Zentrale Klasse
Attributes
----------
config : Dot
konfigurations Daten
variables :
Metadaten aus config.variables
infoFields:
Infofelder aus config
dicomfiles: dict
geladene Dicom dateien
pd_results: pd
testergebnisse als Pandas tabelle
resultfile
Datei mit Ergebnissen als panda File
lastSQL: str
die letzte durchgeführte sql Abfrage
'''
def __init__( self, database=None, server="VMSDBD", config=None ):
"""Klasse sowie ariaClass und dicomClass initialisieren
"""
# Klassen defaults setzen und übergaben
self.config = config
self.variables = self.config.variables
self.infoFields = infoFields
self.dicomfiles: dict = {}
self.pd_results = None
self.resultfile = None
self.lastSQL = ""
# ariaClass initialisieren
ariaClass.__init__( self, database )
# dicomClass initialisieren. Der Erfolg kann über dicomClass.initialized abgefragt werden
ispDicom.__init__( self, server, self.config )
# Datei mit Ergebnissen als pandas laden
self.resultfile = osp.join( self.config.get("resultsPath", ".."), self.config.get("database.gqa.name", "gqa.json") )
self.pd_results = ispResults( self.config, self.resultfile )
def initResultsPath(self, AcquisitionYear=None ):
'''Den Ablegeort zu den PDF Dateien bestimmen
in variables.path befindet sich jetzt der resultsPath ggf. mit angehängten AcquisitionYear
Parameters
----------
AcquisitionYear : TYPE, optional
DESCRIPTION. The default is None.
Returns
-------
dirname : str
der aktuelle PDF Pfad (auch in self.variables["path"] )
'''
paths = [ ]
# ist der Pfad relativ angegeben ab base path verwenden
if self.config["resultsPath"][0] == ".":
paths.append( self.config["BASE_DIR"] )
paths.append( self.config["resultsPath"] )
else:
paths.append( self.config["resultsPath"] )
# zusätzlich noch das AcquisitionYear anfügen
if AcquisitionYear:
paths.append( str(AcquisitionYear) )
# den Pfad in variables["path"] ablegen
dirname = osp.abspath( osp.join( *paths ) )
self.variables["path"] = dirname
return dirname
def getAllGQA(self, pids=None, testTags:list=None, year:int=None, month:int=None, day:int=None, withInfo=True, withResult=False ):
'''Holt für die angegebenen PatientenIds aus allen Courses
die Felder mit Angaben in [Radiation].[Comment] und wertet sie entsprechend aus
Parameters
----------
pids : list, optional
DESCRIPTION. The default is None.
testTags : list, optional
DESCRIPTION. The default is None.
year : int, optional
DESCRIPTION. The default is None.
month : int, optional
DESCRIPTION. The default is None.
day : int, optional
DESCRIPTION. The default is None.
withInfo : TYPE, optional
DESCRIPTION. The default is True.
withResult : TYPE, optional
DESCRIPTION. The default is False.
Returns
-------
gqa : dict
Aufbau::
units: dict
<unit>: dict
<infoType>: dict
ready: dict
all: int
<energy> : int
gqa: dict
fields: int
energyFields: int
counts: dict
all: int
<energy> : int
pdf: dict,
items: dict
<energy>: dict
<SliceUID>: {info} -> dies wird bei run in ein DataFrame umgewandelt
series: [],
'''
if not pids:
return {}
if type(pids) == str:
pids = pids.split(",")
if not type(pids) == list:
pids = [pids]
if not pids or len(pids) == 0:
return {}
# filter zusammenstellen
where = "LEN([Radiation].[Comment]) > 0 "
subSql = []
for pid in pids:
subSql.append( "[Patient].[PatientId]='{}'".format( pid.strip() ) )
if len( subSql ) > 0:
where += " AND (" + " OR ".join( subSql ) + ")"
images, sql = self.getImages(
addWhere=where,
AcquisitionYear=year,
AcquisitionMonth=month,
AcquisitionDay=day,
testTags=testTags
)
self.lastSQL = sql
# Pfad für die PDF Dateien
self.initResultsPath( year )
return self.prepareGQA( images, year=year, withInfo=withInfo, withResult=withResult )
    def prepareGQA(self, imagedatas=[], year:int=0, withInfo=True, withResult=False, withDicomData:bool=False ):
        """Prepare the GQA evaluation and merge in stored test results.

        Needs ``config.GQA`` and ``config.units``::

            - units: ["Linac-1", "Linac-2"],
            - gqa : dict
                <testId>: dict
                   <unit>: dict
                       fields: int
                       energyFields: int

        Parameters
        ----------
        imagedatas : list, optional
            image information records from the Aria database. The default is [].
            NOTE(review): mutable default argument; harmless here because it is
            only iterated, never mutated.
        year : int, optional
            acquisition year whose results are merged in. The default is 0.
        withInfo : TYPE, optional
            include all image infos. The default is True.
            NOTE(review): currently unused inside this method — confirm callers.
        withResult : boolean, optional
            also include result data columns. The default is False.
        withDicomData : boolean, optional
            additionally store per-unit info in ``self.dicomfiles``. The default is False.

        Returns
        -------
        gqa : dict
            everything from config.gqa with the unit entries filled::

                <testname>
                    info:
                        inaktiv
                        tip
                        anleitung
                        options:
                        TODO:
                        tolerance:
                            <energy>
                    <unit-n>
                        fields: int
                        energyFields: int
                        energy: list
        """
        # dicom unit, name, infos collected per device
        self.dicomfiles = {}
        units = self.config.units
        # PDF files already present in the results path
        pdfFiles = []
        if osp.exists( self.variables["path"] ):
            p = Path( self.variables["path"] )
            pdfFiles = [i.name for i in p.glob( '*.pdf' )]
        # files = os.listdir( self.variables["path"] )
        data = {
            "GQA" : self.config.get("GQA").toDict(),
            "units" : units,
            "testTags" : {},
            "testIds": {}
        }
        # only the requested year, without index
        df_results = self.pd_results.gqa[ self.pd_results.gqa['year'] == year ].reset_index()
        result_fields = [ "acceptance", "group" ]
        if withResult:
            result_fields.append("data")
        # set a new composite index "year|unit|test|energy|month";
        # this only works when df_results actually contains rows
        if len(df_results.index) > 0:
            df_results.set_index( df_results.apply(lambda x: f"{x['year']}|{x['unit']}|{x['test']}|{x['energy']}|{x['month']}", axis=1), inplace=True )
            data["results"] = df_results[ result_fields ].to_dict( orient="split" )
        else:
            data["results"] = {
                "columns":result_fields,
                "data":[],
                "index":[]
            }
        # determine tags and gqa ids
        for testid, item in self.config.GQA.items():
            if "tag" in item:
                data["testTags"][ item["tag"] ] = testid
                data["testIds"][ testid ] = item["tag"]
        tagNotFound = {}
        inactiv = []
        testNotFound = []
        for imagedata in imagedatas:
            # prepare the database information for this image
            info = self.getImageInfos( imagedata )
            unit = info["unit"]
            energy = info["energy"]
            #
            # additionally store the data in self.dicomfiles
            #
            if withDicomData:
                if not unit in self.dicomfiles:
                    self.dicomfiles[ unit ] = {}
                # additionally file it under the image id
                self.dicomfiles[ unit ][ info["id"] ] = info
            # assign fields: one image can be used for several tests.
            # tag identifies the database entry, testid identifies the PDF
            for testTag in info["testTags"]:
                # only when a matching test exists
                if not testTag in data["testTags"]:
                    tagNotFound[ testTag ] = testTag
                    continue
                testId = data["testTags"][testTag]
                # skip tests disabled in gqa config;
                # 'inaktiv' may also carry a text explaining why
                # FIXME: inaktive
                t = "GQA.{}.info.inaktiv".format( testId )
                if not self.config.get(t, False) == False:
                    inactiv.append( self.config.get(t) )
                    continue
                # is there a GQA entry matching test, unit and energy?
                t = "GQA.{}.{}.energyFields.{}".format( testId, unit, energy )
                energyFields = self.config.get(t, False)
                if energyFields == False:
                    testNotFound.append( t )
                    continue
                # kind of test: MT (monthly) | JT (yearly)
                tagArt = testId[0:2]
                if tagArt == "JT":
                    dateFlag = "0"
                else:
                    dateFlag = str( info["AcquisitionMonth"] )
                #
                test_unit = data["GQA"][testId][unit]
                if not dateFlag in test_unit:
                    test_unit[ dateFlag ] = {}
                if not energy in test_unit[ dateFlag ]:
                    test_unit[ dateFlag ][energy] = {
                        "counts": 0,
                        "ready": False,
                        "pdfName" : "",
                        "pdf": False,
                        "acceptance" : {}
                    }
                # increase the field count for this date flag and energy
                test_unit[ dateFlag ][ energy ][ "counts" ] += 1
                # mark ready once the configured minimum count is reached
                if test_unit[ dateFlag ][ energy ][ "counts" ] >= energyFields:
                    test_unit[ dateFlag ][ energy ][ "ready" ] = True
                # assemble the PDF file name from the configured template
                pdfName = self.config.render_template(
                    self.config["templates"][ "PDF-" + tagArt + "-filename"],
                    {
                        "AcquisitionYear": info["AcquisitionYear"],
                        "AcquisitionMonth": info["AcquisitionMonth"],
                        "unit": unit,
                        "energy": energy,
                        "testId": testId
                    }
                )
                if pdfName in pdfFiles:
                    test_unit[ dateFlag ][ energy ][ "pdfName" ] = pdfName
                    test_unit[ dateFlag ][ energy ][ "pdf" ] = True
        # record tags/tests that could not be resolved
        data["inactiv"] = inactiv
        data["tagNotFound"] = tagNotFound
        data["testNotFound"] = testNotFound
        return data
# ---------------------- einfache Ausgaben
def getTagging(self, art:str="full", pid:list=[], output_format="json" ):
"""alle Tags in Comment Feldern als html Tabelle zurückgeben
Parameters
----------
art : str, optional
Art der Tagging Tabellen (). The default is "full".
* full
* sum
* test
* tags
pid : list, optional
Angabe von PatientsIds für die Tags bestimmt werden sollen. The default is [].
output_format: str
Format der Ausgabe [ json, html ]
Returns
-------
str|dict
html Tags code oder dict.
"""
style = """
<style>
.gqa-tagging {
}
.gqa-tagging table {
color: #333;
font-family: Helvetica, Arial, sans-serif;
min-width: 100px;
border-collapse: collapse;
border-spacing: 0;
font-size: 10px;
}
.gqa-tagging table td, .gqa-tagging table th {
border: 1px solid gray;
text-align: center;
vertical-align: middle;
}
.gqa-tagging table th {
font-weight: bold;
}
.gqa-tagging table thead th, .gqa-tagging table tbody th {
background-color: #F7F7F7;
}
.gqa-tagging table td {
background-color: white;
}
.gqa-tagging table th, .gqa-tagging table td, .gqa-tagging table caption {
padding: 2px 2px 2px 2px;
}
</style>
"""
split = True
if art == "tags":
# bei tags conmment nicht splitten
split = False
tags = self.getTags( pid, split )
if output_format == "json":
return tags
if not tags or len(tags) == 0:
return "getTagging: keine Daten gefunden"
html = '<div class="gqa-tagging flex-1">'
html += '<h1 class="m-0 p-1 text-white bg-secondary">Art: ' + art + '</h2>'
# Pandas erzeugen
df = pd.DataFrame( tags )
if art == "full":
table = pd.pivot_table( df,
index=['Comment', 'CourseId', 'PlanSetupId', 'Energy', 'DoseRate', 'RadiationId'],
columns='PatientId',
values= "nummer",
fill_value=0
)
elif art == "sum":
table = pd.pivot_table( df,
index=['Comment', 'CourseId', 'PlanSetupId','Energy', 'DoseRate'],
columns=['PatientId'],
values= 'nummer',
aggfunc=[np.sum],
fill_value=0
)
elif art == "test":
table = pd.pivot_table( df,
index=['Comment', 'CourseId', 'Energy', 'DoseRate'],
columns=[ 'PlanSetupId', 'PatientId'],
values= 'nummer',
aggfunc=[np.sum],
fill_value=0
)
elif art == "tags":
table = pd.pivot_table( df,
index=['Comment'],
columns=['PatientId'],
values= 'nummer',
fill_value=0
#aggfunc=[np.sum]
)
# tags zurückgeben als einfache Tabelle
#table = df[ ["Comment"] ].groupby( "Comment" ).first().reset_index()
# table.fillna('', inplace=True)
html += (table.style
.applymap( highlight_fifty )
.set_table_attributes('class="gqa-tagging-table"')
#.float_format()
.render()
)
html += '</div>'
return style + html
def getMatrix( self, output_format="json", params:dict={} ):
"""Gibt eine Liste alle Testbeschreibungen (config) mit Anleitungen
Parameters
----------
output_format: str
Format der Ausgabe [ json, html ]
params: dict
Aufrufparameter mit year und month
Returns
-------
str|dict
html matrix code oder dict.
"""
# jahr und Monat bei 0 mit dem aktuellen belegen
today = date.today()
if params["year"] == 0:
params["year"] = today.year
if params["month"] == 0:
params["month"] = today.month
# pdf wird zum laden der Texte verwendet
from isp.mpdf import PdfGenerator as ispPdf
pdf = ispPdf()
html_jt = ""
html_mt = ""
html_nn = ""
data_dict = {}
for key, content in self.config.GQA.items():
data = {
"key" : key,
"tip" : "",
"need" : "",
"anleitung" : "",
"chips" : ""
}
chips = []
# units und energy
for unit_key, unit in self.config.units.items():
if unit in content:
for energy in content[ unit ].energy:
chips.append( { "class": "badge badge-pill badge-info mr-1", "content": "{} - {}".format( unit_key, energy ) } )
# info bestimmen
info = content.info
data["tip"] = info.get("tip", "")
need = info.get("need", "")
if type(need) == str and need != "":
chips.append( { "class": "badge badge-pill badge-success", "content": 'benötigt: ' + need } )
# Anleitung
anleitung_filename = info.get("anleitung", "")
data["anleitung"] = '<p class="badge badge-pill badge-primary">Anleitung fehlt!</p>'
if anleitung_filename != "":
anleitung = pdf.textFile(anleitung_filename, render = False)
if anleitung:
data["anleitung"] = anleitung
# Toleranz
tolerance = content.info.get("tolerance", False)
if tolerance:
data["anleitung"] += "<h6>Toleranz</h6>"
# ggf formel erstellen
for e, item in tolerance.items():
self.prepare_tolerance(key, e)
pass
# toleranz einfügen
data["anleitung"] += '<pre class="toleranz bg-light text-monospace ">' + json.dumps( tolerance, indent=2 ) + '</pre>'
# ist der test als inaktiv Hinweis ausgeben
inaktiv = content.info.get('inaktiv', False)
if inaktiv != False:
chips.append( { "class": "inaktiv", "content": 'Inaktiv: ' + inaktiv } )
# gibt es optional Angaben
optional = content.info.get('optional', [])
if len(optional) > 0:
for item in optional:
chips.append( { "class": "badge badge-pill badge-primary", "content": 'Optional wenn: ' + item + ' OK' } )
# TODO
todo = content.info.get("TODO", False)
if todo and len(todo) > 0:
data["anleitung"] += "TODO"
data["anleitung"] += '<pre class="p-1 bg-warning">'
for t in todo:
data["anleitung"] += "* " + t + "\n"
data["anleitung"] += '</pre>'
# markierungen zusammenstellen
for chip in chips:
data["chips"] += '<div class="{class}">{content}</div>'.format(**chip)
data_dict[ key ] = content.toDict()
data_dict[ key ][ "anleitung" ] = anleitung
card = """
<div class="card m-3" >
<div class="card-header">
<span class="font-weight-bolder">{key}</span>
<span class="pl-3">{tip}</span>
<div class="float-right">{chips}</div>
</div>
<div class="card-body p-1">
{anleitung}
</div>
</div>
""".format( **data )
if key[0:2] == "JT":
html_jt += card
elif key[0:2] == "MT":
html_mt += card
else:
html_nn += card
if output_format == "json":
return data_dict
style = """
<style>
/* Anpassung pdf text */
.gqa_matrix h2 {
font-size: 1.1667em;
font-weight: bold;
line-height: 1.286em;
margin-top: 0.5em;
margin-bottom: 0.5em;
}
.gqa_matrix .card-body p::first-of-type {
background-color: #FFFFFFAA;
}
</style>
"""
html = '''
<div class="gqa_matrix">
<h1 class="m-0 p-1 text-white bg-secondary" >Angaben für: {month}/{year}</h1>
<content class="p-1 d-flex flex-row" >
<div class="w-50">{jt}</div>
<div class="w-50">{mt}</div>
<div class="">{nn}</div>
</content>
</div>
'''.format( jt=html_jt, mt=html_mt, nn=html_nn, **params )
return style + html
    def prepare_tolerance(self, testid:str="", energy=None):
        """Check whether config has a tolerance entry for testid and energy.

        If ``f`` was not given, a formula is assembled in ``f`` from ``value``
        (with ``operator``) or from ``range``.
        A ``GQA.<testid>.info.tolerance.default`` entry, if present, is used as
        the base for every energy.

        Config branch::

            GQA.<testid>.info.tolerance.<energy>
            {
                name: {
                    f: formula containing {value}
                    value: value
                    range: [min, max]
                    operator: [ eq, ne, lt, gt, le, ge]
                }
            }

        Parameters
        ----------
        testid : str, optional
            id of the tolerance section to process
        energy : str, optional
            energy to look up. The default is None.
            Without it only the ``default`` section applies.

        Returns
        -------
        tolerance : DotMap|dict
            merged tolerance settings for the energy (empty DotMap when neither
            an energy entry nor a default exists).

            Example::

                "default": {
                    "warning" : { "f":"abs({value}) > 1.0", "unit": "%" },
                    "error" : { "f":"abs({value}) > 2.0", "unit": "%" },
                    "check" : { "field": "diff", "query":"ME == 100" }
                },
                "MU_20": {
                    "warning" : { "f":"abs({value}) > 1.0", "unit": "%" },
                    "error" : { "f":"abs({value}) > 2.5", "unit": "%" },
                    "check" : { "field": "diff", "query":"ME == 20" }
                },
        """
        info = self.config.get( ["GQA", testid, "info" ] )
        default = info.tolerance.get( "default", False )
        tolerance = info.tolerance.get( energy, False )
        if not tolerance and not default:
            return DotMap()
        if not default:
            default = DotMap()
        # merge the energy specific settings over the defaults
        if tolerance:
            tolerance = dict_merge( default, tolerance)
        else:
            tolerance = default
        #print("prepare_tolerance tolerance", tolerance )
        import functools
        # walk all entries
        for name in tolerance:
            if not isinstance( tolerance.get(name), dict ):
                continue
            for artName, art in tolerance.get(name).items():
                # skip when art == "soll" or a formula f is already present
                if artName == "soll" or art.get("f", None):
                    continue
                # no formula given: create one.
                # was a single value given?
                _value = art.get("value", None)
                _range = art.get("range", None)
                if _value:
                    # first resolve the operator keyword to a python operator
                    operator = art.get("operator", "gt")
                    # [ eq, ne, lt, gt, le, ge]
                    operator = functools.reduce(lambda a, b: a.replace(*b)
                          , [('eq','=='),('ne','!='),('lt', '<'),( 'gt', '>'),( 'le','<='),( 'ge', '>=')] #iterable of pairs: (oldval, newval)
                          , operator #The string from which to replace values
                          )
                    tolerance[name][artName]["f"] = "abs({}) {} {}".format( "{value}", operator, _value )
                # was a range given?
                elif art.get("range", None) and len(_range) >= 2:
                    # NOTE(review): "min <= {value} >= max" looks like it should be a
                    # between-check ("min <= {value} <= max") — confirm intended semantics
                    tolerance[name][artName]["f"] = "{} <= {} >= {}".format( _range[0], "{value}", _range[1] )
        return tolerance
# ---------------------- Test durchführung
| 35.876611 | 209 | 0.523022 | # -*- coding: utf-8 -*-
__author__ = "R. Bauer"
__copyright__ = "MedPhyDO - Machbarkeitsstudien des Instituts für Medizinische Strahlenphysik und Strahlenschutz am Klinikum Dortmund im Rahmen von Bachelor und Masterarbeiten an der TU-Dortmund / FH-Dortmund"
__credits__ = ["R.Bauer", "K.Loot"]
__license__ = "MIT"
__version__ = "0.1.2"
__status__ = "Prototype"
from dotmap import DotMap
import os.path as osp
from pathlib import Path
import pandas as pd
import numpy as np
import json
from datetime import date
from isp.dicom import ispDicom
from isp.config import dict_merge
from app.config import infoFields
from app.aria import ariaClass
#from app.dicom import dicomClass
from app.results import ispResults
from app.qa.mlc import checkMlc
from app.qa.field import checkField
from app.qa.wl import checkWL
from app.qa.vmat import checkVMAT
import logging
logger = logging.getLogger( "MQTT" )
class ariaDicomClass( ariaClass, ispDicom ):
'''Zentrale Klasse
Attributes
----------
config : Dot
konfigurations Daten
variables :
Metadaten aus config.variables
infoFields:
Infofelder aus config
dicomfiles: dict
geladene Dicom dateien
pd_results: pd
testergebnisse als Pandas tabelle
resultfile
Datei mit Ergebnissen als panda File
lastSQL: str
die letzte durchgeführte sql Abfrage
'''
def __init__( self, database=None, server="VMSDBD", config=None ):
"""Klasse sowie ariaClass und dicomClass initialisieren
"""
# Klassen defaults setzen und übergaben
self.config = config
self.variables = self.config.variables
self.infoFields = infoFields
self.dicomfiles: dict = {}
self.pd_results = None
self.resultfile = None
self.lastSQL = ""
# ariaClass initialisieren
ariaClass.__init__( self, database )
# dicomClass initialisieren. Der Erfolg kann über dicomClass.initialized abgefragt werden
ispDicom.__init__( self, server, self.config )
# Datei mit Ergebnissen als pandas laden
self.resultfile = osp.join( self.config.get("resultsPath", ".."), self.config.get("database.gqa.name", "gqa.json") )
self.pd_results = ispResults( self.config, self.resultfile )
def initResultsPath(self, AcquisitionYear=None ):
'''Den Ablegeort zu den PDF Dateien bestimmen
in variables.path befindet sich jetzt der resultsPath ggf. mit angehängten AcquisitionYear
Parameters
----------
AcquisitionYear : TYPE, optional
DESCRIPTION. The default is None.
Returns
-------
dirname : str
der aktuelle PDF Pfad (auch in self.variables["path"] )
'''
paths = [ ]
# ist der Pfad relativ angegeben ab base path verwenden
if self.config["resultsPath"][0] == ".":
paths.append( self.config["BASE_DIR"] )
paths.append( self.config["resultsPath"] )
else:
paths.append( self.config["resultsPath"] )
# zusätzlich noch das AcquisitionYear anfügen
if AcquisitionYear:
paths.append( str(AcquisitionYear) )
# den Pfad in variables["path"] ablegen
dirname = osp.abspath( osp.join( *paths ) )
self.variables["path"] = dirname
return dirname
def getAllGQA(self, pids=None, testTags:list=None, year:int=None, month:int=None, day:int=None, withInfo=True, withResult=False ):
'''Holt für die angegebenen PatientenIds aus allen Courses
die Felder mit Angaben in [Radiation].[Comment] und wertet sie entsprechend aus
Parameters
----------
pids : list, optional
DESCRIPTION. The default is None.
testTags : list, optional
DESCRIPTION. The default is None.
year : int, optional
DESCRIPTION. The default is None.
month : int, optional
DESCRIPTION. The default is None.
day : int, optional
DESCRIPTION. The default is None.
withInfo : TYPE, optional
DESCRIPTION. The default is True.
withResult : TYPE, optional
DESCRIPTION. The default is False.
Returns
-------
gqa : dict
Aufbau::
units: dict
<unit>: dict
<infoType>: dict
ready: dict
all: int
<energy> : int
gqa: dict
fields: int
energyFields: int
counts: dict
all: int
<energy> : int
pdf: dict,
items: dict
<energy>: dict
<SliceUID>: {info} -> dies wird bei run in ein DataFrame umgewandelt
series: [],
'''
if not pids:
return {}
if type(pids) == str:
pids = pids.split(",")
if not type(pids) == list:
pids = [pids]
if not pids or len(pids) == 0:
return {}
# filter zusammenstellen
where = "LEN([Radiation].[Comment]) > 0 "
subSql = []
for pid in pids:
subSql.append( "[Patient].[PatientId]='{}'".format( pid.strip() ) )
if len( subSql ) > 0:
where += " AND (" + " OR ".join( subSql ) + ")"
images, sql = self.getImages(
addWhere=where,
AcquisitionYear=year,
AcquisitionMonth=month,
AcquisitionDay=day,
testTags=testTags
)
self.lastSQL = sql
# Pfad für die PDF Dateien
self.initResultsPath( year )
return self.prepareGQA( images, year=year, withInfo=withInfo, withResult=withResult )
def prepareGQA(self, imagedatas=[], year:int=0, withInfo=True, withResult=False, withDicomData:bool=False ):
"""Auswertung für GQA vorbereiten zusätzlich noch Ergebnisse aus der Datenbank einfügen
Benötig config.GQA und config.units
- units: ["Linac-1", "Linac-2"],
- gqa : dict
<testId>: dict
<unit>: dict
fields: int
energyFields: int
Parameters
----------
imagedatas : list, optional
Auflistungen von Bildinformationen aus der Aria Datenbank. The default is [].
year : int, optional
DESCRIPTION. The default is 0.
withInfo : TYPE, optional
alle ImageInfos mit hinzufügen. The default is True.
withResult : boolean, optional
Testergebnisse mit ausgeben. The default is False.
withDicomData : boolean, optional
Info pro gerät in dicomfiles ablegen. The default is False.
Returns
-------
gqa : dict
# alles aus config.gqa dabei die Unites mit Daten füllen
<testname>
info:
inaktiv
tip
anleitung
options:
TODO:
tolerance:
<energy>
<unit-n>
fields: int
energyFields: int
energy: list
"""
# dicom gerät , name , infos
self.dicomfiles = {}
units = self.config.units
# Dateien im Pfad
pdfFiles = []
if osp.exists( self.variables["path"] ):
p = Path( self.variables["path"] )
pdfFiles = [i.name for i in p.glob( '*.pdf' )]
# files = os.listdir( self.variables["path"] )
data = {
"GQA" : self.config.get("GQA").toDict(),
"units" : units,
"testTags" : {},
"testIds": {}
}
# nur das gesuchte Jahr, ohne index
df_results = self.pd_results.gqa[ self.pd_results.gqa['year'] == year ].reset_index()
result_fields = [ "acceptance", "group" ]
if withResult:
result_fields.append("data")
# neuen index setzen
# Das geht nur bei daten in df_results
if len(df_results.index) > 0:
df_results.set_index( df_results.apply(lambda x: f"{x['year']}|{x['unit']}|{x['test']}|{x['energy']}|{x['month']}", axis=1), inplace=True )
data["results"] = df_results[ result_fields ].to_dict( orient="split" )
else:
data["results"] = {
"columns":result_fields,
"data":[],
"index":[]
}
# tags und gqa ids bestimmen
for testid, item in self.config.GQA.items():
if "tag" in item:
data["testTags"][ item["tag"] ] = testid
data["testIds"][ testid ] = item["tag"]
tagNotFound = {}
inactiv = []
testNotFound = []
for imagedata in imagedatas:
# bereitetet die Datenbank Informationen auf
info = self.getImageInfos( imagedata )
unit = info["unit"]
energy = info["energy"]
#
# zusätzlich die Daten in self.dicomfiles ablegen
#
if withDicomData:
if not unit in self.dicomfiles:
self.dicomfiles[ unit ] = {}
# zusätzlich in dicomfiles ablegen
self.dicomfiles[ unit ][ info["id"] ] = info
# Felder zuordnen, eine Aufnahme kann für mehrere tests verwendet werden
# tag für die Datenbank, testid für das PDF
for testTag in info["testTags"]:
# nur wenn es auch einen test gibt
if not testTag in data["testTags"]:
tagNotFound[ testTag ] = testTag
continue
testId = data["testTags"][testTag]
# ist der test in gqa nicht erlaubt überspringen
# inaktive kann auch einen Text enthalten der beschreibt warum
# FIXME: inaktive
t = "GQA.{}.info.inaktiv".format( testId )
if not self.config.get(t, False) == False:
inactiv.append( self.config.get(t) )
continue
# gibt es in GQA passend zum Test dem Gerät und der Energie einen Eintrag
t = "GQA.{}.{}.energyFields.{}".format( testId, unit, energy )
energyFields = self.config.get(t, False)
if energyFields == False:
testNotFound.append( t )
continue
# Art des tests MT|JT
tagArt = testId[0:2]
if tagArt == "JT":
dateFlag = "0"
else:
dateFlag = str( info["AcquisitionMonth"] )
#
test_unit = data["GQA"][testId][unit]
if not dateFlag in test_unit:
test_unit[ dateFlag ] = {}
if not energy in test_unit[ dateFlag ]:
test_unit[ dateFlag ][energy] = {
"counts": 0,
"ready": False,
"pdfName" : "",
"pdf": False,
"acceptance" : {}
}
# Anzahl der Felder für das Datumsflag der jeweiligen Energie erhöhen (counts)
test_unit[ dateFlag ][ energy ][ "counts" ] += 1
# auf mid Anzahl prüfen
if test_unit[ dateFlag ][ energy ][ "counts" ] >= energyFields:
test_unit[ dateFlag ][ energy ][ "ready" ] = True
# PDF Dateiname zusammenstellen
pdfName = self.config.render_template(
self.config["templates"][ "PDF-" + tagArt + "-filename"],
{
"AcquisitionYear": info["AcquisitionYear"],
"AcquisitionMonth": info["AcquisitionMonth"],
"unit": unit,
"energy": energy,
"testId": testId
}
)
if pdfName in pdfFiles:
test_unit[ dateFlag ][ energy ][ "pdfName" ] = pdfName
test_unit[ dateFlag ][ energy ][ "pdf" ] = True
# nicht gefundene Tags
data["inactiv"] = inactiv
data["tagNotFound"] = tagNotFound
data["testNotFound"] = testNotFound
return data
# ---------------------- einfache Ausgaben
def getTagging(self, art:str="full", pid:list=[], output_format="json" ):
"""alle Tags in Comment Feldern als html Tabelle zurückgeben
Parameters
----------
art : str, optional
Art der Tagging Tabellen (). The default is "full".
* full
* sum
* test
* tags
pid : list, optional
Angabe von PatientsIds für die Tags bestimmt werden sollen. The default is [].
output_format: str
Format der Ausgabe [ json, html ]
Returns
-------
str|dict
html Tags code oder dict.
"""
style = """
<style>
.gqa-tagging {
}
.gqa-tagging table {
color: #333;
font-family: Helvetica, Arial, sans-serif;
min-width: 100px;
border-collapse: collapse;
border-spacing: 0;
font-size: 10px;
}
.gqa-tagging table td, .gqa-tagging table th {
border: 1px solid gray;
text-align: center;
vertical-align: middle;
}
.gqa-tagging table th {
font-weight: bold;
}
.gqa-tagging table thead th, .gqa-tagging table tbody th {
background-color: #F7F7F7;
}
.gqa-tagging table td {
background-color: white;
}
.gqa-tagging table th, .gqa-tagging table td, .gqa-tagging table caption {
padding: 2px 2px 2px 2px;
}
</style>
"""
split = True
if art == "tags":
# bei tags conmment nicht splitten
split = False
tags = self.getTags( pid, split )
if output_format == "json":
return tags
if not tags or len(tags) == 0:
return "getTagging: keine Daten gefunden"
html = '<div class="gqa-tagging flex-1">'
html += '<h1 class="m-0 p-1 text-white bg-secondary">Art: ' + art + '</h2>'
# Pandas erzeugen
df = pd.DataFrame( tags )
if art == "full":
table = pd.pivot_table( df,
index=['Comment', 'CourseId', 'PlanSetupId', 'Energy', 'DoseRate', 'RadiationId'],
columns='PatientId',
values= "nummer",
fill_value=0
)
elif art == "sum":
table = pd.pivot_table( df,
index=['Comment', 'CourseId', 'PlanSetupId','Energy', 'DoseRate'],
columns=['PatientId'],
values= 'nummer',
aggfunc=[np.sum],
fill_value=0
)
elif art == "test":
table = pd.pivot_table( df,
index=['Comment', 'CourseId', 'Energy', 'DoseRate'],
columns=[ 'PlanSetupId', 'PatientId'],
values= 'nummer',
aggfunc=[np.sum],
fill_value=0
)
elif art == "tags":
table = pd.pivot_table( df,
index=['Comment'],
columns=['PatientId'],
values= 'nummer',
fill_value=0
#aggfunc=[np.sum]
)
# tags zurückgeben als einfache Tabelle
#table = df[ ["Comment"] ].groupby( "Comment" ).first().reset_index()
# table.fillna('', inplace=True)
def highlight_fifty( val ):
color = 'black' if val > 0 else 'white'
#print(val , color)
return 'color: %s' % color
html += (table.style
.applymap( highlight_fifty )
.set_table_attributes('class="gqa-tagging-table"')
#.float_format()
.render()
)
html += '</div>'
return style + html
def getMatrix( self, output_format="json", params:dict={} ):
    """Return all test descriptions (config) with their instructions.

    Parameters
    ----------
    output_format: str
        Output format [ json, html ]
    params: dict
        Call parameters with year and month.
        NOTE(review): mutable default argument that is also mutated below
        (params["year"] / params["month"] are assigned) - the default dict is
        shared across calls and a caller-supplied dict is modified in place;
        consider ``params=None``. TODO confirm with callers.

    Returns
    -------
    str|dict
        html matrix code or dict.
    """
    # replace year/month given as 0 with the current date
    today = date.today()
    if params["year"] == 0:
        params["year"] = today.year
    if params["month"] == 0:
        params["month"] = today.month
    # the pdf generator is used here only to load the instruction texts
    from isp.mpdf import PdfGenerator as ispPdf
    pdf = ispPdf()
    # html buckets: yearly tests (JT), monthly tests (MT), everything else
    html_jt = ""
    html_mt = ""
    html_nn = ""
    data_dict = {}
    for key, content in self.config.GQA.items():
        # per-test card data used to fill the html template below
        data = {
            "key" : key,
            "tip" : "",
            "need" : "",
            "anleitung" : "",
            "chips" : ""
        }
        chips = []
        # one badge per unit/energy combination this test is configured for
        for unit_key, unit in self.config.units.items():
            if unit in content:
                for energy in content[ unit ].energy:
                    chips.append( { "class": "badge badge-pill badge-info mr-1", "content": "{} - {}".format( unit_key, energy ) } )
        # pull the info section of the test
        info = content.info
        data["tip"] = info.get("tip", "")
        need = info.get("need", "")
        if type(need) == str and need != "":
            chips.append( { "class": "badge badge-pill badge-success", "content": 'benötigt: ' + need } )
        # instruction text, loaded from file; fallback is a "missing" badge
        anleitung_filename = info.get("anleitung", "")
        data["anleitung"] = '<p class="badge badge-pill badge-primary">Anleitung fehlt!</p>'
        if anleitung_filename != "":
            anleitung = pdf.textFile(anleitung_filename, render = False)
            if anleitung:
                data["anleitung"] = anleitung
        # tolerance section
        tolerance = content.info.get("tolerance", False)
        if tolerance:
            data["anleitung"] += "<h6>Toleranz</h6>"
            # create the formula for each energy if necessary (side effect only)
            for e, item in tolerance.items():
                self.prepare_tolerance(key, e)
                pass
            # append the tolerance definition as preformatted json
            data["anleitung"] += '<pre class="toleranz bg-light text-monospace ">' + json.dumps( tolerance, indent=2 ) + '</pre>'
        # show a hint when the test is marked inactive
        inaktiv = content.info.get('inaktiv', False)
        if inaktiv != False:
            chips.append( { "class": "inaktiv", "content": 'Inaktiv: ' + inaktiv } )
        # optional markers
        optional = content.info.get('optional', [])
        if len(optional) > 0:
            for item in optional:
                chips.append( { "class": "badge badge-pill badge-primary", "content": 'Optional wenn: ' + item + ' OK' } )
        # open TODO items from config
        todo = content.info.get("TODO", False)
        if todo and len(todo) > 0:
            data["anleitung"] += "TODO"
            data["anleitung"] += '<pre class="p-1 bg-warning">'
            for t in todo:
                data["anleitung"] += "* " + t + "\n"
            data["anleitung"] += '</pre>'
        # assemble the badges
        for chip in chips:
            data["chips"] += '<div class="{class}">{content}</div>'.format(**chip)
        data_dict[ key ] = content.toDict()
        # NOTE(review): ``anleitung`` is only assigned inside the branch
        # above; if the very first test has no "anleitung" file this raises
        # NameError, otherwise the previous iteration's text leaks in.
        # Presumably data["anleitung"] was intended - verify.
        data_dict[ key ][ "anleitung" ] = anleitung
        card = """
<div class="card m-3" >
<div class="card-header">
<span class="font-weight-bolder">{key}</span>
<span class="pl-3">{tip}</span>
<div class="float-right">{chips}</div>
</div>
<div class="card-body p-1">
{anleitung}
</div>
</div>
""".format( **data )
        # sort the card into the yearly / monthly / other bucket
        if key[0:2] == "JT":
            html_jt += card
        elif key[0:2] == "MT":
            html_mt += card
        else:
            html_nn += card
    # json output: plain dict, no html wrapping
    if output_format == "json":
        return data_dict
    style = """
<style>
/* Anpassung pdf text */
.gqa_matrix h2 {
font-size: 1.1667em;
font-weight: bold;
line-height: 1.286em;
margin-top: 0.5em;
margin-bottom: 0.5em;
}
.gqa_matrix .card-body p::first-of-type {
background-color: #FFFFFFAA;
}
</style>
"""
    html = '''
<div class="gqa_matrix">
<h1 class="m-0 p-1 text-white bg-secondary" >Angaben für: {month}/{year}</h1>
<content class="p-1 d-flex flex-row" >
<div class="w-50">{jt}</div>
<div class="w-50">{mt}</div>
<div class="">{nn}</div>
</content>
</div>
'''.format( jt=html_jt, mt=html_mt, nn=html_nn, **params )
    return style + html
def prepare_tolerance(self, testid:str="", energy=None):
    """Build the tolerance definition for ``testid`` and ``energy``.

    Checks whether config contains a tolerance entry for the test id and the
    given energy and, when no formula ``f`` was supplied, assembles one from
    ``value``/``operator`` or ``range``.

    A ``GQA.<testid>.info.tolerance.default`` entry, when present, is used
    as the base for every energy; energy-specific entries override it.

    Config branch::

        GQA.<testid>.info.tolerance.<energy>
        {
            name: {
                f: formula containing {value}
                value: number
                range: [min, max]
                operator: [ eq, ne, lt, gt, le, ge ]
            }
        }

    Parameters
    ----------
    testid : str, optional
        id of the tolerance section to process
    energy : str, optional
        energy to look up. Without it only the default section applies.

    Returns
    -------
    dict
        merged tolerance definition with an ``f`` formula per entry, e.g.::

            "default": {
                "warning" : { "f":"abs({value}) > 1.0", "unit": "%" },
                "error"   : { "f":"abs({value}) > 2.0", "unit": "%" },
                "check"   : { "field": "diff", "query":"ME == 100" }
            }
    """
    info = self.config.get( ["GQA", testid, "info" ] )
    default = info.tolerance.get( "default", False )
    tolerance = info.tolerance.get( energy, False )
    # nothing configured at all -> empty result
    if not tolerance and not default:
        return DotMap()
    if not default:
        default = DotMap()
    # energy-specific settings override the defaults
    if tolerance:
        tolerance = dict_merge( default, tolerance )
    else:
        tolerance = default
    # mapping of symbolic operator names to python comparison operators;
    # unknown values (e.g. an operator already given as symbol) pass through
    _operators = { 'eq': '==', 'ne': '!=', 'lt': '<', 'gt': '>', 'le': '<=', 'ge': '>=' }
    # walk all entries and create a formula where none was given
    for name in tolerance:
        if not isinstance( tolerance.get(name), dict ):
            continue
        for artName, art in tolerance.get(name).items():
            # skip target values ("soll") and entries that already carry f
            if artName == "soll" or art.get("f", None):
                continue
            _value = art.get("value", None)
            _range = art.get("range", None)
            if _value:
                # single limit: abs({value}) <op> limit
                operator = art.get("operator", "gt")
                operator = _operators.get( operator, operator )
                tolerance[name][artName]["f"] = "abs({}) {} {}".format( "{value}", operator, _value )
            elif _range and len(_range) >= 2:
                # BUGFIX: the previous formula "min <= {value} >= max" is a
                # Python chained comparison meaning "min <= v AND v >= max",
                # which can never describe a range. Emit a real range check.
                # NOTE(review): value-based formulas express a *violation*
                # (abs(value) > limit); confirm whether range entries are
                # meant the same way and should be inverted (out-of-range).
                tolerance[name][artName]["f"] = "{} <= {} <= {}".format( _range[0], "{value}", _range[1] )
    return tolerance
# ---------------------- Test durchführung
def runTests(self, pid=None,
        year:int=None, month:int=None, day:int=None,
        testId:str=None, addWhere:str="", reloadDicom:bool=False, unittest:bool=False ):
    """Run the configured test ``testId`` for unit ``pid`` and a date filter.

    Looks up the test tag in config, fetches the matching image data via
    ``getTestData`` and delegates each configured energy to ``doTestType``.
    Results are upserted into the pandas result store and written once.

    Parameters
    ----------
    pid:
        unit / patient id the tests run for
    year, month, day: int
        acquisition date filter passed to getTestData
    testId: str
        id of the test in config.GQA
    addWhere: str
        NOTE(review): currently unused in this method - confirm whether it
        should be forwarded to getTestData.
    reloadDicom: bool
        force reloading of the dicom files
    unittest: bool
        unit-test mode flag, forwarded via payload

    Returns
    -------
    dict
        results keyed by the generated pdf filename.
    """
    # the results of the respective pdf file
    test_results = {}
    # provide units and test
    # units = self.config.units
    unit = self.config.get( ["units", pid], False)
    test = self.config.get( ["GQA", testId], False)
    if test == False or unit == False:
        return test_results
    # determine tags and gqa ids
    tags = {}
    for key, item in self.config.GQA.items():
        if "tag" in item:
            tags[ item["tag"] ] = key
    testTag = test.tag
    #testId = tags[ testTag ]
    #unit = units[ pid ]
    # getTestData searches the database for the tag of the test
    data = self.getTestData(
        PatientId=pid,
        AcquisitionYear=year,
        AcquisitionMonth=month,
        AcquisitionDay=day,
        testTags=[ testTag ]
    )
    #print("runTests", testTag, data )
    energyFields = self.config.get( ["GQA", testId, unit, 'energyFields'], {} )
    for energy, info in data.items():
        # only the energies that were configured
        if not energy in energyFields:
            continue
        # payload extends ``variables`` in doTestType and is used for MQTT
        payload = {
            "testId" : testId,
            "testTag" : testTag,
            "AcquisitionYear" : year,
            "AcquisitionMonth" : month,
            "unit" : unit,
            "energy" : energy,
            "reloadDicom" : reloadDicom,
            "unittest": unittest
        }
        # run the test
        # NOTE(review): doTestType returns False when given empty data,
        # which would make this tuple unpacking raise TypeError - verify.
        pdfFilename, results = self.doTestType(
            testId = testId,
            data = info,
            payload = payload
        )
        # store results in pandas
        if len(results) > 0:
            self.pd_results.upsert( results["result"] )
        # only keep when a pdf was produced
        if not pdfFilename == "":
            # remember results per filename
            test_results[ pdfFilename ] = results
    # persist the pandas results
    self.pd_results.write()
    # return the pdf files
    return test_results
def doTestType(self, testId:str, data=None, payload:dict={} ):
    """Run a single test type on the given image data and build its pdf.

    Fetches the DICOM files for every SOPInstanceUID in ``data``, builds a
    pandas DataFrame from the image metadata and dispatches to the check
    implementation matching ``testId``.

    NOTE(review): ``payload`` is a mutable default argument and is mutated
    below (``payload["index"] = 0``); consider ``payload=None``. Also the
    early exit returns ``False`` while every other path returns a
    ``(filename, dict)`` tuple - callers that unpack will fail. Verify.

    Parameters
    ----------
    testId: str
        id of the test in config.GQA (e.g. "JT-7_2", "MT-WL")
    data: dict
        image info per SOPInstanceUID as produced by getTestData
    payload: dict
        run parameters (unit, energy, acquisition date, ...); merged into
        the template variables and used for MQTT progress messages

    Returns
    -------
    tuple
        (pdf filepath, {"result": ..., "pdfData": ...})
    """
    # do nothing without data
    if len(data) == 0:
        # display the problem?
        return False
    # provide the metadata as a copy extended by the payload,
    # so unit and energy are available for the pdf
    variables=self.variables.copy()
    # extend variables with the payload
    variables.update( payload )
    # extend variables with the info section of the test
    #variables.update( self.prepare_tolerance( variables['testId'], variables['energy'] ) )
    # extend the metadata with the test variant
    variables["variante"] = payload["testTag"]
    #variables["testTag"] = payload["testTag"]
    #variables["testId"] = testId
    # extend variables with the config data of the test; used as metadata in the test class
    # TODO: store AcquisitionYear and AcquisitionMonth as year and month in current
    variables["testConfig"] = self.config.get( ["GQA", testId ], DotMap() );
    current = self.config.get( ["GQA", testId, "current" ], DotMap() )
    variables["testConfig"]["current"] = dict_merge( current, DotMap({
        "testTag": variables["variante"],
        "testId": variables["testId"],
        "unit": variables["unit"],
        "energy": variables["energy"],
        "year": variables["AcquisitionYear"],
        "month": variables["AcquisitionMonth"],
        "fields": self.config.get( ["GQA", testId, variables["unit"], "energyFields", variables["energy"] ], current.get( "fields" ,0) ),
        # "tolerance": self.config.get( ["GQA", testId, "info", "tolerance", variables["energy"] ], current.get( "tolerance", {} ) )
        "tolerance": self.prepare_tolerance( variables['testId'], variables['energy'] )
    }) )
    variables["testConfig"]["AcquisitionYear"] = variables["AcquisitionYear"]
    variables["testConfig"]["AcquisitionMonth"] = variables["AcquisitionMonth"]
    # print("doTestType", variables.testConfig.current.toDict() )
    # fetch the required data from the server or the DICOM dir;
    # self.dicomfiles then holds the infos per unit as dict and
    # self.data the parsed DICOM data per SOPInstanceUID
    if not "index" in payload:
        payload["index"] = 0
    if "AcquisitionYear" in payload:
        AcquisitionYear = payload["AcquisitionYear"]
    else:
        AcquisitionYear = ""
    # prepare the path for the results
    self.initResultsPath( AcquisitionYear )
    imageCount = len( data )
    # read the dicom data
    i = 0
    read_count = 0
    dicomData = {}
    df = pd.DataFrame( data.values() )
    '''
    ['id', 'PatientId', 'RadiationId', 'RadiationSer', 'CourseId',
    'PlanSetupId', 'SliceRTType', 'ImageId', 'SeriesId', 'SeriesNumber',
    'CreationDate', 'studyID', 'filepath', 'filename', 'Kennung',
    'SOPClassUID', 'acquisition', 'AcquisitionYear', 'AcquisitionMonth',
    'day', 'Tag', 'unit', 'energy', 'doserate', 'MetersetExposure', 'ME',
    'Technique', 'gantry', 'GantryRtnExt', 'GantryRtnDirection',
    'StopAngle', 'collimator', 'CollMode', 'table', 'SID', 'MLCPlanType',
    'IndexParameterType', 'X1', 'X2', 'Y1', 'Y2', 'SliceUID', 'SeriesUID',
    'StudyUID', 'FrameOfReferenceUID', 'gating', 'testTags', 'subTags',
    'varianten', 'AcquisitionDateTime', 'dicom', 'check_variante',
    'check_subtag'],
    '''
    #print("doTestType", variante, df[ [ 'energy', "AcquisitionYear", "AcquisitionMonth", "varianten"] ] )
    # start progress
    if hasattr( logger, "progressStart"):
        logger.progressStart( testId, payload )
    # fetch dicom data with retrieve() - either locally or from the server
    for SOPInstanceUID in data:
        i += 1
        # 40% for the dicom data, 40% for the evaluation, 20% for the pdf
        if hasattr( logger, "progress"):
            logger.progress( testId, 40 / imageCount * i )
        # fetch dicom data; stored in self.dicomData amended by AcquisitionYear
        result, signals = self.retrieve( {
            "PatientID" : data[SOPInstanceUID]["PatientId"],
            "SOPInstanceUID" : SOPInstanceUID,
            "override" : variables["reloadDicom"],
            "subPath" : str(AcquisitionYear)
        })
        for dcm in result:
            data[ dcm.SOPInstanceUID ]["dicom"] = dcm
            # FIXME: access dicom data in all test modules via data and no longer via dicomData
            # getFullData() should then no longer be needed
            dicomData[ dcm.SOPInstanceUID ] = dcm
            read_count += 1
    # close the dicom connection if it was opened
    self.closeAE()
    if i > read_count: # pragma: no cover
        # reading the dicom data was not possible
        if hasattr( logger, "progress"):
            logger.progress( testId, 100 )
        logger.warning( "doTestType: dicom retrieve Fehler: {} - {} - {} - {}".format(
            SOPInstanceUID,
            data[SOPInstanceUID]["testTags"],
            data[SOPInstanceUID]["PatientId"],
            data[SOPInstanceUID]["ImageId"]
        ) )
        return "", { "result":"", "content":"" }
    # from here on always 40%
    if hasattr( logger, "progress"):
        logger.progress( testId, 40 )
    # create the DataFrame
    df = pd.DataFrame( data.values() )
    # derive check_variante / check_subtag for the current variant
    df["check_variante"] = variables["testTag"]
    df["check_subtag"] = df["varianten"].apply(lambda x: x.get( variables["testTag"] ))
    logger.debug( "doTestTag: {}".format( variables["testTag"] ) )
    #
    # prepare variables
    #
    # kind of test MT|JT
    infoTypeArt = testId[0:2]
    # filename from config templates
    variables["filename"] = self.config.get( ["templates", "PDF-{}-filename".format(infoTypeArt)], "noname.pdf" )
    # unless given, title and subject from config templates
    for t in ["Titel", "Betreff"]:
        if variables.get(t, "") == "":
            variables[t] = self.config.get( ["templates", "PDF-{}-{}".format(infoTypeArt, t)], "" )
    pdfData = {
        "pdf_filepath" : ""
    }
    result = []
    # dispatch to the check implementation for this test id
    # NOTE(review): this if/elif ladder could become a testId -> (class,
    # method) dispatch table; kept as-is here.
    if testId=="JT-4_2_2_1-A":
        check = checkMlc( self.config, variables, dicomData=dicomData )
        pdfData, result = check.doJT_4_2_2_1_A( df )
    elif testId=="JT-4_2_2_1-B":
        check = checkMlc( self.config, variables, dicomData=dicomData )
        pdfData, result = check.doJT_4_2_2_1_B( df )
    elif testId=="JT-4_2_2_1-C":
        check = checkMlc( self.config, variables, dicomData=dicomData )
        pdfData, result = check.doJT_4_2_2_1_C( df )
    elif testId=="JT-LeafSpeed":
        check = checkMlc( self.config, variables, dicomData=dicomData )
        pdfData, result = check.doJT_LeafSpeed( df )
    elif testId=="JT-10_3_1":
        check = checkMlc( self.config, variables, dicomData=dicomData )
        pdfData, result = check.doJT_10_3_1( df )
    elif testId=="JT-7_2":
        check = checkField( self.config, variables, dicomData=dicomData )
        pdfData, result = check.doJT_7_2( df )
    elif testId=="JT-7_3":
        check = checkField( self.config, variables, dicomData=dicomData )
        pdfData, result = check.doJT_7_3( df )
    elif testId=="JT-7_4":
        check = checkField( self.config, variables, dicomData=dicomData )
        pdfData, result = check.doJT_7_4( df )
    elif testId=="JT-7_5":
        check = checkField( self.config, variables, dicomData=dicomData )
        pdfData, result = check.doJT_7_5( df )
    elif testId=="JT-9_1_2":
        check = checkField( self.config, variables, dicomData=dicomData )
        pdfData, result = check.doJT_9_1_2( df )
    elif testId=="JT-10_3":
        check = checkField( self.config, variables, dicomData=dicomData )
        pdfData, result = check.doJT_10_3( df )
    elif testId=="MT-4_1_2":
        check = checkField( self.config, variables, dicomData=dicomData )
        pdfData, result = check.doMT_4_1_2( df )
    elif testId=="MT-WL":
        check = checkWL( self.config, variables, dicomData=dicomData )
        pdfData, result = check.doMT_WL( df )
    elif testId=="MT-8_02-1-2":
        check = checkMlc( self.config, variables, dicomData=dicomData )
        pdfData, result = check.doMT_8_02_1_2( df )
    elif testId=="MT-8_02-3":
        check = checkMlc( self.config, variables, dicomData=dicomData )
        pdfData, result = check.doMT_8_02_3( df )
    elif testId=="MT-8_02-4":
        check = checkMlc( self.config, variables, dicomData=dicomData )
        pdfData, result = check.doMT_8_02_4( df )
    elif testId=="MT-8_02-5":
        check = checkField( self.config, variables, dicomData=dicomData )
        pdfData, result = check.doMT_8_02_5( df )
    elif testId=="MT-LeafSpeed":
        check = checkMlc( self.config, variables, dicomData=dicomData )
        pdfData, result = check.doMT_LeafSpeed( df )
    elif testId=="MT-VMAT-0_1":
        check = checkField( self.config, variables, dicomData=dicomData )
        pdfData, result = check.doMT_VMAT_0_1( df )
    elif testId=="MT-VMAT-0_2":
        check = checkMlc( self.config, variables, dicomData=dicomData )
        pdfData, result = check.doMT_VMAT_0_2( df )
    elif testId=="MT-VMAT-1_1":
        check = checkMlc( self.config, variables, dicomData=dicomData )
        pdfData, result = check.doMT_VMAT_1_1( df )
    elif testId=="MT-VMAT-1_2":
        check = checkMlc( self.config, variables, dicomData=dicomData )
        pdfData, result = check.doMT_VMAT_1_2( df )
    elif testId=="MT-VMAT-2":
        check = checkVMAT( self.config, variables, dicomData=dicomData )
        pdfData, result = check.doMT_VMAT_2( df )
    elif testId=="MT-VMAT-3":
        check = checkVMAT( self.config, variables, dicomData=dicomData )
        pdfData, result = check.doMT_VMAT_3( df )
    # from here on progress is always 100%
    if hasattr( logger, "progress"):
        logger.progress( testId, 100 )
    # finish progress
    if hasattr( logger, "progress"):
        logger.progressReady( testId )
    #print("doTestType", testId, result )
    return pdfData["pdf_filepath"], { "result":result, "pdfData": pdfData }
| 13,532 | 0 | 83 |
3f1054735f9d61a24ad850ea7ddd8b84f561b898 | 1,451 | py | Python | evoware/scripts/resetfile.py | graik/evowarepy | 4ed2fbebb808b6be75479790ee23b19cb50b3039 | [
"Apache-2.0"
] | 2 | 2019-03-08T08:36:24.000Z | 2019-03-11T11:59:40.000Z | evoware/scripts/resetfile.py | graik/evowarepy | 4ed2fbebb808b6be75479790ee23b19cb50b3039 | [
"Apache-2.0"
] | 1 | 2015-03-15T13:03:44.000Z | 2015-03-15T13:03:44.000Z | evoware/scripts/resetfile.py | graik/evowarepy | 4ed2fbebb808b6be75479790ee23b19cb50b3039 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
## evoware/py -- python modules for Evoware scripting
## Copyright 2014 - 2019 Raik Gruenberg
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
"""Reset a (worklist) file to empty; or create an empty file."""
import sys, os
import evoware.fileutil as F
import evoware.dialogs as D
###########################
# MAIN
###########################
if __name__ == '__main__':
    # Truncate the file named on the command line to zero bytes,
    # creating it when it does not exist.
    f = ''
    try:
        if len(sys.argv) < 2:
            # NOTE(review): _use is not defined in this copy of the file -
            # presumably a usage/exit helper defined elsewhere; verify.
            _use()
        f = F.absfile(sys.argv[1])
        # opening for writing truncates / creates the file
        h = open(f, 'w')
        h.close()
    except Exception as why:
        # show the last exception in a dialog
        D.lastException('Error resetting file %r' % f)
| 27.377358 | 77 | 0.610613 | #!/usr/bin/env python
## evoware/py -- python modules for Evoware scripting
## Copyright 2014 - 2019 Raik Gruenberg
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
"""Reset a (worklist) file to empty; or create an empty file."""
import sys, os
import evoware.fileutil as F
import evoware.dialogs as D
def _use():
print("""
resetfile.py -- reset file to empty (0 Byte length)
Syntax:
resetfile.py <file>
If <file> exists, it will be overridden by an empty file. If <file> does not
exist, it will be created.
""")
sys.exit(0)
###########################
# MAIN
###########################
if __name__ == '__main__':
    # Truncate the file named on the command line to zero bytes,
    # creating it when it does not exist.
    f = ''
    try:
        if len(sys.argv) < 2:
            _use()
        f = F.absfile(sys.argv[1])
        # Opening in 'w' mode truncates an existing file and creates a
        # missing one; the with-block guarantees the handle is closed
        # even if an error occurs (the bare open()/close() pair did not).
        with open(f, 'w'):
            pass
    except Exception as why:
        # show the last exception in a dialog
        D.lastException('Error resetting file %r' % f)
| 231 | 0 | 23 |
56442242124bf91fdda8f794a68f6a9f91ccba36 | 1,240 | py | Python | LESO/test.py | thesethtruth/WB-thesis-model | f786d15fa446e8c089431853a82aa4d577e13f20 | [
"MIT"
] | null | null | null | LESO/test.py | thesethtruth/WB-thesis-model | f786d15fa446e8c089431853a82aa4d577e13f20 | [
"MIT"
] | null | null | null | LESO/test.py | thesethtruth/WB-thesis-model | f786d15fa446e8c089431853a82aa4d577e13f20 | [
"MIT"
] | null | null | null | # test.py
# a list of attributes a component should have for sure
basic = [
    'merit_tag',
    'styling',
    'dof',
    'lower',
    'upper',
    'lifetime',
    'capex',
    'opex',
    'variable_cost',
    'variable_income',
]

# attributes additionally required for dispatchable components
# (those exposing a ``power_control`` attribute)
power_control = [
    'positive',
    'negative',
]
| 23.396226 | 76 | 0.552419 | # test.py
# a list of attributes a component should have for sure
basic = [
    'merit_tag',
    'styling',
    'dof',
    'lower',
    'upper',
    'lifetime',
    'capex',
    'opex',
    'variable_cost',
    'variable_income',
]

# attributes additionally required for dispatchable components
# (those exposing a ``power_control`` attribute)
power_control = [
    'positive',
    'negative',
]


def attribute_test(component):
    """Validate that ``component`` defines all required attributes.

    Parameters
    ----------
    component:
        any object; ``basic`` attributes are always required, the
        ``power_control`` attributes additionally when the object has a
        ``power_control`` attribute.

    Raises
    ------
    ValueError
        when ``capex`` is 0 while ``lifetime`` is not None (a component
        without investment cost cannot have a finite lifetime).
    NotImplementedError
        when one or more required attributes are missing; the message
        lists them.
    """
    # collect all missing basic attributes
    attrs = [attr for attr in basic if not hasattr(component, attr)]
    # dispatchables additionally need the power-control attributes
    if hasattr(component, 'power_control'):
        attrs.extend(attr for attr in power_control if not hasattr(component, attr))
    # financial sanity check; guarded so a missing capex/lifetime attribute
    # is reported via NotImplementedError below instead of AttributeError
    if hasattr(component, 'capex') and hasattr(component, 'lifetime'):
        if component.capex == 0 and component.lifetime is not None:
            # BUGFIX: the original message concatenated without spaces
            # ("= 0whilest lifetime ...")
            raise ValueError(
                f'Component {component} cannot have CAPEX = 0 '
                'whilst lifetime is not none. Either set lifetime to None '
                'or supply CAPEX != 0.'
            )
    # raise error listing every missing attribute
    if attrs:
        # BUGFIX: the original message concatenated without spaces
        # ("lacksthese required attributes")
        raise NotImplementedError(
            f'This component ({component}) lacks '
            f'these required attributes: {attrs}'
        )
| 930 | 0 | 23 |
1b6dec9d5c8deee6cd01b4c6307a5be4fdde7f1a | 13,138 | py | Python | MATLAB_path_generation/network_som.py | AlexRookie/neurocluster | 685e4c2930e7af68b09a5ae8ed7008936d6e49d4 | [
"BSD-2-Clause"
] | null | null | null | MATLAB_path_generation/network_som.py | AlexRookie/neurocluster | 685e4c2930e7af68b09a5ae8ed7008936d6e49d4 | [
"BSD-2-Clause"
] | null | null | null | MATLAB_path_generation/network_som.py | AlexRookie/neurocluster | 685e4c2930e7af68b09a5ae8ed7008936d6e49d4 | [
"BSD-2-Clause"
] | null | null | null | import tensorflow as tf
#from tensorflow import keras
#from tensorflow.keras import backend as K
import numpy as np
#import matplotlib.pyplot as plt
from time import sleep
#=======================================================================================#
class SOMLayer(tf.keras.layers.Layer):
    """
    Self-Organizing Map layer class with rectangular topology
    # Example
    ```
    model.add(SOMLayer(map_size=(10,10)))
    ```
    # Arguments
    map_size: Tuple representing the size of the rectangular map. Number of prototypes is map_size[0]*map_size[1].
    prototypes: Numpy array with shape `(n_prototypes, latent_dim)` which represents the initial cluster centers
    # Input shape
    2D tensor with shape: `(n_samples, latent_dim)`
    # Output shape
    2D tensor with shape: `(n_samples, n_prototypes)`
    """
    # NOTE(review): this copy of the class defines only call(); __init__ and
    # build() (which would create self.prototypes) are absent here -
    # presumably stripped; see the complete definition elsewhere in this file.
    def call(self, inputs, **kwargs):
        """
        Calculate pairwise squared euclidean distances between inputs and prototype vectors
        Arguments:
            inputs: the variable containing data, Tensor with shape `(n_samples, latent_dim)`
        Return:
            d: distances between inputs and prototypes, Tensor with shape `(n_samples, n_prototypes)`
        """
        # Note: (tf.expand_dims(inputs, axis=1) - self.prototypes) has shape (n_samples, n_prototypes, latent_dim)
        d = tf.reduce_sum(tf.square(tf.expand_dims(inputs, axis=1) - self.prototypes), axis=2)
        return d
| 40.674923 | 160 | 0.590349 | import tensorflow as tf
#from tensorflow import keras
#from tensorflow.keras import backend as K
import numpy as np
#import matplotlib.pyplot as plt
from time import sleep
class Network:
    """SOM-based classifier wrapper around a Keras model.

    Bundles model definition (Input -> Flatten -> SOM -> Dense softmax),
    data preparation, SOM-style training with a shrinking neighborhood,
    prediction and (de)serialization helpers.
    """

    def __init__(self):
        # placeholders filled in by define_model / prepare_data / train_model
        self.model = []
        self.map_size = None
        self.units = None
        self.classes = None
        self.batch_size = None
        self.epochs = None
        self.l_rate = None

    def define_model(self, map_size=(10,10), units=(1,10), classes=0):
        """Build the Input->Flatten->SOM->Dense(softmax) two-output model."""
        self.map_size = np.asarray(map_size).astype(int)
        self.units = np.asarray(units).astype(int)
        self.classes = np.asarray(classes).astype(int)
        inputs = tf.keras.layers.Input(shape=(self.units[0], self.units[1]), name='input') #X_train.shape[-1]
        flatten = tf.keras.layers.Flatten(name='flatten')(inputs)
        som_layer = SOMLayer(map_size=self.map_size, name='SOM')(flatten)
        #self.model = tf.keras.models.Model(inputs=inputs, outputs=som_layer)
        outputs = tf.keras.layers.Dense(units=self.classes, activation='softmax', name='classifier')(som_layer)
        # two outputs: SOM distances and class probabilities
        self.model = tf.keras.models.Model(inputs=inputs, outputs=[som_layer, outputs])
        return self.model

    def prepare_data(self, X, y=None, training_percentage=70, batch=32, randomize=True):
        """Shuffle, shift and split (X, y) into train/validation arrays.

        NOTE(review): despite the ``y=None`` default, ``to_categorical(y)``
        is called unconditionally, so calling without labels fails - verify
        whether y is effectively required.
        """
        X = np.asarray(X)
        y = np.asarray(y)
        self.batch_size = int(batch)
        # One hot encode
        y = tf.keras.utils.to_categorical(y)
        if randomize:
            # flatten, permute samples and labels together, restore shape
            m = X.shape[1]
            n = X.shape[2]
            X = X.reshape(X.shape[0], m*n)
            perm = np.random.permutation(X.shape[0])
            X = X[perm, :]
            y = y[perm, :]
            X = X.reshape(X.shape[0], m, n)
        # Normalize
        #x = np.copy(X)
        #for i in range(x.shape[0]):
        #    x[i, 0, :] = (x[i, 0, :] - np.min(x[i, 0, :])) / (np.max(x[i, 0, :]) - np.min(x[i, 0, :]))
        #    x[i, 1, :] = (x[i, 1, :] - np.min(x[i, 1, :])) / (np.max(x[i, 1, :]) - np.min(x[i, 1, :]))
        # Shift: make every trajectory start at the origin
        x = np.copy(X)
        for i in range(x.shape[0]):
            x[i, 0, :] = x[i, 0, :] - x[i, 0, 0]
            x[i, 1, :] = x[i, 1, :] - x[i, 1, 0]
        num_of_samples = x.shape[0]
        train = int(training_percentage*num_of_samples/100)
        valid = num_of_samples-train
        if train < self.batch_size or valid < self.batch_size:
            self.batch_size = 1
        else:
            # Samples must be multiplier of batch
            train = int(train/self.batch_size) * self.batch_size
            valid = num_of_samples-train
            valid = int(valid/self.batch_size) * self.batch_size
        x_train = x[0:train, :, :]
        x_valid = x[train:train+valid, :, :]
        y_train = y[0:train, :]
        y_valid = y[train:train+valid, :]
        self.x_train = np.array(x_train)
        self.x_valid = np.array(x_valid)
        self.y_train = np.array(y_train)
        self.y_valid = np.array(y_valid)
        return self.x_train, self.x_valid, self.y_train, self.y_valid

    def kmeans_loss(self, y_pred, distances):
        """
        Calculate k-means reconstruction loss
        # Arguments
            y_pred: cluster assignments, numpy.array with shape `(n_samples,)`
            distances: pairwise squared euclidean distances between inputs and prototype vectors, numpy.array with shape `(n_samples, n_prototypes)`
        # Return
            k-means reconstruction loss
        """
        return np.mean([distances[i, y_pred[i]] for i in range(len(y_pred))])

    def map_dist(self, y_pred, map_size):
        """
        Calculate pairwise Manhattan distances between cluster assignments and map prototypes (rectangular grid topology)
        # Arguments
            y_pred: cluster assignments, numpy.array with shape `(n_samples,)`
        # Return
            pairwise distance matrix (map_dist[i,k] is the distance on the map between assigned cell of data point i and cell k)
        """
        n_prototypes = map_size[0]*map_size[1]
        labels = np.arange(n_prototypes)
        tmp = np.expand_dims(y_pred, axis=1)
        # row/column distance on the rectangular grid
        d_row = np.abs(tmp-labels)//map_size[1]
        d_col = np.abs(tmp%map_size[1]-labels%map_size[1])
        return d_row + d_col

    def neighborhood_function(self, d, T, neighborhood='gaussian'):
        """
        SOM neighborhood function (gaussian neighborhood)
        # Arguments
            d: distance on the map
            T: temperature parameter
        """
        # NOTE(review): returns None for any other ``neighborhood`` value.
        if neighborhood == 'gaussian':
            return np.exp(-(d**2)/(T**2))
        elif neighborhood == 'window':
            return (d <= T).astype(np.float32)

    def som_fit(self, x_train, y_train=None, decay='exponential'):
        """
        Training procedure
        # Arguments
            x_train: training set
            y_train: (optional) training labels
            decay: type of temperature decay ('exponential' or 'linear')
        """
        x_train = np.asarray(x_train)
        y_train = np.asarray(y_train)
        Tmax = 10  # initial temperature parameter
        Tmin = 0.1  # final temperature parameter
        som_epochs = self.epochs  # Number of epochs where SOM neighborhood is decreased
        eval_interval = 10  # Evaluate metrics on training/validation batch every eval_interval epochs
        # Set and compute some initial values
        index = 0
        T = Tmax
        for it in range(self.epochs):
            # Get training and validation batches
            #x_batch = np.expand_dims(x_train[index], axis=0)
            #y_batch = np.expand_dims(y_train[index], axis=0)
            if (index + 1) * self.batch_size >= x_train.shape[0]:
                # last (possibly short) batch, then wrap around
                x_batch = x_train[index * self.batch_size::]
                if y_train is not None:
                    y_batch = y_train[index * self.batch_size::]
                index = 0
            else:
                x_batch = x_train[index * self.batch_size:(index + 1) * self.batch_size]
                if y_train is not None:
                    y_batch = y_train[index * self.batch_size:(index + 1) * self.batch_size]
                index += 1
            # Compute cluster assignments for batches
            d, _ = self.model.predict(x_batch)
            y_pred = d.argmin(axis=1)
            # Update temperature parameter
            if it < som_epochs:
                if decay == 'exponential':
                    T = Tmax*(Tmin/Tmax)**(it/(som_epochs-1))
                elif decay == 'linear':
                    T = Tmax - (Tmax-Tmin)*(it/(som_epochs-1))
            # Compute topographic weights batches
            w_batch = self.neighborhood_function(self.map_dist(y_pred, self.map_size), T, neighborhood='gaussian')
            # Train on batch
            loss = self.model.train_on_batch(x_batch, [w_batch, y_batch])  # loss: ['loss', 'SOM_loss', 'classifier_loss', 'SOM_accuracy', 'classifier_accuracy']
            if it % eval_interval == 0:
                # Evaluate losses and metrics
                Lsom = loss[1]
                Lkm = self.kmeans_loss(y_pred, d)
                Ltop = loss[1] - self.kmeans_loss(y_pred, d)
                #quantization_err = quantization_error(d)
                #topographic_err = topographic_error(d, map_size)
                print('iteration {} - T={}'.format(it, T))
                print('[Train] - Lsom={:f} (Lkm={:f}/Ltop={:f})'.format(Lsom, Lkm, Ltop))
                #print('[Train] - Quantization err={:f} / Topographic err={:f}'.format(quantization_err, topographic_err))
                sleep(0.2)
        return self.model

    def som_loss(self, weights, distances):
        """
        Calculate SOM reconstruction loss
        # Arguments
            weights: weights for the weighted sum, Tensor with shape `(n_samples, n_prototypes)`
            distances: pairwise squared euclidean distances between inputs and prototype vectors, Tensor with shape `(n_samples, n_prototypes)`
        # Return
            SOM reconstruction loss
        """
        return tf.reduce_mean(tf.reduce_sum(weights*distances, axis=1))

    def rmse(self, y_true, y_pred):
        # Root mean squared error (rmse) for regression
        return tf.keras.backend.sqrt(tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true)))

    def train_model(self, x_train, y_train=None, epochs=50, learn_rate=0.01):
        """Compile the model and run the SOM training loop."""
        x_train = np.asarray(x_train)
        y_train = np.asarray(y_train).astype(np.int8)
        self.epochs = int(epochs)
        self.l_rate = learn_rate
        # Compile model
        #self.model.compile(optimizer='adam', loss=self.som_loss) #, metrics=[self.rmse])
        self.model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=self.l_rate),
                           loss='categorical_crossentropy',
                           metrics=['accuracy'])  #keras.optimizers.Adam(1e-3)
        # Train model
        self.model = self.som_fit(x_train, y_train)
        return self.model

    def predict(self, inputs):
        """Return (SOM distances, class probabilities) for the inputs."""
        inputs = np.asarray(inputs)
        if len(inputs.shape)==1:
            # promote a single sample to a batch of one
            inputs = np.expand_dims(inputs, axis=0)
        # Predict
        som_pred, pred = self.model(inputs)  # == model.predict([inputs])
        #pred = np.argmin(pred)
        #y_classes = pred.argmax(axis=-1)
        return som_pred, pred

    def save(self, file):
        """Save weights and the full model next to ``file``."""
        self.model.save_weights(file+'_weights.h5')
        self.model.save(file+'.h5')

    def load_model(self, file):
        """Load a previously saved full model.

        NOTE(review): a tf.keras Model has no ``.load()`` method (and
        ``self.model`` starts out as a list); presumably
        ``tf.keras.models.load_model(file+'.h5')`` was intended - verify.
        """
        self.model.load(file+'.h5')
        return self.model

    def load_weights(self, file):
        """Load previously saved weights into the current model."""
        self.model.load_weights(file+'_weights.h5')
        return self.model

    #def plot(self):
    #    # Plot
    #    som_weights = self.model.get_layer(name='SOM').get_weights()[0]
    #
    #    fig1, axes = plt.subplots(nrows=self.map_size[0], ncols=self.map_size[1], figsize=(10, 10))
    #    for k in range(self.map_size[0] * self.map_size[1]):
    #        axes[k // self.map_size[1]][k % self.map_size[1]].imshow(som_weights[k].reshape(2, self.units), cmap='gray')
    #        axes[k // self.map_size[1]][k % self.map_size[1]].axis('off')
    #    plt.subplots_adjust(hspace=0.05, wspace=0.05)
    #
    #    plt.draw() # non-blocking plot
    #    plt.pause(0.1)
#=======================================================================================#
class SOMLayer(tf.keras.layers.Layer):
    """
    Self-Organizing Map layer class with rectangular topology
    # Example
    ```
    model.add(SOMLayer(map_size=(10,10)))
    ```
    # Arguments
    map_size: Tuple representing the size of the rectangular map. Number of prototypes is map_size[0]*map_size[1].
    prototypes: Numpy array with shape `(n_prototypes, latent_dim)` which represents the initial cluster centers
    # Input shape
    2D tensor with shape: `(n_samples, latent_dim)`
    # Output shape
    2D tensor with shape: `(n_samples, n_prototypes)`
    """
    def __init__(self, map_size, prototypes=None, **kwargs):
        # allow latent_dim as an alias for input_shape
        if 'input_shape' not in kwargs and 'latent_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('latent_dim'),)
        super(SOMLayer, self).__init__(**kwargs)
        self.map_size = map_size
        self.n_prototypes = map_size[0]*map_size[1]
        self.initial_prototypes = prototypes
        self.input_spec = tf.keras.layers.InputSpec(ndim=2)
        self.prototypes = None
        self.built = False

    def build(self, input_shape):
        # create the prototype weight matrix once the latent dim is known
        assert(len(input_shape) == 2)
        input_dim = input_shape[1]
        self.input_spec = tf.keras.layers.InputSpec(dtype=tf.float32, shape=(None, input_dim))
        self.prototypes = self.add_weight(shape=(self.n_prototypes, input_dim), initializer='glorot_uniform', name='prototypes')
        if self.initial_prototypes is not None:
            # seed the prototypes with the user-provided cluster centers
            self.set_weights(self.initial_prototypes)
            del self.initial_prototypes
        self.built = True

    def call(self, inputs, **kwargs):
        """
        Calculate pairwise squared euclidean distances between inputs and prototype vectors
        Arguments:
            inputs: the variable containing data, Tensor with shape `(n_samples, latent_dim)`
        Return:
            d: distances between inputs and prototypes, Tensor with shape `(n_samples, n_prototypes)`
        """
        # Note: (tf.expand_dims(inputs, axis=1) - self.prototypes) has shape (n_samples, n_prototypes, latent_dim)
        d = tf.reduce_sum(tf.square(tf.expand_dims(inputs, axis=1) - self.prototypes), axis=2)
        return d

    def compute_output_shape(self, input_shape):
        # one distance per prototype
        assert(input_shape and len(input_shape) == 2)
        return input_shape[0], self.n_prototypes

    def get_config(self):
        # serialize the constructor argument for model saving/loading
        config = {'map_size': self.map_size}
        base_config = super(SOMLayer, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
| 5,413 | 6,120 | 131 |
b5e67a019e3fa2d2d081a184ac33d2894fe68755 | 345 | py | Python | 034_Aumentos_multiplos.py | fabioeomedeiros/Python-Base | ef9c1c66b3221f71d1c8dcaf4c2f86503712e9f1 | [
"MIT"
] | null | null | null | 034_Aumentos_multiplos.py | fabioeomedeiros/Python-Base | ef9c1c66b3221f71d1c8dcaf4c2f86503712e9f1 | [
"MIT"
] | null | null | null | 034_Aumentos_multiplos.py | fabioeomedeiros/Python-Base | ef9c1c66b3221f71d1c8dcaf4c2f86503712e9f1 | [
"MIT"
] | null | null | null | # 034_Aumentos_multiplos.py
def novo_salario(salario):
    """Return the salary after the raise: +15% up to R$1250, +10% above."""
    return salario * 1.15 if salario <= 1250 else salario * 1.10


def main():
    """Interactive entry point: prompt for the salary, compute and report."""
    print()
    salario = float(input("Salário atual: R$"))
    print()
    salario = novo_salario(salario)
    print(f"Seu novo salário será: {salario:.2f}")
    print()


# Guarding the interactive code lets the raise rule be imported and tested
# without prompting; the commented-out duplicate of the rule was removed.
if __name__ == "__main__":
    main()
| 23 | 62 | 0.628986 | # 034_Aumentos_multiplos.py
print()
salario = float(input("Salário atual: R$"))
print()
# Raise policy: 15% for salaries up to R$1250, 10% for anything above.
if salario <= 1250:
    salario *= 1.15
else:
    salario *= 1.10
print(f"Seu novo salário será: {salario:.2f}")
print()
| 0 | 0 | 0 |
c9e87550b730c5f553198eebd8acac9773639a86 | 898 | py | Python | tests/ctrl_c_test.py | twosigma/uberjob | e8afcadf8ca0cca8583b4d24ca13d000ccbf52c6 | [
"Apache-2.0"
] | 11 | 2020-09-27T13:15:29.000Z | 2022-03-08T17:13:07.000Z | tests/ctrl_c_test.py | twosigma/uberjob | e8afcadf8ca0cca8583b4d24ca13d000ccbf52c6 | [
"Apache-2.0"
] | 8 | 2020-11-13T21:52:11.000Z | 2022-02-05T20:59:51.000Z | tests/ctrl_c_test.py | twosigma/uberjob | e8afcadf8ca0cca8583b4d24ca13d000ccbf52c6 | [
"Apache-2.0"
] | 2 | 2021-05-18T01:22:50.000Z | 2021-06-16T17:43:19.000Z | #
# Copyright 2020 Two Sigma Open Source, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This program should exit within a second or two if CTRL+C is pressed (SIGINT)."""
import time
import uberjob
def main():
    """Build a plan of 1000 one-second sleeps and run it on 4 workers.

    Restored: ``main`` was referenced below but never defined, so running
    the script raised NameError.  While this runs, pressing CTRL+C should
    make the program exit within a second or two (see module docstring).
    """
    plan = uberjob.Plan()
    items = [plan.call(time.sleep, 1) for _ in range(1000)]
    uberjob.run(plan, output=items, max_workers=4)


if __name__ == "__main__":
    main()
| 29.933333 | 84 | 0.727171 | #
# Copyright 2020 Two Sigma Open Source, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This program should exit within a second or two if CTRL+C is pressed (SIGINT)."""
import time
import uberjob
def main():
    """Queue 1000 one-second sleep calls in a plan and execute it with four
    workers; used to exercise SIGINT handling."""
    sleep_plan = uberjob.Plan()
    pending = []
    for _ in range(1000):
        pending.append(sleep_plan.call(time.sleep, 1))
    uberjob.run(sleep_plan, output=pending, max_workers=4)
if __name__ == "__main__":
main()
| 127 | 0 | 23 |
fa103c52f70424ba4b8057f67aac8aa5fac1d2df | 782 | py | Python | contests_atcoder/abc175/abc175_e_np.py | takelifetime/competitive-programming | e7cf8ef923ccefad39a1727ca94c610d650fcb76 | [
"BSD-2-Clause"
] | null | null | null | contests_atcoder/abc175/abc175_e_np.py | takelifetime/competitive-programming | e7cf8ef923ccefad39a1727ca94c610d650fcb76 | [
"BSD-2-Clause"
] | 1 | 2021-01-02T06:36:51.000Z | 2021-01-02T06:36:51.000Z | contests_atcoder/abc175/abc175_e_np.py | takelifetime/competitive-programming | e7cf8ef923ccefad39a1727ca94c610d650fcb76 | [
"BSD-2-Clause"
] | null | null | null | from collections import defaultdict
import numpy as np
r, c, k = map(int, input().split())  # grid rows, columns, number of items
items = np.zeros((r, c), np.int32)  # items[y, x]: item value at (y, x), 0 if none
for _ in range(k):
    y, x, v = map(int, input().split())
    items[y - 1, x - 1] = v  # input coordinates are 1-based
# dp[y, x, j]: best total value on arrival at (y, x) having picked j items
# (0..3) from the current row y.
dp = np.zeros((r, c, 4), np.int64)
for y in range(r):
    for x in range(c):
        # Enter from above (y-1, x): the per-row pick count resets to 0, or
        # to 1 if this cell's item is taken immediately.
        dp[y, x, 0] = max(dp[y - 1, x]) if y != 0 else 0
        dp[y, x, 1] = max(dp[y - 1, x]) + items[y, x] if y != 0 else 0
        # Enter from the left (y, x-1) without taking this cell's item.
        dp[y, x, 0] = max(dp[y, x, 0], dp[y, x - 1, 0]) if x != 0 else dp[y, x, 0]
        if x != 0:
            # NOTE: this loop variable shadows the input count k, which is no
            # longer needed at this point.
            for k in range(3):
                # Either take this cell's item (j -> j+1, capped at 3 per
                # row) or carry j+1 picks over from the left.
                dp[y, x, k + 1] = max(dp[y, x - 1, k] + items[y, x], dp[y, x - 1, k + 1], dp[y, x, k + 1])
        else:
            # First column: taking the item is the only way to reach j = 1.
            dp[y, x, 1] = dp[y, x, 0] + items[y, x]
#print(*dp, sep="\n")
print(max(dp[-1, -1])) | 30.076923 | 106 | 0.446292 | from collections import defaultdict
import numpy as np
r, c, k = map(int, input().split())  # grid rows, columns, number of items
items = np.zeros((r, c), np.int32)  # items[y, x]: item value at (y, x), 0 if none
for _ in range(k):
    y, x, v = map(int, input().split())
    items[y - 1, x - 1] = v  # input coordinates are 1-based
# dp[y, x, j]: best total value on arrival at (y, x) having picked j items
# (0..3) from the current row y.
dp = np.zeros((r, c, 4), np.int64)
for y in range(r):
    for x in range(c):
        # Enter from above (y-1, x): the per-row pick count resets to 0, or
        # to 1 if this cell's item is taken immediately.
        dp[y, x, 0] = max(dp[y - 1, x]) if y != 0 else 0
        dp[y, x, 1] = max(dp[y - 1, x]) + items[y, x] if y != 0 else 0
        # Enter from the left (y, x-1) without taking this cell's item.
        dp[y, x, 0] = max(dp[y, x, 0], dp[y, x - 1, 0]) if x != 0 else dp[y, x, 0]
        if x != 0:
            # NOTE: this loop variable shadows the input count k, which is no
            # longer needed at this point.
            for k in range(3):
                # Either take this cell's item (j -> j+1, capped at 3 per
                # row) or carry j+1 picks over from the left.
                dp[y, x, k + 1] = max(dp[y, x - 1, k] + items[y, x], dp[y, x - 1, k + 1], dp[y, x, k + 1])
        else:
            # First column: taking the item is the only way to reach j = 1.
            dp[y, x, 1] = dp[y, x, 0] + items[y, x]
#print(*dp, sep="\n")
print(max(dp[-1, -1])) | 0 | 0 | 0 |
ed08999c58b13ae8b6317ac1085c46f6e2599ce6 | 1,536 | py | Python | examples/direct_shape_optimization/hesaffBaum.py | rdguez-mariano/affnet | a3f0bb32d9001d1daf024f38d29867f37816ea78 | [
"MIT"
] | 211 | 2017-11-21T11:42:49.000Z | 2022-03-17T12:42:54.000Z | examples/direct_shape_optimization/hesaffBaum.py | rdguez-mariano/affnet | a3f0bb32d9001d1daf024f38d29867f37816ea78 | [
"MIT"
] | 31 | 2017-12-31T18:47:49.000Z | 2020-05-26T14:43:53.000Z | examples/direct_shape_optimization/hesaffBaum.py | rdguez-mariano/affnet | a3f0bb32d9001d1daf024f38d29867f37816ea78 | [
"MIT"
] | 47 | 2017-12-04T01:03:23.000Z | 2022-03-18T11:50:28.000Z | #!/usr/bin/python2 -utt
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import numpy as np
import sys
import os
import time
from PIL import Image
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torch.optim as optim
from tqdm import tqdm
import math
import torch.nn.functional as F
from copy import deepcopy
from SparseImgRepresenter import ScaleSpaceAffinePatchExtractor
from LAF import denormalizeLAFs, LAFs2ellT, abc2A
from Utils import line_prepender
from architectures import AffNetFast
from HandCraftedModules import AffineShapeEstimator
USE_CUDA = False
# Parse CLI arguments: input image, output feature file, feature count.
try:
    input_img_fname = sys.argv[1]
    output_fname = sys.argv[2]
    nfeats = int(sys.argv[3])
except:
    print "Wrong input format. Try python hesaffBaum.py imgs/cat.png cat.txt 2000"
    sys.exit(1)
# Load the image and collapse RGB to a single grayscale channel.
img = Image.open(input_img_fname).convert('RGB')
img = np.mean(np.array(img), axis = 2)
# Wrap as a (1, 1, H, W) inference-only tensor (old PyTorch Variable API;
# volatile=True disabled autograd in pre-0.4 torch).
var_image = torch.autograd.Variable(torch.from_numpy(img.astype(np.float32)), volatile = True)
var_image_reshape = var_image.view(1, 1, var_image.size(0),var_image.size(1))
# Hessian-affine detector using Baumberg iterations for affine shape
# estimation (no learned AffNet in this variant).
HA = ScaleSpaceAffinePatchExtractor( mrSize = 5.192, num_features = nfeats, border = 5, num_Baum_iters = 16, AffNet = AffineShapeEstimator(patch_size=19))
if USE_CUDA:
    HA = HA.cuda()
    var_image_reshape = var_image_reshape.cuda()
LAFs, resp = HA(var_image_reshape)
# Convert local affine frames to ellipse parameters and write them out;
# the two prepended lines (a scale constant and the feature count) match
# the Oxford/VGG-style feature file layout — presumably; confirm with the
# downstream consumer.
ells = LAFs2ellT(LAFs.cpu()).cpu().numpy()
np.savetxt(output_fname, ells, delimiter=' ', fmt='%10.10f')
line_prepender(output_fname, str(len(ells)))
line_prepender(output_fname, '1.0')
| 30.117647 | 154 | 0.763672 | #!/usr/bin/python2 -utt
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import numpy as np
import sys
import os
import time
from PIL import Image
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torch.optim as optim
from tqdm import tqdm
import math
import torch.nn.functional as F
from copy import deepcopy
from SparseImgRepresenter import ScaleSpaceAffinePatchExtractor
from LAF import denormalizeLAFs, LAFs2ellT, abc2A
from Utils import line_prepender
from architectures import AffNetFast
from HandCraftedModules import AffineShapeEstimator
USE_CUDA = False
# Parse CLI arguments: input image, output feature file, feature count.
try:
    input_img_fname = sys.argv[1]
    output_fname = sys.argv[2]
    nfeats = int(sys.argv[3])
except:
    print "Wrong input format. Try python hesaffBaum.py imgs/cat.png cat.txt 2000"
    sys.exit(1)
# Load the image and collapse RGB to a single grayscale channel.
img = Image.open(input_img_fname).convert('RGB')
img = np.mean(np.array(img), axis = 2)
# Wrap as a (1, 1, H, W) inference-only tensor (old PyTorch Variable API;
# volatile=True disabled autograd in pre-0.4 torch).
var_image = torch.autograd.Variable(torch.from_numpy(img.astype(np.float32)), volatile = True)
var_image_reshape = var_image.view(1, 1, var_image.size(0),var_image.size(1))
# Hessian-affine detector using Baumberg iterations for affine shape
# estimation (no learned AffNet in this variant).
HA = ScaleSpaceAffinePatchExtractor( mrSize = 5.192, num_features = nfeats, border = 5, num_Baum_iters = 16, AffNet = AffineShapeEstimator(patch_size=19))
if USE_CUDA:
    HA = HA.cuda()
    var_image_reshape = var_image_reshape.cuda()
LAFs, resp = HA(var_image_reshape)
# Convert local affine frames to ellipse parameters and write them out;
# the two prepended lines (a scale constant and the feature count) match
# the Oxford/VGG-style feature file layout — presumably; confirm with the
# downstream consumer.
ells = LAFs2ellT(LAFs.cpu()).cpu().numpy()
np.savetxt(output_fname, ells, delimiter=' ', fmt='%10.10f')
line_prepender(output_fname, str(len(ells)))
line_prepender(output_fname, '1.0')
| 0 | 0 | 0 |
d816df9d1e5c02c753f0fea53b0d3108eb531074 | 765 | py | Python | packages/pyre/schemata/EnvVar.py | rtburns-jpl/pyre | ffc4fc1b2936e355f709d084eb4055954960b3a2 | [
"BSD-3-Clause"
] | null | null | null | packages/pyre/schemata/EnvVar.py | rtburns-jpl/pyre | ffc4fc1b2936e355f709d084eb4055954960b3a2 | [
"BSD-3-Clause"
] | 1 | 2021-06-10T23:42:13.000Z | 2021-06-10T23:42:13.000Z | packages/pyre/schemata/EnvVar.py | jlmaurer/pyre | 6af38a83621d7d6228d147b4bb94f97fbb10f6e2 | [
"BSD-3-Clause"
] | 2 | 2020-08-31T18:07:52.000Z | 2021-12-10T08:54:39.000Z | # -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2020 all rights reserved
#
# externals
import os
# superclass
from .String import String
# declaration
class EnvVar(String):
    """
    A type declarator for strings whose default values are associated with an environment variable
    """

    # constants
    typename = 'envvar' # the name of my type

    # meta-methods
    def __init__(self, variable, **kwds):
        """
        Restored: without this constructor the class never consulted the
        environment, defeating its purpose (the '# meta-methods' marker was
        left dangling).
        """
        # compute the default value by attempting to read the value from the environment
        default = os.environ.get(variable, str())
        # chain up
        super().__init__(default=default, **kwds)
        # save the variable name
        self.envvar = variable
        # all done
        return
# end of file
| 20.131579 | 98 | 0.639216 | # -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2020 all rights reserved
#
# externals
import os
# superclass
from .String import String
# declaration
class EnvVar(String):
    """
    A type declarator for strings whose default values are associated with an environment variable
    """

    # constants
    typename = 'envvar' # the name of my type

    # meta-methods
    def __init__(self, variable, **kwargs):
        """Initialize with the default pulled from {variable} in the environment."""
        # look up the environment variable; fall back to an empty string
        fallback = os.environ.get(variable, "")
        # let {String} finish the construction using this default
        super().__init__(default=fallback, **kwargs)
        # remember which variable this instance is bound to
        self.envvar = variable
# end of file
| 322 | 0 | 26 |
75b598f5df23bfbeb73e495c28f7ea6456fc6405 | 1,367 | py | Python | src/tap/stream/load.py | lmmx/tap | d60ce0ec9b4b0545b5ed2b3dfeea956457999b8d | [
"MIT"
] | null | null | null | src/tap/stream/load.py | lmmx/tap | d60ce0ec9b4b0545b5ed2b3dfeea956457999b8d | [
"MIT"
] | 19 | 2021-02-15T23:18:35.000Z | 2021-11-04T11:32:40.000Z | src/tap/stream/load.py | lmmx/tap | d60ce0ec9b4b0545b5ed2b3dfeea956457999b8d | [
"MIT"
] | null | null | null | from ..share.cal import parse_abs_from_rel_date
from .streams import Stream
__all__ = ["load_stream"]
def load_stream(
programme="Today",
station="r4",
broadcaster="bbc",
ymd=None,
ymd_ago=None,
**stream_opts,
):
"""
Create a `Stream` for a specific episode of a radio programme from the named
arguments and pass `stream_opts` through.
`ymd` and `ymd_ago` are options to specify either an absolute
or relative date as `(year, month, day)` tuple of 3 integers in both cases.
`ymd` defaults to today's date and `ymd_ago` defaults to `(0,0,0)`.
`stream_opts` include:
- `transcribe=False` to determine whether the `Stream.transcribe`
method is called upon initialisation
- `reload=False` to control whether to reload the stream from disk
- `min_s=5.`/`max_s=50.` to control the min./max. audio segment length.
If `reload` is True, do not pull/preprocess/transcribe: the transcripts are expected
to already exist on disk, so just load them from there and recreate the `Stream`.
"""
if broadcaster != "bbc":
raise NotImplementedError("Only currently supporting BBC stations")
date = parse_abs_from_rel_date(ymd=ymd, ymd_ago=ymd_ago)
ymd = (date.year, date.month, date.day)
stream = Stream(programme, station, broadcaster, ymd, **stream_opts)
return stream
| 35.973684 | 88 | 0.697147 | from ..share.cal import parse_abs_from_rel_date
from .streams import Stream
__all__ = ["load_stream"]
def load_stream(
programme="Today",
station="r4",
broadcaster="bbc",
ymd=None,
ymd_ago=None,
**stream_opts,
):
"""
Create a `Stream` for a specific episode of a radio programme from the named
arguments and pass `stream_opts` through.
`ymd` and `ymd_ago` are options to specify either an absolute
or relative date as `(year, month, day)` tuple of 3 integers in both cases.
`ymd` defaults to today's date and `ymd_ago` defaults to `(0,0,0)`.
`stream_opts` include:
- `transcribe=False` to determine whether the `Stream.transcribe`
method is called upon initialisation
- `reload=False` to control whether to reload the stream from disk
- `min_s=5.`/`max_s=50.` to control the min./max. audio segment length.
If `reload` is True, do not pull/preprocess/transcribe: the transcripts are expected
to already exist on disk, so just load them from there and recreate the `Stream`.
"""
if broadcaster != "bbc":
raise NotImplementedError("Only currently supporting BBC stations")
date = parse_abs_from_rel_date(ymd=ymd, ymd_ago=ymd_ago)
ymd = (date.year, date.month, date.day)
stream = Stream(programme, station, broadcaster, ymd, **stream_opts)
return stream
| 0 | 0 | 0 |
1c350cff265b3d8016a43c446d64999c2d32b3e3 | 6,479 | py | Python | tests/test_packages/test_skills/test_generic_seller/test_dialogues.py | valory-xyz/agents-aea | 8f38efa96041b0156ed1ae328178e395dbabf2fc | [
"Apache-2.0"
] | 28 | 2021-10-31T18:54:14.000Z | 2022-03-17T13:10:43.000Z | tests/test_packages/test_skills/test_generic_seller/test_dialogues.py | valory-xyz/agents-aea | 8f38efa96041b0156ed1ae328178e395dbabf2fc | [
"Apache-2.0"
] | 66 | 2021-10-31T11:55:48.000Z | 2022-03-31T06:26:23.000Z | tests/test_packages/test_skills/test_generic_seller/test_dialogues.py | valory-xyz/agents-aea | 8f38efa96041b0156ed1ae328178e395dbabf2fc | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2022 Valory AG
# Copyright 2018-2021 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests of the dialogue classes of the generic seller skill."""
from pathlib import Path
from typing import cast
import pytest
from aea.exceptions import AEAEnforceError
from aea.helpers.transaction.base import Terms
from aea.protocols.dialogue.base import DialogueLabel
from aea.test_tools.test_skill import BaseSkillTestCase, COUNTERPARTY_AGENT_ADDRESS
from packages.fetchai.protocols.default.message import DefaultMessage
from packages.fetchai.protocols.fipa.message import FipaMessage
from packages.fetchai.protocols.ledger_api.message import LedgerApiMessage
from packages.fetchai.protocols.oef_search.message import OefSearchMessage
from packages.fetchai.skills.generic_seller.dialogues import (
DefaultDialogue,
DefaultDialogues,
FipaDialogue,
FipaDialogues,
LedgerApiDialogue,
LedgerApiDialogues,
OefSearchDialogue,
OefSearchDialogues,
)
from tests.conftest import ROOT_DIR
class TestDialogues(BaseSkillTestCase):
    """Test dialogue classes of generic seller."""
    # Skill under test: the packaged fetchai generic_seller skill.
    path_to_skill = Path(ROOT_DIR, "packages", "fetchai", "skills", "generic_seller")
    @classmethod
    def setup(cls):
        """Setup the test class."""
        super().setup()
        # Grab the dialogue registries exposed on the skill context so each
        # test creates dialogues through the skill's own models.
        cls.default_dialogues = cast(
            DefaultDialogues, cls._skill.skill_context.default_dialogues
        )
        cls.fipa_dialogues = cast(
            FipaDialogues, cls._skill.skill_context.fipa_dialogues
        )
        cls.ledger_api_dialogues = cast(
            LedgerApiDialogues, cls._skill.skill_context.ledger_api_dialogues
        )
        cls.oef_search_dialogues = cast(
            OefSearchDialogues, cls._skill.skill_context.oef_search_dialogues
        )
    def test_default_dialogues(self):
        """Test the DefaultDialogues class."""
        _, dialogue = self.default_dialogues.create(
            counterparty=COUNTERPARTY_AGENT_ADDRESS,
            performative=DefaultMessage.Performative.BYTES,
            content=b"some_content",
        )
        # A freshly created default dialogue is owned by this agent and
        # addressed by the agent address.
        assert dialogue.role == DefaultDialogue.Role.AGENT
        assert dialogue.self_address == self.skill.skill_context.agent_address
    def test_fipa_dialogue(self):
        """Test the FipaDialogue class."""
        fipa_dialogue = FipaDialogue(
            DialogueLabel(
                ("", ""),
                COUNTERPARTY_AGENT_ADDRESS,
                self.skill.skill_context.agent_address,
            ),
            self.skill.skill_context.agent_address,
            role=DefaultDialogue.Role.AGENT,
        )
        # terms
        # The terms property is write-once: reading before assignment raises,
        # and a second assignment raises as well.
        with pytest.raises(AEAEnforceError, match="Terms not set!"):
            assert fipa_dialogue.terms
        terms = Terms(
            "some_ledger_id",
            self.skill.skill_context.agent_address,
            "counterprty",
            {"currency_id": 50},
            {"good_id": -10},
            "some_nonce",
        )
        fipa_dialogue.terms = terms
        with pytest.raises(AEAEnforceError, match="Terms already set!"):
            fipa_dialogue.terms = terms
        assert fipa_dialogue.terms == terms
    def test_fipa_dialogues(self):
        """Test the FipaDialogues class."""
        _, dialogue = self.fipa_dialogues.create(
            counterparty=COUNTERPARTY_AGENT_ADDRESS,
            performative=FipaMessage.Performative.CFP,
            query="some_query",
        )
        # The generic seller always takes the SELLER side of a FIPA dialogue.
        assert dialogue.role == FipaDialogue.Role.SELLER
        assert dialogue.self_address == self.skill.skill_context.agent_address
    def test_ledger_api_dialogue(self):
        """Test the LedgerApiDialogue class."""
        ledger_api_dialogue = LedgerApiDialogue(
            DialogueLabel(
                ("", ""),
                COUNTERPARTY_AGENT_ADDRESS,
                self.skill.skill_context.agent_address,
            ),
            self.skill.skill_context.agent_address,
            role=LedgerApiDialogue.Role.AGENT,
        )
        # associated_fipa_dialogue
        # Like `terms` above, the associated FIPA dialogue is a write-once
        # property.
        with pytest.raises(AEAEnforceError, match="FipaDialogue not set!"):
            assert ledger_api_dialogue.associated_fipa_dialogue
        fipa_dialogue = FipaDialogue(
            DialogueLabel(
                ("", ""),
                COUNTERPARTY_AGENT_ADDRESS,
                self.skill.skill_context.agent_address,
            ),
            self.skill.skill_context.agent_address,
            role=FipaDialogue.Role.BUYER,
        )
        ledger_api_dialogue.associated_fipa_dialogue = fipa_dialogue
        with pytest.raises(AEAEnforceError, match="FipaDialogue already set!"):
            ledger_api_dialogue.associated_fipa_dialogue = fipa_dialogue
        assert ledger_api_dialogue.associated_fipa_dialogue == fipa_dialogue
    def test_ledger_api_dialogues(self):
        """Test the LedgerApiDialogues class."""
        _, dialogue = self.ledger_api_dialogues.create(
            counterparty=COUNTERPARTY_AGENT_ADDRESS,
            performative=LedgerApiMessage.Performative.GET_BALANCE,
            ledger_id="some_ledger_id",
            address="some_address",
        )
        assert dialogue.role == LedgerApiDialogue.Role.AGENT
        # Ledger/OEF dialogues identify themselves by skill id rather than
        # the agent address.
        assert dialogue.self_address == str(self.skill.skill_context.skill_id)
    def test_oef_search_dialogues(self):
        """Test the OefSearchDialogues class."""
        _, dialogue = self.oef_search_dialogues.create(
            counterparty=COUNTERPARTY_AGENT_ADDRESS,
            performative=OefSearchMessage.Performative.SEARCH_SERVICES,
            query="some_query",
        )
        assert dialogue.role == OefSearchDialogue.Role.AGENT
        assert dialogue.self_address == str(self.skill.skill_context.skill_id)
| 38.337278 | 89 | 0.656428 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2022 Valory AG
# Copyright 2018-2021 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests of the dialogue classes of the generic seller skill."""
from pathlib import Path
from typing import cast
import pytest
from aea.exceptions import AEAEnforceError
from aea.helpers.transaction.base import Terms
from aea.protocols.dialogue.base import DialogueLabel
from aea.test_tools.test_skill import BaseSkillTestCase, COUNTERPARTY_AGENT_ADDRESS
from packages.fetchai.protocols.default.message import DefaultMessage
from packages.fetchai.protocols.fipa.message import FipaMessage
from packages.fetchai.protocols.ledger_api.message import LedgerApiMessage
from packages.fetchai.protocols.oef_search.message import OefSearchMessage
from packages.fetchai.skills.generic_seller.dialogues import (
DefaultDialogue,
DefaultDialogues,
FipaDialogue,
FipaDialogues,
LedgerApiDialogue,
LedgerApiDialogues,
OefSearchDialogue,
OefSearchDialogues,
)
from tests.conftest import ROOT_DIR
class TestDialogues(BaseSkillTestCase):
    """Test dialogue classes of generic seller."""
    # Skill under test: the packaged fetchai generic_seller skill.
    path_to_skill = Path(ROOT_DIR, "packages", "fetchai", "skills", "generic_seller")
    @classmethod
    def setup(cls):
        """Setup the test class."""
        super().setup()
        # Grab the dialogue registries exposed on the skill context so each
        # test creates dialogues through the skill's own models.
        cls.default_dialogues = cast(
            DefaultDialogues, cls._skill.skill_context.default_dialogues
        )
        cls.fipa_dialogues = cast(
            FipaDialogues, cls._skill.skill_context.fipa_dialogues
        )
        cls.ledger_api_dialogues = cast(
            LedgerApiDialogues, cls._skill.skill_context.ledger_api_dialogues
        )
        cls.oef_search_dialogues = cast(
            OefSearchDialogues, cls._skill.skill_context.oef_search_dialogues
        )
    def test_default_dialogues(self):
        """Test the DefaultDialogues class."""
        _, dialogue = self.default_dialogues.create(
            counterparty=COUNTERPARTY_AGENT_ADDRESS,
            performative=DefaultMessage.Performative.BYTES,
            content=b"some_content",
        )
        # A freshly created default dialogue is owned by this agent and
        # addressed by the agent address.
        assert dialogue.role == DefaultDialogue.Role.AGENT
        assert dialogue.self_address == self.skill.skill_context.agent_address
    def test_fipa_dialogue(self):
        """Test the FipaDialogue class."""
        fipa_dialogue = FipaDialogue(
            DialogueLabel(
                ("", ""),
                COUNTERPARTY_AGENT_ADDRESS,
                self.skill.skill_context.agent_address,
            ),
            self.skill.skill_context.agent_address,
            role=DefaultDialogue.Role.AGENT,
        )
        # terms
        # The terms property is write-once: reading before assignment raises,
        # and a second assignment raises as well.
        with pytest.raises(AEAEnforceError, match="Terms not set!"):
            assert fipa_dialogue.terms
        terms = Terms(
            "some_ledger_id",
            self.skill.skill_context.agent_address,
            "counterprty",
            {"currency_id": 50},
            {"good_id": -10},
            "some_nonce",
        )
        fipa_dialogue.terms = terms
        with pytest.raises(AEAEnforceError, match="Terms already set!"):
            fipa_dialogue.terms = terms
        assert fipa_dialogue.terms == terms
    def test_fipa_dialogues(self):
        """Test the FipaDialogues class."""
        _, dialogue = self.fipa_dialogues.create(
            counterparty=COUNTERPARTY_AGENT_ADDRESS,
            performative=FipaMessage.Performative.CFP,
            query="some_query",
        )
        # The generic seller always takes the SELLER side of a FIPA dialogue.
        assert dialogue.role == FipaDialogue.Role.SELLER
        assert dialogue.self_address == self.skill.skill_context.agent_address
    def test_ledger_api_dialogue(self):
        """Test the LedgerApiDialogue class."""
        ledger_api_dialogue = LedgerApiDialogue(
            DialogueLabel(
                ("", ""),
                COUNTERPARTY_AGENT_ADDRESS,
                self.skill.skill_context.agent_address,
            ),
            self.skill.skill_context.agent_address,
            role=LedgerApiDialogue.Role.AGENT,
        )
        # associated_fipa_dialogue
        # Like `terms` above, the associated FIPA dialogue is a write-once
        # property.
        with pytest.raises(AEAEnforceError, match="FipaDialogue not set!"):
            assert ledger_api_dialogue.associated_fipa_dialogue
        fipa_dialogue = FipaDialogue(
            DialogueLabel(
                ("", ""),
                COUNTERPARTY_AGENT_ADDRESS,
                self.skill.skill_context.agent_address,
            ),
            self.skill.skill_context.agent_address,
            role=FipaDialogue.Role.BUYER,
        )
        ledger_api_dialogue.associated_fipa_dialogue = fipa_dialogue
        with pytest.raises(AEAEnforceError, match="FipaDialogue already set!"):
            ledger_api_dialogue.associated_fipa_dialogue = fipa_dialogue
        assert ledger_api_dialogue.associated_fipa_dialogue == fipa_dialogue
    def test_ledger_api_dialogues(self):
        """Test the LedgerApiDialogues class."""
        _, dialogue = self.ledger_api_dialogues.create(
            counterparty=COUNTERPARTY_AGENT_ADDRESS,
            performative=LedgerApiMessage.Performative.GET_BALANCE,
            ledger_id="some_ledger_id",
            address="some_address",
        )
        assert dialogue.role == LedgerApiDialogue.Role.AGENT
        # Ledger/OEF dialogues identify themselves by skill id rather than
        # the agent address.
        assert dialogue.self_address == str(self.skill.skill_context.skill_id)
    def test_oef_search_dialogues(self):
        """Test the OefSearchDialogues class."""
        _, dialogue = self.oef_search_dialogues.create(
            counterparty=COUNTERPARTY_AGENT_ADDRESS,
            performative=OefSearchMessage.Performative.SEARCH_SERVICES,
            query="some_query",
        )
        assert dialogue.role == OefSearchDialogue.Role.AGENT
        assert dialogue.self_address == str(self.skill.skill_context.skill_id)
| 0 | 0 | 0 |
dc48bed3180a4802fc80473edc5b04249f1db382 | 1,295 | py | Python | data/visualizer/viz.py | PACE-challenge/Cluster-Editing-PACE-2021-instances- | 67f5df368c400f15fa2527ea1eb48ac1bd6a2d7a | [
"CC0-1.0"
] | 1 | 2021-08-23T14:47:55.000Z | 2021-08-23T14:47:55.000Z | data/visualizer/viz.py | PACE-challenge/Cluster-Editing-PACE-2021-instances- | 67f5df368c400f15fa2527ea1eb48ac1bd6a2d7a | [
"CC0-1.0"
] | null | null | null | data/visualizer/viz.py | PACE-challenge/Cluster-Editing-PACE-2021-instances- | 67f5df368c400f15fa2527ea1eb48ac1bd6a2d7a | [
"CC0-1.0"
] | 1 | 2022-02-28T20:42:10.000Z | 2022-02-28T20:42:10.000Z | import numpy as np
import matplotlib.pyplot as plt
import sys
def read_instance(filename):
    """Load a similarity matrix from a .csv file or a PACE .gr graph file.

    For .gr input the symmetric 0/1 adjacency matrix serves as the
    similarity matrix.  Restored: this helper was called below but its
    definition was missing, so the script raised NameError.
    """
    if filename.endswith(".csv"):
        return np.loadtxt(filename, delimiter=', ')
    assert(filename.endswith(".gr"))
    # adjacency matrix is the similarity matrix in this case
    S = None
    f = open(filename)
    p_read = False
    for line in f:
        words = line.split()
        if len(words) == 0 or words[0] == 'c':
            continue
        if words[0] == 'p':
            if p_read:
                continue
            p_read = True
            n = int(words[2])
            S = np.zeros((n,n))
        else:
            assert(len(words) == 2)
            i, j = int(words[0])-1, int(words[1])-1
            S[i,j] = 1
            S[j,i] = 1
    return S


S = read_instance(sys.argv[1])
# Figure 2: distribution of similarity values.
plt.hist(S.flatten(), bins=np.linspace(0, 1, 200))
plt.title("Histogram of similarity values")
plt.xlabel("Similarity")
plt.ylabel("Frequency")
plt.savefig(sys.argv[1]+"_viz2.pdf", dpi=400)
plt.close()
# Figure 1: the matrix itself.  Temporarily neutralize the diagonal so it
# does not skew the quantile, rescale so the 99th percentile of |S - 0.5|
# spans half the diverging colormap, then force the diagonal to 1.
n = len(S)
x = np.arange(n)
S[(x,x)] = 0.5
S = S - 0.5
m = np.quantile(np.abs(S), 0.99)
S = S / m / 2
S = S + 0.5
S[(x,x)] = 1
#print(S)
plt.imshow(S, vmin=0, vmax=1, cmap='RdBu_r')
plt.colorbar()
plt.title("Similarity matrix")
plt.savefig(sys.argv[1]+"_viz1.pdf", dpi=400)
plt.close()
| 23.981481 | 59 | 0.559073 | import numpy as np
import matplotlib.pyplot as plt
import sys
def read_instance(filename):
    """Load a similarity matrix from a .csv file or a PACE-format .gr file.

    .csv: parsed directly as a float matrix.
    .gr : DIMACS-like graph; returns the symmetric 0/1 adjacency matrix
          (used as the similarity matrix), sized from the first 'p' line.
          Comment lines ('c' ...) and blank lines are skipped.
    """
    if filename.endswith(".csv"):
        return np.loadtxt(filename, delimiter=', ')
    assert(filename.endswith(".gr"))
    # adjacency matrix is the similarity matrix in this case
    S = None
    p_read = False
    # Fix: close the file deterministically instead of leaking the handle.
    with open(filename) as f:
        for line in f:
            words = line.split()
            if len(words) == 0 or words[0] == 'c':
                continue
            if words[0] == 'p':
                # only the first problem line determines the matrix size
                if p_read:
                    continue
                p_read = True
                n = int(words[2])
                S = np.zeros((n,n))
            else:
                assert(len(words) == 2)
                # vertices are 1-based in the file
                i, j = int(words[0])-1, int(words[1])-1
                S[i,j] = 1
                S[j,i] = 1
    return S
# Build the two figures for the instance named on the command line.
S = read_instance(sys.argv[1])
# Figure 2: distribution of similarity values.
plt.hist(S.flatten(), bins=np.linspace(0, 1, 200))
plt.title("Histogram of similarity values")
plt.xlabel("Similarity")
plt.ylabel("Frequency")
plt.savefig(sys.argv[1]+"_viz2.pdf", dpi=400)
plt.close()
# Figure 1: the matrix itself.  Temporarily neutralize the diagonal so it
# does not skew the quantile, rescale so the 99th percentile of |S - 0.5|
# spans half the diverging colormap, then force the diagonal to 1.
n = len(S)
x = np.arange(n)
S[(x,x)] = 0.5
S = S - 0.5
m = np.quantile(np.abs(S), 0.99)
S = S / m / 2
S = S + 0.5
S[(x,x)] = 1
#print(S)
plt.imshow(S, vmin=0, vmax=1, cmap='RdBu_r')
plt.colorbar()
plt.title("Similarity matrix")
plt.savefig(sys.argv[1]+"_viz1.pdf", dpi=400)
plt.close()
| 686 | 0 | 23 |
193ffbb9d91b75344c0042d5ef6cd59b2ea7f4af | 4,850 | py | Python | test/base/tool_shed_util.py | vimalkumarvelayudhan/galaxy | ea89dd8f149778b6c2f0f3f4a34c8b21f7033af7 | [
"CC-BY-3.0"
] | null | null | null | test/base/tool_shed_util.py | vimalkumarvelayudhan/galaxy | ea89dd8f149778b6c2f0f3f4a34c8b21f7033af7 | [
"CC-BY-3.0"
] | 1 | 2015-02-21T18:48:19.000Z | 2015-02-27T15:50:32.000Z | test/base/tool_shed_util.py | vimalkumarvelayudhan/galaxy | ea89dd8f149778b6c2f0f3f4a34c8b21f7033af7 | [
"CC-BY-3.0"
] | 3 | 2015-02-22T13:34:16.000Z | 2020-10-01T01:28:04.000Z | import logging
import os
import sys
cwd = os.getcwd()
if cwd not in sys.path:
sys.path.append( cwd )
new_path = [ os.path.join( cwd, "lib" ) ]
if new_path not in sys.path:
new_path.extend( sys.path )
sys.path = new_path
from galaxy.util import parse_xml
log = logging.getLogger(__name__)
# Set a 10 minute timeout for repository installation.
repository_installation_timeout = 600
def get_installed_repository_info( elem, last_galaxy_test_file_dir, last_tested_repository_name, last_tested_changeset_revision, tool_path ):
    """
    Return the GALAXY_TEST_FILE_DIR, the containing repository name and the
    change set revision for the tool elem.  This only happens when testing
    tools installed from the tool shed.

    The tool config path encodes .../repos/<owner>/<name>/<revision>/... ;
    when the repository/revision differs from the last one seen, walk the
    installed repository looking for a 'test-data' directory.  Returns
    ( test_data_dir_or_None, repository_name, changeset_revision ).
    """
    tool_config_path = elem.get( 'file' )
    installed_tool_path_items = tool_config_path.split( '/repos/' )
    sans_shed = installed_tool_path_items[ 1 ]
    path_items = sans_shed.split( '/' )
    repository_owner = path_items[ 0 ]
    repository_name = path_items[ 1 ]
    changeset_revision = path_items[ 2 ]
    if repository_name != last_tested_repository_name or changeset_revision != last_tested_changeset_revision:
        # Locate the test-data directory.
        installed_tool_path = os.path.join( installed_tool_path_items[ 0 ], 'repos', repository_owner, repository_name, changeset_revision )
        for root, dirs, files in os.walk( os.path.join( tool_path, installed_tool_path ) ):
            # Fix: prune the Mercurial metadata directory.  The original
            # tested for '.' (never present in os.walk's dirs), so '.hg' was
            # always descended into and could yield a bogus test-data hit.
            if '.hg' in dirs:
                dirs.remove( '.hg' )
            if 'test-data' in dirs:
                return os.path.join( root, 'test-data' ), repository_name, changeset_revision
        return None, repository_name, changeset_revision
    return last_galaxy_test_file_dir, last_tested_repository_name, last_tested_changeset_revision
def parse_tool_panel_config( config, shed_tools_dict ):
    """
    Parse a shed-related tool panel config to generate the shed_tools_dict. This only happens when testing tools installed from the tool shed.

    For every <tool> element (top-level or inside a <section>) that resolves
    to an installed shed repository with a test-data directory, record that
    directory in shed_tools_dict keyed by the tool's guid.  Returns the tuple
    ( has_test_data, shed_tools_dict ).
    """
    last_galaxy_test_file_dir = None
    last_tested_repository_name = None
    last_tested_changeset_revision = None
    tool_path = None
    has_test_data = False
    tree = parse_xml( config )
    root = tree.getroot()
    tool_path = root.get('tool_path')
    for elem in root:
        if elem.tag == 'tool':
            # The last_* values let get_installed_repository_info skip the
            # filesystem walk when the repository/revision is unchanged.
            galaxy_test_file_dir, \
                last_tested_repository_name, \
                last_tested_changeset_revision = get_installed_repository_info( elem,
                                                                                last_galaxy_test_file_dir,
                                                                                last_tested_repository_name,
                                                                                last_tested_changeset_revision,
                                                                                tool_path )
            if galaxy_test_file_dir:
                if not has_test_data:
                    has_test_data = True
                if galaxy_test_file_dir != last_galaxy_test_file_dir:
                    if not os.path.isabs( galaxy_test_file_dir ):
                        galaxy_test_file_dir = os.path.join( os.getcwd(), galaxy_test_file_dir )
                guid = elem.get( 'guid' )
                shed_tools_dict[ guid ] = galaxy_test_file_dir
                last_galaxy_test_file_dir = galaxy_test_file_dir
        elif elem.tag == 'section':
            for section_elem in elem:
                if section_elem.tag == 'tool':
                    galaxy_test_file_dir, \
                        last_tested_repository_name, \
                        last_tested_changeset_revision = get_installed_repository_info( section_elem,
                                                                                        last_galaxy_test_file_dir,
                                                                                        last_tested_repository_name,
                                                                                        last_tested_changeset_revision,
                                                                                        tool_path )
                    if galaxy_test_file_dir:
                        if not has_test_data:
                            has_test_data = True
                        if galaxy_test_file_dir != last_galaxy_test_file_dir:
                            if not os.path.isabs( galaxy_test_file_dir ):
                                galaxy_test_file_dir = os.path.join( os.getcwd(), galaxy_test_file_dir )
                        # Fix: these three lines were nested under the
                        # dir-changed check above — unlike the identical
                        # top-level <tool> branch — so sectioned tools whose
                        # test-data dir matched the previous one were never
                        # recorded in shed_tools_dict.
                        guid = section_elem.get( 'guid' )
                        shed_tools_dict[ guid ] = galaxy_test_file_dir
                        last_galaxy_test_file_dir = galaxy_test_file_dir
    return has_test_data, shed_tools_dict
| 50 | 142 | 0.573402 | import logging
import os
import sys
cwd = os.getcwd()
if cwd not in sys.path:
sys.path.append( cwd )
new_path = [ os.path.join( cwd, "lib" ) ]
if new_path not in sys.path:
new_path.extend( sys.path )
sys.path = new_path
from galaxy.util import parse_xml
log = logging.getLogger(__name__)
# Set a 10 minute timeout for repository installation.
repository_installation_timeout = 600
def get_installed_repository_info( elem, last_galaxy_test_file_dir, last_tested_repository_name, last_tested_changeset_revision, tool_path ):
    """
    Return the GALAXY_TEST_FILE_DIR, the containing repository name and the
    change set revision for the tool elem. This only happens when testing
    tools installed from the tool shed.

    elem must expose get( 'file' ) returning a tool config path of the form
    <shed>/repos/<owner>/<name>/<revision>/... .  The last_* parameters act
    as a one-entry cache: when the repository name and revision match the
    previous call, the cached test-data directory is returned without
    touching the filesystem.
    """
    tool_config_path = elem.get( 'file' )
    installed_tool_path_items = tool_config_path.split( '/repos/' )
    sans_shed = installed_tool_path_items[ 1 ]
    path_items = sans_shed.split( '/' )
    repository_owner = path_items[ 0 ]
    repository_name = path_items[ 1 ]
    changeset_revision = path_items[ 2 ]
    if repository_name != last_tested_repository_name or changeset_revision != last_tested_changeset_revision:
        # Locate the test-data directory.
        installed_tool_path = os.path.join( installed_tool_path_items[ 0 ], 'repos', repository_owner, repository_name, changeset_revision )
        for root, dirs, files in os.walk( os.path.join( tool_path, installed_tool_path ) ):
            # Prune Mercurial metadata so os.walk never descends into it.
            # Bug fix: the original tested "'.' in dirs" (a value os.walk
            # never yields), so the .hg directory was silently walked and
            # could be mistaken for a repository payload directory.
            if '.hg' in dirs:
                dirs.remove( '.hg' )
            if 'test-data' in dirs:
                return os.path.join( root, 'test-data' ), repository_name, changeset_revision
        # No test-data directory shipped with this repository revision.
        return None, repository_name, changeset_revision
    return last_galaxy_test_file_dir, last_tested_repository_name, last_tested_changeset_revision
def parse_tool_panel_config( config, shed_tools_dict ):
    """
    Parse a shed-related tool panel config to generate the shed_tools_dict. This only happens when testing tools installed from the tool shed.

    Returns ( has_test_data, shed_tools_dict ): has_test_data is True when at
    least one installed repository ships a test-data directory, and
    shed_tools_dict maps each tool guid to that directory's absolute path.
    """
    # One-entry cache threaded through get_installed_repository_info() so
    # consecutive tools of the same repository/revision skip the disk walk.
    last_galaxy_test_file_dir = None
    last_tested_repository_name = None
    last_tested_changeset_revision = None
    tool_path = None
    has_test_data = False
    tree = parse_xml( config )
    root = tree.getroot()
    tool_path = root.get('tool_path')
    for elem in root:
        if elem.tag == 'tool':
            # Top-level <tool> entry.
            galaxy_test_file_dir, \
                last_tested_repository_name, \
                last_tested_changeset_revision = get_installed_repository_info( elem,
                                                                                last_galaxy_test_file_dir,
                                                                                last_tested_repository_name,
                                                                                last_tested_changeset_revision,
                                                                                tool_path )
            if galaxy_test_file_dir:
                if not has_test_data:
                    has_test_data = True
                if galaxy_test_file_dir != last_galaxy_test_file_dir:
                    # New test-data directory: normalize to an absolute path.
                    if not os.path.isabs( galaxy_test_file_dir ):
                        galaxy_test_file_dir = os.path.join( os.getcwd(), galaxy_test_file_dir )
                guid = elem.get( 'guid' )
                shed_tools_dict[ guid ] = galaxy_test_file_dir
                last_galaxy_test_file_dir = galaxy_test_file_dir
        elif elem.tag == 'section':
            # <tool> entries nested one level deeper inside a <section>.
            for section_elem in elem:
                if section_elem.tag == 'tool':
                    galaxy_test_file_dir, \
                        last_tested_repository_name, \
                        last_tested_changeset_revision = get_installed_repository_info( section_elem,
                                                                                        last_galaxy_test_file_dir,
                                                                                        last_tested_repository_name,
                                                                                        last_tested_changeset_revision,
                                                                                        tool_path )
                    if galaxy_test_file_dir:
                        if not has_test_data:
                            has_test_data = True
                        if galaxy_test_file_dir != last_galaxy_test_file_dir:
                            # New test-data directory: normalize to absolute.
                            if not os.path.isabs( galaxy_test_file_dir ):
                                galaxy_test_file_dir = os.path.join( os.getcwd(), galaxy_test_file_dir )
                            guid = section_elem.get( 'guid' )
                            shed_tools_dict[ guid ] = galaxy_test_file_dir
                            last_galaxy_test_file_dir = galaxy_test_file_dir
    return has_test_data, shed_tools_dict
| 0 | 0 | 0 |
de4918caee09ec92475c1349ee971e4eb711dfbe | 273 | py | Python | tudo/ex011.py | Ramon-Erik/Exercicios-Python | 158a7f1846dd3d486aa0517fa337d46d73aab649 | [
"MIT"
] | 1 | 2021-07-08T00:35:57.000Z | 2021-07-08T00:35:57.000Z | tudo/ex011.py | Ramon-Erik/Exercicios-Python | 158a7f1846dd3d486aa0517fa337d46d73aab649 | [
"MIT"
] | null | null | null | tudo/ex011.py | Ramon-Erik/Exercicios-Python | 158a7f1846dd3d486aa0517fa337d46d73aab649 | [
"MIT"
] | null | null | null | largura = float(input('Qual a largura da parede em metros? '))
altura = float(input('Qual a altura da parede em metros? '))
area = largura * altura
print(f'A área dessa parede é: {area}m². ')
tinta = area / 2
print(f'Será usado {tinta}L de tinta para cada metro quadrado.') | 45.5 | 64 | 0.70696 | largura = float(input('Qual a largura da parede em metros? '))
altura = float(input('Qual a altura da parede em metros? '))
area = largura * altura
print(f'A área dessa parede é: {area}m². ')
tinta = area / 2
print(f'Será usado {tinta}L de tinta para cada metro quadrado.') | 0 | 0 | 0 |
cf39502934ab9e24950e06af4ca528a1d9074261 | 535 | py | Python | app/user/models/profile.py | NicolefAvella/ApiMovie | 4860b312f62dee73de6015c3029e75a6045f79a1 | [
"MIT"
] | null | null | null | app/user/models/profile.py | NicolefAvella/ApiMovie | 4860b312f62dee73de6015c3029e75a6045f79a1 | [
"MIT"
] | null | null | null | app/user/models/profile.py | NicolefAvella/ApiMovie | 4860b312f62dee73de6015c3029e75a6045f79a1 | [
"MIT"
] | null | null | null | from django.db import models
class Profile(models.Model):
    """Profile model.

    Extends the base user (one-to-one) with a profile picture, a short
    biography and two movie-related counters.
    """

    # One profile per user; deleting the user cascades to the profile.
    user = models.OneToOneField('user.User', on_delete=models.CASCADE)
    # Optional profile picture, stored under MEDIA_ROOT/user/pictures/.
    picture = models.ImageField(
        'profile picture',
        upload_to='user/pictures/',
        blank=True,
        null=True
    )
    # Optional free-form description, at most 500 characters.
    biography = models.TextField(max_length=500, blank=True)
    # Counters -- presumably movies created / recommended by this user;
    # verify against the views that update them.
    movies_create = models.PositiveIntegerField(default=0)
    movies_recomment = models.PositiveIntegerField(default=0)
| 24.318182 | 70 | 0.674766 | from django.db import models
class Profile(models.Model):
    """Profile model.

    Extends the base user (one-to-one) with a profile picture, a short
    biography and two movie-related counters.
    """

    # One profile per user; deleting the user cascades to the profile.
    user = models.OneToOneField('user.User', on_delete=models.CASCADE)
    # Optional profile picture, stored under MEDIA_ROOT/user/pictures/.
    picture = models.ImageField(
        'profile picture',
        upload_to='user/pictures/',
        blank=True,
        null=True
    )
    # Optional free-form description, at most 500 characters.
    biography = models.TextField(max_length=500, blank=True)
    # Counters -- presumably movies created / recommended by this user;
    # verify against the views that update them.
    movies_create = models.PositiveIntegerField(default=0)
    movies_recomment = models.PositiveIntegerField(default=0)

    def __str__(self):
        """Return the string form of the related user."""
        return str(self.user)
adc436be93d84dc86cd6f82501d3b84b9e1f875f | 628 | py | Python | ABC159D/resolve.py | staguchi0703/prob_boot_camp_medium | d6f31b3e50230877efb2ebfef40f90ef6468bfc7 | [
"MIT"
] | null | null | null | ABC159D/resolve.py | staguchi0703/prob_boot_camp_medium | d6f31b3e50230877efb2ebfef40f90ef6468bfc7 | [
"MIT"
] | null | null | null | ABC159D/resolve.py | staguchi0703/prob_boot_camp_medium | d6f31b3e50230877efb2ebfef40f90ef6468bfc7 | [
"MIT"
] | null | null | null | def resolve():
'''
code here
求めるものは
k番目のボールを除いた N−1個のボールから、書かれている整数が等しいような異なる2つのボールを選び出す方法
言い換えて
①同じ数から2個選ぶ組み合わせの和
②k番目のボールを除いた N−1個のボールから、K番目のボールと同じ数を選ぶ数
※選ぶボールとペアになっていた個数を数え上げて引く
①-②
'''
import collections
N = int(input())
A_list = [int(item) for item in input().split()]
origin_dict = collections.Counter(A_list)
twopair_in_N = 0
for i in origin_dict.values():
twopair_in_N += i*(i-1)//2
for j in range(N):
deff = origin_dict[A_list[j]] -1
print(twopair_in_N - deff)
if __name__ == "__main__":
resolve()
| 19.030303 | 59 | 0.613057 | def resolve():
'''
code here
求めるものは
k番目のボールを除いた N−1個のボールから、書かれている整数が等しいような異なる2つのボールを選び出す方法
言い換えて
①同じ数から2個選ぶ組み合わせの和
②k番目のボールを除いた N−1個のボールから、K番目のボールと同じ数を選ぶ数
※選ぶボールとペアになっていた個数を数え上げて引く
①-②
'''
import collections
N = int(input())
A_list = [int(item) for item in input().split()]
origin_dict = collections.Counter(A_list)
twopair_in_N = 0
for i in origin_dict.values():
twopair_in_N += i*(i-1)//2
for j in range(N):
deff = origin_dict[A_list[j]] -1
print(twopair_in_N - deff)
if __name__ == "__main__":
resolve()
| 0 | 0 | 0 |
61c62aec87289839c0bbe7e36af25de69071130f | 6,699 | py | Python | pysyncrosim/environment.py | syncrosim/pysyncrosim | 20c0005674b7dbcef19c233dfa7db95e0d9d451e | [
"MIT"
] | null | null | null | pysyncrosim/environment.py | syncrosim/pysyncrosim | 20c0005674b7dbcef19c233dfa7db95e0d9d451e | [
"MIT"
] | 6 | 2021-12-27T16:42:19.000Z | 2022-01-13T21:10:28.000Z | pysyncrosim/environment.py | syncrosim/pysyncrosim | 20c0005674b7dbcef19c233dfa7db95e0d9d451e | [
"MIT"
] | null | null | null | import os
import pandas as pd
def runtime_input_folder(scenario, datasheet_name):
    """
    Creates a SyncroSim Datasheet input folder.

    Parameters
    ----------
    scenario : Scenario
        Scenario class instance.
    datasheet_name : String
        Name of SyncroSim Datasheet.

    Returns
    -------
    String
        Path to input folder.

    """
    _validate_environment()
    # Bug fix: _environment is a function returning a one-row DataFrame of
    # the SSIM_* variables, so it must be called -- attribute access on the
    # function object raised AttributeError (compare _create_temp_folder,
    # which correctly calls _environment()).
    parent_folder = _environment().input_directory.item()
    return _create_scenario_folder(scenario, parent_folder, datasheet_name)
def runtime_output_folder(scenario, datasheet_name):
    """
    Creates a SyncroSim Datasheet output folder.

    Parameters
    ----------
    scenario : Scenario
        Scenario class instance.
    datasheet_name : String
        Name of SyncroSim Datasheet.

    Returns
    -------
    String
        Path to output folder.

    """
    _validate_environment()
    # Bug fix: _environment is a function returning a one-row DataFrame of
    # the SSIM_* variables, so it must be called -- attribute access on the
    # function object raised AttributeError (compare _create_temp_folder,
    # which correctly calls _environment()).
    parent_folder = _environment().output_directory.item()
    return _create_scenario_folder(scenario, parent_folder, datasheet_name)
def runtime_temp_folder(folder_name):
    """
    Creates a SyncroSim Datasheet temporary folder.

    Parameters
    ----------
    folder_name : String
        Name of temporary folder.

    Returns
    -------
    String
        Path to temporary folder.

    """
    # Requires a live SyncroSim session (SSIM_* environment variables set).
    _validate_environment()
    # Folder creation itself is delegated to the shared private helper.
    return _create_temp_folder(folder_name)
def progress_bar(report_type="step", iteration=None, timestep=None,
                 total_steps=None, message=None):
    """
    Begins, steps, ends, and reports progress for a SyncroSim simulation.

    Parameters
    ----------
    report_type : String, optional
        Directive to "begin", "end", "report", "message", or "step" the
        simulation. The default is "step".
    iteration : Int, optional
        Number of iterations. The default is None.
    timestep : Int, optional
        Number of timesteps. The default is None.
    total_steps : Int, optional
        Number of total steps in the simulation. The default is None.
    message : String, optional
        A message to print to the progress bar status. The default is None.

    Raises
    ------
    TypeError
        If iteration, timestep, or total_steps are not Integers.
    ValueError
        If report_type is not "begin", "end", "step", "report", or "message".

    Returns
    -------
    None.

    """
    _validate_environment()
    # Begin progress bar tracking
    if report_type == "begin":
        try:
            assert total_steps % 1 == 0
            total_steps = int(total_steps)
            print("ssim-task-start=%d\r\n" % total_steps, flush=True)
        # Bug fix: "except AssertionError or TypeError" evaluates the "or"
        # first and only catches AssertionError, so a TypeError (e.g.
        # total_steps=None) escaped unconverted.  A tuple catches both.
        except (AssertionError, TypeError):
            raise TypeError("total_steps must be an Integer")
    # End progress bar tracking
    elif report_type == "end":
        print("ssim-task-end=True\r\n", flush=True)
    # Step progress bar
    elif report_type == "step":
        print("ssim-task-step=1\r\n", flush=True)
    # Report iteration and timestep
    elif report_type == "report":
        try:
            assert iteration % 1 == 0
            assert timestep % 1 == 0
            # Bug fix: the second fragment lacked the f prefix, so the
            # literal text "{timestep}" was printed instead of the value.
            print(
                f"ssim-task-status=Simulating -> Iteration is {iteration}" +
                f" - Timestep is {timestep}\r\n",
                flush=True)
        except (AssertionError, TypeError):
            raise TypeError("iteration and timestep must be Integers")
    # Print arbitrary message
    elif report_type == "message":
        print(
            "ssim-task-status=" + str(message) + "\r\n",
            flush=True)
    else:
        raise ValueError("Invalid report_type")
def update_run_log(*message, sep=""):
    """
    Writes a message to the run log of a SyncroSim simulation.

    Parameters
    ----------
    *message : String
        Message to write to the run log. Can be provided as multiple
        arguments that will be concatenated together using sep.
    sep : String, optional
        String used to join multiple message arguments. The default is an
        empty String.

    Raises
    ------
    ValueError
        If no message is provided.

    Returns
    -------
    None.

    """
    _validate_environment()
    if len(message) == 0:
        raise ValueError("Please include a message to send to the run log.")
    # Join every piece with the separator and emit the run-log directive.
    body = str(sep).join(str(piece) for piece in message)
    print("ssim-task-log=" + body + "\r\n", flush=True)
| 29.253275 | 78 | 0.609494 | import os
import pandas as pd
def runtime_input_folder(scenario, datasheet_name):
    """
    Creates a SyncroSim Datasheet input folder.

    Parameters
    ----------
    scenario : Scenario
        Scenario class instance.
    datasheet_name : String
        Name of SyncroSim Datasheet.

    Returns
    -------
    String
        Path to input folder.

    """
    _validate_environment()
    # Bug fix: _environment is a function returning a one-row DataFrame of
    # the SSIM_* variables, so it must be called -- attribute access on the
    # function object raised AttributeError (compare _create_temp_folder,
    # which correctly calls _environment()).
    parent_folder = _environment().input_directory.item()
    return _create_scenario_folder(scenario, parent_folder, datasheet_name)
def runtime_output_folder(scenario, datasheet_name):
    """
    Creates a SyncroSim Datasheet output folder.

    Parameters
    ----------
    scenario : Scenario
        Scenario class instance.
    datasheet_name : String
        Name of SyncroSim Datasheet.

    Returns
    -------
    String
        Path to output folder.

    """
    _validate_environment()
    # Bug fix: _environment is a function returning a one-row DataFrame of
    # the SSIM_* variables, so it must be called -- attribute access on the
    # function object raised AttributeError (compare _create_temp_folder,
    # which correctly calls _environment()).
    parent_folder = _environment().output_directory.item()
    return _create_scenario_folder(scenario, parent_folder, datasheet_name)
def runtime_temp_folder(folder_name):
    """
    Creates a SyncroSim Datasheet temporary folder.

    Parameters
    ----------
    folder_name : String
        Name of temporary folder.

    Returns
    -------
    String
        Path to temporary folder.

    """
    # Requires a live SyncroSim session (SSIM_* environment variables set).
    _validate_environment()
    # Folder creation itself is delegated to the shared private helper.
    return _create_temp_folder(folder_name)
def progress_bar(report_type="step", iteration=None, timestep=None,
                 total_steps=None, message=None):
    """
    Begins, steps, ends, and reports progress for a SyncroSim simulation.

    Parameters
    ----------
    report_type : String, optional
        Directive to "begin", "end", "report", "message", or "step" the
        simulation. The default is "step".
    iteration : Int, optional
        Number of iterations. The default is None.
    timestep : Int, optional
        Number of timesteps. The default is None.
    total_steps : Int, optional
        Number of total steps in the simulation. The default is None.
    message : String, optional
        A message to print to the progress bar status. The default is None.

    Raises
    ------
    TypeError
        If iteration, timestep, or total_steps are not Integers.
    ValueError
        If report_type is not "begin", "end", "step", "report", or "message".

    Returns
    -------
    None.

    """
    _validate_environment()
    # Begin progress bar tracking
    if report_type == "begin":
        try:
            assert total_steps % 1 == 0
            total_steps = int(total_steps)
            print("ssim-task-start=%d\r\n" % total_steps, flush=True)
        # Bug fix: "except AssertionError or TypeError" evaluates the "or"
        # first and only catches AssertionError, so a TypeError (e.g.
        # total_steps=None) escaped unconverted.  A tuple catches both.
        except (AssertionError, TypeError):
            raise TypeError("total_steps must be an Integer")
    # End progress bar tracking
    elif report_type == "end":
        print("ssim-task-end=True\r\n", flush=True)
    # Step progress bar
    elif report_type == "step":
        print("ssim-task-step=1\r\n", flush=True)
    # Report iteration and timestep
    elif report_type == "report":
        try:
            assert iteration % 1 == 0
            assert timestep % 1 == 0
            # Bug fix: the second fragment lacked the f prefix, so the
            # literal text "{timestep}" was printed instead of the value.
            print(
                f"ssim-task-status=Simulating -> Iteration is {iteration}" +
                f" - Timestep is {timestep}\r\n",
                flush=True)
        except (AssertionError, TypeError):
            raise TypeError("iteration and timestep must be Integers")
    # Print arbitrary message
    elif report_type == "message":
        print(
            "ssim-task-status=" + str(message) + "\r\n",
            flush=True)
    else:
        raise ValueError("Invalid report_type")
def update_run_log(*message, sep=""):
    """
    Writes a message to the run log of a SyncroSim simulation.

    Parameters
    ----------
    *message : String
        Message to write to the run log. Can be provided as multiple
        arguments that will be concatenated together using sep.
    sep : String, optional
        String used to join multiple message arguments. The default is an
        empty String.

    Raises
    ------
    ValueError
        If no message is provided.

    Returns
    -------
    None.

    """
    _validate_environment()
    if len(message) == 0:
        raise ValueError("Please include a message to send to the run log.")
    # Join every piece with the separator and emit the run-log directive.
    body = str(sep).join(str(piece) for piece in message)
    print("ssim-task-log=" + body + "\r\n", flush=True)
def _environment():
env_df = pd.DataFrame(
{"package_directory": [os.getenv("SSIM_PACKAGE_DIRECTORY")],
"program_directory": [os.getenv("SSIM_PROGRAM_DIRECTORY")],
"library_filepath": [os.getenv("SSIM_LIBRARY_FILEPATH")],
"project_id": [int(os.getenv("SSIM_PROJECT_ID", default=-1))],
"scenario_id": [int(os.getenv("SSIM_SCENARIO_ID", default=-1))],
"input_directory": [os.getenv("SSIM_INPUT_DIRECTORY")],
"output_directory": [os.getenv("SSIM_OUTPUT_DIRECTORY")],
"temp_directory": [os.getenv("SSIM_TEMP_DIRECTORY")],
"transfer_directory": [os.getenv("SSIM_TRANSFER_DIRECTORY")],
"before_iteration": [
int(os.getenv("SSIM_STOCHASTIC_TIME_BEFORE_ITERATION",
default=-1))],
"after_iteration": [
int(os.getenv("SSIM_STOCHASTIC_TIME_AFTER_ITERATION",
default=-1))],
"before_timestep": [
int(os.getenv("SSIM_STOCHASTIC_TIME_BEFORE_TIMESTEP",
default=-1))],
"after_timestep": [
int(os.getenv("SSIM_STOCHASTIC_TIME_AFTER_TIMESTEP",
default=-1))]})
return env_df
def _validate_environment():
    """Raise RuntimeError unless running inside a SyncroSim session."""
    env = _environment()
    if env.program_directory.item() is None:
        raise RuntimeError("This function requires a SyncroSim environment")
def _create_scenario_folder(scenario, parent_folder, datasheet_name):
sidpart = "Scenario-" + str(scenario.sid)
# p = re.sub("\\", "/", parent_folder)
f = os.path.join(parent_folder, sidpart, datasheet_name)
if not os.path.isdir(f):
os.mkdir(f)
return f
def _create_temp_folder(folder_name):
    """Return (creating it if needed) a folder under the SyncroSim temp directory."""
    temp_root = _environment().temp_directory.item()
    folder = os.path.join(temp_root, folder_name)
    if not os.path.isdir(folder):
        os.mkdir(folder)
    return folder
| 1,853 | 0 | 104 |
5d04a1726570f54dbd238fae0bb455d7ae1425df | 12,970 | py | Python | tools/extStringCppParser.py | Mu-L/palanteer | ead425a0cf05dfe6ce178c0a72f19bfc4ff743b1 | [
"MIT"
] | null | null | null | tools/extStringCppParser.py | Mu-L/palanteer | ead425a0cf05dfe6ce178c0a72f19bfc4ff743b1 | [
"MIT"
] | null | null | null | tools/extStringCppParser.py | Mu-L/palanteer | ead425a0cf05dfe6ce178c0a72f19bfc4ff743b1 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
# The MIT License (MIT)
#
# Copyright(c) 2021, Damien Feneyrou <dfeneyrou@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Palanteer: Generate an external strings lookup from C++ code
# and/or from a binary (for the automatic instrumentation case)
#
# What it does for source code filenames:
# - loop on all the provided C/C++ files
# - identify Palanteer calls and corresponding parameters + file basenames
# - compute and display the tuple <key> <string> on stdout
#
# What it does for a provided binary:
# - calls "nm" on the Linux elf binary
# - collect all symbols from the text section
# - format and display the tuples <key> <string> on stdout (2 per function: filename and functio
import sys
if sys.version_info.major < 3:
print("ERROR: This tool requires python3 (not python2)", file=sys.stderr)
sys.exit(1)
import os
import os.path
import re
import subprocess
# Constants
# =========
# Regexp to detect if a word which starts with pl[g] (so that it looks like a command) followed with a parenthesis
MATCH_DETECT = re.compile(".*?(^|[^a-zA-Z\d])pl(g?)([a-zA-Z]*)\s*\((.*)")
# Regexp to extract the symbols from the text section, and also the weak ones
MATCH_INFO_LINE = re.compile("^([0-9a-z]+)\s+[TW]\s(.*?)\s(\S+):(\d+)$", re.IGNORECASE)
# Commands whose parameters shall be processed. Associated values are: 0=convert only strings 1=convert all parameters
PL_COMMANDS_TYPE = {
"Assert": 1,
"Begin": 0,
"End": 0,
"Data": 0,
"Text": 0,
"DeclareThread": 0,
"LockNotify": 0,
"LockNotifyDyn": 0,
"LockWait": 0,
"LockWaitDyn": 0,
"LockState": 0,
"LockStateDyn": 0,
"LockScopeState": 0,
"LockScopeStateDyn": 0,
"MakeString": 0,
"Marker": 0,
"MarkerDyn": 0,
"MemPush": 0,
"RegisterCli": 1,
"Scope": 0,
"Text": 0,
"Var": 1,
}
# Helpers
# =======
# Main entry
# ==========
# Bootstrap
if __name__ == "__main__":
main(sys.argv)
# Unit test
# =========
# ./extStringCppParser.py extStringCppParser.py shall give "good" followed by a sequential numbers, and no "BAD"
"""
plBegin aa("BAD0");
plBegin("good01");
plBegin ("good02");
plBegin("good03", "good04");
plgBegin(BAD1, "good05");
plBegin("good06",
"good07") "BAD2";
plBegin("good08", // "BAD3"
"good09"); // "BAD4"
plVar(good10, good11);
plgVar (BAD5, good12,
good13);
plgVar (BAD6, good14, // BAD7
good15);
plAssert(good16);
plgAssert(BAD8, good17,
good18);
plAssert(good19(a,b()), good20("content("), good21);
not at start of the line plMakeString("good22"),plMakeString ("good23" ) , plMakeString ( "good24")
plBegin("good25 <<< last one"); // Easy one at the end so it is easy to detect non sequential "goods"
"""
| 34.221636 | 131 | 0.537471 | #! /usr/bin/env python3
# The MIT License (MIT)
#
# Copyright(c) 2021, Damien Feneyrou <dfeneyrou@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Palanteer: Generate an external strings lookup from C++ code
# and/or from a binary (for the automatic instrumentation case)
#
# What it does for source code filenames:
# - loop on all the provided C/C++ files
# - identify Palanteer calls and corresponding parameters + file basenames
# - compute and display the tuple <key> <string> on stdout
#
# What it does for a provided binary:
# - calls "nm" on the Linux elf binary
# - collect all symbols from the text section
# - format and display the tuples <key> <string> on stdout (2 per function: filename and functio
import sys
if sys.version_info.major < 3:
print("ERROR: This tool requires python3 (not python2)", file=sys.stderr)
sys.exit(1)
import os
import os.path
import re
import subprocess
# Constants
# =========
# Regexp to detect if a word which starts with pl[g] (so that it looks like a command) followed with a parenthesis
MATCH_DETECT = re.compile(".*?(^|[^a-zA-Z\d])pl(g?)([a-zA-Z]*)\s*\((.*)")
# Regexp to extract the symbols from the text section, and also the weak ones
MATCH_INFO_LINE = re.compile("^([0-9a-z]+)\s+[TW]\s(.*?)\s(\S+):(\d+)$", re.IGNORECASE)
# Commands whose parameters shall be processed. Associated values are: 0=convert only strings 1=convert all parameters
PL_COMMANDS_TYPE = {
"Assert": 1,
"Begin": 0,
"End": 0,
"Data": 0,
"Text": 0,
"DeclareThread": 0,
"LockNotify": 0,
"LockNotifyDyn": 0,
"LockWait": 0,
"LockWaitDyn": 0,
"LockState": 0,
"LockStateDyn": 0,
"LockScopeState": 0,
"LockScopeStateDyn": 0,
"MakeString": 0,
"Marker": 0,
"MarkerDyn": 0,
"MemPush": 0,
"RegisterCli": 1,
"Scope": 0,
"Text": 0,
"Var": 1,
}
# Helpers
# =======
def computeHash(s, isHash64bits, hashSalt):
    """Salted FNV-1a hash of a string, 64- or 32-bit wide.

    The salt is added to the FNV offset basis before hashing.  A result of
    zero is remapped to 1 because 0 is reserved internally by Palanteer.
    """
    if isHash64bits:
        h = 14695981039346656037 + hashSalt
        prime, mask = 1099511628211, 0xFFFFFFFFFFFFFFFF
    else:
        h = 2166136261 + hashSalt
        prime, mask = 16777619, 0xFFFFFFFF
    for char in s:
        h = ((h ^ ord(char)) * prime) & mask
    return h if h else 1
def addString(
    s, hashToStringlkup, collisions, isHash64bits, hashSalt, h=None, doOverride=False
):
    """Insert s into the hash->string lookup, recording collisions.

    When h is None the key is computed with computeHash().  If the key is
    already mapped to a different string, the clash is appended to
    collisions -- unless doOverride is set, in which case the new string
    silently replaces the old one.
    """
    key = computeHash(s, isHash64bits, hashSalt) if h is None else h
    if key in hashToStringlkup:
        existing = hashToStringlkup[key]
        if existing == s:
            # Same string already registered: nothing to do.
            return
        if not doOverride:
            # Collision: remember every distinct string seen for this key.
            collisions.setdefault(key, [existing]).append(s)
            return
    hashToStringlkup[key] = s
# Main entry
# ==========
def main(argv):
    """Entry point: build the external-string lookup.

    Parses command-line options, optionally extracts symbols from an ELF
    binary via nm, scans the given C/C++ sources for Palanteer commands,
    then dumps "@@<hash>@@<string>" tuples on stdout.  Exits with status 1
    on usage error or when hash collisions were detected.
    """
    # Command line parameters parsing
    # ===============================
    hashSalt = 0
    isHash64bits = True
    doPrintUsage = False
    exeName = None
    fileNames = []
    i = 1
    while i < len(argv):
        if argv[i] == "--hash32":
            isHash64bits = False
        elif argv[i] == "--salt" and i + 1 < len(argv):
            i = i + 1
            hashSalt = int(argv[i])
        elif argv[i] == "--exe" and i + 1 < len(argv):
            i = i + 1
            exeName = argv[i]
        elif argv[i][0] == "-":
            doPrintUsage = True  # Unknown option
            print("Unknown option '%s'" % argv[i], file=sys.stderr)
        else:
            fileNames.append(argv[i])
        i = i + 1
    if not fileNames and not exeName:
        doPrintUsage = True
    if doPrintUsage:
        print(
            """This tool is a part of Palanteer and useful for the 'external string' feature.
It parses C++ files and dumps on stdout a hashed string lookup for the Palanteer calls.
Syntax: %s [options] [<source filenames>*]
Options:
   --hash32      : generate 32 bits hash (to use with PL_SHORT_STRING_HASH=1)
   --salt <value>: hash salt value (shall match PL_HASH_SALT, default value is 0)
   --exe <name>  : ELF executable from which to extract the function names and addresses
                   Useful only when using PL_IMPL_AUTO_INSTRUMENT (Linux only)
Note 1: The 'hand-made' code parsing is simple but should be enough for most need.
        It may fail in some corner cases (C macro masking values etc...).
Note 2: If Palanteer commands are encapsulated inside custom macros in your code, the list of command
        at the top of this file shall probably be modified."""
            % argv[0],
            file=sys.stderr,
        )
        sys.exit(1)

    # Process the executables
    # =======================
    hashToStringlkup, collisions = {}, {}
    if exeName and sys.platform == "linux":
        if not os.path.exists(exeName):
            # Bug fix: this used the undefined name 'printf', which raised a
            # NameError instead of reporting the missing executable.
            print("Input executable '%s' does not exist" % exeName, file=sys.stderr)
            sys.exit(1)
        # Collect the information from 'nm'
        # As the base virtual address is substracted from the logged function address, no need to care about ASLR
        nmProcess = subprocess.run(
            ["nm", "--line-numbers", "--demangle", exeName],
            universal_newlines=True,
            capture_output=True,
        )
        for l in nmProcess.stdout.split("\n"):
            # Filter on symbols from the text section only
            m = MATCH_INFO_LINE.match(l)
            if not m:
                continue
            # The hash value in this automatic instrumentation case is the base address of the function
            hashValue = int(m.group(1), 16) + hashSalt
            # Add the function name
            addString(
                m.group(2),
                hashToStringlkup,
                collisions,
                isHash64bits,
                hashSalt,
                hashValue,
            )
            # Add the function filename, augmented with the line number
            # The override of any former value is due to the potential emission of dual constructor/destructor symbols by GCC/Clang
            # (see https://stackoverflow.com/questions/6921295/dual-emission-of-constructor-symbols for details)
            addString(
                "%s:%s" % (m.group(3), m.group(4)),
                hashToStringlkup,
                collisions,
                isHash64bits,
                hashSalt,
                hashValue + 1,
                doOverride=True,
            )

    # Process source files
    # ====================
    addString(
        "", hashToStringlkup, collisions, isHash64bits, hashSalt
    )  # Add the empty string which are used internally
    for f in fileNames:
        # Insert the file basename, as it is what the palanteer client is using
        basename = os.path.basename(f)
        addString(basename, hashToStringlkup, collisions, isHash64bits, hashSalt)
        # Load the file
        lines = []
        with open(f, "r") as fHandle:
            lines = fHandle.readlines()
        # Loop on lines
        lineNbr, lineQty = 0, len(lines)
        while lineNbr < lineQty:
            # Detect a Palanteer command
            l = lines[lineNbr]
            while l:
                m = MATCH_DETECT.match(l)
                if not m:
                    l, lineNbr = None, lineNbr + 1
                    continue
                cmdType = PL_COMMANDS_TYPE.get(m.group(3), None)
                if cmdType == None:
                    l = m.group(4)
                    continue
                isGroup = not not m.group(2)
                # Parse the parameters (hand-made scanner: tracks quoting,
                # parenthesis depth and '//' end-of-line comments)
                params, currentParam, isInQuote, parenthLevel, prevC = (
                    [],
                    [],
                    False,
                    0,
                    "",
                )
                paramLine = m.group(4)
                while parenthLevel >= 0:
                    for i, c in enumerate(paramLine):
                        if c == '"':
                            isInQuote = not isInQuote
                        if isInQuote:
                            currentParam.append(c)
                            continue
                        if c == "/" and prevC == "/":
                            currentParam = currentParam[:-1]  # Remove first '/'
                            break
                        elif c == "(":
                            parenthLevel += 1
                            currentParam.append(c)
                        elif c == ")":
                            parenthLevel -= 1
                            if parenthLevel < 0:  # Palanteer command terminaison
                                params.append("".join(currentParam).strip())
                                l = paramLine[i:]
                                break
                            else:
                                currentParam.append(c)
                        elif c == "," and parenthLevel == 0:
                            params.append("".join(currentParam).strip())
                            currentParam = []
                        else:
                            currentParam.append(c)
                        prevC = c
                    # If parsing is not complete, process next line
                    if parenthLevel >= 0:
                        lineNbr += 1
                        if lineNbr < lineQty:
                            paramLine = lines[lineNbr]
                        else:
                            break
                # Update the lookup
                if isGroup:
                    params = params[1:]  # Remove group parameter
                for p in params:
                    if not p:
                        continue
                    if p[0] == '"' and p[-1] == '"':
                        addString(
                            p[1:-1],
                            hashToStringlkup,
                            collisions,
                            isHash64bits,
                            hashSalt,
                        )
                    # Commands of type 1 stringifies all parameters. Also with quote (for assertions)
                    if cmdType == 1:
                        addString(
                            p, hashToStringlkup, collisions, isHash64bits, hashSalt
                        )
    # Output
    sortedK = sorted(hashToStringlkup.keys(), key=lambda x: hashToStringlkup[x].lower())
    for k in sortedK:
        print("@@%016X@@%s" % (k, hashToStringlkup[k]))
    # Error
    if collisions:
        sortedK = sorted(collisions.keys())
        for k in sortedK:
            cList = collisions[k]
            print(
                "COLLISION %016X %s"
                % (k, " ".join(["[%s]" % s for s in collisions[k]])),
                file=sys.stderr,
            )
    # Exit status
    sys.exit(1 if collisions else 0)
# Bootstrap
if __name__ == "__main__":
main(sys.argv)
# Unit test
# =========
# ./extStringCppParser.py extStringCppParser.py shall give "good" followed by a sequential numbers, and no "BAD"
"""
plBegin aa("BAD0");
plBegin("good01");
plBegin ("good02");
plBegin("good03", "good04");
plgBegin(BAD1, "good05");
plBegin("good06",
"good07") "BAD2";
plBegin("good08", // "BAD3"
"good09"); // "BAD4"
plVar(good10, good11);
plgVar (BAD5, good12,
good13);
plgVar (BAD6, good14, // BAD7
good15);
plAssert(good16);
plgAssert(BAD8, good17,
good18);
plAssert(good19(a,b()), good20("content("), good21);
not at start of the line plMakeString("good22"),plMakeString ("good23" ) , plMakeString ( "good24")
plBegin("good25 <<< last one"); // Easy one at the end so it is easy to detect non sequential "goods"
"""
| 9,097 | 0 | 69 |
e55bf99f4d0d8b617d13272dc1499461cf5c6975 | 632 | py | Python | okie/tasks/migrations/0004_auto_20210528_1622.py | immortal-zeus/Django-task_dis | 10e99997893d4aabc2305969894e49f2873c17b7 | [
"BSD-3-Clause"
] | 1 | 2021-06-22T06:31:56.000Z | 2021-06-22T06:31:56.000Z | okie/tasks/migrations/0004_auto_20210528_1622.py | immortal-zeus/Django-task_dis | 10e99997893d4aabc2305969894e49f2873c17b7 | [
"BSD-3-Clause"
] | null | null | null | okie/tasks/migrations/0004_auto_20210528_1622.py | immortal-zeus/Django-task_dis | 10e99997893d4aabc2305969894e49f2873c17b7 | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 3.2.3 on 2021-05-28 10:52
from django.db import migrations, models
| 26.333333 | 97 | 0.602848 | # Generated by Django 3.2.3 on 2021-05-28 10:52
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django (3.2.3) schema migration for the ``tasks`` app:
    # relaxes two fields on the ``Tasks`` model so they may be left empty.

    # Must be applied after 0003, which renamed the ``user`` model to ``tuser``.
    dependencies = [
        ('tasks', '0003_rename_user_tuser'),
    ]

    operations = [
        migrations.AlterField(
            model_name='tasks',
            name='task_description',
            # Optional free-text description: blank allowed in forms, NULL in DB,
            # capped at 2000 characters.
            field=models.CharField(blank=True, default=None, max_length=2000, null=True),
        ),
        migrations.AlterField(
            model_name='tasks',
            name='task_pic',
            # Optional image upload, stored under the 'taskimage/' subdirectory.
            field=models.ImageField(blank=True, default=None, null=True, upload_to='taskimage/'),
        ),
    ]
| 0 | 518 | 23 |
1be6a2ef1e1199d71dec9476b340096db69dcdad | 174 | py | Python | codes/1/104.py | BigShuang/python-introductory-exercises | 9b8d391ce5fcbd12a654aba1c62a746ddb52a42d | [
"MIT"
] | null | null | null | codes/1/104.py | BigShuang/python-introductory-exercises | 9b8d391ce5fcbd12a654aba1c62a746ddb52a42d | [
"MIT"
] | null | null | null | codes/1/104.py | BigShuang/python-introductory-exercises | 9b8d391ce5fcbd12a654aba1c62a746ddb52a42d | [
"MIT"
] | null | null | null | s = input()
m, n = s.split(" ")
m = int(m)
n = int(n)
for ri in range(m):
for ci in range(n):
v = ( ri + 1 ) * ( ci + 1 )
print(v, end=" ")
print()
| 14.5 | 35 | 0.408046 | s = input()
m, n = s.split(" ")
m = int(m)
n = int(n)
for ri in range(m):
for ci in range(n):
v = ( ri + 1 ) * ( ci + 1 )
print(v, end=" ")
print()
| 0 | 0 | 0 |
89e422283e506acfd5a5dd572513aa47e2b556aa | 1,312 | py | Python | Code/Python/PossiblePlate.py | sgino209/LPR | 75da71796484f2b72e6f9626dc83fa7ec5103ba9 | [
"Apache-2.0"
] | null | null | null | Code/Python/PossiblePlate.py | sgino209/LPR | 75da71796484f2b72e6f9626dc83fa7ec5103ba9 | [
"Apache-2.0"
] | null | null | null | Code/Python/PossiblePlate.py | sgino209/LPR | 75da71796484f2b72e6f9626dc83fa7ec5103ba9 | [
"Apache-2.0"
] | 1 | 2021-01-27T12:28:14.000Z | 2021-01-27T12:28:14.000Z | # !/usr/bin/python
# _ _ ____ _ _ ____ _ _ _
# | | (_) ___ ___ _ __ ___ ___ | _ \| | __ _| |_ ___ | _ \ ___ ___ ___ __ _ _ __ (_) |_(_) ___ _ __
# | | | |/ __/ _ \ '_ \/ __|/ _ \ | |_) | |/ _` | __/ _ \ | |_) / _ \/ __/ _ \ / _` | '_ \| | __| |/ _ \| '_ \
# | |___| | (__ __/ | | \__ \ __/ | __/| | (_| | |_ __/ | _ < __/ (__ (_) | (_| | | | | | |_| | (_) | | | |
# |_____|_|\___\___|_| |_|___/\___| |_| |_|\__,_|\__\___| |_| \_\___|\___\___/ \__, |_| |_|_|\__|_|\___/|_| |_|
# |___/
# (c) Shahar Gino, July-2017, sgino209@gmail.com
# ---------------------------------------------------------------------------------------------------------------
class PossiblePlate:
    """Candidate license plate detected in a scene.

    Holds the plate image crops (color, grayscale, thresholded), the plate's
    rotated-rect location (scene-local and global), the recognized character
    string, and a flag recording whether the plate rectangle was found.
    """

    # -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- ..
    def __init__(self):
        """Create an empty candidate: no images, no location, no characters."""
        # Image crops — populated later; all start unset.
        self.imgPlate = self.imgGrayscale = self.imgThresh = None
        # Rotated-rect plate locations (scene-local and global coordinates).
        self.rrLocationOfPlateInScene = None
        self.rrLocationOfPlateInSceneGbl = None
        # Recognized plate characters; empty string until recognition runs.
        self.strChars = ""
        # Becomes True once the plate's rectangle has been located.
        self.rectFind = False
| 52.48 | 113 | 0.348323 | # !/usr/bin/python
# _ _ ____ _ _ ____ _ _ _
# | | (_) ___ ___ _ __ ___ ___ | _ \| | __ _| |_ ___ | _ \ ___ ___ ___ __ _ _ __ (_) |_(_) ___ _ __
# | | | |/ __/ _ \ '_ \/ __|/ _ \ | |_) | |/ _` | __/ _ \ | |_) / _ \/ __/ _ \ / _` | '_ \| | __| |/ _ \| '_ \
# | |___| | (__ __/ | | \__ \ __/ | __/| | (_| | |_ __/ | _ < __/ (__ (_) | (_| | | | | | |_| | (_) | | | |
# |_____|_|\___\___|_| |_|___/\___| |_| |_|\__,_|\__\___| |_| \_\___|\___\___/ \__, |_| |_|_|\__|_|\___/|_| |_|
# |___/
# (c) Shahar Gino, July-2017, sgino209@gmail.com
# ---------------------------------------------------------------------------------------------------------------
class PossiblePlate:
    """ Class for representing a (possible) license-plate object """

    # -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- ..
    def __init__(self):
        """ Constructor """

        self.imgPlate = None                    # color crop of the plate region; unset until detection
        self.imgGrayscale = None                # grayscale version of the plate crop
        self.imgThresh = None                   # thresholded (binarized) plate crop
        self.rrLocationOfPlateInScene = None    # rotated rect of the plate, scene-local coordinates
        self.rrLocationOfPlateInSceneGbl = None # rotated rect of the plate, global coordinates
        self.strChars = ""                      # recognized plate characters; empty until OCR runs
        self.rectFind = False                   # True once the plate rectangle has been located
| 0 | 0 | 0 |