hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fe5faeef8ee1634c0f11e3b81c2ab55c90744a4f | 1,239 | py | Python | tests/test_utils.py | cenyk1230/cogdl | fa1f74d5c3a15b5a52abfc7cd3f04dce4b7dbcce | [
"MIT"
] | 1,072 | 2019-08-02T05:46:21.000Z | 2022-03-31T07:51:53.000Z | tests/test_utils.py | cenyk1230/cogdl | fa1f74d5c3a15b5a52abfc7cd3f04dce4b7dbcce | [
"MIT"
] | 96 | 2019-08-05T17:27:22.000Z | 2022-03-03T08:36:57.000Z | tests/test_utils.py | cenyk1230/cogdl | fa1f74d5c3a15b5a52abfc7cd3f04dce4b7dbcce | [
"MIT"
] | 299 | 2019-08-08T07:33:10.000Z | 2022-03-31T09:30:07.000Z | from sklearn.metrics import f1_score
import torch
from cogdl.utils import build_args_from_dict
from cogdl.utils import accuracy, multiclass_f1, multilabel_f1, bce_with_logits_loss, cross_entropy_loss
if __name__ == "__main__":
    # NOTE(review): test_build_args_from_dict is not defined in this copy of the
    # file (it was filtered out upstream); running this block as-is raises NameError.
    test_build_args_from_dict()
| 31.769231 | 104 | 0.678773 | from sklearn.metrics import f1_score
import torch
from cogdl.utils import build_args_from_dict
from cogdl.utils import accuracy, multiclass_f1, multilabel_f1, bce_with_logits_loss, cross_entropy_loss
def test_build_args_from_dict():
    """Check that build_args_from_dict exposes every dict entry as an attribute."""
    params = {"arg1": "value1", "arg2": 2, "arg3": 0.3}
    args = build_args_from_dict(params)
    for key, expected in params.items():
        assert getattr(args, key) == expected
def test_evaluator():
    """Check cogdl metric helpers against torch/sklearn reference computations."""
    pred = torch.randn(20, 5)
    target_one = torch.randint(0, 5, (20,))
    target_mult = torch.randint(0, 2, (20, 5)).float()

    def rounded(x):
        # compare floats at 5-decimal precision, as the metrics may differ in dtype
        return round(float(x), 5)

    # multiclass metrics: accuracy and micro-F1 on log-softmax scores
    _ = cross_entropy_loss(pred, target_one)
    log_probs = torch.nn.functional.log_softmax(pred, dim=-1)
    hard_labels = log_probs.max(1)[1]
    expected_acc = hard_labels.eq(target_one).double().sum() / len(log_probs)
    assert rounded(expected_acc) == rounded(accuracy(log_probs, target_one))
    expected_f1 = f1_score(target_one, hard_labels, average="micro")
    assert rounded(expected_f1) == rounded(multiclass_f1(log_probs, target_one))

    # multilabel metric: micro-F1 on logits thresholded at zero
    _ = bce_with_logits_loss(pred, target_mult)
    hard_pred = torch.zeros_like(pred)
    hard_pred[pred > 0] = 1
    expected_f1 = f1_score(target_mult, hard_pred, average="micro")
    assert rounded(expected_f1) == rounded(multilabel_f1(pred, target_mult))
if __name__ == "__main__":
    # Ad-hoc entry point. NOTE(review): test_evaluator() is defined above but is
    # never invoked here -- confirm whether that is intentional.
    test_build_args_from_dict()
| 930 | 0 | 46 |
c38a95aad8565a4d121edf838f6134f6a4a283f3 | 9,493 | py | Python | analogistics/supply_chain/P8_performance_assessment/wh_indexes.py | aletuf93/analogistics | c5f76910683bc3a1cc6f24799f0299232b4fc522 | [
"MIT"
] | null | null | null | analogistics/supply_chain/P8_performance_assessment/wh_indexes.py | aletuf93/analogistics | c5f76910683bc3a1cc6f24799f0299232b4fc522 | [
"MIT"
] | null | null | null | analogistics/supply_chain/P8_performance_assessment/wh_indexes.py | aletuf93/analogistics | c5f76910683bc3a1cc6f24799f0299232b4fc522 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from analogistics.statistics import time_series as ts
from analogistics.supply_chain.information_framework import movementfunctionfromInventory
from analogistics.explore import paretoDataframe
def calculatePopularity(movements: pd.Series):
    """
    Compute the popularity indicators of a SKU from its movement series.

    Args:
        movements (pd.Series): daily movement quantities (positive = inbound,
            negative = outbound, zero = no movement that day).

    Returns:
        pop_in (float): fraction of days with an inbound movement.
        pop_out (float): fraction of days with an outbound movement.
        pop_absolute_in (int): number of days with an inbound movement.
        pop_absolute_out (int): number of days with an outbound movement.
    """
    n_days = len(movements)
    pop_absolute_in = int((movements > 0).sum())
    pop_absolute_out = int((movements < 0).sum())
    pop_in = pop_absolute_in / n_days
    pop_out = pop_absolute_out / n_days
    return pop_in, pop_out, pop_absolute_in, pop_absolute_out
def calculateCOI(inventory: pd.Series):
    """
    Compute the COI indexes of a SKU from its inventory curve.

    Args:
        inventory (pd.Series): inventory level of the SKU over time.

    Returns:
        COI_in (float): COI index IN (popularity IN / average inventory).
        COI_out (float): COI index OUT (popularity OUT / average inventory).
    """
    # reconstruct the movement series from the inventory curve
    moves = movementfunctionfromInventory(inventory).dropna()
    pop_in, pop_out, _, _ = calculatePopularity(moves['QUANTITY'])

    # daily COI: popularity normalised by the average inventory level;
    # undefined (NaN) when the average inventory is not positive
    avg_inventory = np.nanmean(inventory)
    if avg_inventory > 0:
        return pop_in / avg_inventory, pop_out / avg_inventory
    return np.nan, np.nan
def calculateTurn(inventory: pd.Series):
    """
    Compute the TURN index of a SKU from its inventory curve.

    Args:
        inventory (pd.Series): inventory level of the SKU over time.

    Returns:
        turn (float): average daily outbound quantity divided by the average
            inventory level; NaN when the average inventory is not positive.
    """
    moves = movementfunctionfromInventory(inventory).dropna()

    # average outbound quantity per day (outbound movements are negative)
    qty = moves['QUANTITY']
    daily_out = -qty[qty < 0].sum() / len(moves)

    avg_inventory = np.nanmean(inventory)
    return daily_out / avg_inventory if avg_inventory > 0 else np.nan
def calculateOrderCompletion(D_mov: pd.DataFrame, itemcode: str,
                             itemfield: str = 'ITEMCODE', ordercodefield: str = 'ORDERCODE'):
    """
    Calculate the Order Completion (OC) index of an item.

    Every order containing the item contributes 1 / (number of movement lines
    in that order), so items that complete orders alone score higher.

    Args:
        D_mov (pd.DataFrame): movement list with item-code and order-code columns.
        itemcode (str): item for which the OC index is computed.
        itemfield (str, optional): name of the item-code column. Defaults to 'ITEMCODE'.
        ordercodefield (str, optional): name of the order-code column. Defaults to 'ORDERCODE'.

    Returns:
        OC (float): Order Completion index (0 when the item appears in no order).
    """
    # clean data: keep only the relevant columns, drop missing order codes
    # (order codes were stringified upstream, hence the 'nan' string filter)
    D = D_mov[[itemfield, ordercodefield]]
    D = D[D[ordercodefield] != 'nan']
    D = D.dropna()

    # orders containing the item of interest
    orders = set(D.loc[D[itemfield] == itemcode, ordercodefield])
    if not orders:
        return 0

    # one groupby pass replaces the original per-order Python loop
    # (O(n) instead of O(n * number of orders)); each order contributes
    # 1 / (number of movement lines in the order)
    order_sizes = D[D[ordercodefield].isin(orders)].groupby(ordercodefield).size()
    return float((1.0 / order_sizes).sum())
def fourierAnalysisInventory(inventory: pd.Series):
    """
    Fourier analysis of the inventory curve.

    Args:
        inventory (pd.Series): inventory values over time.

    Returns:
        first_carrier: frequency (in 1/days) with the highest amplitude.
        period: period (in days) associated with that frequency.
    """
    spectrum = ts.fourierAnalysis(np.array(inventory))
    dominant = spectrum.sort_values(by='Amplitude', ascending=False).iloc[0]
    first_carrier = dominant['Frequency_domain_value']  # 1 / days
    return first_carrier, 1 / first_carrier
def updatePopularity(D_SKUs: pd.DataFrame):
    """
    Compute the popularity indexes for every SKU and store them in the dataframe.

    Args:
        D_SKUs (pd.DataFrame): SKU master table with an 'INVENTORY_QTY' column.

    Returns:
        D_SKUs (pd.DataFrame): same dataframe with POP_IN, POP_OUT,
            POP_IN_TOT and POP_OUT_TOT columns filled in.
    """
    # initialise the result columns
    for col in ('POP_IN', 'POP_OUT', 'POP_IN_TOT', 'POP_OUT_TOT'):
        D_SKUs[col] = np.nan

    for idx in D_SKUs.index:
        # rebuild the movement series from the SKU inventory curve;
        # SKUs with no recorded movement keep NaN indexes
        inventory = D_SKUs.loc[idx]['INVENTORY_QTY']
        moves = movementfunctionfromInventory(inventory).dropna()
        if len(moves) > 0:
            pop_in, pop_out, pop_in_tot, pop_out_tot = calculatePopularity(moves['QUANTITY'])
            D_SKUs.at[idx, 'POP_IN'] = pop_in
            D_SKUs.at[idx, 'POP_OUT'] = pop_out
            D_SKUs.at[idx, 'POP_IN_TOT'] = pop_in_tot
            D_SKUs.at[idx, 'POP_OUT_TOT'] = pop_out_tot
    return D_SKUs
def updateCOI(D_SKUs: pd.DataFrame):
    """
    Compute the COI indexes for every SKU and store them in the dataframe.

    Args:
        D_SKUs (pd.DataFrame): SKU master table with an 'INVENTORY_QTY' column.

    Returns:
        D_SKUs (pd.DataFrame): same dataframe with COI_IN and COI_OUT filled in.
    """
    D_SKUs['COI_IN'] = np.nan
    D_SKUs['COI_OUT'] = np.nan

    for idx in D_SKUs.index:
        inventory = D_SKUs.loc[idx]['INVENTORY_QTY']
        # a SKU with no recorded movement keeps NaN indexes
        moves = movementfunctionfromInventory(inventory).dropna()
        if len(moves) > 0:
            coi_in, coi_out = calculateCOI(inventory)
            D_SKUs.at[idx, 'COI_IN'] = coi_in
            D_SKUs.at[idx, 'COI_OUT'] = coi_out
    return D_SKUs
def updateTURN(D_SKUs: pd.DataFrame):
    """
    Compute the TURN index for every SKU and store it in the dataframe.

    Args:
        D_SKUs (pd.DataFrame): SKU master table with an 'INVENTORY_QTY' column.

    Returns:
        D_SKUs (pd.DataFrame): same dataframe with the TURN column filled in.
    """
    D_SKUs['TURN'] = np.nan

    for idx in D_SKUs.index:
        inventory = D_SKUs.loc[idx]['INVENTORY_QTY']
        # a SKU with no recorded movement keeps a NaN TURN
        moves = movementfunctionfromInventory(inventory).dropna()
        if len(moves) > 0:
            D_SKUs.at[idx, 'TURN'] = calculateTurn(inventory)
    return D_SKUs
def updateOrderCompletion(D_SKUs: pd.DataFrame, D_mov: pd.DataFrame):
    """
    Compute the Order Completion (OC) index for every SKU.

    Args:
        D_SKUs (pd.DataFrame): SKU master table with an 'ITEMCODE' column.
        D_mov (pd.DataFrame): movement list with ITEMCODE and ORDERCODE columns.

    Returns:
        D_SKUs (pd.DataFrame): same dataframe with the OC column filled in.
    """
    D_SKUs['OC'] = np.nan
    for idx, sku in D_SKUs.iterrows():
        # OC of each item, computed from the full movement list
        D_SKUs.at[idx, 'OC'] = calculateOrderCompletion(
            D_mov, sku['ITEMCODE'], itemfield='ITEMCODE', ordercodefield='ORDERCODE')
    return D_SKUs
def updateFourieranalysis(D_SKUs: pd.DataFrame):
    """
    Run the Fourier analysis of the inventory curve for every SKU.

    Args:
        D_SKUs (pd.DataFrame): SKU master table with an 'INVENTORY_QTY' column.

    Returns:
        D_SKUs (pd.DataFrame): same dataframe with FOURIER_CARRIER and
            FOURIER_PERIOD columns filled in.
    """
    D_SKUs['FOURIER_CARRIER'] = np.nan
    D_SKUs['FOURIER_PERIOD'] = np.nan

    for idx in D_SKUs.index:
        inventory = D_SKUs.loc[idx]['INVENTORY_QTY']
        # skip SKUs without any recorded movement
        moves = movementfunctionfromInventory(inventory).dropna()
        if len(moves) > 0:
            carrier, period = fourierAnalysisInventory(inventory)
            D_SKUs.at[idx, 'FOURIER_CARRIER'] = carrier
            D_SKUs.at[idx, 'FOURIER_PERIOD'] = period
    return D_SKUs
# %% PARETO AND HISTOGRAM PLOT
def whIndexParetoPlot(D_SKUs: pd.DataFrame, columnIndex: str):
    """
    Build the Pareto curve and the histogram of a warehouse index.

    Args:
        D_SKUs (pd.DataFrame): SKU master table.
        columnIndex (str): name of the index column to plot.

    Returns:
        output_figures (dict): dictionary mapping figure names to figures.
    """
    output_figures = {}

    # Pareto-ordered dataframe; adds the cumulative column f"{columnIndex}_CUM"
    D_pareto = paretoDataframe(D_SKUs, columnIndex)

    # Pareto curve of the cumulative index
    fig_pareto = plt.figure()
    plt.plot(np.arange(0, len(D_pareto)), D_pareto[f"{columnIndex}_CUM"], color='orange')
    plt.title(f"{columnIndex} Pareto curve")
    plt.xlabel("N. of SKUs")
    plt.ylabel("Popularity percentage")
    output_figures[f"{columnIndex}_pareto"] = fig_pareto

    # histogram of the raw index values
    fig_hist = plt.figure()
    plt.hist(D_pareto[columnIndex], color='orange')
    plt.title(f"{columnIndex} histogram")
    plt.xlabel(f"{columnIndex}")
    plt.ylabel("Frequency")
    output_figures[f"{columnIndex}_hist"] = fig_hist

    return output_figures
| 27.83871 | 107 | 0.651006 | # -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from analogistics.statistics import time_series as ts
from analogistics.supply_chain.information_framework import movementfunctionfromInventory
from analogistics.explore import paretoDataframe
def calculatePopularity(movements: pd.Series):
    """
    Compute the popularity indicators of a SKU from its movement series.

    Args:
        movements (pd.Series): daily movement quantities (positive = inbound,
            negative = outbound, zero = no movement that day).

    Returns:
        pop_in (float): fraction of days with an inbound movement.
        pop_out (float): fraction of days with an outbound movement.
        pop_absolute_in (int): number of days with an inbound movement.
        pop_absolute_out (int): number of days with an outbound movement.
    """
    n_days = len(movements)
    pop_absolute_in = int((movements > 0).sum())
    pop_absolute_out = int((movements < 0).sum())
    pop_in = pop_absolute_in / n_days
    pop_out = pop_absolute_out / n_days
    return pop_in, pop_out, pop_absolute_in, pop_absolute_out
def calculateCOI(inventory: pd.Series):
    """
    Compute the COI indexes of a SKU from its inventory curve.

    Args:
        inventory (pd.Series): inventory level of the SKU over time.

    Returns:
        COI_in (float): COI index IN (popularity IN / average inventory).
        COI_out (float): COI index OUT (popularity OUT / average inventory).
    """
    # reconstruct the movement series from the inventory curve
    moves = movementfunctionfromInventory(inventory).dropna()
    pop_in, pop_out, _, _ = calculatePopularity(moves['QUANTITY'])

    # daily COI: popularity normalised by the average inventory level;
    # undefined (NaN) when the average inventory is not positive
    avg_inventory = np.nanmean(inventory)
    if avg_inventory > 0:
        return pop_in / avg_inventory, pop_out / avg_inventory
    return np.nan, np.nan
def calculateTurn(inventory: pd.Series):
    """
    Compute the TURN index of a SKU from its inventory curve.

    Args:
        inventory (pd.Series): inventory level of the SKU over time.

    Returns:
        turn (float): average daily outbound quantity divided by the average
            inventory level; NaN when the average inventory is not positive.
    """
    moves = movementfunctionfromInventory(inventory).dropna()

    # average outbound quantity per day (outbound movements are negative)
    qty = moves['QUANTITY']
    daily_out = -qty[qty < 0].sum() / len(moves)

    avg_inventory = np.nanmean(inventory)
    return daily_out / avg_inventory if avg_inventory > 0 else np.nan
def calculateOrderCompletion(D_mov: pd.DataFrame, itemcode: str,
                             itemfield: str = 'ITEMCODE', ordercodefield: str = 'ORDERCODE'):
    """
    Calculate the Order Completion (OC) index of an item.

    Every order containing the item contributes 1 / (number of movement lines
    in that order), so items that complete orders alone score higher.

    Args:
        D_mov (pd.DataFrame): movement list with item-code and order-code columns.
        itemcode (str): item for which the OC index is computed.
        itemfield (str, optional): name of the item-code column. Defaults to 'ITEMCODE'.
        ordercodefield (str, optional): name of the order-code column. Defaults to 'ORDERCODE'.

    Returns:
        OC (float): Order Completion index (0 when the item appears in no order).
    """
    # clean data: keep only the relevant columns, drop missing order codes
    # (order codes were stringified upstream, hence the 'nan' string filter)
    D = D_mov[[itemfield, ordercodefield]]
    D = D[D[ordercodefield] != 'nan']
    D = D.dropna()

    # orders containing the item of interest
    orders = set(D.loc[D[itemfield] == itemcode, ordercodefield])
    if not orders:
        return 0

    # one groupby pass replaces the original per-order Python loop
    # (O(n) instead of O(n * number of orders)); each order contributes
    # 1 / (number of movement lines in the order)
    order_sizes = D[D[ordercodefield].isin(orders)].groupby(ordercodefield).size()
    return float((1.0 / order_sizes).sum())
def fourierAnalysisInventory(inventory: pd.Series):
    """
    Fourier analysis of the inventory curve.

    Args:
        inventory (pd.Series): inventory values over time.

    Returns:
        first_carrier: frequency (in 1/days) with the highest amplitude.
        period: period (in days) associated with that frequency.
    """
    spectrum = ts.fourierAnalysis(np.array(inventory))
    dominant = spectrum.sort_values(by='Amplitude', ascending=False).iloc[0]
    first_carrier = dominant['Frequency_domain_value']  # 1 / days
    return first_carrier, 1 / first_carrier
def updatePopularity(D_SKUs: pd.DataFrame):
    """
    Compute the popularity indexes for every SKU and store them in the dataframe.

    Args:
        D_SKUs (pd.DataFrame): SKU master table with an 'INVENTORY_QTY' column.

    Returns:
        D_SKUs (pd.DataFrame): same dataframe with POP_IN, POP_OUT,
            POP_IN_TOT and POP_OUT_TOT columns filled in.
    """
    # initialise the result columns
    for col in ('POP_IN', 'POP_OUT', 'POP_IN_TOT', 'POP_OUT_TOT'):
        D_SKUs[col] = np.nan

    for idx in D_SKUs.index:
        # rebuild the movement series from the SKU inventory curve;
        # SKUs with no recorded movement keep NaN indexes
        inventory = D_SKUs.loc[idx]['INVENTORY_QTY']
        moves = movementfunctionfromInventory(inventory).dropna()
        if len(moves) > 0:
            pop_in, pop_out, pop_in_tot, pop_out_tot = calculatePopularity(moves['QUANTITY'])
            D_SKUs.at[idx, 'POP_IN'] = pop_in
            D_SKUs.at[idx, 'POP_OUT'] = pop_out
            D_SKUs.at[idx, 'POP_IN_TOT'] = pop_in_tot
            D_SKUs.at[idx, 'POP_OUT_TOT'] = pop_out_tot
    return D_SKUs
def updateCOI(D_SKUs: pd.DataFrame):
    """
    Compute the COI indexes for every SKU and store them in the dataframe.

    Args:
        D_SKUs (pd.DataFrame): SKU master table with an 'INVENTORY_QTY' column.

    Returns:
        D_SKUs (pd.DataFrame): same dataframe with COI_IN and COI_OUT filled in.
    """
    D_SKUs['COI_IN'] = np.nan
    D_SKUs['COI_OUT'] = np.nan

    for idx in D_SKUs.index:
        inventory = D_SKUs.loc[idx]['INVENTORY_QTY']
        # a SKU with no recorded movement keeps NaN indexes
        moves = movementfunctionfromInventory(inventory).dropna()
        if len(moves) > 0:
            coi_in, coi_out = calculateCOI(inventory)
            D_SKUs.at[idx, 'COI_IN'] = coi_in
            D_SKUs.at[idx, 'COI_OUT'] = coi_out
    return D_SKUs
def updateTURN(D_SKUs: pd.DataFrame):
    """
    Compute the TURN index for every SKU and store it in the dataframe.

    Args:
        D_SKUs (pd.DataFrame): SKU master table with an 'INVENTORY_QTY' column.

    Returns:
        D_SKUs (pd.DataFrame): same dataframe with the TURN column filled in.
    """
    D_SKUs['TURN'] = np.nan

    for idx in D_SKUs.index:
        inventory = D_SKUs.loc[idx]['INVENTORY_QTY']
        # a SKU with no recorded movement keeps a NaN TURN
        moves = movementfunctionfromInventory(inventory).dropna()
        if len(moves) > 0:
            D_SKUs.at[idx, 'TURN'] = calculateTurn(inventory)
    return D_SKUs
def updateOrderCompletion(D_SKUs: pd.DataFrame, D_mov: pd.DataFrame):
    """
    Compute the Order Completion (OC) index for every SKU.

    Args:
        D_SKUs (pd.DataFrame): SKU master table with an 'ITEMCODE' column.
        D_mov (pd.DataFrame): movement list with ITEMCODE and ORDERCODE columns.

    Returns:
        D_SKUs (pd.DataFrame): same dataframe with the OC column filled in.
    """
    D_SKUs['OC'] = np.nan
    for idx, sku in D_SKUs.iterrows():
        # OC of each item, computed from the full movement list
        D_SKUs.at[idx, 'OC'] = calculateOrderCompletion(
            D_mov, sku['ITEMCODE'], itemfield='ITEMCODE', ordercodefield='ORDERCODE')
    return D_SKUs
def updateFourieranalysis(D_SKUs: pd.DataFrame):
    """
    Run the Fourier analysis of the inventory curve for every SKU.

    Args:
        D_SKUs (pd.DataFrame): SKU master table with an 'INVENTORY_QTY' column.

    Returns:
        D_SKUs (pd.DataFrame): same dataframe with FOURIER_CARRIER and
            FOURIER_PERIOD columns filled in.
    """
    D_SKUs['FOURIER_CARRIER'] = np.nan
    D_SKUs['FOURIER_PERIOD'] = np.nan

    for idx in D_SKUs.index:
        inventory = D_SKUs.loc[idx]['INVENTORY_QTY']
        # skip SKUs without any recorded movement
        moves = movementfunctionfromInventory(inventory).dropna()
        if len(moves) > 0:
            carrier, period = fourierAnalysisInventory(inventory)
            D_SKUs.at[idx, 'FOURIER_CARRIER'] = carrier
            D_SKUs.at[idx, 'FOURIER_PERIOD'] = period
    return D_SKUs
# %% PARETO AND HISTOGRAM PLOT
def whIndexParetoPlot(D_SKUs: pd.DataFrame, columnIndex: str):
    """
    Build the Pareto curve and the histogram of a warehouse index.

    Args:
        D_SKUs (pd.DataFrame): SKU master table.
        columnIndex (str): name of the index column to plot.

    Returns:
        output_figures (dict): dictionary mapping figure names to figures.
    """
    output_figures = {}

    # Pareto-ordered dataframe; adds the cumulative column f"{columnIndex}_CUM"
    D_pareto = paretoDataframe(D_SKUs, columnIndex)

    # Pareto curve of the cumulative index
    fig_pareto = plt.figure()
    plt.plot(np.arange(0, len(D_pareto)), D_pareto[f"{columnIndex}_CUM"], color='orange')
    plt.title(f"{columnIndex} Pareto curve")
    plt.xlabel("N. of SKUs")
    plt.ylabel("Popularity percentage")
    output_figures[f"{columnIndex}_pareto"] = fig_pareto

    # histogram of the raw index values
    fig_hist = plt.figure()
    plt.hist(D_pareto[columnIndex], color='orange')
    plt.title(f"{columnIndex} histogram")
    plt.xlabel(f"{columnIndex}")
    plt.ylabel("Frequency")
    output_figures[f"{columnIndex}_hist"] = fig_hist

    return output_figures
| 0 | 0 | 0 |
871d15c7fed0e7ecb34d5e18a42a899c21404f83 | 202 | py | Python | data_collection/gazette/spiders/sc_forquilhinha.py | kaiocp/querido-diario | 86004049c6eee305e13066cf3607d30849bb099a | [
"MIT"
] | 454 | 2018-04-07T03:32:57.000Z | 2020-08-17T19:56:22.000Z | data_collection/gazette/spiders/sc_forquilhinha.py | kaiocp/querido-diario | 86004049c6eee305e13066cf3607d30849bb099a | [
"MIT"
] | 254 | 2020-08-18T14:09:43.000Z | 2022-03-28T11:30:51.000Z | data_collection/gazette/spiders/sc_forquilhinha.py | kaiocp/querido-diario | 86004049c6eee305e13066cf3607d30849bb099a | [
"MIT"
] | 183 | 2018-04-11T15:09:37.000Z | 2020-08-15T18:55:11.000Z | from gazette.spiders.base.fecam import FecamGazetteSpider
| 25.25 | 57 | 0.777228 | from gazette.spiders.base.fecam import FecamGazetteSpider
class ScForquilhinhaSpider(FecamGazetteSpider):
    """Gazette spider for Forquilhinha (SC), scraped via the FECAM portal."""
    name = "sc_forquilhinha"
    # FECAM search query selecting this municipality's entity code
    FECAM_QUERY = "cod_entidade:94"
    # presumably the IBGE territory code for Forquilhinha -- TODO confirm
    TERRITORY_ID = "4205456"
7d2d81b868d78570f829b82f7e1a603d8f307c26 | 25,424 | py | Python | causallearn/score/LocalScoreFunction.py | softsys4ai/causal-config-labyrinth | 4f50f9ff15429b0ac6ad0a99fbe4cfdd17e360fc | [
"MIT"
] | 15 | 2022-01-20T12:35:35.000Z | 2022-03-24T16:25:24.000Z | causallearn/score/LocalScoreFunction.py | softsys4ai/unicorn | 4f50f9ff15429b0ac6ad0a99fbe4cfdd17e360fc | [
"MIT"
] | 14 | 2022-01-23T00:20:00.000Z | 2022-02-22T01:40:43.000Z | causallearn/score/LocalScoreFunction.py | softsys4ai/causal-config-labyrinth | 4f50f9ff15429b0ac6ad0a99fbe4cfdd17e360fc | [
"MIT"
] | 1 | 2022-02-23T08:59:24.000Z | 2022-02-23T08:59:24.000Z | import math
import pandas as pd
from causallearn.utils.ScoreUtils import *
def local_score_BIC(Data, i, PAi, parameters=None):
    '''
    Calculate the *negative* local score with BIC for the linear Gaussian continue data case

    Parameters
    ----------
    Data: ndarray, (sample, features)
    i: current index
    PAi: parent indexes
    parameters: lambda_value, the penalty discount of bic

    Returns
    -------
    score: local BIC score (lower is better)
    '''
    if parameters is None:
        lambda_value = 1
    else:
        lambda_value = parameters['lambda_value']

    # use plain ndarrays instead of the deprecated np.matrix class
    Data = np.asarray(Data)
    T = Data.shape[0]
    X = Data[:, [i]]

    if len(PAi) != 0:
        PA = Data[:, list(PAi)]
        D = PA.shape[1]
        # maximum-likelihood residuals of X regressed on PA; lstsq yields the
        # same projection residuals as the original hat-matrix computation
        # (PA @ pdinv(PA'PA) @ PA' @ X) but is numerically stabler and also
        # handles rank-deficient PA
        beta, _, _, _ = np.linalg.lstsq(PA, X, rcond=None)
        E = X - PA @ beta
        sigma2 = np.sum(E ** 2) / T
        # BIC: fit term plus penalised model complexity
        score = T * np.log(sigma2) + lambda_value * D * np.log(T)
    else:
        # no parents: variance of X alone, no complexity penalty
        sigma2 = np.sum(X ** 2) / T
        score = T * np.log(sigma2)
    return score
def local_score_BDeu(Data, i, PAi, parameters=None):
    '''
    Calculate the *negative* local score with BDeu for the discrete case

    Parameters
    ----------
    Data: (sample, features)
    i: current index
    PAi: parent indexes
    parameters:
                 sample_prior: sample prior (equivalent sample size)
                 structure_prior: structure prior
                 r_i_map: number of states of the finite random variable X_{i}

    Returns
    -------
    score: negated local BDeu score
    '''
    if parameters is None:
        sample_prior = 1  # default sample_prior = 1
        structure_prior = 1  # default structure_prior = 1
        # cardinality (number of distinct observed states) of every variable
        r_i_map = {i: len(np.unique(np.asarray(Data[:, i]))) for i in range(Data.shape[1])}
    else:
        sample_prior = parameters['sample_prior']
        structure_prior = parameters['structure_prior']
        r_i_map = parameters['r_i_map']

    # calculate q_{i}: number of joint parent configurations
    q_i = 1
    for pa in PAi:
        q_i *= r_i_map[pa]

    if len(PAi) != 0:
        # calculate N_{ij}: sample count for each observed parent configuration j
        names = ['x{}'.format(i) for i in range(Data.shape[1])]
        Data_pd = pd.DataFrame(Data, columns=names)
        parant_names = ['x{}'.format(i) for i in PAi]
        Data_pd_group_Nij = Data_pd.groupby(parant_names)
        Nij_map = {key: len(Data_pd_group_Nij.indices.get(key)) for key in Data_pd_group_Nij.indices.keys()}
        Nij_map_keys_list = list(Nij_map.keys())
        # calculate N_{ijk}: per parent configuration j, count of each state k of X_i
        Nijk_map = {ij: Data_pd_group_Nij.get_group(ij).groupby('x{}'.format(i)).apply(len).reset_index() for ij in
                    Nij_map.keys()}
        for v in Nijk_map.values():
            v.columns = ['x{}'.format(i), 'times']
    else:
        # calculate N_{ij}: no parents, so a single (empty) configuration holds all samples
        names = ['x{}'.format(i) for i in range(Data.shape[1])]
        Nij_map = {'': len(Data[:, i])}
        Nij_map_keys_list = ['']
        Data_pd = pd.DataFrame(Data, columns=names)
        # calculate N_{ijk}: count of each state k of X_i over the whole sample
        Nijk_map = {ij: Data_pd.groupby('x{}'.format(i)).apply(len).reset_index() for ij in Nij_map_keys_list}
        for v in Nijk_map.values():
            v.columns = ['x{}'.format(i), 'times']

    BDeu_score = 0
    # first term: structure prior on the number of parents
    # NOTE(review): np.log(1 - structure_prior / vm) is -inf when structure_prior == vm
    # (e.g. T == 2 with the default prior) -- confirm the intended parameter range
    vm = Data.shape[0] - 1
    BDeu_score += len(PAi) * np.log(structure_prior / vm) + (vm - len(PAi)) * np.log(1 - (structure_prior / vm))

    # second term: BDeu marginal likelihood as a sum of log-Gamma ratios,
    # one group per parent configuration (uniform Dirichlet hyper-parameters
    # sample_prior / (r_i * q_i))
    for pa in range(len(Nij_map_keys_list)):
        Nij = Nij_map.get(Nij_map_keys_list[pa])
        first_term = math.lgamma(sample_prior / q_i) - math.lgamma(Nij + sample_prior / q_i)

        second_term = 0
        Nijk_list = Nijk_map.get(Nij_map_keys_list[pa])['times'].to_numpy()
        for Nijk in Nijk_list:
            second_term += math.lgamma(Nijk + sample_prior / (r_i_map[i] * q_i)) - math.lgamma(
                sample_prior / (r_i_map[i] * q_i))

        BDeu_score += first_term + second_term

    # negate: callers of this module minimise the returned score
    return -BDeu_score
def local_score_cv_general(Data, Xi, PAi, parameters):
    '''
    Calculate the local score
    using negative k-fold cross-validated log likelihood as the score
    based on a regression model in RKHS

    Parameters
    ----------
    Data: (sample, features)
    Xi: current index
    PAi: parent indexes
    parameters:
               kfold: k-fold cross validation
               lambda: regularization parameter

    Returns
    -------
    score: local score (negative cross-validated log likelihood; lower is better)
    '''
    Data = np.mat(Data)
    PAi = list(PAi)

    T = Data.shape[0]
    X = Data[:, Xi]
    var_lambda = parameters['lambda']  # regularization parameter
    k = parameters['kfold']  # k-fold cross validation
    n0 = math.floor(T / k)  # size of one validation fold (last fold absorbs the remainder)
    gamma = 0.01  # noise-variance hyper-parameter of the RKHS regression
    Thresh = 1E-5

    if (len(PAi)):
        PA = Data[:, PAi]

        # set the kernel for X: Gaussian kernel, bandwidth from the median
        # pairwise distance heuristic (doubled)
        GX = np.sum(np.multiply(X, X), axis=1)
        Q = np.tile(GX, (1, T))
        R = np.tile(GX.T, (T, 1))
        dists = Q + R - 2 * X * X.T
        dists = dists - np.tril(dists)
        dists = np.reshape(dists, (T ** 2, 1))
        width = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0])  # median value
        width = width * 2
        theta = 1 / (width ** 2)

        Kx, _ = kernel(X, X, (theta, 1))  # Gaussian kernel
        H0 = np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / (T)  # for centering of the data in feature space
        Kx = H0 * Kx * H0  # kernel matrix for X

        # eig_Kx, eix = eigdec((Kx + Kx.T)/2, np.min([400, math.floor(T/2)]), evals_only=False) # /2
        # IIx = np.where(eig_Kx > np.max(eig_Kx) * Thresh)[0]
        # eig_Kx = eig_Kx[IIx]
        # eix = eix[:, IIx]
        # mx = len(IIx)

        # set the kernel for PA: product of per-column Gaussian kernels
        Kpa = np.mat(np.ones((T, T)))

        for m in range(PA.shape[1]):
            G = np.sum((np.multiply(PA[:, m], PA[:, m])), axis=1)
            Q = np.tile(G, (1, T))
            R = np.tile(G.T, (T, 1))
            dists = Q + R - 2 * PA[:, m] * PA[:, m].T
            dists = dists - np.tril(dists)
            dists = np.reshape(dists, (T ** 2, 1))
            width = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0])  # median value
            width = width * 2
            theta = 1 / (width ** 2)
            Kpa = np.multiply(Kpa, kernel(PA[:, m], PA[:, m], (theta, 1))[0])

        H0 = np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / (T)  # for centering of the data in feature space
        Kpa = H0 * Kpa * H0  # kernel matrix for PA

        CV = 0
        for kk in range(k):
            # slice the centred kernel matrices into training/validation blocks;
            # the first/last folds are contiguous, middle folds need np.ix_ masks
            if (kk == 0):
                Kx_te = Kx[0:n0, 0:n0]
                Kx_tr = Kx[n0: T, n0: T]
                Kx_tr_te = Kx[n0: T, 0: n0]
                Kpa_te = Kpa[0:n0, 0: n0]
                Kpa_tr = Kpa[n0: T, n0: T]
                Kpa_tr_te = Kpa[n0: T, 0: n0]
                nv = n0  # sample size of validated data
            if (kk == k - 1):
                Kx_te = Kx[kk * n0:T, kk * n0: T]
                Kx_tr = Kx[0:kk * n0, 0: kk * n0]
                Kx_tr_te = Kx[0:kk * n0, kk * n0: T]
                Kpa_te = Kpa[kk * n0:T, kk * n0: T]
                Kpa_tr = Kpa[0: kk * n0, 0: kk * n0]
                Kpa_tr_te = Kpa[0:kk * n0, kk * n0: T]
                nv = T - kk * n0
            if (kk < k - 1 and kk > 0):
                Kx_te = Kx[kk * n0: (kk + 1) * n0, kk * n0: (kk + 1) * n0]
                Kx_tr = Kx[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
                                  np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]))]
                Kx_tr_te = Kx[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
                                     np.arange(kk * n0, (kk + 1) * n0))]
                Kpa_te = Kpa[kk * n0: (kk + 1) * n0, kk * n0: (kk + 1) * n0]
                Kpa_tr = Kpa[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
                                    np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]))]
                Kpa_tr_te = Kpa[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
                                       np.arange(kk * n0, (kk + 1) * n0))]
                nv = n0
            n1 = T - nv
            # Gaussian-process style predictive log-likelihood of the validation
            # fold given the training fold (see the MATLAB expression kept below);
            # tmp1 is the regularised inverse of the training parent kernel
            tmp1 = pdinv(Kpa_tr + n1 * var_lambda * np.mat(np.eye(n1)))
            tmp2 = tmp1 * Kx_tr * tmp1
            tmp3 = tmp1 * pdinv(np.mat(np.eye(n1)) + n1 * var_lambda ** 2 / gamma * tmp2) * tmp1
            A = (Kx_te + Kpa_tr_te.T * tmp2 * Kpa_tr_te - 2 * Kx_tr_te.T * tmp1 * Kpa_tr_te
                 - n1 * var_lambda ** 2 / gamma * Kx_tr_te.T * tmp3 * Kx_tr_te
                 - n1 * var_lambda ** 2 / gamma * Kpa_tr_te.T * tmp1 * Kx_tr * tmp3 * Kx_tr * tmp1 * Kpa_tr_te
                 + 2 * n1 * var_lambda ** 2 / gamma * Kx_tr_te.T * tmp3 * Kx_tr * tmp1 * Kpa_tr_te) / gamma
            B = n1 * var_lambda ** 2 / gamma * tmp2 + np.mat(np.eye(n1))
            L = np.linalg.cholesky(B)
            C = np.sum(np.log(np.diag(L)))  # 0.5 * log det(B) via the Cholesky factor

            # CV = CV + (nv*nv*log(2*pi) + nv*C + nv*mx*log(gamma) + trace(A))/2;
            CV = CV + (nv * nv * np.log(2 * np.pi) + nv * C + np.trace(A)) / 2

        CV = CV / k
    else:
        # no parents: score X against the (centred) kernel of X alone
        # set the kernel for X
        GX = np.sum(np.multiply(X, X), axis=1)
        Q = np.tile(GX, (1, T))
        R = np.tile(GX.T, (T, 1))
        dists = Q + R - 2 * X * X.T
        dists = dists - np.tril(dists)
        dists = np.reshape(dists, (T ** 2, 1))
        width = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0])  # median value
        width = width * 2
        theta = 1 / (width ** 2)

        Kx, _ = kernel(X, X, (theta, 1))  # Gaussian kernel
        H0 = np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / (T)  # for centering of the data in feature space
        Kx = H0 * Kx * H0  # kernel matrix for X

        # eig_Kx, eix = eigdec((Kx + Kx.T) / 2, np.min([400, math.floor(T / 2)]), evals_only=False)  # /2
        # IIx = np.where(eig_Kx > np.max(eig_Kx) * Thresh)[0]
        # mx = len(IIx)

        CV = 0
        for kk in range(k):
            # same fold slicing as above, but only the X kernel is needed
            if (kk == 0):
                Kx_te = Kx[kk * n0: (kk + 1) * n0, kk * n0: (kk + 1) * n0]
                Kx_tr = Kx[(kk + 1) * n0:T, (kk + 1) * n0: T]
                Kx_tr_te = Kx[(kk + 1) * n0:T, kk * n0: (kk + 1) * n0]
                nv = n0
            if (kk == k - 1):
                Kx_te = Kx[kk * n0: T, kk * n0: T]
                Kx_tr = Kx[0: kk * n0, 0: kk * n0]
                Kx_tr_te = Kx[0:kk * n0, kk * n0: T]
                nv = T - kk * n0
            if (kk < k - 1 and kk > 0):
                Kx_te = Kx[kk * n0: (kk + 1) * n0, kk * n0: (kk + 1) * n0]
                Kx_tr = Kx[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
                                  np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]))]
                Kx_tr_te = Kx[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
                                     np.arange(kk * n0, (kk + 1) * n0))]
                nv = n0
            n1 = T - nv
            A = (Kx_te - 1 / (gamma * n1) * Kx_tr_te.T * pdinv(
                np.mat(np.eye(n1)) + 1 / (gamma * n1) * Kx_tr) * Kx_tr_te) / gamma
            B = 1 / (gamma * n1) * Kx_tr + np.mat(np.eye(n1))
            L = np.linalg.cholesky(B)
            C = np.sum(np.log(np.diag(L)))  # 0.5 * log det(B) via the Cholesky factor

            # CV = CV + (nv*nv*log(2*pi) + nv*C + nv*mx*log(gamma) + trace(A))/2;
            CV = CV + (nv * nv * np.log(2 * np.pi) + nv * C + np.trace(A)) / 2

        CV = CV / k

    score = CV  # negative cross-validated likelihood
    return score
def local_score_cv_multi(Data, Xi, PAi, parameters):
    '''
    Calculate the local score
    using negative k-fold cross-validated log likelihood as the score,
    based on a regression model in RKHS,
    for variables with multi-variate dimensions.
    Parameters
    ----------
    Data: (sample, features)
    Xi: current variable index (key into parameters['dlabel'])
    PAi: parent indexes (keys into parameters['dlabel'])
    parameters:
        kfold: k-fold cross validation
        lambda: regularization parameter
        dlabel: for variables with multi-dimensions,
            indicate which dimensions belong to the i-th variable.
    Returns
    -------
    score: local score (lower is better; negative cross-validated log likelihood)
    '''
    T = Data.shape[0]
    X = Data[:, parameters['dlabel'][Xi]]
    var_lambda = parameters['lambda']  # regularization parameter
    k = parameters['kfold']  # k-fold cross validation
    n0 = math.floor(T / k)  # fold size (last fold absorbs the remainder)
    gamma = 0.01  # noise-variance hyper-parameter of the RKHS regression
    Thresh = 1E-5  # NOTE(review): unused in this function; kept for parity with siblings
    if (len(PAi)):
        # set the kernel for X: pairwise squared distances via ||x||^2 + ||y||^2 - 2<x,y>
        GX = np.sum(np.multiply(X, X), axis=1)
        Q = np.tile(GX, (1, T))
        R = np.tile(GX.T, (T, 1))
        dists = Q + R - 2 * X * X.T
        dists = dists - np.tril(dists)  # keep strict upper triangle (each pair counted once)
        dists = np.reshape(dists, (T ** 2, 1))
        width = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0])  # median heuristic
        width = width * 3  # widened bandwidth for the multi-variate case
        theta = 1 / (width ** 2 * X.shape[1])  # scale bandwidth by dimensionality
        Kx, _ = kernel(X, X, (theta, 1))  # Gaussian kernel
        H0 = np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / (T)  # for centering of the data in feature space
        Kx = H0 * Kx * H0  # kernel matrix for X
        # set the kernel for PA: product of per-parent Gaussian kernels
        Kpa = np.mat(np.ones((T, T)))
        for m in range(len(PAi)):
            PA = Data[:, parameters['dlabel'][PAi[m]]]
            G = np.sum((np.multiply(PA, PA)), axis=1)
            Q = np.tile(G, (1, T))
            R = np.tile(G.T, (T, 1))
            dists = Q + R - 2 * PA * PA.T
            dists = dists - np.tril(dists)
            dists = np.reshape(dists, (T ** 2, 1))
            width = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0])  # median heuristic
            width = width * 3  # widened bandwidth for the multi-variate case
            theta = 1 / (width ** 2 * PA.shape[1])
            Kpa = np.multiply(Kpa, kernel(PA, PA, (theta, 1))[0])
        H0 = np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / (T)  # for centering of the data in feature space
        Kpa = H0 * Kpa * H0  # kernel matrix for PA
        CV = 0
        # k-fold CV: the three branches below slice out the test fold (te),
        # training block (tr) and the train/test cross block (tr_te)
        for kk in range(k):
            if (kk == 0):
                Kx_te = Kx[0:n0, 0:n0]
                Kx_tr = Kx[n0: T, n0: T]
                Kx_tr_te = Kx[n0: T, 0: n0]
                Kpa_te = Kpa[0:n0, 0: n0]
                Kpa_tr = Kpa[n0: T, n0: T]
                Kpa_tr_te = Kpa[n0: T, 0: n0]
                nv = n0  # sample size of validated data
            if (kk == k - 1):
                # last fold: runs to T, so it absorbs the remainder when k does not divide T
                Kx_te = Kx[kk * n0:T, kk * n0: T]
                Kx_tr = Kx[0:kk * n0, 0: kk * n0]
                Kx_tr_te = Kx[0:kk * n0, kk * n0: T]
                Kpa_te = Kpa[kk * n0:T, kk * n0: T]
                Kpa_tr = Kpa[0: kk * n0, 0: kk * n0]
                Kpa_tr_te = Kpa[0:kk * n0, kk * n0: T]
                nv = T - kk * n0
            if (kk < k - 1 and kk > 0):
                Kx_te = Kx[kk * n0: (kk + 1) * n0, kk * n0: (kk + 1) * n0]
                Kx_tr = Kx[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
                                  np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]))]
                Kx_tr_te = Kx[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
                                     np.arange(kk * n0, (kk + 1) * n0))]
                Kpa_te = Kpa[kk * n0: (kk + 1) * n0, kk * n0: (kk + 1) * n0]
                Kpa_tr = Kpa[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
                                    np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]))]
                Kpa_tr_te = Kpa[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
                                       np.arange(kk * n0, (kk + 1) * n0))]
                nv = n0
            n1 = T - nv  # training-set size for this fold
            # closed-form kernel ridge regression of X on PA on the training fold
            tmp1 = pdinv(Kpa_tr + n1 * var_lambda * np.mat(np.eye(n1)))
            tmp2 = tmp1 * Kx_tr * tmp1
            tmp3 = tmp1 * pdinv(np.mat(np.eye(n1)) + n1 * var_lambda ** 2 / gamma * tmp2) * tmp1
            # A: quadratic form of the held-out residual under the predictive Gaussian
            A = (Kx_te + Kpa_tr_te.T * tmp2 * Kpa_tr_te - 2 * Kx_tr_te.T * tmp1 * Kpa_tr_te
                 - n1 * var_lambda ** 2 / gamma * Kx_tr_te.T * tmp3 * Kx_tr_te
                 - n1 * var_lambda ** 2 / gamma * Kpa_tr_te.T * tmp1 * Kx_tr * tmp3 * Kx_tr * tmp1 * Kpa_tr_te
                 + 2 * n1 * var_lambda ** 2 / gamma * Kx_tr_te.T * tmp3 * Kx_tr * tmp1 * Kpa_tr_te) / gamma
            B = n1 * var_lambda ** 2 / gamma * tmp2 + np.mat(np.eye(n1))
            L = np.linalg.cholesky(B)
            C = np.sum(np.log(np.diag(L)))  # 0.5*log det(B) via Cholesky
            # CV = CV + (nv*nv*log(2*pi) + nv*C + nv*mx*log(gamma) + trace(A))/2;
            CV = CV + (nv * nv * np.log(2 * np.pi) + nv * C + np.trace(A)) / 2
        CV = CV / k
    else:
        # no parents: score X against the trivial (empty) model
        # set the kernel for X
        GX = np.sum(np.multiply(X, X), axis=1)
        Q = np.tile(GX, (1, T))
        R = np.tile(GX.T, (T, 1))
        dists = Q + R - 2 * X * X.T
        dists = dists - np.tril(dists)
        dists = np.reshape(dists, (T ** 2, 1))
        width = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0])  # median heuristic
        width = width * 3  # widened bandwidth for the multi-variate case
        theta = 1 / (width ** 2 * X.shape[1])
        Kx, _ = kernel(X, X, (theta, 1))  # Gaussian kernel
        H0 = np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / (T)  # for centering of the data in feature space
        Kx = H0 * Kx * H0  # kernel matrix for X
        CV = 0
        for kk in range(k):
            if (kk == 0):
                Kx_te = Kx[kk * n0: (kk + 1) * n0, kk * n0: (kk + 1) * n0]
                Kx_tr = Kx[(kk + 1) * n0:T, (kk + 1) * n0: T]
                Kx_tr_te = Kx[(kk + 1) * n0:T, kk * n0: (kk + 1) * n0]
                nv = n0
            if (kk == k - 1):
                Kx_te = Kx[kk * n0: T, kk * n0: T]
                Kx_tr = Kx[0: kk * n0, 0: kk * n0]
                Kx_tr_te = Kx[0:kk * n0, kk * n0: T]
                nv = T - kk * n0
            if (kk < k - 1 and kk > 0):
                Kx_te = Kx[kk * n0: (kk + 1) * n0, kk * n0: (kk + 1) * n0]
                Kx_tr = Kx[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
                                  np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]))]
                Kx_tr_te = Kx[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
                                     np.arange(kk * n0, (kk + 1) * n0))]
                nv = n0
            n1 = T - nv
            A = (Kx_te - 1 / (gamma * n1) * Kx_tr_te.T * pdinv(
                np.mat(np.eye(n1)) + 1 / (gamma * n1) * Kx_tr) * Kx_tr_te) / gamma
            B = 1 / (gamma * n1) * Kx_tr + np.mat(np.eye(n1))
            L = np.linalg.cholesky(B)
            C = np.sum(np.log(np.diag(L)))  # 0.5*log det(B) via Cholesky
            # CV = CV + (nv*nv*log(2*pi) + nv*C + nv*mx*log(gamma) + trace(A))/2;
            CV = CV + (nv * nv * np.log(2 * np.pi) + nv * C + np.trace(A)) / 2
        CV = CV / k
    score = CV  # negative cross-validated likelihood
    return score
def local_score_marginal_general(Data, Xi, PAi, parameters):
    '''
    Calculate the local score by negative marginal likelihood,
    based on a regression model in RKHS
    (GP regression of the empirical kernel map of X on PA).
    Parameters
    ----------
    Data: (sample, features)
    Xi: current variable index (column selector into Data)
    PAi: parent indexes
    parameters: None (unused; kept for a uniform score-function signature)
    Returns
    -------
    score: local score (negative log marginal likelihood; lower is better)
    '''
    T = Data.shape[0]
    X = Data[:, Xi]
    dX = X.shape[1]  # NOTE(review): unused; kept for parity with the original
    # set the kernel for X: pairwise squared distances via ||x||^2 + ||y||^2 - 2<x,y>
    GX = np.sum(np.multiply(X, X), axis=1)
    Q = np.tile(GX, (1, T))
    R = np.tile(GX.T, (T, 1))
    dists = Q + R - 2 * X * X.T
    dists = dists - np.tril(dists)  # keep strict upper triangle (each pair counted once)
    dists = np.reshape(dists, (T ** 2, 1))
    width = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0])  # median heuristic
    width = width * 2.5  # kernel width
    theta = 1 / (width ** 2)
    H = np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / T  # centering matrix
    Kx, _ = kernel(X, X, (theta, 1))
    Kx = H * Kx * H  # centered kernel matrix for X
    Thresh = 1E-5
    # low-rank eigendecomposition of the (symmetrized) kernel; keep at most min(400, T/4) components
    eig_Kx, eix = eigdec((Kx + Kx.T) / 2, np.min([400, math.floor(T / 4)]), evals_only=False)  # /2
    IIx = np.where(eig_Kx > np.max(eig_Kx) * Thresh)[0]  # drop numerically negligible eigenvalues
    eig_Kx = eig_Kx[IIx]
    eix = eix[:, IIx]
    if (len(PAi)):
        PA = Data[:, PAi]
        widthPA = np.mat(np.empty((PA.shape[1], 1)))
        # set the kernel width for each parent column via the median heuristic
        for m in range(PA.shape[1]):
            G = np.sum((np.multiply(PA[:, m], PA[:, m])), axis=1)
            Q = np.tile(G, (1, T))
            R = np.tile(G.T, (T, 1))
            dists = Q + R - 2 * PA[:, m] * PA[:, m].T
            dists = dists - np.tril(dists)
            dists = np.reshape(dists, (T ** 2, 1))
            widthPA[m] = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0])
        widthPA = widthPA * 2.5  # kernel width
        covfunc = np.asarray(['covSum', ['covSEard', 'covNoise']])  # ARD squared-exponential + noise
        logtheta0 = np.vstack([np.log(widthPA), 0, np.log(np.sqrt(0.1))])  # initial GP hyper-parameters
        # optimize the GP marginal likelihood; targets are scaled kernel eigen-features of X
        logtheta, fvals, iter = minimize(logtheta0, 'gpr_multi_new', -300, covfunc, PA,
                                         2 * np.sqrt(T) * eix * np.diag(np.sqrt(eig_Kx)) / np.sqrt(eig_Kx[0]))
        nlml, dnlml = gpr_multi_new(logtheta, covfunc, PA,
                                    2 * np.sqrt(T) * eix * np.diag(np.sqrt(eig_Kx)) / np.sqrt(eig_Kx[0]),
                                    nargout=2)
    else:
        # no parents: regress on a constant (all-zero) dummy input
        covfunc = np.asarray(['covSum', ['covSEard', 'covNoise']])
        PA = np.mat(np.zeros((T, 1)))
        logtheta0 = np.mat([100, 0, np.log(np.sqrt(0.1))]).T  # huge width ~= constant covariate
        logtheta, fvals, iter = minimize(logtheta0, 'gpr_multi_new', -300, covfunc, PA,
                                         2 * np.sqrt(T) * eix * np.diag(np.sqrt(eig_Kx)) / np.sqrt(eig_Kx[0]))
        nlml, dnlml = gpr_multi_new(logtheta, covfunc, PA,
                                    2 * np.sqrt(T) * eix * np.diag(np.sqrt(eig_Kx)) / np.sqrt(eig_Kx[0]),
                                    nargout=2)
    score = nlml  # negative log-likelihood
    return score
def local_score_marginal_multi(Data, Xi, PAi, parameters):
    '''
    Calculate the local score by negative marginal likelihood,
    based on a regression model in RKHS,
    for variables with multi-variate dimensions.
    Parameters
    ----------
    Data: (sample, features)
    Xi: current variable index (key into parameters['dlabel'])
    PAi: parent indexes (keys into parameters['dlabel'])
    parameters:
        dlabel: for variables with multi-dimensions,
            indicate which dimensions belong to the i-th variable.
    Returns
    -------
    score: local score (negative log marginal likelihood; lower is better)
    '''
    T = Data.shape[0]
    X = Data[:, parameters['dlabel'][Xi]]
    dX = X.shape[1]  # NOTE(review): unused; kept for parity with the original
    # set the kernel for X: pairwise squared distances via ||x||^2 + ||y||^2 - 2<x,y>
    GX = np.sum(np.multiply(X, X), axis=1)
    Q = np.tile(GX, (1, T))
    R = np.tile(GX.T, (T, 1))
    dists = Q + R - 2 * X * X.T
    dists = dists - np.tril(dists)  # keep strict upper triangle (each pair counted once)
    dists = np.reshape(dists, (T ** 2, 1))
    widthX = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0])  # median heuristic
    widthX = widthX * 2.5  # kernel width
    theta = 1 / (widthX ** 2)
    H = np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / T  # centering matrix
    Kx, _ = kernel(X, X, (theta, 1))
    Kx = H * Kx * H  # centered kernel matrix for X
    Thresh = 1E-5
    # low-rank eigendecomposition of the (symmetrized) kernel; keep at most min(400, T/4) components
    eig_Kx, eix = eigdec((Kx + Kx.T) / 2, np.min([400, math.floor(T / 4)]), evals_only=False)  # /2
    IIx = np.where(eig_Kx > np.max(eig_Kx) * Thresh)[0]  # drop numerically negligible eigenvalues
    eig_Kx = eig_Kx[IIx]
    eix = eix[:, IIx]
    if (len(PAi)):
        widthPA_all = np.mat(np.empty((1, 0)))
        # stack all parents' dimensions side by side; one median-heuristic width per parent,
        # replicated across that parent's dimensions
        PA_all = np.mat(np.empty((Data.shape[0], 0)))
        for m in range(len(PAi)):
            PA = Data[:, parameters['dlabel'][PAi[m]]]
            PA_all = np.hstack([PA_all, PA])
            G = np.sum((np.multiply(PA, PA)), axis=1)
            Q = np.tile(G, (1, T))
            R = np.tile(G.T, (T, 1))
            dists = Q + R - 2 * PA * PA.T
            dists = dists - np.tril(dists)
            dists = np.reshape(dists, (T ** 2, 1))
            widthPA = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0])
            widthPA_all = np.hstack(
                [widthPA_all, widthPA * np.mat(np.ones((1, np.size(parameters['dlabel'][PAi[m]]))))])
        widthPA_all = widthPA_all * 2.5  # kernel width
        covfunc = np.asarray(['covSum', ['covSEard', 'covNoise']])  # ARD squared-exponential + noise
        logtheta0 = np.vstack([np.log(widthPA_all.T), 0, np.log(np.sqrt(0.1))])  # initial GP hyper-parameters
        # optimize the GP marginal likelihood; targets are scaled kernel eigen-features of X
        logtheta, fvals, iter = minimize(logtheta0, 'gpr_multi_new', -300, covfunc, PA_all,
                                         2 * np.sqrt(T) * eix * np.diag(np.sqrt(eig_Kx)) / np.sqrt(eig_Kx[0]))
        nlml, dnlml = gpr_multi_new(logtheta, covfunc, PA_all,
                                    2 * np.sqrt(T) * eix * np.diag(np.sqrt(eig_Kx)) / np.sqrt(eig_Kx[0]),
                                    nargout=2)
    else:
        # no parents: regress on a constant (all-zero) dummy input
        covfunc = np.asarray(['covSum', ['covSEard', 'covNoise']])
        PA = np.mat(np.zeros((T, 1)))
        logtheta0 = np.mat([100, 0, np.log(np.sqrt(0.1))]).T  # huge width ~= constant covariate
        logtheta, fvals, iter = minimize(logtheta0, 'gpr_multi_new', -300, covfunc, PA,
                                         2 * np.sqrt(T) * eix * np.diag(np.sqrt(eig_Kx)) / np.sqrt(eig_Kx[0]))
        nlml, dnlml = gpr_multi_new(logtheta, covfunc, PA,
                                    2 * np.sqrt(T) * eix * np.diag(np.sqrt(eig_Kx)) / np.sqrt(eig_Kx[0]),
                                    nargout=2)
    score = nlml  # negative log-likelihood
    return score
| 40.484076 | 115 | 0.48411 | import math
import pandas as pd
from causallearn.utils.ScoreUtils import *
def local_score_BIC(Data, i, PAi, parameters=None):
    '''
    Calculate the *negative* local score with BIC for the linear Gaussian continuous data case.
    Parameters
    ----------
    Data: ndarray, (sample, features)
    i: current index
    PAi: parent indexes
    parameters: dict with 'lambda_value', the penalty discount of BIC
        (defaults to 1 when parameters is None)
    Returns
    -------
    score: local BIC score (lower is better)
    '''
    if parameters is None:
        lambda_value = 1
    else:
        lambda_value = parameters['lambda_value']
    Data = np.mat(Data)  # matrix semantics: '*' below is matrix multiplication
    T = Data.shape[0]
    X = Data[:, i]
    if len(PAi) != 0:
        PA = Data[:, PAi]
        D = PA.shape[1]  # number of regressors (model dimension penalized by BIC)
        # derive the parameters by maximum likelihood:
        # H is the hat (projection) matrix of OLS regression of X on PA
        H = PA * pdinv(PA.T * PA) * PA.T
        E = X - H * X  # residuals
        sigma2 = np.sum(np.power(E, 2)) / T  # ML estimate of the noise variance
        # BIC = T*log(sigma2) + lambda * D * log(T)
        score = T * np.log(sigma2) + lambda_value * D * np.log(T)
    else:
        # no parents: variance around zero (data assumed centered)
        sigma2 = np.sum(np.power(X, 2)) / T
        # BIC
        score = T * np.log(sigma2)
    return score
def local_score_BDeu(Data, i, PAi, parameters=None):
    '''
    Calculate the *negative* local score with BDeu for the discrete case.
    Parameters
    ----------
    Data: (sample, features) array of discrete values
    i: current index
    PAi: parent indexes
    parameters:
        sample_prior: sample prior (equivalent sample size); default 1
        structure_prior: structure prior; default 1
        r_i_map: number of states of the finite random variable X_{i}
            (computed from the data when parameters is None)
    Returns
    -------
    score: negative local BDeu score (lower is better)
    '''
    if parameters is None:
        sample_prior = 1  # default sample_prior = 1
        structure_prior = 1  # default structure_prior = 1
        r_i_map = {col: len(np.unique(np.asarray(Data[:, col]))) for col in range(Data.shape[1])}
    else:
        sample_prior = parameters['sample_prior']
        structure_prior = parameters['structure_prior']
        r_i_map = parameters['r_i_map']
    # q_i: number of joint parent configurations (product of parent cardinalities)
    q_i = 1
    for pa in PAi:
        q_i *= r_i_map[pa]
    names = ['x{}'.format(col) for col in range(Data.shape[1])]
    Data_pd = pd.DataFrame(Data, columns=names)
    if len(PAi) != 0:
        # N_{ij}: count of samples in each observed parent configuration j
        parant_names = ['x{}'.format(pa) for pa in PAi]
        Data_pd_group_Nij = Data_pd.groupby(parant_names)
        Nij_map = {key: len(Data_pd_group_Nij.indices.get(key)) for key in Data_pd_group_Nij.indices.keys()}
        # N_{ijk}: count of each child state k within parent configuration j
        Nijk_map = {ij: Data_pd_group_Nij.get_group(ij).groupby('x{}'.format(i)).apply(len).reset_index() for ij in
                    Nij_map.keys()}
        for v in Nijk_map.values():
            v.columns = ['x{}'.format(i), 'times']
    else:
        # no parents: a single (empty) configuration containing all samples
        Nij_map = {'': len(Data[:, i])}
        Nijk_map = {ij: Data_pd.groupby('x{}'.format(i)).apply(len).reset_index() for ij in Nij_map.keys()}
        for v in Nijk_map.values():
            v.columns = ['x{}'.format(i), 'times']
    BDeu_score = 0
    # structure-prior term (binomial prior over edges)
    vm = Data.shape[0] - 1
    BDeu_score += len(PAi) * np.log(structure_prior / vm) + (vm - len(PAi)) * np.log(1 - (structure_prior / vm))
    # likelihood term: Dirichlet hyper-parameters are loop invariants, hoisted once
    alpha_ij = sample_prior / q_i
    alpha_ijk = sample_prior / (r_i_map[i] * q_i)
    lgamma_alpha_ij = math.lgamma(alpha_ij)
    lgamma_alpha_ijk = math.lgamma(alpha_ijk)
    for ij_key, Nij in Nij_map.items():
        first_term = lgamma_alpha_ij - math.lgamma(Nij + alpha_ij)
        Nijk_list = Nijk_map[ij_key]['times'].to_numpy()
        second_term = sum(math.lgamma(Nijk + alpha_ijk) - lgamma_alpha_ijk for Nijk in Nijk_list)
        BDeu_score += first_term + second_term
    # negate: callers minimize this score
    return -BDeu_score
def local_score_cv_general(Data, Xi, PAi, parameters):
    '''
    Calculate the local score
    using negative k-fold cross-validated log likelihood as the score,
    based on a regression model in RKHS.
    Parameters
    ----------
    Data: (sample, features)
    Xi: current variable index (column selector into Data)
    PAi: parent indexes
    parameters:
        kfold: k-fold cross validation
        lambda: regularization parameter
    Returns
    -------
    score: local score (lower is better; negative cross-validated log likelihood)
    '''
    Data = np.mat(Data)  # matrix semantics: '*' below is matrix multiplication
    PAi = list(PAi)
    T = Data.shape[0]
    X = Data[:, Xi]
    var_lambda = parameters['lambda']  # regularization parameter
    k = parameters['kfold']  # k-fold cross validation
    n0 = math.floor(T / k)  # fold size (last fold absorbs the remainder)
    gamma = 0.01  # noise-variance hyper-parameter of the RKHS regression
    Thresh = 1E-5  # NOTE(review): unused in this function; kept for parity with siblings
    if (len(PAi)):
        PA = Data[:, PAi]
        # set the kernel for X: pairwise squared distances via ||x||^2 + ||y||^2 - 2<x,y>
        GX = np.sum(np.multiply(X, X), axis=1)
        Q = np.tile(GX, (1, T))
        R = np.tile(GX.T, (T, 1))
        dists = Q + R - 2 * X * X.T
        dists = dists - np.tril(dists)  # keep strict upper triangle (each pair counted once)
        dists = np.reshape(dists, (T ** 2, 1))
        width = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0])  # median heuristic
        width = width * 2
        theta = 1 / (width ** 2)
        Kx, _ = kernel(X, X, (theta, 1))  # Gaussian kernel
        H0 = np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / (T)  # for centering of the data in feature space
        Kx = H0 * Kx * H0  # kernel matrix for X
        # eig_Kx, eix = eigdec((Kx + Kx.T)/2, np.min([400, math.floor(T/2)]), evals_only=False)  # /2
        # IIx = np.where(eig_Kx > np.max(eig_Kx) * Thresh)[0]
        # eig_Kx = eig_Kx[IIx]
        # eix = eix[:, IIx]
        # mx = len(IIx)
        # set the kernel for PA: product of per-column Gaussian kernels
        Kpa = np.mat(np.ones((T, T)))
        for m in range(PA.shape[1]):
            G = np.sum((np.multiply(PA[:, m], PA[:, m])), axis=1)
            Q = np.tile(G, (1, T))
            R = np.tile(G.T, (T, 1))
            dists = Q + R - 2 * PA[:, m] * PA[:, m].T
            dists = dists - np.tril(dists)
            dists = np.reshape(dists, (T ** 2, 1))
            width = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0])  # median heuristic
            width = width * 2
            theta = 1 / (width ** 2)
            Kpa = np.multiply(Kpa, kernel(PA[:, m], PA[:, m], (theta, 1))[0])
        H0 = np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / (T)  # for centering of the data in feature space
        Kpa = H0 * Kpa * H0  # kernel matrix for PA
        CV = 0
        # k-fold CV: the three branches below slice out the test fold (te),
        # training block (tr) and the train/test cross block (tr_te)
        for kk in range(k):
            if (kk == 0):
                Kx_te = Kx[0:n0, 0:n0]
                Kx_tr = Kx[n0: T, n0: T]
                Kx_tr_te = Kx[n0: T, 0: n0]
                Kpa_te = Kpa[0:n0, 0: n0]
                Kpa_tr = Kpa[n0: T, n0: T]
                Kpa_tr_te = Kpa[n0: T, 0: n0]
                nv = n0  # sample size of validated data
            if (kk == k - 1):
                # last fold: runs to T, so it absorbs the remainder when k does not divide T
                Kx_te = Kx[kk * n0:T, kk * n0: T]
                Kx_tr = Kx[0:kk * n0, 0: kk * n0]
                Kx_tr_te = Kx[0:kk * n0, kk * n0: T]
                Kpa_te = Kpa[kk * n0:T, kk * n0: T]
                Kpa_tr = Kpa[0: kk * n0, 0: kk * n0]
                Kpa_tr_te = Kpa[0:kk * n0, kk * n0: T]
                nv = T - kk * n0
            if (kk < k - 1 and kk > 0):
                Kx_te = Kx[kk * n0: (kk + 1) * n0, kk * n0: (kk + 1) * n0]
                Kx_tr = Kx[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
                                  np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]))]
                Kx_tr_te = Kx[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
                                     np.arange(kk * n0, (kk + 1) * n0))]
                Kpa_te = Kpa[kk * n0: (kk + 1) * n0, kk * n0: (kk + 1) * n0]
                Kpa_tr = Kpa[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
                                    np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]))]
                Kpa_tr_te = Kpa[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
                                       np.arange(kk * n0, (kk + 1) * n0))]
                nv = n0
            n1 = T - nv  # training-set size for this fold
            # closed-form kernel ridge regression of X on PA on the training fold
            tmp1 = pdinv(Kpa_tr + n1 * var_lambda * np.mat(np.eye(n1)))
            tmp2 = tmp1 * Kx_tr * tmp1
            tmp3 = tmp1 * pdinv(np.mat(np.eye(n1)) + n1 * var_lambda ** 2 / gamma * tmp2) * tmp1
            # A: quadratic form of the held-out residual under the predictive Gaussian
            A = (Kx_te + Kpa_tr_te.T * tmp2 * Kpa_tr_te - 2 * Kx_tr_te.T * tmp1 * Kpa_tr_te
                 - n1 * var_lambda ** 2 / gamma * Kx_tr_te.T * tmp3 * Kx_tr_te
                 - n1 * var_lambda ** 2 / gamma * Kpa_tr_te.T * tmp1 * Kx_tr * tmp3 * Kx_tr * tmp1 * Kpa_tr_te
                 + 2 * n1 * var_lambda ** 2 / gamma * Kx_tr_te.T * tmp3 * Kx_tr * tmp1 * Kpa_tr_te) / gamma
            B = n1 * var_lambda ** 2 / gamma * tmp2 + np.mat(np.eye(n1))
            L = np.linalg.cholesky(B)
            C = np.sum(np.log(np.diag(L)))  # 0.5*log det(B) via Cholesky
            # CV = CV + (nv*nv*log(2*pi) + nv*C + nv*mx*log(gamma) + trace(A))/2;
            CV = CV + (nv * nv * np.log(2 * np.pi) + nv * C + np.trace(A)) / 2
        CV = CV / k
    else:
        # no parents: score X against the trivial (empty) model
        # set the kernel for X
        GX = np.sum(np.multiply(X, X), axis=1)
        Q = np.tile(GX, (1, T))
        R = np.tile(GX.T, (T, 1))
        dists = Q + R - 2 * X * X.T
        dists = dists - np.tril(dists)
        dists = np.reshape(dists, (T ** 2, 1))
        width = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0])  # median heuristic
        width = width * 2
        theta = 1 / (width ** 2)
        Kx, _ = kernel(X, X, (theta, 1))  # Gaussian kernel
        H0 = np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / (T)  # for centering of the data in feature space
        Kx = H0 * Kx * H0  # kernel matrix for X
        # eig_Kx, eix = eigdec((Kx + Kx.T) / 2, np.min([400, math.floor(T / 2)]), evals_only=False)  # /2
        # IIx = np.where(eig_Kx > np.max(eig_Kx) * Thresh)[0]
        # mx = len(IIx)
        CV = 0
        for kk in range(k):
            if (kk == 0):
                Kx_te = Kx[kk * n0: (kk + 1) * n0, kk * n0: (kk + 1) * n0]
                Kx_tr = Kx[(kk + 1) * n0:T, (kk + 1) * n0: T]
                Kx_tr_te = Kx[(kk + 1) * n0:T, kk * n0: (kk + 1) * n0]
                nv = n0
            if (kk == k - 1):
                Kx_te = Kx[kk * n0: T, kk * n0: T]
                Kx_tr = Kx[0: kk * n0, 0: kk * n0]
                Kx_tr_te = Kx[0:kk * n0, kk * n0: T]
                nv = T - kk * n0
            if (kk < k - 1 and kk > 0):
                Kx_te = Kx[kk * n0: (kk + 1) * n0, kk * n0: (kk + 1) * n0]
                Kx_tr = Kx[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
                                  np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]))]
                Kx_tr_te = Kx[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
                                     np.arange(kk * n0, (kk + 1) * n0))]
                nv = n0
            n1 = T - nv
            A = (Kx_te - 1 / (gamma * n1) * Kx_tr_te.T * pdinv(
                np.mat(np.eye(n1)) + 1 / (gamma * n1) * Kx_tr) * Kx_tr_te) / gamma
            B = 1 / (gamma * n1) * Kx_tr + np.mat(np.eye(n1))
            L = np.linalg.cholesky(B)
            C = np.sum(np.log(np.diag(L)))  # 0.5*log det(B) via Cholesky
            # CV = CV + (nv*nv*log(2*pi) + nv*C + nv*mx*log(gamma) + trace(A))/2;
            CV = CV + (nv * nv * np.log(2 * np.pi) + nv * C + np.trace(A)) / 2
        CV = CV / k
    score = CV  # negative cross-validated likelihood
    return score
def local_score_cv_multi(Data, Xi, PAi, parameters):
'''
Calculate the local score
using negative k-fold cross-validated log likelihood as the score
based on a regression model in RKHS
for variables with multi-variate dimensions
Parameters
----------
Data: (sample, features)
i: current index
PAi: parent indexes
parameters:
kfold: k-fold cross validation
lambda: regularization parameter
dlabel: for variables with multi-dimensions,
indicate which dimensions belong to the i-th variable.
Returns
-------
score: local score
'''
T = Data.shape[0]
X = Data[:, parameters['dlabel'][Xi]]
var_lambda = parameters['lambda'] # regularization parameter
k = parameters['kfold'] # k-fold cross validation
n0 = math.floor(T / k)
gamma = 0.01
Thresh = 1E-5
if (len(PAi)):
# set the kernel for X
GX = np.sum(np.multiply(X, X), axis=1)
Q = np.tile(GX, (1, T))
R = np.tile(GX.T, (T, 1))
dists = Q + R - 2 * X * X.T
dists = dists - np.tril(dists)
dists = np.reshape(dists, (T ** 2, 1))
width = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0]) # median value
width = width * 3 ###
theta = 1 / (width ** 2 * X.shape[1]) #
Kx, _ = kernel(X, X, (theta, 1)) # Gaussian kernel
H0 = np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / (T) # for centering of the data in feature space
Kx = H0 * Kx * H0 # kernel matrix for X
# set the kernel for PA
Kpa = np.mat(np.ones((T, T)))
for m in range(len(PAi)):
PA = Data[:, parameters['dlabel'][PAi[m]]]
G = np.sum((np.multiply(PA, PA)), axis=1)
Q = np.tile(G, (1, T))
R = np.tile(G.T, (T, 1))
dists = Q + R - 2 * PA * PA.T
dists = dists - np.tril(dists)
dists = np.reshape(dists, (T ** 2, 1))
width = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0]) # median value
width = width * 3 ###
theta = 1 / (width ** 2 * PA.shape[1])
Kpa = np.multiply(Kpa, kernel(PA, PA, (theta, 1))[0])
H0 = np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / (T) # for centering of the data in feature space
Kpa = H0 * Kpa * H0 # kernel matrix for PA
CV = 0
for kk in range(k):
if (kk == 0):
Kx_te = Kx[0:n0, 0:n0]
Kx_tr = Kx[n0: T, n0: T]
Kx_tr_te = Kx[n0: T, 0: n0]
Kpa_te = Kpa[0:n0, 0: n0]
Kpa_tr = Kpa[n0: T, n0: T]
Kpa_tr_te = Kpa[n0: T, 0: n0]
nv = n0 # sample size of validated data
if (kk == k - 1):
Kx_te = Kx[kk * n0:T, kk * n0: T]
Kx_tr = Kx[0:kk * n0, 0: kk * n0]
Kx_tr_te = Kx[0:kk * n0, kk * n0: T]
Kpa_te = Kpa[kk * n0:T, kk * n0: T]
Kpa_tr = Kpa[0: kk * n0, 0: kk * n0]
Kpa_tr_te = Kpa[0:kk * n0, kk * n0: T]
nv = T - kk * n0
if (kk < k - 1 and kk > 0):
Kx_te = Kx[kk * n0: (kk + 1) * n0, kk * n0: (kk + 1) * n0]
Kx_tr = Kx[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]))]
Kx_tr_te = Kx[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
np.arange(kk * n0, (kk + 1) * n0))]
Kpa_te = Kpa[kk * n0: (kk + 1) * n0, kk * n0: (kk + 1) * n0]
Kpa_tr = Kpa[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]))]
Kpa_tr_te = Kpa[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
np.arange(kk * n0, (kk + 1) * n0))]
nv = n0
n1 = T - nv
tmp1 = pdinv(Kpa_tr + n1 * var_lambda * np.mat(np.eye(n1)))
tmp2 = tmp1 * Kx_tr * tmp1
tmp3 = tmp1 * pdinv(np.mat(np.eye(n1)) + n1 * var_lambda ** 2 / gamma * tmp2) * tmp1
A = (Kx_te + Kpa_tr_te.T * tmp2 * Kpa_tr_te - 2 * Kx_tr_te.T * tmp1 * Kpa_tr_te
- n1 * var_lambda ** 2 / gamma * Kx_tr_te.T * tmp3 * Kx_tr_te
- n1 * var_lambda ** 2 / gamma * Kpa_tr_te.T * tmp1 * Kx_tr * tmp3 * Kx_tr * tmp1 * Kpa_tr_te
+ 2 * n1 * var_lambda ** 2 / gamma * Kx_tr_te.T * tmp3 * Kx_tr * tmp1 * Kpa_tr_te) / gamma
B = n1 * var_lambda ** 2 / gamma * tmp2 + np.mat(np.eye(n1))
L = np.linalg.cholesky(B)
C = np.sum(np.log(np.diag(L)))
# CV = CV + (nv*nv*log(2*pi) + nv*C + nv*mx*log(gamma) + trace(A))/2;
CV = CV + (nv * nv * np.log(2 * np.pi) + nv * C + np.trace(A)) / 2
CV = CV / k
else:
# set the kernel for X
GX = np.sum(np.multiply(X, X), axis=1)
Q = np.tile(GX, (1, T))
R = np.tile(GX.T, (T, 1))
dists = Q + R - 2 * X * X.T
dists = dists - np.tril(dists)
dists = np.reshape(dists, (T ** 2, 1))
width = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0]) # median value
width = width * 3 ###
theta = 1 / (width ** 2 * X.shape[1]) #
Kx, _ = kernel(X, X, (theta, 1)) # Gaussian kernel
H0 = np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / (T) # for centering of the data in feature space
Kx = H0 * Kx * H0 # kernel matrix for X
CV = 0
for kk in range(k):
if (kk == 0):
Kx_te = Kx[kk * n0: (kk + 1) * n0, kk * n0: (kk + 1) * n0]
Kx_tr = Kx[(kk + 1) * n0:T, (kk + 1) * n0: T]
Kx_tr_te = Kx[(kk + 1) * n0:T, kk * n0: (kk + 1) * n0]
nv = n0
if (kk == k - 1):
Kx_te = Kx[kk * n0: T, kk * n0: T]
Kx_tr = Kx[0: kk * n0, 0: kk * n0]
Kx_tr_te = Kx[0:kk * n0, kk * n0: T]
nv = T - kk * n0
if (kk < k - 1 and kk > 0):
Kx_te = Kx[kk * n0: (kk + 1) * n0, kk * n0: (kk + 1) * n0]
Kx_tr = Kx[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]))]
Kx_tr_te = Kx[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
np.arange(kk * n0, (kk + 1) * n0))]
nv = n0
n1 = T - nv
A = (Kx_te - 1 / (gamma * n1) * Kx_tr_te.T * pdinv(
np.mat(np.eye(n1)) + 1 / (gamma * n1) * Kx_tr) * Kx_tr_te) / gamma
B = 1 / (gamma * n1) * Kx_tr + np.mat(np.eye(n1))
L = np.linalg.cholesky(B)
C = np.sum(np.log(np.diag(L)))
# CV = CV + (nv*nv*log(2*pi) + nv*C + nv*mx*log(gamma) + trace(A))/2;
CV = CV + (nv * nv * np.log(2 * np.pi) + nv * C + np.trace(A)) / 2
CV = CV / k
score = CV # negative cross-validated likelihood
return score
def local_score_marginal_general(Data, Xi, PAi, parameters):
'''
Calculate the local score by negative marginal likelihood
based on a regression model in RKHS
Parameters
----------
Data: (sample, features)
i: current index
PAi: parent indexes
parameters: None
Returns
-------
score: local score
'''
T = Data.shape[0]
X = Data[:, Xi]
dX = X.shape[1]
# set the kernel for X
GX = np.sum(np.multiply(X, X), axis=1)
Q = np.tile(GX, (1, T))
R = np.tile(GX.T, (T, 1))
dists = Q + R - 2 * X * X.T
dists = dists - np.tril(dists)
dists = np.reshape(dists, (T ** 2, 1))
width = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0])
width = width * 2.5 # kernel width
theta = 1 / (width ** 2)
H = np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / T
Kx, _ = kernel(X, X, (theta, 1))
Kx = H * Kx * H
Thresh = 1E-5
eig_Kx, eix = eigdec((Kx + Kx.T) / 2, np.min([400, math.floor(T / 4)]), evals_only=False) # /2
IIx = np.where(eig_Kx > np.max(eig_Kx) * Thresh)[0]
eig_Kx = eig_Kx[IIx]
eix = eix[:, IIx]
if (len(PAi)):
PA = Data[:, PAi]
widthPA = np.mat(np.empty((PA.shape[1], 1)))
# set the kernel for PA
for m in range(PA.shape[1]):
G = np.sum((np.multiply(PA[:, m], PA[:, m])), axis=1)
Q = np.tile(G, (1, T))
R = np.tile(G.T, (T, 1))
dists = Q + R - 2 * PA[:, m] * PA[:, m].T
dists = dists - np.tril(dists)
dists = np.reshape(dists, (T ** 2, 1))
widthPA[m] = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0])
widthPA = widthPA * 2.5 # kernel width
covfunc = np.asarray(['covSum', ['covSEard', 'covNoise']])
logtheta0 = np.vstack([np.log(widthPA), 0, np.log(np.sqrt(0.1))])
logtheta, fvals, iter = minimize(logtheta0, 'gpr_multi_new', -300, covfunc, PA,
2 * np.sqrt(T) * eix * np.diag(np.sqrt(eig_Kx)) / np.sqrt(eig_Kx[0]))
nlml, dnlml = gpr_multi_new(logtheta, covfunc, PA,
2 * np.sqrt(T) * eix * np.diag(np.sqrt(eig_Kx)) / np.sqrt(eig_Kx[0]),
nargout=2)
else:
covfunc = np.asarray(['covSum', ['covSEard', 'covNoise']])
PA = np.mat(np.zeros((T, 1)))
logtheta0 = np.mat([100, 0, np.log(np.sqrt(0.1))]).T
logtheta, fvals, iter = minimize(logtheta0, 'gpr_multi_new', -300, covfunc, PA,
2 * np.sqrt(T) * eix * np.diag(np.sqrt(eig_Kx)) / np.sqrt(eig_Kx[0]))
nlml, dnlml = gpr_multi_new(logtheta, covfunc, PA,
2 * np.sqrt(T) * eix * np.diag(np.sqrt(eig_Kx)) / np.sqrt(eig_Kx[0]),
nargout=2)
score = nlml # negative log-likelihood
return score
def local_score_marginal_multi(Data, Xi, PAi, parameters):
'''
Calculate the local score by negative marginal likelihood
based on a regression model in RKHS
for variables with multi-variate dimensions
Parameters
----------
Data: (sample, features)
i: current index
PAi: parent indexes
parameters:
dlabel: for variables with multi-dimensions,
indicate which dimensions belong to the i-th variable.
Returns
-------
score: local score
'''
T = Data.shape[0]
X = Data[:, parameters['dlabel'][Xi]]
dX = X.shape[1]
# set the kernel for X
GX = np.sum(np.multiply(X, X), axis=1)
Q = np.tile(GX, (1, T))
R = np.tile(GX.T, (T, 1))
dists = Q + R - 2 * X * X.T
dists = dists - np.tril(dists)
dists = np.reshape(dists, (T ** 2, 1))
widthX = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0])
widthX = widthX * 2.5 # kernel width
theta = 1 / (widthX ** 2)
H = np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / T
Kx, _ = kernel(X, X, (theta, 1))
Kx = H * Kx * H
Thresh = 1E-5
eig_Kx, eix = eigdec((Kx + Kx.T) / 2, np.min([400, math.floor(T / 4)]), evals_only=False) # /2
IIx = np.where(eig_Kx > np.max(eig_Kx) * Thresh)[0]
eig_Kx = eig_Kx[IIx]
eix = eix[:, IIx]
if (len(PAi)):
widthPA_all = np.mat(np.empty((1, 0)))
# set the kernel for PA
PA_all = np.mat(np.empty((Data.shape[0], 0)))
for m in range(len(PAi)):
PA = Data[:, parameters['dlabel'][PAi[m]]]
PA_all = np.hstack([PA_all, PA])
G = np.sum((np.multiply(PA, PA)), axis=1)
Q = np.tile(G, (1, T))
R = np.tile(G.T, (T, 1))
dists = Q + R - 2 * PA * PA.T
dists = dists - np.tril(dists)
dists = np.reshape(dists, (T ** 2, 1))
widthPA = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0])
widthPA_all = np.hstack(
[widthPA_all, widthPA * np.mat(np.ones((1, np.size(parameters['dlabel'][PAi[m]]))))])
widthPA_all = widthPA_all * 2.5 # kernel width
covfunc = np.asarray(['covSum', ['covSEard', 'covNoise']])
logtheta0 = np.vstack([np.log(widthPA_all.T), 0, np.log(np.sqrt(0.1))])
logtheta, fvals, iter = minimize(logtheta0, 'gpr_multi_new', -300, covfunc, PA_all,
2 * np.sqrt(T) * eix * np.diag(np.sqrt(eig_Kx)) / np.sqrt(eig_Kx[0]))
nlml, dnlml = gpr_multi_new(logtheta, covfunc, PA_all,
2 * np.sqrt(T) * eix * np.diag(np.sqrt(eig_Kx)) / np.sqrt(eig_Kx[0]),
nargout=2)
else:
covfunc = np.asarray(['covSum', ['covSEard', 'covNoise']])
PA = np.mat(np.zeros((T, 1)))
logtheta0 = np.mat([100, 0, np.log(np.sqrt(0.1))]).T
logtheta, fvals, iter = minimize(logtheta0, 'gpr_multi_new', -300, covfunc, PA,
2 * np.sqrt(T) * eix * np.diag(np.sqrt(eig_Kx)) / np.sqrt(eig_Kx[0]))
nlml, dnlml = gpr_multi_new(logtheta, covfunc, PA,
2 * np.sqrt(T) * eix * np.diag(np.sqrt(eig_Kx)) / np.sqrt(eig_Kx[0]),
nargout=2)
score = nlml # negative log-likelihood
return score
| 0 | 0 | 0 |
2c9f5d52f1b4861421709e07ade22be53a597029 | 4,882 | py | Python | src/data_generation/data_recorder.py | StanfordASL/NASA_ULI_Xplane_Simulator | 051fa492b650545442c790cf04a8cdb19632bd02 | [
"Apache-2.0"
] | 4 | 2021-06-28T19:13:58.000Z | 2021-12-10T03:10:38.000Z | src/data_generation/data_recorder.py | StanfordASL/NASA_ULI_Xplane_Simulator | 051fa492b650545442c790cf04a8cdb19632bd02 | [
"Apache-2.0"
] | null | null | null | src/data_generation/data_recorder.py | StanfordASL/NASA_ULI_Xplane_Simulator | 051fa492b650545442c790cf04a8cdb19632bd02 | [
"Apache-2.0"
] | 1 | 2021-05-25T20:52:11.000Z | 2021-05-25T20:52:11.000Z | import sys
import os
# make sure this is a system variable in your bashrc
NASA_ULI_ROOT_DIR = os.environ['NASA_ULI_ROOT_DIR']
XPC3_DIR = NASA_ULI_ROOT_DIR + '/src/'
sys.path.append(XPC3_DIR)
import numpy as np
import xpc3
import xpc3_helper
import time
import os
import mss
import cv2
import settings
screenShot = mss.mss()
def record(client, outDir, startPerc = 1.5, endPerc = 10, freq = 10, numEpisodes = 1):
    """ Record data from training episodes in XPlane into a CSV and PNG files
        Args:
            client: XPlane Client
            outDir: output directory for csv file and png images
            -----------------
            startPerc: percentage down runway to start collecting data
            endPerc: percentage down runway to finish collecting data
            (NOTE: this must be less than endPerc for the sinusoidal trajectory)
            freq: frequency to save data
            (NOTE: this is approximate due to computational overhead)
            numEpisodes: number of episodes to record for
    """
    # Make data folder if it doesn't exist
    if not os.path.exists(outDir):
        os.makedirs(outDir)
    # Initialize the CSV file (one header row; addCurrData appends the samples)
    csvFile = outDir + 'labels.csv'
    with open(csvFile, 'w') as fd:
        fd.write('image_filename,absolute_time_GMT_seconds,relative_time_seconds,distance_to_centerline_meters,')
        fd.write('distance_to_centerline_NORMALIZED,downtrack_position_meters,downtrack_position_NORMALIZED,')
        fd.write('heading_error_degrees,heading_error_NORMALIZED,period_of_day,cloud_type\n')
    for i in range(numEpisodes):
        first = True
        percDownRunway = xpc3_helper.getPercDownRunway(client)
        currStep = 0
        # The previous episode may have left the aircraft beyond endPerc;
        # poll until the sim resets it back toward the runway start.
        while percDownRunway > endPerc:
            time.sleep(0.5)
            percDownRunway = xpc3_helper.getPercDownRunway(client)
        # Record until the aircraft passes endPerc.
        while percDownRunway < endPerc:
            # Only sample inside the two recording windows; the 45.0-51.5%
            # band is skipped — presumably a discontinuity in the scripted
            # trajectory (TODO confirm).
            if ((percDownRunway > startPerc) and (percDownRunway < 45.0)) or ((percDownRunway > 51.5) and (percDownRunway < endPerc)):
                if first:
                    # Anchor relative timestamps to the first captured sample
                    startTime = client.getDREF("sim/time/zulu_time_sec")[0]
                    addCurrData(client, outDir, csvFile, startTime, currStep, i + 1)
                    first = False
                    time.sleep(1 / freq)
                else:
                    addCurrData(client, outDir, csvFile,
                                startTime, currStep, i + 1)
                    time.sleep(1 / freq)
                currStep += 1
            else:
                time.sleep(1)
            percDownRunway = xpc3_helper.getPercDownRunway(client)
def addCurrData(client, outDir, csvFile, startTime, currStep, episodeNum):
    """ Add current data to csv file and save off a screenshot
        Args:
            client: XPlane Client
            outDir: output directory for csv file and png images
            csvFile: name of the CSV file for non-image data (should be initialized already)
            startTime: absolute time that the episode started
            currStep: current step of saving data (for image labeling)
            episodeNum: number of the current episode (for image labeling)
    """
    # Time step information
    absolute_time = client.getDREF("sim/time/zulu_time_sec")[0]
    time_step = absolute_time - startTime
    # Plane state information (crosstrack error, downtrack position, heading error)
    cte, dtp, he = xpc3_helper.getHomeState(client)
    # Environmental conditions: bucket local sim time into
    # 0 = morning, 1 = afternoon (12h-17h), 2 = night (<5h or >17h)
    local_time = client.getDREF("sim/time/local_time_sec")[0]
    if local_time < 5 * 3600 or local_time > 17 * 3600:
        period_of_day = 2
        time_period = 'night'
    elif local_time > 12 * 3600 and local_time < 17 * 3600:
        period_of_day = 1
        time_period = 'afternoon'
    else:
        period_of_day = 0
        time_period = 'morning'
    cloud_cover = client.getDREF("sim/weather/cloud_type[0]")[0]
    weather = settings.WEATHER_TYPES[cloud_cover]
    # Image information: grab the configured monitor region and drop the top
    # 230 rows (presumably window chrome / cockpit HUD — TODO confirm)
    img = cv2.cvtColor(np.array(screenShot.grab(settings.MONITOR)),
                       cv2.COLOR_BGRA2BGR)[230:, :, :]
    img = cv2.resize(img, (settings.WIDTH, settings.HEIGHT))
    img_name = 'MWH_Runway04_' + time_period + '_' + weather + '_' + str(episodeNum) + '_' + str(currStep) + '.png'
    # For now, just save the image to an output directory
    cv2.imwrite('%s%s' % (outDir, img_name), img)
    # Append everything to the csv file; normalization denominators (10.0 m,
    # 2982.0 m, 30.0 deg) are assumed runway/controller constants — TODO confirm
    with open(csvFile, 'a') as fd:
        fd.write("%s,%f,%f,%f,%f,%f,%f,%f,%f,%d,%d\n" % (img_name, absolute_time, time_step,
                                                         cte, cte / 10.0, dtp, dtp / 2982.0, he, he / 30.0, period_of_day, cloud_cover))
# NOTE(review): `main` is not defined in this copy of the module; running the
# file directly raises NameError here.
if __name__ == "__main__":
    main()
| 38.440945 | 134 | 0.630274 | import sys
import os
# make sure this is a system variable in your bashrc
NASA_ULI_ROOT_DIR = os.environ['NASA_ULI_ROOT_DIR']
XPC3_DIR = NASA_ULI_ROOT_DIR + '/src/'
sys.path.append(XPC3_DIR)
import numpy as np
import xpc3
import xpc3_helper
import time
import os
import mss
import cv2
import settings
screenShot = mss.mss()
def main():
    """Entry point: connect to X-Plane and record settings-driven episodes."""
    with xpc3.XPlaneConnect() as client:
        # endPerc is pulled in by 1% — presumably so recording stops just
        # before the configured trajectory endpoint (TODO confirm).
        record(client, settings.OUT_DIR, endPerc = settings.END_PERC - 1.0,
               freq=settings.FREQUENCY, numEpisodes=len(settings.CASE_INDS))
def record(client, outDir, startPerc = 1.5, endPerc = 10, freq = 10, numEpisodes = 1):
    """ Record data from training episodes in XPlane into a CSV and PNG files
        Args:
            client: XPlane Client
            outDir: output directory for csv file and png images
            -----------------
            startPerc: percentage down runway to start collecting data
            endPerc: percentage down runway to finish collecting data
            (NOTE: this must be less than endPerc for the sinusoidal trajectory)
            freq: frequency to save data
            (NOTE: this is approximate due to computational overhead)
            numEpisodes: number of episodes to record for
    """
    # Make data folder if it doesn't exist
    if not os.path.exists(outDir):
        os.makedirs(outDir)
    # Initialize the CSV file with a single header row; samples are appended
    # later by addCurrData()
    csvFile = outDir + 'labels.csv'
    with open(csvFile, 'w') as fd:
        fd.write('image_filename,absolute_time_GMT_seconds,relative_time_seconds,distance_to_centerline_meters,')
        fd.write('distance_to_centerline_NORMALIZED,downtrack_position_meters,downtrack_position_NORMALIZED,')
        fd.write('heading_error_degrees,heading_error_NORMALIZED,period_of_day,cloud_type\n')
    for i in range(numEpisodes):
        first = True
        percDownRunway = xpc3_helper.getPercDownRunway(client)
        currStep = 0
        # Wait out any leftover state from the previous episode: the aircraft
        # may still be past endPerc until the sim resets it.
        while percDownRunway > endPerc:
            time.sleep(0.5)
            percDownRunway = xpc3_helper.getPercDownRunway(client)
        while percDownRunway < endPerc:
            # Two recording windows; the 45.0-51.5% band is deliberately
            # excluded — reason not visible here (TODO confirm).
            if ((percDownRunway > startPerc) and (percDownRunway < 45.0)) or ((percDownRunway > 51.5) and (percDownRunway < endPerc)):
                if first:
                    # First sample anchors the episode's relative timestamps
                    startTime = client.getDREF("sim/time/zulu_time_sec")[0]
                    addCurrData(client, outDir, csvFile, startTime, currStep, i + 1)
                    first = False
                    time.sleep(1 / freq)
                else:
                    addCurrData(client, outDir, csvFile,
                                startTime, currStep, i + 1)
                    time.sleep(1 / freq)
                currStep += 1
            else:
                time.sleep(1)
            percDownRunway = xpc3_helper.getPercDownRunway(client)
def addCurrData(client, outDir, csvFile, startTime, currStep, episodeNum):
    """ Add current data to csv file and save off a screenshot
        Args:
            client: XPlane Client
            outDir: output directory for csv file and png images
            csvFile: name of the CSV file for non-image data (should be initialized already)
            startTime: absolute time that the episode started
            currStep: current step of saving data (for image labeling)
            episodeNum: number of the current episode (for image labeling)
    """
    # Time step information
    absolute_time = client.getDREF("sim/time/zulu_time_sec")[0]
    time_step = absolute_time - startTime
    # Plane state information: crosstrack error / downtrack pos / heading error
    cte, dtp, he = xpc3_helper.getHomeState(client)
    # Environmental conditions; encoded 0=morning, 1=afternoon, 2=night
    local_time = client.getDREF("sim/time/local_time_sec")[0]
    if local_time < 5 * 3600 or local_time > 17 * 3600:
        period_of_day = 2
        time_period = 'night'
    elif local_time > 12 * 3600 and local_time < 17 * 3600:
        period_of_day = 1
        time_period = 'afternoon'
    else:
        period_of_day = 0
        time_period = 'morning'
    cloud_cover = client.getDREF("sim/weather/cloud_type[0]")[0]
    weather = settings.WEATHER_TYPES[cloud_cover]
    # Image information: screenshot the monitor region, drop BGRA alpha, and
    # crop off the top 230 rows before resizing to the training resolution.
    img = cv2.cvtColor(np.array(screenShot.grab(settings.MONITOR)),
                       cv2.COLOR_BGRA2BGR)[230:, :, :]
    img = cv2.resize(img, (settings.WIDTH, settings.HEIGHT))
    img_name = 'MWH_Runway04_' + time_period + '_' + weather + '_' + str(episodeNum) + '_' + str(currStep) + '.png'
    # For now, just save the image to an output directory
    cv2.imwrite('%s%s' % (outDir, img_name), img)
    # Append everything to the csv file; the /10.0, /2982.0 and /30.0
    # denominators normalize CTE, downtrack and heading — values appear to be
    # runway-specific constants (TODO confirm against sim config).
    with open(csvFile, 'a') as fd:
        fd.write("%s,%f,%f,%f,%f,%f,%f,%f,%f,%d,%d\n" % (img_name, absolute_time, time_step,
                                                         cte, cte / 10.0, dtp, dtp / 2982.0, he, he / 30.0, period_of_day, cloud_cover))
# Script entry point
if __name__ == "__main__":
    main()
| 190 | 0 | 23 |
e9e8e3d3249ddc6a3c68b462aadd0cf70fd33c59 | 581 | py | Python | rsatest.py | PotatoDIY/rsamsg | 2e324b9fc37a6bfcd9edf06214def00818e44380 | [
"MIT"
] | null | null | null | rsatest.py | PotatoDIY/rsamsg | 2e324b9fc37a6bfcd9edf06214def00818e44380 | [
"MIT"
] | 1 | 2021-04-30T21:03:26.000Z | 2021-04-30T21:03:26.000Z | rsatest.py | PotatoDIY/rsamsg | 2e324b9fc37a6bfcd9edf06214def00818e44380 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
from rsamsg import *
#alice and bob
#alice_pubkey, alice_privkey=gen()
#bob_pubkey, bob_privkey=gen()
alice_pubkey = loadfile("keys/alice_pubkey")
alice_prikey = loadfile("keys/alice_prikey")
bob_pubkey = loadfile("keys/bob_pubkey")
bob_prikey = loadfile("keys/bob_prikey")
#from alice
message = "你好"
#to bob
crypto,signature=alice_to_bob(message,bob_pubkey,alice_prikey)
# bob verify
verifyed,msg=bob_get_msg_from_alice(crypto,signature,bob_prikey,alice_pubkey)
if(verifyed):
print(msg)
else:
print("err:{}".format(msg)) | 20.034483 | 77 | 0.748709 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
from rsamsg import *
#alice and bob
#alice_pubkey, alice_privkey=gen()
#bob_pubkey, bob_privkey=gen()
alice_pubkey = loadfile("keys/alice_pubkey")
alice_prikey = loadfile("keys/alice_prikey")
bob_pubkey = loadfile("keys/bob_pubkey")
bob_prikey = loadfile("keys/bob_prikey")
#from alice
message = "你好"
#to bob
crypto,signature=alice_to_bob(message,bob_pubkey,alice_prikey)
# bob verify
verifyed,msg=bob_get_msg_from_alice(crypto,signature,bob_prikey,alice_pubkey)
if(verifyed):
print(msg)
else:
print("err:{}".format(msg)) | 0 | 0 | 0 |
ab19dd13a41e5770195c38a79129d61c58d261cd | 3,338 | py | Python | scale/job/tasks/health_task.py | kaydoh/scale | 1b6a3b879ffe83e10d3b9d9074835a4c3bf476ee | [
"Apache-2.0"
] | 121 | 2015-11-18T18:15:33.000Z | 2022-03-10T01:55:00.000Z | scale/job/tasks/health_task.py | kaydoh/scale | 1b6a3b879ffe83e10d3b9d9074835a4c3bf476ee | [
"Apache-2.0"
] | 1,415 | 2015-12-23T23:36:04.000Z | 2022-01-07T14:10:09.000Z | scale/job/tasks/health_task.py | kaydoh/scale | 1b6a3b879ffe83e10d3b9d9074835a4c3bf476ee | [
"Apache-2.0"
] | 66 | 2015-12-03T20:38:56.000Z | 2020-07-27T15:28:11.000Z | """Defines the class for a node health check task"""
from __future__ import unicode_literals
import datetime
from django.conf import settings
from job.tasks.base_task import AtomicCounter
from job.tasks.node_task import NodeTask
from node.resources.node_resources import NodeResources
from node.resources.resource import Cpus, Mem
HEALTH_TASK_ID_PREFIX = 'scale_health'
COUNTER = AtomicCounter()
class HealthTask(NodeTask):
    """Represents a task that performs a health check on a node. This class is thread-safe.
    """
    # Shell exit codes reported by the generated health-check command
    BAD_DAEMON_CODE = 2  # Docker daemon unresponsive
    LOW_DOCKER_SPACE_CODE = 3  # less than 1 GiB free under /var/lib/docker
    BAD_LOGSTASH_CODE = 4  # logging (fluentd) health endpoint unreachable
    def __init__(self, framework_id, agent_id):
        """Constructor
        :param framework_id: The framework ID
        :type framework_id: string
        :param agent_id: The agent ID
        :type agent_id: string
        """
        task_id = '%s_%s_%d' % (HEALTH_TASK_ID_PREFIX, framework_id, COUNTER.get_next())
        super(HealthTask, self).__init__(task_id, 'Scale Health Check', agent_id)
        # Runs directly on the agent, not inside a Docker container
        self._uses_docker = False
        self._docker_image = None
        self._docker_params = []
        self._is_docker_privileged = False
        self._running_timeout_threshold = datetime.timedelta(minutes=15)
        health_check_commands = []
        # Check if docker version works (indicates if daemon is working)
        bad_daemon_check = 'docker version'
        bad_daemon_check = 'timeout -s SIGKILL 10s %s' % bad_daemon_check  # docker version has 10 seconds to succeed
        bad_daemon_check = '%s; if [[ $? != 0 ]]; then exit %d; fi' % (bad_daemon_check, HealthTask.BAD_DAEMON_CODE)
        health_check_commands.append(bad_daemon_check)
        # Check if docker ps works (also indicates if daemon is working)
        docker_ps_check = 'docker ps'
        docker_ps_check = 'timeout -s SIGKILL 10s %s' % docker_ps_check  # docker ps has 10 seconds to succeed
        docker_ps_check = '%s; if [[ $? != 0 ]]; then exit %d; fi' % (docker_ps_check, HealthTask.BAD_DAEMON_CODE)
        health_check_commands.append(docker_ps_check)
        # Check if Docker disk space is below 1 GiB (assumes /var/lib/docker, ignores check otherwise)
        get_disk_space = 'df --output=avail /var/lib/docker | tail -1'
        test_disk_space = 'test `%s` -lt 1048576; if [[ $? == 0 ]]; then exit %d; fi'
        test_disk_space = test_disk_space % (get_disk_space, HealthTask.LOW_DOCKER_SPACE_CODE)
        low_docker_space_check = 'if [[ -d /var/lib/docker ]]; then %s; fi' % test_disk_space
        health_check_commands.append(low_docker_space_check)
        # Check to ensure that fluentd is reachable
        if settings.LOGGING_HEALTH_ADDRESS:
            logging_check = 'timeout -s SIGKILL 5s curl %s; if [[ $? != 0 ]]; then exit %d; fi'
            logging_check = logging_check % (settings.LOGGING_HEALTH_ADDRESS, HealthTask.BAD_LOGSTASH_CODE)
            health_check_commands.append(logging_check)
        # '&&' chaining makes the first failing check's exit code the task result
        self._command = ' && '.join(health_check_commands)
        # Node task properties
        self.task_type = 'health-check'
        self.title = 'Node Health Check'
        self.description = 'Checks the health status of the node'
    def get_resources(self):
        """See :meth:`job.tasks.base_task.Task.get_resources`
        """
        # Health checks are lightweight: 0.1 CPUs and 32 MiB of memory
        return NodeResources([Cpus(0.1), Mem(32.0)])
| 40.216867 | 117 | 0.679449 | """Defines the class for a node health check task"""
from __future__ import unicode_literals
import datetime
from django.conf import settings
from job.tasks.base_task import AtomicCounter
from job.tasks.node_task import NodeTask
from node.resources.node_resources import NodeResources
from node.resources.resource import Cpus, Mem
HEALTH_TASK_ID_PREFIX = 'scale_health'
COUNTER = AtomicCounter()
class HealthTask(NodeTask):
    """Represents a task that performs a health check on a node. This class is thread-safe.
    """
    # Shell exit codes produced by the generated health-check command
    BAD_DAEMON_CODE = 2
    LOW_DOCKER_SPACE_CODE = 3
    BAD_LOGSTASH_CODE = 4
    def __init__(self, framework_id, agent_id):
        """Constructor
        :param framework_id: The framework ID
        :type framework_id: string
        :param agent_id: The agent ID
        :type agent_id: string
        """
        task_id = '{}_{}_{}'.format(HEALTH_TASK_ID_PREFIX, framework_id, COUNTER.get_next())
        super(HealthTask, self).__init__(task_id, 'Scale Health Check', agent_id)
        # This task runs directly on the agent, outside of Docker
        self._uses_docker = False
        self._docker_image = None
        self._docker_params = []
        self._is_docker_privileged = False
        self._running_timeout_threshold = datetime.timedelta(minutes=15)
        checks = []
        # Daemon responsiveness: `docker version` must succeed within 10 seconds
        daemon_cmd = 'timeout -s SIGKILL 10s docker version'
        checks.append('{}; if [[ $? != 0 ]]; then exit {}; fi'.format(daemon_cmd, HealthTask.BAD_DAEMON_CODE))
        # `docker ps` must also succeed within 10 seconds
        ps_cmd = 'timeout -s SIGKILL 10s docker ps'
        checks.append('{}; if [[ $? != 0 ]]; then exit {}; fi'.format(ps_cmd, HealthTask.BAD_DAEMON_CODE))
        # Fail when /var/lib/docker exists but has less than 1 GiB available
        avail_cmd = 'df --output=avail /var/lib/docker | tail -1'
        space_test = 'test `{}` -lt 1048576; if [[ $? == 0 ]]; then exit {}; fi'.format(
            avail_cmd, HealthTask.LOW_DOCKER_SPACE_CODE)
        checks.append('if [[ -d /var/lib/docker ]]; then {}; fi'.format(space_test))
        # The logging (fluentd) health endpoint must answer a curl within 5 seconds
        if settings.LOGGING_HEALTH_ADDRESS:
            checks.append('timeout -s SIGKILL 5s curl {}; if [[ $? != 0 ]]; then exit {}; fi'.format(
                settings.LOGGING_HEALTH_ADDRESS, HealthTask.BAD_LOGSTASH_CODE))
        # '&&' chaining means the first failing check determines the exit code
        self._command = ' && '.join(checks)
        # Node task properties
        self.task_type = 'health-check'
        self.title = 'Node Health Check'
        self.description = 'Checks the health status of the node'
    def get_resources(self):
        """See :meth:`job.tasks.base_task.Task.get_resources`
        """
        resources = [Cpus(0.1), Mem(32.0)]
        return NodeResources(resources)
| 0 | 0 | 0 |
d997ece80f2c50ef76115ac62ab2023286b010bd | 11,486 | py | Python | bommerge/bommerge.py | sakoPO/bommerge | 88a3697ce03908053c9d7c52fd3f78db0ce52371 | [
"BSD-3-Clause"
] | null | null | null | bommerge/bommerge.py | sakoPO/bommerge | 88a3697ce03908053c9d7c52fd3f78db0ce52371 | [
"BSD-3-Clause"
] | null | null | null | bommerge/bommerge.py | sakoPO/bommerge | 88a3697ce03908053c9d7c52fd3f78db0ce52371 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
try:
from components import resistor
from components import capacitor
from components import voltage
from components import tolerance
except:
from bommerge.components import resistor
from bommerge.components import capacitor
from bommerge.components import voltage
from bommerge.components import tolerance
from gui.projectConfigurationWidget import ProjectConfigurationWidget
from parsers import csvToJson
from utils import files
import os
from decimal import *
try:
import Tkinter as tk
import ttk
except ImportError:
import tkinter as tk
from tkinter import ttk
# NOTE(review): no `main` is defined in this copy of the module; executing the
# file directly raises NameError here.
if __name__ == "__main__":
    main()
| 41.168459 | 133 | 0.610134 | #!/usr/bin/env python
try:
from components import resistor
from components import capacitor
from components import voltage
from components import tolerance
except:
from bommerge.components import resistor
from bommerge.components import capacitor
from bommerge.components import voltage
from bommerge.components import tolerance
from gui.projectConfigurationWidget import ProjectConfigurationWidget
from parsers import csvToJson
from utils import files
import os
from decimal import *
try:
import Tkinter as tk
import ttk
except ImportError:
import tkinter as tk
from tkinter import ttk
def loadProject(filename):
    """Load a project description file and absolutize every BOM path.

    Relative 'filename' entries are resolved against the directory that
    contains the project file itself.
    """
    project = files.load_json_file(filename)
    project_directory = files.get_directory_from_path(filename)
    print("Loading project: " + project_directory)
    for entry in project:
        if not os.path.isabs(entry['filename']):
            joined = os.path.join(project_directory, entry['filename'])
            entry['filename'] = os.path.normpath(joined)
    print(project)
    return project
def saveProject(filename, project):
    """Rewrite each BOM path relative to the project file's directory and save."""
    base_dir = files.get_directory_from_path(filename)
    for entry in project:
        absolute = os.path.normpath(entry['filename'])
        print(absolute)
        entry['filename'] = os.path.relpath(absolute, base_dir)
    files.save_json_file(filename, project)
def read_configuration():
    """Read TME API credentials from ~/.bommerge/configuration.json.

    Returns a dict with 'token' and 'app_secret' keys, or implicitly None
    when the configuration file does not exist.
    """
    user_dir = files.get_user_home_directory()
    configuration_file = user_dir + '/.bommerge/configuration.json'
    if files.file_exist(configuration_file):
        configuration = files.load_json_file(configuration_file)
        token = str(configuration['Distributors']['TME']['token'])
        app_secret = str(configuration['Distributors']['TME']['app_secret'])
        tme_config = {'token': token, 'app_secret': app_secret}
        return tme_config
    else:
        # NOTE(review): falls through and returns None after this message;
        # callers that subscript the result (e.g. find_component) will fail.
        print("Unable to read bommerge configuration file. " + str(configuration_file))
def find_component(components_group, group, tme_config):
    """Query each configured distributor for every component in the group and
    append any matches to the component's 'Distributors' list (in place).

    Lookup strategy: an explicit Manufacturer Part Number wins; otherwise
    capacitors/resistors are searched parametrically and integrated circuits
    by their 'Comment' field.
    """
    def to_string(case):
        # Render None as the literal string 'None' used in the log output
        if case:
            return case
        return 'None'
    from distributor_connector import tme
    from distributor_connector import partkeepr
    # NOTE(review): the Partkeepr URL is hard-coded — consider moving it into
    # the configuration file alongside the TME credentials.
    distributors = [tme.TME(tme_config['token'], tme_config['app_secret']),
                    partkeepr.Partkeepr("https://partkeepr.locallan", "auto", "auto")]
    for shop in distributors:
        for component in components_group:
            if 'Manufacturer Part Number' in component and component['Manufacturer Part Number'] != "":
                print("Request for " + component['Manufacturer Part Number'])
                found = shop.find_component(component['Manufacturer Part Number'])
            else:
                if group == "Capacitors":
                    print("Request for " + to_string(component['Capacitance']) + " " + to_string(
                        component['Voltage']) + ' ' + to_string(component['Case']))
                    try:
                        # Convert display strings to canonical units (farads,
                        # volts) for the parametric query
                        capacitor_parameters = {'Capacitance': capacitor.convert_capacitance_co_farads(component['Capacitance']),
                                                'Case': to_string(component['Case']),
                                                'Voltage': voltage.string_to_voltage(component['Voltage']),
                                                'Dielectric Type': component['Dielectric Type']}
                        found = shop.find_capacitor_by_parameters(capacitor_parameters)
                        if found:
                            # Convert result parameters back to display strings
                            for part in found:
                                if 'Voltage' in part['Parameters']:
                                    part['Parameters']['Voltage'] = voltage.volts_to_string(part['Parameters']['Voltage'])
                                if 'Capacitance' in part['Parameters']:
                                    part['Parameters']['Capacitance'] = capacitor.farads_to_string(part['Parameters']['Capacitance'])
                    # InvalidOperation comes from the module-level
                    # `from decimal import *` (bad numeric string)
                    except InvalidOperation:
                        print(component)
                        raise
                elif group == "Resistors":
                    print("Request for " + to_string(component['Resistance']) + ' ' + to_string(
                        component['Case']) + ' ' + to_string(component['Tolerance']))
                    if component['Resistance'] is None:
                        # No resistance value: nothing to query for this part
                        print("Skipping...")
                        component["Distributors"] = []
                        continue
                    try:
                        resistor_parameters = {'Resistance': resistor.convert_resistance_to_ohms(component['Resistance']),
                                               'Case': component['Case'],
                                               'Tolerance': tolerance.string_to_tolerance(component['Tolerance'])}
                        found = shop.find_resistor_by_parameters(resistor_parameters)
                        if found:
                            for part in found:
                                if 'Resistance' in part['Parameters']:
                                    part['Parameters']['Resistance'] = resistor.ohms_to_string(part['Parameters']['Resistance'])
                                if 'Tolerance' in part['Parameters']:
                                    part['Parameters']['Tolerance'] = tolerance.tolerance_to_string(part['Parameters']['Tolerance'])
                    except TypeError:
                        print(component)
                        raise
                elif group in ["IntegratedCircuits"] and component['Comment'] != '':
                    print("Request for " + to_string(component['Comment']))
                    found = shop.find_component(component['Comment'])
                else:
                    # Unsupported group or no usable identifier
                    found = None
            if "Distributors" not in component:
                component["Distributors"] = []
            if found:
                component["Distributors"].append({"Name": shop.name, "Components": found})
def find_component_comment(components_group, tme_config):
    """Debug helper: look up each component by its 'Comment' field on TME and
    print the raw response."""
    from distributor_connector import tme
    # NOTE(review): find_component() instantiates this as tme.TME(token,
    # app_secret); `tme.tme()` here looks stale — confirm before relying on it.
    shop = tme.tme()
    for component in components_group:
        if 'Comment' in component and component['Comment'] != "":
            print("Request for " + component['Comment'])
            found = shop.find_component(component['Comment'])
            if found:
                print(found)
                # for component in found['Data']['ProductList']:
                # print(component['Symbol'] + " : " + component['OriginalSymbol'] + " : " + component['Producer'])
                # if 'stockAndPrice' in found:
                # print(found['stockAndPrice'])
def ged_distributor_stock(merged, tme_config):
    """Fetch distributor stock information for every merged component group."""
    for group_name, group_components in merged.items():
        find_component(group_components, group_name, tme_config)
def parse_files_if_needed(project_file_list, destynation):
    """Copy JSON BOMs straight into *destynation*; convert everything else from CSV."""
    print("Parsing csv files and saving results to: " + destynation)
    for entry in project_file_list:
        source_path = entry['filename']
        if files.get_file_extension(source_path) != '.json':
            csvToJson.convert(source_path, destynation)
        else:
            target = destynation + '/' + files.get_filename_from_path(source_path)
            files.copy(source_path, target)
def remove_DNF_and_empty_components(components):
    """Drop capacitors with no capacitance and resistors with no resistance.

    Entries whose value field is 'DNF' (do-not-fit) or an empty string are
    removed. The 'Capacitors' and 'Resistors' lists are replaced with filtered
    copies; the same ``components`` dict is mutated and returned.
    """
    # Filtering into new lists replaces the previous build-then-list.remove()
    # passes (O(n^2)) and drops the loop variable that shadowed the imported
    # `capacitor` module.
    components['Capacitors'] = [
        part for part in components['Capacitors']
        if part['Capacitance'] not in ('DNF', '')
    ]
    components['Resistors'] = [
        part for part in components['Resistors']
        if part['Resistance'] not in ('DNF', '')
    ]
    return components
def mergeProject(project, workingDirectory, nogui):
    """Merge the project's BOM files, fetch distributor stock, and export
    order CSVs.

    Pipeline: parse/copy BOMs into <workingDirectory>/tmp, auto-merge them,
    optionally review the merge in a GUI, query distributors for stock, run
    the ordering dialog, then write per-supplier order CSVs.
    """
    import os
    import automaticMerger
    import guiBOM as manual_merger
    directory = workingDirectory + "/tmp"
    files.make_directory_if_not_exist(directory)
    parse_files_if_needed(project, directory)
    # Retarget every BOM entry at its parsed JSON copy inside tmp/
    for bom in project:
        bom['filename'] = os.path.join(directory,
                                       files.replace_file_extension(files.get_filename_from_path(bom['filename']),
                                                                    '.json'))
    components = automaticMerger.merge(project)
    # nogui is the raw --nogui argument: when it is absent (None) the
    # interactive manual-merge review is shown
    if nogui is None:
        root = tk.Tk()
        root.title("BOM Merger")
        merger = manual_merger.ManualMerger(root, components)
        root.mainloop()
        if merger.result:
            components = merger.components
        else:
            # Review cancelled: skip the remaining pipeline entirely
            components = None
    if components:
        files.save_json_file(os.path.join(directory, 'automerged.json'), components)
        from exporters import csvExporter
        csvExporter.save(dict(components), os.path.join(workingDirectory, "mergedBOM.csv"))
        components = remove_DNF_and_empty_components(components)
        # NOTE(review): read_configuration() may return None when the config
        # file is missing; ged_distributor_stock would then fail
        tme_config = read_configuration()
        print(tme_config)
        ged_distributor_stock(components, tme_config)
        filename = directory + '/merged.json'
        files.save_json_file(filename, components)
        from gui import orderingDialog
        root = tk.Tk()
        orderingWidget = orderingDialog.OrderWidget(root, filename)
        root.mainloop()
        filename = directory + '/order.json'
        files.save_json_file(filename, orderingWidget.components)
        if orderingWidget.result:
            # One human-readable CSV plus one bare part-number/quantity CSV
            # per supplier
            for supplier in orderingWidget.result.keys():
                csvExporter.save_list(orderingWidget.result[supplier],
                                      workingDirectory + '/' + supplier + '_human_readable.csv')
                order_list = []
                for component in orderingWidget.result[supplier]:
                    order_list.append({'Part number': component['Shop Part Number'], 'Quantity': component['Quantity']})
                csvExporter.save_list(order_list, workingDirectory + '/' + supplier + '.csv', write_header=False)
class ProjectConfigWidget(ProjectConfigurationWidget):
    """Project configuration dialog wired to this module's load/save helpers."""
    def load_project(self, filename):
        # Delegate to the module-level loader (absolutizes BOM paths)
        return loadProject(filename)
    def save_project_file(self, filename):
        # Persist the file list currently shown in the widget
        project = self.files_widget.create_file_list()
        return saveProject(filename, project)
def first_run():
    """Return True when the user-level configuration file has never been created."""
    config_path = files.get_user_home_directory() + '/.bommerge/configuration.json'
    return files.file_exist(config_path) is False
def main():
    """Entry point: resolve the project file (CLI flag or GUI picker) and merge it."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--proj", help="Project definition file")
    parser.add_argument("--nogui", help="Run bommerge without gui, only automatical merge will be performed.")
    args = parser.parse_args()
    # NOTE(review): first-run handling is an empty placeholder — presumably a
    # setup wizard was intended here
    if first_run():
        pass
    project = None
    if args.proj:
        project = loadProject(args.proj)
        working_directory = files.get_directory_from_path(args.proj)
    # No --proj (or load failed): fall back to the interactive project picker
    if project is None:
        projectConfigGui = ProjectConfigWidget()
        if projectConfigGui.result:
            working_directory = files.get_directory_from_path(projectConfigGui.result)
            project = loadProject(projectConfigGui.result)
    if project:
        mergeProject(project, working_directory, args.nogui)
# Script entry point
if __name__ == "__main__":
    main()
| 10,434 | 33 | 329 |
d499107a5099731a8a38c060088fba35d321c56e | 6,764 | py | Python | fairseq/data/token_block_dataset.py | Ruil/fairseq | 5cd5c334631a2aca1d3cfaaaddeec1b56df9e0e4 | [
"BSD-3-Clause"
] | null | null | null | fairseq/data/token_block_dataset.py | Ruil/fairseq | 5cd5c334631a2aca1d3cfaaaddeec1b56df9e0e4 | [
"BSD-3-Clause"
] | null | null | null | fairseq/data/token_block_dataset.py | Ruil/fairseq | 5cd5c334631a2aca1d3cfaaaddeec1b56df9e0e4 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
import numpy as np
import torch
from . import FairseqDataset
class TokenBlockDataset(FairseqDataset):
"""Break a Dataset of tokens into blocks.
Args:
dataset (~torch.utils.data.Dataset): dataset to break into blocks
sizes (List[int]): sentence lengths (required for 'complete' and 'eos')
block_size (int): maximum block size (ignored in 'eos' break mode)
break_mode (str, optional): Mode used for breaking tokens. Values can
be one of:
- 'none': break tokens into equally sized blocks (up to block_size)
- 'complete': break tokens into blocks (up to block_size) such that
blocks contains complete sentences, although block_size may be
exceeded if some sentences exceed block_size
- 'eos': each block contains one sentence (block_size is ignored)
include_targets (bool, optional): return next tokens as targets
(default: False).
"""
@property
| 39.555556 | 101 | 0.567859 | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
import numpy as np
import torch
from . import FairseqDataset
class TokenBlockDataset(FairseqDataset):
"""Break a Dataset of tokens into blocks.
Args:
dataset (~torch.utils.data.Dataset): dataset to break into blocks
sizes (List[int]): sentence lengths (required for 'complete' and 'eos')
block_size (int): maximum block size (ignored in 'eos' break mode)
break_mode (str, optional): Mode used for breaking tokens. Values can
be one of:
- 'none': break tokens into equally sized blocks (up to block_size)
- 'complete': break tokens into blocks (up to block_size) such that
blocks contains complete sentences, although block_size may be
exceeded if some sentences exceed block_size
- 'eos': each block contains one sentence (block_size is ignored)
include_targets (bool, optional): return next tokens as targets
(default: False).
"""
def __init__(self, dataset, sizes, block_size, pad, eos, break_mode=None, include_targets=False):
super().__init__()
self.dataset = dataset
self.pad = pad
self.eos = eos
self.include_targets = include_targets
self.slice_indices = []
print('sizes: ', sizes)
print('len of sizes: ', len(sizes))
assert len(dataset) == len(sizes)
sizes = np.array(sizes, dtype=int)
if break_mode is None or break_mode == 'none':
total_size = sum(sizes)
print('total size: ', total_size)
#sys.exit()
length = math.ceil(total_size / block_size)
print('block_size: ', block_size)
#sys.exit()
def block_at(i):
start = i * block_size
end = min(start + block_size, total_size)
return (start, end)
self.slice_indices = [block_at(i) for i in range(length)]
#print('self.slice_indices: ', self.slice_indices)
#print('len indices: ', len(self.slice_indices))
#sys.exit()
elif break_mode == 'complete':
tok_idx = 0
sz_idx = 0
curr_size = 0
while sz_idx < len(sizes):
if curr_size + sizes[sz_idx] <= block_size or curr_size == 0:
curr_size += sizes[sz_idx]
sz_idx += 1
else:
self.slice_indices.append((tok_idx, tok_idx + curr_size))
tok_idx += curr_size
curr_size = 0
if curr_size > 0:
self.slice_indices.append((tok_idx, tok_idx + curr_size))
elif break_mode == 'eos':
self.slice_indices = np.empty((len(sizes), 2), dtype=int)
curr = 0
for i, sz in enumerate(sizes):
self.slice_indices[i] = (curr, curr + sz)
curr += sz
else:
raise ValueError('Invalid break_mode: ' + break_mode)
self.sizes = np.array([e - s for s, e in self.slice_indices])
#print('self.sizes: ', self.sizes)
#sys.exit()
self.slice_indices = np.array(self.slice_indices, dtype=int)
# build index mapping block indices to the underlying dataset indices
self.block_to_dataset_index = np.empty((len(self.slice_indices), 3), dtype=int)
ds_idx, ds_remaining = -1, 0
for i, (s, e) in enumerate(self.slice_indices):
to_consume = e - s
if ds_remaining == 0:
ds_idx += 1
ds_remaining = sizes[ds_idx]
start_ds_idx = ds_idx
start_offset = sizes[ds_idx] - ds_remaining
while to_consume > ds_remaining:
to_consume -= ds_remaining
ds_idx += 1
ds_remaining = sizes[ds_idx]
ds_remaining -= to_consume
self.block_to_dataset_index[i] = (
start_ds_idx, # starting index in dataset
start_offset, # starting offset within starting index
ds_idx, # ending index in dataset
)
#print('start_ds_idx: ', start_ds_idx)
#print('start_offset', start_offset)
#print('ds_idx', ds_idx)
#sys.exit()
#print('len self.slice_indices: ', len(self.slice_indices))
#print('map: ', self.block_to_dataset_index)
#sys.exit()
assert ds_remaining == 0
assert ds_idx == len(self.dataset) - 1
def __getitem__(self, index):
print('len block_to_dataset_index: ', len(self.block_to_dataset_index))
start_ds_idx, start_offset, end_ds_idx = self.block_to_dataset_index[index]
buffer = torch.cat([
self.dataset[idx] for idx in range(start_ds_idx, end_ds_idx + 1)
])
slice_s, slice_e = self.slice_indices[index]
length = slice_e - slice_s
s, e = start_offset, start_offset + length
item = buffer[s:e]
print('include targets: ', self.include_targets)
#sys.exit()
if self.include_targets:
# *target* is the original sentence (=item)
# *source* is rotated left by 1 (maybe left-padded with eos)
# *past_target* is rotated left by 2 (left-padded as needed)
if s == 0:
source = torch.cat([item.new([self.eos]), buffer[0:e - 1]])
past_target = torch.cat([item.new([self.pad, self.eos]), buffer[0:e - 2]])
else:
source = buffer[s - 1:e - 1]
if s == 1:
past_target = torch.cat([item.new([self.eos]), buffer[0:e - 2]])
else:
past_target = buffer[s - 2:e - 2]
print('idx: ', index)
print('target: ', past_target)
print('item: ', item)
print('source: ', source)
return source, item, past_target
sys.exit()
return item
def __len__(self):
    """Number of token blocks in this dataset."""
    blocks = self.slice_indices
    return len(blocks)
@property
def supports_prefetch(self):
    """Whether the wrapped dataset supports prefetching (False if absent)."""
    try:
        return self.dataset.supports_prefetch
    except AttributeError:
        return False
def prefetch(self, indices):
    """Prefetch every underlying dataset item touched by *indices*."""
    needed = set()
    for index in indices:
        start_ds_idx, _, end_ds_idx = self.block_to_dataset_index[index]
        needed.update(range(start_ds_idx, end_ds_idx + 1))
    self.dataset.prefetch(needed)
| 5,336 | 0 | 134 |
1bfa0533a1a96ae691f7676df2620d787c8e4bee | 358 | py | Python | scripts/a3_withdraw_matic.py | PatrickAlphaC/vrf_pizza | c39c03ea707234a814d3d57419111c8e049d87a9 | [
"MIT"
] | 9 | 2021-03-13T03:47:12.000Z | 2021-09-01T13:49:58.000Z | scripts/a3_withdraw_matic.py | PatrickAlphaC/vrf_pizza | c39c03ea707234a814d3d57419111c8e049d87a9 | [
"MIT"
] | 1 | 2021-03-07T21:32:55.000Z | 2021-03-29T00:34:22.000Z | scripts/a3_withdraw_matic.py | PatrickAlphaC/vrf_pizza | c39c03ea707234a814d3d57419111c8e049d87a9 | [
"MIT"
] | 1 | 2021-09-01T13:49:58.000Z | 2021-09-01T13:49:58.000Z | #!/usr/bin/python3
import os
from brownie import VRF_Pizza, VRF_Pizza_RNG, accounts, network, config, interface
STATIC_SEED = 123
| 27.538462 | 82 | 0.723464 | #!/usr/bin/python3
import os
from brownie import VRF_Pizza, VRF_Pizza_RNG, accounts, network, config, interface
STATIC_SEED = 123
def main(amount=4000000000000000000):
    """Withdraw MATIC from the most recently deployed VRF_Pizza contract.

    Args:
        amount (int): amount to withdraw, in wei (default: 4e18 wei = 4 MATIC).
    """
    dev = accounts.add(os.getenv(config['wallets']['from_key']))
    # Get the most recently deployed VRF_Pizza contract
    # (the original comment said "PriceFeed", which was wrong).
    vrf_pizza = VRF_Pizza[-1]
    vrf_pizza.withdraw(amount, {'from': dev})
bc757e04afd30e67cca0d546bb738a5c4acbd1ce | 1,460 | py | Python | .venv/lib/python3.8/site-packages/vectorbt/utils/math.py | eo1989/VectorBTanalysis | bea3deaf2ee3fc114b308146f2af3e4f35f70197 | [
"MIT"
] | null | null | null | .venv/lib/python3.8/site-packages/vectorbt/utils/math.py | eo1989/VectorBTanalysis | bea3deaf2ee3fc114b308146f2af3e4f35f70197 | [
"MIT"
] | null | null | null | .venv/lib/python3.8/site-packages/vectorbt/utils/math.py | eo1989/VectorBTanalysis | bea3deaf2ee3fc114b308146f2af3e4f35f70197 | [
"MIT"
] | null | null | null | """Math utilities."""
import numpy as np
from numba import njit
rel_tol = 1e-10
abs_tol = 0.
def is_close(a, b, rel_tol=rel_tol, abs_tol=abs_tol):
    """Tell whether two values are approximately equal."""
    # NaN and infinite values never compare as close (not even to themselves).
    if not (np.isfinite(a) and np.isfinite(b)):
        return False
    if a == b:
        return True
    tolerance = max(rel_tol * max(abs(a), abs(b)), abs_tol)
    return abs(a - b) <= tolerance
# JIT-compile the pure-Python implementation above so both variants share
# a single definition.
is_close_nb = njit(cache=True)(is_close)
"""Numba-compiled version of `is_close`."""
def is_close_or_less(a, b, rel_tol=rel_tol, abs_tol=abs_tol):
    """Tell whether the first value is approximately less than or equal to the second value."""
    # Approximate equality wins before strict ordering is consulted.
    return is_close(a, b, rel_tol=rel_tol, abs_tol=abs_tol) or a < b
# Same logic as `is_close_or_less`, but calls the jitted `is_close_nb` so
# the whole function can run under numba's nopython mode.
@njit(cache=True)
def is_close_or_less_nb(a, b, rel_tol=rel_tol, abs_tol=abs_tol):
    """Numba-compiled version of `is_close_or_less`."""
    if is_close_nb(a, b, rel_tol=rel_tol, abs_tol=abs_tol):
        return True
    return a < b
def is_less(a, b, rel_tol=rel_tol, abs_tol=abs_tol):
    """Tell whether the first value is approximately less than the second value."""
    # Values that compare as approximately equal are not "less".
    return False if is_close(a, b, rel_tol=rel_tol, abs_tol=abs_tol) else a < b
# Same logic as `is_less`, but calls the jitted `is_close_nb` so the whole
# function can run under numba's nopython mode.
@njit(cache=True)
def is_less_nb(a, b, rel_tol=rel_tol, abs_tol=abs_tol):
    """Numba-compiled version of `is_less`."""
    if is_close_nb(a, b, rel_tol=rel_tol, abs_tol=abs_tol):
        return False
    return a < b
| 27.54717 | 95 | 0.666438 | """Math utilities."""
import numpy as np
from numba import njit
rel_tol = 1e-10
abs_tol = 0.
def is_close(a, b, rel_tol=rel_tol, abs_tol=abs_tol):
    """Tell whether two values are approximately equal."""
    # NaN never compares as close, not even to itself.
    if np.isnan(a) or np.isnan(b):
        return False
    # Infinities are rejected as well, even when both operands are infinite.
    if np.isinf(a) or np.isinf(b):
        return False
    if a == b:
        return True
    # Relative tolerance scales with the larger magnitude; abs_tol is a floor.
    return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
# JIT-compile the pure-Python implementation above so both variants share
# a single definition.
is_close_nb = njit(cache=True)(is_close)
"""Numba-compiled version of `is_close`."""
def is_close_or_less(a, b, rel_tol=rel_tol, abs_tol=abs_tol):
    """Tell whether the first value is approximately less than or equal to the second value."""
    # Approximate equality wins before strict ordering is consulted.
    if is_close(a, b, rel_tol=rel_tol, abs_tol=abs_tol):
        return True
    return a < b
# Same logic as `is_close_or_less`, but calls the jitted `is_close_nb` so
# the whole function can run under numba's nopython mode.
@njit(cache=True)
def is_close_or_less_nb(a, b, rel_tol=rel_tol, abs_tol=abs_tol):
    """Numba-compiled version of `is_close_or_less`."""
    if is_close_nb(a, b, rel_tol=rel_tol, abs_tol=abs_tol):
        return True
    return a < b
def is_less(a, b, rel_tol=rel_tol, abs_tol=abs_tol):
    """Tell whether the first value is approximately less than the second value."""
    # Values that compare as approximately equal are not "less".
    if is_close(a, b, rel_tol=rel_tol, abs_tol=abs_tol):
        return False
    return a < b
# Same logic as `is_less`, but calls the jitted `is_close_nb` so the whole
# function can run under numba's nopython mode.
@njit(cache=True)
def is_less_nb(a, b, rel_tol=rel_tol, abs_tol=abs_tol):
    """Numba-compiled version of `is_less`."""
    if is_close_nb(a, b, rel_tol=rel_tol, abs_tol=abs_tol):
        return False
    return a < b
| 0 | 0 | 0 |
d3ea9909cecbb0a14b18ea874848473f176f24e3 | 831 | py | Python | tsaotun/lib/Docker/Image/rm.py | qazbnm456/tsaotun | 70186faebd5303961d5996c758f7c9147c4439ba | [
"Apache-2.0"
] | 47 | 2017-01-15T08:33:46.000Z | 2022-02-11T22:37:48.000Z | tsaotun/lib/Docker/Image/rm.py | qazbnm456/dokcer | 70186faebd5303961d5996c758f7c9147c4439ba | [
"Apache-2.0"
] | null | null | null | tsaotun/lib/Docker/Image/rm.py | qazbnm456/dokcer | 70186faebd5303961d5996c758f7c9147c4439ba | [
"Apache-2.0"
] | 2 | 2017-01-16T13:10:22.000Z | 2019-03-28T17:05:04.000Z | """This module contains `docker image rm` class"""
from docker.errors import APIError
from .command import Command
class Rm(Command):
    """This class implements `docker image rm` command"""

    # Command identifier; also used as the key under which results are stored.
    name = "image rm"
    # NOTE(review): presumably other commands this one requires to run first
    # (none here) — confirm against the Command base class.
    require = []
| 24.441176 | 57 | 0.54994 | """This module contains `docker image rm` class"""
from docker.errors import APIError
from .command import Command
class Rm(Command):
    """This class implements `docker image rm` command"""

    # Command identifier; also used as the settings key for the result.
    name = "image rm"
    # Commands required before this one can run (none).
    require = []

    def __init__(self):
        """Initialize the command and reserve its result slot."""
        Command.__init__(self)
        self.settings[self.name] = None

    def eval_command(self, args):
        """Remove every image listed in ``args['images']``.

        Args:
            args (dict): arguments for the docker client's ``remove_image``
                call; the ``images`` entry is popped and each of its items
                is removed in turn, forwarding the remaining options.

        Raises:
            APIError: propagated unchanged from the docker client.
        """
        # Original code caught APIError only to re-raise it (`raise e`),
        # and juggled args['image'] with del/re-set on every iteration.
        removed = []
        images = args.pop('images')
        for image in images:
            self.client.remove_image(image=image, **args)
            removed.append(image)
        self.settings[self.name] = '\n'.join(removed)

    def final(self):
        """Return the newline-separated list of removed image names."""
        return self.settings[self.name]
| 514 | 0 | 81 |
d7272969a073a6a76d09365b16f3fbe62aad6ad2 | 22,050 | py | Python | vstools/readers.py | libyal/vstools | 3e8bb323c4f1b51ff0dc18654707ab44b6bb6cd5 | [
"Apache-2.0"
] | 3 | 2017-07-02T11:01:22.000Z | 2021-01-31T04:52:58.000Z | vstools/readers.py | libyal/vstools | 3e8bb323c4f1b51ff0dc18654707ab44b6bb6cd5 | [
"Apache-2.0"
] | 7 | 2017-06-30T08:09:34.000Z | 2018-03-22T07:29:51.000Z | vstools/readers.py | libyal/vstools | 3e8bb323c4f1b51ff0dc18654707ab44b6bb6cd5 | [
"Apache-2.0"
] | 1 | 2017-09-10T07:34:04.000Z | 2017-09-10T07:34:04.000Z | # -*- coding: utf-8 -*-
"""Project and solution file reader classes."""
import abc
import re
from vstools import resources
class FileReader(object):
  """File reader with a one-line look-ahead buffer."""

  def __init__(self, encoding='utf-8'):
    """Initializes a file reader.

    Args:
      encoding (str): encoding used to decode lines read from the file.
    """
    super(FileReader, self).__init__()
    self._encoding = encoding
    self._file = None
    self._line = None

  def _ReadBinaryData(self, size):
    """Reads binary data.

    Args:
      size (int): number of bytes to read.

    Returns:
      bytes: binary data.
    """
    return self._file.read(size)

  def _ReadLine(self, look_ahead=False):
    """Reads a line.

    Args:
      look_ahead (Optional[bool]): indicated if the line should be considered
          read (False) or not (True).

    Returns:
      str: line stripped of leading and trailing white space or None if no
          input is available.
    """
    if self._line is None:
      current_line = self._file.readline()
      if current_line:
        current_line = current_line.strip()
      if look_ahead:
        # Buffer the line so the next call returns it again.
        self._line = current_line
    else:
      # Serve the buffered look-ahead line; consume it unless this is
      # another look-ahead.
      current_line = self._line
      if not look_ahead:
        self._line = None

    if isinstance(current_line, bytes):
      current_line = current_line.decode(self._encoding)

    return current_line

  def Close(self):
    """Closes the file."""
    self._file.close()

  def Open(self, filename):
    """Opens the file.

    Args:
      filename (str): path of the file.
    """
    self._file = open(filename, 'rb')  # pylint: disable=consider-using-with
class VSProjectFileReader(FileReader):
  """Visual Studio project file reader.

  Common base class for the version-specific project file readers; it only
  inherits the generic line-oriented reading behavior from FileReader.
  """
class VS2008ProjectFileReader(VSProjectFileReader):
  """Visual Studio 2008 (.vcproj) project file reader."""

  # Maps of XML attribute names in the project file to attribute names of
  # VSProjectConfiguration, one map per tool section.
  _CONFIGURATION_OPTIONS = {
      'CharacterSet': 'character_set',
      'ConfigurationType': 'output_type',
      'ManagedExtensions': 'managed_extensions',
      'WholeProgramOptimization': 'whole_program_optimization',
  }

  _TOOL_COMPILER_CONFIGURATION_OPTIONS = {
      'AdditionalIncludeDirectories': 'include_directories',
      'BasicRuntimeChecks': 'basic_runtime_checks',
      'CompileAs': 'compile_as',
      'DebugInformationFormat': 'debug_information_format',
      'Detect64BitPortabilityProblems': 'detect_64bit_portability_problems',
      'EnableFunctionLevelLinking': 'enable_function_level_linking',
      'EnableIntrinsicFunctions': 'enable_intrinsic_functions',
      'Optimization': 'optimization',
      'PreprocessorDefinitions': 'preprocessor_definitions',
      'RuntimeLibrary': 'runtime_library',
      'SmallerTypeCheck': 'smaller_type_check',
      'UsePrecompiledHeader': 'precompiled_header',
      'WarnAsError': 'warning_as_error',
      'WarningLevel': 'warning_level',
  }

  _TOOL_LIBRARIAN_CONFIGURATION_OPTIONS = {
      'IgnoreAllDefaultLibraries': 'librarian_ignore_defaults',
      'OutputFile': 'librarian_output_file',
  }

  _TOOL_LINKER_CONFIGURATION_OPTIONS = {
      'AdditionalDependencies': 'additional_dependencies',
      'AdditionalLibraryDirectories': 'library_directories',
      'DataExecutionPrevention': 'data_execution_prevention',
      'EnableCOMDATFolding': 'enable_comdat_folding',
      'FixedBaseAddress': 'fixed_base_address',
      'GenerateDebugInformation': 'generate_debug_information',
      'ImportLibrary': 'linker_values_set',
      'LinkIncremental': 'link_incremental',
      'ModuleDefinitionFile': 'module_definition_file',
      'OptimizeReferences': 'optimize_references',
      'OutputDirectory': 'linker_output_directory',
      'OutputFile': 'linker_output_file',
      'RandomizedBaseAddress': 'randomized_base_address',
      'SubSystem': 'sub_system',
      'TargetMachine': 'target_machine',
  }

  def _ParseConfigurationOption(
      self, project_configuration, definition, name, line):
    """Parses a configuration option.

    Args:
      project_configuration (VSProjectConfiguration): project configuration.
      definition (str): definition of the configuration value in file.
      name (str): name of the configuration value in the project information.
      line (str): line that contains the configuration value.
    """
    regex_pattern = '{0:s}="([^"]*)"'.format(definition)
    values = re.findall(regex_pattern, line)
    if len(values) == 1:
      setattr(project_configuration, name, values[0])

  def _ParseConfigurationOptions(
      self, project_configuration, configuration_options, line):
    """Parses configuration options.

    Args:
      project_configuration (VSProjectConfiguration): project configuration.
      configuration_options (dict[str, str]): configuration options defined
          as a name per definition.
      line (str): line that contains the configuration value.
    """
    configuration_definition, _, _ = line.partition('=')

    configuration_value = configuration_options.get(
        configuration_definition, None)

    if configuration_value:
      self._ParseConfigurationOption(
          project_configuration, configuration_definition, configuration_value,
          line)

  def _ReadConfiguration(self, line):
    """Reads a configuration.

    Args:
      line (str): line that contains the start of the configuration section.

    Returns:
      VSProjectConfiguration: configuration or None if no configuration was
          found.
    """
    if not line or not line.startswith('<Configuration'):
      return None

    project_configuration = resources.VSProjectConfiguration()

    found_tool = False
    found_tool_compiler = False
    found_tool_librarian = False
    found_tool_linker = False

    while line:
      line = self._ReadLine()

      if line.startswith('</Configuration>'):
        break

      if found_tool:
        if line.startswith('/>'):
          found_tool = False
          found_tool_compiler = False
          found_tool_librarian = False
          found_tool_linker = False

        elif found_tool_compiler:
          self._ParseConfigurationOptions(
              project_configuration, self._TOOL_COMPILER_CONFIGURATION_OPTIONS,
              line)

          # Include directories arrive as a single ';'-separated string.
          if isinstance(
              project_configuration.include_directories, str):
            project_configuration.include_directories = (
                project_configuration.include_directories.split(';'))

        elif found_tool_librarian:
          self._ParseConfigurationOptions(
              project_configuration, self._TOOL_LIBRARIAN_CONFIGURATION_OPTIONS,
              line)

        elif found_tool_linker:
          self._ParseConfigurationOptions(
              project_configuration, self._TOOL_LINKER_CONFIGURATION_OPTIONS,
              line)

          additional_dependencies = (
              project_configuration.additional_dependencies)
          if isinstance(additional_dependencies, str):
            # pylint: disable=no-member
            additional_dependencies = additional_dependencies.split(' ')

          project_configuration.additional_dependencies = []
          for dependency in additional_dependencies:
            # BUG FIX: str.replace returns a new string; the original code
            # discarded the result, so the substitution was a no-op.
            dependency = dependency.replace(
                '$(ConfigurationName)', '$(Configuration)')
            project_configuration.additional_dependencies.append(dependency)

          if isinstance(
              project_configuration.library_directories, str):
            project_configuration.library_directories = (
                project_configuration.library_directories.split(';'))

        elif line.startswith('Name="VCCLCompilerTool"'):
          found_tool_compiler = True

        elif line.startswith('Name="VCLibrarianTool"'):
          found_tool_librarian = True

        elif line.startswith('Name="VCLinkerTool"'):
          found_tool_linker = True

      elif line.startswith('<Tool'):
        found_tool = True

      elif line.startswith('Name='):
        # For more than 1 match findall will return a list with a tuple.
        values = re.findall('Name="([^|]*)[|]([^"]*)"', line)[0]
        if len(values) == 2:
          project_configuration.name = values[0]
          project_configuration.platform = values[1]

      else:
        self._ParseConfigurationOptions(
            project_configuration, self._CONFIGURATION_OPTIONS, line)

      # TODO: PlatformToolset.
      # TargetFrameworkVersion ?

    # Add the target machine when not defined.
    if not project_configuration.target_machine:
      if project_configuration.platform == 'Win32':
        project_configuration.target_machine = '1'
      # TODO: assuming here that 2 is x64.
      elif project_configuration.platform == 'x64':
        project_configuration.target_machine = '2'

    return project_configuration

  def _ReadConfigurations(self, project_information):
    """Reads the configurations.

    Args:
      project_information (VSProjectInformation): project information.
    """
    # Find the start of the configurations section.
    result = False
    line = self._ReadLine()

    while line:
      result = line.startswith('<Configurations>')
      if result:
        break

      line = self._ReadLine()

    if not result:
      return

    while line:
      line = self._ReadLine()

      if line.startswith('</Configurations>'):
        break

      if line.startswith('<Configuration'):
        project_configuration = self._ReadConfiguration(line)

        if project_configuration:
          project_information.configurations.Append(project_configuration)

  def _ReadFiles(self, project_information):
    """Reads the files.

    Args:
      project_information (VSProjectInformation): project information.
    """
    # Find the start of the files section.
    result = False
    line = self._ReadLine()

    while line:
      result = line.startswith('<Files>')
      if result:
        break

      line = self._ReadLine()

    if result:
      found_filter = False
      found_filter_source_files = False
      found_filter_header_files = False
      found_filter_resource_files = False

      while line:
        line = self._ReadLine()

        if line.startswith('</Files>'):
          break

        if found_filter:
          if line.startswith('</Filter>'):
            found_filter = False
            found_filter_source_files = False
            found_filter_header_files = False
            found_filter_resource_files = False

          elif found_filter_source_files:
            if line.startswith('RelativePath='):
              values = re.findall('RelativePath="([^"]*)"', line)

              if len(values) == 1:
                project_information.source_files.append(values[0])

          elif found_filter_header_files:
            if line.startswith('RelativePath='):
              values = re.findall('RelativePath="([^"]*)"', line)

              if len(values) == 1:
                project_information.header_files.append(values[0])

          elif found_filter_resource_files:
            if line.startswith('RelativePath='):
              values = re.findall('RelativePath="([^"]*)"', line)

              if len(values) == 1:
                project_information.resource_files.append(values[0])

          elif line.startswith('Name="Source Files"'):
            found_filter_source_files = True

          elif line.startswith('Name="Header Files"'):
            found_filter_header_files = True

          elif line.startswith('Name="Resource Files"'):
            found_filter_resource_files = True

        elif line.startswith('<Filter'):
          found_filter = True

  def _ReadProjectInformation(self, project_information):
    """Reads project information.

    Args:
      project_information (VSProjectInformation): project information.
    """
    line = self._ReadLine()
    while line:
      if line.startswith('>'):
        break

      if line.startswith('Name='):
        values = re.findall('Name="([^"]*)"', line)
        if len(values) == 1:
          project_information.name = values[0]

      elif line.startswith('ProjectGUID='):
        values = re.findall('ProjectGUID="{([^}]*)}"', line)
        if len(values) == 1:
          project_information.guid = values[0]

      elif line.startswith('RootNamespace='):
        values = re.findall('RootNamespace="([^"]*)"', line)
        if len(values) == 1:
          project_information.root_name_space = values[0]

      elif line.startswith('Keyword='):
        values = re.findall('Keyword="([^"]*)"', line)
        if len(values) == 1:
          project_information.keyword = values[0]

      line = self._ReadLine()

  def ReadHeader(self):
    """Reads a file header.

    Returns:
      bool: True if successful or false otherwise.
    """
    # TODO check encoding?

    line = self._ReadLine()
    if not line or not line.startswith('<?xml version="1.0"'):
      return False

    line = self._ReadLine()
    if not line or not line.startswith('<VisualStudioProject'):
      return False

    line = self._ReadLine()
    if not line or not line.startswith('ProjectType="Visual C++"'):
      return False

    line = self._ReadLine()
    if not line or not line.startswith('Version="9,00"'):
      return False

    return True

  def ReadProject(self):
    """Reads the project.

    Returns:
      VSProjectInformation: project information if successful or None otherwise.
    """
    project_information = resources.VSProjectInformation()

    self._ReadProjectInformation(project_information)
    self._ReadConfigurations(project_information)
    self._ReadFiles(project_information)

    return project_information
# Placeholder readers for later Visual Studio project file formats; reading
# is not implemented yet for any of them.
class VS2010ProjectFileReader(VSProjectFileReader):
  """Visual Studio 2010 project file reader."""

  # TODO: implement.


class VS2012ProjectFileReader(VSProjectFileReader):
  """Visual Studio 2012 project file reader."""

  # TODO: implement.


class VS2013ProjectFileReader(VSProjectFileReader):
  """Visual Studio 2013 project file reader."""

  # TODO: implement.


class VS2015ProjectFileReader(VSProjectFileReader):
  """Visual Studio 2015 project file reader."""

  # TODO: implement.


class VS2017ProjectFileReader(VSProjectFileReader):
  """Visual Studio 2017 project file reader."""

  # TODO: implement.


class VS2019ProjectFileReader(VSProjectFileReader):
  """Visual Studio 2019 project file reader."""

  # TODO: implement.
class VSSolutionFileReader(FileReader):
  """Visual Studio solution (.sln) file reader."""

  # Note that redundant-returns-doc is broken for pylint 1.7.x for abstract
  # methods
  # pylint: disable=redundant-returns-doc

  @abc.abstractmethod
  def _CheckFormatVersion(self, line):
    """Checks the format version.

    Args:
      line (str): line containing the Visual Studio format version.

    Returns:
      bool: True if successful or false otherwise.
    """

  # pylint: disable=unused-argument
  def _CheckVisualStudioVersion(self, line):
    """Checks the Visual Studio version.

    Args:
      line (str): line containing the Visual Studio format version.

    Returns:
      bool: True if successful or false otherwise.
    """
    # Base implementation rejects any VisualStudioVersion line; subclasses
    # that support one override this check.
    return False

  def ReadConfigurations(self):
    """Reads the configurations.

    Returns:
      VSConfigurations: configurations or None if not available.
    """
    solution_configurations = resources.VSConfigurations()

    line = self._ReadLine(look_ahead=True)
    if not line or line != 'Global':
      return None

    found_section = False

    line = self._ReadLine()
    while line and line != 'EndGlobal':
      line = self._ReadLine()

      if found_section:
        if line == 'EndGlobalSection':
          found_section = False

        else:
          # Configuration lines look like: "Debug|Win32 = Debug|Win32".
          # For more than 1 match findall will return a list with a tuple.
          values = re.findall('([^|]*)[|]([^ ]*) = ([^|]*)[|]([^ ]*)', line)

          if len(values) == 1:
            values = values[0]
            # Only accept entries where both sides match exactly.
            if (len(values) == 4 and values[0] == values[2] and
                values[1] == values[3]):
              configuration = resources.VSSolutionConfiguration()
              configuration.name = values[0]
              configuration.platform = values[1]

              solution_configurations.Append(configuration)

      elif line == ('GlobalSection(SolutionConfigurationPlatforms) = '
                    'preSolution'):
        found_section = True

    return solution_configurations

  def ReadHeader(self):
    """Reads a file header.

    Returns:
      bool: True if successful or false otherwise.
    """
    # The UTF-8 byte order mark followed by a carriage return + newline.
    binary_data = self._ReadBinaryData(5)
    if binary_data != b'\xef\xbb\xbf\r\n':
      return False

    line = self._ReadLine()
    if not line or not line.startswith(
        'Microsoft Visual Studio Solution File, Format Version '):
      return False

    if not self._CheckFormatVersion(line):
      return False

    visual_studio_version_line = None

    # Skip the optional "# Visual C++ ..." comment line and capture the
    # optional "VisualStudioVersion = ..." line.
    line = self._ReadLine(look_ahead=True)
    while line:
      if line.startswith('# Visual C++ '):
        self._ReadLine()

      elif line.startswith('VisualStudioVersion = '):
        visual_studio_version_line = self._ReadLine()

      else:
        break

      line = self._ReadLine(look_ahead=True)

    if visual_studio_version_line and not self._CheckVisualStudioVersion(
        visual_studio_version_line):
      return False

    return True

  def ReadProject(self):
    """Reads a project.

    Returns:
      VSSolutionProject: project if successful or None otherwise.
    """
    # 8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942 is a Visual C++ related GUID.
    line = self._ReadLine(look_ahead=True)
    if not line or not line.startswith(
        'Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = '):
      return None

    # For more than 1 match findall will return a list with a tuple.
    values = re.findall(
        ('Project\\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}"\\) = "([^"]*)", '
         '"([^"]*)\\.vcproj", '
         '"{([0-9A-F]*-[0-9A-F]*-[0-9A-F]*-[0-9A-F]*-[0-9A-F]*)}"'),
        line)

    if len(values) != 1:
      return None

    values = values[0]
    if len(values) != 3:
      return None

    solution_project = resources.VSSolutionProject(
        values[0], values[1], values[2])

    found_dependencies = False

    line = self._ReadLine()
    while line and line != 'EndProject':
      line = self._ReadLine()

      if found_dependencies:
        if line == 'EndProjectSection':
          found_dependencies = False

        else:
          # The dependencies are defined as: {%GUID%} = {%GUID%}
          # For more than 1 match findall will return a list with a tuple.
          guids = re.findall(
              ('{([0-9A-F]*-[0-9A-F]*-[0-9A-F]*-[0-9A-F]*-[0-9A-F]*)} = '
               '{([0-9A-F]*-[0-9A-F]*-[0-9A-F]*-[0-9A-F]*-[0-9A-F]*)}'),
              line)

          if len(guids) == 1:
            guids = guids[0]
            if len(guids) == 2 and guids[0] == guids[1]:
              solution_project.AddDependency(guids[0])

      elif line == 'ProjectSection(ProjectDependencies) = postProject':
        found_dependencies = True

    return solution_project

  def ReadProjects(self):
    """Reads the projects.

    Returns:
      list[VSSolutionProject]: projects in preserved order.
    """
    solution_projects = []
    solution_project = self.ReadProject()

    while solution_project:
      solution_projects.append(solution_project)
      solution_project = self.ReadProject()

    return solution_projects
class VS2008SolutionFileReader(VSSolutionFileReader):
  """Visual Studio 2008 solution file reader."""

  def _CheckFormatVersion(self, line):
    """Checks the format version.

    Args:
      line (str): line containing the Visual Studio format version.

    Returns:
      bool: True if successful or false otherwise.
    """
    # Visual Studio 2008 solutions use format version 10.00.
    return line.endswith(' 10.00')
class VS2010SolutionFileReader(VSSolutionFileReader):
  """Visual Studio 2010 solution file reader."""

  def _CheckFormatVersion(self, line):
    """Checks the format version.

    Args:
      line (str): line containing the Visual Studio format version.

    Returns:
      bool: True if successful or false otherwise.
    """
    # Visual Studio 2010 solutions use format version 11.00.
    return line.endswith(' 11.00')
class VS2012SolutionFileReader(VSSolutionFileReader):
  """Visual Studio 2012 solution file reader."""

  def _CheckFormatVersion(self, line):
    """Checks the format version.

    Args:
      line (str): line containing the Visual Studio format version.

    Returns:
      bool: True if successful or false otherwise.
    """
    # Visual Studio 2012 and later solutions use format version 12.00,
    # hence the later readers subclass this one.
    return line.endswith(' 12.00')
class VS2013SolutionFileReader(VS2012SolutionFileReader):
  """Visual Studio 2013 solution file reader."""

  def _CheckVisualStudioVersion(self, line):
    """Checks the Visual Studio version.

    Args:
      line (str): line containing the Visual Studio format version.

    Returns:
      bool: True if successful or false otherwise.
    """
    version = line.split(' = ')[1]
    # Visual Studio 2013 reports internal version 12.x.
    return version.startswith('12.')
class VS2015SolutionFileReader(VS2012SolutionFileReader):
  """Visual Studio 2015 solution file reader."""

  def _CheckVisualStudioVersion(self, line):
    """Checks the Visual Studio version.

    Args:
      line (str): line containing the Visual Studio format version.

    Returns:
      bool: True if successful or false otherwise.
    """
    version = line.split(' = ')[1]
    # Visual Studio 2015 reports internal version 14.x.
    return version.startswith('14.')
class VS2017SolutionFileReader(VS2012SolutionFileReader):
  """Visual Studio 2017 solution file reader."""

  def _CheckVisualStudioVersion(self, line):
    """Checks the Visual Studio version.

    Args:
      line (str): line containing the Visual Studio format version.

    Returns:
      bool: True if successful or false otherwise.
    """
    version = line.split(' = ')[1]
    # Visual Studio 2017 reports internal version 15.x.
    return version.startswith('15.')
class VS2019SolutionFileReader(VS2012SolutionFileReader):
  """Visual Studio 2019 solution file reader."""

  def _CheckVisualStudioVersion(self, line):
    """Checks the Visual Studio version.

    Args:
      line (str): line containing the Visual Studio format version.

    Returns:
      bool: True if successful or false otherwise.
    """
    version = line.split(' = ')[1]
    # Visual Studio 2019 reports internal version 16.x; the original check
    # for '15.' (the VS2017 version) was a copy-paste error from the
    # VS2017 reader above.
    return version.startswith('16.')
| 28.451613 | 80 | 0.651519 | # -*- coding: utf-8 -*-
"""Project and solution file reader classes."""
import abc
import re
from vstools import resources
class FileReader(object):
  """File reader."""

  def __init__(self, encoding='utf-8'):
    """Initializes a file reader.

    Args:
      encoding (str): encoding.
    """
    super(FileReader, self).__init__()
    self._encoding = encoding
    # File object set by Open(); the file is opened in binary mode.
    self._file = None
    # One-line look-ahead buffer used by _ReadLine().
    self._line = None

  def _ReadBinaryData(self, size):
    """Reads binary data.

    Args:
      size (int): number of bytes to read.

    Returns:
      bytes: binary data.
    """
    return self._file.read(size)

  def _ReadLine(self, look_ahead=False):
    """Reads a line.

    Args:
      look_ahead (Optional[bool]): indicated if the line should be considered
          read (False) or not (True).

    Returns:
      str: line stripped of leading and trailing white space or None if no
          input is available.
    """
    if self._line is not None:
      # Serve the buffered look-ahead line; consume it unless this call is
      # another look-ahead.
      line = self._line
      if not look_ahead:
        self._line = None

    else:
      line = self._file.readline()
      if line:
        line = line.strip()
      if look_ahead:
        # Buffer the line so the next call returns it again.
        self._line = line

    if isinstance(line, bytes):
      line = line.decode(self._encoding)

    return line

  def Close(self):
    """Closes the file."""
    self._file.close()

  def Open(self, filename):
    """Opens the file.

    Args:
      filename (str): path of the file.
    """
    # Binary mode: _ReadBinaryData() compares raw bytes and _ReadLine()
    # decodes per line using self._encoding.
    self._file = open(filename, 'rb')  # pylint: disable=consider-using-with
class VSProjectFileReader(FileReader):
  """Visual Studio project file reader.

  Common base class for the version-specific project file readers; it only
  inherits the generic line-oriented reading behavior from FileReader.
  """
class VS2008ProjectFileReader(VSProjectFileReader):
"""Visual Studio 2008 project file reader."""
_CONFIGURATION_OPTIONS = {
'CharacterSet': 'character_set',
'ConfigurationType': 'output_type',
'ManagedExtensions': 'managed_extensions',
'WholeProgramOptimization': 'whole_program_optimization',
}
_TOOL_COMPILER_CONFIGURATION_OPTIONS = {
'AdditionalIncludeDirectories': 'include_directories',
'BasicRuntimeChecks': 'basic_runtime_checks',
'CompileAs': 'compile_as',
'DebugInformationFormat': 'debug_information_format',
'Detect64BitPortabilityProblems': 'detect_64bit_portability_problems',
'EnableFunctionLevelLinking': 'enable_function_level_linking',
'EnableIntrinsicFunctions': 'enable_intrinsic_functions',
'Optimization': 'optimization',
'PreprocessorDefinitions': 'preprocessor_definitions',
'RuntimeLibrary': 'runtime_library',
'SmallerTypeCheck': 'smaller_type_check',
'UsePrecompiledHeader': 'precompiled_header',
'WarnAsError': 'warning_as_error',
'WarningLevel': 'warning_level',
}
_TOOL_LIBRARIAN_CONFIGURATION_OPTIONS = {
'IgnoreAllDefaultLibraries': 'librarian_ignore_defaults',
'OutputFile': 'librarian_output_file',
}
_TOOL_LINKER_CONFIGURATION_OPTIONS = {
'AdditionalDependencies': 'additional_dependencies',
'AdditionalLibraryDirectories': 'library_directories',
'DataExecutionPrevention': 'data_execution_prevention',
'EnableCOMDATFolding': 'enable_comdat_folding',
'FixedBaseAddress': 'fixed_base_address',
'GenerateDebugInformation': 'generate_debug_information',
'ImportLibrary': 'linker_values_set',
'LinkIncremental': 'link_incremental',
'ModuleDefinitionFile': 'module_definition_file',
'OptimizeReferences': 'optimize_references',
'OutputDirectory': 'linker_output_directory',
'OutputFile': 'linker_output_file',
'RandomizedBaseAddress': 'randomized_base_address',
'SubSystem': 'sub_system',
'TargetMachine': 'target_machine',
}
def _ParseConfigurationOption(
self, project_configuration, definition, name, line):
"""Parses a configuration option.
Args:
project_configuration (VSProjectConfiguration): project configuration.
definition (str): definition of the configuration value in file.
name (str): name of the configuration value in the project information.
line (str): line that contains the configuration value.
"""
regex_pattern = '{0:s}="([^"]*)"'.format(definition)
values = re.findall(regex_pattern, line)
if len(values) == 1:
setattr(project_configuration, name, values[0])
def _ParseConfigurationOptions(
self, project_configuration, configuration_options, line):
"""Parses configuration options.
Args:
project_configuration (VSProjectConfiguration): project configuration.
configuration_options (dict[str, str]): configuration options defined
as a name per definition.
line (str): line that contains the configuration value.
"""
configuration_definition, _, _ = line.partition('=')
configuration_value = configuration_options.get(
configuration_definition, None)
if configuration_value:
self._ParseConfigurationOption(
project_configuration, configuration_definition, configuration_value,
line)
def _ReadConfiguration(self, line):
"""Reads a configuration.
Args:
line (str): line that contains the start of the configuration section.
Returns:
VSProjectConfiguration: configuration or None if no configuration was
found.
"""
if not line or not line.startswith('<Configuration'):
return None
project_configuration = resources.VSProjectConfiguration()
found_tool = False
found_tool_compiler = False
found_tool_librarian = False
found_tool_linker = False
while line:
line = self._ReadLine()
if line.startswith('</Configuration>'):
break
if found_tool:
if line.startswith('/>'):
found_tool = False
found_tool_compiler = False
found_tool_librarian = False
found_tool_linker = False
elif found_tool_compiler:
self._ParseConfigurationOptions(
project_configuration, self._TOOL_COMPILER_CONFIGURATION_OPTIONS,
line)
if isinstance(
project_configuration.include_directories, str):
project_configuration.include_directories = (
project_configuration.include_directories.split(';'))
elif found_tool_librarian:
self._ParseConfigurationOptions(
project_configuration, self._TOOL_LIBRARIAN_CONFIGURATION_OPTIONS,
line)
elif found_tool_linker:
self._ParseConfigurationOptions(
project_configuration, self._TOOL_LINKER_CONFIGURATION_OPTIONS,
line)
additional_dependencies = (
project_configuration.additional_dependencies)
if isinstance(additional_dependencies, str):
# pylint: disable=no-member
additional_dependencies = additional_dependencies.split(' ')
project_configuration.additional_dependencies = []
for dependency in additional_dependencies:
dependency.replace('$(ConfigurationName)', '$(Configuration)')
project_configuration.additional_dependencies.append(dependency)
if isinstance(
project_configuration.library_directories, str):
project_configuration.library_directories = (
project_configuration.library_directories.split(';'))
elif line.startswith('Name="VCCLCompilerTool"'):
found_tool_compiler = True
elif line.startswith('Name="VCLibrarianTool"'):
found_tool_librarian = True
elif line.startswith('Name="VCLinkerTool"'):
found_tool_linker = True
elif line.startswith('<Tool'):
found_tool = True
elif line.startswith('Name='):
# For more than 1 match findall will return a list with a tuple.
values = re.findall('Name="([^|]*)[|]([^"]*)"', line)[0]
if len(values) == 2:
project_configuration.name = values[0]
project_configuration.platform = values[1]
else:
self._ParseConfigurationOptions(
project_configuration, self._CONFIGURATION_OPTIONS, line)
# TODO: PlatformToolset.
# TargetFrameworkVersion ?
# Add the target machine when not defined.
if not project_configuration.target_machine:
if project_configuration.platform == 'Win32':
project_configuration.target_machine = '1'
# TODO: assuming here that 2 is x64.
elif project_configuration.platform == 'x64':
project_configuration.target_machine = '2'
return project_configuration
def _ReadConfigurations(self, project_information):
"""Reads the configurations.
Args:
project_information (VSProjectInformation): project information.
"""
# Find the start of the configurations section.
result = False
line = self._ReadLine()
while line:
result = line.startswith('<Configurations>')
if result:
break
line = self._ReadLine()
if not result:
return
while line:
line = self._ReadLine()
if line.startswith('</Configurations>'):
break
if line.startswith('<Configuration'):
project_configuration = self._ReadConfiguration(line)
if project_configuration:
project_information.configurations.Append(project_configuration)
def _ReadFiles(self, project_information):
"""Reads the files.
Args:
project_information (VSProjectInformation): project information.
"""
# Find the start of the files section.
result = False
line = self._ReadLine()
while line:
result = line.startswith('<Files>')
if result:
break
line = self._ReadLine()
if result:
found_filter = False
found_filter_source_files = False
found_filter_header_files = False
found_filter_resource_files = False
while line:
line = self._ReadLine()
if line.startswith('</Files>'):
break
if found_filter:
if line.startswith('</Filter>'):
found_filter = False
found_filter_source_files = False
found_filter_header_files = False
found_filter_resource_files = False
elif found_filter_source_files:
if line.startswith('RelativePath='):
values = re.findall('RelativePath="([^"]*)"', line)
if len(values) == 1:
project_information.source_files.append(values[0])
elif found_filter_header_files:
if line.startswith('RelativePath='):
values = re.findall('RelativePath="([^"]*)"', line)
if len(values) == 1:
project_information.header_files.append(values[0])
elif found_filter_resource_files:
if line.startswith('RelativePath='):
values = re.findall('RelativePath="([^"]*)"', line)
if len(values) == 1:
project_information.resource_files.append(values[0])
elif line.startswith('Name="Source Files"'):
found_filter_source_files = True
elif line.startswith('Name="Header Files"'):
found_filter_header_files = True
elif line.startswith('Name="Resource Files"'):
found_filter_resource_files = True
elif line.startswith('<Filter'):
found_filter = True
def _ReadProjectInformation(self, project_information):
"""Reads project information.
Args:
project_information (VSProjectInformation): project information.
"""
line = self._ReadLine()
while line:
if line.startswith('>'):
break
if line.startswith('Name='):
values = re.findall('Name="([^"]*)"', line)
if len(values) == 1:
project_information.name = values[0]
elif line.startswith('ProjectGUID='):
values = re.findall('ProjectGUID="{([^}]*)}"', line)
if len(values) == 1:
project_information.guid = values[0]
elif line.startswith('RootNamespace='):
values = re.findall('RootNamespace="([^"]*)"', line)
if len(values) == 1:
project_information.root_name_space = values[0]
elif line.startswith('Keyword='):
values = re.findall('Keyword="([^"]*)"', line)
if len(values) == 1:
project_information.keyword = values[0]
line = self._ReadLine()
def ReadHeader(self):
    """Reads a file header.

    Returns:
        bool: True if successful or false otherwise.
    """
    # TODO check encoding?

    # The header is a fixed sequence of lines, each identified by prefix.
    expected_prefixes = (
        '<?xml version="1.0"',
        '<VisualStudioProject',
        'ProjectType="Visual C++"',
        'Version="9,00"')

    for expected_prefix in expected_prefixes:
        current_line = self._ReadLine()
        if not current_line or not current_line.startswith(expected_prefix):
            return False

    return True
def ReadProject(self):
    """Reads the project.

    Returns:
        VSProjectInformation: project information if successful or None
            otherwise.
    """
    project_information = resources.VSProjectInformation()

    # Populate in document order: header attributes, configurations, files.
    for populate in (
            self._ReadProjectInformation,
            self._ReadConfigurations,
            self._ReadFiles):
        populate(project_information)

    return project_information
class VS2010ProjectFileReader(VSProjectFileReader):
    """Visual Studio 2010 project file reader."""

    # TODO: implement reading of Visual Studio 2010 project files.
class VS2012ProjectFileReader(VSProjectFileReader):
    """Visual Studio 2012 project file reader."""

    # TODO: implement reading of Visual Studio 2012 project files.
class VS2013ProjectFileReader(VSProjectFileReader):
    """Visual Studio 2013 project file reader."""

    # TODO: implement reading of Visual Studio 2013 project files.
class VS2015ProjectFileReader(VSProjectFileReader):
    """Visual Studio 2015 project file reader."""

    # TODO: implement reading of Visual Studio 2015 project files.
class VS2017ProjectFileReader(VSProjectFileReader):
    """Visual Studio 2017 project file reader."""

    # TODO: implement reading of Visual Studio 2017 project files.
class VS2019ProjectFileReader(VSProjectFileReader):
    """Visual Studio 2019 project file reader."""

    # TODO: implement reading of Visual Studio 2019 project files.
class VSSolutionFileReader(FileReader):
    """Visual Studio solution file reader."""

    # Note that redundant-returns-doc is broken for pylint 1.7.x for abstract
    # methods
    # pylint: disable=redundant-returns-doc

    @abc.abstractmethod
    def _CheckFormatVersion(self, line):
        """Checks the format version.

        Args:
            line (str): line containing the Visual Studio format version.

        Returns:
            bool: True if successful or false otherwise.
        """

    # pylint: disable=unused-argument
    def _CheckVisualStudioVersion(self, line):
        """Checks the Visual Studio version.

        Subclasses override this when the solution file carries an explicit
        VisualStudioVersion line.

        Args:
            line (str): line containing the Visual Studio format version.

        Returns:
            bool: True if successful or false otherwise.
        """
        return False

    def ReadConfigurations(self):
        """Reads the configurations.

        Returns:
            VSConfigurations: configurations or None if not available.
        """
        configurations = resources.VSConfigurations()

        current_line = self._ReadLine(look_ahead=True)
        if not current_line or current_line != 'Global':
            return None

        in_section = False
        current_line = self._ReadLine()
        while current_line and current_line != 'EndGlobal':
            current_line = self._ReadLine()

            if not in_section:
                in_section = current_line == (
                    'GlobalSection(SolutionConfigurationPlatforms) = '
                    'preSolution')
                continue

            if current_line == 'EndGlobalSection':
                in_section = False
                continue

            # For more than 1 match findall will return a list with a tuple.
            matches = re.findall(
                '([^|]*)[|]([^ ]*) = ([^|]*)[|]([^ ]*)', current_line)
            if len(matches) == 1:
                name, platform, other_name, other_platform = matches[0]
                if name == other_name and platform == other_platform:
                    configuration = resources.VSSolutionConfiguration()
                    configuration.name = name
                    configuration.platform = platform
                    configurations.Append(configuration)

        return configurations

    def ReadHeader(self):
        """Reads a file header.

        Returns:
            bool: True if successful or false otherwise.
        """
        # Expect an UTF-8 byte order mark followed by a CRLF.
        binary_data = self._ReadBinaryData(5)
        if binary_data != b'\xef\xbb\xbf\r\n':
            return False

        current_line = self._ReadLine()
        if not current_line or not current_line.startswith(
                'Microsoft Visual Studio Solution File, Format Version '):
            return False

        if not self._CheckFormatVersion(current_line):
            return False

        # Skip optional comment lines and remember the optional
        # VisualStudioVersion line for the version check below.
        version_line = None
        current_line = self._ReadLine(look_ahead=True)
        while current_line:
            if current_line.startswith('# Visual C++ '):
                self._ReadLine()
            elif current_line.startswith('VisualStudioVersion = '):
                version_line = self._ReadLine()
            else:
                break

            current_line = self._ReadLine(look_ahead=True)

        if version_line and not self._CheckVisualStudioVersion(version_line):
            return False

        return True

    def ReadProject(self):
        """Reads a project.

        Returns:
            VSSolutionProject: project if successful or None otherwise.
        """
        # 8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942 is a Visual C++ related GUID.
        current_line = self._ReadLine(look_ahead=True)
        if not current_line or not current_line.startswith(
                'Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = '):
            return None

        # For more than 1 match findall will return a list with a tuple.
        matches = re.findall(
            ('Project\\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}"\\) = "([^"]*)", '
             '"([^"]*)\\.vcproj", '
             '"{([0-9A-F]*-[0-9A-F]*-[0-9A-F]*-[0-9A-F]*-[0-9A-F]*)}"'),
            current_line)

        if len(matches) != 1:
            return None

        name, filename, guid = matches[0]
        solution_project = resources.VSSolutionProject(name, filename, guid)

        found_dependencies = False

        current_line = self._ReadLine()
        while current_line and current_line != 'EndProject':
            current_line = self._ReadLine()

            if found_dependencies:
                if current_line == 'EndProjectSection':
                    found_dependencies = False
                else:
                    # The dependencies are defined as: {%GUID%} = {%GUID%}
                    # For more than 1 match findall will return a list with
                    # a tuple.
                    guid_matches = re.findall(
                        ('{([0-9A-F]*-[0-9A-F]*-[0-9A-F]*-[0-9A-F]*-[0-9A-F]*)} = '
                         '{([0-9A-F]*-[0-9A-F]*-[0-9A-F]*-[0-9A-F]*-[0-9A-F]*)}'),
                        current_line)
                    if len(guid_matches) == 1:
                        first_guid, second_guid = guid_matches[0]
                        if first_guid == second_guid:
                            solution_project.AddDependency(first_guid)

            elif current_line == (
                    'ProjectSection(ProjectDependencies) = postProject'):
                found_dependencies = True

        return solution_project

    def ReadProjects(self):
        """Reads the projects.

        Returns:
            list[VSSolutionProject]: projects in preserved order.
        """
        projects = []
        while True:
            project = self.ReadProject()
            if not project:
                break
            projects.append(project)

        return projects
class VS2008SolutionFileReader(VSSolutionFileReader):
    """Visual Studio 2008 solution file reader."""

    def _CheckFormatVersion(self, line):
        """Checks the format version.

        Args:
            line (str): line containing the Visual Studio format version.

        Returns:
            bool: True if the line carries format version 10.00.
        """
        expected_suffix = ' 10.00'
        return line.endswith(expected_suffix)
class VS2010SolutionFileReader(VSSolutionFileReader):
    """Visual Studio 2010 solution file reader."""

    def _CheckFormatVersion(self, line):
        """Checks the format version.

        Args:
            line (str): line containing the Visual Studio format version.

        Returns:
            bool: True if the line carries format version 11.00.
        """
        expected_suffix = ' 11.00'
        return line.endswith(expected_suffix)
class VS2012SolutionFileReader(VSSolutionFileReader):
    """Visual Studio 2012 solution file reader."""

    def _CheckFormatVersion(self, line):
        """Checks the format version.

        Args:
            line (str): line containing the Visual Studio format version.

        Returns:
            bool: True if the line carries format version 12.00.
        """
        expected_suffix = ' 12.00'
        return line.endswith(expected_suffix)
class VS2013SolutionFileReader(VS2012SolutionFileReader):
    """Visual Studio 2013 solution file reader."""

    def _CheckVisualStudioVersion(self, line):
        """Checks the Visual Studio version.

        Args:
            line (str): line containing the Visual Studio format version.

        Returns:
            bool: True if the reported version is a 12.x release.
        """
        return line.split(' = ')[1].startswith('12.')
class VS2015SolutionFileReader(VS2012SolutionFileReader):
    """Visual Studio 2015 solution file reader."""

    def _CheckVisualStudioVersion(self, line):
        """Checks the Visual Studio version.

        Args:
            line (str): line containing the Visual Studio format version.

        Returns:
            bool: True if the reported version is a 14.x release.
        """
        return line.split(' = ')[1].startswith('14.')
class VS2017SolutionFileReader(VS2012SolutionFileReader):
    """Visual Studio 2017 solution file reader."""

    def _CheckVisualStudioVersion(self, line):
        """Checks the Visual Studio version.

        Args:
            line (str): line containing the Visual Studio format version.

        Returns:
            bool: True if the reported version is a 15.x release.
        """
        return line.split(' = ')[1].startswith('15.')
class VS2019SolutionFileReader(VS2012SolutionFileReader):
    """Visual Studio 2019 solution file reader."""

    def _CheckVisualStudioVersion(self, line):
        """Checks the Visual Studio version.

        Args:
            line (str): line containing the Visual Studio format version.

        Returns:
            bool: True if successful or false otherwise.
        """
        version = line.split(' = ')[1]
        # Visual Studio 2019 reports internal version 16.x. The previous
        # check for '15.' duplicated the VS2017 reader, so genuine 2019
        # solutions were rejected while 2017 ones were accepted here.
        return version.startswith('16.')
| 0 | 0 | 0 |
9103dc90b43809f95c0f53e862bd0ec25567ff06 | 14,802 | py | Python | tests/test_cli.py | jhermann/pip-upgrader | ba8613d47cd91f1e1f4ae546c561eb030ba5ddf8 | [
"Apache-2.0"
] | null | null | null | tests/test_cli.py | jhermann/pip-upgrader | ba8613d47cd91f1e1f4ae546c561eb030ba5ddf8 | [
"Apache-2.0"
] | null | null | null | tests/test_cli.py | jhermann/pip-upgrader | ba8613d47cd91f1e1f4ae546c561eb030ba5ddf8 | [
"Apache-2.0"
] | null | null | null | from __future__ import unicode_literals
from subprocess import PIPE, Popen as popen
from unittest import TestCase
import responses
from packaging.utils import canonicalize_name
from pip_upgrader import __version__ as VERSION
from pip_upgrader import cli
from pip_upgrader.packages_status_detector import PackagesStatusDetector
try:
from unittest.mock import patch
except ImportError:
from mock import patch
try:
from io import StringIO
except ImportError: # pragma: nocover
from cStringIO import StringIO
@patch('pip_upgrader.packages_interactive_selector.user_input', return_value='all')
@patch('pip_upgrader.virtualenv_checker.is_virtualenv', return_value=True)
| 45.128049 | 119 | 0.657344 | from __future__ import unicode_literals
from subprocess import PIPE, Popen as popen
from unittest import TestCase
import responses
from packaging.utils import canonicalize_name
from pip_upgrader import __version__ as VERSION
from pip_upgrader import cli
from pip_upgrader.packages_status_detector import PackagesStatusDetector
try:
from unittest.mock import patch
except ImportError:
from mock import patch
try:
from io import StringIO
except ImportError: # pragma: nocover
from cStringIO import StringIO
class TestHelp(TestCase):
    """Tests for the command line help output."""

    def test_returns_usage_information(self):
        # Both the short and long help flags must print a usage message.
        for help_flag in ('-h', '--help'):
            output = popen(
                ['pip-upgrade', help_flag], stdout=PIPE).communicate()[0]
            self.assertTrue('Usage:' in output.decode('utf-8'))
class TestVersion(TestCase):
    """Tests for the command line version output."""

    def test_returns_version_information(self):
        raw_output = popen(
            ['pip-upgrade', '--version'], stdout=PIPE).communicate()[0]
        reported_version = raw_output.strip().decode('utf-8')
        self.assertEqual(reported_version, VERSION)
@patch('pip_upgrader.packages_interactive_selector.user_input', return_value='all')
@patch('pip_upgrader.virtualenv_checker.is_virtualenv', return_value=True)
class TestCommand(TestCase):
    """End-to-end tests driving ``cli.main()`` against mocked PyPI data."""

    def _add_responses_mocks(self):
        """Registers mocked JSON-API and simple-index responses per fixture."""
        for package in ['Django', 'celery', 'django-rest-auth', 'ipython']:
            canonical_name = canonicalize_name(package)

            with open('tests/fixtures/{}.json'.format(package)) as fixture_file:
                json_body = fixture_file.read()
            responses.add(
                responses.GET,
                "https://pypi.python.org/pypi/{}/json".format(package),
                body=json_body,
                content_type="application/json")

            with open('tests/fixtures/{}.html'.format(canonical_name)) as fixture_file:
                html_body = fixture_file.read()
            responses.add(
                responses.GET,
                "https://pypi.python.org/simple/{}".format(canonical_name),
                body=html_body)

    def setUp(self):
        self._add_responses_mocks()

    def _run_cli(self):
        """Runs the CLI entry point and returns its captured stdout."""
        with patch('sys.stdout', new_callable=StringIO) as fake_stdout:
            cli.main()
            return fake_stdout.getvalue()

    def _run_cli_with_test_conf(self):
        """Runs the CLI with ``pip.test.conf`` appended to the pip config
        locations and returns the captured stdout."""
        with patch('sys.stdout', new_callable=StringIO) as fake_stdout, \
                patch('pip_upgrader.packages_status_detector.PackagesStatusDetector.pip_config_locations',
                      new=PackagesStatusDetector.pip_config_locations + ['pip.test.conf']):
            cli.main()
            return fake_stdout.getvalue()

    @responses.activate
    @patch('pip_upgrader.cli.get_options', return_value={'--dry-run': True, '-p': []})
    def test_command_basic_usage(self, options_mock, is_virtualenv_mock, user_input_mock):
        output = self._run_cli()

        self.assertTrue(user_input_mock.called)
        self.assertIn('Available upgrades', output)
        self.assertIn('ipython ... up to date', output)
        self.assertIn('django-rest-auth ... upgrade available: 0.9.0 ==>', output)
        self.assertNotIn('ipdb', output)
        self.assertIn('Successfully upgraded', output)
        self.assertIn('this was a simulation using --dry-run', output)

    @responses.activate
    @patch('pip_upgrader.cli.get_options', return_value={'--dry-run': True, '-p': []})
    def test_command_simple_html_index_url(self, options_mock, is_virtualenv_mock, user_input_mock):
        output = self._run_cli_with_test_conf()

        self.assertTrue(user_input_mock.called)
        # checks if new index-url was discovered from config file
        self.assertIn('Setting API url', output)
        self.assertIn('https://pypi.python.org/simple/{package}', output)
        self.assertIn('Available upgrades', output)
        self.assertIn('ipython ... up to date', output)
        self.assertIn('django-rest-auth ... upgrade available: 0.9.0 ==>', output)
        self.assertNotIn('ipdb', output)
        self.assertIn('Successfully upgraded', output)
        self.assertIn('this was a simulation using --dry-run', output)

    @responses.activate
    @patch('pip_upgrader.cli.get_options', return_value={'--dry-run': True, '-p': []})
    @patch.dict('os.environ', {'PIP_INDEX_URL': 'https://pypi.python.org/simple/'})
    def test_command_pip_index_url_environ(self, options_mock, is_virtualenv_mock, user_input_mock):
        output = self._run_cli()

        self.assertTrue(user_input_mock.called)
        # checks if new index-url was discovered from config file
        self.assertIn('Setting API url', output)
        self.assertIn('https://pypi.python.org/simple/{package}', output)
        self.assertIn('Available upgrades', output)
        self.assertIn('ipython ... up to date', output)
        self.assertIn('django-rest-auth ... upgrade available: 0.9.0 ==>', output)
        self.assertNotIn('ipdb', output)
        self.assertIn('Successfully upgraded', output)
        self.assertIn('this was a simulation using --dry-run', output)

    @responses.activate
    @patch('pip_upgrader.cli.get_options', return_value={'--dry-run': True, '-p': [], '--use-default-index': True})
    def test_command__use_default_index(self, options_mock, is_virtualenv_mock, user_input_mock):
        output = self._run_cli_with_test_conf()

        # checks if new index-url was discovered from config file
        self.assertNotIn('Setting API url', output)
        self.assertIn('Successfully upgraded', output)

    @responses.activate
    @patch('pip_upgrader.cli.get_options', return_value={'--dry-run': True, '-p': []})
    def test_command_interactive_bad_choices(self, options_mock, is_virtualenv_mock, user_input_mock):
        user_input_mock.return_value = ''
        output = self._run_cli()

        self.assertTrue(user_input_mock.called)
        self.assertIn('No choice selected', output)
        self.assertNotIn('Setting API url', output)

        user_input_mock.return_value = '5 6 7'
        output = self._run_cli()

        self.assertTrue(user_input_mock.called)
        self.assertIn('No valid choice selected.', output)

    @responses.activate
    @patch('pip_upgrader.cli.get_options', return_value={'--dry-run': True, '-p': ['all']})
    def test_command_not_interactive_all_packages(self, options_mock, is_virtualenv_mock, user_input_mock):
        output = self._run_cli()

        # no user_input should be called
        self.assertFalse(user_input_mock.called)
        self.assertNotIn('Setting API url', output)
        self.assertNotIn('Available upgrades', output)
        self.assertIn('Django ... upgrade available: 1.10 ==>', output)
        self.assertIn('django-rest-auth ... upgrade available: 0.9.0 ==>', output)
        self.assertIn('ipython ... up to date', output)
        self.assertNotIn('ipdb', output)
        self.assertIn('celery ... upgrade available: 3.1.1 ==>', output)
        self.assertIn('Successfully upgraded', output)
        self.assertIn('this was a simulation using --dry-run', output)

    @responses.activate
    @patch('pip_upgrader.cli.get_options', return_value={'--dry-run': True, '-p': ['django', 'bad_package']})
    def test_command_not_interactive_specific_package(self, options_mock, is_virtualenv_mock, user_input_mock):
        output = self._run_cli()

        # no user_input should be called
        self.assertFalse(user_input_mock.called)
        self.assertNotIn('Setting API url', output)
        self.assertIn('Django ... upgrade available: 1.10 ==>', output)
        self.assertNotIn('django-rest-auth', output)
        self.assertNotIn('ipython ... up to date', output)
        self.assertNotIn('ipdb', output)
        self.assertNotIn('celery ... upgrade available: 3.1.1 ==>', output)
        self.assertIn('Successfully upgraded', output)

    @responses.activate
    @patch('pip_upgrader.cli.get_options', return_value={'--dry-run': True, '-p': ['ipython']})
    def test_command_not_interactive_all_packages_up_to_date(self, options_mock, is_virtualenv_mock, user_input_mock):
        output = self._run_cli()

        # no user_input should be called
        self.assertFalse(user_input_mock.called)
        self.assertNotIn('Setting API url', output)
        self.assertIn('All packages are up-to-date.', output)

    @responses.activate
    @patch('pip_upgrader.cli.get_options', return_value={'--dry-run': True, '-p': ['all'],
                                                         '<requirements_file>': ['requirements/production.txt']})
    def test_command_not_interactive_explicit_requirements(self, options_mock, is_virtualenv_mock, user_input_mock):
        output = self._run_cli()

        # no user_input should be called
        self.assertFalse(user_input_mock.called)
        self.assertNotIn('Setting API url', output)
        self.assertNotIn('Django ... upgrade available: 1.10 ==>', output)
        self.assertNotIn('django-rest-auth', output)
        self.assertNotIn('ipython ... up to date', output)
        self.assertNotIn('ipdb', output)
        self.assertIn('celery ... upgrade available: 3.1.1 ==>', output)
        self.assertIn('Successfully upgraded', output)

    @responses.activate
    @patch('pip_upgrader.cli.get_options', return_value={'--dry-run': True, '-p': ['all'],
                                                         '<requirements_file>': ['requirements/local.txt']})
    def test_command_not_recursive_requirements_include(self, options_mock, is_virtualenv_mock, user_input_mock):
        output = self._run_cli()

        # no user_input should be called
        self.assertFalse(user_input_mock.called)
        self.assertIn('celery ... upgrade available: 3.1.1 ==>', output)
        self.assertIn('requirements/local.txt', output)
        self.assertIn('requirements/production.txt', output)
        self.assertIn('requirements/extra/debug.txt', output)
        self.assertIn('requirements/extra/debug2.txt', output)
        self.assertNotIn('requirements/extra/bad_file.txt', output)
        self.assertIn('Successfully upgraded', output)

    @responses.activate
    @patch('pip_upgrader.cli.get_options', return_value={'--dry-run': True, '-p': ['django'], '--prerelease': True})
    def test_command_not_specific_package_prerelease(self, options_mock, is_virtualenv_mock, user_input_mock):
        output = self._run_cli()

        # no user_input should be called
        self.assertFalse(user_input_mock.called)
        self.assertNotIn('Setting API url', output)
        self.assertIn('Django ... upgrade available: 1.10 ==> 1.11rc1', output)
        self.assertNotIn('django-rest-auth', output)
        self.assertNotIn('ipython ... up to date', output)
        self.assertNotIn('ipdb', output)
        self.assertNotIn('celery ... upgrade available: 3.1.1 ==>', output)
        self.assertIn('Successfully upgraded', output)

    @responses.activate
    @patch('pip_upgrader.cli.get_options', return_value={'--dry-run': True, '-p': ['django'], '--prerelease': True})
    def test_command_not_specific_package_prerelease_html_api(self, options_mock, is_virtualenv_mock, user_input_mock):
        output = self._run_cli_with_test_conf()

        # no user_input should be called
        self.assertFalse(user_input_mock.called)
        self.assertIn('Setting API url', output)
        self.assertIn('Django ... upgrade available: 1.10 ==> 1.11rc1', output)
        self.assertNotIn('django-rest-auth', output)
        self.assertNotIn('ipython ... up to date', output)
        self.assertNotIn('ipdb', output)
        self.assertNotIn('celery ... upgrade available: 3.1.1 ==>', output)
        self.assertIn('Successfully upgraded', output)

    @responses.activate
    @patch('pip_upgrader.cli.get_options', return_value={'--dry-run': True, '--skip-virtualenv-check': False,
                                                         '-p': ['django']})
    def test_command_not_interactive_not_virtualenv(self, options_mock, is_virtualenv_mock, user_input_mock):
        is_virtualenv_mock.return_value = False
        output = self._run_cli()

        self.assertNotIn('Setting API url', output)
        self.assertIn("It seems you haven't activated a virtualenv", output)
        self.assertNotIn('Successfully upgraded', output)

    @responses.activate
    @patch('pip_upgrader.cli.get_options', return_value={'--dry-run': True, '--skip-virtualenv-check': True,
                                                         '-p': ['django']})
    def test_command_not_interactive_not_virtualenv_skip(self, options_mock, is_virtualenv_mock, user_input_mock):
        is_virtualenv_mock.return_value = False
        output = self._run_cli()

        self.assertFalse(user_input_mock.called)
        self.assertNotIn('Setting API url', output)
        self.assertIn('Django ... upgrade available: 1.10 ==>', output)
        self.assertNotIn('django-rest-auth', output)
        self.assertNotIn('ipython ... up to date', output)
        self.assertNotIn('ipdb', output)
        self.assertNotIn('celery ... upgrade available: 3.1.1 ==>', output)
        self.assertIn('Successfully upgraded', output)
| 11,349 | 2,642 | 120 |
3762dac387dc47cb0414df8d63924698e9c65760 | 559 | py | Python | code/wind-analysis/complete/wind-reducer-max.py | cssherry/ox-clo | 4c771048f8b02639c7ac9e3609b92f08a2a345c1 | [
"CC0-1.0"
] | null | null | null | code/wind-analysis/complete/wind-reducer-max.py | cssherry/ox-clo | 4c771048f8b02639c7ac9e3609b92f08a2a345c1 | [
"CC0-1.0"
] | null | null | null | code/wind-analysis/complete/wind-reducer-max.py | cssherry/ox-clo | 4c771048f8b02639c7ac9e3609b92f08a2a345c1 | [
"CC0-1.0"
] | null | null | null | #! /usr/bin/env python
import sys

# Highest wind speed seen so far, keyed by station identifier.
speeds = dict()
# counts = dict()
# need this for extension

for line in sys.stdin:
    try:
        line = line.strip()
        station, speed = line.split('\t')
        speed = float(speed)
        # Track only the maximum speed observed per station.
        if station not in speeds or speeds[station] < speed:
            speeds[station] = speed
    except ValueError:
        # Ignore malformed records (wrong field count or non-numeric speed).
        pass

# NOTE: .items() replaces .iteritems() so the reducer also runs under
# Python 3, where dict.iteritems() no longer exists; output is unchanged.
for station, max_speed in speeds.items():
    print('\t'.join([station, str(max_speed)]))
# add code here to output the results | 17.46875 | 39 | 0.593918 | #! /usr/bin/env python
import sys

# Per-station maximum wind speed accumulator.
speeds = dict()
# counts = dict()
# need this for extension

for raw_line in sys.stdin:
    try:
        station, speed_text = raw_line.strip().split('\t')
        speed = float(speed_text)
        # Keep the largest speed observed for each station.
        if station in speeds:
            speeds[station] = max(speeds[station], speed)
        else:
            speeds[station] = speed
    except ValueError:
        # Skip lines that do not parse as "station<TAB>speed".
        pass

for station, top_speed in speeds.iteritems():
    print('\t'.join([station, str(top_speed)]))
# add code here to output the results | 0 | 0 | 0 |
9e57b930e0dbd8dcbcf97bd5461976d81c31f4a1 | 25,064 | py | Python | pointnav_vo/vo/dataset/regression_geo_invariance_iter_dataset.py | rxlqn/PointNav-VO | 6f7672482a3f1628a3b11025709518ee166e812b | [
"Apache-2.0"
] | 25 | 2021-08-28T04:06:31.000Z | 2022-03-02T23:03:13.000Z | pointnav_vo/vo/dataset/regression_geo_invariance_iter_dataset.py | rxlqn/PointNav-VO | 6f7672482a3f1628a3b11025709518ee166e812b | [
"Apache-2.0"
] | 11 | 2021-10-01T07:03:11.000Z | 2022-03-26T02:28:44.000Z | pointnav_vo/vo/dataset/regression_geo_invariance_iter_dataset.py | rxlqn/PointNav-VO | 6f7672482a3f1628a3b11025709518ee166e812b | [
"Apache-2.0"
] | 5 | 2021-09-01T09:05:42.000Z | 2022-01-27T10:11:37.000Z | #! /usr/bin/env python
import os
import h5py
import random
import time
import copy
import numpy as np
from tqdm import tqdm
from collections import OrderedDict, defaultdict
import torch
import torch.nn as nn
from torch.utils.data import Dataset, IterableDataset
from habitat import logger
from habitat.utils.geometry_utils import (
quaternion_from_coeff,
agent_state_target2ref,
)
from pointnav_vo.utils.geometry_utils import (
quaternion_to_array,
NormalizedDepth2TopDownViewHabitat,
NormalizedDepth2TopDownViewHabitatTorch,
)
from pointnav_vo.vo.dataset.regression_iter_dataset import BaseRegressionDataset
from pointnav_vo.vo.common.common_vars import *
FloatTensor = torch.FloatTensor
class StatePairRegressionDataset(BaseRegressionDataset):
r"""Data loader for state-pairs from Habitat simularo.
"""
def __init__(
self,
eval_flag=False,
data_file=None,
num_workers=0,
act_type=-1,
vis_size_w=256,
vis_size_h=256,
collision="-1",
discretize_depth="none",
discretized_depth_channels=0,
gen_top_down_view=False,
top_down_view_infos={},
geo_invariance_types=[],
partial_data_n_splits=1,
# data_aug=False,
):
f"""Valid combination of action and geometric consistency types are:
left OR right
|-- inverse_data_augment_only
left AND right
|-- inverse_joint_train
"""
assert (
np.sum(
[
"inverse_data_augment_only" in geo_invariance_types,
"inverse_joint_train" in geo_invariance_types,
]
)
<= 1
), f"inverse_data_augment_only and inverse_joint_train should not appear together."
if "inverse_joint_train" in geo_invariance_types:
assert (
isinstance(act_type, list)
and set(act_type) == set([TURN_LEFT, TURN_RIGHT])
) or (
isinstance(act_type, int) and act_type == -1
), f"When enabling joint-training with geometric inversion, action types must be [left, right] OR -1."
else:
assert isinstance(act_type, int)
if "inverse_data_augment_only" in geo_invariance_types:
assert (
act_type != MOVE_FORWARD
), f"Data augmentation for geometric consistency about inversion is not suitable for forward action."
self._eval = eval_flag
self._data_f = data_file
self._num_workers = num_workers
self._act_type = act_type
self._collision = collision
self._geo_invariance_types = geo_invariance_types
self._len = 0
self._act_left_right_len = 0
self._vis_size_w = vis_size_w
self._vis_size_h = vis_size_h
self._partial_data_n_splits = partial_data_n_splits
self._gen_top_down_view = gen_top_down_view
self._top_down_view_infos = top_down_view_infos
# RGB stored with uint8
self._rgb_pair_size = 2 * self._vis_size_w * self._vis_size_h * 3
# Depth stored with float16
self._depth_pair_size = 2 * self._vis_size_w * self._vis_size_h * 2
with h5py.File(data_file, "r", libver="latest") as f:
self._chunk_size = f[list(f.keys())[0]]["prev_rgbs"].shape[0]
# for rgb + depth
self._chunk_bytes = int(
np.ceil((self._rgb_pair_size + self._depth_pair_size) * self._chunk_size)
)
# for misc information
self._chunk_bytes += 20 * 2
logger.info(f"\nDataset: chunk bytes {self._chunk_bytes / (1024 * 1024)} MB\n")
self._discretize_depth = discretize_depth
self._discretized_depth_channels = discretized_depth_channels
if self._discretize_depth == "hard":
self._discretized_depth_end_vals = []
for i in np.arange(self._discretized_depth_channels):
self._discretized_depth_end_vals.append(
i * 1.0 / self._discretized_depth_channels
)
self._discretized_depth_end_vals.append(1.0)
logger.info("Get index mapping from h5py ...")
all_chunk_keys = []
with h5py.File(data_file, "r", libver="latest") as f:
for chunk_k in tqdm(sorted(f.keys())):
all_chunk_keys.append(chunk_k)
valid_idxes, transform_idxes = self._get_valid_idxes(f, chunk_k)
self._len += len(valid_idxes)
logger.info("... done.\n")
if not self._eval:
random.shuffle(all_chunk_keys)
if num_workers == 0:
# no separate worker
self._chunk_splits = all_chunk_keys
elif num_workers > 0:
self._chunk_splits = defaultdict(list)
for i, chunk_k in enumerate(all_chunk_keys):
self._chunk_splits[i % num_workers].append(chunk_k)
else:
raise ValueError
@property
@property
@property
@property
| 38.56 | 148 | 0.615385 | #! /usr/bin/env python
import os
import h5py
import random
import time
import copy
import numpy as np
from tqdm import tqdm
from collections import OrderedDict, defaultdict
import torch
import torch.nn as nn
from torch.utils.data import Dataset, IterableDataset
from habitat import logger
from habitat.utils.geometry_utils import (
quaternion_from_coeff,
agent_state_target2ref,
)
from pointnav_vo.utils.geometry_utils import (
quaternion_to_array,
NormalizedDepth2TopDownViewHabitat,
NormalizedDepth2TopDownViewHabitatTorch,
)
from pointnav_vo.vo.dataset.regression_iter_dataset import BaseRegressionDataset
from pointnav_vo.vo.common.common_vars import *
FloatTensor = torch.FloatTensor
class StatePairRegressionDataset(BaseRegressionDataset):
r"""Data loader for state-pairs from Habitat simularo.
"""
def __init__(
self,
eval_flag=False,
data_file=None,
num_workers=0,
act_type=-1,
vis_size_w=256,
vis_size_h=256,
collision="-1",
discretize_depth="none",
discretized_depth_channels=0,
gen_top_down_view=False,
top_down_view_infos={},
geo_invariance_types=[],
partial_data_n_splits=1,
# data_aug=False,
):
f"""Valid combination of action and geometric consistency types are:
left OR right
|-- inverse_data_augment_only
left AND right
|-- inverse_joint_train
"""
assert (
np.sum(
[
"inverse_data_augment_only" in geo_invariance_types,
"inverse_joint_train" in geo_invariance_types,
]
)
<= 1
), f"inverse_data_augment_only and inverse_joint_train should not appear together."
if "inverse_joint_train" in geo_invariance_types:
assert (
isinstance(act_type, list)
and set(act_type) == set([TURN_LEFT, TURN_RIGHT])
) or (
isinstance(act_type, int) and act_type == -1
), f"When enabling joint-training with geometric inversion, action types must be [left, right] OR -1."
else:
assert isinstance(act_type, int)
if "inverse_data_augment_only" in geo_invariance_types:
assert (
act_type != MOVE_FORWARD
), f"Data augmentation for geometric consistency about inversion is not suitable for forward action."
self._eval = eval_flag
self._data_f = data_file
self._num_workers = num_workers
self._act_type = act_type
self._collision = collision
self._geo_invariance_types = geo_invariance_types
self._len = 0
self._act_left_right_len = 0
self._vis_size_w = vis_size_w
self._vis_size_h = vis_size_h
self._partial_data_n_splits = partial_data_n_splits
self._gen_top_down_view = gen_top_down_view
self._top_down_view_infos = top_down_view_infos
# RGB stored with uint8
self._rgb_pair_size = 2 * self._vis_size_w * self._vis_size_h * 3
# Depth stored with float16
self._depth_pair_size = 2 * self._vis_size_w * self._vis_size_h * 2
with h5py.File(data_file, "r", libver="latest") as f:
self._chunk_size = f[list(f.keys())[0]]["prev_rgbs"].shape[0]
# for rgb + depth
self._chunk_bytes = int(
np.ceil((self._rgb_pair_size + self._depth_pair_size) * self._chunk_size)
)
# for misc information
self._chunk_bytes += 20 * 2
logger.info(f"\nDataset: chunk bytes {self._chunk_bytes / (1024 * 1024)} MB\n")
self._discretize_depth = discretize_depth
self._discretized_depth_channels = discretized_depth_channels
if self._discretize_depth == "hard":
self._discretized_depth_end_vals = []
for i in np.arange(self._discretized_depth_channels):
self._discretized_depth_end_vals.append(
i * 1.0 / self._discretized_depth_channels
)
self._discretized_depth_end_vals.append(1.0)
logger.info("Get index mapping from h5py ...")
all_chunk_keys = []
with h5py.File(data_file, "r", libver="latest") as f:
for chunk_k in tqdm(sorted(f.keys())):
all_chunk_keys.append(chunk_k)
valid_idxes, transform_idxes = self._get_valid_idxes(f, chunk_k)
self._len += len(valid_idxes)
logger.info("... done.\n")
if not self._eval:
random.shuffle(all_chunk_keys)
if num_workers == 0:
# no separate worker
self._chunk_splits = all_chunk_keys
elif num_workers > 0:
self._chunk_splits = defaultdict(list)
for i, chunk_k in enumerate(all_chunk_keys):
self._chunk_splits[i % num_workers].append(chunk_k)
else:
raise ValueError
    @property
    def data_f(self):
        """Path of the backing HDF5 data file."""
        return self._data_f
    @property
    def num_workers(self):
        """Number of DataLoader workers the chunk list was split for."""
        return self._num_workers
    @property
    def geo_invariance_types(self):
        """List of enabled geometric-invariance modes."""
        return self._geo_invariance_types
    @property
    def act_left_right_len(self):
        """Running count of TURN_LEFT/TURN_RIGHT entries seen by _get_valid_idxes."""
        return self._act_left_right_len
    def __len__(self):
        """Samples served per epoch: total valid entries divided by the partial-data split factor."""
        return int(self._len / self._partial_data_n_splits)
    def _get_valid_idxes(self, h5_f, chunk_k):
        """Select the entry indexes of chunk ``chunk_k`` matching the configured action type.

        Side effect: accumulates the chunk's TURN_LEFT/TURN_RIGHT count into
        self._act_left_right_len.

        Returns:
            (list of valid entry indexes, dict of transform indexes — the
            dict is currently always empty).
        """
        act_left_right_idxes = np.where(
            (h5_f[chunk_k]["actions"][()] == TURN_LEFT)
            | (h5_f[chunk_k]["actions"][()] == TURN_RIGHT)
        )[0]
        self._act_left_right_len += len(act_left_right_idxes)
        transform_idxes = {}
        if isinstance(self._act_type, int):
            if self._act_type == -1:
                # all data is valid
                valid_act_idxes = np.arange(h5_f[chunk_k]["actions"].shape[0])
            elif "inverse_data_augment_only" in self._geo_invariance_types:
                # in this situation, action must be left or right
                assert self._act_type != MOVE_FORWARD
                valid_act_idxes = act_left_right_idxes
            else:
                # Single specific action id.
                valid_act_idxes = np.where(
                    h5_f[chunk_k]["actions"][()] == self._act_type
                )[0]
        else:
            # act_type is the [left, right] list (joint-train mode).
            assert isinstance(self._act_type, list)
            assert set(self._act_type) == set([TURN_LEFT, TURN_RIGHT])
            valid_act_idxes = act_left_right_idxes
        if self._collision == "-1":
            # No collision-based filtering implemented.
            final_valid_idxes = valid_act_idxes
        else:
            raise ValueError
        return list(final_valid_idxes), transform_idxes
    def _process_data(self, chunk_i, i):
        """Build batched training tensors for entry *i* of the cached chunk.

        Reads the i-th observation pair from the arrays that __iter__ loaded
        into memory (self._prev_rgbs, self._cur_depths, ...) and returns a
        13-tuple of stacked tensors: (data_types, rgb_pairs, depth_pairs,
        discretized_depth_pairs, top_down_view_pairs, actions, delta_xs,
        delta_ys, delta_zs, delta_yaws, dz_regress_masks, chunk_idxs,
        entry_idxs). Depending on self._act_type and the enabled geometric
        invariance types, the output holds one sample (cur relative to prev)
        and possibly a second, inverted sample (prev relative to cur) for
        left/right turn actions.
        """
        # Accumulators; each may end up holding one or two samples for this entry.
        actions = []
        rgb_pairs = []
        depth_pairs = []
        discretized_depth_pairs = []
        top_down_view_pairs = []
        delta_xs = []
        delta_ys = []
        delta_zs = []
        delta_yaws = []
        data_types = []
        dz_regress_masks = []
        chunk_idxs = []
        entry_idxs = []
        # rgb in HDF5: uint8, reshaped as a vector
        prev_rgb = self._prev_rgbs[i, :].reshape(
            (self._vis_size_h, self._vis_size_w, 3)
        )
        cur_rgb = self._cur_rgbs[i, :].reshape((self._vis_size_h, self._vis_size_w, 3))
        # depth in HDF5: float16, reshaped as a vector
        prev_depth = self._prev_depths[i, :].reshape(
            (self._vis_size_h, self._vis_size_w, 1)
        )
        cur_depth = self._cur_depths[i, :].reshape(
            (self._vis_size_h, self._vis_size_w, 1)
        )
        # discretize depth map to one-hot representation
        if self._discretize_depth == "hard":
            prev_discretized_depth = self._discretize_depth_func(i, prev_depth[..., 0])
            cur_discretized_depth = self._discretize_depth_func(i, cur_depth[..., 0])
        else:
            # Placeholder single-channel zeros when discretization is disabled.
            prev_discretized_depth = np.zeros((self._vis_size_h, self._vis_size_w, 1))
            cur_discretized_depth = np.zeros((self._vis_size_h, self._vis_size_w, 1))
        # generate top_down_view
        if self._gen_top_down_view:
            prev_top_down_view = []
            cur_top_down_view = []
            tmp_prev = torch.FloatTensor(prev_depth)
            tmp_cur = torch.FloatTensor(cur_depth)
            # opencv has issues with multiprocessing,
            # we have to move top-down-view generator inside child process to avoid permenant hang
            # https://github.com/opencv/opencv/issues/5150
            top_down_view_generator = NormalizedDepth2TopDownViewHabitat(
                **self._top_down_view_infos
            )
            prev_top_down_view.append(
                top_down_view_generator.gen_top_down_view(prev_depth)
            )
            cur_top_down_view.append(
                top_down_view_generator.gen_top_down_view(cur_depth)
            )
            prev_top_down_view = np.concatenate(prev_top_down_view, axis=2)
            cur_top_down_view = np.concatenate(cur_top_down_view, axis=2)
        else:
            # Placeholder single-channel zeros when top-down views are disabled.
            prev_top_down_view = np.zeros((self._vis_size_h, self._vis_size_w, 1))
            cur_top_down_view = np.zeros((self._vis_size_h, self._vis_size_w, 1))
        # delta states of prev to cur
        delta_pos_cur_rel_to_prev = self._delta_positions[i, :]
        dx_cur_rel_to_prev = FloatTensor([delta_pos_cur_rel_to_prev[0]])
        dy_cur_rel_to_prev = FloatTensor([delta_pos_cur_rel_to_prev[1]])
        dz_cur_rel_to_prev = FloatTensor([delta_pos_cur_rel_to_prev[2]])
        delta_rotation_quaternion_cur_rel_to_prev = self._delta_rotations[i, :]
        # NOTE: must use arctan2 to get correct yaw
        dyaw_cur_rel_to_prev = FloatTensor(
            [
                2
                * np.arctan2(
                    delta_rotation_quaternion_cur_rel_to_prev[1],
                    delta_rotation_quaternion_cur_rel_to_prev[3],
                )
            ]
        )
        # three situations:
        # - no constrain on target action type
        # - this data's action type matches the target action type
        # - inverse_joint_train has been enabled
        # NOTE: be careful that this branch will NOT always execute.
        # For example,
        # when training separate action model for TURN_LEFT and enable inverse_data_augment_only,
        # if self._actions[i] == TURN_RIGHT, this branch will not be executed.
        if (
            (self._act_type == -1)
            or (
                isinstance(self._act_type, int) and (self._actions[i] == self._act_type)
            )
            or ("inverse_joint_train" in self._geo_invariance_types)
        ):
            # action in HDF5: uint8
            actions.append(self._actions[i])
            data_types.append(CUR_REL_TO_PREV)
            dz_regress_masks.append(1.0)
            chunk_idxs.append(chunk_i)
            entry_idxs.append(i)
            # to allow more processes, we use uint8 here to save each worker's memory usage
            rgb_pair_cur_rel_to_prev = torch.ByteTensor(
                np.concatenate([prev_rgb, cur_rgb], axis=2)
            )
            rgb_pairs.append(rgb_pair_cur_rel_to_prev)
            depth_pair_cur_rel_to_prev = FloatTensor(
                np.concatenate([prev_depth, cur_depth], axis=2)
            )
            depth_pairs.append(depth_pair_cur_rel_to_prev)
            # save memory with unin8
            discretized_depth_pair_cur_rel_to_prev = torch.ByteTensor(
                np.concatenate([prev_discretized_depth, cur_discretized_depth], axis=2)
            )
            discretized_depth_pairs.append(discretized_depth_pair_cur_rel_to_prev)
            top_down_view_pair_cur_rel_to_prev = FloatTensor(
                np.concatenate([prev_top_down_view, cur_top_down_view], axis=2)
            )
            top_down_view_pairs.append(top_down_view_pair_cur_rel_to_prev)
            delta_xs.append(dx_cur_rel_to_prev)
            delta_ys.append(dy_cur_rel_to_prev)
            delta_zs.append(dz_cur_rel_to_prev)
            delta_yaws.append(dyaw_cur_rel_to_prev)
        # valid situations:
        # - act_type != -1
        # - inverse_data_augment_only has been enabled and self._actions[i] != self._act_type
        # - inverse_joint_train has been enabled
        flag1 = (
            (self._act_type != -1)
            and ("inverse_data_augment_only" in self._geo_invariance_types)
            and (self._actions[i] != MOVE_FORWARD)
            and (self._actions[i] != self._act_type)
        )
        flag2 = (
            (self._act_type != -1)
            and (self._actions[i] != MOVE_FORWARD)
            and ("inverse_joint_train" in self._geo_invariance_types)
        )
        if flag1 or flag2:
            # Second, inverted sample: observations swapped and the delta state
            # recomputed with prev expressed relative to cur.
            # get the opposite action from self._actions[i],
            # namely, if self._actions[i] is TURN_LEFT, add TURN_RIGHT
            tmp_act_list = [TURN_LEFT, TURN_RIGHT]
            actions.append(tmp_act_list[1 - (self._actions[i] == TURN_RIGHT)])
            chunk_idxs.append(chunk_i)
            entry_idxs.append(i)
            data_types.append(PREV_REL_TO_CUR)
            dz_regress_masks.append(1.0)
            # save memory
            rgb_pair_prev_rel_to_cur = torch.ByteTensor(
                np.concatenate([cur_rgb, prev_rgb], axis=2)
            )
            rgb_pairs.append(rgb_pair_prev_rel_to_cur)
            depth_pair_prev_rel_to_cur = FloatTensor(
                np.concatenate([cur_depth, prev_depth], axis=2)
            )
            depth_pairs.append(depth_pair_prev_rel_to_cur)
            # save memory
            discretized_depth_pair_prev_rel_to_cur = torch.ByteTensor(
                np.concatenate([cur_discretized_depth, prev_discretized_depth], axis=2)
            )
            discretized_depth_pairs.append(discretized_depth_pair_prev_rel_to_cur)
            top_down_view_pair_prev_rel_to_cur = FloatTensor(
                np.concatenate([cur_top_down_view, prev_top_down_view], axis=2)
            )
            top_down_view_pairs.append(top_down_view_pair_prev_rel_to_cur)
            prev_state = (
                quaternion_from_coeff(self._prev_global_rotations[i, :]),
                self._prev_global_positions[i, :],
            )
            cur_state = (
                self._cur_global_rotations[i, :],
                self._cur_global_positions[i, :],
            )
            delta_state_prev_rel_to_cur = agent_state_target2ref(cur_state, prev_state)
            delta_rotation_quaternion_prev_rel_to_cur = quaternion_to_array(
                delta_state_prev_rel_to_cur[0]
            )
            delta_pos_prev_rel_to_cur = delta_state_prev_rel_to_cur[1]
            dx_prev_rel_to_cur = FloatTensor([delta_pos_prev_rel_to_cur[0]])
            dy_prev_rel_to_cur = FloatTensor([delta_pos_prev_rel_to_cur[1]])
            dz_prev_rel_to_cur = FloatTensor([delta_pos_prev_rel_to_cur[2]])
            dyaw_prev_rel_to_cur = FloatTensor(
                [
                    2
                    * np.arctan2(
                        delta_rotation_quaternion_prev_rel_to_cur[1],
                        delta_rotation_quaternion_prev_rel_to_cur[3],
                    )
                ]
            )
            delta_xs.append(dx_prev_rel_to_cur)
            delta_ys.append(dy_prev_rel_to_cur)
            delta_zs.append(dz_prev_rel_to_cur)
            delta_yaws.append(dyaw_prev_rel_to_cur)
        # Stack all accumulated samples into batch-first tensors.
        actions = torch.Tensor(actions).long().unsqueeze(1)
        data_types = torch.Tensor(data_types).unsqueeze(1)
        rgb_pairs = torch.stack(rgb_pairs, dim=0)
        depth_pairs = torch.stack(depth_pairs, dim=0)
        discretized_depth_pairs = torch.stack(discretized_depth_pairs, dim=0)
        top_down_view_pairs = torch.stack(top_down_view_pairs, dim=0)
        delta_xs = torch.stack(delta_xs, dim=0)
        delta_ys = torch.stack(delta_ys, dim=0)
        delta_zs = torch.stack(delta_zs, dim=0)
        delta_yaws = torch.stack(delta_yaws, dim=0)
        dz_regress_masks = torch.Tensor(dz_regress_masks).unsqueeze(1)
        chunk_idxs = torch.Tensor(chunk_idxs).unsqueeze(1)
        entry_idxs = torch.Tensor(entry_idxs).unsqueeze(1)
        return (
            data_types,
            rgb_pairs,
            depth_pairs,
            discretized_depth_pairs,
            top_down_view_pairs,
            actions,
            delta_xs,
            delta_ys,
            delta_zs,
            delta_yaws,
            dz_regress_masks,
            chunk_idxs,
            entry_idxs,
        )
def __iter__(self):
try:
worker_info = torch.utils.data.get_worker_info()
except:
worker_info = None
if worker_info is None:
# zero-worker data loading, return the full iterator
chunk_list = self._chunk_splits
worker_id = -1
else:
# in a worker process
chunk_list = self._chunk_splits[worker_info.id]
worker_id = worker_info.id
if not self._eval:
random.shuffle(chunk_list)
for chunk_k in chunk_list:
# logger.info(f"Worker {worker_id} is loading {chunk_k} into memory ...")
# tmp_start = time.time()
# load data into memory
with h5py.File(
self._data_f,
"r",
libver="latest",
# rdcc_nbytes=self._chunk_bytes,
rdcc_nslots=1e7,
) as f:
# get valid indexes
valid_idxes, _ = self._get_valid_idxes(f, chunk_k)
self._actions = f[chunk_k]["actions"][()]
self._prev_rgbs = f[chunk_k]["prev_rgbs"][()]
self._cur_rgbs = f[chunk_k]["cur_rgbs"][()]
self._prev_depths = f[chunk_k]["prev_depths"][()]
self._cur_depths = f[chunk_k]["cur_depths"][()]
self._delta_positions = f[chunk_k]["delta_positions"][()]
self._delta_rotations = f[chunk_k]["delta_rotations"][()]
# for geometric consistency
self._prev_global_positions = f[chunk_k]["prev_global_positions"][()]
self._prev_global_rotations = f[chunk_k]["prev_global_rotations"][()]
self._cur_global_positions = f[chunk_k]["cur_global_positions"][()]
self._cur_global_rotations = f[chunk_k]["cur_global_rotations"][()]
# tmp_interval = time.time() - tmp_start
# logger.info(f"... worker {worker_id} done ({tmp_interval:.2f}s).")
# NOTE: must use random.shuffle instead of np.random.shuffle to make it random
# np.random.shuffle always produces same order.
# Similar issue here: https://discourse.allennlp.org/t/how-to-shuffle-the-data-each-batch-when-lazy-true/233
# It seems like numpy changes shuffle behaviour:
# - https://numpy.org/doc/stable/reference/random/generated/numpy.random.shuffle.html
# - https://numpy.org/doc/stable/reference/random/generated/numpy.random.Generator.shuffle.html#numpy.random.Generator.shuffle
if not self._eval:
random.shuffle(valid_idxes)
for i, idx in enumerate(valid_idxes):
if self._eval:
_use_cur_data = True
else:
if i % self._partial_data_n_splits == 0:
_use_cur_data = True
else:
_use_cur_data = False
if _use_cur_data:
out = self._process_data(int(chunk_k.split("_")[1]), idx)
yield out
def normal_collate_func(batch):
    """Default collate: concatenate each of the 13 per-sample fields along dim 0.

    Args:
        batch: list of 13-tuples as produced by
            StatePairRegressionDataset._process_data.

    Returns:
        A 13-tuple (data_types, rgb_pairs, depth_pairs,
        discretized_depth_pairs, top_down_view_pairs, actions, delta_xs,
        delta_ys, delta_zs, delta_yaws, dz_regress_masks, chunk_idxs,
        entry_idxs), each field a single concatenated tensor.
    """
    num_fields = 13
    return tuple(
        torch.cat([sample[field] for sample in batch], 0)
        for field in range(num_fields)
    )
def fast_collate_func(data):
    """Collate that defers torch.cat for the four large image-like fields.

    torch.cat is slow on CPU but fast on GPU
    (https://github.com/pytorch/pytorch/issues/18634), so the rgb / depth /
    discretized-depth / top-down-view fields are returned as *lists* of
    per-sample tensors; callers are expected to concatenate them only after
    the transfer to GPU. All small fields are concatenated here.

    NOTE: shipping many small tensors through DataLoader workers consumes one
    shared-memory file descriptor per tensor under PyTorch's default
    'file_descriptor' sharing strategy. If you hit "too many open files" (or
    weird hangs / "received 0 items of ancdata" / corrupted torch.save
    output), either raise the limit with `ulimit -n`, switch the strategy to
    'file_system' via torch.multiprocessing.set_sharing_strategy (prone to
    shared-memory leaks), or reduce the worker count. If you do not care
    about speed, simply torch.cat fields 1-4 here instead of returning lists.
    """
    columns = list(zip(*data))
    data_types = torch.cat(columns[0], dim=0)
    # Deferred-cat fields: plain lists of the per-sample tensors.
    rgb_pairs = list(columns[1])
    depth_pairs = list(columns[2])
    discretized_depth_pairs = list(columns[3])
    top_down_view_pairs = list(columns[4])
    # Small fields are cheap to concatenate on CPU.
    actions, delta_xs, delta_ys, delta_zs, delta_yaws, dz_regress_masks = (
        torch.cat(columns[idx], dim=0) for idx in range(5, 11)
    )
    chunk_idxs = torch.cat(columns[11], dim=0)
    entry_idxs = torch.cat(columns[12], dim=0)
    columns.clear()
    return (
        data_types,
        rgb_pairs,
        depth_pairs,
        discretized_depth_pairs,
        top_down_view_pairs,
        actions,
        delta_xs,
        delta_ys,
        delta_zs,
        delta_yaws,
        dz_regress_masks,
        chunk_idxs,
        entry_idxs,
    )
| 19,688 | 0 | 258 |
35e3ccc0677f04b0f9a898da83053bc4351ca28a | 699 | py | Python | mapillary_tools/uploader_utils.py | duckonomy/mapillary_tools | 6b9b20fb54e3a26510534f5fd788599c0790ecec | [
"BSD-2-Clause"
] | null | null | null | mapillary_tools/uploader_utils.py | duckonomy/mapillary_tools | 6b9b20fb54e3a26510534f5fd788599c0790ecec | [
"BSD-2-Clause"
] | null | null | null | mapillary_tools/uploader_utils.py | duckonomy/mapillary_tools | 6b9b20fb54e3a26510534f5fd788599c0790ecec | [
"BSD-2-Clause"
] | null | null | null | import os
| 31.772727 | 73 | 0.715308 | import os
def set_video_as_uploaded(video):
    """Move *video* and its sidecar GPX track into an "uploaded" subfolder.

    The GPX file is expected to sit next to the video and share its base name
    (e.g. "clip.mp4" -> "clip.gpx").

    Args:
        video: path to the video file.

    Raises:
        OSError: if the video or its GPX file cannot be moved
            (e.g. the GPX track is missing).
    """
    current_base_path = os.path.dirname(video)
    new_base_path = os.path.join(current_base_path, "uploaded")
    # exist_ok avoids a race when several videos are finalized concurrently.
    os.makedirs(new_base_path, exist_ok=True)
    # Move video to uploaded folder
    new_video_path = os.path.join(new_base_path, os.path.basename(video))
    os.rename(video, new_video_path)
    # Move the GPX file that shares the video's base name
    basename = os.path.basename(video)
    video_key = os.path.splitext(basename)[0]
    gpx_filename = "{}.gpx".format(video_key)
    gpx_path = os.path.join(current_base_path, gpx_filename)
    new_gpx_path = os.path.join(new_base_path, gpx_filename)
    os.rename(gpx_path, new_gpx_path)
| 666 | 0 | 23 |
651cd1831dd5eccc15391398626087e34c64f27c | 765 | py | Python | wordy/wordy.py | cmccandless/ExercismSolutions-python | d80bf441c842daa2eb446bdba9c03d3e8864ea58 | [
"MIT"
] | null | null | null | wordy/wordy.py | cmccandless/ExercismSolutions-python | d80bf441c842daa2eb446bdba9c03d3e8864ea58 | [
"MIT"
] | null | null | null | wordy/wordy.py | cmccandless/ExercismSolutions-python | d80bf441c842daa2eb446bdba9c03d3e8864ea58 | [
"MIT"
] | null | null | null | op = {
'plus': lambda x, y: x + y,
'minus': lambda x, y: x - y,
'multiplied': lambda x, y: x * y,
'divided': lambda x, y: x / y,
}
| 27.321429 | 54 | 0.439216 | op = {
'plus': lambda x, y: x + y,
'minus': lambda x, y: x - y,
'multiplied': lambda x, y: x * y,
'divided': lambda x, y: x / y,
}
def calculate(expr):
    """Evaluate a "What is ...?" word problem strictly left to right.

    Operation words are resolved through the module-level ``op`` table; for
    'multiplied' / 'divided' the following 'by' word is consumed as well.

    Raises:
        ValueError: 'missing number' (two operations in a row),
            'missing operation' (two numbers in a row), or
            'unknown operation' (unrecognized word).
    """
    stack = expr[:-1].split()[2:]
    stack.reverse()
    result = 0
    pending = op['plus']
    while stack:
        token = stack.pop()
        try:
            value = int(token)
        except ValueError:
            # Not a number: must be an operation word.
            if pending is not None:
                raise ValueError('missing number')
            try:
                pending = op[token]
            except Exception:
                raise ValueError('unknown operation')
            if token in ('multiplied', 'divided'):
                stack.pop()  # drop the trailing 'by'
        else:
            if pending is None:
                raise ValueError('missing operation')
            result = pending(result, value)
            pending = None
    return result
| 585 | 0 | 25 |
44e56108a3dfaf1d288521f22b2e95d208b3b8e5 | 901 | py | Python | aiphysim/dataloading/koopman_dataset.py | perovai/deepkoopman | eb6de915f5ea1f20b47cb3a22a384f55c30f0558 | [
"MIT"
] | null | null | null | aiphysim/dataloading/koopman_dataset.py | perovai/deepkoopman | eb6de915f5ea1f20b47cb3a22a384f55c30f0558 | [
"MIT"
] | 10 | 2021-07-07T09:24:33.000Z | 2021-09-27T14:32:59.000Z | aiphysim/dataloading/koopman_dataset.py | perovai/deepkoopman | eb6de915f5ea1f20b47cb3a22a384f55c30f0558 | [
"MIT"
] | null | null | null | import numpy as np
import torch
from torch.utils.data import Dataset
| 29.064516 | 87 | 0.63707 | import numpy as np
import torch
from torch.utils.data import Dataset
class KoopmanDataset(Dataset):
    """Dataset of fixed-length trajectory windows loaded from CSV files.

    Each CSV file holds rows of state vectors; every ``sequence_length``
    consecutive rows form one sample of shape (sequence_length, input_dim).
    Each file's row count must therefore be a multiple of ``sequence_length``.
    """

    def __init__(self, file_paths, sequence_length=51, limit=-1):
        """
        Args:
            file_paths: iterable of CSV paths to load.
            sequence_length: number of consecutive rows per sample.
            limit: if > 0, keep only the first ``limit`` samples.
        """
        self.file_paths = file_paths
        self.sequence_length = sequence_length
        # Inferred from the column count of the first file loaded.
        self.input_dim = None
        self.data = []
        for path in self.file_paths:
            self.data.append(self.load_file(path))
        self.data = torch.cat(self.data)
        if limit > 0:
            self.data = self.data[:limit]

    def load_file(self, file_path):
        """Load one CSV file and reshape it to (n_samples, sequence_length, input_dim)."""
        # atleast_2d keeps a single-row file from crashing the shape[1] lookup below.
        x = torch.from_numpy(
            np.atleast_2d(np.genfromtxt(file_path, delimiter=",", dtype=np.float32))
        )
        if self.input_dim is None:
            self.input_dim = x.shape[1]
        return x.reshape(-1, self.sequence_length, self.input_dim)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        return self.data[index]
| 692 | 9 | 130 |
e693c32dff9ada05fbab24b39e3c9417628ef392 | 3,718 | py | Python | tasks-deploy/code-lock/generate.py | irdkwmnsb/lkshl-ctf | e5c0200ddc8ba73df5f321b87b9763fb1bbaba57 | [
"MIT"
] | 3 | 2021-03-30T06:27:58.000Z | 2021-04-03T17:56:35.000Z | tasks-deploy/code-lock/generate.py | irdkwmnsb/lkshl-ctf | e5c0200ddc8ba73df5f321b87b9763fb1bbaba57 | [
"MIT"
] | null | null | null | tasks-deploy/code-lock/generate.py | irdkwmnsb/lkshl-ctf | e5c0200ddc8ba73df5f321b87b9763fb1bbaba57 | [
"MIT"
] | null | null | null | TASK_URL = "http://code-lock.ctf.sicamp.ru"
TITLE = "Кодовый замок"
STATEMENT_TEMPLATE = f'''
"Безопасность превыше всего" - сказал ваш приятель и поставил на входную дверь кодовый замок. Но теперь вам срочно нужно попасть к нему домой, а кода вы не знаете. Что же делать?
[{TASK_URL}/{{0}}]({TASK_URL}/{{0}})
'''
tokens = ['CyCHgDjGEITw', 'mhLo7Fo1T0CB', 'MvXYeD9qdKbD', 'IcI8GUb3ifnu', 'WfClJasVVMqE', '6N111DvhAm1a', 'oKZdNHSWuEzB', '83xZkQHbnkB2', 'WAa0NtH4OhIq', 'hkiUVGhgnhQ9', 'cRx7j5c4BQ1f', 'T1HHyn3puLoX', 'Yydst5aHZyVp', 'cpnyMwLbKB5p', '8iHeN1y18NjN', 'LSwR2lsiDTA3', 'gG4EHjKbOZm2', 'zYgMKfs9y2Wc', 'oFw8MCETmi6p', '8cdmXvDNUZRM', 'HDmbVl56YPBr', 'tnBzvB42h501', 'Gk7rzhpwMyhf', 'fwHG0HAE6mpP', 'OQNSYN9nbfYR', 'w59HnPeSuwWt', 'QQpSYcygsvRy', 'dMTpLdze1Ex7', 'ULkRAfUEZy4U', 'Shpapnk5hsta', 'DVeq1osBM2YR', 'GyURFUZMB4Xm', 'X69RCwKq2u1l', 'kYPOfWmvzAWM', 'BKKTXLNiII07', '32neeyDHhzqB', 'GNwN6ZHCGCdw', '4P3IF6Upprg3', 'jTIiNfjvwByW', 'QVlqAFqMIJOf', 'jPhyNMm4X5Rn', 'AyJqWGxn89fb', 'eD526FoOaDYe', 'VKxmUzze44bm', 'SrlFcn7xYZ16', '5YPYsFFYXIVk', 'bmsWWn8pgEAS', 'yy6PPRzGx9n0', '21w2OU52qu1n', 'yz4IMAZNUxAg', 'U1BvSWxNl9Ol', 'AzWWPWmCXNyD', '2NmfDErj74Hy', 'OawqmtEXpyO5', '1t8Il35pZ4dC', 'D46PRTiXfS85', 'FALm3qlwKlby', 'y5p75Oc2Wlq1', 'KCGvCrvXm7Dv', 'kXRW6JlaWqAP', 'BRS3SNN1pE8i', '4kTbKTwv4TDa', 'seRMOBGjStBR', 'NEjfEx8hkIlw', 'CwDwPLL4lJ8h', 'JqrI0EvPVsUO', 'YuxW4zurp837', 'lqAfwBcmEIYB', 'gIEsoiXHNlaN', '9aArBkyeOkp0', 'e9fvVfyqxzMK', 'AtWnForNaW08', 'QfG31wSauKFk', 'jj8SFRXo0g10', 'UwgRJmdbVn6h', 'of7KckpcWd6l', 'vQC0iZzrFoy2', 'B35uVOu9MFP4', 'vJeWdKmi3bQ5', 'JCRWGhFBBBdT', 'zJtVGIIWcaRH', 'hCQfcouybmtC', 'I3Lvi2CBfg3r', 'hsGrtOj9RoSF', 'Z2M6Lpp38sP5', 'Ddd1Q5xIN1gF', 'vq6wMS9ot0zQ', 'hQjBcbnBCHxC', 'p1PwrBlPZ0ow', '6qJmIbbe6xoB', 'WQcb7xfEnOYR', 'Ey6vDVRYc2Vg', 'wEPIInOjeYUY', 'fb6WnAMMEgmY', 'Jm4yz5txSDmS', 'fXOMpdzreZvs', 'CkC0BkFtTlyq', 'DZ0gQRmgZsU1', 'DauMhJ1BsGza', 'hUUYMo9s2BPD', 'QQxDRxvmupZk', '4L7iiN4r06lT', '5zyfpXsYhBKg', 'JrNowXOUjujL', 'qTHahv28s258', 'rBu8EOrxNWZ5', '68pRpcX5n8Ev', 'jC6tKJcWjKtR', '8xasHzOz7jrx', 'e6OGhsh3lyer', '1Am3cf613rjd', 'VgN14kseLLgW', 'r0H7c44NjU4J', 'R8zcG9pVe4If', 'hz9enCVyG3Qe', '42ZrkxaG0Odd', 'BjAIeDoudYpr', 'c8XtYZASWW1E', 'XreXXid6ctjG', 'lu1Jl8lKScR5', 'ZbZuuURyxIxS', 'SeRZjEEHdjU5', '5wvjcDrLGtXE', 'TfoMA9ujFFZq', 
'2wUgbKrHw5MQ', 'bC9MrAkpJiRA', 'k8njmmSu5NXE', '0wEywfzDTs6l', 'iUVECD8GiYyE', 'O2hKKZqGekW6', 'CAXzTL2d1qO2', 'y797wsSFo5yg', '9RAhxCoi7VnP', 'MUzYZ9uHKkHp', 'hzFbJ1dTuW9l', 'xL313rNuBsdP', 'QffVzcK7XdwG', 'n0gE4RRtOLwv', 'KQOYiuUQc8ij', '9NtrplWWpd3F', '9sbNujphpvkK', 'TPo2EW4NtMms', 'lYT0LmVD3xW9', 'b1rhQTMP1Fx2', 'm6DaZ98S8G5i', 'GMYZBbKzuzlo', 'gzlFPb5KSvzm', 'RumlfxO98mbL', 'FDFcbQ3tqsqB', 'MFYBWyrNxrju', 'Yhz3FR5otGne', 'AkCV4tMFjjmN', 'DSLm94pZ7x2k', '2B3a4m8BtviD', 'DdpfmV6EcR2s', 'XZRnVv0pyTLM', 'Rec0l44sVRmo', 'ktyovd71S95u', 'RuxFDjYa3CzC', 'szRnOXD2BeKN', 'rGgeMiZEik6Q', 'ijj71npkEoqP', 'YpqcsLAkhR3n', 'dDQOSdN8opNw', 'J6Xyv2PcJIeY', 'eloBCeinF7Gp', 'YyUBfPqsv3An', 'arWweSmLMKkF', 'MhvfwsyrJXhb', 'QBqD3MDwBW6T', '1TmDk7sEdo8j', 'HnaaZYWYEhGY', '6IL7tGERPBoo', 'EdwBz7J0R1Sv', 'doKRJdkcVLOC', 'B6hAOtsGnnLb', 'gNjBR7OQuPFN', 'inMM26GlBXcY', 'sTBXHVijFmOR', 't8Lpl3Ad9rtc', '9KUt31evAPwJ', 'U0DZAVBlygDM', 'F7AGLWs6vR52', '7CeZX2qdLfKP', '4c73P4ua8mJv', 'nm84pMyyKjnT', 'JUl0SS3Oaglq', 'WtbMPWqY6n41', 'T2aTQb00cqx5', 'IB5yetiYZNGl', 'MXTPGNTcP02Q', 'PI82YnVTiBd5', 'YYyak43raQDP', 'CREXo6BLfX5Z', 'TXR49vSnEnzD', 'a5uy3qptZXQZ', 'TBnL6rzijVzJ', 'vFraC8uBBYMG', 'xoVeBYoWmkSX', 'QrkcALhYHdI4'] | 265.571429 | 3,209 | 0.743679 | TASK_URL = "http://code-lock.ctf.sicamp.ru"
TITLE = "Кодовый замок"
STATEMENT_TEMPLATE = f'''
"Безопасность превыше всего" - сказал ваш приятель и поставил на входную дверь кодовый замок. Но теперь вам срочно нужно попасть к нему домой, а кода вы не знаете. Что же делать?
[{TASK_URL}/{{0}}]({TASK_URL}/{{0}})
'''
def generate(context):
    """Build the task statement for one participant.

    Each participant is deterministically assigned a token from the
    module-level ``tokens`` pool (by participant id modulo the pool size),
    which is substituted into the statement's URL.
    """
    person = context['participant']
    assigned_token = tokens[person.id % len(tokens)]
    return TaskStatement(TITLE, STATEMENT_TEMPLATE.format(assigned_token))
tokens = ['CyCHgDjGEITw', 'mhLo7Fo1T0CB', 'MvXYeD9qdKbD', 'IcI8GUb3ifnu', 'WfClJasVVMqE', '6N111DvhAm1a', 'oKZdNHSWuEzB', '83xZkQHbnkB2', 'WAa0NtH4OhIq', 'hkiUVGhgnhQ9', 'cRx7j5c4BQ1f', 'T1HHyn3puLoX', 'Yydst5aHZyVp', 'cpnyMwLbKB5p', '8iHeN1y18NjN', 'LSwR2lsiDTA3', 'gG4EHjKbOZm2', 'zYgMKfs9y2Wc', 'oFw8MCETmi6p', '8cdmXvDNUZRM', 'HDmbVl56YPBr', 'tnBzvB42h501', 'Gk7rzhpwMyhf', 'fwHG0HAE6mpP', 'OQNSYN9nbfYR', 'w59HnPeSuwWt', 'QQpSYcygsvRy', 'dMTpLdze1Ex7', 'ULkRAfUEZy4U', 'Shpapnk5hsta', 'DVeq1osBM2YR', 'GyURFUZMB4Xm', 'X69RCwKq2u1l', 'kYPOfWmvzAWM', 'BKKTXLNiII07', '32neeyDHhzqB', 'GNwN6ZHCGCdw', '4P3IF6Upprg3', 'jTIiNfjvwByW', 'QVlqAFqMIJOf', 'jPhyNMm4X5Rn', 'AyJqWGxn89fb', 'eD526FoOaDYe', 'VKxmUzze44bm', 'SrlFcn7xYZ16', '5YPYsFFYXIVk', 'bmsWWn8pgEAS', 'yy6PPRzGx9n0', '21w2OU52qu1n', 'yz4IMAZNUxAg', 'U1BvSWxNl9Ol', 'AzWWPWmCXNyD', '2NmfDErj74Hy', 'OawqmtEXpyO5', '1t8Il35pZ4dC', 'D46PRTiXfS85', 'FALm3qlwKlby', 'y5p75Oc2Wlq1', 'KCGvCrvXm7Dv', 'kXRW6JlaWqAP', 'BRS3SNN1pE8i', '4kTbKTwv4TDa', 'seRMOBGjStBR', 'NEjfEx8hkIlw', 'CwDwPLL4lJ8h', 'JqrI0EvPVsUO', 'YuxW4zurp837', 'lqAfwBcmEIYB', 'gIEsoiXHNlaN', '9aArBkyeOkp0', 'e9fvVfyqxzMK', 'AtWnForNaW08', 'QfG31wSauKFk', 'jj8SFRXo0g10', 'UwgRJmdbVn6h', 'of7KckpcWd6l', 'vQC0iZzrFoy2', 'B35uVOu9MFP4', 'vJeWdKmi3bQ5', 'JCRWGhFBBBdT', 'zJtVGIIWcaRH', 'hCQfcouybmtC', 'I3Lvi2CBfg3r', 'hsGrtOj9RoSF', 'Z2M6Lpp38sP5', 'Ddd1Q5xIN1gF', 'vq6wMS9ot0zQ', 'hQjBcbnBCHxC', 'p1PwrBlPZ0ow', '6qJmIbbe6xoB', 'WQcb7xfEnOYR', 'Ey6vDVRYc2Vg', 'wEPIInOjeYUY', 'fb6WnAMMEgmY', 'Jm4yz5txSDmS', 'fXOMpdzreZvs', 'CkC0BkFtTlyq', 'DZ0gQRmgZsU1', 'DauMhJ1BsGza', 'hUUYMo9s2BPD', 'QQxDRxvmupZk', '4L7iiN4r06lT', '5zyfpXsYhBKg', 'JrNowXOUjujL', 'qTHahv28s258', 'rBu8EOrxNWZ5', '68pRpcX5n8Ev', 'jC6tKJcWjKtR', '8xasHzOz7jrx', 'e6OGhsh3lyer', '1Am3cf613rjd', 'VgN14kseLLgW', 'r0H7c44NjU4J', 'R8zcG9pVe4If', 'hz9enCVyG3Qe', '42ZrkxaG0Odd', 'BjAIeDoudYpr', 'c8XtYZASWW1E', 'XreXXid6ctjG', 'lu1Jl8lKScR5', 'ZbZuuURyxIxS', 'SeRZjEEHdjU5', '5wvjcDrLGtXE', 'TfoMA9ujFFZq', 
'2wUgbKrHw5MQ', 'bC9MrAkpJiRA', 'k8njmmSu5NXE', '0wEywfzDTs6l', 'iUVECD8GiYyE', 'O2hKKZqGekW6', 'CAXzTL2d1qO2', 'y797wsSFo5yg', '9RAhxCoi7VnP', 'MUzYZ9uHKkHp', 'hzFbJ1dTuW9l', 'xL313rNuBsdP', 'QffVzcK7XdwG', 'n0gE4RRtOLwv', 'KQOYiuUQc8ij', '9NtrplWWpd3F', '9sbNujphpvkK', 'TPo2EW4NtMms', 'lYT0LmVD3xW9', 'b1rhQTMP1Fx2', 'm6DaZ98S8G5i', 'GMYZBbKzuzlo', 'gzlFPb5KSvzm', 'RumlfxO98mbL', 'FDFcbQ3tqsqB', 'MFYBWyrNxrju', 'Yhz3FR5otGne', 'AkCV4tMFjjmN', 'DSLm94pZ7x2k', '2B3a4m8BtviD', 'DdpfmV6EcR2s', 'XZRnVv0pyTLM', 'Rec0l44sVRmo', 'ktyovd71S95u', 'RuxFDjYa3CzC', 'szRnOXD2BeKN', 'rGgeMiZEik6Q', 'ijj71npkEoqP', 'YpqcsLAkhR3n', 'dDQOSdN8opNw', 'J6Xyv2PcJIeY', 'eloBCeinF7Gp', 'YyUBfPqsv3An', 'arWweSmLMKkF', 'MhvfwsyrJXhb', 'QBqD3MDwBW6T', '1TmDk7sEdo8j', 'HnaaZYWYEhGY', '6IL7tGERPBoo', 'EdwBz7J0R1Sv', 'doKRJdkcVLOC', 'B6hAOtsGnnLb', 'gNjBR7OQuPFN', 'inMM26GlBXcY', 'sTBXHVijFmOR', 't8Lpl3Ad9rtc', '9KUt31evAPwJ', 'U0DZAVBlygDM', 'F7AGLWs6vR52', '7CeZX2qdLfKP', '4c73P4ua8mJv', 'nm84pMyyKjnT', 'JUl0SS3Oaglq', 'WtbMPWqY6n41', 'T2aTQb00cqx5', 'IB5yetiYZNGl', 'MXTPGNTcP02Q', 'PI82YnVTiBd5', 'YYyak43raQDP', 'CREXo6BLfX5Z', 'TXR49vSnEnzD', 'a5uy3qptZXQZ', 'TBnL6rzijVzJ', 'vFraC8uBBYMG', 'xoVeBYoWmkSX', 'QrkcALhYHdI4'] | 160 | 0 | 25 |
80d96f5c5eee131c39bba40f38a78bb59c0e41f9 | 69 | py | Python | playbook/roles/jupyter/files/.ipyparallel/profile_mpi/ipengine_config.py | kemusiro/rpicluster-for-openmpi | fb27da339e8f13efdcc108a005a26e335527ade1 | [
"MIT"
] | null | null | null | playbook/roles/jupyter/files/.ipyparallel/profile_mpi/ipengine_config.py | kemusiro/rpicluster-for-openmpi | fb27da339e8f13efdcc108a005a26e335527ade1 | [
"MIT"
] | null | null | null | playbook/roles/jupyter/files/.ipyparallel/profile_mpi/ipengine_config.py | kemusiro/rpicluster-for-openmpi | fb27da339e8f13efdcc108a005a26e335527ade1 | [
"MIT"
] | null | null | null | c.IPEngineApp.wait_for_url_file = 30
c.RegistrationFactory.ip = '*'
| 17.25 | 36 | 0.768116 | c.IPEngineApp.wait_for_url_file = 30
c.RegistrationFactory.ip = '*'
| 0 | 0 | 0 |
f714c0f34fff9800c3a67b955f8cc23e9eeb99c8 | 9,027 | py | Python | emu/containers/docker_container.py | CONQ-Agency/android-emulator-container-scripts | 0d5f55ca938818486a2ad638b91464e952e87cf4 | [
"Apache-2.0"
] | null | null | null | emu/containers/docker_container.py | CONQ-Agency/android-emulator-container-scripts | 0d5f55ca938818486a2ad638b91464e952e87cf4 | [
"Apache-2.0"
] | 1 | 2021-06-15T11:59:58.000Z | 2021-06-16T12:08:38.000Z | emu/containers/docker_container.py | CONQ-Agency/android-emulator-container-scripts | 0d5f55ca938818486a2ad638b91464e952e87cf4 | [
"Apache-2.0"
] | 1 | 2021-05-12T14:08:12.000Z | 2021-05-12T14:08:12.000Z | # Copyright 2019 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import re
import sys
import shutil
import abc
import docker
from tqdm import tqdm
from emu.utils import mkdir_p
class ProgressTracker(object):
"""Tracks progress using tqdm for a set of layers that are pushed."""
def update(self, entry):
"""Update the progress bars given a an entry.."""
if "id" not in entry:
return
identity = entry["id"]
if identity not in self.progress:
self.idx += 1
self.progress[identity] = {
"tqdm": tqdm(total=0, position=self.idx, unit="B", unit_scale=True), # The progress bar
"total": 0, # Total of bytes we are shipping
"status": "", # Status message.
"current": 0, # Current of total already send.
}
prog = self.progress[identity]
total = int(entry.get("progressDetail", {}).get("total", -1))
current = int(entry.get("progressDetail", {}).get("current", 0))
if prog["total"] != total and total != -1:
prog["total"] = total
prog["tqdm"].reset(total=total)
if prog["status"] != entry["status"]:
prog["tqdm"].set_description("{0} {1}".format(entry.get("status"), identity))
if current != 0:
diff = current - prog["current"]
prog["current"] = current
prog["tqdm"].update(diff)
class DockerContainer(object):
"""A Docker Device is capable of creating and launching docker images.
In order to successfully create and launch a docker image you must either
run this as root, or have enabled sudoless docker.
"""
TAG_REGEX = re.compile(r"[a-zA-Z0-9][a-zA-Z0-9._-]*:?[a-zA-Z0-9._-]*")
def launch(self, port_map):
"""Launches the container with the given sha, publishing abd on port, and gRPC on port 8554
Returns the container.
"""
image = self.docker_image()
client = docker.from_env()
try:
container = client.containers.run(
image=image.id,
privileged=True,
publish_all_ports=True,
detach=True,
ports=port_map,
)
print("Launched {} (id:{})".format(container.name, container.id))
print("docker logs -f {}".format(container.name))
print("docker stop {}".format(container.name))
return container
except:
logging.exception("Unable to run the %s", image_sha)
print("Unable to start the container, try running it as:")
print("./run.sh ", image_sha)
def create_container(self, dest):
"""Creates the docker container, returning the sha of the container, or None in case of failure."""
identity = None
image_tag = self.full_name()
print("docker build {} -t {}".format(dest, image_tag))
try:
api_client = self.get_api_client()
logging.info("build(path=%s, tag=%s, rm=True, decode=True)", dest, image_tag)
result = api_client.build(path=dest, tag=image_tag, rm=True, decode=True)
for entry in result:
if "stream" in entry:
sys.stdout.write(entry["stream"])
if "aux" in entry and "ID" in entry["aux"]:
identity = entry["aux"]["ID"]
client = docker.from_env()
image = client.images.get(identity)
image.tag(self.repo + self.image_name(), "latest")
except:
logging.exception("Failed to create container.", exc_info=True)
logging.warning("You can manually create the container as follows:")
logging.warning("docker build -t %s %s", image_tag, dest)
return identity
def pull(self, image, tag):
"""Tries to retrieve the given image and tag.
Return True if succeeded, False when failed.
"""
client = self.get_api_client()
try:
tracker = ProgressTracker()
result = client.pull(self.repo + image, tag)
for entry in result:
tracker.update(entry)
except:
logging.info("Failed to retrieve image, this is not uncommon.", exc_info=True)
return False
return True
def docker_image(self):
"""The docker local docker image if any
Returns:
{docker.models.images.Image}: A docker image object, or None.
"""
client = self.get_client()
for img in client.images.list():
for tag in img.tags:
if self.image_name() in tag:
return img
return None
def available(self):
"""True if this container image is locally available."""
return self.docker_image() != None
def can_pull(self):
"""True if this container image can be pulled from a registry."""
return self.pull(self.image_name(), self.docker_tag())
@abc.abstractmethod
def write(self, destination):
"""Method responsible for writing the Dockerfile and all necessary files to build a container.
Args:
destination ({string}): A path to a directory where all the container files should reside.
Raises:
NotImplementedError: [description]
"""
raise NotImplementedError()
@abc.abstractmethod
def image_name(self):
"""The image name without the tag used to uniquely identify this image.
Raises:
NotImplementedError: [description]
"""
raise NotImplementedError()
@abc.abstractmethod
@abc.abstractmethod
def depends_on(self):
"""Name of the system image this container is build on."""
raise NotImplementedError()
| 34.586207 | 107 | 0.583029 | # Copyright 2019 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import re
import sys
import shutil
import abc
import docker
from tqdm import tqdm
from emu.utils import mkdir_p
class ProgressTracker(object):
    """Tracks progress using tqdm for a set of layers that are pushed."""

    def __init__(self):
        # This tracks the information for a given layer id.
        self.progress = {}
        # Position counter so each new layer gets its own stacked tqdm bar.
        self.idx = -1

    def __del__(self):
        # Close every bar so tqdm can restore the terminal state.
        for k in self.progress:
            self.progress[k]["tqdm"].close()

    def update(self, entry):
        """Update the per-layer progress bar from one status entry.

        `entry` is a decoded status dict from a docker push/pull stream;
        entries without an "id" (layer identifier) are ignored.
        """
        if "id" not in entry:
            return
        identity = entry["id"]
        if identity not in self.progress:
            # First time we see this layer: allocate a new bar below the others.
            self.idx += 1
            self.progress[identity] = {
                "tqdm": tqdm(total=0, position=self.idx, unit="B", unit_scale=True),  # The progress bar
                "total": 0,  # Total of bytes we are shipping
                "status": "",  # Status message.
                "current": 0,  # Current of total already send.
            }
        prog = self.progress[identity]
        total = int(entry.get("progressDetail", {}).get("total", -1))
        current = int(entry.get("progressDetail", {}).get("current", 0))
        if prog["total"] != total and total != -1:
            # The daemon reported a (new) total size for this layer.
            prog["total"] = total
            prog["tqdm"].reset(total=total)
        if prog["status"] != entry["status"]:
            prog["tqdm"].set_description("{0} {1}".format(entry.get("status"), identity))
        if current != 0:
            # tqdm.update() takes a delta; the daemon reports absolute counts.
            diff = current - prog["current"]
            prog["current"] = current
            prog["tqdm"].update(diff)
class DockerContainer(object):
"""A Docker Device is capable of creating and launching docker images.
In order to successfully create and launch a docker image you must either
run this as root, or have enabled sudoless docker.
"""
TAG_REGEX = re.compile(r"[a-zA-Z0-9][a-zA-Z0-9._-]*:?[a-zA-Z0-9._-]*")
def __init__(self, repo=None):
if repo and repo[-1] != "/":
repo += "/"
self.repo = repo
def get_client(self):
return docker.from_env()
def get_api_client(self):
try:
api_client = docker.APIClient()
logging.info(api_client.version())
return api_client
except:
logging.exception("Failed to create default client, trying domain socket.", exc_info=True)
api_client = docker.APIClient(base_url="unix://var/run/docker.sock")
logging.info(api_client.version())
return api_client
def push(self):
image = self.full_name()
print("Pushing docker image: {}.. be patient this can take a while!".format(self.full_name()))
tracker = ProgressTracker()
try:
client = docker.from_env()
result = client.images.push(image, "latest", stream=True, decode=True)
for entry in result:
tracker.update(entry)
self.docker_image().tag("{}{}:latest".format(self.repo, self.image_name()))
except:
logging.exception("Failed to push image.", exc_info=True)
logging.warning("You can manually push the image as follows:")
logging.warning("docker push %s", image)
def launch(self, port_map):
"""Launches the container with the given sha, publishing abd on port, and gRPC on port 8554
Returns the container.
"""
image = self.docker_image()
client = docker.from_env()
try:
container = client.containers.run(
image=image.id,
privileged=True,
publish_all_ports=True,
detach=True,
ports=port_map,
)
print("Launched {} (id:{})".format(container.name, container.id))
print("docker logs -f {}".format(container.name))
print("docker stop {}".format(container.name))
return container
except:
logging.exception("Unable to run the %s", image_sha)
print("Unable to start the container, try running it as:")
print("./run.sh ", image_sha)
def create_container(self, dest):
"""Creates the docker container, returning the sha of the container, or None in case of failure."""
identity = None
image_tag = self.full_name()
print("docker build {} -t {}".format(dest, image_tag))
try:
api_client = self.get_api_client()
logging.info("build(path=%s, tag=%s, rm=True, decode=True)", dest, image_tag)
result = api_client.build(path=dest, tag=image_tag, rm=True, decode=True)
for entry in result:
if "stream" in entry:
sys.stdout.write(entry["stream"])
if "aux" in entry and "ID" in entry["aux"]:
identity = entry["aux"]["ID"]
client = docker.from_env()
image = client.images.get(identity)
image.tag(self.repo + self.image_name(), "latest")
except:
logging.exception("Failed to create container.", exc_info=True)
logging.warning("You can manually create the container as follows:")
logging.warning("docker build -t %s %s", image_tag, dest)
return identity
def clean(self, dest):
if os.path.exists(dest):
shutil.rmtree(dest)
mkdir_p(dest)
def pull(self, image, tag):
"""Tries to retrieve the given image and tag.
Return True if succeeded, False when failed.
"""
client = self.get_api_client()
try:
tracker = ProgressTracker()
result = client.pull(self.repo + image, tag)
for entry in result:
tracker.update(entry)
except:
logging.info("Failed to retrieve image, this is not uncommon.", exc_info=True)
return False
return True
def full_name(self):
if self.repo:
return "{}{}:{}".format(self.repo, self.image_name(), self.docker_tag())
return (self.image_name(), self.docker_tag())
def latest_name(self):
if self.repo:
return "{}{}:{}".format(self.repo, self.image_name(), "latest")
return (self.image_name(), "latest")
def create_cloud_build_step(self, dest):
return {
"name": "gcr.io/cloud-builders/docker",
"args": [
"build",
"-t",
self.full_name(),
"-t",
self.latest_name(),
os.path.basename(dest),
],
}
def docker_image(self):
"""The docker local docker image if any
Returns:
{docker.models.images.Image}: A docker image object, or None.
"""
client = self.get_client()
for img in client.images.list():
for tag in img.tags:
if self.image_name() in tag:
return img
return None
def available(self):
"""True if this container image is locally available."""
return self.docker_image() != None
def build(self, dest):
self.write(dest)
return self.create_container(dest)
def can_pull(self):
"""True if this container image can be pulled from a registry."""
return self.pull(self.image_name(), self.docker_tag())
@abc.abstractmethod
def write(self, destination):
"""Method responsible for writing the Dockerfile and all necessary files to build a container.
Args:
destination ({string}): A path to a directory where all the container files should reside.
Raises:
NotImplementedError: [description]
"""
raise NotImplementedError()
@abc.abstractmethod
def image_name(self):
"""The image name without the tag used to uniquely identify this image.
Raises:
NotImplementedError: [description]
"""
raise NotImplementedError()
@abc.abstractmethod
def docker_tag(self):
raise NotImplementedError()
@abc.abstractmethod
def depends_on(self):
"""Name of the system image this container is build on."""
raise NotImplementedError()
def __str__(self):
return self.image_name() + ":" + self.docker_tag()
| 2,254 | 0 | 350 |
0ddf1e275961fe3e95a3ee2b91a70698817c2960 | 3,618 | py | Python | python/2020/day16.py | SylvainDe/aoc | b8a4609327831685ef94c9960350ff7bb5ace1a5 | [
"MIT"
] | null | null | null | python/2020/day16.py | SylvainDe/aoc | b8a4609327831685ef94c9960350ff7bb5ace1a5 | [
"MIT"
] | null | null | null | python/2020/day16.py | SylvainDe/aoc | b8a4609327831685ef94c9960350ff7bb5ace1a5 | [
"MIT"
] | null | null | null | # vi: set shiftwidth=4 tabstop=4 expandtab:
import datetime
import re
rules_re = re.compile(r"(?P<min>\d+)-(?P<max>\d+)")
if __name__ == "__main__":
begin = datetime.datetime.now()
run_tests()
get_solutions()
end = datetime.datetime.now()
print(end - begin)
| 28.046512 | 86 | 0.609729 | # vi: set shiftwidth=4 tabstop=4 expandtab:
import datetime
import re
rules_re = re.compile(r"(?P<min>\d+)-(?P<max>\d+)")
def get_rule_from_string(string):
    """Parse a rule line like "class: 1-3 or 5-7" into ("class", [(1, 3), (5, 7)])."""
    field_name, ranges_text = string.split(": ")
    bounds = []
    for low, high in rules_re.findall(ranges_text):
        bounds.append((int(low), int(high)))
    return (field_name, bounds)
def get_ticket_from_string(string):
    """Parse a comma-separated ticket line into a list of ints."""
    return list(map(int, string.split(",")))
def get_info_from_string(string):
    """Split the puzzle input into (rules, my_ticket, nearby_tickets)."""
    rules_part, ticket_part, nearby_part = string.split("\n\n")
    parsed_rules = [get_rule_from_string(line) for line in rules_part.splitlines()]
    my_ticket = get_ticket_from_string(ticket_part.splitlines()[1])
    nearby_tickets = [
        get_ticket_from_string(line)
        for line in nearby_part.splitlines()[1:]
        if line
    ]
    return (parsed_rules, my_ticket, nearby_tickets)
def get_info_from_file(file_path="../../resources/year2020_day16_input.txt"):
    """Read the puzzle input file and parse it with get_info_from_string."""
    with open(file_path) as input_file:
        content = input_file.read()
    return get_info_from_string(content)
def value_valid_for_a_field(rules, n):
    """Return True when n falls inside at least one range of any rule."""
    for _, ranges in rules:
        for low, high in ranges:
            if low <= n <= high:
                return True
    return False
def validity_rate(rules, tickets):
    """Sum every ticket value that matches no field at all (the error rate)."""
    total = 0
    for ticket in tickets:
        for value in ticket:
            if not value_valid_for_a_field(rules, value):
                total += value
    return total
def guess_field_position(rules, tickets):
    """Deduce which ticket position corresponds to each rule name.

    For every field, collects the set of positions it cannot occupy
    (looking at fully-valid tickets only), then resolves the assignment
    by elimination via guess_positions.
    """
    positions = set(range(len(tickets[0])))
    impossible_positions = {name: set() for name, _ in rules}
    for t in tickets:
        # Only valid tickets
        if all(value_valid_for_a_field(rules, n) for n in t):
            for i, n in enumerate(t):
                for name, lst in rules:
                    if not any(mini <= n <= maxi for (mini, maxi) in lst):
                        # The value at position i violates this field's ranges.
                        impossible_positions[name].add(i)
    return guess_positions(impossible_positions, positions)
def guess_positions(impossible_positions, positions):
    """Assign each field name a unique position by elimination.

    `impossible_positions` maps field name -> set of positions the field
    cannot occupy; `positions` is the full set of candidate positions.
    Returns a dict name -> position, or None when elimination gets stuck.
    NOTE: both arguments are mutated (entries deleted / sets extended).
    """
    found = dict()
    while impossible_positions:
        for name, imposs in impossible_positions.items():
            possible = positions - imposs
            if len(possible) == 0:
                print("Impossible")
                return None
            elif len(possible) == 1:
                # Field pinned down: record it and rule its position out
                # for every remaining field, then restart the scan.
                unique_pos = possible.pop()
                found[name] = unique_pos
                del impossible_positions[name]
                for name, imposs in impossible_positions.items():
                    imposs.add(unique_pos)
                break
            else:
                # TODO
                # NOTE(review): as indented here this `else` binds to the
                # if/elif chain, so the function gives up as soon as the
                # *first* field examined is still ambiguous. A for/else
                # (else bound to the for loop) would implement the usual
                # "keep eliminating" behaviour and matches the passing
                # example in run_tests — verify indentation against the
                # original repository.
                print("No change")
                return None
    return found
def run_tests():
example1 = """class: 1-3 or 5-7
row: 6-11 or 33-44
seat: 13-40 or 45-50
your ticket:
7,1,14
nearby tickets:
7,3,47
40,4,50
55,2,20
38,6,12"""
rules, ticket, tickets = get_info_from_string(example1)
assert rules == [
("class", [(1, 3), (5, 7)]),
("row", [(6, 11), (33, 44)]),
("seat", [(13, 40), (45, 50)]),
]
assert ticket == [7, 1, 14]
assert tickets == [[7, 3, 47], [40, 4, 50], [55, 2, 20], [38, 6, 12]]
assert validity_rate(rules, tickets)
example2 = """class: 0-1 or 4-19
row: 0-5 or 8-19
seat: 0-13 or 16-19
your ticket:
11,12,13
nearby tickets:
3,9,18
15,1,5
5,14,9"""
rules, ticket, tickets = get_info_from_string(example2)
assert guess_field_position(rules, tickets) == {"seat": 2, "class": 1, "row": 0}
def get_solutions():
    """Print True/False checks of both puzzle answers against known values."""
    rules, ticket, tickets = get_info_from_file()
    # Part 1: sum of ticket values that cannot belong to any field.
    print(validity_rate(rules, tickets) == 26053)
    mult = 1
    field_position = guess_field_position(rules, tickets)
    # Part 2: product of our ticket's values for all "departure*" fields.
    for field, idx in field_position.items():
        if field.startswith("departure"):
            mult *= ticket[idx]
    print(mult == 1515506256421)
if __name__ == "__main__":
begin = datetime.datetime.now()
run_tests()
get_solutions()
end = datetime.datetime.now()
print(end - begin)
| 3,097 | 0 | 230 |
a6e3a48eaa19202a70ac26f47f0ceced944359ba | 750 | py | Python | scripts/1_deploy_pokemon.py | abhicoder29/pokemon-nft | 29d517ac996c878558795cc135cd17828976c179 | [
"MIT"
] | 1 | 2021-12-30T07:58:20.000Z | 2021-12-30T07:58:20.000Z | scripts/1_deploy_pokemon.py | abhicoder29/pokemon-nft | 29d517ac996c878558795cc135cd17828976c179 | [
"MIT"
] | null | null | null | scripts/1_deploy_pokemon.py | abhicoder29/pokemon-nft | 29d517ac996c878558795cc135cd17828976c179 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
from brownie import Pokemon, accounts, network, config
from scripts.helpful_scripts import fund
import asyncio
import json
| 34.090909 | 108 | 0.684 | #!/usr/bin/python3
from brownie import Pokemon, accounts, network, config
from scripts.helpful_scripts import fund
import asyncio
import json
def main():
    """Deploy the Pokemon contract to the active brownie network.

    Reads the deployer key and the network's Chainlink VRF settings
    (coordinator, LINK token, keyhash, fee) from brownie's config and
    returns the deployed contract handle.
    """
    dev = accounts.add(config["wallets"]["from_key"])
    print(network.show_active())
    # publish_source = True if os.getenv("ETHERSCAN_TOKEN") else False # Currently having an issue with this
    publish_source = True
    pokemon = Pokemon.deploy(
        config["networks"][network.show_active()]["vrf_coordinator"],
        config["networks"][network.show_active()]["link_token"],
        config["networks"][network.show_active()]["keyhash"],
        config["networks"][network.show_active()]["fee"],
        {"from": dev},
        publish_source=publish_source,
    )
    return pokemon
| 584 | 0 | 23 |
fe8d456a5fa9c6585a8f352262524692f6af0703 | 4,442 | py | Python | nets/mobilenet.py | bubbliiiing/arcface-tf2 | b2d6b1c10898689f0f4f6c952feefb3eb43b8e7d | [
"MIT"
] | 2 | 2022-03-08T03:49:14.000Z | 2022-03-08T09:42:29.000Z | nets/mobilenet.py | bubbliiiing/arcface-tf2 | b2d6b1c10898689f0f4f6c952feefb3eb43b8e7d | [
"MIT"
] | null | null | null | nets/mobilenet.py | bubbliiiing/arcface-tf2 | b2d6b1c10898689f0f4f6c952feefb3eb43b8e7d | [
"MIT"
] | 1 | 2022-03-16T01:51:20.000Z | 2022-03-16T01:51:20.000Z |
from tensorflow.keras import backend as K
from tensorflow.keras import initializers
from tensorflow.keras.layers import (Activation, BatchNormalization, Conv2D,
Dense, DepthwiseConv2D, Dropout, Flatten,
PReLU)
from tensorflow.keras.regularizers import l2
| 50.477273 | 119 | 0.660738 |
from tensorflow.keras import backend as K
from tensorflow.keras import initializers
from tensorflow.keras.layers import (Activation, BatchNormalization, Conv2D,
Dense, DepthwiseConv2D, Dropout, Flatten,
PReLU)
from tensorflow.keras.regularizers import l2
def _conv_block(inputs, filters, kernel=(3, 3), strides=(1, 1), weight_decay=5e-4):
    """Conv2D -> BatchNorm -> ReLU6 stem block.

    Layer names are hard-coded to 'conv1*', so this block is used once
    per model (the network stem).
    """
    x = Conv2D(filters, kernel,
               padding='same',
               use_bias=False,
               strides=strides,
               name='conv1',
               kernel_initializer=initializers.RandomNormal(stddev=0.1),
               kernel_regularizer=l2(weight_decay),
               bias_initializer='zeros')(inputs)
    x = BatchNormalization(name='conv1_bn', epsilon=1e-5)(x)
    return Activation(relu6, name='conv1_relu')(x)
def _depthwise_conv_block(inputs, pointwise_conv_filters,
                          depth_multiplier=1, strides=(1, 1), block_id=1, weight_decay=5e-4):
    """Depthwise-separable conv block (MobileNet building block).

    3x3 depthwise conv -> BN -> ReLU6, then 1x1 pointwise conv -> BN ->
    ReLU6. `block_id` only disambiguates layer names.
    """
    # Depthwise 3x3 stage.
    x = DepthwiseConv2D((3, 3),
                        padding='same',
                        depth_multiplier=depth_multiplier,
                        strides=strides,
                        use_bias=False,
                        name='conv_dw_%d' % block_id,
                        depthwise_initializer=initializers.RandomNormal(stddev=0.1),
                        depthwise_regularizer=l2(weight_decay),
                        bias_initializer='zeros')(inputs)
    x = BatchNormalization(name='conv_dw_%d_bn' % block_id, epsilon=1e-5)(x)
    x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x)
    # Pointwise 1x1 stage projecting to pointwise_conv_filters channels.
    x = Conv2D(pointwise_conv_filters, (1, 1),
               padding='same',
               use_bias=False,
               strides=(1, 1),
               name='conv_pw_%d' % block_id,
               kernel_initializer=initializers.RandomNormal(stddev=0.1),
               kernel_regularizer=l2(weight_decay),
               bias_initializer='zeros')(x)
    x = BatchNormalization(name='conv_pw_%d_bn' % block_id, epsilon=1e-5)(x)
    return Activation(relu6, name='conv_pw_%d_relu' % block_id)(x)
def relu6(x):
    """ReLU activation clipped at 6, as used throughout MobileNet."""
    return K.relu(x, max_value=6)
def MobilenetV1(inputs, embedding_size, dropout_keep_prob=0.5, depth_multiplier=1, weight_decay=5e-4):
    """MobileNetV1 backbone ending in an `embedding_size`-d embedding.

    Conv stem + 13 depthwise-separable blocks, then a 1x1 'sep' conv to
    512 channels, PReLU, dropout, flatten, and a Dense + BatchNorm
    ('features') projection to the embedding.
    """
    # Stem (note: stride 1 here, not the paper's stride-2 stem).
    x = _conv_block(inputs, 32, strides=(1, 1))
    # Depthwise-separable body; filter counts follow the MobileNet schedule.
    x = _depthwise_conv_block(x, 64, depth_multiplier, block_id=1, weight_decay=weight_decay)
    x = _depthwise_conv_block(x, 128, depth_multiplier, strides=(2, 2), block_id=2, weight_decay=weight_decay)
    x = _depthwise_conv_block(x, 128, depth_multiplier, block_id=3, weight_decay=weight_decay)
    x = _depthwise_conv_block(x, 256, depth_multiplier, strides=(2, 2), block_id=4, weight_decay=weight_decay)
    x = _depthwise_conv_block(x, 256, depth_multiplier, block_id=5, weight_decay=weight_decay)
    x = _depthwise_conv_block(x, 512, depth_multiplier, strides=(2, 2), block_id=6, weight_decay=weight_decay)
    x = _depthwise_conv_block(x, 512, depth_multiplier, block_id=7, weight_decay=weight_decay)
    x = _depthwise_conv_block(x, 512, depth_multiplier, block_id=8, weight_decay=weight_decay)
    x = _depthwise_conv_block(x, 512, depth_multiplier, block_id=9, weight_decay=weight_decay)
    x = _depthwise_conv_block(x, 512, depth_multiplier, block_id=10, weight_decay=weight_decay)
    x = _depthwise_conv_block(x, 512, depth_multiplier, block_id=11, weight_decay=weight_decay)
    x = _depthwise_conv_block(x, 1024, depth_multiplier, strides=(2, 2), block_id=12, weight_decay=weight_decay)
    x = _depthwise_conv_block(x, 1024, depth_multiplier, block_id=13, weight_decay=weight_decay)
    # Embedding head.
    x = Conv2D(512, kernel_size=1, use_bias=False, name='sep',
               kernel_initializer=initializers.RandomNormal(stddev=0.1),
               kernel_regularizer=l2(weight_decay),
               bias_initializer='zeros')(x)
    x = BatchNormalization(name='sep_bn', epsilon=1e-5)(x)
    x = PReLU(alpha_initializer=initializers.constant(0.25), alpha_regularizer=l2(weight_decay), shared_axes=[1, 2])(x)
    x = BatchNormalization(name='bn2', epsilon=1e-5)(x)
    # NOTE(review): Keras Dropout takes a *drop* rate; the name
    # dropout_keep_prob suggests a keep probability — confirm the
    # intended value.
    x = Dropout(dropout_keep_prob)(x)
    x = Flatten()(x)
    x = Dense(embedding_size, name='linear',
              kernel_initializer=initializers.RandomNormal(stddev=0.1),
              kernel_regularizer=l2(weight_decay),
              bias_initializer='zeros')(x)
    x = BatchNormalization(name='features', epsilon=1e-5)(x)
    return x
| 4,018 | 0 | 92 |
4f74195555db7ef71e27e1720160a87892b9d005 | 4,174 | py | Python | medkit/bases/base_dataset.py | XanderJC/medkit-learn | 13b06906f6d093c4978f52f1e8f06e998608faa3 | [
"Apache-2.0"
] | 14 | 2021-06-06T18:14:55.000Z | 2022-03-18T14:00:05.000Z | medkit/bases/base_dataset.py | vanderschaarlab/medkit-learn | a8bed6e7a76769f28abfadb3231f139cbfe44b49 | [
"Apache-2.0"
] | 1 | 2021-06-13T05:34:21.000Z | 2021-06-15T09:52:50.000Z | medkit/bases/base_dataset.py | vanderschaarlab/medkit-learn | a8bed6e7a76769f28abfadb3231f139cbfe44b49 | [
"Apache-2.0"
] | 1 | 2021-10-18T14:07:45.000Z | 2021-10-18T14:07:45.000Z | from .__head__ import *
class BaseDataset(torch.utils.data.Dataset):
"""
Base Dataset class to be passed to torch Dataloader so train functions can be unified.
Not particularly designed for end-user, but for pre-training models.
"""
def __len__(self):
"Total number of samples"
return self.N
def __getitem__(self, index):
"Generates one batch of data"
return (
self.X_static[index],
self.X_series[index],
self.X_mask[index],
self.y_series[index],
)
def get_whole_batch(self):
"Returns all data as a single batch"
return self.X_static, self.X_series, self.X_mask, self.y_series
class standard_dataset(BaseDataset):
"""
Dataset to be passed to a torch DataLoader
"""
| 34.213115 | 97 | 0.576186 | from .__head__ import *
class BaseDataset(torch.utils.data.Dataset):
    """
    Base Dataset class to be passed to torch Dataloader so train functions can be unified.
    Not particularly designed for end-user, but for pre-training models.
    """

    def __init__(self):
        super().__init__()
        # All fields are populated by subclasses before the dataset is used.
        self.N = None  # number of samples in the dataset
        self.X_static = None  # static features [N, static_dim]
        self.X_series = None  # temporal features [N, max_seq_length, series_dim]
        self.X_mask = None  # sequence mask [N, max_seq_length]
        self.y_series = None  # per-step targets [N, max_seq_length]

    def __len__(self):
        "Total number of samples"
        return self.N

    def __getitem__(self, index):
        "Generates one batch of data"
        sample = (
            self.X_static[index],
            self.X_series[index],
            self.X_mask[index],
            self.y_series[index],
        )
        return sample

    def get_whole_batch(self):
        "Returns all data as a single batch"
        return (self.X_static, self.X_series, self.X_mask, self.y_series)
"""
Dataset to be passed to a torch DataLoader
"""
def __init__(self, domain, max_seq_length=50, test=False, save_scale=False):
super(standard_dataset, self).__init__()
scale = scaler(domain)
fold = "train"
if test:
fold = "test"
path_head = f"data/{domain.base_name}/{domain.base_name}_temporal_{fold}_data_eav.csv.gz"
path = resource_filename("medkit", path_head)
data = pd.read_csv(path)
if domain.base_name == "icu":
series_df = data
series_df.fillna(series_df.mean(), inplace=True)
else:
series_df = pd.pivot_table(
data, index=["id", "time"], columns="variable", values="value"
).reset_index(level=[0, 1])
series_df.fillna(method="ffill", inplace=True)
series_df.fillna(0, inplace=True)
unique_ids = pd.unique(series_df["id"])
self.N = len(unique_ids)
series = torch.zeros((len(unique_ids), max_seq_length, domain.series_in_dim))
y_series = torch.zeros((len(unique_ids), max_seq_length))
for i, ids in enumerate(unique_ids):
patient = series_df[series_df["id"] == ids].sort_values(by=["time"])
cov = patient[domain.series_names].to_numpy()
if domain.bin_out_dim > 0:
cov[:, -domain.bin_out_dim :] = (
cov[:, -domain.bin_out_dim :] > 0
).astype(int)
targets = patient[domain.action_names].to_numpy()
targets = (targets > 0).astype(int)
y = targets[:, 0]
num_targets = targets.shape[1]
for j in range(num_targets - 1):
y += (2 ** (j + 1)) * targets[:, j + 1]
seq_length = len(cov)
cov = torch.tensor(cov)
y = torch.tensor(y)
if seq_length > max_seq_length:
series[i, :, :] = cov[:max_seq_length, :]
y_series[i, :] = y[:max_seq_length]
else:
series[i, :seq_length, :] = cov
y_series[i, :seq_length] = y
mask = (series[:, :, 0] != 0).float()
path_head = (
f"data/{domain.base_name}/{domain.base_name}_static_{fold}_data.csv.gz"
)
path = resource_filename("medkit", path_head)
static_df = pd.read_csv(path)
static_df.fillna(static_df.mean(), inplace=True)
static = torch.zeros((len(unique_ids), domain.static_in_dim))
for i, ids in enumerate(unique_ids):
patient = static_df[static_df["id"] == ids]
cov = patient[domain.static_names].to_numpy()
cov = torch.tensor(cov)
static[i, :] = cov
normed_series = scale.fit_series(series, mask)
normed_static = scale.fit_static(static)
if save_scale:
scale.save_params()
self.X_static = normed_static
self.X_series = normed_series
self.X_mask = mask
self.y_series = y_series
| 3,304 | 0 | 54 |
45a0b3e934efd1b716e4e65e5d9b8da1fcedcf16 | 1,313 | py | Python | colorthermoexample.py | majikpig/ubtech | 8d21f4b522e06f998bfdf3b1947db23bfc349e3d | [
"MIT"
] | null | null | null | colorthermoexample.py | majikpig/ubtech | 8d21f4b522e06f998bfdf3b1947db23bfc349e3d | [
"MIT"
] | null | null | null | colorthermoexample.py | majikpig/ubtech | 8d21f4b522e06f998bfdf3b1947db23bfc349e3d | [
"MIT"
] | 1 | 2019-03-23T16:08:30.000Z | 2019-03-23T16:08:30.000Z | import tempsensor
import time
import btnlib as btn
import ledlib as led
red = 80
orange = 75
yellow = 70
green = 65
blue = 60
purple = 55
while btn.isOn(btn.switch): #while the switch is on:
led.turn_on(led.white)
temp = tempsensor.tempF() #get the temp
print temp
if temp > red :
led.turn_on_all()
elif temp > orange :
led.turn_off(led.red)
led.turn_on(led.orange)
led.turn_on(led.yellow)
led.turn_on(led.green)
led.turn_on(led.blue)
led.turn_on(led.purple)
elif temp > yellow :
led.turn_off(led.red)
led.turn_off(led.orange)
led.turn_on(led.yellow)
led.turn_on(led.green)
led.turn_on(led.blue)
led.turn_on(led.purple)
elif temp > green :
led.turn_off(led.red)
led.turn_off(led.orange)
led.turn_off(led.yellow)
led.turn_on(led.green)
led.turn_on(led.blue)
led.turn_on(led.purple)
elif temp > blue :
led.turn_off(led.red)
led.turn_off(led.orange)
led.turn_off(led.yellow)
led.turn_off(led.green)
led.turn_on(led.blue)
led.turn_on(led.purple)
elif temp > purple :
led.turn_off(led.red)
led.turn_off(led.orange)
led.turn_off(led.yellow)
led.turn_off(led.green)
led.turn_off(led.blue)
led.turn_on(led.purple)
else :
led.turn_off_all()
led.turn_on(led.white)
time.sleep(0.5)
led.turn_off(led.white)
time.sleep(0.5)
led.GPIO.cleanup()
| 20.84127 | 52 | 0.711348 | import tempsensor
import time
import btnlib as btn
import ledlib as led
red = 80
orange = 75
yellow = 70
green = 65
blue = 60
purple = 55
while btn.isOn(btn.switch): #while the switch is on:
led.turn_on(led.white)
temp = tempsensor.tempF() #get the temp
print temp
if temp > red :
led.turn_on_all()
elif temp > orange :
led.turn_off(led.red)
led.turn_on(led.orange)
led.turn_on(led.yellow)
led.turn_on(led.green)
led.turn_on(led.blue)
led.turn_on(led.purple)
elif temp > yellow :
led.turn_off(led.red)
led.turn_off(led.orange)
led.turn_on(led.yellow)
led.turn_on(led.green)
led.turn_on(led.blue)
led.turn_on(led.purple)
elif temp > green :
led.turn_off(led.red)
led.turn_off(led.orange)
led.turn_off(led.yellow)
led.turn_on(led.green)
led.turn_on(led.blue)
led.turn_on(led.purple)
elif temp > blue :
led.turn_off(led.red)
led.turn_off(led.orange)
led.turn_off(led.yellow)
led.turn_off(led.green)
led.turn_on(led.blue)
led.turn_on(led.purple)
elif temp > purple :
led.turn_off(led.red)
led.turn_off(led.orange)
led.turn_off(led.yellow)
led.turn_off(led.green)
led.turn_off(led.blue)
led.turn_on(led.purple)
else :
led.turn_off_all()
led.turn_on(led.white)
time.sleep(0.5)
led.turn_off(led.white)
time.sleep(0.5)
led.GPIO.cleanup()
| 0 | 0 | 0 |
18792a2154e363e0b2b8ccacb226f0fb273783c3 | 723 | py | Python | sim_examples/inference_cnn_loadweight.py | thuime/XPEsim | ab97d4c3c5b32ce4804ca58ad0f0083bb9de11fc | [
"MIT"
] | 27 | 2018-04-17T02:39:57.000Z | 2022-03-25T13:51:14.000Z | sim_examples/inference_cnn_loadweight.py | Arvin-xd/XPEsim | ab97d4c3c5b32ce4804ca58ad0f0083bb9de11fc | [
"MIT"
] | 1 | 2018-12-13T17:58:20.000Z | 2021-04-15T15:32:04.000Z | sim_examples/inference_cnn_loadweight.py | Arvin-xd/XPEsim | ab97d4c3c5b32ce4804ca58ad0f0083bb9de11fc | [
"MIT"
] | 14 | 2018-04-17T02:40:17.000Z | 2021-12-24T22:11:40.000Z | import simulator
import numpy as np
'''
TODO Support CNN in the Hardware Evaluation
'''
weights_dir = "./data/mnist-lenet.npz"
image_dir = "./data/dataset/mnist/test.npy"
batch_size = 10 # The number of input picture
weights = np.load(weights_dir)['arr_0'].item()
data = np.load(image_dir)[:batch_size]
images = data[:, 0]
labels = data[:, 1]
params = simulator.Parameterinput() # Read parameters in simconfig
# Define the neural network
net = [
['Conv2d',],
['Conv2d',],
['Linear',],
['Linear',],
['Linear',],
]
# SIM
HWsim = simulator.SystemSim(params)
HWsim.apply(net, weights, images, labels) # Forward computing
HWsim.show() # Show the result in console
| 22.59375 | 67 | 0.644537 | import simulator
import numpy as np
'''
TODO Support CNN in the Hardware Evaluation
'''
weights_dir = "./data/mnist-lenet.npz"
image_dir = "./data/dataset/mnist/test.npy"
batch_size = 10 # The number of input picture
weights = np.load(weights_dir)['arr_0'].item()
data = np.load(image_dir)[:batch_size]
images = data[:, 0]
labels = data[:, 1]
params = simulator.Parameterinput() # Read parameters in simconfig
# Define the neural network
net = [
['Conv2d',],
['Conv2d',],
['Linear',],
['Linear',],
['Linear',],
]
# SIM
HWsim = simulator.SystemSim(params)
HWsim.apply(net, weights, images, labels) # Forward computing
HWsim.show() # Show the result in console
| 0 | 0 | 0 |
ae3ea7a76f6a45e67113e758d9604cf094c6cf81 | 1,468 | py | Python | 07_docker/marathon_tweets/tweet_collector/get_tweets.py | bfc782/spice_weasel | 7f2d7586ae485001a9b875b62d093fd04e28faa8 | [
"CC0-1.0"
] | null | null | null | 07_docker/marathon_tweets/tweet_collector/get_tweets.py | bfc782/spice_weasel | 7f2d7586ae485001a9b875b62d093fd04e28faa8 | [
"CC0-1.0"
] | null | null | null | 07_docker/marathon_tweets/tweet_collector/get_tweets.py | bfc782/spice_weasel | 7f2d7586ae485001a9b875b62d093fd04e28faa8 | [
"CC0-1.0"
] | null | null | null | #import config
import os
from tweepy import OAuthHandler, Stream
from tweepy.streaming import StreamListener
import json
import logging
import pymongo
import time
# MongoDB connection; "mongodb" is presumably the docker-compose service host
# name -- TODO confirm against the compose file.
client = pymongo.MongoClient("mongodb")
db = client.tweets
# crude startup delay, presumably waiting for the mongodb container to accept
# connections -- confirm
time.sleep(10)
# Twitter credentials are taken from the environment (see bash_profile).
api_key = os.getenv('TWITTER_CONSUMER_API_KEY')
api_secret = os.getenv('TWITTER_CONSUMER_API_SECRET')
access_token = os.getenv('TWITTER_ACCESS_TOKEN')
access_secret = os.getenv('TWITTER_ACCESS_TOKEN_SECRET')

def authenticate():
    '''Function for handling Twitter Auth. Pulls Env Variables in bash_profile'''
    auth = OAuthHandler(api_key, api_secret)
    auth.set_access_token(access_token, access_secret)
    return auth

if __name__ == '__main__':
    # NOTE(review): TwitterListener is not defined in this (truncated) copy of
    # the module -- confirm the class definition exists before running.
    auth = authenticate()
    listener = TwitterListener()
    stream = Stream(auth, listener)
    stream.filter(track=['marathon'], languages=['en'])
| 25.754386 | 90 | 0.681199 | #import config
import os
from tweepy import OAuthHandler, Stream
from tweepy.streaming import StreamListener
import json
import logging
import pymongo
import time
client = pymongo.MongoClient("mongodb")
db = client.tweets
time.sleep(10)
api_key = os.getenv('TWITTER_CONSUMER_API_KEY')
api_secret = os.getenv('TWITTER_CONSUMER_API_SECRET')
access_token = os.getenv('TWITTER_ACCESS_TOKEN')
access_secret = os.getenv('TWITTER_ACCESS_TOKEN_SECRET')
def authenticate():
    """Create an OAuthHandler wired up with the four TWITTER_* credentials read above."""
    oauth = OAuthHandler(api_key, api_secret)
    oauth.set_access_token(access_token, access_secret)
    return oauth
class TwitterListener(StreamListener):
    """Stream listener that stores a trimmed-down copy of each incoming tweet in MongoDB."""

    def on_data(self, data):
        '''This method is what happens to every tweet as it is intercepted in real time'''
        t = json.loads(data)
        tweet = {
            'created_at': t['created_at'],
            'text': t['text'],
            'username': t['user']['screen_name'],
            'followers_count': t['user']['followers_count']
        }
        # logging.critical(f'\n\n TWEET INCOMING: {tweet["text"]}\n\n')
        # NOTE(review): this writes to a collection literally named
        # "collections.tweets"; "db.tweets.insert_one(tweet)" may have been
        # intended -- confirm before changing.
        db.collections.tweets.insert_one(tweet)

    def on_error(self, status):
        """Stop the stream (return False) when Twitter signals rate limiting (HTTP 420)."""
        # BUG FIX: the original tested ``status -- 420`` (parsed as
        # ``status + 420``, always truthy), so the stream disconnected on
        # every error code instead of only on rate limiting.
        if status == 420:
            print(status)
            return False
if __name__ == '__main__':
    # Connect an authenticated stream and follow English-language marathon tweets.
    twitter_auth = authenticate()
    tweet_listener = TwitterListener()
    tweet_stream = Stream(twitter_auth, tweet_listener)
    tweet_stream.filter(track=['marathon'], languages=['en'])
| 84 | 512 | 23 |
6cef8f5b03beb08dc2b0a1f5dceed760540264db | 859 | py | Python | Script Examples/example_py_script.py | snichola/Aurora | 3d38de87e9bb8affb309a0dd6f47c8479c5557be | [
"MIT"
] | null | null | null | Script Examples/example_py_script.py | snichola/Aurora | 3d38de87e9bb8affb309a0dd6f47c8479c5557be | [
"MIT"
] | null | null | null | Script Examples/example_py_script.py | snichola/Aurora | 3d38de87e9bb8affb309a0dd6f47c8479c5557be | [
"MIT"
] | null | null | null | import clr
clr.AddReference("Aurora")
clr.AddReference("System.Drawing")
from Aurora import Global
from Aurora.Settings import KeySequence
from Aurora.Devices import DeviceKeys
from Aurora.EffectsEngine import EffectLayer
from System.Drawing import Color
import System
array_device_keys = System.Array[DeviceKeys] | 37.347826 | 207 | 0.772992 | import clr
clr.AddReference("Aurora")
clr.AddReference("System.Drawing")
from Aurora import Global
from Aurora.Settings import KeySequence
from Aurora.Devices import DeviceKeys
from Aurora.EffectsEngine import EffectLayer
from System.Drawing import Color
import System
array_device_keys = System.Array[DeviceKeys]
class main():
    """Example Aurora effect: light TAB plus the top letter row as a seconds-of-minute meter."""

    ID = "ExamplePyEffect"
    ForegroundColour = Color.Red
    BackgroundColour = Color.Black
    # Keys the effect draws on (TAB plus the QWERTYUIOP row).
    DefaultKeys = KeySequence(array_device_keys((DeviceKeys.TAB, DeviceKeys.Q, DeviceKeys.W, DeviceKeys.E, DeviceKeys.R, DeviceKeys.T, DeviceKeys.Y, DeviceKeys.U, DeviceKeys.I, DeviceKeys.O, DeviceKeys.P)))

    def UpdateLights(self, settings, state):
        """Build one frame: fill the configured keys proportionally to the current second out of 60."""
        layer = EffectLayer(self.ID)
        # repaired: the original final line carried fused dataset residue
        # ("| 199 | 321 | 23 |") making it a syntax error; stray C#-style
        # semicolon dropped as well
        layer.PercentEffect(self.ForegroundColour, self.BackgroundColour, settings.Keys, System.DateTime.Now.Second, 60)
        return layer
021ec52cf537b565b4e716e86143546f5b5a72a2 | 1,692 | py | Python | uninas/optimization/hpo/pymoo/mutation.py | cogsys-tuebingen/uninas | 06729b9cf517ec416fb798ae387c5bd9c3a278ac | [
"MIT"
] | 18 | 2020-11-22T16:03:08.000Z | 2022-03-15T12:11:46.000Z | uninas/optimization/hpo/pymoo/mutation.py | cogsys-tuebingen/uninas | 06729b9cf517ec416fb798ae387c5bd9c3a278ac | [
"MIT"
] | 2 | 2022-01-04T08:10:17.000Z | 2022-01-05T08:13:14.000Z | uninas/optimization/hpo/pymoo/mutation.py | cogsys-tuebingen/uninas | 06729b9cf517ec416fb798ae387c5bd9c3a278ac | [
"MIT"
] | 6 | 2021-03-08T07:08:52.000Z | 2022-02-24T12:00:43.000Z | from pymoo.model.mutation import Mutation
from pymoo.operators.mutation.no_mutation import NoMutation
from pymoo.operators.mutation.polynomial_mutation import PolynomialMutation
from pymoo.operators.integer_from_float_operator import IntegerFromFloatMutation
from uninas.utils.args import ArgsInterface, Argument, Namespace
from uninas.register import Register
@Register.hpo_pymoo_crossover()
class NoPymooMutation(AbstractPymooCrossover):
"""
No mutation at all
"""
@classmethod
@Register.hpo_pymoo_mutation()
class PolynomialPymooMutation(AbstractPymooCrossover):
"""
IntegerFromFloatMutation for integer variables, PolynomialMutation for floats
"""
@classmethod
def args_to_add(cls, index=None) -> [Argument]:
""" list arguments to add to argparse when this class (or a child class) is chosen """
return super().args_to_add(index) + [
Argument('type', default='int', type=str, choices=['int', 'real'], help='?'),
Argument('eta', default=30, type=int, help='?'),
]
@classmethod
| 35.25 | 94 | 0.706265 | from pymoo.model.mutation import Mutation
from pymoo.operators.mutation.no_mutation import NoMutation
from pymoo.operators.mutation.polynomial_mutation import PolynomialMutation
from pymoo.operators.integer_from_float_operator import IntegerFromFloatMutation
from uninas.utils.args import ArgsInterface, Argument, Namespace
from uninas.register import Register
class AbstractPymooCrossover(ArgsInterface):
    # NOTE(review): despite the name, subclasses construct pymoo *Mutation*
    # operators -- the class name looks copied from the crossover module;
    # confirm before renaming (the subclasses below depend on it).

    @classmethod
    def from_args(cls, args: Namespace, index=None) -> Mutation:
        """Build the pymoo mutation operator described by the parsed arguments."""
        raise NotImplementedError


@Register.hpo_pymoo_crossover()
class NoPymooMutation(AbstractPymooCrossover):
    """
    No mutation at all
    """
    # NOTE(review): registered through hpo_pymoo_crossover() although this is
    # a mutation (PolynomialPymooMutation below uses hpo_pymoo_mutation()) --
    # looks like a copy-paste slip; confirm against the Register API before
    # changing.

    @classmethod
    def from_args(cls, args: Namespace, index=None) -> Mutation:
        """Return pymoo's no-op mutation operator."""
        return NoMutation()


@Register.hpo_pymoo_mutation()
class PolynomialPymooMutation(AbstractPymooCrossover):
    """
    IntegerFromFloatMutation for integer variables, PolynomialMutation for floats
    """

    @classmethod
    def args_to_add(cls, index=None) -> [Argument]:
        """ list arguments to add to argparse when this class (or a child class) is chosen """
        return super().args_to_add(index) + [
            Argument('type', default='int', type=str, choices=['int', 'real'], help='?'),
            Argument('eta', default=30, type=int, help='?'),
        ]

    @classmethod
    def from_args(cls, args: Namespace, index=None) -> Mutation:
        """Instantiate the mutation chosen by the parsed 'type' and 'eta' arguments."""
        type_, eta = cls._parsed_arguments(['type', 'eta'], args, index=index)
        if type_ == 'int':
            # wrap the float-valued operator so offspring are cast back to integers
            return IntegerFromFloatMutation(clazz=PolynomialMutation, eta=eta)
        elif type_ == 'real':
            return PolynomialMutation(eta=eta)
        # not reachable for valid input: 'type' is limited by the choices above
        raise NotImplementedError
| 475 | 66 | 75 |
c5b1a0e5bbf1b1c1c34ff6523e7d2c68a22c4255 | 1,077 | py | Python | list_files.py | diegomendesmoreno/stm8_standalone_programmer | 6f37d0416c91ff6ff90df3d197b359a41bb23705 | [
"MIT"
] | null | null | null | list_files.py | diegomendesmoreno/stm8_standalone_programmer | 6f37d0416c91ff6ff90df3d197b359a41bb23705 | [
"MIT"
] | null | null | null | list_files.py | diegomendesmoreno/stm8_standalone_programmer | 6f37d0416c91ff6ff90df3d197b359a41bb23705 | [
"MIT"
] | null | null | null | import os
if __name__ == "__main__":
    # Build the file index, print a summary line, then each found name.
    # NOTE(review): FileList is not defined in this (truncated) copy of the
    # module -- confirm the class definition is present before running.
    file_list = FileList()
    file_list.list_files()
    print(file_list.num_files, "files listed")
    for f in file_list.files:
        print(f)
| 31.676471 | 59 | 0.508821 | import os
class FileList:
    """Collect firmware image file names (.s19/.srec/.ihx/.hex) found under self.path."""

    # File suffixes recognised as programmable firmware images.
    _EXTENSIONS = ('.s19', '.srec', '.ihx', '.hex')

    def __init__(self):
        self.path = 'Bin/'  # Search in the 'Bin' directory
        self.files = []     # matching file names found so far
        self.num_files = 0  # kept equal to len(self.files)

    def list_files(self):
        """Walk self.path recursively and record every firmware file name found."""
        # r=root, d=directories, f = files
        for r, d, f in os.walk(self.path):
            for file in f:
                # BUG FIX: the original used substring tests ('.s19' in file),
                # which also matched names like 'a.s19.bak' and counted a name
                # such as 'x.s19.hex' twice; match on the real suffix instead.
                if file.endswith(self._EXTENSIONS):
                    self.files.append(file)
                    self.num_files += 1
if __name__ == "__main__":
    # Index the firmware files, report how many were found, then list them.
    listing = FileList()
    listing.list_files()
    print(listing.num_files, "files listed")
    for entry in listing.files:
        print(entry)
| 819 | -6 | 76 |
950517294b314e4c36db0912d9df4efeae2545e1 | 7,534 | py | Python | cows/service/imps/csmlbackend/wfs_csmlstoredqueries.py | cedadev/cows | db9ed729c886b271ce85355b97e39243081e8246 | [
"BSD-2-Clause"
] | 2 | 2018-05-09T16:12:43.000Z | 2018-08-21T17:10:22.000Z | cows/service/imps/csmlbackend/wfs_csmlstoredqueries.py | cedadev/cows | db9ed729c886b271ce85355b97e39243081e8246 | [
"BSD-2-Clause"
] | null | null | null | cows/service/imps/csmlbackend/wfs_csmlstoredqueries.py | cedadev/cows | db9ed729c886b271ce85355b97e39243081e8246 | [
"BSD-2-Clause"
] | null | null | null | # BSD Licence
# Copyright (c) 2009, Science & Technology Facilities Council (STFC)
# All rights reserved.
#
# See the LICENSE file in the source distribution of this software for
# the full license text.
""" This module contains CSML WFS 2.0 stored query implementations.
To add a new stored query, write the functional code here, then describe the functionality as
a new query in:
wfs_csmllayer.CSMLStoredQueries.queries
These query functions should either return a resultset (python list) containing the features,
or 2 lists, one being the resultset, and the other containing string representations of XML addtional
objects - in CSML these are going to be StorageDescriptors.
"""
import wfs_csmllayer
import logging
log = logging.getLogger(__name__)
import csml.parser, csml.csmllibs.csmlextra
from xml.etree import ElementTree as etree
from pylons import request, config
# qualifiedFeatureType='{http://ndg.nerc.ac.uk/csml}' + storagedescriptor.__class__.__name__
# emptyelem=etree.Element(qualifiedFeatureType)
# log.debug(request.environ)
# storagedescriptor.fileName.CONTENT='http://'+request.environ['HTTP_HOST']+'/filestore/' +storagedescriptor.fileName.CONTENT
# csmlelem=storagedescriptor.toXML(emptyelem)
# storagedescXML=etree.tostring(csmlelem)
# return [csmlfi], [storagedescXML]
| 50.226667 | 157 | 0.765729 | # BSD Licence
# Copyright (c) 2009, Science & Technology Facilities Council (STFC)
# All rights reserved.
#
# See the LICENSE file in the source distribution of this software for
# the full license text.
""" This module contains CSML WFS 2.0 stored query implementations.
To add a new stored query, write the functional code here, then describe the functionality as
a new query in:
wfs_csmllayer.CSMLStoredQueries.queries
These query functions should either return a resultset (python list) containing the features,
or 2 lists, one being the resultset, and the other containing string representations of XML addtional
objects - in CSML these are going to be StorageDescriptors.
"""
import wfs_csmllayer
import logging
log = logging.getLogger(__name__)
import csml.parser, csml.csmllibs.csmlextra
from xml.etree import ElementTree as etree
from pylons import request, config
class dummyfeaturefortesting(object):
    """Minimal stand-in feature used by the demo stored query; renders its value as GML."""

    def __init__(self, x):
        self.x = x

    def toGML(self):
        """Return the wrapped value inside a <someGML> element."""
        return '<someGML>%s</someGML>' % self.x
def _getCSMLFilename():
    """Return a fresh random output file name of the form 'csml<ID>.nc'."""
    return 'csml%s.nc'%(csml.csmllibs.csmlextra.getRandomID())

def query_one_func(featureset, arg1='string1', arg2='string2'):
    """Demo stored query: ignores *featureset*; returns one dummy feature wrapping 'arg1 arg2'."""
    result = [dummyfeaturefortesting(arg1 + ' ' + arg2)]
    return result

def query_getFeatureByPhenomenon(featureset, phenomenon):
    """Stored query: features whose 'csml:parameter' property equals *phenomenon*."""
    return featureset.getFeaturesByPropertyEqualTo('csml:parameter', phenomenon)

def query_getFeatureById(featureset, id):
    """Stored query: one-element list holding the feature with GML id *id*."""
    # NOTE(review): the parameter shadows the builtin id(); renaming would
    # break keyword callers, so it is kept as-is.
    return [featureset.getFeatureByGMLid(id)]
def query_extractPointFromPointSeries(featureset, featureid, timeinstance):
    """Stored query: subset a PointSeries feature to the single value at *timeinstance*.

    The extracted value is inlined into the returned feature's RangeSet, so
    only [CSMLFeatureInstance] is returned -- no storage descriptor.
    """
    #TODO: configure output directory
    #TODO: error handling
    csmloutputdir=config['cows.csml.publish_dir']
    feature=featureset.getFeatureByGMLid(featureid)._feature
    ncname=_getCSMLFilename()
    newfeature, netcdfpath, storagedescriptor=feature.subsetToPoint(time=str(timeinstance), ncname=ncname, outputdir=csmloutputdir)
    #modify the rangeset of the feature to store the data inline and discard the storage descriptor.
    storagedescriptor.fileName.CONTENT=csmloutputdir+'/'+storagedescriptor.fileName.CONTENT
    data=storagedescriptor.getData()
    qlist=csml.parser.MeasureOrNullList()
    qlist.uom=newfeature.value.rangeSet.valueArray.valueComponent.uom
    qlist.CONTENT=str(data[0])
    rs=csml.parser.RangeSet()
    rs.quantityList=qlist
    newfeature.value.rangeSet=rs
    #wrap the feature in a wfs CSMLFeatureInstance object:
    csmlfi=wfs_csmllayer.CSMLFeatureInstance('subset feature title', 'subset feature abstract', newfeature)
    return [csmlfi]
    #This following code is used to maintain the storage descriptor filename consistency, but has been commented out for this
    #operation as there is only a single point value. However code is left here as reminder as it will be needed
    #for other feature types soon
    #change the path of the storage descriptor to the download url - assumes routes maps to filestore
    #And serialise the storage descriptor as XML.
#    qualifiedFeatureType='{http://ndg.nerc.ac.uk/csml}' + storagedescriptor.__class__.__name__
#    emptyelem=etree.Element(qualifiedFeatureType)
#    log.debug(request.environ)
#    storagedescriptor.fileName.CONTENT='http://'+request.environ['HTTP_HOST']+'/filestore/' +storagedescriptor.fileName.CONTENT
#    csmlelem=storagedescriptor.toXML(emptyelem)
#    storagedescXML=etree.tostring(csmlelem)
#    return [csmlfi], [storagedescXML]

def query_extractPointSeriesFromPointSeries(featureset, featureid, starttime, endtime):
    """Stored query: subset a PointSeries feature to the time range [starttime, endtime].

    Returns ([CSMLFeatureInstance], [storage-descriptor XML string]); the
    descriptor file name is rewritten to the /filestore download URL.
    """
    csmloutputdir=config['cows.csml.publish_dir']
    feature=featureset.getFeatureByGMLid(featureid)._feature
    timerange=(starttime, endtime,)
    subsetdictionary={'times':timerange}
    ncname=_getCSMLFilename()
    newfeature, netcdfpath, storagedescriptor=feature.subsetToPointSeries(outputdir=csmloutputdir, ncname=ncname,**subsetdictionary)
    #wrap this in a wfs CSMLFeatureInstance object:
    csmlfi=wfs_csmllayer.CSMLFeatureInstance('subset feature title', 'subset feature abstract', newfeature)
    #And serialise the storage descriptor as XML.
    qualifiedFeatureType='{http://ndg.nerc.ac.uk/csml}' + storagedescriptor.__class__.__name__
    emptyelem=etree.Element(qualifiedFeatureType)
    log.debug(request.environ)
    #change the path of the storage descriptor to the download url - assumes routes maps to filestore
    #TODO: THIS SHOULD HANDLE SERVER PROXIES.
    storagedescriptor.fileName.CONTENT='http://'+request.environ['HTTP_HOST']+'/filestore/' +ncname
    csmlelem=storagedescriptor.toXML(emptyelem)
    storagedescXML=etree.tostring(csmlelem)
    return [csmlfi], [storagedescXML]

def query_extractPointSeriesFromGridSeries(featureset, featureid, latitude, longitude, mintime, maxtime):
    """Stored query: extract the time series at (latitude, longitude) from a GridSeries feature.

    Returns ([CSMLFeatureInstance], [storage-descriptor XML string]).
    """
    csmloutputdir=config['cows.csml.publish_dir']
    feature=featureset.getFeatureByGMLid(featureid)._feature
    timerange=(mintime, maxtime,)
    lat=float(latitude)
    lon=float(longitude)
    subsetdictionary={'time':timerange, 'latitude':lat, 'longitude':lon}
    ncname=_getCSMLFilename()
    newfeature, netcdfpath, storagedescriptor=feature.subsetToPointSeries(outputdir=csmloutputdir, ncname=ncname,**subsetdictionary)
    #wrap this in a wfs CSMLFeatureInstance object:
    csmlfi=wfs_csmllayer.CSMLFeatureInstance('subset feature title', 'subset feature abstract', newfeature)
    #And serialise the storage descriptor as XML.
    qualifiedFeatureType='{http://ndg.nerc.ac.uk/csml}' + storagedescriptor.__class__.__name__
    emptyelem=etree.Element(qualifiedFeatureType)
    log.debug(request.environ)
    #change the path of the storage descriptor to the download url - assumes routes maps to filestore
    #TODO: THIS SHOULD HANDLE SERVER PROXIES.
    storagedescriptor.fileName.CONTENT='http://'+request.environ['HTTP_HOST']+'/filestore/' +ncname
    csmlelem=storagedescriptor.toXML(emptyelem)
    storagedescXML=etree.tostring(csmlelem)
    return [csmlfi], [storagedescXML]

def query_extractGridSeriesFromGridSeries(featureset, featureid, bbox, mintime, maxtime):
    """Stored query: subset a GridSeries feature to a bounding box and time range.

    bbox: comma-separated 'minlon,minlat,maxlon,maxlat' string.
    Returns ([CSMLFeatureInstance], [storage-descriptor XML string]).
    """
    #TODO factor out code in common with other subsetting queries
    csmloutputdir=config['cows.csml.publish_dir']
    feature=featureset.getFeatureByGMLid(featureid)._feature
    timerange=(mintime, maxtime,)
    #break the bounding box into latitude and longitude:
    #TODO, this really needs to be generic and refer to the srs of the underlying feature.
    bb=str(bbox).split(',')
    lat=(float(bb[1]),float(bb[3]))
    lon=(float(bb[0]),float(bb[2]))
    log.debug('requesting latitude: %s, longitude: %s'%(lat, lon))
    ncname=_getCSMLFilename()
    newfeature, netcdfpath, storagedescriptor=feature.subsetToGridSeries(time=timerange, latitude=lat, longitude=lon, ncname=ncname, outputdir=csmloutputdir)
    #wrap this in a wfs CSMLFeatureInstance object:
    csmlfi=wfs_csmllayer.CSMLFeatureInstance('subset feature title', 'subset feature abstract', newfeature)
    #And serialise the storage descriptor as XML.
    qualifiedFeatureType='{http://ndg.nerc.ac.uk/csml}' + storagedescriptor.__class__.__name__
    emptyelem=etree.Element(qualifiedFeatureType)
    log.debug(request.environ)
    #change the path of the storage descriptor to the download url - assumes routes maps to filestore
    storagedescriptor.fileName.CONTENT='http://'+request.environ['HTTP_HOST']+'/filestore/' +storagedescriptor.fileName.CONTENT
    csmlelem=storagedescriptor.toXML(emptyelem)
    storagedescXML=etree.tostring(csmlelem)
    return [csmlfi], [storagedescXML]
| 5,914 | 16 | 263 |
5bf3053088af5e798861a4a5b0c9a40c7343b799 | 1,383 | py | Python | markov_chain/specialists.py | pesikj/DataAnalysisUsingPython | 00269a7a7b5388fbbdcf3ddadd951a80a07f9c3a | [
"MIT"
] | null | null | null | markov_chain/specialists.py | pesikj/DataAnalysisUsingPython | 00269a7a7b5388fbbdcf3ddadd951a80a07f9c3a | [
"MIT"
] | null | null | null | markov_chain/specialists.py | pesikj/DataAnalysisUsingPython | 00269a7a7b5388fbbdcf3ddadd951a80a07f9c3a | [
"MIT"
] | null | null | null | import numpy as np
from numpy.linalg import inv
from scipy.stats import binom
#a1 = failed
#a2 = passed
# Probability of passing the test of course 1..4 respectively.
testPassingProbabilities = [0.7, 0.6, 0.8, 0.9]
# Employees currently enrolled in course 1..4.
noOfSpecialistsCurrent = [100, 70, 50, 60]
wantedSpecialists = 5
wantedWithProbability = 0.9
# States: [0]=failed (absorbing), [1]=passed (absorbing), [2..5]=course 1..4.
transitionMatrix = np.zeros((len(testPassingProbabilities) + 2, len(testPassingProbabilities) + 2))
transitionMatrix[0, 0] = 1
transitionMatrix[1, 1] = 1
for i in range(2, len(testPassingProbabilities) + 1):
    listIndex = i - 2
    transitionMatrix[i, 0] = 1 - testPassingProbabilities[listIndex]
    transitionMatrix[i, i+1] = testPassingProbabilities[listIndex]
# Final course: passing absorbs into the "passed" state.
transitionMatrix[i+1, 0] = 1 - testPassingProbabilities[-1]
transitionMatrix[i+1, 1] = testPassingProbabilities[-1]
# Absorbing-chain algebra: absorption probabilities B = N R with
# fundamental matrix N = (I - Q)^-1.
Q = transitionMatrix[2:,2:]
I = np.identity(len(testPassingProbabilities))
N = inv(I - Q)
R = transitionMatrix[2:,:2]
B = np.dot(N,R)
passVector = B[:, 1]  # P(eventually passing | currently in course k)
noOfSpecialistsFinal = round(sum(passVector * noOfSpecialistsCurrent), 3)
print(f"The expected number of specialists is {noOfSpecialistsFinal}.")
# Smallest intake for which P(at least wantedSpecialists graduate) beats the target.
newStudentPassingProbability = float(passVector[0])
for employeesSentToCourse in range(wantedSpecialists, 100):
    probability = 1 - binom.cdf(wantedSpecialists - 1, employeesSentToCourse, newStudentPassingProbability)
    if probability > wantedWithProbability:
        print(f"{employeesSentToCourse} must be sent to the course.")
break | 34.575 | 107 | 0.748373 | import numpy as np
# Markov-chain model of a four-course specialist training programme.
# Absorbing states: a1 = failed (dropped out), a2 = passed all courses.
# Repaired: the leading "import numpy as np" and the final "break" line were
# corrupted by fused dataset residue in this copy; both restored.
import numpy as np
from numpy.linalg import inv
from scipy.stats import binom

# Probability of passing the test of course 1..4 respectively.
testPassingProbabilities = [0.7, 0.6, 0.8, 0.9]
# Employees currently enrolled in course 1..4.
noOfSpecialistsCurrent = [100, 70, 50, 60]
wantedSpecialists = 5
wantedWithProbability = 0.9

# Transition matrix over states [failed, passed, course1..course4];
# both absorbing states keep probability 1 on themselves.
n_courses = len(testPassingProbabilities)
transitionMatrix = np.zeros((n_courses + 2, n_courses + 2))
transitionMatrix[0, 0] = 1
transitionMatrix[1, 1] = 1
for i in range(2, n_courses + 1):
    listIndex = i - 2
    transitionMatrix[i, 0] = 1 - testPassingProbabilities[listIndex]   # fail -> drop out
    transitionMatrix[i, i + 1] = testPassingProbabilities[listIndex]   # pass -> next course
# Final course: passing absorbs into the "passed" state.
transitionMatrix[i + 1, 0] = 1 - testPassingProbabilities[-1]
transitionMatrix[i + 1, 1] = testPassingProbabilities[-1]

# Absorbing-chain algebra: B = N R with fundamental matrix N = (I - Q)^-1.
Q = transitionMatrix[2:, 2:]
I = np.identity(n_courses)
N = inv(I - Q)
R = transitionMatrix[2:, :2]
B = np.dot(N, R)
passVector = B[:, 1]  # P(eventually passing | currently in course k)
noOfSpecialistsFinal = round(sum(passVector * noOfSpecialistsCurrent), 3)
print(f"The expected number of specialists is {noOfSpecialistsFinal}.")

# Smallest intake size for which P(at least wantedSpecialists graduate)
# exceeds the target, each new student being Bernoulli(passVector[0]).
newStudentPassingProbability = float(passVector[0])
for employeesSentToCourse in range(wantedSpecialists, 100):
    probability = 1 - binom.cdf(wantedSpecialists - 1, employeesSentToCourse, newStudentPassingProbability)
    if probability > wantedWithProbability:
        print(f"{employeesSentToCourse} must be sent to the course.")
        break
83de024d990b9741a3362c8d9bb1b9a580904364 | 14,833 | py | Python | eosfactory/shell/wallet.py | ephdtrg/eosfactory | 721843707cb277618142c6d9518e3f231cae3b79 | [
"MIT"
] | null | null | null | eosfactory/shell/wallet.py | ephdtrg/eosfactory | 721843707cb277618142c6d9518e3f231cae3b79 | [
"MIT"
] | null | null | null | eosfactory/shell/wallet.py | ephdtrg/eosfactory | 721843707cb277618142c6d9518e3f231cae3b79 | [
"MIT"
] | null | null | null | import os
import json
import inspect
import eosfactory.core.logger as logger
import eosfactory.core.errors as errors
import eosfactory.core.setup as setup
import eosfactory.core.interface as interface
import eosfactory.core.teos as teos
if setup.node_api == "cleos":
import eosfactory.core.cleos as cleos
elif setup.node_api == "eosjs":
import eosfactory.core.eosjs as cleos
import eosfactory.core.manager as manager
class Wallet(cleos.WalletCreate):
''' Create a new wallet locally and operate it.
- **parameters**::
name: The name of the new wallet, defaults to `default`.
is_verbose: If `0`, do not print unless on error,
default is `1`.
- **attributes**::
name: The name of the wallet.
password: The password returned by wallet create.
error: Whether any error ocurred.
json: The json representation of the object.
is_verbose: Verbosity at the constraction time.
'''
wallet_keys = None
wallet = None
globals = None
setup.node_api
def index(self):
''' Lists opened wallets, * marks unlocked.
Returns `cleos.WalletList` object
'''
result = cleos.WalletList(is_verbose=0)
logger.OUT(result.out_msg)
    def open(self):
        ''' Opens the wallet.
        Returns `WalletOpen` object
        '''
        # NOTE(review): the docstring promises a return value but the result
        # is only logged (same pattern in lock/lock_all/unlock below).
        result = cleos.WalletOpen(self.name, is_verbose=False)
        logger.TRACE('''
        * Wallet ``{}`` opened.
        '''.format(self.name))

    def lock(self):
        ''' Lock the wallet.
        Returns `cleos.WalletLock` object.
        '''
        result = cleos.WalletLock(self.name, is_verbose=False)
        logger.TRACE("Wallet `{}` locked.".format(self.name))

    def lock_all(self):
        ''' Lock the wallet.
        Returns `cleos.WalletLock` object.
        '''
        # locks every open wallet, not just this one
        result = cleos.WalletLockAll(is_verbose=False)
        logger.TRACE("All wallets locked.")

    def unlock(self):
        ''' Unlock the wallet.
        Returns `WalletUnlock` object.
        '''
        result = cleos.WalletUnlock(
            self.name, self.password, is_verbose=False)
        logger.TRACE('''
        * Wallet ``{}`` unlocked.
        '''.format(self.name))

    def open_unlock(self):
        ''' Open&Unlock automatics.
        '''
        # convenience helper used before every wallet command: make sure the
        # wallet is both open and unlocked
        cleos.WalletOpen(self.name, is_verbose=False)
        cleos.WalletUnlock(
            self.name, self.password, is_verbose=False)
def remove_key(self, account_or_key):
'''
'''
self.open_unlock()
removed_keys = []
account_name = None
if isinstance(account_or_key, interface.Account):
cleos.WalletRemove_key(
interface.key_arg(
account_or_key, is_owner_key=True, is_private_key=True),
self.name, is_verbose=False)
removed_keys.append(interface.key_arg(
account_or_key, is_owner_key=True, is_private_key=False))
cleos.WalletRemove_key(
interface.key_arg(
account_or_key, is_owner_key=False, is_private_key=True),
self.name, is_verbose=False)
removed_keys.append(interface.key_arg(
account_or_key, is_owner_key=False, is_private_key=False))
else:
cleos.WalletRemove_key(
interface.key_arg(
account_or_key, is_private_key=True),
self.name, is_verbose=False)
removed_keys.append(interface.key_arg(
account_or_key, is_private_key=False))
if account_name is None:
if len(removed_keys) > 0:
logger.TRACE('''
Removing key '{}'
from the wallet '{}'
'''.format(removed_keys[0], self.name),
verbosity
)
else:
logger.TRACE('''
Removing keys of the account '{}' from the wallet '{}'
'''.format(account_name, self.name)
)
wallet_keys = cleos.WalletKeys(is_verbose=False)
for key in removed_keys:
if key in wallet_keys.json:
raise errors.Error('''
Failed to remove key '{}' from the wallet '{}'
'''.format(key, self.name))
logger.TRACE('''
* Cross-checked: all listed keys removed from the wallet.
''')
return True
    def import_key(self, account_or_key):
        ''' Imports private keys of an account into wallet.
        Returns list of `cleos.WalletImport` objects
        '''
        self.open_unlock()
        imported_keys = []
        account_name = None
        if isinstance(account_or_key, interface.Account):
            account_name = account_or_key.name
            # import the account's owner key pair ...
            wallet_import = cleos.WalletImport(
                interface.key_arg(
                    account_or_key, is_owner_key=True, is_private_key=True),
                self.name, is_verbose=False)
            imported_keys.append(interface.key_arg(
                account_or_key, is_owner_key=True, is_private_key=False))
            # ... and its active key pair
            wallet_import = cleos.WalletImport(
                interface.key_arg(
                    account_or_key, is_owner_key=False, is_private_key=True),
                self.name, is_verbose=False)
            imported_keys.append(interface.key_arg(
                account_or_key, is_owner_key=False, is_private_key=False))
            logger.TRACE('''
                * Importing keys of the account ``{}`` into the wallet ``{}``
                '''.format(account_name, self.name)
                )
        else:
            # a single raw private key
            wallet_import = cleos.WalletImport(
                interface.key_arg(account_or_key, is_private_key=True),
                self.name, is_verbose=False)
            logger.TRACE('''
                * Importing keys into the wallet ``{}``
                '''.format(self.name)
                )
            # NOTE(review): raw keys return here, so the cross-check below
            # only ever runs for Account objects.
            return True

        wallet_keys = cleos.WalletKeys(is_verbose=False)

        if len(imported_keys) == 0:
            raise errors.Error('''
        The list of imported keys is empty.
        ''')

        ok = True
        for key in imported_keys:
            if not key in wallet_keys.json:
                ok = False
                raise errors.Error('''
        Failed to import keys of the account '{}' into the wallet '{}'
        '''.format(
                    account_name if account_name else "n/a", self.name))
        if ok:
            logger.TRACE('''
                * Cross-checked: all account keys are in the wallet.
                ''')
        return True
    def restore_accounts(self):
        '''Recreate cached account objects whose keys are held in this wallet.

        Reads the saved account map, keeps only accounts that still exist on
        the node and whose owner and active keys are present in the wallet,
        recreates the matching account objects, and saves the pruned map.
        '''
        self.open_unlock()

        account_map = manager.account_map()
        new_map = {}
        wallet_keys = cleos.WalletKeys(is_verbose=0)
        if len(account_map) > 0:
            logger.INFO('''
                    ######### Restore cached account objects:
                    ''')
            for name, object_name in account_map.items():
                try:
                    account_ = cleos.GetAccount(
                        name, is_info=False, is_verbose=False)
                    if account_.owner_key in wallet_keys.json and \
                            account_.active_key in wallet_keys.json:
                        new_map[name] = object_name
                        # deferred import avoids a circular dependency
                        from eosfactory.shell.account import create_account
                        create_account(
                            object_name, name, restore=True, verbosity=None)
                except errors.AccountDoesNotExistError:
                    # account vanished from the chain: drop it from the map
                    pass

            manager.save_account_map(new_map)
        else:
            logger.INFO('''
        * The wallet is empty.
        ''')
    def keys(self):
        ''' Lists public keys from all unlocked wallets.
        Stores the result in ``self.wallet_keys`` (a `cleos.WalletKeys`
        object); nothing is returned.
        '''
        self.open_unlock()
        self.wallet_keys = cleos.WalletKeys(is_verbose=False)
        # NOTE(review): "walets" typo below is in a runtime log string and is
        # deliberately left unchanged here.
        logger.TRACE('''
            Keys in all open walets:
            {}
            '''.format(self.wallet_keys.out_msg))

    def private_keys(self):
        ''' Lists private keys from all unlocked wallets.
        Stores the result in ``self.wallet_private_keys``
        (a `cleos.WalletPrivateKeys` object); nothing is returned.
        '''
        self.open_unlock()
        self.wallet_private_keys = cleos.WalletPrivateKeys(is_verbose=False)
        logger.TRACE('''
            Keys in all open walets:
            {}
            '''.format(json.dumps(
            self.wallet_private_keys.json, indent=4)))
    def map_account(self, account_object_name, account_object):
        '''Record the mapping account name -> script object name in the
        account-map file in the wallet directory, unless the object name is
        already taken.
        '''
        if not self.is_name_taken(account_object_name, account_object.name):
            account_map_json = manager.account_map(self)
            if account_map_json is None:
                # the map file is unreadable; silently give up
                return

            account_map_json[account_object.name] = account_object_name

            with open(self.wallet_dir + setup.account_map, "w") as out:
                out.write(json.dumps(
                    account_map_json, indent=3, sort_keys=True))

            logger.TRACE('''
                * Account object ``{}`` stored in the file
                    ``{}`` in the wallet directory:
                    {}
                '''.format(
                    account_object_name,
                    setup.account_map,
                    self.wallet_dir + setup.account_map))
| 34.256351 | 103 | 0.523697 | import os
import json
import inspect
import eosfactory.core.logger as logger
import eosfactory.core.errors as errors
import eosfactory.core.setup as setup
import eosfactory.core.interface as interface
import eosfactory.core.teos as teos
if setup.node_api == "cleos":
import eosfactory.core.cleos as cleos
elif setup.node_api == "eosjs":
import eosfactory.core.eosjs as cleos
import eosfactory.core.manager as manager
def wallet_json_read():
    '''Read the wallet-name -> password map stored in the wallet directory.

    Returns {} when the file is missing, unreadable or not valid JSON.
    '''
    try:
        with open(manager.wallet_dir() + setup.password_map, "r") as map_file:
            return json.load(map_file)
    except (IOError, OSError, ValueError):
        # narrowed from a bare ``except:`` so that e.g. KeyboardInterrupt is
        # not silently turned into an empty password map; the local variable
        # no longer shadows the ``input`` builtin
        return {}
def wallet_json_write(wallet_json):
    '''Persist the wallet-name -> password map in the wallet directory.'''
    # NOTE(review): mode "w+" (write + read) is used where plain "w" would do.
    with open(manager.wallet_dir() + setup.password_map, "w+") as out:
        json.dump(wallet_json, out)


def create_wallet(
        name=None, password=None, verbosity=None, file=False,
        globals=None):
    '''Create (or reopen) the singleton wallet and restore cached accounts.

    globals: namespace that receives restored account objects; when omitted,
        the caller's module globals are captured from the call stack.
    '''
    if globals:
        Wallet.globals = globals
    else:
        # one frame up the stack == the caller's module globals
        Wallet.globals = inspect.stack()[1][0].f_globals

    Wallet.wallet = Wallet(name, password, verbosity, file)
    Wallet.wallet.restore_accounts()


def get_wallet():
    '''Return the singleton Wallet created by create_wallet(), or None.'''
    return Wallet.wallet
class Wallet(cleos.WalletCreate):
''' Create a new wallet locally and operate it.
- **parameters**::
name: The name of the new wallet, defaults to `default`.
is_verbose: If `0`, do not print unless on error,
default is `1`.
- **attributes**::
name: The name of the wallet.
password: The password returned by wallet create.
error: Whether any error ocurred.
json: The json representation of the object.
is_verbose: Verbosity at the constraction time.
'''
wallet_keys = None
wallet = None
globals = None
setup.node_api
def __init__(self, name=None, password="", verbosity=None, file=False):
cleos.set_local_nodeos_address_if_none()
if name is None:
name = setup.wallet_default_name
else:
name = setup.file_prefix() + name
if not self.wallet is None:
raise errors.Error('''
It can be only one ``Wallet`` object in the script; there is one
named ``{}``.
'''.format(wallet.name))
return
self.wallet_dir = manager.wallet_dir()
logger.INFO('''
* Wallet name is ``{}``, wallet directory is
{}.
'''.format(name, self.wallet_dir))
if not password: # look for password:
passwords = wallet_json_read()
if name in passwords:
password = passwords[name]
logger.INFO('''
The password is restored from the file:
{}
'''.format(
os.path.join(self.wallet_dir, setup.password_map)),
verbosity)
cleos.WalletCreate.__init__(self, name, password, is_verbose=False)
if self.is_created: # new password
logger.INFO('''
* Created wallet ``{}``.
'''.format(self.name),
verbosity
)
###############################################################################
# TO DO: detect live node!!!!!!!!!!
if manager.is_local_testnet() or file or True:
###############################################################################
password_map = wallet_json_read()
password_map[name] = self.password
wallet_json_write(password_map)
logger.INFO('''
* Password is saved to the file ``{}`` in the wallet directory.
'''.format(setup.password_map),
verbosity
)
else:
logger.OUT(self.out_msg)
else:
logger.TRACE('''
Opened wallet ``{}``
'''.format(self.name))
def index(self):
''' Lists opened wallets, * marks unlocked.
Returns `cleos.WalletList` object
'''
result = cleos.WalletList(is_verbose=0)
logger.OUT(result.out_msg)
def open(self):
''' Opens the wallet.
Returns `WalletOpen` object
'''
result = cleos.WalletOpen(self.name, is_verbose=False)
logger.TRACE('''
* Wallet ``{}`` opened.
'''.format(self.name))
def lock(self):
''' Lock the wallet.
Returns `cleos.WalletLock` object.
'''
result = cleos.WalletLock(self.name, is_verbose=False)
logger.TRACE("Wallet `{}` locked.".format(self.name))
def lock_all(self):
''' Lock the wallet.
Returns `cleos.WalletLock` object.
'''
result = cleos.WalletLockAll(is_verbose=False)
logger.TRACE("All wallets locked.")
def unlock(self):
''' Unlock the wallet.
Returns `WalletUnlock` object.
'''
result = cleos.WalletUnlock(
self.name, self.password, is_verbose=False)
logger.TRACE('''
* Wallet ``{}`` unlocked.
'''.format(self.name))
def open_unlock(self):
''' Open&Unlock automatics.
'''
cleos.WalletOpen(self.name, is_verbose=False)
cleos.WalletUnlock(
self.name, self.password, is_verbose=False)
def remove_key(self, account_or_key):
'''
'''
self.open_unlock()
removed_keys = []
account_name = None
if isinstance(account_or_key, interface.Account):
cleos.WalletRemove_key(
interface.key_arg(
account_or_key, is_owner_key=True, is_private_key=True),
self.name, is_verbose=False)
removed_keys.append(interface.key_arg(
account_or_key, is_owner_key=True, is_private_key=False))
cleos.WalletRemove_key(
interface.key_arg(
account_or_key, is_owner_key=False, is_private_key=True),
self.name, is_verbose=False)
removed_keys.append(interface.key_arg(
account_or_key, is_owner_key=False, is_private_key=False))
else:
cleos.WalletRemove_key(
interface.key_arg(
account_or_key, is_private_key=True),
self.name, is_verbose=False)
removed_keys.append(interface.key_arg(
account_or_key, is_private_key=False))
if account_name is None:
if len(removed_keys) > 0:
logger.TRACE('''
Removing key '{}'
from the wallet '{}'
'''.format(removed_keys[0], self.name),
verbosity
)
else:
logger.TRACE('''
Removing keys of the account '{}' from the wallet '{}'
'''.format(account_name, self.name)
)
wallet_keys = cleos.WalletKeys(is_verbose=False)
for key in removed_keys:
if key in wallet_keys.json:
raise errors.Error('''
Failed to remove key '{}' from the wallet '{}'
'''.format(key, self.name))
logger.TRACE('''
* Cross-checked: all listed keys removed from the wallet.
''')
return True
    def import_key(self, account_or_key):
        ''' Imports private keys of an account into wallet.
        Returns list of `cleos.WalletImport` objects
        '''
        self.open_unlock()
        imported_keys = []
        account_name = None
        if isinstance(account_or_key, interface.Account):
            # Import both the owner and the active private key; remember the
            # public keys for the cross-check below.
            account_name = account_or_key.name
            wallet_import = cleos.WalletImport(
                interface.key_arg(
                    account_or_key, is_owner_key=True, is_private_key=True),
                self.name, is_verbose=False)
            imported_keys.append(interface.key_arg(
                account_or_key, is_owner_key=True, is_private_key=False))
            wallet_import = cleos.WalletImport(
                interface.key_arg(
                    account_or_key, is_owner_key=False, is_private_key=True),
                self.name, is_verbose=False)
            imported_keys.append(interface.key_arg(
                account_or_key, is_owner_key=False, is_private_key=False))
            logger.TRACE('''
        * Importing keys of the account ``{}`` into the wallet ``{}``
        '''.format(account_name, self.name)
            )
        else:
            wallet_import = cleos.WalletImport(
                interface.key_arg(account_or_key, is_private_key=True),
                self.name, is_verbose=False)
            logger.TRACE('''
        * Importing keys into the wallet ``{}``
        '''.format(self.name)
            )
            # NOTE(review): returns here for a bare key, so the cross-check
            # below only ever runs for Account imports -- confirm intended.
            return True
        wallet_keys = cleos.WalletKeys(is_verbose=False)
        if len(imported_keys) == 0:
            raise errors.Error('''
        The list of imported keys is empty.
        ''')
        ok = True
        for key in imported_keys:
            if not key in wallet_keys.json:
                # The raise below makes the `ok` flag effectively constant:
                # the first missing key aborts the method.
                ok = False
                raise errors.Error('''
        Failed to import keys of the account '{}' into the wallet '{}'
        '''.format(
            account_name if account_name else "n/a", self.name))
        if ok:
            logger.TRACE('''
        * Cross-checked: all account keys are in the wallet.
        ''')
        return True
def keys_in_wallets(self, keys):
self.open_unlock()
result = cleos.WalletKeys(is_verbose=False)
for key in keys:
if not key in result.json:
return False
return True
    def restore_accounts(self):
        ''' Re-create account objects cached in the account map whose
        owner and active keys are both present in this wallet; accounts
        that no longer exist on chain are dropped from the map.
        '''
        self.open_unlock()
        account_map = manager.account_map()
        new_map = {}
        wallet_keys = cleos.WalletKeys(is_verbose=0)
        if len(account_map) > 0:
            logger.INFO('''
            ######### Restore cached account objects:
            ''')
            for name, object_name in account_map.items():
                try:
                    account_ = cleos.GetAccount(
                        name, is_info=False, is_verbose=False)
                    # Restore only accounts fully controlled by this wallet.
                    if account_.owner_key in wallet_keys.json and \
                            account_.active_key in wallet_keys.json:
                        new_map[name] = object_name
                        # Local import avoids a circular import at module load.
                        from eosfactory.shell.account import create_account
                        create_account(
                            object_name, name, restore=True, verbosity=None)
                except errors.AccountDoesNotExistError:
                    pass
            # Persist the pruned map (stale entries are silently dropped).
            manager.save_account_map(new_map)
        else:
            logger.INFO('''
        * The wallet is empty.
        ''')
    def delete_globals(self):
        '''Remove every mapped account object from ``Wallet.globals``.'''
        # NOTE(review): deletes unconditionally -- if a mapped object name is
        # not present in Wallet.globals, `del` raises KeyError; confirm that
        # callers guarantee all mapped names are registered.
        account_map = manager.account_map()
        for name, object_name in account_map.items():
            del Wallet.globals[object_name]
def stop(self):
cleos.WalletStop()
def keys(self):
''' Lists public keys from all unlocked wallets.
Returns `cleos.WalletKeys` object.
'''
self.open_unlock()
self.wallet_keys = cleos.WalletKeys(is_verbose=False)
logger.TRACE('''
Keys in all open walets:
{}
'''.format(self.wallet_keys.out_msg))
def private_keys(self):
''' Lists public keys from all unlocked wallets.
Returns `cleos.WalletKeys` object.
'''
self.open_unlock()
self.wallet_private_keys = cleos.WalletPrivateKeys(is_verbose=False)
logger.TRACE('''
Keys in all open walets:
{}
'''.format(json.dumps(
self.wallet_private_keys.json, indent=4)))
    def edit_account_map(self, text_editor="nano"):
        '''Open the account-map file in *text_editor* (delegates to manager).'''
        manager.edit_account_map(text_editor)
    def is_name_taken(self, account_object_name, account_name):
        ''' Check whether *account_object_name* is already mapped to an
        account other than *account_name*.

        Interactive: when the name is taken, offers to edit the account
        map (answer "y" re-checks in a loop); any other answer restores
        the temporarily removed global and raises `errors.Error`.
        Returns False when there is no account map; falls through
        (implicitly returning None, i.e. falsy) when the name is free.
        '''
        while True:
            account_map_json = manager.account_map(self)
            if account_map_json is None:
                return False
            is_taken = False
            for name, object_name in account_map_json.items():
                if object_name == account_object_name:
                    if not name == account_name:
                        logger.OUT('''
                The given account object name
                ``{}``
                points to an existing account, of the name {},
                mapped in a file in directory:
                {}
                Cannot overwrite it.
                However, you can free the name by changing the mapping.
                Do you want to edit the file?
                '''.format(
                    account_object_name, name, self.wallet_dir))
                        is_taken = True
                        break
            if is_taken:
                # Temporarily unregister the clashing global so an edited map
                # can re-use the name; restored below if the user declines.
                temp = None
                if account_object_name in Wallet.globals:
                    temp = Wallet.globals[account_object_name]
                    del Wallet.globals[account_object_name]
                answer = input("y/n <<< ")
                if answer == "y":
                    manager.edit_account_map()
                    continue
                else:
                    if temp:
                        Wallet.globals[account_object_name] = temp
                    raise errors.Error('''
        Use the function 'manager.edit_account_map(text_editor="nano")' to edit the file.
        ''')
            else:
                break
    def map_account(self, account_object_name, account_object):
        ''' Persist the mapping ``account name -> account object name`` in
        the account-map file kept in the wallet directory.  Does nothing
        when the object name is already taken or no map file exists.
        '''
        if not self.is_name_taken(account_object_name, account_object.name):
            account_map_json = manager.account_map(self)
            if account_map_json is None:
                return
            account_map_json[account_object.name] = account_object_name
            with open(self.wallet_dir + setup.account_map, "w") as out:
                out.write(json.dumps(
                    account_map_json, indent=3, sort_keys=True))
            logger.TRACE('''
        * Account object ``{}`` stored in the file 
            ``{}`` in the wallet directory:
            {}
        '''.format(
                account_object_name,
                setup.account_map,
                self.wallet_dir + setup.account_map))
| 4,932 | 0 | 265 |
9ff5a5ee62d2ada8f035ed015df587c46f476bb6 | 302 | py | Python | codes/Ex074.py | BelfortJoao/Curso-phyton01 | 79376233be228f39bf548f90b8d9bd5419ac067a | [
"MIT"
] | 3 | 2021-08-17T14:02:14.000Z | 2021-08-19T02:37:30.000Z | codes/Ex074.py | BelfortJoao/Curso-phyton01 | 79376233be228f39bf548f90b8d9bd5419ac067a | [
"MIT"
] | null | null | null | codes/Ex074.py | BelfortJoao/Curso-phyton01 | 79376233be228f39bf548f90b8d9bd5419ac067a | [
"MIT"
] | null | null | null | from random import randint
# Draw six random integers in [1, 10] into an immutable tuple.
x = (randint(1, 10), randint(1, 10), randint(1, 10), randint(1, 10), randint(1, 10), randint(1, 10))
print('sorteei os numeros:', end='')
for n in x:
    print(f'{n}', end=", ")
# max()/min() scan the tuple ("maior"/"menor" = largest/smallest in Portuguese).
print(f"\nO maior valor na ordem foi {max(x)}")
print(f"\nO menor valor na ordem foi {min(x)}")
| 37.75 | 100 | 0.625828 | from random import randint
# Draw six random integers in [1, 10]; the generator performs the same
# sequence of randint calls as six explicit ones.
x = tuple(randint(1, 10) for _ in range(6))
print('sorteei os numeros:', end='')
for drawn in x:
    print(f'{drawn}', end=", ")
print(f"\nO maior valor na ordem foi {max(x)}")
print(f"\nO menor valor na ordem foi {min(x)}")
| 0 | 0 | 0 |
6c38fa0bd9e315dd1842c9792fab88a708b5c6cb | 15,911 | py | Python | extensions/mark_invoices.py | time-track-tool/time-track-tool | a1c280f32a7766e460c862633b748fa206256f24 | [
"MIT"
] | null | null | null | extensions/mark_invoices.py | time-track-tool/time-track-tool | a1c280f32a7766e460c862633b748fa206256f24 | [
"MIT"
] | 1 | 2019-07-03T13:32:38.000Z | 2019-07-03T13:32:38.000Z | extensions/mark_invoices.py | time-track-tool/time-track-tool | a1c280f32a7766e460c862633b748fa206256f24 | [
"MIT"
] | 1 | 2019-05-15T16:01:31.000Z | 2019-05-15T16:01:31.000Z | #! /usr/bin/python
# Copyright (C) 2005 Dr. Ralf Schlatterbeck Open Source Consulting.
# Reichergasse 131, A-3411 Weidling.
# Web: http://www.runtux.com Email: office@runtux.com
# All rights reserved
# ****************************************************************************
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
# ****************************************************************************
from cStringIO import StringIO
from os.path import splitext
from roundup.cgi.actions import Action, EditItemAction
from roundup.cgi import templating
from roundup import hyperdb
from roundup.date import Date, Interval
from roundup.cgi.exceptions import Redirect
# OOoPy is optional: when it is unavailable only `autosuper` is obtained
# (from rsclib); any OOo document transform below would then fail at use.
try :
    import ooopy.Transforms as Transforms
    from ooopy.OOoPy import OOoPy
    from ooopy.Transformer import Transformer, autosuper
    from ooopy.Transforms import renumber_all, get_meta, set_meta
except ImportError :
    from rsclib.autosuper import autosuper
# Actions signal user errors by raising ValueError, aliased as Reject.
Reject = ValueError
# end class Invoice
# end class Unmark_Invoice
# end class Mark_Invoice
# end class OOoPy_Invoice_Wrapper
# end class Generate_Invoice
# end class _Mark_Invoice
# end class Mark_Invoice_Sent
# end class Mark_Single_Invoice_Sent
# end def create_file
# end def handle
# end class Download_Letter
# end def handle
# end class Personalized_Template
class Edit_Payment_Action (EditItemAction, autosuper) :
    """ Remove items that did not change (for which we defined a hidden
        attribute in the mask) from the new items. Then proceed as usual
        like for EditItemAction.
    """
    # NOTE(review): no method bodies appear here -- this copy of the module
    # looks stripped (the surrounding dataset records function/class removal
    # counts); the unfiltered copy of the file contains the full class.
    # end def _editnodes
# end class Edit_Payment_Action
# end def init
| 35.515625 | 80 | 0.539941 | #! /usr/bin/python
# Copyright (C) 2005 Dr. Ralf Schlatterbeck Open Source Consulting.
# Reichergasse 131, A-3411 Weidling.
# Web: http://www.runtux.com Email: office@runtux.com
# All rights reserved
# ****************************************************************************
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
# ****************************************************************************
from cStringIO import StringIO
from os.path import splitext
from roundup.cgi.actions import Action, EditItemAction
from roundup.cgi import templating
from roundup import hyperdb
from roundup.date import Date, Interval
from roundup.cgi.exceptions import Redirect
# OOoPy is optional: when it is unavailable only `autosuper` is obtained
# (from rsclib); any OOo document transform below would then fail at use.
try :
    import ooopy.Transforms as Transforms
    from ooopy.OOoPy import OOoPy
    from ooopy.Transformer import Transformer, autosuper
    from ooopy.Transforms import renumber_all, get_meta, set_meta
except ImportError :
    from rsclib.autosuper import autosuper
# Actions signal user errors by raising ValueError, aliased as Reject.
Reject = ValueError
class Invoice (Action, autosuper) :
    """ Base class of the invoice actions (Python 2 roundup cgi action):
        common helpers for finding marked invoices, selecting the invoice
        template, parsing the request and redirecting afterwards.
    """
    name = 'invoice actions'
    permissionType = 'Edit'
    def marked (self, send_it = False) :
        # Ids of invoices in the current invoice_group; with send_it=True
        # only those additionally flagged for sending.
        marked_spec = {'invoice_group' : self.invoice_group}
        if send_it :
            marked_spec ['send_it'] = True
        return self.db.invoice.filter (None, marked_spec)
    # end def marked
    def get_iv_template (self, iv) :
        """ Get the correct invoice_template for the invoice_level <=
            n_sent for the current invoice.
        """
        db = self.db
        aboprice = db.abo.get (iv ['abo'], 'aboprice')
        iv_tmplates = db.abo_price.get (aboprice, 'invoice_template')
        if not iv_tmplates :
            raise Reject, \
                ( db._ ('No invoice_template defined for all invoices: %s')
                % iv ['invoice_no']
                )
        ivts = [db.invoice_template.getnode (i) for i in iv_tmplates]
        ivts = [i for i in ivts if i ['invoice_level'] <= iv ['n_sent']]
        if not ivts :
            raise Reject, \
                ( db._ ('No matching invoice_template for invoice %s')
                % iv ['invoice_no']
                )
        # NOTE(review): local `max` shadows the builtin for the rest of
        # this method; it holds the template with the highest level.
        max = ivts [0]
        for ivt in ivts :
            if ivt ['invoice_level'] > max ['invoice_level'] :
                max = ivt
        return max
    # end def get_iv_template
    def handle (self) :
        # figure the request
        request = templating.HTMLRequest (self.client)
        filterspec = request.filterspec
        if request.classname != 'invoice' :
            raise Reject, self.db._ ('You can only mark invoices')
        # get invoice_group -- if existing:
        self.invoice_group = None
        try :
            self.invoice_group = filterspec ['invoice_group'][0]
        except KeyError :
            pass
    # end def handle
    def _unmark (self) :
        # Clear the invoice_group mark on every currently-marked invoice.
        for i in self.marked () :
            self.db.invoice.set (i, invoice_group = None)
        self.db.commit ()
    # end def _unmark
    def redirect (self) :
        # Redirect (via exception) to the index page with the 'send' template.
        url = templating.HTMLRequest (self.client).indexargs_url \
            ('' , { '@template' : 'send' })
        raise Redirect, url
    # end def redirect
# end class Invoice
class Unmark_Invoice (Invoice) :
    """ Action that clears the invoice-group mark set by Mark_Invoice. """
    name = 'unmark'
    def handle (self) :
        ''' Remove mark created by Mark_Invoice. '''
        self.__super.handle ()
        self._unmark ()
        self.redirect ()
    # end def handle
# end class Unmark_Invoice
class Mark_Invoice (Invoice) :
    """ Action that selects open invoices due for sending and marks them
        with the requested invoice_group (setting send_it).
    """
    name = 'mark'
    def iv_filter (self, ids) :
        """ Filter invoices for
            - invoice belongs to a running subscription
            - invoice is in the correct self.invoice_group
            - correct interval: We do not want to send invoices before
              the interval of the invoice_template has expired after
              sending the last invoice (last_sent < now - interval)
              where interval is in months
        """
        db = self.db
        retval = []
        for id in ids :
            iv = self.db.invoice.getnode (id)
            abo = self.db.abo.getnode (iv ['abo'])
            # Ended subscription: never send.
            if abo ['end'] :
                continue
            grp = self.db.abo_price.get (abo ['aboprice'], 'invoice_group')
            if grp != self.invoice_group :
                continue
            ivt = self.get_iv_template (iv)
            interval = ivt ['interval']
            # Template interval (months) not yet expired since last send.
            if iv ['last_sent'] > self.now - Interval ('%dm' % interval) :
                continue
            retval.append (iv)
        return retval
    # end def iv_filter
    def handle (self) :
        ''' Mark invoices with the given invoice_group. '''
        self.__super.handle ()
        self.now = Date ('.')
        if self.marked () :
            raise Reject, self.db._ ('invoices are already marked')
        invoice = self.db.invoice
        # Candidates: open invoices whose period starts within one month.
        spec = \
            { 'open' : True
            , 'period_start' : ';1m'
            }
        ids = invoice.filter (None, spec)
        invoices = self.iv_filter (ids)
        for i in invoices :
            invoice.set \
                (i ['id'], send_it = True, invoice_group = self.invoice_group)
        self.db.commit ()
        self.redirect ()
    # end def handle
# end class Mark_Invoice
class OOoPy_Invoice_Wrapper (autosuper) :
    """ Mapping-like adapter used by the OOoPy mail-merge transforms:
        resolves dotted field names (e.g. 'invoice.payer') against the
        database starting from the items 'date', 'invoice' and 'address'.
    """
    def __init__ (self, db, iv, date = None, address = None) :
        self.db = db
        if not date :
            date = Date ('.')
        self.items = {'date' : date}
        if iv :
            self.items ['invoice'] = iv
            if not address :
                address = self._deref ('invoice.payer')
        self.items ['address'] = address
    # end def __init__
    def _pretty (self, item) :
        # Dates get a German-style day/month/year rendering; everything
        # else is stringified and decoded (Python 2 str -> unicode).
        if isinstance (item, Date) :
            return item.pretty ('%d. %m. %Y')
        return str (item).decode ('utf-8')
    # end def _pretty
    def _deref (self, name) :
        """ dereference a dotted name -- we may want to cache this."""
        names = name.split ('.')
        x = self.items [names [0]]
        if not x :
            raise KeyError, names [0]
        for i in names [1:] :
            p = x.cl.properties [i]
            if isinstance (p, hyperdb.Link) :
                x = self.db.getclass (p.classname).getnode (x [i])
            else :
                x = x [i]
            # NOTE(review): returns after the first truthy component, so
            # names with more than two components would stop at the first
            # hop -- confirm callers only use two-level names.
            if x : return x
        return ""
    # end def _split
    def __getitem__ (self, name) :
        return self._pretty (self._deref (name))
    # end def __getitem__
    def has_key (self, name) :
        try :
            self._deref (name)
        except KeyError :
            return False
        return True
    # end def has_key
    __contains__ = has_key
# end class OOoPy_Invoice_Wrapper
class Generate_Invoice (Invoice) :
    """ Action that mail-merges the marked invoices into their OOo
        templates (grouped by template) and streams the concatenated
        document to the browser.
    """
    def handle (self) :
        ''' Prepare invoices for printout and send to browser.'''
        self.__super.handle ()
        mimetype = None
        extension = None
        invoices = [self.db.invoice.getnode (i) for i in self.marked (True)]
        ivts = [(self.get_iv_template (i), i) for i in invoices]
        # Group the wrapped invoices by their template id.
        iv_by_tid = {}
        tp_by_tid = {}
        for i in ivts :
            tid = i [0]['id']
            if tid not in iv_by_tid :
                iv_by_tid [tid] = []
                tp_by_tid [tid] = i [0]
            iv_by_tid [tid].append (OOoPy_Invoice_Wrapper (self.db, i [1]))
        # One mail-merged OOo document (in-memory) per template.
        sio = {}
        for tid, tp in tp_by_tid.iteritems () :
            sio [tid] = StringIO ()
            fileid = self.db.tmplate.get (tp ['tmplate'], 'files')[-1]
            file = StringIO (self.db.file.get (fileid, 'content'))
            extension = splitext (self.db.file.get (fileid, 'name'))[1]
            o = OOoPy (infile = file, outfile = sio [tid])
            t = Transformer \
                ( o.mimetype
                , get_meta (o.mimetype)
                , Transforms.Addpagebreak_Style ()
                , Transforms.Mailmerge (iterator = iv_by_tid [tid])
                , renumber_all (o.mimetype)
                , set_meta (o.mimetype)
                , Transforms.Fix_OOo_Tag ()
                )
            t.transform (o)
            mimetype = o.mimetype
            o.close ()
        # More than one template: concatenate all documents into the first.
        outfiles = sio.values ()
        if len (outfiles) > 1 :
            out = StringIO ()
            o = OOoPy (infile = outfiles [0], outfile = out)
            t = Transformer \
                ( o.mimetype
                , get_meta (o.mimetype)
                , Transforms.Concatenate (* (outfiles [1:]))
                , renumber_all (o.mimetype)
                , set_meta (o.mimetype)
                , Transforms.Fix_OOo_Tag ()
                )
            t.transform (o)
            o.close ()
        else :
            out = outfiles [0]
        h = self.client.additional_headers
        h ['Content-Type'] = mimetype
        h ['Content-Disposition'] = 'inline; filename=inv%s' % extension
        self.client.header ()
        return out.getvalue ()
    # end def handle
# end class Generate_Invoice
class _Mark_Invoice (Invoice) :
    """ Shared implementation for the mark-as-sent actions: creates a
        letter per invoice and bumps its send counter / timestamp.
    """
    def _mark_ivs (self, invoices) :
        ''' Mark the marked invoices as sent.'''
        now = Date ('.')
        for iv in invoices :
            id = iv ['id']
            ivt = self.get_iv_template (iv)
            file = self.db.tmplate.get (ivt ['tmplate'], 'files') [-1]
            # Record a letter with the template file sent to the payer.
            letter = self.db.letter.create \
                ( subject = ivt ['name']
                , address = iv.payer
                , date = now
                , files = [file]
                , messages = []
                , invoice = id
                )
            letters = iv ['letters']
            letters.append (letter)
            self.db.invoice.set \
                ( id
                , letters = letters
                , n_sent = iv ['n_sent'] + 1
                , last_sent = now
                )
    # end def _mark_ivs
# end class _Mark_Invoice
class Mark_Invoice_Sent (_Mark_Invoice) :
    """ Action that marks all marked+flagged invoices as sent, then
        removes the invoice-group mark.
    """
    def handle (self) :
        ''' Mark the marked invoices as sent and remove mark.'''
        self.__super.handle ()
        invoices = [self.db.invoice.getnode (i) for i in self.marked (True)]
        self._mark_ivs (invoices)
        self._unmark ()
        self.db.commit ()
        self.redirect ()
    # end def handle
# end class Mark_Invoice_Sent
class Mark_Single_Invoice_Sent (_Mark_Invoice) :
    """ Action that marks only the invoice in the current context as
        sent and redirects back to its item page.
    """
    def handle (self) :
        '''get current invoice and handle it'''
        self.__super.handle ()
        invoice = self.db.invoice.getnode (self.context ['context'].id)
        invoices = [invoice]
        self._mark_ivs (invoices)
        self.db.commit ()
        raise Redirect ('invoice%s' % invoice.id)
    # end def handle
# end class Mark_Single_Invoice_Sent
class Download_Letter (Action, autosuper) :
    """ Action that renders a stored letter: field-replaces its OOo
        template file with the letter's invoice/date/address data and
        streams the result inline to the browser.
    """
    def create_file (self, file, invoice, date, address) :
        # Non-OOo files are not transformed: redirect to the raw file.
        if ( file.type not in
                ( 'application/vnd.sun.xml.writer'
                , 'application/vnd.oasis.opendocument.text'
                )
            and splitext (file.name) [1] not in ('.sxw', '.odt')
            ) :
            raise Redirect, 'file%s/%s' % (file.id, file.name)
        out = StringIO ()
        o = OOoPy (infile = StringIO (file.content), outfile = out)
        t = Transformer \
            ( o.mimetype
            , Transforms.Editinfo ()
            , Transforms.Field_Replace
                ( replace = OOoPy_Invoice_Wrapper
                    (self.db, invoice, date, address)
                )
            , Transforms.Fix_OOo_Tag ()
            )
        t.transform (o)
        o.close ()
        h = self.client.additional_headers
        h ['Content-Type'] = file.type
        h ['Content-Disposition'] = 'inline; filename=inv.sxw'
        self.client.header ()
        return out.getvalue ()
    # end def create_file
    def handle (self) :
        request = templating.HTMLRequest (self.client)
        filterspec = request.filterspec
        if request.classname != 'letter' :
            raise Reject, self.db._ ('You can only download letters')
        # get id:
        try :
            self.id = request.form ['id'].value
        except KeyError :
            self.id = filterspec ['id'][0]
        letter = self.db.letter.getnode (str (self.id))
        files = letter.files
        if not files :
            raise Redirect, 'letter%s' % self.id
        invoice = letter.invoice
        if invoice :
            invoice = self.db.invoice.getnode (invoice)
        return self.create_file \
            ( self.db.file.getnode (files [0])
            , invoice
            , letter.date
            , self.db.address.getnode (letter.address)
            )
    # end def handle
# end class Download_Letter
class Personalized_Template (Download_Letter) :
def handle (self) :
request = templating.HTMLRequest (self.client)
filterspec = request.filterspec
_ = self.db._
if request.classname != 'address' :
raise Reject, _ ('You can only download templates for an address')
try :
template = request.form ['tmplate'].value
except KeyError :
template = filterspec ['tmplate'][0]
template = self.db.tmplate.getnode (template)
address = self.db.address.getnode (self.context ['context'].id)
files = template.files
if not files :
raise Reject, _ ('No files for %(tmplate)s' % template.name)
return self.create_file \
( self.db.file.getnode (files [0])
, None
, Date ('.')
, address
)
# end def handle
# end class Personalized_Template
class Edit_Payment_Action (EditItemAction, autosuper) :
    """ Remove items that did not change (for which we defined a hidden
        attribute in the mask) from the new items. Then proceed as usual
        like for EditItemAction.
    """
    def _editnodes (self, props, links) :
        # use props.items here, with iteritems we get a RuntimeError
        # "dictionary changed size during iteration"
        for (cl, id), val in props.items () :
            # New payment rows (negative id) containing only the hidden
            # defaults (receipt_no == 'auto' and an invoice link) are
            # untouched form rows -- drop them before editing.
            if ( cl == 'payment'
                and int (id) < 0
                and sorted (val.keys ()) == ['invoice', 'receipt_no']
                and val ['receipt_no'] == 'auto'
                ) :
                del props [(cl, id)]
        return EditItemAction._editnodes (self, props, links)
    # end def _editnodes
# end class Edit_Payment_Action
def init (instance) :
    """ Register all invoice-related actions with the tracker instance. """
    actions = \
        ( ('mark_invoice', Mark_Invoice)
        , ('unmark_invoice', Unmark_Invoice)
        , ('mark_invoice_sent', Mark_Invoice_Sent)
        , ('generate_invoice', Generate_Invoice)
        , ('mark_single_invoice_sent', Mark_Single_Invoice_Sent)
        , ('download_letter', Download_Letter)
        , ('personalized_template', Personalized_Template)
        , ('edit_payment', Edit_Payment_Action)
        )
    for action_name, action_class in actions :
        instance.registerAction (action_name, action_class)
# end def init
| 5,128 | 7,965 | 358 |
91e13dc5dce4394facd41c473ef017637c7b7efb | 3,614 | py | Python | lib/market/market_quote.py | myron0330/metatrade | b0358ad3dce6ba50e4801b6af557d7883d8a5d9a | [
"MIT"
] | 1 | 2018-06-28T09:49:08.000Z | 2018-06-28T09:49:08.000Z | lib/market/market_quote.py | myron0330/metatrade | b0358ad3dce6ba50e4801b6af557d7883d8a5d9a | [
"MIT"
] | null | null | null | lib/market/market_quote.py | myron0330/metatrade | b0358ad3dce6ba50e4801b6af557d7883d8a5d9a | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
# **********************************************************************************#
# File:
# **********************************************************************************#
import time
from datetime import datetime
from utils.dict import CompositeDict
from utils.decorator import singleton
from .. configs import logger
from .. data.database_api import load_futures_rt_minute_data
def _get_minute_price_info(universe):
    """
    Get the latest info of stocks.

    Args:
        universe(list): universe list

    Returns:
        dict: symbol -> latest minute-bar dict, tagged with 'secID'.
    """
    futures_rt_minute_data = load_futures_rt_minute_data(universe)
    info = dict()
    for symbol, minute_bar_list in futures_rt_minute_data.iteritems():
        if minute_bar_list:
            # Tag the newest bar with its symbol; the update mutates the
            # bar dict inside the loaded data as well.
            item = minute_bar_list[-1]
            item.update({
                'secID': symbol
            })
            info[symbol] = minute_bar_list[-1]
    return info
@singleton
| 34.75 | 101 | 0.522413 | # -*- coding: UTF-8 -*-
# **********************************************************************************#
# File:
# **********************************************************************************#
import time
from datetime import datetime
from utils.dict import CompositeDict
from utils.decorator import singleton
from .. configs import logger
from .. data.database_api import load_futures_rt_minute_data
def _get_minute_price_info(universe):
    """
    Collect the newest minute bar for every symbol in *universe*.

    Args:
        universe(list): symbol identifiers to query

    Returns:
        dict: symbol -> its latest minute-bar dict; each returned bar is
        tagged in place with a 'secID' entry naming the symbol.
    """
    latest_info = dict()
    minute_data = load_futures_rt_minute_data(universe)
    for symbol, bars in minute_data.iteritems():
        if not bars:
            continue
        newest_bar = bars[-1]
        newest_bar.update({'secID': symbol})
        latest_info[symbol] = newest_bar
    return latest_info
@singleton
class MarketQuote(object):
    """ Realtime quote feed (decorated as a singleton above): double-buffers
    the latest minute bars and yields fresh bars once per clock minute.
    """
    def __init__(self, clock=None, universe=None, bar_collection=None):
        # clock supplies `current_minute`; universe lists the symbols;
        # bar_collection maps buffer version (0/1) -> {symbol: bar}.
        self.clock = clock
        self.universe = universe
        self.bar_collection = bar_collection or dict()
        # -1 until the first refresh; afterwards alternates between 0 and 1.
        self._bar_version = -1
    def fetch_data_from_database_api(self):
        """
        Fetch data from database api.

        Infinite generator: once per new clock minute, refresh the buffers
        and yield each symbol's bar whose 'barTime' equals that minute.
        """
        last_minute = None
        while True:
            cur_minute = self.clock.current_minute
            if last_minute != cur_minute:
                last_minute = cur_minute
                try:
                    # Wait until second 10 so the minute bar has been written.
                    second = datetime.now().second
                    if second < 10:
                        time.sleep(10 - second)
                    self._refresh_bar_collection()
                    cur_info = self.bar_collection[self._bar_version]
                    for future in self.universe:
                        if future in cur_info:
                            if cur_info[future]['barTime'] == last_minute:
                                yield cur_info[future]
                except:
                    # NOTE(review): bare except -- logs and keeps polling.
                    import traceback
                    logger.error('Fetch failed: %s' % (traceback.format_exc()))
            time.sleep(5)
    def publish_bar_data_section(self):
        """
        Publish the latest surface bar data.

        Infinite generator: once per new clock minute, yield a CompositeDict
        {minute: {symbol: bar}} of the bars stamped with that minute.
        """
        last_minute = None
        while True:
            current_minute = self.clock.current_minute
            if last_minute != current_minute:
                last_minute = current_minute
                try:
                    second = datetime.now().second
                    if second < 2:
                        time.sleep(2 - second)
                    self._refresh_bar_collection()
                    current_info = self.bar_collection[self._bar_version]
                    response = CompositeDict()
                    for symbol in self.universe:
                        if symbol in current_info and current_info[symbol]['barTime'] == last_minute:
                            response[last_minute][symbol] = current_info[symbol]
                    if response:
                        yield response
                except:
                    import traceback
                    logger.error('publish bar data failed: %s' % (traceback.format_exc()))
            time.sleep(5)
    def _bar_version_next(self):
        """
        Bar version next.
        (Index of the buffer the next refresh writes: 0 or 1.)
        """
        return (self._bar_version + 1) % 2
    def _refresh_bar_collection(self):
        """
        Refresh bar collection.
        (Fills the spare buffer, then flips _bar_version to it.)
        """
        self.bar_collection[self._bar_version_next()] = _get_minute_price_info(self.universe)
        self._bar_version = self._bar_version_next()
| 192 | 2,472 | 22 |
e68c200f43159bf514afe9d579f8930fcb12c17b | 1,179 | py | Python | src/shapefiles/migrations/0001_initial.py | ro-hit81/reposit | 2d16ce6aaa565f5f659d8e370c888844a01f6030 | [
"bzip2-1.0.6"
] | null | null | null | src/shapefiles/migrations/0001_initial.py | ro-hit81/reposit | 2d16ce6aaa565f5f659d8e370c888844a01f6030 | [
"bzip2-1.0.6"
] | null | null | null | src/shapefiles/migrations/0001_initial.py | ro-hit81/reposit | 2d16ce6aaa565f5f659d8e370c888844a01f6030 | [
"bzip2-1.0.6"
] | null | null | null | # Generated by Django 2.2.1 on 2019-08-03 05:44
import django.contrib.gis.db.models.fields
from django.db import migrations, models
| 35.727273 | 114 | 0.579304 | # Generated by Django 2.2.1 on 2019-08-03 05:44
import django.contrib.gis.db.models.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the `Parcel` model with its
    # attribute columns and a GeoDjango MultiPolygon geometry in WGS84
    # (srid=4326).
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Parcel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('objectid', models.BigIntegerField()),
                ('parcelkey', models.CharField(max_length=26)),
                ('ownerid', models.CharField(max_length=30)),
                ('registered', models.CharField(max_length=50)),
                ('eastparcel', models.CharField(max_length=50)),
                ('westparcel', models.CharField(max_length=50)),
                ('northparce', models.CharField(max_length=50)),
                ('southparce', models.CharField(max_length=50)),
                ('parcelnoen', models.BigIntegerField()),
                ('shape_area', models.FloatField()),
                ('geom', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326)),
            ],
        ),
    ]
| 0 | 1,022 | 23 |
2aa5c07dd8585aedf7b1d130332630d71b1b552a | 3,068 | py | Python | sport.py | hexuustc/weixin_sport | 6b4ab18dc48c9fb97301d2f193746effc4988f74 | [
"MIT"
] | 1 | 2021-09-26T06:54:44.000Z | 2021-09-26T06:54:44.000Z | sport.py | hexuustc/weixin_sport | 6b4ab18dc48c9fb97301d2f193746effc4988f74 | [
"MIT"
] | null | null | null | sport.py | hexuustc/weixin_sport | 6b4ab18dc48c9fb97301d2f193746effc4988f74 | [
"MIT"
] | null | null | null | # encoding=utf8
import requests
import datetime
import pytz
import argparse
import random
import json
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='weixin auto sport script.')
    parser.add_argument('user', help='your user name', type=str)
    parser.add_argument('password', help='your password', type=str)
    args = parser.parse_args()
    autorepoter = Report(user=args.user, password=args.password)
    # Try to report up to 3 times, leaving the loop early on success.
    count = 3
    while count != 0:
        ret = autorepoter.report()
        if ret != False:
            break
        print("Sport Failed, retry...")
        count = count - 1
    # count > 0 here means the loop was left via break (a report succeeded);
    # count == 0 means all three attempts failed.
    if count != 0:
        exit(0)
    else:
exit(-1) | 35.674419 | 159 | 0.550847 | # encoding=utf8
import requests
import datetime
import pytz
import argparse
import random
import json
class Report(object):
    """ Posts a step count to the xzdx.xyz WeChat-sport endpoint.

    NOTE(review): report() sends the constructor-supplied user name and
    password to the third-party host `xzdx.xyz` -- verify this is intended.
    """
    def __init__(self, user, password):
        self.user = user
        self.password = password
    def report(self):
        """ Pick a step count based on the current Asia/Shanghai hour,
        POST it with browser-like headers, and return True only when the
        HTTP status is 200 and the JSON body's 'code' is '200'. """
        url = "https://xzdx.xyz/x/ym/yzmb/api.php"
        timenow = datetime.datetime.now(pytz.timezone('Asia/Shanghai'))
        # Debug output of the current hour.
        print(timenow.hour)
        # Step count grows through the day: morning < noon < evening < night.
        if timenow.hour<=8 and timenow.hour>=7:
            step = random.randint(1000,1500)
        elif timenow.hour<=12 and timenow.hour>=11:
            step = random.randint(2000,2500)
        elif timenow.hour<=18 and timenow.hour>=17:
            step = random.randint(4000,4500)
        elif timenow.hour<=23 and timenow.hour>=22:
            step = random.randint(6000,7000)
        else:
            step = random.randint(1500,2000)
        # Browser-like headers copied from a real Edge session.
        headers = {
            'authority': 'xzdx.xyz',
            'method': 'POST',
            'path': '/x/ym/yzmb/api.php',
            'scheme': 'https',
            'accept': '*/*',
            'accept-encoding': 'gzip, deflate, br',
            'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
            'content-length': '48',
            'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'origin': 'https://xzdx.xyz',
            'referer': 'https://xzdx.xyz/x/ym/yzmb/',
            'sec-ch-ua': '"Chromium";v="94", "Microsoft Edge";v="94", ";Not A Brand";v="99"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36 Edg/94.0.992.31',
            'x-requesteded-with':'XMLHttpRequest'
        }
        data = {
            'user':self.user,
            'password':self.password,
            'steps':step
        }
        ret = requests.post(url,data=data,headers=headers)
        status = ret.status_code
        string = ret.content.decode('UTF-8')
        json_message = json.loads(string)
        code = json_message['code']
        # Debug output of the response.
        print(status)
        print(string)
        print(code)
        if status==200 and code=='200':
            print("Sport Success!")
            return True
        else:
            print("Sport Failed!")
            return False
        return False  # NOTE(review): unreachable -- both branches return.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='weixin auto sport script.')
    parser.add_argument('user', help='your user name', type=str)
    parser.add_argument('password', help='your password', type=str)
    args = parser.parse_args()
    autorepoter = Report(user=args.user, password=args.password)
    # Try to report up to 3 times, leaving the loop early on success.
    count = 3
    while count != 0:
        ret = autorepoter.report()
        if ret != False:
            break
        print("Sport Failed, retry...")
        count = count - 1
    # count > 0 here means the loop was left via break (a report succeeded);
    # count == 0 means all three attempts failed.
    if count != 0:
        exit(0)
    else:
exit(-1) | 2,314 | 0 | 76 |
9aca37e5b5791be898dfe5347b24154e7d5b9ccf | 1,634 | py | Python | aerisweather/responses/ObservationsSummaryDewPt.py | jkoelndorfer/aerisweather-python-sdk | ef37fe3132e87e7f86c2d1ba40a2ac2b7b4db604 | [
"MIT"
] | 5 | 2019-02-26T20:56:14.000Z | 2021-05-05T21:04:45.000Z | aerisweather/responses/ObservationsSummaryDewPt.py | jkoelndorfer/aerisweather-python-sdk | ef37fe3132e87e7f86c2d1ba40a2ac2b7b4db604 | [
"MIT"
] | 2 | 2021-03-02T00:35:42.000Z | 2021-03-03T17:07:37.000Z | aerisweather/responses/ObservationsSummaryDewPt.py | jkoelndorfer/aerisweather-python-sdk | ef37fe3132e87e7f86c2d1ba40a2ac2b7b4db604 | [
"MIT"
] | 8 | 2018-05-26T18:52:21.000Z | 2022-01-20T19:47:15.000Z |
class ObservationsSummaryDewPt:
    """ Defines an object for the observations summary period dew point data. """
    # Class-level default; each instance overwrites it in __init__.
    dewpt = {}
    def __init__(self, dewpt_json):
        """
        Constructor - this takes an individual observations summary period's dewpoint json.
        {
            "maxC": 5,
            "maxF": 41,
            "minC": -3,
            "minF": 26,
            "avgC": -0.6,
            "avgF": 30.9,
            "count": 23
        },
        """
        self.dewpt = dewpt_json
    @property
    def maxC(self) -> float:
        """ The maximum dew point in Celsius. Null if unavailable. """
        return self.dewpt["maxC"]
    @property
    def maxF(self) -> float:
        """ The maximum dew point in Fahrenheit. Null if unavailable. """
        return self.dewpt["maxF"]
    @property
    def minC(self) -> float:
        """ The minimum dew point in Celsius. Null if unavailable. """
        return self.dewpt["minC"]
    @property
    def minF(self) -> float:
        """ The minimum dew point in Fahrenheit. Null if unavailable. """
        return self.dewpt["minF"]
    @property
    def avgC(self) -> float:
        """ The average dew point in Celsius. Null if unavailable. """
        return self.dewpt["avgC"]
    @property
    def avgF(self) -> float:
        """ The average dew point in Fahrenheit. Null if unavailable. """
        return self.dewpt["avgF"]
    @property
    def count(self) -> int:
        """ The total number of observations that included dew point information. """
        return self.dewpt["count"]
| 28.172414 | 91 | 0.53672 |
class ObservationsSummaryDewPt:
    """Wraps the dew point section of an observations summary period."""

    # Raw dew point JSON for the period; replaced per instance in __init__.
    dewpt = {}

    def __init__(self, dewpt_json):
        """Store the raw dew point JSON for one summary period.

        Expected shape:
            {"maxC": 5, "maxF": 41, "minC": -3, "minF": 26,
             "avgC": -0.6, "avgF": 30.9, "count": 23}
        """
        self.dewpt = dewpt_json

    @property
    def minC(self) -> float:
        """Minimum dew point in Celsius. Null if unavailable."""
        return self.dewpt["minC"]

    @property
    def minF(self) -> float:
        """Minimum dew point in Fahrenheit. Null if unavailable."""
        return self.dewpt["minF"]

    @property
    def maxC(self) -> float:
        """Maximum dew point in Celsius. Null if unavailable."""
        return self.dewpt["maxC"]

    @property
    def maxF(self) -> float:
        """Maximum dew point in Fahrenheit. Null if unavailable."""
        return self.dewpt["maxF"]

    @property
    def avgC(self) -> float:
        """Average dew point in Celsius. Null if unavailable."""
        return self.dewpt["avgC"]

    @property
    def avgF(self) -> float:
        """Average dew point in Fahrenheit. Null if unavailable."""
        return self.dewpt["avgF"]

    @property
    def count(self) -> int:
        """Total number of observations that included dew point information."""
        return self.dewpt["count"]
| 0 | 0 | 0 |
ecf14aa3926e57d3af91bbd3bfcf4c68f37171a2 | 1,220 | py | Python | receivePage.py | jb55/lightning-qt | 34d6c990a74a5d6d8a10eb75a98d15a6c7eb1891 | [
"BSD-3-Clause-Clear"
] | 26 | 2019-05-15T17:35:58.000Z | 2019-05-25T11:59:45.000Z | receivePage.py | jb55/lightning-qt | 34d6c990a74a5d6d8a10eb75a98d15a6c7eb1891 | [
"BSD-3-Clause-Clear"
] | 16 | 2019-06-07T08:33:38.000Z | 2021-08-30T10:16:47.000Z | receivePage.py | jb55/lightning-qt | 34d6c990a74a5d6d8a10eb75a98d15a6c7eb1891 | [
"BSD-3-Clause-Clear"
] | 8 | 2019-06-12T13:29:14.000Z | 2021-04-01T21:28:03.000Z | from PyQt5.QtWidgets import QWidget
from forms.ui_receivePage import Ui_ReceivePage
class ReceivePage(QWidget, Ui_ReceivePage):
    """The page to generate bolt11 invoices"""

    def clearForm(self):
        """Restore every input widget to its default state."""
        self.lineLabel.setText("")
        self.lineDescription.setText("")
        self.spinValue.setValue(1)
        self.spinExpiry.setValue(604800)  # one week, in seconds

    def generateInvoice(self):
        """Create a bolt11 invoice from the form fields and display it."""
        invoice = self.plugin.rpc.invoice(
            self.spinValue.value(),
            self.lineLabel.text(),
            self.lineDescription.text(),
            self.spinExpiry.value(),
        )
        # Guard against an RPC failure returning a falsy value.
        if invoice:
            self.textResultInvoice.setText(invoice["bolt11"])

    def initUi(self):
        """Wire the buttons to their handlers."""
        self.buttonClear.clicked.connect(self.clearForm)
        self.buttonGenerate.clicked.connect(self.generateInvoice)
| 34.857143 | 82 | 0.656557 | from PyQt5.QtWidgets import QWidget
from forms.ui_receivePage import Ui_ReceivePage
class ReceivePage(QWidget, Ui_ReceivePage):
    """The page to generate bolt11 invoices"""

    def __init__(self, plugin):
        """Build the page widgets and hook up actions."""
        super().__init__()
        self.setupUi(self)
        self.plugin = plugin
        self.initUi()

    def clearForm(self):
        """Restore every input widget to its default state."""
        self.lineLabel.setText("")
        self.lineDescription.setText("")
        self.spinValue.setValue(1)
        self.spinExpiry.setValue(604800)  # one week, in seconds

    def generateInvoice(self):
        """Create a bolt11 invoice from the form fields and display it."""
        invoice = self.plugin.rpc.invoice(
            self.spinValue.value(),
            self.lineLabel.text(),
            self.lineDescription.text(),
            self.spinExpiry.value(),
        )
        # Guard against an RPC failure returning a falsy value.
        if invoice:
            self.textResultInvoice.setText(invoice["bolt11"])

    def initUi(self):
        """Wire the buttons to their handlers."""
        self.buttonClear.clicked.connect(self.clearForm)
        self.buttonGenerate.clicked.connect(self.generateInvoice)
| 111 | 0 | 26 |
f8eb53b19f7d0ed80f555946ddeb4af5858c61b4 | 2,698 | py | Python | web/wrpbg/pb_server.py | mccreery/sandbox | 43e81c7f908feedd8e1b6e28ee07870d02012f4c | [
"Unlicense"
] | 1 | 2020-06-11T21:19:25.000Z | 2020-06-11T21:19:25.000Z | web/wrpbg/pb_server.py | mccreery/sandbox | 43e81c7f908feedd8e1b6e28ee07870d02012f4c | [
"Unlicense"
] | null | null | null | web/wrpbg/pb_server.py | mccreery/sandbox | 43e81c7f908feedd8e1b6e28ee07870d02012f4c | [
"Unlicense"
] | 1 | 2020-06-11T21:19:26.000Z | 2020-06-11T21:19:26.000Z | import http.server, requests, json
# Cache of user JSON objects fetched from speedrun.com, keyed by user id.
username_cache = {}
# Only these static files may be served.
whitelist = ("current.html", "runner64.png", "flags.min.css", "flags.png")
# File extension -> Content-Type header value.
# Fixed: "json" was misspelled as the invalid MIME type "application/jsonn".
mimes = {
    "html": "text/html",
    "css": "text/css",
    "png": "image/png",
    "json": "application/json"
}
PAGE_404 = """<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>404 Not Found</title>
</head>
<body>
nuthin to see here bruh
</body>
</html>
"""
HEADERS = {"User-Agent": "Jobicade's Magnificent PB/WR grabber"}
# speedrun.com API identifiers for the tracked game, category and runner.
GAME_ID = "4pd0n31e"
CATEGORY_ID = "wk6pexd1"
USER_ID = "48g0ln2x"
BASE = "http://www.speedrun.com/api/v1/"
PB_QUERY = BASE + "users/" + USER_ID + "/personal-bests"
WR_QUERY = BASE + "leaderboards/" + GAME_ID + "/category/" + CATEGORY_ID
# Bind the handler to localhost:80 and serve until interrupted.
httpd = http.server.HTTPServer(('127.0.0.1', 80), RecordRequest)
sa = httpd.socket.getsockname()
print("Serving HTTP on", sa[0], "port", sa[1], "...")
httpd.serve_forever()
| 27.252525 | 113 | 0.642328 | import http.server, requests, json
# Cache of user JSON objects fetched from speedrun.com, keyed by user id.
username_cache = {}
# Only these static files may be served.
whitelist = ("current.html", "runner64.png", "flags.min.css", "flags.png")
# File extension -> Content-Type header value.
# Fixed: "json" was misspelled as the invalid MIME type "application/jsonn".
mimes = {
    "html": "text/html",
    "css": "text/css",
    "png": "image/png",
    "json": "application/json"
}
PAGE_404 = """<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>404 Not Found</title>
</head>
<body>
nuthin to see here bruh
</body>
</html>
"""
HEADERS = {"User-Agent": "Jobicade's Magnificent PB/WR grabber"}
# speedrun.com API identifiers for the tracked game, category and runner.
GAME_ID = "4pd0n31e"
CATEGORY_ID = "wk6pexd1"
USER_ID = "48g0ln2x"
BASE = "http://www.speedrun.com/api/v1/"
PB_QUERY = BASE + "users/" + USER_ID + "/personal-bests"
WR_QUERY = BASE + "leaderboards/" + GAME_ID + "/category/" + CATEGORY_ID
class RecordRequest(http.server.BaseHTTPRequestHandler):
    # Handler that serves a small static site plus a "/data" endpoint which
    # aggregates personal-best / world-record info from the speedrun.com API.
    @staticmethod
    def json(url, path=None, **kwargs):
        # Fetch *url* and return its JSON body, optionally descending into it
        # along the sequence of keys/indices in *path*.
        root = requests.get(url, **kwargs, headers=HEADERS).json()
        if path:
            for key in path:
                root = root[key]
        return root
    def do_GET(self):
        # Strip the leading slash so paths can be compared to the whitelist directly.
        if self.path[0] == "/":
            self.path = self.path[1:]
        headers_out = {}
        code = 200
        # Aliases for the landing page get a permanent redirect to "/".
        redirects = ("current", "current.html", "index", "index.html")
        if self.path in redirects:
            code = 301
            headers_out["Location"] = "/"
            self.path = "current.html"
        # This is the correct path, don't complain about it
        if self.path == "": self.path = "current.html"
        if self.path == "data":
            headers_out["Content-Type"] = "application/json; charset=utf-8"
            # Grab responses from speedrun.com...
            pb = RecordRequest.json(PB_QUERY, ("data", 0), params={"game": GAME_ID, "category": CATEGORY_ID})
            wr = RecordRequest.json(WR_QUERY, ("data", "runs", 0, "run"), params={"top": 1, "timing": "realtime_noloads"})
            wr_runner = wr["players"][0]
            # Cache user lookups so repeated requests don't re-hit the API.
            if not wr_runner["id"] in username_cache:
                username_cache[wr_runner["id"]] = RecordRequest.json(wr_runner["uri"], ("data",))
            # Flatten the PB response and attach the runner object to the WR.
            pb["run"]["place"] = pb["place"]
            pb = pb["run"]
            wr["player"] = username_cache[wr_runner["id"]]
            response = json.dumps({"pb": pb, "wr": wr}).encode("utf-8")
        elif self.path in whitelist:
            # Serve a whitelisted static file with the MIME type of its extension.
            headers_out["Content-Type"] = mimes[self.path[self.path.rfind(".")+1:]]
            with open(self.path, "rb") as f:
                response = f.read()
        else:
            code = 404
            response = PAGE_404.encode("utf-8")
            headers_out["Content-Type"] = "text/html; charset=utf-8"
        headers_out["Content-Length"] = len(response)
        self.send_response(code)
        for key in headers_out:
            self.send_header(key, headers_out[key])
        self.end_headers()
        self.wfile.write(response)
# Bind the handler to localhost:80 and serve until interrupted.
httpd = http.server.HTTPServer(('127.0.0.1', 80), RecordRequest)
sa = httpd.socket.getsockname()
print("Serving HTTP on", sa[0], "port", sa[1], "...")
httpd.serve_forever()
| 1,671 | 97 | 23 |
025a893749801984f1c46542201e277d879dc8b8 | 244 | py | Python | tottle/types/objects/login.py | muffleo/tottle | 69a5bdda879ab56d43505d517d3369a687c135a2 | [
"MIT"
] | 12 | 2020-09-06T15:31:34.000Z | 2021-02-27T20:30:34.000Z | tottle/types/objects/login.py | cyanlabs-org/tottle | 6cf02022ed7b445c9b5af475c6e854b91780d792 | [
"MIT"
] | 2 | 2021-04-13T06:43:42.000Z | 2021-07-07T20:52:39.000Z | tottle/types/objects/login.py | cyanlabs-org/tottle | 6cf02022ed7b445c9b5af475c6e854b91780d792 | [
"MIT"
] | 4 | 2020-09-12T03:09:25.000Z | 2021-03-22T08:52:04.000Z | from pydantic import BaseModel
from typing import Optional
| 24.4 | 47 | 0.741803 | from pydantic import BaseModel
from typing import Optional
class LoginUrl(BaseModel):
url: Optional[str] = None
forward_text: Optional[str] = None
bot_username: Optional[str] = None
request_write_access: Optional[bool] = None
| 0 | 161 | 23 |
750320b65b80fda001114fd35158b9000f594ba5 | 2,060 | py | Python | phuber/evaluator.py | dmizr/phuber | 3b70eadd9bd1420047ada743ff5604eda48d63ac | [
"MIT"
] | 12 | 2020-12-17T14:54:03.000Z | 2021-12-13T21:30:13.000Z | phuber/evaluator.py | dmizr/phuber | 3b70eadd9bd1420047ada743ff5604eda48d63ac | [
"MIT"
] | 5 | 2020-12-30T21:18:05.000Z | 2021-04-16T21:27:35.000Z | phuber/evaluator.py | dmizr/phuber | 3b70eadd9bd1420047ada743ff5604eda48d63ac | [
"MIT"
] | 7 | 2021-04-15T02:13:26.000Z | 2021-12-01T21:20:59.000Z | import logging
from typing import Optional
import torch
import tqdm
from torch.utils.data import DataLoader
from phuber.metrics import AccuracyMetric
class Evaluator:
"""Model evaluator
Args:
model: model to be evaluated
device: device on which to evaluate model
loader: dataloader on which to evaluate model
checkpoint_path: path to model checkpoint
"""
def evaluate(self) -> float:
"""Evaluates the model
Returns:
(float) accuracy (on a 0 to 1 scale)
"""
# Progress bar
pbar = tqdm.tqdm(total=len(self.loader), leave=False)
pbar.set_description("Evaluating... ")
# Set to eval
self.model.eval()
# Loop
for data, target in self.loader:
with torch.no_grad():
# To device
data, target = data.to(self.device), target.to(self.device)
# Forward
out = self.model(data)
self.acc_metric.update(out, target)
# Update progress bar
pbar.update()
pbar.close()
accuracy = self.acc_metric.compute()
self.logger.info(f"Accuracy: {accuracy:.4f}\n")
return accuracy
| 23.678161 | 75 | 0.584466 | import logging
from typing import Optional
import torch
import tqdm
from torch.utils.data import DataLoader
from phuber.metrics import AccuracyMetric
class Evaluator:
"""Model evaluator
Args:
model: model to be evaluated
device: device on which to evaluate model
loader: dataloader on which to evaluate model
checkpoint_path: path to model checkpoint
"""
def __init__(
self,
model: torch.nn.Module,
device: torch.device,
loader: DataLoader,
checkpoint_path: Optional[str] = None,
) -> None:
# Logging
self.logger = logging.getLogger()
# Device
self.device = device
# Data
self.loader = loader
# Model
self.model = model
if checkpoint_path:
self._load_from_checkpoint(checkpoint_path)
# Metrics
self.acc_metric = AccuracyMetric(k=1)
def evaluate(self) -> float:
"""Evaluates the model
Returns:
(float) accuracy (on a 0 to 1 scale)
"""
# Progress bar
pbar = tqdm.tqdm(total=len(self.loader), leave=False)
pbar.set_description("Evaluating... ")
# Set to eval
self.model.eval()
# Loop
for data, target in self.loader:
with torch.no_grad():
# To device
data, target = data.to(self.device), target.to(self.device)
# Forward
out = self.model(data)
self.acc_metric.update(out, target)
# Update progress bar
pbar.update()
pbar.close()
accuracy = self.acc_metric.compute()
self.logger.info(f"Accuracy: {accuracy:.4f}\n")
return accuracy
def _load_from_checkpoint(self, checkpoint_path: str) -> None:
checkpoint = torch.load(checkpoint_path, map_location=self.device)
self.model.load_state_dict(checkpoint["model"])
self.logger.info(f"Checkpoint loaded: {checkpoint_path}")
| 742 | 0 | 54 |
8c562da3b379f70ca048f530b14ad2a31026c4c4 | 14,618 | py | Python | gpcsup/__init__.py | braedon/gpcsup | a20c9fbc7d5c0191a87553b8588757b1528fe655 | [
"MIT"
] | null | null | null | gpcsup/__init__.py | braedon/gpcsup | a20c9fbc7d5c0191a87553b8588757b1528fe655 | [
"MIT"
] | null | null | null | gpcsup/__init__.py | braedon/gpcsup | a20c9fbc7d5c0191a87553b8588757b1528fe655 | [
"MIT"
] | null | null | null | import functools
import idna
import logging
import re
import requests
import rfc3339
import time
from bottle import Bottle, request, response, static_file, template, redirect
from datetime import timedelta
from publicsuffixlist import PublicSuffixList
from requests_oauthlib import OAuth1
from urllib.parse import urlsplit
from utils.param_parse import (ParamParseError, parse_params,
integer_param, string_param, boolean_param)
from .misc import html_default_error_hander, security_headers, set_headers
log = logging.getLogger(__name__)
# Disable some logging to reduce log spam.
# Elasticsearch logs all requests at (at least) INFO level.
logging.getLogger('elasticsearch').setLevel(logging.WARNING)
PSL_CACHE_SIZE = 10_000
psl = PublicSuffixList()
# Domains are a series of two or more names, separated by periods, with an optional trailing period.
# (Technically one name is allowed, but TLDs aren't usually HTTP sites.)
# (Note that we actually strip any trailing period during normalization - along with lowercasing
# characters - but support has been left in the regex for completeness.)
# Each name can contain latin characters (case insensitive), digits, or dashes.
# Names can't be longer than 63 characters, or start/end with a dash.
# The final name - the TLD - can't be numeric (only digits).
DOMAIN_REGEX = re.compile(r'^(?:[a-z\d](?:[a-z\d-]{0,61}[a-z\d])?\.)+(?!\d+\.?$)[a-z\d](?:[a-z\d-]{0,61}[a-z\d])?\.?$')
DOMAIN_MAX_LENGTH = 253
REQUEST_TIMEOUT_INDIVIDUAL = 5
SCAN_START_TIMEOUT = 20
SCAN_TIMEOUT = 30
SCAN_AGENT = 'GpcSupBot'
SCAN_HEADERS = {'User-Agent': f'{SCAN_AGENT}/0.1 (https://gpcsup.com)'}
ROBOTS_MAX_CONTENT_LENGTH = 512 * 1024 # 512kB
GPC_PATH = '/.well-known/gpc.json'
GPC_MAX_CONTENT_LENGTH = 1024 # 1kB
SCAN_TTL = timedelta(minutes=10)
NEXT_SCAN_OFFSET = timedelta(days=7)
SCAN_FAIL_OFFSETS = [
timedelta(days=1),
timedelta(days=7),
timedelta(days=30),
]
SCAN_RESULT_MAX_AGE_SECS = SCAN_TTL.seconds
SCAN_RESULT_HEADERS = {'Cache-Control': f'max-age={SCAN_RESULT_MAX_AGE_SECS}'}
STATIC_FILE_MAX_AGE_SECS = timedelta(hours=1).seconds
STATIC_FILE_HEADERS = {'Cache-Control': f'max-age={STATIC_FILE_MAX_AGE_SECS}'}
SITES_PAGE_SIZE = 8
SERVER_READY = True
class ScanError(Exception):
"""The scan has failed, and the user should be shown the specified template."""
@functools.lru_cache(maxsize=PSL_CACHE_SIZE)
| 36.636591 | 119 | 0.606718 | import functools
import idna
import logging
import re
import requests
import rfc3339
import time
from bottle import Bottle, request, response, static_file, template, redirect
from datetime import timedelta
from publicsuffixlist import PublicSuffixList
from requests_oauthlib import OAuth1
from urllib.parse import urlsplit
from utils.param_parse import (ParamParseError, parse_params,
integer_param, string_param, boolean_param)
from .misc import html_default_error_hander, security_headers, set_headers
log = logging.getLogger(__name__)
# Disable some logging to reduce log spam.
# Elasticsearch logs all requests at (at least) INFO level.
logging.getLogger('elasticsearch').setLevel(logging.WARNING)
PSL_CACHE_SIZE = 10_000
psl = PublicSuffixList()
# Domains are a series of two or more names, separated by periods, with an optional trailing period.
# (Technically one name is allowed, but TLDs aren't usually HTTP sites.)
# (Note that we actually strip any trailing period during normalization - along with lowercasing
# characters - but support has been left in the regex for completeness.)
# Each name can contain latin characters (case insensitive), digits, or dashes.
# Names can't be longer than 63 characters, or start/end with a dash.
# The final name - the TLD - can't be numeric (only digits).
DOMAIN_REGEX = re.compile(r'^(?:[a-z\d](?:[a-z\d-]{0,61}[a-z\d])?\.)+(?!\d+\.?$)[a-z\d](?:[a-z\d-]{0,61}[a-z\d])?\.?$')
DOMAIN_MAX_LENGTH = 253
REQUEST_TIMEOUT_INDIVIDUAL = 5
SCAN_START_TIMEOUT = 20
SCAN_TIMEOUT = 30
SCAN_AGENT = 'GpcSupBot'
SCAN_HEADERS = {'User-Agent': f'{SCAN_AGENT}/0.1 (https://gpcsup.com)'}
ROBOTS_MAX_CONTENT_LENGTH = 512 * 1024 # 512kB
GPC_PATH = '/.well-known/gpc.json'
GPC_MAX_CONTENT_LENGTH = 1024 # 1kB
SCAN_TTL = timedelta(minutes=10)
NEXT_SCAN_OFFSET = timedelta(days=7)
SCAN_FAIL_OFFSETS = [
timedelta(days=1),
timedelta(days=7),
timedelta(days=30),
]
SCAN_RESULT_MAX_AGE_SECS = SCAN_TTL.seconds
SCAN_RESULT_HEADERS = {'Cache-Control': f'max-age={SCAN_RESULT_MAX_AGE_SECS}'}
STATIC_FILE_MAX_AGE_SECS = timedelta(hours=1).seconds
STATIC_FILE_HEADERS = {'Cache-Control': f'max-age={STATIC_FILE_MAX_AGE_SECS}'}
SITES_PAGE_SIZE = 8
SERVER_READY = True
class ScanError(Exception):
    """The scan failed; the user should be shown ``template`` rendered with ``kwargs``."""

    def __init__(self, template, **kwargs):
        """Capture the template name and the keyword context used to render it."""
        self.template = template
        self.kwargs = kwargs
@functools.lru_cache(maxsize=PSL_CACHE_SIZE)
def extract_base_domain(domain, return_unknown=True):
    """Return the registrable (eTLD+1) base domain of *domain*.

    Results are memoised. If the public suffix list doesn't recognise the
    eTLD, the domain itself is returned unless *return_unknown* is False,
    in which case None is returned.
    """
    base = psl.privatesuffix(domain)
    if base is not None:
        return base
    return domain if return_unknown else None
def domain_is_www_subdomain(domain):
    """Return True when *domain* is exactly the "www." subdomain of its base domain."""
    return domain == 'www.' + extract_base_domain(domain)
def normalise_domain(domain):
    """Normalise a user-supplied domain.

    Lowercases, strips an http(s) scheme and a single trailing slash
    (users often paste full URLs), drops the optional trailing root dot,
    then round-trips through IDNA (UTS#46 mapping) to normalise unicode.
    """
    domain = domain.lower()
    # Handle users copying domains with the scheme attached.
    # Only these two schemes - GPC is for HTTP(s).
    for scheme in ('https://', 'http://'):
        if domain.startswith(scheme):
            domain = domain[len(scheme):]
            break
    # One trailing slash, then the optional trailing period.
    if domain[-1:] == '/':
        domain = domain[:-1]
    if domain[-1:] == '.':
        domain = domain[:-1]
    try:
        # Convert to and from IDNA encoding with compatibility mapping
        # enabled to normalise.
        domain = idna.decode(idna.encode(domain, uts46=True))
    except idna.IDNAError:
        # Leave un-normalised; any IDNA error will make check_domain()
        # fail anyway.
        pass
    return domain
def check_domain(domain):
    """Validate *domain*: IDNA-encodable, within length limits and matching DOMAIN_REGEX."""
    try:
        # Convert domains to IDNA format before checking length and format.
        ascii_domain = idna.encode(domain).decode('ASCII')
    except idna.IDNAError as e:
        log.warning('IDNA error when checking %(domain)s: %(error)s',
                    {'domain': domain, 'error': e})
        return False
    if len(ascii_domain) > DOMAIN_MAX_LENGTH:
        return False
    return DOMAIN_REGEX.fullmatch(ascii_domain) is not None
def extract_domain_from_url(url):
    """Pull the hostname out of *url*, discard any port, and normalise it."""
    netloc = urlsplit(url).netloc
    # split() without a ':' present returns the whole netloc unchanged.
    host = netloc.split(':', 1)[0]
    return normalise_domain(host)
def construct_app(es_dao, well_known_sites_endpoint, testing_mode, **kwargs):
    """Build and return the Bottle application.

    Args:
        es_dao: Elasticsearch DAO used to read scan results and counts.
        well_known_sites_endpoint: URL of the Well-Known service used to
            queue (re)scans of a domain.
        testing_mode: when truthy, actions that would affect prod flow are
            logged instead of performed.
        **kwargs: ignored; allows a shared config dict to be splatted in.

    Returns:
        bottle.Bottle: the configured application.
    """
    app = Bottle()
    app.default_error_handler = html_default_error_hander
    app.install(security_headers)

    @app.get('/-/live')
    def live():
        return 'Live'

    @app.get('/-/ready')
    def ready():
        if SERVER_READY:
            return 'Ready'
        else:
            response.status = 503
            return 'Unavailable'

    @app.get('/main.css')
    def css():
        return static_file('main.css', root='static', headers=STATIC_FILE_HEADERS.copy())

    # Set CORP to allow Firefox for Android to load icons.
    # Firefox for Android seems to consider the icon loader a different origin.
    #
    # Favicon stuff generated at:
    # https://favicon.io/favicon-generator/?t=gs&ff=Roboto Slab&fs=80&fc=%23fff&b=rounded&bc=%2300885D
    @app.get('/favicon.ico',
             sh_updates={'Cross-Origin-Resource-Policy': 'cross-origin'})
    def icon():
        return static_file('favicon.ico', root='static', headers=STATIC_FILE_HEADERS.copy())

    @app.get('/<filename>.png',
             sh_updates={'Cross-Origin-Resource-Policy': 'cross-origin'})
    def root_pngs(filename):
        # Fixed: previously served a literal '(unknown).png' (f-string with no
        # placeholder), ignoring the requested filename.
        return static_file(f'{filename}.png', root='static', headers=STATIC_FILE_HEADERS.copy())

    @app.get('/<filename>.js')
    def root_js(filename):
        # Fixed: previously served a literal '(unknown).js', ignoring the
        # requested filename.
        return static_file(f'{filename}.js', root='static', headers=STATIC_FILE_HEADERS.copy())

    @app.get('/.well-known/gpc.json')
    def global_privacy_control():
        # This site itself supports GPC.
        return {'gpc': True, 'lastUpdate': '2021-07-17'}

    @app.get('/')
    def index():
        # An invalid domain param is simply ignored on the landing page.
        try:
            params = parse_params(request.query.decode(),
                                  domain=string_param('domain', strip=True,
                                                      min_length=1, max_length=DOMAIN_MAX_LENGTH))
            domain = params.get('domain')
        except ParamParseError:
            domain = None
        if domain:
            domain = normalise_domain(domain)
            if not check_domain(domain):
                domain = None
        scanned_count, supporting_count = es_dao.count(timeout=30)
        r = template('index', domain=domain,
                     scanned_count=scanned_count, supporting_count=supporting_count)
        set_headers(r, STATIC_FILE_HEADERS)
        return r

    @app.post('/')
    def check_site():
        try:
            params = parse_params(request.forms.decode(),
                                  domain=string_param('domain', required=True, strip=True,
                                                      min_length=1, max_length=DOMAIN_MAX_LENGTH),
                                  no_rescan=boolean_param('no_rescan', default=False, empty=True,
                                                          strip=True))
        except ParamParseError:
            return template('gpc_invalid', domain=None)
        domain = normalise_domain(params['domain'])
        if not check_domain(domain):
            return template('gpc_invalid', domain=domain)
        result = es_dao.get(domain)
        if result is not None:
            if params['no_rescan'] or result['status'] == 'pending':
                redirect(f'/sites/{domain}')
            # Non-pending scans should have a scan datetime.
            last_scan_dt = rfc3339.parse_datetime(result['last_scan_dt'])
            # If the last scan hasn't expired yet, don't rescan.
            if rfc3339.now() < last_scan_dt + SCAN_TTL:
                if testing_mode:
                    log.info('Would have redirected to existing scan for %(domain)s if on prod.',
                             {'domain': domain})
                else:
                    redirect(f'/sites/{domain}')
        # Queue a (re)scan with the Well-Known service, then send the user to
        # the (probably pending) result page.
        r = requests.post(well_known_sites_endpoint, data={'domain': domain, 'rescan': 'true'})
        r.raise_for_status()
        redirect(f'/sites/{domain}')

    @app.get('/sites/')
    def get_sites():
        params = parse_params(request.params.decode(),
                              page=integer_param('page', default=0, positive=True))
        page = params['page']
        offset = page * SITES_PAGE_SIZE
        total, results = es_dao.find(supports_gpc=True, is_base_domain=True,
                                     sort=['id'], offset=offset, limit=SITES_PAGE_SIZE, timeout=30)
        domains = [result[0]['domain'] for result in results]
        previous_page = page - 1 if page > 0 else None
        next_page = page + 1
        next_offset = next_page * SITES_PAGE_SIZE
        if next_offset >= total:
            next_page = None
        return template('sites', domains=domains, previous_page=previous_page, next_page=next_page)

    @app.get('/sites/<domain>')
    def get_site(domain):
        domain = normalise_domain(domain)
        if not check_domain(domain):
            return template('gpc_invalid', domain=domain)
        # Well-Known doesn't scan www subdomains - redirect to the base domain instead.
        if domain_is_www_subdomain(domain):
            base_domain = extract_base_domain(domain)
            redirect(f'/sites/{base_domain}')
        result = es_dao.get(domain)
        if result is None:
            redirect(f'/?domain={domain}')
        status = result['status']
        scan_data = result.get('scan_data')
        if status == 'pending':
            return template('gpc_pending', domain=domain)
        elif status == 'blocked':
            return template('gpc_blocked', domain=domain)
        elif status == 'failed' and not scan_data:
            return template('gpc_error', domain=domain)
        # Status should be `ok`, or `failed` but with a previously successful scan.
        # In either case, `scan_data` should be present.
        assert scan_data
        scheme = scan_data['scheme']
        scan_dt = rfc3339.parse_datetime(result['scan_dt'])
        if result['scan_priority'] == 0:
            # Priority 0 means a rescan is already queued.
            rescan_queued = True
            can_rescan = False
        else:
            rescan_queued = False
            last_scan_dt = rfc3339.parse_datetime(result['last_scan_dt'])
            can_rescan = (last_scan_dt + SCAN_TTL) < rfc3339.now()
        error = scan_data.get('error')
        if error:
            message = None
            if error == 'not-found':
                message = 'The GPC support resource was not found.'
            elif error in ('unexpected-scheme-redirect', 'unexpected-status',
                           'client-error', 'server-error'):
                # (duplicate 'unexpected-status' entry removed)
                message = 'Server responded unexpectedly when fetching the GPC support resource.'
            elif error in ('parse-error', 'json-parse-error', 'unexpected-json-root-type',
                           'content-too-long', 'content-length-too-long', 'bad-content'):
                message = 'The GPC support resource is invalid.'
            else:
                # error is known truthy here, so this is a plain else.
                log.error('Unsupported GPC scan error %(error)s', {'error': error})
            r = template('gpc_unknown', scheme=scheme, domain=domain,
                         message=message, scan_dt=scan_dt,
                         rescan_queued=rescan_queued, can_rescan=can_rescan)
            set_headers(r, SCAN_RESULT_HEADERS)
            return r
        else:
            assert scan_data['found'], 'gpc.json should have been found if no error.'
            gpc_data = scan_data['gpc']
            warnings = scan_data.get('warnings') or []
            warnings += gpc_data.get('warning_codes') or []
            message = None
            if warnings:
                message_parts = []
                for warning in warnings:
                    if warning == 'wrong-content-type':
                        message_parts.append('incorrect content type')
                    elif warning == 'invalid-update-field':
                        message_parts.append('invalid last update field')
                if message_parts:
                    message = ' and '.join(message_parts) + '.'
            last_update = gpc_data['parsed'].get('lastUpdate')
            template_name = 'gpc_supported' if gpc_data['parsed']['gpc'] else 'gpc_unsupported'
            r = template(template_name, scheme=scheme, domain=domain,
                         last_update=last_update, message=message, scan_dt=scan_dt,
                         rescan_queued=rescan_queued, can_rescan=can_rescan)
            set_headers(r, SCAN_RESULT_HEADERS)
            return r
    return app
def run_twitter_worker(es_dao,
                       twitter_consumer_key, twitter_consumer_secret,
                       twitter_token_key, twitter_token_secret,
                       testing_mode, **kwargs):
    # Long-running worker loop: finds domains whose GPC support should be
    # announced, tweets about them, and records the tweet state in the DAO.
    # Never returns.
    oauth = OAuth1(client_key=twitter_consumer_key,
                   client_secret=twitter_consumer_secret,
                   resource_owner_key=twitter_token_key,
                   resource_owner_secret=twitter_token_secret)
    while True:
        domains = es_dao.find_tweetable()
        if domains:
            for domain in domains:
                if testing_mode:
                    # Dry run: log what would have been tweeted.
                    log.info('Would tweet about `%(domain)s` supporting GPC.',
                             {'domain': domain})
                else:
                    # Mark the domain as in-flight before posting so a crash
                    # mid-tweet can't cause a duplicate announcement.
                    es_dao.set_tweeting(domain, wait_for=True)
                    tweet = f'{domain} is reporting that it supports #GPC'
                    r = requests.post('https://api.twitter.com/1.1/statuses/update.json',
                                      data={'status': tweet},
                                      auth=oauth)
                    r.raise_for_status()
                    r_json = r.json()
                    tweet_id = r_json['id_str']
                    log.info('Tweeted about `%(domain)s` supporting GPC. Tweet ID: `%(tweet_id)s`',
                             {'domain': domain,
                              'tweet_id': tweet_id,
                              'full_response': r_json})
                    es_dao.set_tweeted(domain, wait_for=True)
        else:
            # Nothing tweetable right now; poll again in a minute.
            time.sleep(60)
| 12,017 | 0 | 187 |
60c96f5be72bddae518c87b5b2b4d754b4bd152e | 1,158 | py | Python | rhea/system/reset.py | mngr0/rhea | 9ad8d193f7f78f1d192af438568d45fb5a398c8c | [
"MIT"
] | null | null | null | rhea/system/reset.py | mngr0/rhea | 9ad8d193f7f78f1d192af438568d45fb5a398c8c | [
"MIT"
] | null | null | null | rhea/system/reset.py | mngr0/rhea | 9ad8d193f7f78f1d192af438568d45fb5a398c8c | [
"MIT"
] | null | null | null |
import myhdl
from myhdl import delay
| 33.085714 | 76 | 0.582902 |
import myhdl
from myhdl import delay
class Reset(myhdl.ResetSignal):
def __init__(self, val, active, async):
""" Reset signal
This is a thin wrapper around the myhdl.ResetSignal to
provide the generator ``pulse`` that is often used in
testbenches.
Arguments:
val (int, bool): default value of the reset signal
active (int, bool): active state, when is reset active
async (bool): asynchronous reset or not
"""
super(Reset, self).__init__(val, active, async)
def pulse(self, delays=10):
if isinstance(delays, int):
self.next = self.active
yield delay(delays)
self.next = not self.active
elif isinstance(delays, tuple):
assert len(delays) in (1, 2, 3), "Incorrect number of delays"
self.next = not self.active if len(delays) == 3 else self.active
for dd in delays:
yield delay(dd)
self.next = not self.val
self.next = not self.active
else:
raise ValueError("{} type not supported".format(type(delays)))
| 573 | 523 | 23 |
6b4c0e4b75266ca932765f704bc1f94c6b42332b | 8,005 | py | Python | frappe/database/query.py | juhiwue/frappe | 77f88af74e037dcca0bae3f3ef1e8cae7fb0f699 | [
"MIT"
] | null | null | null | frappe/database/query.py | juhiwue/frappe | 77f88af74e037dcca0bae3f3ef1e8cae7fb0f699 | [
"MIT"
] | null | null | null | frappe/database/query.py | juhiwue/frappe | 77f88af74e037dcca0bae3f3ef1e8cae7fb0f699 | [
"MIT"
] | null | null | null | import operator
import re
from typing import Any, Dict, List, Tuple, Union
import frappe
from frappe import _
from frappe.query_builder import Criterion, Field, Order
def like(key: str, value: str) -> frappe.qb:
    """Wrapper method for `LIKE`

    Args:
        key (str): field
        value (str): pattern criterion (may contain SQL wildcards)

    Returns:
        frappe.qb: `frappe.qb` object with `LIKE`
    """
    return Field(key).like(value)
def func_in(key: str, value: Union[List, Tuple]) -> frappe.qb:
    """Wrapper method for `IN`

    Args:
        key (str): field
        value (Union[List, Tuple]): allowed values

    Returns:
        frappe.qb: `frappe.qb` object with `IN`
    """
    return Field(key).isin(value)
def not_like(key: str, value: str) -> frappe.qb:
    """Wrapper method for `NOT LIKE`

    Args:
        key (str): field
        value (str): pattern criterion (may contain SQL wildcards)

    Returns:
        frappe.qb: `frappe.qb` object with `NOT LIKE`
    """
    return Field(key).not_like(value)
def func_not_in(key: str, value: Union[List, Tuple]):
    """Wrapper method for `NOT IN`

    Args:
        key (str): field
        value (Union[List, Tuple]): disallowed values

    Returns:
        frappe.qb: `frappe.qb` object with `NOT IN`
    """
    return Field(key).notin(value)
def func_regex(key: str, value: str) -> frappe.qb:
    """Wrapper method for `REGEX`

    Args:
        key (str): field
        value (str): regular-expression criterion

    Returns:
        frappe.qb: `frappe.qb` object with `REGEX`
    """
    return Field(key).regex(value)
def func_between(key: str, value: Union[List, Tuple]) -> frappe.qb:
    """Wrapper method for `BETWEEN`

    Args:
        key (str): field
        value (Union[List, Tuple]): two-element (lower, upper) bounds

    Returns:
        frappe.qb: `frappe.qb` object with `BETWEEN`
    """
    return Field(key)[slice(*value)]
def make_function(key: Any, value: Union[int, str]):
    """Return a function query built from an (operator, operand) pair.

    Args:
        key (Any): field
        value: two-element sequence: (operator key from OPERATOR_MAP, criterion)

    Returns:
        frappe.qb: frappe.qb object
    """
    return OPERATOR_MAP[value[0]](key, value[1])
def change_orderby(order: str):
    """Convert an "<field> <direction>" orderby string into a standard (field, Order) pair.

    Args:
        order (str): field and direction, e.g. "creation asc"

    Returns:
        tuple: (field name, Order.asc or Order.desc)
    """
    parts = order.split()
    # Anything other than (case-insensitive) "asc" sorts descending.
    direction = Order.asc if parts[1].lower() == "asc" else Order.desc
    return parts[0], direction
# Maps filter operator strings to the callables that build the corresponding
# query criterion (stdlib operator functions or the wrapper functions above).
OPERATOR_MAP = {
    "+": operator.add,
    "=": operator.eq,
    "-": operator.sub,
    "!=": operator.ne,
    "<": operator.lt,
    ">": operator.gt,
    "<=": operator.le,
    ">=": operator.ge,
    "in": func_in,
    "not in": func_not_in,
    "like": like,
    "not like": not_like,
    "regex": func_regex,
    "between": func_between
}
| 24.331307 | 101 | 0.672705 | import operator
import re
from typing import Any, Dict, List, Tuple, Union
import frappe
from frappe import _
from frappe.query_builder import Criterion, Field, Order
def like(key: str, value: str) -> frappe.qb:
    """Wrapper method for `LIKE`

    Args:
        key (str): field
        value (str): pattern criterion (may contain SQL wildcards)

    Returns:
        frappe.qb: `frappe.qb` object with `LIKE`
    """
    return Field(key).like(value)
def func_in(key: str, value: Union[List, Tuple]) -> frappe.qb:
    """Wrapper method for `IN`

    Args:
        key (str): field
        value (Union[List, Tuple]): allowed values

    Returns:
        frappe.qb: `frappe.qb` object with `IN`
    """
    return Field(key).isin(value)
def not_like(key: str, value: str) -> frappe.qb:
    """Wrapper method for `NOT LIKE`

    Args:
        key (str): field
        value (str): pattern criterion (may contain SQL wildcards)

    Returns:
        frappe.qb: `frappe.qb` object with `NOT LIKE`
    """
    return Field(key).not_like(value)
def func_not_in(key: str, value: Union[List, Tuple]):
    """Wrapper method for `NOT IN`

    Args:
        key (str): field
        value (Union[List, Tuple]): disallowed values

    Returns:
        frappe.qb: `frappe.qb` object with `NOT IN`
    """
    return Field(key).notin(value)
def func_regex(key: str, value: str) -> frappe.qb:
    """Wrapper method for `REGEX`

    Args:
        key (str): field
        value (str): regular-expression criterion

    Returns:
        frappe.qb: `frappe.qb` object with `REGEX`
    """
    return Field(key).regex(value)
def func_between(key: str, value: Union[List, Tuple]) -> frappe.qb:
    """Wrapper method for `BETWEEN`

    Args:
        key (str): field
        value (Union[List, Tuple]): two-element (lower, upper) bounds

    Returns:
        frappe.qb: `frappe.qb` object with `BETWEEN`
    """
    return Field(key)[slice(*value)]
def make_function(key: Any, value: Union[int, str]):
	"""Resolve ``value[0]`` to an operator and apply it to ``key`` and ``value[1]``.

	Args:
		key (Any): field or expression used as the left operand
		value (Union[int, str]): pair of ``(operator token, right operand)``

	Returns:
		frappe.qb: criterion built from the mapped operator
	"""
	op_token, operand = value[0], value[1]
	return OPERATOR_MAP[op_token](key, operand)
def change_orderby(order: str):
	"""Convert an ``"<field> <direction>"`` string into a standard Order pair.

	Args:
		order (str): field name followed by an ordering keyword,
			e.g. ``"creation asc"``; anything other than ``asc``
			(case-insensitive) is treated as descending

	Returns:
		tuple: ``(field, Order)`` where Order is ``Order.asc`` or ``Order.desc``
	"""
	# the original duplicated the assign/return tail for both branches;
	# a single conditional expression covers both cases
	parts = order.split()
	direction = Order.asc if parts[1].lower() == "asc" else Order.desc
	return parts[0], direction
# Maps filter-operator tokens to the callables that build the matching
# criterion.  NOTE: insertion order is significant -- ``dict_query`` slices
# the *last four* keys ("like", "not like", "regex", "between") via
# ``list(OPERATOR_MAP.keys())[-4:]`` to special-case those operators, so do
# not append or reorder entries carelessly.
OPERATOR_MAP = {
	"+": operator.add,
	"=": operator.eq,
	"-": operator.sub,
	"!=": operator.ne,
	"<": operator.lt,
	">": operator.gt,
	"<=": operator.le,
	">=": operator.ge,
	"in": func_in,
	"not in": func_not_in,
	"like": like,
	"not like": not_like,
	"regex": func_regex,
	"between": func_between
}
class Query:
	"""Builds frappe.qb (pypika) query objects from dict/list/Criterion filters."""

	def get_condition(self, table: str, **kwargs) -> frappe.qb:
		"""Get initial table object
		Args:
			table (str): DocType
		Returns:
			frappe.qb: DocType with initial condition
		"""
		# ``update``/``into`` kwargs select the statement kind; default is SELECT-style FROM
		if kwargs.get("update"):
			return frappe.qb.update(table)
		if kwargs.get("into"):
			return frappe.qb.into(table)
		return frappe.qb.from_(table)
	def criterion_query(self, table: str, criterion: Criterion, **kwargs) -> frappe.qb:
		"""Generate filters from Criterion objects
		Args:
			table (str): DocType
			criterion (Criterion): Filters
		Returns:
			frappe.qb: condition object
		"""
		condition = self.add_conditions(self.get_condition(table, **kwargs), **kwargs)
		return condition.where(criterion)
	def add_conditions(self, conditions: frappe.qb, **kwargs):
		"""Adding additional conditions
		Args:
			conditions (frappe.qb): built conditions
		Returns:
			conditions (frappe.qb): frappe.qb object
		"""
		if kwargs.get("orderby"):
			orderby = kwargs.get("orderby")
			order = kwargs.get("order") if kwargs.get("order") else Order.desc
			# an orderby string like "field asc" carries its own direction
			if isinstance(orderby, str) and len(orderby.split()) > 1:
				orderby, order = change_orderby(orderby)
			conditions = conditions.orderby(orderby, order=order)
		if kwargs.get("limit"):
			conditions = conditions.limit(kwargs.get("limit"))
		if kwargs.get("distinct"):
			conditions = conditions.distinct()
		if kwargs.get("for_update"):
			conditions = conditions.for_update()
		return conditions
	def misc_query(self, table: str, filters: Union[List, Tuple] = None, **kwargs):
		"""Build conditions using the given Lists or Tuple filters
		Args:
			table (str): DocType
			filters (Union[List, Tuple], optional): Filters. Defaults to None.
		"""
		conditions = self.get_condition(table, **kwargs)
		if not filters:
			return conditions
		if isinstance(filters, list):
			for f in filters:
				# a non-nested element means ``filters`` itself is one
				# (field, operator, value) triple -- handle it and stop
				if not isinstance(f, (list, tuple)):
					_operator = OPERATOR_MAP[filters[1]]
					if not isinstance(filters[0], str):
						conditions = make_function(filters[0], filters[2])
						break
					conditions = conditions.where(_operator(Field(filters[0]), filters[2]))
					break
				else:
					# list of (field, operator, value) triples
					_operator = OPERATOR_MAP[f[1]]
					conditions = conditions.where(_operator(Field(f[0]), f[2]))
		conditions = self.add_conditions(conditions, **kwargs)
		return conditions
	def dict_query(self, table: str, filters: Dict[str, Union[str, int]] = None, **kwargs) -> frappe.qb:
		"""Build conditions using the given dictionary filters
		Args:
			table (str): DocType
			filters (Dict[str, Union[str, int]], optional): Filters. Defaults to None.
		Returns:
			frappe.qb: conditions object
		"""
		conditions = self.get_condition(table, **kwargs)
		if not filters:
			conditions = self.add_conditions(conditions, **kwargs)
			return conditions
		for key in filters:
			value = filters.get(key)
			_operator = OPERATOR_MAP["="]
			if not isinstance(key, str):
				conditions = conditions.where(make_function(key, value))
				continue
			if isinstance(value, (list, tuple)):
				# the last four OPERATOR_MAP keys (like/not like/regex/between)
				# take the raw key, not a Field -- relies on dict insertion order
				if isinstance(value[1], (list, tuple)) or value[0] in list(OPERATOR_MAP.keys())[-4:]:
					_operator = OPERATOR_MAP[value[0]]
					conditions = conditions.where(_operator(key, value[1]))
				else:
					_operator = OPERATOR_MAP[value[0]]
					conditions = conditions.where(_operator(Field(key), value[1]))
			else:
				# bare value means equality
				conditions = conditions.where(_operator(Field(key), value))
		conditions = self.add_conditions(conditions, **kwargs)
		return conditions
	def build_conditions(
		self,
		table: str,
		filters: Union[Dict[str, Union[str, int]], str, int] = None,
		**kwargs
	) -> frappe.qb:
		"""Build conditions for sql query
		Args:
			filters (Union[Dict[str, Union[str, int]], str, int]): conditions in Dict
			table (str): DocType
		Returns:
			frappe.qb: frappe.qb conditions object
		"""
		# a scalar filter is shorthand for filtering on the document name
		if isinstance(filters, int) or isinstance(filters, str):
			filters = {"name": str(filters)}
		if isinstance(filters, Criterion):
			criterion = self.criterion_query(table, filters, **kwargs)
		elif isinstance(filters, (list, tuple)):
			criterion = self.misc_query(table, filters, **kwargs)
		else:
			criterion = self.dict_query(filters=filters, table=table, **kwargs)
		return criterion
	def get_sql(
		self,
		table: str,
		fields: Union[List, Tuple],
		filters: Union[Dict[str, Union[str, int]], str, int] = None,
		**kwargs
	):
		"""Build a full SELECT query for ``table`` with ``fields`` and ``filters``."""
		criterion = self.build_conditions(table, filters, **kwargs)
		if isinstance(fields, (list, tuple)):
			query = criterion.select(*kwargs.get("field_objects", fields))
		elif isinstance(fields, Criterion):
			# NOTE(review): this branch and the else below are identical --
			# possibly intentional for readability, possibly a leftover
			query = criterion.select(fields)
		else:
			query = criterion.select(fields)
		return query
class Permission:
@classmethod
def check_permissions(cls, query, **kwargs):
if not isinstance(query, str):
query = query.get_sql()
doctype = cls.get_tables_from_query(query)
if isinstance(doctype, str):
doctype = [doctype]
for dt in doctype:
dt = re.sub("^tab", "", dt)
if not frappe.has_permission(
dt,
"select",
user=kwargs.get("user"),
parent_doctype=kwargs.get("parent_doctype"),
) and not frappe.has_permission(
dt,
"read",
user=kwargs.get("user"),
parent_doctype=kwargs.get("parent_doctype"),
):
frappe.throw(
_("Insufficient Permission for {0}").format(frappe.bold(dt))
)
@staticmethod
def get_tables_from_query(query: str):
return [table for table in re.findall(r"\w+", query) if table.startswith("tab")] | 1,124 | 4,299 | 46 |
b32d432b5b14e98ef90fed8ac7f7c9ad1ef97f2b | 1,470 | py | Python | lib/exchange/upbit/adapter/post_orders_response_adapter.py | webclinic017/bitrush | 5d76c98a17bb830dba1bdd103475c120903ded90 | [
"MIT"
] | 1 | 2022-01-09T21:17:23.000Z | 2022-01-09T21:17:23.000Z | lib/exchange/upbit/adapter/post_orders_response_adapter.py | webclinic017/bitrush | 5d76c98a17bb830dba1bdd103475c120903ded90 | [
"MIT"
] | null | null | null | lib/exchange/upbit/adapter/post_orders_response_adapter.py | webclinic017/bitrush | 5d76c98a17bb830dba1bdd103475c120903ded90 | [
"MIT"
] | 1 | 2022-01-09T21:17:17.000Z | 2022-01-09T21:17:17.000Z | import json
from decimal import Decimal
from typing import Optional, Dict
from lib.exchange.upbit.model.post_orders_response import PostOrdersResponse
from lib.order.order import Order
from lib.order.order_type import OrderType
from lib.type import JsonString
| 28.823529 | 86 | 0.682993 | import json
from decimal import Decimal
from typing import Optional, Dict
from lib.exchange.upbit.model.post_orders_response import PostOrdersResponse
from lib.order.order import Order
from lib.order.order_type import OrderType
from lib.type import JsonString
class PostOrdersResponseAdapter(Order):
    """Adapts an Upbit POST /orders response to the generic Order interface."""

    response: PostOrdersResponse
    custom_order_id: str

    def __init__(self, response: Dict, custom_order_id: str):
        """
        Args:
            response (Dict): raw JSON body returned by Upbit's order endpoint
            custom_order_id (str): custom order id to group each order items
        """
        self.response = PostOrdersResponse(**response)
        self.custom_order_id = custom_order_id

    def get_id(self) -> str:
        """Return the caller-assigned grouping id, not Upbit's own uuid."""
        return self.custom_order_id

    def get_exchange(self) -> str:
        """Name of the exchange this adapter belongs to."""
        return "upbit"

    def is_filled(self) -> bool:
        """An order still in the "wait" state is considered unfilled."""
        return not self.response.state == "wait"

    def get_order_type(self) -> OrderType:
        """Upbit encodes buys as side="bid"; anything else is a sell."""
        if self.response.side == "bid":
            return OrderType.BUY
        return OrderType.SELL

    def get_ticker(self) -> str:
        """Strip the "KRW-" market prefix to obtain the bare ticker."""
        market = self.response.market
        return market.replace("KRW-", "")

    def get_volume(self) -> Decimal:
        """Ordered volume; zero when the response carries no volume."""
        raw_volume = self.response.volume
        if raw_volume:
            return Decimal(raw_volume)
        return Decimal("0")

    def get_avg_price(self) -> Decimal:
        """Order price; zero when the response carries no price."""
        raw_price = self.response.price
        if raw_price:
            return Decimal(raw_price)
        return Decimal("0")

    def get_amount(self):
        # order is just placed, so nothing has been executed yet
        return Decimal("0")

    def get_raw_data(self) -> Dict:
        """Return the wrapped response as a plain dict."""
        return self.response.dict()
862cafed433a27c4c636e2ae6a6289b7ce389dab | 2,788 | py | Python | test/cut/test_cut_set_creation.py | pzelasko/lhotse | 41984467d2ead1dc69f418638b969e46f63308c7 | [
"Apache-2.0"
] | 64 | 2020-04-27T14:55:15.000Z | 2020-10-25T06:57:56.000Z | test/cut/test_cut_set_creation.py | pzelasko/lhotse | 41984467d2ead1dc69f418638b969e46f63308c7 | [
"Apache-2.0"
] | 85 | 2020-04-26T06:29:47.000Z | 2020-10-19T20:28:52.000Z | test/cut/test_cut_set_creation.py | pzelasko/lhotse | 41984467d2ead1dc69f418638b969e46f63308c7 | [
"Apache-2.0"
] | 17 | 2020-06-19T06:26:33.000Z | 2020-10-12T15:19:15.000Z | import pytest
from lhotse.cut import make_windowed_cuts_from_features
from lhotse.features import Features, FeatureSet
def features(rec_id, start, duration):
    """Helper method for fixture readability (specify only relevant attributes)."""
    # num_frames is derived from duration at the 0.01 s frame_shift used below;
    # all storage-related fields are dummies because the tests never load data
    return Features(
        recording_id=rec_id,
        channels=0,
        start=start,
        duration=duration,
        sampling_rate=16000,
        type="irrelevant",
        num_frames=round(duration / 0.01),
        num_features=23,
        storage_type="irrelevant",
        storage_path="irrelevant",
        storage_key="irrelevant",
        frame_shift=0.01,
    )
@pytest.fixture
# noinspection PyMethodMayBeStatic
| 34 | 83 | 0.641679 | import pytest
from lhotse.cut import make_windowed_cuts_from_features
from lhotse.features import Features, FeatureSet
def features(rec_id, start, duration):
    """Build a Features manifest with only the fields these tests care about.

    Storage-related attributes are dummies because no data is ever loaded;
    num_frames is kept consistent with the 0.01 s frame shift.
    """
    frame_shift = 0.01
    return Features(
        recording_id=rec_id,
        channels=0,
        start=start,
        duration=duration,
        sampling_rate=16000,
        type="irrelevant",
        num_frames=round(duration / frame_shift),
        num_features=23,
        storage_type="irrelevant",
        storage_path="irrelevant",
        storage_key="irrelevant",
        frame_shift=frame_shift,
    )
@pytest.fixture
def feature_set():
    """Two recordings: 600 s and 357 s, both starting at t=0."""
    manifests = [features("rec-1", 0.0, 600.0), features("rec-2", 0.0, 357.0)]
    return FeatureSet(features=manifests)
# noinspection PyMethodMayBeStatic
class TestMakeWindowedCutsFromFeatures:
    """Windowing 600 s + 357 s of features into 5 s cuts."""

    def test_full_shift_no_shorter_cuts(self, feature_set):
        """Non-overlapping windows; the trailing partial window is dropped."""
        cuts = make_windowed_cuts_from_features(
            feature_set=feature_set, cut_duration=5.0
        )
        assert len(cuts) == 191
        assert len([c for c in cuts if c.recording_id == "rec-1"]) == 120
        assert len([c for c in cuts if c.recording_id == "rec-2"]) == 71
        assert all(c.duration == 5.0 for c in cuts)

    def test_full_shift_with_shorter_cuts(self, feature_set):
        """Non-overlapping windows; rec-2 keeps its trailing 2.0 s window."""
        cuts = make_windowed_cuts_from_features(
            feature_set=feature_set, cut_duration=5.0, keep_shorter_windows=True
        )
        assert len(cuts) == 192
        assert len([c for c in cuts if c.recording_id == "rec-1"]) == 120
        assert len([c for c in cuts if c.recording_id == "rec-2"]) == 72
        assert not all(c.duration == 5.0 for c in cuts)

    def test_half_shift_no_shorter_cuts(self, feature_set):
        """50% overlapping windows; partial trailing windows are dropped."""
        cuts = make_windowed_cuts_from_features(
            feature_set=feature_set, cut_duration=5.0, cut_shift=2.5
        )
        assert len(cuts) == 380
        # below, the last window is only 2.5s duration
        assert len([c for c in cuts if c.recording_id == "rec-1"]) == 239
        # below, the last two windows are 2.0s and 4.5s duration
        assert len([c for c in cuts if c.recording_id == "rec-2"]) == 141
        assert all(c.duration == 5.0 for c in cuts)

    def test_half_shift_with_shorter_cuts(self, feature_set):
        """50% overlapping windows; partial trailing windows are kept."""
        cuts = make_windowed_cuts_from_features(
            feature_set=feature_set,
            cut_duration=5.0,
            cut_shift=2.5,
            keep_shorter_windows=True,
        )
        assert len(cuts) == 383
        assert len([c for c in cuts if c.recording_id == "rec-1"]) == 240
        assert len([c for c in cuts if c.recording_id == "rec-2"]) == 143
        assert not all(c.duration == 5.0 for c in cuts)
| 1,941 | 18 | 151 |
1cdf6f8a3f98fd2219e9f83b7c43e32f18d29505 | 3,608 | py | Python | torchdynamo/allowed_functions.py | frank-wei/torchdynamo | 26c4c1b593bebf4246e566749dade38254b59ffb | [
"BSD-3-Clause"
] | 40 | 2022-03-02T01:29:29.000Z | 2022-03-31T16:02:01.000Z | torchdynamo/allowed_functions.py | frank-wei/torchdynamo | 26c4c1b593bebf4246e566749dade38254b59ffb | [
"BSD-3-Clause"
] | 62 | 2022-03-11T00:12:37.000Z | 2022-03-31T20:04:27.000Z | torchdynamo/allowed_functions.py | frank-wei/torchdynamo | 26c4c1b593bebf4246e566749dade38254b59ffb | [
"BSD-3-Clause"
] | 4 | 2022-03-09T18:43:20.000Z | 2022-03-31T01:01:26.000Z | import builtins
import collections
import copy
import functools
import itertools
import math
import operator
import types
import warnings
from functools import lru_cache
import numpy
import torch
from . import config
@lru_cache(None)
def _allowed_function_ids():
    """
    Walk torch.* (and math) and collect id() -> qualified name for everything
    dynamo is allowed to trace into.
    """
    warnings.filterwarnings("ignore", category=UserWarning, module="torch.distributed")
    torch.distributions.Distribution.set_default_validate_args(False)
    torch_object_ids = dict()

    # this copy of the function called _find_torch_objects without defining
    # it (NameError at first use) and was double-wrapped in lru_cache;
    # the nested walker is restored and a single cache decorator kept
    def _find_torch_objects(module):
        # skip modules the config explicitly ignores
        if any(
            module.__name__.startswith(mod_name)
            for mod_name in config.allowed_functions_module_string_ignorelist
        ):
            return
        torch_object_ids[id(module)] = module.__name__
        for name, obj in list(module.__dict__.items()):
            if id(obj) not in torch_object_ids:
                if isinstance(obj, types.ModuleType):
                    # only descend into torch's own submodules
                    if obj.__name__.startswith("torch."):
                        torch_object_ids[id(obj)] = f"{module.__name__}.{name}"
                        _find_torch_objects(obj)
                else:
                    torch_object_ids[id(obj)] = f"{module.__name__}.{name}"

    _find_torch_objects(torch)
    _find_torch_objects(math)
    # the explicit denylist always wins over anything collected above
    for idx in _disallowed_function_ids():
        if idx in torch_object_ids:
            del torch_object_ids[idx]
    return torch_object_ids
def is_allowed(obj):
    """Return True if dynamo may trace calls to ``obj`` (e.g. torch.add)."""
    allowed = _allowed_function_ids()
    return id(obj) in allowed
def is_disallowed(obj):
    """Return True if ``obj`` is on the explicit denylist and must never be traced."""
    return id(obj) in _disallowed_function_ids()
@lru_cache(None)
@lru_cache(None)
| 26.925373 | 87 | 0.625277 | import builtins
import collections
import copy
import functools
import itertools
import math
import operator
import types
import warnings
from functools import lru_cache
import numpy
import torch
from . import config
@lru_cache(None)
def _disallowed_function_ids():
remove = [
True,
False,
None,
collections.OrderedDict,
copy.copy,
copy.deepcopy,
torch.autocast_decrement_nesting,
torch.autocast_increment_nesting,
torch.autograd.grad,
torch.clear_autocast_cache,
torch.cuda.current_device,
torch.distributions.constraints.is_dependent,
torch.distributions.normal.Normal,
torch.inference_mode,
torch.set_anomaly_enabled,
torch.set_autocast_cache_enabled,
torch.set_autocast_cpu_dtype,
torch.set_autocast_cpu_enabled,
torch.set_autocast_enabled,
torch.set_autocast_gpu_dtype,
torch.autograd.profiler.profile,
warnings.warn,
]
return {id(x) for x in remove}
@lru_cache(None)
def _allowed_function_ids():
    """
    Walk torch.* and get the ids of all the stuff in it
    """
    # building the allowlist mutates global state on purpose: silence the
    # torch.distributed UserWarnings the walk triggers and disable
    # distribution argument validation
    warnings.filterwarnings("ignore", category=UserWarning, module="torch.distributed")
    torch.distributions.Distribution.set_default_validate_args(False)
    torch_object_ids = dict()
    def _find_torch_objects(module):
        # recursively record id -> qualified name for every attribute of
        # torch submodules, skipping modules named in the config ignorelist
        if any(
            module.__name__.startswith(mod_name)
            for mod_name in config.allowed_functions_module_string_ignorelist
        ):
            return
        torch_object_ids[id(module)] = module.__name__
        for name, obj in list(module.__dict__.items()):
            if id(obj) not in torch_object_ids:
                if isinstance(obj, types.ModuleType):
                    # only descend into torch's own submodules
                    if obj.__name__.startswith("torch."):
                        torch_object_ids[id(obj)] = f"{module.__name__}.{name}"
                        _find_torch_objects(obj)
                else:
                    torch_object_ids[id(obj)] = f"{module.__name__}.{name}"
    _find_torch_objects(torch)
    _find_torch_objects(math)
    # the explicit denylist always wins over anything collected above
    for idx in _disallowed_function_ids():
        if idx in torch_object_ids:
            del torch_object_ids[idx]
    return torch_object_ids
def is_allowed(obj):
    """Return True if dynamo may trace calls to ``obj`` (e.g. torch.add)."""
    allowed = _allowed_function_ids()
    return id(obj) in allowed
def is_disallowed(obj):
    """Return True if ``obj`` is on the explicit denylist and must never be traced."""
    return id(obj) in _disallowed_function_ids()
@lru_cache(None)
def _builtin_function_ids():
rv = {
id(v): f"builtins.{k}"
for k, v in builtins.__dict__.items()
if not k.startswith("_") and callable(v)
}
rv.update(
{
id(v): f"operator.{k}"
for k, v in operator.__dict__.items()
if not k.startswith("_") and callable(v)
}
)
rv.update(
{id(v): f"functools.{v.__name__}" for v in (itertools.chain, itertools.islice)}
)
rv[id(functools.reduce)] = "functools.reduce"
return rv
def is_builtin(obj):
    """Return True if ``obj`` is one of the known builtin/operator/itertools callables."""
    known_ids = _builtin_function_ids()
    return id(obj) in known_ids
@lru_cache(None)
def _numpy_function_ids():
rv = dict()
for mod in (numpy, numpy.random):
rv.update(
{
id(v): f"{mod.__name__}.{k}"
for k, v in mod.__dict__.items()
if callable(v)
and (getattr(v, "__module__", None) or mod.__name__) == mod.__name__
}
)
return rv
def is_numpy(obj):
    """Return True for ndarray instances and for known numpy-module callables."""
    if isinstance(obj, numpy.ndarray):
        return True
    return id(obj) in _numpy_function_ids()
| 2,432 | 0 | 139 |
d10f5a7b45ca2a640954b3379de28abe47aa8a4b | 9,790 | py | Python | utils/cloze_mc_model.py | kenchan0226/control-sum-cmdp | 5181e8e0c9bf6bef48f66457e06d3f398f4a428a | [
"MIT"
] | 3 | 2021-08-10T02:31:24.000Z | 2021-12-28T09:34:01.000Z | utils/cloze_mc_model.py | kenchan0226/control-sum-cmdp | 5181e8e0c9bf6bef48f66457e06d3f398f4a428a | [
"MIT"
] | null | null | null | utils/cloze_mc_model.py | kenchan0226/control-sum-cmdp | 5181e8e0c9bf6bef48f66457e06d3f398f4a428a | [
"MIT"
] | 1 | 2021-12-02T15:53:22.000Z | 2021-12-02T15:53:22.000Z | from transformers import BertConfig, BertForSequenceClassification, BertTokenizer
import torch
import os
import json
import logging
MAX_CONTEXT_LEN = 120
if __name__ == "__main__":
cloze_model = ClozeMCModel("cuda:0")
split_dir="/mnt/sharedfolder/hpchan/datasets/cased-cnn-dailymail_coref/train"
with open(os.path.join(split_dir, "{}.json".format(0))) as f:
js = json.loads(f.read())
"""
masked_questions_ids_list = js["masked_question_ids_list"]
multiple_choices_ids_list = js["multiple_choices_ids_list"]
answer_id_list = js["answer_idx_list"]
summary_sent_list = js["abstract"]
summary_str = ' '.join(summary_sent_list)
context_str_list = [summary_str for i in range(len(answer_id_list))]
context_str_list[-1] = "Mentally ill inmates in Miami are housed on the `` forgotten floor '' Judge Steven Leifman says most are there as a result of `` avoidable felonies ''"
confidence_score = cloze_model.compute_confidence_score(masked_questions_ids_list, multiple_choices_ids_list, answer_id_list, context_str_list)
print(masked_questions_ids_list)
print(multiple_choices_ids_list)
print(context_str_list)
print(answer_id_list)
print(len(answer_id_list))
print(confidence_score)
"""
masked_questions_ids_2dlist = []
multiple_choices_ids_2dlist = []
answer_id_2dlist = []
context_str_2dlist = []
for i in range(3):
with open(os.path.join(split_dir, "{}.json".format(i))) as f:
js = json.loads(f.read())
masked_questions_ids_list = js["masked_question_ids_list"]
multiple_choices_ids_list = js["multiple_choices_ids_list"]
answer_id_list = js["answer_idx_list"]
summary_sent_list = js["abstract"]
summary_str = ' '.join(summary_sent_list)
context_str_list = [summary_str for i in range(len(answer_id_list))]
masked_questions_ids_2dlist.append(masked_questions_ids_list)
multiple_choices_ids_2dlist.append(multiple_choices_ids_list)
answer_id_2dlist.append(answer_id_list)
context_str_2dlist.append(context_str_list)
num_questions_per_sample = [len(questions) for questions in masked_questions_ids_2dlist]
print(num_questions_per_sample)
flattened_masked_questions_ids_list = []
flattened_answer_id_list = []
flattened_multiple_choices_ids_list = []
flattened_context_str_list = []
for masked_question_ids_list, answer_id_list, multiple_choices_ids_list, context_str_list in zip(masked_questions_ids_2dlist, answer_id_2dlist,
multiple_choices_ids_2dlist, context_str_2dlist):
flattened_masked_questions_ids_list += masked_question_ids_list
flattened_answer_id_list += answer_id_list
flattened_multiple_choices_ids_list += multiple_choices_ids_list
flattened_context_str_list += context_str_list
print(flattened_context_str_list)
print(len(flattened_context_str_list))
print(len(flattened_masked_questions_ids_list))
print(len(flattened_answer_id_list))
print(len(flattened_multiple_choices_ids_list))
confidence_score = cloze_model.compute_confidence_score(flattened_masked_questions_ids_list, flattened_multiple_choices_ids_list, flattened_answer_id_list, flattened_context_str_list)
print(confidence_score)
num_processed_samples = 0
score_for_each_batch = []
for i in range(len(num_questions_per_sample)):
# average for each batch
avg_score = confidence_score[num_processed_samples:num_processed_samples+num_questions_per_sample[i]].mean(dim=0)
score_for_each_batch.append(avg_score)
print(num_processed_samples)
print(num_processed_samples+num_questions_per_sample[i])
num_processed_samples += num_questions_per_sample[i]
print(score_for_each_batch)
score_for_each_batch = torch.cat(score_for_each_batch, dim=0)
print(score_for_each_batch)
print(score_for_each_batch.size())
| 56.264368 | 187 | 0.71665 | from transformers import BertConfig, BertForSequenceClassification, BertTokenizer
import torch
import os
import json
import logging
MAX_CONTEXT_LEN = 120
class ClozeMCModel:
    """BERT-based multiple-choice cloze scorer.

    Wraps a fine-tuned BertForSequenceClassification (4-way classifier) and
    scores how confidently it picks the ground-truth choice of a masked
    (cloze) question given a context string.
    """
    def __init__(self, device):
        """Load the fine-tuned model/tokenizer and cache special-token ids.

        :param device: device the *input* tensors are moved to; the model
            itself is always placed with ``.cuda()``
        """
        self.device = device
        self.model_path = "saved_models/entity_cloze_mc_coref_cnn_remove_duplicate"
        config = BertConfig.from_pretrained(self.model_path)
        self.tokenizer = BertTokenizer.from_pretrained(self.model_path)
        self.model = BertForSequenceClassification.from_pretrained(self.model_path, config=config, cache_dir="../../").cuda()
        self.model.eval()
        self.batch_size = 42
        # each special-token id is wrapped in a single-element list so it can
        # be concatenated with other id lists when assembling inputs
        ids = self.tokenizer.convert_tokens_to_ids(['[CLS]', '[SEP]', '[MC]'])
        self.cls_id = [ids[0]]
        self.sep_id = [ids[1]]
        self.mc_id = [ids[2]]
    def encode(self, masked_question_list, multiple_choices_list, answer_idx_list):
        """Tokenize questions and their 4-way choice sets, dropping overlong items.

        Choices are joined into one string separated by the ``[MC]`` marker.
        Examples whose question or choices exceed 125 wordpieces are filtered
        out together with their answer index.

        :return: (question_ids_list, choices_ids_list, filtered_answer_idx_list)
        """
        masked_question_ids_list = []
        multiple_choices_ids_list = []
        answer_idx_list_filtered = []
        for masked_question, multiple_choices, answer_idx in zip(masked_question_list, multiple_choices_list,
                                                                 answer_idx_list):
            masked_question_ids = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(masked_question))
            multiple_choices_str = multiple_choices[0] + ' [MC] ' + multiple_choices[1] + ' [MC] ' + multiple_choices[
                2] + ' [MC] ' + multiple_choices[3]
            multiple_choices_ids = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(multiple_choices_str))
            if len(masked_question_ids) <= 125 and len(multiple_choices_ids) <= 125:
                masked_question_ids_list.append(masked_question_ids)
                multiple_choices_ids_list.append(multiple_choices_ids)
                answer_idx_list_filtered.append(answer_idx)
        return masked_question_ids_list, multiple_choices_ids_list, answer_idx_list_filtered
    def compute_batch(self, masked_questions_ids_batch, multiple_choices_ids_batch, answer_ids_batch, context_str_batch):
        """Run one batch through BERT and return the probability of each
        example's ground-truth choice.

        Per-example input layout:
        ``[CLS] question [SEP] context(<= MAX_CONTEXT_LEN tokens) [SEP] choices [SEP]``
        with token_type 0 for the ``[CLS] question [SEP]`` segment and 1 after.
        """
        input_ids_list = []
        input_lens = []
        token_type_ids_list = []
        for masked_question_ids, multiple_choices_ids, context_str in zip(masked_questions_ids_batch, multiple_choices_ids_batch, context_str_batch):
            context_ids = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(context_str))
            context_ids = context_ids[:MAX_CONTEXT_LEN]
            first_segment_ids = self.cls_id + masked_question_ids + self.sep_id
            first_segment_end_position = len(first_segment_ids) - 1
            input_ids = first_segment_ids + context_ids + self.sep_id + multiple_choices_ids + self.sep_id
            input_ids_list.append(input_ids)
            input_lens.append(len(input_ids))
            token_type_ids = [0 if i <= first_segment_end_position else 1 for i in range( len(input_ids) )]
            token_type_ids_list.append(token_type_ids)
        # right-pad every example to the longest input in the batch;
        # padded positions get token_type 1 and are masked out below
        max_input_len = max(input_lens)
        input_ids_list_padded = []
        token_type_ids_list_padded = []
        for input_ids, token_type_ids in zip(input_ids_list, token_type_ids_list):
            padding_len = max_input_len - len(input_ids)
            input_ids_list_padded.append(input_ids + [self.tokenizer.pad_token_id] * padding_len)
            token_type_ids_list_padded.append(token_type_ids + [1] * padding_len)
        with torch.no_grad():
            input_ids_tensor = torch.LongTensor(input_ids_list_padded).to(self.device)
            token_type_ids_tensor = torch.LongTensor(token_type_ids_list_padded).to(self.device)
            answer_ids_tensor = torch.LongTensor(answer_ids_batch).to(self.device)
            attention_mask = torch.ne(input_ids_tensor, self.tokenizer.pad_token_id).float()
            try:
                outputs = self.model(input_ids=input_ids_tensor, attention_mask=attention_mask, token_type_ids=token_type_ids_tensor)
            except:  # NOTE(review): bare except -- dumps the batch for debugging, then kills the process
                logging.info(input_ids_tensor.size())
                logging.info(attention_mask.size())
                logging.info(token_type_ids_tensor.size())
                logging.info()  # NOTE(review): logging.info() without a message raises TypeError
                for input_ids in input_ids_list_padded:
                    logging.info(input_ids)
                    logging.info(self.tokenizer(input_ids, clean_up_tokenization_spaces=False))
                for token_type_ids in token_type_ids_list_padded:
                    logging.info(token_type_ids)
                logging.info(attention_mask.cpu().numpy())
                exit()
            logits = outputs[0] # [batch_size, 4]
            confidence_scores_all = torch.nn.functional.softmax(logits, dim=1) # [batch_size, 4]
            # pick each example's probability at its ground-truth answer index
            ground_truth_confidence_scores = torch.gather(confidence_scores_all, dim=1, index=answer_ids_tensor.unsqueeze(1)) # [batch_size, 1]
            return ground_truth_confidence_scores
    def compute_confidence_score(self, masked_questions_ids_list, multiple_choices_ids_list, answer_id_list, context_str_list):
        """Score all examples in minibatches of ``self.batch_size`` and
        concatenate the per-example ground-truth probabilities."""
        iter_range = range(0, len(masked_questions_ids_list), self.batch_size)
        confidence_scores_list = []
        for batch_start in iter_range:
            masked_questions_ids_batch = masked_questions_ids_list[batch_start:batch_start+self.batch_size]
            multiple_choices_ids_batch = multiple_choices_ids_list[batch_start:batch_start+self.batch_size]
            answer_id_batch = answer_id_list[batch_start:batch_start+self.batch_size]
            context_str_batch = context_str_list[batch_start:batch_start+self.batch_size]
            confidence_scores = self.compute_batch(masked_questions_ids_batch, multiple_choices_ids_batch, answer_id_batch, context_str_batch)
            # [batch_size, 1]
            confidence_scores_list.append(confidence_scores)
        return torch.cat(confidence_scores_list, dim=0) # [len(masked_questions_ids_list)]
if __name__ == "__main__":
    # smoke test: score cloze questions from the first few training samples,
    # using each sample's own reference summary as the context
    cloze_model = ClozeMCModel("cuda:0")
    split_dir="/mnt/sharedfolder/hpchan/datasets/cased-cnn-dailymail_coref/train"
    with open(os.path.join(split_dir, "{}.json".format(0))) as f:
        js = json.loads(f.read())
    """
    masked_questions_ids_list = js["masked_question_ids_list"]
    multiple_choices_ids_list = js["multiple_choices_ids_list"]
    answer_id_list = js["answer_idx_list"]
    summary_sent_list = js["abstract"]
    summary_str = ' '.join(summary_sent_list)
    context_str_list = [summary_str for i in range(len(answer_id_list))]
    context_str_list[-1] = "Mentally ill inmates in Miami are housed on the `` forgotten floor '' Judge Steven Leifman says most are there as a result of `` avoidable felonies ''"
    confidence_score = cloze_model.compute_confidence_score(masked_questions_ids_list, multiple_choices_ids_list, answer_id_list, context_str_list)
    print(masked_questions_ids_list)
    print(multiple_choices_ids_list)
    print(context_str_list)
    print(answer_id_list)
    print(len(answer_id_list))
    print(confidence_score)
    """
    # gather questions/choices/answers for 3 samples; each question of a
    # sample shares that sample's summary string as context
    masked_questions_ids_2dlist = []
    multiple_choices_ids_2dlist = []
    answer_id_2dlist = []
    context_str_2dlist = []
    for i in range(3):
        with open(os.path.join(split_dir, "{}.json".format(i))) as f:
            js = json.loads(f.read())
        masked_questions_ids_list = js["masked_question_ids_list"]
        multiple_choices_ids_list = js["multiple_choices_ids_list"]
        answer_id_list = js["answer_idx_list"]
        summary_sent_list = js["abstract"]
        summary_str = ' '.join(summary_sent_list)
        context_str_list = [summary_str for i in range(len(answer_id_list))]
        masked_questions_ids_2dlist.append(masked_questions_ids_list)
        multiple_choices_ids_2dlist.append(multiple_choices_ids_list)
        answer_id_2dlist.append(answer_id_list)
        context_str_2dlist.append(context_str_list)
    num_questions_per_sample = [len(questions) for questions in masked_questions_ids_2dlist]
    print(num_questions_per_sample)
    # flatten the per-sample lists so everything can be scored in one call
    flattened_masked_questions_ids_list = []
    flattened_answer_id_list = []
    flattened_multiple_choices_ids_list = []
    flattened_context_str_list = []
    for masked_question_ids_list, answer_id_list, multiple_choices_ids_list, context_str_list in zip(masked_questions_ids_2dlist, answer_id_2dlist,
                                                                                                     multiple_choices_ids_2dlist, context_str_2dlist):
        flattened_masked_questions_ids_list += masked_question_ids_list
        flattened_answer_id_list += answer_id_list
        flattened_multiple_choices_ids_list += multiple_choices_ids_list
        flattened_context_str_list += context_str_list
    print(flattened_context_str_list)
    print(len(flattened_context_str_list))
    print(len(flattened_masked_questions_ids_list))
    print(len(flattened_answer_id_list))
    print(len(flattened_multiple_choices_ids_list))
    confidence_score = cloze_model.compute_confidence_score(flattened_masked_questions_ids_list, flattened_multiple_choices_ids_list, flattened_answer_id_list, flattened_context_str_list)
    print(confidence_score)
    # average the question-level scores back into one score per sample
    num_processed_samples = 0
    score_for_each_batch = []
    for i in range(len(num_questions_per_sample)):
        # average for each batch
        avg_score = confidence_score[num_processed_samples:num_processed_samples+num_questions_per_sample[i]].mean(dim=0)
        score_for_each_batch.append(avg_score)
        print(num_processed_samples)
        print(num_processed_samples+num_questions_per_sample[i])
        num_processed_samples += num_questions_per_sample[i]
    print(score_for_each_batch)
    score_for_each_batch = torch.cat(score_for_each_batch, dim=0)
    print(score_for_each_batch)
    print(score_for_each_batch.size())
| 5,642 | -2 | 130 |
145f855179547244441085eb5696c4ff8167f388 | 4,685 | py | Python | tests/hypothesis2_4_masks.py | davtoh/RRTools | 6dde2d4622719d9031bf21ffbf7723231a0e2003 | [
"BSD-3-Clause"
] | 1 | 2019-07-16T03:54:22.000Z | 2019-07-16T03:54:22.000Z | tests/hypothesis2_4_masks.py | davtoh/RRTools | 6dde2d4622719d9031bf21ffbf7723231a0e2003 | [
"BSD-3-Clause"
] | null | null | null | tests/hypothesis2_4_masks.py | davtoh/RRTools | 6dde2d4622719d9031bf21ffbf7723231a0e2003 | [
"BSD-3-Clause"
] | 1 | 2019-07-09T02:49:06.000Z | 2019-07-09T02:49:06.000Z | from __future__ import division
# Flat demo script: segment the dominant object in an image, analyse its
# contour (polygon test, convexity defects, fitted ellipse) and mask the
# image with the ellipse.  Requires OpenCV and the project's tesisfunctions.
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
from past.utils import old_div
from .tesisfunctions import Plotim,overlay,padVH
import cv2
import numpy as np
#from invariantMoments import centroid,invmoments,normalizedinvariantmoment,bwmoment
from .tesisfunctions import sigmoid,histogram,brightness,getthresh,threshold,pad,graphpolygontest, polygontest
#http://stackoverflow.com/questions/14725181/speed-up-iteration-over-numpy-arrays-opencv-cv2-image
#http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_imgproc/py_contours/py_contour_features/py_contour_features.html
# input image; alternates kept commented for quick switching
fn1 = r'im1_2.jpg'
#fn1 = r"asift2Result_with_alfa1.png"
#fn1 = r"im_completer_Result2.png"
fore = cv2.imread(fn1)
fore = cv2.resize(fore,(300,300))
name = fn1.split('\\')[-1].split(".")[0]
fore2 = fore.copy()
"""
fore = fore.astype("float")
fb = fore[:,:,0]
fg = fore[:,:,1]
fr = fore[:,:,2]
# threshold retinal area
alfa = -1
beta = 50 # if alfa >0 :if beta = 50 with noise, if beta = 200 without noise
th = 1
kernel = np.ones((100,100),np.uint8)
enhanced = sigmoid(fr,alfa,beta)
thresh = cv2.threshold(enhanced.astype("uint8"),th,1,cv2.THRESH_BINARY_INV)[1]
#dilation = cv2.dilate(thresh,kernel,iterations = 1)
#erosion = cv2.erode(dilation,kernel,iterations = 1)
#closing = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
#dilation = cv2.dilate(opening,kernel,iterations = 1)
lastthresh = opening
"""
# --- segmentation: binarize the brightness channel ---
P = brightness(fore)
thresh = getthresh(cv2.resize(P,(300,300)))
print(thresh)
# NOTE(review): this result is immediately overwritten by the Otsu
# threshold on the next line -- the manual threshold looks like a leftover
lastthresh=threshold(P,thresh,1,0)
thresh,lastthresh = cv2.threshold(P,0,1,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
#lastthresh = pad(lastthresh,1)
plotc = Plotim(name + " overlayed lastthresh", overlay(fore.copy(), lastthresh * 255, alpha=lastthresh))
plotc.show()
# find biggest area
contours,hierarchy = cv2.findContours(lastthresh.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
print("objects: ",len(contours))
index = 0
maxarea = 0
#objectarea = np.sum(lastthresh)
for i in range(len(contours)):
    area = cv2.contourArea(contours[i])
    if area>maxarea:
        index = i
        maxarea = area
print("area contour:",maxarea,"index: ",index)
cnt = contours[index]
print("optaining polygon test...")
polygontest = graphpolygontest((P,cnt)).show()
#DEFECTS
# draw hull edges and convexity-defect points over a black/white rendering
# of the thresholded mask
pallet = [[0,0,0],[255,255,255]]
pallet = np.array(pallet,np.uint8)
imdefects = pallet[lastthresh]
hull = cv2.convexHull(cnt,returnPoints = False)
defects = cv2.convexityDefects(cnt,hull)
distances = defects[:,0,3]
two_max = np.argpartition(distances, -2)[-2:] # get indeces of two maximum values
for i in range(defects.shape[0]):
    s,e,f,d = defects[i,0]
    start = tuple(cnt[s][0])
    end = tuple(cnt[e][0])
    far = tuple(cnt[f][0])
    cv2.line(imdefects,start,end,[0,255,0],2)
    cv2.circle(imdefects,far,5,[0,0,255],-1)
#SEPARATING LINE
# line through the two deepest defect points, extended to the image border
points = defects[:,0,2]
x1,y1 = tuple(cnt[points[two_max[0]]][0])
x2,y2 = tuple(cnt[points[two_max[1]]][0])
m = old_div((y2-y1),float(x2-x1))
b = int(y1-x1*m)
# find interception with xf and yf axis
if b>imdefects.shape[0]: # if start outside yf
    start = int(old_div((imdefects.shape[0]-b),m)),imdefects.shape[0] # (yf-b)/m, yf
else: # if start inside yf
    start = 0,b # 0,y
y = int(m*imdefects.shape[1]+b) # m*xf+b
if y<0: # if end outside yf
    end = int(old_div(-b,m)),0# x,0
else: # if end inside yf
    end = imdefects.shape[1],y # xf, y
cv2.line(imdefects,start,end,[0,0,100],2)
plotc = Plotim(name + " defects", imdefects)
plotc.show()
#ROI
# filled mask of the biggest contour
ROI = np.zeros(P.shape,dtype=np.uint8)
cv2.drawContours(ROI,[cnt],0,1,-1)
plotc = Plotim(name + " ROI", ROI)
plotc.show()
M = cv2.moments(cnt) # find moments
#M2 = invmoments(ROI,Area=None,center=None)
#cx = int(M['m10']/M['m00'])
#cy = int(M['m01']/M['m00'])
#x,y = centroid(ROI,maxarea)
#normalizedinvariantmoment(ROI,maxarea,0,0,x,y)
#n00 = bwmoment(ROI,0,0,cx,cy)
#print "(cx,cy)",(cx,cy)
#print "x,y",x,y
#cv2.circle(fore, (cx,cy), 10, (0, 0, 255), -1, 8)
#cv2.circle(fore, (int(x),int(y)), 10, (0, 255, 255), -1, 8)
# draw the contour and its fitted ellipse for visual inspection
cv2.drawContours(fore,[cnt],0,(0, 0, 255),2)
ellipse = cv2.fitEllipse(cnt)
cv2.ellipse(fore,ellipse,(0,255,0),2)
plotc = Plotim(name + " description", fore)
plotc.show()
# black out everything outside the fitted ellipse and save the result
mask = np.ones(P.shape,dtype=np.uint8)
cv2.ellipse(mask,ellipse,0,-1)
fore2[mask>0]=0
plotc = Plotim(name + " result", fore2)
plotc.show()
cv2.imwrite("mask_"+name+".png",fore2)
"""
# Saving the objects:
import pickle
data = {"thresh":thresh,"lastthresh":lastthresh,"cnt":cnt,"ellipse":ellipse,"polygontest":polygontest}
with open("masks_"+name+'.pickle', 'w') as f:
    pickle.dump(data, f)"""
# --- Retina image segmentation experiment (linear script) ---
# Pipeline: load one image -> brightness/Otsu threshold -> keep the biggest
# contour -> visualize convexity defects and a separating line -> blank
# everything outside a fitted ellipse.  Plotim windows make each stage
# interactive; the commented-out code is earlier experimentation kept as-is.
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
from past.utils import old_div
from .tesisfunctions import Plotim,overlay,padVH
import cv2
import numpy as np
#from invariantMoments import centroid,invmoments,normalizedinvariantmoment,bwmoment
from .tesisfunctions import sigmoid,histogram,brightness,getthresh,threshold,pad,graphpolygontest, polygontest
#http://stackoverflow.com/questions/14725181/speed-up-iteration-over-numpy-arrays-opencv-cv2-image
#http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_imgproc/py_contours/py_contour_features/py_contour_features.html
# Input image; alternative test inputs are left commented below.
fn1 = r'im1_2.jpg'
#fn1 = r"asift2Result_with_alfa1.png"
#fn1 = r"im_completer_Result2.png"
fore = cv2.imread(fn1)
fore = cv2.resize(fore,(300,300))
name = fn1.split('\\')[-1].split(".")[0]
fore2 = fore.copy()
# The triple-quoted block below is a disabled earlier attempt that
# thresholded a sigmoid-enhanced red channel instead of the brightness.
"""
fore = fore.astype("float")
fb = fore[:,:,0]
fg = fore[:,:,1]
fr = fore[:,:,2]
# threshold retinal area
alfa = -1
beta = 50 # if alfa >0 :if beta = 50 with noise, if beta = 200 without noise
th = 1
kernel = np.ones((100,100),np.uint8)
enhanced = sigmoid(fr,alfa,beta)
thresh = cv2.threshold(enhanced.astype("uint8"),th,1,cv2.THRESH_BINARY_INV)[1]
#dilation = cv2.dilate(thresh,kernel,iterations = 1)
#erosion = cv2.erode(dilation,kernel,iterations = 1)
#closing = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
#dilation = cv2.dilate(opening,kernel,iterations = 1)
lastthresh = opening
"""
P = brightness(fore)
thresh = getthresh(cv2.resize(P,(300,300)))
print(thresh)
# NOTE(review): the result of threshold() on the next line is immediately
# overwritten by the Otsu binarization on the line after it.
lastthresh=threshold(P,thresh,1,0)
thresh,lastthresh = cv2.threshold(P,0,1,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
#lastthresh = pad(lastthresh,1)
plotc = Plotim(name + " overlayed lastthresh", overlay(fore.copy(), lastthresh * 255, alpha=lastthresh))
plotc.show()
# find biggest area
contours,hierarchy = cv2.findContours(lastthresh.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
print("objects: ",len(contours))
index = 0
maxarea = 0
#objectarea = np.sum(lastthresh)
# Linear scan for the contour with the largest area.
for i in range(len(contours)):
    area = cv2.contourArea(contours[i])
    if area>maxarea:
        index = i
        maxarea = area
print("area contour:",maxarea,"index: ",index)
cnt = contours[index]
print("optaining polygon test...")
polygontest = graphpolygontest((P,cnt)).show()
#DEFECTS
# Draw the convexity defects of the selected contour on a B/W copy.
pallet = [[0,0,0],[255,255,255]]
pallet = np.array(pallet,np.uint8)
imdefects = pallet[lastthresh]
hull = cv2.convexHull(cnt,returnPoints = False)
defects = cv2.convexityDefects(cnt,hull)
distances = defects[:,0,3]
two_max = np.argpartition(distances, -2)[-2:] # get indeces of two maximum values
for i in range(defects.shape[0]):
    s,e,f,d = defects[i,0]
    start = tuple(cnt[s][0])
    end = tuple(cnt[e][0])
    far = tuple(cnt[f][0])
    cv2.line(imdefects,start,end,[0,255,0],2)
    cv2.circle(imdefects,far,5,[0,0,255],-1)
#SEPARATING LINE
# Line through the two deepest defect points, extended to the image border.
points = defects[:,0,2]
x1,y1 = tuple(cnt[points[two_max[0]]][0])
x2,y2 = tuple(cnt[points[two_max[1]]][0])
m = old_div((y2-y1),float(x2-x1))
b = int(y1-x1*m)
# find interception with xf and yf axis
if b>imdefects.shape[0]: # if start outside yf
    start = int(old_div((imdefects.shape[0]-b),m)),imdefects.shape[0] # (yf-b)/m, yf
else: # if start inside yf
    start = 0,b # 0,y
y = int(m*imdefects.shape[1]+b) # m*xf+b
if y<0: # if end outside yf
    end = int(old_div(-b,m)),0# x,0
else: # if end inside yf
    end = imdefects.shape[1],y # xf, y
cv2.line(imdefects,start,end,[0,0,100],2)
plotc = Plotim(name + " defects", imdefects)
plotc.show()
#ROI
# Filled mask of the selected contour.
ROI = np.zeros(P.shape,dtype=np.uint8)
cv2.drawContours(ROI,[cnt],0,1,-1)
plotc = Plotim(name + " ROI", ROI)
plotc.show()
M = cv2.moments(cnt) # find moments
#M2 = invmoments(ROI,Area=None,center=None)
#cx = int(M['m10']/M['m00'])
#cy = int(M['m01']/M['m00'])
#x,y = centroid(ROI,maxarea)
#normalizedinvariantmoment(ROI,maxarea,0,0,x,y)
#n00 = bwmoment(ROI,0,0,cx,cy)
#print "(cx,cy)",(cx,cy)
#print "x,y",x,y
#cv2.circle(fore, (cx,cy), 10, (0, 0, 255), -1, 8)
#cv2.circle(fore, (int(x),int(y)), 10, (0, 255, 255), -1, 8)
# Outline the contour (red) and its fitted ellipse (green) on the image.
cv2.drawContours(fore,[cnt],0,(0, 0, 255),2)
ellipse = cv2.fitEllipse(cnt)
cv2.ellipse(fore,ellipse,(0,255,0),2)
plotc = Plotim(name + " description", fore)
plotc.show()
# mask is 1 everywhere, then the filled ellipse is drawn with value 0,
# so fore2[mask>0]=0 blanks every pixel OUTSIDE the fitted ellipse.
mask = np.ones(P.shape,dtype=np.uint8)
cv2.ellipse(mask,ellipse,0,-1)
fore2[mask>0]=0
plotc = Plotim(name + " result", fore2)
plotc.show()
cv2.imwrite("mask_"+name+".png",fore2)
# The trailing string below disables an old pickle dump of intermediates.
"""
# Saving the objects:
import pickle
data = {"thresh":thresh,"lastthresh":lastthresh,"cnt":cnt,"ellipse":ellipse,"polygontest":polygontest}
with open("masks_"+name+'.pickle', 'w') as f:
pickle.dump(data, f)""" | 0 | 0 | 0 |
8eb87ebacb5175749b25e51af42bb1da2f1af53b | 4,924 | py | Python | PyNewsletter.py | Diogo-Paulico/PyNewsletter | a8e4782e96a21fb436d020643083edb3008d0268 | [
"MIT"
] | 3 | 2021-02-16T11:40:23.000Z | 2021-02-27T20:03:42.000Z | PyNewsletter.py | Diogo-Paulico/PyNewsletter | a8e4782e96a21fb436d020643083edb3008d0268 | [
"MIT"
] | 1 | 2021-02-27T20:43:06.000Z | 2021-02-27T20:43:06.000Z | PyNewsletter.py | Diogo-Paulico/PyNewsletter | a8e4782e96a21fb436d020643083edb3008d0268 | [
"MIT"
] | null | null | null | import email, smtplib, ssl, sys, imaplib, email.header
import pandas as pd
from config import *
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# Loads the csv file given by the CSV_FILE variable
# @return -> list of email addresses contained in the column Email
# Loads the email body from the TEXT_BODY_FILE
# Fix: use a context manager so the file handle is closed deterministically
# (the original open(...).read() leaked the handle).
with open(TEXT_BODY_FILE, 'r') as _body_file:
    body = _body_file.read()
# Removes the given email from the loaded CSV file containing the emails
# @return -> True is the emailing list changed, False otherwise
# will use the IMAP protocol to check the EMAIL_FOLDER for unseen messages with the subject given by CANCEL_SUBJECT_KEYWORD
# @return -> the list of emails to be removed
# will handle all the unsubcribing, getting the emails to unsubscribe using the getUnsubscribers() function
# and remove them using the removeSubscriber(email_to_remove) function
# @return -> list of email adresses removed from the mailing list
# builds the message object for the emails given the email adress of the receiver (email_receiver)
# @return -> message object "stringified"
# builds the message object for the unsubscribed emails given the email adress of the receiver (email_receiver) and the path to the file containing the
# message to be sent to unsubscribers (UNSUB_BODY_FILE)
# @return -> message object "stringified"
# Will import the email adresses, handle unsubscribing emails using the previously defined functions,
# then it will send the unsub message to emails that asked to be removed if SEND_UNSUB_MESSAGE is set to True
# and the newsletter to the emails present in the emailing list
pyNewsletter() | 35.941606 | 152 | 0.667141 | import email, smtplib, ssl, sys, imaplib, email.header
import pandas as pd
from config import *
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# Loads the csv file given by the CSV_FILE variable
# @return -> list of email addresses contained in the column Email
def buildContacts():
    """(Re)load the subscriber CSV (CSV_FILE) and return its Email column."""
    global df
    df = pd.read_csv(CSV_FILE)
    return df.Email
# Loads the email body from the TEXT_BODY_FILE
# Fix: use a context manager so the file handle is closed deterministically
# (the original open(...).read() leaked the handle).
with open(TEXT_BODY_FILE, 'r') as _body_file:
    body = _body_file.read()
# Removes the given email from the loaded CSV file containing the emails
# @return -> True is the emailing list changed, False otherwise
def removeSubscriber(email_to_remove):
    """Drop one address from the in-memory list and rewrite CSV_FILE.

    Returns True when the list actually shrank, False otherwise.
    """
    global df
    before = df.Email.count()
    df = df[df.Email != email_to_remove]
    df.Email.to_csv(CSV_FILE, index=False)
    return df.Email.count() != before
# will use the IMAP protocol to check the EMAIL_FOLDER for unseen messages with the subject given by CANCEL_SUBJECT_KEYWORD
# @return -> the list of emails to be removed
def getUnsubscribers():
    """Poll the IMAP mailbox for unseen unsubscribe requests.

    Returns the list of sender addresses that asked to be removed, or
    None when the folder cannot be selected, the search fails, or no
    matching message exists (callers treat the result as falsy then).
    Exits the whole program when the IMAP login is rejected.
    """
    emailsToRemove=[]
    M = imaplib.IMAP4_SSL(IMAP_ADRESS)
    try:
        rv, data = M.login(SENDER_EMAIL, PASSWORD)
    except imaplib.IMAP4.error:
        # Credentials from config were rejected: abort the whole run.
        print ("LOGIN FAILED!!! ")
        sys.exit(1)
    rv, data = M.select(EMAIL_FOLDER)
    if rv != 'OK':
        M.logout()
        return
    # Only unseen messages whose subject matches the cancel keyword count.
    rv, data = M.search(None, '(UNSEEN SUBJECT "{0}")'.format(CANCEL_SUBJECT_KEYWORD))
    if rv != 'OK' or str(data[0]) == "b''":
        return
    for num in data[0].split():
        rv, data = M.fetch(num, '(RFC822)')
        if rv != 'OK':
            print("ERROR getting message", num)
            return
        msg = email.message_from_bytes(data[0][1])
        hdr = email.header.make_header(email.header.decode_header(msg['Subject']))
        subject = str(hdr)  # NOTE(review): decoded but currently unused
        # Keep only the bare address part of the From header.
        emailFrom = str(email.utils.parseaddr(msg['From'])[1])
        emailsToRemove.append(emailFrom.strip())
    M.close()
    M.logout()
    return emailsToRemove
# will handle all the unsubcribing, getting the emails to unsubscribe using the getUnsubscribers() function
# and remove them using the removeSubscriber(email_to_remove) function
# @return -> list of email adresses removed from the mailing list
def unsubHandler():
    """Remove every address that requested it and report what was removed.

    Returns the list of addresses actually dropped from the mailing list.
    """
    count = 0
    toUnsub = getUnsubscribers()
    unSubd = []
    if toUnsub:
        # Fix: the loop variable was named `email`, shadowing the imported
        # `email` module inside this function; renamed to `address`.
        for address in toUnsub:
            if removeSubscriber(address):
                count += 1
                unSubd.append(address)
        print('Removed %d subscribers' % (count))
    else:
        print('No subscribers removed')
    return unSubd
# builds the message object for the emails given the email adress of the receiver (email_receiver)
# @return -> message object "stringified"
def messageBuilder(email_receiver):
    """Assemble the newsletter MIME message for one recipient."""
    msg = MIMEMultipart()
    msg["From"] = SENDER_NAME
    msg["To"] = email_receiver
    msg["Subject"] = SUBJECT
    msg.attach(MIMEText(body, "plain"))
    return msg.as_string()
# builds the message object for the unsubscribed emails given the email adress of the receiver (email_receiver) and the path to the file containing the
# message to be sent to unsubscribers (UNSUB_BODY_FILE)
# @return -> message object "stringified"
def unsubMessageBuilder(email_receiver):
    """Assemble the goodbye MIME message for one unsubscribed recipient."""
    # Fix: read the body through a context manager; the original
    # open(...).read() never closed the file handle.
    with open(UNSUB_BODY_FILE, 'r') as unsub_file:
        unsubBody = unsub_file.read()
    message = MIMEMultipart()
    message["From"] = SENDER_NAME
    message["To"] = email_receiver
    message["Subject"] = UNSUB_MESSAGE_SUBJECT
    message.attach(MIMEText(unsubBody, "plain"))
    return message.as_string()
# Will import the email adresses, handle unsubscribing emails using the previously defined functions,
# then it will send the unsub message to emails that asked to be removed if SEND_UNSUB_MESSAGE is set to True
# and the newsletter to the emails present in the emailing list
def pyNewsletter():
    """Process unsubscriptions, then send the newsletter to every subscriber."""
    contacts = buildContacts()  # primes the module-level df used by removeSubscriber()
    unsubEmails = unsubHandler()
    # Reload: unsubHandler() may have rewritten the CSV.
    contacts = buildContacts()
    counter = 0
    context = ssl.create_default_context()
    with smtplib.SMTP_SSL(SMTP_ADRESS, SSL_PORT, context=context) as server:
        try:
            server.login(SENDER_EMAIL, PASSWORD)
            if unsubEmails and SEND_UNSUB_MESSAGE:
                for unsubEmail in unsubEmails:
                    server.sendmail(SENDER_EMAIL, unsubEmail, unsubMessageBuilder(unsubEmail))
            for receiver_email in contacts:
                server.sendmail(SENDER_EMAIL, receiver_email, messageBuilder(receiver_email))
                counter += 1
        # Fix: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt and hid the actual error; catch Exception and
        # report what went wrong instead.
        except Exception as exc:
            print("SMTP server connection error")
            print(exc)
    print('Sent ' + str(counter) + ' out of '+ str(contacts.size) + ' emails.')
pyNewsletter() | 3,073 | 0 | 154 |
4aa1225b275666806c08c3f04e1756fc027da524 | 808 | py | Python | aoc15-2.py | UnrelatedString/advent-of-code-2021 | f802240ce01dc9e31f152915456aff2644b281e2 | [
"MIT"
] | null | null | null | aoc15-2.py | UnrelatedString/advent-of-code-2021 | f802240ce01dc9e31f152915456aff2644b281e2 | [
"MIT"
] | 1 | 2021-12-10T07:24:13.000Z | 2021-12-11T05:10:50.000Z | aoc15-2.py | UnrelatedString/advent-of-code-2021 | f802240ce01dc9e31f152915456aff2644b281e2 | [
"MIT"
] | null | null | null | import sys
from stuff import *
import heapq as hq
# Advent of Code 2021 day 15 part 2: lowest-risk path across the input
# grid tiled 5x5, each tile's risk incremented and wrapped 9 -> 1.
# `lines`, `von_neumann` and `c_t` come from the project-local `stuff`
# module -- presumably the input lines, the 4-neighbour offsets, and a
# complex -> tuple converter (TODO confirm against stuff.py).
ls = lines()#chunks()#nums()
cs = {}
d = {}
# Build the 5x-expanded grid keyed by complex coordinates.
for y,l in enumerate(ls):
  for x,n in enumerate(l):
    n = eval(n)
    for Y in range(5):
      for X in range(5):
        c=complex(x+len(l)*X,y+len(ls)*Y)
        cs[c] = (n-1+X+Y)%9+1
        d[c] = float('inf')
    #u.add(complex(x,y))
g = c #thank fuck for python's scoping being shit
# complex(0, 0) == 0, so this seeds the start corner's distance.
d[0] = 0
u = {*cs.keys()}
h = []
hq.heappush(h,(0,(0,0)))
# Dijkstra-style relaxation driven by a heap of (distance, (x, y)) pairs;
# stops once the goal cell g leaves the unvisited set.
while g in u:
  _,c = hq.heappop(h)#min(u,key=d.__getitem__)
  c = complex(*c)
  u.remove(c)
  for o in von_neumann:
    if c+o not in cs: continue
    if d[c+o] == float('inf'):
      hq.heappush(h,(d[c]+cs[c+o],c_t(c+o))) #uh?
      d[c+o] = min(d[c+o], d[c]+cs[c+o])
print(d[g])
| 21.837838 | 55 | 0.483911 | import sys
from stuff import *
import heapq as hq
# Advent of Code 2021 day 15 part 2: lowest-risk path across the input
# grid tiled 5x5, each tile's risk incremented and wrapped 9 -> 1.
# `lines`, `von_neumann` and `c_t` come from the project-local `stuff`
# module -- presumably the input lines, the 4-neighbour offsets, and a
# complex -> tuple converter (TODO confirm against stuff.py).
ls = lines()#chunks()#nums()
cs = {}
d = {}
# Build the 5x-expanded grid keyed by complex coordinates.
for y,l in enumerate(ls):
  for x,n in enumerate(l):
    n = eval(n)
    for Y in range(5):
      for X in range(5):
        c=complex(x+len(l)*X,y+len(ls)*Y)
        cs[c] = (n-1+X+Y)%9+1
        d[c] = float('inf')
    #u.add(complex(x,y))
g = c #thank fuck for python's scoping being shit
# complex(0, 0) == 0, so this seeds the start corner's distance.
d[0] = 0
u = {*cs.keys()}
h = []
hq.heappush(h,(0,(0,0)))
# Dijkstra-style relaxation driven by a heap of (distance, (x, y)) pairs;
# stops once the goal cell g leaves the unvisited set.
while g in u:
  _,c = hq.heappop(h)#min(u,key=d.__getitem__)
  c = complex(*c)
  u.remove(c)
  for o in von_neumann:
    if c+o not in cs: continue
    if d[c+o] == float('inf'):
      hq.heappush(h,(d[c]+cs[c+o],c_t(c+o))) #uh?
      d[c+o] = min(d[c+o], d[c]+cs[c+o])
print(d[g])
| 0 | 0 | 0 |
b9a14b5da4025ad529f64c2d622dcacffe4710d0 | 14,219 | py | Python | ee/tasks/test/test_calculate_cohort.py | msnitish/posthog | cb86113f568e72eedcb64b5fd00c313d21e72f90 | [
"MIT"
] | null | null | null | ee/tasks/test/test_calculate_cohort.py | msnitish/posthog | cb86113f568e72eedcb64b5fd00c313d21e72f90 | [
"MIT"
] | null | null | null | ee/tasks/test/test_calculate_cohort.py | msnitish/posthog | cb86113f568e72eedcb64b5fd00c313d21e72f90 | [
"MIT"
] | null | null | null | import json
import urllib.parse
from unittest.mock import patch
from uuid import uuid4
from freezegun import freeze_time
from ee.clickhouse.models.event import create_event
from ee.clickhouse.util import ClickhouseTestMixin
from posthog.client import sync_execute
from posthog.models.cohort import Cohort
from posthog.models.person import Person
from posthog.tasks.calculate_cohort import insert_cohort_from_insight_filter
from posthog.tasks.test.test_calculate_cohort import calculate_cohort_test_factory
| 39.279006 | 373 | 0.486673 | import json
import urllib.parse
from unittest.mock import patch
from uuid import uuid4
from freezegun import freeze_time
from ee.clickhouse.models.event import create_event
from ee.clickhouse.util import ClickhouseTestMixin
from posthog.client import sync_execute
from posthog.models.cohort import Cohort
from posthog.models.person import Person
from posthog.tasks.calculate_cohort import insert_cohort_from_insight_filter
from posthog.tasks.test.test_calculate_cohort import calculate_cohort_test_factory
def _create_event(**kwargs):
    """Insert a ClickHouse event, stamping it with a fresh UUID."""
    kwargs["event_uuid"] = uuid4()
    create_event(**kwargs)
def _create_person(**kwargs):
    """Create a person row and return a Person keyed by its UUID."""
    created = Person.objects.create(**kwargs)
    return Person(id=created.uuid)
class TestClickhouseCalculateCohort(ClickhouseTestMixin, calculate_cohort_test_factory(_create_event, _create_person)):  # type: ignore
    """Integration tests for creating static cohorts from insight filters.

    Each test POSTs to the cohorts endpoint with filter params in the query
    string, checks that only the Celery task was scheduled with the parsed
    filter, then runs the task body synchronously and verifies the static
    cohort was materialized in ClickHouse.
    """
    @patch("posthog.tasks.calculate_cohort.insert_cohort_from_insight_filter.delay")
    def test_create_stickiness_cohort(self, _insert_cohort_from_insight_filter):
        """Stickiness filter params are forwarded and produce one member."""
        _create_person(team_id=self.team.pk, distinct_ids=["blabla"])
        _create_event(
            team=self.team,
            event="$pageview",
            distinct_id="blabla",
            properties={"$math_prop": 1},
            timestamp="2021-01-01T12:00:00Z",
        )
        response = self.client.post(
            f"/api/projects/{self.team.id}/cohorts/?insight=STICKINESS&properties=%5B%5D&interval=day&display=ActionsLineGraph&events=%5B%7B%22id%22%3A%22%24pageview%22%2C%22name%22%3A%22%24pageview%22%2C%22type%22%3A%22events%22%2C%22order%22%3A0%7D%5D&shown_as=Stickiness&date_from=2021-01-01&entity_id=%24pageview&entity_type=events&stickiness_days=1&label=%24pageview",
            {"name": "test", "is_static": True},
        ).json()
        cohort_id = response["id"]
        # The POST should only schedule the async job with the raw params.
        _insert_cohort_from_insight_filter.assert_called_once_with(
            cohort_id,
            {
                "insight": "STICKINESS",
                "properties": "[]",
                "interval": "day",
                "display": "ActionsLineGraph",
                "events": '[{"id":"$pageview","name":"$pageview","type":"events","order":0}]',
                "shown_as": "Stickiness",
                "date_from": "2021-01-01",
                "entity_id": "$pageview",
                "entity_type": "events",
                "stickiness_days": "1",
                "label": "$pageview",
            },
        )
        # Run the task synchronously to actually materialize the cohort.
        insert_cohort_from_insight_filter(
            cohort_id,
            {
                "date_from": "2021-01-01",
                "events": [
                    {
                        "id": "$pageview",
                        "type": "events",
                        "order": 0,
                        "name": "$pageview",
                        "custom_name": None,
                        "math": None,
                        "math_property": None,
                        "math_group_type_index": None,
                        "properties": [],
                    }
                ],
                "insight": "STICKINESS",
                "interval": "day",
                "selected_interval": 1,
                "shown_as": "Stickiness",
                "entity_id": "$pageview",
                "entity_type": "events",
                "entity_math": None,
            },
        )
        cohort = Cohort.objects.get(pk=cohort_id)
        people = Person.objects.filter(cohort__id=cohort.pk)
        self.assertEqual(people.count(), 1)
    @patch("posthog.tasks.calculate_cohort.insert_cohort_from_insight_filter.delay")
    def test_create_trends_cohort(self, _insert_cohort_from_insight_filter):
        """Trends filter restricted to one day captures the matching person."""
        _create_person(team_id=self.team.pk, distinct_ids=["blabla"])
        with freeze_time("2021-01-01 00:06:34"):
            _create_event(
                team=self.team,
                event="$pageview",
                distinct_id="blabla",
                properties={"$math_prop": 1},
                timestamp="2021-01-01T12:00:00Z",
            )
        with freeze_time("2021-01-02 00:06:34"):
            _create_event(
                team=self.team,
                event="$pageview",
                distinct_id="blabla",
                properties={"$math_prop": 4},
                timestamp="2021-01-01T12:00:00Z",
            )
        response = self.client.post(
            f"/api/projects/{self.team.id}/cohorts/?interval=day&display=ActionsLineGraph&events=%5B%7B%22id%22%3A%22%24pageview%22%2C%22name%22%3A%22%24pageview%22%2C%22type%22%3A%22events%22%2C%22order%22%3A0%7D%5D&properties=%5B%5D&entity_id=%24pageview&entity_type=events&date_from=2021-01-01&date_to=2021-01-01&label=%24pageview",
            {"name": "test", "is_static": True},
        ).json()
        cohort_id = response["id"]
        # The POST should only schedule the async job with the raw params.
        _insert_cohort_from_insight_filter.assert_called_once_with(
            cohort_id,
            {
                "interval": "day",
                "display": "ActionsLineGraph",
                "events": '[{"id":"$pageview","name":"$pageview","type":"events","order":0}]',
                "properties": "[]",
                "entity_id": "$pageview",
                "entity_type": "events",
                "date_from": "2021-01-01",
                "date_to": "2021-01-01",
                "label": "$pageview",
            },
        )
        # Run the task synchronously to actually materialize the cohort.
        insert_cohort_from_insight_filter(
            cohort_id,
            {
                "date_from": "2021-01-01",
                "date_to": "2021-01-01",
                "display": "ActionsLineGraph",
                "events": [
                    {
                        "id": "$pageview",
                        "type": "events",
                        "order": 0,
                        "name": "$pageview",
                        "math": None,
                        "math_property": None,
                        "math_group_type_index": None,
                        "properties": [],
                    }
                ],
                "entity_id": "$pageview",
                "entity_type": "events",
                "insight": "TRENDS",
                "interval": "day",
            },
        )
        cohort = Cohort.objects.get(pk=cohort_id)
        people = Person.objects.filter(cohort__id=cohort.pk)
        self.assertEqual(cohort.errors_calculating, 0)
        # The dict argument is the assertion failure message: it dumps both
        # static-cohort tables to aid debugging flaky inserts.
        self.assertEqual(
            people.count(),
            1,
            {
                "a": sync_execute(
                    "select person_id from person_static_cohort where team_id = {} and cohort_id = {} ".format(
                        self.team.id, cohort.pk
                    )
                ),
                "b": sync_execute(
                    "select person_id from person_static_cohort FINAL where team_id = {} and cohort_id = {} ".format(
                        self.team.id, cohort.pk
                    )
                ),
            },
        )
    @patch("posthog.tasks.calculate_cohort.insert_cohort_from_insight_filter.delay")
    def test_create_trends_cohort_arg_test(self, _insert_cohort_from_insight_filter):
        """Regression: '%' inside an icontains/LIKE value must survive intact.

        Prior to PR 8124, parameter substitution ran on insight cohort
        queries, which caused '%' in LIKE arguments to be interpreted as a
        missing query parameter.
        """
        _create_person(team_id=self.team.pk, distinct_ids=["blabla"])
        with freeze_time("2021-01-01 00:06:34"):
            _create_event(
                team=self.team,
                event="$pageview",
                distinct_id="blabla",
                properties={"$domain": "https://app.posthog.com/123"},
                timestamp="2021-01-01T12:00:00Z",
            )
        with freeze_time("2021-01-02 00:06:34"):
            _create_event(
                team=self.team,
                event="$pageview",
                distinct_id="blabla",
                properties={"$domain": "https://app.posthog.com/123"},
                timestamp="2021-01-01T12:00:00Z",
            )
        params = {
            "date_from": "2021-01-01",
            "date_to": "2021-01-01",
            "display": "ActionsLineGraph",
            "events": json.dumps([{"id": "$pageview", "name": "$pageview", "type": "events", "order": 0}]),
            "entity_id": "$pageview",
            "entity_type": "events",
            "insight": "TRENDS",
            "interval": "day",
            "properties": json.dumps(
                [{"key": "$domain", "value": "app.posthog.com", "operator": "icontains", "type": "event"}]
            ),
        }
        response = self.client.post(
            f"/api/projects/{self.team.id}/cohorts/?{urllib.parse.urlencode(params)}",
            {"name": "test", "is_static": True},
        ).json()
        cohort_id = response["id"]
        # The POST should only schedule the async job with the raw params.
        _insert_cohort_from_insight_filter.assert_called_once_with(
            cohort_id,
            {
                "date_from": "2021-01-01",
                "date_to": "2021-01-01",
                "display": "ActionsLineGraph",
                "events": '[{"id": "$pageview", "name": "$pageview", "type": "events", "order": 0}]',
                "entity_id": "$pageview",
                "entity_type": "events",
                "insight": "TRENDS",
                "interval": "day",
                "properties": '[{"key": "$domain", "value": "app.posthog.com", "operator": "icontains", "type": "event"}]',
            },
        )
        # Run the task synchronously to actually materialize the cohort.
        insert_cohort_from_insight_filter(
            cohort_id,
            {
                "date_from": "2021-01-01",
                "date_to": "2021-01-01",
                "display": "ActionsLineGraph",
                "events": [
                    {
                        "id": "$pageview",
                        "type": "events",
                        "order": 0,
                        "name": "$pageview",
                        "math": None,
                        "math_property": None,
                        "math_group_type_index": None,
                        "properties": [],
                    }
                ],
                "properties": [
                    {"key": "$domain", "value": "app.posthog.com", "operator": "icontains", "type": "event"}
                ],
                "entity_id": "$pageview",
                "entity_type": "events",
                "insight": "TRENDS",
                "interval": "day",
            },
        )
        cohort = Cohort.objects.get(pk=cohort_id)
        people = Person.objects.filter(cohort__id=cohort.pk)
        self.assertEqual(cohort.errors_calculating, 0)
        # The dict argument is the assertion failure message: it dumps both
        # static-cohort tables to aid debugging flaky inserts.
        self.assertEqual(
            people.count(),
            1,
            {
                "a": sync_execute(
                    "select person_id from person_static_cohort where team_id = {} and cohort_id = {} ".format(
                        self.team.id, cohort.pk
                    )
                ),
                "b": sync_execute(
                    "select person_id from person_static_cohort FINAL where team_id = {} and cohort_id = {} ".format(
                        self.team.id, cohort.pk
                    )
                ),
            },
        )
    @patch("posthog.tasks.calculate_cohort.insert_cohort_from_insight_filter.delay")
    def test_create_funnels_cohort(self, _insert_cohort_from_insight_filter):
        """Funnel step filter: note funnel_step arrives as the string "1"."""
        _create_person(team_id=self.team.pk, distinct_ids=["blabla"])
        with freeze_time("2021-01-01 00:06:34"):
            _create_event(
                team=self.team,
                event="$pageview",
                distinct_id="blabla",
                properties={"$math_prop": 1},
                timestamp="2021-01-01T12:00:00Z",
            )
        with freeze_time("2021-01-02 00:06:34"):
            _create_event(
                team=self.team,
                event="$another_view",
                distinct_id="blabla",
                properties={"$math_prop": 4},
                timestamp="2021-01-02T12:00:00Z",
            )
        params = {
            "insight": "FUNNELS",
            "events": json.dumps(
                [
                    {
                        "id": "$pageview",
                        "math": None,
                        "name": "$pageview",
                        "type": "events",
                        "order": 0,
                        "properties": [],
                        "math_property": None,
                    },
                    {
                        "id": "$another_view",
                        "math": None,
                        "name": "$another_view",
                        "type": "events",
                        "order": 1,
                        "properties": [],
                        "math_property": None,
                    },
                ]
            ),
            "display": "FunnelViz",
            "interval": "day",
            "layout": "horizontal",
            "date_from": "2021-01-01",
            "date_to": "2021-01-07",
            "funnel_step": 1,
        }
        response = self.client.post(
            f"/api/projects/{self.team.id}/cohorts/?{urllib.parse.urlencode(params)}",
            {"name": "test", "is_static": True},
        ).json()
        cohort_id = response["id"]
        # The POST should only schedule the async job with the raw params.
        _insert_cohort_from_insight_filter.assert_called_once_with(
            cohort_id,
            {
                "insight": "FUNNELS",
                "events": '[{"id": "$pageview", "math": null, "name": "$pageview", "type": "events", "order": 0, "properties": [], "math_property": null}, {"id": "$another_view", "math": null, "name": "$another_view", "type": "events", "order": 1, "properties": [], "math_property": null}]',
                "display": "FunnelViz",
                "interval": "day",
                "layout": "horizontal",
                "date_from": "2021-01-01",
                "date_to": "2021-01-07",
                "funnel_step": "1",
            },
        )
        # Run the task synchronously to actually materialize the cohort.
        insert_cohort_from_insight_filter(
            cohort_id, params,
        )
        cohort = Cohort.objects.get(pk=cohort_id)
        people = Person.objects.filter(cohort__id=cohort.pk)
        self.assertEqual(cohort.errors_calculating, 0)
        self.assertEqual(people.count(), 1)
| 13,078 | 561 | 69 |
ca605f86181c04b6e4c6071ff1fa0fae67e38c02 | 1,994 | py | Python | core/urls.py | jianxinwang/GenEsysV | 444b4685b603c3da5ac9d9265817cac98c33c97e | [
"BSD-3-Clause"
] | 14 | 2019-02-27T13:57:36.000Z | 2021-10-01T14:29:38.000Z | core/urls.py | jianxinwang/GenEsysV | 444b4685b603c3da5ac9d9265817cac98c33c97e | [
"BSD-3-Clause"
] | 11 | 2019-06-13T06:38:43.000Z | 2021-06-15T13:51:39.000Z | core/urls.py | jianxinwang/GenEsysV | 444b4685b603c3da5ac9d9265817cac98c33c97e | [
"BSD-3-Clause"
] | 5 | 2019-02-27T13:57:40.000Z | 2020-02-09T13:15:06.000Z | from django.urls import path
import core.views as core_views
# URL routes for the core app.  The `name=` values are the stable handles
# used for URL reversing; prefer changing paths over changing names.
urlpatterns = (
    # HTML snippets loaded into the search page.
    path('dataset-snippet/<int:study_id>',
         core_views.DatasetSnippetView.as_view(), name='dataset-snippet'),
    path('analysis-type-snippet/<int:dataset_id>',
         core_views.AnalysisTypeSnippetView.as_view(), name='analysis-type-snippet'),
    path('filter-snippet/<int:dataset_id>',
         core_views.FilterSnippetView.as_view(), name='filter-snippet'),
    path('attribute-snippet/<int:dataset_id>',
         core_views.AttributeSnippetView.as_view(), name='attribute-snippet'),
    # Search / download entry points.
    path('search-router/', core_views.SearchRouterView.as_view(), name='search-router'),
    path('download-router/<int:search_log_id>', core_views.DownloadRouterView.as_view(), name='download-router'),
    path('additional-form-router/<int:dataset_id>/<int:analysis_type_id>', core_views.AdditionalFormRouterView.as_view(), name='additional-form-router'),
    path('base-search/', core_views.BaseSearchView.as_view(), name='base-search'),
    path('base-download/<int:search_log_id>', core_views.BaseDownloadView.as_view(), name='base-download'),
    # Saved searches.
    path('save-search/', core_views.save_search, name='save-search'),
    path('saved-search-list/', core_views.SavedSearchListView.as_view(), name='saved-search-list'),
    path('retrieve-saved-search/<int:saved_search_id>', core_views.RetrieveSavedSearchView.as_view(), name='retrieve-saved-search'),
    # Document viewing and review.
    path('core-document-view/<int:dataset_id>/<document_es_id>/', core_views.BaseDocumentView.as_view(), name='core-document-view'),
    path('core-document-review-create/<int:dataset_id>/<document_es_id>/', core_views.DocumentReviewCreateView.as_view(), name='core-document-review-create'),
    path('core-document-review-update/<int:dataset_id>/<int:document_review_id>/', core_views.DocumentReviewUpdateView.as_view(), name='core-document-review-update'),
    path('core-document-list/', core_views.DocumentReviewListView.as_view(), name='core-document-list'),
)
| 73.851852 | 166 | 0.746239 | from django.urls import path
import core.views as core_views
# URL routes for the core app.  The `name=` values are the stable handles
# used for URL reversing; prefer changing paths over changing names.
urlpatterns = (
    # HTML snippets loaded into the search page.
    path('dataset-snippet/<int:study_id>',
         core_views.DatasetSnippetView.as_view(), name='dataset-snippet'),
    path('analysis-type-snippet/<int:dataset_id>',
         core_views.AnalysisTypeSnippetView.as_view(), name='analysis-type-snippet'),
    path('filter-snippet/<int:dataset_id>',
         core_views.FilterSnippetView.as_view(), name='filter-snippet'),
    path('attribute-snippet/<int:dataset_id>',
         core_views.AttributeSnippetView.as_view(), name='attribute-snippet'),
    # Search / download entry points.
    path('search-router/', core_views.SearchRouterView.as_view(), name='search-router'),
    path('download-router/<int:search_log_id>', core_views.DownloadRouterView.as_view(), name='download-router'),
    path('additional-form-router/<int:dataset_id>/<int:analysis_type_id>', core_views.AdditionalFormRouterView.as_view(), name='additional-form-router'),
    path('base-search/', core_views.BaseSearchView.as_view(), name='base-search'),
    path('base-download/<int:search_log_id>', core_views.BaseDownloadView.as_view(), name='base-download'),
    # Saved searches.
    path('save-search/', core_views.save_search, name='save-search'),
    path('saved-search-list/', core_views.SavedSearchListView.as_view(), name='saved-search-list'),
    path('retrieve-saved-search/<int:saved_search_id>', core_views.RetrieveSavedSearchView.as_view(), name='retrieve-saved-search'),
    # Document viewing and review.
    path('core-document-view/<int:dataset_id>/<document_es_id>/', core_views.BaseDocumentView.as_view(), name='core-document-view'),
    path('core-document-review-create/<int:dataset_id>/<document_es_id>/', core_views.DocumentReviewCreateView.as_view(), name='core-document-review-create'),
    path('core-document-review-update/<int:dataset_id>/<int:document_review_id>/', core_views.DocumentReviewUpdateView.as_view(), name='core-document-review-update'),
    path('core-document-list/', core_views.DocumentReviewListView.as_view(), name='core-document-list'),
)
| 0 | 0 | 0 |
fa98caab9a7b0286b7b7c07c740bcf0da51f0601 | 203 | py | Python | days/04-06-collections/D5/sorting.py | angersa/100daysofcode-with-python-course | 4b7a28604214a49faf243c543f67bc8dc3eab349 | [
"MIT"
] | null | null | null | days/04-06-collections/D5/sorting.py | angersa/100daysofcode-with-python-course | 4b7a28604214a49faf243c543f67bc8dc3eab349 | [
"MIT"
] | null | null | null | days/04-06-collections/D5/sorting.py | angersa/100daysofcode-with-python-course | 4b7a28604214a49faf243c543f67bc8dc3eab349 | [
"MIT"
] | null | null | null | disordered = {(10, 5): 'b', (3, 10): 'a', (5, 2): 'c'}
# Sort the (key, value) pairs by the second element of each key tuple,
# largest first.  (The previous comment "sort keys, then get values" did
# not match what this line does: it sorts items, not keys.)
sorted_list = sorted(disordered.items(), key=lambda x: x[0][1], reverse=True)
print(sorted_list) | 33.833333 | 77 | 0.640394 | disordered = {(10, 5): 'b', (3, 10): 'a', (5, 2): 'c'}
# Sort the (key, value) pairs by the second element of each key tuple,
# largest first.  (The previous comment "sort keys, then get values" did
# not match what this line does: it sorts items, not keys.)
sorted_list = sorted(disordered.items(), key=lambda x: x[0][1], reverse=True)
print(sorted_list) | 0 | 0 | 0 |
b5036c9d9c6977e05e22272804dea05480f37a6c | 8,284 | py | Python | ichnaea/api/locate/tests/test_result.py | mikiec84/ichnaea | ec223cefb788bb921c0e7f5f51bd3b20eae29edd | [
"Apache-2.0"
] | 348 | 2015-01-13T11:48:07.000Z | 2022-03-31T08:33:07.000Z | ichnaea/api/locate/tests/test_result.py | mikiec84/ichnaea | ec223cefb788bb921c0e7f5f51bd3b20eae29edd | [
"Apache-2.0"
] | 1,274 | 2015-01-02T18:15:56.000Z | 2022-03-23T15:29:08.000Z | ichnaea/api/locate/tests/test_result.py | mikiec84/ichnaea | ec223cefb788bb921c0e7f5f51bd3b20eae29edd | [
"Apache-2.0"
] | 149 | 2015-01-04T21:15:07.000Z | 2021-12-10T06:05:09.000Z | from ichnaea.api.locate.constants import DataAccuracy, DataSource
from ichnaea.api.locate.query import Query
from ichnaea.api.locate.result import (
Position,
PositionResultList,
Region,
RegionResultList,
Result,
ResultList,
)
from ichnaea.models import encode_mac
from ichnaea.tests.factories import WifiShardFactory
| 33.538462 | 83 | 0.601762 | from ichnaea.api.locate.constants import DataAccuracy, DataSource
from ichnaea.api.locate.query import Query
from ichnaea.api.locate.result import (
Position,
PositionResultList,
Region,
RegionResultList,
Result,
ResultList,
)
from ichnaea.models import encode_mac
from ichnaea.tests.factories import WifiShardFactory
class TestResult(object):
    """Tests for the base Result type."""

    def test_repr(self):
        # The base repr names the type without leaking field values.
        text = repr(Result(region_code="DE", lat=1.0))
        assert text.startswith("Result")
        for leaked in ("DE", "1.0"):
            assert leaked not in text

    def test_data_accuracy(self):
        # An empty result carries no usable accuracy.
        empty = Result()
        assert empty.data_accuracy is DataAccuracy.none

    def test_used_networks(self):
        wifi = WifiShardFactory.build()
        network = ("wifi", encode_mac(wifi.mac), True)
        assert Result().used_networks == []
        populated = Result(used_networks=[network])
        assert populated.used_networks == [network]
class TestPosition(object):
def test_repr(self):
position = Position(lat=1.0, lon=-1.1, accuracy=100.0, score=2.0)
rep = repr(position)
assert rep.startswith("Position")
assert "1.0" in rep
assert "-1.1" in rep
assert "100.0" in rep
assert position.score == 2.0
def test_json(self):
assert Position().json() == {"position": {"source": "query"}}
assert Position(lat=1.0, lon=1.0, accuracy=2.0).json() == {
"position": {
"latitude": 1.0,
"longitude": 1.0,
"accuracy": 2.0,
"source": "query",
}
}
def test_data_accuracy(self):
def check(accuracy, expected):
pos = Position(lat=1.0, lon=1.0, accuracy=accuracy)
assert pos.data_accuracy is expected
check(None, DataAccuracy.none)
check(0.0, DataAccuracy.high)
check(100, DataAccuracy.high)
check(20000.0, DataAccuracy.medium)
check(10 ** 6, DataAccuracy.low)
class TestRegion(object):
def test_repr(self):
region = Region(
region_code="DE", region_name="Germany", accuracy=100.0, score=2.0
)
rep = repr(region)
assert rep.startswith("Region")
assert "DE" in rep
assert "Germany" in rep
assert region.score == 2.0
def test_data_accuracy(self):
assert Region().data_accuracy is DataAccuracy.none
region = Region(region_code="DE", region_name="Germany", accuracy=100000.0)
assert region.data_accuracy is DataAccuracy.low
region = Region(region_code="VA", region_name="Holy See", accuracy=1000.0)
assert region.data_accuracy is DataAccuracy.medium
class TestResultList(object):
def _make_result(self):
return Position(
lat=1.0, lon=1.0, accuracy=10.0, score=0.5, source=DataSource.internal
)
def test_init(self):
results = ResultList(self._make_result())
assert len(results) == 1
def test_repr(self):
results = ResultList([self._make_result(), self._make_result()])
rep = repr(results)
assert rep.startswith("ResultList:")
assert "Position<" in rep
assert "lat:1.0" in rep
assert "lon:1.0" in rep
assert "accuracy:10.0" in rep
assert "score:0.5" in rep
def test_add(self):
results = ResultList()
results.add(self._make_result())
assert len(results) == 1
def test_add_many(self):
results = ResultList(self._make_result())
results.add((self._make_result(), self._make_result()))
assert len(results) == 3
def test_len(self):
results = ResultList()
results.add(self._make_result())
results.add(self._make_result())
assert len(results) == 2
def test_getitem(self):
result = self._make_result()
results = ResultList()
results.add(result)
assert results[0] == result
def test_iterable(self):
result = self._make_result()
results = ResultList()
results.add(result)
results.add(result)
for res in results:
assert res == result
class TestPositionResultList(object):
def test_repr(self):
assert repr(PositionResultList()).startswith("PositionResultList:")
def test_best_empty(self):
assert PositionResultList().best() is None
def test_best(self):
gb1 = Position(lat=51.5, lon=-0.1, accuracy=100000.0, score=0.6)
gb2 = Position(lat=51.5002, lon=-0.1, accuracy=10000.0, score=1.5)
gb3 = Position(lat=51.7, lon=-0.1, accuracy=1000.0, score=5.0)
bt1 = Position(lat=27.5002, lon=90.5, accuracy=1000.0, score=0.5)
bt2 = Position(lat=27.5, lon=90.5, accuracy=2000.0, score=2.0)
bt3 = Position(lat=27.7, lon=90.7, accuracy=500.0, score=5.0)
bt4 = Position(lat=27.9, lon=90.7, accuracy=300.0, score=5.0)
# single result works
assert PositionResultList([gb1]).best().lat == 51.5
# the lowest accuracy result from the best cluster wins
assert PositionResultList([bt1, bt2]).best().lat == 27.5002
assert PositionResultList([gb1, bt2]).best().lat == 27.5
assert PositionResultList([gb1, gb2, bt2]).best().lat == 51.5002
assert PositionResultList([gb1, gb3, bt1, bt2]).best().lat == 51.7
assert PositionResultList([gb1, gb2, bt2, bt3]).best().lat == 27.7
# break tie by accuracy
assert PositionResultList([gb3, bt3]).best().lat == 27.7
assert PositionResultList([bt3, bt4]).best().lat == 27.9
def test_satisfies(self):
wifis = WifiShardFactory.build_batch(2)
wifi_query = [{"macAddress": wifi.mac} for wifi in wifis]
positions = PositionResultList(
[
Position(lat=1.0, lon=1.0, accuracy=100.0, score=0.5),
Position(lat=1.0, lon=1.0, accuracy=10000.0, score=0.6),
]
)
query = Query(api_type="locate", wifi=wifi_query)
assert positions.satisfies(query)
def test_satisfies_empty(self):
wifis = WifiShardFactory.build_batch(2)
wifi_query = [{"macAddress": wifi.mac} for wifi in wifis]
positions = PositionResultList()
query = Query(api_type="locate", wifi=wifi_query)
assert not positions.satisfies(query)
def test_satisfies_fail(self):
wifis = WifiShardFactory.build_batch(2)
wifi_query = [{"macAddress": wifi.mac} for wifi in wifis]
positions = PositionResultList(
Position(lat=1.0, lon=1.0, accuracy=2500.0, score=2.0)
)
query = Query(api_type="locate", wifi=wifi_query)
assert not positions.satisfies(query)
class TestRegionResultList(object):
def test_repr(self):
assert repr(RegionResultList()).startswith("RegionResultList:")
def test_best_empty(self):
assert RegionResultList().best() is None
def test_best(self):
us1 = Region(
region_code="US",
region_name="us",
accuracy=200000.0,
score=3.0,
source=DataSource.geoip,
)
us2 = Region(
region_code="US",
region_name="us",
accuracy=200000.0,
score=3.0,
source=DataSource.geoip,
)
gb1 = Region(
region_code="GB",
region_name="gb",
accuracy=100000.0,
score=5.0,
source=DataSource.geoip,
)
gb2 = Region(
region_code="GB",
region_name="gb",
accuracy=100000.0,
score=3.0,
source=DataSource.geoip,
)
# highest combined score wins
assert RegionResultList([us1, gb1]).best().region_code == "GB"
assert RegionResultList([us1, gb1, us2]).best().region_code == "US"
# break tie by accuracy
assert RegionResultList([us1, gb2]).best().region_code == "US"
def test_satisfies(self):
regions = RegionResultList(
Region(region_code="DE", region_name="Germany", accuracy=100000.0)
)
assert regions.satisfies(Query())
def test_satisfies_fail(self):
assert not RegionResultList().satisfies(Query())
| 7,023 | 52 | 861 |
d40d1402ab5ddaccc60f54e6b0bffbc87134f18a | 16,587 | py | Python | vcstools/beam_calc.py | cplee1/vcstools_chris_duplicate | e929e01d886e6ce8c113af3b02c2590b441daa03 | [
"AFL-3.0"
] | 5 | 2020-07-01T13:18:53.000Z | 2022-03-04T20:54:14.000Z | vcstools/beam_calc.py | cplee1/vcstools_chris_duplicate | e929e01d886e6ce8c113af3b02c2590b441daa03 | [
"AFL-3.0"
] | 44 | 2019-10-19T00:13:55.000Z | 2022-01-01T04:42:02.000Z | vcstools/beam_calc.py | cplee1/vcstools_chris_duplicate | e929e01d886e6ce8c113af3b02c2590b441daa03 | [
"AFL-3.0"
] | 6 | 2019-10-29T10:51:51.000Z | 2022-03-04T20:54:28.000Z | import os
import sys
import numpy as np
import math
#MWA scripts
from mwa_pb import primary_beam
from vcstools.pointing_utils import sex2deg
from vcstools.metadb_utils import mwa_alt_az_za, get_common_obs_metadata, getmeta
import logging
logger = logging.getLogger(__name__)
def pixel_area(ra_min, ra_max, dec_min, dec_max):
"""
Calculate the area of a pixel on the sky from the pixel borders
Parameters:
-----------
ra_min: float
The Right Acension minimum in degrees
ra_max: float
The Right Acension maximum in degrees
dec_min: float
The Declination minimum in degrees
dec_max: float
The Declination maximum in degrees
Returns:
--------
area: float
Area of the pixel in square degrees
"""
return (180./math.pi) * (ra_max - ra_min) * (math.sin(math.radians(dec_max)) - math.sin(math.radians(dec_min)))
def field_of_view(obsid,
beam_meta_data=None, dur=None):
"""
Will find the field-of-view of the observation (including the drift) in square degrees.
Parameters:
-----------
obsid: int
The observation ID
beam_meta_data: list
OPTIONAL - the list of common metadata from vcstools.metadb_utils.get_common_obs_metadata.
By default will download the metadata for you
dur: int
OPTIONAL - Duration of observation to calculate for in seconds
By default will use the entire observation duration
Returns:
--------
area: float
The field-of-view of the observation in square degrees
"""
if beam_meta_data is None:
beam_meta_data = get_common_obs_metadata(obsid)
if dur is None:
dt = 296
else:
dt = 100
# Change the dur to the inpur dur
obsid, ra, dec, _, delays, centrefreq, channels = beam_meta_data
beam_meta_data = [obsid, ra, dec, dur, delays, centrefreq, channels]
# Make a pixel for each degree on the sky
names_ra_dec = []
for ra in range(0,360):
for dec in range(-90,90):
names_ra_dec.append(["sky_pos", ra+0.5, dec+0.5])
# Get tile beam power for all pixels
sky_powers = get_beam_power_over_time(beam_meta_data, names_ra_dec,
degrees=True, dt=dt)
# Find the maximum power over all time
max_sky_powers = []
for pixel_power in sky_powers:
temp_power = 0.
for time_power in pixel_power:
if time_power[0] > temp_power:
temp_power = time_power[0]
max_sky_powers.append(temp_power)
# Find all pixels greater than the half power point and sum their area
half_power_point = max(max_sky_powers) / 2
i = 0
area_sum = 0
for ra in range(0,360):
for dec in range(-90,90):
if max_sky_powers[i] > half_power_point:
area_sum = area_sum + pixel_area(ra, ra+1, dec, dec+1)
i = i + 1
return area_sum
def beam_enter_exit(powers, duration, dt=296, min_power=0.3):
"""
Calculates when the source enters and exits the beam
beam_enter_exit(min_power, powers, imax, dt):
powers: list of powers fo the duration every dt and freq powers[times][freqs]
dt: the time interval of how often powers are calculated
duration: duration of the observation according to the metadata in seconds
min_power: zenith normalised power cut off
"""
from scipy.interpolate import UnivariateSpline
time_steps = np.array(range(0, duration, dt), dtype=float)
#For each time step record the min power so even if the source is in
#one freq channel it's recorded
powers_freq_min = []
for p in powers:
powers_freq_min.append(float(min(p) - min_power))
if min(powers_freq_min) > 0.:
enter_beam = 0.
exit_beam = 1.
else:
powers_freq_min = np.array(powers_freq_min)
logger.debug("time_steps: {}".format(time_steps))
logger.debug("powers: {}".format(powers_freq_min))
try:
spline = UnivariateSpline(time_steps, powers_freq_min , s=0.)
except:
return None, None
if len(spline.roots()) == 2:
enter_beam, exit_beam = spline.roots()
enter_beam /= duration
exit_beam /= duration
elif len(spline.roots()) == 1:
if powers_freq_min[0] > powers_freq_min[-1]:
#power declines so starts in beem then exits
enter_beam = 0.
exit_beam = spline.roots()[0]/duration
else:
enter_beam = spline.roots()[0]/duration
exit_beam = 1.
else:
enter_beam = 0.
exit_beam = 1.
return enter_beam, exit_beam
def get_beam_power_over_time(beam_meta_data, names_ra_dec,
dt=296, centeronly=True, verbose=False,
option='analytic', degrees=False,
start_time=0):
"""
Calulates the power (gain at coordinate/gain at zenith) for each source over time.
get_beam_power_over_time(beam_meta_data, names_ra_dec,
dt=296, centeronly=True, verbose=False,
option = 'analytic')
Args:
beam_meta_data: [obsid,ra, dec, time, delays,centrefreq, channels]
obsid metadata obtained from meta.get_common_obs_metadata
names_ra_dec: and array in the format [[source_name, RAJ, DecJ]]
dt: time step in seconds for power calculations (default 296)
centeronly: only calculates for the centre frequency (default True)
verbose: prints extra data to (default False)
option: primary beam model [analytic, advanced, full_EE]
start_time: the time in seconds from the begining of the observation to
start calculating at
"""
obsid, _, _, time, delays, centrefreq, channels = beam_meta_data
names_ra_dec = np.array(names_ra_dec)
logger.info("Calculating beam power for OBS ID: {0}".format(obsid))
starttimes=np.arange(start_time,time+start_time,dt)
stoptimes=starttimes+dt
stoptimes[stoptimes>time]=time
Ntimes=len(starttimes)
midtimes=float(obsid)+0.5*(starttimes+stoptimes)
if not centeronly:
PowersX=np.zeros((len(names_ra_dec),
Ntimes,
len(channels)))
PowersY=np.zeros((len(names_ra_dec),
Ntimes,
len(channels)))
# in Hz
frequencies=np.array(channels)*1.28e6
else:
PowersX=np.zeros((len(names_ra_dec),
Ntimes,1))
PowersY=np.zeros((len(names_ra_dec),
Ntimes,1))
if centrefreq > 1e6:
logger.warning("centrefreq is greater than 1e6, assuming input with units of Hz.")
frequencies=np.array([centrefreq])
else:
frequencies=np.array([centrefreq])*1e6
if degrees:
RAs = np.array(names_ra_dec[:,1],dtype=float)
Decs = np.array(names_ra_dec[:,2],dtype=float)
else:
RAs, Decs = sex2deg(names_ra_dec[:,1],names_ra_dec[:,2])
if len(RAs)==0:
sys.stderr.write('Must supply >=1 source positions\n')
return None
if not len(RAs)==len(Decs):
sys.stderr.write('Must supply equal numbers of RAs and Decs\n')
return None
if verbose is False:
#Supress print statements of the primary beam model functions
sys.stdout = open(os.devnull, 'w')
for itime in range(Ntimes):
# this differ's from the previous ephem_utils method by 0.1 degrees
_, Azs, Zas = mwa_alt_az_za(midtimes[itime], ra=RAs, dec=Decs, degrees=True)
# go from altitude to zenith angle
theta = np.radians(Zas)
phi = np.radians(Azs)
for ifreq in range(len(frequencies)):
#Decide on beam model
if option == 'analytic':
rX,rY=primary_beam.MWA_Tile_analytic(theta, phi,
freq=frequencies[ifreq], delays=delays,
zenithnorm=True,
power=True)
elif option == 'advanced':
rX,rY=primary_beam.MWA_Tile_advanced(theta, phi,
freq=frequencies[ifreq], delays=delays,
zenithnorm=True,
power=True)
elif option == 'full_EE':
rX,rY=primary_beam.MWA_Tile_full_EE(theta, phi,
freq=frequencies[ifreq], delays=delays,
zenithnorm=True,
power=True)
PowersX[:,itime,ifreq]=rX
PowersY[:,itime,ifreq]=rY
if verbose is False:
sys.stdout = sys.__stdout__
Powers=0.5*(PowersX+PowersY)
return Powers
def find_sources_in_obs(obsid_list, names_ra_dec,
obs_for_source=False, dt_input=100, beam='analytic',
min_power=0.3, cal_check=False, all_volt=False,
degrees_check=False, metadata_list=None):
"""
Either creates text files for each MWA obs ID of each source within it or a text
file for each source with each MWA obs is that the source is in.
Args:
obsid_list: list of MWA obs IDs
names_ra_dec: [[source_name, ra, dec]]
dt: the time step in seconds to do power calculations
beam: beam simulation type ['analytic', 'advanced', 'full_EE']
min_power: if above the minium power assumes it's in the beam
cal_check: checks the MWA pulsar database if there is a calibration for the obsid
all_volt: Use all voltages observations including some inital test data
with incorrect formats
degrees_check: if false ra and dec is in hms, if true in degrees
Output [output_data, obsid_meta]:
output_data: The format of output_data is dependant on obs_for_source.
If obs_for_source is True:
output_data = {jname:[[obsid, duration, enter, exit, max_power],
[obsid, duration, enter, exit, max_power]]}
If obs_for_source is False:
ouput_data = {obsid:[[jname, enter, exit, max_power],
[jname, enter, exit, max_power]]}
obsid_meta: a list of the output of get_common_obs_metadata for each obsid
"""
#prepares metadata calls and calculates power
powers = []
#powers[obsid][source][time][freq]
obsid_meta = []
obsid_to_remove = []
for i, obsid in enumerate(obsid_list):
if metadata_list:
beam_meta_data, _ = metadata_list[i]
else:
beam_meta_data = get_common_obs_metadata(obsid)
#beam_meta_data = obsid,ra_obs,dec_obs,time_obs,delays,centrefreq,channels
if dt_input * 4 > beam_meta_data[3]:
# If the observation time is very short then a smaller dt time is required
# to get enough ower imformation
dt = int(beam_meta_data[3] / 4.)
else:
dt = dt_input
logger.debug("obsid: {0}, time_obs {1} s, dt {2} s".format(obsid, beam_meta_data[3], dt))
# Perform the file meta data call
files_meta_data = getmeta(service='data_files', params={'obs_id':obsid, 'nocache':1})
if files_meta_data is None:
logger.warning("No file metadata data found for obsid {}. Skipping".format(obsid))
obsid_to_remove.append(obsid)
continue
# Check raw voltage files
raw_available = False
raw_deleted = False
for file_name in files_meta_data.keys():
if file_name.endswith('dat'):
deleted = files_meta_data[file_name]['deleted']
if deleted:
raw_deleted = True
else:
raw_available = True
# Check combined voltage tar files
comb_available = False
comb_deleted = False
for file_name in files_meta_data.keys():
if file_name.endswith('tar'):
deleted = files_meta_data[file_name]['deleted']
if deleted:
comb_deleted = True
else:
comb_available = True
if raw_available or comb_available or all_volt:
powers.append(get_beam_power_over_time(beam_meta_data, names_ra_dec,
dt=dt, centeronly=True, verbose=False,
option=beam, degrees=degrees_check))
obsid_meta.append(beam_meta_data)
elif raw_deleted and comb_deleted:
logger.warning('Raw and combined voltage files deleted for {}'.format(obsid))
obsid_to_remove.append(obsid)
elif raw_deleted:
logger.warning('Raw voltage files deleted for {}'.format(obsid))
obsid_to_remove.append(obsid)
elif comb_deleted:
logger.warning('Combined voltage files deleted for {}'.format(obsid))
obsid_to_remove.append(obsid)
else:
logger.warning('No raw or combined voltage files for {}'.format(obsid))
obsid_to_remove.append(obsid)
for otr in obsid_to_remove:
obsid_list.remove(otr)
#chooses whether to list the source in each obs or the obs for each source
output_data = {}
if obs_for_source:
for sn, source in enumerate(names_ra_dec):
source_data = []
for on, obsid in enumerate(obsid_list):
source_ob_power = powers[on][sn]
if max(source_ob_power) > min_power:
duration = obsid_meta[on][3]
centre_freq = obsid_meta[on][5] #MHz
channels = obsid_meta[on][6]
bandwidth = (channels[-1] - channels[0] + 1.)*1.28 #MHz
logger.debug("Running beam_enter_exit on obsid: {}".format(obsid))
enter_beam, exit_beam = beam_enter_exit(source_ob_power,duration,
dt=dt, min_power=min_power)
if enter_beam is not None:
source_data.append([obsid, duration, enter_beam, exit_beam,
max(source_ob_power)[0],
centre_freq, bandwidth])
# For each source make a dictionary key that contains a list of
# lists of the data for each obsid
output_data[source[0]] = source_data
else:
#output a list of sorces for each obs
for on, obsid in enumerate(obsid_list):
duration = obsid_meta[on][3]
obsid_data = []
for sn, source in enumerate(names_ra_dec):
source_ob_power = powers[on][sn]
if max(source_ob_power) > min_power:
enter_beam, exit_beam = beam_enter_exit(source_ob_power, duration,
dt=dt, min_power=min_power)
obsid_data.append([source[0], enter_beam, exit_beam, max(source_ob_power)[0]])
# For each obsid make a dictionary key that contains a list of
# lists of the data for each source/pulsar
output_data[obsid] = obsid_data
return output_data, obsid_meta | 40.555012 | 115 | 0.5816 | import os
import sys
import numpy as np
import math
#MWA scripts
from mwa_pb import primary_beam
from vcstools.pointing_utils import sex2deg
from vcstools.metadb_utils import mwa_alt_az_za, get_common_obs_metadata, getmeta
import logging
logger = logging.getLogger(__name__)
def pixel_area(ra_min, ra_max, dec_min, dec_max):
"""
Calculate the area of a pixel on the sky from the pixel borders
Parameters:
-----------
ra_min: float
The Right Acension minimum in degrees
ra_max: float
The Right Acension maximum in degrees
dec_min: float
The Declination minimum in degrees
dec_max: float
The Declination maximum in degrees
Returns:
--------
area: float
Area of the pixel in square degrees
"""
return (180./math.pi) * (ra_max - ra_min) * (math.sin(math.radians(dec_max)) - math.sin(math.radians(dec_min)))
def field_of_view(obsid,
beam_meta_data=None, dur=None):
"""
Will find the field-of-view of the observation (including the drift) in square degrees.
Parameters:
-----------
obsid: int
The observation ID
beam_meta_data: list
OPTIONAL - the list of common metadata from vcstools.metadb_utils.get_common_obs_metadata.
By default will download the metadata for you
dur: int
OPTIONAL - Duration of observation to calculate for in seconds
By default will use the entire observation duration
Returns:
--------
area: float
The field-of-view of the observation in square degrees
"""
if beam_meta_data is None:
beam_meta_data = get_common_obs_metadata(obsid)
if dur is None:
dt = 296
else:
dt = 100
# Change the dur to the inpur dur
obsid, ra, dec, _, delays, centrefreq, channels = beam_meta_data
beam_meta_data = [obsid, ra, dec, dur, delays, centrefreq, channels]
# Make a pixel for each degree on the sky
names_ra_dec = []
for ra in range(0,360):
for dec in range(-90,90):
names_ra_dec.append(["sky_pos", ra+0.5, dec+0.5])
# Get tile beam power for all pixels
sky_powers = get_beam_power_over_time(beam_meta_data, names_ra_dec,
degrees=True, dt=dt)
# Find the maximum power over all time
max_sky_powers = []
for pixel_power in sky_powers:
temp_power = 0.
for time_power in pixel_power:
if time_power[0] > temp_power:
temp_power = time_power[0]
max_sky_powers.append(temp_power)
# Find all pixels greater than the half power point and sum their area
half_power_point = max(max_sky_powers) / 2
i = 0
area_sum = 0
for ra in range(0,360):
for dec in range(-90,90):
if max_sky_powers[i] > half_power_point:
area_sum = area_sum + pixel_area(ra, ra+1, dec, dec+1)
i = i + 1
return area_sum
def from_power_to_gain(powers, cfreq, n, coh=True):
from astropy.constants import c,k_B
from math import sqrt
obswl = c.value/cfreq
#for coherent
if coh:
coeff = obswl**2*16*n/(4*np.pi*k_B.value)
else:
coeff = obswl**2*16*sqrt(n)/(4*np.pi*k_B.value)
logger.debug("Wavelength {} m".format(obswl))
logger.debug("Gain coefficient: {}".format(coeff))
SI_to_Jy = 1e-26
return (powers*coeff)*SI_to_Jy
def get_Trec(tab, obsfreq):
Trec = 0.0
for r in range(len(tab)-1):
if tab[r][0]==obsfreq:
Trec = tab[r][1]
elif tab[r][0] < obsfreq < tab[r+1][0]:
Trec = ((tab[r][1] + tab[r+1][1])/2)
if Trec == 0.0:
logger.debug("ERROR getting Trec")
return Trec
def beam_enter_exit(powers, duration, dt=296, min_power=0.3):
"""
Calculates when the source enters and exits the beam
beam_enter_exit(min_power, powers, imax, dt):
powers: list of powers fo the duration every dt and freq powers[times][freqs]
dt: the time interval of how often powers are calculated
duration: duration of the observation according to the metadata in seconds
min_power: zenith normalised power cut off
"""
from scipy.interpolate import UnivariateSpline
time_steps = np.array(range(0, duration, dt), dtype=float)
#For each time step record the min power so even if the source is in
#one freq channel it's recorded
powers_freq_min = []
for p in powers:
powers_freq_min.append(float(min(p) - min_power))
if min(powers_freq_min) > 0.:
enter_beam = 0.
exit_beam = 1.
else:
powers_freq_min = np.array(powers_freq_min)
logger.debug("time_steps: {}".format(time_steps))
logger.debug("powers: {}".format(powers_freq_min))
try:
spline = UnivariateSpline(time_steps, powers_freq_min , s=0.)
except:
return None, None
if len(spline.roots()) == 2:
enter_beam, exit_beam = spline.roots()
enter_beam /= duration
exit_beam /= duration
elif len(spline.roots()) == 1:
if powers_freq_min[0] > powers_freq_min[-1]:
#power declines so starts in beem then exits
enter_beam = 0.
exit_beam = spline.roots()[0]/duration
else:
enter_beam = spline.roots()[0]/duration
exit_beam = 1.
else:
enter_beam = 0.
exit_beam = 1.
return enter_beam, exit_beam
def get_beam_power_over_time(beam_meta_data, names_ra_dec,
dt=296, centeronly=True, verbose=False,
option='analytic', degrees=False,
start_time=0):
"""
Calulates the power (gain at coordinate/gain at zenith) for each source over time.
get_beam_power_over_time(beam_meta_data, names_ra_dec,
dt=296, centeronly=True, verbose=False,
option = 'analytic')
Args:
beam_meta_data: [obsid,ra, dec, time, delays,centrefreq, channels]
obsid metadata obtained from meta.get_common_obs_metadata
names_ra_dec: and array in the format [[source_name, RAJ, DecJ]]
dt: time step in seconds for power calculations (default 296)
centeronly: only calculates for the centre frequency (default True)
verbose: prints extra data to (default False)
option: primary beam model [analytic, advanced, full_EE]
start_time: the time in seconds from the begining of the observation to
start calculating at
"""
obsid, _, _, time, delays, centrefreq, channels = beam_meta_data
names_ra_dec = np.array(names_ra_dec)
logger.info("Calculating beam power for OBS ID: {0}".format(obsid))
starttimes=np.arange(start_time,time+start_time,dt)
stoptimes=starttimes+dt
stoptimes[stoptimes>time]=time
Ntimes=len(starttimes)
midtimes=float(obsid)+0.5*(starttimes+stoptimes)
if not centeronly:
PowersX=np.zeros((len(names_ra_dec),
Ntimes,
len(channels)))
PowersY=np.zeros((len(names_ra_dec),
Ntimes,
len(channels)))
# in Hz
frequencies=np.array(channels)*1.28e6
else:
PowersX=np.zeros((len(names_ra_dec),
Ntimes,1))
PowersY=np.zeros((len(names_ra_dec),
Ntimes,1))
if centrefreq > 1e6:
logger.warning("centrefreq is greater than 1e6, assuming input with units of Hz.")
frequencies=np.array([centrefreq])
else:
frequencies=np.array([centrefreq])*1e6
if degrees:
RAs = np.array(names_ra_dec[:,1],dtype=float)
Decs = np.array(names_ra_dec[:,2],dtype=float)
else:
RAs, Decs = sex2deg(names_ra_dec[:,1],names_ra_dec[:,2])
if len(RAs)==0:
sys.stderr.write('Must supply >=1 source positions\n')
return None
if not len(RAs)==len(Decs):
sys.stderr.write('Must supply equal numbers of RAs and Decs\n')
return None
if verbose is False:
#Supress print statements of the primary beam model functions
sys.stdout = open(os.devnull, 'w')
for itime in range(Ntimes):
# this differ's from the previous ephem_utils method by 0.1 degrees
_, Azs, Zas = mwa_alt_az_za(midtimes[itime], ra=RAs, dec=Decs, degrees=True)
# go from altitude to zenith angle
theta = np.radians(Zas)
phi = np.radians(Azs)
for ifreq in range(len(frequencies)):
#Decide on beam model
if option == 'analytic':
rX,rY=primary_beam.MWA_Tile_analytic(theta, phi,
freq=frequencies[ifreq], delays=delays,
zenithnorm=True,
power=True)
elif option == 'advanced':
rX,rY=primary_beam.MWA_Tile_advanced(theta, phi,
freq=frequencies[ifreq], delays=delays,
zenithnorm=True,
power=True)
elif option == 'full_EE':
rX,rY=primary_beam.MWA_Tile_full_EE(theta, phi,
freq=frequencies[ifreq], delays=delays,
zenithnorm=True,
power=True)
PowersX[:,itime,ifreq]=rX
PowersY[:,itime,ifreq]=rY
if verbose is False:
sys.stdout = sys.__stdout__
Powers=0.5*(PowersX+PowersY)
return Powers
def find_sources_in_obs(obsid_list, names_ra_dec,
obs_for_source=False, dt_input=100, beam='analytic',
min_power=0.3, cal_check=False, all_volt=False,
degrees_check=False, metadata_list=None):
"""
Either creates text files for each MWA obs ID of each source within it or a text
file for each source with each MWA obs is that the source is in.
Args:
obsid_list: list of MWA obs IDs
names_ra_dec: [[source_name, ra, dec]]
dt: the time step in seconds to do power calculations
beam: beam simulation type ['analytic', 'advanced', 'full_EE']
min_power: if above the minium power assumes it's in the beam
cal_check: checks the MWA pulsar database if there is a calibration for the obsid
all_volt: Use all voltages observations including some inital test data
with incorrect formats
degrees_check: if false ra and dec is in hms, if true in degrees
Output [output_data, obsid_meta]:
output_data: The format of output_data is dependant on obs_for_source.
If obs_for_source is True:
output_data = {jname:[[obsid, duration, enter, exit, max_power],
[obsid, duration, enter, exit, max_power]]}
If obs_for_source is False:
ouput_data = {obsid:[[jname, enter, exit, max_power],
[jname, enter, exit, max_power]]}
obsid_meta: a list of the output of get_common_obs_metadata for each obsid
"""
#prepares metadata calls and calculates power
powers = []
#powers[obsid][source][time][freq]
obsid_meta = []
obsid_to_remove = []
for i, obsid in enumerate(obsid_list):
if metadata_list:
beam_meta_data, _ = metadata_list[i]
else:
beam_meta_data = get_common_obs_metadata(obsid)
#beam_meta_data = obsid,ra_obs,dec_obs,time_obs,delays,centrefreq,channels
if dt_input * 4 > beam_meta_data[3]:
# If the observation time is very short then a smaller dt time is required
# to get enough ower imformation
dt = int(beam_meta_data[3] / 4.)
else:
dt = dt_input
logger.debug("obsid: {0}, time_obs {1} s, dt {2} s".format(obsid, beam_meta_data[3], dt))
# Perform the file meta data call
files_meta_data = getmeta(service='data_files', params={'obs_id':obsid, 'nocache':1})
if files_meta_data is None:
logger.warning("No file metadata data found for obsid {}. Skipping".format(obsid))
obsid_to_remove.append(obsid)
continue
# Check raw voltage files
raw_available = False
raw_deleted = False
for file_name in files_meta_data.keys():
if file_name.endswith('dat'):
deleted = files_meta_data[file_name]['deleted']
if deleted:
raw_deleted = True
else:
raw_available = True
# Check combined voltage tar files
comb_available = False
comb_deleted = False
for file_name in files_meta_data.keys():
if file_name.endswith('tar'):
deleted = files_meta_data[file_name]['deleted']
if deleted:
comb_deleted = True
else:
comb_available = True
if raw_available or comb_available or all_volt:
powers.append(get_beam_power_over_time(beam_meta_data, names_ra_dec,
dt=dt, centeronly=True, verbose=False,
option=beam, degrees=degrees_check))
obsid_meta.append(beam_meta_data)
elif raw_deleted and comb_deleted:
logger.warning('Raw and combined voltage files deleted for {}'.format(obsid))
obsid_to_remove.append(obsid)
elif raw_deleted:
logger.warning('Raw voltage files deleted for {}'.format(obsid))
obsid_to_remove.append(obsid)
elif comb_deleted:
logger.warning('Combined voltage files deleted for {}'.format(obsid))
obsid_to_remove.append(obsid)
else:
logger.warning('No raw or combined voltage files for {}'.format(obsid))
obsid_to_remove.append(obsid)
for otr in obsid_to_remove:
obsid_list.remove(otr)
#chooses whether to list the source in each obs or the obs for each source
output_data = {}
if obs_for_source:
for sn, source in enumerate(names_ra_dec):
source_data = []
for on, obsid in enumerate(obsid_list):
source_ob_power = powers[on][sn]
if max(source_ob_power) > min_power:
duration = obsid_meta[on][3]
centre_freq = obsid_meta[on][5] #MHz
channels = obsid_meta[on][6]
bandwidth = (channels[-1] - channels[0] + 1.)*1.28 #MHz
logger.debug("Running beam_enter_exit on obsid: {}".format(obsid))
enter_beam, exit_beam = beam_enter_exit(source_ob_power,duration,
dt=dt, min_power=min_power)
if enter_beam is not None:
source_data.append([obsid, duration, enter_beam, exit_beam,
max(source_ob_power)[0],
centre_freq, bandwidth])
# For each source make a dictionary key that contains a list of
# lists of the data for each obsid
output_data[source[0]] = source_data
else:
#output a list of sorces for each obs
for on, obsid in enumerate(obsid_list):
duration = obsid_meta[on][3]
obsid_data = []
for sn, source in enumerate(names_ra_dec):
source_ob_power = powers[on][sn]
if max(source_ob_power) > min_power:
enter_beam, exit_beam = beam_enter_exit(source_ob_power, duration,
dt=dt, min_power=min_power)
obsid_data.append([source[0], enter_beam, exit_beam, max(source_ob_power)[0]])
# For each obsid make a dictionary key that contains a list of
# lists of the data for each source/pulsar
output_data[obsid] = obsid_data
return output_data, obsid_meta | 719 | 0 | 46 |
be10b0d724e03294d196bc011b322a0131e5d9cc | 668 | py | Python | sample_list.py | xuanyuyt/libfacedetection | a2576ef7c5f35dd8bc64227b60948da8f4fd5d5e | [
"BSD-3-Clause"
] | null | null | null | sample_list.py | xuanyuyt/libfacedetection | a2576ef7c5f35dd8bc64227b60948da8f4fd5d5e | [
"BSD-3-Clause"
] | null | null | null | sample_list.py | xuanyuyt/libfacedetection | a2576ef7c5f35dd8bc64227b60948da8f4fd5d5e | [
"BSD-3-Clause"
] | null | null | null | # -*- coding=utf-8 -*-
import os
if __name__ == '__main__':
    # ================================================================== #
    #                        Select Sample List                          #
    # ================================================================== #
    # Pair each annotation XML path with its JPEG image path and write the
    # list to ALL.txt as "<jpg>\t<xml>" lines.  Fix: the output file is now
    # opened with a context manager too, so it is closed (and flushed) even
    # if processing an input line raises.
    with open('Path_Xmls.txt', 'r') as fp, open('ALL.txt', 'w') as output:
        for oneFile in fp:
            xmlname = oneFile.strip()
            # Derive the image path from the annotation path by convention:
            # .../Annotations/x.xml -> .../JPEGImages/x.jpg
            jpgname = xmlname.replace('.xml', '.jpg').replace('Annotations', 'JPEGImages')
            output.write('{}\t{}\n'.format(jpgname, xmlname))
| 37.111111 | 90 | 0.378743 | # -*- coding=utf-8 -*-
import os
if __name__ == '__main__':
# ================================================================== #
# Select Sample List #
# ================================================================== #
output = open('ALL.txt', 'w')
with open('Path_Xmls.txt', 'r') as fp:
for oneFile in fp:
xmlname = oneFile.strip()
jpgname = xmlname.replace('.xml', '.jpg').replace('Annotations', 'JPEGImages')
output.write(jpgname)
output.write('\t')
output.write(xmlname)
output.write('\n')
output.close()
| 0 | 0 | 0 |
cea272597e7c5a179183dd81f275ae2984ba6975 | 1,675 | py | Python | superb/urls.py | gheyderov/E-commerce-website | 9a87e8e6658a69fb017bdc3b36d6dc5417e3124e | [
"MIT"
] | null | null | null | superb/urls.py | gheyderov/E-commerce-website | 9a87e8e6658a69fb017bdc3b36d6dc5417e3124e | [
"MIT"
] | null | null | null | superb/urls.py | gheyderov/E-commerce-website | 9a87e8e6658a69fb017bdc3b36d6dc5417e3124e | [
"MIT"
] | null | null | null | """superb URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
)
urlpatterns = [
    # Django admin and CKEditor upload endpoints
    path ('admin/', admin.site.urls),
    path('ckeditor/', include('ckeditor_uploader.urls')),
    # Site applications, all mounted at the URL root
    path ('', include("core.urls")),
    path ('', include("blog.urls")),
    path ('', include("accounts.urls", namespace = "accounts")),
    path ('', include("order.urls", namespace="order")),
    path ('', include("products.urls")),
    # REST API: browsable-API auth, app endpoints, and JWT obtain/refresh
    path('api-auth/', include('rest_framework.urls')),
    path('api/', include('api.urls')),
    path('api/token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
    path('api/token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
]
# Serve static files via Django only in development (DEBUG=True)
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root = settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT) | 37.222222 | 84 | 0.699701 | """superb URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
)
urlpatterns = [
path ('admin/', admin.site.urls),
path('ckeditor/', include('ckeditor_uploader.urls')),
path ('', include("core.urls")),
path ('', include("blog.urls")),
path ('', include("accounts.urls", namespace = "accounts")),
path ('', include("order.urls", namespace="order")),
path ('', include("products.urls")),
path('api-auth/', include('rest_framework.urls')),
path('api/', include('api.urls')),
path('api/token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
path('api/token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root = settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT) | 0 | 0 | 0 |
e8aeb956c1e734978949a081f9e6a6d6823a2d71 | 164 | py | Python | src/__main__.py | cesarabelfigueroa/FileSystem | fcd42f62ee406dbaf3180b0c15903c196c885e4a | [
"MIT"
] | null | null | null | src/__main__.py | cesarabelfigueroa/FileSystem | fcd42f62ee406dbaf3180b0c15903c196c885e4a | [
"MIT"
] | null | null | null | src/__main__.py | cesarabelfigueroa/FileSystem | fcd42f62ee406dbaf3180b0c15903c196c885e4a | [
"MIT"
] | null | null | null | from classes.Shell import Shell
from classes.FileSystem import FileSystem
main() | 20.5 | 41 | 0.75 | from classes.Shell import Shell
from classes.FileSystem import FileSystem
def main():
    """Entry point: open the disk image, ensure a root directory exists,
    and hand control to the interactive shell loop."""
    disk_shell = Shell("./disk/Disk.bin")
    disk_shell.createRoot()
    disk_shell.execute()
main() | 61 | 0 | 23 |
539793bf8d49e145d5788dcd14ff3f0d24bf7a75 | 74 | py | Python | amadeus/version.py | minjikarin/amadeus-python | 14a004912ee8c36ee4fd79651ea1b23afe6b2a6e | [
"MIT"
] | null | null | null | amadeus/version.py | minjikarin/amadeus-python | 14a004912ee8c36ee4fd79651ea1b23afe6b2a6e | [
"MIT"
] | null | null | null | amadeus/version.py | minjikarin/amadeus-python | 14a004912ee8c36ee4fd79651ea1b23afe6b2a6e | [
"MIT"
] | null | null | null | version_info = (5, 3, 1)
version = '.'.join(str(v) for v in version_info)
| 24.666667 | 48 | 0.648649 | version_info = (5, 3, 1)
version = '.'.join(str(v) for v in version_info)
| 0 | 0 | 0 |
9774458a9bbc1046d5b4bd5e8043fb960f7c9702 | 11,119 | py | Python | Tests/varLib/instancer/names_test.py | odidev/fonttools | 27b5f568f562971d7fbf64eeb027ea61e4939db4 | [
"Apache-2.0",
"MIT"
] | 2,705 | 2016-09-27T10:02:12.000Z | 2022-03-31T09:37:46.000Z | Tests/varLib/instancer/names_test.py | odidev/fonttools | 27b5f568f562971d7fbf64eeb027ea61e4939db4 | [
"Apache-2.0",
"MIT"
] | 1,599 | 2016-09-27T09:07:36.000Z | 2022-03-31T23:04:51.000Z | Tests/varLib/instancer/names_test.py | odidev/fonttools | 27b5f568f562971d7fbf64eeb027ea61e4939db4 | [
"Apache-2.0",
"MIT"
] | 352 | 2016-10-07T04:18:15.000Z | 2022-03-30T07:35:01.000Z | from fontTools.ttLib.tables import otTables
from fontTools.otlLib.builder import buildStatTable
from fontTools.varLib import instancer
import pytest
@pytest.mark.parametrize(
"limits, expected, isNonRIBBI",
[
# Regular
(
{"wght": 400},
{
(1, 3, 1, 0x409): "Test Variable Font",
(2, 3, 1, 0x409): "Regular",
(3, 3, 1, 0x409): "2.001;GOOG;TestVariableFont-Regular",
(6, 3, 1, 0x409): "TestVariableFont-Regular",
},
False,
),
# Regular Normal (width axis Normal isn't included since it is elided)
(
{"wght": 400, "wdth": 100},
{
(1, 3, 1, 0x409): "Test Variable Font",
(2, 3, 1, 0x409): "Regular",
(3, 3, 1, 0x409): "2.001;GOOG;TestVariableFont-Regular",
(6, 3, 1, 0x409): "TestVariableFont-Regular",
},
False,
),
# Black
(
{"wght": 900},
{
(1, 3, 1, 0x409): "Test Variable Font Black",
(2, 3, 1, 0x409): "Regular",
(3, 3, 1, 0x409): "2.001;GOOG;TestVariableFont-Black",
(6, 3, 1, 0x409): "TestVariableFont-Black",
(16, 3, 1, 0x409): "Test Variable Font",
(17, 3, 1, 0x409): "Black",
},
True,
),
# Thin
(
{"wght": 100},
{
(1, 3, 1, 0x409): "Test Variable Font Thin",
(2, 3, 1, 0x409): "Regular",
(3, 3, 1, 0x409): "2.001;GOOG;TestVariableFont-Thin",
(6, 3, 1, 0x409): "TestVariableFont-Thin",
(16, 3, 1, 0x409): "Test Variable Font",
(17, 3, 1, 0x409): "Thin",
},
True,
),
# Thin Condensed
(
{"wght": 100, "wdth": 79},
{
(1, 3, 1, 0x409): "Test Variable Font Thin Condensed",
(2, 3, 1, 0x409): "Regular",
(3, 3, 1, 0x409): "2.001;GOOG;TestVariableFont-ThinCondensed",
(6, 3, 1, 0x409): "TestVariableFont-ThinCondensed",
(16, 3, 1, 0x409): "Test Variable Font",
(17, 3, 1, 0x409): "Thin Condensed",
},
True,
),
# Condensed with unpinned weights
(
{"wdth": 79, "wght": instancer.AxisRange(400, 900)},
{
(1, 3, 1, 0x409): "Test Variable Font Condensed",
(2, 3, 1, 0x409): "Regular",
(3, 3, 1, 0x409): "2.001;GOOG;TestVariableFont-Condensed",
(6, 3, 1, 0x409): "TestVariableFont-Condensed",
(16, 3, 1, 0x409): "Test Variable Font",
(17, 3, 1, 0x409): "Condensed",
},
True,
),
],
)
@pytest.mark.parametrize(
"limits, expected, isNonRIBBI",
[
# Regular | Normal
(
{"wght": 400},
{
(1, 3, 1, 0x409): "Test Variable Font",
(2, 3, 1, 0x409): "Normal",
},
False,
),
# Black | Negreta
(
{"wght": 900},
{
(1, 3, 1, 0x409): "Test Variable Font Negreta",
(2, 3, 1, 0x409): "Normal",
(16, 3, 1, 0x409): "Test Variable Font",
(17, 3, 1, 0x409): "Negreta",
},
True,
),
# Black Condensed | Negreta Zhuštěné
(
{"wght": 900, "wdth": 79},
{
(1, 3, 1, 0x409): "Test Variable Font Negreta Zhuštěné",
(2, 3, 1, 0x409): "Normal",
(16, 3, 1, 0x409): "Test Variable Font",
(17, 3, 1, 0x409): "Negreta Zhuštěné",
},
True,
),
],
)
@pytest.mark.parametrize(
"limits, expected, isNonRIBBI",
[
# Regular | Normal
(
{"wght": 400},
{
(1, 3, 1, 0x409): "Test Variable Font",
(2, 3, 1, 0x409): "Italic",
(6, 3, 1, 0x409): "TestVariableFont-Italic",
},
False,
),
# Black Condensed Italic
(
{"wght": 900, "wdth": 79},
{
(1, 3, 1, 0x409): "Test Variable Font Black Condensed",
(2, 3, 1, 0x409): "Italic",
(6, 3, 1, 0x409): "TestVariableFont-BlackCondensedItalic",
(16, 3, 1, 0x409): "Test Variable Font",
(17, 3, 1, 0x409): "Black Condensed Italic",
},
True,
),
],
)
| 34.424149 | 88 | 0.559133 | from fontTools.ttLib.tables import otTables
from fontTools.otlLib.builder import buildStatTable
from fontTools.varLib import instancer
import pytest
def test_pruningUnusedNames(varfont):
    """pruningUnusedNames must delete name records whose IDs stop being
    referenced when fvar/STAT entries — or the whole tables — are removed
    inside the context manager."""
    varNameIDs = instancer.names.getVariationNameIDs(varfont)
    # The test font references nameIDs 256..297 from its variation tables.
    assert varNameIDs == set(range(256, 297 + 1))
    fvar = varfont["fvar"]
    stat = varfont["STAT"].table
    with instancer.names.pruningUnusedNames(varfont):
        del fvar.axes[0]  # Weight (nameID=256)
        del fvar.instances[0]  # Thin (nameID=258)
        del stat.DesignAxisRecord.Axis[0]  # Weight (nameID=256)
        del stat.AxisValueArray.AxisValue[0]  # Thin (nameID=258)
    # The two now-unreferenced nameIDs must be gone from the name table.
    assert not any(n for n in varfont["name"].names if n.nameID in {256, 258})
    with instancer.names.pruningUnusedNames(varfont):
        del varfont["fvar"]
        del varfont["STAT"]
    # With no variation tables left, every variation nameID must be pruned,
    # and the (now unused) ltag table dropped as well.
    assert not any(n for n in varfont["name"].names if n.nameID in varNameIDs)
    assert "ltag" not in varfont
def _test_name_records(varfont, expected, isNonRIBBI, platforms=[0x409]):
nametable = varfont["name"]
font_names = {
(r.nameID, r.platformID, r.platEncID, r.langID): r.toUnicode()
for r in nametable.names
}
for k in expected:
if k[-1] not in platforms:
continue
assert font_names[k] == expected[k]
font_nameids = set(i[0] for i in font_names)
if isNonRIBBI:
assert 16 in font_nameids
assert 17 in font_nameids
if "fvar" not in varfont:
assert 25 not in font_nameids
@pytest.mark.parametrize(
"limits, expected, isNonRIBBI",
[
# Regular
(
{"wght": 400},
{
(1, 3, 1, 0x409): "Test Variable Font",
(2, 3, 1, 0x409): "Regular",
(3, 3, 1, 0x409): "2.001;GOOG;TestVariableFont-Regular",
(6, 3, 1, 0x409): "TestVariableFont-Regular",
},
False,
),
# Regular Normal (width axis Normal isn't included since it is elided)
(
{"wght": 400, "wdth": 100},
{
(1, 3, 1, 0x409): "Test Variable Font",
(2, 3, 1, 0x409): "Regular",
(3, 3, 1, 0x409): "2.001;GOOG;TestVariableFont-Regular",
(6, 3, 1, 0x409): "TestVariableFont-Regular",
},
False,
),
# Black
(
{"wght": 900},
{
(1, 3, 1, 0x409): "Test Variable Font Black",
(2, 3, 1, 0x409): "Regular",
(3, 3, 1, 0x409): "2.001;GOOG;TestVariableFont-Black",
(6, 3, 1, 0x409): "TestVariableFont-Black",
(16, 3, 1, 0x409): "Test Variable Font",
(17, 3, 1, 0x409): "Black",
},
True,
),
# Thin
(
{"wght": 100},
{
(1, 3, 1, 0x409): "Test Variable Font Thin",
(2, 3, 1, 0x409): "Regular",
(3, 3, 1, 0x409): "2.001;GOOG;TestVariableFont-Thin",
(6, 3, 1, 0x409): "TestVariableFont-Thin",
(16, 3, 1, 0x409): "Test Variable Font",
(17, 3, 1, 0x409): "Thin",
},
True,
),
# Thin Condensed
(
{"wght": 100, "wdth": 79},
{
(1, 3, 1, 0x409): "Test Variable Font Thin Condensed",
(2, 3, 1, 0x409): "Regular",
(3, 3, 1, 0x409): "2.001;GOOG;TestVariableFont-ThinCondensed",
(6, 3, 1, 0x409): "TestVariableFont-ThinCondensed",
(16, 3, 1, 0x409): "Test Variable Font",
(17, 3, 1, 0x409): "Thin Condensed",
},
True,
),
# Condensed with unpinned weights
(
{"wdth": 79, "wght": instancer.AxisRange(400, 900)},
{
(1, 3, 1, 0x409): "Test Variable Font Condensed",
(2, 3, 1, 0x409): "Regular",
(3, 3, 1, 0x409): "2.001;GOOG;TestVariableFont-Condensed",
(6, 3, 1, 0x409): "TestVariableFont-Condensed",
(16, 3, 1, 0x409): "Test Variable Font",
(17, 3, 1, 0x409): "Condensed",
},
True,
),
],
)
def test_updateNameTable_with_registered_axes_ribbi(
    varfont, limits, expected, isNonRIBBI
):
    """Instancing the name table at `limits` must produce exactly the
    parametrized `expected` records; non-RIBBI styles also get 16/17."""
    instancer.names.updateNameTable(varfont, limits)
    _test_name_records(varfont, expected, isNonRIBBI)
def test_updatetNameTable_axis_order(varfont):
    """The particles of the typographic subfamily name (nameID 17) must
    follow the STAT table's axis order, not a fixed wght/wdth order.

    NOTE(review): the "updatet" typo is kept — the test name is its public
    identifier for pytest selection.
    """
    axes = [
        dict(
            tag="wght",
            name="Weight",
            values=[
                dict(value=400, name="Regular"),
            ],
        ),
        dict(
            tag="wdth",
            name="Width",
            values=[
                dict(value=75, name="Condensed"),
            ],
        ),
    ]
    nametable = varfont["name"]
    buildStatTable(varfont, axes)
    instancer.names.updateNameTable(varfont, {"wdth": 75, "wght": 400})
    assert nametable.getName(17, 3, 1, 0x409).toUnicode() == "Regular Condensed"
    # Swap the axes so the names get swapped
    axes[0], axes[1] = axes[1], axes[0]
    buildStatTable(varfont, axes)
    instancer.names.updateNameTable(varfont, {"wdth": 75, "wght": 400})
    assert nametable.getName(17, 3, 1, 0x409).toUnicode() == "Condensed Regular"
@pytest.mark.parametrize(
"limits, expected, isNonRIBBI",
[
# Regular | Normal
(
{"wght": 400},
{
(1, 3, 1, 0x409): "Test Variable Font",
(2, 3, 1, 0x409): "Normal",
},
False,
),
# Black | Negreta
(
{"wght": 900},
{
(1, 3, 1, 0x409): "Test Variable Font Negreta",
(2, 3, 1, 0x409): "Normal",
(16, 3, 1, 0x409): "Test Variable Font",
(17, 3, 1, 0x409): "Negreta",
},
True,
),
# Black Condensed | Negreta Zhuštěné
(
{"wght": 900, "wdth": 79},
{
(1, 3, 1, 0x409): "Test Variable Font Negreta Zhuštěné",
(2, 3, 1, 0x409): "Normal",
(16, 3, 1, 0x409): "Test Variable Font",
(17, 3, 1, 0x409): "Negreta Zhuštěné",
},
True,
),
],
)
def test_updateNameTable_with_multilingual_names(varfont, limits, expected, isNonRIBBI):
    """Updated names must also be generated for every language that has
    translated STAT entry names — Czech records are added here first."""
    name = varfont["name"]
    # langID 0x405 is the Czech Windows langID
    name.setName("Test Variable Font", 1, 3, 1, 0x405)
    name.setName("Normal", 2, 3, 1, 0x405)
    name.setName("Normal", 261, 3, 1, 0x405)  # nameID 261=Regular STAT entry
    name.setName("Negreta", 266, 3, 1, 0x405)  # nameID 266=Black STAT entry
    name.setName("Zhuštěné", 279, 3, 1, 0x405)  # nameID 279=Condensed STAT entry
    instancer.names.updateNameTable(varfont, limits)
    # Only check the Czech records produced from the translations above.
    _test_name_records(varfont, expected, isNonRIBBI, platforms=[0x405])
def test_updateNameTable_missing_axisValues(varfont):
    """updateNameTable must raise ValueError when a pinned axis location
    (wght=200 here) has no matching STAT AxisValue in the font.

    Fix: the match pattern is now a raw string — the brackets are regex
    escapes for pytest.raises(match=...), and ``"\\["`` in a non-raw literal
    is an invalid escape sequence (SyntaxWarning on modern Python).
    """
    with pytest.raises(ValueError, match=r"Cannot find Axis Values \['wght=200'\]"):
        instancer.names.updateNameTable(varfont, {"wght": 200})
def test_updateNameTable_missing_stat(varfont):
    """updateNameTable must raise ValueError when the font has no STAT
    table to derive style names from."""
    del varfont["STAT"]
    with pytest.raises(
        ValueError, match="Cannot update name table since there is no STAT table."
    ):
        instancer.names.updateNameTable(varfont, {"wght": 400})
@pytest.mark.parametrize(
"limits, expected, isNonRIBBI",
[
# Regular | Normal
(
{"wght": 400},
{
(1, 3, 1, 0x409): "Test Variable Font",
(2, 3, 1, 0x409): "Italic",
(6, 3, 1, 0x409): "TestVariableFont-Italic",
},
False,
),
# Black Condensed Italic
(
{"wght": 900, "wdth": 79},
{
(1, 3, 1, 0x409): "Test Variable Font Black Condensed",
(2, 3, 1, 0x409): "Italic",
(6, 3, 1, 0x409): "TestVariableFont-BlackCondensedItalic",
(16, 3, 1, 0x409): "Test Variable Font",
(17, 3, 1, 0x409): "Black Condensed Italic",
},
True,
),
],
)
def test_updateNameTable_vf_with_italic_attribute(
    varfont, limits, expected, isNonRIBBI
):
    """When the font-link AxisValue is made non-elidable and renamed to
    Italic, the produced name records must carry the Italic attribute."""
    # AxisValue[4] is the italic/roman font-link entry in the test font.
    font_link_axisValue = varfont["STAT"].table.AxisValueArray.AxisValue[4]
    # Unset ELIDABLE_AXIS_VALUE_NAME flag
    font_link_axisValue.Flags &= ~instancer.names.ELIDABLE_AXIS_VALUE_NAME
    font_link_axisValue.ValueNameID = 294  # Roman --> Italic
    instancer.names.updateNameTable(varfont, limits)
    _test_name_records(varfont, expected, isNonRIBBI)
def test_updateNameTable_format4_axisValues(varfont):
    """A format 4 AxisValue covering multiple axes must take precedence
    over the individual per-axis AxisValues when building style names."""
    # format 4 axisValues should dominate the other axisValues
    stat = varfont["STAT"].table
    axisValue = otTables.AxisValue()
    axisValue.Format = 4
    axisValue.Flags = 0
    varfont["name"].setName("Dominant Value", 297, 3, 1, 0x409)
    axisValue.ValueNameID = 297
    axisValue.AxisValueRecord = []
    # Bind the record to the wght=900 / wdth=79 location by axis index.
    for tag, value in (("wght", 900), ("wdth", 79)):
        rec = otTables.AxisValueRecord()
        rec.AxisIndex = next(
            i for i, a in enumerate(stat.DesignAxisRecord.Axis) if a.AxisTag == tag
        )
        rec.Value = value
        axisValue.AxisValueRecord.append(rec)
    stat.AxisValueArray.AxisValue.append(axisValue)
    instancer.names.updateNameTable(varfont, {"wdth": 79, "wght": 900})
    expected = {
        (1, 3, 1, 0x409): "Test Variable Font Dominant Value",
        (2, 3, 1, 0x409): "Regular",
        (16, 3, 1, 0x409): "Test Variable Font",
        (17, 3, 1, 0x409): "Dominant Value",
    }
    _test_name_records(varfont, expected, isNonRIBBI=True)
def test_updateNameTable_elided_axisValues(varfont):
    """When every AxisValue is elidable, ElidedFallbackNameID must supply
    the style name instead."""
    stat = varfont["STAT"].table
    # set ELIDABLE_AXIS_VALUE_NAME flag for all axisValues
    for axisValue in stat.AxisValueArray.AxisValue:
        axisValue.Flags |= instancer.names.ELIDABLE_AXIS_VALUE_NAME
    stat.ElidedFallbackNameID = 266  # Regular --> Black
    instancer.names.updateNameTable(varfont, {"wght": 400})
    # Since all axis values are elided, the elided fallback name
    # must be used to construct the style names. Since we
    # changed it to Black, we need both a typoSubFamilyName and
    # the subFamilyName set so it conforms to the RIBBI model.
    expected = {(2, 3, 1, 0x409): "Regular", (17, 3, 1, 0x409): "Black"}
    _test_name_records(varfont, expected, isNonRIBBI=True)
def test_updateNameTable_existing_subfamily_name_is_not_regular(varfont):
    """A non-Regular RIBBI subFamily name must be reset to Regular when
    instancing to a non-RIBBI style."""
    # Check the subFamily name will be set to Regular when we update a name
    # table to a non-RIBBI style and the current subFamily name is a RIBBI
    # style which isn't Regular.
    varfont["name"].setName("Bold", 2, 3, 1, 0x409)  # subFamily Regular --> Bold
    instancer.names.updateNameTable(varfont, {"wght": 100})
    expected = {(2, 3, 1, 0x409): "Regular", (17, 3, 1, 0x409): "Thin"}
    _test_name_records(varfont, expected, isNonRIBBI=True)
| 6,104 | 0 | 250 |
516979c22c20cedc5cccce7806521c8edb663484 | 1,482 | py | Python | tests/migrations/0001_initial.py | ixc/wagtailstreamforms | 93bf352219608e803948cd28385566e0ca63fa7c | [
"MIT"
] | null | null | null | tests/migrations/0001_initial.py | ixc/wagtailstreamforms | 93bf352219608e803948cd28385566e0ca63fa7c | [
"MIT"
] | null | null | null | tests/migrations/0001_initial.py | ixc/wagtailstreamforms | 93bf352219608e803948cd28385566e0ca63fa7c | [
"MIT"
] | 1 | 2021-01-27T10:49:35.000Z | 2021-01-27T10:49:35.000Z | # Generated by Django 2.2.1 on 2019-05-17 19:15
from django.db import migrations, models
import django.db.models.deletion
import wagtailstreamforms.fields
| 34.465116 | 156 | 0.589069 | # Generated by Django 2.2.1 on 2019-05-17 19:15
from django.db import migrations, models
import django.db.models.deletion
import wagtailstreamforms.fields
class Migration(migrations.Migration):
    """Initial migration for the test app: creates three models exercising
    wagtailstreamforms fields and the form advanced-settings relation."""
    initial = True
    dependencies = [
        ('wagtailstreamforms', '0001_initial'),
    ]
    operations = [
        # Model carrying a HookSelectField (the custom field under test).
        migrations.CreateModel(
            name='HookSelectModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('hooks', wagtailstreamforms.fields.HookSelectField(blank=True, help_text='Some hooks', null=True)),
            ],
        ),
        # Settings model with only an id — presumably a deliberately
        # incomplete fixture (no link to Form); confirm against the tests.
        migrations.CreateModel(
            name='InvalidFormSettingsModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        # Settings model linked one-to-one to wagtailstreamforms.Form via
        # the 'advanced_settings' reverse accessor.
        migrations.CreateModel(
            name='ValidFormSettingsModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('number', models.IntegerField()),
                ('form', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='advanced_settings', to='wagtailstreamforms.Form')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| 0 | 1,302 | 23 |
83ff131905fef6cb336a239bba7b3fdd20edf5cf | 492 | py | Python | PyRaMD/integrate/velocity_verlet.py | adodin/simple_MD | 93d725e6cb3de9901b44967d75e3db9d726b936a | [
"MIT"
] | null | null | null | PyRaMD/integrate/velocity_verlet.py | adodin/simple_MD | 93d725e6cb3de9901b44967d75e3db9d726b936a | [
"MIT"
] | 5 | 2020-03-11T20:32:25.000Z | 2020-03-12T00:43:06.000Z | PyRaMD/integrate/velocity_verlet.py | adodin/simple_MD | 93d725e6cb3de9901b44967d75e3db9d726b936a | [
"MIT"
] | null | null | null | # Velocity Verlet Integrator as a Functor containing the integrator specs
# Initializes Integrator with time step & force field | 32.8 | 73 | 0.603659 | # Velocity Verlet Integrator as a Functor containing the integrator specs
class VelocityVerlet:
    """Velocity Verlet integrator functor: advances (x, v) by one step dt."""
    # Initializes Integrator with time step & force field
    def __init__(self, dt, force_field):
        # dt: integration time step
        self.dt = dt
        # force_field: callable x -> value used directly as the acceleration
        # (despite the name, no division by mass happens in __call__)
        self.force_field = force_field
    def __call__(self, x, v, m, *args, **kwargs):
        # NOTE(review): m is accepted but never used here — confirm whether
        # force_field is meant to return acceleration rather than force.
        a_1 = self.force_field(x)
        # Position update uses the acceleration at the start of the step.
        x = x + v * self.dt + 0.5 * a_1 * self.dt**2
        a_2 = self.force_field(x)
        # Velocity update averages old- and new-position accelerations.
        v = v + 0.5 * (a_1 + a_2) * self.dt
return x, v | 284 | 0 | 76 |
e3bb77c5090710317d11e372d9d96a50cd00e5eb | 8,369 | py | Python | pyblockchain/blockchain.py | kadonoamino/python_blockchain | 2ef52bccc812f0f7c48e61256d125e196ad51bce | [
"MIT"
] | null | null | null | pyblockchain/blockchain.py | kadonoamino/python_blockchain | 2ef52bccc812f0f7c48e61256d125e196ad51bce | [
"MIT"
] | null | null | null | pyblockchain/blockchain.py | kadonoamino/python_blockchain | 2ef52bccc812f0f7c48e61256d125e196ad51bce | [
"MIT"
] | null | null | null | import contextlib
import hashlib
import json
import logging
import sys
import time
import threading
from ecdsa import NIST256p
from ecdsa import VerifyingKey
import requests
import utils
MINING_DIFFICULTY = 3
MINING_SENDER = 'THE BLOCKCHAIN'
MINING_REWARD = 1.0
MINING_TIMER_SEC = 20
BLOCKCHAIN_PORT_RANGE = (5000, 5003)
NEIGHBOURS_IP_RANGE_NUM = (0, 1)
BLOCKCHAIN_NEIGHBOURS_SYNC_TIME_SEC = 20
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
logger = logging.getLogger(__name__)
| 35.163866 | 79 | 0.602342 | import contextlib
import hashlib
import json
import logging
import sys
import time
import threading
from ecdsa import NIST256p
from ecdsa import VerifyingKey
import requests
import utils
MINING_DIFFICULTY = 3
MINING_SENDER = 'THE BLOCKCHAIN'
MINING_REWARD = 1.0
MINING_TIMER_SEC = 20
BLOCKCHAIN_PORT_RANGE = (5000, 5003)
NEIGHBOURS_IP_RANGE_NUM = (0, 1)
BLOCKCHAIN_NEIGHBOURS_SYNC_TIME_SEC = 20
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
logger = logging.getLogger(__name__)
class BlockChain(object):
    """A minimal proof-of-work blockchain node.

    Holds a local chain and a transaction pool, mines blocks on a timer,
    and talks to neighbour nodes over HTTP (``requests``) to broadcast
    transactions and resolve conflicts (longest valid chain wins).
    """
    def __init__(self, blockchain_address=None, port=None):
        # blockchain_address: address credited with the mining reward.
        # port: this node's HTTP port, used for neighbour discovery.
        self.transaction_pool = []
        self.chain = []
        self.neighbours = []
        # Genesis block: nonce 0, previous hash of an empty dict.
        self.create_block(0, self.hash({}))
        self.blockchain_address = blockchain_address
        self.port = port
        # Semaphores ensure only one mining / neighbour-sync cycle at a time.
        self.mining_semaphore = threading.Semaphore(1)
        self.sync_neighbours_semaphore = threading.Semaphore(1)
    def run(self):
        """Start the node: discover neighbours, reach consensus, begin mining."""
        self.sync_neighbours()
        self.resolve_conflicts()
        self.start_mining()
    def set_neighbours(self):
        """Scan the configured IP/port ranges for other blockchain nodes."""
        self.neighbours = utils.find_neighbours(
            utils.get_host(), self.port,
            NEIGHBOURS_IP_RANGE_NUM[0], NEIGHBOURS_IP_RANGE_NUM[1],
            BLOCKCHAIN_PORT_RANGE[0], BLOCKCHAIN_PORT_RANGE[1])
        logger.info({
            'action': 'set_neighbours', 'neighbours': self.neighbours
        })
    def sync_neighbours(self):
        """Refresh the neighbour list and re-schedule itself periodically."""
        # Non-blocking acquire: skip silently if a sync is already running.
        is_acquire = self.sync_neighbours_semaphore.acquire(blocking=False)
        if is_acquire:
            with contextlib.ExitStack() as stack:
                # Release the semaphore even if set_neighbours() raises.
                stack.callback(self.sync_neighbours_semaphore.release)
                self.set_neighbours()
                loop = threading.Timer(
                    BLOCKCHAIN_NEIGHBOURS_SYNC_TIME_SEC, self.sync_neighbours)
                loop.start()
    def create_block(self, nonce, previous_hash):
        """Append a block holding the pooled transactions and empty the pool.

        Also asks every neighbour to drop its (now mined) transaction pool.
        """
        block = utils.sorted_dict_by_key({
            'timestamp': time.time(),
            'transactions': self.transaction_pool,
            'nonce': nonce,
            'previous_hash': previous_hash
        })
        self.chain.append(block)
        self.transaction_pool = []
        for node in self.neighbours:
            requests.delete(f'http://{node}/transactions')
        return block
    def hash(self, block):
        """Return the hex SHA-256 digest of the block's sorted-key JSON."""
        sorted_block = json.dumps(block, sort_keys=True)
        return hashlib.sha256(sorted_block.encode()).hexdigest()
    def add_transaction(self, sender_blockchain_address,
                        recipient_blockchain_address, value,
                        sender_public_key=None, signature=None):
        """Validate a transaction and append it to the local pool.

        Mining rewards (sender == MINING_SENDER) skip signature and balance
        checks.  Returns True on success, False otherwise.
        """
        transaction = utils.sorted_dict_by_key({
            'sender_blockchain_address': sender_blockchain_address,
            'recipient_blockchain_address': recipient_blockchain_address,
            'value': float(value)
        })
        if sender_blockchain_address == MINING_SENDER:
            self.transaction_pool.append(transaction)
            return True
        if self.verify_transaction_signature(
            sender_public_key, signature, transaction):
            # Reject spends exceeding the sender's current balance.
            if (self.calculate_total_amount(sender_blockchain_address)
                < float(value)):
                logger.error(
                    {'action': 'add_transaction', 'error': 'no_value'})
                return False
            self.transaction_pool.append(transaction)
            return True
        return False
    def create_transaction(self, sender_blockchain_address,
                           recipient_blockchain_address, value,
                           sender_public_key, signature):
        """Add a transaction locally and, on success, broadcast it to all
        neighbours via HTTP PUT."""
        is_transacted = self.add_transaction(
            sender_blockchain_address, recipient_blockchain_address,
            value, sender_public_key, signature)
        if is_transacted:
            for node in self.neighbours:
                requests.put(
                    f'http://{node}/transactions',
                    json={
                        'sender_blockchain_address': sender_blockchain_address,
                        'recipient_blockchain_address':
                            recipient_blockchain_address,
                        'value': value,
                        'sender_public_key': sender_public_key,
                        'signature': signature,
                    }
                )
        return is_transacted
    def verify_transaction_signature(
            self, sender_public_key, signature, transaction):
        """Check the ECDSA (NIST256p) signature over the transaction digest.

        NOTE(review): ecdsa's verify() raises on a bad signature rather than
        returning False, so callers may see an exception instead of a False
        result — confirm this is intended.
        """
        sha256 = hashlib.sha256()
        sha256.update(str(transaction).encode('utf-8'))
        message = sha256.digest()
        signature_bytes = bytes().fromhex(signature)
        verifying_key = VerifyingKey.from_string(
            bytes().fromhex(sender_public_key), curve=NIST256p)
        verified_key = verifying_key.verify(signature_bytes, message)
        return verified_key
    def valid_proof(self, transactions, previous_hash, nonce,
                    difficulty=MINING_DIFFICULTY):
        """True if the candidate block's hash starts with `difficulty` zeros."""
        guess_block = utils.sorted_dict_by_key({
            'transactions': transactions,
            'nonce': nonce,
            'previous_hash': previous_hash
        })
        guess_hash = self.hash(guess_block)
        return guess_hash[:difficulty] == '0'*difficulty
    def proof_of_work(self):
        """Brute-force the smallest nonce that satisfies valid_proof()."""
        transactions = self.transaction_pool.copy()
        previous_hash = self.hash(self.chain[-1])
        nonce = 0
        while self.valid_proof(transactions, previous_hash, nonce) is False:
            nonce += 1
        return nonce
    def mining(self):
        """Mine one block: queue the reward transaction, solve PoW, publish."""
        # if not self.transaction_pool:
        #     return False
        self.add_transaction(
            sender_blockchain_address=MINING_SENDER,
            recipient_blockchain_address=self.blockchain_address,
            value=MINING_REWARD)
        nonce = self.proof_of_work()
        previous_hash = self.hash(self.chain[-1])
        self.create_block(nonce, previous_hash)
        logger.info({'action': 'mining', 'status': 'success'})
        # Ask neighbours to re-run consensus against our (longer) chain.
        for node in self.neighbours:
            requests.put(f'http://{node}/consensus')
        return True
    def start_mining(self):
        """Mine now and re-schedule a mining cycle every MINING_TIMER_SEC."""
        # Non-blocking acquire: skip if a mining cycle is already running.
        is_acquire = self.mining_semaphore.acquire(blocking=False)
        if is_acquire:
            with contextlib.ExitStack() as stack:
                stack.callback(self.mining_semaphore.release)
                self.mining()
                loop = threading.Timer(MINING_TIMER_SEC, self.start_mining)
                loop.start()
    def calculate_total_amount(self, blockchain_address):
        """Sum the address's balance over every transaction in the chain."""
        total_amount = 0.0
        for block in self.chain:
            for transaction in block['transactions']:
                value = float(transaction['value'])
                if blockchain_address == \
                    transaction['recipient_blockchain_address']:
                    total_amount += value
                if blockchain_address == \
                    transaction['sender_blockchain_address']:
                    total_amount -= value
        return total_amount
    def valid_chain(self, chain):
        """Verify hash linkage and proof-of-work for every block after the
        genesis block; returns True iff the whole chain is consistent."""
        pre_block = chain[0]
        current_index = 1
        while current_index < len(chain):
            block = chain[current_index]
            if block['previous_hash'] != self.hash(pre_block):
                return False
            if not self.valid_proof(
                block['transactions'], block['previous_hash'],
                block['nonce'], MINING_DIFFICULTY):
                return False
            pre_block = block
            current_index += 1
        return True
    def resolve_conflicts(self):
        """Consensus: adopt the longest valid chain found among neighbours.

        Returns True if our chain was replaced, False otherwise.
        """
        longest_chain = None
        max_length = len(self.chain)
        for node in self.neighbours:
            response = requests.get(f'http://{node}/chain')
            if response.status_code == 200:
                response_json = response.json()
                chain = response_json['chain']
                chain_length = len(chain)
                if chain_length > max_length and self.valid_chain(chain):
                    max_length = chain_length
                    longest_chain = chain
        if longest_chain:
            self.chain = longest_chain
            logger.info({'action': 'resolve_conflicts', 'status': 'replaced'})
            return True
        logger.info({'action': 'resolve_conflicts', 'status': 'not_replaced'})
        return False
| 7,413 | 4 | 455 |
58059077979675f58d0fbc442c579293c01a16d0 | 3,683 | py | Python | Arrows.py | OceanNuclear/ComplexAnalysis | 7adccc0d81ea1216e305d35e248e6d886c682e6c | [
"MIT"
] | null | null | null | Arrows.py | OceanNuclear/ComplexAnalysis | 7adccc0d81ea1216e305d35e248e6d886c682e6c | [
"MIT"
] | null | null | null | Arrows.py | OceanNuclear/ComplexAnalysis | 7adccc0d81ea1216e305d35e248e6d886c682e6c | [
"MIT"
] | null | null | null | #!/home/ocean/anaconda3/bin/python3
from numpy import cos, arccos, sin, arctan, tan, pi, sqrt, e; from numpy import array as ary; import numpy as np; tau = 2*pi
from matplotlib import pyplot as plt
import seaborn as sns
#uses arrows to plot where each point lands after being transformed by the function
LongComputationalTime = False
x = np.linspace(-5,5, 50)
y = np.linspace(-5,5, 40)
startingPts = mymeshgrid(x,y)
#Oh yeah computer memory problem. Now I've got a wobble in my grid.
#startingPts = np.concatenate(np.transpose(np.meshgrid(x,y)), axis=0)
cycleLen=len(startingPts)
cycleLen=6
uncleanLandingPts = [complexFunc(z) for z in startingPts]
landingPts = np.array([z if np.isfinite(z).all() else [0,0] for z in uncleanLandingPts])
sortedLandingPoints = np.reshape(landingPts,[len(x),len(y),2])
colorcycle = iter([rotateColorSpace(theta) for theta in np.linspace(0,tau, cycleLen)])
#Order=FABCDEF
if LongComputationalTime:
for n in range(len(startingPts)):
plt.annotate("", xy=startingPts[n], xytext=landingPts[n],arrowprops=dict(color = next(colorcycle), arrowstyle= '<-', alpha=0.5,),)#use arrowheads
#pass
#plot the y direction gridlines landing location
for n in range(len(x)):
plt.plot(sortedLandingPoints[n,:,0],sortedLandingPoints[n,:,1], lw=0.8, alpha=1, color= "green")#next(colorcycle))
#for n in range (int(len(y)/2)) :next(colorcycle)
#for n in range (int(cycleLen/3)): next(colorcycle) #shift the color cycle foward by 1/3
#plot the x direction gridlines landing location
for n in range(len(y)):
plt.plot(sortedLandingPoints[:,n,0],sortedLandingPoints[:,n,1], lw=0.8, alpha=1, color= "red")#next(colorcycle))
xmin = [min(startingPts[:,0]),min(landingPts[:,0])]
xmax = [max(startingPts[:,0]),max(landingPts[:,0])]
ymin = [min(startingPts[:,1]),min(landingPts[:,1])]
ymax = [max(startingPts[:,1]),max(landingPts[:,1])]
plt.xlim( min (xmin) , max ( xmax ) )
plt.ylim( min (ymin) , max ( ymax ) )
plt.show()
plt.plot(landingPts[:,:]-np.ones(np.shape(landingPts[:,:])))
plt.show()
Radius = 100
small = 0.001
st_line = np.linspace(0,0,100)-1j*np.logspace(np.log10(small),np.log10(Radius),100)
theta=np.linspace(-pi/2,pi/2,200)
theta_2 = theta[:int(len(theta)/2)]
theta_3 = theta[int(len(theta)/2):]
semiCirc= Radius*cos(theta)+1j*Radius*sin(theta)
smallCirc =small*cos(-theta_2)+1j*small*sin(-theta_2)
smallCirc2=small*cos(-theta_3)+1j*small*sin(-theta_3)
reverse_st = -st_line[::-1]
Dee = np.concatenate([smallCirc2,st_line,semiCirc,reverse_st,smallCirc])
color = sns.palettes.hls_palette(n_colors=6)
for n in range(0,6):
segment=complexInput(Dee)[100*n:100*(n+1)]
plt.plot(segment.real,segment.imag,color=color[n])
#plt.xlim(-1,1)
#plt.ylim(-1,1)
#plt.scatter(complexInput(Dee).real[::20],complexInput(Dee.imag)[::20])
plt.show() | 39.180851 | 147 | 0.684768 | #!/home/ocean/anaconda3/bin/python3
from numpy import cos, arccos, sin, arctan, tan, pi, sqrt, e; from numpy import array as ary; import numpy as np; tau = 2*pi
from matplotlib import pyplot as plt
import seaborn as sns
#uses arrows to plot where each point lands after being transformed by the function
LongComputationalTime = False
def complexFunc(point): #if LongComputationalTime == True
[x,y] = point
z = x+1j*y
function = (z**2+4)/((z-3)*(z+1)*(z+6)*(z))
return [np.real(function),np.imag(function)]
def complexInput(z): #if LongComputationalTime == False
#RC=0.0000001
function = -(z+1)/((z-2)*(z+3))
#(z**2+4)/((z-3)*(z+1)*(z+6)*(z))
return function
def mymeshgrid(x,y):
coord = []
for x1 in x:
for y1 in y:
coord.append([x1,y1])
return np.array(coord).reshape([len(x)*len(y),2])
def rotateColorSpace(theta):
#given a vector pointing at 1,0,0; we want to rotate it around the (1/sqrt(3))*[1,1,1] axis by theta degrees.
w = cos(theta/2)
x = sin(theta/2)/sqrt(3)
y = sin(theta/2)/sqrt(3)
z = sin(theta/2)/sqrt(3)
A = ary([[ y*y+z*z, w*z-x*y,-w*y-x*z],
[-w*z-x*y, x*x+z*z, w*x-y*z],
[ w*y-x*z,-w*x-y*z, x*x+y*y]])
R = np.identity(3) # = identity matrix.
R-= 2*A
return (np.round(R@ary([1,0,0])))
x = np.linspace(-5,5, 50)
y = np.linspace(-5,5, 40)
startingPts = mymeshgrid(x,y)
#Oh yeah computer memory problem. Now I've got a wobble in my grid.
#startingPts = np.concatenate(np.transpose(np.meshgrid(x,y)), axis=0)
cycleLen=len(startingPts)
cycleLen=6
uncleanLandingPts = [complexFunc(z) for z in startingPts]
landingPts = np.array([z if np.isfinite(z).all() else [0,0] for z in uncleanLandingPts])
sortedLandingPoints = np.reshape(landingPts,[len(x),len(y),2])
colorcycle = iter([rotateColorSpace(theta) for theta in np.linspace(0,tau, cycleLen)])
#Order=FABCDEF
if LongComputationalTime:
for n in range(len(startingPts)):
plt.annotate("", xy=startingPts[n], xytext=landingPts[n],arrowprops=dict(color = next(colorcycle), arrowstyle= '<-', alpha=0.5,),)#use arrowheads
#pass
#plot the y direction gridlines landing location
for n in range(len(x)):
plt.plot(sortedLandingPoints[n,:,0],sortedLandingPoints[n,:,1], lw=0.8, alpha=1, color= "green")#next(colorcycle))
#for n in range (int(len(y)/2)) :next(colorcycle)
#for n in range (int(cycleLen/3)): next(colorcycle) #shift the color cycle foward by 1/3
#plot the x direction gridlines landing location
for n in range(len(y)):
plt.plot(sortedLandingPoints[:,n,0],sortedLandingPoints[:,n,1], lw=0.8, alpha=1, color= "red")#next(colorcycle))
xmin = [min(startingPts[:,0]),min(landingPts[:,0])]
xmax = [max(startingPts[:,0]),max(landingPts[:,0])]
ymin = [min(startingPts[:,1]),min(landingPts[:,1])]
ymax = [max(startingPts[:,1]),max(landingPts[:,1])]
plt.xlim( min (xmin) , max ( xmax ) )
plt.ylim( min (ymin) , max ( ymax ) )
plt.show()
plt.plot(landingPts[:,:]-np.ones(np.shape(landingPts[:,:])))
plt.show()
Radius = 100
small = 0.001
st_line = np.linspace(0,0,100)-1j*np.logspace(np.log10(small),np.log10(Radius),100)
theta=np.linspace(-pi/2,pi/2,200)
theta_2 = theta[:int(len(theta)/2)]
theta_3 = theta[int(len(theta)/2):]
semiCirc= Radius*cos(theta)+1j*Radius*sin(theta)
smallCirc =small*cos(-theta_2)+1j*small*sin(-theta_2)
smallCirc2=small*cos(-theta_3)+1j*small*sin(-theta_3)
reverse_st = -st_line[::-1]
Dee = np.concatenate([smallCirc2,st_line,semiCirc,reverse_st,smallCirc])
color = sns.palettes.hls_palette(n_colors=6)
for n in range(0,6):
segment=complexInput(Dee)[100*n:100*(n+1)]
plt.plot(segment.real,segment.imag,color=color[n])
#plt.xlim(-1,1)
#plt.ylim(-1,1)
#plt.scatter(complexInput(Dee).real[::20],complexInput(Dee.imag)[::20])
plt.show() | 813 | 0 | 90 |
79b8a4167d43787a167e247f4ecbeea1783ea099 | 1,419 | py | Python | src/tests/utils/test_process_hooks.py | DmitryBurnaev/podcast-service | 53349a3f9aed22a8024d0c83380f9a02464962a3 | [
"MIT"
] | 5 | 2021-07-01T16:31:29.000Z | 2022-01-29T14:32:13.000Z | src/tests/utils/test_process_hooks.py | DmitryBurnaev/podcast-service | 53349a3f9aed22a8024d0c83380f9a02464962a3 | [
"MIT"
] | 45 | 2020-10-25T19:41:26.000Z | 2022-03-25T06:31:58.000Z | src/tests/utils/test_process_hooks.py | DmitryBurnaev/podcast-service | 53349a3f9aed22a8024d0c83380f9a02464962a3 | [
"MIT"
] | 1 | 2022-01-27T11:30:07.000Z | 2022-01-27T11:30:07.000Z | from core import settings
from modules.podcast.utils import episode_process_hook
from modules.podcast.models import EpisodeStatus
| 35.475 | 97 | 0.609584 | from core import settings
from modules.podcast.utils import episode_process_hook
from modules.podcast.models import EpisodeStatus
class TestEpisodeProcessHooks:
def test_call_hook__ok(self, mocked_redis):
mocked_redis.get.return_value = {"total_bytes": 1024}
episode_process_hook(
EpisodeStatus.DL_EPISODE_DOWNLOADING,
"test-episode.mp3",
total_bytes=1024,
processed_bytes=124,
)
mocked_redis.set.assert_called_with(
"test-episode",
{
"event_key": "test-episode",
"status": str(EpisodeStatus.DL_EPISODE_DOWNLOADING),
"processed_bytes": 124,
"total_bytes": 1024,
},
ttl=settings.DOWNLOAD_EVENT_REDIS_TTL,
)
def test_call_hook__with_chunks__ok(self, mocked_redis):
mocked_redis.get.return_value = {"total_bytes": 1024, "processed_bytes": 200}
episode_process_hook(EpisodeStatus.DL_EPISODE_DOWNLOADING, "test-episode.mp3", chunk=100)
mocked_redis.set.assert_called_with(
"test-episode",
{
"event_key": "test-episode",
"status": str(EpisodeStatus.DL_EPISODE_DOWNLOADING),
"processed_bytes": 200 + 100,
"total_bytes": 1024,
},
ttl=settings.DOWNLOAD_EVENT_REDIS_TTL,
)
| 1,203 | 9 | 76 |
9c35893b27a1c1dbd31544a1f881eac44b0b9ec6 | 61 | py | Python | xomx/feature_selection/__init__.py | perrin-isir/xomx | 9ca0ad56c333ebf4444f38bd9fa59cdd4e533756 | [
"BSD-3-Clause"
] | 4 | 2021-12-16T21:34:32.000Z | 2021-12-22T09:25:53.000Z | xomx/feature_selection/__init__.py | perrin-isir/xomx | 9ca0ad56c333ebf4444f38bd9fa59cdd4e533756 | [
"BSD-3-Clause"
] | 2 | 2021-12-15T15:51:42.000Z | 2022-03-31T08:17:26.000Z | xomx/feature_selection/__init__.py | perrin-isir/xomx | 9ca0ad56c333ebf4444f38bd9fa59cdd4e533756 | [
"BSD-3-Clause"
] | 2 | 2021-12-14T16:50:39.000Z | 2022-03-14T09:27:51.000Z | from .RFEExtraTrees import RFEExtraTrees, load_RFEExtraTrees
| 30.5 | 60 | 0.885246 | from .RFEExtraTrees import RFEExtraTrees, load_RFEExtraTrees
| 0 | 0 | 0 |
5e3409be2e89ecf64071b4b6812018a6ca38c8c8 | 9,575 | py | Python | src/julia/pseudo_python_cli.py | dpinol/pyjulia | cec4bf0b0eac7e39cecd8f3e7882563062903d0f | [
"MIT"
] | 649 | 2016-09-09T07:38:19.000Z | 2022-03-28T04:30:55.000Z | src/julia/pseudo_python_cli.py | dpinol/pyjulia | cec4bf0b0eac7e39cecd8f3e7882563062903d0f | [
"MIT"
] | 362 | 2016-09-08T16:25:30.000Z | 2022-03-05T23:15:05.000Z | src/julia/pseudo_python_cli.py | dpinol/pyjulia | cec4bf0b0eac7e39cecd8f3e7882563062903d0f | [
"MIT"
] | 85 | 2016-11-08T09:32:44.000Z | 2022-03-03T13:10:37.000Z | """
Pseudo Python command line interface.
It tries to mimic a subset of Python CLI:
https://docs.python.org/3/using/cmdline.html
"""
from __future__ import absolute_import, print_function
import code
import copy
import runpy
import sys
import traceback
from collections import namedtuple
try:
from types import SimpleNamespace
except ImportError:
from argparse import Namespace as SimpleNamespace
ARGUMENT_HELP = """
positional arguments:
script path to file (default: None)
args arguments passed to program in sys.argv[1:]
optional arguments:
-h, --help show this help message and exit
-i inspect interactively after running script.
--version, -V Print the Python version number and exit.
-VV is not supported.
-c COMMAND Execute the Python code in COMMAND.
-m MODULE Search sys.path for the named MODULE and execute its contents
as the __main__ module.
"""
ArgDest = namedtuple("ArgDest", "dest names default")
Optional = namedtuple("Optional", "name is_long argdest nargs action terminal")
Result = namedtuple("Result", "option values")
class PyArgumentParser(object):
"""
`ArgumentParser`-like parser with "terminal option" support.
Major differences:
* Formatted help has to be provided to `description`.
* Many options for `.add_argument` are not supported.
* Especially, there is no positional argument support: all positional
arguments go into `ns.args`.
* `.add_argument` can take boolean option `terminal` (default: `False`)
to stop parsing after consuming the given option.
"""
# Once we drop Python 2, we can do:
"""
def add_argument(self, name, *alt, dest=None, nargs=None, action=None,
default=None, terminal=False):
"""
# fmt: off
def _find_matches(self, arg):
"""
Return a list of `.Result`.
If value presents in `arg` (i.e., ``--long-option=value``), it
becomes the element of `.Result.values` (a list). Otherwise,
this list has to be filled by the caller (`_parse_until_terminal`).
"""
for opt in self._options:
if arg == opt.name:
return [Result(opt, [])]
elif arg.startswith(opt.name):
# i.e., len(arg) > len(opt.name):
if opt.is_long and arg[len(opt.name)] == "=":
return [Result(opt, [arg[len(opt.name) + 1:]])]
elif not opt.is_long:
if opt.nargs != 0:
return [Result(opt, [arg[len(opt.name):]])]
else:
results = [Result(opt, [])]
rest = "-" + arg[len(opt.name):]
results.extend(self._find_matches(rest))
return results
# arg="-ih" -> rest="-h"
return []
# fmt: on
if __name__ == "__main__":
sys.exit(main())
| 30.110063 | 85 | 0.552376 | """
Pseudo Python command line interface.
It tries to mimic a subset of Python CLI:
https://docs.python.org/3/using/cmdline.html
"""
from __future__ import absolute_import, print_function
import code
import copy
import runpy
import sys
import traceback
from collections import namedtuple
try:
from types import SimpleNamespace
except ImportError:
from argparse import Namespace as SimpleNamespace
ARGUMENT_HELP = """
positional arguments:
script path to file (default: None)
args arguments passed to program in sys.argv[1:]
optional arguments:
-h, --help show this help message and exit
-i inspect interactively after running script.
--version, -V Print the Python version number and exit.
-VV is not supported.
-c COMMAND Execute the Python code in COMMAND.
-m MODULE Search sys.path for the named MODULE and execute its contents
as the __main__ module.
"""
def python(module, command, script, args, interactive):
if command:
sys.argv[0] = "-c"
assert sys.argv
sys.argv[1:] = args
if script:
sys.argv[0] = script
banner = ""
try:
if command:
scope = {}
exec(command, scope)
elif module:
scope = runpy.run_module(module, run_name="__main__", alter_sys=True)
elif script == "-":
source = sys.stdin.read()
exec(compile(source, "<stdin>", "exec"), scope)
elif script:
scope = runpy.run_path(script, run_name="__main__")
else:
interactive = True
scope = None
banner = None # show banner
except Exception:
if not interactive:
raise
traceback.print_exc()
if interactive:
code.interact(banner=banner, local=scope)
ArgDest = namedtuple("ArgDest", "dest names default")
Optional = namedtuple("Optional", "name is_long argdest nargs action terminal")
Result = namedtuple("Result", "option values")
class PyArgumentParser(object):
"""
`ArgumentParser`-like parser with "terminal option" support.
Major differences:
* Formatted help has to be provided to `description`.
* Many options for `.add_argument` are not supported.
* Especially, there is no positional argument support: all positional
arguments go into `ns.args`.
* `.add_argument` can take boolean option `terminal` (default: `False`)
to stop parsing after consuming the given option.
"""
def __init__(self, prog=None, usage="%(prog)s [options] [args]", description=""):
self.prog = sys.argv[0] if prog is None else prog
self.usage = usage
self.description = description
self._dests = ["args"]
self._argdests = [ArgDest("args", (), [])]
self._options = []
self.add_argument("--help", "-h", "-?", action="store_true")
def format_usage(self):
return "usage: " + self.usage % {"prog": self.prog}
# Once we drop Python 2, we can do:
"""
def add_argument(self, name, *alt, dest=None, nargs=None, action=None,
default=None, terminal=False):
"""
def add_argument(self, name, *alt, **kwargs):
return self._add_argument_impl(name, alt, **kwargs)
# fmt: off
def _add_argument_impl(self, name, alt, dest=None, nargs=None, action=None,
default=None, terminal=False):
if dest is None:
if name.startswith("--"):
dest = name[2:]
elif not name.startswith("-"):
dest = name
else:
raise ValueError(name)
if not name.startswith("-"):
raise NotImplementedError(
"Positional arguments are not supported."
" All positional arguments will be stored in `ns.args`.")
if terminal and action is not None:
raise NotImplementedError(
"Terminal option is assumed to have argument."
" Non-`None` action={} is not supported".format())
if nargs is not None and action is not None:
raise TypeError("`nargs` and `action` are mutually exclusive")
if action == "store_true":
nargs = 0
assert nargs is None or isinstance(nargs, int)
assert action in (None, "store_true")
assert dest not in self._dests
self._dests.append(dest)
argdest = ArgDest(
dest=dest,
names=(name,) + alt,
default=default,
)
self._argdests.append(argdest)
for arg in (name,) + alt:
self._options.append(Optional(
name=arg,
is_long=arg.startswith("--"),
argdest=argdest,
nargs=nargs,
action=action,
terminal=terminal,
))
def parse_args(self, args):
ns = SimpleNamespace(**{
argdest.dest: copy.copy(argdest.default)
for argdest in self._argdests
})
args_iter = iter(args)
self._parse_until_terminal(ns, args_iter)
ns.args.extend(args_iter)
if ns.help:
self.print_help()
self.exit()
del ns.help
return ns
def _parse_until_terminal(self, ns, args_iter):
seen = set()
for a in args_iter:
results = self._find_matches(a)
if not results:
ns.args.append(a)
break
for i, res in enumerate(results):
dest = res.option.argdest.dest
if dest in seen:
self._usage_and_error(
"{} provided more than twice"
.format(", ".join(res.option.argdest.names)))
seen.add(dest)
num_args = res.option.nargs
if num_args is None:
num_args = 1
while len(res.values) < num_args:
try:
res.values.append(next(args_iter))
except StopIteration:
self.error(self.format_usage())
if res.option.action == "store_true":
setattr(ns, dest, True)
else:
value = res.values
if res.option.nargs is None:
value, = value
setattr(ns, dest, value)
if res.option.terminal:
assert i == len(results) - 1
return
def _find_matches(self, arg):
"""
Return a list of `.Result`.
If value presents in `arg` (i.e., ``--long-option=value``), it
becomes the element of `.Result.values` (a list). Otherwise,
this list has to be filled by the caller (`_parse_until_terminal`).
"""
for opt in self._options:
if arg == opt.name:
return [Result(opt, [])]
elif arg.startswith(opt.name):
# i.e., len(arg) > len(opt.name):
if opt.is_long and arg[len(opt.name)] == "=":
return [Result(opt, [arg[len(opt.name) + 1:]])]
elif not opt.is_long:
if opt.nargs != 0:
return [Result(opt, [arg[len(opt.name):]])]
else:
results = [Result(opt, [])]
rest = "-" + arg[len(opt.name):]
results.extend(self._find_matches(rest))
return results
# arg="-ih" -> rest="-h"
return []
# fmt: on
def print_usage(self, file=None):
print(self.format_usage(), file=file or sys.stdout)
def print_help(self):
self.print_usage()
print()
print(self.description)
def exit(self, status=0):
sys.exit(status)
def _usage_and_error(self, message):
self.print_usage(sys.stderr)
print(file=sys.stderr)
self.error(message)
def error(self, message):
print(message, file=sys.stderr)
self.exit(2)
def make_parser(description=__doc__ + ARGUMENT_HELP):
parser = PyArgumentParser(
prog=None if sys.argv[0] else "python",
usage="%(prog)s [option] ... [-c cmd | -m mod | script | -] [args]",
description=description,
)
parser.add_argument("-i", dest="interactive", action="store_true")
parser.add_argument("--version", "-V", action="store_true")
parser.add_argument("-c", dest="command", terminal=True)
parser.add_argument("-m", dest="module", terminal=True)
return parser
def parse_args_with(parser, args):
ns = parser.parse_args(args)
if ns.command and ns.module:
parser.error("-c and -m are mutually exclusive")
if ns.version:
print("Python {0}.{1}.{2}".format(*sys.version_info))
parser.exit()
del ns.version
ns.script = None
if (not (ns.command or ns.module)) and ns.args:
ns.script = ns.args[0]
ns.args = ns.args[1:]
return ns
def parse_args(args):
return parse_args_with(make_parser(), args)
def main(args=None):
if args is None:
args = sys.argv[1:]
try:
ns = parse_args(args)
python(**vars(ns))
except SystemExit as err:
return err.code
except Exception:
traceback.print_exc()
return 1
if __name__ == "__main__":
sys.exit(main())
| 6,153 | 0 | 412 |
e39df32cb7e8758760f31753af692ff76943f2fa | 5,600 | py | Python | sppas/sppas/src/ui/wxgui/dialogs/trsinfodialog.py | mirfan899/MTTS | 3167b65f576abcc27a8767d24c274a04712bd948 | [
"MIT"
] | null | null | null | sppas/sppas/src/ui/wxgui/dialogs/trsinfodialog.py | mirfan899/MTTS | 3167b65f576abcc27a8767d24c274a04712bd948 | [
"MIT"
] | null | null | null | sppas/sppas/src/ui/wxgui/dialogs/trsinfodialog.py | mirfan899/MTTS | 3167b65f576abcc27a8767d24c274a04712bd948 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# ---------------------------------------------------------------------------
# ___ __ __ __ ___
# / | \ | \ | \ / Automatic
# \__ |__/ |__/ |___| \__ Annotation
# \ | | | | \ of
# ___/ | | | | ___/ Speech
#
#
# http://www.sppas.org/
#
# ---------------------------------------------------------------------------
# Laboratoire Parole et Langage, Aix-en-Provence, France
# Copyright (C) 2011-2016 Brigitte Bigi
#
# This banner notice must not be removed
# ---------------------------------------------------------------------------
# Use of this software is governed by the GNU Public License, version 3.
#
# SPPAS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SPPAS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------
# File: trsinfodialog.py
# ----------------------------------------------------------------------------
__docformat__ = """epytext"""
__authors__ = """Brigitte Bigi"""
__copyright__ = """Copyright (C) 2011-2015 Brigitte Bigi"""
import wx
from sppas.src.ui.wxgui.panels.trslist import TrsList
from sppas.src.ui.wxgui.cutils.imageutils import spBitmap
from sppas.src.ui.wxgui.cutils.ctrlutils import CreateGenButton
from sppas.src.ui.wxgui.sp_icons import APP_ICON
from sppas.src.ui.wxgui.sp_icons import CANCEL_ICON
from sppas.src.ui.wxgui.sp_icons import INFO_ICON
from sppas.src.ui.wxgui.sp_consts import FRAME_STYLE
from sppas.src.ui.wxgui.sp_consts import FRAME_TITLE
# ----------------------------------------------------------------------------
class TrsInfoDialog( wx.Dialog ):
"""
@author: Brigitte Bigi
@contact: develop@sppas.org
@license: GPL, v3
@summary: Open a dialog with information about a transcription.
"""
# End __init__
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
# Create the GUI
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------------
| 38.095238 | 136 | 0.563393 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# ---------------------------------------------------------------------------
# ___ __ __ __ ___
# / | \ | \ | \ / Automatic
# \__ |__/ |__/ |___| \__ Annotation
# \ | | | | \ of
# ___/ | | | | ___/ Speech
#
#
# http://www.sppas.org/
#
# ---------------------------------------------------------------------------
# Laboratoire Parole et Langage, Aix-en-Provence, France
# Copyright (C) 2011-2016 Brigitte Bigi
#
# This banner notice must not be removed
# ---------------------------------------------------------------------------
# Use of this software is governed by the GNU Public License, version 3.
#
# SPPAS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SPPAS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------
# File: trsinfodialog.py
# ----------------------------------------------------------------------------
__docformat__ = """epytext"""
__authors__ = """Brigitte Bigi"""
__copyright__ = """Copyright (C) 2011-2015 Brigitte Bigi"""
import wx
from sppas.src.ui.wxgui.panels.trslist import TrsList
from sppas.src.ui.wxgui.cutils.imageutils import spBitmap
from sppas.src.ui.wxgui.cutils.ctrlutils import CreateGenButton
from sppas.src.ui.wxgui.sp_icons import APP_ICON
from sppas.src.ui.wxgui.sp_icons import CANCEL_ICON
from sppas.src.ui.wxgui.sp_icons import INFO_ICON
from sppas.src.ui.wxgui.sp_consts import FRAME_STYLE
from sppas.src.ui.wxgui.sp_consts import FRAME_TITLE
# ----------------------------------------------------------------------------
class TrsInfoDialog( wx.Dialog ):
"""
@author: Brigitte Bigi
@contact: develop@sppas.org
@license: GPL, v3
@summary: Open a dialog with information about a transcription.
"""
def __init__(self, parent, prefsIO, trsname, trs):
wx.Dialog.__init__(self, parent, title=FRAME_TITLE+" - Information", size=(580, -1), style=FRAME_STYLE)
self.preferences = prefsIO
self.trsname = trsname
self.trs = trs
self._create_gui()
# End __init__
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
# Create the GUI
# ------------------------------------------------------------------------
def _create_gui(self):
self._init_infos()
self._create_title_label()
self._create_content()
self._create_close_button()
self._layout_components()
self._set_focus_component()
def _init_infos( self ):
wx.GetApp().SetAppName( "trsinfo" )
# icon
_icon = wx.EmptyIcon()
_icon.CopyFromBitmap( spBitmap(APP_ICON) )
self.SetIcon(_icon)
# colors
self.SetBackgroundColour( self.preferences.GetValue('M_BG_COLOUR'))
self.SetForegroundColour( self.preferences.GetValue('M_FG_COLOUR'))
self.SetFont( self.preferences.GetValue('M_FONT'))
def _create_title_label(self):
self.title_layout = wx.BoxSizer(wx.HORIZONTAL)
bmp = wx.BitmapButton(self, bitmap=spBitmap(INFO_ICON, 32, theme=self.preferences.GetValue('M_ICON_THEME')), style=wx.NO_BORDER)
font = self.preferences.GetValue('M_FONT')
font.SetWeight(wx.BOLD)
font.SetPointSize(font.GetPointSize() + 2)
self.title_label = wx.StaticText(self, label="Transcription file properties", style=wx.ALIGN_CENTER)
self.title_label.SetFont( font )
self.title_layout.Add(bmp, flag=wx.TOP|wx.RIGHT|wx.ALIGN_RIGHT, border=5)
self.title_layout.Add(self.title_label, flag=wx.EXPAND|wx.ALL|wx.wx.ALIGN_CENTER_VERTICAL, border=5)
def _create_content(self):
# the information panel
self.trspanel = TrsList( self, self.trsname, self.trs)
def _create_close_button(self):
bmp = spBitmap(CANCEL_ICON, theme=self.preferences.GetValue('M_ICON_THEME'))
color = self.preferences.GetValue('M_BG_COLOUR')
self.btn_close = CreateGenButton(self, wx.ID_CLOSE, bmp, text=" Close", tooltip="Close this frame", colour=color)
self.btn_close.SetFont( self.preferences.GetValue('M_FONT'))
self.btn_close.SetDefault()
self.btn_close.SetFocus()
self.SetAffirmativeId(wx.ID_CLOSE)
def _layout_components(self):
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(self.title_layout, 0, flag=wx.ALL|wx.EXPAND, border=5)
vbox.Add(self.trspanel, 1, flag=wx.ALL|wx.EXPAND, border=0)
vbox.Add(self.btn_close, 0, flag=wx.ALL|wx.EXPAND, border=5)
self.SetMinSize((380,280))
self.SetSizer( vbox )
self.Show()
def _set_focus_component(self):
self.btn_close.SetFocus()
# ------------------------------------------------------------------------------
| 2,480 | 0 | 216 |
b9df1422c289da33fd8f1fd710c8f912544cf226 | 14,741 | py | Python | utils/syn_functions.py | droidadroit/nasbot | f8d5d0ba8b77c37ebaa6cd2ab148a2633ff20207 | [
"MIT"
] | null | null | null | utils/syn_functions.py | droidadroit/nasbot | f8d5d0ba8b77c37ebaa6cd2ab148a2633ff20207 | [
"MIT"
] | null | null | null | utils/syn_functions.py | droidadroit/nasbot | f8d5d0ba8b77c37ebaa6cd2ab148a2633ff20207 | [
"MIT"
] | null | null | null | """
A collection of utilities for MF-GP Bandits.
-- kandasamy@cs.cmu.edu
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
# pylint: disable=super-on-old-class
import numpy as np
# Local imports
from ..opt.function_caller import get_euc_function_caller_from_function
# Hartmann Functions ---------------------------------------------------------------------
def hartmann(x, alpha, A, P, max_val=np.inf):
  """ Evaluates the Hartmann family of functions at the point x.

  alpha gives the mixture weights, A the per-component coefficients and P the
  component centres. The returned value is capped at max_val.
  """
  exponents = np.sum(A * np.square(P - x), axis=1)
  value = np.dot(alpha, np.exp(-exponents))
  return min(max_val, value)
def _get_hartmann_data(domain_dim):
  """ Returns the Hartmann coefficients (A, P, alpha), the optimiser, the domain
      bounds and the maximum value for the 3D or 6D Hartmann function.

      Raises NotImplementedError for any other dimension.
  """
  # pylint: disable=bad-whitespace
  if domain_dim == 3:
    # Standard coefficient tables of the 3D Hartmann function.
    A = np.array([[3.0, 10, 30],
                  [0.1, 10, 35],
                  [3.0, 10, 30],
                  [0.1, 10, 35]], dtype=np.float64)
    P = 1e-4 * np.array([[3689, 1170, 2673],
                         [4699, 4387, 7470],
                         [1091, 8732, 5547],
                         [ 381, 5743, 8828]], dtype=np.float64)
    alpha = np.array([1.0, 1.2, 3.0, 3.2])
    domain = [[0, 1]] * 3             # unit hyper-cube domain
    opt_pt = np.array([0.114614, 0.555649, 0.852547])   # known maximiser
    max_val = 3.86278                 # known maximum value
  elif domain_dim == 6:
    # Standard coefficient tables of the 6D Hartmann function.
    A = np.array([[ 10, 3, 17, 3.5, 1.7, 8],
                  [0.05, 10, 17, 0.1, 8, 14],
                  [ 3, 3.5, 1.7, 10, 17, 8],
                  [ 17, 8, 0.05, 10, 0.1, 14]], dtype=np.float64)
    P = 1e-4 * np.array([[1312, 1696, 5569, 124, 8283, 5886],
                         [2329, 4135, 8307, 3736, 1004, 9991],
                         [2348, 1451, 3522, 2883, 3047, 6650],
                         [4047, 8828, 8732, 5743, 1091, 381]], dtype=np.float64)
    alpha = np.array([1.0, 1.2, 3.0, 3.2])
    domain = [[0, 1]] * 6             # unit hyper-cube domain
    opt_pt = np.array([0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573])
    max_val = 3.322368                # known maximum value
  else:
    raise NotImplementedError('Only implemented in 3 and 6 dimensions.')
  return A, P, alpha, opt_pt, domain, max_val
def get_hartmann_high_d_function_caller(domain_dim, **kwargs):
  """ Builds a FunctionCaller for a high dimensional additive Hartmann function.

  The domain is partitioned into consecutive groups of 6 coordinates and the 6D
  Hartmann function is summed over the groups. Trailing coordinates beyond the
  last full group do not contribute to the value.
  """
  group_dim = 6
  num_groups = int(domain_dim / group_dim)
  A, P, alpha, grp_opt_pt, grp_bounds, grp_max_val = _get_hartmann_data(group_dim)
  def _hartmann_on_group(pt):
    """ 6D Hartmann value (capped at its maximum) on one coordinate group. """
    return hartmann(pt, alpha, A, P, grp_max_val)
  def _sum_over_groups(x):
    """ Sums the 6D Hartmann value over all coordinate groups of x. """
    return sum(_hartmann_on_group(x[idx * group_dim:(idx + 1) * group_dim])
               for idx in range(num_groups))
  # Tile the 6D optimiser/bounds and truncate to the requested dimension.
  opt_pt = np.tile(grp_opt_pt, num_groups + 1)[0:domain_dim]
  opt_val = num_groups * grp_max_val
  domain_bounds = np.tile(np.array(grp_bounds).T, num_groups + 1).T[0:domain_dim]
  return get_euc_function_caller_from_function(_sum_over_groups, domain_bounds,
           vectorised=False, opt_pt=opt_pt, opt_val=opt_val, **kwargs)
def get_hartmann_high_d_function_caller_from_descr(descr, **kwargs):
  """ Builds a high dimensional Hartmann function caller from a description string.

  NOTE(review): assumes descr is of the form 'hartmann<D>' (e.g. 'hartmann18'),
  since everything after the first 8 characters is parsed as the dimension --
  confirm against callers.
  """
  dim = int(descr[8:])
  return get_hartmann_high_d_function_caller(dim, **kwargs)
def get_hartmann_function_caller(domain_dim, **kwargs):
  """ Returns a FunctionCaller for the 3D or 6D Hartmann function. """
  A, P, alpha, opt_pt, domain_bounds, max_val = _get_hartmann_data(domain_dim)
  def _hartmann_obj(x):
    """ Hartmann objective capped at its known maximum. """
    return hartmann(x, alpha, A, P, max_val)
  return get_euc_function_caller_from_function(_hartmann_obj, domain_bounds,
           vectorised=False, opt_pt=opt_pt, opt_val=max_val, **kwargs)
# Hartmann Functions end here ------------------------------------------------------------
# Shekel Function ------------------------------------------------------------------------
def shekel(x, C, beta, max_val=np.inf):
  """ Evaluates the Shekel function at x for the given centres C and offsets
      beta. The returned value is capped at max_val.
  """
  sq_dists = np.sum(np.square(C.T - x), axis=1)
  value = np.sum(1.0 / (sq_dists + beta))
  return min(max_val, value)
def _get_shekel_data():
  """ Returns the centres C, offsets beta, the optimiser, the domain bounds and
      the optimal value for the 4D (m=10) Shekel function.
  """
  # Centre matrix: each column is one of the 10 centres in 4D.
  C = [[4, 1, 8, 6, 3, 2, 5, 8, 6, 7],
       [4, 1, 8, 6, 7, 9, 3, 1, 2, 3],
       [4, 1, 8, 6, 3, 2, 5, 8, 6, 7],
       [4, 1, 8, 6, 7, 9, 3, 1, 2, 3]]
  C = np.array(C, dtype=np.double)
  beta = 0.1 * np.array([1, 2, 2, 4, 4, 6, 3, 7, 5, 5], dtype=np.double)
  opt_pt = np.array([4, 4, 4, 4], dtype=np.double)  # known maximiser
  # The optimum value is obtained by evaluating the function at the maximiser.
  opt_val = shekel(opt_pt, C, beta)
  domain_bounds = [[0, 10]] * 4
  return C, beta, opt_pt, domain_bounds, opt_val
def get_shekel_function_caller(**kwargs):
  """ Returns a FunctionCaller for the 4D Shekel function. """
  C, beta, opt_pt, domain_bounds, opt_val = _get_shekel_data()
  def _shekel_obj(x):
    """ Shekel objective capped at its optimum value. """
    return shekel(x, C, beta, opt_val)
  return get_euc_function_caller_from_function(_shekel_obj, domain_bounds,
           vectorised=False, opt_pt=opt_pt, opt_val=opt_val, **kwargs)
# Shekel function ends here --------------------------------------------------------------
# Currin Exponential Function ------------------------------------------------------------
def currin_exp(x, alpha):
  """ Evaluates the Currin exponential function at the 2D point x.

  alpha scales the exponential damping factor (alpha=1 gives the standard form).
  """
  x1, x2 = x[0], x[1]
  exp_factor = 1 - alpha * np.exp(-1 / (2 * x2))
  numer = 2300 * x1**3 + 1900 * x1**2 + 2092 * x1 + 60
  denom = 100 * x1**3 + 500 * x1**2 + 4 * x1 + 20
  return exp_factor * (numer / denom)
def get_currin_exp_function_caller(**kwargs):
  """ Returns a FunctionCaller for the 2D Currin exponential function. """
  opt_val = 13.798650
  opt_pt = None  # exact maximiser not recorded here
  def _currin_obj(x):
    """ Currin exponential with alpha = 1. """
    return currin_exp(x, 1)
  domain_bounds = np.array([[0, 1], [0, 1]])
  return get_euc_function_caller_from_function(_currin_obj,
           domain_bounds=domain_bounds, vectorised=False, opt_val=opt_val,
           opt_pt=opt_pt, **kwargs)
# Branin Function ------------------------------------------------------------------------
def branin(x, a, b, c, r, s, t):
  """ Evaluates the negated Branin function at the 2D point x, so that the
      usual minimisation problem becomes a maximisation problem.
  """
  x1, x2 = x[0], x[1]
  quad_term = a * (x2 - b * x1**2 + c * x1 - r)**2
  cos_term = s * (1 - t) * np.cos(x1)
  return -(quad_term + cos_term + s)
def _get_branin_data():
""" Gets the constants for the branin function. """
a = 1
b = 5.1/(4*np.pi**2)
c = 5/np.pi
r = 6
s = 10
t = 1/(8*np.pi)
opt_pt = np.array([np.pi, 2.275])
domain_bounds = np.array([[-5, 10], [0, 15]])
return a, b, c, r, s, t, opt_pt, domain_bounds
def get_branin_function_caller(**kwargs):
  """ Returns a FunctionCaller for the 2D Branin function. """
  a, b, c, r, s, t, opt_pt, domain_bounds = _get_branin_data()
  def _branin_obj(x):
    """ Branin objective with the standard coefficients. """
    return branin(x, a, b, c, r, s, t)
  opt_val = _branin_obj(opt_pt)
  return get_euc_function_caller_from_function(_branin_obj,
           domain_bounds=domain_bounds, vectorised=False, opt_val=opt_val,
           opt_pt=opt_pt, **kwargs)
def get_branin_high_d_function_caller(domain_dim, **kwargs):
  """ Builds a FunctionCaller for a high dimensional additive Branin function.

  The domain is partitioned into consecutive groups of 2 coordinates and the 2D
  Branin function is summed over the groups. Trailing coordinates beyond the
  last full group do not contribute to the value.
  """
  group_dim = 2
  num_groups = int(domain_dim / group_dim)
  a, b, c, r, s, t, grp_opt_pt, grp_bounds = _get_branin_data()
  def _branin_on_group(pt):
    """ 2D Branin value on one coordinate group. """
    return branin(pt, a, b, c, r, s, t)
  def _sum_over_groups(x):
    """ Sums the 2D Branin value over all coordinate groups of x. """
    return sum(_branin_on_group(x[idx * group_dim:(idx + 1) * group_dim])
               for idx in range(num_groups))
  # Tile the 2D optimiser/bounds and truncate to the requested dimension.
  opt_pt = np.tile(grp_opt_pt, num_groups + 1)[0:domain_dim]
  opt_val = _sum_over_groups(opt_pt)
  domain_bounds = np.tile(np.array(grp_bounds).T, num_groups + 1).T[0:domain_dim]
  return get_euc_function_caller_from_function(_sum_over_groups, domain_bounds,
           vectorised=False, opt_pt=opt_pt, opt_val=opt_val, **kwargs)
def get_branin_high_d_function_caller_from_descr(descr, **kwargs):
    """ Constructs a high dimensional Branin function caller from a string of
        the form 'branin-NN' where NN is the total dimension. """
    domain_dim = int(descr[7:])  # characters after the 'branin-' prefix
    return get_branin_high_d_function_caller(domain_dim, **kwargs)
# Borehole Function ----------------------------------------------------------------------
def borehole(x, z, max_val):
    """ Computes the Bore Hole function.

        x holds the eight physical parameters (rw, r, Tu, Hu, Tl, Hl, L, Kw);
        z in [0, 1] blends between the low fidelity (z=0) and high fidelity
        (z=1) evaluations, with the high fidelity value clipped at max_val.
    """
    rw, r, Tu, Hu, Tl, Hl, L, Kw = x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7]
    log_ratio = np.log(r / rw)
    # High fidelity evaluation, capped at max_val.
    frac2 = 2 * L * Tu / (log_ratio * rw ** 2 * Kw)
    high_fid = min(max_val,
                   2 * np.pi * Tu * (Hu - Hl) / (log_ratio * (1 + frac2 + Tu / Tl)))
    # Low fidelity evaluation.
    low_fid = 5 * Tu * (Hu - Hl) / (log_ratio * (1.5 + frac2 + Tu / Tl))
    # Blend the two fidelities.
    return high_fid * z + low_fid * (1 - z)
def get_borehole_function_caller(**kwargs):
    """ Returns a FunctionCaller object for the Borehole function. """
    # Known optimal value; it is also passed as max_val so that the high
    # fidelity evaluation is clipped at the optimum.
    opt_val = 309.523221
    opt_pt = None  # optimiser location is not recorded for this function
    borehole_func = lambda x: borehole(x, 1, opt_val)
    # Physical parameter ranges: rw, r, Tu, Hu, Tl, Hl, L, Kw.
    domain_bounds = [[0.05, 0.15],
                     [100, 50000],
                     [63070, 115600],
                     [990, 1110],
                     [63.1, 116],
                     [700, 820],
                     [1120, 1680],
                     [9855, 12045]]
    return get_euc_function_caller_from_function(borehole_func, domain_bounds,
        vectorised=False, opt_val=opt_val, opt_pt=opt_pt, **kwargs)
# Park1 function ==================================================================
def park1(x, max_val=np.inf):
    """ Computes the park1 function at the 4D point x, capped at max_val. """
    p1, p2, p3, p4 = x[0], x[1], x[2], x[3]
    first = 0.5 * p1 * (np.sqrt(1 + (p2 + p3 ** 2) * p4 / p1 ** 2) - 1)
    second = (p1 + 3 * p4) * np.exp(1 + np.sin(p3))
    return min(first + second, max_val)
def get_park1_function_caller(**kwargs):
    """ Returns the park1 function caller. """
    # Known optimal value; also used as the cap inside park1.
    opt_val = 25.5872304
    opt_pt = None  # optimiser location is not recorded for this function
    func = lambda x: park1(x, opt_val)
    domain_bounds = [[0, 1]] * 4  # unit hypercube in 4D
    return get_euc_function_caller_from_function(func, domain_bounds,
        vectorised=False, opt_val=opt_val, opt_pt=opt_pt, **kwargs)
# Park2 function ==================================================================
def park2(x, max_val=np.inf):
    """ Computes the park2 function at the 4D point x, capped at max_val. """
    p1, p2, p3, p4 = x[0], x[1], x[2], x[3]
    value = (2.0 / 3.0) * np.exp(p1 + p2) - p4 * np.sin(p3) + p3
    return min(value, max_val)
def get_park2_function_caller(**kwargs):
    """ Returns function caller for park2. """
    # Known optimal value; also used as the cap inside park2.
    opt_val = 5.925698
    opt_pt = None  # optimiser location is not recorded for this function
    func = lambda x: park2(x, opt_val)
    domain_bounds = [[0, 1]] * 4  # unit hypercube in 4D
    return get_euc_function_caller_from_function(func, domain_bounds,
        vectorised=False, opt_val=opt_val, opt_pt=opt_pt, **kwargs)
def get_high_d_function_from_low_d(domain_dim, group_dim, low_d_func):
    """ Constructs a high dimensional function by summing low_d_func over
        successive groups of group_dim coordinates.

        (The original docstring claimed this built a Hartmann function; it is
        generic and works with any low dimensional callable.)

        Returns the high dimensional function and the number of groups,
        num_groups = floor(domain_dim / group_dim). Coordinates beyond
        num_groups * group_dim are ignored by the returned function.
    """
    num_groups = int(domain_dim / group_dim)
    def _eval_highd_func(x):
        """ Evaluates the higher dimensional function as a sum over groups. """
        ret = 0
        for j in range(num_groups):
            ret += low_d_func(x[j * group_dim:(j + 1) * group_dim])
        return ret
    return _eval_highd_func, num_groups
def get_high_d_function_caller_from_low_d_func(domain_dim, low_d_func,
        low_d_domain_bounds, low_d_opt_val, low_d_opt_pt, **kwargs):
    """ Gets a high dimensional function caller from a low dimensional
        function, by summing the function over groups of coordinates.

        (Note: the original docstring had the direction reversed.)
    """
    group_dim = len(low_d_domain_bounds)
    high_d_func, num_groups = get_high_d_function_from_low_d(domain_dim, group_dim,
                                                             low_d_func)
    # Tile the low-d bounds across the groups and truncate to domain_dim.
    high_d_domain_bounds = np.tile(np.array(low_d_domain_bounds).T,
                                   num_groups+1).T[0:domain_dim]
    high_d_opt_pt = None
    high_d_opt_val = None
    if low_d_opt_pt is not None:
        # Optimiser known: tile it and evaluate to get the exact optimum.
        high_d_opt_pt = np.tile(low_d_opt_pt, num_groups+1)[0:domain_dim]
        high_d_opt_val = high_d_func(high_d_opt_pt)
    elif low_d_opt_val is not None:
        # Only the value is known: the sum over groups attains num_groups
        # times the low dimensional optimum.
        high_d_opt_val = num_groups * low_d_opt_val
    # Return
    func_caller = get_euc_function_caller_from_function(high_d_func,
                                                        high_d_domain_bounds,
                                                        vectorised=False,
                                                        opt_val=high_d_opt_val,
                                                        opt_pt=high_d_opt_pt,
                                                        **kwargs)
    return func_caller
def get_high_d_function_caller_from_low_d_func_caller(domain_dim,
        low_d_func_caller, **kwargs):
    """ Gets a high dimensional function caller from a low dimensional one,
        unpacking the low-d caller's function, bounds and optimum. """
    return get_high_d_function_caller_from_low_d_func(domain_dim, low_d_func_caller.func,
        low_d_func_caller.domain.bounds, low_d_func_caller.opt_val,
        low_d_func_caller.raw_opt_pt, **kwargs)
def get_high_d_function_caller_from_description(descr, **kwargs):
    """ Gets a high dimensional function caller from a description string of
        the form '<name>-<dim>', e.g. 'branin-20'. """
    segments = descr.split('-')
    domain_dim = int(segments[1])
    # Every entry is invoked with (**kwargs) below, so each must accept
    # keyword arguments. The original 'hartmann' entry was a zero-argument
    # lambda, which raised a TypeError whenever kwargs were supplied; it now
    # forwards **kwargs like the plain function references do.
    descr_to_func_dict = {'hartmann': lambda **kw: get_hartmann_function_caller(6, **kw),
                          'branin': get_branin_function_caller,
                          'currinexp': get_currin_exp_function_caller,
                          'park1': get_park1_function_caller,
                          'park2': get_park2_function_caller,
                          'borehole': get_borehole_function_caller,
                          'shekel': get_shekel_function_caller}
    low_d_func_caller = descr_to_func_dict[segments[0].lower()](**kwargs)
    return get_high_d_function_caller_from_low_d_func_caller(domain_dim,
        low_d_func_caller, **kwargs)
# Finally, one very convenient wrapper.
def get_syn_function_caller_from_name(function_name, **kwargs):
    """ A very convenient wrapper so that you can just get the function from
        the name. Falls back to the '<name>-<dim>' description parser for any
        unrecognised name. """
    dispatch = {
        'hartmann3': lambda **kw: get_hartmann_function_caller(3, **kw),
        'hartmann6': lambda **kw: get_hartmann_function_caller(6, **kw),
        'currinexp': get_currin_exp_function_caller,
        'branin': get_branin_function_caller,
        'borehole': get_borehole_function_caller,
        'shekel': get_shekel_function_caller,
        'park1': get_park1_function_caller,
        'park2': get_park2_function_caller,
    }
    key = function_name.lower()
    if key in dispatch:
        return dispatch[key](descr=function_name, **kwargs)
    return get_high_d_function_caller_from_description(descr=function_name, **kwargs)
| 42.976676 | 90 | 0.617462 | """
A collection of utilities for MF-GP Bandits.
-- kandasamy@cs.cmu.edu
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
# pylint: disable=super-on-old-class
import numpy as np
# Local imports
from ..opt.function_caller import get_euc_function_caller_from_function
# Hartmann Functions ---------------------------------------------------------------------
def hartmann(x, alpha, A, P, max_val=np.inf):
    """ Computes the Hartmann function for any given A and P, capped at
        max_val. """
    exponents = (A * (P - x) ** 2).sum(axis=1)
    value = alpha.dot(np.exp(-exponents))
    return min(max_val, value)
def _get_hartmann_data(domain_dim):
    """ Returns the constants (A, P, alpha), the known optimiser, the domain
        bounds and the known maximum for the 3D or 6D Hartmann function.

        Raises NotImplementedError for any other dimension.
    """
    # pylint: disable=bad-whitespace
    if domain_dim == 3:
        A = np.array([[3.0, 10, 30],
                      [0.1, 10, 35],
                      [3.0, 10, 30],
                      [0.1, 10, 35]], dtype=np.float64)
        P = 1e-4 * np.array([[3689, 1170, 2673],
                             [4699, 4387, 7470],
                             [1091, 8732, 5547],
                             [ 381, 5743, 8828]], dtype=np.float64)
        alpha = np.array([1.0, 1.2, 3.0, 3.2])
        domain = [[0, 1]] * 3
        opt_pt = np.array([0.114614, 0.555649, 0.852547])
        max_val = 3.86278
    elif domain_dim == 6:
        A = np.array([[  10,   3,   17, 3.5, 1.7,  8],
                      [0.05,  10,   17, 0.1,   8, 14],
                      [   3, 3.5,  1.7,  10,  17,  8],
                      [  17,   8, 0.05,  10, 0.1, 14]], dtype=np.float64)
        P = 1e-4 * np.array([[1312, 1696, 5569,  124, 8283, 5886],
                             [2329, 4135, 8307, 3736, 1004, 9991],
                             [2348, 1451, 3522, 2883, 3047, 6650],
                             [4047, 8828, 8732, 5743, 1091,  381]], dtype=np.float64)
        alpha = np.array([1.0, 1.2, 3.0, 3.2])
        domain = [[0, 1]] * 6
        opt_pt = np.array([0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573])
        max_val = 3.322368
    else:
        raise NotImplementedError('Only implemented in 3 and 6 dimensions.')
    return A, P, alpha, opt_pt, domain, max_val
def get_hartmann_high_d_function_caller(domain_dim, **kwargs):
    """ Constructs a higher dimensional Hartmann function caller by summing
        the 6D Hartmann function over successive groups of 6 coordinates. """
    group_dim = 6
    num_groups = int(domain_dim/group_dim)
    A, P, alpha, l_opt_pt, l_domain_bounds, l_max_val = _get_hartmann_data(group_dim)
    hartmann_func = lambda x: hartmann(x, alpha, A, P, l_max_val)
    def _eval_highd_hartmann_func(x):
        """ Evaluates the higher dimensional hartmann function. """
        ret = 0
        for j in range(num_groups):
            ret += hartmann_func(x[j*group_dim:(j+1)*group_dim])
        return ret
    # Tile the 6D optimiser/bounds and truncate to domain_dim; the optimum of
    # the sum is num_groups times the per-group maximum.
    opt_pt = np.tile(l_opt_pt, num_groups+1)[0:domain_dim]
    opt_val = num_groups * l_max_val
    domain_bounds = np.tile(np.array(l_domain_bounds).T, num_groups+1).T[0:domain_dim]
    return get_euc_function_caller_from_function(_eval_highd_hartmann_func, domain_bounds,
        vectorised=False, opt_pt=opt_pt, opt_val=opt_val, **kwargs)
def get_hartmann_high_d_function_caller_from_descr(descr, **kwargs):
    """ Constructs a high dimensional Hartmann function caller from a string
        of the form 'hartmann-NN' where NN is the total dimension. """
    domain_dim = int(descr[8:])  # characters after the 'hartmann' prefix
    return get_hartmann_high_d_function_caller(domain_dim, **kwargs)
def get_hartmann_function_caller(domain_dim, **kwargs):
    """ Returns a FunctionCaller object for the Hartmann function in the given
        dimension (3 or 6). """
    A, P, alpha, opt_pt, domain_bounds, max_val = _get_hartmann_data(domain_dim)
    # max_val doubles as the cap inside hartmann().
    hartmann_func = lambda x: hartmann(x, alpha, A, P, max_val)
    return get_euc_function_caller_from_function(hartmann_func, domain_bounds,
                                                 vectorised=False,
                                                 opt_pt=opt_pt, opt_val=max_val, **kwargs)
# Hartmann Functions end here ------------------------------------------------------------
# Shekel Function ------------------------------------------------------------------------
def shekel(x, C, beta, max_val=np.inf):
    """ Computes the Shekel function for the given C and beta, capped at
        max_val. """
    denominators = ((C.T - x) ** 2).sum(axis=1) + beta
    value = np.sum(1.0 / denominators)
    return min(max_val, value)
def _get_shekel_data():
    """ Returns the C, beta parameters and optimal values for the shekel
        function (10 modes in 4 dimensions). """
    # Each column of C is the centre of one mode.
    C = [[4, 1, 8, 6, 3, 2, 5, 8, 6, 7],
         [4, 1, 8, 6, 7, 9, 3, 1, 2, 3],
         [4, 1, 8, 6, 3, 2, 5, 8, 6, 7],
         [4, 1, 8, 6, 7, 9, 3, 1, 2, 3]]
    C = np.array(C, dtype=np.double)
    beta = 0.1 * np.array([1, 2, 2, 4, 4, 6, 3, 7, 5, 5], dtype=np.double)
    opt_pt = np.array([4, 4, 4, 4], dtype=np.double)
    # The optimal value is obtained by evaluating at the known optimiser.
    opt_val = shekel(opt_pt, C, beta)
    domain_bounds = [[0, 10]] * 4
    return C, beta, opt_pt, domain_bounds, opt_val
def get_shekel_function_caller(**kwargs):
    """ Returns a FunctionCaller object for the Shekel function. """
    C, beta, opt_pt, domain_bounds, opt_val = _get_shekel_data()
    # opt_val doubles as the cap inside shekel().
    shekel_func = lambda x: shekel(x, C, beta, opt_val)
    return get_euc_function_caller_from_function(shekel_func, domain_bounds,
        vectorised=False, opt_pt=opt_pt, opt_val=opt_val, **kwargs)
# Shekel function ends here --------------------------------------------------------------
# Currin Exponential Function ------------------------------------------------------------
def currin_exp(x, alpha):
    """ Computes the Currin exponential function at the 2D point x; alpha
        scales the exponential attenuation term. """
    u = x[0]
    w = x[1]
    attenuation = 1 - alpha * np.exp(-1 / (2 * w))
    rational = (2300 * u ** 3 + 1900 * u ** 2 + 2092 * u + 60) \
               / (100 * u ** 3 + 500 * u ** 2 + 4 * u + 20)
    return attenuation * rational
def get_currin_exp_function_caller(**kwargs):
    """ Returns a FunctionCaller object for the Currin Exponential function. """
    currin_exp_func = lambda x: currin_exp(x, 1)
    # Known optimal value; the optimiser location is not recorded.
    opt_val = 13.798650
    opt_pt = None
    domain_bounds = np.array([[0, 1], [0, 1]])
    return get_euc_function_caller_from_function(currin_exp_func,
        domain_bounds=domain_bounds, vectorised=False, opt_val=opt_val,
        opt_pt=opt_pt, **kwargs)
# Branin Function ------------------------------------------------------------------------
def branin(x, a, b, c, r, s, t):
""" Computes the Branin function. """
x1 = x[0]
x2 = x[1]
neg_ret = a * (x2 - b*x1**2 + c*x1 - r)**2 + s*(1-t)*np.cos(x1) + s
return -neg_ret
def _get_branin_data():
""" Gets the constants for the branin function. """
a = 1
b = 5.1/(4*np.pi**2)
c = 5/np.pi
r = 6
s = 10
t = 1/(8*np.pi)
opt_pt = np.array([np.pi, 2.275])
domain_bounds = np.array([[-5, 10], [0, 15]])
return a, b, c, r, s, t, opt_pt, domain_bounds
def get_branin_function_caller(**kwargs):
""" Returns a FunctionCaller object for the Branin function. """
a, b, c, r, s, t, opt_pt, domain_bounds = _get_branin_data()
branin_func = lambda x: branin(x, a, b, c, r, s, t)
opt_val = branin_func(opt_pt)
return get_euc_function_caller_from_function(branin_func, domain_bounds=domain_bounds,
vectorised=False, opt_val=opt_val, opt_pt=opt_pt, **kwargs)
def get_branin_high_d_function_caller(domain_dim, **kwargs):
""" Constructs a higher dimensional Hartmann function. """
group_dim = 2
num_groups = int(domain_dim/group_dim)
a, b, c, r, s, t, opt_pt_2d, domain_bounds_2d = _get_branin_data()
branin_func = lambda x: branin(x, a, b, c, r, s, t)
def _eval_highd_branin_func(x):
""" Evaluates higher dimensional branin function. """
ret = 0
for j in range(num_groups):
ret += branin_func(x[j*group_dim:(j+1)*group_dim])
return ret
opt_pt = np.tile(opt_pt_2d, num_groups+1)[0:domain_dim]
opt_val = _eval_highd_branin_func(opt_pt)
domain_bounds = np.tile(np.array(domain_bounds_2d).T, num_groups+1).T[0:domain_dim]
return get_euc_function_caller_from_function(_eval_highd_branin_func, domain_bounds,
vectorised=False, opt_pt=opt_pt, opt_val=opt_val, **kwargs)
def get_branin_high_d_function_caller_from_descr(descr, **kwargs):
""" Constructs a high dimensional hartmann function from a string. """
domain_dim = int(descr[7:])
return get_branin_high_d_function_caller(domain_dim, **kwargs)
# Borehole Function ----------------------------------------------------------------------
def borehole(x, z, max_val):
""" Computes the Bore Hole function. """
# pylint: disable=bad-whitespace
rw = x[0]
r = x[1]
Tu = x[2]
Hu = x[3]
Tl = x[4]
Hl = x[5]
L = x[6]
Kw = x[7]
# Compute high fidelity function
frac2 = 2*L*Tu/(np.log(r/rw) * rw**2 * Kw)
f2 = min(max_val, 2 * np.pi * Tu * (Hu - Hl)/(np.log(r/rw) * (1 + frac2 + Tu/Tl)))
# Compute low fidelity function
f1 = 5 * Tu * (Hu - Hl)/(np.log(r/rw) * (1.5 + frac2 + Tu/Tl))
# Compute final output
return f2*z + f1*(1-z)
def get_borehole_function_caller(**kwargs):
""" Returns a FunctionCaller object for the Borehold function. """
opt_val = 309.523221
opt_pt = None
borehole_func = lambda x: borehole(x, 1, opt_val)
domain_bounds = [[0.05, 0.15],
[100, 50000],
[63070, 115600],
[990, 1110],
[63.1, 116],
[700, 820],
[1120, 1680],
[9855, 12045]]
return get_euc_function_caller_from_function(borehole_func, domain_bounds,
vectorised=False, opt_val=opt_val, opt_pt=opt_pt, **kwargs)
# Park1 function ==================================================================
def park1(x, max_val=np.inf):
""" Computes the park1 function. """
x1 = x[0]
x2 = x[1]
x3 = x[2]
x4 = x[3]
ret1 = (x1/2) * (np.sqrt(1 + (x2 + x3**2)*x4/(x1**2)) - 1)
ret2 = (x1 + 3*x4) * np.exp(1 + np.sin(x3))
return min(ret1 + ret2, max_val)
def get_park1_function_caller(**kwargs):
""" Returns the park1 function caller. """
opt_val = 25.5872304
opt_pt = None
func = lambda x: park1(x, opt_val)
domain_bounds = [[0, 1]] * 4
return get_euc_function_caller_from_function(func, domain_bounds,
vectorised=False, opt_val=opt_val, opt_pt=opt_pt, **kwargs)
# Park2 function ==================================================================
def park2(x, max_val=np.inf):
""" Comutes the park2 function """
x1 = x[0]
x2 = x[1]
x3 = x[2]
x4 = x[3]
ret = (2.0/3.0) * np.exp(x1 + x2) - x4*np.sin(x3) + x3
return min(ret, max_val)
def get_park2_function_caller(**kwargs):
""" Returns function caller for park2. """
opt_val = 5.925698
opt_pt = None
func = lambda x: park2(x, opt_val)
domain_bounds = [[0, 1]] * 4
return get_euc_function_caller_from_function(func, domain_bounds,
vectorised=False, opt_val=opt_val, opt_pt=opt_pt, **kwargs)
def get_high_d_function_from_low_d(domain_dim, group_dim, low_d_func):
""" Constructs a higher dimensional Hartmann function. """
num_groups = int(domain_dim/group_dim)
def _eval_highd_func(x):
""" Evaluates the higher dimensional function. """
ret = 0
for j in range(num_groups):
ret += low_d_func(x[j*group_dim: (j+1)*group_dim])
return ret
return _eval_highd_func, num_groups
def get_high_d_function_caller_from_low_d_func(domain_dim, low_d_func,
low_d_domain_bounds, low_d_opt_val, low_d_opt_pt, **kwargs):
""" Gets a low dimensional function caller from a high dimensional one. """
group_dim = len(low_d_domain_bounds)
high_d_func, num_groups = get_high_d_function_from_low_d(domain_dim, group_dim,
low_d_func)
high_d_domain_bounds = np.tile(np.array(low_d_domain_bounds).T,
num_groups+1).T[0:domain_dim]
high_d_opt_pt = None
high_d_opt_val = None
if low_d_opt_pt is not None:
high_d_opt_pt = np.tile(low_d_opt_pt, num_groups+1)[0:domain_dim]
high_d_opt_val = high_d_func(high_d_opt_pt)
elif low_d_opt_val is not None:
high_d_opt_val = num_groups * low_d_opt_val
# Return
func_caller = get_euc_function_caller_from_function(high_d_func,
high_d_domain_bounds,
vectorised=False,
opt_val=high_d_opt_val,
opt_pt=high_d_opt_pt,
**kwargs)
return func_caller
def get_high_d_function_caller_from_low_d_func_caller(domain_dim,
low_d_func_caller, **kwargs):
""" Gets a high dimensional function caller from a low dimensional one. """
return get_high_d_function_caller_from_low_d_func(domain_dim, low_d_func_caller.func,
low_d_func_caller.domain.bounds, low_d_func_caller.opt_val,
low_d_func_caller.raw_opt_pt, **kwargs)
def get_high_d_function_caller_from_description(descr, **kwargs):
    """ Gets a high dimensional function caller from a description string of
        the form '<name>-<dim>', e.g. 'branin-20'. """
    segments = descr.split('-')
    domain_dim = int(segments[1])
    # Every entry is invoked with (**kwargs) below, so each must accept
    # keyword arguments. The original 'hartmann' entry was a zero-argument
    # lambda, which raised a TypeError whenever kwargs were supplied; it now
    # forwards **kwargs like the plain function references do.
    descr_to_func_dict = {'hartmann': lambda **kw: get_hartmann_function_caller(6, **kw),
                          'branin': get_branin_function_caller,
                          'currinexp': get_currin_exp_function_caller,
                          'park1': get_park1_function_caller,
                          'park2': get_park2_function_caller,
                          'borehole': get_borehole_function_caller,
                          'shekel': get_shekel_function_caller}
    low_d_func_caller = descr_to_func_dict[segments[0].lower()](**kwargs)
    return get_high_d_function_caller_from_low_d_func_caller(domain_dim,
        low_d_func_caller, **kwargs)
low_d_func_caller, **kwargs)
# Finally, one very convenient wrapper.
def get_syn_function_caller_from_name(function_name, **kwargs):
""" A very convenient wrapper so that you can just get the function from the name. """
#pylint: disable=too-many-return-statements
if function_name.lower() == 'hartmann3':
return get_hartmann_function_caller(3, descr=function_name, **kwargs)
elif function_name.lower() == 'hartmann6':
return get_hartmann_function_caller(6, descr=function_name, **kwargs)
elif function_name.lower() == 'currinexp':
return get_currin_exp_function_caller(descr=function_name, **kwargs)
elif function_name.lower() == 'branin':
return get_branin_function_caller(descr=function_name, **kwargs)
elif function_name.lower() == 'borehole':
return get_borehole_function_caller(descr=function_name, **kwargs)
elif function_name.lower() == 'shekel':
return get_shekel_function_caller(descr=function_name, **kwargs)
elif function_name.lower() == 'park1':
return get_park1_function_caller(descr=function_name, **kwargs)
elif function_name.lower() == 'park2':
return get_park2_function_caller(descr=function_name, **kwargs)
else:
return get_high_d_function_caller_from_description(descr=function_name, **kwargs)
| 0 | 0 | 0 |
93c3a0380c312370c881e10fbe697c1f82ff806e | 8,417 | py | Python | bindings/pydairlib/lcm/process_lcm_log.py | Brian-Acosta/dairlib | 88da55d6e4378b93a787f3587d08b8a60f2f03f0 | [
"BSD-3-Clause"
] | 32 | 2019-04-15T03:10:26.000Z | 2022-03-28T17:27:03.000Z | bindings/pydairlib/lcm/process_lcm_log.py | Brian-Acosta/dairlib | 88da55d6e4378b93a787f3587d08b8a60f2f03f0 | [
"BSD-3-Clause"
] | 157 | 2019-02-21T03:13:57.000Z | 2022-03-09T19:13:59.000Z | bindings/pydairlib/lcm/process_lcm_log.py | Brian-Acosta/dairlib | 88da55d6e4378b93a787f3587d08b8a60f2f03f0 | [
"BSD-3-Clause"
] | 22 | 2019-03-02T22:31:42.000Z | 2022-03-10T21:28:50.000Z | import dairlib
import drake
import numpy as np
# Class to easily convert list of lcmt_osc_tracking_data_t to numpy arrays
| 38.967593 | 116 | 0.667578 | import dairlib
import drake
import numpy as np
# Class to easily convert list of lcmt_osc_tracking_data_t to numpy arrays
class lcmt_osc_tracking_data_t:
    """ Accumulates the fields of lcmt_osc_tracking_data_t LCM messages over
        time and converts the collected series to numpy arrays for analysis
        and plotting. """
    def __init__(self):
        """ Initialises empty accumulation lists for every tracked field. """
        self.t = []                   # message timestamps (seconds)
        self.y_dim = 0                # dimension of the tracked output
        self.name = ""                # name of the tracking objective
        self.is_active = []           # whether tracking was active per sample
        self.y = []                   # measured output
        self.y_des = []               # desired output
        self.error_y = []             # output error
        self.ydot = []                # measured output velocity
        self.ydot_des = []            # desired output velocity
        self.error_ydot = []          # velocity error
        self.yddot_des = []           # desired output acceleration
        self.yddot_command = []       # commanded acceleration
        self.yddot_command_sol = []   # acceleration solution from the QP
    def append(self, msg, t):
        """ Appends one message's fields (and timestamp t) to the series. """
        self.t.append(t)
        self.is_active.append(msg.is_active)
        self.y.append(msg.y)
        self.y_des.append(msg.y_des)
        self.error_y.append(msg.error_y)
        self.ydot.append(msg.ydot)
        self.ydot_des.append(msg.ydot_des)
        self.error_ydot.append(msg.error_ydot)
        self.yddot_des.append(msg.yddot_des)
        self.yddot_command.append(msg.yddot_command)
        self.yddot_command_sol.append(msg.yddot_command_sol)
    def convertToNP(self):
        """ Converts every accumulated list to a numpy array in place. """
        self.t = np.array(self.t)
        self.is_active = np.array(self.is_active)
        self.y = np.array(self.y)
        self.y_des = np.array(self.y_des)
        self.error_y = np.array(self.error_y)
        self.ydot = np.array(self.ydot)
        self.ydot_des = np.array(self.ydot_des)
        self.error_ydot = np.array(self.error_ydot)
        self.yddot_des = np.array(self.yddot_des)
        self.yddot_command = np.array(self.yddot_command)
        self.yddot_command_sol = np.array(self.yddot_command_sol)
def process_log(log, pos_map, vel_map, act_map, controller_channel):
    """ Iterates over an LCM log and extracts robot state, inputs, contact
        forces, OSC debug data and controller signals into numpy arrays.

        pos_map / vel_map / act_map map channel-reported names to indices so
        the state and effort vectors come out in a canonical ordering.
        controller_channel is the channel name carrying the commanded efforts.

        Returns a large tuple of time series (see the return statement).
    """
    t_x = []
    t_u = []
    t_controller_switch = []
    t_contact_info = []
    fsm = []
    q = []
    v = []
    u_meas = []
    u = []
    kp = []
    kd = []
    t_pd = []
    estop_signal = []
    switch_signal = []
    osc_debug = dict()
    contact_forces = [[], [], [], []]  # Allocate space for all 4 point contacts
    contact_info_locs = [[], [], [], []]
    cassie_out = []  # Cassie out types
    osc_output = []
    u_pd = []
    t_u_pd = []
    t_lcmlog_u = []
    full_log = dict()
    channel_to_type_map = dict()
    unknown_types = set()
    known_lcm_types = [dairlib.lcmt_robot_output, dairlib.lcmt_cassie_out, dairlib.lcmt_controller_switch,
                       dairlib.lcmt_osc_output, dairlib.lcmt_pd_config, dairlib.lcmt_robot_input,
                       drake.lcmt_contact_results_for_viz, dairlib.lcmt_contact, dairlib.lcmt_input_supervisor_status]
    osc_debug_channels = ['OSC_DEBUG_WALKING', 'OSC_DEBUG_RUNNING', 'OSC_DEBUG_STANDING', 'OSC_DEBUG_JUMPING']
    # osc_debug_channels = ['OSC_DEBUG_WALKING', 'OSC_DEBUG_RUNNING', 'OSC_DEBUG_JUMPING']
    state_feedback_channels = ['CASSIE_STATE_SIMULATION', 'CASSIE_STATE_DISPATCHER']
    for event in log:
        # Discover the message type of each new channel by trying to decode
        # with every known type; channels that decode with none are remembered
        # so we do not retry them on every event.
        if event.channel not in full_log and event.channel not in unknown_types:
            for lcmtype in known_lcm_types:
                try:
                    lcmtype.decode(event.data)
                    channel_to_type_map[event.channel] = lcmtype
                except ValueError:
                    continue
            if event.channel in channel_to_type_map:
                full_log[event.channel] = []
            else:
                unknown_types.add(event.channel)
        if event.channel in full_log:
            full_log[event.channel].append(channel_to_type_map[event.channel].decode(event.data))
        if event.channel in state_feedback_channels:
            msg = dairlib.lcmt_robot_output.decode(event.data)
            # Reorder the reported values into canonical index order using the
            # provided name->index maps.
            q_temp = [[] for i in range(len(msg.position))]
            v_temp = [[] for i in range(len(msg.velocity))]
            u_temp = [[] for i in range(len(msg.effort))]
            for i in range(len(q_temp)):
                q_temp[pos_map[msg.position_names[i]]] = msg.position[i]
            for i in range(len(v_temp)):
                v_temp[vel_map[msg.velocity_names[i]]] = msg.velocity[i]
            for i in range(len(u_temp)):
                u_temp[act_map[msg.effort_names[i]]] = msg.effort[i]
            q.append(q_temp)
            v.append(v_temp)
            u_meas.append(u_temp)
            t_x.append(msg.utime / 1e6)  # timestamps are in microseconds
        if event.channel == "CASSIE_INPUT" or event.channel == controller_channel:
            msg = dairlib.lcmt_robot_input.decode(event.data)
            u.append(msg.efforts)
            t_u.append(msg.utime / 1e6)
        if event.channel == "PD_CONTROL":
            msg = dairlib.lcmt_robot_input.decode(event.data)
            u_pd.append(msg.efforts)
            t_u_pd.append(msg.utime / 1e6)
        if event.channel == "INPUT_SWITCH":
            msg = dairlib.lcmt_controller_switch.decode(event.data)
            switch_signal.append(msg.channel == "OSC_STANDING")
            t_controller_switch.append(msg.utime / 1e6)
        if event.channel == "PD_CONFIG":
            msg = dairlib.lcmt_pd_config.decode(event.data)
            kp.append(msg.kp)
            kd.append(msg.kd)
            t_pd.append(msg.timestamp / 1e6)
        if event.channel == "CASSIE_OUTPUT_ECHO":
            msg = dairlib.lcmt_cassie_out.decode(event.data)
            cassie_out.append(msg)
        if event.channel in osc_debug_channels:
            msg = dairlib.lcmt_osc_output.decode(event.data)
            t_lcmlog_u.append(event.timestamp / 1e6)
            osc_output.append(msg)
            # Accumulate each named tracking objective into its own series.
            num_osc_tracking_data = len(msg.tracking_data)
            for i in range(num_osc_tracking_data):
                if msg.tracking_data[i].name not in osc_debug:
                    osc_debug[msg.tracking_data[i].name] = lcmt_osc_tracking_data_t()
                osc_debug[msg.tracking_data[i].name].append(msg.tracking_data[i], msg.utime / 1e6)
            fsm.append(msg.fsm_state)
        if event.channel == "CASSIE_CONTACT_DRAKE" or event.channel == "CASSIE_CONTACT_MUJOCO":
            # Need to distinguish between front and rear contact forces
            # Best way is to track the contact location and group by proximity
            msg = drake.lcmt_contact_results_for_viz.decode(event.data)
            t_contact_info.append(msg.timestamp / 1e6)
            num_left_contacts = 0
            num_right_contacts = 0
            # Slots 0-1 are the left toe's contacts, 2-3 the right toe's; at
            # most two contacts per toe are kept.
            for i in range(msg.num_point_pair_contacts):
                if "toe_left" in msg.point_pair_contact_info[i].body2_name:
                    if(num_left_contacts >= 2):
                        continue
                    contact_info_locs[num_left_contacts].append(
                        msg.point_pair_contact_info[i].contact_point)
                    contact_forces[num_left_contacts].append(
                        msg.point_pair_contact_info[i].contact_force)
                    num_left_contacts += 1
                elif "toe_right" in msg.point_pair_contact_info[i].body2_name:
                    if(num_right_contacts >= 2):
                        continue
                    contact_info_locs[2 + num_right_contacts].append(
                        msg.point_pair_contact_info[i].contact_point)
                    contact_forces[2 + num_right_contacts].append(
                        msg.point_pair_contact_info[i].contact_force)
                    num_right_contacts += 1
            # Pad missing contacts with zeros so every slot has one entry per
            # timestamp.
            while num_left_contacts != 2:
                contact_forces[num_left_contacts].append((0.0, 0.0, 0.0))
                contact_info_locs[num_left_contacts].append((0.0, 0.0, 0.0))
                num_left_contacts += 1
            while num_right_contacts != 2:
                contact_forces[2 + num_right_contacts].append((0.0, 0.0, 0.0))
                contact_info_locs[2 + num_right_contacts].append((0.0, 0.0,
                                                                  0.0))
                num_right_contacts += 1
    # Convert into numpy arrays
    t_x = np.array(t_x)
    t_u = np.array(t_u)
    t_lcmlog_u = np.array(t_lcmlog_u)
    t_controller_switch = np.array(t_controller_switch)
    t_contact_info = np.array(t_contact_info)
    t_pd = np.array(t_pd)
    fsm = np.array(fsm)
    q = np.array(q)
    v = np.array(v)
    u_meas = np.array(u_meas)
    u = np.array(u)
    u_pd = np.array(u_pd)
    kp = np.array(kp)
    kd = np.array(kd)
    estop_signal = np.array(estop_signal)
    switch_signal = np.array(switch_signal)
    contact_forces = np.array(contact_forces)
    contact_info_locs = np.array(contact_info_locs)
    for i in range(contact_info_locs.shape[1]):
        # Swap front and rear contacts if necessary
        # Order will be front contact in index 1
        if contact_info_locs[0, i, 0] > contact_info_locs[1, i, 0]:
            contact_forces[[0, 1], i, :] = contact_forces[[1, 0], i, :]
            contact_info_locs[[0, 1], i, :] = contact_info_locs[[1, 0], i, :]
        if contact_info_locs[2, i, 0] > contact_info_locs[3, i, 0]:
            contact_forces[[2, 3], i, :] = contact_forces[[3, 2], i, :]
            contact_info_locs[[2, 3], i, :] = contact_info_locs[[3, 2], i, :]
    for key in osc_debug:
        osc_debug[key].convertToNP()
    x = np.hstack((q, v))  # combine into state vector
    return x, u_meas, t_x, u, t_u, contact_forces, contact_info_locs, \
           t_contact_info, osc_debug, fsm, estop_signal, \
           switch_signal, t_controller_switch, t_pd, kp, kd, cassie_out, u_pd, \
           t_u_pd, osc_output, full_log, t_lcmlog_u
4893f6a866e8ab79f5abd9ffd38994d1c37af014 | 14,032 | py | Python | navigation_old.py | mferg00/soccer-robot-ai | e25b685304fe671a0cccc17d666e9d22dc53e806 | [
"Apache-2.0"
] | null | null | null | navigation_old.py | mferg00/soccer-robot-ai | e25b685304fe671a0cccc17d666e9d22dc53e806 | [
"Apache-2.0"
] | null | null | null | navigation_old.py | mferg00/soccer-robot-ai | e25b685304fe671a0cccc17d666e9d22dc53e806 | [
"Apache-2.0"
] | null | null | null | import RPi.GPIO as GPIO
from drive import Drive
from dribbler import Dribbler
from field import Field
from enum import Enum
from kicker import Kicker
import time
import numpy as np
#########
# ENUMS #
#
############
# FUNCTIONS
# these should be very generic
###########
# VARIABLES
# used for condition checking or generic functions within navigation class
BALL_IN_DRIBBLER_DIST = 9 # min distance when ball is in the dribbler
BALL_IN_DRIBBLER_ANGLE = 15 # ball must be within +-28 deg of fov to be in dribbler (32 is max)
GOAL_DIST = 90 # distance to stop at to shoot for goal
GOAL_ANGLE = 3 # goal must be within +-3 deg of fov to be in dribbler
OBSTACLE_DIST = 30 # min dist to begin circ,ing object
MIN_SPEED = 35 # the speed the robot will be at when its target is very close
#############
# NAV CLASS #
#
| 30.438178 | 153 | 0.553521 | import RPi.GPIO as GPIO
from drive import Drive
from dribbler import Dribbler
from field import Field
from enum import Enum
from kicker import Kicker
import time
import numpy as np
#########
# ENUMS #
#
class state(Enum):
    """ Top-level behaviour states of the soccer robot's navigation state
        machine. """
    none = 1
    ball_search = 2
    go_to_ball = 3
    goal_search = 4
    go_to_goal = 5
    shoot_ball = 6
class search(Enum):
    """ Sub-states used while searching the field (spinning and circling
        obstacles). """
    none = 1
    spin = 2
    go_to_obstacle = 3
    circle_obstacle = 4
    next_obstacle = 5
############
# FUNCTIONS
# these should be very generic
def within_angle(heading, limit):
    """ Returns True when heading lies inside the symmetric band
        [-limit, limit]. """
    return -limit <= heading <= limit
def check_speed(speed):
    """ Clamps a motor pwm value to the valid range [-100, 100]. """
    return max(-100, min(100, speed))
def cap_ran(ran):
    """ Clamps a range reading to [5, 250] cm; passes None (no reading)
        straight through. """
    if ran is None:
        return None
    return min(max(ran, 5), 250)
###########
# VARIABLES
# used for condition checking or generic functions within navigation class
BALL_IN_DRIBBLER_DIST = 9 # min distance when ball is in the dribbler
BALL_IN_DRIBBLER_ANGLE = 15 # ball must be within +-28 deg of fov to be in dribbler (32 is max)
GOAL_DIST = 90 # distance to stop at to shoot for goal
GOAL_ANGLE = 3 # goal must be within +-3 deg of fov to be in dribbler
OBSTACLE_DIST = 30 # min dist to begin circ,ing object
MIN_SPEED = 35 # the speed the robot will be at when its target is very close
#############
# NAV CLASS #
#
class Navigation():
    """State-machine navigation controller for a ball-chasing soccer robot.

    Each call to update() ingests one frame of vision data (three obstacles,
    ball, goal), refreshes boolean conditions, picks a state and executes the
    matching action (search, approach ball, search/approach goal, shoot).

    NOTE(review): depends on module-level names not visible in this chunk
    (state, search, Drive, Dribbler, Field, Kicker, check_speed, cap_ran,
    within_angle, np, GPIO, time, and the MIN_SPEED / *_DIST / *_ANGLE
    tuning constants) -- confirm against the full module.

    NOTE(review): self.prev_state is written both by decide_state() (last
    FSM state) and by circle_obstacle() (last obstacle range) -- the two
    uses collide on the same attribute; verify this is intentional.
    """
    def __init__(self):
        # this is only changed once the ball is kicked; update() reports it
        # to the caller so the vision pipeline can switch target goals
        self.switch_goal = False
        self.prev_state = state.none
        # sign (-1/0/+1) of the last seen ball/goal heading; used as the
        # spin-direction hint when searching
        self.prev_ball = -1
        self.prev_goal = -1
        # CONDITIONS (refreshed every frame by decide_conditions())
        self.ball_in_dribbler = False
        self.ball_found = False
        self.goal_found = False
        self.goal_aligned = False
        # STATES
        self.state = state.none
        self.search = search.none
        # OBJECTS (hardware subsystems)
        self.drive_sys = Drive()
        self.dribbler = Dribbler()
        self.field = Field()
        self.kicker = Kicker()
        # MOTOR pwm (last commanded values, read back by choose_drib_speed())
        self.left_motor = 0
        self.right_motor = 0
        # range/headings (None while the corresponding object is not seen)
        self.obs_range = [None, None, None]
        self.obs_heading = [None, None, None]
        self.ball_range = None
        self.ball_heading = None
        self.goal_range = None
        self.goal_heading = None
    ####################
    # GENERIC FUNCTIONS
    #
    # input: left and right motor pwms
    # output: pwm value that gets larger the sharper the robot is turning
    # WORK IN PROGRESS
    def choose_drib_speed(self):
        """Return a dribbler pwm that grows with how sharply we are turning."""
        # find ratio: -1 is spinning on spot, 1 is going straight
        ratio = 0
        if(self.left_motor == 0 or self.right_motor == 0):
            ratio = 0
        else:
            ratio = min(self.left_motor, self.right_motor)/max(self.left_motor, self.right_motor)
        # convert ratio to: 0 is straight, 2 is spinning on spot
        ratio = -ratio + 1
        # if motor is going forward (ratio = 0), dribbler is at 50 pwm
        # if motor is doing a full spin (ratio = 2), dribbler is at 90 pwm
        # NOTE(review): with the constants below the raw range is 60..260
        # before check_speed() clamps it -- the two comment lines above look
        # stale relative to the code.
        speed = 60 + (ratio * 100)
        return check_speed(speed)
    # input: object heading/range
    # output: drive and center towards object, with speed slowing down based on proximity
    # example: object at 100cm, -16deg:
    # left_motor: 50 + 100/8 - -16 = 78.5
    # right_motor: 50 + 100/8 + -16 = 46.5
    def goto_object_simple(self, speed, ran, head):
        """Drive straight at (ran, head): base speed plus a range term,
        differentially steered by the heading error. Commands the motors
        and caches the pwm values for choose_drib_speed().

        NOTE(review): the worked example above divides range by 8 but the
        code divides by 5 -- the example looks stale.
        """
        self.right_motor = check_speed( (MIN_SPEED * speed) + (ran / 5) - (head) )
        self.left_motor = check_speed( (MIN_SPEED * speed) + (ran / 5) + (head) )
        self.drive_sys.drive(self.left_motor, self.right_motor)
    # input: object target
    # output: drive towards object and avoid obstacles
    # NOTE(review): prev_heading is only referenced by the disabled smooth()
    # code kept in the docstring of goto_object() below.
    prev_heading = -999
    def goto_object(self, speed=1, object='ball'):
        '''
        def smooth(heading):
            if self.prev_heading == -999:
                self.prev_heading = heading
            else:
                if abs(self.prev_heading - heading) > 5:
                    self.prev_heading -= np.sign(self.prev_heading)*5
                else:
                    self.prev_heading = heading
            return self.prev_heading'''
        # If the nearest obstacle is closer than the ball, let Field compute
        # an avoidance heading; otherwise steer directly at the target.
        if(self.obs_range[0] is not None and self.obs_range[0] <= self.ball_range):
            self.field.change_state(self.state)
            target_heading = self.field.update(self.obs_range, self.obs_heading, self.ball_range, self.ball_heading, self.goal_range, self.goal_heading)
            if object == 'ball':
                target_range = self.ball_range
            elif object == 'goal':
                target_range = self.goal_range
            else:
                # unknown target name: do nothing this frame
                return
        else:
            if object == 'ball':
                target_range = self.ball_range
                target_heading = self.ball_heading
            elif object == 'goal':
                target_range = self.goal_range
                target_heading = self.goal_heading
        self.goto_object_simple(speed, target_range, target_heading)
    def stop(self):
        """Halt drive and dribbler, then release the GPIO pins (shutdown)."""
        self.drive_sys.stop()
        self.dribbler.stop()
        GPIO.cleanup()
    ####################
    # UPDATE FUNCTION
    #
    # input: vision feed values
    # output: true if goal needs to be switched
    def update(self, obs, ball, goal):
        """Main per-frame entry point.

        obs is three (range, heading) pairs, ball/goal are single
        (range, heading) pairs. Returns True exactly once after a kick so
        the caller can switch the vision pipeline to the other goal.
        """
        # return true if goal needs to be switched, so vision can be changed from main function
        if(self.switch_goal):
            self.switch_goal = False
            return True
        # VISION INFO (cap_ran presumably clamps/normalises ranges -- defined elsewhere)
        self.obs_range = [cap_ran(obs[0][0]), cap_ran(obs[1][0]), cap_ran(obs[2][0])]
        self.obs_heading = [obs[0][1], obs[1][1], obs[2][1]]
        self.ball_range = cap_ran(ball[0])
        self.ball_heading = ball[1]
        self.goal_range = cap_ran(goal[0])
        self.goal_heading = goal[1]
        # ACT BASED ON VISION INFO
        self.decide_conditions()
        self.decide_state()
        self.decide_action()
        return False
    ##################
    # DECIDE FUNCTIONS
    # decides what to do based on the data from update()
    # input: vision data
    # output: condition bool variables
    # NOTE(review): dribbler_counter appears unused in this chunk.
    dribbler_counter = 0
    def decide_conditions(self):
        """Refresh the boolean condition flags from the latest vision data."""
        # if ball is seen
        if(self.ball_range is not None):
            self.ball_found = True
            self.prev_ball = np.sign(self.ball_heading)
            # if ball is within dribbler range/angle
            if(self.ball_range < BALL_IN_DRIBBLER_DIST and within_angle(self.ball_heading, BALL_IN_DRIBBLER_ANGLE)):
                self.ball_in_dribbler = True
            else:
                self.ball_in_dribbler = False
        else:
            self.ball_found = False
        # if goal is seen
        if(self.goal_range is not None):
            self.goal_found = True
            self.prev_goal = np.sign(self.goal_heading)
            #if goal is within shooting range/angle
            if(self.goal_range < GOAL_DIST and within_angle(self.goal_heading, GOAL_ANGLE)):
                self.goal_aligned = True
            else:
                self.goal_aligned = False
        else:
            self.goal_found = False
    def change_state(self, state):
        """Force the FSM into the given state (external override)."""
        self.state = state
    # input: condition variables
    # output: state value
    def decide_state(self):
        """Map the condition flags onto the next FSM state (priority order)."""
        if(not self.ball_found and not self.ball_in_dribbler):
            self.state = state.ball_search
        elif(self.ball_found and not self.ball_in_dribbler):
            self.state = state.go_to_ball
        elif(self.ball_found and self.ball_in_dribbler and not self.goal_found):
            self.state = state.goal_search
            # brief stop on entering goal_search so the ball settles in the dribbler
            if(self.prev_state != self.state):
                self.drive_sys.stop()
                time.sleep(0.25)
        elif(self.ball_found and self.ball_in_dribbler and self.goal_found and not self.goal_aligned):
            self.state = state.go_to_goal
        elif(self.ball_found and self.ball_in_dribbler and self.goal_found and self.goal_aligned):
            self.state = state.shoot_ball
        else:
            self.state = state.ball_search
        self.prev_state = self.state
        # print(self.state)
    # input: state/search value
    # output: appropriate function
    def decide_action(self):
        """Dispatch to the action method for the current state."""
        if(self.state == state.ball_search):
            self.ball_search()
        else:
            # leaving ball_search releases the search lock
            self.search_lock = False
            if(self.state == state.go_to_ball):
                self.go_to_ball()
            if(self.state == state.goal_search):
                self.goal_search()
            if(self.state == state.go_to_goal):
                self.go_to_goal()
            if(self.state == state.shoot_ball):
                self.shoot_ball()
            # if state == none
            # NOTE(review): this else pairs with the shoot_ball check just
            # above, not with a state==none case as the comment suggests.
            else:
                pass
    ##################
    # ACTION FUNCTIONS
    #
    # input: search value
    # output: appropriate ball searching method
    # search_lock pins the sub-search once go_to_obstacle starts, so a brief
    # ball sighting elsewhere cannot restart the spin phase
    search_lock = False
    def ball_search(self):
        """Ball search sub-state machine: spin in place, then drive to the
        nearest obstacle and circle it until the ball is seen again."""
        if(not self.search_lock):
            if(self.search == search.none):
                self.search = search.spin
        if(self.search == search.spin):
            # will not be true until until spin is complete
            #print("searching for ball")
            if(self.drive_sys.spin(speed=30, direction=self.prev_ball, radius=0, cycles=10)):
                self.search = search.go_to_obstacle
        if(self.search == search.go_to_obstacle):
            self.search_lock = True
            if(self.obs_range[0] is None):
                #print("searching for obstacle")
                self.drive_sys.spin(speed=30, direction=self.prev_ball, radius=0, cycles=None)
            else:
                if(self.obs_range[0] > OBSTACLE_DIST):
                    #print("goi8ng to obstacle")
                    self.goto_object_simple(1, self.obs_range[0], self.obs_heading[0])
                else:
                    self.search = search.circle_obstacle
        if(self.search == search.circle_obstacle):
            #print("circling obstacle")
            # will not be true until circle is complete
            if(self.circle_obstacle(adjust=100)):
                #print("CIRCLE OBSTACLE COMPLETE")
                # if after all that the ball is not found, restart the cycle again
                self.search = search.none
                self.search_lock = False
    # persistent circling state, reset whenever a circle completes
    orientate = True
    spin_out_timer = 0
    right_wheel = 40
    left_wheel = 30
    prev_state = 0
    adjust_counter = 0
    # input: largest obstacle range/heading
    # output: circling obstacle
    def circle_obstacle(self, adjust=10):
        """Orbit the nearest obstacle; returns True once the robot has lost
        and re-acquired the obstacle *adjust* times (one full-ish circle)."""
        # count how many times the robot has to adjust based on if it saw the object
        if(self.obs_range[0] is None and self.prev_state is not None):
            self.adjust_counter += 1
        self.prev_state = self.obs_range[0]
        # if it has reached the limit, reset everything for the next time the function is called
        if(self.adjust_counter >= adjust):
            self.orientate = True
            self.spin_out_timer = 0
            self.right_wheel = 40
            self.left_wheel = 30
            self.prev_state = 0
            self.adjust_counter = 0
            self.drive_sys.stop()
            return True
        # if the robot needs to oreintate itself
        if(self.orientate and self.obs_range[0] is not None):
            self.drive_sys.drive(-30, 30)
            self.spin_out_timer = 0
        # else if it needs to circle
        else:
            self.orientate = False
            if self.obs_range[0] is not None:
                self.spin_out_timer = 0
                self.right_wheel = 40
                self.left_wheel = 30
            if(self.obs_range[0] is None):
                # obstacle lost: widen the arc a little each tick
                self.spin_out_timer += 1
                self.right_wheel += 0.4
                self.left_wheel -= 0.4
            # spin in
            if(self.spin_out_timer >= 10):
                self.drive_sys.drive(self.right_wheel, self.left_wheel)
            # spin out
            if(self.spin_out_timer < 10):
                self.drive_sys.drive(-40, 40)
        return False
    # input: ball range/heading, ball_in_dribbler variable
    # output: driving to ball, turning dribbler on if in range
    # WILL DRIVE STRAIGHT TOWARDS BALL, IGNORING OPBSTACLES
    def go_to_ball(self):
        """Approach the ball; switch the dribbler on inside 20 (cm, presumably)."""
        # self.goto_object_simple(1, self.ball_range, self.ball_heading)
        # REPLACE ^ WITH:
        self.goto_object(speed=1, object='ball')
        if(self.ball_range < 20):
            self.dribbler.start(95)
        else:
            self.dribbler.stop()
    # input:
    # output: turning in a circle without losing ball
    def goal_search(self):
        """Rotate towards the last-seen goal side while holding the ball."""
        self.dribbler.start(90)
        self.drive_sys.spin(speed=20, direction=self.prev_goal, radius=10, cycles=None)
    def go_to_goal(self):
        """Drive at the goal (reduced speed) while dribbling the ball."""
        self.dribbler.start(95)
        self.goto_object_simple(0.65, self.goal_range, self.goal_heading)
        # REPLACE ^ WITH:
        # self.goto_object(speed=0.65, object='goal')
    def shoot_ball(self):
        """Release the ball, kick, stop, and flag a goal switch for update()."""
        self.dribbler.stop()
        self.kicker.kick()
        self.drive_sys.stop()
        self.switch_goal = True
| 9,270 | 3,752 | 149 |
e8cc4bd94d2a885ce94833f097eea32502e6fb37 | 5,842 | py | Python | DataMining/weather/cylinders.py | navijo/FlOYBD | beb4fb32598e955ce3b47ad647452c15a87cfae8 | [
"MIT"
] | null | null | null | DataMining/weather/cylinders.py | navijo/FlOYBD | beb4fb32598e955ce3b47ad647452c15a87cfae8 | [
"MIT"
] | 7 | 2020-03-24T15:55:32.000Z | 2021-08-23T20:43:13.000Z | DataMining/weather/cylinders.py | navijo/FlOYBD | beb4fb32598e955ce3b47ad647452c15a87cfae8 | [
"MIT"
] | 3 | 2017-07-26T11:46:05.000Z | 2018-04-29T12:07:59.000Z | import simplekml
import json
from polycircles import polycircles
import time
from subprocess import call
'''
https://github.com/LiquidGalaxyLAB/ViewYourData/blob/master/VYD_Project/VYD/Utils/PresentationManager/cylinder_generator.py
https://developers.google.com/maps/documentation/geocoding/intro?hl=es-419
'''
| 40.853147 | 156 | 0.626498 | import simplekml
import json
from polycircles import polycircles
import time
from subprocess import call
class CylindersKml(object):
    """Builds a KML file of paired min/max temperature cylinders and labels.

    Each element of *data* is expected to be a dict with keys 'name',
    'description' (a [max_temp, min_temp] pair), 'coordinates'
    ({'lat': ..., 'lng': ...}) and 'extra' -- inferred from usage below;
    confirm against the caller.
    """
    def __init__(self, name, data):
        self.name = name
        self.data = data
        self.kml_var = simplekml.Kml()
    def makeKML(self):
        """Parse *data* into KML geometry and write the file to disk."""
        # NOTE(review): despite the name this is a *seconds* timestamp,
        # kept as-is for backward-compatible file naming.
        current_milli_time = int(round(time.time()))
        self.parseData()
        self.saveKml(current_milli_time)
        #self.sendKml(current_milli_time)
    def parseData(self):
        """Create a cylinder pair + label pointers for every element that
        has both a max and a min temperature."""
        for element in self.data:
            # idiom fix: identity comparison with None instead of `== None`
            if(element['description'][0] is not None and element['description'][1] is not None):
                self.newCylinder(element['name'], element['description'], element['coordinates'], element['extra'])
                self.newPointer(element['name'], element['description'], element['coordinates'], element['extra'])
    def newPointer(self, name, description, coordinates, extra):
        """Add the floating max/min temperature labels for one location."""
        pointer_max = self.kml_var.newpoint(name=str(description[0])+ u'\u2103')
        pointer_min = self.kml_var.newpoint(name=str(description[1])+ u'\u2103')
        self.generatePointer(pointer_max, description[0], coordinates, 'max')
        self.generatePointer(pointer_min, description[1], coordinates, 'min')
        if extra:
            print ('There is extra !')
    def generatePointer(self, point, temp, coordinates, flag):
        """Style one label point; altitude scales with the temperature so the
        label sits on top of its cylinder. flag is 'max' (red, at the site)
        or 'min' (light blue, offset south-west)."""
        point.altitudemode = 'relativeToGround'
        point.gxballoonvisibility = 0
        point.style.iconstyle.scale = 0
        point.style.labelstyle.scale = 1
        point.style.balloonstyle.displaymode = 'hide'
        if flag == 'min':
            point.style.labelstyle.color = simplekml.Color.lightblue
            point.coords = [(float(coordinates['lng'])-0.025, float(coordinates['lat'])-0.025, 2200*int(temp))]
        elif flag == 'max':
            point.style.labelstyle.color = simplekml.Color.red
            point.coords = [(float(coordinates['lng']), float(coordinates['lat']), 2200*int(temp))]
    def newCylinder(self, name, description, coordinates, extra):
        """Add the max/min temperature cylinders for one location."""
        shape_polycircle_max = self.kml_var.newmultigeometry(name=name+'-max')
        shape_polycircle_min = self.kml_var.newmultigeometry(name=name+'-min')
        self.generateCylinder(shape_polycircle_max, description[0], coordinates, 'max')
        self.generateCylinder(shape_polycircle_min, description[1], coordinates, 'min')
        if extra:
            print ('There is extra !')
    def generateCylinder(self, shape, temp, coordinates, flag):
        """Extrude a 1 km-radius cylinder whose height is proportional to the
        temperature. The 'min' cylinder is offset south-west so both fit."""
        if flag == 'min':
            polycircle = polycircles.Polycircle(latitude=float(coordinates['lat'])-0.025,
                                                longitude=float(coordinates['lng'])-0.025, radius=1000, number_of_vertices=100)
        elif flag == 'max':
            polycircle = polycircles.Polycircle(latitude=float(coordinates['lat']),
                                                longitude=float(coordinates['lng']), radius=1000, number_of_vertices=100)
        latloncircle = polycircle.to_lon_lat()
        latlonaltcircle = []
        polygon_circle = []
        multiplier = 2000            # metres of altitude per degree of temperature
        temperature = int(temp)
        # vertical 'pole' up with a circle at the very top of it
        for element in latloncircle:
            tup = (element[0], element[1], (temperature * multiplier) + 10,)
            latlonaltcircle.append(tup)
        # cylinder wall (down to the ground and back)
        for element in latloncircle:
            tup = (element[0], element[1], temperature * multiplier,)
            latlonaltcircle.append(tup)
            tup = (element[0], element[1], 0,)
            latlonaltcircle.append(tup)
        # second wall pass in the opposite vertical order (inner/outer face)
        for element in latloncircle:
            tup = (element[0], element[1], 0,)
            latlonaltcircle.append(tup)
            tup = (element[0], element[1], temperature * multiplier,)
            latlonaltcircle.append(tup)
        for element in latloncircle:
            tup = (element[0], element[1], 0,)
            latlonaltcircle.append(tup)
        pol = shape.newpolygon()
        pol.outerboundaryis = latlonaltcircle
        pol.altitudemode = simplekml.AltitudeMode.relativetoground
        pol.extrude = 5
        pol.style.linestyle.width = 5000
        polygon_circle.append(polycircle)
        latlonaltcircle = []
        # top cap circle of the cylinder (lid)
        for element in latloncircle:
            tup = (element[0], element[1], (temperature * multiplier) + 20,)
            latlonaltcircle.append(tup)
        pol = shape.newpolygon()
        pol.outerboundaryis = latlonaltcircle
        pol.altitudemode = simplekml.AltitudeMode.relativetoground
        pol.extrude = 5
        self.addColor(pol, flag)
        pol.style.linestyle.width = 5000
        polygon_circle.append(polycircle)
    def addColor(self, polygon, flag):
        """Colour a polygon blue for 'min' cylinders, red for 'max'."""
        if flag == 'min':
            polygon.style.polystyle.color = simplekml.Color.blue
            polygon.style.linestyle.color = simplekml.Color.blue
        elif flag == 'max':
            polygon.style.polystyle.color = simplekml.Color.red
            polygon.style.linestyle.color = simplekml.Color.red
    def saveKml(self, current_milli_time):
        """Write the KML to ./cylinders/ with a timestamped file name."""
        self.kml_var.save("./cylinders/cylinders_" + self.name+str(current_milli_time)+".kml")
    def sendKml(self, current_milli_time):
        """Point the Liquid Galaxy master at the generated KML URL.

        Bug fix: the command is a shell pipeline (echo ... | ssh ...), so it
        must run through a shell. The old `call([command])` tried to exec the
        entire string as a single program name and always raised
        FileNotFoundError. NOTE(review): hard-coded password/IPs kept as-is.
        """
        command = "sshpass -p 'lqgalaxy' echo 'http://130.206.117.178:8000/cylinders_weather.kml' | ssh lg@192.168.88.242 'cat - > /var/www/html/kmls.txt'"
        call(command, shell=True)
'''
https://github.com/LiquidGalaxyLAB/ViewYourData/blob/master/VYD_Project/VYD/Utils/PresentationManager/cylinder_generator.py
https://developers.google.com/maps/documentation/geocoding/intro?hl=es-419
'''
| 5,186 | 6 | 315 |
58700b1ab5a6bb83d87d663e865662eaab49e36a | 45,318 | py | Python | plugins/bot_cockroach.py | opq-osc/botoy-plugins | 8889942cfb5d44c94705b6841cbd821ed6ee6b4b | [
"MIT"
] | 20 | 2021-04-22T14:34:27.000Z | 2022-03-22T01:30:25.000Z | plugins/bot_cockroach.py | opq-osc/botoy-plugins | 8889942cfb5d44c94705b6841cbd821ed6ee6b4b | [
"MIT"
] | 1 | 2021-05-18T12:09:19.000Z | 2021-05-19T04:23:49.000Z | plugins/bot_cockroach.py | opq-osc/botoy-plugins | 8889942cfb5d44c94705b6841cbd821ed6ee6b4b | [
"MIT"
] | 4 | 2021-06-13T04:21:09.000Z | 2022-01-06T12:13:22.000Z | """
来点小强+{可选小强数量}
可以艾特一个人使用
"""
import base64
import random
from io import BytesIO
import httpx
from botoy import GroupMsg, S
from botoy import decorators as deco
from botoy.parser import group as gp
from PIL import Image
小强 = Image.open(
BytesIO(
base64.b64decode(
"""
iVBORw0KGgoAAAANSUhEUgAAAKAAAACgCAYAAACLz2ctAAAgAElEQVR4nOS9CZhdV3UlvM45d3zz
q1dzlVRVUkmyRku2PINnMAQMNEOGJp2EpNMNSYD8nQQa0mkSOoHMCSGBJh0ShgRImAzYjhkcsLGN
5UG2LGuwNZVU81xvvPM5/Z1z75PKUkkqyaq08b+/z5ZU9d6799277j57r732PuT1jKJoMhx2Q1S5
wHEAOQBThKAHgCninwlC4AFoBcAATAIgiE0Iof58lWWhhVK4yb8vtRF5Dr4PEobIA1ggBBUhkJJ/
B/AECMSi81quCVzY+V6vM9xkaZjly3ufvF4LnPysH4UPBUIMdVgMEBwBtC17fRygENHFXCp5dA6o
70+Tf59+XGlPhCFqnKvrd8ZnCMDQCH6qTQdrcLSaFOOgODjtISP4BV0ZeS66xEiBQTSArB/hECF4
6Bx40C7g81+y5gKwANxp69ApRbjMB0C+LMtwZasmDoUCleW8R6fAbChwTAgIsrzjaES+T//geMTf
Z9FwSN4kn5j3HvWCg0f84DfpRVzYJvAMAuw+32kQsiT4Xgr2sgAgkhtCoggFCngQy/KCEYH02OvW
GvSIx8/vB0nihduogCOk1zn/UeQrQqHOqXc2YO+AEAcJNe4POWk5HkZvKDHKLxSACnyEoC4EOqnA
XIjlPT0vQXtZALAJg2oQQd5NkXi3876PoiUKCJ/yo4VoGYuNAqB8mc6Q1in8Za5PPgcGdXH0rSXy
5j0N+80TfjTv+WHnRmaFjC7vXJsmX5ohFD4EvhM4MJNlDxcRerwU7GXjAeUXkV5JCzlyGkVwvrtK
gDCi2yyNdhepQLBMb2YTgl2NEMc4Vzf/fCbPogGgq2B6KcYQimjXnO/ehEjzbIm+ZcaRSD7LAlHn
8WDgwhEcaY1ecAz7UrKXDQBFEnTL+xnyeNk7F6SIBCilm3VC2kIsHwdVIbDN1nAlISeTr3MZUe8B
ChrbfH8ZUycc/1qTCbicoCFEkjYt//tpFPhh4GJe5i3kzMTjx81eNgBEcoPcSGBeCBUnncvkUtqq
kw2jER+uRcvN9oiCQkansAgQLOcdQiAQBLmIphfC8K6KzCwjipBE4IIna/r5TT4gLZTgyZBj+uKS
5pekvawAiAQiMrlghJwdhAIICGAzOmAj/KxOhaKZzmuJx0tBgMkseBnYoQSIgDUyXqwF0X00EmAE
sIgAkeBbxmcIlXQAIQhO8PM9Wj9e9rICoEgCcgsCDSrUUrykESBL6U4C5MdC/uTycuZTRmnCuy3H
eQkBk9INGhHo0/GYJnmci2AqS5TiS+5yUqUfL3tZesAwEiiLs0dX8jclxt4yEmHuyYAvGw7y81Zr
FN06RW2ZQaNMGWxKuuQxx4FpTR79AvGXAsWxSOBg8PLyfng5AtBLCNqbdYIUJSoZOd1k3ugzdv1h
P/q7ndKbLWP5FUm1IUcEjDBCeplYkC+zNJInjIhWyjyTiGWylKeO3K5RfKbqX8B7fnzsxw6AIgFY
FkBZiDNKb82S1FgkYBJyZlVEUjWEbrE5UjkefLFIsEQR60yTL5MQyFIGXQJ2mewxFwIWoYUaF+QZ
N2io8tgF4M8EwS4e4kT48kk8FtslBaBFCGwi6wMrs1RQIqMv8BMARglN5xjdonH+2OmhvHyRwwE3
4GeciaROiob+7gbnT80KES7XG8nPkYDuNxiMs3jWpUwuvYTQVh8i6DSWVz05dUyBTkbwHefM7/Fy
sUsDQOkRhMBhz8cwCIIVCpUlsPKUvXk/yNcMQtbdSLRbiPB3nX5zZObJJM13WpzGY19n2YRuP+57
/6nB4+V42cenRJX5GpwvmzeUlyZDaWsUkYVGQE5WLc5n8uPThOCQz3HiZRj7Ne2SAJAkapkJHmEt
Icvixy7GQiG4xph7LTU+OM9D1BkecaRPWQQGmnidhogk/fECfyOX0FW68RvDQgzt5fx5SdUsOwER
Alt1AoMIzEQXEMUJ6TnRqhGU8xqBtsx3SuBKgcUeL3rZej9c0iVYCNgmw/Z2W8m6VspqIb/3ldvb
viE8RF/7wZiVMVl8txLLUoJDQYDh4NS6LH8dCeUZrXWa9uac7/3HLYvkSuez+L0EGwwGixK0LRN9
pEkNEdJRjcRcCIFlC5w4UCfA6MuIdF7KLhkApUpkzI/w6HRDMfbeCmHQ4QKNhca9r1hnv3o0DRTT
p2gNVaulQGNWKC9sJ8CM67ECg6b5Jz6Pds9H0XMKfMtUAcjXSiJ5IeQoRxdUvlVeuM9gnRxil8vF
eXOXppBCxpntNmAGBE70cmP/TtklA2CzLkoDgQITqK3QNZMZ8KNPzH4lPIJbayYzqn54kp+wKDAf
CTwdxMsWFeJk9tpO6cZexv7D9xvOlloCqnOdYtN7SQBJMG+3NKwzKRYSMMj6cXCeejOS9xPAWIj4
ifllAKlZ892UFsga7Oxk+svELjkNc0TEHsJdsYdWwCDswcugZcZcb8tciN1Nr5ImwPEQmFHHJqdQ
BIFXWNZd/+aHfzIkxAIuQpzZo1N0yQw4inPuFJWcYAzEs5mMMCMhMBqS0AU5kGLnTnnU7zhBWuco
GkA9+vGUWF2IXXIAyou4lsXZ3sokI0QW8Y/XfVr2mP6Wugh2N2UhvvKAzbM4Rc7cYJl/007AdgX+
x3qWeRRJaBekjEoI5EyCfUGELywig7sIwSAF6uf0gkLGf607s3bGoPxpgfOQ0ALoTccxay2MM6rz
OUAZUzZXm6XBHf/0/zeK6AYX6ua1aEB9hZYPSiQVIna3Uu1WxgJ1U2kiTz8ixCLvJ7BF11671dB/
5au1xkBw0iGe2z3L15UAbGlWPwAMKzdLTqJtQtE8UMKCpb5mUwndq7N3dNlCH5rje5hcGpZQv0hP
KRXZhgG02xTDjVh2JQ8ZnesaxqR6yzYm6gtCeIshJj2vPH7BYKhSgiHvpZnMrEglZE8km5oAd4UA
KC90nkX3bqDab9uSdBFAmgLjEVCLTrmD1ZQW3phO3fvVuvO+ac6HLuQYWxMhKU0EDqcnD9Kh9BOg
M3ndUtYQBFdl2W+VCccIF2V9UayMRRSR/JlNgPWpmBaSXyGM+1Ww0aKYcSLY7LTatgAYJV1por03
IP6H2sipcqEfCcWFrioauLZo4Vk3wrHR+lm/6wr1kC3LVgSA8iHvoUBthbw+UUwffyAEfreD0jzA
y22U4OAi6qUDwOty1tETUbDnUBD8SeY8nymfFUeVvoDLk1Kf/Le9xGtp8voKF1iHpT1goD6LXVm0
WdszC8Ff+VxgKR8k5foGA/pSFAERyuOR5DP9CLi+qOEHXKAevjCD5kQgTbU7Whhy41wuOuTk+zI2
Q1+rrV4v4+HaSziLXhEAygREai31FX26+DMBUN4nyHaf4wFLAE8H4mT++oaseXfWQvGTk450Uqgl
1ZqljCTLbHuy9LZIFfM5Lg5JQDiU/Hl6rCsS8N5YYJ+WMdpMNfqfa/FCAMnXyGw8oxEUUlR51MU9
JvIYARfIM6Dbpvi3hVDVhZt+UIaIg7r2PoOJf5JSLYmxwIlQBsFgycJAq4m9Iw1EJntJS7hWBIBl
HteFW+nKZcOUCC/iGPEjso0BD4zLvsrkSX9dyvzTrSX2un8d825eK+CbzQB8iUBcerwRec4Adgih
KJD6MvQC8veSKD4s4opFE1zNXl0TbNu2PLv83+aDLz0CXm6eQ9NLyXBwjU7QnaGQEHEinNFjIt9S
CQU2Zxl21zhmQ65umHx/ibFrNhWMjQs157s5QVALODZvyOLRUQ+ztRCdBX4xyf6/u60IACUOKkQg
QwlWyvtL8plwUS0a9KqcDhypxj/frOn/6e2d9DfuWvD+cI8fPVBYBpHBF6ldtEUc4LlMBvltGrDD
JHi6LtT7/CR7lvbaIvucJF0mKvzdg4k0v/mZ8k/NIFiXIoh4Mzde+jylV8wQ4DUFir0LsQpbLuU7
26wPFVs0HJiPDjIGrCqYeNWVLdg3OwX/JZpwLGUrJMcSqEYEhIkLqhpcqOlEHOKcbJwIgKO+QBHa
zg+s1j63x/fv/5e56APy48bOJ3tP3ETuIo4vNQLb0hQOFdhV5ehiBJuFQIVrWzfn2eWPzQefboDP
ZAg5qaCW5bgBi6I9BcwGskEdKgE5m1HVbipQsijW5YARj6MRiPTlq+zXVoWYPwxeWZ8x0b8qjYob
wfe56hn+cbEV0wNKbsqkFNpKKWOk4gVirx7RHfscgaxgud/s0R8Xul/5m2PRq7CY+1rifjSzUbJo
2bxQC5ML2KLH3k+Sx8wn2Jajn2ec4/ly9K7m2AyV2cqHxqLQ7Pj6JPXp8xpJGAXZbmrXI9kX/Ibu
HhuP7i9/qy8COrkGpxogCg3UGoH6UHYxyv//B7ZiAJStiHUqTi5Jl9pkQ48OPmlTMhPUKd5S1Hdf
1xrg156LrqyJJt23suE3SWK5RrLizQQEbYb+c2sKuPzJ2eCXOEQgvZ8nhBof0mNpoJZM0LgseCzv
GEQoMEWcw/MpQg/IWPpr5uqyu450b+1Of5iaxtbApKnpKka39WbrgccPD0/4z0xOeAfSWW1CmOeu
QItlSXJXxlYMgPLplpo8fXmNXxds8c0X+yOQ1DUZ8543dwdr/+x4+Kb9Lg7jlETxpDW94cWAskkq
xx+Ak3SKFL3KZvguHciAYL2hP/iakvbKJ6rBI1+u8783FSMg0EIIdthyEgOBiEQsnjgLAFnSLSfJ
dj+kKDukT3Byhc609aWstuOarszWjjZjQzFPcOerSrenM/rtQiegpg6fE9x8Rye8QGB41MXwsOMF
485Xvn24/NuAmjG1pMkeGtUmvQL36Xy2YgBUlEJIFAhX5OkSqkn8QCDYdW/t963758MP3LNAvqF+
12zYPm0FvliPyJMqiyFjuUTDZQgpjo0HFWU1gp9vYX+6sYhX7pnjwT3z4etFMjRJqnNuzFLkIXAs
ELCW0IApwElmUxDMu2xgoYGrXI3sLGXYzQMF7apihqGrVUN3h45SyYBuyrEgRGkwCQMMg6mlPWtQ
MJ3AyjJ0duVx3TVF07a0t/N/PHG799jUYMB5reZEZ3y3vEHQYhBM14SioP49bWWIaEIwFwH31lZO
Si6XtQ5oV7ymSIxHpsK/++Qs/vAk1M9oA1lOXnt2W+DA1jTQLUtwLlBgZEPG1l45x8O/a7PE7ety
+GzVZ93Hq3hqd93/uQr4vIT9ep3glSk1Sgaj/qkHIR5yJBlnCl82qYf6LRFnr8kIXKen8MqrWiiK
WYquFoZSgcG0GIQc60EpJIBoADCdguoEjMflOhFw5cFYBHhehFolAGEUazfk0NNudezMm2Jzu4E/
e2wG84uvDQE6bIY2g2J0JTPGs9iKeUA5m2VqpT5cGTXf0a49mWEcD81qv3N7Jjqj4C4Qe6lH6xyj
55KtLHo9a474SH4mvZjUhMq+3lWS8HM5KNfeAMFu2Z4P1vfb+m/tnSXRjCN+9zI7+r2szlVqu1aj
uMGWY9k45qNmlSKe2OBwAj807ogEe2PewG1Fk69P6yFaUgKlLJBLS0+mg1OChRqg+TJ5IaDSDUcU
VHDQJMSRukFNRKpRXmq3qGZCMwwYBoFuAHOTHu753sTfHxuq1I0go+/zojM0IoYO3JTXlpUQXWpb
EQDKpW6l1BfNZfSXW/SHt+VC3DOhDfVZ5IYMFV89fYU1SczLVS9AeOqAICRExXmyQLFOF2KeEEgx
qcEobFtP2wZ776ZC1JOi7LU/mqY/rLi4l+vBv9xVgapE7LQJ+giDwyOEKp7j4JzADbQdGcF+OaXx
13SkxUDeCpA1IphanDUFhGDWZYrCMgwBwiJwwhESjoCEkPReIAdNUgHKCBgjSOUyaGlJo61koq3V
htMQaEz5aDg+Zmc9eB7Hrde2bOl/Y8/Ro4dq9qqj1bcsEDxCkmsi/992AU1Wl9ouqSJ6JYGHRZNM
70gZf35zq7jyy+Ph1Z6v/0FnDldRgq8uFm/KV+Y0gvurHJVz8LJNQDtNOoYBZlZHWiPQBX19OQqm
dBLsCkOaDgl7f6utvdNiweFxJ3xzitDDJ1xv7po8wZ5QWPvqArflCIogmPc5ClIuE7GOcqi/NaOR
X9ieDXZ2pUKl4ja1CE4gMO8DE17cRxNJOlqqYJgMXXgseuUxdxhGkfKihmFCN2xougErZYEt6JhY
CJEeXcDmQR8tWQKn5sFzBbImwfo1KazuT1/NKMGWHSUcmycfefD5qZtNFmdCnBGs0SNw/v+m8+6S
K6JXmvpYy7Q3/VQX+/8+P+6+xyD08Rs7yA3fmeC7pgMBY1Gcp6kciGD3ec7HTFTWtqpiSILYwPqu
PCYbLjSf3ny8QtvbmdhQ0PXPyuXO0AWGa+xZKoRta9QlkYHZIECHFrlvzFF0mRwHGwJBoG9xob8z
Y+GXLrNdqzcHtOYNNEJg3AFmF4D5gKMWxRn2SRUZ4Wr0h3wceCAgAjlCF8jkC8gUiopVpCICFR7c
WhU1x4Gtc0RphuHhNFovK6K7ZMRfTmbdOsHMnA8ehVi3IYudV/dc/sj+USNrar5GCBYkwjXZ8Xcq
B25Wr7QL5Edpwo0qBmSZfujHqjE9A9r9q13G1x9Z8L58X1V8/OfadIy4wrwih3TE2QsoHzkVYTwQ
qNRj9/dCJQlIlhAtDRH0l9LYtKEDI6PV//ytI9NjvTa7N9eawp4jntXphTdcaYtrQhg/64vom5NR
+EfpkFzVZrLbJhvk9SfqWqWka0/Nhp5qQyhpBMfq5p1Z0P/cmYve0GH46CkSpPNZTLgMT1Y4xqsu
Zp0g7pkhRBH1LAGcpGi4B9AQSNkaiq1tSOXyqlwn1S9uFMCvz0AnshcwQsOXXpPCYBoaHjA+7aNk
19BZMqHbGvSUDtOiUralLozrc1i2VVhD0dOTpsfk8YdqXLUuNJdgSf+kKBkMQWZngfnlTI1o8qGy
ht6tAekL0IJe0iV4pazpVd/Zau6bDaORv5uNfjIN4MnpEDuLhNkWXX+oGgsJyKkTUm5lPY/OmJ4g
k8YCEzePEhycZnS4a9tqmOn5t27zo5Rt6vdWZpzf2ErEn3Ie4Lgv7nUo/2inxh+SCtTAE49w8I+F
gkOjHGnbw0IgMFE33tZqaO8dMPwbegwPq7p0hNkOHK8RDM9FGJmrYKHhgTIKixEYNPZ2USjgVaGy
2XwhjZaBLtjpTPzEMMAPGqgszKBRXkDocegaoOkMESdKLZM2gSiM4HIKRkI4boQwEKC6gK5CIiCK
OAI52CgU0DQNnk6Lka0dK7scXsBhW1SpbHSlyNHen6YY1xnumg3PFEgsZRK8vqoIEVUulMt6eZlT
vF7yHrDp0d6WNb/eqqPwhxNer/z3tXK50wTSoOUHZnnqcX/p2Sk/15/BpryOaSdCI5TVg0jeEQQR
2zYz7txRKvu/ed/Dh1Gf50eu7Cy9i9fnF0TZzZeDYGRMC3710QX2zdUpjl6DQIsIZOwkY03dBngU
oeyYP28y9u5+y7myQ/exqtdGWFqDZyscxycaGJuZhdvwYJkEWZMpolm+L6jHni7fkkX7llVqeZWt
VG7gotqoYmFqCtXZCkQIpZTWDUm7MPiyquJx5XKoYnIiwDKVB6Kq5iwQhBFoSMEjgUgusQEQBRF0
jcGyNHx7NtC02QAb0wxdFoHBOFKavrYcmt93PP6Pq03xR5qIwXGu1lWqRLcxMb8jRZA14p+5fPlV
wJf+EiwELjf1X746rb3pkzPOKxcE6q0MWK8BoWwM0jm1GmJXv0ZfsFVBlDQOvep127CtvwXVuq+6
zMyUiYHVRRw4NOe0f/6ptxome3/FNN6bJtX/mKpOk7mGnz9R9t/jkvDj9ZTMpBkMEEhBqRkRZLQI
tYig4RlvZIT9Zp/hvKLXBrpX5+C0DuBAg+DYWBknTowgcgNkUwRGmqnYjkjv5ACptIm+9atRbG8H
0TU4oYOphUnMjI2hMttQN0+3pGKGKRfEuUC1zuVzo5ZrSZukLIZiSxEm4/BrFTCDwWaxBjOQni7g
CNWfAkxLEkRdRyRV2iYCO60hx2ISvOxoP+lV9X+edeufpQg+uD5tL2tguyy35gkwkJENW8BcyFU1
hiy34fqlDkB50VooXfvTeeNvv1bxP3g04A/Jn2+y4qFDsx4xWs0o+9pOMp9hFCE/1QfcrgNP1QR+
6dNPqqrMFV0ZvH1jB+Yihkx3DnsPTD3amc/3ZA3/2GZ3YVXZd/m+SfeTU673QaFhQQpFyyEwmIrQ
YzTDAIJZX7+6EukfXa07t/bZPjr78oh6L8MR38TBE+M4fuQYuBMimyUgeYZQustaBJsDxY5WdF45
iHQxDzdwMD01gcmREZRnY1G/aQGpTHL3ZATBBdx6rOsrtuXR0taOTC4L09CgMR0dvT0oDx/Gkad2
Q9eZErfKeE56PW4m3VIkPncRceiGganpipcO+VNdJjDjCMtg5v/xGtrPDnvOHzcQvL+VnFvA2tQz
1tXYYeCWYjysfS6I48ALDcResgBsJq+/ULAO7vejR37UCD4qv17JJFibIahI5l8jnVxQ2IJ7ORrB
T76+vIWyR+T+ioDvxgx0o0oRNFyMzXoYf3ZirfCiP7x1s6UV4K969HgDx6a9H4LxX0mZRNE2cnlr
4QSWLpDWBaYcracRGP+rx/TfsdFuoK8vCzKwCSdEBvuGJnD44JOIGiEyGYAUGbj0BnMRbA0YGOhH
a98qkJSJSmUe+/c+iZnxGQQyw7UXga5pkr9sRMrjlbqKWLV2EKWODtipFELPReQF0BiBphjzUMVu
UhMpjyWncUHEc2xkYitBLBKKTEbJJ47Ozw75uK3AzJ0DfakPnxgJjZxR//WZyPuYE5w7ez215QTQ
bgA78lR53Vnv4oU3FwxAmlQHsJIKiqSX940Z8xuCQfv8gnuD+jkBNhsUfZqGqkkQBKI9o3F8ZUQM
ydIfS5hCK8lB9ohTyUg2ZYBqFHMV/PStmzs+f316VjtyeAZf2B983rLZF/JZdud8XWz0GvxAi00h
MSEl1wsRQaVm/VqO4E+2pBvWhl4d9vorMJnuxIHjk3h694OolgOkMoCWAC+YjWDqwOoNg+haswqa
yTA5NoKRJ49gYSEA1QHDotDtF04ZV7J8L4LnAPmSjb4Nl6GrbzU0XYfvOJgbm1DJgpm2VV2aRCEi
p6HmVWcNoioZEgixFCtxfzwWP9i2gePDPpjwun7vvWu/NzdF8djuBbhu9VX5lPgeaUhl9tkHh5Bm
qwUE+m2KHouo5bsRLX++zlJ2wQCUAefAotl8l94IXDmWzNB+4cqU/ob75ho7V0mqisTLi1EN8bgj
0L2+IJvE2wyvAceUfkGoLQyk55Pn+NRpzbKjNR+eQ3/rJ7Zk//i63Azu+8EE7ppkH1qdpR9uNzmG
G+F9FQ/MNAkyphQbcIy5+g7Otb+8zHBu3N4BtG/ZhLniauydLGPfjx7EyFgN3AKyJSbV2QjmIrWT
0uCm9SgN9KgKxtDxIYwePqZkVEYKsHPs1A0Tp4DHI456TcDOMGzYMYjetWtg2Sl4ngffdeCW6yr7
tTJpVRZUkx3CAGGtrDyfzKp12dQkqRweg1CCVXo+HkToW1PCdx+Zw/TELLl2Wzv+8QdDnx+brv3G
qoI2vZwZsbVEmd2fIUp8IasyssZ9LjHtcuyCASj3ZEsjBsRKAFBJ3Slp++m89Q9fr3l/vCfkT8qO
NksI7Exesz+MUGlw3Nia2uAHHm5KCX91mmBOyPRf3gAK6lM0wrgy48t92SriFlKr/PHluTq+9p0q
9jj6L1rp4B90KW8RFC6X+6SRqK0AzIcCMw3rfQXC/+iqgoPNm9rhDGzDoQbB/kd2Y+jIFFxJh+Sp
AlxUjhCFwOrBfnSs7wPXKIaPHMPI80MKDFZaejy25JIhwee5EWSFtmdNNwY2bUS+VIJbq8FxGuBh
BGeuAjNtwcqkJOcSb9fAGLjvgbt1pK14OzDXkwkOiZdfLfFMskQcBIhgI3CqePjJmccef3r6fekW
9kC+qON8bIlQwg+goAObMrEqSNIzxiXSu16UBzxxCQ58LrshZz1xMAzHdzWC9yNZ8rdCLhFU/T0N
3jdcD45rPVre1gkWPFKpB8C9dYGRMNY335lhyNgkloURLbs2Q78ppifwt0N4/HBo/sw60zsin3xJ
a8yFcqa0rIJIXk7rmazpn9tuOrfe0EfQdeU1OKKVsHfvIRzed0hNLAgzFJpUHTciBA7Qurob3ZvX
gzOB0aETGH7+mLqxdpoqMIizKD4l+OrVCIZJsPG6HegdGEAYhqjOzUG3TISOh8bMAtIteRhpCzwI
FdDkUiuXZVGrQpdpNRhcjyBjU2RTFLpcjilVx5UxYUvRwnOHPdQqDgZXmV9wPPcBh1IEvlAP0Nkc
CU9olk4d2JojcImAvM6XkvFdFgBJUmL59+hgvtrW/9c2i67+s+lGL008rTx+n04UKSo95AYK7UTZ
f/Ns2dshdWyCiAM90k065KTu6knfRw+TkxrUgMgrt1Avs6aToVIvlO4+OHuk4cdB+vq0zOi4uhHD
DeP13Rr53B15p3jtjk6Egzvw0EgVTz36PVTmffAUwC0G5kdKcJUq5dF1zUboORvTI2MY2v+cSixS
mVMAWOqSqSVX9vpWOAqtWWy+ZicKpVY0qlVEYQjdtuBV66hNziHXVYKRssF9X4GZkXioZjplojxZ
Qa0iUChoKKSAfFZGcUSR3Qr4ajWIwMwc6nMEOvVQc4MFuXY6XqTiyAAUJQb1wHhJRUOWHN2YPkR/
mqJD0jiI4F0Av7dcWxYAJfjyybizleq3krSKQdjaNxWN//FQ2fsQi8ToQNLMI5/iST/CXOKBOXCk
zdbf3Wsbb0yHlfCoQ26lTLt1oxZ9n/j8m/IilUKKbh5vHcsoy0zUAxSFjje8av2aB4/t+ckxr/Ev
7YQgpXHUPIAH9of6NO93b+vi2HT9FTiW6sEPHnoGz+07DmYCJEfjkth8hKylofOaLbC6WjE3MYHj
Tz6ORi1COktg2mcHHhLwhXJZrQE9A53YfIe7g7oAACAASURBVM1VYExHdWFB/V6CzSnXUB6dRKmv
G4ZtIvB8VU5jcSMWMmkTXt3B+NAkbB1oSQMpNScxBh7TY0ZUCAKdcYxPMyXNKmYijJXDA+1ppvg7
n3PUZd8yj0eBdBsEUr0/60UoMoJciqAzTRBU47k75AJ3ZiKL6spns/MCsNl4vSCEKresBABJssz+
ekl/pBKFM0/Xww+nEh5QLgOMUWRsIx5AFIRoBBwLVPxpLqe9x57n2kabfSoi+IeCHj2kJ+KCPgoU
WRy/hCBRPRCYKgfYeFUfXr1n/r888MzQv7QKH7Mu0T3f/OJVlvOW2zfZKO28Hg9PRbj/3u9gft6D
nSMQsvrR4DACoHtdP7L9vai5LvY98igWpmuw00BW7pF7DuAhKVf6XojAA9ZtG8Tgtq2qQtGoxT2l
hm3BrdYxf3wMrWt6YaQsBK4XVzgIIDFm2SbmZyrwFyrI8HlkCnEFhOlMEc5yHxLW9H6Co70jg2NH
GYpWGRMzbjQr8Lws6alGUEUQcJyQvc1q+itBjQN7GgF2ZCnKBsN8IJKG+OWbvGeykmIoJblKnc86
q+q8APSSpIMnILnULpgkx9hpab+/NkPaPzrqb5yIp4rGGjkA6zMW1vcWMR9EKGQsZG0dowv+PEKf
10LuEcI3r05F809V4zktEoDjkcDxMFQPTJqScNAkSk0s1cW33jhw24O7n4crZe2e/vQNOWfTLVe3
g266Ft/aPYQHHn5G6j5hF6jKTlEWaGsporS2D76p47l9BzA7OgvDXB7w0Oxsa4TqdVuu3Y7V69bD
qdcRBnEJUbNM+I6L2SPDKoM2MykEjqtAKwUCts5g6BTjx8ZhGyZ62wzMTYUA0ZUkX1ZNmMYUEE8G
ItxH1W9Fve6jr+Bj6HD9ybUZumBQuuSNVCRzIv+STtS7kFHEi0zqMMcI5L4o6Letr7/X4p82RHR3
tMSnnROAzdKW0qEt2hb0UlmTVbfAut/Rof/24xX/f9cCfnB14v3CJAYcrLqY2j+GWQi4/SXk8iVo
lF6WgcdGK8GhCcHm56sMk45QD4ssjZUswInim8cjEnCpGHECPHX/MxjsSqEd5ONzdVz1hi530ytu
vAwLHevxlfuewDMHxmDIbE/eAdljCwMta7pAixmMzExh8tiY6gvJ5JcHPCxKNiybYet116C1qwf1
akXVhBXANKZq1DPPH0ehpx2pYg5+3Uk8nxx9rCuObuT5EfXwbdqyGlPP7ILvcGQypqqCSHGqpsX/
QTUacXT3ZLH7WARLVECoheNjjfvzNjljeHt8kkl3HJMVDqLUBUrGcYEIFAonAq6Ua2v6H9iG/qbh
sPE+uR3aUoc9KwB58t9GQlQvxEqkH/Lz5wRwW4t2n8NCfG4uehdbJA6V3usWOaePC9QQTx+YHp/H
wKoismlzm406Rsri0IgCCpAz4rG2JBJoePHFU9+Diyku66oawdjDe7E7smCG5Nfe3Bvi5tfsxJDZ
jc9/5d8wNFZDqhDXlLUAMLUUMp2tqBKOib37lT5PZrYg5ILysVo5QraYwvZXXIdsoQX1Sln9vKkg
0gwdE88ehpXPItfVphIQSuI5MLalg0Yhhg+PoaWYxoYNPfDL86hOjiGdIdA1qmI+GaboOlXb78vY
L/I9OOjE9EwNt16hYdcTVTzn+F/JhEu05IkYbLJi0pJnSDMBU5ALHu0hEINswmHYnqXvTmvaB780
5bxhWESHzjZv4qwAlNWEMSHUkrZSSlnp4Xqp/lM3tWLr740Fd0oXHS360pdToCfhHnUQGFz2ZXNn
rOZgXXf7VaxBUWlgfI3N1fanVZersbmmQZE2SKwQkYQ5xYgO4RfTwnhkUsPotIvXrQGu/4kbsauR
xec+dw9qjQiZFgbKibr4Uh/s6gLV2Sm4ZReGZP4Ntuwm2sWZbntvC7Zddy10w0KtvPCCFlE7m8b0
4RMqZmtd0wO/1jgFPhn3+j5GjoyirbOI/t4WRUVVpsYQ1TxkS5YCn2VSpFOxGG2+HIFQKT7N4+lj
HFpQRqlQwtP7J0+kU3S3Ts7cGUpeJxnntRGqarziAm54UwvohIDHmaHp7N3XtvL3dttY9Y/H+V92
2eG3BCdSt7nk+5cEoEiWXNmjMLVCcV9cm6T4lS76pf1u8NSBGrk7tegLZQmwg8U7X5rJuRcYskVd
/zM7pP8nR6NcEISoU+y15QyaSD71sZeUHWJzZjwdQGbQDo8qJZM/MVezrq8u+HjbZcDW19yO786Z
+OyX7lVVjEKegUYUuiAIKIcv66quq2a6mDY7dWGW8/2kBi/kcOoCvYPd2HL1Veq9kmaR5UAZV8rv
adom6jPzaMyW0b11HSI/SDrmpOczITwXY0fG0N7Tgr7uIqgfgFOK+sQwLANI20wJTh2fwK1wWBZV
Jb60GSEwS5ianMdNlxt4dl8D+0Zqfy/5QXJaAUFea/nQXi25mEBg3wVMl5evbCRD23sy+GB/ivxB
SufI6hEWfIorc/j1Vk0//pmF4C9HzvIZSwKwub9GaQW9n0w8NpvaR7bnOD51lL92ezLjWU3WJXEW
Ne6fOn4cK2LKE8Fn2Yn6XTeszmTqQYhJH18cTMnGH7q+leKQDgj5sK3OCBypEcxIj8YETtTNw+uZ
d/2dGwQGXv1a3Dut4YtfuhsmBLI5BhZSdXNcGqkgnAoJ4Atv1ZafEfihqucObh3EussvVzSKLKfJ
pdatxIMi7Vxaeb2ZY2Moru6EbhkI6o7qCbEsE5HrYvLwCHr62tHVkYNwXOjZHGpjQ9CqC0gVDMzU
CJwFgdYWgvYsQzaruovRf1knfnQoBHFm0dffjfu/PybbBj6djvdtOnmuIplx2Gtr6NAojjpLpQln
mnQdsgac4WClFD6y2cL75PJd9sRTE5748+G6tk+j6Gq3+B+XQ/0vNJ/oBUR/stRnnQFAkiQbrDn1
/oJvwfkt3rGImnd2kg88MR/871EPk61YVBtNRI7plKEqDlw0LxuBD7ELfvAXGZP8xZwXooPp3448
o6MuwocPkugdEr/yPa8vEpQbHClFYKf+exd1fubWPoHe234C/zqt46tf/pba1jWb0UFDAkE5PBLv
gHQh22m94NrJMRxOqDLJzVdvRd+GDXAbDVUK0+TkgposrYWwshnotomp547DTFkodLfBr9bjzWls
E5HjYvrICFb3t6G9PYeo4cKyDKRNgpmxY5goS95SVyLXVZ06VndqqgdYak9TaYZpL4PRoXG8+uoM
jh1y8a/7F757FBgxghdmtfJB22pSbMlS1Wd9vrZgtQFQpNoAutamyQc6bPJLDEgdq5MvHK+JP3QD
sTcv20epkPzjU/ur4l6bkKuzOrc9f+mE5gUApMmIMZpQGctopb1wS2al3JzS/jFjcHx6WLzLlcA6
jeS0DQ2XrSnFVZhFVybPKNyAPSQpca/qojfH1tfD4C8rfvCnRyPZ7S2wmUpxgFweOCZ96wM35dyP
3Ngr0HrLa/GDahrf/PrXoYdcgY/IpiAat09K13uxD5wEj1MPVXXl8ldcha6+ATQqcaZrGBq8hovQ
DZAu5sA0DY25ivKGPZsHEDqxvkjGfNzzMXVoGKv721Fqy4NEEYqFFKr1AM8+fRjDR+Zh2QZW5YGe
doZMhsEP4/FvXETo7O/BPQ/PIq/V0N/fi699fQrDNecP2nEq9iNJY79Ud68psHi45TmqHKoOIvOB
iBY6TPYHLSYk8EbnXfGbJBKfnOFcgbkgiWwWT8WQD3GKSV8ZPlbnQiWWS9kLABglwNMWzbm71MbV
oB6WfXUneeueqv+JSU7UbuD+yS8bX5y1BRsFjWLeDV4ACgmUtrx1B3M9PDUafsQw+G+nDQ7qn7p8
/USocbeUWe+53PQ/cmMPR+6G2/Co34J7v3EXZGtaJqvHVI9sgZQZ34Vu4nsa+OqVCHZGx+U3XIuW
9g40KmU1hEgzNQSur7SI6WJWuXepXp4/MI50S1Z5Qq/mwJZBXRBg4uAQVg10qKVXLskzkws4Ml8D
ldWS48+hJyfQ3UHBZJIlW5P4qQpFZ08Gz49rKM+U8dbXtODwMQePPzV/cAB4oEhOPVyBSswIukoa
qnJTx2RbsBeCLl6KlP4vYHaPRT7UbbD355moPFcLfmrcId/YmBUo6gSVEGDh0uIUiuZxT00jW2za
qRfGAJwjRHU3rVTsJ5+k16bZZwyd474ZKLGBf9rwRnmSGR6iMVtRwfxi80CQRTA46wWYAfleSeOw
QBV10PyScm7+3jnrXWsywcduKkWwrrgW+61+3H/XN1CfqSOf11WXWewwX5yeDQnNki+lsf0V1yOT
y5+kWSQpLALpqRtIFzKqPiyX4vLYFEQYoa2/WwHTMjQYVGByaFyBT/aJHNk/hMgP4Ts+Wjpa0Z4K
UJ31IJim+M0soTD15MxpvOzZbW3Y/0ANG3qBUtHE40/M4+6Z6v/gSUjVvMaSKH5lSQfRAM/hSJ9G
8MafylFzCfI2+7W1JfZx+bB6YfCJssd/VfYyS0JA7rX3Yqd5nASgmyQdW0W8zdVK9LjFA3tY9pUd
ePOehejTQajVVIfRooPJi5VjAixwMOq9cF8N+TdZKtps1q6e5xRzhB2Qpbk5IXDQPfUZ05F+23rw
T9xQCGBtvAwjHdvw0Hfvx+TQDNJ5qnR66vNepNdr0iydq0rYdv210HVDgU/WY0miX2yUq0hlU9AM
Teny5FJZGZ9Fobs1JvgZRdpiOPz4QQVQeUMnh6eUhD5fyKB3SyvSKQuHvv+vcCXxXDRg6lT9d7IH
hhD0DuTxyB4HvDaHna8oYGzMx0OPzx33ga+ai0awSXZjihEQm6nWgdO5PiEVLz6HE2j/YWeb/okU
JZ2Vhv/ovkr0C1nGn9tiyyoJUcPVL4VpTWCEqu8WWHWWfS9evBE4Uhpks7/MUI4T0/zdV0oR6Wmx
n7xAwwI44CQT4xftq6GqSxHJuT7bNBEwL0/CiXTczY3jyZamJWibriqR791SdGGsasf02hvwxO6n
cWTvEVgZokakvVhWPRYUcLh1gb71Pdh81c54MHm9pjJnuRhpuob6bFl5PDNtKhm9lU5h+uioUquU
etrUEptNGxh97gSslIm2npKqhbV3tSBlx0uylE+PPrsHtakKMkUNOiOwDarEoGqkGmNKDTPlpDB0
dBa3XmGoGHPf/jJ+MF35jbakrJmwCKgLoebKyB7kxQ++pKxkN53rkZ39femPFnR2e9UJMLLgvaPi
h5+R9WXZTnqpRy4rAOqJ1v8w4nFqK2HqBoHhne34xaN18d1nOHGKpym8SJLiH5ATns6ieuiEGAy5
JmkAFvnsKtsIHs/ZsYrD4dT66YLxwK0tDbCCherW23Hw+CieeeQxJYOXJasXO7mhKZv3XWBwy1ps
uGI7Qt9H4LrKgymvZhpK0SLLa+nWPHgQKK8opxPU5ipoW90BEgbIZ02MPj+C0Atx2ZXr1O+5H6jY
Uapdcrkc/PIspg89B8Mmqqc3ZcbeT23OIzsBNaEkW/c87GBtR4Du3hbMzYb4/q7ZYxT4qpqRIBOO
JMQ6lHwPfdH3kbzkdAXFUt7+q62t9s+mCcf+sfpnpqv+e7tsXskYFDXvxU0YO5spAMolN08IMmSl
ZPbxZKguRv5rzqTYVSb/rWhG0E5z/2pzFlPDuwZbVX8HPy3AIFIzFIg7tmISM9VQ8yLrkXmX/pwR
BV/sZwQF3fz2De1uK5P7rG25CWMNjid/8AD8AEhnLw343GQc6tZrtqieDc91ECmAxeDTDYbI8+FU
6ih0tKjZafKwdsbGxOER9RDIqoas7c6MzKBRaWDd5WvgOR5EFMZlQDmdwFaNxxh75gnVUG7nTGQs
ImvgCOTwIp2q69HTn8fuIxTUrWDHzox8C/bsqeJ7M9V3YpEERQJwMvm7otVJnGRUqiEKBfvdq3qy
f6U2Zqz5818+Xv+vAQm+vD0vx4DQFZ3zrcWaLaVyRA5Lb6ZyKcwTBFe3kD+qBpifqfJnO5eYXCkB
2JUycFlHFq4fnQEY3dQwVRGeMDna3HGsLbraiar5BZOyn99hsBPb24Ib5dNb7tuERnE1nvzWNzE7
7SBTZC9eS5tkulaKYdu1V6Fj1Wo4taqiWeTyRBKASqn8/NQUUhkbhqUjcINYHhWGcOaraO9tU/Xb
8nQF06PTGNwyoMROQRgkO70LpG1TkdbDjz8Mb66KQoumKJQgokpuJUuNuq2jUNIxw4s4MTSNV26W
A14YanMh7tk185gLfMdWoRVRYc0EiZdfJJ5QXt+gxge2Drbc1duW2jY6VT9x4HjlZ3IEP2K6nP5L
kxh5ZUXIygNK7yT7auWXl17nUk/ZkF+Bcgy0ZfX8w3PRe09EAabPgvRKI0A0UkbDD88ADWEEVYff
u+WWtX/WRisY2VfHtpKH0ap+h50BNmZCTFITvO9y7Nn1IwwdnkQqdw7N+TLsZLJR5ih15LD1uquQ
zRdVskGSbfyb3WiSVF4Ym1FBdL69qLya/J2dNjFxeEwJBjr6O9BYqCllS/faLlWO86TsSs75A0fK
NqGbFib3/Aje+BRYSkMjYEjLAeUmi2cHpnRIL293deKpvS42dbuqX1gC+8En5nCi0vj5Tc1Zh0Rg
AQQTCZmvSp2RkIMxX33n9e3fdgTBD5+e/XDZcT4kVyeDSMW3ONlhuNKmACg9jxztn5UzPeSJvojs
cCmTlMdqm/43apk40Wj8bT6ZYrDYREKU6n6I8tCcaolsNLu7yCkgV7k4+Nx0eGTDlg1rO/buRgUE
Uw0Nd/S6WKgD9fXrMT0xg72PPQs9BSVTuljvt7istmpQJhtXqDjMqVbU70hS2FeeS6OKUG4sVNHW
36Wk9VJKpU494qjNV9HW26rKb8cPHEexo4hiexFOpaHO0WIUxVwKuqbhyOOPYUzyhHkNNmHIWRQd
eaZqvcLQlHKlY3UJIxUbrWwaq3pSClnlUR//9NjMp44ABw0SV5BaDYodKYqW+VAJTitCoJDSLn/7
Hb3fHp3wcM8Px37CMPi/tuY0+N7F6f9ejJ3MgvVEfFBTpOSlRb8kO7e1Wv+lzrRDpRRz8/TM2Q0y
zsgbDE/NNvB0NVCbAEo1TIXHWbmdnKPcz+PJ7z77vtt33vbVy2/vxl99eRybuwOULIEnK8D0XB3H
9z+mRAy2cfFLb1NAKstqG6/cpLrVZF+GV6snNEvs/UiyCY1pGRg9cAJ21kYqa8GrNRRPJj3c5PFJ
9druwW4899hzSOVS6F3fqwjqVMaC8D2EroeRqo/JI4fhjAwjI2u7NkVbXkrjmWrJFJqmtPNtPWnU
jA7MHR7B+k4oMUK7zfC1B6fmx0P/V9oWMRuDcullBAfk9RNxReJ920v/NjzUwL27p7ans2SPRdh5
u+NWyk7ygPH2UlSVvQJc3JiFpUz144eiPW0axjwnH2stGDBPowAkSHI6w5gb4Bk/XpvrNsVgXseM
E+CEJ7DXjeX5EoQ1z/3avY8Pf3xH18Avpo2x9LZiiNGGlI8RHHvuqNro2Uov3Qa5XFOVjbSOzVdf
ic5Vq+DXa4iiKAYdjbcGpAmBb8iJB1PziMIAbatjcpk2x/RLrzNTRudAF8YOj6FRqaNvYx+mhqdV
eU42nNu2BdMwML7vGdBaHb0dBJkUUfo+NYZD11T1RMrBskUd9qq1eO7pKXSm6giQRmuB4cCzdfzN
ofmfqUNO9o0v7ia13zGwn8d738mf7ui039pVslr+9u4T11o29liaDsKX0xm8MnYSgEIJPqUWjiKk
/AW6vBcFQNnWl2Z3ymVq76z3Ddkk84Lt/EXCqxHgoZlTFUM5atmNBEId6DAIrBTFAxWuyj7yTX99
3/73vL235adv32yk6zUfQw7BlEvUeA7VE3sR4DvZIF4VaO0sYOt1VyOby8GR8R5J+m0Rg48k7ZHy
Txm/LUzModBROAW8RGg6OzqjiGjJu02dmMTA1jVoVBvwXU9lwqW2FuSKWVQO7UW7VofVJQWmGgSj
ME0NqZSmKipySmWuqCM9sAaHj1bRwqeRbUmpkpqoC3zpgcnPOxDflvuSyGSjmGy6OBfFDiWV/Psn
1uc/Pj7pTDaE2GU1gHIQIpUiyKSpGuv2720vqAX7ECjJmEnecCOeWfdiTcaXa3LsNa4QYsrxR6Q0
SiyKMWUCVNQZ9lZ9TLtnyh/CKNYEbjUoZnSBZ8I4TmmNxEc36XNttohw1ANG3XjfDikL0ikumDpY
TC6vXteLjTuvAGNM1XQVxRITBSeBp+auSCI9Y2N6aEJluqWuFjgV56T4l2qaApqMI2dGp7H6sj61
JMvlVGc5ZDJp1WJ57EcPoTYyibxUY+ua6vGwTIZMSlO0jgRfa4eNMs1j5FAdJcwg02LB8Tj6O018
87szjYcXnF8cSDxyu8UwaBA1varTopkhLsQcUL/Z1javajE7v/zdsddryRIdBhzVcuwAZF13xWiQ
s9gZcix5/FzAMOzFqpWL8SSLTW5LUdShtejR8JYWA8Zpv5dgkQH4I7NL6yWaUJ2JBFoEsFYp3rSO
13ay/z6Q9TBckVsgENQjOS6CXFTcoMAXRHAbwOC2Qazfvh2RHIlRr52M95o0SzPrlT+QVQ8RBKhL
cnlVOwJvUde2VP3UXXiO1AL66FzTBSNjo9Fw1XdmRhqeG2Bo1w/hTc6hUJKtkxo0SbHIpTatwbI1
xfW1d9vYN8owNV/FjoG4sarhEXS1G5g86uDPd02/aRIIFc8nJ0vYFFaGwYmE3C0911vHf38awXvW
byn8vdMIgsfqwT0yTnSTnmvZEltdiOCkKNakLmC22os0cTZBqhSGTjcCPHVJinIEqwJa7hEYy8qR
tIuyApmldZsaHpv3UQ3OfSzZ35tjHBs0OfebvveaDh+cAtMBMONDxX1B8oWiC4hfm55PZrobdmzA
4Nat8BuNuHrBToFPejxKcFLZIR9MO21j7OCwKn3lWvNqaSVJv4gst8mWSgnArrXdSOdTCpBSq2in
06ph/Oiuh+FPz6FYij2dlNbLzjbp+TJpHZoMP1bn8eCzEYZPVPDa69MKlDWHo62kozwd4X9+a/R3
JgT/7uLvpFVDDFVCpTDiwFiXZcy/yrb+pb/HvvrBx2ffaSXluabJZEmCcLTB0Wdr0Kh4wd7Fl9KS
uVMnY68zACgSKVYP5dAIg20aap+yizG5WMom7E6ND2SKtnNlMY0wOLXMmjQuJ/3+4TMF23JJCCKg
4isy1bJAL/NcUkppKBoaefuh6ZjnmuRyjzaBKicnR4tdiBNUKo+GwJrNa7Bu2za41RoED5Wna36W
ollOOlcRTyegVHWu1St1lWBIL9cEn+qpYLIHOEDHQCesrK2mpMoOPzuTAQ99HN31EPzZMlpadRgm
VQS2JH9zkmCWU/pTQLarBd95Bpgdq+DOG9MKpJU6R6mow3UEfnDvxDd2V7zfX/x98jpBb5apSVYp
EdfSZ7zwK2/c2fGMFkT1x0cbn2pdYvKpBKTM5h2Xq8GfF8PENR98LR7KdVLZdPowJtnhsFanKATa
2ZuS5IR5mR0ZXhgPormIGr4sTs2GkdzzLFso2uWuVVm17CBJTuQet988MAfPDc/YLpVIyU+efGB7
hl7HTHaNZWjtKnaKu2qw4KfR8DmqswEW5kNUuQ9Ldi2lJBCXTz5LhUnPmi5suHw73Fo9Bt8i4SZL
vB9ZFP/JDzdsA2OHRmBaJrKlrCqpyQdOAloOgpTAlLObU8VMXA2RUxhyOURODUOPPYSwWkeuJGVh
DCGPm8qLGQOdLQZaixRBKo+vPBzC8st4yy0p+IKhXIvQUdLh1QU+/tWJRx+crr6JN7fISL7PqhBy
l061h13zgQk4vO5WDXv3zf++TE56zjJYSsbPfj1SjfjtWSCvxQ+Te/rrFl0LKRjxVMIYh1MShBL8
TRKCo7lpuUCHTrDNoGjXCVo0gtF5enYARmqkBVVjXEf8AHKomHWBE7FkDJmSJHSLVZqqYrxy3AFP
lloJwIUqwejzFbwy4feahR+5lX2LwT68ozP8nZIdp8QsJaCZIm5BtDTViB0xhqrMfmdC9WWeHYtw
YGRa9tahWEyIknOcsCr1caCjp0clHHLcWZPbQ+zEkmX3VBWhuQmd1/DQqDho7+9Qy6xIGmvlcuy5
vqJbCp0typWYhg47m4EzN4Njux5BUHeRypuIBFU1XVnBaC3oWNVqor2kYbRm4L6H6xgourj1xjQq
DoHrRWhv0TFfEfj6NycfGZ6u3jgBYHHZ6hXyegN4LopDEZHs/v6mtflPCCbw189X/yKb1ISXuiyk
2Q8UCYxV4/uw3gAsGu84lWfx3yU7ET9s8bjgdo0onPSngf0Vgf0V4G1dBIagkHs5y43Lb0kRbJNz
pAnBSADMytbZpNX2LDcn/nMga0BzCfY2fLXj0IWM0+LJHOGNXE+/osXU8iULvqrxQpWVZDY8NF17
wRb5cUOUtvn6HvxOUY/QCIgaliMng0suTNIOcmmuN+L6nm1TbFmnY4fBcHNo4+hMN769awa7j4wg
pUUo5NiSbYbNG6D2tnCdmNMjp5aM5rJLF+U18ZQrmflaitOTdEmmJaeqGSffSSimj0/AzmUUpRM6
PohtY+roEMb2PqniimzRUtWPtEWRsigKaR2rO2x0t+t4/FCE7+8u46ZtGnZuzmJGjn6LuPJ8k7MR
/unuyR9U5uq3tEoXUz+Vsm6RPdwAjgLIyitI4hjbBnpetyV321eenPtIK+BlMxoelyO+zlZvXXQj
nl8Q+J7UCMhGfAoMyGGXOsHWPMWeOYHheY5b11DkQ8CPCFrMWEghtYLSG6blvioQaCPAepOoSVtT
PJbsNw9/TjwFqs9AYJ1loEOnGHZ9aISrWGY5Jl9VjuRgEI1KdtQWYTwlXm4zJTgePTSLA24Ek1DU
lHcVapbLzhz92Lp8gIoHtb+GJ6ukauiOgJGS06AIBroZ0jaBZD1mq/J5DGAwF1f2ZbHjslV44sgq
/PPdz+Po7Cw683KY4ikgkqTPQQFM4fdKYwAAIABJREFUA2aGh9Dd16/KbFJcQJvebzFSySnvJ2O7
RtVBa08rfCdQD5K8TlL3N354VDUIZYsZiJAjlc1gbugwJg8cgBS4SPpEjtdVDeVSjJrSMdibQqmg
4e5HGjhwPMCbbkxhYJWFiblQHVZ6vtHxCJ+5e+orlXr9bataGGacU/dgHYArhVCbEMrEicvuvpAr
8vnajszvZ/MMeigWXt+S+vVDfvhEDnhoifb0M4Ao4zi5vMo9PyrgmPZkxx7DW1soDi2EcJI+Cgk4
yXbIZVgkMaDaoDuJA4Nkw0e6BElxTgCSJLBciDiyGsW1bZIWiMBDsazmZYnTmidQyNL0nqGq9535
huLLWiyKsOFg/3gDPcZJ/6L+q3Otf1tB3CaD5+fKmiKXLRaqAFnNOxYUh8Cw/6iJNf0pbBk0UTA5
5iocvtBx/EQNllbGTRs6sXPrTnzx7nF8/QfPIKVHKEpvmIzyjXdcl725BNXpOubGhtEzuA5OuZwQ
zuQkSa6Mxx7QSJuYGppUWW62taAyX57IreZHp9FYqGPNjkGYpqkAPfXcsygfO4BcLi7LyeQlrqYQ
5NM6tq/JqOz3M/eUUW0I/MLr88imGSan4znQMuE4dNzHP39n8q+rrv/uYp6pfeeaJqdWXCeEWi79
RFjawWRmr6pGxuu2Zn5+as6LBlqs3zpBg4lg3N31GpCTS/S5b2AMQjXKiBK1g6dUxI258bZe8u8v
VmW07PmAsgAuA9u9EwRjAUfGiG/QuY5PVABKcHMjqIehn5ssO7A0gvpkIHc7R9aWWxSEGPaEEpTK
zKtNwzv60xwTDaASRejMxGARSVAtd5+UurnpBQ+PP1rH80fT2L4li01rNNRrITxNbmuqYe9TI2hv
ncGv/eRl2HH57fiLT/wIk7N1DLQT8DApoyViAVlomD1+VHlBXe4Ew6O4g7YpguA4GfNIJYlMOHLt
BeUJpVJGtw241QamhibQt2UN0rksvEoVJ574EWpj40qLKGvFLNlgUIK3Javjqg1ZtaXCp+5aQC5D
8ctvLijgTM+FqhSXy+g4ctTFp+6d+FVXhJ9YUzTUbkeCxuNLZDvDq5IJYlqiJpcdaOOUYAjArZ2p
j/e3G+ST902+rUcn3/RpEBFNqATvfNZkStRqYBNYtpw2QeE5l1YncEETUkMRB+QDaYqANUdfnEq3
z/Yet9qY0hnt6rbjuykvIDMZyr5AX1rHpCxTeaEarrgtR9/UYgb4v+V9B5hlVZXuv/c+6ca6lau6
q0N1pJsGmhwUEAxgFtOoKA6mGWcMw1NH583oG4xjeqPO4DzTMKIyOgKKEhUlh+6mG+hEp+rqUDne
WzeduPf71j6nmga7kVCFlKzvuzQ03ffeOmedtVf41/8/VGS691cOccjRp39wzYKfBZobIoxMTOH3
v6thZLSA00/IwGERXF/ByWQwMRVg/PaNOOtFx6D7Sy/H5V++DyMjI+juUAiD+DzQDpgGyiMVFAcO
oqN7SRwFE+97bKKi9PL45NC4/ku51gLcqqudSXCBni170baoA53dXRju3Yu+RzbpSpdocHWDWcSk
lTQmbG+wccaqLIYnI3zzp+M4ZZWDd7y6Ef0joe5JdjQZKFUUrr29ONHfW3qLycPfM5sfAgxoEkkG
LLA5DjCmk/kkhxVNwvgLXvGvpj/3zjMaP9A3Uh/bM+H9YslCB3khMDyp9ArlkzGMqOnrLYCmPNOe
TT12L4ydfSbnxk+fJZ+EUiga1rguAiiKcHVkqS4NsaLN0AAbuZRv3VuMYPoKXfTnCecWSixwDLys
wcIjfSV4kueWZnA8pdak0UHJKgkwq8Mby0lxQJGCcnDixytEIXq3jMEMPBx7bC5hs4v3MmA1YP0d
O7DqhCl868vn4jOfX4+BvfvR3aVQ9RI0C81TDYXRPTt0RUxVKwELDh/R6+9AgtGTlaTAiG8fzXl7
N+1CY1szFq/pRs+GdRjatUPnQYUWW+d6poiHxsRctaA1hVNXprF+u4fbNlRw3hkFtDZyjIwFmmzS
ThtYv83D+i1Tt4+MV9/Z1sgHClkDY8kWkEYG0QMggB2h0hHPZodQORExta0qOFeszYrC0kaBy+6e
+uwBA7hx1MPZrYbe2z2aMcQ0HWSkc+IRPYWpD4Q45ZoFloJn9JZ07bMEe4+Uzg+nCIVMa3xMaiap
6Rf9dw2SOJh/lzWEsdNn7ftonS8vzrEY/xzdpAla1rYFmgyOJm58osmRWgprxCclR+gISLhAooyl
AoXuA72C5EWcxVWaYzYA+3vKGBj0dW4ynZxQLpltKeDRLUMo79qEL1x+BhYtW4yhPiDvKF0UkWBg
lqh6R+sY3bsD6WxCYqEew8cRIGBqrBhL3Hc0wnM9ZBoymDg4pjOqtvk5bL3tt+jftkMzFhQaHdgG
sVVx7eA0213ZlcFJy9K4+b4qbl1XwWvOdHDJW+fBa+zEtr2+bi5//8bJ2q/vHfuk69fPX9JmDLQ7
hm5dkPQ/ceAQcykVdg+5UjvFEsWQCiQsUkUKgR01/yojY73x0hc3vuOhA7X6znHvO20hwOsKB6ak
jsTiMGHu6ZeRRNaMydDWzOBk49NNzvJs+Bn7NN0I2sjP8BitS4kqXRBLPeFFiAxPXZc1FV6SYR+f
lMAtrtwvIvaeJY4otWfMd065siMTqf+1Isc/nTEjVENKdBmGfKAYJhXWtNMllRn9GsVtPF2NDdUZ
lh/TgHlNXIvTTEcolUTtpo4CenZPYGLzBvzz5WehfekCTAwAaSuOqPSi6cPIjj0ojw4hlc0/th2t
YmRLebKspb4IZJAr5BDVAwzs2g+TTWDPA3eiMjyKQpOpKXRpc416fHTs5jMmTl6WxfJ5Dq66ZQrb
D/h4/0WNWNLlYMumYbzqomUYES344o/6vjE5Wsl0NgZfGTQY/mcsxC3FEBtrEsOUG4YSlVDhPldi
ty+xj9SJaMfE0Zpbh/Z0/UAOGYbCvZsrH35pg/CPtYVeua2UpJ6kEOaTNpenDnsRTV7ZBJqboTXf
aI/mOaAEf3ZKSSpJ5HM8rsw216ND1B6HmwtZ7PbCDSc3mx9/qIwrslG0ryfy5sPk161ta/h+plDg
p7Yys2dvCRlDohZyzUZPHfgqzSmF0jlPrIURl/OaUkzEF6rmCpy+LI1VXQaqbgilF8BjAWhK5qXJ
UfaBzoUF9O6dhJXaiA9ddhq+9I8lhOUpWJkkEqSISBLo3bgRx7zkfDiZHGqVqvbgeoWIgwRaFy+A
aTCE1TIevW8bgvKA5kvRk45GW/+/tBXT5BJKuilr4YSlKQ0a/Y9fTqK5wcD7Xl/Q+dxERaI+UUY4
OITXfeB0bFy3N7eyJbpsZNKcPxUG14+G6u7RcDoEMc3oIBLenOnRVJsdU9jRuIF+hhbFj33pCntN
sRaQSsAx7Slx9oFAPkjMIfkkoh3Jr+haq6QQpI98rvCBz+pUZ0mRQV2BvGBYnjbQRrsLyQ5qKsGh
0Wv9kPpowVRYnTGuHqC/15pBkE298YDK3v6ON600G/M2ilWJAjmOZPAk00dvVXO8MJ3vlZO8z09Q
0uSkE3UDpy5KYc18hlI5IPUkjb0LPIlCiqInxwMPFHHfrcPY3R+ha0kTdm8fQq66F69963GYKMWO
nfSYtaZHdaKOXfffp9ct04VG5Joa4VUDfZQG5VH0P7IO62+4E97kAFqauabVIF4XxyKRG6GPOcc2
sLA5hTN1sSHxXzdNaVnUV56Z0U3a4lTc4yM415abH8aKToY3v/us997U6/1fbiq3levOip4irOAM
S2nMlkT8w72D0hKPijoSNJQKC5vtr69eaBoP7fbhKvNvByrWD0YYO3/SYhiwGIYshuEnvHoI1GEw
5Iha7E8Nx3q6xpIbV5MKraZAJlKYIoJlKgCIeyViEFYIYav7e1zZc1xanTnkOhc25zK3ZOzcNf/w
0dMvDB/ZhAfuG0Iub+qGMjkYCQ8GCf8cnaYOjzV2SQlI85XQUeNxnN0usKolxERZgBk0MJT6gWjN
c1p+x90PTqDVq2BhCtjw20FUX9yOU1Y2Y88jB3D8scvR86Ju7N/Yi0yLVnHV5uSA0tAktt/5O7St
WKmbzEP7RxB5RZT2+BgdBbJZA/M6TN2XpL4e5XpE9E2N4IaMha5mC0s6Hdy7vY7bNtTw2hdncNpq
B7etr2PxPFOvWE5OxbofQdnF+h/dibdf9iZse+hg7w/u3flPlPyf3WrhvCzH7n2eXqusMeD6J/D2
6DrBZNil/4st/fia9AVjkwHW9dYuGvLlr0/OK7uUhnpo/AgQIRnfPJrInJfiyEuplQAEf+7w0TNW
18Q6sQoVqWAwErEz9StrmHpSQNXaVQflhxrbs1jdZP2YeeZdH7n4mDcV712PX/54B6K0gG3EEc6T
0zsNBBNiOgckAUENuVKx+MxQnWN1g8Cago/xYoDAi/RCNzWrm9PAjj6JX/2uiJxXQWtzzLS+phBg
4+1D2LovRGNLBvXBPpx8QiNENgURTjdd4/uUztGIzsfBh7ag98GtqI+PoGD5cD0TDQ1pdM+jB4Kc
jiNLIzXK+SyBjoKN1V0pLGpP4fr7arh9Yw3vfU0eZxybwgNbXNyzpfSf9983tZ0EUYmqbXzSQ2Q7
GNq4B2ObHsVnr7ioe7WT3jwZAG9eZKLFAkapHQUcESLFOX+5ySBoLtyatf/lxE4Dv99e37it4v8y
YDKyDNSERP0Q5/L0K4oJrldnLZDy/HIzzrXlc+d7h/xmRk0nrlxBiAg5Q6LBkst8yU84I82xIitv
eVBmrjn5xPnN81Lq7KG778d3frwHYxloBAbB9UtBXGxQH47wpWHiiIFk+sgtR3GBstjheFFLiKlA
wfclwhqxE4RoTitsPQjc8EAFnRkXDQ1UBEHngJQTHFsIse7uMYzXRTzgNyexYFGWRAx10KbjkWjL
6EUw9YY8Q0ODge52gVrdhmAmls8PYZtCz7MzxFRgkEKRgUUtDlYvSsN2DPzgphL6hj188uImNKYF
fnD9+A3X/G78lX1F970/Pzj1iuvuqUqHENiElq5ECA0bd/3bzbBMF1/+5huPOx34j7In1/ZV8Lqs
YBbtcVF1fwwBNZRCg1LIQmHYlWujyLr/eJin/OVK581DlRC/2e9/lRx2wJO6efy4DlmSaxAU7qIF
Wby6I6ULtvHgMMDFc2hG0uOfMT4YypNEGOkndj1NTDistZb9OQIcnduI0R2DU10lK0BxqIgr9gRo
aCFqL4Eg5GiwA+1kdARbiTNL9lh+Rt+UsH+2EjinOaa59MJ4SYig5Y3Sx+6DBm5+tI72rAfLiR2P
jm6KqkUfaCkAmQM1rNtUwWvOyWj20qVdJnp6DDgs1LsYh6BntNVGRYXJ0F80oKTA8YsiDRqVSujP
NQza1TXQlDexoN1G31iEa+8oYWGLwFtfWsCGXS5+s674137N/05z3kDNM9BTC/u/1jN1TiSz96zu
VBithvAtA9laBbd9/hpccMVf4T33n/3Xv/qvu9/e1WH/h2L4zQS1kxyBBVWpKYuDJG8tucFX0x3W
R16/KLNhRTPDPXu9UsDC/5lPbVo6NVyFx616MGBRxsK5bWkscrgWcaRFtOfw1H2cGYyJVQF4i6Gi
u+MR/bMzanlQ4dBBum42sGkq2t7J/deluVPqtsP8mS1V3L+rijo3UbdSCKvhnVPSYC0OzlHc1w1o
cq00T/YTDkM1Emq3GHC8uYVhXkpqJDTX7R+mi5fxEsOv9oaw0q6Wua/4caV86D2o8nSB7lZg2/4S
egYdzC8wLJ5voGteBsXhku4nKj1TZfoopxxvy6CpC4zTl6l4zyOKZ7m2JdCQNdGYN9FUMLB+p4s7
NtZxbJeN809M4Zd3Tu194NGptzBHbZrXYKASceRZhE4CCyh5744h723SNX/a2gZM1iJ4ZhbBhj60
f+vX+MBX34DeR4f4D9bt/t+ubajXz8+gPDSFEokfEvmjFaO1baWwfdT/yGtWGtfRg3Znj/99qiVW
5TlkXWLDSKgflOMTGrzWtIlTFpNgdoRhN3rSKdZzYbwilc2UcY3BrU+VIjo8lb6pUsWYwEPwJDwG
xoxlQBNxZD0zVYd+P0rWOunGvygDLBIcnsHP6cx7gxnLx2hAGhKWpiVbXQgxVeffGXXll5igywVU
QwYPUmPPDOIlSebAdKlKIcNJjoEzGkMNxaejOtTaZloLBDcetDHKfY1wIQ67agKWnG5ehwlwktGU
PvCxu6cOy2R6vtzZ7oDEr3OWQltak25jsmZg84CF5izDmctiOSwCu9Kx25gz0VKw0dFqwbAYrr27
gns21XHR6VmcsMTBN68f++m67aU1C5vYpuaMods0RZ+WwhnePk/gvfMNLMzLzZUIsmcwlgGbdEOM
Wzbuvmo9+m+5H//0g7fkVjU0bCIFdCdnEKKlqyCYIKAoo9GYCzAPGKvAaM8q7BwO3Z2e//HVeYYV
Wa4JnjzNtQgsTIALrUk/tRzKP1nUO9zEsUINBRw/W+BYN3bZ7JJ6xI26ZLXxQHpVcLPNMm1PMVMq
OMT3PVKngowt5JxnygFTJudNBhMdYcgXScZXRoov5wrdjsVPqyh+lgzMv20yrW915f0WXzL0TNqI
JHdVxA+e1+k1tjn8jY9OWtvbst5GxtU5dV39KnBpYNzjqPG4XUHL7fOUg5e3krpkpGUBppG5pBZ+
14CDe8shck6kCcZVMlaKklJ/+ljVOyBJg7wccMyf74BHksaAmBr30GxHGKua2F80UaxzrOpQOK6L
8lGhR3GOJZBOmxooQODRfcMhfn1vFdJTeMNpWc1gMDjk4+ae8gfrhtpbVxx+pHRbigqpCiSoGU9L
VjUlXpoRWHRwMvhbBNbb0oncBDXdD9y1A8e9aAFe8YbTOh762eb29sbUabVaeP6YG/5GP/EyDgCE
RDy9zf7ZSQt52+Z+GTSbfGmKGcuiSLxiLJJTEGqIKnOanE4q6g5YyGRtzTxhadIGpVm8CIRKDps1
Y3EgLXGmcX3xwJxaY46WAIub9yQKRH9naRNxBTLdtcgYBJ9T6HOZVtesBXHOTul3PoZK6jHpNB6w
7nEYnZbAIBevWJMO0W6iu9cUXyMev1UZR0cNziQE3VArQmRGWMKg/Igz2tlYRAxNulIFnHR8gFN7
gcPUv+cHAt1pgvBEOFByoFT00wYT32022U83Fd0X3dZv/Msly4NLxlz+/tHQGGSGD9pL1yz5VoBF
loOdvomASXTDwslNEaQZYKAe8w8T0yddsB0TFm6fVDDtQLd9qkkeoRJcGj3xtBNLC1+aCjoE0g5Q
rHsYGYuwsJWE/BiEQxJgUqNpFjRILG6Reg+DnJMazNwUusBoKhASW+F3D7oYGAuwosPAyYsclKsS
A4MhSI1oWZP5tl1l/54BV6HDBkbpuAuAVpJWCOIvxzkOVN3oZxkruH5vCedWI+vW+c2RUzME+twI
P/y7q3HxF9+E//31133wx//+AMpRkVkp2oiLUErWT4NInHbiArFmqCQRRiK1KI13bx+RP7JS0aOP
KvT304kSxi2Yc9oymNecgR/FoOCne/byZEXSTOQvaFSXNpR2VD3GMyh1kfAUhxPrAmnYw5MRfhgV
ZX632zDfPxJEuL+sbsrysLfV4MdFDCsJuhcomFMEwQ5iRlGe0CkTRRgdy14CNFSSjYUKw0qqAV+p
EYuxQaGC4arEYpupih/gJqjwLmWwVOjzvhqTg/cU+efOKpqXvHKB1/3rAaN7LIyH6iQ4TUCHxrSP
VTB1FOtMRTAcH5M+09MQeuLylsKUx3HHmMCUcNGImEDRTbq1kseaZ3q3OGGAzSXXItRPYISJYoju
VqEvlm0LtHCJBa0k8EzM8QKhEhpQ4KQNFPKGvthbewPsG5KYKIfusnl8eFmzWLR/0EfaYAjqERrm
OZjfmT13ZziFbqbQYSoNTc+ECruqEv2IBQAF5P1Vpu6nm7fMDu4arqmTPM/+VVuTXEaN7X0VD1d9
6lpc9LfnYs1xjcWRiakLuxqjW8oDZa0MTxF1Wcb85MoWhXv2KmzpCz69LO9/MW1KSTlrbYqKNHIY
jrctbcbyjI2+mqfbVUezabDudLoVp2IxI6ob8lam0FYO0OkI5khDpfqqvK0aMNsNmeNFzK74ImwU
rDpcR2YqRJ1D7pRKbZVK9opDTvxYrUH13He7LPntbfWotD8IexfRKmLIsCOIaLnZGGY8dX/Zw1II
rMyYTe2FMBUqqXO8yGXRg0U1vLxBqFwYlSdlzKpUh0ITSUfRjJHkrWjh3aKKDdg1pixp+2/YGlAy
He655qB95Sey/NJ2W2KwzOAYCVwo4lAsQj4T6fyJxJIDLyai1BtzxP0iGR4smtgdBMhY8TFBFLPU
nK0nWSl9thFPseJNrUT921cx+36d+F/oswi5LaBvrCaTJL5Ei+s93YgLjFSBXQMBeoYk+sZCdDSp
65gTXHb7FrUqWKxuSdtKL5Wj6sPOZDB/QWFNtGW8eWUrxqtEJp70QlqkwoYjgKG6Q4rU4aOe4NfU
q86nSIQnl7exzwvx3/96J05Z21wQy52bb9hcek+DwJULDODhKssev5C/0YLE+mF5xwD3Pr+QZsKK
w/Ok/pmXtmTx8vY0FjsmhmoBAsIvPiarrI1OdHpRzu/qHqzo8hXOMhh7sQl+ck6wFRE9m3TacYZx
D8jbgG8ybB5X06nN+L6qGrA471uVlgd2V3g1L5A1GKxOk69kSmQ9qdJpzvoYk/dHSu2ikTQpiz1I
0J5GoTAYxut5aUJeRHoXICxClcmpUoiIv6XcaCe8wuQEIdMgBA1E4/GurAYwHnqOHoO2U2Qi1EkH
R2mCy5KfIHh/6wbvOXmAH1dIsVNoBZOSDM23x+KJhp6IyISpKhn9kQSoIxQmPIFtdLSJSIdllXAd
apoRGat9E6QqxeNdFpqqjOIx8AF9YSJBpySfJ999b9HAkJ5tcT1LrutCJ4IjI9y9L9gU+tGPTu8Q
9w5HcsPQhCSgxcTBicBvy8OiQqmNhQhlgJbWZpyatx5mTN5Sl+yXDsLbqCbqYMAlYLiVNJCTiDPP
pBYUQ0UpzM+zbhNRZfOg+tKSUHwhlwEO0rbalgmsaBJ4+QLrPw+UnQX1mvfZBma86+ROiUf6OaGp
f9psAnsrCgWh4BQcXFhIobstD8/zMFgLYkqRaafjsePRglG9zrvSip/faOL8rGDnm4wtCMP4oWEs
qtSVfJRLXB8qdTAChiyO4UlXjQJyMm+zQXBV0jgIvWoSo6UdbuhdkskoRMEmli+2tCLVyXvLaM8L
tZBQ8eMMe2OG1MMmAHh81yLe80z4UCRih5jmCaabLQ5bl/hjKcU0Y5MWpGIKLRrUKnH3uDq1Mcfv
qwic6QYMKcZhKAFPSS07ZYq4ylXJ7NNPKNEIrjVF0ZjHQtpMJauCifOTw7tRnG/ZiRPSf9OUwU6m
hRaPELgStglMuqpnqMRwcoEvLflM36CMkGhIKxyzIIWecnT7wUHvG8IPdYQc8jmqkJUdU+FPsha/
tB4qZNM+3IkKGtdksHxxtktOFt/XN2WdOhmqtiqCKwfAtPj3Asb0hEOjeSKFXZpaRFLasG9tSj6a
tupf7K9Yvy0E4nsNKXbCoGJwJxXWtoVoShmXX78zdeGFK6MlppS4Y79AXxj9XBI6yeRYtLAZjYUU
2ijE1H1UQ4lGHqM4SM7MdSPUInVCyhJv6bTF2/MWlpAgIi2A1ag4ktFPDRlezRVfPxzK4bqMQG1X
cloC1Jo8iXqIx3ZUJpLURRxUeQx3VSwZtsQBJC1UjyOiHuKqSWvsgCCQivGsZ8FPxzTiRMVABYJq
GTJB2BosK13TkCbBSkKMhRJLHIZTHI4HSyaqdBTbke73qZhuT0OIvEBoeEcgKGOLhzoycUIVC1Xr
lCCWmmca7SGTxScvkQQTUiKo04oAVShyazEIdy1tYp+QIiYjIsh9EESIuubh5LX+mhetriHvCIxs
G8bYRFXfgPFA3VissktNI8QEkYxP1rCgIHCdzPx8eLx4SY7V3e0Bbxh6wiNK/9VhABcUeDJ+1O2Y
ew0p/RMaOR4sBxsmgnCtCMxL62n+YSbViQemIg2kfelidubiBoXrtlrYUHbfuhfRRHfBwYnz81rk
h9ZfK3S00vgkClGshA5n/Kx8xnxHvsF5m62QsUIfbrUKLwoernnymtE6u6UOtSkrlGpgcf5syfgU
OUTrC/Y45jTN8/MUihm9I6yYRjWxRH+EDrjnzAHpyCNBlFER02zQE+klx7LvyZvabPfUBXYaD9eg
1R4JC3hBe4A1BY7r+03srwo0pf1k4z5uVXAusVhYGJCBbtfQZ5gqlswXSVimSEVHI0V5JuNJjZ/g
7PXT6SlIL9BSXdwPh7dW1X+sO6g+saIlSpIJqRf0W5sLyM/37GYjjfZ5OfQGDEsP9ugZMMCaKyFD
xpYYoUq3GOG4NMPihQ1rBodGXWpzGPWg9ETiH/oWTgTkKnFRwDWLfXTblii8O6OFdrgGGjAZXFmq
4sqKKz6mpPEZLnm+ajBsGRYT42X/IzbCn7+qPY2TFjeiGiqUvAgmk6h6UZPi7NzmbOp9kaFelbc4
cjZon3mqWq7+93jVvWZPOfqdJ1ip24GexdM1eUz/g/1BrjrT9pw4IE1HaGQ2Wlc6LyPS7cWI80c6
kpcee/ySTDvHih0PY1Q42Ets8iFwdb/AJ5dEeFe3hx/tt7C7bKM942uHoqcpEhGyNsOywMSkJNUf
iRqLQEVSvHbJYjZ4FTNsqcOYrbQWnSlAu8cpKbGzj+H+A9EtnYbsHZiUkwUrbDRZhDRRIcl4hdML
lRwqeVrdaKzo6SYvqYqvTrHTm22gL4zFAgeLlIOG6F5UWHXb7d6CQoN9cJEjkfZjQOk0KU+Ngm4E
7Hbjfh7B12pAvQxVryYpBZ1RkY7jEUb88OvFceM/VzrRcczh81Qkb+zKRWUDAvNabRSnPEzUo+Md
y7wgnXfeYXKaE0tklETdc3+2SU/3AAAdfklEQVQ7MF67oTLu3pCK5F6RAWyHEWZQF20cz5xJ9tnY
rDvg9J6oR8KAJjRv3WNigLSsrLCgtUkd/873ofcL78Ny38MjUwZSRohBl+En/Rxv75S4tNvH93tN
9ExZaMn6iaIhR51FEHaElshAsxQk1aBzKp0ratV14joksh2p2zd6wy/kKEuBzhxVxD4mywY29EX3
3h6EvzgvpRD67JaBMnt7VkTaUalqJhYFyzRVY3MejQ05uEVfN1mdSGFNK3/Pzrq8Z9xnizM51jVY
VRgfKWPevHkIXdkdWvWDJzkCZTDsDuK0g6Ld0my8L1LxJfLUtPXiPJUen4W2QnsOenJkCam7CCSo
p1Q0Kbm6K22GqNSBYg1QKfUyLzRf2d6QvtTkQaMZR84J6UU/GJ4oXjMyVfstDxHVacHcEKAdE5fJ
Q4v2f8qByKw7IHXSCSRapl6i+EMNOi6B7ffft6vPTHUtJnrbqqd9iyYfdAw9UGQY8QVe2yrxnsUB
rh9QeGjSRi7l65YLS5JdPTExAs3ympNctyIoSlIfL0gWx0VCfJkzFTpSEp25EBMex7o+s1KuuOec
ywkVwjDly6tOcvjbYUc68jWlgXTaxp49/QcHDoygyTHx4MFJlBj4WVnrur4Qtf+ecM9+sWVfoSD+
hsZcu3pLWP3yJehuSL83G9bWMSG9IoFsp/OhJF8loR6D1h4zDAcrSm+fUT9zVaPSNBiEs6QG+nyH
WFtpz1pioARxsIQ3LZ2XffMxJ7W9hZsWiiWfnG6ryKkv+FXv5n2jle1ZYmaIQmRSHCIieYdYmuyZ
NKFny2bdAekGjnsKlUAdsVJWGjEdfGXstzef37FEQAkLpoj0Ue1pBi2FPXWG7/RxnNqgNBN+MWAY
qNG6Y/AYcQFpvlHLRrOthjAIxk/tJMbQoPWH42Vzqqhpkb3RBkoex54p81deEH6yECq5Dwx79LBf
PbwmNOBakWZwoOmKV+W0nP8XAz1jN6wDrqXjchVntJF5241T3l/lwdHk8+/VPXxAWDB695WwQkos
6cpfUuutXTwW2r/2Zfg5A9GmaLobkLAHUKOc+pI66bfJKTkCFWmArEzm632TEGMVvLq9Lfvuc05q
fmNjW17nEgHUuv7x6g8nS+Ubrao64ORtpAgipkwITfj5PPG0o9isOiDlYDxS2BAcXa4zNnXrBbZ1
pVEXl466HK2m1O2AQHENhaK+JBVzG2oMPS5HgxHqipZRDWUEh1j9Dzk4RTxyxuQ3FLUNeNw8FwKo
BAJjVQOhry6rs/Abi9ISm+vAJhmDHi5s4D8mxYBiovLi12zUqxEsodw1+fQ1ftX/kIzCK7qkotnf
vxMBU95kGGHew5Zrb2522EnDwyWE1TrM9tzBTVuHvrK2Da9R4K/c7YXaAUmVwHwCjbUGxJJDavoT
CS/iKNX5qYFpfKR7Qf6d71jSpPmoI4l7pJLf3rRv/I6Bsdogcc00Z41YDYn6ltGf9lh9Ovac5ID+
U6ilJjz/PZFv/s+Yz9qd0PrOcofZB2UAVwlNTpRygGaSQggkxl0GZoTgoQkjEpAievyTfti/xqqY
MeGi9EniywCP2L21KPhYWsh1OZOhHDE9vTmJKMQs4x/bbIzuq4efa7D4p6uBhEwbOhLf1Tvx8ULg
71ts8dY76tBsYd2coSC5lpGdUgwtYK2KCc3SMDFcQntHbn4Z+PeFjvvv1MwYLgGrTKA7EzMy6AUg
Pq2JJ3WfNVTGknGPX5Iy+YeO7TCb53fmkGtM7wVT//bA1qFrxie8vmUE1xEMTQ2OjoREhHSIIHEO
2XNSBRtPUfy6NRXestVTGPDluhMM5+dpIdZMEu4olLAqppZqpV6gH3BII4KyAsC34jGSiHQLgR1q
nsfhj0UGQo3e5HSzfx8i+kZrKvy1oPxICqQEw9aSxITUqH2iY954zaT/haw0TjrbEJ/2ohAsbVKr
iNYiq3bG+n1T1dcPFTn1RCBRImQJtX4UX9uWiRZQsht4IXp3DqN5fjtf4PBXkXr4eAicnmLoTsVE
4jT6slncIqoFotmTxoVLU+yTKSGOy5kRLEN6lhH+29hU/dvbD1R3WMSFZjM0FmxNYM6C53iDaBbs
OW1EP5nReI9g9zHDZrTDVbXjyr718UZuvxMcJwQ8xALXhSOBQruFUd/A3oChwUKl5pqOwwwjxYmq
IwA3BKy0NW4Dg0HEttlcru+0g1+WrHDvEGHodLc+kTtN1jurCYuTIdUt84mcSLEDlQCuJeFIW8Cn
doYhDkbzmtDXO4ZWHiJrA731GNZE7t6RZZeRovv6yegTrQb7zPDu4VxTxoHZmDp+U7F605q0QqdD
ziZjXCJELlLiorTJ3pMx+bkNFkPGDMNy6F85GaofwlN3irpCuoEKCQuWbaAk5Wy35p5Te9444OEW
yyAoTMH72qjHv7bQNO6c53jnXPChi4HmRdj2wy/i/Cwwsj91ve14f9mV4w3jJf7N1Uvyrz/r1a3o
e2Qvrt/s9lZNcaqTkjDMeDxCYIXpFlCQ7L5O1hVOahKwxyI86sUrne0a5SPHImlUgggOT1kYnvID
p+7e50iJfTUPx2VI5zeO7P117bypY5j5zmo5+Ph+P/h6XyiGlxfdqyZ39aLLZK/a7+Nf7AzR5YqF
kPzCBoO9I2uwcylbs4xIeVH4Yy7Vj3sq8lZa7Gq3GZpNpeexPJFb/XO0WWD7mCkj2BWHKyWqyjv3
QIn9eNOQg62DLjYP2bh1yHxko1e/KApksdEJ94P7b9g0Gnywx0+pl158Jt50wYJTJoLo8qCm9My6
JGPgJEUgIlv0k3XP6dSpjceKkjsDiQcDhdBg/6vdYS3EKOwyThHVbHD9B2r7i0saEqJOapaTrO+C
BoWVjWiaUP5FRQRfPzsHpKX80UMl8/r+QRp3qbNXZuxvZrm5Y75t7G93xHeyljoBPPx2oPzz9tYD
3l+X74qUvDVFQAIRPyizqVL5fLHnZQQ83AidS/ArI8ffteHqH1yngFfPL9iVvgCfniTCThqtET+x
HjKr/3fvQ4N37BsPP/mSU1a+6xKkPnPfXb1vLtaDd6ZzeIjyfAIhGOzx0gvUCiEQayaBrRtMrDil
mX+9EkQ02UqzFGMhD9ebQhQ5N38gRHAeNTD1NCYCRTh6p/G9ofpdyjEvyHOcdWqerRkLxEvKIddY
xrIhPlKN8DDgfYqF6uaixOaJgKEjxTUQIo4E7JBI2iEO5ufHbZg1e947IJIwTc7TXMAvSj5+UYev
SXRShwEbdbRQDMsW53fUlHfpt6/d/pk1C3Jvmres7dPGgfFN8FQ/E/J7gZTXBExt0/21BJY3TWdL
blQk8iGuXC8MX7KrxHs6BQ5kchyjJfemnsnq5akQL87Y5qsDYqRlbK1lspMZZ6tMztpObFSHUERm
qNdMHxoNojtaUuqytS0Rbhu2bxmXzpc781wLA863FSYm64iIh1kovcsS1JXGKSojprGin5MQKG70
7BfGno82Jxxw2qhwmI5cOIqWLXFQZ/IOhImDPYNT31jWlvlGExpf09838bFUxD8NJv55POSRCbUv
Y8iHGeSjvmQTXOCAI1AlkcQMRy2KmOxK8Q/x0GB7el00doefOaUj88/+ZB2ZDI/pZpNGcjVUE4LJ
e6TUQMv1IwF2lH3sa+WssjsIcX+Z3/bVlfLGt6yQn7q71rLsYEV9WAk/dGputhpgHxX6wlCoM4ZC
a1o/cuMTLgaKQC4VYZ6QOt/MOgbqfqgxjH8uNqcc8KkaVbYkCEO8ecRl6Ap2w5SIbiCUcbspTrUs
/ioeipdJ8Attwd5EuVxnA0cXVxrsSo5FVLfE8j9aUS5NyioT7uadw5UHF6ZRKvtyD5TqiYCeEDg4
6iLIHSJQSnTi9IK7gBEx9IbypusP8LedVfeuPmtV8OZHZdvrH942usdsTH/hL87o2FdxJaqBwuio
i/Zl7ch6Pjb/drtGNA+WI/STpEPKxeL2CNISyFgmsmkTYxX5eO7EOWh/lg54uGnsGqFZpnGCkBts
Q26IVHR5yWVwA2WkBOvMcLStzPLWcZ+j0ZaoB3Jq2yTvmeeo38/PRKtZV+7L+yZbftq3Y5jOXj02
40a89E5SY9ZhuSUS6jjao1iTZphnAwtt9rNHx/nSyc0jXzj+7JyZOWvRwp27R/b3hgIVZqLqkbi1
gcCPwJOdDUszisRvOFYPMbavqBHrLXkboZTgWdpwMzXgU1dTc9D+7B3wD40luyFMRykDknYMCGp+
MKUnEhzUjxsLIgxIYG0G3QUjwtb+cvPK+Tns7ZtA5D8mc3C0vIwABASz4qFE3mJYV4tgwewbHDLG
hm7qyb/mralM2znL7v7FTTs+4DrqezlbwAliOlia/bJEmjZKRprONDuEUhgruRgpuWhImWhqTiPb
mEKLYyBrckyGck5Vzy9AB3y8scM2JQgB3WACm8clttdocMZO68oitb3Px7/u2XMTLbDnHQMnMYZ2
Fjeuj2QqqbZJRqLXJ+HHWDq/mYkzqpxde7CIK6r/tXXzq98W4qN/ffp3f3bttrO27x26dHFrJn43
zdoKcwoQdcA9xKqbIJKnGclG6wGG+kow+6fQlLfR0ZzG4o5MvLU1R+wF74BI6hkCftJRtrkYYaga
aedak8MHORNY1x/8fQ5RL0HHSrRZluU6RzyaAxL4lZimTkkDZ6e5jkgkAFgN5W/vHQ+Lps227I34
BT+6ascNr60E5l+957S//P3dfSfcdN1D75q/sNDTpIwPjQesfQvUZ8tQbk7FHItHMpaIPu4suYhK
Lt7CgZcsaMD4ePQnAZg+XXthO6BGysQSZGFVYUtVYmdtmgxcmF15vHvUlVHKVF+9wIr/QpDoixDS
iR3l6hHIYKKs8NDUY9wroa6uw18soSmLydDH1W9CZp710+t6NuzdM4GLP/yyE7sXnbf1oQf7RqJS
pUd1FS7mo5XymS0ZjLkBesaqf/THoW/+y/1F/TM1Z0xaXoJLgjbP4+nd83gSMntGkCyRKLUYnkJQ
iuDVpI6AxCxf0v+fvbvZ4mxXGd8oKVJ556iqmK9wMuEqbBLxUXtk4b/DxA4PiR4yTYV70FUo1yXG
g3BbNpvetX7z5NbLP/pzBOUJvOKCZW35hU3rlzaket937lK895QF6GpM/QG935FeZMVA4oe7xvHD
R8fwm+EqiC2g0WBaiNx4BoKTs20vmAioI5GIj1oar9WrxCmoDnEuc8RUcDQKO9Y0O9sd9k8EsapK
s/XBcqhXCqaXdKhEIBTLMZLh1AxDS+LMhx/J06icaW4+lkxaBhiwScWz7rTgJzLP/2dl8P+Gh//5
2tfvfcuLT2vHK1930keXL2s5+95N/W8aqdX31Z4B6mXcDXFXokC/zOE41pYamNsq4j3pJypg/qns
BeGA5ARU8UraoXBJHyRm9+KHgVhDPQFhy/Mm+7C0zL/OOr455nO5IOdc8jI32P/zeviZ5N30P+uK
4YEa8EBdYYkJrHViiawnS/9lQrbZmujzOgoPpsLovlGmMGqIty5P2we3bBju2rnlZlxw4fKTUqVg
W7HK/q4G/r1n8/PvcSX2uHFv81gbWMsY2hymsZJ/avuzd0CWRDZZUWB1aFUhcaiWfMzIGetgZ9QC
cUIopczbCqO+eHi4Uv9mp2n/8FTJihUW/N8nuhgdxbRmujcA8hxoTDR2j2QUxwjwkFHxr0wqn/p8
7Xkif1KwJfpLOfthX/odt964+5TuZqRztvXd9nHjtAh4f+ZZHKHTnDnVusQOD2AphsYMI24W+E9F
9y/5lcbeNsMR1yueif3ZOuA0+0rGNmBbpEkXTyrUoQH/YdwoiWPsDcIf7a+yH52dY2OOwe2Kyw+O
h/Wrco54pMuSy2lhSDyxtEyyaKKLezRU6LIYChmmlSufaDyhFqkmv+obSKTmlAa4Ch5jga3Cf5BK
bFUpdv1woF63vMHHhY3yfRvr5tVpFdxuPUWl0qNel2Qpv1JVCOpAaMX6KMQ5SKDcevLjUb6YoY29
hDXDTk4JN9momqlGz5+dA05vfKVMgQaDYahYw47eCQxLhTH25Fv8JBy4xGAfbU7LZsUNjLt0ynJ4
PHpkNJSPDLvxdOJIxhJxaCpMmo9S2kUJhVnHYcf/mhagI6OpzjL3j8lfbKlGB+fRpMXnrx+qsdOK
vvHuV8/D3xQa1dd/PcpPSs9YGcG0VolyAdMHmhzgGAdoSvJZX3NxR5oviPhj9muwEbCZVPQtjlda
TO9EP1v783FAFU8MGjMWUhkbW8ISDh6YxNBEVUPeUxyHVM2P8tc1D2KTxd6XTSkoZmLK9XfS6idB
6RPZtCe9YFZS7R6p/8YSxDVt080/9HsKeyfopcd44aDv/6uQUMROZsYbRuvvmVLr1zTgDS/uYCfe
OsqXe4h2GzPYZ2aJ3u8AFWQeYJIwIbGGBcB+KfUERrNi0coEYoWlrJq59smcdsDp+5w2YtjSFAP2
TtSwZdswtvYVUYniC2gcAs483jOmhXamV5rSEIuONdSanMP0gntkyM0kS0tkSKE+Lf94svRkMgcU
gKkw1bQiye9N78pIKI92hrOJExOlCInPrITEpkHjEyc24ienNah/vK2k/tL5IzK5z9R2kBhQHegk
DurkYUmxGAZmJS87Ueqcqc+f0w5IN5EiG+nEbXWBwUoF5QMlLTGU008sOxpqKzYGzdREN5xQMHkh
3mDyCLZjo1LjAy02ekgHT9P90kTD+SOXnfImrvT+yFG/Mzucbzu+qdNf0VWxQ9J39kMJFka6dTMa
yqt3FfmVJzaxd/dU+XszfPYkBKnIKGnWsbh1NNs2px2Qbt6eqsJEKeZWcRJZsHmH/sTRQ8X0DT8+
z5E3GHZVJJpN9lphSNiWhbCiNi0SONTjYyY7pCF8NNOOxBgqUYQaCw/12p7OiamjcqQ0H02sQciQ
UwSf5p99caf4/CILrw6V+pWYJRAWoXooN5wKn5sWzZxzwGmicVrspig3EsYRLH9YoHsql04lORlt
xBkWgUHF6UqocwqpWOfN9r3aw9TMLTO9l/yUvxviL8glwyoWPxhPNVxNT06IaZbx2Jmn/8dYVX1r
tYfPt2fwqY2T+FWaz46D8ISj23xMK3RWbY4houOjy6YlIzdCM565gvf07auHrGNxVlx9So6dVwxD
5DICigtaKnprq8E2/r4sv/JMv+8UYzg1UaJ8KnvRLHHW8URSYhqEHy/3R+U9FXb7qqw47/YxZVci
6c1KDExSkvAoIuQzbXPKAUXCNXN/WWrR5pjt6pldJJWAVZsj9spBFwNTkbwiw6K/acnarClnRHdV
oq1dhvXl852gv9+NfvJ0q75Yjk2hMk0R9xQsSh6wxcYfRnF6v6GK+vtlWb5haYpdUgnwvdm6eVS0
UWW8i6jkZtkJ55QDTi8n7Y4Fc2ekFDvOllf6fnjlVGB9bmEWbF6TDSaEGKr5b1QOP3N1yvxC6Mnr
uULl6fo6OU3/9KbdU/zzlMfmFDtixKyG8sFxV/rLc/zyh4rh98xZcg56sH2tG5wAYWflU2KbUw44
zatXoH2Np8YM+6SmEcYS6EqR9htbU7CAziYLGwYi2R8qr8sMf1KFurWiEKWfwYfJpJXxh5z4Rza6
GQT3Kqoja/eFHATN/+zCvPX5bR5vDiDHZwPOpA6T8teikbPwGdM253LAslLojknsZ+DJJKFA4IYa
0WuIjSc28zfQPsb+AdfvsFU/KQPVAzU2DY9/ukn5tHM8Fd/lSVFE/DfWUT6L3ufRivp6VwafPyMn
/mlHTV1mzVI1LBCTij6JpMiM2JxwwMf1kRXRtakZOhZIA0RpjB9n4f/bXEr9n7F1ntg07H5jOADG
SiShEMOonoumhKYyZkrPY494utLRzKQ7EoTrVqTF3w3Uw8sIWjUbTM48WdgfV4fNrWfBnvcOOD2t
oIqBNHNXEKfxjHKlxIpPEmpsKpD+wbHInUDwD/1JeUNC1plDcrFPz+QhAs5pqYv4SGZPQAJP31zq
7bWwWGznaBeD2BiGKuqyrmZ2Hxg/fTSS68Qs5IL085rTeiqziO2fEw5INzJPVUAIlMKZin6PN2pK
r84EB+w0Fi+qMKxxYsULko8lxxdP580SU0lUI6pXHsTvsTMBLRzJAUmoUconr+ylJkaX969SMjg2
K75x56Q6M/VsfvAnseCwyc1sueDz/whmcbuEkC0Efw9mKSeRepGI7YwicUyBh3CIc5CwelbcEH6m
N4D4BweUwkgg9epuH61dHiUvNJ7i7IvAFfvr4bdOyhofWzdJUCl5lDP72ZuRPDAv6COYUD8ToULB
iMURZ8NIRI8zsYODv/5AoAqCyeJMEN2aLKYn3kdL60SQPh3fnkVIIV6bnVPqK2c24mMtDnvffhff
j+UoZj5OTa8WWM/iIXwymxMOSP8gAWrlx+I2s2Fc63aoDUTf1ijUIjBVnAlfZ4lCJwE/p1U7Z8Iq
Uo3Uomjk5Abx+T1u9H17ljg62GETmtmIgnOiCiafqymGRhGLqsyGD7KYMWEf/bvNWJcEe2SmqL6d
ZJVpZAaRxD4UHqlE//ryJval3LDRLRD1zhZAQSAmb58NmxMOGANKFRaKWFlpNi4F13JefJz+fTJi
rVrXZKbem8VUHac5mDHaDFpuSkn8rMGWX1qTEf/wSFV+wFazR+Hm6V6omvE93jnhgAZi1aMxxNi8
2eoKSGA8TzK1JmtLW4kC5AwYMWbRPslkHZgBFLs2epshyXqLofRPbGbv31VlH8gKOSsxkD6rSQuJ
x4TqM1l1zxmCyjoYSAXcThqkM2+6OChTnmZGjMgnZ8zRtWpTpGLd4hlEE5O+cLGGn69oVBd39ImL
Ail/MVvzYZrOlDU+Z2arwLnhgCxWN5/wJFKz5IDT0wSHqRoYb63N4HVWpEIrmO41zmQ+RVIRm4vs
J2vb5MXLmtnf3zGBX7TNEk4wTJrodjBzaQTm0iyYmqItjtBbW7PRitHTCa5Q81l1wpOFEDMJSVf6
++cFg2MoLUcxE5bTzWt+60A1qlwwLzpj0wRbvC2U+1Kz1LSzEkd0ZvA950wVTNc0S71ALaM/O/0u
8rhSoKaaHN7RmqK1w5l7/7Rg6HcVpgI2c47N9DEsH5nk175hgXz3+S1m76bhYMmEkr0z9AlH/lim
jrDa/8xsTjgg7SkQG/11pQjGLFXB05JiL0mp6mgAdtVEFOMPZ+j9yZcXGAyn5qiHN0Nviphbesyz
Mz/bh50Ow8TrMtbfj0bhB2dLLY6+eotejo6eEsr7j9ncOIKTbbHOtECzwXRFPFtGwjDLhWo5oYVp
FtWZ+iRLAGMecKCuntFc+WhGVMFTUt69x1ffaUmHtzUJs7kztDQf4WwYIaVbaGyJaEbKkTnhgGEy
Pz2vwHBcmqM4SwhJQn5sLyFyTDSe3iK0cOBM3UaHqC/qCuM9EaZkTHUxE0bN50EE36qBgRQ+fanG
CdBqzNJDSm0YK0HKvGCO4OmciUiAKjJCbYaS+CcaOWCjxXhvDd5de5Sm1Z2pT1IJcSV16pqI/nKG
IGU0A+bq8d7AZhE8MNPvPUeKEKa7zyaP1yjNWXq6yQEFhxISZiGMtMzDTDqgIrJIh4F5TG++zcTF
V7PscLNtc6YKhm5jAE3m7F1sOlbKHsKUiXyXw2d87ETuTNjCvgloIe85LfAxQzZHGtFKw92GphgK
LtM51GyY1FUlikoo857JGEo1G06SZUDHNCvr7Pwoc8bmxiyYMS1AfWsxwr1Pg2ng6RpxypydFaW1
bUyUfIb8EfZzZ8KI9aBWiTkBwxcqUXdic6YNQ2OszjTDQosa0bPyEXr2m3aYO9+SuD2S+aEAUzNV
rR5utNFHbAmNyb5I7QXshHNmFEdA1NUOw1kZjvFZIs6R8civvyml0G4ZN149EZw9mzuxOcbwioR9
9GiaI3/uNjfaMCwuEHZMKaia1Nq+s2EUZVflkd0whp12YK491zbu3u7Vz+NPjdrlaRs11Ik5YTlj
Gm/3QoyCcyYC+tQAtYD5KYbiTIHqnmB6A4wzd8skvojIu3a+bd88GbCFrsTe2aJIGVZKD/fbEtDn
C83mzGI69QAHPIWtxL03a5M4LdV6hcWF12yq6lRYP+dhqcRszVW1Maap5Ra+EB0QwP8HOr8RIsI4
uEYAAAAASUVORK5CYII=
"""
)
)
)
# Normalize the embedded sticker image to RGBA so it carries an alpha
# channel; leave it untouched when it already does.
小强 = 小强 if 小强.mode == "RGBA" else 小强.convert("RGBA")
@deco.ignore_botself
@deco.on_regexp(r"来点小强(\d{0,})")
| 69.082317 | 76 | 0.940553 | """
来点小强+{可选小强数量}
可以艾特一个人使用
"""
import base64
import random
from io import BytesIO
import httpx
from botoy import GroupMsg, S
from botoy import decorators as deco
from botoy.parser import group as gp
from PIL import Image
小强 = Image.open(
BytesIO(
base64.b64decode(
"""
iVBORw0KGgoAAAANSUhEUgAAAKAAAACgCAYAAACLz2ctAAAgAElEQVR4nOS9CZhdV3UlvM45d3zz
q1dzlVRVUkmyRku2PINnMAQMNEOGJp2EpNMNSYD8nQQa0mkSOoHMCSGBJh0ShgRImAzYjhkcsLGN
5UG2LGuwNZVU81xvvPM5/Z1z75PKUkkqyaq08b+/z5ZU9d6799277j57r732PuT1jKJoMhx2Q1S5
wHEAOQBThKAHgCninwlC4AFoBcAATAIgiE0Iof58lWWhhVK4yb8vtRF5Dr4PEobIA1ggBBUhkJJ/
B/AECMSi81quCVzY+V6vM9xkaZjly3ufvF4LnPysH4UPBUIMdVgMEBwBtC17fRygENHFXCp5dA6o
70+Tf59+XGlPhCFqnKvrd8ZnCMDQCH6qTQdrcLSaFOOgODjtISP4BV0ZeS66xEiBQTSArB/hECF4
6Bx40C7g81+y5gKwANxp69ApRbjMB0C+LMtwZasmDoUCleW8R6fAbChwTAgIsrzjaES+T//geMTf
Z9FwSN4kn5j3HvWCg0f84DfpRVzYJvAMAuw+32kQsiT4Xgr2sgAgkhtCoggFCngQy/KCEYH02OvW
GvSIx8/vB0nihduogCOk1zn/UeQrQqHOqXc2YO+AEAcJNe4POWk5HkZvKDHKLxSACnyEoC4EOqnA
XIjlPT0vQXtZALAJg2oQQd5NkXi3876PoiUKCJ/yo4VoGYuNAqB8mc6Q1in8Za5PPgcGdXH0rSXy
5j0N+80TfjTv+WHnRmaFjC7vXJsmX5ohFD4EvhM4MJNlDxcRerwU7GXjAeUXkV5JCzlyGkVwvrtK
gDCi2yyNdhepQLBMb2YTgl2NEMc4Vzf/fCbPogGgq2B6KcYQimjXnO/ehEjzbIm+ZcaRSD7LAlHn
8WDgwhEcaY1ecAz7UrKXDQBFEnTL+xnyeNk7F6SIBCilm3VC2kIsHwdVIbDN1nAlISeTr3MZUe8B
ChrbfH8ZUycc/1qTCbicoCFEkjYt//tpFPhh4GJe5i3kzMTjx81eNgBEcoPcSGBeCBUnncvkUtqq
kw2jER+uRcvN9oiCQkansAgQLOcdQiAQBLmIphfC8K6KzCwjipBE4IIna/r5TT4gLZTgyZBj+uKS
5pekvawAiAQiMrlghJwdhAIICGAzOmAj/KxOhaKZzmuJx0tBgMkseBnYoQSIgDUyXqwF0X00EmAE
sIgAkeBbxmcIlXQAIQhO8PM9Wj9e9rICoEgCcgsCDSrUUrykESBL6U4C5MdC/uTycuZTRmnCuy3H
eQkBk9INGhHo0/GYJnmci2AqS5TiS+5yUqUfL3tZesAwEiiLs0dX8jclxt4yEmHuyYAvGw7y81Zr
FN06RW2ZQaNMGWxKuuQxx4FpTR79AvGXAsWxSOBg8PLyfng5AtBLCNqbdYIUJSoZOd1k3ugzdv1h
P/q7ndKbLWP5FUm1IUcEjDBCeplYkC+zNJInjIhWyjyTiGWylKeO3K5RfKbqX8B7fnzsxw6AIgFY
FkBZiDNKb82S1FgkYBJyZlVEUjWEbrE5UjkefLFIsEQR60yTL5MQyFIGXQJ2mewxFwIWoYUaF+QZ
N2io8tgF4M8EwS4e4kT48kk8FtslBaBFCGwi6wMrs1RQIqMv8BMARglN5xjdonH+2OmhvHyRwwE3
4GeciaROiob+7gbnT80KES7XG8nPkYDuNxiMs3jWpUwuvYTQVh8i6DSWVz05dUyBTkbwHefM7/Fy
sUsDQOkRhMBhz8cwCIIVCpUlsPKUvXk/yNcMQtbdSLRbiPB3nX5zZObJJM13WpzGY19n2YRuP+57
/6nB4+V42cenRJX5GpwvmzeUlyZDaWsUkYVGQE5WLc5n8uPThOCQz3HiZRj7Ne2SAJAkapkJHmEt
Icvixy7GQiG4xph7LTU+OM9D1BkecaRPWQQGmnidhogk/fECfyOX0FW68RvDQgzt5fx5SdUsOwER
Alt1AoMIzEQXEMUJ6TnRqhGU8xqBtsx3SuBKgcUeL3rZej9c0iVYCNgmw/Z2W8m6VspqIb/3ldvb
viE8RF/7wZiVMVl8txLLUoJDQYDh4NS6LH8dCeUZrXWa9uac7/3HLYvkSuez+L0EGwwGixK0LRN9
pEkNEdJRjcRcCIFlC5w4UCfA6MuIdF7KLhkApUpkzI/w6HRDMfbeCmHQ4QKNhca9r1hnv3o0DRTT
p2gNVaulQGNWKC9sJ8CM67ECg6b5Jz6Pds9H0XMKfMtUAcjXSiJ5IeQoRxdUvlVeuM9gnRxil8vF
eXOXppBCxpntNmAGBE70cmP/TtklA2CzLkoDgQITqK3QNZMZ8KNPzH4lPIJbayYzqn54kp+wKDAf
CTwdxMsWFeJk9tpO6cZexv7D9xvOlloCqnOdYtN7SQBJMG+3NKwzKRYSMMj6cXCeejOS9xPAWIj4
ifllAKlZ892UFsga7Oxk+svELjkNc0TEHsJdsYdWwCDswcugZcZcb8tciN1Nr5ImwPEQmFHHJqdQ
BIFXWNZd/+aHfzIkxAIuQpzZo1N0yQw4inPuFJWcYAzEs5mMMCMhMBqS0AU5kGLnTnnU7zhBWuco
GkA9+vGUWF2IXXIAyou4lsXZ3sokI0QW8Y/XfVr2mP6Wugh2N2UhvvKAzbM4Rc7cYJl/007AdgX+
x3qWeRRJaBekjEoI5EyCfUGELywig7sIwSAF6uf0gkLGf607s3bGoPxpgfOQ0ALoTccxay2MM6rz
OUAZUzZXm6XBHf/0/zeK6AYX6ua1aEB9hZYPSiQVIna3Uu1WxgJ1U2kiTz8ixCLvJ7BF11671dB/
5au1xkBw0iGe2z3L15UAbGlWPwAMKzdLTqJtQtE8UMKCpb5mUwndq7N3dNlCH5rje5hcGpZQv0hP
KRXZhgG02xTDjVh2JQ8ZnesaxqR6yzYm6gtCeIshJj2vPH7BYKhSgiHvpZnMrEglZE8km5oAd4UA
KC90nkX3bqDab9uSdBFAmgLjEVCLTrmD1ZQW3phO3fvVuvO+ac6HLuQYWxMhKU0EDqcnD9Kh9BOg
M3ndUtYQBFdl2W+VCccIF2V9UayMRRSR/JlNgPWpmBaSXyGM+1Ww0aKYcSLY7LTatgAYJV1por03
IP6H2sipcqEfCcWFrioauLZo4Vk3wrHR+lm/6wr1kC3LVgSA8iHvoUBthbw+UUwffyAEfreD0jzA
y22U4OAi6qUDwOty1tETUbDnUBD8SeY8nymfFUeVvoDLk1Kf/Le9xGtp8voKF1iHpT1goD6LXVm0
WdszC8Ff+VxgKR8k5foGA/pSFAERyuOR5DP9CLi+qOEHXKAevjCD5kQgTbU7Whhy41wuOuTk+zI2
Q1+rrV4v4+HaSziLXhEAygREai31FX26+DMBUN4nyHaf4wFLAE8H4mT++oaseXfWQvGTk450Uqgl
1ZqljCTLbHuy9LZIFfM5Lg5JQDiU/Hl6rCsS8N5YYJ+WMdpMNfqfa/FCAMnXyGw8oxEUUlR51MU9
JvIYARfIM6Dbpvi3hVDVhZt+UIaIg7r2PoOJf5JSLYmxwIlQBsFgycJAq4m9Iw1EJntJS7hWBIBl
HteFW+nKZcOUCC/iGPEjso0BD4zLvsrkSX9dyvzTrSX2un8d825eK+CbzQB8iUBcerwRec4Adgih
KJD6MvQC8veSKD4s4opFE1zNXl0TbNu2PLv83+aDLz0CXm6eQ9NLyXBwjU7QnaGQEHEinNFjIt9S
CQU2Zxl21zhmQ65umHx/ibFrNhWMjQs157s5QVALODZvyOLRUQ+ztRCdBX4xyf6/u60IACUOKkQg
QwlWyvtL8plwUS0a9KqcDhypxj/frOn/6e2d9DfuWvD+cI8fPVBYBpHBF6ldtEUc4LlMBvltGrDD
JHi6LtT7/CR7lvbaIvucJF0mKvzdg4k0v/mZ8k/NIFiXIoh4Mzde+jylV8wQ4DUFir0LsQpbLuU7
26wPFVs0HJiPDjIGrCqYeNWVLdg3OwX/JZpwLGUrJMcSqEYEhIkLqhpcqOlEHOKcbJwIgKO+QBHa
zg+s1j63x/fv/5e56APy48bOJ3tP3ETuIo4vNQLb0hQOFdhV5ehiBJuFQIVrWzfn2eWPzQefboDP
ZAg5qaCW5bgBi6I9BcwGskEdKgE5m1HVbipQsijW5YARj6MRiPTlq+zXVoWYPwxeWZ8x0b8qjYob
wfe56hn+cbEV0wNKbsqkFNpKKWOk4gVirx7RHfscgaxgud/s0R8Xul/5m2PRq7CY+1rifjSzUbJo
2bxQC5ML2KLH3k+Sx8wn2Jajn2ec4/ly9K7m2AyV2cqHxqLQ7Pj6JPXp8xpJGAXZbmrXI9kX/Ibu
HhuP7i9/qy8COrkGpxogCg3UGoH6UHYxyv//B7ZiAJStiHUqTi5Jl9pkQ48OPmlTMhPUKd5S1Hdf
1xrg156LrqyJJt23suE3SWK5RrLizQQEbYb+c2sKuPzJ2eCXOEQgvZ8nhBof0mNpoJZM0LgseCzv
GEQoMEWcw/MpQg/IWPpr5uqyu450b+1Of5iaxtbApKnpKka39WbrgccPD0/4z0xOeAfSWW1CmOeu
QItlSXJXxlYMgPLplpo8fXmNXxds8c0X+yOQ1DUZ8543dwdr/+x4+Kb9Lg7jlETxpDW94cWAskkq
xx+Ak3SKFL3KZvguHciAYL2hP/iakvbKJ6rBI1+u8783FSMg0EIIdthyEgOBiEQsnjgLAFnSLSfJ
dj+kKDukT3Byhc609aWstuOarszWjjZjQzFPcOerSrenM/rtQiegpg6fE9x8Rye8QGB41MXwsOMF
485Xvn24/NuAmjG1pMkeGtUmvQL36Xy2YgBUlEJIFAhX5OkSqkn8QCDYdW/t963758MP3LNAvqF+
12zYPm0FvliPyJMqiyFjuUTDZQgpjo0HFWU1gp9vYX+6sYhX7pnjwT3z4etFMjRJqnNuzFLkIXAs
ELCW0IApwElmUxDMu2xgoYGrXI3sLGXYzQMF7apihqGrVUN3h45SyYBuyrEgRGkwCQMMg6mlPWtQ
MJ3AyjJ0duVx3TVF07a0t/N/PHG799jUYMB5reZEZ3y3vEHQYhBM14SioP49bWWIaEIwFwH31lZO
Si6XtQ5oV7ymSIxHpsK/++Qs/vAk1M9oA1lOXnt2W+DA1jTQLUtwLlBgZEPG1l45x8O/a7PE7ety
+GzVZ93Hq3hqd93/uQr4vIT9ep3glSk1Sgaj/qkHIR5yJBlnCl82qYf6LRFnr8kIXKen8MqrWiiK
WYquFoZSgcG0GIQc60EpJIBoADCdguoEjMflOhFw5cFYBHhehFolAGEUazfk0NNudezMm2Jzu4E/
e2wG84uvDQE6bIY2g2J0JTPGs9iKeUA5m2VqpT5cGTXf0a49mWEcD81qv3N7Jjqj4C4Qe6lH6xyj
55KtLHo9a474SH4mvZjUhMq+3lWS8HM5KNfeAMFu2Z4P1vfb+m/tnSXRjCN+9zI7+r2szlVqu1aj
uMGWY9k45qNmlSKe2OBwAj807ogEe2PewG1Fk69P6yFaUgKlLJBLS0+mg1OChRqg+TJ5IaDSDUcU
VHDQJMSRukFNRKpRXmq3qGZCMwwYBoFuAHOTHu753sTfHxuq1I0go+/zojM0IoYO3JTXlpUQXWpb
EQDKpW6l1BfNZfSXW/SHt+VC3DOhDfVZ5IYMFV89fYU1SczLVS9AeOqAICRExXmyQLFOF2KeEEgx
qcEobFtP2wZ776ZC1JOi7LU/mqY/rLi4l+vBv9xVgapE7LQJ+giDwyOEKp7j4JzADbQdGcF+OaXx
13SkxUDeCpA1IphanDUFhGDWZYrCMgwBwiJwwhESjoCEkPReIAdNUgHKCBgjSOUyaGlJo61koq3V
htMQaEz5aDg+Zmc9eB7Hrde2bOl/Y8/Ro4dq9qqj1bcsEDxCkmsi/992AU1Wl9ouqSJ6JYGHRZNM
70gZf35zq7jyy+Ph1Z6v/0FnDldRgq8uFm/KV+Y0gvurHJVz8LJNQDtNOoYBZlZHWiPQBX19OQqm
dBLsCkOaDgl7f6utvdNiweFxJ3xzitDDJ1xv7po8wZ5QWPvqArflCIogmPc5ClIuE7GOcqi/NaOR
X9ieDXZ2pUKl4ja1CE4gMO8DE17cRxNJOlqqYJgMXXgseuUxdxhGkfKihmFCN2xougErZYEt6JhY
CJEeXcDmQR8tWQKn5sFzBbImwfo1KazuT1/NKMGWHSUcmycfefD5qZtNFmdCnBGs0SNw/v+m8+6S
K6JXmvpYy7Q3/VQX+/8+P+6+xyD08Rs7yA3fmeC7pgMBY1Gcp6kciGD3ec7HTFTWtqpiSILYwPqu
PCYbLjSf3ny8QtvbmdhQ0PXPyuXO0AWGa+xZKoRta9QlkYHZIECHFrlvzFF0mRwHGwJBoG9xob8z
Y+GXLrNdqzcHtOYNNEJg3AFmF4D5gKMWxRn2SRUZ4Wr0h3wceCAgAjlCF8jkC8gUiopVpCICFR7c
WhU1x4Gtc0RphuHhNFovK6K7ZMRfTmbdOsHMnA8ehVi3IYudV/dc/sj+USNrar5GCBYkwjXZ8Xcq
B25Wr7QL5Edpwo0qBmSZfujHqjE9A9r9q13G1x9Z8L58X1V8/OfadIy4wrwih3TE2QsoHzkVYTwQ
qNRj9/dCJQlIlhAtDRH0l9LYtKEDI6PV//ytI9NjvTa7N9eawp4jntXphTdcaYtrQhg/64vom5NR
+EfpkFzVZrLbJhvk9SfqWqWka0/Nhp5qQyhpBMfq5p1Z0P/cmYve0GH46CkSpPNZTLgMT1Y4xqsu
Zp0g7pkhRBH1LAGcpGi4B9AQSNkaiq1tSOXyqlwn1S9uFMCvz0AnshcwQsOXXpPCYBoaHjA+7aNk
19BZMqHbGvSUDtOiUralLozrc1i2VVhD0dOTpsfk8YdqXLUuNJdgSf+kKBkMQWZngfnlTI1o8qGy
ht6tAekL0IJe0iV4pazpVd/Zau6bDaORv5uNfjIN4MnpEDuLhNkWXX+oGgsJyKkTUm5lPY/OmJ4g
k8YCEzePEhycZnS4a9tqmOn5t27zo5Rt6vdWZpzf2ErEn3Ie4Lgv7nUo/2inxh+SCtTAE49w8I+F
gkOjHGnbw0IgMFE33tZqaO8dMPwbegwPq7p0hNkOHK8RDM9FGJmrYKHhgTIKixEYNPZ2USjgVaGy
2XwhjZaBLtjpTPzEMMAPGqgszKBRXkDocegaoOkMESdKLZM2gSiM4HIKRkI4boQwEKC6gK5CIiCK
OAI52CgU0DQNnk6Lka0dK7scXsBhW1SpbHSlyNHen6YY1xnumg3PFEgsZRK8vqoIEVUulMt6eZlT
vF7yHrDp0d6WNb/eqqPwhxNer/z3tXK50wTSoOUHZnnqcX/p2Sk/15/BpryOaSdCI5TVg0jeEQQR
2zYz7txRKvu/ed/Dh1Gf50eu7Cy9i9fnF0TZzZeDYGRMC3710QX2zdUpjl6DQIsIZOwkY03dBngU
oeyYP28y9u5+y7myQ/exqtdGWFqDZyscxycaGJuZhdvwYJkEWZMpolm+L6jHni7fkkX7llVqeZWt
VG7gotqoYmFqCtXZCkQIpZTWDUm7MPiyquJx5XKoYnIiwDKVB6Kq5iwQhBFoSMEjgUgusQEQBRF0
jcGyNHx7NtC02QAb0wxdFoHBOFKavrYcmt93PP6Pq03xR5qIwXGu1lWqRLcxMb8jRZA14p+5fPlV
wJf+EiwELjf1X746rb3pkzPOKxcE6q0MWK8BoWwM0jm1GmJXv0ZfsFVBlDQOvep127CtvwXVuq+6
zMyUiYHVRRw4NOe0f/6ptxome3/FNN6bJtX/mKpOk7mGnz9R9t/jkvDj9ZTMpBkMEEhBqRkRZLQI
tYig4RlvZIT9Zp/hvKLXBrpX5+C0DuBAg+DYWBknTowgcgNkUwRGmqnYjkjv5ACptIm+9atRbG8H
0TU4oYOphUnMjI2hMttQN0+3pGKGKRfEuUC1zuVzo5ZrSZukLIZiSxEm4/BrFTCDwWaxBjOQni7g
CNWfAkxLEkRdRyRV2iYCO60hx2ISvOxoP+lV9X+edeufpQg+uD5tL2tguyy35gkwkJENW8BcyFU1
hiy34fqlDkB50VooXfvTeeNvv1bxP3g04A/Jn2+y4qFDsx4xWs0o+9pOMp9hFCE/1QfcrgNP1QR+
6dNPqqrMFV0ZvH1jB+Yihkx3DnsPTD3amc/3ZA3/2GZ3YVXZd/m+SfeTU673QaFhQQpFyyEwmIrQ
YzTDAIJZX7+6EukfXa07t/bZPjr78oh6L8MR38TBE+M4fuQYuBMimyUgeYZQustaBJsDxY5WdF45
iHQxDzdwMD01gcmREZRnY1G/aQGpTHL3ZATBBdx6rOsrtuXR0taOTC4L09CgMR0dvT0oDx/Gkad2
Q9eZErfKeE56PW4m3VIkPncRceiGganpipcO+VNdJjDjCMtg5v/xGtrPDnvOHzcQvL+VnFvA2tQz
1tXYYeCWYjysfS6I48ALDcResgBsJq+/ULAO7vejR37UCD4qv17JJFibIahI5l8jnVxQ2IJ7ORrB
T76+vIWyR+T+ioDvxgx0o0oRNFyMzXoYf3ZirfCiP7x1s6UV4K969HgDx6a9H4LxX0mZRNE2cnlr
4QSWLpDWBaYcracRGP+rx/TfsdFuoK8vCzKwCSdEBvuGJnD44JOIGiEyGYAUGbj0BnMRbA0YGOhH
a98qkJSJSmUe+/c+iZnxGQQyw7UXga5pkr9sRMrjlbqKWLV2EKWODtipFELPReQF0BiBphjzUMVu
UhMpjyWncUHEc2xkYitBLBKKTEbJJ47Ozw75uK3AzJ0DfakPnxgJjZxR//WZyPuYE5w7ez215QTQ
bgA78lR53Vnv4oU3FwxAmlQHsJIKiqSX940Z8xuCQfv8gnuD+jkBNhsUfZqGqkkQBKI9o3F8ZUQM
ydIfS5hCK8lB9ohTyUg2ZYBqFHMV/PStmzs+f316VjtyeAZf2B983rLZF/JZdud8XWz0GvxAi00h
MSEl1wsRQaVm/VqO4E+2pBvWhl4d9vorMJnuxIHjk3h694OolgOkMoCWAC+YjWDqwOoNg+haswqa
yTA5NoKRJ49gYSEA1QHDotDtF04ZV7J8L4LnAPmSjb4Nl6GrbzU0XYfvOJgbm1DJgpm2VV2aRCEi
p6HmVWcNoioZEgixFCtxfzwWP9i2gePDPpjwun7vvWu/NzdF8djuBbhu9VX5lPgeaUhl9tkHh5Bm
qwUE+m2KHouo5bsRLX++zlJ2wQCUAefAotl8l94IXDmWzNB+4cqU/ob75ho7V0mqisTLi1EN8bgj
0L2+IJvE2wyvAceUfkGoLQyk55Pn+NRpzbKjNR+eQ3/rJ7Zk//i63Azu+8EE7ppkH1qdpR9uNzmG
G+F9FQ/MNAkyphQbcIy5+g7Otb+8zHBu3N4BtG/ZhLniauydLGPfjx7EyFgN3AKyJSbV2QjmIrWT
0uCm9SgN9KgKxtDxIYwePqZkVEYKsHPs1A0Tp4DHI456TcDOMGzYMYjetWtg2Sl4ngffdeCW6yr7
tTJpVRZUkx3CAGGtrDyfzKp12dQkqRweg1CCVXo+HkToW1PCdx+Zw/TELLl2Wzv+8QdDnx+brv3G
qoI2vZwZsbVEmd2fIUp8IasyssZ9LjHtcuyCASj3ZEsjBsRKAFBJ3Slp++m89Q9fr3l/vCfkT8qO
NksI7Exesz+MUGlw3Nia2uAHHm5KCX91mmBOyPRf3gAK6lM0wrgy48t92SriFlKr/PHluTq+9p0q
9jj6L1rp4B90KW8RFC6X+6SRqK0AzIcCMw3rfQXC/+iqgoPNm9rhDGzDoQbB/kd2Y+jIFFxJh+Sp
AlxUjhCFwOrBfnSs7wPXKIaPHMPI80MKDFZaejy25JIhwee5EWSFtmdNNwY2bUS+VIJbq8FxGuBh
BGeuAjNtwcqkJOcSb9fAGLjvgbt1pK14OzDXkwkOiZdfLfFMskQcBIhgI3CqePjJmccef3r6fekW
9kC+qON8bIlQwg+goAObMrEqSNIzxiXSu16UBzxxCQ58LrshZz1xMAzHdzWC9yNZ8rdCLhFU/T0N
3jdcD45rPVre1gkWPFKpB8C9dYGRMNY335lhyNgkloURLbs2Q78ppifwt0N4/HBo/sw60zsin3xJ
a8yFcqa0rIJIXk7rmazpn9tuOrfe0EfQdeU1OKKVsHfvIRzed0hNLAgzFJpUHTciBA7Qurob3ZvX
gzOB0aETGH7+mLqxdpoqMIizKD4l+OrVCIZJsPG6HegdGEAYhqjOzUG3TISOh8bMAtIteRhpCzwI
FdDkUiuXZVGrQpdpNRhcjyBjU2RTFLpcjilVx5UxYUvRwnOHPdQqDgZXmV9wPPcBh1IEvlAP0Nkc
CU9olk4d2JojcImAvM6XkvFdFgBJUmL59+hgvtrW/9c2i67+s+lGL008rTx+n04UKSo95AYK7UTZ
f/Ns2dshdWyCiAM90k065KTu6knfRw+TkxrUgMgrt1Avs6aToVIvlO4+OHuk4cdB+vq0zOi4uhHD
DeP13Rr53B15p3jtjk6Egzvw0EgVTz36PVTmffAUwC0G5kdKcJUq5dF1zUboORvTI2MY2v+cSixS
mVMAWOqSqSVX9vpWOAqtWWy+ZicKpVY0qlVEYQjdtuBV66hNziHXVYKRssF9X4GZkXioZjplojxZ
Qa0iUChoKKSAfFZGcUSR3Qr4ajWIwMwc6nMEOvVQc4MFuXY6XqTiyAAUJQb1wHhJRUOWHN2YPkR/
mqJD0jiI4F0Av7dcWxYAJfjyybizleq3krSKQdjaNxWN//FQ2fsQi8ToQNLMI5/iST/CXOKBOXCk
zdbf3Wsbb0yHlfCoQ26lTLt1oxZ9n/j8m/IilUKKbh5vHcsoy0zUAxSFjje8av2aB4/t+ckxr/Ev
7YQgpXHUPIAH9of6NO93b+vi2HT9FTiW6sEPHnoGz+07DmYCJEfjkth8hKylofOaLbC6WjE3MYHj
Tz6ORi1COktg2mcHHhLwhXJZrQE9A53YfIe7g7oAACAASURBVM1VYExHdWFB/V6CzSnXUB6dRKmv
G4ZtIvB8VU5jcSMWMmkTXt3B+NAkbB1oSQMpNScxBh7TY0ZUCAKdcYxPMyXNKmYijJXDA+1ppvg7
n3PUZd8yj0eBdBsEUr0/60UoMoJciqAzTRBU47k75AJ3ZiKL6spns/MCsNl4vSCEKresBABJssz+
ekl/pBKFM0/Xww+nEh5QLgOMUWRsIx5AFIRoBBwLVPxpLqe9x57n2kabfSoi+IeCHj2kJ+KCPgoU
WRy/hCBRPRCYKgfYeFUfXr1n/r888MzQv7QKH7Mu0T3f/OJVlvOW2zfZKO28Hg9PRbj/3u9gft6D
nSMQsvrR4DACoHtdP7L9vai5LvY98igWpmuw00BW7pF7DuAhKVf6XojAA9ZtG8Tgtq2qQtGoxT2l
hm3BrdYxf3wMrWt6YaQsBK4XVzgIIDFm2SbmZyrwFyrI8HlkCnEFhOlMEc5yHxLW9H6Co70jg2NH
GYpWGRMzbjQr8Lws6alGUEUQcJyQvc1q+itBjQN7GgF2ZCnKBsN8IJKG+OWbvGeykmIoJblKnc86
q+q8APSSpIMnILnULpgkx9hpab+/NkPaPzrqb5yIp4rGGjkA6zMW1vcWMR9EKGQsZG0dowv+PEKf
10LuEcI3r05F809V4zktEoDjkcDxMFQPTJqScNAkSk0s1cW33jhw24O7n4crZe2e/vQNOWfTLVe3
g266Ft/aPYQHHn5G6j5hF6jKTlEWaGsporS2D76p47l9BzA7OgvDXB7w0Oxsa4TqdVuu3Y7V69bD
qdcRBnEJUbNM+I6L2SPDKoM2MykEjqtAKwUCts5g6BTjx8ZhGyZ62wzMTYUA0ZUkX1ZNmMYUEE8G
ItxH1W9Fve6jr+Bj6HD9ybUZumBQuuSNVCRzIv+STtS7kFHEi0zqMMcI5L4o6Letr7/X4p82RHR3
tMSnnROAzdKW0qEt2hb0UlmTVbfAut/Rof/24xX/f9cCfnB14v3CJAYcrLqY2j+GWQi4/SXk8iVo
lF6WgcdGK8GhCcHm56sMk45QD4ssjZUswInim8cjEnCpGHECPHX/MxjsSqEd5ONzdVz1hi530ytu
vAwLHevxlfuewDMHxmDIbE/eAdljCwMta7pAixmMzExh8tiY6gvJ5JcHPCxKNiybYet116C1qwf1
akXVhBXANKZq1DPPH0ehpx2pYg5+3Uk8nxx9rCuObuT5EfXwbdqyGlPP7ILvcGQypqqCSHGqpsX/
QTUacXT3ZLH7WARLVECoheNjjfvzNjljeHt8kkl3HJMVDqLUBUrGcYEIFAonAq6Ua2v6H9iG/qbh
sPE+uR3aUoc9KwB58t9GQlQvxEqkH/Lz5wRwW4t2n8NCfG4uehdbJA6V3usWOaePC9QQTx+YHp/H
wKoismlzm406Rsri0IgCCpAz4rG2JBJoePHFU9+Diyku66oawdjDe7E7smCG5Nfe3Bvi5tfsxJDZ
jc9/5d8wNFZDqhDXlLUAMLUUMp2tqBKOib37lT5PZrYg5ILysVo5QraYwvZXXIdsoQX1Sln9vKkg
0gwdE88ehpXPItfVphIQSuI5MLalg0Yhhg+PoaWYxoYNPfDL86hOjiGdIdA1qmI+GaboOlXb78vY
L/I9OOjE9EwNt16hYdcTVTzn+F/JhEu05IkYbLJi0pJnSDMBU5ALHu0hEINswmHYnqXvTmvaB780
5bxhWESHzjZv4qwAlNWEMSHUkrZSSlnp4Xqp/lM3tWLr740Fd0oXHS360pdToCfhHnUQGFz2ZXNn
rOZgXXf7VaxBUWlgfI3N1fanVZersbmmQZE2SKwQkYQ5xYgO4RfTwnhkUsPotIvXrQGu/4kbsauR
xec+dw9qjQiZFgbKibr4Uh/s6gLV2Sm4ZReGZP4Ntuwm2sWZbntvC7Zddy10w0KtvPCCFlE7m8b0
4RMqZmtd0wO/1jgFPhn3+j5GjoyirbOI/t4WRUVVpsYQ1TxkS5YCn2VSpFOxGG2+HIFQKT7N4+lj
HFpQRqlQwtP7J0+kU3S3Ts7cGUpeJxnntRGqarziAm54UwvohIDHmaHp7N3XtvL3dttY9Y/H+V92
2eG3BCdSt7nk+5cEoEiWXNmjMLVCcV9cm6T4lS76pf1u8NSBGrk7tegLZQmwg8U7X5rJuRcYskVd
/zM7pP8nR6NcEISoU+y15QyaSD71sZeUHWJzZjwdQGbQDo8qJZM/MVezrq8u+HjbZcDW19yO786Z
+OyX7lVVjEKegUYUuiAIKIcv66quq2a6mDY7dWGW8/2kBi/kcOoCvYPd2HL1Veq9kmaR5UAZV8rv
adom6jPzaMyW0b11HSI/SDrmpOczITwXY0fG0N7Tgr7uIqgfgFOK+sQwLANI20wJTh2fwK1wWBZV
Jb60GSEwS5ianMdNlxt4dl8D+0Zqfy/5QXJaAUFea/nQXi25mEBg3wVMl5evbCRD23sy+GB/ivxB
SufI6hEWfIorc/j1Vk0//pmF4C9HzvIZSwKwub9GaQW9n0w8NpvaR7bnOD51lL92ezLjWU3WJXEW
Ne6fOn4cK2LKE8Fn2Yn6XTeszmTqQYhJH18cTMnGH7q+leKQDgj5sK3OCBypEcxIj8YETtTNw+uZ
d/2dGwQGXv1a3Dut4YtfuhsmBLI5BhZSdXNcGqkgnAoJ4Atv1ZafEfihqucObh3EussvVzSKLKfJ
pdatxIMi7Vxaeb2ZY2Moru6EbhkI6o7qCbEsE5HrYvLwCHr62tHVkYNwXOjZHGpjQ9CqC0gVDMzU
CJwFgdYWgvYsQzaruovRf1knfnQoBHFm0dffjfu/PybbBj6djvdtOnmuIplx2Gtr6NAojjpLpQln
mnQdsgac4WClFD6y2cL75PJd9sRTE5748+G6tk+j6Gq3+B+XQ/0vNJ/oBUR/stRnnQFAkiQbrDn1
/oJvwfkt3rGImnd2kg88MR/871EPk61YVBtNRI7plKEqDlw0LxuBD7ELfvAXGZP8xZwXooPp3448
o6MuwocPkugdEr/yPa8vEpQbHClFYKf+exd1fubWPoHe234C/zqt46tf/pba1jWb0UFDAkE5PBLv
gHQh22m94NrJMRxOqDLJzVdvRd+GDXAbDVUK0+TkgposrYWwshnotomp547DTFkodLfBr9bjzWls
E5HjYvrICFb3t6G9PYeo4cKyDKRNgpmxY5goS95SVyLXVZ06VndqqgdYak9TaYZpL4PRoXG8+uoM
jh1y8a/7F757FBgxghdmtfJB22pSbMlS1Wd9vrZgtQFQpNoAutamyQc6bPJLDEgdq5MvHK+JP3QD
sTcv20epkPzjU/ur4l6bkKuzOrc9f+mE5gUApMmIMZpQGctopb1wS2al3JzS/jFjcHx6WLzLlcA6
jeS0DQ2XrSnFVZhFVybPKNyAPSQpca/qojfH1tfD4C8rfvCnRyPZ7S2wmUpxgFweOCZ96wM35dyP
3Ngr0HrLa/GDahrf/PrXoYdcgY/IpiAat09K13uxD5wEj1MPVXXl8ldcha6+ATQqcaZrGBq8hovQ
DZAu5sA0DY25ivKGPZsHEDqxvkjGfNzzMXVoGKv721Fqy4NEEYqFFKr1AM8+fRjDR+Zh2QZW5YGe
doZMhsEP4/FvXETo7O/BPQ/PIq/V0N/fi699fQrDNecP2nEq9iNJY79Ud68psHi45TmqHKoOIvOB
iBY6TPYHLSYk8EbnXfGbJBKfnOFcgbkgiWwWT8WQD3GKSV8ZPlbnQiWWS9kLABglwNMWzbm71MbV
oB6WfXUneeueqv+JSU7UbuD+yS8bX5y1BRsFjWLeDV4ACgmUtrx1B3M9PDUafsQw+G+nDQ7qn7p8
/USocbeUWe+53PQ/cmMPR+6G2/Co34J7v3EXZGtaJqvHVI9sgZQZ34Vu4nsa+OqVCHZGx+U3XIuW
9g40KmU1hEgzNQSur7SI6WJWuXepXp4/MI50S1Z5Qq/mwJZBXRBg4uAQVg10qKVXLskzkws4Ml8D
ldWS48+hJyfQ3UHBZJIlW5P4qQpFZ08Gz49rKM+U8dbXtODwMQePPzV/cAB4oEhOPVyBSswIukoa
qnJTx2RbsBeCLl6KlP4vYHaPRT7UbbD355moPFcLfmrcId/YmBUo6gSVEGDh0uIUiuZxT00jW2za
qRfGAJwjRHU3rVTsJ5+k16bZZwyd474ZKLGBf9rwRnmSGR6iMVtRwfxi80CQRTA46wWYAfleSeOw
QBV10PyScm7+3jnrXWsywcduKkWwrrgW+61+3H/XN1CfqSOf11WXWewwX5yeDQnNki+lsf0V1yOT
y5+kWSQpLALpqRtIFzKqPiyX4vLYFEQYoa2/WwHTMjQYVGByaFyBT/aJHNk/hMgP4Ts+Wjpa0Z4K
UJ31IJim+M0soTD15MxpvOzZbW3Y/0ANG3qBUtHE40/M4+6Z6v/gSUjVvMaSKH5lSQfRAM/hSJ9G
8MafylFzCfI2+7W1JfZx+bB6YfCJssd/VfYyS0JA7rX3Yqd5nASgmyQdW0W8zdVK9LjFA3tY9pUd
ePOehejTQajVVIfRooPJi5VjAixwMOq9cF8N+TdZKtps1q6e5xRzhB2Qpbk5IXDQPfUZ05F+23rw
T9xQCGBtvAwjHdvw0Hfvx+TQDNJ5qnR66vNepNdr0iydq0rYdv210HVDgU/WY0miX2yUq0hlU9AM
Teny5FJZGZ9Fobs1JvgZRdpiOPz4QQVQeUMnh6eUhD5fyKB3SyvSKQuHvv+vcCXxXDRg6lT9d7IH
hhD0DuTxyB4HvDaHna8oYGzMx0OPzx33ga+ai0awSXZjihEQm6nWgdO5PiEVLz6HE2j/YWeb/okU
JZ2Vhv/ovkr0C1nGn9tiyyoJUcPVL4VpTWCEqu8WWHWWfS9evBE4Uhpks7/MUI4T0/zdV0oR6Wmx
n7xAwwI44CQT4xftq6GqSxHJuT7bNBEwL0/CiXTczY3jyZamJWibriqR791SdGGsasf02hvwxO6n
cWTvEVgZokakvVhWPRYUcLh1gb71Pdh81c54MHm9pjJnuRhpuob6bFl5PDNtKhm9lU5h+uioUquU
etrUEptNGxh97gSslIm2npKqhbV3tSBlx0uylE+PPrsHtakKMkUNOiOwDarEoGqkGmNKDTPlpDB0
dBa3XmGoGHPf/jJ+MF35jbakrJmwCKgLoebKyB7kxQ++pKxkN53rkZ39femPFnR2e9UJMLLgvaPi
h5+R9WXZTnqpRy4rAOqJ1v8w4nFqK2HqBoHhne34xaN18d1nOHGKpym8SJLiH5ATns6ieuiEGAy5
JmkAFvnsKtsIHs/ZsYrD4dT66YLxwK0tDbCCherW23Hw+CieeeQxJYOXJasXO7mhKZv3XWBwy1ps
uGI7Qt9H4LrKgymvZhpK0SLLa+nWPHgQKK8opxPU5ipoW90BEgbIZ02MPj+C0Atx2ZXr1O+5H6jY
Uapdcrkc/PIspg89B8Mmqqc3ZcbeT23OIzsBNaEkW/c87GBtR4Du3hbMzYb4/q7ZYxT4qpqRIBOO
JMQ6lHwPfdH3kbzkdAXFUt7+q62t9s+mCcf+sfpnpqv+e7tsXskYFDXvxU0YO5spAMolN08IMmSl
ZPbxZKguRv5rzqTYVSb/rWhG0E5z/2pzFlPDuwZbVX8HPy3AIFIzFIg7tmISM9VQ8yLrkXmX/pwR
BV/sZwQF3fz2De1uK5P7rG25CWMNjid/8AD8AEhnLw343GQc6tZrtqieDc91ECmAxeDTDYbI8+FU
6ih0tKjZafKwdsbGxOER9RDIqoas7c6MzKBRaWDd5WvgOR5EFMZlQDmdwFaNxxh75gnVUG7nTGQs
ImvgCOTwIp2q69HTn8fuIxTUrWDHzox8C/bsqeJ7M9V3YpEERQJwMvm7otVJnGRUqiEKBfvdq3qy
f6U2Zqz5818+Xv+vAQm+vD0vx4DQFZ3zrcWaLaVyRA5Lb6ZyKcwTBFe3kD+qBpifqfJnO5eYXCkB
2JUycFlHFq4fnQEY3dQwVRGeMDna3HGsLbraiar5BZOyn99hsBPb24Ib5dNb7tuERnE1nvzWNzE7
7SBTZC9eS5tkulaKYdu1V6Fj1Wo4taqiWeTyRBKASqn8/NQUUhkbhqUjcINYHhWGcOaraO9tU/Xb
8nQF06PTGNwyoMROQRgkO70LpG1TkdbDjz8Mb66KQoumKJQgokpuJUuNuq2jUNIxw4s4MTSNV26W
A14YanMh7tk185gLfMdWoRVRYc0EiZdfJJ5QXt+gxge2Drbc1duW2jY6VT9x4HjlZ3IEP2K6nP5L
kxh5ZUXIygNK7yT7auWXl17nUk/ZkF+Bcgy0ZfX8w3PRe09EAabPgvRKI0A0UkbDD88ADWEEVYff
u+WWtX/WRisY2VfHtpKH0ap+h50BNmZCTFITvO9y7Nn1IwwdnkQqdw7N+TLsZLJR5ih15LD1uquQ
zRdVskGSbfyb3WiSVF4Ym1FBdL69qLya/J2dNjFxeEwJBjr6O9BYqCllS/faLlWO86TsSs75A0fK
NqGbFib3/Aje+BRYSkMjYEjLAeUmi2cHpnRIL293deKpvS42dbuqX1gC+8En5nCi0vj5Tc1Zh0Rg
AQQTCZmvSp2RkIMxX33n9e3fdgTBD5+e/XDZcT4kVyeDSMW3ONlhuNKmACg9jxztn5UzPeSJvojs
cCmTlMdqm/43apk40Wj8bT6ZYrDYREKU6n6I8tCcaolsNLu7yCkgV7k4+Nx0eGTDlg1rO/buRgUE
Uw0Nd/S6WKgD9fXrMT0xg72PPQs9BSVTuljvt7istmpQJhtXqDjMqVbU70hS2FeeS6OKUG4sVNHW
36Wk9VJKpU494qjNV9HW26rKb8cPHEexo4hiexFOpaHO0WIUxVwKuqbhyOOPYUzyhHkNNmHIWRQd
eaZqvcLQlHKlY3UJIxUbrWwaq3pSClnlUR//9NjMp44ABw0SV5BaDYodKYqW+VAJTitCoJDSLn/7
Hb3fHp3wcM8Px37CMPi/tuY0+N7F6f9ejJ3MgvVEfFBTpOSlRb8kO7e1Wv+lzrRDpRRz8/TM2Q0y
zsgbDE/NNvB0NVCbAEo1TIXHWbmdnKPcz+PJ7z77vtt33vbVy2/vxl99eRybuwOULIEnK8D0XB3H
9z+mRAy2cfFLb1NAKstqG6/cpLrVZF+GV6snNEvs/UiyCY1pGRg9cAJ21kYqa8GrNRRPJj3c5PFJ
9druwW4899hzSOVS6F3fqwjqVMaC8D2EroeRqo/JI4fhjAwjI2u7NkVbXkrjmWrJFJqmtPNtPWnU
jA7MHR7B+k4oMUK7zfC1B6fmx0P/V9oWMRuDcullBAfk9RNxReJ920v/NjzUwL27p7ans2SPRdh5
u+NWyk7ygPH2UlSVvQJc3JiFpUz144eiPW0axjwnH2stGDBPowAkSHI6w5gb4Bk/XpvrNsVgXseM
E+CEJ7DXjeX5EoQ1z/3avY8Pf3xH18Avpo2x9LZiiNGGlI8RHHvuqNro2Uov3Qa5XFOVjbSOzVdf
ic5Vq+DXa4iiKAYdjbcGpAmBb8iJB1PziMIAbatjcpk2x/RLrzNTRudAF8YOj6FRqaNvYx+mhqdV
eU42nNu2BdMwML7vGdBaHb0dBJkUUfo+NYZD11T1RMrBskUd9qq1eO7pKXSm6giQRmuB4cCzdfzN
ofmfqUNO9o0v7ia13zGwn8d738mf7ui039pVslr+9u4T11o29liaDsKX0xm8MnYSgEIJPqUWjiKk
/AW6vBcFQNnWl2Z3ymVq76z3Ddkk84Lt/EXCqxHgoZlTFUM5atmNBEId6DAIrBTFAxWuyj7yTX99
3/73vL235adv32yk6zUfQw7BlEvUeA7VE3sR4DvZIF4VaO0sYOt1VyOby8GR8R5J+m0Rg48k7ZHy
Txm/LUzModBROAW8RGg6OzqjiGjJu02dmMTA1jVoVBvwXU9lwqW2FuSKWVQO7UW7VofVJQWmGgSj
ME0NqZSmKipySmWuqCM9sAaHj1bRwqeRbUmpkpqoC3zpgcnPOxDflvuSyGSjmGy6OBfFDiWV/Psn
1uc/Pj7pTDaE2GU1gHIQIpUiyKSpGuv2720vqAX7ECjJmEnecCOeWfdiTcaXa3LsNa4QYsrxR6Q0
SiyKMWUCVNQZ9lZ9TLtnyh/CKNYEbjUoZnSBZ8I4TmmNxEc36XNttohw1ANG3XjfDikL0ikumDpY
TC6vXteLjTuvAGNM1XQVxRITBSeBp+auSCI9Y2N6aEJluqWuFjgV56T4l2qaApqMI2dGp7H6sj61
JMvlVGc5ZDJp1WJ57EcPoTYyibxUY+ua6vGwTIZMSlO0jgRfa4eNMs1j5FAdJcwg02LB8Tj6O018
87szjYcXnF8cSDxyu8UwaBA1varTopkhLsQcUL/Z1javajE7v/zdsddryRIdBhzVcuwAZF13xWiQ
s9gZcix5/FzAMOzFqpWL8SSLTW5LUdShtejR8JYWA8Zpv5dgkQH4I7NL6yWaUJ2JBFoEsFYp3rSO
13ay/z6Q9TBckVsgENQjOS6CXFTcoMAXRHAbwOC2Qazfvh2RHIlRr52M95o0SzPrlT+QVQ8RBKhL
cnlVOwJvUde2VP3UXXiO1AL66FzTBSNjo9Fw1XdmRhqeG2Bo1w/hTc6hUJKtkxo0SbHIpTatwbI1
xfW1d9vYN8owNV/FjoG4sarhEXS1G5g86uDPd02/aRIIFc8nJ0vYFFaGwYmE3C0911vHf38awXvW
byn8vdMIgsfqwT0yTnSTnmvZEltdiOCkKNakLmC22os0cTZBqhSGTjcCPHVJinIEqwJa7hEYy8qR
tIuyApmldZsaHpv3UQ3OfSzZ35tjHBs0OfebvveaDh+cAtMBMONDxX1B8oWiC4hfm55PZrobdmzA
4Nat8BuNuHrBToFPejxKcFLZIR9MO21j7OCwKn3lWvNqaSVJv4gst8mWSgnArrXdSOdTCpBSq2in
06ph/Oiuh+FPz6FYij2dlNbLzjbp+TJpHZoMP1bn8eCzEYZPVPDa69MKlDWHo62kozwd4X9+a/R3
JgT/7uLvpFVDDFVCpTDiwFiXZcy/yrb+pb/HvvrBx2ffaSXluabJZEmCcLTB0Wdr0Kh4wd7Fl9KS
uVMnY68zACgSKVYP5dAIg20aap+yizG5WMom7E6ND2SKtnNlMY0wOLXMmjQuJ/3+4TMF23JJCCKg
4isy1bJAL/NcUkppKBoaefuh6ZjnmuRyjzaBKicnR4tdiBNUKo+GwJrNa7Bu2za41RoED5Wna36W
ollOOlcRTyegVHWu1St1lWBIL9cEn+qpYLIHOEDHQCesrK2mpMoOPzuTAQ99HN31EPzZMlpadRgm
VQS2JH9zkmCWU/pTQLarBd95Bpgdq+DOG9MKpJU6R6mow3UEfnDvxDd2V7zfX/x98jpBb5apSVYp
EdfSZ7zwK2/c2fGMFkT1x0cbn2pdYvKpBKTM5h2Xq8GfF8PENR98LR7KdVLZdPowJtnhsFanKATa
2ZuS5IR5mR0ZXhgPormIGr4sTs2GkdzzLFso2uWuVVm17CBJTuQet988MAfPDc/YLpVIyU+efGB7
hl7HTHaNZWjtKnaKu2qw4KfR8DmqswEW5kNUuQ9Ldi2lJBCXTz5LhUnPmi5suHw73Fo9Bt8i4SZL
vB9ZFP/JDzdsA2OHRmBaJrKlrCqpyQdOAloOgpTAlLObU8VMXA2RUxhyOURODUOPPYSwWkeuJGVh
DCGPm8qLGQOdLQZaixRBKo+vPBzC8st4yy0p+IKhXIvQUdLh1QU+/tWJRx+crr6JN7fISL7PqhBy
l061h13zgQk4vO5WDXv3zf++TE56zjJYSsbPfj1SjfjtWSCvxQ+Te/rrFl0LKRjxVMIYh1MShBL8
TRKCo7lpuUCHTrDNoGjXCVo0gtF5enYARmqkBVVjXEf8AHKomHWBE7FkDJmSJHSLVZqqYrxy3AFP
lloJwIUqwejzFbwy4feahR+5lX2LwT68ozP8nZIdp8QsJaCZIm5BtDTViB0xhqrMfmdC9WWeHYtw
YGRa9tahWEyIknOcsCr1caCjp0clHHLcWZPbQ+zEkmX3VBWhuQmd1/DQqDho7+9Qy6xIGmvlcuy5
vqJbCp0typWYhg47m4EzN4Njux5BUHeRypuIBFU1XVnBaC3oWNVqor2kYbRm4L6H6xgourj1xjQq
DoHrRWhv0TFfEfj6NycfGZ6u3jgBYHHZ6hXyegN4LopDEZHs/v6mtflPCCbw189X/yKb1ISXuiyk
2Q8UCYxV4/uw3gAsGu84lWfx3yU7ET9s8bjgdo0onPSngf0Vgf0V4G1dBIagkHs5y43Lb0kRbJNz
pAnBSADMytbZpNX2LDcn/nMga0BzCfY2fLXj0IWM0+LJHOGNXE+/osXU8iULvqrxQpWVZDY8NF17
wRb5cUOUtvn6HvxOUY/QCIgaliMng0suTNIOcmmuN+L6nm1TbFmnY4fBcHNo4+hMN769awa7j4wg
pUUo5NiSbYbNG6D2tnCdmNMjp5aM5rJLF+U18ZQrmflaitOTdEmmJaeqGSffSSimj0/AzmUUpRM6
PohtY+roEMb2PqniimzRUtWPtEWRsigKaR2rO2x0t+t4/FCE7+8u46ZtGnZuzmJGjn6LuPJ8k7MR
/unuyR9U5uq3tEoXUz+Vsm6RPdwAjgLIyitI4hjbBnpetyV321eenPtIK+BlMxoelyO+zlZvXXQj
nl8Q+J7UCMhGfAoMyGGXOsHWPMWeOYHheY5b11DkQ8CPCFrMWEghtYLSG6blvioQaCPAepOoSVtT
PJbsNw9/TjwFqs9AYJ1loEOnGHZ9aISrWGY5Jl9VjuRgEI1KdtQWYTwlXm4zJTgePTSLA24Ek1DU
lHcVapbLzhz92Lp8gIoHtb+GJ6ukauiOgJGS06AIBroZ0jaBZD1mq/J5DGAwF1f2ZbHjslV44sgq
/PPdz+Po7Cw683KY4ikgkqTPQQFM4fdKYwAAIABJREFUA2aGh9Dd16/KbFJcQJvebzFSySnvJ2O7
RtVBa08rfCdQD5K8TlL3N354VDUIZYsZiJAjlc1gbugwJg8cgBS4SPpEjtdVDeVSjJrSMdibQqmg
4e5HGjhwPMCbbkxhYJWFiblQHVZ6vtHxCJ+5e+orlXr9bataGGacU/dgHYArhVCbEMrEicvuvpAr
8vnajszvZ/MMeigWXt+S+vVDfvhEDnhoifb0M4Ao4zi5vMo9PyrgmPZkxx7DW1soDi2EcJI+Cgk4
yXbIZVgkMaDaoDuJA4Nkw0e6BElxTgCSJLBciDiyGsW1bZIWiMBDsazmZYnTmidQyNL0nqGq9535
huLLWiyKsOFg/3gDPcZJ/6L+q3Otf1tB3CaD5+fKmiKXLRaqAFnNOxYUh8Cw/6iJNf0pbBk0UTA5
5iocvtBx/EQNllbGTRs6sXPrTnzx7nF8/QfPIKVHKEpvmIzyjXdcl725BNXpOubGhtEzuA5OuZwQ
zuQkSa6Mxx7QSJuYGppUWW62taAyX57IreZHp9FYqGPNjkGYpqkAPfXcsygfO4BcLi7LyeQlrqYQ
5NM6tq/JqOz3M/eUUW0I/MLr88imGSan4znQMuE4dNzHP39n8q+rrv/uYp6pfeeaJqdWXCeEWi79
RFjawWRmr6pGxuu2Zn5+as6LBlqs3zpBg4lg3N31GpCTS/S5b2AMQjXKiBK1g6dUxI258bZe8u8v
VmW07PmAsgAuA9u9EwRjAUfGiG/QuY5PVABKcHMjqIehn5ssO7A0gvpkIHc7R9aWWxSEGPaEEpTK
zKtNwzv60xwTDaASRejMxGARSVAtd5+UurnpBQ+PP1rH80fT2L4li01rNNRrITxNbmuqYe9TI2hv
ncGv/eRl2HH57fiLT/wIk7N1DLQT8DApoyViAVlomD1+VHlBXe4Ew6O4g7YpguA4GfNIJYlMOHLt
BeUJpVJGtw241QamhibQt2UN0rksvEoVJ574EWpj40qLKGvFLNlgUIK3Javjqg1ZtaXCp+5aQC5D
8ctvLijgTM+FqhSXy+g4ctTFp+6d+FVXhJ9YUzTUbkeCxuNLZDvDq5IJYlqiJpcdaOOUYAjArZ2p
j/e3G+ST902+rUcn3/RpEBFNqATvfNZkStRqYBNYtpw2QeE5l1YncEETUkMRB+QDaYqANUdfnEq3
z/Yet9qY0hnt6rbjuykvIDMZyr5AX1rHpCxTeaEarrgtR9/UYgb4v+V9B5hlVZXuv/c+6ca6lau6
q0N1pJsGmhwUEAxgFtOoKA6mGWcMw1NH583oG4xjeqPO4DzTMKIyOgKKEhUlh+6mG+hEp+rqUDne
WzeduPf71j6nmga7kVCFlKzvuzQ03ffeOmedtVf41/8/VGS691cOccjRp39wzYKfBZobIoxMTOH3
v6thZLSA00/IwGERXF/ByWQwMRVg/PaNOOtFx6D7Sy/H5V++DyMjI+juUAiD+DzQDpgGyiMVFAcO
oqN7SRwFE+97bKKi9PL45NC4/ku51gLcqqudSXCBni170baoA53dXRju3Yu+RzbpSpdocHWDWcSk
lTQmbG+wccaqLIYnI3zzp+M4ZZWDd7y6Ef0joe5JdjQZKFUUrr29ONHfW3qLycPfM5sfAgxoEkkG
LLA5DjCmk/kkhxVNwvgLXvGvpj/3zjMaP9A3Uh/bM+H9YslCB3khMDyp9ArlkzGMqOnrLYCmPNOe
TT12L4ydfSbnxk+fJZ+EUiga1rguAiiKcHVkqS4NsaLN0AAbuZRv3VuMYPoKXfTnCecWSixwDLys
wcIjfSV4kueWZnA8pdak0UHJKgkwq8Mby0lxQJGCcnDixytEIXq3jMEMPBx7bC5hs4v3MmA1YP0d
O7DqhCl868vn4jOfX4+BvfvR3aVQ9RI0C81TDYXRPTt0RUxVKwELDh/R6+9AgtGTlaTAiG8fzXl7
N+1CY1szFq/pRs+GdRjatUPnQYUWW+d6poiHxsRctaA1hVNXprF+u4fbNlRw3hkFtDZyjIwFmmzS
ThtYv83D+i1Tt4+MV9/Z1sgHClkDY8kWkEYG0QMggB2h0hHPZodQORExta0qOFeszYrC0kaBy+6e
+uwBA7hx1MPZrYbe2z2aMcQ0HWSkc+IRPYWpD4Q45ZoFloJn9JZ07bMEe4+Uzg+nCIVMa3xMaiap
6Rf9dw2SOJh/lzWEsdNn7ftonS8vzrEY/xzdpAla1rYFmgyOJm58osmRWgprxCclR+gISLhAooyl
AoXuA72C5EWcxVWaYzYA+3vKGBj0dW4ynZxQLpltKeDRLUMo79qEL1x+BhYtW4yhPiDvKF0UkWBg
lqh6R+sY3bsD6WxCYqEew8cRIGBqrBhL3Hc0wnM9ZBoymDg4pjOqtvk5bL3tt+jftkMzFhQaHdgG
sVVx7eA0213ZlcFJy9K4+b4qbl1XwWvOdHDJW+fBa+zEtr2+bi5//8bJ2q/vHfuk69fPX9JmDLQ7
hm5dkPQ/ceAQcykVdg+5UjvFEsWQCiQsUkUKgR01/yojY73x0hc3vuOhA7X6znHvO20hwOsKB6ak
jsTiMGHu6ZeRRNaMydDWzOBk49NNzvJs+Bn7NN0I2sjP8BitS4kqXRBLPeFFiAxPXZc1FV6SYR+f
lMAtrtwvIvaeJY4otWfMd065siMTqf+1Isc/nTEjVENKdBmGfKAYJhXWtNMllRn9GsVtPF2NDdUZ
lh/TgHlNXIvTTEcolUTtpo4CenZPYGLzBvzz5WehfekCTAwAaSuOqPSi6cPIjj0ojw4hlc0/th2t
YmRLebKspb4IZJAr5BDVAwzs2g+TTWDPA3eiMjyKQpOpKXRpc416fHTs5jMmTl6WxfJ5Dq66ZQrb
D/h4/0WNWNLlYMumYbzqomUYES344o/6vjE5Wsl0NgZfGTQY/mcsxC3FEBtrEsOUG4YSlVDhPldi
ty+xj9SJaMfE0Zpbh/Z0/UAOGYbCvZsrH35pg/CPtYVeua2UpJ6kEOaTNpenDnsRTV7ZBJqboTXf
aI/mOaAEf3ZKSSpJ5HM8rsw216ND1B6HmwtZ7PbCDSc3mx9/qIwrslG0ryfy5sPk161ta/h+plDg
p7Yys2dvCRlDohZyzUZPHfgqzSmF0jlPrIURl/OaUkzEF6rmCpy+LI1VXQaqbgilF8BjAWhK5qXJ
UfaBzoUF9O6dhJXaiA9ddhq+9I8lhOUpWJkkEqSISBLo3bgRx7zkfDiZHGqVqvbgeoWIgwRaFy+A
aTCE1TIevW8bgvKA5kvRk45GW/+/tBXT5BJKuilr4YSlKQ0a/Y9fTqK5wcD7Xl/Q+dxERaI+UUY4
OITXfeB0bFy3N7eyJbpsZNKcPxUG14+G6u7RcDoEMc3oIBLenOnRVJsdU9jRuIF+hhbFj33pCntN
sRaQSsAx7Slx9oFAPkjMIfkkoh3Jr+haq6QQpI98rvCBz+pUZ0mRQV2BvGBYnjbQRrsLyQ5qKsGh
0Wv9kPpowVRYnTGuHqC/15pBkE298YDK3v6ON600G/M2ilWJAjmOZPAk00dvVXO8MJ3vlZO8z09Q
0uSkE3UDpy5KYc18hlI5IPUkjb0LPIlCiqInxwMPFHHfrcPY3R+ha0kTdm8fQq66F69963GYKMWO
nfSYtaZHdaKOXfffp9ct04VG5Joa4VUDfZQG5VH0P7IO62+4E97kAFqauabVIF4XxyKRG6GPOcc2
sLA5hTN1sSHxXzdNaVnUV56Z0U3a4lTc4yM415abH8aKToY3v/us997U6/1fbiq3levOip4irOAM
S2nMlkT8w72D0hKPijoSNJQKC5vtr69eaBoP7fbhKvNvByrWD0YYO3/SYhiwGIYshuEnvHoI1GEw
5Iha7E8Nx3q6xpIbV5MKraZAJlKYIoJlKgCIeyViEFYIYav7e1zZc1xanTnkOhc25zK3ZOzcNf/w
0dMvDB/ZhAfuG0Iub+qGMjkYCQ8GCf8cnaYOjzV2SQlI85XQUeNxnN0usKolxERZgBk0MJT6gWjN
c1p+x90PTqDVq2BhCtjw20FUX9yOU1Y2Y88jB3D8scvR86Ju7N/Yi0yLVnHV5uSA0tAktt/5O7St
WKmbzEP7RxB5RZT2+BgdBbJZA/M6TN2XpL4e5XpE9E2N4IaMha5mC0s6Hdy7vY7bNtTw2hdncNpq
B7etr2PxPFOvWE5OxbofQdnF+h/dibdf9iZse+hg7w/u3flPlPyf3WrhvCzH7n2eXqusMeD6J/D2
6DrBZNil/4st/fia9AVjkwHW9dYuGvLlr0/OK7uUhnpo/AgQIRnfPJrInJfiyEuplQAEf+7w0TNW
18Q6sQoVqWAwErEz9StrmHpSQNXaVQflhxrbs1jdZP2YeeZdH7n4mDcV712PX/54B6K0gG3EEc6T
0zsNBBNiOgckAUENuVKx+MxQnWN1g8Cago/xYoDAi/RCNzWrm9PAjj6JX/2uiJxXQWtzzLS+phBg
4+1D2LovRGNLBvXBPpx8QiNENgURTjdd4/uUztGIzsfBh7ag98GtqI+PoGD5cD0TDQ1pdM+jB4Kc
jiNLIzXK+SyBjoKN1V0pLGpP4fr7arh9Yw3vfU0eZxybwgNbXNyzpfSf9983tZ0EUYmqbXzSQ2Q7
GNq4B2ObHsVnr7ioe7WT3jwZAG9eZKLFAkapHQUcESLFOX+5ySBoLtyatf/lxE4Dv99e37it4v8y
YDKyDNSERP0Q5/L0K4oJrldnLZDy/HIzzrXlc+d7h/xmRk0nrlxBiAg5Q6LBkst8yU84I82xIitv
eVBmrjn5xPnN81Lq7KG778d3frwHYxloBAbB9UtBXGxQH47wpWHiiIFk+sgtR3GBstjheFFLiKlA
wfclwhqxE4RoTitsPQjc8EAFnRkXDQ1UBEHngJQTHFsIse7uMYzXRTzgNyexYFGWRAx10KbjkWjL
6EUw9YY8Q0ODge52gVrdhmAmls8PYZtCz7MzxFRgkEKRgUUtDlYvSsN2DPzgphL6hj188uImNKYF
fnD9+A3X/G78lX1F970/Pzj1iuvuqUqHENiElq5ECA0bd/3bzbBMF1/+5huPOx34j7In1/ZV8Lqs
YBbtcVF1fwwBNZRCg1LIQmHYlWujyLr/eJin/OVK581DlRC/2e9/lRx2wJO6efy4DlmSaxAU7qIF
Wby6I6ULtvHgMMDFc2hG0uOfMT4YypNEGOkndj1NTDistZb9OQIcnduI0R2DU10lK0BxqIgr9gRo
aCFqL4Eg5GiwA+1kdARbiTNL9lh+Rt+UsH+2EjinOaa59MJ4SYig5Y3Sx+6DBm5+tI72rAfLiR2P
jm6KqkUfaCkAmQM1rNtUwWvOyWj20qVdJnp6DDgs1LsYh6BntNVGRYXJ0F80oKTA8YsiDRqVSujP
NQza1TXQlDexoN1G31iEa+8oYWGLwFtfWsCGXS5+s674137N/05z3kDNM9BTC/u/1jN1TiSz96zu
VBithvAtA9laBbd9/hpccMVf4T33n/3Xv/qvu9/e1WH/h2L4zQS1kxyBBVWpKYuDJG8tucFX0x3W
R16/KLNhRTPDPXu9UsDC/5lPbVo6NVyFx616MGBRxsK5bWkscrgWcaRFtOfw1H2cGYyJVQF4i6Gi
u+MR/bMzanlQ4dBBum42sGkq2t7J/deluVPqtsP8mS1V3L+rijo3UbdSCKvhnVPSYC0OzlHc1w1o
cq00T/YTDkM1Emq3GHC8uYVhXkpqJDTX7R+mi5fxEsOv9oaw0q6Wua/4caV86D2o8nSB7lZg2/4S
egYdzC8wLJ5voGteBsXhku4nKj1TZfoopxxvy6CpC4zTl6l4zyOKZ7m2JdCQNdGYN9FUMLB+p4s7
NtZxbJeN809M4Zd3Tu194NGptzBHbZrXYKASceRZhE4CCyh5744h723SNX/a2gZM1iJ4ZhbBhj60
f+vX+MBX34DeR4f4D9bt/t+ubajXz8+gPDSFEokfEvmjFaO1baWwfdT/yGtWGtfRg3Znj/99qiVW
5TlkXWLDSKgflOMTGrzWtIlTFpNgdoRhN3rSKdZzYbwilc2UcY3BrU+VIjo8lb6pUsWYwEPwJDwG
xoxlQBNxZD0zVYd+P0rWOunGvygDLBIcnsHP6cx7gxnLx2hAGhKWpiVbXQgxVeffGXXll5igywVU
QwYPUmPPDOIlSebAdKlKIcNJjoEzGkMNxaejOtTaZloLBDcetDHKfY1wIQ67agKWnG5ehwlwktGU
PvCxu6cOy2R6vtzZ7oDEr3OWQltak25jsmZg84CF5izDmctiOSwCu9Kx25gz0VKw0dFqwbAYrr27
gns21XHR6VmcsMTBN68f++m67aU1C5vYpuaMods0RZ+WwhnePk/gvfMNLMzLzZUIsmcwlgGbdEOM
Wzbuvmo9+m+5H//0g7fkVjU0bCIFdCdnEKKlqyCYIKAoo9GYCzAPGKvAaM8q7BwO3Z2e//HVeYYV
Wa4JnjzNtQgsTIALrUk/tRzKP1nUO9zEsUINBRw/W+BYN3bZ7JJ6xI26ZLXxQHpVcLPNMm1PMVMq
OMT3PVKngowt5JxnygFTJudNBhMdYcgXScZXRoov5wrdjsVPqyh+lgzMv20yrW915f0WXzL0TNqI
JHdVxA+e1+k1tjn8jY9OWtvbst5GxtU5dV39KnBpYNzjqPG4XUHL7fOUg5e3krpkpGUBppG5pBZ+
14CDe8shck6kCcZVMlaKklJ/+ljVOyBJg7wccMyf74BHksaAmBr30GxHGKua2F80UaxzrOpQOK6L
8lGhR3GOJZBOmxooQODRfcMhfn1vFdJTeMNpWc1gMDjk4+ae8gfrhtpbVxx+pHRbigqpCiSoGU9L
VjUlXpoRWHRwMvhbBNbb0oncBDXdD9y1A8e9aAFe8YbTOh762eb29sbUabVaeP6YG/5GP/EyDgCE
RDy9zf7ZSQt52+Z+GTSbfGmKGcuiSLxiLJJTEGqIKnOanE4q6g5YyGRtzTxhadIGpVm8CIRKDps1
Y3EgLXGmcX3xwJxaY46WAIub9yQKRH9naRNxBTLdtcgYBJ9T6HOZVtesBXHOTul3PoZK6jHpNB6w
7nEYnZbAIBevWJMO0W6iu9cUXyMev1UZR0cNziQE3VArQmRGWMKg/Igz2tlYRAxNulIFnHR8gFN7
gcPUv+cHAt1pgvBEOFByoFT00wYT32022U83Fd0X3dZv/Msly4NLxlz+/tHQGGSGD9pL1yz5VoBF
loOdvomASXTDwslNEaQZYKAe8w8T0yddsB0TFm6fVDDtQLd9qkkeoRJcGj3xtBNLC1+aCjoE0g5Q
rHsYGYuwsJWE/BiEQxJgUqNpFjRILG6Reg+DnJMazNwUusBoKhASW+F3D7oYGAuwosPAyYsclKsS
A4MhSI1oWZP5tl1l/54BV6HDBkbpuAuAVpJWCOIvxzkOVN3oZxkruH5vCedWI+vW+c2RUzME+twI
P/y7q3HxF9+E//31133wx//+AMpRkVkp2oiLUErWT4NInHbiArFmqCQRRiK1KI13bx+RP7JS0aOP
KvT304kSxi2Yc9oymNecgR/FoOCne/byZEXSTOQvaFSXNpR2VD3GMyh1kfAUhxPrAmnYw5MRfhgV
ZX632zDfPxJEuL+sbsrysLfV4MdFDCsJuhcomFMEwQ5iRlGe0CkTRRgdy14CNFSSjYUKw0qqAV+p
EYuxQaGC4arEYpupih/gJqjwLmWwVOjzvhqTg/cU+efOKpqXvHKB1/3rAaN7LIyH6iQ4TUCHxrSP
VTB1FOtMRTAcH5M+09MQeuLylsKUx3HHmMCUcNGImEDRTbq1kseaZ3q3OGGAzSXXItRPYISJYoju
VqEvlm0LtHCJBa0k8EzM8QKhEhpQ4KQNFPKGvthbewPsG5KYKIfusnl8eFmzWLR/0EfaYAjqERrm
OZjfmT13ZziFbqbQYSoNTc+ECruqEv2IBQAF5P1Vpu6nm7fMDu4arqmTPM/+VVuTXEaN7X0VD1d9
6lpc9LfnYs1xjcWRiakLuxqjW8oDZa0MTxF1Wcb85MoWhXv2KmzpCz69LO9/MW1KSTlrbYqKNHIY
jrctbcbyjI2+mqfbVUezabDudLoVp2IxI6ob8lam0FYO0OkI5khDpfqqvK0aMNsNmeNFzK74ImwU
rDpcR2YqRJ1D7pRKbZVK9opDTvxYrUH13He7LPntbfWotD8IexfRKmLIsCOIaLnZGGY8dX/Zw1II
rMyYTe2FMBUqqXO8yGXRg0U1vLxBqFwYlSdlzKpUh0ITSUfRjJHkrWjh3aKKDdg1pixp+2/YGlAy
He655qB95Sey/NJ2W2KwzOAYCVwo4lAsQj4T6fyJxJIDLyai1BtzxP0iGR4smtgdBMhY8TFBFLPU
nK0nWSl9thFPseJNrUT921cx+36d+F/oswi5LaBvrCaTJL5Ei+s93YgLjFSBXQMBeoYk+sZCdDSp
65gTXHb7FrUqWKxuSdtKL5Wj6sPOZDB/QWFNtGW8eWUrxqtEJp70QlqkwoYjgKG6Q4rU4aOe4NfU
q86nSIQnl7exzwvx3/96J05Z21wQy52bb9hcek+DwJULDODhKssev5C/0YLE+mF5xwD3Pr+QZsKK
w/Ok/pmXtmTx8vY0FjsmhmoBAsIvPiarrI1OdHpRzu/qHqzo8hXOMhh7sQl+ck6wFRE9m3TacYZx
D8jbgG8ybB5X06nN+L6qGrA471uVlgd2V3g1L5A1GKxOk69kSmQ9qdJpzvoYk/dHSu2ikTQpiz1I
0J5GoTAYxut5aUJeRHoXICxClcmpUoiIv6XcaCe8wuQEIdMgBA1E4/GurAYwHnqOHoO2U2Qi1EkH
R2mCy5KfIHh/6wbvOXmAH1dIsVNoBZOSDM23x+KJhp6IyISpKhn9kQSoIxQmPIFtdLSJSIdllXAd
apoRGat9E6QqxeNdFpqqjOIx8AF9YSJBpySfJ999b9HAkJ5tcT1LrutCJ4IjI9y9L9gU+tGPTu8Q
9w5HcsPQhCSgxcTBicBvy8OiQqmNhQhlgJbWZpyatx5mTN5Sl+yXDsLbqCbqYMAlYLiVNJCTiDPP
pBYUQ0UpzM+zbhNRZfOg+tKSUHwhlwEO0rbalgmsaBJ4+QLrPw+UnQX1mvfZBma86+ROiUf6OaGp
f9psAnsrCgWh4BQcXFhIobstD8/zMFgLYkqRaafjsePRglG9zrvSip/faOL8rGDnm4wtCMP4oWEs
qtSVfJRLXB8qdTAChiyO4UlXjQJyMm+zQXBV0jgIvWoSo6UdbuhdkskoRMEmli+2tCLVyXvLaM8L
tZBQ8eMMe2OG1MMmAHh81yLe80z4UCRih5jmCaabLQ5bl/hjKcU0Y5MWpGIKLRrUKnH3uDq1Mcfv
qwic6QYMKcZhKAFPSS07ZYq4ylXJ7NNPKNEIrjVF0ZjHQtpMJauCifOTw7tRnG/ZiRPSf9OUwU6m
hRaPELgStglMuqpnqMRwcoEvLflM36CMkGhIKxyzIIWecnT7wUHvG8IPdYQc8jmqkJUdU+FPsha/
tB4qZNM+3IkKGtdksHxxtktOFt/XN2WdOhmqtiqCKwfAtPj3Asb0hEOjeSKFXZpaRFLasG9tSj6a
tupf7K9Yvy0E4nsNKXbCoGJwJxXWtoVoShmXX78zdeGFK6MlppS4Y79AXxj9XBI6yeRYtLAZjYUU
2ijE1H1UQ4lGHqM4SM7MdSPUInVCyhJv6bTF2/MWlpAgIi2A1ag4ktFPDRlezRVfPxzK4bqMQG1X
cloC1Jo8iXqIx3ZUJpLURRxUeQx3VSwZtsQBJC1UjyOiHuKqSWvsgCCQivGsZ8FPxzTiRMVABYJq
GTJB2BosK13TkCbBSkKMhRJLHIZTHI4HSyaqdBTbke73qZhuT0OIvEBoeEcgKGOLhzoycUIVC1Xr
lCCWmmca7SGTxScvkQQTUiKo04oAVShyazEIdy1tYp+QIiYjIsh9EESIuubh5LX+mhetriHvCIxs
G8bYRFXfgPFA3VissktNI8QEkYxP1rCgIHCdzPx8eLx4SY7V3e0Bbxh6wiNK/9VhABcUeDJ+1O2Y
ew0p/RMaOR4sBxsmgnCtCMxL62n+YSbViQemIg2kfelidubiBoXrtlrYUHbfuhfRRHfBwYnz81rk
h9ZfK3S00vgkClGshA5n/Kx8xnxHvsF5m62QsUIfbrUKLwoernnymtE6u6UOtSkrlGpgcf5syfgU
OUTrC/Y45jTN8/MUihm9I6yYRjWxRH+EDrjnzAHpyCNBlFER02zQE+klx7LvyZvabPfUBXYaD9eg
1R4JC3hBe4A1BY7r+03srwo0pf1k4z5uVXAusVhYGJCBbtfQZ5gqlswXSVimSEVHI0V5JuNJjZ/g
7PXT6SlIL9BSXdwPh7dW1X+sO6g+saIlSpIJqRf0W5sLyM/37GYjjfZ5OfQGDEsP9ugZMMCaKyFD
xpYYoUq3GOG4NMPihQ1rBodGXWpzGPWg9ETiH/oWTgTkKnFRwDWLfXTblii8O6OFdrgGGjAZXFmq
4sqKKz6mpPEZLnm+ajBsGRYT42X/IzbCn7+qPY2TFjeiGiqUvAgmk6h6UZPi7NzmbOp9kaFelbc4
cjZon3mqWq7+93jVvWZPOfqdJ1ip24GexdM1eUz/g/1BrjrT9pw4IE1HaGQ2Wlc6LyPS7cWI80c6
kpcee/ySTDvHih0PY1Q42Ets8iFwdb/AJ5dEeFe3hx/tt7C7bKM942uHoqcpEhGyNsOywMSkJNUf
iRqLQEVSvHbJYjZ4FTNsqcOYrbQWnSlAu8cpKbGzj+H+A9EtnYbsHZiUkwUrbDRZhDRRIcl4hdML
lRwqeVrdaKzo6SYvqYqvTrHTm22gL4zFAgeLlIOG6F5UWHXb7d6CQoN9cJEjkfZjQOk0KU+Ngm4E
7Hbjfh7B12pAvQxVryYpBZ1RkY7jEUb88OvFceM/VzrRcczh81Qkb+zKRWUDAvNabRSnPEzUo+Md
y7wgnXfeYXKaE0tklETdc3+2SU/3AAAdfklEQVQ7MF67oTLu3pCK5F6RAWyHEWZQF20cz5xJ9tnY
rDvg9J6oR8KAJjRv3WNigLSsrLCgtUkd/873ofcL78Ny38MjUwZSRohBl+En/Rxv75S4tNvH93tN
9ExZaMn6iaIhR51FEHaElshAsxQk1aBzKp0ratV14joksh2p2zd6wy/kKEuBzhxVxD4mywY29EX3
3h6EvzgvpRD67JaBMnt7VkTaUalqJhYFyzRVY3MejQ05uEVfN1mdSGFNK3/Pzrq8Z9xnizM51jVY
VRgfKWPevHkIXdkdWvWDJzkCZTDsDuK0g6Ld0my8L1LxJfLUtPXiPJUen4W2QnsOenJkCam7CCSo
p1Q0Kbm6K22GqNSBYg1QKfUyLzRf2d6QvtTkQaMZR84J6UU/GJ4oXjMyVfstDxHVacHcEKAdE5fJ
Q4v2f8qByKw7IHXSCSRapl6i+EMNOi6B7ffft6vPTHUtJnrbqqd9iyYfdAw9UGQY8QVe2yrxnsUB
rh9QeGjSRi7l65YLS5JdPTExAs3ympNctyIoSlIfL0gWx0VCfJkzFTpSEp25EBMex7o+s1KuuOec
ywkVwjDly6tOcvjbYUc68jWlgXTaxp49/QcHDoygyTHx4MFJlBj4WVnrur4Qtf+ecM9+sWVfoSD+
hsZcu3pLWP3yJehuSL83G9bWMSG9IoFsp/OhJF8loR6D1h4zDAcrSm+fUT9zVaPSNBiEs6QG+nyH
WFtpz1pioARxsIQ3LZ2XffMxJ7W9hZsWiiWfnG6ryKkv+FXv5n2jle1ZYmaIQmRSHCIieYdYmuyZ
NKFny2bdAekGjnsKlUAdsVJWGjEdfGXstzef37FEQAkLpoj0Ue1pBi2FPXWG7/RxnNqgNBN+MWAY
qNG6Y/AYcQFpvlHLRrOthjAIxk/tJMbQoPWH42Vzqqhpkb3RBkoex54p81deEH6yECq5Dwx79LBf
PbwmNOBakWZwoOmKV+W0nP8XAz1jN6wDrqXjchVntJF5241T3l/lwdHk8+/VPXxAWDB695WwQkos
6cpfUuutXTwW2r/2Zfg5A9GmaLobkLAHUKOc+pI66bfJKTkCFWmArEzm632TEGMVvLq9Lfvuc05q
fmNjW17nEgHUuv7x6g8nS+Ubrao64ORtpAgipkwITfj5PPG0o9isOiDlYDxS2BAcXa4zNnXrBbZ1
pVEXl466HK2m1O2AQHENhaK+JBVzG2oMPS5HgxHqipZRDWUEh1j9Dzk4RTxyxuQ3FLUNeNw8FwKo
BAJjVQOhry6rs/Abi9ISm+vAJhmDHi5s4D8mxYBiovLi12zUqxEsodw1+fQ1ftX/kIzCK7qkotnf
vxMBU95kGGHew5Zrb2522EnDwyWE1TrM9tzBTVuHvrK2Da9R4K/c7YXaAUmVwHwCjbUGxJJDavoT
CS/iKNX5qYFpfKR7Qf6d71jSpPmoI4l7pJLf3rRv/I6Bsdogcc00Z41YDYn6ltGf9lh9Ovac5ID+
U6ilJjz/PZFv/s+Yz9qd0PrOcofZB2UAVwlNTpRygGaSQggkxl0GZoTgoQkjEpAievyTfti/xqqY
MeGi9EniywCP2L21KPhYWsh1OZOhHDE9vTmJKMQs4x/bbIzuq4efa7D4p6uBhEwbOhLf1Tvx8ULg
71ts8dY76tBsYd2coSC5lpGdUgwtYK2KCc3SMDFcQntHbn4Z+PeFjvvv1MwYLgGrTKA7EzMy6AUg
Pq2JJ3WfNVTGknGPX5Iy+YeO7TCb53fmkGtM7wVT//bA1qFrxie8vmUE1xEMTQ2OjoREhHSIIHEO
2XNSBRtPUfy6NRXestVTGPDluhMM5+dpIdZMEu4olLAqppZqpV6gH3BII4KyAsC34jGSiHQLgR1q
nsfhj0UGQo3e5HSzfx8i+kZrKvy1oPxICqQEw9aSxITUqH2iY954zaT/haw0TjrbEJ/2ohAsbVKr
iNYiq3bG+n1T1dcPFTn1RCBRImQJtX4UX9uWiRZQsht4IXp3DqN5fjtf4PBXkXr4eAicnmLoTsVE
4jT6slncIqoFotmTxoVLU+yTKSGOy5kRLEN6lhH+29hU/dvbD1R3WMSFZjM0FmxNYM6C53iDaBbs
OW1EP5nReI9g9zHDZrTDVbXjyr718UZuvxMcJwQ8xALXhSOBQruFUd/A3oChwUKl5pqOwwwjxYmq
IwA3BKy0NW4Dg0HEttlcru+0g1+WrHDvEGHodLc+kTtN1jurCYuTIdUt84mcSLEDlQCuJeFIW8Cn
doYhDkbzmtDXO4ZWHiJrA731GNZE7t6RZZeRovv6yegTrQb7zPDu4VxTxoHZmDp+U7F605q0QqdD
ziZjXCJELlLiorTJ3pMx+bkNFkPGDMNy6F85GaofwlN3irpCuoEKCQuWbaAk5Wy35p5Te9444OEW
yyAoTMH72qjHv7bQNO6c53jnXPChi4HmRdj2wy/i/Cwwsj91ve14f9mV4w3jJf7N1Uvyrz/r1a3o
e2Qvrt/s9lZNcaqTkjDMeDxCYIXpFlCQ7L5O1hVOahKwxyI86sUrne0a5SPHImlUgggOT1kYnvID
p+7e50iJfTUPx2VI5zeO7P117bypY5j5zmo5+Ph+P/h6XyiGlxfdqyZ39aLLZK/a7+Nf7AzR5YqF
kPzCBoO9I2uwcylbs4xIeVH4Yy7Vj3sq8lZa7Gq3GZpNpeexPJFb/XO0WWD7mCkj2BWHKyWqyjv3
QIn9eNOQg62DLjYP2bh1yHxko1e/KApksdEJ94P7b9g0Gnywx0+pl158Jt50wYJTJoLo8qCm9My6
JGPgJEUgIlv0k3XP6dSpjceKkjsDiQcDhdBg/6vdYS3EKOwyThHVbHD9B2r7i0saEqJOapaTrO+C
BoWVjWiaUP5FRQRfPzsHpKX80UMl8/r+QRp3qbNXZuxvZrm5Y75t7G93xHeyljoBPPx2oPzz9tYD
3l+X74qUvDVFQAIRPyizqVL5fLHnZQQ83AidS/ArI8ffteHqH1yngFfPL9iVvgCfniTCThqtET+x
HjKr/3fvQ4N37BsPP/mSU1a+6xKkPnPfXb1vLtaDd6ZzeIjyfAIhGOzx0gvUCiEQayaBrRtMrDil
mX+9EkQ02UqzFGMhD9ebQhQ5N38gRHAeNTD1NCYCRTh6p/G9ofpdyjEvyHOcdWqerRkLxEvKIddY
xrIhPlKN8DDgfYqF6uaixOaJgKEjxTUQIo4E7JBI2iEO5ufHbZg1e947IJIwTc7TXMAvSj5+UYev
SXRShwEbdbRQDMsW53fUlHfpt6/d/pk1C3Jvmres7dPGgfFN8FQ/E/J7gZTXBExt0/21BJY3TWdL
blQk8iGuXC8MX7KrxHs6BQ5kchyjJfemnsnq5akQL87Y5qsDYqRlbK1lspMZZ6tMztpObFSHUERm
qNdMHxoNojtaUuqytS0Rbhu2bxmXzpc781wLA863FSYm64iIh1kovcsS1JXGKSojprGin5MQKG70
7BfGno82Jxxw2qhwmI5cOIqWLXFQZ/IOhImDPYNT31jWlvlGExpf09838bFUxD8NJv55POSRCbUv
Y8iHGeSjvmQTXOCAI1AlkcQMRy2KmOxK8Q/x0GB7el00doefOaUj88/+ZB2ZDI/pZpNGcjVUE4LJ
e6TUQMv1IwF2lH3sa+WssjsIcX+Z3/bVlfLGt6yQn7q71rLsYEV9WAk/dGputhpgHxX6wlCoM4ZC
a1o/cuMTLgaKQC4VYZ6QOt/MOgbqfqgxjH8uNqcc8KkaVbYkCEO8ecRl6Ap2w5SIbiCUcbspTrUs
/ioeipdJ8Attwd5EuVxnA0cXVxrsSo5FVLfE8j9aUS5NyioT7uadw5UHF6ZRKvtyD5TqiYCeEDg4
6iLIHSJQSnTi9IK7gBEx9IbypusP8LedVfeuPmtV8OZHZdvrH942usdsTH/hL87o2FdxJaqBwuio
i/Zl7ch6Pjb/drtGNA+WI/STpEPKxeL2CNISyFgmsmkTYxX5eO7EOWh/lg54uGnsGqFZpnGCkBts
Q26IVHR5yWVwA2WkBOvMcLStzPLWcZ+j0ZaoB3Jq2yTvmeeo38/PRKtZV+7L+yZbftq3Y5jOXj02
40a89E5SY9ZhuSUS6jjao1iTZphnAwtt9rNHx/nSyc0jXzj+7JyZOWvRwp27R/b3hgIVZqLqkbi1
gcCPwJOdDUszisRvOFYPMbavqBHrLXkboZTgWdpwMzXgU1dTc9D+7B3wD40luyFMRykDknYMCGp+
MKUnEhzUjxsLIgxIYG0G3QUjwtb+cvPK+Tns7ZtA5D8mc3C0vIwABASz4qFE3mJYV4tgwewbHDLG
hm7qyb/mralM2znL7v7FTTs+4DrqezlbwAliOlia/bJEmjZKRprONDuEUhgruRgpuWhImWhqTiPb
mEKLYyBrckyGck5Vzy9AB3y8scM2JQgB3WACm8clttdocMZO68oitb3Px7/u2XMTLbDnHQMnMYZ2
Fjeuj2QqqbZJRqLXJ+HHWDq/mYkzqpxde7CIK6r/tXXzq98W4qN/ffp3f3bttrO27x26dHFrJn43
zdoKcwoQdcA9xKqbIJKnGclG6wGG+kow+6fQlLfR0ZzG4o5MvLU1R+wF74BI6hkCftJRtrkYYaga
aedak8MHORNY1x/8fQ5RL0HHSrRZluU6RzyaAxL4lZimTkkDZ6e5jkgkAFgN5W/vHQ+Lps227I34
BT+6ascNr60E5l+957S//P3dfSfcdN1D75q/sNDTpIwPjQesfQvUZ8tQbk7FHItHMpaIPu4suYhK
Lt7CgZcsaMD4ePQnAZg+XXthO6BGysQSZGFVYUtVYmdtmgxcmF15vHvUlVHKVF+9wIr/QpDoixDS
iR3l6hHIYKKs8NDUY9wroa6uw18soSmLydDH1W9CZp710+t6NuzdM4GLP/yyE7sXnbf1oQf7RqJS
pUd1FS7mo5XymS0ZjLkBesaqf/THoW/+y/1F/TM1Z0xaXoJLgjbP4+nd83gSMntGkCyRKLUYnkJQ
iuDVpI6AxCxf0v+fvbvZ4mxXGd8oKVJ556iqmK9wMuEqbBLxUXtk4b/DxA4PiR4yTYV70FUo1yXG
g3BbNpvetX7z5NbLP/pzBOUJvOKCZW35hU3rlzaket937lK895QF6GpM/QG935FeZMVA4oe7xvHD
R8fwm+EqiC2g0WBaiNx4BoKTs20vmAioI5GIj1oar9WrxCmoDnEuc8RUcDQKO9Y0O9sd9k8EsapK
s/XBcqhXCqaXdKhEIBTLMZLh1AxDS+LMhx/J06icaW4+lkxaBhiwScWz7rTgJzLP/2dl8P+Gh//5
2tfvfcuLT2vHK1930keXL2s5+95N/W8aqdX31Z4B6mXcDXFXokC/zOE41pYamNsq4j3pJypg/qns
BeGA5ARU8UraoXBJHyRm9+KHgVhDPQFhy/Mm+7C0zL/OOr455nO5IOdc8jI32P/zeviZ5N30P+uK
4YEa8EBdYYkJrHViiawnS/9lQrbZmujzOgoPpsLovlGmMGqIty5P2we3bBju2rnlZlxw4fKTUqVg
W7HK/q4G/r1n8/PvcSX2uHFv81gbWMsY2hymsZJ/avuzd0CWRDZZUWB1aFUhcaiWfMzIGetgZ9QC
cUIopczbCqO+eHi4Uv9mp2n/8FTJihUW/N8nuhgdxbRmujcA8hxoTDR2j2QUxwjwkFHxr0wqn/p8
7Xkif1KwJfpLOfthX/odt964+5TuZqRztvXd9nHjtAh4f+ZZHKHTnDnVusQOD2AphsYMI24W+E9F
9y/5lcbeNsMR1yueif3ZOuA0+0rGNmBbpEkXTyrUoQH/YdwoiWPsDcIf7a+yH52dY2OOwe2Kyw+O
h/Wrco54pMuSy2lhSDyxtEyyaKKLezRU6LIYChmmlSufaDyhFqkmv+obSKTmlAa4Ch5jga3Cf5BK
bFUpdv1woF63vMHHhY3yfRvr5tVpFdxuPUWl0qNel2Qpv1JVCOpAaMX6KMQ5SKDcevLjUb6YoY29
hDXDTk4JN9momqlGz5+dA05vfKVMgQaDYahYw47eCQxLhTH25Fv8JBy4xGAfbU7LZsUNjLt0ynJ4
PHpkNJSPDLvxdOJIxhJxaCpMmo9S2kUJhVnHYcf/mhagI6OpzjL3j8lfbKlGB+fRpMXnrx+qsdOK
vvHuV8/D3xQa1dd/PcpPSs9YGcG0VolyAdMHmhzgGAdoSvJZX3NxR5oviPhj9muwEbCZVPQtjlda
TO9EP1v783FAFU8MGjMWUhkbW8ISDh6YxNBEVUPeUxyHVM2P8tc1D2KTxd6XTSkoZmLK9XfS6idB
6RPZtCe9YFZS7R6p/8YSxDVt080/9HsKeyfopcd44aDv/6uQUMROZsYbRuvvmVLr1zTgDS/uYCfe
OsqXe4h2GzPYZ2aJ3u8AFWQeYJIwIbGGBcB+KfUERrNi0coEYoWlrJq59smcdsDp+5w2YtjSFAP2
TtSwZdswtvYVUYniC2gcAs483jOmhXamV5rSEIuONdSanMP0gntkyM0kS0tkSKE+Lf94svRkMgcU
gKkw1bQiye9N78pIKI92hrOJExOlCInPrITEpkHjEyc24ienNah/vK2k/tL5IzK5z9R2kBhQHegk
DurkYUmxGAZmJS87Ueqcqc+f0w5IN5EiG+nEbXWBwUoF5QMlLTGU008sOxpqKzYGzdREN5xQMHkh
3mDyCLZjo1LjAy02ekgHT9P90kTD+SOXnfImrvT+yFG/Mzucbzu+qdNf0VWxQ9J39kMJFka6dTMa
yqt3FfmVJzaxd/dU+XszfPYkBKnIKGnWsbh1NNs2px2Qbt6eqsJEKeZWcRJZsHmH/sTRQ8X0DT8+
z5E3GHZVJJpN9lphSNiWhbCiNi0SONTjYyY7pCF8NNOOxBgqUYQaCw/12p7OiamjcqQ0H02sQciQ
UwSf5p99caf4/CILrw6V+pWYJRAWoXooN5wKn5sWzZxzwGmicVrspig3EsYRLH9YoHsql04lORlt
xBkWgUHF6UqocwqpWOfN9r3aw9TMLTO9l/yUvxviL8glwyoWPxhPNVxNT06IaZbx2Jmn/8dYVX1r
tYfPt2fwqY2T+FWaz46D8ISj23xMK3RWbY4houOjy6YlIzdCM565gvf07auHrGNxVlx9So6dVwxD
5DICigtaKnprq8E2/r4sv/JMv+8UYzg1UaJ8KnvRLHHW8URSYhqEHy/3R+U9FXb7qqw47/YxZVci
6c1KDExSkvAoIuQzbXPKAUXCNXN/WWrR5pjt6pldJJWAVZsj9spBFwNTkbwiw6K/acnarClnRHdV
oq1dhvXl852gv9+NfvJ0q75Yjk2hMk0R9xQsSh6wxcYfRnF6v6GK+vtlWb5haYpdUgnwvdm6eVS0
UWW8i6jkZtkJ55QDTi8n7Y4Fc2ekFDvOllf6fnjlVGB9bmEWbF6TDSaEGKr5b1QOP3N1yvxC6Mnr
uULl6fo6OU3/9KbdU/zzlMfmFDtixKyG8sFxV/rLc/zyh4rh98xZcg56sH2tG5wAYWflU2KbUw44
zatXoH2Np8YM+6SmEcYS6EqR9htbU7CAziYLGwYi2R8qr8sMf1KFurWiEKWfwYfJpJXxh5z4Rza6
GQT3Kqoja/eFHATN/+zCvPX5bR5vDiDHZwPOpA6T8teikbPwGdM253LAslLojknsZ+DJJKFA4IYa
0WuIjSc28zfQPsb+AdfvsFU/KQPVAzU2DY9/ukn5tHM8Fd/lSVFE/DfWUT6L3ufRivp6VwafPyMn
/mlHTV1mzVI1LBCTij6JpMiM2JxwwMf1kRXRtakZOhZIA0RpjB9n4f/bXEr9n7F1ntg07H5jOADG
SiShEMOonoumhKYyZkrPY494utLRzKQ7EoTrVqTF3w3Uw8sIWjUbTM48WdgfV4fNrWfBnvcOOD2t
oIqBNHNXEKfxjHKlxIpPEmpsKpD+wbHInUDwD/1JeUNC1plDcrFPz+QhAs5pqYv4SGZPQAJP31zq
7bWwWGznaBeD2BiGKuqyrmZ2Hxg/fTSS68Qs5IL085rTeiqziO2fEw5INzJPVUAIlMKZin6PN2pK
r84EB+w0Fi+qMKxxYsULko8lxxdP580SU0lUI6pXHsTvsTMBLRzJAUmoUconr+ylJkaX969SMjg2
K75x56Q6M/VsfvAnseCwyc1sueDz/whmcbuEkC0Efw9mKSeRepGI7YwicUyBh3CIc5CwelbcEH6m
N4D4BweUwkgg9epuH61dHiUvNJ7i7IvAFfvr4bdOyhofWzdJUCl5lDP72ZuRPDAv6COYUD8ToULB
iMURZ8NIRI8zsYODv/5AoAqCyeJMEN2aLKYn3kdL60SQPh3fnkVIIV6bnVPqK2c24mMtDnvffhff
j+UoZj5OTa8WWM/iIXwymxMOSP8gAWrlx+I2s2Fc63aoDUTf1ijUIjBVnAlfZ4lCJwE/p1U7Z8Iq
Uo3Uomjk5Abx+T1u9H17ljg62GETmtmIgnOiCiafqymGRhGLqsyGD7KYMWEf/bvNWJcEe2SmqL6d
ZJVpZAaRxD4UHqlE//ryJval3LDRLRD1zhZAQSAmb58NmxMOGANKFRaKWFlpNi4F13JefJz+fTJi
rVrXZKbem8VUHac5mDHaDFpuSkn8rMGWX1qTEf/wSFV+wFazR+Hm6V6omvE93jnhgAZi1aMxxNi8
2eoKSGA8TzK1JmtLW4kC5AwYMWbRPslkHZgBFLs2epshyXqLofRPbGbv31VlH8gKOSsxkD6rSQuJ
x4TqM1l1zxmCyjoYSAXcThqkM2+6OChTnmZGjMgnZ8zRtWpTpGLd4hlEE5O+cLGGn69oVBd39ImL
Ail/MVvzYZrOlDU+Z2arwLnhgCxWN5/wJFKz5IDT0wSHqRoYb63N4HVWpEIrmO41zmQ+RVIRm4vs
J2vb5MXLmtnf3zGBX7TNEk4wTJrodjBzaQTm0iyYmqItjtBbW7PRitHTCa5Q81l1wpOFEDMJSVf6
++cFg2MoLUcxE5bTzWt+60A1qlwwLzpj0wRbvC2U+1Kz1LSzEkd0ZvA950wVTNc0S71ALaM/O/0u
8rhSoKaaHN7RmqK1w5l7/7Rg6HcVpgI2c47N9DEsH5nk175hgXz3+S1m76bhYMmEkr0z9AlH/lim
jrDa/8xsTjgg7SkQG/11pQjGLFXB05JiL0mp6mgAdtVEFOMPZ+j9yZcXGAyn5qiHN0Nviphbesyz
Mz/bh50Ow8TrMtbfj0bhB2dLLY6+eotejo6eEsr7j9ncOIKTbbHOtECzwXRFPFtGwjDLhWo5oYVp
FtWZ+iRLAGMecKCuntFc+WhGVMFTUt69x1ffaUmHtzUJs7kztDQf4WwYIaVbaGyJaEbKkTnhgGEy
Pz2vwHBcmqM4SwhJQn5sLyFyTDSe3iK0cOBM3UaHqC/qCuM9EaZkTHUxE0bN50EE36qBgRQ+fanG
CdBqzNJDSm0YK0HKvGCO4OmciUiAKjJCbYaS+CcaOWCjxXhvDd5de5Sm1Z2pT1IJcSV16pqI/nKG
IGU0A+bq8d7AZhE8MNPvPUeKEKa7zyaP1yjNWXq6yQEFhxISZiGMtMzDTDqgIrJIh4F5TG++zcTF
V7PscLNtc6YKhm5jAE3m7F1sOlbKHsKUiXyXw2d87ETuTNjCvgloIe85LfAxQzZHGtFKw92GphgK
LtM51GyY1FUlikoo857JGEo1G06SZUDHNCvr7Pwoc8bmxiyYMS1AfWsxwr1Pg2ng6RpxypydFaW1
bUyUfIb8EfZzZ8KI9aBWiTkBwxcqUXdic6YNQ2OszjTDQosa0bPyEXr2m3aYO9+SuD2S+aEAUzNV
rR5utNFHbAmNyb5I7QXshHNmFEdA1NUOw1kZjvFZIs6R8civvyml0G4ZN149EZw9mzuxOcbwioR9
9GiaI3/uNjfaMCwuEHZMKaia1Nq+s2EUZVflkd0whp12YK491zbu3u7Vz+NPjdrlaRs11Ik5YTlj
Gm/3QoyCcyYC+tQAtYD5KYbiTIHqnmB6A4wzd8skvojIu3a+bd88GbCFrsTe2aJIGVZKD/fbEtDn
C83mzGI69QAHPIWtxL03a5M4LdV6hcWF12yq6lRYP+dhqcRszVW1Maap5Ra+EB0QwP8HOr8RIsI4
uEYAAAAASUVORK5CYII=
"""
)
)
)
if 小强.mode != "RGBA":
小强 = 小强.convert("RGBA")
def 来只小强():
    """Return one randomly sized, randomly rotated copy of the 小强 sprite.

    The side length is drawn from [0, 160) and clamped to a minimum of 32
    pixels; rotation is a random multiple of 30 degrees.
    """
    size = int(random.random() * 160)
    # Clamp to a visible minimum; clearer than the old `a > 32 and a or 32` idiom.
    size = max(size, 32)
    新小强 = 小强.resize((size, size))
    return 新小强.rotate(random.randint(0, 12) * 30)
def get_avator(qq):
    """Download the 640px QQ avatar for *qq* and return it as a PIL image.

    Returns None if the download or decoding fails for any reason.
    """
    try:
        response = httpx.get(
            f"http://q1.qlogo.cn/g?b=qq&nk={qq}&s=640", timeout=10
        )
        return Image.open(BytesIO(response.content))
    except Exception:
        return None
@deco.ignore_botself
@deco.on_regexp(r"来点小强(\d{0,})")
def receive_group_msg(ctx: GroupMsg):
    """Paste a number of roach sprites onto the requester's (or @-target's) avatar.

    The count comes from the regexp capture group when given, otherwise a
    random 1-6; negative or >119 counts are rejected with a text reply.
    """
    at_data = gp.at(ctx)
    user = at_data.UserID[0] if at_data else ctx.FromUserId
    avatar = get_avator(user)
    if not avatar:
        return
    if avatar.mode != "RGBA":
        avatar = avatar.convert("RGBA")
    avatar_width, avatar_height = avatar.size
    try:
        count = int(ctx._match.group(1))
    except Exception:
        # No (or non-numeric) count captured: pick one at random.
        count = random.randint(1, 6)
    if count < 0:
        S.text('sb')
        return
    if count > 119:
        S.text('强子过多, 呼叫119吧')
        return
    # Materialize all sprites first (keeps the original ordering of random draws).
    roaches = [来只小强() for _ in range(count)]
    for roach in roaches:
        dx = int(roach.width * 0.3)
        dy = int(roach.height * 0.3)
        pos_x = random.randint(1, avatar_width - dx)
        pos_y = random.randint(1, avatar_height - dy)
        # Use the sprite as its own mask so transparent pixels are skipped.
        avatar.paste(roach, (pos_x, pos_y), roach)
    buf = BytesIO()
    avatar.save(buf, "png")
    S.image(buf)
    avatar.close()
| 1,395 | 0 | 68 |
7cef8e80ca6b2bbedf08d6f5a3faf512fb91050f | 777 | py | Python | pylabnet/hardware/interface/simple_p_gen.py | wi11dey/pylabnet | a6e3362f727c45aaa60e61496e858ae92e85574d | [
"MIT"
] | 10 | 2020-01-07T23:28:49.000Z | 2022-02-02T19:09:17.000Z | pylabnet/hardware/interface/simple_p_gen.py | wi11dey/pylabnet | a6e3362f727c45aaa60e61496e858ae92e85574d | [
"MIT"
] | 249 | 2019-12-28T19:38:49.000Z | 2022-03-28T16:45:32.000Z | pylabnet/hardware/interface/simple_p_gen.py | wi11dey/pylabnet | a6e3362f727c45aaa60e61496e858ae92e85574d | [
"MIT"
] | 5 | 2020-11-17T19:45:10.000Z | 2022-01-04T18:07:04.000Z | import abc
| 18.5 | 59 | 0.599743 | import abc
class SimplePGenInterface(abc.ABC):
    """Abstract interface for a simple pulse-generator device driver."""

    @abc.abstractmethod
    def activate_interface(self):
        """Activate the interface so subsequent calls can reach the device."""
        pass

    @abc.abstractmethod
    def write(self, pb_obj, step_adj=True):
        """Write the pulse-block object ``pb_obj`` to the device.

        :param pb_obj: pulse-block object to write.
        :param step_adj: (bool) step-adjustment flag -- semantics are
            implementation-defined; confirm with concrete drivers.
        """
        pass

    @abc.abstractmethod
    def set_rep(self, rep_num):
        """Set the repetition count ``rep_num`` -- presumably how many times
        the loaded sequence is played; confirm with implementations."""
        pass

    @abc.abstractmethod
    def start(self):
        """Start the generator (status becomes 'Running', see get_status)."""
        pass

    @abc.abstractmethod
    def stop(self):
        """Stop the generator (status becomes 'Idle', see get_status)."""
        pass

    @abc.abstractmethod
    def get_status(self):
        """Get status of the device
        0 - 'Idle'
        1 - 'Running'
        Exception is produced in the case of any error
        (for example, connection to the device is lost)
        :return: (int) status code
        Exception is produced in the case of error
        """
class PGenError(Exception):
    """Exception type raised for pulse-generator interface errors."""
    pass
| 86 | 632 | 46 |
17cc2151cef7cb62503e1ee06869d1ee88e9b6bb | 1,113 | py | Python | docs/_examples/mesh-smoothing.py | yijiangh/compas | a9e86edf6b602f47ca051fccedcaa88a5e5d3600 | [
"MIT"
] | 1 | 2019-03-27T22:32:56.000Z | 2019-03-27T22:32:56.000Z | docs/_examples/mesh-smoothing.py | yijiangh/compas | a9e86edf6b602f47ca051fccedcaa88a5e5d3600 | [
"MIT"
] | null | null | null | docs/_examples/mesh-smoothing.py | yijiangh/compas | a9e86edf6b602f47ca051fccedcaa88a5e5d3600 | [
"MIT"
] | null | null | null | """Smooth a mesh.
"""
from compas.datastructures import Mesh
from compas.geometry import smooth_area

import compas_rhino


__author__ = ['Tom Van Mele', 'Matthias Rippmann']
# Fixed: a stray trailing comma previously made this a 1-tuple instead of a
# string, unlike the sibling dunder attributes.
__copyright__ = 'Copyright 2017, BRG - ETH Zurich'
__license__ = 'MIT'
__email__ = 'van.mele@arch.ethz.ch'


# Select a Rhino mesh and convert it into a compas mesh datastructure.
guid = compas_rhino.select_mesh()
mesh = compas_rhino.mesh_from_guid(Mesh, guid)

# Extract the data needed by the smoothing algorithm,
# identifying the boundary vertices as fixed.
vertices = mesh.get_vertices_attributes('xyz')
faces = [mesh.face_vertices(fkey) for fkey in mesh.faces()]
adjacency = [mesh.vertex_faces(key, ordered=True) for key in mesh.vertices()]
fixed = mesh.vertices_on_boundary()

# Run the smoothing algorithm (presumably updates `vertices` in place --
# confirm against compas' smooth_area documentation).
smooth_area(vertices, faces, adjacency, fixed=fixed, kmax=100)

# Write the smoothed coordinates back into the mesh.
for key, attr in mesh.vertices(True):
    attr['x'] = vertices[key][0]
    attr['y'] = vertices[key][1]
    attr['z'] = vertices[key][2]

# Display the result in Rhino.
compas_rhino.mesh_draw(mesh)
| 25.295455 | 78 | 0.698113 | """Smooth a mesh.
"""
from compas.datastructures import Mesh
from compas.geometry import smooth_area
import compas_rhino
__author__ = ['Tom Van Mele', 'Matthias Rippmann']
__copyright__ = 'Copyright 2017, BRG - ETH Zurich',
__license__ = 'MIT'
__email__ = 'van.mele@arch.ethz.ch'
# select a Rhino mesh
# and make it into a mesh datastructure
guid = compas_rhino.select_mesh()
mesh = compas_rhino.mesh_from_guid(Mesh, guid)
# extract the data needed by the smoothing algorithm
# identify the boundary as fixed
vertices = mesh.get_vertices_attributes('xyz')
faces = [mesh.face_vertices(fkey) for fkey in mesh.faces()]
adjacency = [mesh.vertex_faces(key, ordered=True) for key in mesh.vertices()]
fixed = mesh.vertices_on_boundary()
# run the smoothing algorithm
# update the mesh
# display the result in Rhino
smooth_area(vertices, faces, adjacency, fixed=fixed, kmax=100)
for key, attr in mesh.vertices(True):
attr['x'] = vertices[key][0]
attr['y'] = vertices[key][1]
attr['z'] = vertices[key][2]
compas_rhino.mesh_draw(mesh)
| 0 | 0 | 0 |
e5c22083523c248ad1b2c6de86eb7566e358c791 | 7,922 | py | Python | tests/managers/event_log_tests.py | dvzrv/softlayer-python | 9a5f6c6981bcc370084537b4d1769383499ce90d | [
"MIT"
] | 126 | 2015-01-05T05:09:22.000Z | 2021-07-02T00:16:35.000Z | tests/managers/event_log_tests.py | dvzrv/softlayer-python | 9a5f6c6981bcc370084537b4d1769383499ce90d | [
"MIT"
] | 969 | 2015-01-05T15:55:31.000Z | 2022-03-31T19:55:20.000Z | tests/managers/event_log_tests.py | dvzrv/softlayer-python | 9a5f6c6981bcc370084537b4d1769383499ce90d | [
"MIT"
] | 176 | 2015-01-22T11:23:40.000Z | 2022-02-11T13:16:58.000Z | """
SoftLayer.tests.managers.event_log_tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:license: MIT, see LICENSE for more details.
"""
import SoftLayer
from SoftLayer import fixtures
from SoftLayer import testing
| 31.188976 | 99 | 0.454304 | """
SoftLayer.tests.managers.event_log_tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:license: MIT, see LICENSE for more details.
"""
import SoftLayer
from SoftLayer import fixtures
from SoftLayer import testing
class EventLogTests(testing.TestCase):
    """Unit tests for SoftLayer.EventLogManager and its filter builder."""

    def set_up(self):
        self.event_log = SoftLayer.EventLogManager(self.client)

    @staticmethod
    def _one_sided_date_filter(operation, value):
        """Expected object filter for a single-date comparison."""
        return {
            'eventCreateDate': {
                'operation': operation,
                'options': [
                    {'name': 'date', 'value': [value]}
                ]
            }
        }

    @staticmethod
    def _between_date_filter(start, end):
        """Expected object filter for a start/end date range."""
        return {
            'eventCreateDate': {
                'operation': 'betweenDate',
                'options': [
                    {'name': 'startDate', 'value': [start]},
                    {'name': 'endDate', 'value': [end]}
                ]
            }
        }

    def test_get_event_logs(self):
        # get_event_logs returns a generator by default; drain it fully.
        logs = list(self.event_log.get_event_logs())
        self.assertEqual(fixtures.SoftLayer_Event_Log.getAllObjects, logs)
        self.assert_called_with('SoftLayer_Event_Log', 'getAllObjects')

    def test_get_event_logs_no_iteration(self):
        logs = self.event_log.get_event_logs(iterator=False)
        self.assertEqual(fixtures.SoftLayer_Event_Log.getAllObjects, logs)
        self.assert_called_with('SoftLayer_Event_Log', 'getAllObjects')

    def test_get_event_log_types(self):
        types = self.event_log.get_event_log_types()
        self.assertEqual(fixtures.SoftLayer_Event_Log.getAllEventObjectNames, types)
        self.assert_called_with('SoftLayer_Event_Log', 'getAllEventObjectNames')

    def test_build_filter_no_args(self):
        self.assertEqual({}, self.event_log.build_filter(None, None, None, None, None, None))

    def test_build_filter_min_date(self):
        expected = self._one_sided_date_filter('greaterThanDate',
                                               '2017-10-30T00:00:00.000000+00:00')
        actual = self.event_log.build_filter('10/30/2017', None, None, None, None, None)
        self.assertEqual(expected, actual)

    def test_build_filter_max_date(self):
        expected = self._one_sided_date_filter('lessThanDate',
                                               '2017-10-31T00:00:00.000000+00:00')
        actual = self.event_log.build_filter(None, '10/31/2017', None, None, None, None)
        self.assertEqual(expected, actual)

    def test_build_filter_min_max_date(self):
        expected = self._between_date_filter('2017-10-30T00:00:00.000000+00:00',
                                             '2017-10-31T00:00:00.000000+00:00')
        actual = self.event_log.build_filter('10/30/2017', '10/31/2017', None, None, None, None)
        self.assertEqual(expected, actual)

    def test_build_filter_min_date_pos_utc(self):
        expected = self._one_sided_date_filter('greaterThanDate',
                                               '2017-10-30T00:00:00.000000+05:00')
        actual = self.event_log.build_filter('10/30/2017', None, None, None, None, '+0500')
        self.assertEqual(expected, actual)

    def test_build_filter_max_date_pos_utc(self):
        expected = self._one_sided_date_filter('lessThanDate',
                                               '2017-10-31T00:00:00.000000+05:00')
        actual = self.event_log.build_filter(None, '10/31/2017', None, None, None, '+0500')
        self.assertEqual(expected, actual)

    def test_build_filter_min_max_date_pos_utc(self):
        expected = self._between_date_filter('2017-10-30T00:00:00.000000+05:00',
                                             '2017-10-31T00:00:00.000000+05:00')
        actual = self.event_log.build_filter('10/30/2017', '10/31/2017', None, None, None, '+0500')
        self.assertEqual(expected, actual)

    def test_build_filter_min_date_neg_utc(self):
        expected = self._one_sided_date_filter('greaterThanDate',
                                               '2017-10-30T00:00:00.000000-03:00')
        actual = self.event_log.build_filter('10/30/2017', None, None, None, None, '-0300')
        self.assertEqual(expected, actual)

    def test_build_filter_max_date_neg_utc(self):
        expected = self._one_sided_date_filter('lessThanDate',
                                               '2017-10-31T00:00:00.000000-03:00')
        actual = self.event_log.build_filter(None, '10/31/2017', None, None, None, '-0300')
        self.assertEqual(expected, actual)

    def test_build_filter_min_max_date_neg_utc(self):
        expected = self._between_date_filter('2017-10-30T00:00:00.000000-03:00',
                                             '2017-10-31T00:00:00.000000-03:00')
        actual = self.event_log.build_filter('10/30/2017', '10/31/2017', None, None, None, '-0300')
        self.assertEqual(expected, actual)

    def test_build_filter_name(self):
        actual = self.event_log.build_filter(None, None, 'Add Security Group', None, None, None)
        self.assertEqual({'eventName': {'operation': 'Add Security Group'}}, actual)

    def test_build_filter_id(self):
        actual = self.event_log.build_filter(None, None, None, 1, None, None)
        self.assertEqual({'objectId': {'operation': 1}}, actual)

    def test_build_filter_type(self):
        actual = self.event_log.build_filter(None, None, None, None, 'CCI', None)
        self.assertEqual({'objectName': {'operation': 'CCI'}}, actual)
| 7,198 | 17 | 482 |
96b5c9d649f8f78abe33dffa08a7507737a3b4c1 | 397 | py | Python | sensors/MercurySwitchModule.py | akesiraju/raspberrypi | e8ae5e535a9953631ffa2d1e7de926c9dc19f961 | [
"MIT"
] | 2 | 2019-03-26T23:47:40.000Z | 2020-03-28T03:23:31.000Z | sensors/MercurySwitchModule.py | akesiraju/raspberrypi | e8ae5e535a9953631ffa2d1e7de926c9dc19f961 | [
"MIT"
] | 1 | 2019-03-27T10:59:14.000Z | 2019-03-27T10:59:14.000Z | sensors/MercurySwitchModule.py | akesiraju/raspberrypi | e8ae5e535a9953631ffa2d1e7de926c9dc19f961 | [
"MIT"
] | 1 | 2018-07-14T23:55:14.000Z | 2018-07-14T23:55:14.000Z | import RPi.GPIO as GPIO
import time
# When the mercury switch closes we read a signal on BCM pin 21;
# pin 20 drives an output (an LED, presumably) that mirrors the switch state.
pin = 21
light = 20
GPIO.setmode(GPIO.BCM)
GPIO.setup(pin, GPIO.IN)
GPIO.setup(light, GPIO.OUT)
# Poll the switch roughly three times a second, forever.
while True:
    if GPIO.input(pin):
        print("contact")
        GPIO.output(light, GPIO.HIGH)
    else:
        print("no contact")
        GPIO.output(light, GPIO.LOW)
    time.sleep(0.3)
GPIO.cleanup() | 18.045455 | 51 | 0.647355 | import RPi.GPIO as GPIO
import time
#When the mercury is contact, we get a signal on 21
pin = 21
light = 20
GPIO.setmode(GPIO.BCM)
GPIO.setup(pin, GPIO.IN)
GPIO.setup(light, GPIO.OUT)
while True:
if GPIO.input(pin):
print("contact")
GPIO.output(light, GPIO.HIGH)
else:
print("no contact")
GPIO.output(light, GPIO.LOW)
time.sleep(0.3)
GPIO.cleanup() | 0 | 0 | 0 |
d2714d988a8b9f099d118a01f597d6b3479b2a08 | 192 | py | Python | src/cmp/cool_lang/ast/integer_node.py | codestrange/cool-compiler-2020 | 30508965d75a1a1d1362d0b51bef8da3978fd0c2 | [
"MIT"
] | 3 | 2020-01-14T04:47:32.000Z | 2020-09-10T17:57:20.000Z | src/cmp/cool_lang/ast/integer_node.py | codestrange/cool-compiler-2020 | 30508965d75a1a1d1362d0b51bef8da3978fd0c2 | [
"MIT"
] | 5 | 2020-01-14T06:06:35.000Z | 2020-02-19T01:01:33.000Z | src/cmp/cool_lang/ast/integer_node.py | codestrange/cool-compiler-2020 | 30508965d75a1a1d1362d0b51bef8da3978fd0c2 | [
"MIT"
] | 3 | 2020-01-14T04:58:24.000Z | 2020-01-14T16:23:41.000Z | from .atomic_node import AtomicNode
| 27.428571 | 62 | 0.723958 | from .atomic_node import AtomicNode
class IntegerNode(AtomicNode):
    """AST leaf node representing an integer literal token."""

    def __init__(self, token: str, line: int, column: int):
        # Py3 zero-argument super() -- equivalent to super(IntegerNode, self).
        super().__init__(token, line, column)
| 97 | 9 | 49 |
076d01cdee74927f3950a2f36d0c2aa2fc2d6a1b | 106 | py | Python | dexp/cli/defaults.py | haesleinhuepf/dexp | 2ea84f3db323724588fac565fae56f0d522bc5ca | [
"BSD-3-Clause"
] | 16 | 2021-04-21T14:09:19.000Z | 2022-03-22T02:30:59.000Z | dexp/cli/defaults.py | haesleinhuepf/dexp | 2ea84f3db323724588fac565fae56f0d522bc5ca | [
"BSD-3-Clause"
] | 28 | 2021-04-15T17:43:08.000Z | 2022-03-29T16:08:35.000Z | dexp/cli/defaults.py | haesleinhuepf/dexp | 2ea84f3db323724588fac565fae56f0d522bc5ca | [
"BSD-3-Clause"
] | 3 | 2022-02-08T17:41:30.000Z | 2022-03-18T15:32:27.000Z | _default_store = "dir"
# Default compression level -- presumably passed to the codec below; confirm
# the scale against the codec's documentation.
_default_clevel = 3
# Default compression codec name.
_default_codec = "zstd"
# Default backend used for worker parallelism.
_default_workers_backend = "threading"
| 21.2 | 38 | 0.792453 | _default_store = "dir"
_default_clevel = 3
_default_codec = "zstd"
_default_workers_backend = "threading"
| 0 | 0 | 0 |
130a684460802de83781936238e59e5a10674077 | 110 | py | Python | docker/template/handlers/birthday.py | izi-global/izir | d1a4bfb5c082c3de1956402ef0280564014a3bd8 | [
"MIT"
] | null | null | null | docker/template/handlers/birthday.py | izi-global/izir | d1a4bfb5c082c3de1956402ef0280564014a3bd8 | [
"MIT"
] | 5 | 2021-03-18T21:01:05.000Z | 2022-03-11T23:29:48.000Z | docker/template/handlers/birthday.py | izi-global/izir | d1a4bfb5c082c3de1956402ef0280564014a3bd8 | [
"MIT"
] | null | null | null | import izi
@izi.get("/birthday")
| 15.714286 | 53 | 0.672727 | import izi
@izi.get("/birthday")
def home(name: str):
return "Happy Birthday, {name}".format(name=name)
| 53 | 0 | 22 |
b42e01b497eed8a4616e56359654df96530b2429 | 37,311 | py | Python | feersum_nlu/api/regex_entity_extractors_api.py | praekelt/feersum-nlu-api-wrappers | 6580e2bab2c8a764fe868a505330b3fee6029074 | [
"BSD-3-Clause"
] | 9 | 2017-10-10T12:24:23.000Z | 2021-08-18T14:07:51.000Z | feersum_nlu/api/regex_entity_extractors_api.py | praekelt/feersum-nlu-api-wrappers | 6580e2bab2c8a764fe868a505330b3fee6029074 | [
"BSD-3-Clause"
] | 1 | 2020-12-06T11:03:25.000Z | 2021-04-14T05:21:23.000Z | feersum_nlu/api/regex_entity_extractors_api.py | praekelt/feersum-nlu-api-wrappers | 6580e2bab2c8a764fe868a505330b3fee6029074 | [
"BSD-3-Clause"
] | 2 | 2019-02-12T08:26:06.000Z | 2022-02-01T09:39:47.000Z | # coding: utf-8
"""
FeersumNLU API
This is the HTTP API for Feersum NLU. See https://github.com/praekelt/feersum-nlu-api-wrappers for examples of how to use the API. # noqa: E501
OpenAPI spec version: 2.0.54.dev2
Contact: nlu@feersum.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from feersum_nlu.api_client import ApiClient
class RegexEntityExtractorsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def regex_entity_extractor_create(self, create_details, **kwargs): # noqa: E501
"""Create a regular expression entity extractor. # noqa: E501
Create a new regular expression entity extractor or reload one from the trash. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.regex_entity_extractor_create(create_details, async_req=True)
>>> result = thread.get()
:param async_req bool
:param RegexEntityExtractorCreateDetails create_details: The details of the instance to create. (required)
:param str x_caller:
:return: RegexEntityExtractorInstanceDetail
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.regex_entity_extractor_create_with_http_info(create_details, **kwargs) # noqa: E501
else:
(data) = self.regex_entity_extractor_create_with_http_info(create_details, **kwargs) # noqa: E501
return data
    def regex_entity_extractor_create_with_http_info(self, create_details, **kwargs):  # noqa: E501
        """Create a regular expression entity extractor.  # noqa: E501
        Create a new regular expression entity extractor or reload one from the trash.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.regex_entity_extractor_create_with_http_info(create_details, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param RegexEntityExtractorCreateDetails create_details: The details of the instance to create. (required)
        :param str x_caller:
        :return: RegexEntityExtractorInstanceDetail
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Recognized keyword arguments; anything else raises TypeError below.
        all_params = ['create_details', 'x_caller']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshot captures self/create_details/kwargs; the validated
        # kwargs are merged in, then the raw 'kwargs' entry is dropped.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method regex_entity_extractor_create" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'create_details' is set
        if ('create_details' not in params or
                params['create_details'] is None):
            raise ValueError("Missing the required parameter `create_details` when calling `regex_entity_extractor_create`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        query_params = []
        # Optional caller-identification header.
        header_params = {}
        if 'x_caller' in params:
            header_params['X-CALLER'] = params['x_caller']  # noqa: E501
        form_params = []
        local_var_files = {}
        # The create details object becomes the JSON request body.
        body_params = None
        if 'create_details' in params:
            body_params = params['create_details']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['APIKeyHeader', 'APIKeyHeader_old']  # noqa: E501
        return self.api_client.call_api(
            '/nlu/v2/regex_entity_extractors', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='RegexEntityExtractorInstanceDetail',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def regex_entity_extractor_del(self, instance_name, **kwargs): # noqa: E501
"""Delete named instance. # noqa: E501
Delete and get the details of the named regular expression entity extractor instance. Deleted models can be reloaded from the trash with the create operation. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.regex_entity_extractor_del(instance_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str instance_name: The name of the instance. (required)
:param str x_caller:
:return: RegexEntityExtractorInstanceDetail
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.regex_entity_extractor_del_with_http_info(instance_name, **kwargs) # noqa: E501
else:
(data) = self.regex_entity_extractor_del_with_http_info(instance_name, **kwargs) # noqa: E501
return data
def regex_entity_extractor_del_with_http_info(self, instance_name, **kwargs):  # noqa: E501
    """Delete named instance.

    Delete and get the details of the named regular expression entity
    extractor instance. Deleted models can be reloaded from the trash
    with the create operation.

    Synchronous by default; pass async_req=True to receive the request
    thread instead of the result.

    :param async_req bool
    :param str instance_name: The name of the instance. (required)
    :param str x_caller:
    :return: RegexEntityExtractorInstanceDetail
    """
    # Keyword arguments this endpoint understands.
    accepted = {'instance_name', 'x_caller', 'async_req',
                '_return_http_data_only', '_preload_content',
                '_request_timeout'}
    call_opts = {'instance_name': instance_name}
    for arg_name, arg_value in kwargs.items():
        if arg_name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method regex_entity_extractor_del" % arg_name
            )
        call_opts[arg_name] = arg_value

    # 'instance_name' is mandatory for this call.
    if call_opts.get('instance_name') is None:
        raise ValueError("Missing the required parameter `instance_name` when calling `regex_entity_extractor_del`")  # noqa: E501

    path_params = {'instance_name': call_opts['instance_name']}

    header_params = {}
    if 'x_caller' in call_opts:
        header_params['X-CALLER'] = call_opts['x_caller']
    # The endpoint both produces and consumes JSON.
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json'])

    return self.api_client.call_api(
        '/nlu/v2/regex_entity_extractors/{instance_name}', 'DELETE',
        path_params,
        [],  # no query parameters
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='RegexEntityExtractorInstanceDetail',  # noqa: E501
        auth_settings=['APIKeyHeader', 'APIKeyHeader_old'],
        async_req=call_opts.get('async_req'),
        _return_http_data_only=call_opts.get('_return_http_data_only'),
        _preload_content=call_opts.get('_preload_content', True),
        _request_timeout=call_opts.get('_request_timeout'),
        collection_formats={})
def regex_entity_extractor_get_details(self, instance_name, **kwargs):  # noqa: E501
    """Get details of named instance.

    Get the details of the named regular expression entity extractor
    instance.

    Synchronous by default; pass async_req=True to receive the request
    thread instead of the result.

    :param async_req bool
    :param str instance_name: The name of the instance. (required)
    :param str x_caller:
    :return: RegexEntityExtractorInstanceDetail
    """
    # This convenience wrapper always wants just the payload.
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info variant already returns the request thread
    # when async_req is set, so one call covers both modes.
    return self.regex_entity_extractor_get_details_with_http_info(
        instance_name, **kwargs)  # noqa: E501
def regex_entity_extractor_get_details_with_http_info(self, instance_name, **kwargs):  # noqa: E501
    """Get details of named instance.

    Get the details of the named regular expression entity extractor
    instance.

    Synchronous by default; pass async_req=True to receive the request
    thread instead of the result.

    :param async_req bool
    :param str instance_name: The name of the instance. (required)
    :param str x_caller:
    :return: RegexEntityExtractorInstanceDetail
    """
    # Keyword arguments this endpoint understands.
    accepted = {'instance_name', 'x_caller', 'async_req',
                '_return_http_data_only', '_preload_content',
                '_request_timeout'}
    call_opts = {'instance_name': instance_name}
    for arg_name, arg_value in kwargs.items():
        if arg_name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method regex_entity_extractor_get_details" % arg_name
            )
        call_opts[arg_name] = arg_value

    # 'instance_name' is mandatory for this call.
    if call_opts.get('instance_name') is None:
        raise ValueError("Missing the required parameter `instance_name` when calling `regex_entity_extractor_get_details`")  # noqa: E501

    path_params = {'instance_name': call_opts['instance_name']}

    header_params = {}
    if 'x_caller' in call_opts:
        header_params['X-CALLER'] = call_opts['x_caller']
    # The endpoint both produces and consumes JSON.
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json'])

    return self.api_client.call_api(
        '/nlu/v2/regex_entity_extractors/{instance_name}', 'GET',
        path_params,
        [],  # no query parameters
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='RegexEntityExtractorInstanceDetail',  # noqa: E501
        auth_settings=['APIKeyHeader', 'APIKeyHeader_old'],
        async_req=call_opts.get('async_req'),
        _return_http_data_only=call_opts.get('_return_http_data_only'),
        _preload_content=call_opts.get('_preload_content', True),
        _request_timeout=call_opts.get('_request_timeout'),
        collection_formats={})
def regex_entity_extractor_get_details_all(self, **kwargs):  # noqa: E501
    """Get list of regular expression entity extractors.

    Get the list of regular expression entity extractors.

    Synchronous by default; pass async_req=True to receive the request
    thread instead of the result.

    :param async_req bool
    :param str x_caller:
    :return: list[RegexEntityExtractorInstanceDetail]
    """
    # This convenience wrapper always wants just the payload.
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info variant already returns the request thread
    # when async_req is set, so one call covers both modes.
    return self.regex_entity_extractor_get_details_all_with_http_info(**kwargs)  # noqa: E501
def regex_entity_extractor_get_details_all_with_http_info(self, **kwargs):  # noqa: E501
    """Get list of regular expression entity extractors.

    Get the list of regular expression entity extractors.

    Synchronous by default; pass async_req=True to receive the request
    thread instead of the result.

    :param async_req bool
    :param str x_caller:
    :return: list[RegexEntityExtractorInstanceDetail]
    """
    # Keyword arguments this endpoint understands.
    accepted = {'x_caller', 'async_req', '_return_http_data_only',
                '_preload_content', '_request_timeout'}
    call_opts = {}
    for arg_name, arg_value in kwargs.items():
        if arg_name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method regex_entity_extractor_get_details_all" % arg_name
            )
        call_opts[arg_name] = arg_value

    header_params = {}
    if 'x_caller' in call_opts:
        header_params['X-CALLER'] = call_opts['x_caller']
    # The endpoint both produces and consumes JSON.
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json'])

    return self.api_client.call_api(
        '/nlu/v2/regex_entity_extractors', 'GET',
        {},  # no path parameters
        [],  # no query parameters
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='list[RegexEntityExtractorInstanceDetail]',  # noqa: E501
        auth_settings=['APIKeyHeader', 'APIKeyHeader_old'],
        async_req=call_opts.get('async_req'),
        _return_http_data_only=call_opts.get('_return_http_data_only'),
        _preload_content=call_opts.get('_preload_content', True),
        _request_timeout=call_opts.get('_request_timeout'),
        collection_formats={})
def regex_entity_extractor_get_params(self, instance_name, **kwargs):  # noqa: E501
    """Get the editable model parameters of named regex entity extractor.

    Synchronous by default; pass async_req=True to receive the request
    thread instead of the result.

    :param async_req bool
    :param str instance_name: The name of the instance. (required)
    :param str x_caller:
    :return: ModelParams
    """
    # This convenience wrapper always wants just the payload.
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info variant already returns the request thread
    # when async_req is set, so one call covers both modes.
    return self.regex_entity_extractor_get_params_with_http_info(
        instance_name, **kwargs)  # noqa: E501
def regex_entity_extractor_get_params_with_http_info(self, instance_name, **kwargs):  # noqa: E501
    """Get the editable model parameters of named regex entity extractor.

    Synchronous by default; pass async_req=True to receive the request
    thread instead of the result.

    :param async_req bool
    :param str instance_name: The name of the instance. (required)
    :param str x_caller:
    :return: ModelParams
    """
    # Keyword arguments this endpoint understands.
    accepted = {'instance_name', 'x_caller', 'async_req',
                '_return_http_data_only', '_preload_content',
                '_request_timeout'}
    call_opts = {'instance_name': instance_name}
    for arg_name, arg_value in kwargs.items():
        if arg_name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method regex_entity_extractor_get_params" % arg_name
            )
        call_opts[arg_name] = arg_value

    # 'instance_name' is mandatory for this call.
    if call_opts.get('instance_name') is None:
        raise ValueError("Missing the required parameter `instance_name` when calling `regex_entity_extractor_get_params`")  # noqa: E501

    path_params = {'instance_name': call_opts['instance_name']}

    header_params = {}
    if 'x_caller' in call_opts:
        header_params['X-CALLER'] = call_opts['x_caller']
    # The endpoint both produces and consumes JSON.
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json'])

    return self.api_client.call_api(
        '/nlu/v2/regex_entity_extractors/{instance_name}/params', 'GET',
        path_params,
        [],  # no query parameters
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='ModelParams',  # noqa: E501
        auth_settings=['APIKeyHeader', 'APIKeyHeader_old'],
        async_req=call_opts.get('async_req'),
        _return_http_data_only=call_opts.get('_return_http_data_only'),
        _preload_content=call_opts.get('_preload_content', True),
        _request_timeout=call_opts.get('_request_timeout'),
        collection_formats={})
def regex_entity_extractor_retrieve(self, instance_name, text_input, **kwargs):  # noqa: E501
    """Extract information based on the regular expression.

    Extract the entities matching the regular expression.

    Synchronous by default; pass async_req=True to receive the request
    thread instead of the result.

    :param async_req bool
    :param str instance_name: The name of the instance. (required)
    :param TextInput text_input: The input text. (required)
    :param str x_caller:
    :return: list[RegexEntity]
    """
    # This convenience wrapper always wants just the payload.
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info variant already returns the request thread
    # when async_req is set, so one call covers both modes.
    return self.regex_entity_extractor_retrieve_with_http_info(
        instance_name, text_input, **kwargs)  # noqa: E501
def regex_entity_extractor_retrieve_with_http_info(self, instance_name, text_input, **kwargs):  # noqa: E501
    """Extract information based on the regular expression.

    Extract the entities matching the regular expression.

    Synchronous by default; pass async_req=True to receive the request
    thread instead of the result.

    :param async_req bool
    :param str instance_name: The name of the instance. (required)
    :param TextInput text_input: The input text. (required)
    :param str x_caller:
    :return: list[RegexEntity]
    """
    # Keyword arguments this endpoint understands.
    accepted = {'instance_name', 'text_input', 'x_caller', 'async_req',
                '_return_http_data_only', '_preload_content',
                '_request_timeout'}
    call_opts = {'instance_name': instance_name, 'text_input': text_input}
    for arg_name, arg_value in kwargs.items():
        if arg_name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method regex_entity_extractor_retrieve" % arg_name
            )
        call_opts[arg_name] = arg_value

    # Both positional parameters are mandatory for this call.
    if call_opts.get('instance_name') is None:
        raise ValueError("Missing the required parameter `instance_name` when calling `regex_entity_extractor_retrieve`")  # noqa: E501
    if call_opts.get('text_input') is None:
        raise ValueError("Missing the required parameter `text_input` when calling `regex_entity_extractor_retrieve`")  # noqa: E501

    path_params = {'instance_name': call_opts['instance_name']}

    header_params = {}
    if 'x_caller' in call_opts:
        header_params['X-CALLER'] = call_opts['x_caller']
    # The endpoint both produces and consumes JSON.
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json'])

    return self.api_client.call_api(
        '/nlu/v2/regex_entity_extractors/{instance_name}/retrieve', 'POST',
        path_params,
        [],  # no query parameters
        header_params,
        body=call_opts['text_input'],
        post_params=[],
        files={},
        response_type='list[RegexEntity]',  # noqa: E501
        auth_settings=['APIKeyHeader', 'APIKeyHeader_old'],
        async_req=call_opts.get('async_req'),
        _return_http_data_only=call_opts.get('_return_http_data_only'),
        _preload_content=call_opts.get('_preload_content', True),
        _request_timeout=call_opts.get('_request_timeout'),
        collection_formats={})
def regex_entity_extractor_set_params(self, instance_name, model_params, **kwargs):  # noqa: E501
    """Set the model parameters of named regex entity extractor.

    Synchronous by default; pass async_req=True to receive the request
    thread instead of the result.

    :param async_req bool
    :param str instance_name: The name of the instance. (required)
    :param ModelParams model_params: The model parameters. (required)
    :param str x_caller:
    :return: RegexEntityExtractorInstanceDetail
    """
    # This convenience wrapper always wants just the payload.
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info variant already returns the request thread
    # when async_req is set, so one call covers both modes.
    return self.regex_entity_extractor_set_params_with_http_info(
        instance_name, model_params, **kwargs)  # noqa: E501
def regex_entity_extractor_set_params_with_http_info(self, instance_name, model_params, **kwargs):  # noqa: E501
    """Set the model parameters of named regex entity extractor.

    Synchronous by default; pass async_req=True to receive the request
    thread instead of the result.

    :param async_req bool
    :param str instance_name: The name of the instance. (required)
    :param ModelParams model_params: The model parameters. (required)
    :param str x_caller:
    :return: RegexEntityExtractorInstanceDetail
    """
    # Keyword arguments this endpoint understands.
    accepted = {'instance_name', 'model_params', 'x_caller', 'async_req',
                '_return_http_data_only', '_preload_content',
                '_request_timeout'}
    call_opts = {'instance_name': instance_name, 'model_params': model_params}
    for arg_name, arg_value in kwargs.items():
        if arg_name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method regex_entity_extractor_set_params" % arg_name
            )
        call_opts[arg_name] = arg_value

    # Both positional parameters are mandatory for this call.
    if call_opts.get('instance_name') is None:
        raise ValueError("Missing the required parameter `instance_name` when calling `regex_entity_extractor_set_params`")  # noqa: E501
    if call_opts.get('model_params') is None:
        raise ValueError("Missing the required parameter `model_params` when calling `regex_entity_extractor_set_params`")  # noqa: E501

    path_params = {'instance_name': call_opts['instance_name']}

    header_params = {}
    if 'x_caller' in call_opts:
        header_params['X-CALLER'] = call_opts['x_caller']
    # The endpoint both produces and consumes JSON.
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json'])

    return self.api_client.call_api(
        '/nlu/v2/regex_entity_extractors/{instance_name}/params', 'POST',
        path_params,
        [],  # no query parameters
        header_params,
        body=call_opts['model_params'],
        post_params=[],
        files={},
        response_type='RegexEntityExtractorInstanceDetail',  # noqa: E501
        auth_settings=['APIKeyHeader', 'APIKeyHeader_old'],
        async_req=call_opts.get('async_req'),
        _return_http_data_only=call_opts.get('_return_http_data_only'),
        _preload_content=call_opts.get('_preload_content', True),
        _request_timeout=call_opts.get('_request_timeout'),
        collection_formats={})
def regex_entity_extractor_vaporise(self, instance_name, **kwargs):  # noqa: E501
    """Vaporise the named model.

    Permanently vaporises a model even if not trashed.

    Synchronous by default; pass async_req=True to receive the request
    thread instead of the result.

    :param async_req bool
    :param str instance_name: The name of the instance. (required)
    :param str x_caller:
    :return: RegexEntityExtractorInstanceDetail
    """
    # This convenience wrapper always wants just the payload.
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info variant already returns the request thread
    # when async_req is set, so one call covers both modes.
    return self.regex_entity_extractor_vaporise_with_http_info(
        instance_name, **kwargs)  # noqa: E501
def regex_entity_extractor_vaporise_with_http_info(self, instance_name, **kwargs):  # noqa: E501
    """Vaporise the named model.

    Permanently vaporises a model even if not trashed.

    Synchronous by default; pass async_req=True to receive the request
    thread instead of the result.

    :param async_req bool
    :param str instance_name: The name of the instance. (required)
    :param str x_caller:
    :return: RegexEntityExtractorInstanceDetail
    """
    # Keyword arguments this endpoint understands.
    accepted = {'instance_name', 'x_caller', 'async_req',
                '_return_http_data_only', '_preload_content',
                '_request_timeout'}
    call_opts = {'instance_name': instance_name}
    for arg_name, arg_value in kwargs.items():
        if arg_name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method regex_entity_extractor_vaporise" % arg_name
            )
        call_opts[arg_name] = arg_value

    # 'instance_name' is mandatory for this call.
    if call_opts.get('instance_name') is None:
        raise ValueError("Missing the required parameter `instance_name` when calling `regex_entity_extractor_vaporise`")  # noqa: E501

    path_params = {'instance_name': call_opts['instance_name']}

    header_params = {}
    if 'x_caller' in call_opts:
        header_params['X-CALLER'] = call_opts['x_caller']
    # The endpoint both produces and consumes JSON.
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json'])

    return self.api_client.call_api(
        '/nlu/v2/regex_entity_extractors/{instance_name}/vaporise', 'POST',
        path_params,
        [],  # no query parameters
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='RegexEntityExtractorInstanceDetail',  # noqa: E501
        auth_settings=['APIKeyHeader', 'APIKeyHeader_old'],
        async_req=call_opts.get('async_req'),
        _return_http_data_only=call_opts.get('_return_http_data_only'),
        _preload_content=call_opts.get('_preload_content', True),
        _request_timeout=call_opts.get('_request_timeout'),
        collection_formats={})
| 43.034602 | 180 | 0.637319 | # coding: utf-8
"""
FeersumNLU API
This is the HTTP API for Feersum NLU. See https://github.com/praekelt/feersum-nlu-api-wrappers for examples of how to use the API. # noqa: E501
OpenAPI spec version: 2.0.54.dev2
Contact: nlu@feersum.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from feersum_nlu.api_client import ApiClient
class RegexEntityExtractorsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
    # Fall back to a freshly constructed default ApiClient when the
    # caller does not supply one.
    self.api_client = api_client if api_client is not None else ApiClient()
def regex_entity_extractor_create(self, create_details, **kwargs):  # noqa: E501
    """Create a regular expression entity extractor.

    Create a new regular expression entity extractor or reload one from
    the trash.

    Synchronous by default; pass async_req=True to receive the request
    thread instead of the result.

    :param async_req bool
    :param RegexEntityExtractorCreateDetails create_details: The details of the instance to create. (required)
    :param str x_caller:
    :return: RegexEntityExtractorInstanceDetail
    """
    # This convenience wrapper always wants just the payload.
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info variant already returns the request thread
    # when async_req is set, so one call covers both modes.
    return self.regex_entity_extractor_create_with_http_info(
        create_details, **kwargs)  # noqa: E501
def regex_entity_extractor_create_with_http_info(self, create_details, **kwargs):  # noqa: E501
    """Create a regular expression entity extractor.

    Create a new regular expression entity extractor or reload one from
    the trash.

    Synchronous by default; pass async_req=True to receive the request
    thread instead of the result.

    :param async_req bool
    :param RegexEntityExtractorCreateDetails create_details: The details of the instance to create. (required)
    :param str x_caller:
    :return: RegexEntityExtractorInstanceDetail
    """
    # Keyword arguments this endpoint understands.
    accepted = {'create_details', 'x_caller', 'async_req',
                '_return_http_data_only', '_preload_content',
                '_request_timeout'}
    call_opts = {'create_details': create_details}
    for arg_name, arg_value in kwargs.items():
        if arg_name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method regex_entity_extractor_create" % arg_name
            )
        call_opts[arg_name] = arg_value

    # 'create_details' is mandatory for this call.
    if call_opts.get('create_details') is None:
        raise ValueError("Missing the required parameter `create_details` when calling `regex_entity_extractor_create`")  # noqa: E501

    header_params = {}
    if 'x_caller' in call_opts:
        header_params['X-CALLER'] = call_opts['x_caller']
    # The endpoint both produces and consumes JSON.
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json'])

    return self.api_client.call_api(
        '/nlu/v2/regex_entity_extractors', 'POST',
        {},  # no path parameters
        [],  # no query parameters
        header_params,
        body=call_opts['create_details'],
        post_params=[],
        files={},
        response_type='RegexEntityExtractorInstanceDetail',  # noqa: E501
        auth_settings=['APIKeyHeader', 'APIKeyHeader_old'],
        async_req=call_opts.get('async_req'),
        _return_http_data_only=call_opts.get('_return_http_data_only'),
        _preload_content=call_opts.get('_preload_content', True),
        _request_timeout=call_opts.get('_request_timeout'),
        collection_formats={})
def regex_entity_extractor_del(self, instance_name, **kwargs):  # noqa: E501
    """Delete named instance.

    Delete and get the details of the named regular expression entity
    extractor instance. Deleted models can be reloaded from the trash
    with the create operation.

    Synchronous by default; pass async_req=True to receive the request
    thread instead of the result.

    :param async_req bool
    :param str instance_name: The name of the instance. (required)
    :param str x_caller:
    :return: RegexEntityExtractorInstanceDetail
    """
    # This convenience wrapper always wants just the payload.
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info variant already returns the request thread
    # when async_req is set, so one call covers both modes.
    return self.regex_entity_extractor_del_with_http_info(
        instance_name, **kwargs)  # noqa: E501
def regex_entity_extractor_del_with_http_info(self, instance_name, **kwargs):  # noqa: E501
    """Delete named instance.

    Delete and get the details of the named regular expression entity
    extractor instance. Deleted models can be reloaded from the trash
    with the create operation.

    Synchronous by default; pass async_req=True to receive the request
    thread instead of the result.

    :param async_req bool
    :param str instance_name: The name of the instance. (required)
    :param str x_caller:
    :return: RegexEntityExtractorInstanceDetail
    """
    # Keyword arguments this endpoint understands.
    accepted = {'instance_name', 'x_caller', 'async_req',
                '_return_http_data_only', '_preload_content',
                '_request_timeout'}
    call_opts = {'instance_name': instance_name}
    for arg_name, arg_value in kwargs.items():
        if arg_name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method regex_entity_extractor_del" % arg_name
            )
        call_opts[arg_name] = arg_value

    # 'instance_name' is mandatory for this call.
    if call_opts.get('instance_name') is None:
        raise ValueError("Missing the required parameter `instance_name` when calling `regex_entity_extractor_del`")  # noqa: E501

    path_params = {'instance_name': call_opts['instance_name']}

    header_params = {}
    if 'x_caller' in call_opts:
        header_params['X-CALLER'] = call_opts['x_caller']
    # The endpoint both produces and consumes JSON.
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json'])

    return self.api_client.call_api(
        '/nlu/v2/regex_entity_extractors/{instance_name}', 'DELETE',
        path_params,
        [],  # no query parameters
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='RegexEntityExtractorInstanceDetail',  # noqa: E501
        auth_settings=['APIKeyHeader', 'APIKeyHeader_old'],
        async_req=call_opts.get('async_req'),
        _return_http_data_only=call_opts.get('_return_http_data_only'),
        _preload_content=call_opts.get('_preload_content', True),
        _request_timeout=call_opts.get('_request_timeout'),
        collection_formats={})
def regex_entity_extractor_get_details(self, instance_name, **kwargs): # noqa: E501
"""Get details of named instance. # noqa: E501
Get the details of the named regular expression entity extractor instance. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.regex_entity_extractor_get_details(instance_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str instance_name: The name of the instance. (required)
:param str x_caller:
:return: RegexEntityExtractorInstanceDetail
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.regex_entity_extractor_get_details_with_http_info(instance_name, **kwargs) # noqa: E501
else:
(data) = self.regex_entity_extractor_get_details_with_http_info(instance_name, **kwargs) # noqa: E501
return data
    def regex_entity_extractor_get_details_with_http_info(self, instance_name, **kwargs):  # noqa: E501
        """Get details of named instance.  # noqa: E501
        Get the details of the named regular expression entity extractor instance.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.regex_entity_extractor_get_details_with_http_info(instance_name, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str instance_name: The name of the instance. (required)
        :param str x_caller:
        :return: RegexEntityExtractorInstanceDetail
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Keyword arguments this endpoint accepts: the documented API
        # parameters plus the generated client's internal control kwargs.
        all_params = ['instance_name', 'x_caller']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # locals() gives dict-style access to the named arguments; each
        # kwarg is validated against all_params and folded into that dict.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method regex_entity_extractor_get_details" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'instance_name' is set
        if ('instance_name' not in params or
                params['instance_name'] is None):
            raise ValueError("Missing the required parameter `instance_name` when calling `regex_entity_extractor_get_details`")  # noqa: E501

        collection_formats = {}

        # 'instance_name' is substituted into the URL path template below.
        path_params = {}
        if 'instance_name' in params:
            path_params['instance_name'] = params['instance_name']  # noqa: E501

        query_params = []

        # Optional caller identification travels as the X-CALLER header.
        header_params = {}
        if 'x_caller' in params:
            header_params['X-CALLER'] = params['x_caller']  # noqa: E501

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['APIKeyHeader', 'APIKeyHeader_old']  # noqa: E501

        # Delegate the HTTP round trip (and optional async dispatch) to the
        # shared ApiClient.
        return self.api_client.call_api(
            '/nlu/v2/regex_entity_extractors/{instance_name}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='RegexEntityExtractorInstanceDetail',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def regex_entity_extractor_get_details_all(self, **kwargs): # noqa: E501
"""Get list of regular expression entity extractors. # noqa: E501
Get the list of regular expression entity extractors. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.regex_entity_extractor_get_details_all(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str x_caller:
:return: list[RegexEntityExtractorInstanceDetail]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.regex_entity_extractor_get_details_all_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.regex_entity_extractor_get_details_all_with_http_info(**kwargs) # noqa: E501
return data
    def regex_entity_extractor_get_details_all_with_http_info(self, **kwargs):  # noqa: E501
        """Get list of regular expression entity extractors.  # noqa: E501
        Get the list of regular expression entity extractors.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.regex_entity_extractor_get_details_all_with_http_info(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str x_caller:
        :return: list[RegexEntityExtractorInstanceDetail]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Keyword arguments this endpoint accepts: the documented API
        # parameters plus the generated client's internal control kwargs.
        all_params = ['x_caller']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # locals() gives dict-style access to the named arguments; each
        # kwarg is validated against all_params and folded into that dict.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method regex_entity_extractor_get_details_all" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        # This collection endpoint takes no path parameters.
        path_params = {}

        query_params = []

        # Optional caller identification travels as the X-CALLER header.
        header_params = {}
        if 'x_caller' in params:
            header_params['X-CALLER'] = params['x_caller']  # noqa: E501

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['APIKeyHeader', 'APIKeyHeader_old']  # noqa: E501

        # Delegate the HTTP round trip (and optional async dispatch) to the
        # shared ApiClient.
        return self.api_client.call_api(
            '/nlu/v2/regex_entity_extractors', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[RegexEntityExtractorInstanceDetail]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def regex_entity_extractor_get_params(self, instance_name, **kwargs): # noqa: E501
"""Get the editable model parameters of named regex entity extractor. # noqa: E501
Get the editable model parameters of named regex entity extractor. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.regex_entity_extractor_get_params(instance_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str instance_name: The name of the instance. (required)
:param str x_caller:
:return: ModelParams
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.regex_entity_extractor_get_params_with_http_info(instance_name, **kwargs) # noqa: E501
else:
(data) = self.regex_entity_extractor_get_params_with_http_info(instance_name, **kwargs) # noqa: E501
return data
    def regex_entity_extractor_get_params_with_http_info(self, instance_name, **kwargs):  # noqa: E501
        """Get the editable model parameters of named regex entity extractor.  # noqa: E501
        Get the editable model parameters of named regex entity extractor.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.regex_entity_extractor_get_params_with_http_info(instance_name, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str instance_name: The name of the instance. (required)
        :param str x_caller:
        :return: ModelParams
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Keyword arguments this endpoint accepts: the documented API
        # parameters plus the generated client's internal control kwargs.
        all_params = ['instance_name', 'x_caller']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # locals() gives dict-style access to the named arguments; each
        # kwarg is validated against all_params and folded into that dict.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method regex_entity_extractor_get_params" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'instance_name' is set
        if ('instance_name' not in params or
                params['instance_name'] is None):
            raise ValueError("Missing the required parameter `instance_name` when calling `regex_entity_extractor_get_params`")  # noqa: E501

        collection_formats = {}

        # 'instance_name' is substituted into the URL path template below.
        path_params = {}
        if 'instance_name' in params:
            path_params['instance_name'] = params['instance_name']  # noqa: E501

        query_params = []

        # Optional caller identification travels as the X-CALLER header.
        header_params = {}
        if 'x_caller' in params:
            header_params['X-CALLER'] = params['x_caller']  # noqa: E501

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['APIKeyHeader', 'APIKeyHeader_old']  # noqa: E501

        # Delegate the HTTP round trip (and optional async dispatch) to the
        # shared ApiClient.
        return self.api_client.call_api(
            '/nlu/v2/regex_entity_extractors/{instance_name}/params', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ModelParams',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def regex_entity_extractor_retrieve(self, instance_name, text_input, **kwargs): # noqa: E501
"""Extract information based on the regular expression. # noqa: E501
Extract the entities matching the regular expression. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.regex_entity_extractor_retrieve(instance_name, text_input, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str instance_name: The name of the instance. (required)
:param TextInput text_input: The input text. (required)
:param str x_caller:
:return: list[RegexEntity]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.regex_entity_extractor_retrieve_with_http_info(instance_name, text_input, **kwargs) # noqa: E501
else:
(data) = self.regex_entity_extractor_retrieve_with_http_info(instance_name, text_input, **kwargs) # noqa: E501
return data
    def regex_entity_extractor_retrieve_with_http_info(self, instance_name, text_input, **kwargs):  # noqa: E501
        """Extract information based on the regular expression.  # noqa: E501
        Extract the entities matching the regular expression.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.regex_entity_extractor_retrieve_with_http_info(instance_name, text_input, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str instance_name: The name of the instance. (required)
        :param TextInput text_input: The input text. (required)
        :param str x_caller:
        :return: list[RegexEntity]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Keyword arguments this endpoint accepts: the documented API
        # parameters plus the generated client's internal control kwargs.
        all_params = ['instance_name', 'text_input', 'x_caller']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # locals() gives dict-style access to the named arguments; each
        # kwarg is validated against all_params and folded into that dict.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method regex_entity_extractor_retrieve" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'instance_name' is set
        if ('instance_name' not in params or
                params['instance_name'] is None):
            raise ValueError("Missing the required parameter `instance_name` when calling `regex_entity_extractor_retrieve`")  # noqa: E501
        # verify the required parameter 'text_input' is set
        if ('text_input' not in params or
                params['text_input'] is None):
            raise ValueError("Missing the required parameter `text_input` when calling `regex_entity_extractor_retrieve`")  # noqa: E501

        collection_formats = {}

        # 'instance_name' is substituted into the URL path template below.
        path_params = {}
        if 'instance_name' in params:
            path_params['instance_name'] = params['instance_name']  # noqa: E501

        query_params = []

        # Optional caller identification travels as the X-CALLER header.
        header_params = {}
        if 'x_caller' in params:
            header_params['X-CALLER'] = params['x_caller']  # noqa: E501

        form_params = []
        local_var_files = {}

        # The TextInput model is serialized as the JSON request body.
        body_params = None
        if 'text_input' in params:
            body_params = params['text_input']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['APIKeyHeader', 'APIKeyHeader_old']  # noqa: E501

        # Delegate the HTTP round trip (and optional async dispatch) to the
        # shared ApiClient.
        return self.api_client.call_api(
            '/nlu/v2/regex_entity_extractors/{instance_name}/retrieve', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[RegexEntity]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def regex_entity_extractor_set_params(self, instance_name, model_params, **kwargs): # noqa: E501
"""Set the model parameters of named regex entity extractor. # noqa: E501
Set the model parameters of named regex entity extractor. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.regex_entity_extractor_set_params(instance_name, model_params, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str instance_name: The name of the instance. (required)
:param ModelParams model_params: The model parameters. (required)
:param str x_caller:
:return: RegexEntityExtractorInstanceDetail
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.regex_entity_extractor_set_params_with_http_info(instance_name, model_params, **kwargs) # noqa: E501
else:
(data) = self.regex_entity_extractor_set_params_with_http_info(instance_name, model_params, **kwargs) # noqa: E501
return data
    def regex_entity_extractor_set_params_with_http_info(self, instance_name, model_params, **kwargs):  # noqa: E501
        """Set the model parameters of named regex entity extractor.  # noqa: E501
        Set the model parameters of named regex entity extractor.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.regex_entity_extractor_set_params_with_http_info(instance_name, model_params, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str instance_name: The name of the instance. (required)
        :param ModelParams model_params: The model parameters. (required)
        :param str x_caller:
        :return: RegexEntityExtractorInstanceDetail
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Keyword arguments this endpoint accepts: the documented API
        # parameters plus the generated client's internal control kwargs.
        all_params = ['instance_name', 'model_params', 'x_caller']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # locals() gives dict-style access to the named arguments; each
        # kwarg is validated against all_params and folded into that dict.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method regex_entity_extractor_set_params" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'instance_name' is set
        if ('instance_name' not in params or
                params['instance_name'] is None):
            raise ValueError("Missing the required parameter `instance_name` when calling `regex_entity_extractor_set_params`")  # noqa: E501
        # verify the required parameter 'model_params' is set
        if ('model_params' not in params or
                params['model_params'] is None):
            raise ValueError("Missing the required parameter `model_params` when calling `regex_entity_extractor_set_params`")  # noqa: E501

        collection_formats = {}

        # 'instance_name' is substituted into the URL path template below.
        path_params = {}
        if 'instance_name' in params:
            path_params['instance_name'] = params['instance_name']  # noqa: E501

        query_params = []

        # Optional caller identification travels as the X-CALLER header.
        header_params = {}
        if 'x_caller' in params:
            header_params['X-CALLER'] = params['x_caller']  # noqa: E501

        form_params = []
        local_var_files = {}

        # The ModelParams model is serialized as the JSON request body.
        body_params = None
        if 'model_params' in params:
            body_params = params['model_params']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['APIKeyHeader', 'APIKeyHeader_old']  # noqa: E501

        # Delegate the HTTP round trip (and optional async dispatch) to the
        # shared ApiClient.
        return self.api_client.call_api(
            '/nlu/v2/regex_entity_extractors/{instance_name}/params', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='RegexEntityExtractorInstanceDetail',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def regex_entity_extractor_vaporise(self, instance_name, **kwargs): # noqa: E501
"""Vaporise the named model. # noqa: E501
Permanently vaporises a model even if not trashed. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.regex_entity_extractor_vaporise(instance_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str instance_name: The name of the instance. (required)
:param str x_caller:
:return: RegexEntityExtractorInstanceDetail
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.regex_entity_extractor_vaporise_with_http_info(instance_name, **kwargs) # noqa: E501
else:
(data) = self.regex_entity_extractor_vaporise_with_http_info(instance_name, **kwargs) # noqa: E501
return data
    def regex_entity_extractor_vaporise_with_http_info(self, instance_name, **kwargs):  # noqa: E501
        """Vaporise the named model.  # noqa: E501
        Permanently vaporises a model even if not trashed.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.regex_entity_extractor_vaporise_with_http_info(instance_name, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str instance_name: The name of the instance. (required)
        :param str x_caller:
        :return: RegexEntityExtractorInstanceDetail
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Keyword arguments this endpoint accepts: the documented API
        # parameters plus the generated client's internal control kwargs.
        all_params = ['instance_name', 'x_caller']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # locals() gives dict-style access to the named arguments; each
        # kwarg is validated against all_params and folded into that dict.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method regex_entity_extractor_vaporise" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'instance_name' is set
        if ('instance_name' not in params or
                params['instance_name'] is None):
            raise ValueError("Missing the required parameter `instance_name` when calling `regex_entity_extractor_vaporise`")  # noqa: E501

        collection_formats = {}

        # 'instance_name' is substituted into the URL path template below.
        path_params = {}
        if 'instance_name' in params:
            path_params['instance_name'] = params['instance_name']  # noqa: E501

        query_params = []

        # Optional caller identification travels as the X-CALLER header.
        header_params = {}
        if 'x_caller' in params:
            header_params['X-CALLER'] = params['x_caller']  # noqa: E501

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['APIKeyHeader', 'APIKeyHeader_old']  # noqa: E501

        # Delegate the HTTP round trip (and optional async dispatch) to the
        # shared ApiClient.
        return self.api_client.call_api(
            '/nlu/v2/regex_entity_extractors/{instance_name}/vaporise', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='RegexEntityExtractorInstanceDetail',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
| 120 | 0 | 27 |
e232c85608e7357dcbea2b73496a2fb0667670b1 | 516 | py | Python | setup.py | bobjansen/django_fabv | cf8d1be8371c7574884f0405b6ca7f78a7c224b3 | [
"BSD-3-Clause"
] | 3 | 2015-05-18T13:49:36.000Z | 2015-05-18T14:37:32.000Z | setup.py | bobjansen/django_fabv | cf8d1be8371c7574884f0405b6ca7f78a7c224b3 | [
"BSD-3-Clause"
] | null | null | null | setup.py | bobjansen/django_fabv | cf8d1be8371c7574884f0405b6ca7f78a7c224b3 | [
"BSD-3-Clause"
] | null | null | null | try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup, find_packages
setup(
name = 'django_fabv',
version = '0.1.1',
url = 'https://pypi.python.org/pypi/django_fabv/0.1.0',
include_package_data=True,
packages=find_packages(),
license = '3-clause BSD',
author = 'Bob Jansen',
author_email = 'bob.jansen@veneficus.nl',
description = 'A/B module for Django based on ABingo',
long_description=open('README.txt').read(),
)
| 27.157895 | 59 | 0.680233 | try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup, find_packages
setup(
name = 'django_fabv',
version = '0.1.1',
url = 'https://pypi.python.org/pypi/django_fabv/0.1.0',
include_package_data=True,
packages=find_packages(),
license = '3-clause BSD',
author = 'Bob Jansen',
author_email = 'bob.jansen@veneficus.nl',
description = 'A/B module for Django based on ABingo',
long_description=open('README.txt').read(),
)
| 0 | 0 | 0 |
536290ee2514884e41f360d3529e625d5173f659 | 707 | py | Python | Ago-Dic-2018/Ernesto Vela/Practica1Parcial2/ocp2.py | Arbupa/DAS_Sistemas | 52263ab91436b2e5a24ce6f8493aaa2e2fe92fb1 | [
"MIT"
] | 41 | 2017-09-26T09:36:32.000Z | 2022-03-19T18:05:25.000Z | Ago-Dic-2018/Ernesto Vela/Practica1Parcial2/ocp2.py | Arbupa/DAS_Sistemas | 52263ab91436b2e5a24ce6f8493aaa2e2fe92fb1 | [
"MIT"
] | 67 | 2017-09-11T05:06:12.000Z | 2022-02-14T04:44:04.000Z | Ago-Dic-2018/Ernesto Vela/Practica1Parcial2/ocp2.py | Arbupa/DAS_Sistemas | 52263ab91436b2e5a24ce6f8493aaa2e2fe92fb1 | [
"MIT"
] | 210 | 2017-09-01T00:10:08.000Z | 2022-03-19T18:05:12.000Z |
programmer = Programmer()
tester = Tester()
project = ProjectManagement()
print(project.process(programmer))
print(project.process(tester))
#La clase ProjectManagment es la que se puede extender
#por eso la dejamos asi, ya que cumple con el principio
#Abierto/Cerrado
#(no le entendi muy bien)
| 19.638889 | 55 | 0.670438 | class Programmer:
def code(self):
return 'I am Coding!'
class Tester:
def test(self):
return 'I am Testing!'
class ProjectManagement:
def process(self, worker):
if isinstance(worker, Programmer):
return worker.code()
elif isinstance(worker, Tester):
return worker.test()
else:
return 'Hey there!, Something went wrong :C'
programmer = Programmer()
tester = Tester()
project = ProjectManagement()
print(project.process(programmer))
print(project.process(tester))
#La clase ProjectManagment es la que se puede extender
#por eso la dejamos asi, ya que cumple con el principio
#Abierto/Cerrado
#(no le entendi muy bien)
| 266 | -9 | 149 |
89e3547e029cf9e6f7b297217cd7135285370c2d | 15,695 | py | Python | rock4/softtest/pad/hierarchy/driver.py | RockFeng0/rock4automation | a29270ab9fa4cdc79f6453971b7c7a21f01442b0 | [
"MIT"
] | 5 | 2018-09-25T05:49:49.000Z | 2021-12-30T11:06:09.000Z | rock4/softtest/pad/hierarchy/driver.py | RockFeng0/rock4automation | a29270ab9fa4cdc79f6453971b7c7a21f01442b0 | [
"MIT"
] | 3 | 2018-04-01T04:27:21.000Z | 2019-01-03T11:02:33.000Z | rock4/softtest/pad/hierarchy/driver.py | RockFeng0/rock4automation | a29270ab9fa4cdc79f6453971b7c7a21f01442b0 | [
"MIT"
] | 7 | 2018-09-25T05:49:51.000Z | 2021-12-30T11:06:11.000Z | # -*- encoding: utf-8 -*-
'''
Current module: mobile.monkeyrunner.driver
Rough version history:
v1.0 Original version to use
********************************************************************
@AUTHOR: Administrator-Bruce Luo(罗科峰)
MAIL: lkf20031988@163.com
RCS: rock4.softtest.pad.hierarchy.driver,v 2.0 2017年2月7日
FROM: 2015年12月23日
********************************************************************
======================================================================
Android UI automation frame for python.
Jython-monkeyrunner--need use jython to compileall
monkeyrunner.jar & chimpchat.jar & hierarchyviewer2lib.jar
'''
from com.android.monkeyrunner import MonkeyRunner,MonkeyDevice
from com.android.monkeyrunner.easy import By,EasyMonkeyDevice
from com.android.chimpchat.hierarchyviewer import HierarchyViewer
import os,re,time
import p_m_env
class MobileApp():
''' Mobile App Test.(need MonkeyRunner + Hierarchy)'''
@classmethod
@classmethod
@classmethod
def CloseApp(cls,app_package):
''' only close app . keep the session'''
result = re.search(".*%s\r\n" %app_package,os.popen("adb shell ps").read())
if result:
pid = re.findall("\w+",result.group())[1]
f=open("tmp",'w')
f.write("su\r\nkill -9 %s\r\nexit\r\nexit\r\n" %pid)
f.close()
os.system("adb shell <tmp")
os.remove("tmp")
@classmethod
def IsAppInstalled(cls,app_package):
'''
app_package
app_component
apk_path
'''
if re.search(app_package,os.popen("adb shell pm list packages").read()):
result = True
else:
result = False
time.sleep(0.5)
return result
@classmethod
def InstallApp(cls,apk_path):
''' install the app to mobile
apk_path=r"c:\test.apk"
'''
getattr(p_m_env.DEVICE,"installPackage")(apk_path)
time.sleep(0.5)
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
class MobileElement():
''' Mobile App Element Test.(need MonkeyRunner + Hierarchy)'''
(by,value,parent,timeout) = (None,None,None,30)
@classmethod
@classmethod
@classmethod
def Swipe(cls,duration=2,steps=100):
''' 模拟用户滑动
cls.by = "positon"
cls.value = [100,100,200,200]
duration --> 设置拖动过程的耗时
steps --> 设置拖动过程的步长, 步长较短的话,效果就是长按
'''
if cls.by == "position":
if not isinstance(cls.value, list):
raise Exception("Need list type:",cls.value)
getattr(p_m_env.DEVICE,"drag'")((cls.value[0],cls.value[1]),(cls.value[2],cls.value[3]),duration,steps)
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
if __name__ == "__main__":
# monkeyrunner driver.py
#等待连接
MobileApp.WaitForConnection()
#登录的示例
component = "com.tianwen.aischool/.ui.publics.login.LoginActivity"
MobileApp.LaunchApp(component)
print "launch app ok"
# 截图
MobileElement.by = "id"
MobileElement.value = "id/login_user_img"
MobileElement.SaveCurrentImageToFile(r"d:\auto\buffer\test.png")
print "Saving a picture to a file. ok"
#用户名
MobileElement.by = "id"
MobileElement.value = "id/login_account_input";#使用android sdk中的工具-->hierarchyviewer.bat查看。还有一个辅助工具 uiautomatorviewer.bat
MobileElement.TypeInClear("brucestudent1")
print "Typing username(brucestudent1) ok"
#密码
(MobileElement.by,MobileElement.value) = ("id","id/login_password_input")
MobileElement.TypeInClear("123456")
print "Typing password(123456) ok"
#登录按钮
(MobileElement.by,MobileElement.value) = ("id","id/login_start_button")
MobileElement.Touch()
print "Tap login button ok"
#验证信息
(MobileElement.by,MobileElement.value) = ("id","id/login_error_tip_text")
result = MobileElement.GetText()
print "verify login info"
print "-->",result
if "正在登录" in result:
print "Login Test Pass"
else:
print "Login Test Fail"
# 点击 练习作业
print "->click link_text"
(MobileElement.by,MobileElement.value,MobileElement.parent,MobileElement.timeout) = ("link_text",u"练习作业","id/homepage_exercise",30)
MobileElement.Touch()
print "Link_text is ok"
# 点击 练习作业
print "->click index"
(MobileElement.by,MobileElement.value,MobileElement.parent) = ("index",[0,0,1,0],"id/grid")
MobileElement.Touch()
print "Index is ok"
| 33.680258 | 140 | 0.568652 | # -*- encoding: utf-8 -*-
'''
Current module: mobile.monkeyrunner.driver
Rough version history:
v1.0 Original version to use
********************************************************************
@AUTHOR: Administrator-Bruce Luo(罗科峰)
MAIL: lkf20031988@163.com
RCS: rock4.softtest.pad.hierarchy.driver,v 2.0 2017年2月7日
FROM: 2015年12月23日
********************************************************************
======================================================================
Android UI automation frame for python.
Jython-monkeyrunner--need use jython to compileall
monkeyrunner.jar & chimpchat.jar & hierarchyviewer2lib.jar
'''
from com.android.monkeyrunner import MonkeyRunner,MonkeyDevice
from com.android.monkeyrunner.easy import By,EasyMonkeyDevice
from com.android.chimpchat.hierarchyviewer import HierarchyViewer
import os,re,time
import p_m_env
class MobileApp():
    ''' Mobile App Test.(need MonkeyRunner + Hierarchy)'''
    @classmethod
    def WaitForConnection(cls):
        # Blocks until a device is attached, then stores the three shared
        # handles (DEVICE / EASY_DEVICE / HIERARCHY) on the p_m_env module.
        print """
    Connecting the device...
    Please check blow, if without an connection in long time:
        1. USB-debug model is enabled
        2. Device driver is installed
        """
        p_m_env.DEVICE = MonkeyRunner.waitForConnection();# command: adb wait-for-device
        print "connect ok"
        p_m_env.EASY_DEVICE = EasyMonkeyDevice(p_m_env.DEVICE)
        print "connect easy ok"
        p_m_env.HIERARCHY = p_m_env.DEVICE.getHierarchyViewer()
        print "connect hierarchy ok"
    @classmethod
    def LaunchApp(cls,app_component):
        # Start an activity by its full component name.
        # sample: app_component = com.mytest.aischool/.ui.publics.login.LoginActivity
        # adb shell am start -W -n com.tianwen.aischool/.ui.publics.login.LoginActivity
        getattr(p_m_env.DEVICE,"startActivity")(component=app_component)
        time.sleep(0.5)
    @classmethod
    def CloseApp(cls,app_package):
        ''' only close app . keep the session'''
        # Find the app's pid in `adb shell ps`, then kill it as root via a
        # temporary command file piped into `adb shell`.
        result = re.search(".*%s\r\n" %app_package,os.popen("adb shell ps").read())
        if result:
            pid = re.findall("\w+",result.group())[1]
            f=open("tmp",'w')
            f.write("su\r\nkill -9 %s\r\nexit\r\nexit\r\n" %pid)
            f.close()
            os.system("adb shell <tmp")
            os.remove("tmp")
    @classmethod
    def IsAppInstalled(cls,app_package):
        '''Return True when app_package appears in `adb shell pm list packages`.

        NOTE(review): the package name is used as a regex and matched as a
        substring, so "com.foo" also matches "com.foo.bar" -- confirm callers
        pass the full, exact package id.
        '''
        if re.search(app_package,os.popen("adb shell pm list packages").read()):
            result = True
        else:
            result = False
        time.sleep(0.5)
        return result
    @classmethod
    def InstallApp(cls,apk_path):
        ''' install the app to mobile
        apk_path=r"c:\test.apk"
        '''
        getattr(p_m_env.DEVICE,"installPackage")(apk_path)
        time.sleep(0.5)
    @classmethod
    def RemoveApp(cls,app_package):
        # Uninstall the package from the device.
        # sample: app_package = com.mytest.testschool
        # adb uninstall com.mytest.testschool
        getattr(p_m_env.DEVICE,"removePackage")(app_package)
        time.sleep(0.5)
    @classmethod
    def Click(cls,x,y):
        # Tap at absolute screen coordinates.
        p_m_env.DEVICE.touch(x,y,MonkeyDevice.DOWN_AND_UP)
    @classmethod
    def TypeRaw(cls,msg):
        # Type text into whatever currently has focus.
        p_m_env.DEVICE.type(msg)
    @classmethod
    def WaitSecond(cls,sec):
        MonkeyRunner.sleep(sec)
    @classmethod
    def ShotScreen(cls,file_path):
        # Save a full-screen PNG snapshot to file_path.
        p_m_env.DEVICE.takeSnapshot().writeToFile(file_path,'png')
    @classmethod
    def Enter(cls):
        cls.Keyevent("KEYCODE_ENTER")
    @classmethod
    def GetCurrentActivity(cls):
        return getattr(p_m_env.HIERARCHY,"getFocusedWindowName")()
    @classmethod
    def Keyevent(cls,key_code_name):
        # Send one key press; accepts a KEYCODE_* name or numeric keycode.
        #adb shell input keyevent KEYCODE_HOME
        getattr(p_m_env.DEVICE,"press")(key_code_name,MonkeyDevice.DOWN_AND_UP)
    @classmethod
    def Back(cls):
        cls.Keyevent("KEYCODE_BACK")
    @classmethod
    def Menu(cls):
        cls.Keyevent("KEYCODE_MENU")
    @classmethod
    def Home(cls):
        cls.Keyevent("KEYCODE_HOME")
    @classmethod
    def Tab(cls):
        cls.Keyevent("KEYCODE_TAB")
    @classmethod
    def PageSwapUP(cls):
        cls.Keyevent(92);# KEYCODE_PAGE_UP
    @classmethod
    def PageSwapDOWN(cls):
        cls.Keyevent(93);# KEYCODE_PAGE_DOWN
    @classmethod
    def CursorUP(cls):
        cls.Keyevent(19);# KEYCODE_DPAD_UP
    @classmethod
    def CursorDOWN(cls):
        cls.Keyevent(20);# KEYCODE_DPAD_DOWN
    @classmethod
    def CursorLEFT(cls):
        cls.Keyevent(21);# KEYCODE_DPAD_LEFT
    @classmethod
    def CursorRIGHT(cls):
        cls.Keyevent(22);# KEYCODE_DPAD_RIGHT
    @classmethod
    def CursorMoveToHOME(cls):
        cls.Keyevent(122);# KEYCODE_MOVE_HOME
    @classmethod
    def CursorMoveToEND(cls):
        cls.Keyevent(123);# KEYCODE_MOVE_END
    @classmethod
    def selectCurrentCursor(cls):
        cls.Keyevent(23);# KEYCODE_DPAD_CENTER
class MobileElement():
''' Mobile App Element Test.(need MonkeyRunner + Hierarchy)'''
(by,value,parent,timeout) = (None,None,None,30)
@classmethod
def Touch(cls):
if cls.by == "position":
if not isinstance(cls.value, list):
raise Exception("Need list type:",cls.value)
p_m_env.DEVICE.touch(cls.value[0],cls.value[1],MonkeyDevice.DOWN_AND_UP)
else:
point = cls.GetElementCenterPoint()
p_m_env.DEVICE.touch(point.x,point.y,MonkeyDevice.DOWN_AND_UP)
@classmethod
def LongTouch(cls):
if cls.by == "position":
if not isinstance(cls.value, list):
raise Exception("Need list type:",cls.value)
getattr(p_m_env.DEVICE,"drag'")((cls.value[0],cls.value[1]),(cls.value[0],cls.value[1]),1,3)
else:
point = cls.GetElementCenterPoint()
getattr(p_m_env.DEVICE,"drag'")((point.x,point.y),(point.x,point.y),0.5,10)
@classmethod
def Swipe(cls,duration=2,steps=100):
''' 模拟用户滑动
cls.by = "positon"
cls.value = [100,100,200,200]
duration --> 设置拖动过程的耗时
steps --> 设置拖动过程的步长, 步长较短的话,效果就是长按
'''
if cls.by == "position":
if not isinstance(cls.value, list):
raise Exception("Need list type:",cls.value)
getattr(p_m_env.DEVICE,"drag'")((cls.value[0],cls.value[1]),(cls.value[2],cls.value[3]),duration,steps)
@classmethod
def SwipeToObj(cls,dest_obj,duration=2,steps=3):
if cls.by == "position":
if not isinstance(cls.value, list):
raise Exception("Need list type:",cls.value)
src_xy = [cls.value[0], cls.value[0]]
else:
point1 = cls.GetElementCenterPoint()
src_xy = [point1.x, point1.y]
if dest_obj.by == "positon":
if not isinstance(dest_obj.value, list):
raise Exception("Need list type:",cls.value)
dest_xy = [dest_obj.value[0], dest_obj.value[0]]
else:
point2 = dest_obj.GetElementCenterPoint()
dest_xy = [point2.x, point2.y]
getattr(p_m_env.DEVICE,"drag'")((src_xy[0],src_xy[1]),(dest_xy[0],dest_xy[1]),duration,steps)
@classmethod
def Type(cls, msg):
if msg == "":
return
if msg == "SET_EMPTY":
msg = ""
cls.Touch()
p_m_env.DEVICE.type(msg)
@classmethod
def TypeInClear(cls, msg):
if msg == "":
return
if msg == "SET_EMPTY":
msg = ""
cls.Touch()
text = cls.GetText()
for i in range(len(text)):
p_m_env.DEVICE.press('KEYCODE_DEL', MonkeyDevice.DOWN_AND_UP)
p_m_env.DEVICE.press('KEYCODE_FORWARD_DEL', MonkeyDevice.DOWN_AND_UP)
p_m_env.DEVICE.type(msg)
@classmethod
def SendEnter(cls):
getattr(p_m_env.DEVICE,"press")("KEYCODE_ENTER",MonkeyDevice.DOWN_AND_UP)
@classmethod
def GetText(cls):
view_node = cls.__get_view_node()
return p_m_env.HIERARCHY.getText(view_node).encode("utf-8")
@classmethod
def SaveCurrentImageToFile(cls,file_path):
if not os.path.isdir(os.path.dirname(file_path)):
raise Exception("invalid file: %s" %file_path)
view_node = cls.__get_view_node()
screen_snapshot = getattr(p_m_env.DEVICE,"takeSnapshot")()
point = p_m_env.HIERARCHY.getAbsolutePositionOfView(view_node)
sub_img = screen_snapshot.getSubImage((point.x,point.y,view_node.width,view_node.height))
return sub_img.writeToFile(file_path)
@classmethod
def GetFocusedWindowName(cls):
return getattr(p_m_env.HIERARCHY,"getFocusedWindowName")()
@classmethod
def GetElementCenterPoint(cls):
view_node = cls.__get_view_node()
point = p_m_env.HIERARCHY.getAbsoluteCenterOfView(view_node)
print "center of view:",point
return point
@classmethod
def GetElementRectPoint(cls):
view_node = cls.__get_view_node()
point = p_m_env.HIERARCHY.getAbsolutePositionOfView(view_node)
(left,top) = (point.x,point.y)
(right,bottom) = (point.x + view_node.width,point.y + view_node.height)
return [(left,top),(right,bottom)]
@classmethod
def GetAttribute(cls, attr):
view_node = cls.__get_view_node()
return view_node.namedProperties
@classmethod
def IsExist(cls):
if cls.__get_view_node():
return True
return False
@classmethod
def IsVisible(cls):
view_node = cls.__get_view_node()
if not view_node:
return False
return p_m_env.HIERARCHY.visible(view_node)
@classmethod
def __getPropertyList(cls):
return p_m_env.DEVICE.getPropertyList()
@classmethod
def __get_view_node(cls):
view_node = None
if cls.by == "id":
view_node = cls.__find_element_by_id(cls.value)
elif cls.by == "link_text":
view_node = cls.__find_element_by_link_text(cls.parent,cls.value)
elif cls.by == "index":
view_node = cls.__find_element_by_index(cls.parent,cls.value)
return view_node
@classmethod
def __wait(cls,aid):
# MonkeyRunner search element need id(parent id or self id)
endtime = time.time() + cls.timeout
while True:
view_node = p_m_env.HIERARCHY.findViewById(aid)
if view_node:
# print "-->'%s' is OK" %aid
return view_node
elif time.time()>endtime:
# raise Exception("Timeout at %d seconds.Element(%s-%s-%s) not found." %(cls.timeout,cls.by,repr(cls.value),cls.parent))
print "Warning: Timeout at %d seconds.Element(%s-%s-%s) not found." %(cls.timeout,cls.by,repr(cls.value),cls.parent)
return
@classmethod
def __find_element_by_id(cls,aid):
# Sample usage:
# node = __find_element_by_id("id/login_account_input")
return cls.__wait(aid)
@classmethod
def __find_element_by_link_text(cls,root_id,strs):
#Sample usage:
# u"科目" --> u'\u79d1\u76ee'
# node=__find_element_by_link_text(id,u'\u79d1\u76ee')
if not isinstance(strs,unicode):
raise Exception("Need unicode string:",strs)
view_node = cls.__find_element_by_id(root_id)
return cls.__search_element(view_node,strs)
@classmethod
def __find_element_by_index(cls,root_id,index_list):
# sample usage:
# __find_element_by_index("id/grid",[0,0,1,0])
if not isinstance(index_list,list):
raise Exception("Need list type:",index_list)
view_node = cls.__find_element_by_id(root_id)
return cls.__search_element(view_node,index_list)
@classmethod
def __search_element(cls,child_node,strs_or_list):
if not child_node:
return
# 依据index列表,返回节点
if isinstance(strs_or_list, list):
for i in strs_or_list:
if child_node.children:
child_node = child_node.children[i]
else:
return
return child_node
# 递归遍历,返回节点
if "TextView" in child_node.name:
mText = child_node.namedProperties.get("text:mText")
if mText and mText.value == strs_or_list:
# print strs_or_list.encode("utf-8")
# print mText.value.encode("utf-8")
return child_node
else:
for child in child_node.children:
result = cls.__search_element(child,strs_or_list)
if result:
return result
if __name__ == "__main__":
    # Demo / smoke test: log into the aischool app and tap around.
    # Run under MonkeyRunner: monkeyrunner driver.py
    # Wait for a device connection
    MobileApp.WaitForConnection()
    # Login example
    component = "com.tianwen.aischool/.ui.publics.login.LoginActivity"
    MobileApp.LaunchApp(component)
    print "launch app ok"
    # Screenshot of a single element
    MobileElement.by = "id"
    MobileElement.value = "id/login_user_img"
    MobileElement.SaveCurrentImageToFile(r"d:\auto\buffer\test.png")
    print "Saving a picture to a file. ok"
    # Username field (ids can be inspected with the Android SDK tools
    # hierarchyviewer.bat, or the companion uiautomatorviewer.bat)
    MobileElement.by = "id"
    MobileElement.value = "id/login_account_input"
    MobileElement.TypeInClear("brucestudent1")
    print "Typing username(brucestudent1) ok"
    # Password field
    (MobileElement.by,MobileElement.value) = ("id","id/login_password_input")
    MobileElement.TypeInClear("123456")
    print "Typing password(123456) ok"
    # Login button
    (MobileElement.by,MobileElement.value) = ("id","id/login_start_button")
    MobileElement.Touch()
    print "Tap login button ok"
    # Verify the login feedback text
    (MobileElement.by,MobileElement.value) = ("id","id/login_error_tip_text")
    result = MobileElement.GetText()
    print "verify login info"
    print "-->",result
    if "正在登录" in result:
        print "Login Test Pass"
    else:
        print "Login Test Fail"
    # Tap the "practice homework" entry, located by link text
    print "->click link_text"
    (MobileElement.by,MobileElement.value,MobileElement.parent,MobileElement.timeout) = ("link_text",u"练习作业","id/homepage_exercise",30)
    MobileElement.Touch()
    print "Link_text is ok"
    # Tap the same entry again, this time located by a child-index path
    print "->click index"
    (MobileElement.by,MobileElement.value,MobileElement.parent) = ("index",[0,0,1,0],"id/grid")
    MobileElement.Touch()
    print "Index is ok"
| 8,700 | 0 | 1,227 |
fa017755b8936f4b18dedb46a5a32403feb76580 | 452 | py | Python | app/api/v1/models/usermodels.py | kiruidavid/SendIT254 | ee14fdcd0964d18cc93312aec39987a0753e09f1 | [
"MIT"
] | null | null | null | app/api/v1/models/usermodels.py | kiruidavid/SendIT254 | ee14fdcd0964d18cc93312aec39987a0753e09f1 | [
"MIT"
] | null | null | null | app/api/v1/models/usermodels.py | kiruidavid/SendIT254 | ee14fdcd0964d18cc93312aec39987a0753e09f1 | [
"MIT"
] | null | null | null | users = []
| 13.294118 | 67 | 0.579646 | users = []
class Users(object):
    """In-memory user store backed by the module-level ``users`` list."""

    def __init__(self):
        # Every instance aliases the single module-level list, so accounts
        # created through one instance are visible through all others.
        self.db = users

    def create_account(self, email, phone_number, user_name, password):
        """Append a new account dict to the store and return it.

        NOTE(review): the password is stored in plain text and the id is
        len+1, so ids would collide if deletion were ever added.
        """
        account = {
            # CONSISTENCY FIX: derive the id from self.db instead of the
            # global ``users`` (same list, but the method now depends only
            # on instance state, like the rest of the class).
            "user_id": str(len(self.db) + 1),
            "email": email,
            "phone_number": phone_number,
            "user_name": user_name,
            "password": password
        }
        self.db.append(account)
        return account

    def get_users(self):
        """Return the backing list of all account dicts."""
        return self.db
| 333 | -1 | 97 |
a2064944d6fc7908c704e0375eec3db9c30f3880 | 2,787 | py | Python | core.py | vitus9988/arcalive_con_downloader | 5e1612ab015565f95da0dba68e302375fc827695 | [
"MIT"
] | null | null | null | core.py | vitus9988/arcalive_con_downloader | 5e1612ab015565f95da0dba68e302375fc827695 | [
"MIT"
] | null | null | null | core.py | vitus9988/arcalive_con_downloader | 5e1612ab015565f95da0dba68e302375fc827695 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import requests
import urllib.request
from moviepy.video.io.VideoFileClip import VideoFileClip
import os
if __name__ == '__main__':
#main('https://arca.live/e/56?target=title&keyword=%EB%9D%BC%EC%98%A4&sort=rank&p=1')
con_download() | 30.626374 | 146 | 0.527449 | # -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import requests
import urllib.request
from moviepy.video.io.VideoFileClip import VideoFileClip
import os
def wc(url):
    """Fetch *url* with a desktop browser user-agent and return the parsed
    BeautifulSoup document."""
    request_headers = {
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.87 Safari/537.36"}
    page_source = requests.get(url, headers=request_headers).text
    return BeautifulSoup(page_source, "lxml")
def main(url):
    # Download one arca.live emoticon set: every image/video on the page is
    # saved into a directory named after the page title; .mp4 clips are
    # converted to .gif and the sources removed afterwards.
    headers = {
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.87 Safari/537.36"}
    soup = wc(url)
    title = soup.find('div','title-row').text.strip()
    detail = soup.find('div','emoticons-wrapper')
    # The media tags are lazy-loaded; their URL is in src or data-src.
    maintab = detail.find_all(loading='lazy')
    count = 1
    if os.path.exists("{}".format(title)):
        # Directory already present ("this emoticon set already exists");
        # NOTE(review): the download still proceeds into it.
        print('이미 존재하는 콘')
    else:
        os.makedirs("{}".format(title))
    for i in maintab:
        link = i.get('src')
        if link is None:
            link = i.get('data-src')
        # Links are protocol-relative ("//host/..."); slice off the slashes.
        print("http://{}".format(link[2:]))
        imglink = "http://{}".format(link[2:])
        request_ = urllib.request.Request(imglink, None, headers)
        response = urllib.request.urlopen(request_)
        result = imglink.split('.')
        # File extension = text after the last dot of the URL.
        filetype = ''.join(result[-1])
        if filetype == 'mp4':
            with open('{}/{}.{}'.format(title, count, filetype), "wb") as file:
                file.write(response.read())
            # Videos are converted to animated gifs straight away.
            mp4togif('{}/{}.{}'.format(title, count, filetype))
            count += 1
        else:
            with open('{}/{}.{}'.format(title, count, filetype), "wb") as file:
                file.write(response.read())
            count += 1
    # Drop the now-redundant .mp4 sources.
    remover(f"{title}")
def mp4togif(filename):
    """Convert an .mp4 clip to an animated .gif next to it; returns the clip."""
    video = VideoFileClip(filename)
    gif_path = filename.replace('.mp4', '.gif')
    video.write_gif(gif_path, fps=30, fuzz=1)
    return video
def remover(filepath):
    """Delete the leftover .mp4 source files inside *filepath*.

    main() converts every downloaded .mp4 emoticon to a .gif, so the
    original videos are no longer needed afterwards.
    """
    for file_name in os.listdir(filepath):
        # BUG FIX: the original test was ``'mp4' in file``, which also
        # matched unrelated names such as 'mp4notes.txt'; only real *.mp4
        # files should be removed.
        if file_name.endswith('.mp4'):
            try:
                os.remove(f"{filepath}/{file_name}")
            except OSError:
                # Narrowed from a bare except so Ctrl-C is not swallowed.
                print('mp4파일 삭제 오류')
def con_download():
    """Prompt for emoticon-page URLs in a loop and download each one.

    Entering 'q', 'Q' or 'ㅂ' (the Korean key under q) ends the session.
    """
    while True:
        con_url = input("원하는 콘 url을 입력하세요(종료하려면 q를 입력): ")
        if con_url in ('q', 'Q', 'ㅂ'):
            break
        try:
            main(con_url)
        except:
            # main() raises on pages that are not valid emoticon listings.
            print('유효하지 않은 url이거나 잘못된 url')
if __name__ == '__main__':
#main('https://arca.live/e/56?target=title&keyword=%EB%9D%BC%EC%98%A4&sort=rank&p=1')
con_download() | 2,434 | 0 | 125 |
c82e1da6da58dbb8b151c18c5241dc8a78e80db8 | 5,215 | py | Python | core/commands/create_domain.py | M-Spencer-94/configNOW | 56828587253202089e77cfdfcf5329f2a7f09b3f | [
"PSF-2.0",
"Apache-2.0",
"MIT"
] | 3 | 2019-07-09T20:02:48.000Z | 2021-11-21T20:00:37.000Z | core/commands/create_domain.py | M-Spencer-94/configNOW | 56828587253202089e77cfdfcf5329f2a7f09b3f | [
"PSF-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | core/commands/create_domain.py | M-Spencer-94/configNOW | 56828587253202089e77cfdfcf5329f2a7f09b3f | [
"PSF-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | # ============================================================================
#
# Copyright (c) 2007-2010 Integral Technology Solutions Pty Ltd,
# All Rights Reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE
# LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# FOR FURTHER INFORMATION PLEASE SEE THE INTEGRAL TECHNOLOGY SOLUTIONS
# END USER LICENSE AGREEMENT (ELUA).
#
# ============================================================================
import common.assertions as assertions
import common.logredirect as logredirect
from java.io import File
execfile('wlst/common.py')
execfile('wlst/server.py')
execfile('wlst/workmgr.py')
execfile('wlst/deployment.py')
execfile('wlst/createDomain.py')
execfile('wlst/jdbc_offline.py')
def run(cfg):
    """Create WebLogic Domain"""
    # Validate the installation and the domain configuration up front.
    assertions.sanityCheckInstall(cfg)
    assertions.sanityCheckDomainConfig(cfg)
    # Guard clause: this command is WLST-only.
    if not wlst_support:
        raise Exception('WLST support required for this command')
    logredirect.setup()
    create_domain(cfg)
| 42.398374 | 123 | 0.653883 | # ============================================================================
#
# Copyright (c) 2007-2010 Integral Technology Solutions Pty Ltd,
# All Rights Reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE
# LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# FOR FURTHER INFORMATION PLEASE SEE THE INTEGRAL TECHNOLOGY SOLUTIONS
# END USER LICENSE AGREEMENT (ELUA).
#
# ============================================================================
import common.assertions as assertions
import common.logredirect as logredirect
from java.io import File
execfile('wlst/common.py')
execfile('wlst/server.py')
execfile('wlst/workmgr.py')
execfile('wlst/deployment.py')
execfile('wlst/createDomain.py')
execfile('wlst/jdbc_offline.py')
def run(cfg):
    """Create WebLogic Domain"""
    # Validate the installation and the domain configuration up front.
    assertions.sanityCheckInstall(cfg)
    assertions.sanityCheckDomainConfig(cfg)
    # Guard clause: this command is WLST-only.
    if not wlst_support:
        raise Exception('WLST support required for this command')
    logredirect.setup()
    create_domain(cfg)
def create_domain(configProperties):
    # Drive WLST (offline) to create a WebLogic domain from the properties
    # file: create + read the domain, optionally layer the JRF template and
    # wire the service-table datasource for 12c SOA domains, then create
    # machines/clusters/servers/work managers, deployments and datasources.
    domainPath=configProperties.getProperty('wls.domain.dir')
    domainName=configProperties.getProperty('wls.domain.name')
    # NOTE(review): domainAppDir, webLogicHome and osbname are read but
    # never used below -- presumably consumed by the execfile'd helpers.
    domainAppDir=configProperties.getProperty('wls.domain.app.dir')
    webLogicHome=configProperties.getProperty('wls.oracle.home')
    osbname=configProperties.getProperty('osb.name')
    soaname=configProperties.getProperty('soa.name')
    version=configProperties.getProperty('wls.version')
    try:
        if domainName=='':
            log.error("wls.domain.name property can't be empty")
            raise Exception('wls.domain.name property can not be empty')
        domainFullPath=str(domainPath) + '/' + str(domainName)
        # Fails fast when the target directory already exists.
        checkDomainExistence(domainFullPath)
        log.info('Creating domain: ' + domainFullPath)
        __createDomain(configProperties)
        log.info("Reading domain")
        readDomain(domainFullPath)
        # 12c + SOA: apply the JRF template and point LocalSvcTblDataSource
        # at the RCU schema before fetching the other schema defaults.
        if version == '12' and (not soaname is None):
            log.debug("not plain WLS domain")
            # NOTE(review): jrfTemplateLoc and ServiceTableDriver are unused
            # here; the driver is hard-coded to oracle.jdbc.OracleDriver.
            jrfTemplateLoc=configProperties.getProperty('wls.template.jrf.file')
            ServiceTableName=configProperties.getProperty('jdbc.datasource.LocalSvcTblDataSource.Name')
            ServiceTableUser=configProperties.getProperty('jdbc.datasource.LocalSvcTblDataSource.Username')
            ServiceTableURL=configProperties.getProperty('jdbc.datasource.LocalSvcTblDataSource.URL')
            ServiceTableDriver=configProperties.getProperty('jdbc.datasource.LocalSvcTblDataSource.Driver')
            ServiceTablePassword=configProperties.getProperty('jdbc.datasource.LocalSvcTblDataSource.Password')
            __addTemplate(configProperties)
            log.info("Connecting to Service table")
            cd('/JDBCSystemResources/'+ServiceTableName+'/JdbcResource/'+ServiceTableName+'')
            cd('JDBCDriverParams/NO_NAME_0')
            set('DriverName','oracle.jdbc.OracleDriver')
            set('URL',ServiceTableURL)
            set('PasswordEncrypted',ServiceTablePassword)
            cd('Properties/NO_NAME_0')
            cd('Property/user')
            cmo.setValue(ServiceTableUser)
            log.debug(ServiceTableURL)
            log.debug(ServiceTableUser)
            log.info('Connected to ServiceTable'+ServiceTableName+'......Fetching DB Schema details now')
            getDatabaseDefaults()
            #print 'Coherence changes'
            #cd('/')
            #cd('Server/soa_server1')
            #create('member_config', 'CoherenceMemberConfig')
            #cd('CoherenceMemberConfig/member_config')
            #set('UnicastListenAddress', '121.0.0.1')
            log.debug('Finished updating 12c changes')
        else:
            __addTemplate(configProperties)
        # Topology and resources (helpers come from the execfile'd scripts).
        __createMachines(0, configProperties)
        __createClusters(0, configProperties)
        __createServers(0, configProperties)
        __createWorkManagers(0, configProperties)
        __configureDeployments(0, configProperties)
        __configureDataSources(configProperties)
        log.info("Closing Template NOW")
        updateDomain()
        closeDomain()
    except Exception, error:
        log.error('Unable to create domain [' + str(domainPath) + '/' + str(domainName) + ']')
        raise error
    # __processPostDomainCreation(configProperties)
def checkDomainExistence(domainPath):
    """Fail fast when a domain directory already exists at *domainPath*."""
    domain_dir = File(domainPath)
    if domain_dir.exists():
        raise Exception('Cannot create domain as it already exists at ' + domainPath)
| 3,646 | 0 | 23 |
c56c8fcafd417e7a3375947337c8289dd86cfdbd | 781 | py | Python | urlunquote.py | kmorwood/mrequests | 957b9a7496cb7514cb3c1270883f40e038a7e1ce | [
"MIT"
] | 5 | 2021-08-14T15:58:29.000Z | 2022-02-24T23:11:19.000Z | urlunquote.py | kmorwood/mrequests | 957b9a7496cb7514cb3c1270883f40e038a7e1ce | [
"MIT"
] | 2 | 2021-09-15T12:32:28.000Z | 2022-01-10T02:32:31.000Z | urlunquote.py | kmorwood/mrequests | 957b9a7496cb7514cb3c1270883f40e038a7e1ce | [
"MIT"
def unquote(string):
    """Decode and replace URL percent-escapes in string.

    unquote('abc%20def') -> b'abc def'.

    Note: If a string, not a bytes object, is passed, it is encoded as UTF-8.
    This is only an issue if it contains unescaped non-ASCII characters, which
    URIs should not.
    """
    if not string:
        return b''
    if isinstance(string, str):
        string = string.encode('utf-8')
    bits = string.split(b'%')
    if len(bits) == 1:
        return string
    res = bytearray(bits[0])
    append = res.append
    extend = res.extend
    for item in bits[1:]:
        try:
            # A valid escape is exactly two hex digits after the '%'.
            append(int(item[:2], 16))
            extend(item[2:])
        except ValueError:
            # BUG FIX: int() raises ValueError on malformed escapes, not
            # KeyError, so the old handler never fired and bad input
            # crashed.  Also, bytearray.append() needs an int, so the
            # literal '%' must go through extend(), not append(b'%').
            # Malformed escapes are now passed through verbatim.
            extend(b'%')
            extend(item)
    return bytes(res)
def unquote(string):
    """Decode and replace URL percent-escapes in string.

    unquote('abc%20def') -> b'abc def'.

    Note: If a string, not a bytes object, is passed, it is encoded as UTF-8.
    This is only an issue if it contains unescaped non-ASCII characters, which
    URIs should not.
    """
    if not string:
        return b''
    if isinstance(string, str):
        string = string.encode('utf-8')
    bits = string.split(b'%')
    if len(bits) == 1:
        return string
    res = bytearray(bits[0])
    append = res.append
    extend = res.extend
    for item in bits[1:]:
        try:
            # A valid escape is exactly two hex digits after the '%'.
            append(int(item[:2], 16))
            extend(item[2:])
        except ValueError:
            # BUG FIX: int() raises ValueError on malformed escapes, not
            # KeyError, so the old handler never fired and bad input
            # crashed.  Also, bytearray.append() needs an int, so the
            # literal '%' must go through extend(), not append(b'%').
            # Malformed escapes are now passed through verbatim.
            extend(b'%')
            extend(item)
    return bytes(res)
| 0 | 0 | 0 |
2127d7f5da8e00b3ae835b2e11ef891db0f2c08f | 664 | py | Python | meeting_room_booking_service/app/models/meeting_room_reservation.py | cassioeskelsen/microservice_boilerplate | c2c09d93095cc87e54f70095e74f13a97458e7d9 | [
"Apache-2.0"
] | null | null | null | meeting_room_booking_service/app/models/meeting_room_reservation.py | cassioeskelsen/microservice_boilerplate | c2c09d93095cc87e54f70095e74f13a97458e7d9 | [
"Apache-2.0"
] | null | null | null | meeting_room_booking_service/app/models/meeting_room_reservation.py | cassioeskelsen/microservice_boilerplate | c2c09d93095cc87e54f70095e74f13a97458e7d9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from mongoengine import EmbeddedDocument, StringField, Document, EmailField, EmbeddedDocumentField, DateTimeField
| 26.56 | 113 | 0.680723 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from mongoengine import EmbeddedDocument, StringField, Document, EmailField, EmbeddedDocumentField, DateTimeField
class Room(EmbeddedDocument):
    """Embedded sub-document holding the booked meeting room's details."""
    # Display name of the room.
    name = StringField()
class Employeer(EmbeddedDocument):
    """Embedded sub-document holding the booking employee's details.

    NOTE(review): the class name looks like a misspelling of
    "Employer"/"Employee"; renaming would touch the reservation document's
    fields, so it is only flagged here.
    """
    # Display name of the employee.
    name = StringField()
class RoomReservation(Document):
    """Mongo document for a single meeting-room reservation.

    Stored in the 'room_reservations' collection: the reservation time plus
    embedded employee and room sub-documents.
    """
    def __init__(self, *args, **kwargs):
        # BUG FIX: was ``super(Document, self).__init__`` -- that starts the
        # MRO walk *after* Document and therefore skips Document.__init__.
        super(RoomReservation, self).__init__(*args, **kwargs)
        # BUG FIX: was ``self.employeer`` (typo); the declared field below is
        # ``employer``, so the default instance never reached the real field.
        self.employer = Employeer()
        self.room = Room()
    # Date and time the room is reserved for.
    datehour = DateTimeField()
    employer = EmbeddedDocumentField(Employeer)
    room = EmbeddedDocumentField(Room)
    meta = {
        'collection' : 'room_reservations'
    }
| 135 | 299 | 69 |
0d33a1a4d1cd0a62230fae875e71caf4206bae3e | 2,325 | py | Python | backend/app/core/cache.py | luccasPh/gobarber | 3bc84c5098b534352ef794428ffb7b937bd3bbd6 | [
"MIT"
] | 1 | 2021-05-05T15:43:25.000Z | 2021-05-05T15:43:25.000Z | backend/app/core/cache.py | luccasPh/gobarber | 3bc84c5098b534352ef794428ffb7b937bd3bbd6 | [
"MIT"
] | null | null | null | backend/app/core/cache.py | luccasPh/gobarber | 3bc84c5098b534352ef794428ffb7b937bd3bbd6 | [
"MIT"
] | null | null | null | from app.schemas.appointment import Appointment
import pickle
from redis import Redis
from typing import Any
from datetime import timedelta
from app.models.user import User as UserModel
from .config import settings
| 32.746479 | 75 | 0.588387 | from app.schemas.appointment import Appointment
import pickle
from redis import Redis
from typing import Any
from datetime import timedelta
from app.models.user import User as UserModel
from .config import settings
class RedisCache(Redis):
    """Redis client with pickle-based caching helpers for appointment data.

    Values are serialised with pickle, so the cache must only ever hold
    data produced by this application (unpickling untrusted bytes is unsafe).
    """

    def __init__(self):
        if settings.REDIS_URL:
            # ROBUSTNESS FIX: parse the URL with urllib instead of the old
            # split(':') / split('@') dance, which broke on any URL without
            # an explicit port and mis-read passwords containing ':'.
            # Expected shape: redis://[user]:password@host:port
            from urllib.parse import urlparse
            parsed = urlparse(settings.REDIS_URL)
            super().__init__(
                host=parsed.hostname,
                port=parsed.port,
                password=parsed.password,
                # Kept as "" (not parsed.username) to preserve the original
                # behaviour: URL-based connections never sent a username.
                username=""
            )
        else:
            super().__init__(
                host=settings.REDIS_HOST,
                port=settings.REDIS_PORT,
                password=settings.REDIS_PASSWORD,
                username=settings.REDIS_USER
            )

    def set_to_cache(self, key: str, value: Any, ttl_seconds: int = 3600) -> None:
        """Pickle *value* and store it under *key* with a TTL (default 1 hour).

        The TTL was previously hard-coded; the new keyword argument defaults
        to the old value, so existing callers are unaffected.
        """
        self.setex(key, timedelta(seconds=ttl_seconds), value=pickle.dumps(value))

    def get_from_cache(self, key: str) -> Any:
        """Return the unpickled value for *key*, or None when absent/expired."""
        data = self.get(key)
        if not data:
            return None
        return pickle.loads(data)

    def invalidate_cache(self, key: str) -> None:
        """Drop a single cache entry."""
        self.delete(key)

    def invalidate_cache_prefix(self, prefix: str) -> None:
        """Delete every key matching '<prefix>:*' in one pipelined round trip."""
        pipeline = self.pipeline()
        for key in self.keys(f"{prefix}:*"):
            pipeline.delete(key)
        pipeline.execute()

    def invalidate_cache_provider(self, user: UserModel) -> None:
        """
        Delete the cache of a specific provider
        through the relationship between user and appointment
        """
        appointment: Appointment
        for appointment in user.user_appointments:
            prefix = f"providers-appointments:{appointment.provider_id}"
            if self.keys(f"{prefix}:*"):
                self.invalidate_cache_prefix(prefix)

    def invalidate_cache_user(self, provider: UserModel) -> None:
        """
        Delete the cache of a specific user
        through the relationship between user/provider and appointment
        """
        appointment: Appointment
        for appointment in provider.provider_appointments:
            prefix = f"user-appointments:{appointment.user_id}"
            self.delete(prefix)
| 1,082 | 1,005 | 22 |
b2f1e5909e2fbd5772b468609d91bf3631253923 | 358 | py | Python | tests/test_common/test_nano.py | CraftSpider/SpiderTools | 4bf155feec7cb983e8d283d93552902ec85178a2 | [
"MIT"
] | 5 | 2019-10-14T13:50:02.000Z | 2021-09-23T18:48:27.000Z | tests/test_common/test_nano.py | CraftSpider/SpiderTools | 4bf155feec7cb983e8d283d93552902ec85178a2 | [
"MIT"
] | null | null | null | tests/test_common/test_nano.py | CraftSpider/SpiderTools | 4bf155feec7cb983e8d283d93552902ec85178a2 | [
"MIT"
] | null | null | null |
import spidertools.common.nano as nano
import os
| 22.375 | 65 | 0.701117 |
import spidertools.common.nano as nano
import os
async def test_nanoclient():
    # Live smoke test against the NaNoWriMo API; requires the TALOS_PSWD
    # environment variable holding the "talosbot" account password.
    client = nano.NanoClient("talosbot", os.getenv("TALOS_PSWD"))
    await client.init()
    marcy = await client.get_user("marcyt")
    projects = await marcy.get_projects()
    user = await projects[0].get_user()
    # Round-trip check: the owner of marcyt's first project is marcyt.
    assert marcy == user
    # Finally make sure the fund-o-meter endpoint responds without raising.
    await client.get_fundometer()
| 284 | 0 | 23 |
84a2f6fe3ad381cfe5315c1bfe35729e95a4a951 | 919 | py | Python | shpfixer/utils/precisionredux.py | francbartoli/ShapeFixer | dfb975cb4d10f0194eef1e7d5edef73bbac89d38 | [
"MIT"
] | null | null | null | shpfixer/utils/precisionredux.py | francbartoli/ShapeFixer | dfb975cb4d10f0194eef1e7d5edef73bbac89d38 | [
"MIT"
] | null | null | null | shpfixer/utils/precisionredux.py | francbartoli/ShapeFixer | dfb975cb4d10f0194eef1e7d5edef73bbac89d38 | [
"MIT"
] | null | null | null | from shapely.geometry import *
from utils import geomerremovertools
class PrecisionRedux(object):
    """Base class for managing the coordinate precision of shapely geometries.
    """
class PrecisionReduxPoint(PrecisionRedux):
    """Derived class handling shapely Point geometries."""
class PrecisionReduxMultiPoint(PrecisionRedux):
    """Derived class handling shapely MultiPoint geometries."""
class PrecisionReduxLineString(PrecisionRedux):
    """Derived class handling shapely LineString geometries."""
class PrecisionReduxMultiLineString(PrecisionRedux):
    """Derived class handling shapely MultiLineString geometries."""
| 19.978261 | 65 | 0.761697 | from shapely.geometry import *
from utils import geomerremovertools
class PrecisionRedux(object):
    """Base class for reducing the coordinate precision of shapely geometries.

    Stores the constructor argument on ``self.arg``; the geometry-specific
    rounding lives in the subclasses below.
    """

    def __init__(self, arg):
        super(PrecisionRedux, self).__init__()
        self.arg = arg

    def round(self):
        # BUG FIX: the signature was ``def round():`` (no self), so any
        # ``instance.round()`` call raised TypeError.  Still a no-op stub.
        pass
class PrecisionReduxPoint(PrecisionRedux):
    """Precision reducer for shapely ``Point`` geometries."""

    def round_point(self, input):
        # BUG FIX: the original lacked ``self`` (so instance calls raised
        # TypeError) and reversed the isinstance arguments --
        # ``isinstance(Point, input)`` asks whether the *class* is an
        # instance of the argument.  Still a no-op stub.
        if isinstance(input, Point):
            pass
class PrecisionReduxMultiPoint(PrecisionRedux):
    """Precision reducer for shapely ``MultiPoint`` geometries."""

    def round_multipoint(self, input):
        # BUG FIX: original lacked ``self``, reversed the isinstance
        # arguments, and referenced ``Multipoint`` -- shapely exports the
        # class as ``MultiPoint``, so the old name raised NameError.
        if isinstance(input, MultiPoint):
            pass
class PrecisionReduxLineString(PrecisionRedux):
    """Precision reducer for shapely ``LineString`` geometries."""

    def round_linestring(self, input):
        # BUG FIX: original lacked ``self``, reversed the isinstance
        # arguments, and referenced ``Linestring`` -- shapely exports the
        # class as ``LineString``, so the old name raised NameError.
        if isinstance(input, LineString):
            pass
class PrecisionReduxMultiLineString(PrecisionRedux):
    """Precision reducer for shapely ``MultiLineString`` geometries."""

    def round_multilinestring(self, input):
        # BUG FIX: original lacked ``self``, reversed the isinstance
        # arguments, and referenced ``MultiLinestring`` -- shapely exports
        # the class as ``MultiLineString``, so the old name raised NameError.
        if isinstance(input, MultiLineString):
            pass
| 263 | 0 | 145 |
bdc017c5baa8abcf99007f79e8a9e29fed22cec4 | 1,077 | py | Python | python/reciept.py | AlexShukhman/smop-backend-PUBLIC | d0f54d963bc0a3860181213049f7f26851badcaa | [
"Apache-2.0"
] | null | null | null | python/reciept.py | AlexShukhman/smop-backend-PUBLIC | d0f54d963bc0a3860181213049f7f26851badcaa | [
"Apache-2.0"
] | null | null | null | python/reciept.py | AlexShukhman/smop-backend-PUBLIC | d0f54d963bc0a3860181213049f7f26851badcaa | [
"Apache-2.0"
] | null | null | null | import json, os, sys
from openpyxl import load_workbook
"""
This script is for creating receipts for Smop coders using data given from mongo
"""
if __name__ == '__main__':
main() | 25.046512 | 80 | 0.652739 | import json, os, sys
from openpyxl import load_workbook
"""
This script is for creating receipts for Smop coders using data given from mongo
"""
def readIn():
    """Consume all of stdin and parse the first line as a JSON document."""
    stdin_lines = sys.stdin.readlines()
    first_line = stdin_lines[0]
    return json.loads(first_line)
def main():
    # Fill the Smop receipt template with one payout record read from stdin
    # and save it as '<task>_<winner>.xlsx' in the current directory.
    # NOTE(review): despite the name, `lines` is the parsed JSON document
    # (a sequence indexed 0..4 below), not raw text lines.
    lines = readIn()
    #Ensure in correct directory
    directory_name = './api/models/'
    # #template file name
    template_file_name = 'Smop_Sales_Receipt_Template1.xlsx'
    #winner's username
    winner_name = lines[1]
    #winner's email
    winner_email = lines[2]
    #task name
    task_name = lines[3]
    #owner name
    owner_name = lines[4]
    #bounty
    bounty = lines[0]
    print(bounty, winner_email, winner_name, task_name, owner_name)
    wb = load_workbook(filename = directory_name + template_file_name)
    ws1 = wb.active
    # Fixed template cells: C5/C6 = recipient, row 11 = the single line item.
    ws1['C5'] = winner_name
    ws1['C6'] = winner_email
    ws1['B11'] = 1.0
    ws1['D11'] = 'Top 3 Payout for task "%s" by %s' % (task_name, owner_name)
    ws1['F11'] = bounty
    wb.save(filename = task_name + '_' + winner_name + '.xlsx')
if __name__ == '__main__':
main() | 847 | 0 | 46 |
050cd1985aea466a75989d2e36e84cd9e064860b | 3,740 | py | Python | applications/welcome/controllers/meteoritepage.py | winmill2/CS-411-UFO-Meteorite-Finder | 8a3ae102a18df6a6f45fc425383dfa0facb61d13 | [
"BSD-3-Clause"
] | null | null | null | applications/welcome/controllers/meteoritepage.py | winmill2/CS-411-UFO-Meteorite-Finder | 8a3ae102a18df6a6f45fc425383dfa0facb61d13 | [
"BSD-3-Clause"
] | null | null | null | applications/welcome/controllers/meteoritepage.py | winmill2/CS-411-UFO-Meteorite-Finder | 8a3ae102a18df6a6f45fc425383dfa0facb61d13 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# try something like
# First-request defaults for the per-visitor listing state.
# NOTE(review): `session` is not imported here -- presumably injected by the
# web2py framework into this controller's environment.
if not session.met_index:
    session.met_index = 0
if not session.met_sort:
    session.met_sort = 'ID'
if not session.met_display_flash:
    session.met_display_flash = 'No'
if not session.met_filter_cat:
    # Both filter fields are (re)initialised together.
    session.met_filter_cat = 'ID'
    session.met_filter_text = ''
| 34 | 143 | 0.663636 | # -*- coding: utf-8 -*-
# try something like
# First-request defaults for the per-visitor listing state.
# NOTE(review): `session` is not imported here -- presumably injected by the
# web2py framework into this controller's environment.
if not session.met_index:
    session.met_index = 0
if not session.met_sort:
    session.met_sort = 'ID'
if not session.met_display_flash:
    session.met_display_flash = 'No'
if not session.met_filter_cat:
    # Both filter fields are (re)initialised together.
    session.met_filter_cat = 'ID'
    session.met_filter_text = ''
def meteorite():
    """Controller for the meteorite listing page.

    Reads paging/sort/filter state from the session, shows a one-shot flash
    message if a numeric filter field received non-numeric input, builds the
    matching DB set and selects the rows in the requested order.

    Returns locals() — the view depends on these exact local names
    (rows, count, index, page_count, sort_selection, ...), so do not rename
    the locals in this function.
    """
    page_count = items_per_page
    index = session.met_index
    sort_selection = session.met_sort
    filter_category = session.met_filter_cat
    search_text = session.met_filter_text
    # One-shot flash set by set_filter() when a numeric field got text input.
    if session.met_display_flash == 'Yes':
        response.flash = 'That field is expecting an integer to be searched (no letters or symbols).'
        session.met_display_flash = 'No'
    # Build the filtered set: exact match for numeric fields, substring match
    # for text fields; an empty search text means "no filter".
    if (filter_category == "ID") and (not search_text == ''):
        met_set = db(db.Meteorite_table.id == search_text)
    elif (filter_category == "Mass") and (not search_text == ''):
        met_set = db(db.Meteorite_table.mass == search_text)
    elif (filter_category == "Geo Location") and (not search_text == ''):
        met_set = db(db.Meteorite_table.GeoLocation.contains(search_text))
    elif (filter_category == "Name") and (not search_text == ''):
        met_set = db(db.Meteorite_table.name.contains(search_text))
    elif (filter_category == "Class") and (not search_text == ''):
        met_set = db(db.Meteorite_table.class_met.contains(search_text))
    elif (filter_category == "Year Seen") and (not search_text == ''):
        met_set = db(db.Meteorite_table.year_seen == search_text)
    else:
        met_set = db(db.Meteorite_table)
    count = met_set.count()
    # Use the local snapshot (sort_selection) consistently, matching how the
    # filter branches above use their snapshots, instead of re-reading the
    # session on every comparison.
    if sort_selection == 'ID':
        rows = met_set.select(orderby=db.Meteorite_table.id)
    elif sort_selection == 'Mass':
        rows = met_set.select(orderby=db.Meteorite_table.mass)
    elif sort_selection == 'Geo Location':
        rows = met_set.select(orderby=db.Meteorite_table.GeoLocation)
    elif sort_selection == 'Name':
        rows = met_set.select(orderby=db.Meteorite_table.name)
    elif sort_selection == 'Class':
        rows = met_set.select(orderby=db.Meteorite_table.class_met)
    elif sort_selection == 'Year Seen':
        rows = met_set.select(orderby=db.Meteorite_table.year_seen)
    else:
        # Unknown sort label: fall back to ID order.
        rows = met_set.select(orderby=db.Meteorite_table.id)
    return locals()
def next_page():
    """Advance the meteorite listing by one page and reload the page."""
    session.met_index += items_per_page
    redirect(URL('meteorite'))
def prev_page():
    """Move the meteorite listing back by one page and reload the page."""
    session.met_index -= items_per_page
    redirect(URL('meteorite'))
def set_filter():
    """Store the requested filter in the session and return to page one.

    Numeric categories (ID, Mass, Year Seen) only accept digit-only search
    text; anything else arms a one-shot flash message instead of changing
    the filter.  Text categories accept the search text as-is.
    """
    session.met_index = 0
    category = request.vars.filter
    # request.vars returns None for a missing variable; normalize to '' so
    # .isdigit() cannot raise AttributeError and so meteorite()'s
    # `search_text == ''` no-filter check keeps working.
    search_text = request.vars.search_text or ''
    if category in ('ID', 'Mass', 'Year Seen') and search_text != '':
        if search_text.isdigit():
            session.met_filter_cat = category
            session.met_filter_text = search_text
        else:
            # Non-numeric input for a numeric field: flag the flash message
            # and leave the current filter untouched.
            session.met_display_flash = 'Yes'
    else:
        session.met_filter_cat = category
        session.met_filter_text = search_text
    redirect(URL('meteorite'))
def reset_filter():
    """Drop the active filter and jump back to the first page."""
    # Clearing the category to None lets the page-level init code re-seed it
    # to 'ID' on the next request (None is falsy).
    session.met_index = 0
    session.met_filter_text = ''
    session.met_filter_cat = None
    redirect(URL('meteorite'))
def sort_met():
    """Select the sort column from the request letter and reload the page."""
    session.met_index = 0
    # Map the one-letter URL argument to the sort-column label the
    # meteorite() controller understands; unknown letters leave the
    # current sort unchanged.
    column_by_letter = {
        'A': 'ID',
        'B': 'Mass',
        'C': 'Geo Location',
        'D': 'Name',
        'E': 'Class',
        'F': 'Year Seen',
    }
    choice = request.args(0)
    if choice in column_by_letter:
        session.met_sort = column_by_letter[choice]
    redirect(URL('meteorite'))
97a694e5b108ae18df8c6c74128996964290ff48 | 7,413 | py | Python | checks/drc_checks/magic/magic_gds_drc_check.py | efabless/google_mpw_precheck | b30e779ab0ff90e37e5c56795066a30804816e13 | [
"Apache-2.0"
] | 17 | 2020-11-13T12:24:28.000Z | 2021-09-03T08:47:15.000Z | checks/drc_checks/magic/magic_gds_drc_check.py | efabless/google_mpw_precheck | b30e779ab0ff90e37e5c56795066a30804816e13 | [
"Apache-2.0"
] | 22 | 2020-11-06T16:23:47.000Z | 2021-07-09T16:19:25.000Z | checks/drc_checks/magic/magic_gds_drc_check.py | efabless/google_mpw_precheck | b30e779ab0ff90e37e5c56795066a30804816e13 | [
"Apache-2.0"
] | 14 | 2020-11-13T12:24:33.000Z | 2021-08-13T03:14:30.000Z | import argparse
import gzip
import logging
import os
import re
import subprocess
from pathlib import Path
try:
from checks.drc_checks.magic.converters import magic_drc_to_rdb, magic_drc_to_tcl, magic_drc_to_tr_drc, tr2klayout
except ImportError:
from converters import magic_drc_to_rdb, magic_drc_to_tcl, magic_drc_to_tr_drc, tr2klayout
def violations_count(drc_content):
    """Count the individual violations in a magic DRC report.

    The report alternates 40-dash separators around section bodies::

        design name
        ----------------------------------------
        violation message
        ----------------------------------------
        list of violations
        ----------------------------------------

    Returns:
        int: total number of violation lines across all sections; every
        distinct violation message is also logged as an error with its count.
    """
    split_line = '----------------------------------------'
    drc_sections = drc_content.split(split_line)
    if len(drc_sections) == 2:
        # Only the design name and a trailing section: a clean report.
        return 0
    else:
        vio_dict = dict()
        for i in range(1, len(drc_sections) - 1, 2):
            # Odd sections hold the message, even ones the violation lines;
            # a body looks like "\n<line>\n...<line>\n", hence the -2.
            message = drc_sections[i]
            section_total = len(drc_sections[i + 1].split("\n")) - 2
            # Accumulate instead of assigning so a message that heads more
            # than one section keeps the counts from every occurrence
            # (plain assignment silently discarded the earlier counts).
            vio_dict[message] = vio_dict.get(message, 0) + section_total
        count = 0
        for key in vio_dict:
            val = vio_dict[key]
            count += val
            logging.error(f"Violation Message \"{str(key.strip())} \"found {str(val)} Times.")
        return count
if __name__ == "__main__":
    # Stand-alone CLI entry point: validate the arguments, run the magic DRC
    # check, and log whether the GDS came back Clean or Dirty.
    logging.basicConfig(level=logging.DEBUG, format=f"%(asctime)s | %(levelname)-7s | %(message)s", datefmt='%d-%b-%Y %H:%M:%S')
    parser = argparse.ArgumentParser(description='Runs magic and klayout drc checks on a given GDS.')
    parser.add_argument('--gds_input_file_path', '-g', required=True, help='GDS File to apply DRC checks on')
    parser.add_argument('--output_directory', '-o', required=True, help='Output Directory')
    parser.add_argument('--pdk_root', '-p', required=True, help='PDK Path')
    parser.add_argument('--design_name', '-d', required=True, help='Design Name')
    args = parser.parse_args()
    # Wrap the path arguments in Path so the existence/suffix checks below work.
    gds_input_file_path = Path(args.gds_input_file_path)
    output_directory = Path(args.output_directory)
    pdk_root = Path(args.pdk_root)
    design_name = args.design_name
    # Only run the check when the input is a .gds file that exists and the
    # output directory is a real directory; otherwise report what is invalid.
    if gds_input_file_path.exists() and gds_input_file_path.suffix == ".gds":
        if output_directory.exists() and output_directory.is_dir():
            if magic_gds_drc_check(gds_input_file_path, args.design_name, pdk_root, output_directory):
                logging.info("Magic GDS DRC Clean")
            else:
                logging.info("Magic GDS DRC Dirty")
        else:
            logging.error(f"{output_directory} is not valid")
    else:
        logging.error(f"{gds_input_file_path} is not valid")
| 47.825806 | 140 | 0.70835 | import argparse
import gzip
import logging
import os
import re
import subprocess
from pathlib import Path
try:
from checks.drc_checks.magic.converters import magic_drc_to_rdb, magic_drc_to_tcl, magic_drc_to_tr_drc, tr2klayout
except ImportError:
from converters import magic_drc_to_rdb, magic_drc_to_tcl, magic_drc_to_tr_drc, tr2klayout
def check_if_binary_has(word, filename):
    """Return 1 if the regex `word` matches the contents of `filename`, else 0.

    Files whose name contains 'gz' are transparently decompressed with gzip.
    Decoding errors are ignored so arbitrary binary (e.g. GDS) streams can be
    scanned as text.
    """
    # gzip.open defaults to binary mode, where passing `errors` raises
    # ValueError ("Argument 'errors' not supported in binary mode") --
    # 'rt' is required to get an error-tolerant text stream.
    if 'gz' in str(filename):
        stream = gzip.open(filename, 'rt', errors='ignore')
    else:
        stream = open(filename, errors='ignore')
    with stream as f:
        content = f.read()
    return int(bool(re.search(word, content)))
def is_valid_magic_drc_report(drc_content):
    """Return True when the report contains at least one section separator.

    A complete magic DRC report always carries the 40-dash divider; its
    absence means the report was cut short.
    """
    separator = '----------------------------------------'
    # Splitting on the separator yields >= 2 parts exactly when the
    # separator occurs at least once, so a containment test is equivalent.
    return separator in drc_content
def violations_count(drc_content):
    """Count the individual violations in a magic DRC report.

    The report alternates 40-dash separators around section bodies::

        design name
        ----------------------------------------
        violation message
        ----------------------------------------
        list of violations
        ----------------------------------------

    Returns:
        int: total number of violation lines across all sections; every
        distinct violation message is also logged as an error with its count.
    """
    split_line = '----------------------------------------'
    drc_sections = drc_content.split(split_line)
    if len(drc_sections) == 2:
        # Only the design name and a trailing section: a clean report.
        return 0
    else:
        vio_dict = dict()
        for i in range(1, len(drc_sections) - 1, 2):
            # Odd sections hold the message, even ones the violation lines;
            # a body looks like "\n<line>\n...<line>\n", hence the -2.
            message = drc_sections[i]
            section_total = len(drc_sections[i + 1].split("\n")) - 2
            # Accumulate instead of assigning so a message that heads more
            # than one section keeps the counts from every occurrence
            # (plain assignment silently discarded the earlier counts).
            vio_dict[message] = vio_dict.get(message, 0) + section_total
        count = 0
        for key in vio_dict:
            val = vio_dict[key]
            count += val
            logging.error(f"Violation Message \"{str(key.strip())} \"found {str(val)} Times.")
        return count
def magic_gds_drc_check(gds_ut_path, design_name, pdk_root, output_directory):
    """Run magic's DRC check on a GDS file and evaluate the resulting report.

    Invokes magic with the project's magic_drc_check.tcl script, writes the
    raw report plus converted formats (rdb/tcl/tr/klayout xml) under
    `output_directory`, and scans the log and report for fatal problems.

    Args:
        gds_ut_path: path to the GDS under test.
        design_name: top cell / design name passed to the tcl script.
        pdk_root: PDK root containing the sky130A tree.
        output_directory: directory with logs/ and outputs/reports/ subdirs
            (assumed to already exist -- TODO confirm with callers).

    Returns:
        True when the run produced a valid report with zero violations;
        False on missing/invalid report, corrupt GDS, or any violations.
    """
    parent_directory = Path(__file__).parent
    logs_directory = output_directory / 'logs'
    outputs_directory = output_directory / 'outputs'
    reports_directory = outputs_directory / 'reports'
    design_magic_drc_file_path = reports_directory / f"magic_drc_check.drc.report"
    # Collect the names of SRAM macros installed in the PDK, then keep only
    # those whose names actually occur in the GDS byte stream.
    installed_sram_modules_names = []
    sram_maglef_files_generator = Path(pdk_root / "sky130A" / "libs.ref" / "sky130_sram_macros" / "maglef").glob('*.mag')
    for installed_sram in sram_maglef_files_generator:
        installed_sram_modules_names.append(installed_sram.stem)
    sram_modules_in_gds = []
    for sram in installed_sram_modules_names:
        if check_if_binary_has(sram, gds_ut_path):
            sram_modules_in_gds.append(sram)  # only the name of the module
    magicrc_file_path = parent_directory.parent.parent / 'tech-files' / 'sky130A.magicrc'
    magic_drc_tcl_path = parent_directory / 'magic_drc_check.tcl'
    design_magic_drc_mag_file_path = outputs_directory / f"{design_name}.magic.drc.mag"
    esd_fet = 'sky130_fd_io__signal_5_sym_hv_local_5term'
    # cli arguments for a tcl script has to be a string
    has_sram_as_str = str(check_if_binary_has('sram', gds_ut_path))
    has_esd_fet_as_str = str(check_if_binary_has('sky130_fd_io__signal_5_sym_hv_local_5term', gds_ut_path))
    # TODO(ahmad.nofal@efabless.com): This should be a command line argument
    os.environ['MAGTYPE'] = 'mag'
    run_magic_drc_check_cmd = ['magic', '-noconsole', '-dnull', '-rcfile', magicrc_file_path, magic_drc_tcl_path, gds_ut_path,
                               design_name, pdk_root, design_magic_drc_file_path, design_magic_drc_mag_file_path,
                               ' '.join(sram_modules_in_gds), esd_fet, has_sram_as_str, has_esd_fet_as_str]
    magic_drc_log_file_path = logs_directory / 'magic_drc_check.log'
    # Run magic, capturing both stdout and stderr into one log file.
    with open(magic_drc_log_file_path, 'w') as magic_drc_log:
        process = subprocess.run(run_magic_drc_check_cmd, stderr=magic_drc_log, stdout=magic_drc_log)
    if not design_magic_drc_file_path.exists():
        logging.error(f"No {design_magic_drc_file_path} file produced by the drc check")
        return False
    # The tcl script encodes the violation count in magic's exit status;
    # the +3 / 4 adjustment is unexplained legacy behavior (see TODO below).
    drc_violations_count = process.returncode
    if drc_violations_count != 0:
        drc_violations_count = (drc_violations_count + 3) / 4  # TODO(ahmad.nofal@efabless.com): Check validity
    magic_drc_total_file_path = logs_directory / 'magic_drc_check.total'
    with open(magic_drc_total_file_path, 'w') as magic_drc_total:
        magic_drc_total.write(str(drc_violations_count))
    # Write all different formats for drc violations reports using converters
    # (best effort: failure here only degrades reporting, not the verdict).
    try:
        design_magic_rdb_file_path = reports_directory / f"magic_drc_check.rdb"
        magic_drc_to_rdb.convert(design_magic_drc_file_path, design_magic_rdb_file_path)
        design_magic_drc_tcl_file_path = reports_directory / f"magic_drc_check.tcl"
        magic_drc_to_tcl.convert(design_magic_drc_file_path, design_magic_drc_tcl_file_path)
        design_tr_drc_file_path = reports_directory / f"magic_drc_check.tr"
        magic_drc_to_tr_drc.convert(design_magic_drc_file_path, design_tr_drc_file_path)
        design_klayout_xml_file_path = reports_directory / f"magic_drc_check.xml"
        tr2klayout.convert(design_tr_drc_file_path, design_klayout_xml_file_path, design_name)
    except Exception as e:
        logging.warning(f"Error generating DRC violation report(s), the full set of Magic DRC reports will not be generated. {e}")
    # Scan the magic log for messages that indicate a corrupt/invalid GDS.
    with open(magic_drc_log_file_path) as magic_drc_log:
        log_content = magic_drc_log.read()
        if log_content.find("was used but not defined.") != -1:
            logging.error(f"The GDS is not valid/corrupt contains cells that are used but not defined. Please check: {magic_drc_log_file_path}")
            return False
        if log_content.find("Unrecognized layer (type) name \"<<<<<\"") != -1:
            logging.error(f"The GDS is not valid/corrupt contains cells. Please check: {magic_drc_log_file_path}")
            return False
    # Finally parse the report itself: it must be structurally complete,
    # and the verdict is clean only when violations_count() returns zero.
    with open(design_magic_drc_file_path) as magic_drc_report:
        drc_content = magic_drc_report.read()
        if not is_valid_magic_drc_report(drc_content):
            logging.error(f"Incomplete DRC Report. Maybe you ran out of RAM. Please check: {magic_drc_log_file_path}")
            return False
        else:
            count = violations_count(drc_content)
            logging.info(f"{count} DRC violations") if count == 0 else logging.error(f"{count} DRC violations")
            return True if count == 0 else False
if __name__ == "__main__":
    # Stand-alone CLI wrapper around magic_gds_drc_check().
    logging.basicConfig(level=logging.DEBUG, format=f"%(asctime)s | %(levelname)-7s | %(message)s", datefmt='%d-%b-%Y %H:%M:%S')
    cli_parser = argparse.ArgumentParser(description='Runs magic and klayout drc checks on a given GDS.')
    cli_parser.add_argument('--gds_input_file_path', '-g', required=True, help='GDS File to apply DRC checks on')
    cli_parser.add_argument('--output_directory', '-o', required=True, help='Output Directory')
    cli_parser.add_argument('--pdk_root', '-p', required=True, help='PDK Path')
    cli_parser.add_argument('--design_name', '-d', required=True, help='Design Name')
    cli_args = cli_parser.parse_args()
    gds_input_file_path = Path(cli_args.gds_input_file_path)
    output_directory = Path(cli_args.output_directory)
    pdk_root = Path(cli_args.pdk_root)
    design_name = cli_args.design_name
    # Validate inputs in the same order as before: GDS file first, then the
    # output directory; run the check only when both are usable.
    if not (gds_input_file_path.exists() and gds_input_file_path.suffix == ".gds"):
        logging.error(f"{gds_input_file_path} is not valid")
    elif not (output_directory.exists() and output_directory.is_dir()):
        logging.error(f"{output_directory} is not valid")
    elif magic_gds_drc_check(gds_input_file_path, cli_args.design_name, pdk_root, output_directory):
        logging.info("Magic GDS DRC Clean")
    else:
        logging.info("Magic GDS DRC Dirty")
| 4,952 | 0 | 69 |