content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def is_anaconda_5():
    """Return True if the installed Anaconda distribution is version 5+.

    Anaconda 5 ships conda version 4.4.0 or greater, so detection goes
    through the conda version rather than an Anaconda version string.

    Returns:
        bool: True when conda is at least 4.4; False when conda is
        missing or older.
    """
    vers = conda_version()
    if not vers:
        return False
    # Compare (major, minor) as a tuple: the original
    # `major >= 4 and minor >= 4` wrongly rejected e.g. 5.0
    # (minor 0 fails `minor >= 4`).
    return (vers['major'], vers['minor']) >= (4, 4)
def mkdir(path):
    """Create a directory (and any missing parents) if it doesn't exist.

    Parameters:
        path (str) -- a single directory path
    """
    # exist_ok avoids the check-then-create race of the original
    # `if not os.path.exists(path): os.makedirs(path)` pattern.
    os.makedirs(path, exist_ok=True)
def relay_array_map(c, fn, *array):
    """Implementation of array_map for Relay."""
    assert fn.is_constant(Primitive)
    prim = fn.value
    # P.switch maps to relay.where; everything else dispatches through
    # the SIMPLE_MAP lookup table.
    mapper = relay.where if prim is P.switch else SIMPLE_MAP[prim]
    refs = [c.ref(arg) for arg in array]
    return mapper(*refs)
def output_to_csv(results, results_file_name):
    """Append one row to a CSV file.

    Args:
        results: iterable of field values for a single CSV row.
        results_file_name: path of the CSV file (created if missing).
    """
    # newline='' is required by the csv module to avoid extra blank
    # lines on Windows (see csv.writer documentation).
    with open(results_file_name, mode='a', newline='') as results_file:
        writer = csv.writer(results_file, delimiter=',', quotechar='"',
                            quoting=csv.QUOTE_MINIMAL)
        writer.writerow(results)
def test_StructlogUtils_graypy_structlog_processor(
    structlog_utils,
    event_kwargs
):
    """
    Tests if event dictionary can be converted to a Graylog handler compatible
    format, to be passed as arguments/keyword arguments
    # C1: Check that the position-based arguments are formatted correctly
    # C2: Check that keyword-based arguments are formatted correctly
    """
    args, kwargs = structlog_utils.graypy_structlog_processor(
        logger,
        method,
        event_dict=event_kwargs
    )
    # C1
    assert args == (event_kwargs.get('event', ''),)
    # C2: the original looped over every (key, value) item but ignored the
    # loop variables and re-ran the same five asserts once per item; run
    # each field check exactly once instead.
    cached_event_dict = kwargs.get('extra')
    for key in ('pid', 'process_name', 'thread_name', 'file', 'function'):
        assert isinstance(cached_event_dict.get(key), str)
def select_sort(alist):
    """Sort *alist* in place using selection sort.

    Repeatedly select the smallest element of the unsorted tail and swap
    it into the next slot of the sorted head.  O(n^2) comparisons; not a
    stable sort (equal elements may be reordered, e.g. [1, 3, 2, 2]).
    """
    length = len(alist)
    # The last element is in place once everything before it is sorted,
    # so the outer loop stops at length - 1.
    for front in range(length - 1):
        smallest = front
        # Scan the unsorted tail for anything smaller than the current
        # candidate minimum.
        for idx in range(front + 1, length):
            if alist[idx] < alist[smallest]:
                smallest = idx
        alist[front], alist[smallest] = alist[smallest], alist[front]
def get_multi_objects_dict(*args, params=None):
    """Convert an array of objects into one merged dictionary.

    Args:
        *args: objects whose public attributes are extracted.
        params: optional dict; when it contains a truthy 'fields' list,
            only those attributes are extracted from each object.

    Returns:
        dict: union of all per-object field dicts; on key collisions the
        later object wins.
    """
    result = {}
    for data_object in args:
        # .get() avoids the KeyError the original raised when `params`
        # was provided without a 'fields' entry.
        if params is not None and params.get('fields'):
            fields = params['fields']
        else:
            fields = [attr for attr in data_object.__dict__.keys()
                      if not attr.startswith('_')]
        row = {}
        for field in fields:
            value = getattr(data_object, field)
            # Date-prefixed fields are serialized to a fixed timestamp format.
            if field.startswith('date') and value is not None:
                row[field] = value.strftime('%Y-%m-%d %H:%M:%S')
            else:
                row[field] = parse_value(value)
        # Merging directly replaces the intermediate object_group list of
        # the original; the resulting dict is identical.
        result.update(row)
    return result
def assert_never_inf(tensor):
    """Make sure there are no non-finite (Inf/NaN) values in the tensor.

    Parameters
    ----------
    tensor : torch.Tensor
        input tensor

    Raises
    ------
    InfTensorException
        If one or more non-finite values occur in the given tensor
    """
    # The original tested `isfinite(...).any()`, which only fails when
    # *every* element is non-finite; the contract requires *all* elements
    # to be finite.  Avoid `assert` so the check survives `python -O`.
    if not torch.isfinite(tensor).all():
        raise InfTensorException("There was an Inf value in tensor")
def aspuru(weights, x, wires, n_layers=1):
    """ Circuits ID = 5 in arXiv:1905.10876 paper

    :param weights: trainable weights
    :param x: input, len(x) is <= len(wires)
    :param wires: list of wires on which the feature map acts
    :param n_layers: number of repetitions of the first layer
    """
    data_size = len(x)
    n_wires = len(wires)
    # Weights consumed per layer: RX on the (n_wires - data_size) non-data
    # wires, RZ on every wire, n_wires*(n_wires-1) CRZ entanglers, then a
    # second RX/RZ block — algebraically n*(n+3) - 2*data_size.
    weights_each_layer = (n_wires * (n_wires + 3) - 2 * data_size)
    n_weights_needed = weights_each_layer * n_layers
    if len(x) > n_wires:
        raise ValueError("Feat map can encode at most {} features (which is the "
                         "number of wires), got {}.".format(n_wires, len(x)))
    if len(weights) != n_weights_needed:
        raise ValueError("Feat map needs {} weights, got {}."
                         .format(n_weights_needed, len(weights)))
    for l in range(n_layers):
        # inputs
        # First encoding block: RX with data on the first data_size wires.
        for i in range(data_size):
            # (always true: data_size == len(x); kept from the original)
            if i < len(x):
                qml.RX(x[i], wires=wires[i])
        # Trainable RX on the remaining, non-data wires.
        for i in range(len(x), n_wires):
            qml.RX(weights[weights_each_layer * l + i -data_size], wires=wires[i])
        # Trainable RZ on every wire.
        for i in range(n_wires):
            qml.RZ(weights[weights_each_layer * l + n_wires - data_size + i], wires=wires[i])
        # All-to-all controlled-RZ entanglers, iterated in descending order.
        for i in reversed(range(n_wires)):
            for j in reversed(range(n_wires)):
                if j == i:
                    continue
                qml.CRZ(weights[weights_each_layer * l + 2 * n_wires - data_size + i * (n_wires - 1) + j],
                        wires=[wires[i], wires[j]])
        # Second block: re-encode the data, then trainable RX/RZ rotations.
        for i in range(data_size):
            qml.RX(x[i], wires=wires[i])
        for i in range(len(x), n_wires):
            qml.RX(weights[weights_each_layer * l + n_wires * (n_wires + 1) - data_size + i],
                   wires=wires[i])
        for i in range(n_wires):
            qml.RZ(weights[weights_each_layer * l + n_wires * (n_wires + 2) - 2 * data_size + i], wires=wires[i])
def test_function_ping_now_bytes():
    """
    Pick the 'ping_now' function, wrap it in an RQ job via Sanchez, and
    return that job serialized as bytes.
    """
    from btu.manual_tests import ping_now
    # Mirror the arguments frappe's background-job queueing would pass.
    queue_args = {
        "site": frappe.local.site,
        "user": frappe.session.user,
        "method": ping_now,
        "event": None,
        "job_name": "ping_now",
        "is_async": True,  # always true; we want to run Tasks via the Redis Queue, not on the Web Server.
        "kwargs": {}  # if 'ping_now' had keyword arguments, we'd set them here.
    }
    new_sanchez = Sanchez()
    new_sanchez.build_internals(func=execute_job, _args=None, _kwargs=queue_args)
    http_result: bytes = new_sanchez.get_serialized_rq_job()
    return http_result
def test_missing_data():
    """Test loading and parsing invalid json file; missing data fields.

    Each (latitude, longitude) pair selects a station in the fixture whose
    feed is missing exactly one field; parsing must still succeed with the
    corresponding result key set to None.
    """
    with open('tests/json/buienradar_missing.json', 'r') as file:
        data = file.read()
    # (latitude, longitude, result key expected to be None) per station.
    cases = [
        (51.50, 6.20, STATIONNAME),      # missing: stationname
        (52.07, 5.88, FEELTEMPERATURE),  # missing: feeltemperature
        (52.65, 4.98, HUMIDITY),         # missing: humidity
        (52.10, 5.18, GROUNDTEMP),       # missing: groundtemperature
        (52.92, 4.78, TEMPERATURE),      # missing: temperature
        (51.45, 5.42, WINDSPEED),        # missing: windspeed
        (51.20, 5.77, WINDFORCE),        # missing: windspeedBft
        (52.00, 3.28, WINDAZIMUTH),      # missing: winddirectiondegrees
        (51.57, 4.93, WINDDIRECTION),    # missing: winddirection
        (52.07, 6.65, PRESSURE),         # missing: airpressure
        (52.43, 6.27, WINDGUST),         # missing: windgusts
        (51.87, 5.15, PRECIPITATION),    # missing: precipitation
        (51.98, 4.10, IRRADIANCE),       # missing: sunpower
    ]
    for latitude, longitude, key in cases:
        result = parse_data(data, None, latitude, longitude, usexml=False)
        print(result)
        assert result[SUCCESS]
        assert result[MESSAGE] is None   # `is None` replaces `== None`
        assert result[DATA][key] is None
def post_equals_form(post, json_response):
    """
    Checks if the posts object is equal to the json object
    """
    # Compare field by field; all() short-circuits on the first mismatch
    # exactly like the original early returns did.
    fields = ('title', 'deadline', 'details', 'category',
              'preferred_contact', 'zip_code')
    return all(getattr(post, name) == json_response[name] for name in fields)
def get_shield(plugin: str) -> dict:
    """
    Build the shields.io JSON badge description for a napari plugin.

    If the package is not a valid plugin, the badge message reads
    'plugin not found' instead of the plugin name.

    :param plugin: name of the plugin
    :return: shield json used in shields.io.
    """
    badge = {
        "color": "#0074B8",
        "label": "napari hub",
        "logoSvg": "<svg width=\"512\" height=\"512\" viewBox=\"0 0 512 512\" fill=\"none\" "
                   "xmlns=\"http://www.w3.org/2000/svg\"><circle cx=\"256.036\" cy=\"256\" "
                   "r=\"85.3333\" fill=\"white\" stroke=\"white\" stroke-width=\"56.8889\"/>"
                   "<circle cx=\"256.036\" cy=\"42.6667\" r=\"42.6667\" fill=\"white\"/>"
                   "<circle cx=\"256.036\" cy=\"469.333\" r=\"42.6667\" fill=\"white\"/>"
                   "<path d=\"M256.036 28.4445L256.036 142.222\" stroke=\"white\" "
                   "stroke-width=\"56.8889\" stroke-linecap=\"round\" stroke-linejoin=\"round\"/>"
                   "<path d=\"M256.036 369.778L256.036 483.556\" stroke=\"white\" stroke-width=\"56.8889\" "
                   "stroke-linecap=\"round\" stroke-linejoin=\"round\"/>"
                   "<circle cx=\"71.2838\" cy=\"149.333\" r=\"42.6667\" transform=\"rotate(-60 71.2838 149.333)\" "
                   "fill=\"white\"/><circle cx=\"440.788\" cy=\"362.667\" r=\"42.6667\" "
                   "transform=\"rotate(-60 440.788 362.667)\" fill=\"white\"/>"
                   "<path d=\"M58.967 142.222L157.501 199.111\" stroke=\"white\" stroke-width=\"56.8889\" "
                   "stroke-linecap=\"round\" stroke-linejoin=\"round\"/><path d=\"M354.57 312.889L453.105 369.778\" "
                   "stroke=\"white\" stroke-width=\"56.8889\" stroke-linecap=\"round\" stroke-linejoin=\"round\"/>"
                   "<circle cx=\"71.2838\" cy=\"362.667\" r=\"42.6667\" transform=\"rotate(-120 71.2838 362.667)\" "
                   "fill=\"white\"/><circle cx=\"440.788\" cy=\"149.333\" r=\"42.6667\" "
                   "transform=\"rotate(-120 440.788 149.333)\" fill=\"white\"/>"
                   "<path d=\"M58.967 369.778L157.501 312.889\" stroke=\"white\" stroke-width=\"56.8889\" "
                   "stroke-linecap=\"round\" stroke-linejoin=\"round\"/><path d=\"M354.57 199.111L453.105 142.222\" "
                   "stroke=\"white\" stroke-width=\"56.8889\" stroke-linecap=\"round\" stroke-linejoin=\"round\"/>"
                   "</svg>",
        "schemaVersion": 1,
        "style": "flat-square"
    }
    known = get_valid_plugins()
    badge['message'] = plugin if plugin in known else 'plugin not found'
    return badge
def check_auth(username, password):
    """Return True when the supplied username / password combination
    matches the expected credentials.
    """
    return (username, password) == (expectedUN, expectedPW)
def Lambda(t, y):
    """Original Arnett 1982 dimensionless bolometric light curve expression.

    Calculates the bolometric light curve due to radioactive decay of 56Ni,
    assuming no other energy input.

    t: time since explosion in days
    y: Arnett 1982 light curve width parameter (typical 0.7 < y < 1.4)

    Returns the dimensionless light curve shape function.
    """
    # Characteristic timescale of the light curve.
    tau_m = 2 * tNi * y
    xs = np.atleast_1d(t / tau_m)
    integrand = lambda z: 2 * z * np.exp(-2 * z * y + z ** 2)
    # exp(-x^2) * integral_0^x of the weighting kernel, per time point.
    values = [np.exp(-xi ** 2) * quad(integrand, 0, xi)[0]
              for xi in xs.ravel()]
    return np.array(values)
def vectordump():
    """Dump all vector data to ``.csv`` files in the current directory.

    Connects to the fisb MongoDB database (via the module-level mongoUri)
    and delegates the actual dump to ``vec.dumpVectors``.  Any previously
    produced output files listed in OUTPUT_FILES are removed first.
    """
    client = MongoClient(mongoUri, tz_aware=True)
    db = client.fisb
    currentPath = '.'
    # Delete any existing .csv files so we don't get confused as
    # to what is new and what is old.
    for x in OUTPUT_FILES:
        csvPath = os.path.join(currentPath, x)
        if os.path.isfile(csvPath):
            os.remove(csvPath)
    vec.dumpVectors(currentPath, db)
def get_utcnow_time(format: str = None) -> str:
    """
    Return string with current utc time in chosen format

    Args:
        format (str): format string. if None "%y%m%d.%H%M%S" will be used.

    Returns:
        str: formatted utc time string
    """
    # Explicit `is None` test: an empty format string must be honoured,
    # not replaced by the default.
    chosen = "%y%m%d.%H%M%S" if format is None else format
    return datetime.utcnow().strftime(chosen)
def test_search(script):
    """
    End to end test of search command.
    """
    result = script.pip('search', 'pip')
    expected = ('The PyPA recommended tool for installing '
                'Python packages.')
    assert expected in result.stdout
def repair_branch(cmorph, cut, rmorph, rep, force=False):
    """Attempts to extend cut neurite using intact branch.

    Args:
        cmorph (treem.Morph): cut morphology.
        cut (treem.Node): cut node, from cmorph.
        rmorph (treem.Morph): repair morphology.
        rep (treem.Node): undamaged branch start node, from rmorph.
        force (bool): force repair if branch is too short.

    Returns:
        True if repaired.
    """
    done = 0
    # Section node lists: cutsec runs root-ward toward the cut node,
    # repsec runs outward from the donor branch start.
    cutsec = list(reversed(list(cut.section(reverse=True))))
    repsec = list(rep.section())
    cutlen = cmorph.length(cutsec)
    replen = rmorph.length(repsec)
    target = cut
    if replen > cutlen:
        # Donor is longer than needed: walk backwards to the most distal
        # node whose remaining section still exceeds the surplus length.
        for node in repsec[-1::-1]:
            if rmorph.length(node.section()) > replen - cutlen:
                break
        source = node  # pylint: disable=undefined-loop-variable
    elif rep.breadth() > 1 or force:
        # Donor is short but branches (or caller forces): graft it whole.
        source = rep
    else:
        source = None
    if source:
        tree = rmorph.copy(source)
        # NOTE(review): z is mirrored (scale_z = -1) and radii rescaled to
        # match the stump's mean radius — presumably to reorient the donor
        # branch before grafting; confirm the intent of the fixed -1.
        scale_z = -1
        scale_r = cmorph.radii(cutsec).mean() / rmorph.radii(repsec).mean()
        tree.data[:, SWC.XYZR] *= np.array([1, 1, scale_z, scale_r])
        # Rotate the donor so its mean direction matches the root->cut axis.
        u = np.mean(tree.data[:, SWC.XYZ], axis=0) - tree.root.coord()
        v = target.coord() - cmorph.root.coord()
        axis, angle = rotation(u, v)
        tree.rotate(axis, angle)
        # Translate so the graft continues one parent-step beyond the cut.
        shift = (target.coord() - tree.root.coord() +
                 target.coord() - target.parent.coord())
        tree.translate(shift)
        cmorph.graft(tree, target)
        done = 1
    return done
async def test_create_read(http_client):
    """Create a new product and read it back.

    Verifies that GET /products/{id} returns exactly the fields the
    product was created with, plus its id.
    """
    name, description = "Some item name", "Some item description"
    prod_id = await create_product(http_client, name=name, description=description)
    read_resp = await http_client.get(f"/products/{prod_id}")
    assert read_resp.status_code == 200
    assert read_resp.json() == {"id": prod_id, "name": name, "description": description}
def ValidateDisplayName(display_name):
    """Validates the display name.

    A missing (None) display name is acceptable; an explicitly empty one
    is rejected.
    """
    if display_name is None:
        return
    if not display_name:
        raise exceptions.InvalidArgumentException(
            '--display-name',
            'Display name can not be empty.')
def parse_patient_dob(dob):
    """
    Parse a date-of-birth string and sanity check it.

    Expects the date string in YYYYMMDD format; anything unparsable or
    earlier than 1900-01-01 yields None.

    Parameters
    ----------
    dob : str
        dob as string YYYYMMDD

    Returns
    -------
    datetime.datetime or None
        parsed dob, or None when invalid
    """
    try:
        parsed = datetime.datetime.strptime(dob, '%Y%m%d')
        if parsed < datetime.datetime(1900, 1, 1):
            raise ValueError
    except (ValueError, TypeError):
        parsed = None
    log.debug(parsed)
    return parsed
def us2cycles(us):
    """
    Converts microseconds to integer number of tProc clock cycles.

    :param us: Number of microseconds
    :type us: float
    :return: Number of tProc clock cycles (truncated toward zero)
    :rtype: int
    """
    # fs_proc is a module-level constant — presumably the tProc clock
    # frequency in cycles per microsecond; confirm units at its definition.
    return int(us*fs_proc)
def test_DCAFFT_error(noise_dataset):
    """Test that a DCAFFT raises an error for d>1.
    """
    # The whole setup lives inside pytest.raises: the ValueError may come
    # from construction with d=2 or from fitting — either satisfies the test.
    with pytest.raises(ValueError):
        X = noise_dataset
        model = DCAFFT(d=2, T=10)
        model.fit(X)
def date_range(begin_date, end_date):
    """Return the list of date strings from begin_date to end_date inclusive.

    Both bounds are 'YYYY-MM-DD' strings; an empty list is returned when
    begin_date is after end_date.
    """
    out = []
    current = datetime.datetime.strptime(begin_date, "%Y-%m-%d")
    stamp = begin_date
    # Zero-padded ISO dates compare correctly as plain strings.
    while stamp <= end_date:
        out.append(stamp)
        current += datetime.timedelta(days=1)
        stamp = current.strftime("%Y-%m-%d")
    return out
def scalar(name):
    """
    Create a scalar symbolic variable with the given name.  The name is
    used during code generation, so it should match the variable name
    used in the C++ code.
    """
    return symbols(name)
def add(number1, number2):
    """
    This function adds two numbers.

    Arguments:
        number1 : first number to be passed
        number2 : second number to be passed

    Returns: number1+number2
        the sum of the two numbers
        (the original docstring wrongly said number1*number2)

    Examples:
        >>> add(0,0)
        0
        >>> add(1,1)
        2
        >>> add(1.1,2.2)
        3.3000000000000003
    """
    return number1 + number2
def warn_and_skip(message):
    """
    Emit *message* as a warning, then skip the current pytest test.

    The same text serves as both the warning message and the skip reason.
    """
    warnings.warn(message)
    pytest.skip(message)
def calculate_timeout(start_point, end_point, planner):
    """
    Calculates the time limit between start_point and end_point considering a fixed speed of 5 km/hr.

    Args:
        start_point: initial position
        end_point: target_position
        planner: to get the shortest path between start_point and end_point

    Returns:
        time limit in seconds at a fixed speed of 5 km/hr, plus a 10 s margin
    """
    # Shortest path in meters; the fixed 0.22 z values match the planner's
    # expected query height for the start pose.
    path_distance = planner.get_shortest_path_distance(
        [start_point.location.x, start_point.location.y, 0.22], [
            start_point.orientation.x, start_point.orientation.y, 0.22], [
            end_point.location.x, end_point.location.y, end_point.location.z], [
            end_point.orientation.x, end_point.orientation.y, end_point.orientation.z])
    # meters -> km, divide by 5 km/h, convert hours -> seconds, add margin.
    return ((path_distance / 1000.0) / 5.0) * 3600.0 + 10.0
def overview(request):
    """Returns the overview for a daterange.

    GET parameters:
    * daterange - 7d, 1m, 3m, 6m or 1y (default: 1y)

    Returns an overview dict with a count for all action types.
    """
    form = OverviewAPIForm(request.GET)
    if not form.is_valid():
        return {'success': False, 'errors': form.errors}
    daterange = form.cleaned_data.get('daterange') or '1y'
    mgr = KarmaManager()
    overview = {}
    # Count every registered karma action type over the requested window.
    for t in KarmaManager.action_types.keys():
        overview[t] = mgr.count(daterange, type=t)
    # TODO: Maybe have a karma action not assigned to a user for this?
    # Questions are not karma actions, so they are counted directly from
    # the database over the equivalent window.
    num_days = KarmaManager.date_ranges[daterange]
    start_day = date.today() - timedelta(days=num_days)
    overview['question'] = Question.objects.filter(
        created__gt=start_day).count()
    return {
        'success': True,
        'overview': overview}
def func_parallel(func, list_inputs, leave_cpu_num=1):
    """
    Map *func* over *list_inputs* using a process pool.

    :param func: callable invoked as func(list_inputs[i]); must be picklable
    :param list_inputs: each element is the input of func
    :param leave_cpu_num: num of cpu that not use
    :return: [return_of_func(list_inputs[0]), return_of_func(list_inputs[1]), ...]
    """
    # Clamp to at least one worker: the original crashed with
    # ValueError when leave_cpu_num >= cpu_count().
    workers = max(1, mp.cpu_count() - leave_cpu_num)
    # Context manager guarantees the pool is torn down even when map
    # raises (the original leaked workers on error).
    with mp.Pool(processes=workers) as pool:
        return pool.map(func, list_inputs)
def get_mean(jsondata):
    """Return the average 'price' across iTunes result entries.

    Entries without a 'price' key are skipped.

    Args:
        jsondata: decoded iTunes API response containing a 'results' list.

    Returns:
        float: mean of the available prices.
    """
    # One pass instead of branching on the result count: mean([x]) == x,
    # so the original single-item special case is subsumed (and a lone
    # entry without 'price' no longer raises KeyError).
    prices = [float(entry['price']) for entry in jsondata['results']
              if 'price' in entry]  # key name from itunes
    return mean(prices)
def evaluate_with_trajectory(
    sc_dataset: SingleCellDataset,
    n_samples: int,
    trajectory_type: str,
    trajectory_coef: Dict,
    types: DeconvolutionDatatypeParametrization,
    deconvolution_params: Dict,
    n_iters=5_000,
):
    """Evaluate L1_error and measure fit time for fitting on a simulated dataset from a given trajectory

    :param sc_dataset: SingleCellDataset for generated simulations from
    :param n_samples: number of samples along the time axis to generate
    :param trajectory_type: string indicating the trajectory type to which the `trajectory_coef` correspond
    :param trajectory_coef: trajectory coefficients
    :param types: DeconvolutionDatatypeParametrization identifying datatypes to use
    :param deconvolution_params: Dictionary with deconvolution parameters
    :param n_iters: Number of learning iterations for each execution
    :return: Dictionary with keys 'n_samples', 'l1_error_norm' and 'fit_time'
    """
    # Simulate bulk data from the single-cell signature matrix w_hat_gc.
    sim_res = simulate_data(
        w_hat_gc=torch.Tensor(sc_dataset.w_hat_gc),
        num_samples=n_samples,
        trajectory_type=trajectory_type,
        dirichlet_alpha=10.0,
        trajectory_coef=trajectory_coef,
    )
    simulated_bulk = generate_anndata_from_sim(sim_res, sc_dataset)
    ebov_simulated_dataset = DeconvolutionDataset(
        types=types,
        parametrization=DeconvolutionDatasetParametrization(
            sc_anndata=sc_dataset.sc_anndata,
            sc_celltype_col="Subclustering_reduced",
            bulk_anndata=simulated_bulk,
            bulk_time_col="time",
        ),
    )
    # Prepare deconvolution object
    pseudo_time_reg_deconv_sim = TimeRegularizedDeconvolutionModel(
        dataset=ebov_simulated_dataset, types=types, **deconvolution_params,
    )
    # Deconvolve, timing only the model fit.
    t_0 = time.perf_counter()
    pseudo_time_reg_deconv_sim.fit_model(
        n_iters=n_iters,
        verbose=True,
        log_frequency=1000,
        keep_param_store_history=False,
    )
    t_1 = time.perf_counter()
    # Calculate errors against the known simulated trajectory.
    errors = calculate_trajectory_prediction_error(sim_res, pseudo_time_reg_deconv_sim)
    # Return
    return {
        "n_samples": n_samples,
        "l1_error_norm": errors["L1_error_norm"],
        "fit_time": t_1 - t_0,
    }
def main(argv=None):
    """Entry point: perturb a PDB file according to a data file.

    Args:
        argv: command-line arguments [pdb_file, data_file]
            (defaults to sys.argv[1:]).

    Returns:
        str: the perturbed PDB contents joined into one string.

    Raises:
        PerturbPdbError: if fewer than two arguments are supplied.
    """
    if argv is None:  # `is None`, not `== None`
        argv = sys.argv[1:]
    try:
        pdb_file = argv[0]
        data_file = argv[1]
    except IndexError:
        err = "Incorrect number of arguments!\n\n%s\n\n" % __usage__
        raise PerturbPdbError(err)
    out = perturbPdb(pdb_file, data_file)
    return "".join(out)
def del_rw(action, name, exc):
    """Error handler for ``shutil.rmtree``: clear the read-only flag on
    *name* and delete it.

    Args:
        action: the function that raised (unused; rmtree handler protocol).
        name: path of the offending file.
        exc: exception info (unused; rmtree handler protocol).
    """
    os.chmod(name, stat.S_IWRITE)
    os.remove(name)
def plot_diffraction_1d(result, deg):
    """
    Returns this result instance in PlotData1D representation.

    Six plots are produced per energy: S/P/difference intensity and
    S/P/difference phase.

    :param result: diffraction result object providing setup info, angle
        deviations, energies and per-energy intensity/phase accessors.
    :param deg: if False the phase is expressed in radians, if True in degrees.
    :return: list of PlotData1D instances.
    """
    # Distinguish between the strings "phase in deg" and "phase in rad".
    phase_string = "Phase in deg" if deg else "Phase in rad"
    # Retrieve setup information.
    info_dict = result.diffractionSetup().toDictionary()
    info_dict["Bragg angle"] = str(result.braggAngle())
    # Retrieve angles of the results, converted to urad.
    angles_in_um = [i * 1e+6 for i in result.angleDeviations()]

    # Inner helper duplicating the setup info onto every plot.
    def addPlotInfo(energy, data):
        plot_data = PlotData1D(data[0], data[1], data[2])
        plot_data.set_x(angles_in_um)
        plot_data.set_y(data[3])
        for key, value in info_dict.items():
            plot_data.add_plot_info(key, value)
        plot_data.add_plot_info("Energy", str(energy))
        return plot_data

    plots = []
    for energy in result.energies():
        # One (title, y-label, values) spec per plot; this replaces six
        # near-identical stanzas and the unused `categories` list of the
        # original.
        specs = [
            ("Intensity - Polarization S", "Intensity",
             result.sIntensityByEnergy(energy)),
            ("Intensity - Polarization P", "Intensity",
             result.pIntensityByEnergy(energy)),
            ("Intensity difference", "Intensity",
             result.differenceIntensityByEnergy(energy)),
            ("Phase - Polarization S", phase_string,
             result.sPhaseByEnergy(energy, deg)),
            ("Phase - Polarization P", phase_string,
             result.pPhaseByEnergy(energy, deg)),
            ("Phase difference", phase_string,
             result.differencePhaseByEnergy(energy, deg)),
        ]
        for title, ylabel, values in specs:
            plots.append(addPlotInfo(
                energy,
                (title, "Angle deviation in urad", ylabel, values)))
    return plots
def prepare_go_environ():
    """Returns dict with environment variables to set to use Go toolset.

    Installs or updates the toolset and vendored dependencies if necessary.
    """
    # bootstrap is a no-op when the toolset is already up to date.
    bootstrap(LAYOUT, logging.INFO)
    return get_go_environ(LAYOUT)
def get_subnets(client, name='tag:project', values=None, dry=True):
    """
    Get subnet(s) by tag (note: create_tags not working via client api, use cidr or object_id instead )

    :param client: boto3 EC2 client
    :param name: filter name, e.g. 'tag:project'
    :param values: filter values; defaults to [ec2_project_name]
    :param dry: perform a DryRun request
    https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.describe_subnets
    """
    # None default replaces the original mutable default argument
    # `values=[ec2_project_name,]`, which was a single shared list.
    if values is None:
        values = [ec2_project_name]
    try:
        return client.describe_subnets(Filters=[{'Name': name, 'Values': values}],
                                       DryRun=dry)
    except Exception as err:
        handle(err)
def get_object_handler(s3_client, request_context, user_request):
    """
    Handler for the GetObject Operation

    :param s3_client: s3 client
    :param request_context: GetObject request context (provides inputS3Url,
        outputRoute and outputToken)
    :param user_request: user request (provides the original headers)
    :return: WriteGetObjectResponse
    """
    # Validate user request and return error if invalid
    requests_validation = validator.validate_request(user_request)
    if not requests_validation.is_valid:
        return error.write_error_response(s3_client, request_context, requests.codes.bad_request,
                                          'InvalidRequest', requests_validation.error_msg)
    # Get the original object from Amazon S3 via the presigned input URL.
    s3_url = request_context["inputS3Url"]
    request_header = get_request_header(user_request["headers"])
    object_response = requests.get(s3_url, headers=request_header)
    # Check if the get original object request from S3 is successful
    if object_response.status_code != requests.codes.ok:
        # For 304 Not Modified, Error Message dont need to be send
        if object_response.status_code == requests.codes.not_modified:
            return s3_client.write_get_object_response(
                RequestRoute=request_context["outputRoute"],
                RequestToken=request_context["outputToken"],
                StatusCode=object_response.status_code,
            )
        return error.write_error_response_for_s3(s3_client,
                                                 request_context,
                                                 object_response)
    # Transform the object
    original_object = object_response.content
    transformed_whole_object = transform.transform_object(original_object)
    # Handle range or partNumber if present in the request; the range is
    # applied to the *transformed* object, not the original bytes.
    partial_object_response = apply_range_or_part_number(transformed_whole_object, user_request)
    if partial_object_response.hasError:
        return error.write_error_response(s3_client, request_context, requests.codes.bad_request,
                                          'InvalidRequest', partial_object_response.error_msg)
    transformed_object = partial_object_response.object
    # Send the transformed object back to Amazon S3 Object Lambda,
    # attaching a body checksum in the response metadata.
    transformed_object_checksum = checksum.get_checksum(transformed_object)
    return s3_client.write_get_object_response(RequestRoute=request_context["outputRoute"],
                                               RequestToken=request_context["outputToken"],
                                               Body=transformed_object,
                                               Metadata={
                                                   'body-checksum-algorithm': transformed_object_checksum.algorithm,
                                                   'body-checksum-digest': transformed_object_checksum.digest
                                               })
async def test_get_triggers_for_invalid_device_id(hass, device_reg, coap_wrapper):
    """Test error raised for invalid shelly device_id."""
    assert coap_wrapper
    config_entry = MockConfigEntry(domain=DOMAIN, data={})
    config_entry.add_to_hass(hass)
    # Register a device with no shelly data behind it: asking for its
    # triggers must raise instead of returning an empty list.
    invalid_device = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    with pytest.raises(InvalidDeviceAutomationConfig):
        await async_get_device_automations(hass, "trigger", invalid_device.id)
def NormalizePath(path):
    """Returns a path normalized to how we write DEPS rules and compare paths."""
    # Case-fold per platform, then force forward slashes.
    normalized = os.path.normcase(path)
    return normalized.replace(os.path.sep, posixpath.sep)
def libritts(
    target_dir: Pathlike,
):
    """LibriTTS data download.

    Thin CLI wrapper around download_libritts.

    :param target_dir: directory into which the corpus is downloaded
    """
    download_libritts(target_dir)
def eval_imgs_output_dets(opt,
                          data_loader,
                          data_type,
                          result_f_name,
                          out_dir,
                          save_dir=None,
                          show_image=True):
    """
    Run detection over every image in the loader and write per-image
    label files plus an aggregated results file.

    :param opt: options namespace (device, num_classes, ...)
    :param data_loader: iterable of (path, preprocessed img, original img)
    :param data_type: dataset type tag passed through to write_results_dict
    :param result_f_name: path of the aggregated results file
    :param out_dir: directory receiving the per-image detection .txt files
        (recreated from scratch on each run)
    :param save_dir: optional directory for rendered detection images
    :param show_image: show each rendered frame in an OpenCV window
    :return: (frame count, average time per frame, number of timed calls)
    """
    if save_dir:
        mkdir_if_missing(save_dir)
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    else:
        shutil.rmtree(out_dir)
        os.makedirs(out_dir)
    # init tracker
    tracker = JDETracker(opt, frame_rate=30)
    timer = Timer()
    results_dict = defaultdict(list)
    frame_id = 0  # frame index(start from 0)
    for path, img, img_0 in data_loader:
        if frame_id % 30 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'
                        .format(frame_id, 1.0 / max(1e-5, timer.average_time)))
        blob = torch.from_numpy(img).to(opt.device).unsqueeze(0)
        # ----- run detection
        timer.tic()
        # update detection results
        dets_dict = tracker.update_detection(blob, img_0)
        timer.toc()
        # -----
        # plot detection results
        if show_image or save_dir is not None:
            online_im = vis.plot_detects(image=img_0,
                                         dets_dict=dets_dict,
                                         num_classes=opt.num_classes,
                                         frame_id=frame_id,
                                         fps=1.0 / max(1e-5, timer.average_time))
        if frame_id > 0:
            # optionally display the intermediate result
            if show_image:
                cv2.imshow('online_im', online_im)
            if save_dir is not None:
                cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
        # ----- format detection results and write them (txt) to out_dir
        # format: convert dict to per-detection rows normalized by image size
        dets_list = format_dets_dict2dets_list(dets_dict, w=img_0.shape[1], h=img_0.shape[0])
        # write the label (txt) file named after the image
        out_img_name = os.path.split(path)[-1]
        out_f_name = out_img_name.replace('.jpg', '.txt')
        out_f_path = out_dir + '/' + out_f_name
        with open(out_f_path, 'w', encoding='utf-8') as w_h:
            w_h.write('class prob x y w h total=' + str(len(dets_list)) + '\n')
            for det in dets_list:
                w_h.write('%d %f %f %f %f %f\n' % (det[0], det[1], det[2], det[3], det[4], det[5]))
        print('{} written'.format(out_f_path))
        # frame done: advance frame_id
        frame_id += 1
    print('Total {:d} detection result output.\n'.format(frame_id))
    # save the final aggregated results
    write_results_dict(result_f_name, results_dict, data_type)
    # return results
    return frame_id, timer.average_time, timer.calls
def decline_agreement(supplier_code):
    """Decline agreement (role=supplier)
    ---
    tags:
      - seller edit
    parameters:
      - name: supplier_code
        in: path
        type: number
        required: true
    responses:
      200:
        description: Agreement declined.
      400:
        description: Bad request.
      403:
        description: Unauthorised to decline agreement.
      404:
        description: Supplier not found.
      500:
        description: Unexpected error.
    """
    # Only the supplier itself may decline its own agreement.
    if current_user.supplier_code != supplier_code:
        return forbidden('Unauthorised to decline agreement')
    try:
        seller_edit_business.decline_agreement({
            'supplier_code': current_user.supplier_code,
            'email_address': current_user.email_address
        })
    except NotFoundError as nfe:
        # NOTE(review): the value from not_found() is discarded and control
        # falls through to the 200 response below — presumably this should
        # be `return not_found(...)` unless not_found() aborts internally;
        # confirm against sibling handlers.
        not_found(str(nfe))
    except DeletedError as de:
        abort(str(de))
    except UnauthorisedError as ue:
        abort(str(ue))
    return Response(status=200)
def load_model(model, model_path):
    """
    Load model weights from a saved state dict.

    Args:
        model: the torch module (possibly wrapped in DataParallel/DDP,
            i.e. exposing `.module`) to load weights into.
        model_path: path of the saved state dict.

    Returns:
        The same `model` object, with weights loaded (strict=False:
        missing/unexpected keys are tolerated).
    """
    # Unwrap DataParallel-style wrappers once instead of duplicating the
    # load call in two branches.
    target = model.module if hasattr(model, "module") else model
    state_dict = torch.load(model_path, map_location="cpu")
    target.load_state_dict(state_dict, strict=False)
    return model
def push_to_drive_as_child(drive, local_meta, filename, parent_id):
    """
    Upload a file into a Google Drive folder.  Concurrently executed by many
    threadpool workers in parallel.

    Args:
        drive (GoogleDrive): authenticated pydrive client
        local_meta (pandas.DataFrame): metadata table (kept for interface
            compatibility; not used by the upload itself)
        filename (str): path of the file being uploaded
        parent_id (str): Google Drive id of the destination folder
    """
    # The commented-out queue bookkeeping of the original (pushing
    # [row, drive-file-id] pairs) was dead code and has been removed.
    file = drive.CreateFile({'parents': [{'id': parent_id}]})
    file.SetContentFile(filename)
    file.Upload()
def get_file_size(filepath: str):
    """
    Return the number of bytes of *filepath* that are actually readable on disk.

    ``os.stat``/``os.path.getsize`` can report the fully allocated size of a
    file that is still being copied, rather than the bytes written so far.
    Opening the file and seeking to its end yields the currently readable
    length instead, which is what we want while e.g. a Kongsberg .all file is
    still being written.

    Parameters
    ----------
    filepath
        file path to a file being written

    Returns
    -------
    int
        file size in bytes
    """
    # BUG FIX: open in binary mode. These are binary files; text mode would
    # try to decode them, and text-mode tell() returns an opaque cookie rather
    # than a byte offset.
    with open(filepath, "rb") as stream:
        # Move to the end of the file; the resulting offset is the size.
        stream.seek(0, os.SEEK_END)
        return stream.tell()
def get_aws_regions_from_file(region_file):
    """
    Return the sorted list of region names read from *region_file*.

    The expected file layout is::

        {
            "regions": [
                "cn-north-1",
                "cn-northwest-1"
            ]
        }
    """
    with open(region_file) as handle:
        payload = json.load(handle)
    return sorted(payload.get("regions"))
def item_pack():
    """ RESTful CRUD controller """
    # Disable the inline list-add form for item packs, then hand the request
    # off to the generic REST controller.
    options = {"listadd": False}
    s3db.configure("supply_item_pack", **options)
    return s3_rest_controller()
def add_config_vars_to_argparse(args):
    """
    Import all defined config vars into `args`, for parsing command line.
    :param args: A container for argparse vars
    :type args: argparse.ArgumentParser or argparse._ArgumentGroup
    :return:
    """
    global _groups
    for group_name, group in _groups.items():
        for key in group:
            var = group._var_object(key)
            # Expose each config var as "--<group>.<key>", typed and
            # defaulted from its declared definition.
            args.add_argument(
                "--{}.{}".format(group_name, key),
                type=type(var.default),
                default=var.default,
                help=var.description,
            )
def inv_cipher(rkey, ct, Nk=4):
    """AES decryption cipher."""
    assert Nk in {4, 6, 8}
    Nr = Nk + 6
    rkey = rkey.reshape(4 * (Nr + 1), 32)
    # Pre-slice the expanded key into one 4-word round key per round.
    round_keys = [rkey[4 * r:4 * (r + 1)] for r in range(Nr + 1)]
    # Initial step: whiten the ciphertext with the last round key.
    state = add_round_key(ct.reshape(128), round_keys[Nr])
    # Rounds Nr-1 .. 1: full inverse rounds.
    for rnd in range(Nr - 1, 0, -1):
        state = inv_shift_rows(state)
        state = inv_sub_bytes(state)
        state = add_round_key(state, round_keys[rnd])
        state = inv_mix_columns(state)
    # Final round omits InvMixColumns and uses the first round key.
    state = inv_shift_rows(state)
    state = inv_sub_bytes(state)
    return add_round_key(state, round_keys[0])
def get_channel_output(channel_id: Optional[pulumi.Input[str]] = None,
                       project: Optional[pulumi.Input[Optional[str]]] = None,
                       site_id: Optional[pulumi.Input[str]] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetChannelResult]:
    """
    Retrieves information for the specified channel of the specified site.

    :param channel_id: Id of the channel to look up.
    :param project: Project the site belongs to — presumably falls back to a
        provider default when omitted; TODO confirm against provider docs.
    :param site_id: Id of the site that owns the channel.
    :param opts: Options controlling how this invoke is executed.
    """
    ...
def get_version(module='spyder_terminal'):
    """Get the version string from the module's ``__init__.py``.

    Looks for a line of the form ``VERSION_INFO = (x, y, z)`` and joins the
    tuple elements with dots.

    Raises
    ------
    RuntimeError
        If no ``VERSION_INFO`` assignment is present in the file.
    """
    with open(os.path.join(HERE, module, '__init__.py'), 'r') as f:
        lines = f.read().split('\n')
    for line in lines:
        if line.startswith('VERSION_INFO'):
            # Safely evaluate the literal tuple on the right-hand side.
            version_tuple = ast.literal_eval(line.split('=')[-1].strip())
            return '.'.join(map(str, version_tuple))
    # BUG FIX: previously fell through with `version` unbound, raising an
    # opaque UnboundLocalError; fail with an explicit message instead.
    raise RuntimeError('VERSION_INFO not found in %s/__init__.py' % module)
def _ptrarray_to_list(ptrarray):
    """Converts a ptr_array structure from SimpLL into a Python list."""
    # Copy the pointers out first, then release the underlying C array.
    items = [ptrarray.arr[i] for i in range(ptrarray.len)]
    lib.freePointerArray(ptrarray)
    return items
def perform_modifications(statemachine, amount=1, possible_modifications=None):
    """Starting point for modifications upon interfaces.
    Performs modifications as specified in the parameters.
    N (amount) of modifications are selected at random from possible_modifications and then attempted
    to be applied upon the provided statemachine.
    Args:
        statemachine (generator.StateMachine object): An original statemachine object created using "generator.py"
        amount (int, optional): Amount of modifications to be applied upon interface. Defaults to 1.
        possible_modifications (list, optional): A list of modification function references (from modifications.py).
                                                 Defaults to None, treated as an empty list.
    Returns:
        (generator.StateMachine object,list): Returns a tuple containing the modified statemachine object
                                              and the ordered list of modifications applied.
                                              This can be "False" if no modifications could succesfully be applied.
    """
    # BUG FIX: the mutable default argument `possible_modifications=[]` is a
    # shared-state hazard; use None and normalise here instead.
    if possible_modifications is None:
        possible_modifications = []
    # Have to keep track of states already modified to prevent conflicts within AR file creation.
    global already_modified
    statemachine = copy.deepcopy(statemachine)
    done_modifications = []
    # Loop over the amount of modifications to be selected.
    for _ in range(amount):
        # Select at random a modification from the function references list and store it
        selected = random.choice(possible_modifications)
        done_modifications.append(selected.__name__)
        # If the selected modification is create:
        # The state upon which this may be applied cannot be a begin state.
        if selected == create:
            selected_state = random.choice([x for x in statemachine.states if x != statemachine.BeginState and x not in already_modified])
            already_modified.append(selected_state)
            if not selected(statemachine, selected_state):
                print("Something went wrong")  # Temporary debug
                return False
        # If the selected modification is delete or split, the selected transition must be an output on the server side.
        if selected == delete or selected == split:
            selected_state = random.choice([x.end for x in statemachine.transitions if x.output and x not in already_modified])
            already_modified.append(selected_state)
            if not selected(statemachine, selected_state):
                print("Something went wrong")  # Temporary debug
                return False
        # If the selected modification is merge the same rules apply as per delete and split, also have to select if merge must happen
        # on 2 or 3 outputs.
        if selected == merge:
            # NOTE(review): this branch appends a *list* of states as a single
            # element of already_modified, while the other branches append
            # individual states — the later `x not in already_modified` checks
            # will not match those states. Confirm this is intended.
            selected_state = random.sample([x.end for x in statemachine.transitions if x.output and x not in already_modified], random.choice([2, 3]))
            already_modified.append(selected_state)
            if not merge(selected_state, statemachine):
                print("something went wrong")
                return False
    # TODO: UPDATE NUMBERS
    return (statemachine, done_modifications)
def endpoint(path: str) -> Callable[[], Endpoint]:
    """Decorator that turns a method into an :class:`Endpoint`.

    Arguments:
        path: The path to the API endpoint (relative to the API's
            ``base_url``).

    Returns:
        A decorator producing an ``Endpoint`` that pairs *path* with the
        converter built from the decorated method.
    """
    def decorate(method):
        converter = build_converter(method)
        return Endpoint(path, converter)
    return decorate
def from_string_to_bytes(a):
    """
    Based on project: https://github.com/chaeplin/dashmnb.

    Return *a* unchanged when it is already ``bytes``; otherwise encode it
    as UTF-8.
    """
    if isinstance(a, bytes):
        return a
    return bytes(a, 'utf-8')
def test_folder_detail_view_contains_folder_data(client, user, folder):
    """
    Check that FolderDetailView renders the requested folder's data.

    :param client: test client fixture
    :param user: user fixture used to authenticate the request
    :param folder: A Folder object created by the FolderFactory fixture
    """
    client.force_login(user)
    url = reverse("folders:detail", kwargs={"slug": folder.slug})
    resp = client.get(url)
    for expected in (folder.name, folder.creator):
        assertContains(resp, expected)
def split(x, divider):
    """Split a string.

    Parameters
    ----------
    x : any
        A str object to be split. Anything else is returned as is.
    divider : str
        Divider string.
    """
    # Non-strings pass through untouched.
    if not isinstance(x, str):
        return x
    return x.split(divider)
def file_info(file_name):
    """Print the name and shape of every series available in *file_name*.

    Args:
        file_name (str): path to file
    """
    JVM().start()
    meta_data = metadata(file_name)
    with bf.ImageReader(file_name) as reader:
        # Walk every series in the file, printing its name and data shape.
        for series_idx in range(reader.rdr.getSeriesCount()):
            reader.rdr.setSeries(series_idx)
            shape = _get_TXCYX_shape(reader)
            name = meta_data.image(series_idx).get_Name()
            print(f"Series {series_idx}: {name}, {shape}")
def despesa_update(despesa_id):
    """
    Edit an expense.

    Args:
        despesa_id (int): ID of the expense to be edited.

    The balance-adjustment arithmetic is delegated to utils.py:
    adicionar_registro().

    Returns:
        Rendered template: despesa.html
        Redirect: aplication.transacoes
    """
    despesa = Despesa.query.get_or_404(despesa_id)
    # Only the owner may edit their own expense.
    if despesa.user != current_user:
        abort(403)
    form = DespesaForm()
    if form.validate_on_submit():
        # Keep the previous amount and account so balances can be
        # re-adjusted below.
        valor_antigo = despesa.valor
        id_conta_bancaria_antiga = despesa.conta_bancaria.id
        despesa.valor=form.valor.data
        despesa.data_origem=form.data_origem.data
        despesa.descricao=form.descricao.data
        despesa.categoria_despesa=form.categoria.data
        despesa.conta_bancaria=form.conta.data
        # NOTE(review): only expenses with a truthy status trigger the balance
        # adjustment — presumably status marks a settled expense; confirm.
        if despesa.status:
            adicionar_registro(id_conta_bancaria_antiga, form.conta.data.id,
                               valor_antigo, form.valor.data, 1)
        db.session.commit()
        flash('Sua despesa foi alterada.', 'success')
        return redirect(url_for('aplication.transacoes', despesa_id=despesa.id))
    elif request.method == 'GET':
        # Pre-populate the form with the expense's current values.
        form.valor.data=despesa.valor
        form.data_origem.data=despesa.data_origem
        form.descricao.data=despesa.descricao
        form.categoria.data=despesa.categoria_despesa
        form.conta.data=despesa.conta_bancaria
    return render_template('despesa.html', title='Atualizar despesa',
                           legend='Atualizar despesa', form=form)
def _convert_format(partition):
"""
Converts the format of the python-louvain into a numpy array
Parameters
----------
partition : dict
Standard output from python-louvain package
Returns
-------
partition: np.array
Partition as a numpy array
"""
return np.array([partition[val] for val in partition.keys()]) | 5,328,861 |
def populate_intercom_user(profile):
    """
    Creates or updates an intercom user with information from TffProfile (from UserDetails)
    """
    intercom_plugin = get_intercom_plugin()
    if not intercom_plugin:
        # Intercom integration not configured; nothing to do.
        return
    intercom_user = upsert_intercom_user(profile.username, profile)
    tag_intercom_users(IntercomTags.APP_REGISTER, [profile.username])
    # NOTE(review): this early return makes everything below unreachable; the
    # welcome-chat block appears to be deliberately disabled — confirm intent
    # before removing or re-enabling it.
    return
    message = """Welcome to the ThreeFold Foundation app.
If you have questions you can get in touch with us through this chat.
Our team is at your service during these hours:
Sunday: 07:00 - 15:00 GMT +1
Monday - Friday: 09:00 - 17:00 GMT +1
Of course you can always ask your questions outside these hours, we will then get back to you the next business day."""
    email, app_id = get_app_user_tuple(profile.app_user)
    chat_id = start_or_get_chat(get_tf_token_api_key(), '+default+', email.email(), app_id,
                                intercom_user, message)
    deferred.defer(store_chat_id_in_user_data, chat_id, email.email(), app_id, _countdown=10)
def settingsdir():
    """Return the directory in which the settings file is saved."""
    return "{}/settings".format(module_dir())
def _is_iqn_attached(sess, iqn):
    """
    Verify if the oci volume with iqn is attached to this instance.

    Parameters
    ----------
    sess: OCISession
        The OCISession instance.
    iqn: str
        The iSCSI qualified name.

    Returns
    -------
    str
        The ocid of the attached volume, or None when the volume is unknown
        or not attached.
    """
    # BUG FIX: the '%s' placeholder previously had no argument, so the log
    # line printed a literal '[%s]'. Pass iqn lazily (logging %-style args).
    _logger.debug('Verifying if [%s] is attached to this instance.', iqn)
    volume_data = get_volume_by_iqn(sess, iqn)
    if volume_data is None:
        return None
    if volume_data.is_attached():
        return volume_data.get_ocid()
    return None
def test_total(snaptype):
    """Exercise every function of the `total` analysis module on a snapshot."""
    snap = plonk.load_snap(DIR / snaptype.filename)
    checks = (
        total.accreted_mass,
        total.angular_momentum,
        total.center_of_mass,
        total.kinetic_energy,
        total.mass,
        total.momentum,
        total.specific_angular_momentum,
        total.specific_kinetic_energy,
    )
    for check in checks:
        check(snap=snap)
    snap.close_file()
def run_range_mcraptor(
    timetable: Timetable,
    origin_station: str,
    dep_secs_min: int,
    dep_secs_max: int,
    max_rounds: int,
) -> Dict[str, List[Journey]]:
    """
    Perform the McRAPTOR algorithm for a range query.

    Runs one McRAPTOR pass per candidate departure time within
    [dep_secs_min, dep_secs_max] (processed latest-first so each pass can
    reuse the previous pass's label bag), then collects unique Pareto-optimal
    journeys from origin_station to every other station.
    """
    # Get stops for origins and destinations
    from_stops = timetable.stations.get_stops(origin_station)
    destination_stops = {
        st.name: timetable.stations.get_stops(st.name) for st in timetable.stations
    }
    # The origin is not a destination of itself.
    destination_stops.pop(origin_station, None)
    # Find all trips leaving from stops within time range
    potential_trip_stop_times = timetable.trip_stop_times.get_trip_stop_times_in_range(
        from_stops, dep_secs_min, dep_secs_max
    )
    # Deduplicated candidate departure times, latest first.
    potential_dep_secs = sorted(
        list(set([tst.dts_dep for tst in potential_trip_stop_times])), reverse=True
    )
    logger.info(
        "Potential departure times : {}".format(
            [sec2str(x) for x in potential_dep_secs]
        )
    )
    journeys_to_destinations = {
        station_name: [] for station_name, _ in destination_stops.items()
    }
    logger.info("Calculating journeys to all destinations")
    s = perf_counter()
    # Find Pareto-optimal journeys for all possible departure times
    for dep_index, dep_secs in enumerate(potential_dep_secs):
        logger.info(f"Processing {dep_index} / {len(potential_dep_secs)}")
        logger.info(f"Analyzing best journey for departure time {sec2str(dep_secs)}")
        # Run Round-Based Algorithm; from the second departure onwards the
        # previous bag seeds the run so earlier departures only improve on it.
        mcraptor = McRaptorAlgorithm(timetable)
        if dep_index == 0:
            bag_round_stop, actual_rounds = mcraptor.run(from_stops, dep_secs, max_rounds)
        else:
            bag_round_stop, actual_rounds = mcraptor.run(from_stops, dep_secs, max_rounds, last_round_bag)
        last_round_bag = copy(bag_round_stop[actual_rounds])
        # Determine the best destination ID, destination is a platform
        for destination_station_name, to_stops in destination_stops.items():
            destination_legs = best_legs_to_destination_station(
                to_stops, last_round_bag
            )
            if len(destination_legs) != 0:
                journeys = reconstruct_journeys(
                    from_stops, destination_legs, bag_round_stop, k=actual_rounds
                )
                journeys_to_destinations[destination_station_name].extend(journeys)
    logger.info(f"Journey calculation time: {perf_counter() - s}")
    # Keep unique journeys (list-based de-duplication; Journey equality is
    # assumed to be defined — O(n^2), acceptable for small result sets)
    for destination_station_name, journeys in journeys_to_destinations.items():
        unique_journeys = []
        for journey in journeys:
            if not journey in unique_journeys:
                unique_journeys.append(journey)
        journeys_to_destinations[destination_station_name] = unique_journeys
    return journeys_to_destinations
def test_invert_image_filter_with_numpy():
    """Check that InvertImageFilter negates a NumpyImageContainer voxel-wise."""
    filt = InvertImageFilter()
    data = np.ones(shape=(3, 3, 3, 1), dtype=np.float32)
    container = NumpyImageContainer(image=data)
    filt.add_input("image", container)
    filt.run()
    assert_array_equal(filt.outputs["image"].image, -data)
def _choose_node_type(w_operator, w_constant, w_input, t):
    """
    Choose a random node (from operators, constants and input variables).

    :param w_operator: Weighting of choosing an operator
    :param w_constant: Weighting of choosing a constant
    :param w_input: Weighting of choosing an input
    :param t: Trace object
    :return: An operator, constant or input variable
    """
    # Draw a point in [0, total weight) and pick the bucket it lands in.
    pick = t.random() * (w_operator + w_constant + w_input)
    if pick < w_operator:
        return BNode(_random_from_list(operators, t))
    if pick < w_operator + w_constant:
        return _random_constant(t)
    return input_var
def plot_prisma_diagram(save_cfg=cfg.saving_config):
    """Plot a PRISMA-style diagram of the article selection process.

    TODO:
    - Use first two colors of colormap instead of gray
    - Reduce white space
    - Reduce arrow width
    """
    save_format = 'pdf'
    size = '{},{}!'.format(0.5 * save_cfg['page_width'],
                           0.2 * save_cfg['page_height'])
    dot = Digraph(format=save_format)
    dot.attr('graph', rankdir='TB', overlap='false', size=size, margin='0')
    dot.attr('node', fontname='Liberation Sans', fontsize=str(9), shape='box',
             style='filled', margin='0.15,0.07', penwidth='0.1')
    fillcolor = 'gray98'
    # Declare all boxes (name, label, fill) and connect them afterwards.
    nodes = [
        ('A', 'PubMed (n=39)\nGoogle Scholar (n=409)\narXiv (n=105)', 'gray95'),
        ('B', 'Articles identified\nthrough database\nsearching\n(n=553)', fillcolor),
        ('C', 'Articles after content\nscreening and\nduplicate removal\n(n=105) ', fillcolor),
        ('D', 'Articles included in\nthe analysis\n(n=154)', fillcolor),
        ('E', 'Additional articles\nidentified through\nbibliography search\n(n=49)', fillcolor),
    ]
    for node_name, label, color in nodes:
        dot.node(node_name, label, fillcolor=color)
    for tail, head in (('B', 'C'), ('C', 'D'), ('E', 'D')):
        dot.edge(tail, head)
    if save_cfg is not None:
        fname = os.path.join(save_cfg['savepath'], 'prisma_diagram')
        dot.render(filename=fname, view=False, cleanup=False)
    return dot
def get_script(software):
    """
    Return the path of the post-install script of *software*, or None when no
    such script exists.

    :rtype: str
    """
    dir_scripts = get_scripts_location()
    # Membership test replaces the previous manual scan over os.listdir().
    if software in os.listdir(dir_scripts):
        return os.path.join(dir_scripts, software)
    return None
def _plat_idx_to_val(idx: int , edge: float = 0.5, FIO_IO_U_PLAT_BITS: int = 6, FIO_IO_U_PLAT_VAL: int = 64) -> float:
""" Taken from fio's stat.c for calculating the latency value of a bin
from that bin's index.
idx : the value of the index into the histogram bins
edge : fractional value in the range [0,1]** indicating how far into
the bin we wish to compute the latency value of.
** edge = 0.0 and 1.0 computes the lower and upper latency bounds
respectively of the given bin index. """
# MSB <= (FIO_IO_U_PLAT_BITS-1), cannot be rounded off. Use
# all bits of the sample as index
if (idx < (FIO_IO_U_PLAT_VAL << 1)):
return idx
# Find the group and compute the minimum value of that group
error_bits = (idx >> FIO_IO_U_PLAT_BITS) - 1
base = 1 << (error_bits + FIO_IO_U_PLAT_BITS)
# Find its bucket number of the group
k = idx % FIO_IO_U_PLAT_VAL
# Return the mean (if edge=0.5) of the range of the bucket
return base + ((k + edge) * (1 << error_bits)) | 5,328,871 |
def is_blank(value):
    """
    Returns True if ``value`` is ``None`` or an empty string.

    >>> is_blank("")
    True
    >>> is_blank(0)
    False
    >>> is_blank([])
    False
    """
    if value is None:
        return True
    return value == ""
def overload_check(data, min_overload_samples=3):
    """Check data for overload.

    A channel is considered overloaded when at least ``min_overload_samples``
    samples share the maximum absolute value.

    :param data: one or two (time, samples) dimensional array
    :param min_overload_samples: number of samples that need to be equal to max
        for overload
    :return: bool for 1-D input; list of bool (one per column) for 2-D input
    :raises ValueError: if ``data`` has more than two dimensions
    """
    if data.ndim > 2:
        # Specific exception type instead of a bare Exception (still caught
        # by callers handling Exception).
        raise ValueError('Number of dimensions of data should be 2 or less')

    def _overload_check(x):
        # Count how many samples sit exactly at the peak magnitude; sorting
        # (as before) is unnecessary for this comparison.
        magnitudes = np.abs(x)
        return bool(np.count_nonzero(magnitudes == magnitudes.max()) >= min_overload_samples)

    if data.ndim == 2:
        # Evaluate each channel (column) independently.
        return [_overload_check(column) for column in data.T]
    return _overload_check(data)
def main():
"""Main function"""
args = parse_args()
print('Called with args:')
print(args)
if not torch.cuda.is_available():
sys.exit("Need a CUDA device to run the code.")
if args.cuda or cfg.NUM_GPUS > 0:
cfg.CUDA = True
else:
raise ValueError("Need Cuda device to run !")
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
if cfg.RPN.RPN_ON:
assert (args.load_pretrained is not None) | (args.load_ckpt is not None)
else:
assert (args.load_pretrained is not None) | (cfg.MODEL.LOAD_PRETRAINED_BACKBONE_WEIGHTS is not "") | \
(args.load_ckpt is not None)
if args.load_pretrained is not None and not os.path.exists(args.load_pretrained):
raise ValueError("Specified pretrained detectron model does not exists")
elif args.load_pretrained is not None:
cfg.GAN.TRAIN.PRETRAINED_WEIGHTS = args.load_pretrained
if args.output_dir is not None:
cfg.OUTPUT_DIR = args.output_dir
# Adaptively adjust some configs
original_num_gpus = cfg.NUM_GPUS
cfg.NUM_GPUS = torch.cuda.device_count()
# Adaptively adjust some configs for the PRE-TRAINING
original_batch_size_pre = cfg.NUM_GPUS * cfg.GAN.TRAIN.IMS_PER_BATCH_PRE
original_ims_per_batch_pre = cfg.GAN.TRAIN.IMS_PER_BATCH_PRE
if args.batch_size_pre is None:
args.batch_size_pre = original_batch_size_pre
assert (args.batch_size_pre % cfg.NUM_GPUS) == 0, \
'batch_size: %d, NUM_GPUS: %d' % (args.batch_size_pre, cfg.NUM_GPUS)
cfg.GAN.TRAIN.IMS_PER_BATCH_PRE = args.batch_size_pre // cfg.NUM_GPUS
effective_batch_size_pre = args.iter_size * args.batch_size_pre
print('effective_batch_size_pre = batch_size * iter_size = %d * %d' % (args.batch_size_pre, args.iter_size))
print('Adaptive config changes:')
print(' effective_batch_size: %d --> %d' % (original_batch_size_pre, effective_batch_size_pre))
print(' NUM_GPUS: %d --> %d' % (original_num_gpus, cfg.NUM_GPUS))
print(' IMS_PER_BATCH: %d --> %d' % (original_ims_per_batch_pre, cfg.GAN.TRAIN.IMS_PER_BATCH_PRE))
# Adaptively adjust some configs for discriminator #
original_batch_size_D = cfg.NUM_GPUS * cfg.GAN.TRAIN.IMS_PER_BATCH_D
original_ims_per_batch_D = cfg.GAN.TRAIN.IMS_PER_BATCH_D
if args.batch_size_D is None:
args.batch_size_D = original_batch_size_D
assert (args.batch_size_D % cfg.NUM_GPUS) == 0, \
'batch_size: %d, NUM_GPUS: %d' % (args.batch_size_D, cfg.NUM_GPUS)
cfg.GAN.TRAIN.IMS_PER_BATCH_D = args.batch_size_D // cfg.NUM_GPUS
effective_batch_size_D = args.iter_size * args.batch_size_D
print('effective_batch_size_D = batch_size * iter_size = %d * %d' % (args.batch_size_D, args.iter_size))
print('Adaptive config changes:')
print(' effective_batch_size: %d --> %d' % (original_batch_size_D, effective_batch_size_D))
print(' NUM_GPUS: %d --> %d' % (original_num_gpus, cfg.NUM_GPUS))
print(' IMS_PER_BATCH: %d --> %d' % (original_ims_per_batch_D, cfg.GAN.TRAIN.IMS_PER_BATCH_D))
# Adaptively adjust some configs for generator #
original_batch_size_G = cfg.NUM_GPUS * cfg.GAN.TRAIN.IMS_PER_BATCH_G
original_ims_per_batch_G = cfg.GAN.TRAIN.IMS_PER_BATCH_G
if args.batch_size_G is None:
args.batch_size_G = original_batch_size_G
assert (args.batch_size_G % cfg.NUM_GPUS) == 0, \
'batch_size: %d, NUM_GPUS: %d' % (args.batch_size_G, cfg.NUM_GPUS)
cfg.GAN.TRAIN.IMS_PER_BATCH_G = args.batch_size_G // cfg.NUM_GPUS
effective_batch_size_G = args.iter_size * args.batch_size_G
print('effective_batch_size_G = batch_size * iter_size = %d * %d' % (args.batch_size_G, args.iter_size))
print('Adaptive config changes:')
print(' effective_batch_size: %d --> %d' % (original_batch_size_G, effective_batch_size_G))
print(' NUM_GPUS: %d --> %d' % (original_num_gpus, cfg.NUM_GPUS))
print(' IMS_PER_BATCH: %d --> %d' % (original_ims_per_batch_G, cfg.GAN.TRAIN.IMS_PER_BATCH_G))
# Adjust learning based on batch size change linearly
# For iter_size > 1, gradients are `accumulated`, so lr is scaled based
# on batch_size instead of effective_batch_size
old_base_lr_D = cfg.GAN.SOLVER.BASE_LR_D
old_base_lr_G = cfg.GAN.SOLVER.BASE_LR_G
old_base_lr_pre = cfg.GAN.SOLVER.BASE_LR_PRE
cfg.GAN.SOLVER.BASE_LR_D *= args.batch_size_D / original_batch_size_D
cfg.GAN.SOLVER.BASE_LR_PRE *= args.batch_size_pre / original_batch_size_pre
cfg.GAN.SOLVER.BASE_LR_G *= args.batch_size_G / original_batch_size_G
print('Adjust BASE_LR_PRE linearly according to batch_size change:\n'
' BASE_LR: {} --> {}'.format(old_base_lr_pre, cfg.GAN.SOLVER.BASE_LR_PRE))
print('Adjust BASE_LR_D linearly according to batch_size change:\n'
' BASE_LR: {} --> {}'.format(old_base_lr_D, cfg.GAN.SOLVER.BASE_LR_D))
print('Adjust BASE_LR_G linearly according to batch_size change:\n'
' BASE_LR: {} --> {}'.format(old_base_lr_G, cfg.GAN.SOLVER.BASE_LR_G))
# Adjust solver steps
step_scale_pre = original_batch_size_pre / effective_batch_size_pre
step_scale_D = original_batch_size_D / effective_batch_size_D
step_scale_G = original_batch_size_G / effective_batch_size_G
if not cfg.GAN.SOLVER.STEPS_D:
cfg.GAN.SOLVER.STEPS_D = cfg.GAN.SOLVER.STEPS
if not cfg.GAN.SOLVER.STEPS_G:
cfg.GAN.SOLVER.STEPS_G = cfg.GAN.SOLVER.STEPS
old_solver_steps_D = cfg.GAN.SOLVER.STEPS_D
old_solver_steps_G = cfg.GAN.SOLVER.STEPS_G
old_solver_steps_pre = cfg.GAN.SOLVER.STEPS_PRE
old_max_iter = cfg.GAN.SOLVER.MAX_ITER
old_max_iter_pre = cfg.GAN.SOLVER.PRE_ITER
cfg.GAN.SOLVER.STEPS_PRE = list(map(lambda x: int(x * step_scale_pre + 0.5), cfg.GAN.SOLVER.STEPS_PRE))
cfg.GAN.SOLVER.STEPS_D = list(map(lambda x: int(x * step_scale_D + 0.5), cfg.GAN.SOLVER.STEPS_D))
cfg.GAN.SOLVER.STEPS_G = list(map(lambda x: int(x * step_scale_G + 0.5), cfg.GAN.SOLVER.STEPS_G))
cfg.GAN.SOLVER.MAX_ITER_D = int(cfg.GAN.SOLVER.MAX_ITER * step_scale_D + 0.5)
cfg.GAN.SOLVER.MAX_ITER_G = int(cfg.GAN.SOLVER.MAX_ITER * step_scale_G + 0.5)
cfg.GAN.SOLVER.PRE_ITER = int(cfg.GAN.SOLVER.PRE_ITER * step_scale_pre + 0.5)
print('PRE: Adjust SOLVER.STEPS and SOLVER.MAX_ITER linearly based on effective_batch_size change:\n'
' SOLVER.STEPS: {} --> {}\n'
' SOLVER.MAX_ITER: {} --> {}'.format(old_solver_steps_pre, cfg.GAN.SOLVER.STEPS_PRE,
old_max_iter_pre, cfg.GAN.SOLVER.PRE_ITER))
print('DIS: Adjust SOLVER.STEPS and SOLVER.MAX_ITER linearly based on effective_batch_size change:\n'
' SOLVER.STEPS: {} --> {}\n'
' SOLVER.MAX_ITER: {} --> {}'.format(old_solver_steps_D, cfg.GAN.SOLVER.STEPS_D,
old_max_iter, cfg.GAN.SOLVER.MAX_ITER_D))
print('GEN: Adjust SOLVER.STEPS and SOLVER.MAX_ITER linearly based on effective_batch_size change:\n'
' SOLVER.STEPS: {} --> {}\n'
' SOLVER.MAX_ITER: {} --> {}'.format(old_solver_steps_G, cfg.GAN.SOLVER.STEPS_G,
old_max_iter, cfg.GAN.SOLVER.MAX_ITER_G))
if args.num_workers is not None:
cfg.DATA_LOADER.NUM_THREADS = args.num_workers
print('Number of data loading threads: %d' % cfg.DATA_LOADER.NUM_THREADS)
assert_and_infer_cfg(make_immutable=False)
timers = defaultdict(Timer)
# prepare flags
# for FAST R-CNN: rois are not sampled on the run. The flags therefore have to be passed to the actual dataloader
fake_dis_flag = [ModeFlags("fake", "discriminator") for _ in range(cfg.NUM_GPUS)]
real_dis_flag = [ModeFlags("real", "discriminator") for _ in range(cfg.NUM_GPUS)]
if not cfg.GAN.TRAIN.DATASETS_GEN:
fake_gen_flag = [ModeFlags("fake", "generator") for _ in range(cfg.NUM_GPUS)]
else:
fake_gen_flag = [ModeFlags("real_fake", "generator") for _ in range(cfg.NUM_GPUS)]
pre_flag = [ModeFlags("real", "pre") for _ in range(cfg.NUM_GPUS)]
##################################################################################################################
#################################### DATASETS and Loader Setup ##################################################
##################################################################################################################
timers['roidb_real'].tic()
roidb_real, ratio_list_real, ratio_index_real = combined_roidb_for_training(
cfg.GAN.TRAIN.DATASETS_REAL, cfg.GAN.TRAIN.PROPOSAL_FILES_REAL)
timers['roidb_real'].toc()
roidb_size_real = len(roidb_real)
logger.info('{:d} roidb entries'.format(roidb_size_real))
logger.info('Takes %.2f sec(s) to construct roidb', timers['roidb_real'].average_time)
# Effective training sample size for one epoch
train_size_D = roidb_size_real // args.batch_size_D * args.batch_size_D
batchSampler_pre = BatchSampler(
sampler=MinibatchSampler(ratio_list_real, ratio_index_real, cfg.GAN.TRAIN.IMS_PER_BATCH_PRE),
batch_size=args.batch_size_pre,
drop_last=True
)
dataset_pre = RoiDataLoader(
roidb_real,
cfg.MODEL.NUM_CLASSES,
training=True,
flags=pre_flag[0])
dataloader_pre = torch.utils.data.DataLoader(
dataset_pre,
batch_sampler=batchSampler_pre,
num_workers=cfg.DATA_LOADER.NUM_THREADS,
collate_fn=collate_minibatch_pre,
pin_memory=False)
dataiterator_pre = iter(dataloader_pre)
batchSampler_real_discriminator= BatchSampler(
sampler=MinibatchSampler(ratio_list_real, ratio_index_real, cfg.GAN.TRAIN.IMS_PER_BATCH_D),
batch_size=args.batch_size_D,
drop_last=True
)
dataset_real_discriminator = RoiDataLoader(
roidb_real,
cfg.MODEL.NUM_CLASSES,
training=True,
flags=real_dis_flag[0])
dataloader_real_discriminator = torch.utils.data.DataLoader(
dataset_real_discriminator,
batch_sampler=batchSampler_real_discriminator,
num_workers=cfg.DATA_LOADER.NUM_THREADS,
collate_fn=collate_minibatch_discriminator,
pin_memory=False)
dataiterator_real_discriminator = iter(dataloader_real_discriminator)
timers['roidb_fake'].tic()
roidb_fake, ratio_list_fake, ratio_index_fake = combined_roidb_for_training(
cfg.GAN.TRAIN.DATASETS_FAKE, cfg.GAN.TRAIN.PROPOSAL_FILES_FAKE)
timers['roidb_fake'].toc()
roidb_size_fake = len(roidb_fake)
logger.info('{:d} roidb entries'.format(roidb_size_fake))
logger.info('Takes %.2f sec(s) to construct roidb', timers['roidb_fake'].average_time)
# Effective training sample size for one epoch
train_size_G = roidb_size_fake // args.batch_size_G * args.batch_size_G
batchSampler_fake_discriminator = BatchSampler(
sampler=MinibatchSampler(ratio_list_fake, ratio_index_fake, cfg.GAN.TRAIN.IMS_PER_BATCH_D),
batch_size=args.batch_size_D,
drop_last=True
)
dataset_fake_discriminator = RoiDataLoader(
roidb_fake,
cfg.MODEL.NUM_CLASSES,
training=True,
flags=fake_dis_flag[0]
)
dataloader_fake_discriminator = torch.utils.data.DataLoader(
dataset_fake_discriminator,
batch_sampler=batchSampler_fake_discriminator,
num_workers=cfg.DATA_LOADER.NUM_THREADS,
collate_fn=collate_minibatch_discriminator,
pin_memory=False)
dataiterator_fake_discriminator = iter(dataloader_fake_discriminator)
# if no further dataets for training the generator are specified
# use the same dataset settings as for training the discriminator
# on fake samples
if not cfg.GAN.TRAIN.DATASETS_GEN:
batchSampler_fake_generator = BatchSampler(
sampler=MinibatchSampler(ratio_list_fake, ratio_index_fake, cfg.GAN.TRAIN.IMS_PER_BATCH_G),
batch_size=args.batch_size_G,
drop_last=True
)
dataset_fake_generator = RoiDataLoader(
roidb_fake,
cfg.MODEL.NUM_CLASSES,
training=True,
flags=fake_gen_flag[0]
)
dataloader_fake_generator = torch.utils.data.DataLoader(
dataset_fake_generator,
batch_sampler=batchSampler_fake_generator,
num_workers=cfg.DATA_LOADER.NUM_THREADS,
collate_fn=collate_minibatch_generator,
pin_memory=False)
dataiterator_fake_generator = iter(dataloader_fake_generator)
else:
timers['roidb_fake_gen'].tic()
roidb_fake_gen, ratio_list_fake_gen, ratio_index_fake_gen = combined_roidb_for_training(
cfg.GAN.TRAIN.DATASETS_GEN, cfg.GAN.TRAIN.PROPOSAL_FILES_GEN)
timers['roidb_fake_gen'].toc()
roidb_size_fake_gen = len(roidb_fake_gen)
logger.info('{:d} roidb entries'.format(roidb_size_fake_gen))
logger.info('Takes %.2f sec(s) to construct roidb', timers['roidb_fake_gen'].average_time)
batchSampler_fake_generator = BatchSampler(
sampler=MinibatchSampler(ratio_list_fake_gen, ratio_index_fake_gen, cfg.GAN.TRAIN.IMS_PER_BATCH_G),
batch_size=args.batch_size_G,
drop_last=True
)
dataset_fake_generator = RoiDataLoader(
roidb_fake_gen,
cfg.MODEL.NUM_CLASSES,
training=True,
flags=fake_gen_flag[0]
)
dataloader_fake_generator = torch.utils.data.DataLoader(
dataset_fake_generator,
batch_sampler=batchSampler_fake_generator,
num_workers=cfg.DATA_LOADER.NUM_THREADS,
collate_fn=collate_minibatch_generator,
pin_memory=False)
dataiterator_fake_generator = iter(dataloader_fake_generator)
##################################################################################################################
############################################# MODEL INITIALIZATION ##############################################
##################################################################################################################
# only load pre-trained discriminator explicitly specified
if args.load_pretrained and args.init_dis_pretrained:
gan = GAN()
elif cfg.GAN.TRAIN.PRETRAINED_WEIGHTS is not "":
if args.init_dis_pretrained:
gan = GAN(generator_weights=cfg.GAN.TRAIN.PRETRAINED_WEIGHTS,
discriminator_weights=cfg.GAN.TRAIN.PRETRAINED_WEIGHTS)
else:
gan = GAN(generator_weights=cfg.GAN.TRAIN.PRETRAINED_WEIGHTS)
else: # if Fast R-CNN, start with new model, but use pre-trained weights from config (on ImageNet)
gan = GAN()
if cfg.CUDA:
gan.cuda()
# Load checkpoint
# loading checkpoint is only possible for combined gan training
if args.load_ckpt:
load_name = args.load_ckpt
logger.info("loading checkpoint %s", load_name)
checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)
net_utils.load_ckpt(gan, checkpoint['model'])
del checkpoint
torch.cuda.empty_cache()
if args.load_pretrained and args.init_dis_pretrained:
logger.info("loading pretrained checkpoint %s", args.load_pretrained)
checkpoint = torch.load(args.load_pretrained, map_location=lambda storage, loc: storage)
net_utils.load_ckpt(gan, checkpoint['model'])
del checkpoint
torch.cuda.empty_cache()
##################################################################################################################
############################################# PARAMETER SETUP ##################################################
##################################################################################################################
# train discriminator only on adversarial branch
if cfg.GAN.TRAIN.TRAIN_FULL_DIS:
dis_params = gan.discriminator.named_parameters()
params_D = [{
'params': gan.discriminator.parameters(),
'lr': 0,
'weight_decay': cfg.GAN.SOLVER.WEIGHT_DECAY_D
}]
else:
dis_params = gan.discriminator.adversarial.named_parameters()
params_D = [{
'params': gan.discriminator.adversarial.parameters(),
'lr': 0,
'weight_decay': cfg.GAN.SOLVER.WEIGHT_DECAY_D
}]
param_names_D = []
for key, value in dis_params:
if value.requires_grad:
param_names_D.append(key)
logger.info("Parameters discriminator is trained on")
logger.info(param_names_D)
# pre-training in classical fashion with seperate groups for bias and non-bias parameters
params_list_pre = {
'bias_params': [],
'bias_param_names': [],
'nonbias_params': [],
'nonbias_param_names': [],
'nograd_param_names': []
}
# pre-train either on perceptual branch and/or Generator_block for Faster R-CNN
# or pre-train on perceptual branch and conv_body for fast r-cnn
if cfg.MODEL.FASTER_RCNN:
if cfg.GAN.TRAIN.PRE_TRAIN_GENERATOR:
pre_named_params = chain(gan.discriminator.Box_Head.named_parameters(),
gan.discriminator.Box_Outs.named_parameters(),
gan.generator.Generator_Block.named_parameters())
else:
pre_named_params = chain(gan.discriminator.Box_Head.named_parameters(),
gan.discriminator.Box_Outs.named_parameters())
else:
if cfg.GAN.TRAIN.PRE_TRAIN_GENERATOR:
pre_named_params = chain(gan.discriminator.Box_Head.named_parameters(),
gan.discriminator.Box_Outs.named_parameters(),
gan.generator.Conv_Body.named_parameters(),
gan.generator.Generator_Block.named_parameters()
)
else:
pre_named_params = chain(gan.discriminator.Box_Head.named_parameters(),
gan.discriminator.Box_Outs.named_parameters(),
gan.generator.Conv_Body.named_parameters())
for key, value in pre_named_params:
if value.requires_grad:
if 'bias' in key:
params_list_pre['bias_params'].append(value)
params_list_pre['bias_param_names'].append(key)
else:
params_list_pre['nonbias_params'].append(value)
params_list_pre['nonbias_param_names'].append(key)
else:
params_list_pre['nograd_param_names'].append(key)
params_pre = [
{'params': params_list_pre['nonbias_params'],
'lr': 0,
'weight_decay': cfg.GAN.SOLVER.WEIGHT_DECAY_PRE},
{'params': params_list_pre['bias_params'],
'lr': 0 * (cfg.GAN.SOLVER.BIAS_DOUBLE_LR_PRE + 1),
'weight_decay': cfg.GAN.SOLVER.WEIGHT_DECAY_PRE if cfg.GAN.SOLVER.BIAS_WEIGHT_DECAY_PRE else 0}
]
param_names_pre = [params_list_pre['nonbias_param_names'], params_list_pre['bias_param_names']]
logger.info("Parameters during pre-training")
logger.info(param_names_pre)
generator_params = gan.generator.Generator_Block.parameters()
generator_named_params = gan.generator.Generator_Block.named_parameters()
param_names_G = []
for key, value in generator_named_params:
if value.requires_grad:
param_names_G.append(key)
params_G = [
{'params': generator_params,
'lr': 0,
'weight_decay': cfg.GAN.SOLVER.WEIGHT_DECAY_G}
]
logger.info("Parameters generator is trained on")
logger.info(param_names_G)
# Optimizers
if cfg.GAN.SOLVER.TYPE_G == "SGD":
optimizer_G = torch.optim.SGD(params_G, momentum=cfg.GAN.SOLVER.MOMENTUM_G)
elif cfg.GAN.SOLVER.TYPE_G == "Adam":
optimizer_G = torch.optim.Adam(params_G)
else:
raise ValueError("INVALID Optimizer_G specified. Must be SGD or Adam!")
if cfg.GAN.SOLVER.TYPE_D == "SGD":
optimizer_D = torch.optim.SGD(params_D, momentum=cfg.GAN.SOLVER.MOMENTUM_D)
elif cfg.GAN.SOLVER.TYPE_D == "Adam":
optimizer_D = torch.optim.Adam(params_D)
else:
raise ValueError("INVALID Optimizer_D specified. Must be SGD or Adam!")
if cfg.GAN.SOLVER.TYPE_PRE == "SGD":
optimizer_pre = torch.optim.SGD(params_pre, momentum=cfg.GAN.SOLVER.MOMENTUM_PRE)
elif cfg.GAN.SOLVER.TYPE_PRE == "Adam":
optimizer_pre = torch.optim.Adam(params_pre)
else:
raise ValueError("INVALID Optimizer_pre specified. Must be SGD or Adam!")
lr_D = optimizer_D.param_groups[0]['lr'] # lr of non-bias parameters, for commmand line outputs.
lr_G = optimizer_G.param_groups[0]['lr']
lr_pre = optimizer_pre.param_groups[0]['lr']
if cfg.RPN.RPN_ON:
cpu_keys = ['im_info', 'roidb']
else:
cpu_keys = ['im_info', 'roidb', 'labels_int32', 'rois',
'bbox_targets', 'bbox_inside_weights', 'bbox_outside_weights']
gan = mynn.DataParallel(gan, cpu_keywords=cpu_keys,
minibatch=True)
##################################################################################################################
############################################# logger setup ##################################################
##################################################################################################################
args.run_name = misc_utils.get_run_name() + '_step'
output_dir = misc_utils.get_output_dir(args, args.run_name)
output_dir_pre = os.path.join(output_dir, 'pre')
args.cfg_filename = os.path.basename(args.cfg_file)
if not args.no_save:
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if not os.path.exists(output_dir_pre):
os.makedirs(output_dir_pre)
logging.info("Using output_dir: {}".format(output_dir))
blob = {'cfg': yaml.dump(cfg), 'args': args}
with open(os.path.join(output_dir, 'config_and_args.pkl'), 'wb') as f:
pickle.dump(blob, f, pickle.HIGHEST_PROTOCOL)
if args.use_tfboard:
from tensorboardX import SummaryWriter
# Set the Tensorboard logger
tblogger_dis = SummaryWriter(os.path.join(output_dir, 'log', 'dis'),
filename_suffix="_discriminator")
tblogger_dis_fake = SummaryWriter(os.path.join(output_dir, 'log', 'dis_fake'),
filename_suffix="_discriminator_fake")
tblogger_gen = SummaryWriter(os.path.join(output_dir, 'log', 'gen'),
filename_suffix="_generator")
tblogger_pre = SummaryWriter(os.path.join(output_dir_pre, 'log', 'pre'),
filename_suffix="_pre")
### Training Loop ###
gan.train()
CHECKPOINT_PERIOD = int(cfg.GAN.TRAIN.SNAPSHOT_ITERS / cfg.NUM_GPUS)
# Set index for decay steps
decay_steps_ind_D = None
decay_steps_ind_G = None
decay_steps_ind_pre = None
for i in range(1, len(cfg.GAN.SOLVER.STEPS_D)):
if cfg.GAN.SOLVER.STEPS_D[i] >= args.start_step:
decay_steps_ind_D = i
break
if decay_steps_ind_D is None:
decay_steps_ind_D = len(cfg.GAN.SOLVER.STEPS_D)
for i in range(1, len(cfg.GAN.SOLVER.STEPS_G)):
if cfg.GAN.SOLVER.STEPS_G[i] >= args.start_step:
decay_steps_ind_G = i
break
if decay_steps_ind_G is None:
decay_steps_ind_G = len(cfg.GAN.SOLVER.STEPS_G)
for i in range(1, len(cfg.GAN.SOLVER.STEPS_PRE)):
if cfg.GAN.SOLVER.STEPS_PRE[i] >= args.start_step:
decay_steps_ind_pre = i
break
if decay_steps_ind_pre is None:
decay_steps_ind_pre = len(cfg.GAN.SOLVER.STEPS_PRE)
training_stats_pre = TrainingStats(
args,
args.disp_interval,
cfg.GAN.SOLVER.PRE_ITER,
tblogger_pre if args.use_tfboard and not args.no_save else None)
# use maximum max_iter for training
max_iter = max(cfg.GAN.SOLVER.MAX_ITER_D, cfg.GAN.SOLVER.MAX_ITER_G)
##################################################################################################################
############################################# PRE-TRAINING-LOOP ##################################################
##################################################################################################################
try:
logger.info('Training starts !')
step = args.start_step
# prepare adv_targets for training
Tensor = torch.cuda.FloatTensor
batch_size = cfg.GAN.TRAIN.IMS_PER_BATCH_D * cfg.GAN.TRAIN.BATCH_SIZE_PER_IM_D
batch_size_gen = cfg.GAN.TRAIN.IMS_PER_BATCH_G * cfg.GAN.TRAIN.BATCH_SIZE_PER_IM_G
batch_size_pre = cfg.GAN.TRAIN.IMS_PER_BATCH_PRE * cfg.GAN.TRAIN.BATCH_SIZE_PER_IM_PRE
adv_target_real = [Variable(Tensor(batch_size, 1).fill_(cfg.GAN.MODEL.LABEL_SMOOTHING),
requires_grad=False) for _ in range(cfg.NUM_GPUS)]
adv_target_gen = [Variable(Tensor(batch_size_gen, 1).fill_(cfg.GAN.MODEL.LABEL_SMOOTHING),
requires_grad=False) for _ in range(cfg.NUM_GPUS)]
adv_target_pre = [Variable(Tensor(batch_size_pre, 1).fill_(cfg.GAN.MODEL.LABEL_SMOOTHING),
requires_grad=False) for _ in range(cfg.NUM_GPUS)]
adv_target_fake = [Variable(Tensor(batch_size, 1).fill_(0.0),
requires_grad=False) for _ in range(cfg.NUM_GPUS)]
# pre-training of perceptual branch
if not args.init_dis_pretrained:
logger.info('Pre-Training: training perceptual-branch on large objects')
for step in range(0, cfg.GAN.SOLVER.PRE_ITER):
# Warm up
# for simplicity: equal for generator and discriminator
if step < cfg.GAN.SOLVER.PRE_WARM_UP_ITERS:
method = cfg.GAN.SOLVER.WARM_UP_METHOD
if method == 'constant':
warmup_factor = cfg.GAN.SOLVER.WARM_UP_FACTOR
elif method == 'linear':
alpha = step / cfg.GAN.SOLVER.PRE_WARM_UP_ITERS
warmup_factor = cfg.GAN.SOLVER.WARM_UP_FACTOR * (1 - alpha) + alpha
else:
raise KeyError('Unknown SOLVER.WARM_UP_METHOD: {}'.format(method))
lr_new_pre = cfg.GAN.SOLVER.BASE_LR_PRE * warmup_factor
net_utils.update_learning_rate_gan(optimizer_pre, lr_pre, lr_new_pre, type='pre')
lr_pre = optimizer_pre.param_groups[0]['lr']
assert lr_pre == lr_new_pre
elif step == cfg.GAN.SOLVER.PRE_WARM_UP_ITERS :
net_utils.update_learning_rate_gan(optimizer_pre, lr_pre, cfg.GAN.SOLVER.BASE_LR_PRE, type='pre')
lr_pre = optimizer_pre.param_groups[0]['lr']
assert lr_pre == cfg.GAN.SOLVER.BASE_LR_PRE
# Learning rate decay
if decay_steps_ind_pre < len(cfg.GAN.SOLVER.STEPS_PRE) and \
step == cfg.GAN.SOLVER.STEPS_PRE[decay_steps_ind_pre]:
logger.info('Decay the learning (pre-training) on step %d', step)
lr_new_pre = lr_pre * cfg.GAN.SOLVER.GAMMA_PRE
net_utils.update_learning_rate_gan(optimizer_pre, lr_pre, lr_new_pre, type='pre')
lr_pre = optimizer_pre.param_groups[0]['lr']
assert lr_pre == lr_new_pre
decay_steps_ind_pre += 1
if cfg.DEBUG:
print("pre-training ...")
optimizer_pre.zero_grad()
training_stats_pre.IterTic()
input_data_pre, dataiterator_pre = create_input_data(
dataiterator_pre, dataloader_pre
)
input_data_pre.update({"flags": pre_flag,
"adv_target": adv_target_pre}
)
outputs_pre = gan(**input_data_pre)
# only train perceptual branch
# remove adv loss
training_stats_pre.UpdateIterStats(outputs_pre)
# train only on the Perceptual Branch
loss_pre = outputs_pre['losses']['loss_cls'] + outputs_pre['losses']['loss_bbox']
loss_pre.backward()
optimizer_pre.step()
training_stats_pre.IterToc()
training_stats_pre.LogIterStatsReal(step, lr=lr_pre)
del input_data_pre
del loss_pre
del outputs_pre
# CLEAN-UP !!
logger.info("clean-up after pre-training ...")
if args.use_tfboard and not args.no_save:
tblogger_pre.close()
del dataiterator_pre
del dataloader_pre
del batchSampler_pre
del dataset_pre
del training_stats_pre
del optimizer_pre
torch.cuda.empty_cache()
logger.info("clean-up finished.")
# save model after pre-training
final_model = save_ckpt_gan(output_dir_pre, args, step, train_size_gen=train_size_G, train_size_dis=train_size_D,
model=gan, optimizer_dis=optimizer_D, optimizer_gen=optimizer_G)
if args.testing_pre_training:
test_output_dir = os.path.join(output_dir_pre, 'testing')
logger.info("Testing model after pre-training")
test_pre_cfgs = [x for x in args.set_cfgs]
test_pre_cfgs.append('DEBUG_GAN')
test_pre_cfgs.append('True')
if final_model is not None:
if args.multi_gpu_testing:
args_test = Namespace(cfg_file='{}'.format(args.cfg_file),
load_ckpt='{}'.format(final_model),
load_dis=None, load_gen=None,
multi_gpu_testing=True, output_dir='{}'.format(test_output_dir),
range=None, set_cfgs=test_pre_cfgs, vis=False)
else:
args_test = Namespace(cfg_file='{}'.format(args.cfg_file),
load_ckpt='{}'.format(final_model),
load_dis=None, load_gen=None,
multi_gpu_testing=False, output_dir='{}'.format(test_output_dir),
range=None, set_cfgs=test_pre_cfgs, vis=False)
test_net_routine(args_test)
if args.quit_after_pre_training:
return
###################### testing pretrained loaded model #######################################################
if args.load_pretrained and args.init_dis_pretrained:
test_output_dir = os.path.join(output_dir_pre, 'testing_initialization')
test_pre_cfgs = [x for x in args.set_cfgs]
test_pre_cfgs.append('DEBUG_GAN')
test_pre_cfgs.append('True')
if final_model is not None:
if args.multi_gpu_testing:
args_test = Namespace(cfg_file='{}'.format(args.cfg_file),
load_ckpt='{}'.format(final_model),
load_dis=None, load_gen=None,
multi_gpu_testing=True, output_dir='{}'.format(test_output_dir),
range=None, set_cfgs=test_pre_cfgs, vis=False)
else:
args_test = Namespace(cfg_file='{}'.format(args.cfg_file),
load_ckpt='{}'.format(final_model),
load_dis=None, load_gen=None,
multi_gpu_testing=False, output_dir='{}'.format(test_output_dir),
range=None, set_cfgs=test_pre_cfgs, vis=False)
test_net_routine(args_test)
torch.cuda.empty_cache()
##################################################################################################################
################################# Combined Training loop ###############################################
##################################################################################################################
training_stats_dis = TrainingStats(
args,
args.disp_interval,
max_iter,
tblogger_dis if args.use_tfboard and not args.no_save else None)
training_stats_dis_fake = TrainingStats(
args,
args.disp_interval,
max_iter,
tblogger_dis_fake if args.use_tfboard and not args.no_save else None)
training_stats_gen = TrainingStats(
args,
args.disp_interval,
max_iter,
tblogger_gen if args.use_tfboard and not args.no_save else None)
logger.info('Combined GAN-training starts now!')
for step in range(args.start_step, max_iter):
# Warm up
# for simplicity: equal for generator and discriminator
if step < cfg.GAN.SOLVER.WARM_UP_ITERS:
method = cfg.GAN.SOLVER.WARM_UP_METHOD
if method == 'constant':
warmup_factor = cfg.GAN.SOLVER.WARM_UP_FACTOR
elif method == 'linear':
alpha = step / cfg.GAN.SOLVER.WARM_UP_ITERS
warmup_factor = cfg.GAN.SOLVER.WARM_UP_FACTOR * (1 - alpha) + alpha
else:
raise KeyError('Unknown SOLVER.WARM_UP_METHOD: {}'.format(method))
lr_new_D = cfg.GAN.SOLVER.BASE_LR_D * warmup_factor
lr_new_G = cfg.GAN.SOLVER.BASE_LR_G * warmup_factor
net_utils.update_learning_rate_gan(optimizer_D, lr_D, lr_new_D, type='dis')
net_utils.update_learning_rate_gan(optimizer_G, lr_G, lr_new_G, type='gen')
lr_D = optimizer_D.param_groups[0]['lr']
lr_G = optimizer_G.param_groups[0]['lr']
assert lr_D == lr_new_D
assert lr_G == lr_new_G
elif step == cfg.GAN.SOLVER.WARM_UP_ITERS:
net_utils.update_learning_rate_gan(optimizer_D, lr_D, cfg.GAN.SOLVER.BASE_LR_D, type="dis")
net_utils.update_learning_rate_gan(optimizer_G, lr_G, cfg.GAN.SOLVER.BASE_LR_G, type="gen")
lr_D = optimizer_D.param_groups[0]['lr']
lr_G = optimizer_G.param_groups[0]['lr']
assert lr_D == cfg.GAN.SOLVER.BASE_LR_D
assert lr_G == cfg.GAN.SOLVER.BASE_LR_G
# Learning rate decay
if decay_steps_ind_D < len(cfg.GAN.SOLVER.STEPS_D) and \
step == cfg.GAN.SOLVER.STEPS_D[decay_steps_ind_D]:
logger.info('Decay the learning (discriminator) on step %d', step)
lr_new_D = lr_D * cfg.GAN.SOLVER.GAMMA_D
net_utils.update_learning_rate_gan(optimizer_D, lr_D, lr_new_D, type="dis")
lr_D = optimizer_D.param_groups[0]['lr']
assert lr_D == lr_new_D
decay_steps_ind_D += 1
if decay_steps_ind_G < len(cfg.GAN.SOLVER.STEPS_G) and \
step == cfg.GAN.SOLVER.STEPS_G[decay_steps_ind_G]:
logger.info('Decay the learning (generator) on step %d', step)
lr_new_G = lr_G * cfg.GAN.SOLVER.GAMMA_G
net_utils.update_learning_rate_gan(optimizer_G, lr_G, lr_new_G, type="gen")
lr_G = optimizer_G.param_groups[0]['lr']
assert lr_G == lr_new_G
decay_steps_ind_G += 1
#################### training discrriminator ############################
training_stats_dis.IterTic()
training_stats_dis_fake.IterTic()
for _ in range(cfg.GAN.TRAIN.k):
optimizer_D.zero_grad()
# train on fake data
if cfg.DEBUG:
print("training on fake data ...")
input_data, dataiterator_fake_discriminator = create_input_data(
dataiterator_fake_discriminator, dataloader_fake_discriminator
)
input_data.update({"flags": fake_dis_flag,
"adv_target": adv_target_fake}
)
outputs_fake = gan(**input_data)
# train on real data
input_data, dataiterator_real_discriminator = create_input_data(
dataiterator_real_discriminator, dataloader_real_discriminator
)
if cfg.DEBUG:
print("training on real data ...")
input_data.update({"flags": real_dis_flag,
"adv_target": adv_target_real}
)
outputs_real = gan(**input_data)
training_stats_dis.UpdateIterStats(out=outputs_real)
training_stats_dis_fake.UpdateIterStats(out=outputs_fake)
if cfg.GAN.TRAIN.TRAIN_FULL_DIS:
loss_fake = cfg.GAN.TRAIN.ADV_LOSS_WEIGHT * outputs_fake['losses']['loss_adv']
loss_fake += outputs_fake['losses']['loss_cls']
loss_fake += outputs_fake['losses']['loss_bbox']
loss_real = cfg.GAN.TRAIN.ADV_LOSS_WEIGHT * outputs_real['losses']['loss_adv']
loss_real += outputs_real['losses']['loss_cls']
loss_real += outputs_real['losses']['loss_bbox']
else:
# adversarial loss for discriminator
if cfg.DEBUG:
print("train discriminator only on adversarial loss")
loss_fake = outputs_fake['losses']['loss_adv']
loss_real = outputs_real['losses']['loss_adv']
loss_D = loss_real + loss_fake
loss_D.backward()
optimizer_D.step()
training_stats_dis.tb_log_stats(training_stats_dis.GetStats(step, lr_D), step)
training_stats_dis_fake.tb_log_stats(training_stats_dis_fake.GetStats(step, lr_D), step)
# clean-up to save memory
if args.online_cleanup:
del loss_D
del loss_real
del loss_fake
del outputs_fake
del outputs_real
del input_data
torch.cuda.empty_cache()
#################### training generator #################################
training_stats_dis.IterToc()
training_stats_dis_fake.IterToc()
optimizer_G.zero_grad()
training_stats_gen.IterTic()
input_data, dataiterator_fake_generator = create_input_data(
dataiterator_fake_generator, dataloader_fake_generator
)
input_data.update({"flags": fake_gen_flag,
"adv_target": adv_target_gen}
)
outputs = gan(**input_data)
training_stats_gen.UpdateIterStats(out=outputs)
# train generator on Faster R-CNN loss and adversarial loss
if cfg.GAN.TRAIN.TRANSFER_LEARNING:
loss_G = outputs['losses']['loss_adv']
else:
if cfg.DEBUG:
print("train generator on combined loss")
loss_G = outputs['losses']['loss_cls'] + outputs['losses']['loss_bbox']
loss_G += cfg.GAN.TRAIN.ADV_LOSS_WEIGHT * outputs['losses']['loss_adv']
loss_G.backward()
optimizer_G.step()
training_stats_gen.IterToc()
log_gan_stats_combined(step, lr_gen=lr_G, lr_dis=lr_D,
training_stats_dis=training_stats_dis,
training_stats_dis_fake=training_stats_dis_fake,
training_stats_gen=training_stats_gen)
training_stats_gen.tb_log_stats(training_stats_gen.GetStats(step, lr_G), step)
if args.online_cleanup:
# clean-up to save memory
del loss_G
del input_data
del outputs
torch.cuda.empty_cache()
if (step+1) % CHECKPOINT_PERIOD == 0:
save_ckpt_gan(output_dir, args, step, train_size_gen=train_size_G, train_size_dis=train_size_D,
model=gan, optimizer_dis=optimizer_D, optimizer_gen=optimizer_G)
####################### Training ends #################################
# Save last checkpoint
final_model = save_ckpt_gan(output_dir, args, step, train_size_gen=train_size_G, train_size_dis=train_size_D,
model=gan, optimizer_dis=optimizer_D, optimizer_gen=optimizer_G)
logger.info("Closing dataloader and tfboard if used")
if args.use_tfboard and not args.no_save:
tblogger_dis.close()
tblogger_dis_fake.close()
tblogger_gen.close()
del training_stats_dis
del training_stats_gen
del training_stats_dis_fake
# cleanup
del gan
del dataiterator_real_discriminator
del dataiterator_fake_discriminator
del dataiterator_fake_generator
del dataloader_fake_discriminator
del dataloader_fake_generator
del dataloader_real_discriminator
del batchSampler_fake_discriminator
del batchSampler_fake_generator
del batchSampler_real_discriminator
del dataset_fake_discriminator
del dataset_real_discriminator
del dataset_fake_generator
del optimizer_G
del optimizer_D
torch.cuda.empty_cache()
except (RuntimeError, KeyboardInterrupt):
del dataiterator_real_discriminator
del dataiterator_fake_discriminator
del dataiterator_fake_generator
logger.info('Save ckpt on exception ...')
save_ckpt_gan(output_dir, args, step, train_size_gen=train_size_G, train_size_dis=train_size_D,
model=gan, optimizer_dis=optimizer_D, optimizer_gen=optimizer_G)
logger.info('Save ckpt done.')
stack_trace = traceback.format_exc()
print(stack_trace)
logger.info("Closing dataloader and tfboard if used")
if args.use_tfboard and not args.no_save:
tblogger_gen.close()
tblogger_dis.close()
tblogger_dis.close()
logger.info('Aborted training.')
return
############## Testing final model ##########################################
logger.info('Finished training.')
time.sleep(5) # sleep some time to make sure that cache is free for testing
logger.info("Start testing final model")
test_output_dir = os.path.join(output_dir, 'testing')
if not os.path.exists(test_output_dir) and not args.no_save:
os.makedirs(test_output_dir)
if final_model is not None:
args.set_cfgs.append('DEBUG_GAN')
args.set_cfgs.append('False')
if args.multi_gpu_testing:
args_test = Namespace(cfg_file='{}'.format(args.cfg_file),
load_ckpt='{}'.format(final_model),
load_dis=None, load_gen=None,
multi_gpu_testing=True, output_dir='{}'.format(test_output_dir),
range=None, set_cfgs=args.set_cfgs, vis=False)
else:
args_test = Namespace(cfg_file='{}'.format(args.cfg_file),
load_ckpt='{}'.format(final_model),
load_dis=None, load_gen=None,
multi_gpu_testing=False, output_dir='{}'.format(test_output_dir),
range=None, set_cfgs=args.set_cfgs, vis=False)
test_net_routine(args_test) | 5,328,874 |
def test_error_responses(run_response, expected_exception_class, acl_tool):
    """An icacls run that completes with a non-zero exit status must raise
    the exception class mapped to that status."""
    with patch("icaclswrap.foldertool.subprocess.run") as fake_run:
        fake_run.return_value = run_response
        with pytest.raises(expected_exception_class):
            acl_tool.set_rights(path=r"\test", username="testuser",
                                rights_collection=FULL_ACCESS)
def unknown_cruise_distance(segment):
    """Copy the solver's unknown cruise distance onto the tagged cruise segment.

    This lets the mission solver vary the cruise distance so the vehicle
    can land at a prescribed landing weight.

    Assumptions:
    N/A
    Source:
    N/A
    Inputs:
    segment.cruise_tag                          [string]
    segment.state.unknowns.cruise_distance      [meters]
    Outputs:
    segment.segments[segment.cruise_tag].distance [meters]
    Properties Used:
    N/A
    """
    # Look up the cruise sub-segment by tag and apply the unknown.
    cruise_segment = segment.segments[segment.cruise_tag]
    cruise_segment.distance = segment.state.unknowns.cruise_distance
    return
def send_packet_to_capture_last_one():
    """
    Build a closure that emits one extra dummy ARP frame on a raw socket.

    Since we read packets from stdout of tcpdump, we do not know when a
    packet is finished; call the returned function after all interesting
    packets were sent so the capture flushes the last real one.
    """
    def _send():
        net_conf = get_netconfig()
        raw_sock = socket(AF_PACKET, SOCK_RAW)
        raw_sock.bind((net_conf.dev.name, 0))
        # Fixed dummy addresses -- the frame content is irrelevant, it
        # only has to traverse the interface.
        dst_mac = MAC("22:22:22:22:22:22")
        src_mac = MAC("11:11:11:11:11:11")
        src_ip = IP("192.168.69.10")
        dst_ip = IP("192.168.69.20")
        frame = arp_packet(dst_mac, src_mac, 2, src_mac, src_ip, dst_mac, dst_ip)
        raw_sock.send(frame)
        time.sleep(0.05)
    return _send
def diss(
    demos: Demos,
    to_concept: Identify,
    to_chain: MarkovChainFact,
    competency: CompetencyEstimator,
    lift_path: Callable[[Path], Path] = lambda x: x,
    n_iters: int = 25,
    reset_period: int = 5,
    cooling_schedule: Callable[[int], float] | None = None,
    size_weight: float = 1.0,
    surprise_weight: float = 1.0,
    sgs_temp: float = 2.0,
    synth_timeout: int = 15,
    example_drop_prob: float = 0.0,
) -> Iterable[tuple[LabeledExamples, Optional[Concept]]]:
    """Perform demonstration informed gradiented guided search.

    Simulated-annealing style loop: each iteration proposes a labeled
    example set, synthesizes a concept from it (bounded by a SIGALRM
    timeout of `synth_timeout` seconds), scores it by a weighted sum of
    concept size and surprisal, and accepts/rejects the proposal with a
    Metropolis criterion at temperature `cooling_schedule(t)`. Every
    `reset_period` iterations the search restarts from `reset(...)`.

    NOTE(review): each iteration actually yields a 3-tuple
    (proposed_examples, concept, metadata), which does not match the
    declared 2-tuple return annotation -- confirm and fix upstream.

    NOTE(review): uses SIGALRM, so this only works on Unix in the main
    thread, and it overwrites any previously installed SIGALRM handler.
    """
    if cooling_schedule is None:
        # Default linear schedule: temperature falls from ~101 at t=0 to 1
        # at t=n_iters.
        def cooling_schedule(t: int) -> float:
            return 100*(1 - t / n_iters) + 1
    sggs = GradientGuidedSampler.from_demos(
        demos=demos,
        to_chain=to_chain,
        competency=competency,
        temp=sgs_temp,
    )
    # Synthesis timeout: the alarm interrupts to_concept via this exception.
    def handler(signum, frame):
        raise ConceptIdException
    signal.signal(signal.SIGALRM, handler)
    # Keep an example with probability 1 - example_drop_prob; the 0.0/1.0
    # fast paths avoid consuming randomness in the degenerate cases.
    def drop_pred(example):
        if example_drop_prob == 0.0:
            return True
        elif example_drop_prob == 1.0:
            return False
        return example_drop_prob <= random.random()
    weights = np.array([size_weight, surprise_weight])
    concept2energy = {}  # Concepts seen so far + associated energies.
    concept2data = {}  # Concepts seen so far + associated data.
    # energy starts at +inf so the first scored proposal is always accepted.
    energy, new_data = float('inf'), LabeledExamples()
    for t in range(n_iters):
        temp = cooling_schedule(t)
        # Sample from proposal distribution.
        if (t % reset_period) == 0:  # Reset to best example set.
            concept = None
            proposed_examples = reset(temp, concept2energy, concept2data)
        else:
            # Drop examples with some probability.
            # NOTE: `examples` is first bound on the first accepted
            # proposal below; this branch is safe because iteration 0 is
            # a reset iteration and is always accepted (energy == inf).
            examples2 = LabeledExamples(
                positive=filter(drop_pred, examples.positive),
                negative=filter(drop_pred, examples.negative),
            )
            proposed_examples = examples2 @ new_data
        try:
            signal.alarm(synth_timeout)
            concept = to_concept(proposed_examples, concept=concept)
            signal.alarm(0)  # Unset alarm.
            # Remember the first example set that produced this concept.
            concept2data.setdefault(concept, proposed_examples)
        except ConceptIdException:
            new_data = LabeledExamples()  # Reject: New data caused problem.
            signal.alarm(0)  # Unset alarm.
            continue
        # Conjecture new labeled examples for this concept and score it.
        new_data, metadata = sggs(concept)
        new_data = new_data.map(lift_path)
        new_energy = weights @ [concept.size, metadata['surprisal']]
        metadata |= {
            'energy': new_energy,
            'conjecture': new_data,
            'data': proposed_examples,
        }
        yield (proposed_examples, concept, metadata)
        # DISS Bookkeeping for resets.
        concept2energy[concept] = new_energy
        # Accept/Reject proposal based on energy delta (Metropolis rule).
        dE = new_energy - energy
        if (dE < 0) or (np.exp(-dE / temp) > np.random.rand()):
            energy, examples = new_energy, proposed_examples  # Accept.
        else:
            new_data = LabeledExamples()  # Reject.
def expand_gelu(expand_info):
    """Gelu expander: lowers Gelu into basic ops.

    Builds the tanh-approximation of GELU in a numerically stable
    exp-based form, computing in float32 and casting back if the input
    is float16.
    """
    # Unpack the single input tensor description.
    x_desc = expand_info['input_desc'][0]
    gb = builder.GraphBuilder()
    # Generate the replacement graph.
    with gb.graph_scope('main') as scope:
        x = gb.tensor(x_desc['shape'], x_desc['data_type'], x_desc['format'])
        orig_dtype = x.dtype
        if orig_dtype == 'float16':
            x = gb.emit('Cast', [x], attrs={'dst_type': 'float32'})
        # u = CSVALUE_A * (x + CSVALUE * x^3)  (the tanh argument, scaled)
        x_sq = gb.emit('Mul', [x, x])
        x_cube = gb.emit('Mul', [x_sq, x])
        cube_coeff = gb.value(x_cube.dtype, CSVALUE, x_desc['format'])
        scaled_cube = gb.emit('Mul', [x_cube, cube_coeff])
        inner = gb.emit('TensorAdd', [x, scaled_cube])
        a_coeff = gb.value(inner.dtype, CSVALUE_A, x_desc['format'])
        u = gb.emit('Mul', [inner, a_coeff])
        # Stable evaluation: exp(min(u, 0)) * x / (1 + exp(-|u|))
        zero = gb.value(u.dtype, 0.0, x_desc['format'])
        u_clipped = gb.emit('Minimum', [u, zero])
        right_factor = gb.emit('Exp', [u_clipped])
        u_abs = gb.emit('Abs', [u])
        neg_one = gb.value(u_abs.dtype, -1.0, x_desc['format'])
        neg_abs = gb.emit('Mul', [u_abs, neg_one])
        exp_neg_abs = gb.emit('Exp', [neg_abs])
        one = gb.value(exp_neg_abs.dtype, 1.0, x_desc['format'])
        denom = gb.emit('TensorAdd', [exp_neg_abs, one])
        left_factor = gb.emit('RealDiv', [x, denom])
        result = gb.emit('Mul', [left_factor, right_factor])
        if orig_dtype == 'float16':
            result = gb.emit('Cast', [result], attrs={'dst_type': 'float16'})
        # Register the graph output.
        scope.set_output(result)
    graph = gb.get()[0]
    return graph
def cache_mixin(cache, session):
    """Build a CacheMixin subclass bound to *cache* and *session*."""
    event_hook = EventHook([cache], session)
    class _Cache(CacheMixinBase):
        # Class-level bindings shared by every model using this mixin.
        _hook = event_hook
        _cache_client = cache
        _db_session = session
    return _Cache
def _read_output(path):
    """Read CmdStan output csv file.

    Parameters
    ----------
    path : str

    Returns
    -------
    Dict[str, Any]
    """
    columns, data, comments = _read_output_file(path)
    config = _process_configuration(comments)
    # Number of warmup draws actually stored in the file (0 unless
    # save_warmup was enabled), accounting for thinning.
    n_warmup = (
        int(config.get("save_warmup", 0))
        * int(config.get("num_warmup", 0))
        // int(config.get("thin", 1))
    )
    warmup_draws, posterior_draws = data[:n_warmup], data[n_warmup:]
    # Sampler statistics columns follow the "name__" convention.
    stats_cols = {name: idx for name, idx in columns.items() if name.endswith("__")}
    draw_cols = {name: idx for name, idx in columns.items() if name not in stats_cols}
    return {
        "sample": posterior_draws,
        "sample_warmup": warmup_draws,
        "sample_columns": draw_cols,
        "sample_stats_columns": stats_cols,
        "configuration_info": config,
    }
def linear_chance_constraint_noinit(a,M,N,risk,num_gpcpoly,n_states,n_uncert,p):
"""
Pr{a^\Top x + b \leq 0} \geq 1-eps
Converts to SOCP
"""
a_hat = np.kron(a.T,M)
a_dummy = np.zeros((n_states,n_states))
for ii in range(n_states):
a_dummy[ii,ii] = a[ii,0]
#print(a_dummy)
U = np.kron(a_dummy,np.identity(num_gpcpoly))
# Sigma_det = U*N*N.T*U.T
Sigma_det = N.T*U.T
return np.reshape(np.round(np.array(a_hat,dtype=float),5),num_gpcpoly*n_states), np.round(np.array(Sigma_det,dtype=float),5) | 5,328,882 |
def patroni(response: responses.Response,
            session: sqlalchemy.orm.Session = fastapi.Depends(models.patroni.get_session)):
    """
    Returns a health check for the reachability of the Patroni database.
    """
    health_report = db_health(response, session, 'Patroni')
    return health_report
def authorization_code_grant_step1(request):
    """
    Shortcut for step 1 of the authorization-code grant; the returned
    value is the authorization result (URL carrying the code).
    """
    oauth_request = oauth2_request_class()(request)
    code_grant = CodeGrant(oauth2_server, oauth_request)
    return code_grant.authorization()
def test_weighted_means(gid, resolution, excl_dict, time_series):
    """
    Test SupplyCurvePoint exclusion-weighted mean calculation against a
    hand-computed reference.
    """
    with SupplyCurvePoint(gid, F_EXCL, TM_DSET, excl_dict=excl_dict,
                          resolution=resolution) as sc_point:
        out_shape = (sc_point._gids.max() + 1, )
        if time_series:
            out_shape = (time_series, ) + out_shape
        data = np.random.random(out_shape)
        means = sc_point.exclusion_weighted_mean(data.copy())
        incl_weights = sc_point.include_mask_flat[sc_point.bool_mask]
        weight_sum = incl_weights.sum()
        gids = sc_point._gids[sc_point.bool_mask]
        if data.ndim == 2:
            assert means.shape[0] == out_shape[0]
            weighted = data[:, gids] * incl_weights
            # Compare only the first timestep against the first mean.
            weighted = weighted[0]
            means = means[0]
        else:
            weighted = data[gids] * incl_weights
        expected = weighted.sum() / weight_sum
        assert np.allclose(expected, means, rtol=RTOL)
async def activity(
    guild_id: int,
    discord_id: int,
    activity_input: DestinyActivityInputModel,
    db: AsyncSession = Depends(get_db_session),
):
    """Return information about the user their stats in the supplied activity ids"""
    profile = await discord_users.get_profile_from_discord_id(discord_id)
    destiny_activities = DestinyActivities(db=db, user=profile)
    # Refresh the user's stored activity data before computing stats.
    await destiny_activities.update_activity_db()
    stats = await destiny_activities.get_activity_stats(
        activity_ids=activity_input.activity_ids,
        mode=activity_input.mode,
        character_class=activity_input.character_class,
        character_ids=activity_input.character_ids,
        start_time=activity_input.start_time,
        end_time=activity_input.end_time,
    )
    return stats
def patch_diff_tarfile( base_path, diff_tarfile, restrict_index=() ):
    """Patch given Path object using delta tarfile (as in tarfile.TarFile)

    If restrict_index is set, ignore any deltas in diff_tarfile that
    don't start with restrict_index.
    """
    if base_path.exists():
        base_iter = selection.Select( base_path ).set_iter()
    else:
        base_iter = empty_iter() # probably untarring full backup
    delta_iter = difftar2path_iter( diff_tarfile )
    if restrict_index:
        delta_iter = filter_path_iter( delta_iter, restrict_index )
    ITR = IterTreeReducer( PathPatcher, [base_path] )
    for basis_path, diff_ropath in diffdir.collate2iters( base_iter, delta_iter ):
        # Log/index off the basis path when present, else the delta path.
        source = basis_path if basis_path else diff_ropath
        rel_path = source.get_relative_path()
        log.Info(_("Patching %s") % (util.ufn(rel_path)),
                 log.InfoCode.patch_file_patching,
                 util.escape( rel_path ))
        ITR( source.index, basis_path, diff_ropath )
    ITR.Finish()
    base_path.setdata()
def put_topoverlays(image, rects, alpha=0.3):
    """Blend translucent marker boxes on top of an image.

    Each input rectangle is widened to the right (by 70% of its width)
    and its far edge shifted upward (by 20% of its height); the boxes
    are drawn filled (100, 100, 0) with a (0, 100, 255) border on a
    scratch canvas that is alpha-blended into the source image.

    Args:
        image: an opencv image with format of BGR
        rects: a list of opencv rectangles as (x1, y1, x2, y2)
        alpha: a float, blend level
    Return:
        Tuple of (blended opencv image, list of drawn overlay boxes
        as [x1, y1, x2, y2]).
    """
    height, width, _ = image.shape
    canvas = np.ones(shape=image.shape).astype(np.uint8)
    overlay_bboxs = []
    for rect in rects:
        x1 = int(rect[0])
        # Stretch the box horizontally by 70% of its width, clamped to the image edge.
        x2 = int(min(rect[0] + 1.7 * (rect[2] - rect[0]), width))
        y1 = int(rect[1])
        # Shift the far edge upward by 20% of the box height, clamped at 0.
        y2 = int(max(rect[1] - 0.2 * (rect[3] - rect[1]), 0))
        overlay_bboxs.append([x1, y1, x2, y2])
        cv2.rectangle(canvas, (x1, y1), (x2, y2), (100, 100, 0), -1)
        cv2.rectangle(canvas, (x1, y1), (x2, y2), (0, 100, 255), 2)
    image = cv2.addWeighted(canvas, alpha, image, 1 - alpha, 0, image)
    return image, overlay_bboxs
def extract_next_token(link):
    """Use with paginated endpoints for extracting
    token which points to next page of data.

    ``link`` is expected to look like '<url?token=...>; rel="next"'.
    """
    # Keep the URL portion and drop the angle-bracket wrapping.
    url = link.split(";")[0].strip("<>")
    raw_token = url.split("?token=")[1]
    # token is already quoted we have to unqoute so it can be passed to params
    return unquote(raw_token)
def get_host_country(host_ip):
    """Look up the country of the target IP via the ipinfo.io API.

    Args:
        host_ip: IP address string to geolocate.

    Returns:
        The country code reported by ipinfo.io, or 'NOT DEFINED' when the
        lookup fails for any reason (network error, bad JSON, missing key).
    """
    country = 'NOT DEFINED'
    try:
        # Use a context manager so the HTTP connection is always closed
        # (the original leaked the response object), and bound the wait
        # so a dead endpoint cannot hang the caller indefinitely.
        with urllib.request.urlopen(f'https://ipinfo.io/{host_ip}', timeout=10) as response:
            response_data = json.loads(response.read().decode('utf8'))
        country = response_data['country']
    except Exception:
        # Deliberate best-effort lookup: any failure keeps the default value.
        pass
    return country
def feed_many_stdins(fp, processes):
    """Broadcast the full contents of an input file to several stdins.

    :param fp: input file
    :param processes: list of processes to be written to.
    """
    chunk_size = 8192
    while True:
        chunk = fp.read(chunk_size)
        if not chunk:  # EOF — an empty read ends the copy loop
            break
        # Every process receives an identical copy of the chunk.
        for proc in processes:
            proc.stdin.write(chunk)
def astng_wrapper(func, modname):
"""wrapper to give to ASTNGManager.project_from_files"""
print 'parsing %s...' % modname
try:
return func(modname)
except ASTNGBuildingException, exc:
print exc
except Exception, exc:
import traceback
traceback.print_exc() | 5,328,892 |
def clean(s):
    """Clean text!

    Collapses repeated vowels via ``liblang.fixRepetedVowel`` and then
    applies the ``patList`` pattern rules to the result.
    """
    vowel_fixed = liblang.fixRepetedVowel(s)
    return patList.do(vowel_fixed)
def extract_narr_aux_data(espa_metadata, aux_path):
    """Extracts the required NARR data from the auxiliary archive

    Args:
        espa_metadata <espa.Metadata>: The metadata structure for the scene
        aux_path <str>: Path to base auxiliary (NARR) data

    Raises:
        Exception: when a required header or GRIB file is absent.
    """
    logger = logging.getLogger(__name__)

    # Dates bracketing the scene acquisition time (first element unused here).
    (dummy, t0_date, t1_date) = util.NARR.dates(espa_metadata)

    logger.info('Before Date = {}'.format(str(t0_date)))
    logger.info('  After Date = {}'.format(str(t1_date)))

    for aux_set in aux_filenames(aux_path, PARMS_TO_EXTRACT,
                                 t0_date, t1_date):
        logger.info('Using {0}'.format(aux_set.hdr))
        logger.info('Using {0}'.format(aux_set.grb))

        # Both the header and the GRIB payload must exist before extraction.
        have_all_files = (os.path.exists(aux_set.hdr)
                          and os.path.exists(aux_set.grb))
        if not have_all_files:
            raise Exception('Required ST AUX files are missing')

        extract_from_grib(aux_set)
def filename(s, errors="strict"):
    """Same as force_unicode(s, sys.getfilesystemencoding(), errors)
    """
    fs_encoding = sys.getfilesystemencoding()
    return force_unicode(s, fs_encoding, errors)
def elastic_transform(image, alpha=1000, sigma=30, spline_order=1, mode='nearest', random_state=np.random):
    """Elastic deformation of image as described in [Simard2003]_.

    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and
       Recognition, 2003.

    The input must be an H x W x C array; every channel is warped with the
    same smoothed random displacement field.
    """
    assert image.ndim == 3
    rows, cols = image.shape[:2]

    def _displacement_field():
        # Uniform noise in [-1, 1) smoothed by a Gaussian, scaled by alpha.
        noise = random_state.rand(rows, cols) * 2 - 1
        return gaussian_filter(noise, sigma, mode="constant", cval=0) * alpha

    # Draw dx before dy so the RNG is consumed in the same order as before.
    dx = _displacement_field()
    dy = _displacement_field()

    grid_x, grid_y = np.meshgrid(np.arange(rows), np.arange(cols), indexing='ij')
    coords = [np.reshape(grid_x + dx, (-1, 1)), np.reshape(grid_y + dy, (-1, 1))]

    warped = np.empty_like(image)
    for channel in range(image.shape[2]):
        warped[:, :, channel] = map_coordinates(
            image[:, :, channel], coords, order=spline_order, mode=mode
        ).reshape((rows, cols))
    return warped
def format_search_log(json_string):
    """Return the sorted list of attributes selected by a logged search query.

    usage example {{ model_object|format_search_log }}

    Args:
        json_string: JSON document of an Elasticsearch-style query whose
            ``_source`` entry lists the selected attribute names.

    Returns:
        The ``_source`` attribute names sorted alphabetically, or an empty
        list when the key is absent or null.
    """
    query_json = json.loads(json_string)
    # Fall back to [] so a malformed log entry cannot break template
    # rendering (the original crashed with TypeError on a missing key).
    # Also removed: a 'context' dict that was built but never used.
    return sorted(query_json.get('_source') or [])
def set_variable(value, variable=None):
    """Load some value into session memory by creating a new variable.

    If an existing variable is given, load the value into the given variable.
    """
    sess = get_session()
    if variable is None:
        # No target supplied: create a fresh variable and initialize it.
        variable = tf.Variable(initial_value=value)
        sess.run([tf.variables_initializer([variable])])
    else:
        # Reuse the caller's variable and overwrite its contents in-place.
        sess.run([tf.assign(variable, value)])
    return variable
def matrix(mtrx, i, j):
    """Get i,j element from matrix-like object, trying several access protocols.

    >>> mtrx = ...
    >>> value = matrix ( mtrx , 1 , 2 )

    Tries, in order: ROOT.TMatrix access, callable ``mtrx(i, j)``,
    numpy-style ``mtrx[i, j]``, and nested ``mtrx[i][j]``.

    Raises:
        TypeError: when none of the access protocols succeed.
    """
    # ROOT matrices: bounds-check before the call-style access.
    if isinstance(mtrx, ROOT.TMatrix):
        if i < mtrx.GetNrows() and j < mtrx.GetNcols():
            return mtrx(i, j)
    # Callable protocol: m(i, j).
    if callable(mtrx):
        try:
            return mtrx(i, j)
        except Exception:
            pass
    # BUG FIX: the next two fallbacks used the undefined name 'm'; the
    # resulting NameError was silently swallowed by bare except clauses,
    # so both paths were dead code.
    try:
        return mtrx[i, j]
    except Exception:
        pass
    try:
        return mtrx[i][j]
    except Exception:
        pass
    # BUG FIX: the original *returned* the TypeError object instead of
    # raising it, so callers received an exception instance as a value.
    raise TypeError("Can't get m(%d,%d) for m=%s" % (i, j, mtrx))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.