| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def indgen(*shape):
"""
Create a (multi-dimensional) range of integer values.
Notes
-----
**porting to python**
If ``shape`` is of one dimension only, you can use ``np.arange(n)``.
IDL accepts floats as dimension parameters, but applies ``int()`` before
using them. While ``np.arange()`` also accepts floats, be careful, as the
number of elements does not match anymore!
.. code-block:: IDL
INDGEN(5.2) -> [0,1,2,3,4]
INDGEN(5) -> [0,1,2,3,4]
np.arange(5.2) -> [0,1,2,3,4,5] ; !!
np.arange(int(5.2)) -> [0,1,2,3,4]
np.arange(5) -> [0,1,2,3,4]
"""
return findgen(*shape, dtype=int)
| 5,343,300
|
def post_search(request):
"""
Search view for posts using the SearchForm form.
When the form is submitted, it is sent with the GET method rather than POST.
Once submitted, the form is instantiated with the GET data and the data is
validated; if the form is valid, the published posts are searched with a custom
SearchVector instance built from the title and body fields.
Different weights are applied to the search vectors built from title and body.
The default weights are D, C, B and A, which refer to the numbers 0.1, 0.2, 0.4
and 1.0 respectively. A weight of 1.0 is applied to the title and 0.4 to the
body, so title matches prevail over body content matches.
The results are filtered to show only the ones with a rank higher than 0.3.
"""
form = SearchForm()
query = None
results = []
if 'query' in request.GET:
form = SearchForm(request.GET)
if form.is_valid():
query = form.cleaned_data['query']
search_vector = SearchVector('title', weight='A') + SearchVector('body', weight='B')
search_query = SearchQuery(query)
results = Post.published.annotate(
search=search_vector,
rank=SearchRank(search_vector, search_query)
).filter(rank__gte=0.3).order_by('-rank')
"""results = Post.published.annotate(
search=search_vector,
rank=SearchRank(search_vector, search_query)
).filter(search=search_query).order_by('-rank')"""
#results = Post.published.annotate(search=SearchVector('title', 'body'),).filter(search=query)
return render(request, 'blog/post/search.html',
{'form': form,
'query': query,
'results': results})
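# A minimal sketch (an assumption, not shown in this snippet) of the SearchForm the
# view instantiates: a single 'query' CharField is all the GET validation needs.
from django import forms

class SearchForm(forms.Form):
    query = forms.CharField()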
| 5,343,301
|
def get_bins(values):
"""
Automatically compute the number of bins for discrete variables.
Parameters
----------
values : numpy array
Array of values for which to compute the bins.
Returns
-------
array with the bins
Notes
-----
Computes the width of the bins by taking the maximum of the Sturges and the Freedman-Diaconis
estimators. According to numpy's `np.histogram`, this provides good all-around performance.
The Sturges is a very simplistic estimator based on the assumption of normality of the data.
This estimator has poor performance for non-normal data, which becomes especially obvious for
large data sets. The estimate depends only on size of the data.
The Freedman-Diaconis rule uses interquartile range (IQR) to estimate the binwidth.
It is considered a robust version of the Scott rule as the IQR is less affected by outliers
than the standard deviation. However, the IQR depends on fewer points than the standard
deviation, so it is less accurate, especially for long tailed distributions.
"""
x_min = values.min().astype(int)
x_max = values.max().astype(int)
# Sturges histogram bin estimator
bins_sturges = (x_max - x_min) / (np.log2(values.size) + 1)
# The Freedman-Diaconis histogram bin estimator.
iqr = np.subtract(*np.percentile(values, [75, 25])) # pylint: disable=assignment-from-no-return
bins_fd = 2 * iqr * values.size ** (-1 / 3)
width = np.round(np.max([1, bins_sturges, bins_fd])).astype(int)
return np.arange(x_min, x_max + width + 1, width)
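# Usage sketch: the returned edges can be passed straight to np.histogram
# (numpy is assumed to be imported as np, as in the function body).
import numpy as np

values = np.random.poisson(lam=5, size=1000)
bins = get_bins(values)
counts, edges = np.histogram(values, bins=bins)
print(len(edges) - 1, "bins of width", edges[1] - edges[0])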
| 5,343,302
|
def destination_name(data):
""" Fonction qui permet de récupérer le nom du terminus en fonction de data passé en paramètre
Nom fonction : destination_name
Paramètre : data, un flux xml
Return : un string qui a comme valeur le libellé de la destination final"""
tree = ET.ElementTree(ET.fromstring(data))
root = tree.getroot()
for tag in root.findall("."):
if tag.tag == "PlannedPatternDelivery":
if tag.find("PlannedPattern") is None:
return "Error, 'PlannedPattern' tag not exists"
else:
if tag.find("PlannedPattern/DestinationName") is None:
return "Error, 'DestinationName' tag not exists"
else:
for elem in root.findall("./PlannedPattern/DestinationName"):
if elem.text is None:
return "Error, 'DestinationName' tag is empty"
else:
return elem.text
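# Usage sketch with a minimal XML payload that matches the tags the function
# expects (the real feed is richer; this is illustrative data only).
sample_xml = (
    "<PlannedPatternDelivery>"
    "<PlannedPattern>"
    "<DestinationName>Gare Centrale</DestinationName>"
    "</PlannedPattern>"
    "</PlannedPatternDelivery>"
)
print(destination_name(sample_xml))  # -> Gare Centrale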
| 5,343,303
|
def prim_NumToTensor(mapper, graph, node):
""" 构造转为Tensor的PaddleLayer。
TorchScript示例:
%other.2 : Tensor = prim::NumToTensor(%1736)
参数含义:
%other.2 (Tensor): 输出。
%1736 (-): 输入。
"""
scope_name = mapper.normalize_scope_name(node)
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Get the list of outputs of the current node
current_outputs = [output_name]
# Process input 0, i.e. %86
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs, scope_name)
inputs_inputs_name, inputs_inputs_node = mapper._get_inputs_name(inputs_node[0])
if inputs_node[0].kind() == "aten::size" and len(inputs_inputs_name) > 1:
layer_inputs["input"] = inputs_name[0]
# Get the list of inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer(
"prim_equal", inputs=layer_inputs, outputs=layer_outputs, scope_name=scope_name)
else:
layer_inputs["fill_value"] = inputs_name[0]
# Get the list of inputs of the current node
current_inputs = list(layer_inputs.values())
input_type = list(node.inputs())[0].type()
layer_attrs["dtype"] = input_type
layer_attrs["shape"] = [1]
graph.add_layer(
"paddle.full",
inputs=layer_inputs,
outputs=layer_outputs,
scope_name=scope_name,
**layer_attrs)
return current_inputs, current_outputs
| 5,343,304
|
def import_taskdict(modname):
"""Import user module and return its name and TASKDICT"""
try:
mod = import_module(modname)
except (ImportError, ModuleNotFoundError):
LOGGER.critical('Module %s not found. '
'Check that it is on PYTHONPATH', modname)
raise
try:
modtd = getattr(mod, 'TASKDICT')
except AttributeError:
LOGGER.critical('Module %s has no TASKDICT; '
'Please remove it from the input to continue.',
mod.__name__)
raise
return mod.__name__, modtd
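# Usage sketch (hedged): import_taskdict only needs the named module to expose a
# module-level TASKDICT. A throwaway module is registered in sys.modules here so
# the call is self-contained; the name -> callable schema is assumed purely for
# illustration and may differ from the project's real TASKDICT format.
import sys
import types

demo = types.ModuleType("demo_tasks")
demo.TASKDICT = {"hello": lambda: print("hello task")}
sys.modules["demo_tasks"] = demo

modname, taskdict = import_taskdict("demo_tasks")
taskdict["hello"]()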
| 5,343,305
|
def core_periphery():
"""Pipeline for core-periphery clustering, using the CP_THRESHOLD."""
with open(INPUT_PICKLED_GRAPH, "rb") as file:
G = pickle.load(file)
# Reference: https://github.com/skojaku/core-periphery-detection/blob/7d924402caa935e0c2e66fca40457d81afa618a5/cpnet/Rombach.py
rb = cpnet.Rombach()
rb.detect(G)
pair_id = rb.get_pair_id()
coreness = rb.get_coreness()
save_cp(pair_id, coreness)
clustering = make_cp_clustering(coreness)
# Hardcoded K=2, since binary by threshold:
filename = "output_files/main_files/{}_K2_labeling_file_cp.csv"
with open(filename.format(IDENTIFIER_STRING), mode="w") as file:
# Header:
fieldnames = ["node", "coreness_binary_{}".format(CP_THRESHOLD)]
writer = csv.DictWriter(file, fieldnames=fieldnames)
writer.writeheader()
user_obj_writer = csv.writer(file, delimiter=',', quotechar='"', quoting=csv.QUOTE_NONNUMERIC)
# Rows:
for i in range(len(G)):
row = [i, clustering[i]]
user_obj_writer.writerow(row)
return
| 5,343,306
|
def nlp_stem(string):
"""
Generates a list of the stem for each word in the original string and returns the joined list of stems as a single string.
"""
ps = nltk.porter.PorterStemmer()
stems = [ps.stem(word) for word in string.split()]
return " ".join(stems)
| 5,343,307
|
def _CreateHostConfigEntityFromHostInventory(lab_name, host):
"""Creates HostConfig from HostInventory.
Args:
lab_name: the lab name.
host: the ansible inventory Host object.
Returns:
the HostConfig entity.
"""
return datastore_entities.HostConfig(
id=host.name,
lab_name=lab_name,
hostname=host.name,
inventory_groups=sorted(set([g.name for g in host.groups])))
| 5,343,308
|
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: int,
err_msg: Literal["attribute: tvalues"],
):
"""
usage.statsmodels: 1
"""
...
| 5,343,309
|
def test_simple_case():
"""Test a simple case with 3 frames and 2 detections/gts per frame."""
gt = Tracks()
gt.add_frame(0, [0, 1], np.array([[0, 0, 1, 1], [1, 1, 2, 2]]))
gt.add_frame(1, [0, 1], np.array([[0, 0, 1, 1], [2, 2, 3, 3]]))
gt.add_frame(2, [0, 1], np.array([[0, 0, 1, 1], [2, 2, 3, 3]]))
hyp = Tracks()
hyp.add_frame(0, [0, 1], np.array([[0, 0, 1, 1], [1, 1, 2, 2]]))
hyp.add_frame(1, [0, 1], np.array([[0.1, 0.1, 1.1, 1.1], [1, 1, 2, 2]]))
hyp.add_frame(2, [2, 1], np.array([[0.05, 0.05, 1.05, 1.05], [2, 2, 3, 3]]))
metrics = calculate_clearmot_metrics(gt, hyp)
assert metrics["FN_CLEAR"] == 1
assert metrics["IDS"] == 1
assert metrics["FP_CLEAR"] == 1
assert metrics["MOTA"] == 0.5
assert metrics["MOTP"] == 0.0994008537355717
| 5,343,310
|
def visualize_object_detection_custom(
image, post_processed, config, prev_state, start_time, duration):
"""Draw object detection result boxes to image.
Args:
image (np.ndarray): An inference input RGB image to be drawn on.
post_processed (np.ndarray): A one-batch output of the model with
post-processing already applied. The format is defined at
https://github.com/blue-oil/blueoil/blob/master/docs/specification/output_data.md
config (EasyDict): Inference config.
prev_state (string): The previous state, "NORMAL", "WARNING" or "CLEAR"
start_time (float): UNIX time when the state was changed to the current state
duration (float): Duration (sec) to wait before the displayed status is changed
Returns:
PIL.Image.Image: drawn image object.
String: A current state ("NORMAL" or "WARNING" or "CLEAR")
Float: UNIX time when state was changed to current state
Bool: Whether "WARNING" is displayed or not
"""
colorWarning = (255, 0, 0)
colorClear = (0, 255, 0)
box_font = PIL.ImageFont.truetype(FONT, 10)
state_font = PIL.ImageFont.truetype(FONT, 20)
classes = config.CLASSES
ng_class_id = classes.index("face") if "face" in classes else 0
start_time = start_time or time.time()
center_width = image.shape[1] // 2
predict_boxes = _scale_boxes(
post_processed, image.shape, config.IMAGE_SIZE
)
# Gather and remove duplicate box in different classes
uniq_boxes = _gather_prediction(predict_boxes)
states = [_get_state(box, center_width) for box in uniq_boxes]
total_state = _get_total_state(states, ng_class_id)
image = PIL.Image.fromarray(_mask_image(image))
draw = PIL.ImageDraw.Draw(image)
for uniq_box, state in zip(uniq_boxes, states):
box = uniq_box["box"]
class_id = uniq_box["class_id"]
xy = [box[0], box[1], box[0] + box[2], box[1] + box[3]]
color = colorWarning if class_id == ng_class_id else colorClear
prefix = "[OK]" if state[0] or (state[1] != ng_class_id) else "[NG]"
txt = "{:s} {:s}: {:.3f}".format(
prefix, classes[class_id], float(uniq_box["score"])
)
draw.rectangle(xy, outline=color)
draw.text([box[0], box[1]], txt, fill=color, font=box_font)
if prev_state != total_state:
start_time = time.time()
elapsed_time = float(time.time() - start_time)
right_corner = [center_width + 60, 0]
displayed_warning = False
if total_state == STATE_WARNING and elapsed_time >= duration:
draw.text(right_corner, "WARNING", fill=colorWarning, font=state_font)
displayed_warning = True
elif total_state == STATE_CLEAR and elapsed_time >= duration:
draw.text(right_corner, " CLEAR", fill=colorClear, font=state_font)
return image, total_state, start_time, displayed_warning
| 5,343,311
|
def all_files(directory="..\\raw_data\\"):
""" Return flat list of all csv files in the given directory.
Args:
directory [string] full path to directory with csv files.
Default project layout is used if it is not provided
Returns:
Flat list of csv files as absolute names.
"""
files = list_files(directory)
result = []
for year in files.keys():
result += files[year]
return result
| 5,343,312
|
def connected_user(**params):
"""Returns the connected user."""
if g.context.person:
return g.context.person == request.view_args.get('person_id')
| 5,343,313
|
def remove_plugin(plugin, directory=None):
"""Removes the specified plugin."""
repo = require_repo(directory)
plugins = get_value(repo, 'plugins', expect_type=dict)
if plugin not in plugins:
return False
del plugins[plugin]
set_value(repo, 'plugins', plugins)
return True
| 5,343,314
|
def command_arg(name, value_type=None, help=''): # noqa; pylint: disable=redefined-builtin
"""
Decorator wrapping functions to add command line arguments to the sub command to be invoked
:param name: Name of the argument
:param value_type: Type of the argument
:param help: Help string for the argument
"""
@_ensure_command
def wrapper(f):
f.command.add_argument(name, value_type=value_type, help=help, # noqa; pylint: disable=redefined-builtin
wrapped=f)
return f
return wrapper
| 5,343,315
|
def test_gcp_iam_organization_role_get_command(client):
"""
Retrieve organization role information.
Given:
- User has provided valid credentials.
When:
- gcp-iam-organization-role-get called.
Then:
- Ensure number of items is correct.
- Ensure outputs prefix is correct.
- Ensure a sample value from the API matches what is generated in the context.
"""
mock_response = load_mock_response('role/organization_role_get.json')
client.gcp_iam_organization_role_get_request = Mock(return_value=mock_response)
role_name = "organizations/xsoar-organization/roles/xsoar_demo_97"
command_args = dict(role_name=role_name)
result = GCP_IAM.gcp_iam_organization_role_get_command(client, command_args)
assert len(result[0].outputs) == 1
assert len(result[0].outputs[0]) == 7
assert result[0].outputs_prefix == 'GCPIAM.Role'
assert result[0].outputs[0].get('stage') == 'ALPHA'
assert result[0].outputs[0].get('name') == role_name
client.gcp_iam_organization_role_get_request.side_effect = Exception('Not Found')
result = GCP_IAM.gcp_iam_organization_role_get_command(client, command_args)
assert result[0].readable_output == get_error_message(role_name)
| 5,343,316
|
def write_file(signum, frame):
"""Requisited new data from sensor"""
print "Requisited new data from sensor"
f = open(PASSIVE_FILE, 'w')
f.write(msp430['passives'])
print msp430['passives']
f.close()
try:
# Without a real pid, could be dangerous
# kill(pid_bikex,SIG2)
pass
except Exception, e:
print 'Father not available'
| 5,343,317
|
def ioc_arg_parser(*, desc, default_prefix, argv=None, macros=None,
supported_async_libs=None):
"""
A reusable ArgumentParser for basic example IOCs.
Parameters
----------
desc : string
Human-friendly description of what this IOC does
default_prefix : string
argv : list, optional
Defaults to sys.argv
macros : dict, optional
Maps macro names to default value (string) or None (indicating that
this macro parameter is required).
supported_async_libs : list, optional
"White list" of supported server implementations. The first one will
be the default. If None is specified, the parser will accept all of the
(hard-coded) choices.
Returns
-------
ioc_options : dict
kwargs to be handed into the IOC init.
run_options : dict
kwargs to be handed to run
"""
parser, split_args = template_arg_parser(desc=desc, default_prefix=default_prefix,
argv=argv, macros=macros,
supported_async_libs=supported_async_libs)
return split_args(parser.parse_args())
| 5,343,318
|
def count_model_param_and_flops(model):
"""
Return the number of parameters and the number of FLOPs of (only) the Conv2D and Dense layers of the model.
:return:
"""
param_by_layer = dict()
flop_by_layer = dict()
nb_param_model, nb_flop_model = 0, 0
for layer in model.layers:
if isinstance(layer, Conv2D):
nb_param_layer, nb_param_layer_bias = count_nb_param_layer(layer)
nb_flop_layer = count_nb_flop_conv_layer(layer, nb_param_layer, nb_param_layer_bias)
elif isinstance(layer, Dense):
nb_param_layer, nb_param_layer_bias = count_nb_param_layer(layer)
nb_flop_layer = count_nb_flop_dense_layer(layer, nb_param_layer, nb_param_layer_bias)
else:
# if you have other layers you want to compute flops for: add the conditions here and write the necessary functions
nb_param_layer, nb_param_layer_bias, nb_flop_layer = 0, 0, 0
param_by_layer[layer.name] = nb_param_layer + nb_param_layer_bias
flop_by_layer[layer.name] = nb_flop_layer
nb_param_model += nb_param_layer
nb_flop_model += nb_flop_layer
total_nb_param_model = nb_param_model
total_nb_flop_model = nb_flop_model
return total_nb_param_model, total_nb_flop_model
| 5,343,319
|
def plate_from_dataframe(
dataframe, wellname_field="wellname", num_wells="infer", data=None
):
"""Create a plate from a Pandas dataframe where each row contains the
name of a well and data on the well.
The dataframe's index will be set to the well names (taken from `wellname_field`).
This function is used e.g. in `plate_from_list_spreadsheet`.
Parameters
----------
dataframe
A Pandas dataframe
wellname_field
The name of the Pandas dataframe column indicating the name of the wells.
num_wells
Number of wells in the Plate to be created. If left to default 'infer',
the size of the plate will be chosen as the smallest format (out of
96, 384 and 1536 wells) which contains all the well names.
data
Metadata information for the plate.
"""
# TODO: infer plate class automatically ?
dataframe = dataframe.set_index(wellname_field)
wells_data = {well: row.to_dict() for well, row in dataframe.iterrows()}
if num_wells == "infer":
num_wells = infer_plate_size_from_wellnames(wells_data.keys())
plate_class = get_plate_class(num_wells=num_wells)
return plate_class(wells_data=wells_data, data=data)
| 5,343,320
|
def catch_conn_reset(f):
"""
A decorator to handle connection reset errors even ones from pyOpenSSL
until https://github.com/edsu/twarc/issues/72 is resolved
It also handles ChunkedEncodingError which has been observed in the wild.
"""
try:
import OpenSSL
ConnectionError = OpenSSL.SSL.SysCallError
except ImportError:
ConnectionError = None
@wraps(f)
def new_f(self, *args, **kwargs):
# Only handle if pyOpenSSL is installed.
if ConnectionError:
try:
return f(self, *args, **kwargs)
except (ConnectionError, ChunkedEncodingError) as e:
log.warning("caught connection reset error: %s", e)
self.connect()
return f(self, *args, **kwargs)
else:
return f(self, *args, **kwargs)
return new_f
| 5,343,321
|
def S7_plot_static(t, whichplot, tsteps, M, S, gamma):
"""
Plots the interactions of the galaxies in the S7 initial condition at set time values.
Parameters
----------
t: float
The current time t[i].
whichplot: str
The passage one wants to see.
tsteps: int
The number of steps the time will be divided into.
M, S, gamma: int, int, float
Parameters of the differential equation.
Returns
-------
Static plot of the chosen passage.
"""
direct_r1, direct_r2, retro_r1, retro_r2, R1, R2, vR1, vR2 = S7_ode_solutions(t,tsteps,M,S,gamma)
plt.figure(figsize=(12,5))
o=[0, 50, 100, 150, 200, 250, 300, 350, 400, 450]
j=1
for l in o:
mr1,mr2=[],[]
if j==11:
break
if whichplot=='direct':
for v in range(120):
mr1.append(direct_r1[v][l])
mr2.append(direct_r2[v][l])
else:
for v in range(120):
mr1.append(retro_r1[v][l])
mr2.append(retro_r2[v][l])
plt.subplot(2,5,j,frame_on=False)
plt.scatter(mr1,mr2,c='c',s=4)
plt.scatter(R1[l],R2[l],c='r')
plt.scatter(0,0,c='k')
plt.xlim(-55,55)
plt.ylim(-55,55)
plt.tick_params(axis='x', labelbottom='off', top='off', bottom='off')
plt.tick_params(axis='y', labelleft='off', left='off', right='off')
j+=1
if whichplot=='direct':
plt.suptitle('Direct Passage, S7', x=0.5, y=1.02, fontsize=15)
plt.savefig("directS7.png", bbox_inches='tight')
else:
plt.suptitle('Retrograde Passage, S7', x=0.5, y=1.02, fontsize=15)
plt.savefig("retrogradeS7.png", bbox_inches='tight')
plt.tight_layout()
plt.show()
| 5,343,322
|
def register(session):
"""Register action. Called when used as an event plugin."""
AppplicationsAction(session).register()
| 5,343,323
|
def prepared(name):
"""Prepare the given volume.
Args:
name (str): Volume name
Returns:
dict: state return value
"""
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
# Idempotence.
if __salt__['metalk8s_volumes.is_prepared'](name):
ret['result'] = True
ret['comment'] = 'Volume {} already prepared.'.format(name)
return ret
# Dry-run.
if __opts__['test']:
ret['changes'][name] = 'Prepared'
ret['result'] = None
ret['comment'] = 'Volume {} is going to be prepared.'.format(name)
return ret
# Let's go for real.
try:
__salt__['metalk8s_volumes.prepare'](name)
except Exception as exn:
ret['result'] = False
ret['comment'] = 'Failed to prepare volume {}: {}.'.format(name, exn)
else:
ret['changes'][name] = 'Prepared'
ret['result'] = True
ret['comment'] = 'Volume {} prepared.'.format(name)
return ret
| 5,343,324
|
def if_statement(env, node):
"""
'If' statement def for AST.
interpret - runtime function for Evaluator (true or false statement depending on condition).
"""
condition_value = node.condition.interpret(env)
if condition_value:
node.true_stmt.interpret(env)
else:
if node.alternatives_stmt:
for alternative_stmt in node.alternatives_stmt:
alternative_condition_value = alternative_stmt.interpret(env)
if alternative_condition_value:
return True
if node.false_stmt:
node.false_stmt.interpret(env)
return condition_value
| 5,343,325
|
def update_app_downloads():
"""
Update download/install stats for all apps.
Spread these tasks out successively by `seconds_between` seconds so they
don't hit Monolith all at once.
"""
chunk_size = 50
seconds_between = 2
all_ids = list(Webapp.objects.filter(status=amo.STATUS_PUBLIC)
.values_list('id', flat=True))
countdown = 0
for ids in chunked(all_ids, chunk_size):
update_downloads.delay(ids, countdown=countdown)
countdown += seconds_between
| 5,343,326
|
def volCyl(radius: float, height: float) -> float:
"""Finds volume of a cylinder"""
volume: float = pi * radius * radius * height
return volume
| 5,343,327
|
def req_filter_widgets():
"""
Filter widgets for requests
@returns: list of filter widgets
"""
T = current.T
from s3 import S3DateFilter, \
S3LocationFilter, \
S3OptionsFilter, \
S3TextFilter, \
s3_get_filter_opts
from s3db.req import req_status_opts
req_status_opts = OrderedDict(sorted(req_status_opts().items(),
key = lambda i: i[0],
))
filter_widgets = [
S3TextFilter(["req_ref"],
label = T("Order No."),
),
S3DateFilter("date"),
S3OptionsFilter("transit_status",
cols = 3,
options = req_status_opts,
sort = False,
),
S3OptionsFilter("fulfil_status",
cols = 3,
hidden = True,
options = req_status_opts,
sort = False,
),
S3OptionsFilter("req_item.item_id",
hidden = True,
options = lambda: s3_get_filter_opts("supply_item"),
),
]
if current.auth.s3_has_role("SUPPLY_COORDINATOR"):
coordinator_filters = [
S3LocationFilter("site_id$location_id",
levels = ["L3", "L4"],
),
S3TextFilter("site_id$location_id$addr_postcode",
label = T("Postcode"),
),
S3OptionsFilter("site_id",
hidden = True
),
S3OptionsFilter("site_id$organisation_id$delivery.value",
label = T("Delivery##supplying"),
options = delivery_tag_opts(),
),
]
filter_widgets[2:2] = coordinator_filters
return filter_widgets
| 5,343,328
|
def is_minimally_connected(graph: List[List[int]], num_vertices: int) -> bool:
"""
1. Has no cycle.
2. All nodes are connected
"""
visited = set()
has_cycle = is_cyclic(graph, 0, -1, visited)
if has_cycle or len(visited) < num_vertices:
# if num_vertices > len(visited), it means there is a disconnect in the graph.
return False
return True
| 5,343,329
|
def to_keep(path):
"""
:param path:
:return: True if height and width are both >= 512
"""
img = Image.open(path)
w, h = img.size  # PIL's Image.size is (width, height)
return h >= 512 and w >= 512
| 5,343,330
|
def __register(key, *additional_keys, default_handler_info, registry=None, registering_for_name="", overwrite=False):
"""
Internal decorator to register the non-default handlers for multimethod
registry and key_fn_name are keyword arguments with defaults to make it easy to apply
functools.partial on them
"""
if registry is None:
registry = {}
all_keys = [key] + list(additional_keys)
def decorator(handler):
if not isinstance(key, Hashable):
raise TypeError(
f"Cannot register handler for function {registering_for_name} with key {repr(key)} that is not Hashable"
)
if not isinstance(handler, Callable):
raise TypeError(f"handler function {handler} of type {type(handler)} must be Callable")
for _key in all_keys:
if _key in registry and not overwrite:
raise KeyError(
f"Duplicate registration for key {repr(_key)} for function {registering_for_name}"
)
if __get_default_params(handler):
raise ValueError(
f"Found default params while registering keys {repr(all_keys)} for function {registering_for_name}. "
"Default params are only allowed in the default handler"
)
for _key in all_keys:
registry[_key] = __HandlerInfo(
handler=handler,
default_params={
**default_handler_info.default_params,
**__get_default_params(handler) # default params of explicit registration takes precedence
}
)
@wraps(handler)
def wrapper(*args, **kwargs):
return handler(*args, **kwargs)
return wrapper
return decorator
| 5,343,331
|
def download_from_url_if_not_in_cache(cloud_path: str, cache_dir: str = None):
"""
:param cloud_path: e.g., https://public-aristo-processes.s3-us-west-2.amazonaws.com/wiqa-model.tar.gz
:param cache_dir: will be regarded as a cache.
:return: the path of file to which the file is downloaded.
"""
return cached_path(url_or_filename=cloud_path, cache_dir=cache_dir)
| 5,343,332
|
def assert_array_almost_equal(x: numpy.bool_, y: bool):
"""
usage.scipy: 2
"""
...
| 5,343,333
|
def check(self):
"""Check that the SlotW27 object is correct
Parameters
----------
self : SlotW27
A SlotW27 object
Returns
-------
None
Raises
-------
S27_W01CheckError
You must have W0 <= W1
S27_W12CheckError
You must have W1 <= W2
S27_W03CheckError
You must have W0 <= W3
"""
if self.W1 < self.W0:
raise S27_W01CheckError("You must have W0 <= W1")
if self.W2 < self.W1:
raise S27_W12CheckError("You must have W1 <= W2")
if self.W3 < self.W0:
raise S27_W03CheckError("You must have W0 <= W3")
| 5,343,334
|
def test_filter(qtbot, browser):
"""
Ensure the filter UX works
"""
initRowCount = browser._listView.model().rowCount()
assert initRowCount > 0
# Enter a search term
qtbot.keyClicks(browser._lineEdit, 'google')
# Press Enter to perform the filter
qtbot.keyPress(browser._lineEdit, QtCore.Qt.Key_Enter)
filteredRowCount = browser._listView.model().rowCount()
assert initRowCount > filteredRowCount
| 5,343,335
|
def plot_sensors_connectivity(info, con, picks=None,
cbar_label='Connectivity'):
"""Visualize the sensor connectivity in 3D.
Parameters
----------
info : dict | None
The measurement info.
con : array, shape (n_channels, n_channels) | Connectivity
The computed connectivity measure(s).
%(picks_good_data)s
Indices of selected channels.
cbar_label : str
Label for the colorbar.
Returns
-------
fig : instance of Renderer
The 3D figure.
"""
_validate_type(info, "info")
from mne.viz.backends.renderer import _get_renderer
from mne_connectivity.base import BaseConnectivity
if isinstance(con, BaseConnectivity):
con = con.get_data()
renderer = _get_renderer(size=(600, 600), bgcolor=(0.5, 0.5, 0.5))
picks = _picks_to_idx(info, picks)
if len(picks) != len(con):
raise ValueError('The number of channels picked (%s) does not '
'correspond to the size of the connectivity data '
'(%s)' % (len(picks), len(con)))
# Plot the sensor locations
sens_loc = [info['chs'][k]['loc'][:3] for k in picks]
sens_loc = np.array(sens_loc)
renderer.sphere(np.c_[sens_loc[:, 0], sens_loc[:, 1], sens_loc[:, 2]],
color=(1, 1, 1), opacity=1, scale=0.005)
# Get the strongest connections
n_con = 20 # show up to 20 connections
min_dist = 0.05 # exclude sensors that are less than 5cm apart
threshold = np.sort(con, axis=None)[-n_con]
ii, jj = np.where(con >= threshold)
# Remove close connections
con_nodes = list()
con_val = list()
for i, j in zip(ii, jj):
if np.linalg.norm(sens_loc[i] - sens_loc[j]) > min_dist:
con_nodes.append((i, j))
con_val.append(con[i, j])
con_val = np.array(con_val)
# Show the connections as tubes between sensors
vmax = np.max(con_val)
vmin = np.min(con_val)
for val, nodes in zip(con_val, con_nodes):
x1, y1, z1 = sens_loc[nodes[0]]
x2, y2, z2 = sens_loc[nodes[1]]
tube = renderer.tube(origin=np.c_[x1, y1, z1],
destination=np.c_[x2, y2, z2],
scalars=np.c_[val, val],
vmin=vmin, vmax=vmax,
reverse_lut=True)
renderer.scalarbar(source=tube, title=cbar_label)
# Add the sensor names for the connections shown
nodes_shown = list(set([n[0] for n in con_nodes] +
[n[1] for n in con_nodes]))
for node in nodes_shown:
x, y, z = sens_loc[node]
renderer.text3d(x, y, z, text=info['ch_names'][picks[node]],
scale=0.005,
color=(0, 0, 0))
renderer.set_camera(azimuth=-88.7, elevation=40.8,
distance=0.76,
focalpoint=np.array([-3.9e-4, -8.5e-3, -1e-2]))
renderer.show()
return renderer.scene()
| 5,343,336
|
def str_to_list_1(string):
"""
Parameters
----------
string : str
The str of first line in each sample of sample.txt
Returns
---------
final_list : list
List of lists parsed from each bracketed JSON group in the string.
"""
final_list = []
li = re.findall(r'\[.*?\]', string)
for ele in li:
final_list.append(json.loads(ele))
return final_list
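# Usage sketch: each bracketed (non-nested) group in the line is parsed as JSON.
line = '[1, 2, 3] [4.5, 6] ["a", "b"]'
print(str_to_list_1(line))  # -> [[1, 2, 3], [4.5, 6], ['a', 'b']]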
| 5,343,337
|
def batched_nms(boxes, scores, idxs, iou_threshold):
"""
Same as torchvision.ops.boxes.batched_nms, but safer.
"""
assert boxes.shape[-1] == 4
# TODO may need better strategy.
# Investigate after having a fully-cuda NMS op.
if len(boxes) < 40000:
return box_ops.batched_nms(boxes, scores, idxs, iou_threshold)
result_mask = scores.new_zeros(scores.size(), dtype=torch.bool)
for id in torch.unique(idxs).cpu().tolist():
mask = (idxs == id).nonzero().view(-1)
keep = nms(boxes[mask], scores[mask], iou_threshold)
result_mask[mask[keep]] = True
keep = result_mask.nonzero().view(-1)
keep = keep[scores[keep].argsort(descending=True)]
return keep
| 5,343,338
|
def one_mini_batch(data, batch_indices):
"""
Generate one mini-batch at a time.
:param data: list of raw samples
:param batch_indices: indices of the samples to include in this batch
:return: dict holding the raw samples plus their word id lists and label vectors
"""
batch_data = {
"raw_data": [data[i] for i in batch_indices],
"word_id_list": [],
"label_vector": []
}
for data in batch_data["raw_data"]:
batch_data["word_id_list"].append(data["word_id_list"])
batch_data["label_vector"].append(data["label_vector"])
return batch_data
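# Usage sketch: each raw sample is expected to carry "word_id_list" and
# "label_vector" keys (format inferred from the function body).
samples = [
    {"word_id_list": [3, 7, 9], "label_vector": [0, 1]},
    {"word_id_list": [2, 5], "label_vector": [1, 0]},
]
batch = one_mini_batch(samples, batch_indices=[1])
print(batch["word_id_list"])  # -> [[2, 5]]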
| 5,343,339
|
def reset_password(reset_key):
"""Checks the reset key. If successful, displays the password reset prompt."""
username = auth_utils.check_reset_key(reset_key)
if username is None:
flask.flash(
'Invalid request. If your link has expired, then you will need to generate a new one. '
'If you continue to encounter problems, please contact devteam@donut.caltech.edu.'
)
return flask.redirect(flask.url_for('auth.forgot_password'))
return flask.render_template(
'reset_password.html', username=username, reset_key=reset_key)
| 5,343,340
|
def williams_diff_test(corr_func: SummaryCorrFunc,
X: np.ndarray,
Y: np.ndarray,
Z: np.ndarray,
two_tailed: bool) -> float:
"""
Calculates the p-value for the difference in correlations using Williams' Test.
"""
# In the math, Z is metric 1. We take the absolute value of the correlations because
# it does not matter whether they are positively or negatively correlated with each other. The WMT scripts
# do the same before calling r.test
r12 = abs(corr_func(X, Z))
r13 = abs(corr_func(Y, Z))
r23 = abs(corr_func(X, Y))
n = _get_n(corr_func, X)
# Implementation based on https://github.com/cran/psych/blob/master/R/r.test.R
diff = r12 - r13
det = 1 - (r12 ** 2) - (r23 ** 2) - (r13 ** 2) + (2 * r12 * r23 * r13)
av = (r12 + r13) / 2
cube = (1 - r23) ** 3
t2 = diff * np.sqrt((n - 1) * (1 + r23) / (((2 * (n - 1) / (n - 3)) * det + av ** 2 * cube)))
# r.test implicitly assumes that r12 > r13 because it takes the absolute value of the t statistic. Since we don't,
# we have to have special handling for one-tailed tests so we don't map a negative t statistic to a positive one.
if two_tailed:
pvalue = scipy.stats.t.sf(abs(t2), n - 3) * 2
else:
pvalue = scipy.stats.t.sf(t2, n - 3)
return pvalue
| 5,343,341
|
def runProtectionScenario(scenarioName, outputDir=None, workspace=None,
scenarioFile=None, xmlFiles=None, inPlace=False,
unprotectFirst=False):
"""
Run the protection named by `scenarioName`, found in `scenarioFile` if given,
or the value of config variable `GCAM.LandProtectionXmlFile` otherwise. The source
files are taken from `workspace`, if given, otherwise from the value of `GCAM.RefWorkspace`.
Results are written to the given `outputDir`. In the event that the input and output
files are the same, `inPlace` must be set to True to indicate that overwriting is intended.
By default the two files `xmlFiles`, land2.xml and land3.xml in the aglu-xml directory,
are processed, though other files can be specified in the unlikely case that you have
alternatives.
:param scenarioName: (str) the name of a protection scenario defined in the `scenarioFile`
:param outputDir: (str) the directory under which to write the modified land files. Ignored
if inPlace=True.
:param workspace: (str) the location of the workspace holding the input files (ignored
if xmlFiles are specified explicitly)
:param scenarioFile: (str) the path to a protection.xml file defining `scenarioName`
:param xmlFiles: (list of str) the paths of the XML input files to modify
:param inPlace: (bool) if True, input and output files may be the same (output overwrites input).
:param unprotectFirst: (bool) if True, make all land "unprotected" before
protecting.
:return: none
"""
_logger.debug("Land-protection scenario '%s'", scenarioName)
landProtection = parseLandProtectionFile(scenarioFile=scenarioFile)
workspace = workspace or getParam('GCAM.SandboxRefWorkspace')
xmlFiles = xmlFiles or _landXmlPaths(workspace)
for inFile in xmlFiles:
basename = os.path.basename(inFile)
outFile = inFile if inPlace else pathjoin(outputDir, basename)
# check that we're not clobbering the input file
if not inPlace and os.path.lexists(outFile) and os.path.samefile(inFile, outFile):
raise CommandlineError("Attempted to overwrite '%s' but --inPlace was not specified." % inFile)
landProtection.protectLand(inFile, outFile, scenarioName, unprotectFirst=unprotectFirst)
| 5,343,342
|
def insert_into_topic_rate():
""" 插入积分表 """
postgres = DBPoolHelper(db_type='postgressql', dbname='dingxiangyuan', user='postgres', password='0000', host='localhost', port='5432')
data1 = pd.read_sql(sql="select topic_url from posts_replies where floor=1", con=db_conn)
data2 = pd.read_sql(sql="select topic_url from topic_rate_get", con=db_conn)
topic_urls = set(data1['topic_url']) - set(data2['topic_url'])
for topic_url in topic_urls:
res = pd.read_sql(sql='select topic_type, board_name from posts_replies where floor=1 and topic_url=%s', con=db_conn, params=(topic_url,))
topic_type, board_name = res['topic_type'].values[0], res['board_name'].values[0]
try:
postgres.execute(sql="INSERT INTO topic_rate_get(topic_url, topic_type, board_name, rate_get) VALUES(%s, %s, %s, 0)", params=(topic_url, topic_type, board_name))
print('Insert succeeded')
except Exception as e:
print('Insert failed', e)
postgres.connect_close()
| 5,343,343
|
def load_students(max_meeting_seconds: int) -> Tuple[List[str], int]:
"""Loads student names and wait times from the database."""
try:
with sqlite3.connect("students.db") as conn:
cursor = conn.cursor()
try:
cursor.execute("SELECT name FROM students")
student_names = [row[0] for row in cursor.fetchall()]
cursor.execute("SELECT seconds FROM students")
individual_seconds = cursor.fetchall()[0][0]
return student_names, individual_seconds
except IndexError:
pass
except sqlite3.OperationalError:
create_students_table()
return [], max_meeting_seconds
| 5,343,344
|
def coords(gd0, c, pad=True):
"""Return coordinates along one of the three axes.
Useful for plotting::
import matplotlib.pyplot as plt
plt.plot(gd.coords(0), data[:, 0, 0])
plt.show()
"""
L = np.linalg.norm(gd0.cell_cv[c])
N = gd0.N_c[c]
h = L / N
p = gd0.pbc_c[c] or pad
return np.linspace((1 - p) * h, L, N - 1 + p, False)
| 5,343,345
|
def get_associated_genes(variants_list: list) -> pd.DataFrame:
"""
Get variant gene information from BioMart.
More information on BioMart here: https://www.ensembl.org/info/data/biomart/index.html
:param variants_list: the list with variant ids.
:return: dataframe with variant and gene information
"""
snp_dataset = Dataset(name='hsapiens_snp', host='http://www.ensembl.org')
variant_gene_df = snp_dataset.query(attributes=['refsnp_id', 'ensembl_gene_stable_id'],
filters={'snp_filter': variants_list})
gene_dataset = Dataset(name='hsapiens_gene_ensembl', host='http://www.ensembl.org')
gene_df = gene_dataset.query(attributes=['ensembl_gene_id', 'external_gene_name'], only_unique=False,
filters={'link_ensembl_gene_id': list(variant_gene_df['Gene stable ID'])})
merged_df = pd.merge(variant_gene_df, gene_df, on='Gene stable ID')
interaction = ['association' for ind, row in merged_df.iterrows()]
merged_df['interaction'] = interaction
return merged_df
| 5,343,346
|
def crc16(data) :
"""Compute CRC16 for bytes/bytearray/memoryview data"""
crc = _CRC16_START
for b in data :
crc = ((crc << 8) & 0xFFFF) ^ _CRC16_TABLE[(crc >> 8) ^ b]
return crc
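# Companion sketch (hedged): one common way to build the table this function
# indexes, using the CCITT polynomial 0x1021 and start value 0xFFFF. The actual
# _CRC16_TABLE/_CRC16_START constants in this module may differ.
_CRC16_START = 0xFFFF
_CRC16_TABLE = []
for i in range(256):
    c = i << 8
    for _ in range(8):
        if c & 0x8000:
            c = ((c << 1) ^ 0x1021) & 0xFFFF
        else:
            c = (c << 1) & 0xFFFF
    _CRC16_TABLE.append(c)

print(hex(crc16(b"123456789")))  # -> 0x29b1 (CRC-16/CCITT-FALSE check value)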
| 5,343,347
|
def sigma(j: int, N: int = 1) -> np.ndarray:
"""
"""
s = [s0, s1, s2, s3]
dims = [4] * N
idx = np.unravel_index(j, dims)
return tensor(s[x] for x in idx)
| 5,343,348
|
def backcasting(
predictor,
window,
curves,
distance="RMS",
columns=("cases", "deaths"),
min_series=14,
step=1,
):
"""
Perform a backcasting performance analysis of the given model. For the sake
of this method, the model is just a function that receives an epidemic curve
dataframe and a list of time windows and returns the forecasts for cases and
deaths for the specified times.
"""
windows = np.array(as_seq(window))
min_window = windows.min(initial=len(curves))
def _relative_error(x, y):
return (x - y).dropna().abs() / x
results = []
for k in range(min_window, len(curves) - min_series, step):
data = curves.iloc[:-k]
prediction = predictor(data, windows)
results.append(_relative_error(curves, prediction))
st.write(results[-1])
return pd.concat(results, axis=0)
| 5,343,349
|
def print_linked_list(node):
"""
Print a linked list to the console.
:type node: ListNode
"""
visited = set()
while node:
if node in visited:
print(f"{node.val}(环)")
return
visited.add(node)
print(node.val, end="")
node = node.next
if node:
print(" -> ", end="")
print()
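# Usage sketch with a minimal ListNode (assumed shape: .val and .next).
class ListNode:
    def __init__(self, val, next=None):
        self.val = val
        self.next = next

head = ListNode(1, ListNode(2, ListNode(3)))
print_linked_list(head)          # -> 1 -> 2 -> 3
head.next.next.next = head.next  # make node 3 point back to node 2
print_linked_list(head)          # -> 1 -> 2 -> 3 -> 2(cycle)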
| 5,343,350
|
def write_chat(s, queue) -> None:
"""This method consume concurrently the queue while reading the messages written in chat.
"""
last_time_message_sent = datetime.datetime.now()
# We exploit `CHAT_SERVER_NOP_SEC` in the config file to send a NOP
# to #NOP channel for not being disconnected from the Server after
# 5 mins of inactivity.
while True:
_, writable, _ = select.select([], [s], [])
for sock in writable:
now = datetime.datetime.now()
delta = now - last_time_message_sent
seconds_passed = int(delta.total_seconds())
if queue.empty():
if seconds_passed > CHAT_SERVER_NOP_SEC:
cmd = f"{NOP_CH} NOP"
else:
continue
else:
try:
request = queue.get()
cmd = request["command"]
if cmd == "STOP QUEUE":
return
except Exception:
# Nothing usable was pulled from the queue; skip sending this round.
continue
print(f"Sending {cmd}")
sock.sendall(cmd.encode('utf-8') + b'\n')
last_time_message_sent = datetime.datetime.now()
| 5,343,351
|
def find_keys(d: Dict[K, V], predicate: Callable[[V], bool]) -> List[K]:
"""Find keys where values match predicate."""
return [k for k, v in d.items() if predicate(v)]
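# Usage sketch: keys whose values satisfy the predicate.
print(find_keys({"a": 1, "b": 5, "c": 3}, lambda v: v > 2))  # -> ['b', 'c']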
| 5,343,352
|
def inject_local_url(endpoint: str) -> None:
"""Sets tables' host to use local DynamoDB.
:param endpoint: URL to your local DynamoDB service.
"""
for table in [ResourceTemplateDB]:
table.Meta.host = endpoint # type: ignore
LOG.info(f"Using local DynamoDB: {endpoint}")
| 5,343,353
|
def get_maya_property_name(prop, ignore_channel=False):
"""
Given a property, return a reasonable Maya name to use for it.
If ignore_channel is True, return the property for the whole vector, eg. return
'.translate' instead of '.translateX'.
This doesn't create or query anything. It just generates a name to use elsewhere.
"""
prop_parts = prop.path.split('/')
# Get the property key, without any channel suffixes attached.
prop_key = prop_parts[0]
mapping = {
'translation': 'translate',
'rotation': 'rotate',
'scale': 'scale',
}
maya_key = None
if prop_key in mapping:
prop_key = mapping[prop_key]
if prop.path.count('/') == 1 and not ignore_channel:
# If we've been given a single channel, eg. rotation/x, return it.
assert len(prop_parts) == 2, prop_parts
assert prop_parts[1] in ('x', 'y', 'z'), prop_parts
return '%s%s' % (prop_key, prop_parts[1].upper())
else:
# Otherwise, return the vector itself.
return prop_key
| 5,343,354
|
def name_nonini_file(tmp_path):
"""Nмя существующего файла не ini формата """
file = tmp_path / "file1.ini"
file.write_text('-')
yield str(file)
file.unlink(missing_ok=True)
| 5,343,355
|
def model_deepFlavourReference_test(Inputs,nclasses,dropoutRate=0.1,momentum=0.6):
"""
reference 1x1 convolutional model for 'deepFlavour'
with recurrent layers and batch normalisation
standard dropout rate is 0.1
should be trained for flavour prediction first. afterwards, all layers can be fixed
that do not include 'regression' and the training can be repeated focusing on the regression part
(check function fixLayersContaining with invert=True)
"""
globalvars = BatchNormalization(momentum=momentum,name='globals_input_batchnorm') (Inputs[0])
cpf = BatchNormalization(momentum=momentum,name='cpf_input_batchnorm') (Inputs[1])
npf = BatchNormalization(momentum=momentum,name='npf_input_batchnorm') (Inputs[2])
vtx = BatchNormalization(momentum=momentum,name='vtx_input_batchnorm') (Inputs[3])
cpf,npf,vtx = block_deepFlavourConvolutions(charged=cpf,
neutrals=npf,
vertices=vtx,
dropoutRate=dropoutRate,
active=True,
batchnorm=True, batchmomentum=momentum)
#
cpf = LSTM(150,go_backwards=True,implementation=2, name='cpf_lstm')(cpf)
cpf=BatchNormalization(momentum=momentum,name='cpflstm_batchnorm')(cpf)
cpf = Dropout(dropoutRate)(cpf)
npf = LSTM(50,go_backwards=True,implementation=2, name='npf_lstm')(npf)
npf=BatchNormalization(momentum=momentum,name='npflstm_batchnorm')(npf)
npf = Dropout(dropoutRate)(npf)
vtx = LSTM(50,go_backwards=True,implementation=2, name='vtx_lstm')(vtx)
vtx=BatchNormalization(momentum=momentum,name='vtxlstm_batchnorm')(vtx)
vtx = Dropout(dropoutRate)(vtx)
x = Concatenate()( [globalvars,cpf,npf,vtx ])
x = block_deepFlavourDense(x,dropoutRate,active=True,batchnorm=True,batchmomentum=momentum)
flavour_pred=Dense(nclasses, activation='softmax',kernel_initializer='lecun_uniform',name='ID_pred')(x)
predictions = [flavour_pred]
model = Model(inputs=Inputs, outputs=predictions)
return model
| 5,343,356
|
def load_spyrelet_class(spyrelet_name, cfg):
"""Load a spyrelet class from a file (whose location is defined in cfg)"""
# discover spyrelet file and class
spyrelet_path_str, _ = get_config_param(cfg, [CONFIG_SPYRELETS_KEY, spyrelet_name, CONFIG_SPYRELETS_FILE_KEY])
spyrelet_class_name, spyrelet_cfg_path_str = get_config_param(cfg, [CONFIG_SPYRELETS_KEY, spyrelet_name, CONFIG_SPYRELETS_CLASS_KEY])
# resolve the spyrelet file location
# if the path isn't absolute resolve it relative to the config file
spyrelet_path = pathlib.Path(spyrelet_path_str)
if not spyrelet_path.is_absolute():
spyrelet_path = pathlib.Path(spyrelet_cfg_path_str).parent / spyrelet_path
spyrelet_path = spyrelet_path.resolve()
if not spyrelet_path.is_file():
raise SpyreletLoadError(None, f'spyrelet [{spyrelet_name}] file [{spyrelet_path}] doesn\'t exist')
return load_class_from_file(spyrelet_path, spyrelet_class_name)
| 5,343,357
|
def see_documentation():
"""
This function redirects to the api documentation
"""
return jsonify({
'@context': responses.CONTEXT,
'rdfs:comment': 'See http://www.conceptnet.io for more information about ConceptNet, and http://api.conceptnet.io/docs for the API documentation.'
})
| 5,343,358
|
def fit_imputer(df, tolerance=0.2, verbose=2, max_iter=20, nearest_features=20, imputation_order='ascending',
initial_strategy='most_frequent'):
"""
A function to train an IterativeImputer using machine learning
Args:
df: dataset to impute
tolerance: Tolerance of stopping function
verbose: Verbosity flag; controls the debug messages that are issued as functions are evaluated
max_iter: Maximum number of imputation rounds
nearest_features: Number of other features to use to estimate the missing values
imputation_order: ascending or descending - the order in which the features will be imputed
initial_strategy: e.g. 'most_frequent' or 'mean'
Returns: dataset with no missing values
"""
start = time.time()
# restrict the values to be predicted to a min / max range
minimum_before = list(df.iloc[:, :].min(axis=0))
maximum_before = list(df.iloc[:, :].max(axis=0))
imputer = IterativeImputer(random_state=0,
imputation_order=imputation_order,
n_nearest_features=nearest_features,
initial_strategy=initial_strategy,
max_iter=max_iter,
min_value=minimum_before,
max_value=maximum_before,
skip_complete=True,
tol=tolerance,
verbose=verbose)
imputer.fit(df)
end = time.time()
print('Execution time for IterativeImputer: {} sec'.format(end - start))
return imputer
| 5,343,359
|
def jhtml_render(request, file_type=None,json_file_url=None, html_template=None, json_render_dict=None, json_render_func=None, file_path=None, url_name=None, app_name=None):
"""
:param request:
:param file_type: json/temp_json
:param json_file_url:
:param html_template: template file path, not including the templates directory
:param json_render_dict: dict of variables used to render the template
:return:
"""
path = request.path
print(path)
from django.conf import settings
from django.shortcuts import render
if file_type=='temp_json':
try:
json_file_url = reverse(url_name+'_tjson', current_app=app_name)
except Exception as e:
print('ERROR: no json file url found:', file_path)
render_dict = {'json_to_render_file_url': json_file_url}
if json_render_dict is not None:
render_dict.update(json_render_dict)
if json_render_func is not None:
render_dict.update(json_render_func(request, json_render_dict))
page_name = html_template
if (page_name is not None) and len(page_name) > 0:
page_name = page_name # settings.BASE_DIR /
else:
page_name = 'html/index_for_json.html'
if len(json_file_url) > 3:
if json_file_url[-4:] == 'html':
page_name = json_file_url
#static amis json file render
##if re_one.file_type=='temp_json':
return render(request, page_name, render_dict)
| 5,343,360
|
def intermediate_dir():
""" Location in temp dir for storing .cpp and .o files during
builds.
"""
python_name = "python%d%d_intermediate" % tuple(sys.version_info[:2])
path = os.path.join(tempfile.gettempdir(),"%s"%whoami(),python_name)
if not os.path.exists(path):
os.makedirs(path, mode=0o700)
return path
| 5,343,361
|
def work():
"""thread worker function"""
global working, analogReadPollingPins
x = 0
working = True
while(working):
x = x + 0.09
y = int(math.cos(x) * 100 + 150)
# retcmd = "publishPin/" + str(pin) + "/3/"+ str(y) +"\n"
# uart.write(codec.encode(retcmd))
for pinx in digitalReadPollingPins:
retcmd = "publishPin/" + str(pinx) + "/0/"+str(randint(0,1))+"\n"
uart.write(codec.encode(retcmd))
for pinx in analogReadPollingPins:
#retcmd = "publishPin/" + str(pinx) + "/4/"+ str(y) +"\n"
#retcmd = "publishPin/" + str(pinx) + "/" + str(int(pinx)%4) + "/"+ str(y) +"\n"
retcmd = "publishPin/" + str(pinx) + "/1/"+ str(y) +"\n"
uart.write(codec.encode(retcmd))
sleep(0.001)
#print (y)
# TODO -------
# if (digitalReadPollingPins.length() == 0 && analogReadPollingPins.length() == 0
# working = False
print("I am done !")
| 5,343,362
|
def image(cache_path, width, height):
""" Generate a custom-sized sample image """
# Create unique path
size = (width, height)
filename = "%sx%s.png" % (width, height)
path = os.path.join(cache_path, filename)
# Check if image has already been created
if not os.path.exists(path):
# Generate new image
sample = breakdown.pkg_path("img/sample.png")
if not os.path.exists(sample):
return Markup(u"<img/>")
else:
try:
# Try scaling the image using PIL
from PIL import Image
source = Image.open(sample)
scaled = source.resize(size, Image.BICUBIC)
scaled.save(path)
except ImportError:
# If we couldn't find PIL, just copy the image
inf = open(sample, "rb")
outf = open(path, "wb")
outf.write(inf.read())
return Markup(u'<img src="%s%s">' % (STATIC_URL, filename))
| 5,343,363
|
def truncate(text, length=30, indicator='...', whole_word=False):
"""Truncate ``text`` with replacement characters.
``length``
The maximum length of ``text`` before replacement
``indicator``
If ``text`` exceeds the ``length``, this string will replace
the end of the string
``whole_word``
If true, shorten the string further to avoid breaking a word in the
middle. A word is defined as any string not containing whitespace.
If the entire text before the break is a single word, it will have to
be broken.
Example::
>>> truncate('Once upon a time in a world far far away', 14)
'Once upon a...'
TODO: try to replace it with built-in `textwrap.shorten`
(available starting from Python 3.4) when support for Python 2
completely dropped.
"""
if not text:
return ""
if len(text) <= length:
return text
short_length = length - len(indicator)
if not whole_word:
return text[:short_length] + indicator
# Go back to end of previous word.
i = short_length
while i >= 0 and not text[i].isspace():
i -= 1
while i >= 0 and text[i].isspace():
i -= 1
if i <= 0:
# Entire text before break is one word, or we miscalculated.
return text[:short_length] + indicator
return text[:i + 1] + indicator
| 5,343,364
|
def prefix_sums(A):
"""
This function calculates sums of elements in a given slice (contiguous segment of the array).
Its main idea uses prefix sums, which
are defined as the consecutive totals of the first 0, 1, 2, . . . , n elements of an array.
Args:
A: an array represents number of mushrooms growing on the
consecutive spots along a road.
Returns:
an array contains the consecutive sums of the first n elements of an array A
To use:
>> A=[2,3,7,5,1,3,9]
>> print(prefix_sums(A))
[0, 2, 5, 12, 17, 18, 21, 30]
Time Complexity: O(n)
"""
n = len(A)
P = [0] * (n + 1)
for k in range(1, n + 1):
P[k] = P[k - 1] + A[k - 1]
return P
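# Worked example: with P = prefix_sums(A), the total of any slice A[l..r]
# (inclusive) is P[r + 1] - P[l], answered in O(1) per query.
A = [2, 3, 7, 5, 1, 3, 9]
P = prefix_sums(A)
print(P[5] - P[2])  # sum of A[2..4] = 7 + 5 + 1 = 13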
| 5,343,365
|
def polar_import():
"""Import data from Polar and save as workouts"""
from run4it.api.scripts import script_import_polar_exercices as script_func
return script_func('polar_import')
| 5,343,366
|
def write_ilastik_batch_volume(im, fn):
"""Write a volume to an HDF5 file for Ilastik batch processing."""
if im.ndim == 2:
im = im.reshape((1,1)+im.shape+(1,))
elif im.ndim == 3:
im = im.reshape((1,)+im.shape+(1,))
else:
raise ValueError('Unsupported number of dimensions in image.')
write_h5_stack(im, fn, group='/volume/data')
| 5,343,367
|
def dijkstra(G, Gextra, source, target_set, required_datarate, max_path_latency):
"""
:returns a successful path from source to a target from target_set with lowest path length
"""
q = DynamicPriorityQueue()
q.put((source, 0.0), priority=0.0)
marked = set()
parents = {source: None}
while not q.empty():
path_length, (current_node, current_path_latency) = q.pop()
marked.add(current_node)
if current_node in target_set:
return _compute_path(parents, current_node)
for neighbor in G.neighbors_iter(current_node):
if neighbor not in marked:
edata = G.edge[current_node][neighbor]
new_path_latency = current_path_latency + edata["l_cap"]
if (required_datarate <= Gextra.get_edge(current_node, neighbor).b_rem and
new_path_latency <= max_path_latency):
new_path_length = path_length + 1
if not config.USE_HOP_PATH_LENGTH:
new_path_length = new_path_latency
if q.put_or_decrease((neighbor, new_path_latency), other_priority=new_path_length):
parents[neighbor] = current_node
return None
| 5,343,368
|
def build_single_class_dataset(name, class_ind=0, **dataset_params):
"""
wrapper for the base skeletor dataset loader `build_dataset`
this will take in the same arguments, but the loader will only iterate
over examples of the given class
I'm just going to overwrite standard cifar loading data for now
"""
trainloader, testloader = build_dataset(name, **dataset_params)
def _filter(loader, mode='train'):
dataset = loader.dataset
assert name in ['cifar10', 'svhn'],\
'we only support cifar and svhn right now'
if name == 'cifar10':
data_attr = mode + '_data' # e.g. train imgs in dataset.train_data
label_attr = mode + '_labels'
else:
data_attr = 'data'
label_attr = 'labels'
data = getattr(dataset, data_attr)
targets = np.array(getattr(dataset, label_attr))
class_inds = np.where(targets == int(class_ind))
data, targets = data[class_inds], targets[class_inds]
setattr(dataset, data_attr, data)
setattr(dataset, label_attr, targets)
return loader
return _filter(trainloader, mode='train'), _filter(testloader, mode='test')
| 5,343,369
|
def gamma_trace(t):
"""
trace of a single line of gamma matrices
Examples
========
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, \
gamma_trace, LorentzIndex
>>> from sympy.tensor.tensor import tensor_indices, tensorhead
>>> p, q = tensorhead('p, q', [LorentzIndex], [[1]])
>>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', LorentzIndex)
>>> ps = p(i0)*G(-i0)
>>> qs = q(i0)*G(-i0)
>>> gamma_trace(G(i0)*G(i1))
4*metric(i0, i1)
>>> gamma_trace(ps*ps) - 4*p(i0)*p(-i0)
0
>>> gamma_trace(ps*qs + ps*ps) - 4*p(i0)*p(-i0) - 4*p(i0)*q(-i0)
0
"""
if isinstance(t, TensAdd):
res = TensAdd(*[_trace_single_line(x) for x in t.args])
return res
t = _simplify_single_line(t)
res = _trace_single_line(t)
return res
| 5,343,370
|
def preprocess(comment):
"""Pre-Process the comment"""
copy_comment = copy.deepcopy(comment)
# Replacing link
final_comment = replace_link(copy_comment)
nftokens = get_nf_tokens(comment)
return final_comment, nftokens
| 5,343,371
|
def RefreshAnnotations():
"""Refresh all annotations"""
for Head in ida.Heads():
if GetAnnotation(Head):
analysis.AnalyzeAddress(Head,Erase=False)
| 5,343,372
|
def voc_label_indices(colormap, colormap2label):
"""Map a RGB color to a label."""
colormap = colormap.astype('int32')
idx = ((colormap[:, :, 0] * 256 + colormap[:, :, 1]) * 256
+ colormap[:, :, 2])
return colormap2label[idx]
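# Companion sketch (hedged): building the colormap2label lookup table that
# voc_label_indices expects, following the common Pascal VOC recipe. The
# colormap list below is a truncated illustrative subset, not the full 21-class
# VOC palette.
import numpy as np

VOC_COLORMAP_SUBSET = [[0, 0, 0], [128, 0, 0], [0, 128, 0]]

def build_colormap2label(colormap_list):
    """Map every RGB triple to its class index via a flat 256**3 lookup table."""
    colormap2label = np.zeros(256 ** 3, dtype=np.int64)
    for i, colormap in enumerate(colormap_list):
        colormap2label[(colormap[0] * 256 + colormap[1]) * 256 + colormap[2]] = i
    return colormap2label

# A 1x2 "image": one background pixel and one pixel of class 1.
img = np.array([[[0, 0, 0], [128, 0, 0]]], dtype=np.uint8)
print(voc_label_indices(img, build_colormap2label(VOC_COLORMAP_SUBSET)))  # -> [[0 1]]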
| 5,343,373
|
def add_unlimited_vars_and_constraints(Market, ndds, m):
"""
For each ndd
add binary var in m for each outgoing edge
add constraint to m that sum of used edges per ndd is <= 1
for each paired donor edge
add outgoing and incoming edges as binary var to m
for each paired vertex
add constraint to m that incoming edge sum is <= 1
"""
directed_donors = [node for node in Market.Graph.nodes()
if node not in ndds]
# Initiate directed donor solver constraint variables
for v in Market.Graph.nodes():
if v not in ndds:
v.solver_vars_in = []
v.solver_vars_out = []
# Initiate edge solver variables
Market.edge_vars = []
for ndd in ndds:
ndd_edge_vars = []
for e in ndd.edges:
edge_name = str(str(e[0].type) + "," + str(e[1].type))
edge_var = pulp.LpVariable(edge_name, cat='Binary')
m += edge_var
e.edge_var = edge_var
ndd_edge_vars.append(edge_var)
e.target_v.solver_vars_in.append(edge_var)
m += sum(ndd_edge_vars) <= 1
# Add pair->pair edge variables
for e in Market.Graph.edges(directed_donors):
edge_name = str(str(e[0].type) + "," + str(e[1].type))
edge_var = pulp.LpVariable(edge_name, cat='Binary')
m += edge_var
# Add constraint variables to keep track
Market.edge_vars.append(edge_var)
e[0].solver_vars_out.append(edge_var)
e[1].solver_vars_in.append(edge_var)
# Update constraints on LP model
for v in Market.Graph.nodes():
if len(v.solver_vars_in) > 1:
m += sum(v.solver_vars_in) <= 1
# Sum of edges into a paired vertex must be >= sum of edges out of it
for v in Market.Graph.nodes():
if v not in ndds:
m += sum(v.solver_vars_in) >= sum(v.solver_vars_out)
| 5,343,374
|
def make_noisy_linear(w=1, std=1):
"""Factory for linear function <w,x> perturbed by gaussian noise N(0,std^2)"""
@Oracle
def noisy_linear(x):
return np.dot(x, w) + np.random.normal(scale=std)
return noisy_linear
| 5,343,375
|
def update_delegator_distribution(wallet: dict) -> None:
"""
Update normal wallet outstanding rewards.
:param wallet: wallet to update
:return: None
"""
rewards = get_delegator_distribution(wallet["address"])
if rewards["result"]["total"]:
for denom_dict in rewards["result"]["total"]:
denom: str = denom_dict["denom"]
amount: float = big_float_to_real_float(denom, float(denom_dict["amount"]))
set_wallet_balance(wallet, denom, rewards=amount)
| 5,343,376
|
def distance(turtle, x, y=None):
"""Return the distance from the turtle to (x,y) in turtle step units.
Arguments:
turtle -- the turtle
x -- a number or a pair/vector of numbers or a turtle instance
y -- a number or None
call: distance(turtle, x, y) # two coordinates
--or: distance(turtle, (x, y)) # a pair (tuple) of coordinates
--or: distance(turtle, vec) # e.g. as returned by pos(turtle)
--or: distance(turtle, mypen) # where mypen is another turtle
Example:
>>> pos(turtle)
(0.00, 0.00)
>>> distance(turtle, 30,40)
50.0
>>> pen = Turtle()
>>> pen.forward(77)
>>> distance(turtle, pen)
77.0
"""
if type(turtle) != _turtle.Turtle:
raise TypeError("turtle argument to distance is not a valid turtle")
return turtle.distance(x, y)
| 5,343,377
|
def asy_ts(gp, anc_data):
""" Returns a recommendation via TS in the asyuential setting. """
anc_data = copy(anc_data)
# Always use a random optimiser with a vectorised sampler for TS.
if anc_data.acq_opt_method != 'rand':
anc_data.acq_opt_method = 'rand'
anc_data.max_evals = 4 * anc_data.max_evals
gp_sample = _get_gp_sampler_for_parallel_strategy(gp, anc_data)
return _maximise_acquisition(gp_sample, anc_data, vectorised=True)
| 5,343,378
|
def checkParams(opts):
"""
检查模块名是否符合命名规则
检查目录是否存在
"""
res = {}
for opt, arg in opts:
if opt in ('--name'):
if re.match('^[a-zA-Z_][a-zA-Z0-9_]*$', arg):
res['name'] = arg
else:
return res
elif opt in ('--dir'):
res['dir'] = arg;
elif opt in ('--type'):
res['type'] = arg
else:
print("Unknown option " + arg)
res['dir'] = res['dir'] + res['name'] + '/'
return res
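
# Hedged usage sketch: opts in the shape produced by getopt.getopt for
# "--name mymodule --dir ./src/ --type lib".
opts = [('--name', 'mymodule'), ('--dir', './src/'), ('--type', 'lib')]
print(checkParams(opts))   # {'name': 'mymodule', 'dir': './src/mymodule/', 'type': 'lib'}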
| 5,343,379
|
def nms(bboxes, iou_threshold, sigma=0.3, method='nms'):
"""
Note: soft-nms, https://arxiv.org/pdf/1704.04503.pdf
https://github.com/bharatsingh430/soft-nms
"""
best_bboxes = []
while len(bboxes) > 0:
max_ind = np.argmax(bboxes[:, 4])
best_bbox = bboxes[max_ind]
best_bboxes.append(list(best_bbox))
bboxes = np.concatenate([bboxes[: max_ind], bboxes[max_ind + 1:]])
iou = bboxes_iou(best_bbox[np.newaxis, :4], bboxes[:, :4])
weight = np.ones((len(iou),), dtype=np.float32)
assert method in ['nms', 'soft-nms']
if method == 'nms':
iou_mask = iou > iou_threshold
weight[iou_mask] = 0.0
if method == 'soft-nms':
weight = np.exp(-(1.0 * iou ** 2 / sigma))
bboxes[:, 4] = bboxes[:, 4] * weight
score_mask = bboxes[:, 4] > 0.
bboxes = bboxes[score_mask]
return best_bboxes
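
# Hedged usage sketch: assumes bboxes_iou(box, boxes) is available in this module
# and that each row is [x1, y1, x2, y2, score, class].
import numpy as np

boxes = np.array([[10., 10., 50., 50., 0.90, 0.],
                  [12., 12., 52., 52., 0.80, 0.],
                  [100., 100., 150., 150., 0.70, 1.]])
kept = nms(boxes.copy(), iou_threshold=0.5, method='nms')
print(len(kept))   # -> 2: the two heavily overlapping boxes collapse into one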
| 5,343,380
|
def smart_cast(value):
"""Intelligently cast the given value to a Python data type.
:param value: The value to be cast.
:type value: str
"""
# Handle integers first because is_bool() may interpret 0s and 1s as booleans.
if is_integer(value, cast=True):
return int(value)
elif is_float(value):
return float(value)
elif is_bool(value):
return to_bool(value)
else:
return value
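
# Hedged usage sketch: relies on the is_integer/is_float/is_bool/to_bool helpers
# assumed to be defined alongside smart_cast.
print(smart_cast("42"))      # 42 (int)
print(smart_cast("3.14"))    # 3.14 (float)
print(smart_cast("yes"))     # True, if to_bool() recognises "yes"
print(smart_cast("hello"))   # "hello" falls through unchanged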
| 5,343,381
|
def _draw_mol_with_property( mol, property, **kwargs ):
"""
http://rdkit.blogspot.com/2015/02/new-drawing-code.html
Parameters
---------
property : dict
key atom idx, val the property (need to be stringfiable)
"""
from rdkit.Chem import Draw
from rdkit.Chem import AllChem
def run_from_ipython():
try:
__IPYTHON__
return True
except NameError:
return False
AllChem.Compute2DCoords(mol)
for idx in property:
# opts.atomLabels[idx] =
mol.GetAtomWithIdx( idx ).SetProp( 'molAtomMapNumber', "({})".format( str(property[idx])))
mol = Draw.PrepareMolForDrawing(mol, kekulize=False) #enable adding stereochem
if run_from_ipython():
from IPython.display import SVG, display
if "width" in kwargs and type(kwargs["width"]) is int and "height" in kwargs and type(kwargs["height"]) is int:
drawer = Draw.MolDraw2DSVG(kwargs["width"], kwargs["height"])
else:
drawer = Draw.MolDraw2DSVG(500,250)
drawer.DrawMolecule(mol)
drawer.FinishDrawing()
display(SVG(drawer.GetDrawingText().replace("svg:", "")))
else:
if "width" in kwargs and type(kwargs["width"]) is int and "height" in kwargs and type(kwargs["height"]) is int:
drawer = Draw.MolDraw2DCairo(kwargs["width"], kwargs["height"])
else:
drawer = Draw.MolDraw2DCairo(500,250) #cairo requires anaconda rdkit
# opts = drawer.drawOptions()
drawer.DrawMolecule(mol)
drawer.FinishDrawing()
import io
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
buff = io.BytesIO()
buff.write(drawer.GetDrawingText())
buff.seek(0)
plt.figure()
i = mpimg.imread(buff)
plt.imshow(i)
plt.show()
# display(SVG(drawer.GetDrawingText()))
| 5,343,382
|
def find_host_biz_relations(bk_host_ids: List[int]) -> Dict:
"""
    Query the topology (business) relations that the given hosts belong to
    :param bk_host_ids: list of host IDs, e.g. [1, 2, 3]
    :return: host-to-topology relations, e.g.
[
{
"bk_biz_id": 3,
"bk_host_id": 3,
"bk_module_id": 59,
"bk_set_id": 11,
"bk_supplier_account": "0"
}
]
"""
    # CMDB limits the number of hosts per query, so split into batches and request them concurrently
param_list = [
{"bk_host_id": bk_host_ids[count * constants.QUERY_CMDB_LIMIT : (count + 1) * constants.QUERY_CMDB_LIMIT]}
for count in range(math.ceil(len(bk_host_ids) / constants.QUERY_CMDB_LIMIT))
]
host_biz_relations = request_multi_thread(client_v2.cc.find_host_biz_relations, param_list, get_data=lambda x: x)
return host_biz_relations
| 5,343,383
|
def import_students(dest, filetype="md"):
"""
Import students and write data to disk
"""
os.makedirs(dest, exist_ok=True)
# Get the spreadsheet data as [(field, value), ...]
data = get_spreadsheet_values(STUDENT_SPREADSHEET_ID, STUDENT_RANGE_NAME, rename_fields=STUDENT_FIELD_RENAME)
for idx, d in enumerate(data):
content, fields = data_to_toml(d,
filetype=filetype,
filter_fields=STUDENT_FIELD_FILTER,
taxonomy_fields=STUDENT_FIELD_TAXONOMIES)
filename = data_to_filename(fields, filetype, base_dir=dest)
if filename:
print("Writing:", filename)
with open(os.path.join(dest, filename), 'w') as f:
f.write(content)
| 5,343,384
|
def atom_to_atom_line(atom):
"""Takes an atomium atom and turns it into a .cif ATOM record.
:param Atom atom: the atom to read.
:rtype: ``str``"""
name = get_atom_name(atom)
res_num, res_insert = split_residue_id(atom)
return "ATOM {} {} {} . {} {} . {} {} {} {} {} 1 {} {} {} {} {} {} 1".format(
atom.id, atom.element, name, atom.het._name if atom.het else "?",
atom.het._internal_id if atom.het and isinstance(
atom.het, Ligand
) else atom.chain._internal_id if atom.chain else ".",
res_num, res_insert, atom.location[0], atom.location[1], atom.location[2],
atom.bvalue, atom.charge,
res_num, atom.het._name if atom.het else "?",
atom.chain.id if atom.chain else ".", name
)
| 5,343,385
|
def set_state_to_approval():
"""
This method is called when Peer Reviewer approves a
review and moves a CL to approval state
"""
checkListObj = retrieve_checklist_and_its_decisions(
request_data_mgr.get_cl_uuid(), 'peer_review_value')
checkListObj.state = CheckListState.approval.name
admin_role = Role.objects.get(name=Roles.admin.name)
admin_list = IceUserProfile.objects.all().filter(
role=admin_role)
if admin_list.count() < 1:
logger.error("Failed to save the new state \
of the Checklist to the DB")
msg = "checklist state wasn't change due to server error"
raise Exception(msg)
rand_admin = admin_list[random.randint(0, admin_list.count() - 1)]
admin = IceUserProfile.objects.get(uuid=rand_admin.uuid)
checkListObj.update_time = timezone.now()
checkListObj.owner = admin
insert_to_recent_engagements(
owner=checkListObj.owner,
action=RecentEngagementActionType.GOT_OWNERSHIP_OVER_ENGAGEMENT.name)
checkListObj.save()
| 5,343,386
|
def combine_audio(files, target_audio, pre_normalisation=True):
"""
    input: a list of file names (video files work too; their audio is extracted)
    Combines the audio files into a single output file
"""
import soundfile
from transform_audio import wav_to_mono
import os
import numpy as np
#Extract audio from video and convert to mono
audio_files = []
for cpt, file in enumerate(files):
#extract audio
audio = str(cpt)+"_aux_audio_1439.wav"
audio_files.append(audio)
extract_audio(file, audio)
#To mono
wav_to_mono(audio, audio)
#read audios
raw_audios = []
for file in audio_files:
#read audio
x, fs = soundfile.read(file)
        #normalize loudness, if needed (peak normalisation on the absolute value)
        if pre_normalisation:
            x = x / np.max(np.abs(x))
        raw_audios.append(x)
    #Pad every file with zeros up to the length of the longest one
    max_length = max(len(i) for i in raw_audios)
    padded_audio = []
    for raw_audio in raw_audios:
        diff = max_length - len(raw_audio)
        padded_audio.append(np.pad(raw_audio, (0, diff), mode='constant'))
    #Mix and normalize the result
    mixed_audio = np.sum(padded_audio, axis=0)
    mixed_audio = mixed_audio / np.max(np.abs(mixed_audio))
    #Export audio
    soundfile.write(target_audio, mixed_audio, fs)
#delete files
for file in audio_files:
os.remove(file)
| 5,343,387
|
async def edit_chat_invite_link(
token: str = TOKEN_VALIDATION,
chat_id: Union[int, str] = Query(..., description='Unique identifier for the target chat or username of the target channel (in the format @channelusername)'),
invite_link: str = Query(..., description='The invite link to edit'),
name: Optional[str] = Query(None, description='Invite link name; 0-32 characters'),
expire_date: Optional[int] = Query(None, description='Point in time (Unix timestamp) when the link will expire'),
member_limit: Optional[int] = Query(None, description='Maximum number of users that can be members of the chat simultaneously after joining the chat via this invite link; 1-99999'),
creates_join_request: Optional[bool] = Query(None, description="True, if users joining the chat via the link need to be approved by chat administrators. If True, member_limit can't be specified"),
) -> JSONableResponse:
"""
Use this method to edit a non-primary invite link created by the bot. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. Returns the edited invite link as a ChatInviteLink object.
https://core.telegram.org/bots/api#editchatinvitelink
"""
from .....main import _get_bot
bot = await _get_bot(token)
try:
entity = await get_entity(bot, chat_id)
except BotMethodInvalidError:
assert isinstance(chat_id, int) or (isinstance(chat_id, str) and len(chat_id) > 0 and chat_id[0] == '@')
entity = chat_id
except ValueError:
raise HTTPException(404, detail="chat not found?")
# end try
result = await bot.edit_chat_invite_link(
entity=entity,
invite_link=invite_link,
name=name,
expire_date=expire_date,
member_limit=member_limit,
creates_join_request=creates_join_request,
)
data = await to_web_api(result, bot)
return r_success(data.to_array())
| 5,343,388
|
def post_live_migrate_at_source(adapter, host_uuid, instance, vif):
"""Performs the post live migrate on the source host.
:param adapter: The pypowervm adapter.
:param host_uuid: The host UUID for the PowerVM API.
:param instance: The nova instance object.
:param vif: The virtual interface of the instance. This may be
called network_info in other portions of the code.
"""
vif_drv = _build_vif_driver(adapter, host_uuid, instance, vif)
return vif_drv.post_live_migrate_at_source(vif)
| 5,343,389
|
def conv2date(dtstr,tstart=None):
"""Convert epoch string or time interval to matplotlib date"""
    # the input may be a relative time interval such as "+3d" or "-2m", so check with a regex first
    m = re.search(r"([+\-])([0-9]+)([dm])", dtstr)
    if m:
        if m.group(3) == "m":
            dt = 30.5 * float(m.group(2))  # scale with average days per month
        elif m.group(3) == "d":
            dt = float(m.group(2))
        fac = 1 if m.group(1) == "+" else -1
        if not tstart:
            tstart = 0  # compute timedeltas only
        dout = tstart + fac * dt
    else:
        dout = datestr2num(dtstr)
    return dout
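
# Hedged usage sketch: assumes datestr2num is imported from matplotlib.dates,
# as conv2date itself requires.
base = conv2date("2020-01-01")
print(conv2date("+3d", tstart=base) - base)   # 3.0 days
print(conv2date("-1m", tstart=base) - base)   # -30.5 (average month length)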
| 5,343,390
|
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(MaskedBasicblock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
        print('ResNet-18: using pretrained ImageNet weights for initialization')
return model
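
# Hedged usage sketch: MaskedBasicblock and ResNet are assumed to be defined in
# this module; num_classes is an assumed keyword passed through **kwargs.
model = resnet18(pretrained=False, num_classes=10)
print(sum(p.numel() for p in model.parameters()))   # parameter count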
| 5,343,391
|
def logioinfo(func):
"""
    Decorator that records bucket and object IO information for the wrapped operation
"""
def write(exec_info):
"""
        Record bucket and object IO information for a single operation
        Parameters:
            exec_info: dict with the operation's obj, resource and extra_info
        Returns:
            the wrapped function's return value
"""
log.info('in write')
log.info(exec_info)
ret_val = func(exec_info)
if ret_val is False:
return ret_val
gen_basic_io_info_structure = BasicIOInfoStructure()
gen_extra_io_info_structure = ExtraIOInfoStructure()
write_bucket_info = BucketIoInfo()
write_key_info = KeyIoInfo()
obj = exec_info['obj']
resource_name = exec_info['resource']
extra_info = exec_info.get('extra_info', None)
log.info('obj_name :%s' % obj)
log.info('resource_name: %s' % resource_name)
if 's3.Bucket' == type(obj).__name__:
log.info('in s3.Bucket logging')
resource_names = ['create']
if resource_name in resource_names:
access_key = extra_info['access_key']
log.info('adding io info of create bucket')
bucket_info = gen_basic_io_info_structure.bucket(**{'name': obj.name})
write_bucket_info.add_bucket_info(access_key, bucket_info)
if 's3.Object' == type(obj).__name__:
log.info('in s3.Object logging')
resource_names = ['upload_file', 'initiate_multipart_upload']
if resource_name in resource_names:
log.info('writing log for upload_type: %s' % extra_info.get('upload_type','normal'))
access_key = extra_info['access_key']
# setting default versioning status to disabled
extra_info['versioning_status'] = extra_info.get('versioning_status', 'disabled')
log.info('versioning_status: %s' % extra_info['versioning_status'])
if extra_info.get('versioning_status') == 'disabled' or \
extra_info.get('versioning_status') == 'suspended':
log.info('adding io info of upload objects')
key_upload_info = gen_basic_io_info_structure.key(
**{'name': extra_info['name'], 'size': extra_info['size'],
'md5_local': extra_info['md5'],
'upload_type': extra_info.get('upload_type','normal')})
write_key_info.add_keys_info(access_key, obj.bucket_name, key_upload_info)
if extra_info.get('versioning_status') == 'enabled' and extra_info.get('version_count_no') == 0:
log.info('adding io info of upload objects, version enabled, so only key name will be added')
key_upload_info = gen_basic_io_info_structure.key(
**{'name': extra_info['name'], 'size': None,
'md5_local': None,
'upload_type': extra_info.get('upload_type','normal')})
write_key_info.add_keys_info(access_key, obj.bucket_name, key_upload_info)
log.debug('writing log for %s' % resource_name)
return ret_val
return write
| 5,343,392
|
def weather(api_token, city, start, end):
"""
Returns an hourly report of cloud cover, wind and temperature data for the
given city. The report is always in full days. Timestamps are in UTC.
Start and end dates are interpreted as UTC.
"""
a = Astral()
city = a[city]
# hour=0 would give us the previous day. Dark Sky always returns full days so
# we can just make one request per day from start to end, always at midday.
d = start.replace(hour=12, tzinfo=pytz.UTC)
dfs = []
for i in range(_num_days(start, end)):
weather = _raw_weather(api_token, city.latitude, city.longitude, d)
df = _as_dataframe(weather, d)
dfs.append(df)
d = d + timedelta(days=1)
return _tidy(pd.concat(dfs))
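
# Hedged usage sketch: needs a valid Dark Sky API token and a city name known to
# Astral; the token below is a placeholder.
from datetime import datetime

df = weather("DARKSKY_TOKEN", "London", datetime(2019, 1, 1), datetime(2019, 1, 3))
print(df.head())   # hourly cloud cover, wind and temperature, UTC timestamps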
| 5,343,393
|
def skimage_radon_back_projector(sinogram, geometry, range, out=None):
"""Calculate forward projection using skimage.
Parameters
----------
sinogram : `DiscreteLpElement`
Sinogram (projections) to backproject.
geometry : `Geometry`
The projection geometry to use.
range : `DiscreteLp`
range of this projection (volume space).
out : ``range`` element, optional
An element in range that the result should be written to.
Returns
-------
    out : ``range`` element
        Back-projection (volume) given by the sinogram.
"""
# Lazy import due to significant import time
from skimage.transform import iradon
theta = skimage_theta(geometry)
skimage_range = skimage_sinogram_space(geometry, range, sinogram.space)
skimage_sinogram = skimage_range.element()
skimage_sinogram.sampling(clamped_interpolation(range, sinogram))
if out is None:
out = range.element()
else:
# Only do asserts here since these are backend functions
assert out in range
# Rotate back from (rows, cols) to (x, y)
backproj = iradon(skimage_sinogram.asarray().T, theta,
output_size=range.shape[0], filter=None, circle=False)
out[:] = np.rot90(backproj, -1)
# Empirically determined value, gives correct scaling
scaling_factor = 4.0 * float(geometry.motion_params.length) / (2 * np.pi)
# Correct in case of non-weighted spaces
proj_extent = float(sinogram.space.partition.extent.prod())
proj_size = float(sinogram.space.partition.size)
proj_weighting = proj_extent / proj_size
scaling_factor *= (sinogram.space.weighting.const /
proj_weighting)
scaling_factor /= (range.weighting.const /
range.cell_volume)
# Correctly scale the output
out *= scaling_factor
return out
| 5,343,394
|
def get_segment_hosts(master_port):
"""
"""
gparray = GpArray.initFromCatalog( dbconn.DbURL(port=master_port), utility=True )
segments = GpArray.getSegmentsByHostName( gparray.getDbList() )
return segments.keys()
| 5,343,395
|
def save_database(database):
""" Write database back to their individual files """
# Sort all databases
for db_name in database.keys():
        database[db_name] = {k: v for k, v in sorted(database[db_name].items(), key=lambda item: item[0])}
# Write database to files
for filename in database:
path = os.path.join(database_folder, filename + ".json")
with open(path, "w") as f:
json.dump(database[filename], f)
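
# Hedged usage sketch: database_folder (used above) must point at an existing,
# writable directory before calling this.
db = {"users": {"bob": 2, "alice": 1}, "items": {"hammer": {"qty": 3}}}
save_database(db)   # writes users.json and items.json with keys sorted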
| 5,343,396
|
def get_saved_albums(sp: Spotify) -> List[Dict[str, Any]]:
"""Returns the list of albums saved in user library"""
albums = [] # type: List[Dict[str, Any]]
results = sp.current_user_saved_albums(limit=50)
albums.extend(results["items"])
while results["next"]:
results = sp.next(results)
albums.extend(results["items"])
return albums
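
# Hedged usage sketch: assumes a spotipy client authorised with the
# user-library-read scope.
from spotipy import Spotify
from spotipy.oauth2 import SpotifyOAuth

sp = Spotify(auth_manager=SpotifyOAuth(scope="user-library-read"))
albums = get_saved_albums(sp)
print(len(albums), albums[0]["album"]["name"] if albums else None)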
| 5,343,397
|
async def test_deprecated_run_migrations_dry_run(
migrations, postgresql_conn_factory, postgresql_db
):
"""If running in dry run mode, pending migration(s) won't be committed. Useful for
testing that migrations are error-free"""
await run_migrations(
migrations["postgresql_a"], postgresql_db, dry_run=True, force_close_conn=False
)
tables = await postgresql_db.fetch(
"SELECT table_name FROM information_schema.tables"
" WHERE table_schema='public' AND table_type='BASE TABLE';"
)
assert len(tables) == 1 # applied_migration expected
assert tables[0]["table_name"] == "applied_migration"
| 5,343,398
|
def createMergerCatalog(hd_obj, obj_conditions, cosmo, time_since_merger=1):
"""
    Function to create a Major Merger (MM) catalog
    @hd_obj :: catalogue (header file) for the objects of interest
    @obj_conditions :: prior conditions that define the object sample
    @cosmo :: cosmology used in the notebook (flat Lambda-CDM)
    @time_since_merger :: time in Gyr; selects objects whose last major merger happened within the last x Gyr
"""
# converting the time since merger into scale factor
merger_z = z_at_value(cosmo.lookback_time, time_since_merger*u.Gyr)
merger_scale = 1/(1+merger_z)
# defining the merger condition
merger_condition = (hd_obj['HALO_scale_of_last_MM']>merger_scale)
downsample = obj_conditions & merger_condition
return hd_obj[downsample], downsample
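
# Hedged usage sketch: a tiny fake catalogue; z_at_value and u (astropy.units)
# are assumed to be imported at module level, as the function itself requires.
import numpy as np
from astropy.table import Table
from astropy.cosmology import FlatLambdaCDM

cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
hd_obj = Table({'HALO_scale_of_last_MM': np.array([0.99, 0.50, 0.95])})
conditions = np.ones(len(hd_obj), dtype=bool)
recent, mask = createMergerCatalog(hd_obj, conditions, cosmo, time_since_merger=1)
print(mask)   # True where the last major merger happened within ~1 Gyr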
| 5,343,399
|