content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def guess(key, values):
    """Return initial parameter guesses for fitting with this function class.

    :param key: sequence of x-values
    :param values: sequence of y-values
    :return: list ``[amplitude, width, offset]`` where amplitude is
        ``min(values) - max(values)``, width is a third of the x-span and
        offset is ``min(values)``.
    """
    lowest, highest = min(values), max(values)
    x_span = max(key) - min(key)
    return [lowest - highest, x_span / 3, lowest]
def add_metadata_values_to_record(record_message, schema_message):
    """Populate the ``_sdc_*`` metadata columns from an incoming record message.

    The locations of the required attributes are fixed in the stream, so each
    value is read straight off the two message dicts. The record dict inside
    ``record_message`` is mutated in place and returned.
    """
    extended = record_message['record']
    extended['_sdc_batched_at'] = datetime.now().isoformat()
    extended['_sdc_deleted_at'] = record_message.get('record', {}).get('_sdc_deleted_at')
    extended['_sdc_extracted_at'] = record_message.get('time_extracted')
    extended['_sdc_primary_key'] = schema_message.get('key_properties')
    extended['_sdc_received_at'] = datetime.now().isoformat()
    # Millisecond timestamp used as a monotonically increasing sequence hint.
    extended['_sdc_sequence'] = int(round(time.time() * 1000))
    extended['_sdc_table_version'] = record_message.get('version')
    return extended
def manipulate_reservation_action(request: HttpRequest, default_foreward_url: str):
    """
    Alter the reservation being built inside a cookie and craft the
    required redirect response.

    :param request: the incoming HTTP request
    :param default_foreward_url: redirect target used when no ``redirect``
        query parameter is present
    :return: an HttpResponseRedirect to the edit page or to an error page
    """
    u: Profile = get_current_user(request)
    forward_url: str = default_foreward_url
    # NOTE(review): the redirect target is taken verbatim from the query
    # string — confirm upstream validation if open redirects are a concern.
    if request.GET.get("redirect"):
        forward_url = request.GET["redirect"]
    if "srid" in request.GET:
        # Create (srid == 0) or update a sub-reservation.
        if not request.GET.get("rid"):
            return HttpResponseRedirect("/admin?error=missing%20primary%20reservation%20id")
        srid: int = int(request.GET["srid"])
        if srid == 0:
            sr: SubReservation = SubReservation()
        else:
            sr = SubReservation.objects.get(id=srid)
        if request.POST.get("notes"):
            sr.notes = escape(request.POST["notes"])
        else:
            sr.notes = " "
        sr.primary_reservation = GroupReservation.objects.get(id=int(request.GET["rid"]))
        sr.save()
        return HttpResponseRedirect("/admin/reservations/edit?rid=" + str(int(request.GET["rid"])) + "&srid=" + str(sr.id))
    if "rid" in request.GET:
        # update reservation
        r: GroupReservation = GroupReservation.objects.get(id=int(request.GET["rid"]))
    elif u.number_of_allowed_reservations > GroupReservation.objects.all().filter(createdByUser=u).count():
        # Create a fresh reservation if the user still has quota left.
        r = GroupReservation()
        r.createdByUser = u
        r.ready = False
        r.open = True
        r.pickupDate = datetime.datetime.now()
    else:
        return HttpResponseRedirect("/admin?error=Too%20Many%20reservations")
    if request.POST.get("notes"):
        r.notes = escape(request.POST["notes"])
    if request.POST.get("contact"):
        r.responsiblePerson = escape(str(request.POST["contact"]))
    # BUG FIX: the original tested `o.rights`, but `o` is undefined anywhere
    # in this function — the intended check is against the current user.
    if (r.createdByUser == u or u.rights > 1) and not r.submitted:
        r.save()
    else:
        return HttpResponseRedirect("/admin?error=noyb")
    response: HttpResponseRedirect = HttpResponseRedirect(forward_url + "?rid=" + str(r.id))
    return response
def get_Y_feed(X, datapath=None, upsample=False):
    """
    Generator yielding the Y (label) vector matching each X vector.

    X is a list of: [(reversed, fv), (reversed, fv), ...]
    Each Y vector is a numpy array of the form:
    [A_betrays_B_in_one_season, A_betrays_B_in_two_seasons, A_betrays_B_in_three_seasons, B_betrays_A_in_one_season, B_betrays_A_in_two_seasons, B_betrays_A_in_three_seasons]

    IMPORTANT:
    As soon as someone betrays the other, the odds collapse to 1 and 0, so
    that if A betrays B in two seasons the vector looks like:
    [0, 1, 1,
     0, 0, 0]

    When upsample is True, duplicate betrayal datapoints would be appended to
    address class imbalance (shuffle after collection).
    UPSAMPLE IS NOT IMPLEMENTED FOR THIS FUNCTION YET.
    """
    for rel_index, relationship in enumerate(get_all_sequences(datapath)):
        for trigram in relationship.get_season_trigrams():
            yield _get_label_from_trigram(
                trigram, relationship, relationship.betrayal, X[rel_index][0])
def _decompose(number):
    """Yield the digits of `number` in the base alphabet, least significant
    first.

    Because A maps to 1 rather than 0 in the base alphabet, each step works
    on ``number - 1`` so the proper digit is extracted.
    """
    while number:
        number -= 1
        yield number % ALPHABET_SIZE
        number //= ALPHABET_SIZE
def _dense_to_sparse(data):
    """Convert a dense numpy array into an equivalent tf.SparseTensor."""
    nonzero = np.where(data)
    sparse_indices = np.stack(nonzero, axis=-1)
    return tf.SparseTensor(sparse_indices, data[nonzero], dense_shape=data.shape)
def wait_for_block(network: vegaNetwork, height: int = 1) -> None:
    """Wait until every validator's public API serves the block at `height`.

    Polls each validator up to RETRIES_AMOUNT times with a 0.5 s pause
    between attempts; gives up silently on a validator after that.
    """
    for validator_id in range(network.validators_count()):
        host, public_port, private_port = network.api_address(validator_id)
        api = vegaClient(host, public_port, private_port)
        for _ in range(RETRIES_AMOUNT):
            response = api.public_api.get_block(height)
            if response.status_code == 200:
                break
            time.sleep(0.5)
def update_migration_index(db: typing.Any, index: int) -> None:
    """
    Set the database's migration index to the given index.

    The UPDATE only matches rows whose stored index is smaller, so the
    migration index can only ever be increased; otherwise the rowcount
    check below fails with an assertion error.

    :param db: the database
    :param index: the new migration index
    """
    if index == 0:
        # migration 0 creates the migration_index table and sets it to 0
        return
    result = db.session.execute(
        """
        UPDATE migration_index
        SET migration_index = :index
        WHERE migration_index < :index
        """,
        {"index": index}
    )
    assert result.rowcount == 1
def subcommand2(args):
    """Entry point for the ``subcommand2`` CLI command; prints a banner."""
    print("This is subcommand2")
def getflookup(facetid):
    """
    Report whether a facet with this id has been saved to the facet_files
    table (truthiness of its stored ``graphdb`` value).

    NOTE(review): ``.get()`` raises DoesNotExist when no row matches —
    presumably callers only pass known ids; confirm before relying on the
    False branch.
    """
    graphdb_value = (
        FacetLookup.objects.all()
        .values_list('graphdb', flat=True)
        .get(id=facetid)
    )
    return bool(graphdb_value)
def numeric_to_string(year):
    """
    Convert a numeric year to a string: negative years become "<n>BC",
    zero and positive years become "<n>AD".

    :param year: the year as a number (negative means BC)
    :return: e.g. ``-44 -> "44BC"``, ``2020 -> "2020AD"``
    """
    # The original ended in an unreachable `else: raise` — a bare raise with
    # no active exception would itself be a RuntimeError. The two branches
    # below already cover every numeric input.
    if year < 0:
        return "{}BC".format(-year)
    return "{}AD".format(year)
def code_server(HandlerClass, ServerClass=ThreadingHTTPServer, protocol="HTTP/1.0", port=8000, bind="", ssl=False):
    """
    Run an HTTP server on port 8000 (or the port argument), optionally
    wrapping the socket in SSL. If the SSL handshake setup fails the server
    falls back to plain HTTP and prints a hint for generating a self-signed
    certificate. Serves until KeyboardInterrupt.
    """
    HandlerClass.protocol_version = protocol
    with ServerClass((bind, port), HandlerClass) as httpd:
        sock_addr = httpd.socket.getsockname()
        if ssl:
            try:
                httpd.socket = modssl.wrap_socket(
                    httpd.socket, keyfile='key.pem', certfile='server.pem',
                    server_side=True)
            except Exception as exc:
                print("can't start ssl", exc)
                print("maybe 'openssl req -new -x509 -keyout key.pem -out server.pem -days 3650 -nodes'")
                # Fall back to plain HTTP below.
                ssl = False
        if ssl:
            serve_message = "Serving HTTPS on {host} port {port} (https://{host}:{port}/) ..."
        else:
            serve_message = "Serving HTTP on {host} port {port} (http://{host}:{port}/) ..."
        print(serve_message.format(host=sock_addr[0], port=sock_addr[1]))
        try:
            httpd.serve_forever()
        except KeyboardInterrupt:
            print("\nKeyboard interrupt received, exiting.")
            sys.exit(0)
def validate_arguments(
    expected_types: Mapping[str, QueryArgumentGraphQLType], arguments: Mapping[str, Any]
) -> None:
    """Ensure that all arguments are provided and have the expected types.

    Backends are the database languages we can compile to (OrientDB MATCH,
    Gremlin, SQLAlchemy, ...). This validation must be stricter than any
    specific backend's so that code passing it compiles everywhere.

    Args:
        expected_types: mapping of argument names to the expected GraphQL
            types, with all GraphQLNonNull wrappers stripped.
        arguments: mapping of argument names to argument values.
    """
    ensure_arguments_are_provided(expected_types, arguments)
    for name, expected_type in expected_types.items():
        validate_argument_type(name, expected_type, arguments[name])
def _get_num_ve_sve_and_max_num_cells(cell_fracs):
"""
Calculate the num_ve, num_sve and max_num_cells
Parameters
----------
cell_fracs : structured array, optional
A sorted, one dimensional array,
each entry containing the following fields:
:idx: int
The volume element index.
:cell: int
The geometry cell number.
:vol_frac: float
The volume fraction of the cell withing the mesh ve.
:rel_error: float
The relative error associated with the volume fraction.
Returns
-------
num_ve : int
Number of the total voxels
num_sve : int
Number of the total subvoxels, eqaul to or greater than num_ve
max_num_cells : int
Max number of cells (subvoxels) in a voxel
"""
num_sve = len(cell_fracs)
num_ve = len(set(cell_fracs["idx"]))
max_num_cells = -1
for i in range(num_sve):
max_num_cells = max(max_num_cells, len(cell_fracs[cell_fracs["idx"] == i]))
return num_ve, num_sve, max_num_cells | 5,329,113 |
def is_void(at):
    """Return True when ``at`` is an ``adatetime`` with every unit attribute
    set to None.

    A concrete ``datetime`` is never void.
    """
    if isinstance(at, datetime):
        return False
    for attr in adatetime.units:
        if getattr(at, attr) is not None:
            return False
    return True
def _get_metric_fn(params):
    """Build the metric fn used by ``model.compile``.

    Args:
        params: dict providing ``batch_size``, ``num_neg``, ``match_mlperf``
            and ``use_xla_for_gpu``.

    Returns:
        A Keras-compatible metric function that reports the in_top_k metric
        (zeros while in the training phase).
    """
    batch_size = params["batch_size"]

    def metric_fn(y_true, y_pred):
        """Returns the in_top_k metric."""
        softmax_logits = y_pred
        logits = tf.slice(softmax_logits, [0, 1], [batch_size, 1])
        # The dup mask should be obtained from input data, but we did not yet find
        # a good way of getting it with keras, so we set it to zeros to neglect the
        # repetition correction
        dup_mask = tf.zeros([batch_size, 1])
        # Only in_top_k is used; the other outputs are unpacked into
        # underscore-prefixed names. (The original rebound the name
        # `metric_fn` here, shadowing the function being defined.)
        _cross_entropy, _metric_fn, in_top_k, _ndcg, _metric_weights = (
            neumf_model.compute_eval_loss_and_metrics_helper(
                logits,
                softmax_logits,
                dup_mask,
                params["num_neg"],
                params["match_mlperf"],
                params["use_xla_for_gpu"]))
        # Report zeros during training; the metric is only meaningful in eval.
        in_top_k = tf.cond(
            tf.keras.backend.learning_phase(),
            lambda: tf.zeros(shape=in_top_k.shape, dtype=in_top_k.dtype),
            lambda: in_top_k)
        return in_top_k

    return metric_fn
def test_subclass_nt_to_phenotypic_feature(rosetta, eg):
    """named_thing -> Phenotype"""
    hp_node = KNode('HP:0001874', type=node_types.NAMED_THING)
    rosetta.synonymizer.synonymize(hp_node)
    leaf_type = eg.get_leaf_type(hp_node, node_types.NAMED_THING)[0]
    assert leaf_type == node_types.PHENOTYPIC_FEATURE
def mark_contention_and_waiters(all_lock_periods):
    """
    thread 1 -> ------ lock ------ locked ------ unlocked ------
    thread 2 -> ------------- lock ---------------------- locked
    thread 3 -> lock ------------------------------------ ...... locked
    => contention means A thread <locked> after B thread <unlocked> and A's <lock> happens before B's <unlocked>
    """
    # Pass 1: for every period, record the releasing thread whose unlock of
    # the same lock fell inside this period's wait window, keeping the
    # earliest qualifying unlock time.
    for releaser in all_lock_periods:
        for waiter in all_lock_periods:
            if releaser.tid == waiter.tid or releaser.lock != waiter.lock:
                continue
            if waiter.lock_time <= releaser.unlocked_time <= waiter.locked_time:
                if releaser.unlocked_time < waiter.contention_time or waiter.contention_time == 0:
                    waiter.contention = releaser.tid
                    waiter.contention_time = releaser.unlocked_time
    # Pass 2: how often a tid appears as a contention source equals the
    # number of waiters it caused.
    counts = {}
    for period in all_lock_periods:
        if period.contention is not None:
            counts[period.contention] = counts.get(period.contention, 0) + 1
    for period in all_lock_periods:
        if period.contention is not None:
            period.waiters = counts[period.contention]
def check():
    """Check if all required modules are present.

    Attempts to import every package named in ``import_list`` and logs each
    one that cannot be imported.

    Returns 0 on success, 1 if any module is missing.
    """
    import importlib

    missing = False
    for package in import_list:
        try:
            # importlib.import_module replaces the original
            # exec("import " + package), avoiding exec on a built string.
            importlib.import_module(package)
        except Exception:
            log.error("Missing module: %s", package)
            missing = True
    return 1 if missing else 0
def flip_nums(text):
    """Move a leading number to the end of the string
    (so ``2019_est`` -> ``est_2019``).

    Strings without a leading number, and purely numeric strings, are
    returned unchanged. (The original indexed past the end of the string for
    all-numeric input, raising IndexError, and appended a stray ``_`` to
    inputs with no leading number.)
    """
    if not text:
        return ''
    digits = 0
    while digits < len(text) and text[digits].isnumeric():
        digits += 1
    if digits == 0 or digits == len(text):
        # Nothing to flip.
        return text
    rest = text[digits:]
    # Drop the separating underscore between the number and the rest, if any.
    if rest.startswith('_'):
        rest = rest[1:]
    return rest + '_' + text[:digits]
def nnls(A, b, k=None, maxiter=None):
    """
    Solve the least-squares problem ``A @ x = b`` subject to the
    nonnegativity constraints ``x[:k] >= 0``.

    Parameters
    ----------
    A : array_like, shape (m, n)
        Coefficient matrix `A`.
    b : array_like, shape (m,)
        Right-hand side vector `b`.
    k : int, optional
        Number of nonnegativity constraints. The first `k` components of the
        solution vector are nonnegative (the default is ``A.shape[1]``).
    maxiter : int, optional
        Maximum number of inner iterations (the default is ``3 * A.shape[1]``).

    Returns
    -------
    x : numpy.ndarray, shape (n,)
        Solution vector.

    See Also
    --------
    bvtcg : Bounded variable truncated conjugate gradient
    cpqp : Convex piecewise quadratic programming
    lctcg : Linear constrained truncated conjugate gradient

    Notes
    -----
    The method is adapted from the NNLS algorithm [1]_.

    References
    ----------
    .. [1] C. L. Lawson and R. J. Hanson. Solving Least Squares Problems.
       Classics Appl. Math. Philadelphia, PA, US: SIAM, 1974.
    """
    # Promote integer inputs to float; A is made Fortran-ordered for the
    # compiled solver.
    A = np.atleast_2d(A)
    if A.dtype.kind in np.typecodes['AllInteger']:
        A = np.asarray(A, dtype=float)
    A = np.asfortranarray(A)
    b = np.atleast_1d(b)
    if b.dtype.kind in np.typecodes['AllInteger']:
        b = np.asarray(b, dtype=float)

    # Resolve defaults and validate the constraint count.
    n = A.shape[1]
    k = n if k is None else k
    if not 0 <= k <= n:
        raise ValueError('Number of nonnegative constraints is invalid')
    maxiter = 3 * n if maxiter is None else maxiter

    # Check the sizes of the inputs.
    assert_(A.ndim == 2)
    assert_(b.ndim == 1)
    assert_(A.shape[0] == b.size)

    x = _nnls(A, b, k, maxiter)  # noqa
    return np.array(x, dtype=float)
def send_new_submission_message(submission):
    """
    Send an embed to the configured Discord webhook describing a new Reddit
    submission.
    """
    # Escape any formatting characters in the title since Discord would
    # otherwise apply them in the embed.
    title = discord.escape_formatting(submission.title)
    if len(title) > 256:
        # Discord caps embed titles at 256 chars; truncate with an ellipsis.
        title = title[:253] + '...'
    embed = {
        "title": title,
        "url": f"https://redd.it/{submission.id}",
        "author": {
            "name": f"/u/{submission.author.name}"
        },
        "timestamp": datetime.fromtimestamp(submission.created_utc, timezone.utc).isoformat(),
        "footer": {
            "text": f"{submission.id} | {submission.link_flair_text}"
        },
        "fields": [],
        "color": config.FLAIR_COLOR.get(submission.link_flair_text, 0)
    }
    # Link posts include a direct link to the thing submitted as well.
    if not submission.is_self:
        embed["description"] = submission.url
    # If they're posting social media/Youtube channel links grab extra info
    # for searching later.
    if submission.media is not None and submission.media.get("oembed"):
        author_url = submission.media["oembed"].get("author_url")
        if author_url:
            embed["fields"].append({
                "name": "Media Channel",
                "value": author_url
            })
    logger.debug(embed)
    discord.send_webhook_message({"embeds": [embed]}, channel_webhook_url=config.DISCORD["webhook_feed"])
def test_question_correct_index_set():
    """Setting ``correct_index`` marks the matching answer as correct."""
    question = exam2pdf.Question("Who are you?")
    first_answer = exam2pdf.Answer()
    second_answer = exam2pdf.Answer()
    question.add_answer(first_answer)
    question.add_answer(second_answer)
    question.correct_index = 1
    assert question.correct_answer == second_answer
    assert question.correct_index == 1
def read_inputs(filename, height, padding, num_quant_levels, p_norm,
                predict_semantics):
  """Reads inputs for scan completion.

  Reads input_sdf, target_df/sem (if any), previous predicted df/sem (if any).

  Args:
    filename: TFRecord containing input_sdf.
    height: height in voxels to be processed by model.
    padding: amount of padding (in voxels) around test scene (height is
      cropped by padding for processing).
    num_quant_levels: amount of quantization (if applicable).
    p_norm: which p-norm is used (0, 1, 2; 0 for none).
    predict_semantics: whether semantics is predicted.

  Returns:
    input scan: input_scan as np array.
    ground truth targets: target_scan/target_semantics as np arrays
      (target_scan is None and target_semantics zeros when the record has
      no targets).
    previous resolution predictions: prediction_scan_low_resolution /
      prediction_semantics_low_resolution as np arrays (if any).
  """
  # BUG FIX: initialize the optional targets so the `is not None` checks
  # below do not raise NameError for records without ground truth.
  target_scan = None
  target_semantics = None
  # Only the first record is processed; the function returns from inside
  # the loop.
  for record in tf.python_io.tf_record_iterator(filename):
    example = tf.train.Example()
    example.ParseFromString(record)
    feature_map = example.features
    # Input scan as sdf.
    input_scan = read_input_float_feature(feature_map, 'input_sdf', shape=None)
    (scene_dim_z, scene_dim_y, scene_dim_x) = input_scan.shape
    # Target scan as df.
    if 'target_df' in feature_map.feature:
      target_scan = read_input_float_feature(
          feature_map, 'target_df', [scene_dim_z, scene_dim_y, scene_dim_x])
    if 'target_sem' in feature_map.feature:
      target_semantics = read_input_bytes_feature(
          feature_map, 'target_sem', [scene_dim_z, scene_dim_y, scene_dim_x])
    # Adjust dimensions for model (clamp height, make even for voxel groups).
    height_y = min(height, scene_dim_y - padding)
    scene_dim_x = (scene_dim_x // 2) * 2
    scene_dim_y = (height_y // 2) * 2
    scene_dim_z = (scene_dim_z // 2) * 2
    input_scan = input_scan[:scene_dim_z, padding:padding + scene_dim_y, :
                            scene_dim_x]
    input_scan = util.preprocess_sdf(input_scan, constants.TRUNCATION)
    if target_scan is not None:
      target_scan = target_scan[:scene_dim_z, padding:padding + scene_dim_y, :
                                scene_dim_x]
      target_scan = util.preprocess_df(target_scan, constants.TRUNCATION)
    if target_semantics is not None:
      target_semantics = target_semantics[:scene_dim_z, padding:
                                          padding + scene_dim_y, :scene_dim_x]
      target_semantics = util.preprocess_target_sem(target_semantics)
    # Default values for previous resolution inputs.
    prediction_scan_low_resolution = np.zeros(
        [scene_dim_z // 2, scene_dim_y // 2, scene_dim_x // 2, 2])
    prediction_semantics_low_resolution = np.zeros(
        [scene_dim_z // 2, scene_dim_y // 2, scene_dim_x // 2], dtype=np.uint8)
    if target_semantics is None:
      target_semantics = np.zeros([scene_dim_z, scene_dim_y, scene_dim_x])
    # Load previous level prediction.
    if not FLAGS.is_base_level:
      previous_file = os.path.join(
          FLAGS.output_dir_prev, 'level' + str(FLAGS.hierarchy_level - 1) + '_' +
          os.path.splitext(os.path.basename(filename))[0] + 'pred.tfrecord')
      # (typo fix: "frome" -> "from")
      tf.logging.info('Reading previous predictions from file: %s',
                      previous_file)
      assert os.path.isfile(previous_file)
      for prev_record in tf.python_io.tf_record_iterator(previous_file):
        prev_example = tf.train.Example()
        prev_example.ParseFromString(prev_record)
        prev_feature_map = prev_example.features
        prediction_scan_low_resolution = read_input_float_feature(
            prev_feature_map, 'prediction_df', None)
        (prev_scene_dim_z, prev_scene_dim_y,
         prev_scene_dim_x) = prediction_scan_low_resolution.shape
        # Center-crop the previous prediction to half the current scene size.
        offset_z = (prev_scene_dim_z - scene_dim_z // 2) // 2
        offset_x = (prev_scene_dim_x - scene_dim_x // 2) // 2
        prediction_scan_low_resolution = prediction_scan_low_resolution[
            offset_z:offset_z + scene_dim_z // 2, :scene_dim_y // 2, offset_x:
            offset_x + scene_dim_x // 2]
        prediction_scan_low_resolution = util.preprocess_target_sdf(
            prediction_scan_low_resolution, num_quant_levels,
            constants.TRUNCATION, p_norm == 0)
        if predict_semantics:
          prediction_semantics_low_resolution = read_input_bytes_feature(
              prev_feature_map, 'prediction_sem',
              [prev_scene_dim_z, prev_scene_dim_y, prev_scene_dim_x])
          prediction_semantics_low_resolution = prediction_semantics_low_resolution[
              offset_z:offset_z + scene_dim_z // 2, :scene_dim_y // 2, offset_x:
              offset_x + scene_dim_x // 2]
    return (input_scan, target_scan, target_semantics,
            prediction_scan_low_resolution, prediction_semantics_low_resolution)
def f_elas_linear_tsswlc(x, t3, t2, e_b, gam, e_par, e_perp, eta):
    """Compute the elastic spring forces and torques on each bead of a
    discrete stretchable-shearable wormlike chain (dsswlc).

    Parameters follow the standard dsswlc parameterization: bend modulus
    ``e_b``, preferred segment length ``gam``, parallel/perpendicular
    stretch moduli ``e_par``/``e_perp``, and bend-shear coupling ``eta``.
    ``t2`` is currently unused. Returns ``(f, t)`` with one force and one
    torque vector per bead.
    """
    num_beads = x.shape[0]
    forces = np.zeros(x.shape)
    torques = np.zeros(x.shape)
    for j in range(num_beads - 1):
        # Bond vector split into components parallel/perpendicular to t3[j].
        bond = x[j + 1] - x[j]
        stretch_par = bond @ t3[j]
        stretch_perp = bond - stretch_par * t3[j]
        alignment = t3[j + 1] @ t3[j]
        bend = t3[j + 1] - alignment * t3[j] - eta * stretch_perp
        spring_force = (-eta * e_b * bend
                        + e_par * (stretch_par - gam) * t3[j]
                        + e_perp * stretch_perp)
        forces[j] += spring_force
        forces[j + 1] -= spring_force
        bend = (t3[j + 1] - t3[j]) - eta * stretch_perp
        torques[j] += (e_b * bend - eta * e_b * stretch_par * bend
                       + eta * e_b * (1 - alignment) * bond
                       - e_par * (stretch_par - gam) * bond
                       + e_perp * stretch_par * stretch_perp)
        torques[j + 1] -= e_b * bend
        # TODO: implement extra torque due to orientation differences
    return forces, torques
def extent2(texture):
    """Return the extent of the image data (0.0-1.0, 0.0-1.0) inside its
    texture owner.

    Textures have power-of-two sizes (512, 1024, ...), but the actual image
    can be smaller. For example a 400x250 image loaded into a 512x256
    texture has extent (0.78, 0.98); the remainder is transparent.
    """
    coords = texture.tex_coords
    return coords[3], coords[7]
def clean_all_annotations_in_directory(
        input_directory,
        output_directory=None,
        check_misnomers=True,
        verbose=True):
    """
    Clean every annotation file in a directory, saving invalid annotations
    to their own file so they can be checked and fixed.

    Invalid annotations are written to a file with "_rejected" inserted just
    before the extension, e.g. "foo.bar-AB.txt" --> "foo.bar-AB_rejected.txt".

    Parameters
    input_directory : str
        path to the directory containing annotation files to clean
    output_directory : str (defaults to match input_directory)
        path to output valid and invalid annotation files
    check_misnomers : bool, optional (default: True)
        flag to control whether to warn about potential filename mistakes
    verbose : bool, optional (default: True)
        flag to control whether debug information is printed
    """
    if output_directory is None:
        output_directory = input_directory
    os.makedirs(output_directory, exist_ok=True)
    annotation_paths = get_all_annotations_in_directory(
        input_directory,
        check_misnomers=check_misnomers
    )
    for annot_path in annotation_paths:
        valid, invalid = clean_annotations(
            read_annotations(annot_path), verbose=verbose)
        base_name = path.basename(annot_path)
        if len(valid) > 0:
            save_annotations(
                valid,
                path.join(output_directory, base_name),
                verbose=verbose
            )
        if len(invalid) > 0:
            stem, ext = path.splitext(base_name)
            save_annotations(
                invalid,
                path.join(output_directory, "{}_rejected{}".format(stem, ext)),
                verbose=verbose
            )
def calculate_handlen(hand):
    """
    Returns the length (number of letters) in the current hand.

    hand: dictionary (string-> int) mapping each letter to how many times
        it appears in the hand
    returns: integer
    """
    # Each value is the count of one letter, so the hand length is simply
    # the sum of all counts (replaces the original manual accumulator loop).
    return sum(hand.values())
def add_fieldmap(fieldmap: BIDSFile, layout: BIDSLayout) -> dict:
    """
    Locate the fieldmap's sidecar json and return both paths keyed by the
    fieldmap's phase-encoding direction.

    Parameters
    ----------
    fieldmap : BIDSFile
        Fieldmap's NIfTI
    layout : BIDSLayout
        BIDSLayout instance for the queried bids directory.

    Returns
    -------
    dict
        Dictionary of fieldmap's NIfTI and json with appropriate keys.
    """
    entities = fieldmap.get_entities()
    entities.pop("fmap")
    direction = entities.get("direction")
    # Re-query the layout for the matching json sidecar.
    entities["extension"] = "json"
    sidecars = layout.get(**entities)
    result = {f"fmap_{direction}": fieldmap.path}
    if sidecars:
        result[f"fmap_{direction}_json"] = sidecars[0].path
    return result
def fit_plane_lstsq(XYZ):
    """
    Fit a plane z = a.x + b.y + c to a point cloud via least squares.
    Rearranging: a.x + b.y - z + c = 0.

    @type XYZ: np.ndarray
    @param XYZ: (n, 3) array of points
    @rtype: np.array
    @return: normalized normal vector of the plane in the form C{(a,b,-1)}
    """
    [rows, cols] = XYZ.shape
    # Design matrix [x, y, 1]; solve G @ (a, b, c) = z.
    G = np.ones((rows, 3))
    G[:, 0] = XYZ[:, 0]  # X
    G[:, 1] = XYZ[:, 1]  # Y
    Z = XYZ[:, 2]
    # rcond=None opts into the current (non-deprecated) singular-value
    # cutoff; omitting it triggers a FutureWarning on modern numpy.
    (a, b, c), resid, rank, s = np.linalg.lstsq(G, Z, rcond=None)
    normal = (a, b, -1)
    nn = np.linalg.norm(normal)
    normal = normal / nn
    return normal
def setup_logging(default_path='logging.json',
                  default_level=logging.INFO, env_key='LOG_CFG'):
    """Setup logging configuration.

    Loads a dictConfig from a JSON file (path overridable via the env var
    named by ``env_key``); falls back to basicConfig when the file is
    missing. Always attaches a UDP DatagramHandler to the root logger.
    """
    path = os.getenv(env_key) or default_path
    if os.path.exists(path):
        with open(path, 'rt') as cfg_file:
            logging.config.dictConfig(json.load(cfg_file))
    else:
        logging.basicConfig(level=default_level)
    udp_handler = logging.handlers.DatagramHandler(
        'localhost', logging.handlers.DEFAULT_UDP_LOGGING_PORT)
    logging.getLogger('').addHandler(udp_handler)
def clean_name(name: str) -> str:
    """Normalize a name: collapse runs of whitespace and apply title casing.

    Args:
        name: the name to be cleaned

    Returns:
        str: the cleaned name
    """
    # split() with no args drops leading/trailing and repeated whitespace.
    collapsed = " ".join(name.split())
    return str(titlecase.titlecase(collapsed))
def pull_list(buf: Buffer, capacity: int, func: Callable[[], T]) -> List[T]:
    """
    Pull a list of items by repeatedly invoking ``func`` until the
    length-prefixed block is exhausted.
    """
    result: List[T] = []
    with pull_block(buf, capacity) as length:
        stop = buf.tell() + length
        while buf.tell() < stop:
            result.append(func())
    return result
def ResNet(
        stack_fn, preact, use_bias, model_name='resnet', include_top=True, weights='imagenet',
        input_tensor=None, input_shape=None, pooling=None, classes=1000,
        classifier_activation='softmax', bottomright_maxpool_test=False,
        use_group_norm=False, **kwargs):
    """Instantiates the ResNet, ResNetV2, and ResNeXt architecture.

    Reference:
    - [Deep Residual Learning for Image Recognition](
        https://arxiv.org/abs/1512.03385) (CVPR 2015)

    Optionally loads weights pre-trained on ImageNet.
    Note that the data format convention used by the model is
    the one specified in your Keras config at `~/.keras/keras.json`.

    Arguments:
        stack_fn: a function that returns output tensor for the
            stacked residual blocks.
        preact: whether to use pre-activation or not
            (True for ResNetV2, False for ResNet and ResNeXt).
        use_bias: whether to use biases for convolutional layers or not
            (True for ResNet and ResNetV2, False for ResNeXt).
        model_name: string, model name.
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization),
            'imagenet' (pre-training on ImageNet),
            or the path to the weights file to be loaded.
        input_tensor: optional Keras tensor
            (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(224, 224, 3)` (with `channels_last` data format)
            or `(3, 224, 224)` (with `channels_first` data format).
            It should have exactly 3 inputs channels.
        pooling: optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.
        classifier_activation: A `str` or callable. The activation function to use
            on the "top" layer. Ignored unless `include_top=True`. Set
            `classifier_activation=None` to return the logits of the "top" layer.
        bottomright_maxpool_test: if True, use bottom/right-only padding before
            the stem max-pool at test time (via TrainTestSwitchLayer).
        use_group_norm: if True, use GroupNormalization instead of
            BatchNormalization; '_groupnorm' is appended to the model name.
        **kwargs: For backwards compatibility only.

    Returns:
        A `keras.Model` instance.

    Raises:
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
        ValueError: if `classifier_activation` is not `softmax` or `None` when
            using a pretrained top layer.
    """
    global layers
    if 'layers' in kwargs:
        layers = kwargs.pop('layers')
    else:
        layers = VersionAwareLayers()
    if kwargs:
        raise ValueError('Unknown argument(s): %s' % (kwargs,))
    if not (weights in {'imagenet', None} or file_io.file_exists_v2(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), `imagenet` '
                         '(pre-training on ImageNet), '
                         'or the path to the weights file to be loaded.')
    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
                         ' as true, `classes` should be 1000')
    # Determine proper input shape
    input_shape = imagenet_utils.obtain_input_shape(
        input_shape, default_size=224, min_size=32, data_format=backend.image_data_format(),
        require_flatten=include_top, weights=weights)
    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
    x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)), name='conv1_pad')(img_input)
    # The conv bias is dropped whenever GroupNorm follows (it has its own beta).
    x = layers.Conv2D(
        64, 7, strides=2, use_bias=use_bias and not use_group_norm, name='conv1_conv')(x)
    if use_group_norm:
        def norm_layer(name):
            return tfa.layers.GroupNormalization(epsilon=batchnorm_epsilon, name=name)
    else:
        def norm_layer(name):
            return layers.BatchNormalization(
                axis=bn_axis, epsilon=batchnorm_epsilon, momentum=batchnorm_momentum,
                name=name)
    if not preact:
        x = norm_layer(name='conv1_gn' if use_group_norm else 'conv1_bn')(x)
        x = layers.Activation('relu', name='conv1_relu')(x)
    padding_layer = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name='pool1_pad')
    if bottomright_maxpool_test:
        # At test time pad only bottom/right so the pool windows shift.
        padding_test = layers.ZeroPadding2D(padding=((0, 2), (0, 2)), name='pool1_pad')
        padding_layer = TrainTestSwitchLayer(padding_layer, padding_test)
    x = padding_layer(x)
    x = layers.MaxPooling2D(3, strides=2, name='pool1_pool')(x)
    x = stack_fn(x)
    if preact:
        x = norm_layer(name='post_gn' if use_group_norm else 'post_bn')(x)
        x = layers.Activation('relu', name='post_relu')(x)
    if include_top:
        x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
        imagenet_utils.validate_activation(classifier_activation, weights)
        x = layers.Dense(classes, activation=classifier_activation, name='predictions')(x)
    else:
        if pooling == 'avg':
            x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
        elif pooling == 'max':
            x = layers.GlobalMaxPooling2D(name='max_pool')(x)
    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = layer_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    if use_group_norm:
        model_name = model_name + '_groupnorm'
    model = training.Model(inputs, x, name=model_name)
    # Load weights.
    if (weights == 'imagenet') and (model_name in WEIGHTS_HASHES):
        # FIX: plain string literals (the original used f-strings with no
        # placeholders, which is misleading and flagged by linters).
        if include_top:
            file_name = model_name + '_weights_tf_dim_ordering_tf_kernels.h5'
            file_hash = WEIGHTS_HASHES[model_name][0]
        else:
            file_name = model_name + '_weights_tf_dim_ordering_tf_kernels_notop.h5'
            file_hash = WEIGHTS_HASHES[model_name][1]
        weights_path = data_utils.get_file(
            file_name, BASE_WEIGHTS_PATH + file_name, cache_subdir='models', file_hash=file_hash)
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)
    return model
def read_yaml(yaml_path):
    """
    Read a (possibly multi-document) YAML file into a single dict.

    Keys from later documents override those from earlier ones.

    :param yaml_path: path to the YAML file
    :return: dict merging all documents' top-level mappings
    """
    result = dict()
    # `with` closes the handle (the original leaked it), and safe_load_all
    # avoids arbitrary object construction; `yaml.load_all` without a Loader
    # is also a TypeError on PyYAML >= 6.
    # NOTE(review): if these files legitimately use custom tags, switch to
    # yaml.full_load_all.
    with open(yaml_path, "r") as stream:
        for doc in yaml.safe_load_all(stream):
            for k, v in doc.items():
                result[k] = v
    return result
def _server_allow_run_on_save() -> bool:
"""Allows users to automatically rerun when app is updated.
Default: true
"""
return True | 5,329,135 |
def _strip_schema(url):
"""Returns the url without the s3:// part"""
result = urlparse(url)
return result.netloc + result.path | 5,329,136 |
def simple_word_tokenize(text, _split=GROUPING_SPACE_REGEX.split):
    """
    Split ``text`` into tokens without splitting on hyphens.
    Punctuation is preserved; pure-whitespace tokens are dropped.
    """
    tokens = []
    for token in _split(text):
        if token and not token.isspace():
            tokens.append(token)
    return tokens
def merge_strategy(media_identifier, target_site, sdc_data, strategy):
    """
    Check if the file already holds Structured Data, if so resolve what to do.

    May mutate ``sdc_data`` in place (strategy "Add" drops conflicting
    entries before upload).

    @param media_identifier: Mid of the file
    @param target_site: pywikibot.Site object to which file should be uploaded
    @param sdc_data: internally formatted Structured Data in json format
    @param strategy: Strategy used for merging uploaded data with pre-existing
        data. Allowed values are None, "New", "Blind", "Add" and "Nuke".
    @return: dict of pids and caption languages removed from sdc_data due to
        conflicts.
    @raises: ValueError, SdcException
    """
    prior_data = _get_existing_structured_data(media_identifier, target_site)
    if not prior_data:
        # even unknown strategies should pass if there is no prior data
        return
    if not strategy:
        # No strategy given: any pre-existing data blocks the upload.
        raise SdcException(
            'warning', 'pre-existing sdc-data',
            ('Found pre-existing SDC data, no new data will be added. '
             'Found data: {}'.format(prior_data))
        )
    strategy = strategy.lower()
    if strategy in ('new', 'add'):
        # Property ids and caption languages already present on the file.
        pre_pids = prior_data['statements'].keys()
        pre_langs = prior_data['labels'].keys()
        new_langs = sdc_data.get('caption', dict()).keys()
        if strategy == 'add':
            # "Add": drop the conflicting parts of sdc_data, upload the rest.
            pid_clash = set(pre_pids).intersection(sdc_data.keys())
            lang_clash = set(pre_langs).intersection(new_langs)
            for pid in pid_clash:
                sdc_data.pop(pid, None)
            for lang in lang_clash:
                sdc_data['caption'].pop(lang, None)
            if (not any(is_prop_key(key) for key in sdc_data.keys())
                    and not sdc_data.get('caption')):
                # warn if no data left to upload
                raise SdcException(
                    'warning', 'all conflicting pre-existing sdc-data',
                    ('Found pre-existing SDC data, no new non-conflicting '
                     'data could be added. Found data: {}'.format(
                         prior_data))
                )
            elif pid_clash or lang_clash:
                # Report what was dropped so the caller can log it.
                return {'pids': pid_clash, 'langs': lang_clash}
        elif (not set(pre_pids).isdisjoint(sdc_data.keys())
                or not set(pre_langs).isdisjoint(new_langs)):
            # "New": any overlap with the pre-existing data aborts the upload.
            raise SdcException(
                'warning', 'conflicting pre-existing sdc-data',
                ('Found pre-existing SDC data, no new data will be added. '
                 'Found data: {}'.format(prior_data))
            )
    elif strategy not in STRATEGIES:
        raise ValueError(
            'The `strategy` parameter must be None, "{0}" or "{1}" '
            'but "{2}" was provided'.format(
                '", "'.join([s.capitalize() for s in STRATEGIES[:-1]]),
                STRATEGIES[-1].capitalize(),
                strategy.capitalize()))
    # pass if strategy is "Blind" or "Nuke"
def convert_translations_to_dict(js_translations):
    """Convert a GNUTranslations object into a dict for jsonifying.

    Args:
        js_translations: GNUTranslations object to be converted.
    Returns:
        A dictionary with keys 'plural' (plural-forms expression),
        'catalog' (msgid -> translation, plural msgids mapped to a list of
        n_plural strings) and 'fallback' (recursively converted fallback
        translations, or None).
    """
    plural, n_plural = _get_plural_forms(js_translations)
    translations_dict = {'plural': plural, 'catalog': {}, 'fallback': None}
    if js_translations._fallback is not None:
        translations_dict['fallback'] = convert_translations_to_dict(
            js_translations._fallback
        )
    for key, value in js_translations._catalog.items():
        if key == '':
            # The empty msgid holds gettext metadata, not a translation.
            continue
        # Bug fix: ``basestring`` is Python 2 only (NameError on Python 3);
        # ``str`` is the correct check for a plain (non-plural) message.
        if isinstance(key, str):
            translations_dict['catalog'][key] = value
        elif isinstance(key, tuple):
            # Plural message: key is (msgid, plural_index).
            if key[0] not in translations_dict['catalog']:
                translations_dict['catalog'][key[0]] = [''] * n_plural
            translations_dict['catalog'][key[0]][int(key[1])] = value
    return translations_dict
def reduce(coro, iterable, initializer=None, limit=1, right=False, loop=None):
    """
    Apply function of two arguments cumulatively to the items of sequence,
    from left to right, so as to reduce the sequence to a single value.
    Reduction will be executed sequentially without concurrency,
    so passed values would be in order.
    This function is the asynchronous coroutine equivalent to Python standard
    `functools.reduce()` function.
    This function is a coroutine.
    This function can be composed in a pipeline chain with ``|`` operator.
    Arguments:
        coro (coroutine function): reducer coroutine binary function.
        iterable (iterable|asynchronousiterable): an iterable collection
            yielding coroutines functions.
            NOTE(review): ``len(iterable)`` and ``iterable.reverse()`` below
            require a sized, mutable sequence — true async iterables do not
            appear to be supported despite this docstring; confirm.
        initializer (mixed): initial accumulator value used in
            the first reduction call.
        limit (int): max iteration concurrency limit. Use ``0`` for no limit.
        right (bool): reduce iterable from right to left.
        loop (asyncio.BaseEventLoop): optional event loop to use.
    Raises:
        TypeError: if input arguments are not valid.
    Returns:
        mixed: accumulated final reduced value.
    Usage::
        async def reducer(acc, num):
            return acc + num
        await paco.reduce(reducer, [1, 2, 3, 4, 5], initializer=0)
        # => 15
    """
    assert_corofunction(coro=coro)
    assert_iter(iterable=iterable)
    # Reduced accumulator value
    acc = initializer
    # If interable is empty, just return the initializer value
    if len(iterable) == 0:
        return initializer
    # Create concurrent executor
    pool = ConcurrentExecutor(limit=limit, loop=loop)
    # Reducer partial function for deferred coroutine execution
    def reducer(element):
        # Wrap each element in a generator-based coroutine that folds it
        # into the shared accumulator when the executor runs it.
        @asyncio.coroutine
        def wrapper():
            nonlocal acc
            acc = yield from coro(acc, element)
        return wrapper
    # Support right reduction
    if right:
        iterable.reverse()
    # Iterate and attach coroutine for defer scheduling
    for element in iterable:
        pool.add(reducer(element))
    # Wait until all coroutines finish
    yield from pool.run(ignore_empty=True)
    # Returns final reduced value
    return acc
def validate_JSON_dict(value):
    """Validate that *value* JSON-decodes to a Python dict.

    :param value: JSON string to check.
    :raises ValidationError: if *value* is not valid JSON, or decodes to
        something other than a dict.
    """
    # Keep the try body minimal: only the decode can raise JSONDecodeError.
    try:
        decoded = json.loads(value)
    except json.decoder.JSONDecodeError as e:
        raise ValidationError('%(value)s is not a valid JSON: %(e)s',
                              params={'value': value, 'e': e})
    # isinstance is the idiomatic type check (was ``type(d) is not dict``).
    if not isinstance(decoded, dict):
        raise ValidationError('%(value)s is not a JSON dictionary',
                              params={'value': value})
def sigm_temp(base_sim_param, assumptions, t_base_type):
    """Calculate base temperature depending on sigmoid diff and location

    Parameters
    ----------
    base_sim_param : dict
        Base simulation assumptions
    assumptions : dict
        Dictionary with assumptions
    t_base_type : str
        Key into ``assumptions`` selecting the base-temperature entry

    Return
    ------
    t_base_cy : float
        Base temperature of current year

    Note
    ----
    A sigmoid diffusion between the base-year and end-year base
    temperatures is evaluated at the current year. This allows modelling
    of changes e.g. in thermal comfort.
    """
    base_temp = assumptions[t_base_type]['base_yr']
    end_temp = assumptions[t_base_type]['end_yr']
    # Fraction of the total temperature change realised by the current year.
    frac_cy = diffusion_technologies.sigmoid_diffusion(
        base_sim_param['base_yr'],
        base_sim_param['curr_yr'],
        base_sim_param['end_yr'],
        assumptions['smart_meter_diff_params']['sig_midpoint'],
        assumptions['smart_meter_diff_params']['sig_steeppness']
    )
    return base_temp + (end_temp - base_temp) * frac_cy
def test_slc25a6(ncbi, slc25a6):
    """Test that SLC25A6 normalizes to correct gene concept."""
    # Exercise both lookup routes: concept ID first, then symbol.
    for query, match_type in (('NCBIgene:293', MatchType.CONCEPT_ID),
                              ('SLC25A6', MatchType.SYMBOL)):
        response = ncbi.search(query)
        assertion_checks(response, slc25a6, 1, match_type)
def _test_get_cell(vertex, cell):
"""
Test :meth:`.Vertex.get_cell`.
Parameters
----------
vertex : :class:`.Vertex`
The vertex to test.
cell : :class:`numpy.ndarray`
The correct cell.
Returns
-------
None : :class:`NoneType`
"""
assert np.all(np.equal(vertex.get_cell(), cell)) | 5,329,144 |
def is_socket_closed(sock):
    """Check if socket ``sock`` is closed.

    Returns True when the socket is falsy, looks dropped, or probing it
    fails for any reason. Falls through (returning None, i.e. falsy) when
    the socket appears healthy.
    """
    if not sock:
        return True
    try:
        if not poll:  # pragma nocover
            # poll() is unavailable on this platform; fall back to select().
            if not select:
                # No probing mechanism at all: optimistically report open.
                return False
            try:
                # Readable with nothing sent usually means EOF/connection
                # closed by the peer.
                return bool(select([sock], [], [], 0.0)[0])
            except socket.error:
                return True
        # This version is better on platforms that support it.
        p = poll()
        p.register(sock, POLLIN)
        for (fno, ev) in p.poll(0.0):
            if fno == sock.fileno():
                # Either data is buffered (bad), or the connection is dropped.
                return True
    except Exception:
        # Any failure while probing is treated as a closed socket.
        return True
def _fit_ovo_binary(estimator, X, y, i, j):
    """Fit a single binary estimator (one-vs-one).

    Keeps only the samples labelled ``i`` or ``j`` and trains ``estimator``
    on them, relabelling class ``i`` -> 0 and class ``j`` -> 1.
    """
    cond = np.logical_or(y == i, y == j)
    y = y[cond]
    # ``np.int`` was deprecated and removed in NumPy 1.24; the builtin
    # ``int`` is the exact equivalent.
    y_binary = np.empty(y.shape, int)
    y_binary[y == i] = 0
    y_binary[y == j] = 1
    ind = np.arange(X.shape[0])
    return _fit_binary(estimator, X[ind[cond]], y_binary, classes=[i, j])
def get_implicit_permissions_for_user(user: str, domain=None):
    """
    GetImplicitPermissionsForUser gets implicit permissions for a user or role.
    Compared to GetPermissionsForUser(), this function retrieves permissions for inherited roles.
    For example:
    p, admin, data1, read
    p, alice, data2, read
    g, alice, admin
    GetPermissionsForUser("alice") can only get: [["alice", "data2", "read"]].
    But GetImplicitPermissionsForUser("alice") will get: [["admin", "data1", "read"], ["alice", "data2", "read"]].
    """
    # Bug fix: forward the caller-supplied ``domain`` instead of the
    # hard-coded literal ``None``, which silently ignored the parameter.
    return enforcer.get_implicit_permissions_for_user(user, domain=domain)
def Law_f(text):
    """
    :param text: The "text" of this Law
    """
    # Assemble the LaTeX beamer block line by line.
    return '\n'.join(['\\begin{block}{Law}', text, '\\end{block}', ''])
def ordered_dict_intersection(first_dict, second_dict, compat=operator.eq):
    """Return the intersection of two dictionaries as a new OrderedDict.

    An item survives when its key exists in both mappings and the two
    values are compatible according to ``compat``.

    Parameters
    ----------
    first_dict, second_dict : dict-like
        Mappings to merge.
    compat : function, optional
        Binary operator to determine if two values are compatible. By
        default, checks for equality.

    Returns
    -------
    intersection : OrderedDict
        Intersection of the contents.
    """
    intersection = OrderedDict(first_dict)
    remove_incompatible_items(intersection, second_dict, compat)
    return intersection
def init_db():
    """Clear existing data and create new tables.

    Prints the database server version, then executes the SQL statements
    from the application's ``schema.sql`` resource.
    """
    db = get_db()
    cursor = db.cursor()
    cursor.execute("SELECT VERSION()")
    # Use fetchone() to retrieve a single row (the version string).
    data = cursor.fetchone()
    print("Database version : %s " % data)
    with current_app.open_resource('schema.sql') as f:
        sql = f.read().decode('utf-8')
        print(sql)
    # NOTE(review): a fresh cursor is created here; the schema is executed
    # as a single statement string — confirm the driver supports that.
    cursor = db.cursor()
    cursor.execute(sql)
def get_cl2cf_matrices(theta_bin_edges, lmin, lmax):
    """
    Returns the set of matrices to go from one entire power spectrum to one binned correlation function.
    Args:
        theta_bin_edges (1D numpy array): Angular bin edges in radians.
        lmin (int): Minimum l. Must be >= 2 (asserted below).
        lmax (int): Maximum l.
    Returns:
        (2D numpy array, \
        2D numpy array, \
        2D numpy array): Tuple of matrices to each go from one entire power spectrum to one binned \
        correlation function for different spins: (0-0, 2-2, 0-2). The spin-2-2 matrix is only for \
        xi+, not xi-.
    """
    # Calculate Legendre functions and their derivatives up to lmax
    # pl and dpl indexed as [theta_idx, l]
    cos_thetas = np.cos(theta_bin_edges)
    pl_dpl = np.array([scipy.special.lpn(lmax + 1, cos_theta) for cos_theta in cos_thetas])
    pl = pl_dpl[:, 0, :]
    dpl = pl_dpl[:, 1, :]
    # Calculate various offset combinations of Pl and dPl, and some other useful things
    assert lmin >= 2
    plplus1 = pl[:, (lmin + 1):]  # first is l=lmin+1, last is lmax+1
    plminus1 = pl[:, (lmin - 1):lmax]  # first is l=lmin-1, last is lmax-1
    xpl = cos_thetas[:, np.newaxis] * pl[:, lmin:(lmax + 1)]
    xdpl = cos_thetas[:, np.newaxis] * dpl[:, lmin:(lmax + 1)]
    dplminus1 = dpl[:, (lmin - 1):lmax]
    xdplminus1 = cos_thetas[:, np.newaxis] * dplminus1
    ell = np.arange(lmin, lmax + 1)
    two_ell_plus1 = 2 * ell + 1
    cos_theta_diff = np.diff(cos_thetas)
    # Calculate bin-averaged Pl, Pl^2 and Gl+/- following Fang et al. eqs 5.6-5.8
    # (Also Friedrich et al. DES Y3 covariance paper, which uses a different sign convention but this cancels out.)
    # All of these vectorised equations have been validated against much slower loop implementations
    # Pl
    pl_bin_top_prediff = plplus1 - plminus1
    pl_bin_top = np.diff(pl_bin_top_prediff, axis=0)
    pl_bin_bottom = np.outer(cos_theta_diff, two_ell_plus1)
    pl_bin = pl_bin_top / pl_bin_bottom
    # Pl^2
    plminus1_coeff = ell + 2 / two_ell_plus1
    plminus1_term = plminus1_coeff[np.newaxis, :] * plminus1
    xpl_coeff = 2 - ell
    xpl_term = xpl_coeff[np.newaxis, :] * xpl
    plplus1_coeff = 2 / two_ell_plus1
    plplus1_term = plplus1_coeff[np.newaxis, :] * plplus1
    pl2_bin_top_prediff = plminus1_term + xpl_term - plplus1_term
    pl2_bin_top = np.diff(pl2_bin_top_prediff, axis=0)
    pl2_bin_bottom = cos_theta_diff[:, np.newaxis]
    pl2_bin = pl2_bin_top / pl2_bin_bottom
    # Gl2+ + Gl2-
    plminus1_coeff = - ell * (ell - 1) / 2 * (ell + 2 / two_ell_plus1) - (ell + 2)
    plminus1_term = plminus1_coeff[np.newaxis, :] * plminus1
    xpl_coeff = - ell * (ell - 1) * (2 - ell) / 2
    xpl_term = xpl_coeff[np.newaxis, :] * xpl
    plplus1_coeff = ell * (ell - 1) / two_ell_plus1
    plplus1_term = plplus1_coeff[np.newaxis, :] * plplus1
    dpl_coeff = 4 - ell
    dpl_term = dpl_coeff * dpl[:, lmin:(lmax + 1)]
    xdplminus1_coeff = ell + 2
    xdplminus1_term = xdplminus1_coeff[np.newaxis, :] * xdplminus1
    xdpl_coeff = 2 * (ell - 1)
    xdpl_term = xdpl_coeff[np.newaxis, :] * xdpl
    pl_coeff = - 2 * (ell - 1)
    pl_term = pl_coeff[np.newaxis, :] * pl[:, lmin:(lmax + 1)]
    dplminus1_coeff = - 2 * (ell + 2)
    dplminus1_term = dplminus1_coeff[np.newaxis, :] * dplminus1
    gplus_bin_top_prediff = (plminus1_term + xpl_term + plplus1_term + dpl_term + xdplminus1_term + xdpl_term + pl_term
                             + dplminus1_term)
    gplus_bin_top = np.diff(gplus_bin_top_prediff, axis=0)
    gplus_bin_bottom = cos_theta_diff[:, np.newaxis]
    gplus_bin = gplus_bin_top / gplus_bin_bottom
    # Apply relevant prefactors to obtain bin-averaged Wigner d symbols
    ell_ellplus1 = (ell * (ell + 1))[np.newaxis, :]
    d00_bin = pl_bin
    d22plus_bin = 2 / ell_ellplus1 ** 2 * gplus_bin
    d02_bin = 1 / ell_ellplus1 * pl2_bin
    # Apply final Wigner prefactor to obtain Cl->CF matrices
    prefac = (two_ell_plus1 / (4 * np.pi))[np.newaxis, :]
    cl2cf_00 = prefac * d00_bin
    cl2cf_22plus = prefac * d22plus_bin
    cl2cf_02 = prefac * d02_bin
    return cl2cf_00, cl2cf_22plus, cl2cf_02
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    """
    Frame a time series as a supervised learning dataset.
    Arguments:
        data: Sequence of observations as a list or NumPy array.
        n_in: Number of lag observations as input (X).
        n_out: Number of observations as output (y).
        dropnan: Boolean whether or not to drop rows with NaN values.
    Returns:
        Pandas DataFrame of series framed for supervised learning, with
        columns named ``var<j>(t-<i>)`` / ``var<j>(t)`` / ``var<j>(t+<i>)``.
    """
    # A plain list is a single variable; otherwise the variable count is
    # the second array dimension.  isinstance replaces the non-idiomatic
    # ``type(data) is list`` and also accepts list subclasses.
    n_vars = 1 if isinstance(data, list) else data.shape[1]
    df = DataFrame(data)
    cols, names = list(), list()
    # input sequence (t-n, ... t-1)
    for i in range(n_in, 0, -1):
        cols.append(df.shift(i))
        names += [f'var{j + 1}(t-{i})' for j in range(n_vars)]
    # forecast sequence (t, t+1, ... t+n)
    for i in range(0, n_out):
        cols.append(df.shift(-i))
        if i == 0:
            names += [f'var{j + 1}(t)' for j in range(n_vars)]
        else:
            names += [f'var{j + 1}(t+{i})' for j in range(n_vars)]
    # put it all together
    agg = concat(cols, axis=1)
    agg.columns = names
    # drop rows with NaN values introduced by the shifting
    if dropnan:
        agg.dropna(inplace=True)
    return agg.reset_index(drop=True)
def valid_from_done(done):
    """Return a float mask which is zero for all time-steps after a
    `done=True` is signaled. Operates on the leading (time) dimension of
    `done`; all other dimensions are preserved."""
    done = done.to(dtype=torch.float)
    valid = torch.ones_like(done)
    # Once a done has occurred, the running (clamped) cumulative sum stays
    # at 1, zeroing out every subsequent step.
    running_done = torch.cumsum(done[:-1], dim=0).clamp(max=1)
    valid[1:] = 1 - running_done
    return valid
def rollout_script(arg_def_fn=None,
                   env_factory=None,
                   policy_factory=None,
                   add_policy_arg: bool = False):
    """Performs a rollout script.
    Args:
        arg_def_fn: A function that takes an ArgumentParser. Use this to add
            arguments to the script.
        env_factory: A function that takes program arguments and returns
            an environment. Otherwise, uses `gym.make`.
        policy_factory: A function that takes program arguments and returns a
            policy function (callable that observations and returns actions)
            and the environment.
        add_policy_arg: If True, adds an argument to take a policy path.
    """
    # --- Command-line interface ---------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-o', '--output', help='The directory to save rollout data to.')
    if add_policy_arg:
        parser.add_argument(
            '-p', '--policy', help='The path to the policy file to load.')
    parser.add_argument(
        '-n',
        '--num_episodes',
        type=int,
        default=DEFAULT_EPISODE_COUNT,
        help='The number of episodes to run.')
    parser.add_argument(
        '--seed', type=int, default=None, help='The seed for the environment.')
    parser.add_argument(
        '-r',
        '--render',
        nargs='?',
        const='human',
        default=None,
        help=('The rendering mode. If provided, renders to a window. A render '
              'mode string can be passed here.'))
    # Add additional argparse arguments.
    if arg_def_fn:
        arg_def_fn(parser)
    # --- Environment construction -------------------------------------
    env_id, params, args = parse_env_args(
        parser, default_env_name=DEFAULT_ENV_NAME)
    robel.set_env_params(env_id, params)
    if env_factory:
        env = env_factory(args)
    else:
        env = gym.make(env_id)
    action_fn = None
    if policy_factory:
        action_fn = policy_factory(args)
    if args.seed is not None:
        env.seed(args.seed)
    # --- Rollout loop: collect trajectories and print per-episode stats
    paths = []
    try:
        episode_num = 0
        for traj in do_rollouts(
                env,
                num_episodes=args.num_episodes,
                action_fn=action_fn,
                render_mode=args.render,
        ):
            print('Episode {}'.format(episode_num))
            print('> Total reward: {}'.format(traj.total_reward))
            if traj.durations:
                print('> Execution times:')
                for key in sorted(traj.durations):
                    print('{}{}: {:.2f}ms'.format(' ' * 4, key,
                                                  traj.durations[key] * 1000))
            episode_num += 1
            if args.output:
                paths.append(
                    dict(
                        actions=traj.actions,
                        observations=traj.observations,
                        rewards=traj.rewards,
                        total_reward=traj.total_reward,
                        infos=traj.infos,
                    ))
    finally:
        # Always close the environment, even if a rollout raised.
        env.close()
    # --- Persist results (only when an output directory was requested) -
    if paths and args.output:
        os.makedirs(args.output, exist_ok=True)
        # Serialize the paths.
        save_path = os.path.join(args.output, 'paths.pkl')
        with open(save_path, 'wb') as f:
            pickle.dump(paths, f)
        # Log the paths to a CSV file.
        csv_path = os.path.join(args.output,
                                '{}-results.csv'.format(env_id))
        with EpisodeLogger(csv_path) as logger:
            for path in paths:
                logger.log_path(path)
def get_transfer_options(transfer_kind='upload', transfer_method=None):
    """Returns hostnames that the current host can upload or download to.

    transfer_kind: 'upload' or 'download'
    transfer_method: if specified and not None, return only hosts with which
        we can work using this method (e.g. scp)
    """
    try:
        transfer_options = get_config(get_hostname())[
            '%s_options' % transfer_kind]
    except LookupError:
        logging.info("Host %s has no known transfer options.",
                     get_hostname())
        return []
    if transfer_method is not None:
        # Bug fix: compare against the requested method, not the literal
        # string 'method' (the original filter always came up empty).
        transfer_options = [to for to in transfer_options
                            if get_config(to['host'])['method'] == transfer_method]
    return transfer_options
def calc_utility_np(game, iter):
    """Calc utility of current position
    Parameters
    ----------
    game : camel up game
        Camel up game class
    iter : int
        Iterations to run the monte carlo simulations
    Returns
    -------
    np.array
        Numpy structured array with expected utilities (one row per player,
        final score in the 'utility' column)
    """
    coins = coins_to_numpy(game)
    # Memoise the Monte-Carlo probabilities per exact board state
    # (camel positions + tiles); turn- and game-level results share one
    # cache entry keyed on the concatenated dict reprs.
    if str(game.camel_dict) + str(game.tiles_dict) in CACHE.keys():
        turn_prob_first, turn_prob_second, turn_prob_other, exp_tile_points = CACHE[
            str(game.camel_dict) + str(game.tiles_dict)
        ][0]
        game_prob_first, game_prob_last = CACHE[
            str(game.camel_dict) + str(game.tiles_dict)
        ][1]
    else:
        turn_prob_first, turn_prob_second, turn_prob_other, exp_tile_points = turn_prob_numpy(
            game, iter
        )
        game_prob_first, game_prob_last = game_prob_numpy(game, iter)
        # Probabilities below 30% are treated as noise and zeroed out.
        game_prob_first["prob"] = np.where(
            game_prob_first["prob"] < 0.30, 0, game_prob_first["prob"]
        )
        game_prob_last["prob"] = np.where(
            game_prob_last["prob"] < 0.30, 0, game_prob_last["prob"]
        )
        CACHE[str(game.camel_dict) + str(game.tiles_dict)] = [
            (turn_prob_first, turn_prob_second, turn_prob_other, exp_tile_points),
            (game_prob_first, game_prob_last),
        ]
    winner_bets, loser_bets = winner_loser_bets_to_numpy(game)
    bet_tiles = bet_tiles_to_numpy(game)
    # Disambiguate column names before joining the three probability tables.
    util.rename_np(turn_prob_first, ["counts", "prob"], "first")
    util.rename_np(turn_prob_second, ["counts", "prob"], "second")
    util.rename_np(turn_prob_other, ["counts", "prob"], "other")
    bets = util.numpy_left_join(bet_tiles, turn_prob_first, "camel")
    bets = util.numpy_left_join(bets, turn_prob_second, "camel")
    bets = util.numpy_left_join(bets, turn_prob_other, "camel")
    # Expected value of a bet tile: full value if the camel comes first,
    # stake back if second, stake lost otherwise.
    multiply_array = (
        (bets["value"] * bets["prob_first"])
        + (bets["bets"] * bets["prob_second"])
        - (bets["bets"] * bets["prob_other"])
    )
    bets = util.add_col_np(bets, "exp_value", multiply_array)
    bets_groupby = util.numpy_group_by_sum(bets, "player", "exp_value")
    final = util.numpy_left_join(coins, exp_tile_points, "player")
    final = util.numpy_left_join(final, bets_groupby, "player")
    # Game-end bets: payout scales with bet order when correct, -1 otherwise.
    game_first = util.numpy_left_join(winner_bets, game_prob_first, "camel")
    game_last = util.numpy_left_join(loser_bets, game_prob_last, "camel")
    game_winner_other = deepcopy(game_first)
    game_winner_other["prob"] = 1 - game_first["prob"]
    game_loser_other = deepcopy(game_last)
    game_loser_other["prob"] = 1 - game_last["prob"]
    game_first = util.add_col_np(
        game_first, "points", config.BET_SCALING[0 : game_first.shape[0]]
    )
    game_last = util.add_col_np(
        game_last, "points", config.BET_SCALING[0 : game_last.shape[0]]
    )
    game_winner_other = util.add_col_np(
        game_winner_other, "points", [1] * game_winner_other.shape[0]
    )
    game_loser_other = util.add_col_np(
        game_loser_other, "points", [1] * game_loser_other.shape[0]
    )
    final = util.numpy_left_join(
        final, calc_exp_value_np(game_first, "exp_value_first"), "player"
    )
    final = util.numpy_left_join(
        final, calc_exp_value_np(game_last, "exp_value_last"), "player"
    )
    final = util.numpy_left_join(
        final, calc_exp_value_np(game_winner_other, "exp_value_winner_other"), "player"
    )
    final = util.numpy_left_join(
        final, calc_exp_value_np(game_loser_other, "exp_value_loser_other"), "player"
    )
    # Total utility: coins + expected tile points + expected bet values,
    # minus the expected losses on wrong game-end bets.
    multiply_array = (
        final["coins"]
        + final["exp_points"]
        + final["exp_value"]
        + final["exp_value_first"]
        + final["exp_value_last"]
        - final["exp_value_winner_other"]
        - final["exp_value_loser_other"]
    )
    final = util.add_col_np(final, "utility", multiply_array)
    return final
def add_masses(line, mass_light, mass_heavy):
    """
    Add m/z information in the output lines
    """
    rounded_light = round_masses(mass_light)
    rounded_heavy = round_masses(mass_heavy)
    return "{} {} {}\n".format(rounded_light, rounded_heavy, line)
def load_r_ind_sent_bars():
    """
    Loads the random index-barcodes of the actual networks
    """
    return [np.load('Textbooks/{}/r_ind_sent_bars.npy'.format(text))
            for text in texts]
def load_pickle(indices, image_data):
    """Load labelled image patches from a pickled image dict.

    Label encoding:
        0: Empty
        1: Active
        2: Inactive

    :param indices: Keys into the pickled dict selecting which images to use.
    :param image_data: Path to the pickle file. NOTE: pickle.load executes
        arbitrary code — only use on trusted files.
    :return: Tuple (x, y, n, cds) of patch arrays, labels, neighbour
        features and patch centre coordinates.
    """
    size = 13  # half-window: patches are (2*13+1) = 27 pixels square
    # image_data = "./data/images.pkl"
    with open(image_data, "rb") as f:
        images = pickle.load(f)
    x = []
    y = []
    n = []
    cds = []
    for idx in indices:
        D_dict = images[idx]
        img = D_dict['image']
        label = D_dict['label']
        row, col = label.shape
        length, width = img.shape
        img = np.expand_dims(img, axis=-1)
        # Walk the label grid; (img_r, img_c) tracks the pixel position of
        # each grid cell.  The 40-pixel start and 27-pixel stride appear to
        # be the grid geometry of the source images — TODO confirm.
        img_r, img_c = 40, 40
        for g_r in range(1, row-1):
            img_c = 40
            for g_c in range(1, col-1):
                # Check whether it's empty
                if label[g_r][g_c] == 0.0:
                    pass
                else:
                    # Cut a 27x27 patch centred on this grid cell.
                    l = img_c - size
                    u = img_r - size
                    r = img_c + size + 1
                    d = img_r + size + 1
                    pt = img[u:d, l:r]
                    nb = get_neibs_cds(img, l, u)
                    lb = label[g_r][g_c]
                    x.append(pt)
                    y.append(lb)
                    n.append(nb)
                    cds.append((img_r, img_c))
                img_c += 27
            img_r += 27
    x = np.array(x)
    y = np.array(y)
    n = np.array(n)
    return x, y, n, cds
def summarize_vref_locs(locs:TList[BaseObjLocation]) -> pd.DataFrame:
    """
    Return a table with cols (partition, num vrefs)
    """
    grouped = group_like(objs=locs, labels=[loc.partition for loc in locs])
    partitions = sorted(grouped)
    counts = [len(grouped[partition]) for partition in partitions]
    return pd.DataFrame({'Partition': partitions, 'Number of vrefs': counts})
def rescale_list_to_range(original, limits):
    """
    Linearly rescale values in original list to limits (minimum and maximum).
    :example:
    >>> rescale_list_to_range([1, 2, 3], (0, 10))
    [0.0, 5.0, 10.0]
    >>> rescale_list_to_range([1, 2, 3], (-10, 0))
    [-10.0, -5.0, 0.0]
    >>> rescale_list_to_range([1, 2, 3], (0j, 10j))
    [0j, 5j, 10j]
    :param original: Original list or list-like to be rescaled.
    :type original: list
    :param limits: Tuple of two floats, min and max, to constrain the new list
    :type limits: tuple
    :return: Original list rescaled to fit between min and max
    :rtype: list
    """
    new_min, new_max = limits[0:2]
    old_min, old_max = min(original), max(original)
    if old_min == old_max:
        # Degenerate input (all values equal): map every element to the
        # midpoint of the target range.  Bug fix: the previous expression
        # attempted ``float * list / float`` and raised TypeError for
        # plain Python lists.
        midpoint = (new_max + new_min) / 2
        return [midpoint for _ in original]
    return [new_max * (v - old_min) / (old_max - old_min) +
            new_min * (old_max - v) / (old_max - old_min) for v in original]
def test_simulation(overwrite: bool = False) -> None:
    """export sim in JSON, and then load it again"""
    component = gf.components.straight(length=3)
    sim = gt.get_simulation(component=component)
    if overwrite:
        sim.to_file("sim_ref.yaml")  # uncomment to overwrite material
    sim.to_file("sim_run.yaml")
    here = pathlib.Path(__file__).parent
    reference = OmegaConf.load(here / "sim_ref.yaml")
    current = OmegaConf.load(here / "sim_run.yaml")
    delta = diff(reference, current)
    assert len(delta) == 0, delta
def build_list_request(
    filters: Optional[dict[str, str]] = None
) -> Union[IssueListInvalidRequest, IssueListValidRequest]:
    """Create request from filters.

    Validates filter keys and values, collecting every error found.
    Returns an invalid request object when any validation fails, otherwise
    a valid request carrying the filters.
    """
    accepted_filters = ["obj__eq", "state__eq", "title__contains"]
    invalid_req = IssueListInvalidRequest()
    if filters is not None:
        if not isinstance(filters, Mapping):
            invalid_req.add_error("filters", "Is not iterable")
            return invalid_req
        for key, value in filters.items():
            if key not in accepted_filters:
                invalid_req.add_error("filters", f"Key {key} cannot be used.")
            if (key == "obj__eq" and value not in ["pull request", "issue", "all"]) or (
                key == "state__eq" and value not in ["all", "open", "closed"]
            ):
                # Bug fix: report the actual offending key instead of the
                # hard-coded 'obj__eq' (wrong for state__eq violations).
                invalid_req.add_error(
                    "filters", f"Value {value} for key '{key}' cannot be used."
                )
    if invalid_req.has_errors():
        return invalid_req
    return IssueListValidRequest(filters=filters)
def get_ratings(labeled_df):
    """Return the distinct values found in the RATING column."""
    return labeled_df['RATING'].unique()
def split_edge_cost(
    edge_cost: EdgeFunction, to_split: LookupToSplit
) -> Dict[Edge, float]:
    """Assign half the cost of the original edge to each of the split edges.

    Args:
        edge_cost: Lookup from edges to cost.
        to_split: Lookup from original edges to pairs of split edges
            (see [lookup_to_split][tspwplib.converter.lookup_to_split]).

    Returns:
        Lookup from split edges to cost.

    Notes:
        The cost is cast to a float.
    """
    halved = {}
    for edge, cost in edge_cost.items():
        left, right = to_split[edge]
        per_half = float(cost) / 2.0
        halved[left] = per_half
        halved[right] = per_half
    return halved
def get_ants_brain(filepath, metadata, channel=0):
    """Load .nii brain file as ANTs image.

    :param filepath: Path to the .nii volume.
    :param metadata: Dict providing 'micronsPerPixel_*', 'sample_period'
        and 'image_dims' entries used to build the voxel spacing.
    :param channel: Channel index to keep when the volume has more than
        four dimensions.
    :return: ANTs image with singleton dimensions squeezed out.
    """
    nib_brain = np.asanyarray(nib.load(filepath).dataobj).astype('uint32')
    # Per-axis physical spacing; absent metadata keys default to 0.
    spacing = [float(metadata.get('micronsPerPixel_XAxis', 0)),
               float(metadata.get('micronsPerPixel_YAxis', 0)),
               float(metadata.get('micronsPerPixel_ZAxis', 0)),
               float(metadata.get('sample_period', 0))]
    # Keep spacing only for axes with extent > 1 (those that survive squeeze).
    spacing = [spacing[x] for x in range(4) if metadata['image_dims'][x] > 1]
    if len(nib_brain.shape) > 4:  # multiple channels
        # trim to single channel
        return ants.from_numpy(np.squeeze(nib_brain[..., channel]), spacing=spacing)
    else:
        # return ants.from_numpy(np.squeeze(nib_brain[..., :300]), spacing=spacing) # TESTING
        return ants.from_numpy(np.squeeze(nib_brain), spacing=spacing)
def numpy_max(x):
    """
    Return the maximum of an array; handles text columns as well.
    """
    return numpy_min_max(x, lambda arr: arr.max(), minmax=True)
def clip_gradient(model, clip_norm):
    """Scale all gradients so their global L2 norm does not exceed ``clip_norm``.

    Computes the total gradient norm over trainable parameters, then
    multiplies every gradient in place by
    ``clip_norm / max(total_norm, clip_norm)`` — a no-op when the total
    norm is already within the limit.
    """
    totalnorm = 0.0
    for p in model.parameters():
        # Bug fix: parameters that never received a gradient have
        # ``p.grad is None`` and crashed the original loop.
        if p.requires_grad and p.grad is not None:
            # .item() accumulates a plain float instead of a 0-d tensor.
            modulenorm = p.grad.data.norm().item()
            totalnorm += modulenorm ** 2
    totalnorm = np.sqrt(totalnorm)
    norm = clip_norm / max(totalnorm, clip_norm)
    for p in model.parameters():
        if p.requires_grad and p.grad is not None:
            p.grad.mul_(float(norm))
def online_training(exp_folder: pathlib.Path, filename: pathlib.Path,
                    freeze: bool):
    """Continue running an experiment.
    Args:
        exp_folder (pathlib.Path): Experiments folders.
        filename (pathlib.Path): Input path for new data set.
        freeze (bool): Freeze non conditional layers.
    """
    batch_model, hparams = run_exp.setup_exp(exp_folder)
    # A second model instance restores the previously trained weights so
    # they can be transferred into the (possibly frozen) new model.
    reference_model = batch_integration.DISCERN.from_json(hparams)
    inputdata = _prepare_inputdata(exp_folder=exp_folder,
                                   hparams=hparams,
                                   filename=filename)
    batch_model.zeros = inputdata.zeros
    # Keep a backup of the current job folder before overwriting it.
    if exp_folder.joinpath("backup").exists():
        shutil.rmtree(exp_folder.joinpath("backup"))
    shutil.copytree(exp_folder.joinpath("job"), exp_folder.joinpath("backup"))
    reference_model.restore_model(exp_folder.joinpath("job"))
    batch_model.build_model(n_genes=inputdata.var_names.size,
                            n_labels=inputdata.obs.batch.cat.categories.size,
                            scale=inputdata.config["total_train_count"])
    # Transfer reference weights into the new model, optionally freezing
    # everything except the conditional layers.
    batch_model.wae_model = update_model(old_model=reference_model.wae_model,
                                         new_model=batch_model.wae_model,
                                         freeze_unchanged=freeze)
    _LOGGER.debug("Recompile Model to apply freezing")
    batch_model.compile(optimizer=batch_model.get_optimizer(), scale=15000.0)
    _LOGGER.debug("Starting online training of %s", exp_folder)
    run_exp._train(  # pylint: disable=protected-access
        model=batch_model,
        exp_folder=exp_folder.resolve(),
        inputdata=inputdata,
        early_stopping=hparams['training']["early_stopping"],
        max_steps=hparams['training']['max_steps'])
    _LOGGER.info('%s has finished online training', exp_folder)
def get_train_val_test_splits(X, y, max_points, seed, confusion, seed_batch,
                              split=(2./3, 1./6, 1./6)):
    """Return training, validation, and test splits for X and y.
    Args:
        X: features
        y: targets
        max_points: # of points to use when creating splits.
        seed: seed for shuffling.
        confusion: labeling noise to introduce. 0.1 means randomize 10% of labels.
        seed_batch: # of initial datapoints to ensure sufficient class membership.
        split: percent splits for train, val, and test.
    Returns:
        indices: shuffled indices to recreate splits given original input data X.
        y_noise: y with noise injected, needed to reproduce results outside of
            run_experiments using original data.
        (Plus the X/y train, val and test splits themselves.)
    """
    np.random.seed(seed)
    X_copy = copy.copy(X)
    y_copy = copy.copy(y)
    # Introduce labeling noise
    y_noise = flip_label(y_copy, confusion)
    indices = np.arange(len(y))
    if max_points is None:
        max_points = len(y_noise)
    else:
        max_points = min(len(y_noise), max_points)
    train_split = int(max_points * split[0])
    val_split = train_split + int(max_points * split[1])
    assert seed_batch <= train_split
    # Do this to make sure that the initial batch has examples from all classes
    min_shuffle = 3
    n_shuffle = 0
    y_tmp = y_noise
    # Need at least 4 obs of each class for 2 fold CV to work in grid search step
    # NOTE(review): if no shuffle can satisfy the class-count condition this
    # loop never terminates — confirm callers guarantee enough samples.
    while (any(get_class_counts(y_tmp, y_tmp[0:seed_batch]) < 4)
           or n_shuffle < min_shuffle):
        np.random.shuffle(indices)
        y_tmp = y_noise[indices]
        n_shuffle += 1
    X_train = X_copy[indices[0:train_split]]
    X_val = X_copy[indices[train_split:val_split]]
    X_test = X_copy[indices[val_split:max_points]]
    y_train = y_noise[indices[0:train_split]]
    y_val = y_noise[indices[train_split:val_split]]
    y_test = y_noise[indices[val_split:max_points]]
    # Make sure that we have enough observations of each class for 2-fold cv
    assert all(get_class_counts(y_noise, y_train[0:seed_batch]) >= 4)
    # Make sure that returned shuffled indices are correct
    assert all(y_noise[indices[0:max_points]] ==
               np.concatenate((y_train, y_val, y_test), axis=0))
    return (indices[0:max_points], X_train, y_train,
            X_val, y_val, X_test, y_test, y_noise)
def require_apikey(key):
    """
    Decorator for view functions and API requests. Requires
    that the user pass in the API key for the application.

    Responds with HTTP 401 when the ``key`` query parameter is missing
    or does not match.
    """
    from functools import wraps

    def _wrapped_func(view_func):
        # wraps preserves the view function's name/docstring so routing
        # frameworks and introspection keep working (the original decorator
        # discarded them).
        @wraps(view_func)
        def _decorated_func(*args, **kwargs):
            passed_key = request.args.get('key', None)
            if passed_key == key:
                return view_func(*args, **kwargs)
            else:
                abort(401)
        return _decorated_func
    return _wrapped_func
def apply_actions(local_files, deployed_files, visitor):
    """Decide and dispatch deployment actions ("Create", "Update", "Delete").

    - local_files is a list of tuples (comparable KuduPath, pretty KuduPath,
      openable local path)
    - deployed_files is a dict with KuduPath as key and metadata as value
    Each decision is passed to ``visitor.accept(action, path, localpath)``;
    nothing is returned.
    """
    deployed_files_copy = dict(deployed_files)  # Copy: entries are popped below
    for comparable_name, pretty_name, localpath in local_files:
        kudumeta = deployed_files_copy.pop(comparable_name, None)
        if not kudumeta:
            # Not deployed yet: create it.
            visitor.accept("Create", pretty_name, localpath)
            continue
        # Deployed already: update only when the local copy is newer.
        last_modified_time_local = datetime.fromtimestamp(
            os.path.getmtime(localpath), UTC)
        last_modified_time_distant = kudumeta['mtime']
        if last_modified_time_local > last_modified_time_distant:
            visitor.accept("Update", pretty_name, localpath)
    # Anything still deployed with no local counterpart gets deleted.
    for meta in deployed_files_copy.values():
        visitor.accept("Delete", meta['urlpath'], None)
def gauss_legendre(ordergl, tol=10e-14):
    """
    Return nodal abscissas {x} and weights {A} of the Gauss-Legendre
    m-point quadrature, where m = ordergl + 1.
    """
    from math import cos, pi
    from numpy import zeros

    num_points = ordergl + 1

    def _legendre_and_derivative(t, m):
        # Evaluate P_m(t) and its derivative via the three-term recurrence.
        prev, curr = 1.0, t
        for k in range(1, m):
            prev, curr = curr, ((2.0 * k + 1.0) * t * curr - k * prev) / (1.0 + k)
        deriv = m * (prev - t * curr) / (1.0 - t ** 2)
        return curr, deriv

    weights = zeros(num_points)
    nodes = zeros(num_points)
    n_roots = (num_points + 1) // 2  # number of non-negative roots
    for i in range(n_roots):
        # Chebyshev-style initial guess, refined by Newton-Raphson.
        t = cos(pi * (i + 0.75) / (num_points + 0.5))
        for _ in range(30):
            p, dp = _legendre_and_derivative(t, num_points)
            dt = -p / dp
            t = t + dt
            if abs(dt) < tol:
                # Roots come in +/- pairs; fill both ends at once.
                nodes[i] = t
                nodes[num_points - i - 1] = -t
                weights[i] = 2.0 / (1.0 - t ** 2) / (dp ** 2)  # Eq.(6.25)
                weights[num_points - i - 1] = weights[i]
                break
    return nodes, weights
def test_hyperparameters_called(mock):
    """Checks that __call__ function of regularizers is executed."""
    x, y = get_data()
    member = get_test_member()
    member.step_on_batch(x, y)
    # One expected mock call per mutable L1L2 hyperparameter.
    expected_calls = [
        call('Called {}'.format(hp))
        for hp in member.hyperparameters
        if isinstance(hp, L1L2Mutable)
    ]
    mock.assert_has_calls(expected_calls, any_order=True)
def _remove_variable_declaration_statements(parse_tree):
"""
Modify the parse tree by removing all variable declaration statements.
This is an in-place modification: the given tree's list of children is
modified.
:param parse_tree: The whole parse tree (a subtree won't work)
"""
indices_to_remove = [
i for i, statement in enumerate(parse_tree.children)
if statement.children[0].data == "variable_declaration_statement"
]
# Easier to do in reverse order, so that deletions don't affect
# subsequent indices.
for i in reversed(indices_to_remove):
del parse_tree.children[i] | 5,329,175 |
def write(notes, path="out.ly"):
    """Write a Lilypond file containing the given notes in Lilypond notation.

    :param notes: Lilypond-notation string substituted into the module-level
        ``template`` (replacing its ``XXX`` placeholder).
    :param path: output file name; defaults to ``out.ly`` for backward
        compatibility with the previous hard-coded name.
    """
    # A context manager guarantees the file is closed even if the
    # substitution or the write itself raises.
    with open(path, "w") as out:
        out.write(template.replace("XXX", notes))
def simple_message(msg, parent=None, title=None):
    """
    Create a simple informational message dialog showing ``msg``.
    Optionally set the parent widget and the dialog title.
    """
    dialog = gtk.MessageDialog(parent=None,
                               type=gtk.MESSAGE_INFO,
                               buttons=gtk.BUTTONS_OK,
                               message_format=msg)
    # The parent is applied afterwards as the transient-for window rather
    # than through the constructor argument.
    if parent is not None:
        dialog.set_transient_for(parent)
    if title is not None:
        dialog.set_title(title)
    # Show the dialog, block until the user dismisses it, then tear it down.
    dialog.show()
    dialog.run()
    dialog.destroy()
    return None
def accept(value):
    """Accept header class and method decorator."""
    def accept_decorator(target):
        # Attach an Accept header (case-insensitively keyed) to the target.
        headers = CaseInsensitiveDict({'Accept': value})
        set_decor(target, 'header', headers)
        return target
    return accept_decorator
def create_meg_data():
    """Creates MEG data for testing."""
    # Run each fake-data maker in turn.
    for make_fake_data in (make_fake_meg_map_data, make_fake_exponent_map_data):
        make_fake_data()
def screenshot(widget, path=None, dir=None):
    """Save a screenshot of a Qt widget to a PNG file.
    By default, the screenshots are saved in `~/.phy/screenshots/`.
    Parameters
    ----------
    widget : Qt widget
        Any widget to capture (including OpenGL widgets).
    path : str or Path
        Path to the PNG file.
    dir : str or Path
        Directory used to build the default path when `path` is not given.
    Returns
    -------
    path : Path
        Resolved path of the saved PNG file.
    """
    target = Path(path or screenshot_default_path(widget, dir=dir)).resolve()
    if isinstance(widget, QOpenGLWindow):
        # OpenGL windows render into a framebuffer and need a dedicated
        # capture call.
        widget.grabFramebuffer().save(str(target))
    else:
        # Regular Qt widgets can be grabbed directly.
        widget.grab().save(str(target))
    logger.info("Saved screenshot to %s.", target)
    return target
def xyz_to_rtp(x, y, z):
    """
    Convert 1-D Cartesian (x, y, z) coords. to 3-D spherical coords.
    (r, theta, phi).
    The z-coord. is assumed to be anti-parallel to the r-coord. when
    theta = 0.
    Returns meshgrids (rr, tt, pp) with rr >= 0, tt (polar angle) in
    [0, pi] and pp (azimuth) in (-pi, pi].
    Note: rr/tt are NaN at the origin (division by zero in arccos).
    """
    # First establish 3-D versions of x, y, z
    xx, yy, zz = np.meshgrid(x, y, z, indexing='ij')
    # Calculate 3-D spherical coordinate vectors.
    rr = np.sqrt(xx**2 + yy**2 + zz**2)
    tt = np.arccos(zz / rr)
    # arctan2 keeps the sign of y — the previous arccos(x/rho) formula
    # folded the azimuth into [0, pi], mapping the y < 0 hemisphere onto
    # y > 0 — and it is also well-defined on the z-axis where x = y = 0.
    pp = np.arctan2(yy, xx)
    return rr, tt, pp
def irccat_targets(bot, targets):
    """
    Go through our potential targets and place them in an array so we can
    easily loop through them when sending messages.
    """
    result = []
    for target in targets.split(','):
        if target.startswith('@'):
            # Nick target: strip the leading '@'.
            result.append(target[1:])
        elif target.startswith('#') and target in bot.config.core.channels:
            # Channel target, but only if the bot actually knows it.
            result.append(target)
        elif target == '#*':
            # Wildcard: broadcast to every configured channel.
            result.extend(bot.config.core.channels)
    return result
async def test_loading_with_no_config(hass, mock_config_entry):
    """Test that component setup fails when it has no configuration.

    ``hass`` and ``mock_config_entry`` are pytest fixtures; the entry is
    added to hass before setup so its resulting state can be inspected.
    """
    mock_config_entry.add_to_hass(hass)
    # Set up the integration with an empty YAML config dict.
    await setup.async_setup_component(hass, DOMAIN, {})
    # Component setup fails because the oauth2 implementation could not be registered
    assert mock_config_entry.state is config_entries.ConfigEntryState.SETUP_ERROR
def do_analyze(directory, config):
    """
    Analyze directory for keywords.

    Thin wrapper that forwards to the module-level ``analyze`` helper.

    :param directory: directory to scan
    :param config: configuration object passed through to ``analyze``
    """
    analyze(directory, config)
def _watchos_stub_partial_impl(
        *,
        ctx,
        actions,
        binary_artifact,
        label_name,
        watch_application):
    """Implementation for the watchOS stub processing partial.

    Args:
        ctx: The rule context.  # NOTE(review): not referenced in this body — confirm whether it can be dropped upstream.
        actions: The actions provider used to register the symlink action.
        binary_artifact: The stub binary to bundle, if any.
        label_name: Name of the target, used for intermediate file naming.
        watch_application: The watch application target, if any; its stub
            binary is bundled into the archive's WatchKitSupport2 folder.

    Returns:
        A struct with `bundle_files` tuples and `providers` for the bundler.
    """
    bundle_files = []
    providers = []
    if binary_artifact:
        # Create intermediate file with proper name for the binary.
        intermediate_file = intermediates.file(
            actions,
            label_name,
            "WK",
        )
        actions.symlink(
            target_file = binary_artifact,
            output = intermediate_file,
        )
        # The renamed stub is placed inside the bundle under _WatchKitStub.
        bundle_files.append(
            (processor.location.bundle, "_WatchKitStub", depset([intermediate_file])),
        )
        # Expose the stub so a depending target can pick it up (see below).
        providers.append(_AppleWatchosStubInfo(binary = intermediate_file))
    if watch_application:
        # Reuse the stub produced by the watch application target and ship it
        # at the archive root for App Store submission support.
        binary_artifact = watch_application[_AppleWatchosStubInfo].binary
        bundle_files.append(
            (processor.location.archive, "WatchKitSupport2", depset([binary_artifact])),
        )
    return struct(
        bundle_files = bundle_files,
        providers = providers,
    )
def contour_area_filter(image, kernel=(9,9), resize=1.0, uint_mode="scale",
                        min_area=100, min_area_factor=3, factor=3, **kwargs):
    """
    Checks that a contour can be returned for two thresholds of the image, a
    mean threshold and an otsu threshold.
    Parameters
    ----------
    image : np.ndarray
        Image to check for contours.
    kernel : tuple, optional
        Kernel to use when gaussian blurring.
    resize : float, optional
        How much to resize the image by before doing any calculations.
    uint_mode : str, optional
        Conversion mode to use when converting to uint8.
    min_area : float, optional
        Minimum area of the otsu thresholded beam.
    min_area_factor : float, optional
        The amount to scale down ``min_area`` for comparison with the mean
        threshold contour area.
    factor : float, optional
        Factor to pass to the mean threshold.
    **kwargs
        Passed through to ``get_largest_contour``.
    Returns
    -------
    passes : bool
        True if the image passes the check, False if it does not
    """
    # Preprocess once (uint8 conversion, resize, gaussian blur) and reuse
    # the result for both threshold modes.
    image_prep = uint_resize_gauss(image, mode=uint_mode, kernel=kernel,
                                   fx=resize, fy=resize)
    # Try to get contours of the image
    try:
        _, area_mean = get_largest_contour(
            image_prep, thresh_mode="mean", factor=factor, **kwargs)
        _, area_otsu = get_largest_contour(
            image_prep, thresh_mode="otsu", **kwargs)
        # Do the check for area
        if area_otsu < min_area or area_mean < min_area/min_area_factor:
            logger.debug("Filter - Contour area, {0} is below the min area, "
                         "{1}.".format(area_otsu, min_area))
            return False
        return True
    except NoContoursDetected:
        # No contour at all means the image cannot pass.
        logger.debug("Filter - No contours found on image.")
        return False
def catMullRomFit(p, nPoints=100):
    """
    Return as smoothed path from a list of QPointF objects p, interpolating points if needed.
    This function takes a set of points and fits a CatMullRom Spline to the data. It then
    interpolates the set of points and outputs a smoothed path with the desired number of points
    on it.
    p : the path to be smoothed
    nPoints : the desired number of points in the smoothed path
    """
    N = len(p)
    #there is no re interpolation required
    if N == nPoints:
        return p
    interp = []
    dj = 1.0 / nPoints
    for j in range(0, nPoints):
        # Map the output index j onto a fractional position di along the
        # input path; i is the base segment and x the local parameter in [0,1).
        di = j * dj * (N - 1)
        i = int(di)
        x = di - i
        xx = x * x
        xxx = x * x * x
        # Cubic Hermite basis functions evaluated at x.
        c0 = 2.0 * xxx - 3.0 * xx + 1.0
        c1 = xxx - 2.0 * xx + x
        c2 = -2.0 * xxx + 3.0 * xx
        c3 = xxx - xx
        # Neighbouring control points, clamped to the path ends when the
        # index would fall outside [0, N).
        p0 = p[i]
        p1 = p0
        p2 = p0
        p3 = p0
        if i + 1 < N:
            p1 = p[i + 1]
        if i - 1 > -1:
            p2 = p[i - 1]
        if i + 2 < N:
            p3 = p[i + 2]
        # Central-difference tangents: m0 at p0 (uses p[i+1] and p[i-1]),
        # m1 at p1 (uses p[i+2] and p[i]).
        m0 = toVector(p1 - p2) * 0.5
        m1 = toVector(p3 - p0) * 0.5
        px = (c0 * toVector(p0)) + (c1 * m0) + (c2 * toVector(p1)) + (c3 * m1)
        interp.append(toPoint(px))
    # pop back the last one
    interp.pop()
    # make sure the last point in the original polygon is still the last one
    interp.append(p[-1])
    return interp
def pdf_from_ppf(quantiles, ppfs, edges):
    """
    Reconstruct pdf from ppf and evaluate at desired points.
    Parameters
    ----------
    quantiles: numpy.ndarray, shape=(L)
        L quantiles for which the ppf_values are known
    ppfs: numpy.ndarray, shape=(1,...,L)
        Corresponding ppf-values for all quantiles
    edges: numpy.ndarray, shape=(M+1)
        Binning of the desired binned pdf
    Returns
    -------
    pdf_values: numpy.ndarray, shape=(1,...,M)
        Recomputed, binned pdf
    """
    # recalculate pdf values through numerical differentiation
    pdf_interpolant = np.nan_to_num(np.diff(quantiles) / np.diff(ppfs, axis=-1))
    # Unconventional solution to make this usable with np.apply_along_axis for readability
    # The ppf bin-mids are computed since the pdf-values are derived through derivation
    # from the ppf-values
    xyconcat = np.concatenate(
        (ppfs[..., :-1] + np.diff(ppfs) / 2, pdf_interpolant), axis=-1
    )
    def interpolate_ppf(xy):
        # The first half of xy holds the ppf bin-mids and the second half the
        # pdf values (packed together so np.apply_along_axis can pass a
        # single 1-D array along the last axis).
        ppf = xy[:len(xy) // 2]
        pdf = xy[len(xy) // 2:]
        # Values outside the known ppf range are treated as zero density.
        interpolate = interp1d(ppf, pdf, bounds_error=False, fill_value=(0, 0))
        result = np.nan_to_num(interpolate(edges[:-1]))
        return np.diff(edges) * result
    # Interpolate pdf samples and evaluate at bin edges, weight with the bin_width to estimate
    # correct bin height via the midpoint rule formulation of the trapezoidal rule
    pdf_values = np.apply_along_axis(interpolate_ppf, -1, xyconcat)
    return pdf_values
def get_arg_text(ob):
    """Get a string describing the arguments for the given object.

    NOTE: this is Python 2-only code — it relies on ``types.ClassType``,
    ``im_func``, ``func_code`` and ``func_defaults``, none of which exist
    in Python 3.
    """
    arg_text = ""
    if ob is not None:
        arg_offset = 0
        if type(ob) in (types.ClassType, types.TypeType):
            # Look for the highest __init__ in the class chain.
            fob = _find_constructor(ob)
            if fob is None:
                fob = lambda: None
            else:
                arg_offset = 1
        elif type(ob)==types.MethodType:
            # bit of a hack for methods - turn it into a function
            # but we drop the "self" param.
            fob = ob.im_func
            arg_offset = 1
        else:
            fob = ob
        # Try to build one for Python defined functions
        if type(fob) in [types.FunctionType, types.LambdaType]:
            argcount = fob.func_code.co_argcount
            real_args = fob.func_code.co_varnames[arg_offset:argcount]
            defaults = fob.func_defaults or []
            defaults = list(map(lambda name: "=%s" % repr(name), defaults))
            # Left-pad so defaults line up with the trailing parameters.
            defaults = [""] * (len(real_args) - len(defaults)) + defaults
            items = map(lambda arg, dflt: arg + dflt, real_args, defaults)
            # CO_VARARGS (0x4) -> *args present; CO_VARKEYWORDS (0x8) -> **kwargs.
            if fob.func_code.co_flags & 0x4:
                items.append("...")
            if fob.func_code.co_flags & 0x8:
                items.append("***")
            arg_text = ", ".join(items)
            arg_text = "(%s)" % re.sub("\.\d+", "<tuple>", arg_text)
        # See if we can use the docstring
        doc = getattr(ob, "__doc__", "")
        if doc:
            # Append at most the first line (capped at 70 chars) of the doc.
            doc = doc.lstrip()
            pos = doc.find("\n")
            if pos < 0 or pos > 70:
                pos = 70
            if arg_text:
                arg_text += "\n"
            arg_text += doc[:pos]
    return arg_text
def create_ses_weights(d, ses_col, covs, p_high_ses, use_propensity_scores):
    """
    Used for training preferentially on high or low SES people. If use_propensity_scores is True, uses propensity score matching on covs.
    Note: this samples from individual images, not from individual people. I think this is okay as long as we're clear about what's being done. If p_high_ses = 0 or 1, both sampling methods are equivalent. One reason to sample images rather than people is that if you use propensity score weighting, covs may change for people over time.

    :param d: DataFrame of samples; must contain ``ses_col`` (and ``covs`` when
        ``use_propensity_scores`` is True).
    :param ses_col: boolean column marking high-SES rows.
    :param covs: covariate column names for the propensity model, or None.
    :param p_high_ses: desired fraction of high-SES rows in [0, 1].
    :param use_propensity_scores: whether to reweight by inverse propensity.
    :return: weight vector of length len(d) that sums to 1.
    """
    assert p_high_ses >= 0 and p_high_ses <= 1
    high_ses_idxs = (d[ses_col] == True).values
    n_high_ses = high_ses_idxs.sum()
    n_low_ses = len(d) - n_high_ses
    assert pd.isnull(d[ses_col]).sum() == 0
    n_to_sample = min(n_high_ses, n_low_ses) # want to make sure train set size doesn't change as we change p_high_ses from 0 to 1 so can't have a train set size larger than either n_high_ses or n_low_ses
    n_high_ses_to_sample = int(p_high_ses * n_to_sample)
    n_low_ses_to_sample = n_to_sample - n_high_ses_to_sample
    all_idxs = np.arange(len(d))
    # Sample without replacement within each SES group.
    high_ses_samples = np.array(random.sample(list(all_idxs[high_ses_idxs]), n_high_ses_to_sample))
    low_ses_samples = np.array(random.sample(list(all_idxs[~high_ses_idxs]), n_low_ses_to_sample))
    print("%i high SES samples and %i low SES samples drawn with p_high_ses=%2.3f" %
          (len(high_ses_samples), len(low_ses_samples), p_high_ses))
    # create weights.
    weights = np.zeros(len(d))
    if len(high_ses_samples) > 0:
        weights[high_ses_samples] = 1.
    if len(low_ses_samples) > 0:
        weights[low_ses_samples] = 1.
    if not use_propensity_scores:
        assert covs is None
        # Uniform weights over the sampled rows.
        weights = weights / weights.sum()
        return weights
    else:
        assert covs is not None
        # fit probability model
        propensity_model = sm.Logit.from_formula('%s ~ %s' % (ses_col, '+'.join(covs)), data=d).fit()
        print("Fit propensity model")
        print(propensity_model.summary())
        # compute inverse propensity weights.
        # "A subject's weight is equal to the inverse of the probability of receiving the treatment that the subject actually received"
        # The treatment here is whether they are high SES,
        # and we are matching them on the other covariates.
        high_ses_propensity_scores = propensity_model.predict(d).values
        high_ses_weights = 1 / high_ses_propensity_scores
        low_ses_weights = 1 / (1 - high_ses_propensity_scores)
        propensity_weights = np.zeros(len(d))
        propensity_weights[high_ses_idxs] = high_ses_weights[high_ses_idxs]
        propensity_weights[~high_ses_idxs] = low_ses_weights[~high_ses_idxs]
        assert np.isnan(propensity_weights).sum() == 0
        # multply indicator vector by propensity weights.
        weights = weights * propensity_weights
        # normalize weights so that high and low SES sum to the right things.
        print(n_high_ses_to_sample, n_low_ses_to_sample)
        if n_high_ses_to_sample > 0:
            weights[high_ses_idxs] = n_high_ses_to_sample * weights[high_ses_idxs] / weights[high_ses_idxs].sum()
        if n_low_ses_to_sample > 0:
            weights[~high_ses_idxs] = n_low_ses_to_sample * weights[~high_ses_idxs] / weights[~high_ses_idxs].sum()
        assert np.isnan(weights).sum() == 0
        # normalize whole vector, just to keep things clean
        weights = weights / weights.sum()
        return weights
def vectors_intersect(vector_1_uri, vector_2_uri):
    """Take in two OGR vectors (we're assuming that they're in the same
    projection) and test to see if their geometries intersect. Return True of
    so, False if not.
    vector_1_uri - a URI to an OGR vector
    vector_2_uri - a URI to an OGR vector
    Returns True or False"""
    utils.assert_files_exist([vector_1_uri, vector_2_uri])
    LOGGER.debug('Opening vector %s', vector_1_uri)
    basename_1 = os.path.basename(vector_1_uri)
    vector_1 = ogr.Open(vector_1_uri)
    layer_1 = vector_1.GetLayer()
    LOGGER.debug('Opening vector %s', vector_2_uri)
    basename_2 = os.path.basename(vector_2_uri)
    vector_2 = ogr.Open(vector_2_uri)
    layer_2 = vector_2.GetLayer()
    # Pairwise comparison; returns as soon as any intersection is found.
    for feature_1 in layer_1:
        # Prepared geometry makes the repeated intersects() tests cheaper.
        prep_polygon = offsets.build_shapely_polygon(feature_1, prep=True)
        for feature_2 in layer_2:
            polygon = offsets.build_shapely_polygon(feature_2)
            if prep_polygon.intersects(polygon):
                fid_1 = feature_1.GetFID()
                fid_2 = feature_2.GetFID()
                LOGGER.debug('%s (fid %s) and %s (fid %s) intersect',
                             basename_1, fid_1, basename_2, fid_2)
                return True
        # Rewind the inner layer's cursor so the next outer feature compares
        # against all of layer_2 again.
        layer_2.ResetReading()
    LOGGER.debug('No Features intersect.')
    return False
def decoder_g(zxs):
    """Define decoder.

    Builds (or reuses, via AUTO_REUSE) a dense feed-forward decoder that
    maps latent samples ``zxs`` through FLAGS.n_hidden_units_g ReLU layers
    to an output of dimension FLAGS.dim_y.
    """
    with tf.variable_scope('decoder', reuse=tf.AUTO_REUSE):
        hidden_layer = zxs
        for i, n_hidden_units in enumerate(FLAGS.n_hidden_units_g):
            hidden_layer = tf.layers.dense(
                hidden_layer,
                n_hidden_units,
                activation=tf.nn.relu,
                name='decoder_{}'.format(i),
                reuse=tf.AUTO_REUSE,
                kernel_initializer='normal')
        # Final linear projection (no activation) to the output dimension.
        i = len(FLAGS.n_hidden_units_g)
        y_hat = tf.layers.dense(
            hidden_layer,
            FLAGS.dim_y,
            name='decoder_{}'.format(i),
            reuse=tf.AUTO_REUSE,
            kernel_initializer='normal')
    return y_hat
def make_evinfo_str(json_str):
    """
    Format the event information in ``json_str`` into a comma-separated
    string for DB registration.
    (Docstring translated from Japanese.)

    Scalar values are rendered as '"value"'; list values are rendered as
    '["a","b",...]'.

    :param json_str: request dict containing an event-info list under
        ``EventsRequestCommon.KEY_EVENTINFO``
    :return: the formatted string
    """
    def _fmt(value):
        # Scalars are simply quoted; lists become a bracketed,
        # comma-separated sequence of quoted items.
        if not isinstance(value, list):
            return '"%s"' % (value)
        return '[%s]' % ','.join('"%s"' % (item) for item in value)

    # str.join avoids the quadratic cost of repeated string concatenation.
    return ','.join(_fmt(v) for v in json_str[EventsRequestCommon.KEY_EVENTINFO])
def parse_cisa_data(parse_file: str) -> object:
    """Parse the CISA Known Exploited Vulnerabilities file and create a new dataframe.

    :param parse_file: path to the CISA KEV CSV file (GUID is the CVE-ID)
    :return: the parsed and extended pandas DataFrame
    """
    inform("Parsing results")
    # Now parse CSV using pandas, GUID is CVE-ID
    new_dataframe = pd.read_csv(parse_file, parse_dates=['dueDate', 'dateAdded'])
    # extend dataframe with a numeric vulnerable-asset counter
    new_dataframe['AssetsVulnerableCount'] = int(0)
    # Assign the converted series back (previously the to_numeric result
    # was computed and discarded).
    new_dataframe['AssetsVulnerableCount'] = pd.to_numeric(new_dataframe['AssetsVulnerableCount'])
    # force these fields to be dtype objects
    new_dataframe['AssetsVulnerable'] = pd.NaT
    # Previously this assigned `.astype('object').dtypes`, which broadcast
    # the dtype *scalar* into every row instead of converting the column.
    new_dataframe['AssetsVulnerable'] = new_dataframe['AssetsVulnerable'].astype('object')
    return new_dataframe
def camera():
    """Video streaming home page.

    Flask view: renders the ``index.html`` template.
    """
    return render_template('index.html')
def new_doc():
    """Creating a new document.

    GET (or POST without ``act=create``) renders the creation form; POST
    with ``act=create`` copies the LaTeX template into a new directory named
    after the submitted slug, reloads it and redirects to the document page.
    """
    if request.method == 'GET' or request.form.get('act') != 'create':
        return render_template('new.html', title='New document', permalink=url_for('.new_doc'))
    else:
        slug = request.form['slug'].strip()
        src = os.path.join(app.config['DOCPATH'], 'template', 'template.tex')
        dstdir = os.path.join(app.config['DOCPATH'], slug)
        dst = os.path.join(dstdir, slug + '.tex')
        try:
            os.mkdir(dstdir)
            shutil.copy(src, dst)
        # Catch only filesystem errors (e.g. the slug already exists or the
        # template is missing); the previous bare except also swallowed
        # unrelated failures such as KeyboardInterrupt and SystemExit.
        except OSError:
            flash('Failed to create new document.', 'error')
            return redirect(url_for('.new_doc'), 302)
        reload(slug)
        return redirect(url_for('.doc', slug=slug), 302)
def fit_alternative(model, dataloader, optimizer, train_data, labelled=True):
    """
    fit method using alternative loss, executes one epoch
    :param model: VAE model to train
    :param dataloader: input dataloader to fatch batches
    :param optimizer: which optimizer to utilize
    :param train_data: useful for plotting completion bar
    :param labelled: to know if the data is composed of (data, target) or only data
    :return: train loss as [kld_loss, reconstruction_loss, total_loss],
        each averaged over the dataset
    """
    model.train() # set in train mode
    running_loss, running_kld_loss, running_rec_loss = 0.0, 0.0, 0.0 # set up losses to accumulate over
    for i, data in tqdm(enumerate(dataloader), total=int(len(train_data) / dataloader.batch_size)):
        data = data[0] if labelled else data # get the train batch
        data = data.view(data.size(0), -1) # unroll
        optimizer.zero_grad() # set gradient to zero
        mu_rec, mu_latent, logvar_latent = model(data) # feedforward
        # model.log_var_rec is the (learned) reconstruction log-variance held
        # on the model itself rather than returned by the forward pass.
        loss = elbo_loss_alternative(mu_rec, model.log_var_rec, mu_latent, logvar_latent, data) # get loss value
        # update losses: loss is (kld, reconstruction, total)
        running_kld_loss += loss[0].item()
        running_rec_loss += loss[1].item()
        running_loss += loss[2].item()
        loss[2].backward() # set up gradient with total loss
        optimizer.step() # backprop
    # set up return variable for all three losses
    train_loss = [running_kld_loss / len(dataloader.dataset),
                  running_rec_loss / len(dataloader.dataset),
                  running_loss / len(dataloader.dataset)]
    return train_loss
def FlagOverrider(**flag_kwargs):
    """A Helpful decorator which can switch the flag values temporarily.

    Thin wrapper around ``flagsaver.flagsaver``: the flags named by the
    keyword arguments are overridden for the duration of the decorated
    callable and restored afterwards.
    """
    return flagsaver.flagsaver(**flag_kwargs)
def test_transcode_j2k_dicom_wsi_to_zarr(samples_path, tmp_path):
    """Test that we can transcode a J2K compressed DICOM WSI to a Zarr.

    ``samples_path`` and ``tmp_path`` are pytest fixtures pointing at the
    sample data directory and a per-test temporary directory.
    """
    reader = readers.Reader.from_file(samples_path / "CMU-1-Small-Region-J2K")
    writer = writers.ZarrReaderWriter(
        path=tmp_path / "CMU-1.zarr",
        tile_size=reader.tile_shape[::-1],
        dtype=reader.dtype,
    )
    writer.transcode_from_reader(reader=reader)
    # The output Zarr must exist as a non-empty directory store.
    assert writer.path.exists()
    assert writer.path.is_dir()
    assert len(list(writer.path.iterdir())) > 0
    output = zarr.open(writer.path)
    original = reader[...]
    new = output[0][...]
    assert original.shape == new.shape
    # Allow for some slight differences in the pixel values due to
    # different decoders.
    difference = original.astype(np.float16) - new.astype(np.float16)
    mse = (difference**2).mean()
    assert mse < 1.5
    assert np.percentile(np.abs(difference), 95) < 1
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.