| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def entry_point():
"""dbf_light command line utilities."""
| 16,200
|
def apply_flags_all_variables(station, all_variables, flag_col, logfile, test_name, plots = False, diagnostics = False):
"""
Apply these flags to all variables
:param object station: the station object to be processed
:param list all_variables: the variables where the flags are to be applied
:param list flag_col: which column in the qc_flags array to work on
:param file logfile: logfile to store outputs
    :param str test_name: test name for printing/logging
:param bool plots: to do any plots
:param bool diagnostics: do any extra diagnostic output
:returns:
"""
flag_locs, = np.where(station.qc_flags[:, flag_col] != 0)
for var in all_variables:
st_var = getattr(station, var)
# copy flags into attribute
st_var.flags[flag_locs] = 1
        if plots or diagnostics:
            print("Applying {} flags to {}".format(test_name, var))
        else:
            logfile.write("Applying {} flags to {}\n".format(test_name, var))
return
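# Minimal usage sketch, assuming a station object shaped like the ones this helper
# expects (a qc_flags array plus one attribute per variable carrying a .flags array);
# the _FakeStation/_FakeVar classes below are stand-ins, not the real QC classes.
import numpy as np

class _FakeVar(object):
    def __init__(self, n):
        self.flags = np.zeros(n)

class _FakeStation(object):
    def __init__(self, n, ncols):
        self.qc_flags = np.zeros((n, ncols))
        self.temperatures = _FakeVar(n)

station = _FakeStation(100, 5)
station.qc_flags[10:20, 2] = 1  # pretend a test flagged rows 10..19 in column 2
with open("qc.log", "w") as logfile:
    apply_flags_all_variables(station, ["temperatures"], 2, logfile, "spike")
assert station.temperatures.flags[15] == 1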
| 16,201
|
def exportSDFVisual(visualobj, linkobj, visualdata, indentation, modelname):
"""Simple wrapper for visual data of links.
The visual object is required to determine the position (pose) of the
object.
If relative poses are used the data found in visualdata (key pose) is used.
Otherwise the pose of the visual object will be combined with all
collected links up to the rootobject (see
phobos.utils.editing.getCombinedTransform).
    Args:
        visualobj: object to be used for pose
        linkobj: link object used to determine the pose of the visual
        visualdata: data as provided by dictionary (should contain name,
            geometry)
        indentation: indentation at current level
        modelname: the name of the model (required for geometry)
Returns:
: str -- writable xml line
"""
tagger = xmlTagger(initial=indentation)
tagger.descend('visual', attribs={'name': visualdata['name']})
# OPT: tagger.attrib('cast_shadows', ...)
# OPT: tagger.attrib('laser_retro', ...)
# OPT: tagger.attrib('transparency', ...)
# OPT: tagger.descend('meta')
# OPT: tagger.attrib('layer', ...)
# tagger.ascend()
# OPT: tagger.write(exportSDFFrame(..., tagger.get_indent()))
# Pose data of the visual is transformed by link --> use local matrix
matrix = visualobj.matrix_local
posedata = {
'rawmatrix': matrix,
'matrix': [list(vector) for vector in list(matrix)],
'translation': list(matrix.to_translation()),
'rotation_euler': list(matrix.to_euler()),
'rotation_quaternion': list(matrix.to_quaternion()),
}
# overwrite absolute position of the visual object
tagger.write(exportSDFPose(posedata, tagger.get_indent()))
# write material data if available
if 'material' in visualdata:
tagger.write(exportSDFMaterial(visualdata['material'], tagger.get_indent()))
tagger.write(exportSDFGeometry(visualdata['geometry'], tagger.get_indent(), modelname))
tagger.ascend()
return "".join(tagger.get_output())
| 16,202
|
def register_module():
"""Callback for module registration. Sets up URL routes."""
global custom_module # pylint: disable=global-statement
permissions = [
roles.Permission(EDIT_STUDENT_GROUPS_PERMISSION,
messages.EDIT_STUDENT_GROUPS_PERMISSION_DESCRIPTION),
]
def permissions_callback(unused_application_context):
return permissions
def notify_module_enabled():
"""Callback at module-enable time, just after module registration.
Responsible for registering module's callbacks and other items with
core and other modules.
"""
model_caching.CacheFactory.build(
MODULE_NAME_AS_IDENTIFIER, MODULE_NAME + " Caching",
messages.ENABLE_GROUP_CACHING,
max_size_bytes=(
StudentGroupAvailabilityRestHandler.MAX_NUM_MEMBERS * 1024 * 4),
ttl_sec=60 * 60, dao_class=StudentGroupDAO)
# Tell permissioning system about permission for this module.
roles.Roles.register_permissions(custom_module, permissions_callback)
# Navigation sub-tab for showing list of student groups, and
# associated role-level permission.
dashboard.DashboardHandler.add_sub_nav_mapping(
'settings', MODULE_NAME_AS_IDENTIFIER, 'Student Groups',
action=StudentGroupListHandler.ACTION,
contents=StudentGroupListHandler.render_groups_view)
dashboard.DashboardHandler.map_get_action_to_permission(
StudentGroupListHandler.ACTION, custom_module,
EDIT_STUDENT_GROUPS_PERMISSION)
# Register action for add/edit/delete of student group.
dashboard.DashboardHandler.add_custom_get_action(
StudentGroupRestHandler.ACTION,
handler=StudentGroupRestHandler.edit_student_group,
in_action=StudentGroupListHandler.ACTION)
dashboard.DashboardHandler.map_get_action_to_permission(
StudentGroupRestHandler.ACTION, custom_module,
EDIT_STUDENT_GROUPS_PERMISSION)
# Override existing action for availability. For UX convenience,
# we want to have the same page modify overall course availability
# as well as per-group availability.
dashboard.DashboardHandler.add_custom_get_action(
availability.AvailabilityRESTHandler.ACTION,
StudentGroupAvailabilityRestHandler.get_form, overwrite=True)
# Register a callback to add the user's student group ID (if any) to
# recorded events.
models.EventEntity.EVENT_LISTENERS.append(
_add_student_group_to_event)
# Register a component with the student-aggregator data pump source
# so that student-aggregate records get marked with the group ID
# for that student.
student_aggregate.StudentAggregateComponentRegistry.register_component(
AddToStudentAggregate)
# Register a callback with models.models.StudentProfileDAO to let us
# know when a student registers. This allows us to move the
# Definitive Truth about group membership to the Student record.
models.StudentProfileDAO.STUDENT_CREATION_HOOKS.append(
StudentGroupMembership.user_added_callback)
# Register a callback with Course so that when anyone asks for the
# student-facing list of units and lessons we can modify them as
# appropriate.
courses.Course.COURSE_ELEMENT_STUDENT_VIEW_HOOKS.append(
modify_unit_and_lesson_attributes)
# Register a callback with Course so that when the environment is
# fetched, we can submit overwrite items.
courses.Course.COURSE_ENV_POST_COPY_HOOKS.append(
modify_course_environment)
# Register student group as a generically handle-able translatable
# resource.
resource.Registry.register(ResourceHandlerStudentGroup)
# Register student group as a translatable item; the title and
# description can appear on student profile pages.
i18n_dashboard.TranslatableResourceRegistry.register(
TranslatableResourceStudentGroups)
# Register a section on the student profile to add the current
# student's group - if any.
utils.StudentProfileHandler.EXTRA_PROFILE_SECTION_PROVIDERS.append(
_add_student_group_to_profile)
# Register with gradebook to add student group as a filterable
# item.
gradebook.RawAnswersDataSource.FILTERS.append(StudentGroupFilter)
# Register with generator feeding gradebook to add some handling to
# the map and reduce steps so we can generate our filter-able data
# column in the generator's output.
gradebook.RawAnswersGenerator.register_hook(
MODULE_NAME,
_add_student_group_to_map_result,
_add_student_group_to_kwargs)
# Add our types to the set of DB tables for download/upload of course.
courses.ADDITIONAL_ENTITIES_FOR_COURSE_IMPORT.add(StudentGroupEntity)
courses.ADDITIONAL_ENTITIES_FOR_COURSE_IMPORT.add(
StudentGroupMembership)
custom_module = custom_modules.Module(
MODULE_NAME, 'Define and manage groups of students.',
global_routes=[
(EmailToObfuscatedUserIdCleanup.URL,
EmailToObfuscatedUserIdCleanup),
], namespaced_routes=[
(StudentGroupRestHandler.URL,
StudentGroupRestHandler),
(StudentGroupAvailabilityRestHandler.URL,
StudentGroupAvailabilityRestHandler)
],
notify_module_enabled=notify_module_enabled)
return custom_module
| 16,203
|
def build_texttable(events):
"""
value['date'], value["target"], value['module_name'], value['scan_unique_id'],
value['options'], value['event']
build a text table with generated event related to the scan
:param events: all events
:return:
array [text table, event_number]
"""
_table = texttable.Texttable()
table_headers = [
'target',
'module_name',
'scan_unique_id',
'options',
'event',
'date'
]
_table.add_rows(
[
table_headers
]
)
    for event in events:
        _table.add_row(
            [
                event['target'],
                event['module_name'],
                event['scan_unique_id'],
                event['options'],
                event['event'],
                event['date']
            ]
        )
return _table.draw().encode('utf8') + b'\n\n' + messages("nettacker_version_details").format(
version_info()[0],
version_info()[1],
now()
).encode('utf8') + b"\n"
| 16,204
|
def for_all_regions(get_client_func, catalog_entry, action_func, parsed_args):
"""
Run the provided function on all the available regions.
Available regions are determined based on the user service catalog entries.
"""
result = []
cache_key = 'todo'
cache_item = CACHE.get(cache_key, None)
if cache_item is None:
client = get_client_func(parsed_args)
CACHE[cache_key] = client
else:
client = cache_item
catalog = client.connection.get_service_catalog()
urls = catalog.get_public_urls(service_type=catalog_entry,
name=catalog_entry)
auth_connection = client.connection.get_auth_connection_instance()
driver_kwargs = {'ex_auth_connection': auth_connection}
def run_in_pool(client):
item = action_func(client)
result.extend(item)
for api_url in urls:
parsed_args.api_url = api_url
client = get_client_func(parsed_args, driver_kwargs=driver_kwargs)
run_function(pool, run_in_pool, client)
join_pool(pool)
return result
| 16,205
|
def copy_file_from_worker(request, context):
"""Copy file from worker to host."""
path = request.path
if not os.path.isfile(path):
context.set_trailing_metadata([('result', 'invalid-path')])
return
    with open(path, 'rb') as f:
for chunk in file_utils.file_chunk_generator(f):
yield chunk
context.set_trailing_metadata([('result', 'ok')])
| 16,206
|
def migrate(source_config, target_config, migration_config):
"""
Migrate data from the source database to the target database. The target
database and schema must already exist.
Args:
source_config (dict): Settings for source database.
target_config (dict): Settings for target database.
migration_config (dict): Settings for the migration.
"""
msg = 'Migrating data to target database...\n'
print(msg)
# set up multiprocessing
if migration_config['multiprocess']:
# set number of processes
if migration_config['processes']:
pool = multiprocessing.Pool(int(migration_config['processes']))
else:
pool = multiprocessing.Pool()
# starmap takes an iterable list
arg_iterable = [[schema,source_config,target_config,migration_config] for schema in source_config['schema_list']]
pool.starmap(_migrate_data,arg_iterable)
else:
for schema in source_config['schema_list']:
_migrate_data(schema,source_config,target_config,migration_config)
msg = 'Migration complete!\n'
logging.info(msg)
print(msg)
| 16,207
|
def KL_distance(image1, image2):
"""
Given two images, calculate the KL divergence between the two
2d array is not supported, so we have to flatten the array and compare each pixel in the image1 to the corresponding pixel in the image2.
"""
return scipy.stats.entropy(image1.ravel(), image2.ravel())
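# Worked example, relying on the usual scipy behaviour: scipy.stats.entropy
# normalises both flattened inputs to sum to one and returns sum(p * log(p / q)),
# so comparing an image with itself gives exactly 0 and the measure is asymmetric.
import numpy as np
import scipy.stats

img_a = np.array([[1.0, 2.0], [3.0, 4.0]])
img_b = np.array([[4.0, 3.0], [2.0, 1.0]])
print(KL_distance(img_a, img_a))  # 0.0
print(KL_distance(img_a, img_b))  # > 0, and generally != KL_distance(img_b, img_a)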
| 16,208
|
def stop_all_bots():
"""
This function address RestAPI call to stop polling for all bots which
have ever started polling.
:return:
"""
bots_stopped = procedures.stop_all() # Stop all bots.
    botapi_logger.info('Successfully stopped {count} bots for polling in '
                       'stop_all api call.'.format(count=len(bots_stopped)))
    if len(bots_stopped) > 0:
return jsonify({
"result": "success",
"message": "Successfully stopped {count} previously running "
"bots.".format(count=len(bots_stopped)),
"ids": [bot_id for bot_id in bots_stopped]
}), 200
else:
        return internal_server_error(
            message="No previously running bots to stop.")
| 16,209
|
def load_word_embedding_dict(embedding, embedding_path, normalize_digits=True):
"""
load word embeddings from file
:param embedding:
:param embedding_path:
    :param normalize_digits: whether to replace digits with "0"
    :return: embedding dict, embedding dimension, caseless
    """
    print("loading embedding: %s from %s" % (embedding, embedding_path))
if embedding == 'word2vec':
# loading word2vec
word2vec = Word2Vec.load_word2vec_format(embedding_path, binary=True)
embedd_dim = word2vec.vector_size
return word2vec, embedd_dim, False
elif embedding == 'glove':
# loading GloVe
embedd_dim = -1
embedd_dict = dict()
with gzip.open(embedding_path, 'r') as file:
for line in file:
line = line.strip()
line = line.decode('utf-8')
if len(line) == 0:
continue
tokens = line.split()
if len(tokens) <101:
continue
if embedd_dim < 0:
embedd_dim = len(tokens) - 1
else:
assert (embedd_dim + 1 == len(tokens))
embedd = np.empty([1, embedd_dim], dtype=theano.config.floatX)
embedd[:] = tokens[1:]
word = data_utils.DIGIT_RE.sub(b"0", tokens[0]) if normalize_digits else tokens[0]
embedd_dict[word] = embedd
return embedd_dict, embedd_dim, True
elif embedding == 'senna':
# loading Senna
embedd_dim = -1
embedd_dict = dict()
with gzip.open(embedding_path, 'r') as file:
for line in file:
line = line.strip()
line = line.decode('utf-8')
if len(line) == 0:
continue
tokens = line.split()
if embedd_dim < 0:
embedd_dim = len(tokens) - 1
else:
assert (embedd_dim + 1 == len(tokens))
embedd = np.empty([1, embedd_dim], dtype=theano.config.floatX)
embedd[:] = tokens[1:]
word = data_utils.DIGIT_RE.sub(b"0", tokens[0]) if normalize_digits else tokens[0]
embedd_dict[word] = embedd
return embedd_dict, embedd_dim, True
elif embedding == 'sskip':
embedd_dim = -1
embedd_dict = dict()
with gzip.open(embedding_path, 'r') as file:
# skip the first line
file.readline()
for line in file:
line = line.strip()
line = line.decode('utf-8')
if len(line) == 0:
continue
tokens = line.split()
if embedd_dim < 0:
embedd_dim = len(tokens) - 1
embedd = np.empty([1, embedd_dim], dtype=theano.config.floatX)
start = len(tokens) - embedd_dim
word = ' '.join(tokens[0:start])
embedd[:] = tokens[start:]
word = data_utils.DIGIT_RE.sub(b"0", word) if normalize_digits else word
embedd_dict[word] = embedd
return embedd_dict, embedd_dim, True
elif embedding == 'polyglot':
words, embeddings = pickle.load(open(embedding_path, 'rb'))
_, embedd_dim = embeddings.shape
embedd_dict = dict()
for i, word in enumerate(words):
embedd = np.empty([1, embedd_dim], dtype=theano.config.floatX)
embedd[:] = embeddings[i, :]
word = data_utils.DIGIT_RE.sub(b"0", word) if normalize_digits else word
embedd_dict[word] = embedd
return embedd_dict, embedd_dim, False
else:
        raise ValueError("embedding should be chosen from [word2vec, glove, senna, sskip, polyglot]")
| 16,210
|
def fibonacci(n: int) -> int:
"""Returns nth fib number, fib_0 = 0, fib_1 = 1, ..."""
print(sys.platform)
return nfibonacci(n + 1)[-1]
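# The wrapper above delegates to an external nfibonacci helper. The sketch below
# only illustrates the behaviour it appears to rely on (returning the first k
# Fibonacci numbers); the real helper may differ.
def nfibonacci_sketch(k: int) -> list:
    seq = [0, 1]
    while len(seq) < k:
        seq.append(seq[-1] + seq[-2])
    return seq[:k]

# With that convention, fibonacci(6) == nfibonacci(7)[-1] == 8.
assert nfibonacci_sketch(7)[-1] == 8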
| 16,211
|
def random_exponential(shape=(40,60), a0=100, dtype=float) :
"""Returns numpy array of requested shape and type filled with exponential distribution for width a0.
"""
a = a0*np.random.standard_exponential(size=shape)
return a.astype(dtype)
| 16,212
|
def get_mpi_components_from_files(fileList, threads=False):
"""
Given a list of files to read input data from, gets a percentage of time
spent in MPI, and a breakdown of that time in MPI
"""
percentDict = dict()
timeDict = dict()
for filename in fileList:
filename = filename.strip()
try:
# Open the file for reading
with open(filename, "r") as infile:
# Read the json
jsonDict = json.load(infile)
runtime = get_runtime(jsonDict)
numprocs = get_num_threads(jsonDict) if threads else get_num_processes(jsonDict)
# Read the overview data and get the percentage of overall time spent in mpi
subDict = get_overview_data(jsonDict)
mpiPercent = get_dict_field_val(subDict, ["mpi", "percent"]) #mpiTime = (percent / 100.) * runtime
# Now get the sub-percentage of the mpi time
mpiEntry = get_dict_field_val(jsonDict, ["data", "mpi"])
# Get all of the percentages (as a percentage of total time)
mpiSubPercent = [float(get_dict_field_val(mpiEntry, [field])) * mpiPercent / 100. for field in mpiSubPercentages]
mpiSubTime = [runtime * subpercent / 100. for subpercent in mpiSubPercent]
percentDict[numprocs] = mpiSubPercent
timeDict[numprocs] = mpiSubTime
except IOError:
print("File " + filename + " does not exist. Skipping.")
pass
return percentDict, timeDict
| 16,213
|
def get_character_url(name):
"""Gets a character's tibia.com URL"""
return url_character + urllib.parse.quote(name.encode('iso-8859-1'))
| 16,214
|
def setup_temp_data():
"""
resource for memory mapped files
:return:
"""
temp_folder = os.getcwd()+'/temp'
if not os.path.isdir(temp_folder):
os.mkdir(temp_folder)
print("\nsetting up temp folder")
# setup files, provide memmap type
img = np.random.random((2048, 2048))
np.save(temp_folder+'/img.npy', img)
print("setting up temp file")
yield np.memmap(temp_folder+'/img.npy', shape=(2048, 2048), dtype=np.uint8)
# breakdown files
if os.path.isfile(temp_folder+'/img.npy'):
os.remove(temp_folder+'/img.npy')
print("\nbreaking down temp file")
if os.path.isdir(temp_folder):
os.rmdir(temp_folder)
print("breaking down temp folder")
| 16,215
|
def parse_input(lines):
"""Parse the input document, which contains validity rules for the various
ticket fields, a representation of my ticket, and representations of a
number of other observed tickets.
Return a tuple of (rules, ticket, nearby_tickets)
"""
section = parse_sections(lines)
rules = parse_rules(section[0])
my_ticket = parse_ticket(section[1][1])
tickets = [parse_ticket(line) for line in section[2][1:]]
return (rules, my_ticket, tickets)
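# Sketch of the input layout this parser appears to target (values are illustrative;
# parse_sections/parse_rules/parse_ticket are defined elsewhere in the module):
EXAMPLE_INPUT = """\
class: 1-3 or 5-7
row: 6-11 or 33-44

your ticket:
7,1,14

nearby tickets:
7,3,47
40,4,50
""".splitlines()
# rules, my_ticket, tickets = parse_input(EXAMPLE_INPUT)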
| 16,216
|
def _call(retaddr=None):
"""Push a new stack frame with retaddr in cell 0 of it"""
##debug("call, will return to addr:%s" % str(retaddr))
stack.append([retaddr])
| 16,217
|
def main():
"""Main function"""
y = [1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0]
pred = [1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1]
print_confusion_matrix(y, pred)
mx_ = get_confusion_matrix(y, pred)
print("\n")
print("Mcc: ", get_MCC(mx_))
print("Acc.: ", get_accuracy(mx_))
print("F1 score: ", get_f1score(mx_))
print("Precision: ", get_precision(mx_))
print("Recall: ", get_recall(mx_))
print("Sen.: ", get_sensitivity(mx_))
print("Spec.:", get_specificity(mx_))
print("\n")
[tp, fp], [fn, tn] = mx_
(
sensitivity_point_estimate,
specificity_point_estimate,
sensitivity_confidence_interval,
specificity_confidence_interval,
) = sensitivity_and_specificity_with_confidence_intervals(
tp, fp, fn, tn, alpha=0.05
)
print(
"Sensitivity: %f, Specificity: %f"
% (sensitivity_point_estimate, specificity_point_estimate)
)
print("alpha = %f CI for sensitivity:" % 0.05, sensitivity_confidence_interval)
print("alpha = %f CI for specificity:" % 0.05, specificity_confidence_interval)
print("\n")
# DeLong confidence interval
auc, auc_cov, ci = get_delong_ci(y, pred)
for a in [0.0, 0.5, 0.9, 0.95, 0.99, 0.999999]:
(
sensitivity_point_estimate,
specificity_point_estimate,
sensitivity_confidence_interval,
specificity_confidence_interval,
) = sensitivity_and_specificity_with_confidence_intervals(
tp, fp, fn, tn, alpha=a
)
print(
"Sensitivity: %f, Specificity: %f"
% (sensitivity_point_estimate, specificity_point_estimate)
)
print("alpha = %f CI for sensitivity:" % a, sensitivity_confidence_interval)
print("alpha = %f CI for specificity:" % a, specificity_confidence_interval)
print("")
manual_feature_names = (
"(prot9 - exp(prot10))**3",
"(-prot10**3 + Abs(prot4))**2",
"(-prot10 + Abs(prot3))**3",
"(prot1 + Edad_scaled**3)**3",
"(prot9 + Abs(prot6))**3",
"prot9*exp(-prot6)",
"(prot9 + Abs(prot3))**2",
"(-prot2**3 + prot10**2)**3",
)
print("Number of proteins in the feature vector: ")
print(get_number_of_proteins(manual_feature_names))
## Get of Maximal Information Coefficient (MIC), Pearson, Spearman
# and Cosine similarity
# generate random numbers between 0-1 *10
x = list((rand(10) * 10))
y = [2.0 + 0.7 * num**2 + 0.5 * num for num in x]
print("---------------------------")
out = get_correlation_metrics(x, y)
print(out)
| 16,218
|
def _cnv_prioritize(data):
"""Perform confidence interval based prioritization for CNVs.
"""
supported = {"cnvkit": {"inputs": ["call_file", "segmetrics"], "fn": _cnvkit_prioritize}}
pcall = None
priority_files = None
for call in data.get("sv", []):
if call["variantcaller"] in supported:
priority_files = [call.get(x) for x in supported[call["variantcaller"]]["inputs"]]
priority_files = [x for x in priority_files if x is not None and utils.file_exists(x)]
if len(priority_files) == len(supported[call["variantcaller"]]["inputs"]):
pcall = call
break
prioritize_by = tz.get_in(["config", "algorithm", "svprioritize"], data)
if pcall and prioritize_by:
out_file = "%s-prioritize.tsv" % utils.splitext_plus(priority_files[0])[0]
gene_list = _find_gene_list_from_bed(prioritize_by, out_file, data)
if gene_list:
with open(gene_list) as in_handle:
genes = [x.strip() for x in in_handle]
args = [dd.get_sample_name(data), genes] + priority_files
df = supported[pcall["variantcaller"]]["fn"](*args)
with file_transaction(data, out_file) as tx_out_file:
df.to_csv(tx_out_file, sep="\t", index=False)
pcall["priority"] = out_file
return data
| 16,219
|
def add_meta_files(tarfile, meta_files_dir):
"""Adds the meta files to the specified tarfile.
Args:
tarfile: The tarfile object where the files needs to be added.
meta_files_dir: The directory containing the meta files.
"""
tarfile.add(os.path.join(meta_files_dir, 'TIMESTAMP'), arcname='TIMESTAMP')
tarfile.add(os.path.join(meta_files_dir, 'SCHEMA_SEQUENCE'), arcname='SCHEMA_SEQUENCE')
| 16,220
|
def maximum(
left_node: NodeInput,
right_node: NodeInput,
auto_broadcast: str = "NUMPY",
name: Optional[str] = None,
) -> Node:
"""Return node which applies the maximum operation to input nodes elementwise."""
return _get_node_factory_opset1().create(
"Maximum", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}
)
| 16,221
|
def write_csv(file_name, upload_name, is_local, header, body):
""" Write a CSV to the relevant location.
Args:
file_name: pathless file name
upload_name: file name to be used as S3 key
is_local: True if in local development, False otherwise
header: value to write as the first line of the file
body: Iterable to write as the body of the file
"""
local_filename = CONFIG_BROKER['broker_files'] + file_name
if is_local:
logger.debug({
'message': "Writing file locally...",
'message_type': 'ValidatorDebug',
'file_name': local_filename
})
with open(local_filename, 'w', newline='') as csv_file:
# create local file and write headers
out_csv = csv.writer(csv_file, delimiter=',', quoting=csv.QUOTE_MINIMAL, lineterminator='\n')
if header:
out_csv.writerow(header)
for line in body:
out_csv.writerow(line)
csv_file.close()
if not is_local:
upload_file_to_s3(upload_name, local_filename)
os.remove(local_filename)
| 16,222
|
def create_template_error():
"""
Создает заготовку для генерации ошибок
"""
return {'response': False}
| 16,223
|
def set_ball(x_dir=None):
    """ Resets ball to centre of screen """
    global ball_vel, ball_pos
    if x_dir is None:
        # pick the horizontal direction at call time, not once at definition time
        x_dir = random.randrange(0, 2) == 0
    ball_vel = [random.randrange(2, 4), random.randrange(1, 3)]
    if random.randrange(0, 2) == 0:
        ball_vel[y] *= -1
    if x_dir:
        ball_vel[x] *= -1
    ball_pos = [win_width//2, win_height//2]
| 16,224
|
def download_and_extract_index(storage_bucket: Any, extract_destination_path: str) -> Tuple[str, Any, int]:
"""Downloads and extracts index zip from cloud storage.
Args:
storage_bucket (google.cloud.storage.bucket.Bucket): google storage bucket where index.zip is stored.
extract_destination_path (str): the full path of extract folder.
Returns:
str: extracted index folder full path.
Blob: google cloud storage object that represents index.zip blob.
str: downloaded index generation.
"""
if storage_bucket.name == GCPConfig.PRODUCTION_PRIVATE_BUCKET:
index_storage_path = os.path.join(GCPConfig.PRIVATE_BASE_PATH, f"{GCPConfig.INDEX_NAME}.zip")
else:
index_storage_path = os.path.join(GCPConfig.STORAGE_BASE_PATH, f"{GCPConfig.INDEX_NAME}.zip")
download_index_path = os.path.join(extract_destination_path, f"{GCPConfig.INDEX_NAME}.zip")
index_blob = storage_bucket.blob(index_storage_path)
index_folder_path = os.path.join(extract_destination_path, GCPConfig.INDEX_NAME)
index_generation = 0 # Setting to 0 makes the operation succeed only if there are no live versions of the blob
if not os.path.exists(extract_destination_path):
os.mkdir(extract_destination_path)
if not index_blob.exists():
os.mkdir(index_folder_path)
logging.error(f"{storage_bucket.name} index blob does not exists")
return index_folder_path, index_blob, index_generation
index_blob.reload()
index_generation = index_blob.generation
index_blob.download_to_filename(download_index_path, if_generation_match=index_generation)
if os.path.exists(download_index_path):
with ZipFile(download_index_path, 'r') as index_zip:
index_zip.extractall(extract_destination_path)
if not os.path.exists(index_folder_path):
logging.critical(f"Failed creating {GCPConfig.INDEX_NAME} folder with extracted data.")
sys.exit(1)
os.remove(download_index_path)
logging.success(f"Finished downloading and extracting {GCPConfig.INDEX_NAME} file to "
f"{extract_destination_path}")
return index_folder_path, index_blob, index_generation
else:
logging.critical(f"Failed to download {GCPConfig.INDEX_NAME}.zip file from cloud storage.")
sys.exit(1)
| 16,225
|
def MenuPersonnalise(contenu): #py:MakeCustomMenu
"""
À l'intention des éducateurs. Permet de créer des menus de monde
personalisés. Voir la documentation pour plus de détails."""
RUR._MakeCustomMenu_(contenu)
| 16,226
|
def e_qest(model, m):
"""
Calculation of photocounting statistics estimation from
photon-number statistics estimation
Parameters
----------
model : InvPBaseModel
m : int
Photocount number.
"""
return quicksum(model.T[m, n] * model.PEST[n]
for n in model.PSET)
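# In matrix form the expression above is simply p_est = T @ p_photon. A small
# numpy illustration of that relation, using a made-up detection matrix T[m, n]:
import numpy as np

T = np.array([[1.0, 0.5, 0.25],
              [0.0, 0.5, 0.50],
              [0.0, 0.0, 0.25]])
p_photon = np.array([0.6, 0.3, 0.1])   # photon-number statistics estimate
p_counts = T @ p_photon                # one photocounting probability per m
print(p_counts)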
| 16,227
|
def load_input(fname):
"""Read in the data, return as a list."""
data = [""]
with open(fname, "r") as f:
for line in f.readlines():
if line.strip("\n"):
data[-1] += line.strip("\n") + " "
else:
data[-1] = data[-1].strip(" ")
data.append("")
    data[-1] = data[-1].strip(" ")
return data
| 16,228
|
def parse_repo_layout_from_json(file_):
"""Parse the repo layout from a JSON file.
Args:
file_ (File): The source file.
Returns:
RepoLayout
Raises:
InvalidConfigFileError: The configuration file is invalid.
"""
def encode_dict(data):
new_data = {}
for key, value in data.items():
# Waf Node API requires String objects
if not isinstance(key, str):
new_data[key.encode('utf-8')] = [i.encode('utf-8')
for i in value]
else:
new_data[key] = value
return new_data
try:
loaded_dict = json.load(file_, object_hook=encode_dict)
except ValueError as e:
raise blderror.InvalidConfigFileError('Invalid .bdelayoutconfig: %s' %
e.message)
repo_layout = repolayout.RepoLayout()
for key in loaded_dict:
if key in repo_layout.__dict__:
setattr(repo_layout, key, loaded_dict[key])
else:
logutil.warn('Invalid field in .bdelayoutconfig: %s.' %
key)
return repo_layout
| 16,229
|
def SizeArray(input_matrix):
"""
Return the size of an array
"""
nrows=input_matrix.shape[0]
ncolumns=input_matrix.shape[1]
return nrows,ncolumns
| 16,230
|
def show_fun_elem_state_machine(fun_elem_str, xml_state_list, xml_transition_list,
xml_fun_elem_list):
"""Creates lists with desired objects for <functional_element> state, send them to
plantuml_adapter.py then returns url_diagram"""
new_fun_elem_list = set()
main_fun_elem = check_get_object(fun_elem_str, **{'xml_fun_elem_list': xml_fun_elem_list})
if not main_fun_elem:
return None
if not main_fun_elem.allocated_state_list:
print(f"No state allocated to {main_fun_elem.name} (no display)")
return None
new_fun_elem_list.add(main_fun_elem)
new_state_list = {s for s in xml_state_list if s.id in main_fun_elem.allocated_state_list}
new_transition_list = get_transitions(new_state_list, xml_transition_list)
_, url_diagram = plantuml_adapter.get_state_machine_diagram(new_state_list,
new_transition_list,
xml_fun_elem_list)
print("State Machine Diagram for " + fun_elem_str + " generated")
return url_diagram
| 16,231
|
def get_bank_account_rows(*args, **kwargs):
"""
获取列表
:param args:
:param kwargs:
:return:
"""
return db_instance.get_rows(BankAccount, *args, **kwargs)
| 16,232
|
def load_class(class_name, module_name):
"""Dynamically load a class from strings or raise a helpful error."""
# TODO remove this nasty python 2 hack
try:
ModuleNotFoundError
except NameError:
ModuleNotFoundError = ImportError
try:
loaded_module = importlib.import_module(module_name)
class_ = getattr(loaded_module, class_name)
except ModuleNotFoundError as e:
raise PluginModuleNotFoundError(module_name=module_name)
except AttributeError as e:
raise PluginClassNotFoundError(
module_name=module_name,
class_name=class_name
)
return class_
| 16,233
|
def pybo_mod(tokens, tag_codes=[]):
"""extract text/pos tuples from Token objects"""
txt_tags = []
for token in tokens:
tags = []
tags.append(token.text)
# Select and order the tags
for tag_code in tag_codes:
tags.append(get_tag(token, tag_code))
txt_tags.append(tags)
return txt_tags
| 16,234
|
def test_string_value():
"""
Test string values.
"""
lib.backup_and_restore(
lambda context: put_values(lib.SET, "key", STRING_VALUES),
None,
lambda context: check_values(lib.SET, "key", STRING_VALUES)
)
| 16,235
|
def acf(
da: xr.DataArray, *, lag: int = 1, group: str | Grouper = "time.season"
) -> xr.DataArray:
"""Autocorrelation function.
Autocorrelation with a lag over a time resolution and averaged over all years.
Parameters
----------
da : xr.DataArray
Variable on which to calculate the diagnostic.
lag: int
Lag.
group : {'time.season', 'time.month'}
Grouping of the output.
E.g. If 'time.month', the autocorrelation is calculated over each month separately for all years.
Then, the autocorrelation for all Jan/Feb/... is averaged over all years, giving 12 outputs for each grid point.
Returns
-------
xr.DataArray
lag-{lag} autocorrelation of the variable over a {group.prop} and averaged over all years.
See Also
--------
statsmodels.tsa.stattools.acf
References
----------
Alavoine M., and Grenier P. (under review) The distinct problems of physical inconsistency and of multivariate bias potentially involved in the statistical adjustment of climate simulations. International Journal of Climatology, submitted on September 19th 2021. (Preprint: https://doi.org/10.31223/X5C34C)
Examples
--------
>>> from xclim.testing import open_dataset
>>> pr = open_dataset(path_to_pr_file).pr
>>> acf(da=pr, lag=3, group="time.season")
"""
attrs = da.attrs
def acf_last(x, nlags):
# noqa: D403
"""statsmodels acf calculates acf for lag 0 to nlags, this return only the last one."""
# As we resample + group, timeseries are quite short and fft=False seems more performant
out_last = stattools.acf(x, nlags=nlags, fft=False)
return out_last[-1]
@map_groups(out=[Grouper.PROP], main_only=True)
def _acf(ds, *, dim, lag, freq):
out = xr.apply_ufunc(
acf_last,
ds.dat.resample({dim: freq}),
input_core_dims=[[dim]],
vectorize=True,
kwargs={"nlags": lag},
)
out = out.mean("__resample_dim__")
return out.rename("out").to_dataset()
out = _acf(da.rename("dat").to_dataset(), group=group, lag=lag, freq=group.freq).out
out.attrs.update(attrs)
out.attrs["long_name"] = f"lag-{lag} autocorrelation"
out.attrs["units"] = ""
out.name = "acf"
return out
| 16,236
|
def username(UID: str) -> str:
"""
Get a users username from their user ID.
>>> username("zx7gd1yx")
'1'
>>> username("7j477kvj")
'AnInternetTroll'
>>> username("Sesame Street")
Traceback (most recent call last):
...
utils.UserError: User with uid 'Sesame Street' not found.
"""
R: dict = requests.get(f"{API}/users/{UID}").json()
try:
return R["data"]["names"]["international"]
except KeyError:
raise UserError(f"User with uid '{UID}' not found.")
| 16,237
|
def handle_exceptions(func):
"""Exception handler helper function."""
import logging
logging.basicConfig(level = logging.INFO)
def wrapper_func(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
logging.error(f'{func.__name__} raised an error: {e}')#, exc_info = True)
return None
return wrapper_func
| 16,238
|
def wflow_eqv(eqv_folder="eqv", jobname="NbTiVZr", latname="bcc", queue_type="pbs",
emtopath="./", sws0=3.0, sws_percent=10.0, sws_step=11,
concs=[[0.25, 0.25, 0.25, 0.25]], species=[['Nb','Ti','V','Zr']]):
"""
    Workflow for equilibrium volume calculation
    Parameters
eqv_folder: str
The folder name for the input and result, "eqv"
jobname: str
The jobname, "NbTiVZr"
latname: str
The lattice name, "bcc"
queue_type = "pbs"
emtopath = "./"
sws0 = 3.0
sws_percent = 10.0
sws_step = 11
concs = [[0.25, 0.25, 0.25, 0.25]]
species = [['Nb','Ti','V','Zr']]
    Returns
None
"""
creat_folders(eqv_folder)
os.chdir(eqv_folder)
input_gen_eqv(jobname=jobname, emtopath=emtopath, latname=latname, sws0=sws0, sws_percent=sws_percent,
sws_step=sws_step, concs=concs, species = species)
write_eqv_post(jobname=jobname, folder=emtopath, lat=latname, concs=concs, species=species, DLM=False)
pbs_scripts = find_pbs_script(folder=emtopath, jobname=jobname, latname=latname, ext=queue_type)
cmd_lines, head_lines = merge_batchfile(pbs_scripts, latpath=emtopath, jobname=jobname, queue_type=queue_type)
script_lines = head_lines
script_lines += "\n#Change to " + eqv_folder + " folder.\n"
script_lines += "cd " + eqv_folder + "\n"
script_lines += cmd_lines
script_lines += "\npython " + jobname + "_eqv_post.py\n"
script_lines += "cd .."
os.chdir("..")
with open(jobname + "_" + eqv_folder + "." + queue_type, "w+") as fid:
fid.write(script_lines)
| 16,239
|
def apply_hamming_window(image):
"""Cross correlate after applying hamming window to compensate side effects"""
window_h = np.hamming(image.shape[0])
window_v = np.hamming(image.shape[1])
image = np.multiply(image.T, window_h).T
return np.multiply(image, window_v)
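# Usage sketch: applying the two 1-D Hamming windows row- and column-wise is
# equivalent to multiplying by their outer product, which tapers the image
# towards zero at the borders.
import numpy as np

image = np.ones((8, 16))
tapered = apply_hamming_window(image)
assert np.allclose(tapered, np.outer(np.hamming(8), np.hamming(16)))
assert tapered[0, 0] < tapered[4, 8]   # corners are damped more than the centre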
| 16,240
|
def calc_ewald_sum(dielectric_tensor: np.ndarray,
real_lattice_set: np.ndarray,
reciprocal_lattice_set: np.ndarray,
mod_ewald_param: float,
root_det_epsilon: float,
volume: float,
) -> Tuple[float, float]:
"""Return real and reciprocal Ewald summations at given parameters"""
epsilon_inv = np.linalg.inv(dielectric_tensor)
real_sum = 0
# Skip the potential caused by the defect itself
for v in real_lattice_set:
root_r_inv_epsilon_r = np.sqrt(reduce(dot, [v.T, epsilon_inv, v]))
real_sum += \
erfc(mod_ewald_param * root_r_inv_epsilon_r) / root_r_inv_epsilon_r
real_part = real_sum / (4 * pi * root_det_epsilon)
# Ewald reciprocal part
# sum exp(-g * epsilon * g / (4 * ewald ** 2)) / g * epsilon * g [1/A]
reciprocal_sum = 0
for g in reciprocal_lattice_set:
g_epsilon_g = reduce(dot, [g.T, dielectric_tensor, g])
reciprocal_sum += \
(exp(- g_epsilon_g / 4.0 / mod_ewald_param ** 2)
/ g_epsilon_g * cos(dot(g, np.zeros(3)))) # [A^2]
reciprocal_part = reciprocal_sum / volume
return real_part, reciprocal_part
| 16,241
|
def proximal_descent(
x0, grad, prox, step_size, momentum='fista', restarting=None,
max_iter=100, early_stopping=True, eps=np.finfo(np.float64).eps,
obj=None, benchmark=False):
""" Proximal descent algorithm.
Parameters
----------
x0 : array, shape (n_length, ), initial variables
grad : func, gradient function
prox : func, proximal operator function
step_size : float, step-size for the gradient descent
momentum : str or None, (default='fista'), momentum to choose, possible
choice are ('fista', 'greedy', None)
    restarting : str or None, (default=None), restarting to choose, possible
choice are ('obj', 'descent', None), if restarting == 'obj', obj
function should be given
max_iter : int, (default=100), maximum number of iterations to perform the
analysis
early_stopping : bool, (default=True), whether to early stop the analysis
    eps : float, (default=np.finfo(np.float64).eps), stopping parameter w.r.t
evolution of the cost-function
obj : func, (default=None), cost-function function
benchmark : bool, (default=False), whether or not to save the cost-function
        and the duration of computation of each iteration
    Returns
    -------
x : array, shape (n_atoms, n_voxels), the estimated variable
pobj : array or None, shape (n_iter,) or (3 * n_iter,), the saved
cost-function
times : array or None, shape (n_iter,) or(3 * n_iter,), the saved duration
per steps
"""
if benchmark and obj is None:
raise ValueError("If 'benchmark' is set True 'obj' should be given.")
if restarting == 'obj' and obj is None:
raise ValueError("If 'restarting' is set 'obj' 'obj' should be given.")
x_old, x, y, y_old = np.copy(x0), np.copy(x0), np.copy(x0), np.copy(x0)
t = t_old = 1
if benchmark:
pobj, times = [obj(y)], [0.0]
for ii in range(max_iter):
if benchmark:
t0 = time.process_time()
y -= step_size * grad(y)
x = prox(y, step_size)
if momentum == 'fista':
t = 0.5 * (1.0 + np.sqrt(1.0 + 4.0 * t_old**2))
y = x + (t_old - 1.0) / t * (x - x_old)
elif momentum == 'greedy':
y = x + (x - x_old)
elif momentum is None:
y = x
restarted = False
if restarting == 'obj' and (ii > 0) and (pobj[-1] > pobj[-2]):
if momentum == 'fista':
x = x_old
t = 1.0
elif momentum == 'greedy':
y = x
restarted = True
if restarting == 'descent':
angle = (y_old - x).ravel().dot((x - x_old).ravel())
if angle >= 0.0:
if momentum == 'fista':
x = x_old
t = 1.0
elif momentum == 'greedy':
y = x
restarted = True
if benchmark:
t1 = time.process_time()
pobj.append(obj(y))
converged = np.linalg.norm(x - x_old) < eps * np.linalg.norm(x_old)
if early_stopping and converged and not restarted:
break
t_old = t
x_old = x
y_old = y
if benchmark:
times.append(t1 - t0)
if benchmark:
return x, np.array(pobj), np.array(times)
else:
return x
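# Usage sketch on a small synthetic l1-regularised least-squares problem; the
# gradient, soft-thresholding prox and step size below are standard choices and
# are not taken from the original code base.
import numpy as np

rng = np.random.RandomState(0)
A = rng.randn(20, 50)
b = rng.randn(20)
lbda = 0.1
step = 1.0 / np.linalg.norm(A, ord=2) ** 2       # 1 / Lipschitz constant of the smooth part

grad = lambda z: A.T.dot(A.dot(z) - b)
prox = lambda z, s: np.sign(z) * np.maximum(np.abs(z) - lbda * s, 0.0)
obj = lambda z: 0.5 * np.sum((A.dot(z) - b) ** 2) + lbda * np.sum(np.abs(z))

x_hat, pobj, times = proximal_descent(np.zeros(50), grad, prox, step,
                                      momentum='fista', restarting='obj',
                                      max_iter=200, obj=obj, benchmark=True)
print(pobj[0], pobj[-1])   # the objective decreases from the zero initialisation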
| 16,242
|
def reducer():
"""
Pairs reducer function
Reads pairs of words separated by a space and a count and calculates P
"""
# Empty dict for our output
n = dict()
sigma_n = dict()
# Precompiled regex to match our input
    parse = re.compile(r'(\w+)\s+(\w+):\s+(\d+)')
# Read STDIN and get counts
line = stdin.readline()
while line:
# Try and match our line (skip if failed)
m = parse.match(line)
if m:
word_1 = m.group(1)
word_2 = m.group(2)
count = int(m.group(3))
# Add our count to d
if word_1 not in n.keys():
n[word_1] = dict()
sigma_n[word_1] = 0
if word_2 not in n[word_1].keys():
n[word_1][word_2] = count
else:
n[word_1][word_2] += count
sigma_n[word_1] += count
line = stdin.readline()
# Calculate P
for word_1 in sorted(n.keys()):
for word_2, n_word in sorted(n[word_1].items(), key=lambda x: (x[1], x[0]), reverse=True):
p = n[word_1][word_2] / float(sigma_n[word_1])
stdout.write("%s %s: %f\n" % (word_1, word_2, p))
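# Illustration of the expected stream format (values are made up): counts of
# ordered word pairs come in on stdin, conditional probabilities P(word_2 | word_1)
# go out on stdout.
sample_stdin = "the quick: 3\nthe brown: 1\n"
# Feeding those lines through reducer() would emit, highest count first:
#   the quick: 0.750000
#   the brown: 0.250000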
| 16,243
|
def sign_tx(path,
multisig_address,
redeemscript,
utxo_file,
output_file,
testnet=False):
"""
Sign a spend of a bitcoin 2-of-3 P2SH-multisig address
using a Trezor One Hardware Wallet
Args:
path: BIP32 path of key with which to sign
multisig_address: Address that is being spent
redeemscript: redeem script corresponding to multisig_address
utxo_file: JSON file of UTXOs for multisig_address
(see get_utxo_set.py)
output_file: JSON file of destination addresses and amounts
(see generate_outputs.py)
testnet: Is this a testnet or mainnet address?
Returns:
Dictionary with two keys:
pubkey: public key corresponding to the private key used for signing
signatures: a list of signatures, one per utxo
Raises:
ValueError: If multisig_address is not correct for the given redeemscript
Example:
TODO
"""
with open(utxo_file, 'r') as f:
utxos = json.load(f)
with open(output_file, 'r') as f:
outputs = json.load(f)
# Verify that Pubkeys and Address match
check_address = generate_multisig_address(redeemscript, testnet)
parsed_redeem_script = btc_utils.parse_redeem_script(redeemscript)
if multisig_address != check_address:
raise ValueError("Incorrect Redeem Script")
if testnet:
coin = 'Testnet'
else:
coin = 'Bitcoin'
input_script_type = proto.InputScriptType.SPENDMULTISIG
output_script_type = proto.OutputScriptType.PAYTOADDRESS
tx_api = trezorlib.coins.tx_api[coin]
client = trezor_utils.get_trezor_client()
#client.set_tx_api(tx_api)
# Get signing node:
expanded_path = trezorlib.tools.parse_path(path)
signer = trezorbtc.get_public_node(client, expanded_path, show_display=True).node
# blank HDNodes with public_keys
nodes = [proto.HDNodePathType(node=proto.HDNodeType(public_key=bytes.fromhex(h),
depth=0,
fingerprint=0,
child_num=0,
chain_code=b'0'*32),
address_n=[]
) for h in parsed_redeem_script['pubkeys']]
trezor_inputs = []
for utxo in utxos:
multisig = proto.MultisigRedeemScriptType(
pubkeys=nodes,
m=parsed_redeem_script['m']
)
_input = proto.TxInputType(
prev_hash=bytes.fromhex(utxo['txid']),
prev_index=utxo['n'],
amount=utxo['amount'],
address_n=trezorlib.tools.parse_path(path),
script_type=input_script_type,
multisig=multisig
)
trezor_inputs.append(_input)
txes = {}
for tx in trezor_inputs:
tmptx = tx_api[tx.prev_hash]
txes[tx.prev_hash] = tmptx
# make this multi-output, probably from file
trezor_outputs = []
for output in outputs:
trezor_outputs.append(
proto.TxOutputType(
address=output['address'],
amount=output['amount'],
script_type=output_script_type,
)
)
output_signatures, serialized_tx = trezorbtc.sign_tx(client, coin, trezor_inputs, trezor_outputs, prev_txes=txes)
signature_blob = {"pubkey": signer.public_key.hex(),
"signatures": [s.hex() for s in output_signatures]
}
client.close()
return signature_blob
| 16,244
|
def wrap_atari_dqn(env):
"""
wrap the environment in atari wrappers for DQN
:param env: (Gym Environment) the environment
:return: (Gym Environment) the wrapped environment
"""
from stable_baselines_custom.common.atari_wrappers import wrap_deepmind
return wrap_deepmind(env, frame_stack=True, scale=False)
| 16,245
|
def get_Theta_CR_i_d_t(pv_setup, Theta_A_d_t, I_s_i_d_t):
"""加重平均太陽電池モジュール温度 (6)
Args:
pv_setup(str): 太陽電池アレイ設置方式
Theta_A_d_t(ndarray): 日付dの時刻tにおける外気温度(℃)
I_s_i_d_t(ndarray): 日付dの時刻tにおける太陽電池アレイiの設置面の単位面積当たりの日射量(W/m2)
Returns:
ndarray: 日付dの時刻tにおける太陽電池アレイiの加重平均太陽電池モジュール温度
"""
# 係数 f_A, f_B
if pv_setup == '架台設置型':
f_A_i = get_table_6()[0][0]
f_B_i = get_table_6()[0][1]
elif pv_setup == '屋根置き型':
f_A_i = get_table_6()[1][0]
f_B_i = get_table_6()[1][1]
elif pv_setup == 'その他':
f_A_i = get_table_6()[2][0]
f_B_i = get_table_6()[2][1]
else:
raise NotImplementedError()
    # Wind speed at the installation surface of the PV array
V_i_d_t = get_V_i_d_t()
return Theta_A_d_t + (f_A_i/(f_B_i * V_i_d_t**0.8 + 1)+2) * I_s_i_d_t * 10**(-3) - 2
| 16,246
|
def or_equality(input_1: Variable, input_2: Variable, output: Variable) -> Set[Clause]:
"""
Encode an OR-Gate into a CNF.
:param input_1: variable representing the first input of the OR-Gate
:param input_2: variable representing the second input of the OR-Gate
:param output: variable representing the output of the OR-Gate
:return: A set of clauses encoding the OR-Gate
"""
return {
frozenset([-input_1, output]),
frozenset([-input_2, output]),
frozenset([input_1, input_2, -output])
}
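# Quick check of the encoding, assuming (as is common in SAT tooling) that
# Variable is an int alias and Clause a frozenset of ints:
clauses = or_equality(1, 2, 3)        # variable 3 <=> (1 OR 2)
assert clauses == {
    frozenset([-1, 3]),               # input_1 implies output
    frozenset([-2, 3]),               # input_2 implies output
    frozenset([1, 2, -3]),            # output implies input_1 OR input_2
}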
| 16,247
|
def keyPosition_to_keyIndex(key_position: int, key: int) -> int:
"""
キーポジションからどのキーのノーツなのかを変換します
引数
----
key_position : int
-> キーポジション
key : int
-> 全体のキー数、4Kなら4と入力
戻り値
------
int
-> キーインデックス、指定したキーの0~キー-1の間の数
"""
return math.floor(key_position * key / 512)
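# Worked example: with key = 4 each column spans 512 / 4 = 128 position units,
# so the column centres 64, 192, 320 and 448 map to key indices 0..3.
assert [keyPosition_to_keyIndex(p, 4) for p in (64, 192, 320, 448)] == [0, 1, 2, 3]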
| 16,248
|
def main(args):
""" Convert a TSV file with the following columns:
* icd11 - the URI of the ICD 11 resource
* icdrubric - the name associated with the rubric
* expression - a compositional grammar expression that fully or partially defines the ICD 11 resource
* maptype - "A" means the definition belongs to WHO, "E" means it belongs to IHTSDO (and should be added to SNOMED CT)
"""
optparser = argparse.ArgumentParser(description="Convert cardio raw expressions into OWL")
optparser.add_argument('infile', help="Input tab separated value file file")
optparser.add_argument('-f', '--outputformat', help="File format", default="n3")
optparser.add_argument('-p', '--port', help="SCT Converter gateway port", type=int)
optparser.add_argument('-o', '--outfile', help="Output file name", required=True)
optparser.add_argument('-m', '--mapfile', help="Map file name")
opts = optparser.parse_args(args)
gw = SCTConverterGateway(opts.port) if opts.port else SCTConverterGateway()
cg_graph = init_graph()
map_graph = Graph().parse(opts.mapfile, format='n3') if opts.mapfile else None
with open(opts.infile) as tsvfile:
reader = csv.DictReader(tsvfile, delimiter='\t')
for row in reader:
who_entity = row['icd11'].split(str(WHO))[1]
subj = URIRef(str(ICDCG if row['maptype'] == post_coordinated else SCTCG) + who_entity)
primitive = bool(single_concept_re.match(row['expression']))
ttlresult = gw.parse(subj, primitive, row['expression'])
if ttlresult:
cg_graph.parse(StringIO(owlbasere.sub(r'\1>', ttlresult)), format='n3')
cg_graph.add( (subj, RDFS.label, Literal('ICDCG ' + row['icdrubric'])))
if map_graph:
owlc = URIRef(str(WHO) + who_entity)
map_graph.add( (owlc, OWL.equivalentClass, subj))
else:
print("Conversion failure: " + str(row))
rem_sct_labels(cg_graph)
cg_graph.serialize(opts.outfile, format="n3")
print("OWL saved to %s" % opts.outfile)
if map_graph:
map_graph.serialize(opts.mapfile + 'upd.ttl', format="n3")
print("Map saved to %s" % opts.mapfile + 'upd.ttl')
| 16,249
|
def rebuild_index():
"""
Rebuild the K-nearest neighbors index based on 50000 of the most active
users (ignoring the top 500 most active).
"""
pipe = get_pipeline()
usernames = pipe.zrevrange(format_key("user"), 500, 50500).execute()[0]
for user in usernames:
get_vector(user, pipe=pipe)
results = pipe.execute()
points = np.zeros([len(usernames), nvector])
for i in range(len(usernames)):
points[i, :] = parse_vector(results[8 * i:8 * (i + 1)])
flann = pyflann.FLANN()
flann.build_index(points)
# Save the index.
fn1 = _h5_filename(index_filename)
tmp1 = fn1 + ".tmp"
flann.save_index(tmp1)
# Save the index coordinates.
fn2 = _h5_filename(points_filename)
tmp2 = fn2 + ".tmp"
with h5py.File(tmp2, "w") as f:
f["points"] = points
f["names"] = usernames
# Atomically move the index files into place.
shutil.move(tmp1, fn1)
shutil.move(tmp2, fn2)
| 16,250
|
def load_file_recipes(fh, enabled_only=False, expensive=False, logger=logger):
"""
Load all the recipes from a given file handle.
:param enabled_only: Set True to limit to only enabled recipes.
:param expensive: Set True to use 'expensive' configurations.
:return: dict(name -> {recipe})
"""
logger.info("Loading recipes from %s", fh.name)
lua_text = fh.read().strip()
logger.debug("Loaded %d bytes", len(lua_text))
# Strip the non-table wrapper.
if not lua_text.startswith(RECIPE_PREFIX) or not lua_text.endswith(RECIPE_SUFFIX):
logger.warning("%s does not appear to be a recipe definition file.", fh.name)
return {}
lua_table = lua_text[len(RECIPE_PREFIX):-len(RECIPE_SUFFIX)].strip()
definitions = {}
for table in slpp.decode(lua_table):
own_version = {}
# Only handle 'recipe's.
if table.get('type') != "recipe":
logger.debug("Ignoring: %s", table)
continue
        name = table.get('name', '').lower()
if not name:
logger.warning("Malformed entry: %s", table)
continue
own_version['name'] = name
# Check if we're skipping disabled recipes.
if enabled_only:
if table.get('enabled', True) is False:
logger.debug("Skipping %s: disabled" % name)
continue
            own_version['enabled'] = table.get('enabled', True)
# Make sure it has a unique name.
if name in definitions:
raise ParseError("%s: Duplicated recipe: %s" % (fh.name, name))
inset = table.get('normal')
if expensive:
inset = table.get('expensive', inset)
if inset:
if enabled_only and inset.get('enabled', True) is False:
logger.debug("Skipping %s: inset dsabled" % name)
continue
if 'ingredients' in inset:
table = inset
ingredients = table.get('ingredients')
if not ingredients:
logger.warning("Entry with no ingredients: %s", table)
continue
own_version['ingredients'] = {}
for entry in ingredients:
if isinstance(entry, (tuple, list)):
assert len(entry) == 2
assert isinstance(entry[1], int)
own_version['ingredients'][entry[0]] = entry[1]
else:
assert isinstance(entry, dict)
assert len(entry) == 3
own_version['ingredients'][entry['name']] = int(entry['amount'])
if 'energy_required' in table:
own_version['energy_required'] = table['energy_required']
logger.debug("\"%s\": %s", name, json.dumps(own_version, sort_keys=True))
definitions[name] = own_version
return definitions
| 16,251
|
def assert_euler_xyz_equal(e_xyz1, e_xyz2, *args, **kwargs):
"""Raise an assertion if two xyz Euler angles are not approximately equal.
Note that Euler angles are only unique if we limit them to the intervals
[-pi, pi], [-pi/2, pi/2], and [-pi, pi] respectively. See
numpy.testing.assert_array_almost_equal for a more detailed documentation
of the other parameters.
"""
R1 = matrix_from_euler_xyz(e_xyz1)
R2 = matrix_from_euler_xyz(e_xyz2)
assert_array_almost_equal(R1, R2, *args, **kwargs)
| 16,252
|
def load_model_data(m, d, data_portal, scenario_directory, subproblem, stage):
"""
:param m:
:param data_portal:
:param scenario_directory:
:param subproblem:
:param stage:
:return:
"""
# TODO: once we align param names and column names, we will have conflict
# because binary storage and generator use same build size param name
# in the columns.
data_portal.load(
filename=os.path.join(
scenario_directory,
str(subproblem),
str(stage),
"inputs",
"new_binary_build_storage_vintage_costs.tab",
),
index=m.STOR_NEW_BIN_VNTS,
select=(
"project",
"vintage",
"lifetime_yrs",
"annualized_real_cost_per_mw_yr",
"annualized_real_cost_per_mwh_yr",
),
param=(
m.stor_new_bin_lifetime_yrs,
m.stor_new_bin_annualized_real_cost_per_mw_yr,
m.stor_new_bin_annualized_real_cost_per_mwh_yr,
),
)
data_portal.load(
filename=os.path.join(
scenario_directory,
str(subproblem),
str(stage),
"inputs",
"new_binary_build_storage_size.tab",
),
index=m.STOR_NEW_BIN,
select=("project", "binary_build_size_mw", "binary_build_size_mwh"),
param=(m.stor_new_bin_build_size_mw, m.stor_new_bin_build_size_mwh),
)
| 16,253
|
def send_record(agg_record):
"""Send the input aggregated record to Kinesis via the PutRecord API.
Args:
agg_record - The aggregated record to send to Kinesis. (AggRecord)"""
global kinesis_client, stream_name
if agg_record is None:
return
partition_key, explicit_hash_key, raw_data = agg_record.get_contents()
six.print_('Submitting record with EHK=%s NumRecords=%d NumBytes=%d' %
(explicit_hash_key, agg_record.get_num_user_records(), agg_record.get_size_bytes()))
try:
kinesis_client.put_record(StreamName=stream_name,
Data=raw_data,
PartitionKey=partition_key,
ExplicitHashKey=explicit_hash_key)
except Exception as e:
six.print_('Transmission Failed: %s' % e, file=sys.stderr)
else:
        six.print_('Completed record with EHK=%s' % explicit_hash_key)
| 16,254
|
def patch(url, controller):
"""Shortcut for Patch HTTP class.
Arguments:
url {string} -- The url you want to use for the route
controller {string|object} -- This can be a string controller or a normal object controller
Returns:
masonite.routes.Patch -- The Masonite Patch class.
"""
from masonite.routes import Patch
return Patch().route(url, controller)
| 16,255
|
def get_user(message: discord.Message, username: str):
""" Get member by discord username or osu username. """
member = utils.find_member(guild=message.guild, name=username)
if not member:
for key, value in osu_tracking.items():
if value["new"]["username"].lower() == username.lower():
member = discord.utils.get(message.guild.members, id=int(key))
return member
| 16,256
|
def ensure_dirs(filename):
"""Make sure the directories exist for `filename`."""
dirname = os.path.dirname(filename)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
| 16,257
|
def resaturate_color(color, amount=0.5):
"""
Saturates the given color by setting saturation to the given amount.
Input can be matplotlib color string, hex string, or RGB tuple.
"""
if not isinstance(color, np.ndarray) and color in matplotlib.colors.cnames:
color = matplotlib.colors.cnames[color]
hls = colorsys.rgb_to_hls(*matplotlib.colors.to_rgb(color))
new_hls = hls[0], hls[1], amount
new_color = colorsys.hls_to_rgb(*new_hls)
return tuple(np.minimum(np.maximum(0, new_color), 1))
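# Worked example (values follow from colorsys): pure red has HLS (0, 0.5, 1.0),
# so forcing the saturation to 0.5 yields a muted red.
print(resaturate_color('red', amount=0.5))       # (0.75, 0.25, 0.25)
print(resaturate_color('#00ff00', amount=1.0))   # (0.0, 1.0, 0.0), already fully saturated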
| 16,258
|
def create_reach_segment(upstream_point, downstream_point, polyline, identifier="HA",
junctionID=0, isEnd=False):
"""Returns a polyline based on two bounding vertices found on the line. """
part = polyline.getPart (0)
total_length = polyline.length
lineArray = arcpy.Array ()
#Identifies bounding vertices and associated distance along the line.
if isEnd:
last_point= polyline.lastPoint
upstream_point_dist = round (total_length - polyline.measureOnLine (downstream_point , False) , 2)
downstream_point_dist = round(total_length - polyline.measureOnLine (last_point , False), 2)
else:
upstream_point_dist = round (total_length - polyline.measureOnLine (upstream_point , False) , 2)
downstream_point_dist = round(total_length - polyline.measureOnLine (downstream_point , False), 2)
#Retrieves all vertices between bounding vertices of a polyline.
for pnt in part:
pnt_dist = round(total_length - polyline.measureOnLine (pnt , False), 2)
if pnt_dist <= upstream_point_dist and pnt_dist>=downstream_point_dist:
if lineArray.count == 0:
lineArray.add(upstream_point)
lineArray.add (pnt)
else:
lineArray.add (pnt)
    #Make sure the ending downstream point is added to the array
if lineArray[lineArray.count -1].X != downstream_point.X and lineArray[lineArray.count -1].Y != downstream_point.Y:
lineArray.add(downstream_point)
#Creates a new polyline from point array
new_polyline = arcpy.Polyline(lineArray)
identifier = str(identifier)
junc = identifier
if identifier.upper().find('J') == len(identifier)-1:
identifier =identifier.upper()[0:len(identifier)-1] + 'R'
else:
identifier = identifier.upper() + 'R'
return {'name':identifier,'polyline':new_polyline, 'DJunc':junc, 'JuncID':junctionID}
| 16,259
|
def sample_recipe(user, **params):
    """Helper function for creating recipes so these default fields
    don't have to be written out in every test."""
defaults = {
'title': 'Sample recipe',
'time_minutes': 10,
'price': 5.00
}
"""
Override any field of the defaults dictionary.
Updating the keys:field from params to defaults
if params has any similar key.
If params has a new key, then it appends to defaults.
"""
defaults.update(params)
return Recipe.objects.create(user=user, **defaults)
| 16,260
|
def _unary_geo(op, left, *args, **kwargs):
# type: (str, np.array[geoms]) -> np.array[geoms]
"""Unary operation that returns new geometries"""
# ensure 1D output, see note above
data = np.empty(len(left), dtype=object)
data[:] = [getattr(geom, op, None) for geom in left]
return data
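# Usage sketch, assuming shapely geometries as in geopandas: any attribute that
# yields a new geometry (e.g. "centroid", "convex_hull") can be passed as op.
import numpy as np
from shapely.geometry import Point

geoms = np.empty(3, dtype=object)
geoms[:] = [Point(0, 0), Point(1, 1), None]
print(_unary_geo("centroid", geoms))   # object array of Points; the None entry stays None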
| 16,261
|
def map_func(h, configs, args):
"""Polygons command line in parallel.
"""
if args.verbose:
cmd = "python {} -i {}/threshold{}.tif -o {}/threshold{}.shp -v".format(
configs["path"]["polygons"],
configs["path"]["output"],
h,
configs["path"]["output"],
h
)
        print(cmd)
else:
cmd = "python {} -i {}/threshold{}.tif -o {}/threshold{}.shp".format(
configs["path"]["polygons"],
configs["path"]["output"],
h,
configs["path"]["output"],
h
)
cmd_args = shlex.split(cmd)
stdout,stderr = sp.Popen(
cmd_args,
stdin = sp.PIPE,
stdout = sp.PIPE,
stderr = sp.PIPE
).communicate()
if args.verbose:
        print(stdout, stderr)
return True
| 16,262
|
def _runopenssl(pem, *args):
"""
Run the command line openssl tool with the given arguments and write
the given PEM to its stdin. Not safe for quotes.
"""
if os.name == 'posix':
command = "openssl " + " ".join(["'%s'" % (arg.replace("'", "'\\''"),) for arg in args])
else:
command = "openssl " + quoteArguments(args)
write, read = popen2(command, "b")
write.write(pem)
write.close()
return read.read()
| 16,263
|
def check_health(request: HttpRequest) -> bool:
"""Check app health."""
return True
| 16,264
|
def test_timeout_not_set_in_config():
"""
Creating a CkanAuthTktCookiePlugin instance without setting timeout in
config sets correct values in CkanAuthTktCookiePlugin instance.
"""
plugin = make_plugin(secret="sosecret")
assert plugin.timeout is None
assert plugin.reissue_time is None
| 16,265
|
def return_galo_tarsilo(message):
"""Middle function for returning "gaucho" vídeo.
Parameters
----------
message : telebot.types.Message
The message object.
Returns
-------
msg : str
User/Chat alert list addition/removal.
"""
return 'https://www.youtube.com/watch?v=MVYEwZFixJ8'
| 16,266
|
def test_exception(client, recorder):
"""
Test handling an exception
:param test_client: AioHttp test client fixture
:param loop: Eventloop fixture
:param recorder: X-Ray recorder fixture
"""
exc = None
try:
client.get("/exception")
except Exception as e:
exc = e
assert exc is not None
segment = recorder.emitter.pop()
assert not segment.in_progress
assert segment.fault
request = segment.http["request"]
response = segment.http["response"]
exception = segment.cause["exceptions"][0]
assert request["method"] == "GET"
assert request["url"] == "http://testserver/exception"
assert request["client_ip"] == "testclient:50000"
assert response["status"] == 500
assert exception.type == "KeyError"
| 16,267
|
def register_scheduler(name, scheduler):
"""
Registers a new scheduler. Attempting to register a scheduler with a name that is already taken will raise a ``SystemSetupError``.
:param name: The name under which to register the scheduler.
:param scheduler: Either a unary function ``float`` -> ``float`` or a class with the same interface as ``Scheduler``.
"""
logger = logging.getLogger(__name__)
if name in __SCHEDULERS:
raise exceptions.SystemSetupError("A scheduler with the name [%s] is already registered." % name)
# we'd rather use callable() but this will erroneously also classify a class as callable...
if isinstance(scheduler, types.FunctionType):
logger.debug("Registering function [%s] for [%s].", str(scheduler), str(name))
# lazy initialize a delegating scheduler
__SCHEDULERS[name] = lambda params: DelegatingScheduler(params, scheduler)
else:
logger.debug("Registering object [%s] for [%s].", str(scheduler), str(name))
__SCHEDULERS[name] = scheduler
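A hedged usage sketch: registering a plain unary function as a scheduler. The name "constant-wait" and the half-second wait are illustrative, and the call assumes it runs in the same module that defines register_scheduler.

def constant_wait(current_iteration):
    # Always wait half a second between operations.
    return 0.5

register_scheduler("constant-wait", constant_wait)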
| 16,268
|
def preimage_func(f, x):
"""Pre-image a funcation at a set of input points.
Parameters
----------
f : typing.Callable
The function we would like to pre-image. The output type must be hashable.
x : typing.Iterable
Input points we would like to evaluate `f`. `x` must be of a type acceptable by `f`.
Returns
-------
D : dict(object, list(object))
This dictionary maps the output of `f` to the list of `x` values that produce it.
"""
D = {}
for xx in x:
D.setdefault(f(xx), []).append(xx)
return D
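For example, pre-imaging the parity function over a small range groups inputs by output:

def parity(n):
    return n % 2

preimage_func(parity, range(6))
# -> {0: [0, 2, 4], 1: [1, 3, 5]}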
| 16,269
|
def get_spec_id(mat_quality, mat_faction=None):
"""
Get the material_spec id corresponding to the material quality and faction.
Args:
mat_quality (str): A material quality like Basic, Fine, Choice etc...
mat_faction (str): A material faction like Matis, Zoraï etc...
Returns:
int - The id of the corresponding material_spec.
Example:
>>> get_spec_id('Basic', 'Generic')
1
"""
if mat_faction:
dbm.query(
"SELECT id FROM material_spec WHERE quality = ? AND faction = ?",
(mat_quality, mat_faction)
)
else:
dbm.query(
"SELECT id FROM material_spec WHERE quality = ?",
(mat_quality,)
)
try:
return dbm.cur.fetchone()[0]
except TypeError:
print(
"Wrong quality: {} or faction: {}".format(
mat_quality, mat_faction),
file=sys.stderr
)
sys.exit()
| 16,270
|
def pytest_configure(config):
"""Add pytest new configurations."""
config.addinivalue_line('markers', 'web: validate web link')
config.addinivalue_line('markers', 'rapidtest: rapid test')
| 16,271
|
def print_raw_data(raw_data, start_index=0, limit=200, flavor='fei4b', index_offset=0, select=None, tdc_trig_dist=False, trigger_data_mode=0):
"""Printing FEI4 raw data array for debugging.
"""
if not select:
select = ['DH', 'TW', "AR", "VR", "SR", "DR", 'TDC', 'UNKNOWN FE WORD', 'UNKNOWN WORD']
total_words = 0
for index in range(start_index, raw_data.shape[0]):
dw = FEI4Record(raw_data[index], chip_flavor=flavor, tdc_trig_dist=tdc_trig_dist, trigger_data_mode=trigger_data_mode)
if dw in select:
print(index + index_offset, '{0:12d} {1:08b} {2:08b} {3:08b} {4:08b}'.format(raw_data[index], (raw_data[index] & 0xFF000000) >> 24, (raw_data[index] & 0x00FF0000) >> 16, (raw_data[index] & 0x0000FF00) >> 8, (raw_data[index] & 0x000000FF) >> 0), dw)
total_words += 1
if limit and total_words >= limit:
break
return total_words
| 16,272
|
def test_fit_raise_error(classifier):
"""Test raising an error on the wrong classifier type."""
with pytest.raises(
TypeError,
match='`ClassifierBettor` requires a classifier. '
f'Instead {type(classifier)} is given.',
):
ClassifierBettor(classifier).fit(X, Y)
| 16,273
|
def test_md034_good_with_leading_character():
"""
Test to make sure this rule does not trigger with a document that
contains http urls with non-whitespace directly before it.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = [
"scan",
"test/resources/rules/md034/good_with_leading_character.md",
]
expected_return_code = 0
expected_output = ""
expected_error = ""
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
| 16,274
|
def load_tree(tree,fmt=None):
"""
Load a tree into an ete3 tree data structure.
tree: some sort of tree. can be an ete3.Tree (returns self), a dendropy
Tree (converts to newick and drops root), a newick file or a newick
string.
fmt: format for reading tree from newick. 0-9 or 100. See ete3 documentation
for how these are read (http://etetoolkit.org/docs/latest/tutorial/tutorial_trees.html#reading-and-writing-newick-trees).
As of ETE3.1.1, these numbers mean:
| ====== ==============================================
| FORMAT DESCRIPTION
| ====== ==============================================
| 0 flexible with support values
| 1 flexible with internal node names
| 2 all branches + leaf names + internal supports
| 3 all branches + all names
| 4 leaf branches + leaf names
| 5 internal and leaf branches + leaf names
| 6 internal branches + leaf names
| 7 leaf branches + all names
| 8 all names
| 9 leaf names
| 100 topology only
| ====== ==============================================
If fmt is None, try to parse without a format descriptor, then try these
formats in numerical order.
Returns an ete3 tree object.
"""
# Already an ete3 tree.
if type(tree) is ete3.TreeNode:
return tree
# Convert dendropy tree into newick (drop root)
if type(tree) is dp.Tree:
tree = tree.as_string(schema="newick",suppress_rooting=True)
# If we get here, we need to convert. If fmt is not specified, try to parse
# without a format string.
if fmt is None:
try:
t = Tree(tree)
except ete3.parser.newick.NewickError:
# Try all possible formats now, in succession
w = "\n\nCould not parse tree without format string. Going to try different\n"
w += "formats. Please check output carefully.\n\n"
warnings.warn(w)
formats = list(range(10))
formats.append(100)
t = None
for f in formats:
try:
t = Tree(tree,format=f)
w = f"\n\nSuccessfully parsed tree with format style {f}.\n"
w += "Please see ete3 documentation for details:\n\n"
w += "http://etetoolkit.org/docs/latest/tutorial/tutorial_trees.html#reading-and-writing-newick-trees\n\n"
warnings.warn(w)
break
except ete3.parser.newick.NewickError:
continue
if t is None:
err = "\n\nCould not parse tree!\n\n"
raise ValueError(err)
else:
# Try a conversion with the specified format
t = Tree(tree,format=fmt)
return t
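A short usage sketch; the newick string is illustrative:

newick = "((A:1,B:1):1,C:2);"
t = load_tree(newick)          # parsed without a format descriptor
t1 = load_tree(newick, fmt=1)  # explicit format: flexible with internal node names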
| 16,275
|
def load_extract(context, extract: Dict) -> str:
"""
Upload extract to Google Cloud Storage.
Return GCS file path of uploaded file.
"""
return context.resources.data_lake.upload_df(
folder_name="nwea_map",
file_name=extract["filename"],
df=extract["value"]
)
| 16,276
|
def _make_default_colormap():
"""Return the default colormap, with custom first colors."""
colormap = np.array(cc.glasbey_bw_minc_20_minl_30)
# Reorder first colors.
colormap[[0, 1, 2, 3, 4, 5]] = colormap[[3, 0, 4, 5, 2, 1]]
# Replace first two colors.
colormap[0] = [0.03137, 0.5725, 0.9882]
colormap[1] = [1.0000, 0.0078, 0.0078]
return colormap
| 16,277
|
def poly_to_box(poly):
"""Convert a polygon into an array of tight bounding box."""
box = np.zeros(4, dtype=np.float32)
box[0] = min(poly[:, 0])
box[2] = max(poly[:, 0])
box[1] = min(poly[:, 1])
box[3] = max(poly[:, 1])
return box
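For example, a unit square given as an (N, 2) array of vertices maps to the tight box [xmin, ymin, xmax, ymax]:

square = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]], dtype=np.float32)
poly_to_box(square)  # -> array([0., 0., 1., 1.], dtype=float32)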
| 16,278
|
def default_collate(batch):
"""Puts each data field into a tensor with outer dimension batch size"""
error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
elem_type = type(batch[0])
if isinstance(batch[0], torch.Tensor):
return torch.stack(batch, 0)
elif (
elem_type.__module__ == "numpy"
and elem_type.__name__ != "str_"
and elem_type.__name__ != "string_"
): # pragma: no cover
elem = batch[0]
if elem_type.__name__ == "ndarray":
return torch.stack([torch.from_numpy(b) for b in batch], 0)
if elem.shape == (): # scalars
py_type = float if elem.dtype.name.startswith("float") else int
return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
elif isinstance(batch[0], int_classes): # pragma: no cover
return torch.LongTensor(batch)
elif isinstance(batch[0], float): # pragma: no cover
return torch.DoubleTensor(batch)
elif isinstance(batch[0], string_classes): # pragma: no cover
return batch
elif isinstance(batch[0], container_abcs.Mapping): # pragma: no cover
return {key: default_collate([d[key] for d in batch]) for key in batch[0]}
elif isinstance(batch[0], container_abcs.Sequence): # pragma: no cover
transposed = zip(*batch)
return [default_collate(samples) for samples in transposed]
raise TypeError((error_msg.format(type(batch[0]))))
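A hedged usage sketch, assuming torch is available; the tensor shapes are illustrative:

batch = [
    {"image": torch.zeros(3, 4, 4), "label": 0},
    {"image": torch.ones(3, 4, 4), "label": 1},
]
collated = default_collate(batch)
# collated["image"] has shape (2, 3, 4, 4); collated["label"] is a LongTensor([0, 1])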
| 16,279
|
def updatestatus(requestdata, authinfo, acldata, supportchan, session):
"""Update the /Status page of a user."""
if requestdata[2] in acldata['wikis']:
wikiurl = str('https://' + acldata['wikis'][requestdata[2]]['url'] + '/w/api.php')
sulgroup = acldata['wikis'][requestdata[2]]['sulgroup']
else:
return 'Wiki could not be found'
if requestdata[0] in acldata['users']:
if sulgroup in acldata['users'][requestdata[0]]['groups']:
request = [acldata['users'][requestdata[0]]['groups'][sulgroup], requestdata[3]]
else:
return f"Data not found for {sulgroup} in {requestdata[0]}, Keys were: {acldata['users'][requestdata[0]].keys()}"
elif requestdata[1][0] in acldata['sulgroups'][sulgroup]['cloaks']:
request = [requestdata[1][1], requestdata[3]]
else:
ERRNOAUTH = "You don't seem to be authorised to use this plugin. Check you are signed into NickServ and try again."
if supportchan is None:
return ERRNOAUTH
return f'{ERRNOAUTH} If this persists, ask for help in {supportchan}.'
return mwapi.main(
performer=request[0],
target=str('User:' + (str(request[0]) + '/Status')),
action='create',
reason=str('Updating status to ' + str(request[1]) + ' per ' + str(request[0])),
url=wikiurl,
authinfo=[authinfo[0], authinfo[1]],
content=str(request[1]),
session=session,
)
| 16,280
|
def sortByTimeStamps(paths):
"""Sorts the given list of file paths by their time-stamp
:paths: The file paths to sort by time-stamp
:returns: A sorted list of file paths
"""
sortedPaths = []
timeStamps = []
# Extract the YYYYMMDD & HHMMSS timestamps from the file paths
for p in paths:
timeStamp = getTimeStamps(p)
timeStamps.append((int(timeStamp[0]), int(timeStamp[1])))
# Sort the (YYYYMMDD, HHMMSS) tuples in ascending order
timeStamps = sorted(timeStamps, key = lambda x: (int(x[0]), int(x[1])))
# Sort the paths by comparing to the sorted timestamps
for t in timeStamps:
for p in paths:
timeStamp = getTimeStamps(p)
if (int(timeStamp[0]), int(timeStamp[1])) == t:
sortedPaths.append(p)
return sortedPaths
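A more compact equivalent, sketched under the assumption that getTimeStamps(p) returns (YYYYMMDD, HHMMSS) strings, sorts the paths directly with a key function instead of the quadratic matching loop:

def sort_by_timestamps(paths):
    """Sort file paths by their (YYYYMMDD, HHMMSS) time-stamps in one pass."""
    return sorted(paths, key=lambda p: tuple(int(t) for t in getTimeStamps(p)[:2]))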
| 16,281
|
def get_xml_nk_bands(xml_tree):
"""
Function to specifically get kpoint (cartesian) coordinates and corresponding eigenvalues (in Hartree)
"""
k_points_car = []
k_eigenvalues = []
k_occupations = []
for ks_energies in xml_tree.iter(tag='ks_energies'):
k_points_car.append( get_xml_data(ks_energies,'k_point',as_type=float) )
k_eigenvalues.append( get_xml_data(ks_energies,'eigenvalues',as_type=float) )
k_occupations.append( get_xml_data(ks_energies,'occupations',as_type=float) )
k_points_car = np.array(k_points_car)
k_eigenvalues = np.array(k_eigenvalues)
k_occupations = np.array(k_occupations)
return k_points_car, k_eigenvalues, k_occupations
| 16,282
|
def enumerate(server, directory_list, filenames):
"""
Enumerate directories and files on the web server.
"""
print('\n[*] Enumerating resources.')
to_search = [server]
directories = []
resources = []
print('[*] Recursively searching for directories.')
while len(to_search) != 0:
base_url = to_search.pop(0)
print('[*] Searching for directories in {0}'.format(base_url))
to_search.extend(check(base_url, directory_list))
directories.append(base_url)
resources.append(base_url)
if len(filenames) > 0:
print('\n[*] Searching for files.')
for url in directories:
resources.extend(check(url, filenames, False))
return resources
| 16,283
|
def deep_equals(x, y):
"""Test two objects for equality in value.
Correct if x/y are one of the following valid types:
types compatible with != comparison
pd.Series, pd.DataFrame, np.ndarray
lists, tuples, or dicts of a valid type (recursive)
Important note:
this function will return "not equal" if types of x,y are different
for instance, bool and numpy.bool are *not* considered equal
Parameters
----------
x: object
y: object
Returns
-------
bool - True if x and y are equal in value
x and y do not need to be equal in reference
"""
if type(x) != type(y):
return False
# we now know all types are the same
# so now we compare values
if isinstance(x, pd.Series):
if x.dtype != y.dtype:
return False
# if columns are object, recurse over entries and index
if x.dtype == "object":
index_equal = x.index.equals(y.index)
return index_equal and deep_equals(list(x.values), list(y.values))
else:
return x.equals(y)
elif isinstance(x, pd.DataFrame):
if not x.columns.equals(y.columns):
return False
# if columns are equal and at least one is object, recurse over Series
if sum(x.dtypes == "object") > 0:
return np.all([deep_equals(x[c], y[c]) for c in x.columns])
else:
return x.equals(y)
elif isinstance(x, np.ndarray):
if x.dtype != y.dtype:
return False
return np.array_equal(x, y, equal_nan=True)
# recursion through lists, tuples and dicts
elif isinstance(x, (list, tuple)):
return _tuple_equals(x, y)
elif isinstance(x, dict):
return _dict_equals(x, y)
elif x != y:
return False
return True
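A few illustrative calls; note that equal values held in different container types compare as not equal:

deep_equals([1, 2, 3], [1, 2, 3])                       # True
deep_equals((1, 2, 3), [1, 2, 3])                       # False: tuple vs list
deep_equals(pd.Series([1, 2]), pd.Series([1.0, 2.0]))   # False: int64 vs float64 dtype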
| 16,284
|
def get_local_log(date, which="completed", safeout=False):
""" """
filein = get_log_filepath(date, which=which)
if not os.path.isfile(filein):
if safeout:
return None
raise IOError(f"No {which}_log locally stored for {date}. see download_log()")
return pandas.read_csv(filein)
| 16,285
|
def get_asan_options(redzone_size, malloc_context_size, quarantine_size_mb,
bot_platform, leaks):
"""Generates default ASAN options."""
asan_options = {}
# Default options needed for all cases.
asan_options['alloc_dealloc_mismatch'] = 0
asan_options['print_scariness'] = 1
asan_options['strict_memcmp'] = 0
# Set provided redzone size.
if redzone_size:
asan_options['redzone'] = redzone_size
# This value is used in determining whether to report OOM crashes or not.
set_value('REDZONE', redzone_size)
# Set maximum number of stack frames to report.
if malloc_context_size:
asan_options['malloc_context_size'] = malloc_context_size
# Set quarantine size.
if quarantine_size_mb:
asan_options['quarantine_size_mb'] = quarantine_size_mb
# Test for leaks if this is an LSan-enabled job type.
if get_value('LSAN') and leaks:
lsan_options = join_memory_tool_options(get_lsan_options())
set_value('LSAN_OPTIONS', lsan_options)
asan_options['detect_leaks'] = 1
else:
remove_key('LSAN_OPTIONS')
asan_options['detect_leaks'] = 0
# FIXME: Support container overflow on Android.
if bot_platform == 'ANDROID':
asan_options['detect_container_overflow'] = 0
# Enable stack use-after-return.
asan_options['detect_stack_use_after_return'] = 1
asan_options['max_uar_stack_size_log'] = 16
# Other less important default options for all cases.
asan_options.update({
'allocator_may_return_null': 1,
'allow_user_segv_handler': 0,
'check_malloc_usable_size': 0,
'detect_odr_violation': 0,
'fast_unwind_on_fatal': 1,
'print_suppressions': 0,
})
# Add common sanitizer options.
asan_options.update(COMMON_SANITIZER_OPTIONS)
# FIXME: For Windows, rely on online symbolization since llvm-symbolizer.exe
# in build archive does not work.
asan_options['symbolize'] = int(bot_platform == 'WINDOWS')
# For Android, allow user defined segv handler to work.
if bot_platform == 'ANDROID':
asan_options['allow_user_segv_handler'] = 1
# Check if UBSAN is enabled as well for this ASAN build.
# If yes, set UBSAN_OPTIONS and enable suppressions.
if get_value('UBSAN'):
ubsan_options = get_ubsan_options()
# Remove |symbolize| explicitly to avoid overriding ASan defaults.
ubsan_options.pop('symbolize', None)
set_value('UBSAN_OPTIONS', join_memory_tool_options(ubsan_options))
return asan_options
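A hedged sketch of consuming the returned dictionary; the real code presumably serialises it with join_memory_tool_options, so the colon-joined string below is only illustrative:

options = get_asan_options(redzone_size=16, malloc_context_size=32,
                           quarantine_size_mb=64, bot_platform='LINUX', leaks=False)
asan_env = ':'.join('%s=%s' % (k, v) for k, v in sorted(options.items()))
# e.g. export ASAN_OPTIONS="<asan_env>" before launching the target binary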
| 16,286
|
def get_onelinepred_results(pred_file, thred=0.1):
""""from pred_file parse pred_results
Args:
# TODO save format of pred_file still unknown
pred_file (str): pred_file path
thred: pred_box's score less than it could be ignored
Return:
pred_dict (dict(list)) : output predict result. The outer dict means different images
, inner list contains xywh class_id(1) score
"""
if pred_file is None: return None
pred_dict = {}
lines = open(pred_file, 'r').readlines()
for line in lines:
split_item = line.strip().split()
if len(split_item) < 5: continue
image_path = split_item[0]
#image key first occur
if not image_path in pred_dict.keys():
pred_dict[image_path] = list()
pred_box = np.array(split_item[1:]).reshape((-1, 9)).astype(float)  # builtin float; np.float is removed in newer NumPy
#if int(pred_cls) < 2: pred_cls = '0'
for box in pred_box:
cls_id = 1 #int(box[0]) - 1 #skip background
score = box[0]
# if not (abs(box[8]) < 35 and abs(box[7]) < 35 and abs(box[6]) < 35): continue
# if score < thred or box[5] < 0.5: continue
pred_dict[image_path].append(box[1:5].tolist()+[cls_id, score]) #box+cls
return pred_dict
| 16,287
|
def permutations(**kwargs):
"""Generate a CSV with each permutation of columns and possible values. The probabilities are not considered. """
cols_template, gen_rows = handle_common_cmdline(kwargs)
col_opts = prepare_col_opts(cols_template)
rows = []
gen_permutations(col_opts, cols=list(col_opts.keys()), rows=rows, row={})
write_output(kwargs, col_opts, rows)
| 16,288
|
def validate_parent_instance(parent: Optional[Any]) -> None:
"""
Validate specified parent is `ChildInterface` instance.
Parameters
----------
parent : *
Any parent instance or None.
Raises
------
ValueError
If specified parent is not None and not `ChildInterface`
instance.
"""
if parent is None:
return
from apysc._display.child_interface import ChildInterface
if isinstance(parent, ChildInterface):
return
raise ValueError(
'Specified parent is not None and not `ChildInterface` instance,'
f' like a Sprite: {type(parent)}')
| 16,289
|
def _is_valid_img_uri(uri: str) -> bool:
"""
Returns true if a string is a valid uri that can be saved in the database.
"""
regex = "data:image/jpeg;base64*."
return not uri or re.match(regex, uri)
| 16,290
|
def insert_global_vars(config):
""" replace global variable placeholders with respective values """
for key, value in config.items():
if type(value) != dict and value in vars(globals):
config[key] = getattr(globals, value)
| 16,291
|
def update_config(a, b, mode="default"):
"""Update the configuration a with b."""
if not b:
return a
from_version = get_config_version(a)
to_version = get_config_version(b)
if from_version == 1 and to_version == 2:
# When updating the configuration to a newer version, we clear all user fields.
a = {k: v for k, v in a.items() if k in _non_user_fields}
return replace_config(a, b)
if mode == "default" or mode == "merge":
return merge_config(a, b)
if mode == "replace":
return replace_config(a, b)
raise ValueError("Invalid configuration update mode: %s" % mode)
| 16,292
|
def disk_partitions(disk_ntuple, all=False):
"""Return all mountd partitions as a named tuple.
If all == False return physical partitions only.
"""
phydevs = []
if os.path.exists('/proc/filesystems'):
my_file = open('/proc/filesystems', 'r')
for line in my_file:
if not line.startswith('nodev'):
phydevs.append(line.strip())
else:
print ('path does not exist: /proc/filesystems')
retlist = []
if os.path.exists('/etc/mtab'):
my_file = open('/etc/mtab', 'r')
for line in my_file:
if not all and line.startswith('none'):
continue
fields = line.split()
device = fields[0]
mountpoint = fields[1]
fstype = fields[2]
if not all and fstype not in phydevs:
continue
if device == 'none':
device = ''
ntuple = disk_ntuple(device, mountpoint, fstype)
retlist.append(ntuple)
else:
print ('path does not exist: /etc/mtab')
return retlist
| 16,293
|
def create_app(config_name='DevelopmentConfig'):
"""Create the Flask application from a given config object type.
Args:
config_name (string): Config instance name.
Returns:
Flask Application with config instance scope.
"""
app = Flask(__name__)
{{cookiecutter.package_name | upper}}(app, config_name=config_name)
return app
| 16,294
|
def label_accuracy_score(hist):
"""Returns accuracy score evaluation result.
- overall accuracy
- mean accuracy
- mean IU
- fwavacc
"""
with np.errstate(divide='ignore', invalid='ignore'):
iu = np.diag(hist) / (
hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist)
)
mean_iu = np.nanmean(iu)
return mean_iu
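A small worked example with a 2-class confusion matrix:

hist = np.array([[8, 2],
                 [2, 8]])
label_accuracy_score(hist)  # each class IU = 8 / (10 + 10 - 8) = 2/3, so mean IU ≈ 0.667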
| 16,295
|
def get_page_title(title: str):
""" Возвращает заголовок, отображаемый на вкладках """
return f'{title} | NeuraHS'
| 16,296
|
def scan_build(registry, xml_parent, data):
"""yaml: scan-build
This plugin allows you configure a build step that will execute the Clang
scan-build static analysis tool against an XCode project.
The scan-build report has to be generated in the directory
``${WORKSPACE}/clangScanBuildReports`` for the publisher to find it.
Requires the Jenkins :jenkins-wiki:`Clang Scan-Build Plugin
<Clang+Scan-Build+Plugin>`.
:arg str target: Provide the exact name of the XCode target you wish to
have compiled and analyzed (required)
:arg str target-sdk: Set the simulator version of a currently installed SDK
(default iphonesimulator)
:arg str config: Provide the XCode config you wish to execute scan-build
against (default Debug)
:arg str clang-install-name: Name of clang static analyzer to use (default
'')
:arg str xcode-sub-path: Path of XCode project relative to the workspace
(default '')
:arg str workspace: Name of workspace (default '')
:arg str scheme: Name of scheme (default '')
:arg str scan-build-args: Additional arguments to clang scan-build
(default --use-analyzer Xcode)
:arg str xcode-build-args: Additional arguments to XCode (default
-derivedDataPath $WORKSPACE/build)
:arg str report-folder: Folder where generated reports are located
(>=1.7) (default clangScanBuildReports)
Full Example:
.. literalinclude:: /../../tests/builders/fixtures/scan-build-full.yaml
:language: yaml
Minimal Example:
.. literalinclude::
/../../tests/builders/fixtures/scan-build-minimal.yaml
:language: yaml
"""
p = XML.SubElement(
xml_parent,
'jenkins.plugins.clangscanbuild.ClangScanBuildBuilder')
p.set('plugin', 'clang-scanbuild')
mappings = [
('target', 'target', None),
('target-sdk', 'targetSdk', 'iphonesimulator'),
('config', 'config', 'Debug'),
('clang-install-name', 'clangInstallationName', ''),
('xcode-sub-path', 'xcodeProjectSubPath', 'myProj/subfolder'),
('workspace', 'workspace', ''),
('scheme', 'scheme', ''),
('scan-build-args', 'scanbuildargs', '--use-analyzer Xcode'),
('xcode-build-args',
'xcodebuildargs',
'-derivedDataPath $WORKSPACE/build'),
('report-folder', 'outputFolderName', 'clangScanBuildReports'),
]
convert_mapping_to_xml(p, data, mappings, fail_required=True)
| 16,297
|
def make_pointer_union_printer(val):
"""Factory for an llvm::PointerUnion printer."""
pointer, value = get_pointer_int_pair(val['Val'])
if not pointer or not value:
return None
pointer_type = val.type.template_argument(int(value))
string = 'llvm::PointerUnion containing %s' % pointer_type
return make_printer(string, [('pointer', pointer.cast(pointer_type))])
| 16,298
|
def test_default_quality_2_down():
"""
Test that quality goes down by 2 when sell_in date has gotten to 0
GIVEN: GildedRose with one Item of type default
WHEN: call update_quality method
THEN: quality should be two less
"""
quality = 10
sell_in = 0
name = 'default'
items = [Item(name, sell_in, quality)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
assert items[0].quality == quality - 2
| 16,299
|