| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def run_script(script_path, session, handle_command=None, handle_line=None):
""" Run a script file using a valid sqlalchemy session.
Based on https://bit.ly/2CToAhY.
See also sqlalchemy transaction control: https://bit.ly/2yKso0A
:param script_path: The path where the script is located
:param session: A sqlalchemy session to execute the sql commands from the
script
:param handle_command: Function to handle a valid command
:param handle_line: Function to handle a valid line
:return:
"""
logger.debug("Opening script %s." % script_path)
with open(script_path, "r") as stream:
sql_command = ""
for line in stream:
# Ignore commented lines
if not line.startswith("--") and line.strip("\n"):
# Append line to the command string
if handle_line is not None:
logger.debug("Calling the handle line function for: "
"%s." % line)
line = handle_line(line)
sql_command = "%s%s" % (sql_command, line.strip("\n"))
# If the command string ends with ";", it is a full statement
if sql_command.endswith(";"):
# Try to execute statement and commit it
try:
if handle_command is not None:
logger.debug("Calling the handle command function "
"for: %s." % sql_command)
sql_command = handle_command(sql_command)
session.execute(text(sql_command))
# Assert in case of error
except Exception as e:
session.rollback()
raise e
# Finally, clear command string
finally:
sql_command = ""
session.commit()
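
# Usage sketch (added for illustration, not from the original source): assumes the
# run_script helper above plus a SQLAlchemy engine; "schema.sql" is a hypothetical path.
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

def _run_script_example():
    engine = create_engine("sqlite:///:memory:")
    session = sessionmaker(bind=engine)()
    # Strip trailing whitespace from each line before it is appended to the statement.
    run_script("schema.sql", session, handle_line=lambda line: line.rstrip() + "\n")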
| 5,343,000
|
def create_overide_pandas_func(
cls, func, verbose, silent, full_signature, copy_ok, calculate_memory
):
""" Create overridden pandas method dynamically with
additional logging using DataFrameLogger
    Note: if we extract _overide_pandas_method outside, we need to implement a decorator like the one described here:
https://stackoverflow.com/questions/10176226/how-do-i-pass-extra-arguments-to-a-python-decorator
    :param cls: pandas class for which the method should be overridden
:param func: pandas method name to be overridden
    :param silent: Whether the additional statistics get printed
:param full_signature: adding additional information to function signature
:param copy_ok: whether the dataframe is allowed to be copied to calculate more informative metadata logs
:return: the same function with additional logging capabilities
"""
def _run_method_and_calc_stats(
fn,
fn_args,
fn_kwargs,
input_df,
full_signature,
silent,
verbose,
copy_ok,
calculate_memory,
):
if copy_ok:
# If we're ok to make copies, copy the input_df so that we can compare against the output of inplace methods
try:
# Will hit infinite recursion if we use the patched copy method so use the original
original_input_df = getattr(
input_df, settings.ORIGINAL_METHOD_PREFIX + "copy"
)(deep=True)
except AttributeError:
original_input_df = input_df.copy(deep=True)
output_df, execution_stats = get_execution_stats(
cls, fn, input_df, fn_args, fn_kwargs, calculate_memory
)
if output_df is None:
# The operation was strictly in place so we just call the dataframe the output_df as well
output_df = input_df
if copy_ok:
# If this isn't true and the method was strictly inplace, input_df and output_df will just
# point to the same object
input_df = original_input_df
step_stats = StepStats(
execution_stats,
cls,
fn,
fn_args,
fn_kwargs,
full_signature,
input_df,
output_df,
)
step_stats.log_stats_if_needed(silent, verbose, copy_ok)
if isinstance(output_df, pd.DataFrame) or isinstance(output_df, pd.Series):
step_stats.persist_execution_stats()
return output_df
def _overide_pandas_method(fn):
if cls == pd.DataFrame:
register_method_wrapper = pf.register_dataframe_method
elif cls == pd.Series:
register_method_wrapper = pf.register_series_method
@register_method_wrapper
@wraps(fn)
def wrapped(*args, **fn_kwargs):
input_df, fn_args = args[0], args[1:]
output_df = _run_method_and_calc_stats(
fn,
fn_args,
fn_kwargs,
input_df,
full_signature,
silent,
verbose,
copy_ok,
calculate_memory,
)
return output_df
return wrapped
return exec(f"@_overide_pandas_method\ndef {func}(df, *args, **kwargs): pass")
| 5,343,001
|
def orthogonalize(U, eps=1e-15):
"""
Orthogonalizes the matrix U (d x n) using Gram-Schmidt Orthogonalization.
If the columns of U are linearly dependent with rank(U) = r, the last n-r columns
will be 0.
Args:
U (numpy.array): A d x n matrix with columns that need to be orthogonalized.
eps (float): Threshold value below which numbers are regarded as 0 (default=1e-15).
Returns:
(numpy.array): A d x n orthogonal matrix. If the input matrix U's cols were
not linearly independent, then the last n-r cols are zeros.
Examples:
```python
>>> import numpy as np
>>> import gram_schmidt as gs
>>> gs.orthogonalize(np.array([[10., 3.], [7., 8.]]))
array([[ 0.81923192, -0.57346234],
[ 0.57346234, 0.81923192]])
>>> gs.orthogonalize(np.array([[10., 3., 4., 8.], [7., 8., 6., 1.]]))
    array([[ 0.81923192, -0.57346234,  0.        ,  0.        ],
           [ 0.57346234,  0.81923192,  0.        ,  0.        ]])
```
"""
n = len(U[0])
    # numpy can readily reference rows using indices, but referencing full columns is a little
    # dirty. So, work with transpose(U)
V = U.T
for i in range(n):
prev_basis = V[0:i] # orthonormal basis before V[i]
coeff_vec = np.dot(prev_basis, V[i].T) # each entry is np.dot(V[j], V[i]) for all j < i
# subtract projections of V[i] onto already determined basis V[0:i]
V[i] -= np.dot(coeff_vec, prev_basis).T
if la.norm(V[i]) < eps:
V[i][V[i] < eps] = 0. # set the small entries to 0
else:
V[i] /= la.norm(V[i])
return V.T
| 5,343,002
|
def undistort(
cam_mat,
dist_coeffs,
images,
res_dirpath):
"""Saves undistorted images with specified calibration parameters."""
# write the camera matrix
imgSize = images[0].shape[:2]
h, w = imgSize
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(
cam_mat, dist_coeffs, (w, h), 1, (w, h))
for i, img in enumerate(images):
dst = cv2.undistort(img,
cam_mat,
dist_coeffs,
None,
newcameramtx)
# crop the image
x, y, w, h = roi
dst = dst[y:y+h, x:x+w]
cv2.imwrite(osp.join(res_dirpath, "undist"+str(i+1)+'.png'), dst)
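
# Usage sketch (illustrative, not from the original source): load previously saved
# calibration arrays, read the distorted images, and write the corrected ones;
# all file and directory names below are hypothetical.
def _undistort_example():
    import glob
    import numpy as np
    cam_mat = np.load("camera_matrix.npy")
    dist_coeffs = np.load("dist_coeffs.npy")
    images = [cv2.imread(p) for p in sorted(glob.glob("calib/*.png"))]
    undistort(cam_mat, dist_coeffs, images, "results")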
| 5,343,003
|
def get_files_under_dir(directory, ext='', case_sensitive=False):
"""
Perform recursive search in directory to match files with one of the
extensions provided
:param directory: path to directory you want to perform search in.
    :param ext: list of extensions or a single extension for files to match
    :param case_sensitive: whether the case of the filename is taken into consideration
:return: list of files that matched query
"""
if isinstance(ext, (list, tuple)):
allowed_exensions = ext
else:
allowed_exensions = [ext]
if not case_sensitive:
        # Materialise as a list so the lowered extensions can be reused for every file
        allowed_exensions = [e.lower() for e in allowed_exensions]
result = []
for root, dirs, files in os.walk(directory):
for filename in files:
check_filename = filename if case_sensitive else filename.lower()
if any(map(check_filename.endswith, allowed_exensions)):
result.append(filename)
return result
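
# Usage sketch (illustrative): match either extension regardless of case under a
# hypothetical "data" directory.
def _get_files_under_dir_example():
    images = get_files_under_dir("data", ext=[".jpg", ".png"])
    print("%d image files found" % len(images))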
| 5,343,004
|
def read_ftdc(fn, first_only = False):
"""
Read an ftdc file. fn may be either a single metrics file, or a
directory containing a sequence of metrics files.
"""
# process dir
if os.path.isdir(fn):
for f in sorted(os.listdir(fn)):
for chunk in read_ftdc(os.path.join(fn, f)):
yield chunk
# process file
else:
# open and map file
        f = open(fn, 'rb')  # binary mode for mmap
buf = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
at = 0
# traverse the file reading type 1 chunks
while at < len(buf):
try:
chunk_doc = _read_bson_doc(buf, at)
at += chunk_doc.bson_len
if chunk_doc['type']==1:
yield _decode_chunk(chunk_doc, first_only)
except Exception as e:
print('bad bson doc: ')
raise
# bson docs should exactly cover file
assert(at==len(buf))
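
# Usage sketch (illustrative): the generator yields decoded chunks lazily, so a
# metrics file or directory can be scanned without loading it all at once; the
# path below is hypothetical.
def _read_ftdc_example():
    for chunk in read_ftdc("diagnostic.data", first_only=True):
        print("decoded one type-1 chunk")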
| 5,343,005
|
def boolean(func):
"""
Sets 'boolean' attribute (this attribute is used by list_display).
"""
func.boolean=True
return func
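
# Usage sketch (illustrative): the attribute set by this decorator is what Django's
# admin list_display inspects to render a method's return value as an on/off icon.
def _boolean_example():
    @boolean
    def is_published(obj):
        return obj is not None
    assert is_published.boolean is True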
| 5,343,006
|
def load_output_template_configs(items):
"""Return list of output template configs from *items*."""
templates = []
for item in items:
template = OutputTemplateConfig(
id=item["id"],
pattern_path=item.get("pattern-path", ""),
pattern_base=item.get("pattern-base", ""),
append_username_to_name=item.get("append-username-to-name", False),
append_colorspace_to_name=item.get("append-colorspace-to-name", False),
append_passname_to_name=item.get("append-passname-to-name", False),
append_passname_to_subfolder=item.get("append-passname-to-subfolder", False),
)
templates.append(template)
return tuple(templates)
| 5,343,007
|
def print_http_requests(pcap):
"""Print out information about each packet in a pcap
Args:
pcap: dpkt pcap reader object (dpkt.pcap.Reader)
"""
# For each packet in the pcap process the contents
for timestamp, buf in pcap:
# Unpack the Ethernet frame (mac src/dst, ethertype)
eth = dpkt.ethernet.Ethernet(buf)
# Make sure the Ethernet data contains an IP packet
if not isinstance(eth.data, dpkt.ip.IP):
print('Non IP Packet type not supported %s\n' % eth.data.__class__.__name__)
continue
# Now grab the data within the Ethernet frame (the IP packet)
ip = eth.data
# Check for TCP in the transport layer
if isinstance(ip.data, dpkt.tcp.TCP):
# Set the TCP data
tcp = ip.data
# Now see if we can parse the contents as a HTTP request
try:
request = dpkt.http.Request(tcp.data)
except (dpkt.dpkt.NeedData, dpkt.dpkt.UnpackError):
continue
# Pull out fragment information (flags and offset all packed into off field, so use bitmasks)
do_not_fragment = bool(ip.off & dpkt.ip.IP_DF)
more_fragments = bool(ip.off & dpkt.ip.IP_MF)
fragment_offset = ip.off & dpkt.ip.IP_OFFMASK
# Print out the info
print('Timestamp: ', str(datetime.datetime.utcfromtimestamp(timestamp)))
print('Ethernet Frame: ', mac_addr(eth.src), mac_addr(eth.dst), eth.type)
print('IP: %s -> %s (len=%d ttl=%d DF=%d MF=%d offset=%d)' %
(inet_to_str(ip.src), inet_to_str(ip.dst), ip.len, ip.ttl, do_not_fragment, more_fragments, fragment_offset))
print('HTTP request: %s\n' % repr(request))
            # Check for a header spanning across TCP segments
if not tcp.data.endswith(b'\r\n'):
print('\nHEADER TRUNCATED! Reassemble TCP segments!\n')
| 5,343,008
|
def read_keyword_arguments_section(docstring: Docstring, start_index: int) -> tuple[DocstringSection | None, int]:
"""
Parse a "Keyword Arguments" section.
Arguments:
docstring: The docstring to parse
start_index: The line number to start at.
Returns:
A tuple containing a `Section` (or `None`) and the index at which to continue parsing.
"""
arguments, index = read_arguments(docstring, start_index)
if arguments:
return DocstringSection(DocstringSectionKind.keyword_arguments, arguments), index
warn(docstring, index, f"Empty keyword arguments section at line {start_index}")
return None, index
| 5,343,009
|
def iter_from_pbf_buffer(buff: IO[bytes]) -> Iterator[dict]:
"""Yields all items inside a given OSM PBF buffer.
"""
parser = ParserPbf(buff)
yield from parser.parse()
| 5,343,010
|
def write_tables(output_dir):
"""
Write pipeline tables as csv files (in output directory) as specified by output_tables list
in settings file.
'output_tables' can specify either a list of output tables to include or to skip
if no output_tables list is specified, then no checkpointed tables will be written
To write all output tables EXCEPT the households and persons tables:
::
output_tables:
action: skip
tables:
- households
- persons
To write ONLY the households table:
::
output_tables:
action: include
tables:
- households
To write tables into a single HDF5 store instead of individual CSVs, use the h5_store flag:
::
output_tables:
h5_store: True
action: include
tables:
- households
Parameters
----------
output_dir: str
"""
output_tables_settings_name = 'output_tables'
output_tables_settings = setting(output_tables_settings_name)
if output_tables_settings is None:
logger.info("No output_tables specified in settings file. Nothing to write.")
return
action = output_tables_settings.get('action')
tables = output_tables_settings.get('tables')
prefix = output_tables_settings.get('prefix', 'final_')
h5_store = output_tables_settings.get('h5_store', False)
sort = output_tables_settings.get('sort', False)
checkpointed_tables = pipeline.checkpointed_tables()
if action == 'include':
output_tables_list = tables
elif action == 'skip':
output_tables_list = [t for t in checkpointed_tables if t not in tables]
else:
raise "expected %s action '%s' to be either 'include' or 'skip'" % \
(output_tables_settings_name, action)
for table_name in output_tables_list:
if table_name == 'checkpoints':
df = pipeline.get_checkpoints()
else:
if table_name not in checkpointed_tables:
logger.warning("Skipping '%s': Table not found." % table_name)
continue
df = pipeline.get_table(table_name)
if sort:
traceable_table_indexes = inject.get_injectable('traceable_table_indexes', {})
if df.index.name in traceable_table_indexes:
df = df.sort_index()
logger.debug(f"write_tables sorting {table_name} on index {df.index.name}")
else:
# find all registered columns we can use to sort this table
# (they are ordered appropriately in traceable_table_indexes)
sort_columns = [c for c in traceable_table_indexes if c in df.columns]
if len(sort_columns) > 0:
df = df.sort_values(by=sort_columns)
logger.debug(f"write_tables sorting {table_name} on columns {sort_columns}")
else:
logger.debug(f"write_tables sorting {table_name} on unrecognized index {df.index.name}")
df = df.sort_index()
if h5_store:
file_path = config.output_file_path('%soutput_tables.h5' % prefix)
df.to_hdf(file_path, key=table_name, mode='a', format='fixed')
else:
file_name = "%s%s.csv" % (prefix, table_name)
file_path = config.output_file_path(file_name)
# include the index if it has a name or is a MultiIndex
write_index = df.index.name is not None or isinstance(df.index, pd.MultiIndex)
df.to_csv(file_path, index=write_index)
| 5,343,011
|
def is_builtin(x, drop_callables=True):
"""Check if an object belongs to the Python standard library.
Parameters
----------
    x: object
        The object to check.
    drop_callables: bool
If True, we won't consider callables (classes/functions) to be builtin.
Classes have class `type` and functions have class
`builtin_function_or_method`, both of which are builtins - however,
this is often not what we mean when we want to know if something is
built in. Note: knowing the class alone is not enough to determine if
the objects it creates are built-in; this may depend on the kwargs
passed to its constructor. This will NOT check if a class was defined
in the standard library.
Returns
-------
bool: True if the object is built-in. If the object is list-like, each
        item will be checked, as well as the container. If the object is dict-like,
each key AND value will be checked (you can always pass in d.keys() or
d.values() for more limited checking). Again, the container itself will
be checked as well.
"""
def _builtin(x, drop_callables):
if callable(x) and drop_callables:
return False
return x.__class__.__module__ == 'builtins'
builtin = partial(_builtin, drop_callables=drop_callables)
# Check mapping before iterable because mappings are iterable.
if isinstance(x, Mapping):
return builtin(x) and all(builtin(o) for o in flatten(x.items()))
elif isinstance(x, Iterable):
return builtin(x) and all(builtin(o) for o in flatten(x))
return builtin(x)
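
# Usage sketch (illustrative): plain values count as built-in, while callables only
# do when drop_callables is disabled; container checks rely on the module's flatten helper.
def _is_builtin_example():
    assert is_builtin(3) is True
    assert is_builtin(len) is False
    assert is_builtin(len, drop_callables=False) is True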
| 5,343,012
|
def random_pair_selection(config_path,
data_size=100,
save_log="random_sents"):
"""
    randomly choose sentence pairs from the parallel data and save them to save_log
:param config_path:
:param data_size:
:param save_log:
:return: random selected pairs
"""
np.random.seed(32767)
with open(config_path.strip()) as f:
        configs = yaml.safe_load(f)
data_configs = configs["data_configs"]
with open(data_configs["train_data"][0], "r") as src, \
open(data_configs["train_data"][1], "r") as trg, \
open(save_log+".src", "w") as out_src, open(save_log+".trg", "w") as out_trg:
counter=0
return_src=[]
return_trg=[]
for sent_s, sent_t in zip(src,trg):
if np.random.uniform()<0.2 and counter<data_size:
counter += 1
out_src.write(sent_s)
out_trg.write(sent_t)
return_src+=[sent_s.strip()]
return_trg+=[sent_t.strip()]
return return_src, return_trg
| 5,343,013
|
def test_deployable():
"""
Check that 1) no untracked files are hanging out, 2) no staged but
uncommitted updates are outstanding, 3) no unstaged, uncommitted changes
are outstanding, 4) the most recent git tag matches HEAD, and 5) the most
recent git tag matches the current version.
"""
pytest.dbgfunc()
staged, changed, untracked = tbx.git_status()
assert untracked == [], "You have untracked files"
assert changed == [], "You have unstaged updates"
assert staged == [], "You have updates staged but not committed"
if tbx.git_current_branch() != 'master':
return True
last_tag = tbx.git_last_tag()
msg = "Version ({}) does not match tag ({})".format(version._v,
last_tag)
assert version._v == last_tag, msg
assert tbx.git_hash() == tbx.git_hash(last_tag), "Tag != HEAD"
| 5,343,014
|
def profile_nominal(pairs, **options):
"""Return stats for the nominal field
Arguments:
:param pairs: list with pairs (row, value)
:return: dictionary with stats
"""
result = OrderedDict()
values = [r[1] for r in pairs]
c = Counter(values)
result['top'], result['freq'] = c.most_common(1)[0]
categories = list(c)
categories.sort()
result['categories'] = categories
result['categories_num'] = len(categories)
return result
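
# Usage sketch (illustrative): `pairs` are (row, value) tuples; the most common
# value and its frequency come back along with the sorted category list.
def _profile_nominal_example():
    stats = profile_nominal([(0, "red"), (1, "blue"), (2, "red")])
    assert stats["top"] == "red" and stats["freq"] == 2
    assert stats["categories"] == ["blue", "red"]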
| 5,343,015
|
def class_definitions(cursor: Cursor) -> List[Cursor]:
"""
extracts all class definitions in the file pointed by cursor. (typical mocks.h)
Args:
cursor: cursor of parsing result of target source code by libclang
Returns:
a list of cursor, each pointing to a class definition.
"""
cursors = cursors_in_same_file(cursor)
class_cursors = []
for descendant in cursors:
# check if descendant is pointing to a class declaration block.
if descendant.kind != CursorKind.CLASS_DECL:
continue
if not descendant.is_definition():
continue
# check if this class is directly enclosed by a namespace.
if descendant.semantic_parent.kind != CursorKind.NAMESPACE:
continue
class_cursors.append(descendant)
return class_cursors
| 5,343,016
|
def visualize_dataset(dataset_directory=None, mesh_filename_path=None):
"""
This method loads the mesh file from dataset directory and helps us to visualize it
Parameters:
dataset_directory (str): root dataset directory
mesh_filename_path (str): mesh file name to process
Returns:
mesh (trimesh object)
"""
try:
if dataset_directory is not None and mesh_filename_path is not None:
mesh = trimesh.load(os.path.join(dataset_directory, mesh_filename_path))
return mesh
    except Exception as exc:
        # print() does not accept logging's exc_info keyword; report the error directly
        print('Caught Exception while reading meshfile:', exc)
| 5,343,017
|
def service_list_by_category_view(request, category):
"""Shows services for a chosen category.
If url doesn't link to existing category, return user to categories list"""
template_name = 'services/service-list-by-category.html'
if request.method == "POST":
contact_form = ContactForm(request.POST)
if contact_form.is_valid():
contact_form.save()
return redirect(reverse('accounts:profile'))
else:
if request.user.is_authenticated:
initial_data = {
"user": request.user,
"name": request.user.first_name + " " + request.user.last_name,
"email": request.user.email
}
form = ContactForm(
request.POST or None, initial=initial_data)
else:
form = ContactForm()
try:
obj = ServiceCategory.objects.get(name=category)
queryset = Service.objects.filter(category=obj.pk)
context = {
"obj": obj,
"queryset": queryset,
"form": form,
}
except ServiceCategory.DoesNotExist:
messages.error(request, 'No category named <em>' + category + '</em>.')
return redirect("services:services_list")
return render(request, template_name=template_name, context=context)
| 5,343,018
|
def unmap_address(library, session):
"""Unmaps memory space previously mapped by map_address().
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
"""
library.viUnmapAddress(session)
| 5,343,019
|
def test_shorter_segments():
"""shorter segments than chunks, no tolerance"""
document = ("01234 "*4).split()
segments = list(segment_fuzzy(document, segment_size=4))
assert segments == [[['0', '1', '2', '3']],
[['4'], ['0', '1', '2']],
[['3', '4'], ['0', '1']],
[['2', '3', '4'], ['0']],
[['1', '2', '3', '4']]]
| 5,343,020
|
def check_columns(board: list):
"""
Check column-wise compliance of the board for uniqueness (buildings of unique height) and visibility (top-bottom and vice versa).
Same as for horizontal cases, but aggregated in one function for vertical case, i.e. columns.
>>> check_columns(['***21**', '412453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
True
>>> check_columns(['***21**', '412453*', '423145*', '*543215', '*35214*', '*41232*', '*2*1***'])
False
>>> check_columns(['***21**', '412553*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
False
"""
transformed_board = []
l = len(board)
for idx1 in range(l):
line = []
for idx2 in range(l):
line.append(board[idx2][idx1])
line = ''.join(line)
transformed_board.append(line)
if not check_horizontal_visibility(transformed_board):
return False
return True
| 5,343,021
|
def get_git_tree(pkg, g, top_prd):
"""
    Build the git dependency tree for *pkg*.
    :return: dict with 'tree' and 'deps' entries (the values are None if the package
             content could not be fetched), or None if *pkg* is empty
"""
global pkg_tree
global pkg_id
global pkg_list
global pkg_matrix
pkg_tree = Tree()
pkg_id = 0
pkg_list = dict()
# pkg_list['root'] = []
if pkg == '':
return None
if pkg in Config.CACHED_GIT_REPOS:
pkg_content = Config.CACHED_GIT_REPOS[pkg]
print("^", end="", flush=True)
else:
pkg_content = get_gitpkg_content(pkg, g)
if pkg_content:
# first node in the tree
if pkg in pkg_matrix.keys():
if top_prd not in pkg_matrix[pkg]:
pkg_matrix[pkg].append(top_prd)
else:
pkg_matrix[pkg] = [top_prd]
pkg_content.key = str(pkg_id) + "." +pkg_content.name
pkg_content.component_id = top_prd.id
pkg_content.component_name = top_prd.name
# print(pkg_content.key, pkg_content.pkey, pkg_content.name, pkg_content.ups_table, ">>>>>>", end="", flush=True)
pkg_tree.create_node(pkg_content.key, pkg_content.key, data=pkg_content)
if pkg not in Config.CACHED_GIT_REPOS.keys():
Config.CACHED_GIT_REPOS[pkg] = pkg_content
print("+", end="", flush=True)
for child in pkg_content.ups_table:
walk_git_tree(child, g, pkg_content.key, top_prd)
else:
return {'tree': None, 'deps': None}
# print(pkg_tree)
return {'tree': pkg_tree, 'deps': pkg_list}
| 5,343,022
|
def get_requests_session():
"""Return an empty requests session, use the function to reuse HTTP connections"""
session = requests.session()
session.mount("http://", request_adapter)
session.mount("https://", request_adapter)
session.verify = bkauth_settings.REQUESTS_VERIFY
session.cert = bkauth_settings.REQUESTS_CERT
return session
| 5,343,023
|
def edit_role_description(rid, description, analyst):
"""
Edit the description of a role.
:param rid: The ObjectId of the role to alter.
:type rid: str
:param description: The new description for the Role.
:type description: str
:param analyst: The user making the change.
:type analyst: str
"""
description = description.strip()
Role.objects(id=rid,
name__ne=settings.ADMIN_ROLE).update_one(set__description=description)
return {'success': True}
| 5,343,024
|
def test_incidents_info_md_with_invalid_keys(mocker):
"""
Given:
- Incidents in campaign context contains some invalid keys (e.g. status),
When:
- Get value from incident (GetCampaignIncidentsInfo.get_incident_val)
Then:
- Validate invalid key not in the human readable
"""
# prepare
incident_with_invalid_status = MOCKED_INCIDENTS[4]
incident_without_status = MOCKED_INCIDENTS[0].copy()
incident_without_status.pop('status')
incidents = [incident_with_invalid_status, incident_without_status]
mocker.patch.object(demisto, 'results')
mocker.patch('GetCampaignIncidentsInfo.get_campaign_incidents_from_context', return_value=incidents)
mocker.patch('GetCampaignIncidentsInfo.update_incident_with_required_keys', return_value=incidents)
# run
main()
hr = demisto.results.call_args[0][0]['HumanReadable']
# validate
assert 'Status' not in hr
assert all(status not in hr for status in STATUS_DICT.values())
| 5,343,025
|
def have_questions(pair, config, info=None):
"""
Return True iff both images are annotated with questions.
"""
qas = info["qas"]
c1id = pair[0]
if qas[c1id]['qas'] == []:
return False
c2id = pair[1]
if qas[c2id]['qas'] == []:
return False
return True
| 5,343,026
|
def free_strings(
allocator, # type: Allocator
ranges, # type: List[Tuple]
data, # type: Union[bytes, bytearray, memoryview]
):
"""
Mark as free the space used by the strings delimited by `ranges`,
also detect and mark as free zero padding (alignment) between strings.
The `ranges` argument will be sorted in-place
"""
    # Guard against an empty list, which would otherwise leave the stride variables unset below
    if not ranges:
        return
    ranges.sort(key=lambda range: range[0])
for i, (start, end) in enumerate(ranges):
if i == 0:
free_stride_start = start
free_stride_end = end
else:
if (
free_stride_end <= start
and (start - free_stride_end) < 4
and set(data[free_stride_end:start]).issubset({0})
):
free_stride_end = end
else:
allocator.free(free_stride_start, free_stride_end)
free_stride_start = start
free_stride_end = end
allocator.free(free_stride_start, free_stride_end)
| 5,343,027
|
def get_dist_genomic(genomic_data,var_or_gene):
"""Get the distribution associated to genomic data for its characteristics
Parameters: genomic_data (dict): with UDN ID as key and list with dictionaries as value,
        dict containing characteristics of the considered genomic data
var_or_gene (str): "Var" if variants, "Gen" otherwise
Returns: gene_effects (collec.Counter): distribution of characteristics for selected genomic data
"""
gene_list=[]
for patient in genomic_data:
for i in range(len(genomic_data[patient])):
if var_or_gene=="Var":
if "effect" in list(genomic_data[patient][i].keys()) and "gene" in list(genomic_data[patient][i].keys()):
gene_list.append([genomic_data[patient][i]["gene"],genomic_data[patient][i]["effect"]])
else:
gene_list.append([genomic_data[patient][i]["gene"],"NA"])
elif var_or_gene=="Gen":
if "status" in list(genomic_data[patient][i].keys()) and "gene" in list(genomic_data[patient][i].keys()):
gene_list.append([genomic_data[patient][i]["gene"],genomic_data[patient][i]["status"]])
else:
gene_list.append([genomic_data[patient][i]["gene"],"NA"])
else:
print("var_or_gene must be Var or Gen")
gene_effects=collec.Counter(np.array(gene_list)[:,1])
return gene_effects
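
# Usage sketch (illustrative): the expected input maps a UDN ID to a list of dicts
# describing each variant; the counter below tallies the "effect" field.
def _get_dist_genomic_example():
    genomic_data = {
        "UDN001": [{"gene": "BRCA1", "effect": "missense"}],
        "UDN002": [{"gene": "TP53", "effect": "missense"}, {"gene": "CFTR"}],
    }
    effects = get_dist_genomic(genomic_data, "Var")
    assert effects["missense"] == 2 and effects["NA"] == 1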
| 5,343,028
|
def get_gmb_dataset_train(max_sentence_len):
"""
Returns the train portion of the gmb data-set. See TRAIN_TEST_SPLIT param for split ratio.
:param max_sentence_len:
:return:
"""
tokenized_padded_tag2idx, tokenized_padded_sentences, sentences = get_gmb_dataset(max_sentence_len)
return tokenized_padded_tag2idx[:int(len(tokenized_padded_tag2idx)*TRAIN_TEST_SPLIT)], \
tokenized_padded_sentences[:int(len(tokenized_padded_sentences)*TRAIN_TEST_SPLIT)], \
sentences[:int(len(sentences)*TRAIN_TEST_SPLIT)]
| 5,343,029
|
def is_answer_reliable(location_id, land_usage, expansion):
"""
    Before submitting to the DB, we judge if an answer is reliable and set the location done if:
1. The user passes the gold standard test
2. Another user passes the gold standard test, and submitted the same answer as it.
Parameters
----------
location_id : int
ID of the location.
land_usage : int
User's answer of judging if the land is a farm or has buildings.
(check the answer table in model.py for the meaning of the values)
expansion : int
User's answer of judging the construction is expanded.
(check the answer table in model.py for the meaning of the values)
Return
------
bool
Result of the checking.
        True : Matches another good answer candidate.
False : No other good answer candidates exist or match.
"""
# If another user passed the gold standard quality test, and submitted an answer to the same location.
good_answer_candidates = Answer.query.filter_by(gold_standard_status=1, location_id=location_id, land_usage=land_usage, expansion=expansion).all()
    # True only when at least one good answer candidate exists
    return len(good_answer_candidates) > 0
| 5,343,030
|
def _polyfit_coeffs(spec,specerr,scatter,labelA,return_cov=False):
"""For a given scatter, return the best-fit coefficients"""
    Y = spec / (specerr**2. + scatter**2.)
    ATY = numpy.dot(labelA.T, Y)
    CiA = labelA * numpy.tile(1. / (specerr**2. + scatter**2.), (labelA.shape[1], 1)).T
    ATCiA = numpy.dot(labelA.T, CiA)
    ATCiAinv = linalg.inv(ATCiA)
    if return_cov:
        return (numpy.dot(ATCiAinv, ATY), ATCiAinv)
    else:
        return numpy.dot(ATCiAinv, ATY)
| 5,343,031
|
async def test_device_security_gateway():
"""Test device class on a security gateway."""
mock_requests = MagicMock(return_value=Future())
mock_requests.return_value.set_result("")
devices = Devices([GATEWAY_USG3], mock_requests)
assert len(devices.values()) == 1
gateway = devices[GATEWAY_USG3["mac"]]
assert gateway.board_rev == 16
assert gateway.considered_lost_at == 1588175842
assert gateway.disabled is False
assert gateway.id == "235678987654345678"
assert gateway.ip == "1.2.3.4"
assert gateway.fan_level is None
assert gateway.has_fan is False
assert gateway.last_seen == 1588175740
assert gateway.mac == "78:8a:20:33:44:55"
assert gateway.model == "UGW3"
assert gateway.name == "USG"
assert gateway.next_heartbeat_at == 1588175774
assert gateway.overheating is False
assert gateway.port_overrides == []
assert gateway.port_table == GATEWAY_USG3["port_table"]
assert gateway.state == 1
assert gateway.sys_stats == {
"loadavg_1": "0.03",
"loadavg_15": "0.08",
"loadavg_5": "0.07",
"mem_buffer": 57561088,
"mem_total": 507412480,
"mem_used": 293453824,
}
assert gateway.type == "ugw"
assert gateway.version == "4.4.44.5213844"
assert gateway.upgradable is True
assert gateway.upgrade_to_firmware == "4.4.50.5272448"
assert gateway.uplink_depth is None
assert gateway.user_num_sta == 20
assert gateway.wlan_overrides == []
assert gateway.__repr__() == f"<Device {gateway.name}: {gateway.mac}>"
assert len(gateway.ports.values()) == 3
gateway_port_eth0 = gateway.ports["eth0"]
assert gateway_port_eth0.ifname == "eth0"
assert gateway_port_eth0.media is None
assert gateway_port_eth0.name == "wan"
assert gateway_port_eth0.port_idx is None
assert gateway_port_eth0.poe_class is None
assert gateway_port_eth0.poe_enable is None
assert gateway_port_eth0.poe_mode is None
assert gateway_port_eth0.poe_power is None
assert gateway_port_eth0.poe_voltage is None
assert gateway_port_eth0.portconf_id is None
assert gateway_port_eth0.port_poe is False
assert gateway_port_eth0.up is True
assert (
gateway_port_eth0.__repr__()
== f"<{gateway_port_eth0.name}: Poe {gateway_port_eth0.poe_enable}>"
)
gateway_port_eth1 = gateway.ports["eth1"]
assert gateway_port_eth1.ifname == "eth1"
assert gateway_port_eth1.media is None
assert gateway_port_eth1.name == "lan"
assert gateway_port_eth1.port_idx is None
assert gateway_port_eth1.poe_class is None
assert gateway_port_eth1.poe_enable is None
assert gateway_port_eth1.poe_mode is None
assert gateway_port_eth1.poe_power is None
assert gateway_port_eth1.poe_voltage is None
assert gateway_port_eth1.portconf_id is None
assert gateway_port_eth1.port_poe is False
assert gateway_port_eth1.up is True
assert (
gateway_port_eth1.__repr__()
== f"<{gateway_port_eth1.name}: Poe {gateway_port_eth1.poe_enable}>"
)
gateway_port_eth2 = gateway.ports["eth2"]
assert gateway_port_eth2.ifname == "eth2"
assert gateway_port_eth2.media is None
assert gateway_port_eth2.name == "lan2"
assert gateway_port_eth2.port_idx is None
assert gateway_port_eth2.poe_class is None
assert gateway_port_eth2.poe_enable is None
assert gateway_port_eth2.poe_mode is None
assert gateway_port_eth2.poe_power is None
assert gateway_port_eth2.poe_voltage is None
assert gateway_port_eth2.portconf_id is None
assert gateway_port_eth2.port_poe is False
assert gateway_port_eth2.up is False
assert (
gateway_port_eth2.__repr__()
== f"<{gateway_port_eth2.name}: Poe {gateway_port_eth2.poe_enable}>"
)
| 5,343,032
|
def generate_diagram(acc, draw_fname):
"""
:param acc: acc
:param draw_file_base: base of the diagram
:return: None
"""
data = pd.DataFrame(acc, columns=['Property Concept', 'Metric', 'Value'])
ax = sns.barplot(x="Value", y="Property Concept",
hue="Metric",
data=data, linewidth=1.0,
# palette="colorblind",
palette="Spectral",
# palette="pastel",
# palette="ch:start=.2,rot=-.3",
# palette="YlOrBr",
# palette="Paired",
# palette="Set2",
orient="h")
# ax.legend_.remove()
# ax.legend(bbox_to_anchor=(1.01, 1), borderaxespad=0)
ax.legend(bbox_to_anchor=(1.0, -0.1), borderaxespad=0)
# ax.set_xlim(0, 1.0)
# ax.set_ylim(0, 0.7)
# Horizontal
ticks = ax.get_yticks()
new_ticks = [t for t in ticks]
texts = ax.get_yticklabels()
# print(ax.get_yticklabels())
labels = [t.get_text() for t in texts]
ax.set_yticks(new_ticks)
ax.set_yticklabels(labels, fontsize=8)
ax.set(xlabel=None, ylabel=None)
# print(ax.get_yticklabels())
plt.setp(ax.lines, color='k')
ax.figure.savefig('%s.svg' % draw_fname, bbox_inches="tight")
ax.figure.clf()
| 5,343,033
|
def plot_electoral_position_based_summary_stats(
data: Optional[pd.DataFrame] = None,
) -> None:
"""
Input data should be the "flattened" dataset.
"""
# Load default data
if data is None:
data = flatten_access_eval_2021_dataset()
# Only work against the post data for summary stats as there was no difference
# pre and post (trial / contact)
data = data[data[DatasetFields.trial] == "B - Post"]
# Plot basic stats
plot_summary_stats(
data,
subset_name="election-position-split-",
keep_cols=[DatasetFields.electoral_position],
plot_kwargs={
"column": alt.Column(DatasetFields.electoral_position, spacing=40)
},
)
| 5,343,034
|
def update_player_shots(settings, game_stats, player, player_shots, shields, invaders, invader_shots):
"""Update position of player shots, explode shots that reach a certain height and then remove them."""
# Update shot position.
player_shots.update()
for shot in player_shots:
# Color shots above certain position and only color them once.
if not shot.is_red and shot.rect.bottom < 150:
color_surface(shot.image, settings.red)
shot.is_red = True
# Set shot to exploded if position is at top of screen.
if not shot.exploded and shot.rect.top < 97:
shot.explode(shot.rect.x - (settings.block_size * 3), shot.rect.y - (settings.block_size * 6))
for block in shot.explosion.image:
color_surface(block.image, settings.red)
current_time = pygame.time.get_ticks()
# Show explosion for a little bit and then remove it.
if shot.exploded and current_time - shot.explosion.timer > 300:
player_shots.remove(shot)
player.has_active_shot = False
check_shot_shield_collisions(settings, player_shots, shields)
check_shot_shot_collision(settings, player_shots, invader_shots)
check_shot_invader_collisions(settings, game_stats, player_shots, invaders, player)
| 5,343,035
|
def get_repo_dir():
"""Get repository root directory"""
root_dir = './'
if os.path.isdir(Path(__file__).parent.parent / 'src'):
root_dir = f"{str((Path(__file__).parent.parent).resolve())}/"
elif os.path.isdir('../src'):
root_dir = '../'
elif os.path.isdir('./src'):
root_dir = './'
else:
log.warning('ROOT FOLDER NOT FOUND.')
return root_dir
| 5,343,036
|
def colorize(data, colors, display_ranges):
"""Example:
colors = 'white', (0, 1, 0), 'red', 'magenta', 'cyan'
display_ranges = np.array([
[100, 3000],
[700, 5000],
[600, 3000],
[600, 4000],
[600, 3000],
])
rgb = fig4.colorize(data, colors, display_ranges)
plt.imshow(rgb)
"""
color_map = np.array([to_rgba(c)[:3] for c in colors])
dr = display_ranges[..., None, None]
normed = (data - dr[:, 0]) / (dr[:, 1] - dr[:, 0] )
# there's probably a nicer way to do this
rgb = (color_map.T[..., None, None] * normed[None, ...]).sum(axis=1)
return rgb.clip(min=0, max=1).transpose([1, 2, 0])
| 5,343,037
|
def multi_mdf(S, all_drGs, constraints, ratio_constraints=None, net_rxns=[],
all_directions=False, x_max=0.01, x_min=0.000001,
T=298.15, R=8.31e-3):
"""Run MDF optimization for all condition combinations
ARGUMENTS
S : pandas.DataFrame
Pandas DataFrame that corresponds to the stoichiometric matrix. Column
names are reaction IDs and row indices are compound names.
all_drGs : pandas.DataFrame
Pandas DataFrame with reaction IDs in the first column, condition
identifier strings in the intermediate columns, and reaction standard
Gibbs energies in float format in the last column.
constraints : pandas.DataFrame
Pandas DataFrame with a compound ID column (string), a lower
concentration bound column (float) and an upper concentration bound
        column (float).
ratio_constraints : pandas.DataFrame, optional
Pandas DataFrame with two compound ID columns (string), a lower limit
concentration ratio column (float), an upper limit concentration ratio
column (float) and the concentration ratio range step number (int). The
third column is interpreted as the fixed ratio when the fourth column
contains a None value. The last column indicates the type of spacing to
use for ratio ranges (linear or logarithmic).
net_rxns : list of strings
List with strings referring to the background network reactions for
network-embedded MDF analysis (NEM). The reactions should be in S.
all_directions : bool, optional
Set to True to calculate MDF for all possible reaction direction
combinations. Not recommended for sets of reactions >20.
x_max : float
Maximum default metabolite concentration (M).
x_min : float
Minimum default metabolite concentration (M).
T : float
Temperature (K).
R : float
Universal gas constant (kJ/(mol*K)).
RETURNS
mdf_table : pandas.DataFrame
A Pandas DataFrame containing all MDF results for a single pathway. Each
row corresponds to one individual MDF optimization, with the parameters
described in the columns:
v0 ... : string
Condition identifiers as supplied in all_drGs.
drG_std(rxn_id) : float
The standard reaction Gibbs energy for the reaction 'rxn_id'.
[cpd_id_num]/[cpd_id_den] ... : float
Ratio of concentration between compounds 'cpd_id_num' and
'cpd_id_den'.
dir(rxn_id) ... : int
The direction used for the reaction 'rxn_id'. The order is the same
as the columns in S.
[cpd_id] ... : float
Optimized concentration for compound 'cpd_id' (M).
drG_opt(rxn_id) : float
The optimized reaction Gibbs energy for reaction 'rxn_id' (kJ/mol).
success : int
Indicates optimization success (1) or failure (0).
MDF : float
The Max-min Driving Force determined through linear optimization
(kJ/mol).
"""
# All drGs
# -> All ratio combinations
# -> All directions
# Number of reactions
n_rxn = S.shape[1]
# List the condition identifiers
conditions = list(all_drGs.columns[1:-1])
# Create column labels for output DataFrame
if ratio_constraints is not None:
ratio_labels = [
'ratio_' + ratio_constraints.iloc[row,:]['cpd_id_num'] + \
'_' + ratio_constraints.iloc[row,:]['cpd_id_den'] \
for row in range(ratio_constraints.shape[0])
]
else:
ratio_labels = []
column_labels = [
*conditions,
*['drGstd_' + rxn_id for rxn_id in list(S.columns)],
*ratio_labels,
*['dir_' + rxn_id for rxn_id in list(S.columns)],
*['c_' + cpd_id for cpd_id in list(S.index)],
*['drGopt_' + rxn_id for rxn_id in list(S.columns)],
'success',
'MDF'
]
# Also create labels for sorting (conditions, ratios and directions)
sort_labels = [
*conditions,
*ratio_labels,
*['dir_' + rxn_id for rxn_id in list(S.columns)]
]
# Iterator preparation
def prep_iter():
# Set up conditions iterator
if len(conditions):
cond_iter = all_drGs[conditions].drop_duplicates().iterrows()
else:
cond_iter = [None]
# Set up directions iterator
if not all_directions:
dir_iter = [[1.0]*n_rxn]
else:
dir_iter = itertools.product([1.0,-1.0], repeat=n_rxn)
# Set up ratios iterator
if ratio_constraints is not None:
rats_iter = ratio_iter(ratio_constraints)
else:
rats_iter = [None]
# Set up fixed concentration range constraints iterator
cons_iter = con_iter(constraints)
return itertools.product(cond_iter, dir_iter, rats_iter, cons_iter)
# Set up output DataFrame
mdf_table = pd.DataFrame(columns = column_labels)
# Determine number of rows that will be produced
M = 0
for i in prep_iter():
M += 1
# Iterate over all combinations of conditions, directions and ratios
n = 0
for params in prep_iter():
n += 1
progress = float(n / M * 100)
sWrite("\rPerforming MDF optimization... %0.1f%%" % progress)
# Extract specific condition, direction and ratio constraints
if params[0] is not None:
condition = pd.DataFrame(params[0][1]).T
else:
condition = None
direction = params[1]
rats = params[2]
constraints_mod = params[3]
# Obtain specific standard reaction Gibbs energies with correct sign
if condition is not None:
drGs = pd.merge(condition, all_drGs)
else:
drGs = all_drGs
drGs.is_copy = False
drGs.loc[:,['drG']] = drGs['drG'] * direction
# Modify direction (sign) of reactions in the stoichiometric matrix
S_mod = S * direction
# Set up MDF inputs
c = mdf_c(S_mod)
A = mdf_A(S_mod, net_rxns)
b = mdf_b(S_mod, drGs, constraints_mod, x_max, x_min, T, R)
# Use equality (ratio) constraints if they were specified
if rats is not None:
A_eq = mdf_A_eq(S_mod, rats)
b_eq = mdf_b_eq(rats)
# If the ratio constraints have been filtered out, set to None
if not A_eq.size or not b_eq.size:
A_eq = None
b_eq = None
else:
A_eq = None
b_eq = None
# Perform MDF
mdf_result = mdf(c, A, b, A_eq, b_eq)
# Prepare conditions list
if condition is not None:
conditions_list = list(condition.iloc[0,:])
else:
conditions_list = []
# Prepare ratios list
if rats is not None:
rats_list = list(rats.ratio)
else:
rats_list = []
# Format results row
mdf_row = [
*conditions_list,
*[float(drGs[drGs.rxn_id == rxn_id]['drG']) for rxn_id in S_mod.columns],
*rats_list,
*direction,
]
if mdf_result.success:
mdf_row.extend([
*np.exp(mdf_result.x[:-1]), # Concentrations
*calc_drGs(S_mod, drGs, mdf_result.x[:-1]), # Reaction Gibbs energies
1.0, # Success
mdf_result.x[-1]*R*T # MDF value
])
else:
mdf_row.extend([
*[np.nan]*S_mod.shape[0], # Concentrations
*[np.nan]*S_mod.shape[1], # Reaction Gibbs energies
0.0, # Failure
np.nan # No MDF value
])
# Append row to expected result
mdf_table = mdf_table.append(pd.DataFrame([mdf_row], columns = column_labels))
return mdf_table.sort_values(sort_labels)
| 5,343,038
|
def cache_data(datatable, data, **kwargs):
"""Stores the object list in the cache under the appropriate key."""
cache_key = "%s%s" % (CACHE_PREFIX, datatable.get_cache_key(**kwargs))
log.debug("Setting data to cache at %r: %r", cache_key, data)
cache.set(cache_key, data)
| 5,343,039
|
def rank_zero_warn(*args, **kwargs) -> None:
"""Warning only if (sub)process has rank 0."""
global _proc_rank
if _proc_rank == 0:
warnings.warn(*args, **kwargs)
| 5,343,040
|
def test_loop_dist_matrix(X_n120) -> None:
"""
Tests to ensure the proper results are returned when supplying the
appropriate format distance and neighbor matrices.
:param X_n120: A pytest Fixture that generates 120 observations.
:return: None
"""
# generate distance and neighbor indices
neigh = NearestNeighbors(metric='euclidean')
neigh.fit(X_n120)
d, idx = neigh.kneighbors(X_n120, n_neighbors=10, return_distance=True)
# fit loop using data and distance matrix
clf1 = loop.LocalOutlierProbability(X_n120, use_numba=NUMBA)
clf2 = loop.LocalOutlierProbability(distance_matrix=d, neighbor_matrix=idx,
use_numba=NUMBA)
scores1 = clf1.fit().local_outlier_probabilities
scores2 = clf2.fit().local_outlier_probabilities
# compare the agreement between the results
assert_almost_equal(scores1, scores2, decimal=1)
| 5,343,041
|
def nufft_adjoint(input, coord, oshape=None, oversamp=1.25, width=4.0, n=128):
"""Adjoint non-uniform Fast Fourier Transform.
Args:
input (array): Input Fourier domain array.
coord (array): coordinate array of shape (..., ndim).
ndim determines the number of dimension to apply nufft adjoint.
oshape (tuple of ints): output shape.
oversamp (float): oversampling factor.
width (float): interpolation kernel full-width in terms of oversampled grid.
n (int): number of sampling points of interpolation kernel.
Returns:
array: Transformed array.
See Also:
:func:`sigpy.nufft.nufft`
"""
device = backend.get_device(input)
xp = device.xp
ndim = coord.shape[-1]
beta = np.pi * (((width / oversamp) * (oversamp - 0.5))**2 - 0.8)**0.5
if oshape is None:
oshape = list(input.shape[:-coord.ndim + 1]) + estimate_shape(coord)
else:
oshape = list(oshape)
with device:
coord = _scale_coord(backend.to_device(coord, device), oshape, oversamp)
kernel = backend.to_device(
_kb(np.arange(n, dtype=coord.dtype) / n, width, beta, coord.dtype), device)
os_shape = oshape[:-ndim] + [_get_ugly_number(oversamp * i) for i in oshape[-ndim:]]
output = interp.gridding(input, os_shape, width, kernel, coord)
for a in range(-ndim, 0):
i = oshape[a]
os_i = os_shape[a]
idx = xp.arange(i, dtype=input.dtype)
os_shape[a] = i
# Swap axes
output = output.swapaxes(a, -1)
os_shape[a], os_shape[-1] = os_shape[-1], os_shape[a]
# Oversampled IFFT
output = ifft(output, axes=[-1], norm=None)
output *= os_i / i**0.5
output = util.resize(output, os_shape)
# Calculate apodization
apod = (beta**2 - (np.pi * width * (idx - i // 2) / os_i)**2)**0.5
apod /= xp.sinh(apod)
# Apodize
output *= apod
# Swap back
output = output.swapaxes(a, -1)
os_shape[a], os_shape[-1] = os_shape[-1], os_shape[a]
return output
| 5,343,042
|
def get_bustools_version():
"""Get the provided Bustools version.
This function parses the help text by executing the included Bustools binary.
:return: tuple of major, minor, patch versions
:rtype: tuple
"""
p = run_executable([get_bustools_binary_path()], quiet=True, returncode=1)
match = VERSION_PARSER.match(p.stdout.read())
return tuple(int(ver) for ver in match.groups()) if match else None
| 5,343,043
|
def main():
"""
Args: none
Returns: exit code
Usage: python -m rununittest
"""
# Can use unittest or nose; nose here, which allows --with-coverage.
import nose
return nose.run(argv=[sys.argv[0], "-s", "--with-coverage", "rununittest"])
| 5,343,044
|
def request_from_url(url):
"""Parses a gopher URL and returns the corresponding Request instance."""
pu = urlparse(url, scheme='gopher', allow_fragments=False)
t = '1'
s = ''
if len(pu.path) > 2:
t = pu.path[1]
s = pu.path[2:]
if len(pu.query) > 0:
s = s + '?' + pu.query
p = '70'
if pu.port:
p = str(pu.port)
return Request(t, pu.hostname, p, s)
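
# Usage sketch (illustrative): a path of "/1/files" splits into item type "1" and
# selector "/files", with the default gopher port of 70.
def _request_from_url_example():
    req = request_from_url("gopher://example.org/1/files")
    print(req)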
| 5,343,045
|
def getJsonPath(name, moduleFile):
"""
    Get the path of the JSON configuration file:
    1. First look for the JSON file in the current working directory
    2. If it is not found there, look in the directory where the module is located
"""
currentFolder = os.getcwd()
currentJsonPath = os.path.join(currentFolder, name)
if os.path.isfile(currentJsonPath):
return currentJsonPath
else:
moduleFolder = os.path.abspath(os.path.dirname(moduleFile))
moduleJsonPath = os.path.join(moduleFolder, '.', name)
return moduleJsonPath
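
# Usage sketch (illustrative): resolve a hypothetical "config.json" next to the
# working directory first, falling back to this module's own directory.
def _getJsonPath_example():
    print(getJsonPath("config.json", __file__))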
| 5,343,046
|
def enable_pause_data_button(n, interval_disabled):
"""
Enable the play button when data has been loaded and data *is* currently streaming
"""
if n and n[0] < 1: return True
if interval_disabled:
return True
return False
| 5,343,047
|
def convertFiles(directory: str):
"""Convert files downloaded from Solar Atlas."""
files_list = glob.glob(os.path.join(directory, "*.tif"))
if len(files_list) > 0:
logging.info("Converting files")
if not os.path.exists(
os.path.join(os.path.dirname(directory), "converted_files")
):
os.mkdir(os.path.join(os.path.dirname(directory), "converted_files"))
for source_file in files_list:
logging.info(source_file)
dest_file = os.path.join(
os.path.dirname(directory),
"converted_files",
os.path.basename(source_file),
)
os.system( # nosec
"gdalwarp {source_file} {dest_file} -of GTIFF -s_srs EPSG:4326 -t_srs {outputSRS} --config GDAL_PAM_ENABLED NO -co COMPRESS=DEFLATE -co BIGTIFF=YES".format(
source_file=source_file, dest_file=dest_file, outputSRS=SRS
)
)
else:
logging.info("There are no tif files to extract")
| 5,343,048
|
def dProj(z, dist, input_unit='deg', unit='Mpc'):
"""
Projected distance, physical or angular, depending on the input units (if
input_unit is physical, returns angular, and vice-versa).
The units can be 'cm', 'ly' or 'Mpc' (default units='Mpc').
"""
if input_unit in ('deg', 'arcmin', 'arcsec'):
Da = dA(z, unit=unit)
else:
Da = dA(z, unit=input_unit)
# from angular to physical
if input_unit == 'deg':
dist = Da * scipy.pi * dist / 180
elif input_unit == 'arcmin':
dist = Da * scipy.pi * dist / (180 * 60)
elif input_unit == 'arcsec':
dist = Da * scipy.pi * dist / (180 * 3600)
# from physical to angular
if unit == 'deg':
dist = dist * 180 / (scipy.pi * Da)
elif unit == 'arcmin':
dist = dist * 180 * 60 / (scipy.pi * Da)
elif unit == 'arcsec':
dist = dist * 180 * 3600 / (scipy.pi * Da)
return dist
| 5,343,049
|
def savgoldiff(x, dt, params=None, options={}, dxdt_truth=None, tvgamma=1e-2, padding='auto',
optimization_method='Nelder-Mead', optimization_options={'maxiter': 10}, metric='rmse'):
"""
Optimize the parameters for pynumdiff.linear_model.savgoldiff
See pynumdiff.optimize.__optimize__ and pynumdiff.linear_model.savgoldiff for detailed documentation.
"""
# initial condition
if params is None:
orders = [2, 3, 5, 7, 9, 11, 13]
window_sizes = [3, 10, 30, 50, 90, 130, 200, 300]
smoothing_wins = [3, 10, 30, 50, 90, 130, 200, 300]
params = []
for order in orders:
for window_size in window_sizes:
for smoothing_win in smoothing_wins:
params.append([order, window_size, smoothing_win])
# param types and bounds
params_types = [int, int, int]
params_low = [1, 3, 3]
params_high = [12, 1e3, 1e3]
# optimize
func = pynumdiff.linear_model.savgoldiff
args = [func, x, dt, params_types, params_low, params_high, options, dxdt_truth, tvgamma, padding, metric]
opt_params, opt_val = __optimize__(params, args, optimization_method=optimization_method,
optimization_options=optimization_options)
return opt_params, opt_val
| 5,343,050
|
def credentials(scope="module"):
"""
Note that these credentials match those mentioned in test.htpasswd
"""
h = Headers()
h.add('Authorization',
          'Basic ' + base64.b64encode(b"username:password").decode("ascii"))
return h
| 5,343,051
|
def assign_oe_conformers(oe_mol: "OEMol", conformers: List[unit.Quantity]):
"""Assign a set of conformers to an OE molecule, overwriting any existing
ones."""
from openeye import oechem
oe_mol.DeleteConfs()
    assert len(conformers) > 0, "at least one conformer must be provided."
for conformer in conformers:
oe_mol.NewConf(
oechem.OEFloatArray(conformer.value_in_unit(unit.angstrom).flatten())
)
| 5,343,052
|
def _score_match(matchinfo: bytes, form, query) -> float:
""" Score how well the matches form matches the query
0.5: half of the terms match (using normalized forms)
1: all terms match (using normalized forms)
2: all terms are identical
3: all terms are identical, including case
"""
try:
if form == query:
return 3
if form.lower() == query.lower():
return 2
# Decode matchinfo blob according to https://www.sqlite.org/fts3.html#matchinfo
offset = 0
num_cols = int.from_bytes(matchinfo[offset : offset + 4], sys.byteorder)
offset += 4
tokens = int.from_bytes(matchinfo[offset : offset + 4], sys.byteorder)
offset += num_cols * 4
matched_tokens = int.from_bytes(matchinfo[offset : offset + 4], sys.byteorder)
# print(matchinfo, form, query, matched_tokens, tokens)
return matched_tokens / tokens
except Exception as e:
print(e)
raise
| 5,343,053
|
def _set_bias(clf, X, Y, recall, fpos, tneg):
"""Choose a bias for a classifier such that the classification
rule
clf.decision_function(X) - bias >= 0
has a recall of at least `recall`, and (if possible) a false positive rate
of at most `fpos`
    Parameters
    ----------
clf : Classifier
classifier to use
X : array-like [M-examples x N-dimension]
feature vectors
    Y : array [M-examples]
Binary classification
recall : float
Minimum fractional recall
fpos : float
Desired Maximum fractional false positive rate
tneg : int
Total number of negative examples (including previously-filtered
examples)
"""
df = clf.decision_function(X).ravel()
r = _recall_bias(df[Y == 1], recall)
f = _fpos_bias(df[Y == 1], fpos, tneg)
return min(r, f)
| 5,343,054
|
def get_additional_params(model_klass: Type['Model']) -> Dict[str, Any]:
"""
    By default, we don't need additional params for FB API requests. But in some instances (i.e. fetching Comments),
adding parameters makes fetching data simpler
"""
assert issubclass(model_klass, abstractcrudobject.AbstractCrudObject)
return _default_additional_params.get(model_klass, {})
| 5,343,055
|
def thread_loop(run):
"""decorator to make the function run in a loop if it is a thread"""
def fct(self, *args, **kwargs):
if self.use_thread:
while True:
                run(self, *args, **kwargs)
else:
            run(self, *args, **kwargs)
return fct
| 5,343,056
|
def rand_index(pred_cluster: Dict, target_cluster: Dict) -> float:
"""Use contingency_table to get RI directly
    RI = Accuracy = (TP+TN)/(TP+TN+FP+FN)
Args:
pred_cluster: Dict element:cluster_id (cluster_id from 0 to max_size)| predicted clusters
target_cluster: Dict element:cluster_id (cluster_id from 0 to max_size) | target clusters
Return:
RI (float)
"""
pred_cluster_ = helper_trans_to_element2clusterid(pred_cluster)
target_cluster_ = helper_trans_to_element2clusterid(target_cluster)
pred_cluster_size = len(pred_cluster_)
target_cluster_size = len(target_cluster_)
contingency_table = np.zeros((pred_cluster_size,target_cluster_size))
for i, p_cluster in enumerate(pred_cluster_):
for j, t_cluster in enumerate(target_cluster_):
#find common element
l = [*p_cluster,*t_cluster]
contingency_table[i][j] = len(l) - len(set(l))
s = comb(np.sum(contingency_table), 2)
a = 0
for i in np.nditer(contingency_table):
a += comb(i,2)
return a/s
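
# Usage sketch (illustrative): both arguments map elements to cluster ids; the exact
# value depends on the module's helper_trans_to_element2clusterid helper, which is
# assumed to group elements by cluster id as its name suggests.
def _rand_index_example():
    pred = {"a": 0, "b": 0, "c": 1, "d": 1}
    target = {"a": 0, "b": 1, "c": 1, "d": 1}
    print("RI = %.3f" % rand_index(pred, target))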
| 5,343,057
|
def retrieve(lims, csv, acc):
"""Saves matching account to csv"""
account = getAccount(lims,acc)
print ("Writing results to {0}".format(csv))
# unicode(, "utf8")
with open(csv, 'w') as csv_file:
csv_file.write( getAccountCSV(account) )
| 5,343,058
|
def _strToDateTimeAndStamp(incoming_v, timezone_required=False):
"""Test (and convert) datetime and date timestamp values.
@param incoming_v: the literal string defined as the date and time
@param timezone_required: whether the timezone is required (ie, for date timestamp) or not
@return datetime
@rtype: datetime.datetime
@raise ValueError: invalid datetime or date timestamp
"""
# First, handle the timezone portion, if there is any
(v, tzone) = _returnTimeZone(incoming_v)
# Check on the timezone. For time date stamp object it is required
if timezone_required and tzone is None:
raise ValueError("Invalid datetime %s" % incoming_v)
# The microseconds should be handled here...
final_v = v
milliseconds = 0
milpattern = "(.*)(\.)([0-9]*)"
match = re.match(milpattern, v)
if match is not None:
# we have a millisecond portion...
try:
final_v = match.groups()[0]
milliseconds = int(match.groups()[2])
except:
raise ValueError("Invalid datetime %s" % incoming_v)
#
# By now, the pattern should be clear
# This may raise an exception...
try:
tstr = time.strptime(final_v, "%Y-%m-%dT%H:%M:%S")
if tzone is not None:
return datetime.datetime(tstr.tm_year, tstr.tm_mon, tstr.tm_mday, tstr.tm_hour, tstr.tm_min, tstr.tm_sec,
milliseconds, tzone)
else:
return datetime.datetime(tstr.tm_year, tstr.tm_mon, tstr.tm_mday, tstr.tm_hour, tstr.tm_min, tstr.tm_sec,
milliseconds)
except:
raise ValueError("Invalid datetime %s" % incoming_v)
| 5,343,059
|
def get_price_for_market_stateless(result):
"""Returns the price for the symbols that the API doesnt follow the market state (ETF, Index)"""
## It seems that for ETF symbols it uses REGULAR market fields
return {
"current": result['regularMarketPrice']['fmt'],
"previous": result['regularMarketPreviousClose']['fmt'],
"change": result['regularMarketChange']['fmt'],
"percent": result['regularMarketChangePercent']['fmt']
}
| 5,343,060
|
def make_sure_not_modified(arg):
""" Function checking whether annotation of SomeList is never resized
and never modified, useful for debugging. Does nothing when run directly
"""
return arg
| 5,343,061
|
def jump(current_command):
"""Return Jump Mnemonic of current C-Command"""
#jump exists after ; if ; in string. Always the last part of the command
if ";" in current_command:
command_list = current_command.split(";")
return command_list[-1]
else:
return ""
| 5,343,062
|
def get_veterans(uname=None):
"""
    @purpose: Runs SQL commands to query the database for information on veterans.
@args: The username of the veteran. None if the username is not provided.
@returns: A list with one or more veterans.
"""
vet = None
if uname:
command = "SELECT * FROM veterans WHERE username = '{}' ".format(uname)
else:
command = "SELECT * FROM veterans"
with sql.connect(DATABASE) as con:
cur = con.cursor()
cur.execute(command)
if uname:
vet = cur.fetchone()
else:
vet = cur.fetchall()
cur.close()
if vet is not None and len(vet) > 10:
return vet[0:10]
else:
return vet
| 5,343,063
|
def install():
"""
Starts the coverage tool if the coverage dir directive is set.
"""
if os.environ.get('COVERAGE_DIR', None):
_install()
| 5,343,064
|
def main():
"""
Dump info about a given font file.
"""
# pylint: disable=too-many-locals
parser = argparse.ArgumentParser()
parser.add_argument('fontfile', help='Path to font file to inspect.')
args = parser.parse_args()
print(args.fontfile)
fname, _, chars = query_font(args.fontfile)
print(fname)
mincode = min(c.code for c in chars)
maxcode = max(c.code for c in chars)
print('{} chars, codes from [0x{:04x}]-[0x{:04x}]'.format(len(chars), mincode, maxcode))
blockcounts = defaultdict(int)
catcounts = defaultdict(int)
for c in chars:
blockcounts[c.block] += 1
catcounts[c.category] += 1
print('Category counts:')
for catid, catdesc in CATEGORIES.items():
cct = catcounts[catid]
if cct > 0:
print('\t{}: {}'.format(catdesc, cct))
print('Block counts:')
for b in BLOCKS:
k = b[2]
bct = blockcounts[k]
if bct > 0:
print('\t{}: {}'.format(k, bct))
| 5,343,065
|
def multiline(xs, ys, c=None, ax=None, **kwargs):
"""
Plot lines with different colorings
Adapted from: https://stackoverflow.com/a/50029441/2565317
Parameters
----------
xs : iterable container of x coordinates
ys : iterable container of y coordinates
c : iterable container of numbers mapped to colormap
ax (optional): Axes to plot on.
kwargs (optional): passed to LineCollection
EXAMPLE:
xs = [[0, 1],
[0, 1, 2]]
ys = [[0, 0],
[1, 2, 1]]
c = [0, 1]
lc = multiline(xs, ys, c, cmap='bwr', lw=2)
Notes:
len(xs) == len(ys) == len(c) is the number of line segments
len(xs[i]) == len(ys[i]) is the number of points for each line (indexed by i)
Returns
-------
lc : LineCollection instance.
"""
from matplotlib.collections import LineCollection
# find axes
ax = plt.gca() if ax is None else ax
n = len(xs)
if c is None:
c = np.linspace(0, 1, n)
# create LineCollection
segments = [np.column_stack([x, y]) for x, y in zip(xs, ys)]
lc = LineCollection(segments, **kwargs)
# set coloring of line segments
    # Note: set_array expects an array, so convert c here (passing a plain list can raise an error)
lc.set_array(np.asarray(c))
# add lines to axes and rescale
    # Note: adding a collection doesn't autoscale xlim/ylim
ax.add_collection(lc)
ax.autoscale()
return lc
| 5,343,066
|
def main():
"""PARSE AND VALIDATE INTEGRATION PARAMS."""
command = demisto.command()
params = demisto.params()
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
api_key = params.get('apikey', {})
demisto.info(f'Command being called is {command}')
client = Client(
base_url=BASE_URL,
verify=verify_certificate,
proxy=proxy,
ok_codes=[200],
auth=requests.auth.HTTPBasicAuth(api_key, '')
)
try:
'''EXECUTION CODE'''
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
return_results(test_module(client, params))
elif demisto.command() == 'fetch-incidents':
last_run = demisto.getLastRun()
last_run_curr, events = fetch_incidents(client, last_run, params)
demisto.setLastRun(last_run_curr)
demisto.incidents(events)
else:
COMMAND_TO_FUNCTION = {
'bitsight-company-details-get': company_details_get_command,
"bitsight-company-findings-get": company_findings_get_command,
"bitsight-companies-guid-get": companies_guid_get_command,
}
if COMMAND_TO_FUNCTION.get(demisto.command()):
args = demisto.args()
remove_nulls_from_dictionary(trim_spaces_from_args(args))
return_results(COMMAND_TO_FUNCTION[demisto.command()](client, args)) # type: ignore
else:
raise NotImplementedError(f'Command {demisto.command()} is not implemented')
# Log exceptions
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{e}')
| 5,343,067
|
def mock_movement_handler() -> AsyncMock:
"""Get an asynchronous mock in the shape of an MovementHandler."""
return AsyncMock(spec=MovementHandler)
| 5,343,068
|
def compute( op , x , y ):
"""Compute the value of expression 'x op y', where -x and y
are two integers and op is an operator in '+','-','*','/'"""
if (op=='+'):
return x+y
elif op=='-':
return x-y
elif op=='*':
return x*y
elif op=='/':
return x/y
else:
return 0
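# Hedged usage sketch (added for illustration):
assert compute('+', 2, 3) == 5
assert compute('/', 7, 2) == 3.5   # true division in Python 3
assert compute('?', 2, 3) == 0     # unknown operators fall back to 0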
| 5,343,069
|
def _add_registration_to_section(reg_json, section):
"""
Add the Registration object to section.
"""
registration = Registration(data=reg_json)
section.registration = registration
section.grade_date = registration.grade_date
section.student_grade = registration.grade
section.is_auditor = registration.is_auditor
if section.is_source_eos() or section.is_source_sdb_eos():
# the student's actual enrollment
if registration.start_date is not None:
section.start_date = registration.start_date
if registration.end_date is not None:
section.end_date = registration.end_date
try:
section.student_credits = Decimal(registration.credits)
except InvalidOperation:
pass
| 5,343,070
|
def ensure_test_env(outpath=None):
"""Make an empty .env file for testing purposes."""
if outpath is None:
outpath = conf.PROJECT_ROOT / '.env'
else:
outpath = path(outpath)
if not outpath.exists():
# It's fine if it's empty, just make sure it exists
local('touch %s' % outpath)
| 5,343,071
|
def list_arg(raw_value):
"""argparse type for a list of strings"""
return str(raw_value).split(',')
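# Hedged usage sketch (added; the parser and the --tags flag are illustrative assumptions):
# parser = argparse.ArgumentParser()
# parser.add_argument("--tags", type=list_arg)
# parser.parse_args(["--tags", "a,b,c"]).tags -> ["a", "b", "c"]
assert list_arg("a,b,c") == ["a", "b", "c"]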
| 5,343,072
|
def create_tracking(slug, tracking_number):
"""Create tracking, return tracking ID
"""
tracking = {'slug': slug, 'tracking_number': tracking_number}
result = aftership.tracking.create_tracking(tracking=tracking, timeout=10)
return result['tracking']['id']
| 5,343,073
|
def get_indentation(line_):
"""
returns the number of preceding spaces
"""
return len(line_) - len(line_.lstrip())
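# Hedged usage sketch (added for illustration):
assert get_indentation("    indented line") == 4
assert get_indentation("no indent") == 0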
| 5,343,074
|
def main():
"""
    Find the 10001st prime.
    :return: the 10001st prime number
"""
primes = {2, }
for x in count(3, 2):
if prime(x):
primes.add(x)
if len(primes) >= 10001:
break
return sorted(primes)[-1]
| 5,343,075
|
def findDataById(objectids, level=None, version=None):
"""Return xml list of urls for each objectid."""
if sciflo.utils.isXml(objectids):
et, xmlNs = sciflo.utils.getXmlEtree(objectids)
objectids = et.xpath('.//_default:objectid/text()', xmlNs)
infoLoL = []
headerLoL = ['objectid', ['urls', 'url']]
if len(objectids) == 0: return sciflo.utils.list2Xml(infoLoL, headerLoL, 'resultSet', 'result')
datasetDict = {}
for regex in OBJECTIDREGEX_TO_DATASET_MAP.keys():
datasetDict[OBJECTIDREGEX_TO_DATASET_MAP[regex]] = []
for objectid in objectids:
found = False
for regex in OBJECTIDREGEX_TO_DATASET_MAP.keys():
if re.search(regex, objectid):
datasetDict[OBJECTIDREGEX_TO_DATASET_MAP[regex]].append(objectid)
found = True
break
if not found:
raise RuntimeError("Failed to match objectid %s to a dataset." % objectid)
datasetsToDo = [dataset for dataset in datasetDict.keys() if len(datasetDict[dataset]) > 0]
if len(datasetsToDo) > 1:
raise NotImplementedError("Multiple dataset handling not yet implemented.")
getDataByUrlFunc = eval(DATASET_TO_FUNC_MAP[datasetsToDo[0]])
urlDict = getDataByUrlFunc(datasetDict[datasetsToDo[0]], level=level, version=version)
for objid in objectids:
urls = urlDict.get(objid, [])
infoLoL.append([objid, urls])
return sciflo.utils.list2Xml(infoLoL, headerLoL, 'resultSet', 'result')
| 5,343,076
|
def node_parameter_parser(s):
"""Expects arguments as (address,range,probability)"""
    try:
        vals = s.split(",")
        address = int(vals[0])
        node_range = float(vals[1])  # renamed locally to avoid shadowing the built-in range()
        probability = float(vals[2])
        return address, node_range, probability
    except (ValueError, IndexError):
        raise argparse.ArgumentTypeError("Node parameters must be address,range,probability")
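# Hedged usage sketch (added; the parameter values are made up):
assert node_parameter_parser("3,2.5,0.9") == (3, 2.5, 0.9)
# Malformed input such as "3,oops" raises argparse.ArgumentTypeError.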
| 5,343,077
|
def launch(context, service_id, catalog_packages=""):
""" Initialize the module. """
return EnvManager(context=context, service_id=service_id,
catalog_packages=catalog_packages)
| 5,343,078
|
def handle_verification_token(request, token) -> [404, redirect]:
"""
This is just a reimplementation of what was used previously with OTC
https://github.com/EuroPython/epcon/pull/809/files
"""
token = get_object_or_404(Token, token=token)
logout(request)
user = token.user
user.is_active = True
user.save()
user = authenticate(uid=user.id)
login(request, user)
token.delete()
    messages.success(request, 'Email verification complete')
return redirect('user_panel:dashboard')
| 5,343,079
|
def multi_perspective_expand_for_2d(in_tensor, weights):
"""Given a 2d input tensor and weights of the appropriate shape,
weight the input tensor by the weights by multiplying them
together.
"""
# Shape: (num_sentence_words, 1, rnn_hidden_dim)
in_tensor_expanded = tf.expand_dims(in_tensor, axis=1)
# Shape: (1, multi_perspective_dims, rnn_hidden_dim)
weights_expanded = tf.expand_dims(weights, axis=0)
# Shape: (num_sentence_words, multi_perspective_dims, rnn_hidden_dim)
return tf.multiply(in_tensor_expanded, weights_expanded)
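# Hedged shape-check sketch (added; assumes TensorFlow 2.x, and the dimension sizes are made up):
import tensorflow as tf
_words, _mp_dims, _hidden = 30, 20, 128
_out = multi_perspective_expand_for_2d(tf.zeros([_words, _hidden]), tf.zeros([_mp_dims, _hidden]))
assert tuple(_out.shape) == (_words, _mp_dims, _hidden)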
| 5,343,080
|
def get_temp_url(src, *args):
"""
    Caches `src` as a file (of the given MIME type, when provided) in the images/temp folder and returns a link to the data.
    The generated URL is meant to be used only once; after it is accessed, the file is deleted from the machine.
    Arguments:
        src: `bytes` | `str` - If bytes, the raw data to be cached; if str, a path to a file in `images/static`.
        args[0]: `str` - The MIME type of the data. Supported types are defined by the built-in `mimetypes` module. Only needed when `src` is raw bytes.
Returns: `str` - URL to the data.
"""
if isinstance(src, bytes):
extension = mimetypes.guess_extension(args[0])
assert extension is not None, f"Unknown file format: {args[0]}"
data = src
    elif isinstance(src, str):
        extension = "." + src.split(".", maxsplit=1)[1]
        src = os.path.join("images", "static", src)
        with open(src, "rb") as infile:
            data = infile.read()
    filename = uuid.uuid4().hex + extension
    with open(os.path.join("images", "temp", filename), "wb") as outfile:
        outfile.write(data)
return "/".join(
[os.environ["HOST_URL"], os.environ["BOT_TOKEN"], "dynamic", filename]
)
| 5,343,081
|
def _import(package, plugin):
"""Import the given plugin file from a package"""
importlib.import_module(f"{package}.{plugin}")
| 5,343,082
|
def generateBasicAuthHeader(username, password):
"""
Generates a basic auth header
:param username: Username of user
:type username: str
:param password: Password of user
:type password: str
:return: Dict containing basic auth header
:rtype: dict
>>> generateBasicAuthHeader('test','test')
{'Authorization': 'Basic dGVzdDp0ZXN0'}
"""
    base64string = base64.b64encode(('%s:%s' % (username, password)).encode('utf-8')).decode('ascii')
return {'Authorization': 'Basic %s' % base64string}
| 5,343,083
|
def test_string_overuse(
assert_errors,
assert_error_text,
parse_ast_tree,
default_options,
strings,
string_value,
):
"""Ensures that over-used strings raise violations."""
tree = parse_ast_tree(strings.format(string_value))
visitor = StringOveruseVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [OverusedStringViolation])
assert_error_text(
visitor,
string_value.replace('"', '') or "''",
default_options.max_string_usages,
)
| 5,343,084
|
def correlation(df, target, limit=0, figsize=None, plot=True):
"""
Display Pearson correlation coefficient between target and numerical features
Return a list with low-correlated features if limit is provided
"""
numerical = list(df.select_dtypes(include=[np.number]))
numerical_f = [n for n in numerical if n not in target]
if not numerical_f:
print("There are no numerical features")
return
copy_df = df.copy()
for t in target:
if t not in numerical:
copy_df[t] = copy_df[t].astype(np.float16)
corr = copy_df.corr().loc[numerical_f, target].fillna(0).sort_values(target, ascending=False).round(2)
    if plot:
        if not figsize:
            figsize = (8, len(numerical_f) // 2 + 1)
        corr.plot.barh(figsize=figsize)
        plt.gca().invert_yaxis()
        if limit > 0:
            plt.axvline(x=-limit, color='k', linestyle='--')
            plt.axvline(x=limit, color='k', linestyle='--')
        plt.xlabel('Pearson correlation coefficient')
        plt.ylabel('feature')
    if limit:
        return corr.loc[abs(corr[target[0]]) < abs(limit)].index.tolist()
| 5,343,085
|
def fasta_to_raw_observations(raw_lines):
"""
Assume that the first line is the header.
@param raw_lines: lines of a fasta file with a single sequence
@return: a single line string
"""
lines = list(gen_nonempty_stripped(raw_lines))
if not lines[0].startswith('>'):
msg = 'expected the first line to start with ">"'
raise ValueError(msg)
data_lines = lines[1:]
return ''.join(data_lines)
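# Hedged usage sketch (added; assumes gen_nonempty_stripped simply yields stripped, non-empty lines):
# raw = [">seq1 description\n", "ACGT\n", "TTGA\n", "\n"]
# fasta_to_raw_observations(raw) -> "ACGTTTGA"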
| 5,343,086
|
def pc_proj(data, pc, k):
"""
get the eigenvalues of principal component k
"""
return np.dot(data, pc[k].T) / (np.sqrt(np.sum(data**2, axis=1)) * np.sqrt(np.sum(pc[k]**2)))
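# Hedged usage sketch (added; random data, and treating `pc` as rows of principal components is an assumption):
import numpy as np
_rng = np.random.default_rng(0)
_data = _rng.random((5, 3))                                          # 5 samples, 3 features
_pc = np.linalg.svd(_data - _data.mean(0), full_matrices=False)[2]   # rows are components
assert pc_proj(_data, _pc, 0).shape == (5,)                          # one projection value per sample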
| 5,343,087
|
def split_data():
"""Data spliter for train/val/test sets"""
path = "cochrane_collections/cochrane_summary_collection.json"
random.seed(13)
with open(path, "r", encoding="utf-8") as f:
cochrane_dict = json.load(f)
cochrane = [q for q in cochrane_dict]
random.shuffle(cochrane)
    total = len(cochrane)
    train_split = int(total * .7)
    valid_split = train_split + int(total * .1)
train = cochrane[0:train_split]
valid = cochrane[train_split:valid_split]
test = cochrane[valid_split:]
train = {q: cochrane_dict[q] for q in train}
valid = {q: cochrane_dict[q] for q in valid}
test = {q: cochrane_dict[q] for q in test}
print("Length of train/val/test: ", len(train), "/", len(valid), "/", len(test))
data_dict = {"train": train, "val": valid, "test": test}
for s in data_dict:
with open("cochrane_collections/cochrane_summary_{}_collection.json".format(s), "w", encoding="utf-8") as f:
json.dump(data_dict[s], f, indent=4)
| 5,343,088
|
def compare_time(time_str):
""" Compare timestamp at various hours """
t_format = "%Y-%m-%d %H:%M:%S"
if datetime.datetime.now() - datetime.timedelta(hours=3) <= \
datetime.datetime.strptime(time_str, t_format):
return 3
elif datetime.datetime.now() - datetime.timedelta(hours=6) <= \
datetime.datetime.strptime(time_str, t_format):
return 6
elif datetime.datetime.now() - datetime.timedelta(hours=12) <= \
datetime.datetime.strptime(time_str, t_format):
return 12
elif datetime.datetime.now() - datetime.timedelta(hours=24) <= \
datetime.datetime.strptime(time_str, t_format):
return 24
# Else catch all
return 100
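# Hedged usage sketch (added for illustration):
_two_hours_ago = (datetime.datetime.now() - datetime.timedelta(hours=2)).strftime("%Y-%m-%d %H:%M:%S")
assert compare_time(_two_hours_ago) == 3            # inside the 3-hour window
assert compare_time("2000-01-01 00:00:00") == 100   # older than 24 hours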
| 5,343,089
|
def trendline(xd, yd, order=1, c='r', alpha=1, Rval=True):
"""Make a line of best fit,
Set Rval=False to print the R^2 value on the plot"""
#Only be sure you are using valid input (not NaN)
idx = np.isfinite(xd) & np.isfinite(yd)
#Calculate trendline
coeffs = np.polyfit(xd[idx], yd[idx], order)
intercept = coeffs[-1]
slope = coeffs[-2]
power = coeffs[0] if order == 2 else 0
minxd = np.min(xd)
maxxd = np.max(xd)
xl = np.array([minxd, maxxd])
yl = power * xl ** 2 + slope * xl + intercept
#Plot trendline
plt.plot(xl, yl, c, alpha=alpha)
#Calculate R Squared
p = np.poly1d(coeffs)
ybar = np.sum(yd) / len(yd)
ssreg = np.sum((p(xd) - ybar) ** 2)
sstot = np.sum((yd - ybar) ** 2)
Rsqr = ssreg / sstot
if not Rval:
#Plot R^2 value
plt.text(0.8 * maxxd + 0.2 * minxd, 0.8 * np.max(yd) + 0.2 * np.min(yd),
'$R^2 = %0.2f$' % Rsqr)
else:
#Return the R^2 value:
return Rsqr
| 5,343,090
|
def groupby(key, seq):
""" Group a collection by a key function
>>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank']
>>> groupby(len, names) # doctest: +SKIP
{3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}
>>> iseven = lambda x: x % 2 == 0
>>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8]) # doctest: +SKIP
{False: [1, 3, 5, 7], True: [2, 4, 6, 8]}
Non-callable keys imply grouping on a member.
>>> groupby('gender', [{'name': 'Alice', 'gender': 'F'},
... {'name': 'Bob', 'gender': 'M'},
... {'name': 'Charlie', 'gender': 'M'}]) # doctest:+SKIP
{'F': [{'gender': 'F', 'name': 'Alice'}],
'M': [{'gender': 'M', 'name': 'Bob'},
{'gender': 'M', 'name': 'Charlie'}]}
See Also:
countby
"""
if not callable(key):
key = getter(key)
d = collections.defaultdict(lambda: [].append)
for item in seq:
d[key(item)](item)
rv = {}
for k, v in iteritems(d):
rv[k] = v.__self__
return rv
| 5,343,091
|
def evo():
"""Creates a test evolution xarray file."""
nevo = 20
gen_data = {1: np.arange(nevo),
2: np.sin(np.linspace(0, 2*np.pi, nevo)),
3: np.arange(nevo)**2}
data = {'X1': np.linspace(0.1, 1.7, nevo)*_unit_conversion['AU'],
'X2': np.deg2rad(np.linspace(60, 120, nevo)),
'X3': np.deg2rad(np.linspace(30, 80, nevo)),
'TIME': np.arange(nevo)*60*60*24,
'DT': np.arange(nevo),
'NSTEP': np.arange(nevo),
'D': scale_variable(gen_data[3], 'den')/_unit_conversion['den'],
'T': scale_variable(gen_data[2], 'temp'),
'V1': scale_variable(gen_data[1], 'vel')/_unit_conversion['vel'],
'V2': scale_variable(gen_data[2], 'vel')/_unit_conversion['vel'],
'V3': scale_variable(gen_data[3], 'vel')/_unit_conversion['vel'],
'B1': scale_variable(gen_data[1], 'mag'),
'B2': scale_variable(gen_data[2], 'mag'),
'B3': scale_variable(gen_data[3], 'mag'),
'DP': np.linspace(0, 0.1, nevo),
'BP': np.linspace(-1, 1, nevo)}
    # Need to make DataArrays for all of the variables with the single 'nevo' dim
for x in data:
data[x] = xr.DataArray(data[x], dims=['nevo'])
ds = xr.Dataset(data, coords={'nevo': np.arange(nevo)})
ds.attrs = {'label': 'earth',
'rundate_cal': "2010-01-01T00"}
with NamedTemporaryFile(suffix='.nc') as f:
ds.to_netcdf(f.name)
evo = read_evo(f.name)
return evo
| 5,343,092
|
def jinja_calc_buffer(fields: List[Any], category: Optional[str] = None) -> int:
"""calculate buffer for list of fields based on their length"""
if category:
fields = [f for f in fields if f.category == category]
return max(len(f.to_string()) for f in fields)
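# Hedged usage sketch (added; FakeField is a stand-in, not the project's real field type):
class FakeField:
    def __init__(self, name, category=None):
        self.name = name
        self.category = category
    def to_string(self):
        return self.name

assert jinja_calc_buffer([FakeField("id"), FakeField("description")]) == len("description")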
| 5,343,093
|
def get_delete_op(op_name):
""" Determine if we are dealing with a deletion operation.
Normally we just do the logic in the last return. However, we may want
special behavior for some types.
:param op_name: ctx.operation.name.split('.')[-1].
:return: bool
"""
return 'delete' == op_name
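# Hedged usage sketch (added; the operation names are illustrative):
assert get_delete_op('delete') is True
assert get_delete_op('create') is False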
| 5,343,094
|
def label(img_id):
"""
GET: return the current label for <img_id>
    PUT: update the current label for <img_id>
"""
if request.method == 'PUT':
#TODO: figure out how to get `request` to properly parse json on PUT
req_dict = json.loads(request.data.decode())
with connection.cursor() as cursor:
label_id = req_dict['label_id']
sql = "update {} set label_id={} where id={}".format(table, label_id, img_id)
cursor.execute(sql)
app.logger.info("updated id={} to label_id={}".format(img_id, label_id))
return jsonify(status='ok')
else:
with connection.cursor() as cursor:
sql = "select * from {} where id={}".format(table, img_id)
cursor.execute(sql)
app.logger.info("queried for id={}".format(img_id))
result = cursor.fetchone()
return jsonify(result)
| 5,343,095
|
def random_radec(nsynths, ra_lim=[0, 360], dec_lim=[-90, 90],
random_state=None, **kwargs):
"""
Generate random ra and dec points within a specified range.
All angles in degrees.
Parameters
----------
nsynths : int
Number of random points to generate.
ra_lim : list-like, optional
ra limits.
dec_lim : list-like, optional
dec limits.
    random_state : `None`, int, list of ints, or `numpy.random.RandomState`
        If ``random_state`` is `None`, use the `~numpy.random.RandomState`
        singleton used by ``numpy.random``. If ``random_state`` is an `int`,
        use a new `~numpy.random.RandomState` instance seeded with
        ``random_state``. If it is already a `~numpy.random.RandomState`,
        use it. Otherwise raise ``ValueError``.
Returns
-------
points : 2d ndarray
Random ra and dec points in degrees.
"""
rng = check_random_state(random_state)
ra_lim = np.deg2rad(np.asarray(ra_lim))
dec_lim = np.deg2rad(np.asarray(dec_lim))
zlim = np.sin(dec_lim)
z = zlim[0] + zlim.ptp() * rng.uniform(size=int(nsynths))
ra = ra_lim[0] + ra_lim.ptp() * rng.uniform(size=int(nsynths))
dec = np.arcsin(z)
ra, dec = np.rad2deg(ra), np.rad2deg(dec)
points = np.array([ra, dec]).T
return points
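# Hedged usage sketch (added; assumes check_random_state accepts an int seed):
# pts = random_radec(100, ra_lim=[0, 90], dec_lim=[-30, 30], random_state=42)
# pts.shape == (100, 2); ra values fall in [0, 90] and dec values in [-30, 30]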
| 5,343,096
|
def test_modules_bump_versions_single_module(self):
"""Test updating a single module"""
# Change the star/align version to an older version
main_nf_path = os.path.join(self.nfcore_modules, "modules", "star", "align", "main.nf")
with open(main_nf_path, "r") as fh:
content = fh.read()
new_content = re.sub(r"bioconda::star=\d.\d.\d\D?", r"bioconda::star=2.6.1d", content)
with open(main_nf_path, "w") as fh:
fh.write(new_content)
version_bumper = nf_core.modules.ModuleVersionBumper(pipeline_dir=self.nfcore_modules)
version_bumper.bump_versions(module="star/align")
assert len(version_bumper.failed) == 0
| 5,343,097
|
def display_alert(
alert: Union[Mapping[str, Any], SecurityAlert], show_entities: bool = False
):
"""
Display a Security Alert.
Parameters
----------
alert : Union[Mapping[str, Any], SecurityAlert]
The alert to display as Mapping (e.g. pd.Series)
or SecurityAlert
show_entities : bool, optional
Whether to display entities (the default is False)
"""
output = format_alert(alert, show_entities)
if not isinstance(output, tuple):
output = [output]
for disp_obj in output:
display(disp_obj)
| 5,343,098
|
def warmUp():
"""
Warm up the machine in AppEngine a few minutes before the daily standup
"""
return "ok"
| 5,343,099
|