content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def add_diagdir(parser):
    """Attach the common ``--diagdir`` flag to *parser*.

    Most of the tooling needs a diag-tarball directory option, so it is
    defined once here instead of being repeated per command.
    """
    help_text = (
        " where the diag tarball directory is exported, "
        'should be where the nodes folder is located (default ".")'
    )
    parser.add_argument("-d", "--diagdir", dest="diag_dir", default=".", help=help_text)
def get_project_root() -> Path:
    """Locate the project root folder.

    Returns:
        Path: Directory containing this module, used as the project root.
    """
    module_path = Path(__file__)
    return module_path.parent
def FTCS(Uo, diffX, diffY=None):
    """Advance the diffusion equation one time step with explicit FTCS.

    Forward Time/Central Space update of the 1D or 2D diffusion
    equation. Only interior points are updated; boundary values are
    carried over from ``Uo`` unchanged.

    Parameters
    ----------
    Uo : ndarray[float], 1d or 2d
        Dependent variable at time level n over the whole domain.
    diffX : float
        Diffusion number for the x-direction.
    diffY : float, optional
        Diffusion number for the y-direction (unused in 1D).

    Returns
    -------
    ndarray[float], 1d or 2d
        Dependent variable at time level n+1 over the whole domain.
    """
    U = Uo.copy()  # boundaries keep their old values
    if Uo.ndim == 1:
        lap_x = Uo[2:] - 2.0 * Uo[1:-1] + Uo[0:-2]
        U[1:-1] = Uo[1:-1] + diffX * lap_x
    elif Uo.ndim == 2:
        center = Uo[1:-1, 1:-1]
        lap_x = Uo[2:, 1:-1] - 2.0 * center + Uo[0:-2, 1:-1]
        lap_y = Uo[1:-1, 2:] - 2.0 * center + Uo[1:-1, 0:-2]
        U[1:-1, 1:-1] = center + diffX * lap_x + diffY * lap_y
    return U
def epicyclic_frequency(prof) -> Quantity:
    """Compute the epicyclic frequency kappa of a disc profile.

    kappa = sqrt((2 * Omega / R) * d(R^2 * Omega)/dR), with the radial
    derivative evaluated numerically on the profile's radius grid.
    """
    angular_velocity = prof['keplerian_frequency']
    radius = prof['radius']
    specific_ang_momentum = radius ** 2 * angular_velocity
    kappa_squared = 2 * angular_velocity / radius * np.gradient(specific_ang_momentum, radius)
    return np.sqrt(kappa_squared)
def _filter_subset(systems, test_sets, langpair, origlang, subset=None):
    """Filter sentences with a given origlang (or subset) according to the raw SGM files.

    Scans the raw SGM file of each test set and, for every ``<doc>``
    element, decides whether its segments are kept based on the document's
    ``origlang`` attribute and/or its subset annotation tags. The per-segment
    keep/drop decisions are then applied to every system output.

    :param systems: list of system outputs, each a list of sentences.
    :param test_sets: comma-separated test set names.
    :param langpair: language-pair key into DATASETS.
    :param origlang: original language to keep; a 'non-' prefix inverts
        the match. None disables origlang filtering.
    :param subset: optional regex matched against SUBSETS annotation tags.
    :return: the filtered system outputs.
    :raises ValueError: if filtering is requested without a test set or
        language pair.
    """
    if origlang is None and subset is None:
        return systems
    if test_sets is None or langpair is None:
        raise ValueError('Filtering for --origlang or --subset needs a test (-t) and a language pair (-l).')
    # One boolean per segment, accumulated across all requested test sets.
    indices_to_keep = []
    for test_set in test_sets.split(','):
        rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', DATASETS[test_set][langpair][0])
        if not rawfile.endswith('.sgm'):
            raise Exception('--origlang and --subset supports only *.sgm files, not %s', rawfile)
        if subset is not None:
            if test_set not in SUBSETS:
                raise Exception('No subset annotation available for test set ' + test_set)
            doc_to_tags = SUBSETS[test_set]
        number_sentences_included = 0
        with smart_open(rawfile) as fin:
            include_doc = False
            for line in fin:
                if line.startswith('<doc '):
                    # New document: decide whether its segments are kept.
                    if origlang is None:
                        include_doc = True
                    else:
                        doc_origlang = re.sub(r'.* origlang="([^"]+)".*\n', '\\1', line)
                        if origlang.startswith('non-'):
                            # 'non-XX' keeps documents NOT originally in XX.
                            include_doc = doc_origlang != origlang[4:]
                        else:
                            include_doc = doc_origlang == origlang
                    if subset is not None:
                        doc_id = re.sub(r'.* docid="([^"]+)".*\n', '\\1', line)
                        # The subset regex must match the doc's annotation tags.
                        if not re.search(subset, doc_to_tags.get(doc_id, '')):
                            include_doc = False
                if line.startswith('<seg '):
                    indices_to_keep.append(include_doc)
                    number_sentences_included += 1 if include_doc else 0
    return [[sentence for sentence,keep in zip(sys, indices_to_keep) if keep] for sys in systems]
def get_groups(
        a_graph,
        method='component_infomap', return_form='membership'):
    """
    Return the grouping of the provided graph object using the specified
    method.

    Parameters
    ==========
    a_graph: :class:`igraph.Graph`
        The graph to partition. Vertices are identified by their 'name'
        attribute.
    method: str (default='component_infomap')
        String specifying which method to use. If two methods should be
        used one after the other they should be separated by `_`.
        Default: 'component_infomap', which first treats all disconnected
        components as groups, then applies infomap on each component to
        optionally split it further.
    return_form: str (default='membership')
        Format of the returned group structure. Options are:

        * ``'membership'``: dict mapping each node name to the group it
          belongs to.
        * ``'memberlists'``: dict mapping each group id to the list of its
          member node names.

    Returns
    =======
    dict or None
        Depending on `return_form`, either the membership dict::

            {node_id: group_id, ...}

        or the memberlist dict::

            {group_id: [node1_id, node2_id, ...], ...}

        ``None`` is returned for an unrecognized `return_form`.
    """
    # For now only 'component_infomap' is allowed as procedure.
    if method == 'component_infomap':
        # First label every vertex with its connected-component index.
        a_graph.vs['component'] = a_graph.clusters(
            ).membership
        components = set(a_graph.vs['component'])
        # Then build a subgraph per component and apply infomap to it.
        node_membership = {}
        if components:
            # Community detection on each component; the compound group id
            # is "<component>_<infomap group>".
            for component in components:
                _comp_graph = a_graph.subgraph(
                    [
                        node['name']
                        for node in a_graph.vs
                        if node['component'] == component
                    ]
                )
                _infompa_comp_graph = _comp_graph.community_infomap('weight')
                _comp_graph.vs['_group'] = _infompa_comp_graph.membership
                for node in _comp_graph.vs:
                    node_membership[node['name']] = '{0}_{1}'.format(
                        node['component'], node['_group']
                    )
                del _infompa_comp_graph
        else:
            # Fallback when no components were found: run infomap on the
            # whole graph and use the plain infomap group ids.
            _infompa_comp_graph = a_graph.community_infomap('weight')
            a_graph.vs['group'] = _infompa_comp_graph.membership
            node_membership = {
                node['name']: node['group']
                for node in a_graph.vs
            }
    # Invert node -> group into group -> [nodes] for 'memberlists'.
    group_membership = {}
    for node in node_membership:
        try:
            group_membership[node_membership[node]].append(node)
        except KeyError:
            group_membership[node_membership[node]] = [node]
    if return_form == 'membership':
        return node_membership
    elif return_form == 'memberlists':
        return group_membership
    else:
        return None
def positional_rank_queues(service_platform, api_key):
    """Fetch the queues that have positional ranks enabled.

    References:
        https://developer.riotgames.com/regional-endpoints.html
        https://developer.riotgames.com/api-methods/#league-v4/GET_getQueuesWithPositionRanks

    Arguments:
        service_platform (str): The service platform that the request should be issued to.
        api_key (str): The client's api key.

    Returns:
        dict: the details of the response to the issued http request.
    """
    host = endpoints.v4["host"]["endpoint"].format(service_platform)
    path = endpoints.v4["positional-rank-queues"]["endpoint"]
    headers = {"X-Riot-Token": api_key}
    return _request_executor.get(host + path, header_parameters=headers)
def maplist(f, xs):
    """Apply *f* to every element of *xs* and return the results as a list."""
    return [f(x) for x in xs]
def configure_bgp_l2vpn_neighbor_activate(
    device, address_family, bgp_as, neighbor_address,
    address_family_modifier="", community=""
):
    """ Activate bgp neighbor on bgp router

    Args:
        device ('obj') : Device to be configured
        bgp_as ('str') : Bgp Id to be added to configuration
        neighbor_address ('str') : Address of neighbor to be added to configuration
        address_family ('str') : Address family to be configured
        address_family_modifier ('str') : the endpoint provisioning information to be distributed
                                          to BGP peers.
        community('str') : Specifies the communities attribute to be sent to a BGP neighbor.
    Returns:
        N/A
    Raises:
        SubCommandFailure: Failed executing configure commands
    """
    log.info("configure l2vpn vpls address-family on router bgp {bgp_as}"
             .format(bgp_as=bgp_as))
    try:
        device.configure([
            "router bgp {bgp_as}".format(bgp_as=bgp_as),
            "address-family {address_family} {address_family_modifier}".format(
                address_family=address_family,
                address_family_modifier=address_family_modifier),
            "neighbor {neighbor_address} activate".format(
                neighbor_address=neighbor_address),
            "neighbor {neighbor_address} send-community {community}".format(
                neighbor_address=neighbor_address, community=community)
        ])
    except SubCommandFailure as e:
        # Fixed typo in the original error message ("ativate" -> "activate").
        raise SubCommandFailure(
            "Could not activate l2vpn bgp neighbor on bgp "
            "router {bgp_as}. Error:{e}".format(bgp_as=bgp_as, e=e)
        )
def sanitize_name_inputs(inputs_data):
    """
    Sanitize the "name" value of every entry of *inputs_data* in place.

    Galaxy outputs and user-supplied file names may contain spaces or '/'
    characters that would break os.path handling, so each name is run
    through sanitize_name_input.

    :param inputs_data: dict[string, dict[string, string]]
    """
    for entry in inputs_data.values():
        entry["name"] = sanitize_name_input(entry["name"])
def pull_urls_excel_sheets(workbook):
    """
    Collect the URLs appearing in the cells of a given ExcelBook object.
    """
    if workbook is None:
        # Nothing to scan.
        return []
    urls = set()
    # Pattern for cells that look like URLs missing the http:// prefix.
    schemeless_pat = r"[A-Za-z0-9_]{3,50}\.[A-Za-z]{2,10}/(?:[A-Za-z0-9_]{1,50}/)*[A-Za-z0-9_\.]{3,50}"
    for cell in excel.pull_cells_workbook(workbook):
        # Normalize the cell value to a printable, stripped string.
        try:
            value = str(cell["value"]).strip()
        except UnicodeEncodeError:
            value = ''.join(filter(lambda x:x in string.printable, cell["value"])).strip()
        if not value:
            continue
        # Prepend http:// when the cell looks like a schemeless URL.
        if re.search(schemeless_pat, value) is not None:
            value = "http://" + value
        # Pull every URL out of the (possibly prefixed) cell value.
        for url in re.findall(read_ole_fields.URL_REGEX, value):
            urls.add(url.strip())
    return urls
def main():
    """Main entry point: export a glow-tts generator checkpoint to ONNX."""
    parser = argparse.ArgumentParser(prog="glow-tts-export-onnx")
    parser.add_argument("checkpoint", help="Path to model checkpoint (.pth)")
    parser.add_argument("output", help="Path to output onnx model")
    parser.add_argument(
        "--config", action="append", help="Path to JSON configuration file(s)"
    )
    parser.add_argument(
        "--debug", action="store_true", help="Print DEBUG messages to the console"
    )
    args = parser.parse_args()
    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    _LOGGER.debug(args)
    # -------------------------------------------------------------------------
    # Convert to paths
    args.checkpoint = Path(args.checkpoint)
    args.output = Path(args.output)
    if args.config:
        args.config = [Path(p) for p in args.config]
    elif args.checkpoint and args.checkpoint.is_dir():
        # No explicit --config: look for config.json in the checkpoint directory
        maybe_config_path = args.checkpoint / "config.json"
        if maybe_config_path.is_file():
            _LOGGER.debug("Found config in checkpoint directory: %s", maybe_config_path)
            args.config = [maybe_config_path]
    # Load configuration (config files are merged over the defaults)
    config = TrainingConfig()
    if args.config:
        _LOGGER.debug("Loading configuration(s) from %s", args.config)
        config = TrainingConfig.load_and_merge(config, args.config)
    # Load checkpoint
    _LOGGER.debug("Loading checkpoint from %s", args.checkpoint)
    checkpoint = load_checkpoint(args.checkpoint, config)
    generator = checkpoint.training_model.generator
    _LOGGER.info(
        "Loaded checkpoint from %s (global step=%s)",
        args.checkpoint,
        checkpoint.global_step,
    )
    # Inference only: switch off training-time behavior and fold weight norm
    generator.eval()
    generator.remove_weight_norm()
    if args.output.is_dir():
        # Output to directory with a default file name
        args.output.mkdir(parents=True, exist_ok=True)
        output_path = args.output / "generator.onnx"
    else:
        # Output to file
        args.output.parent.mkdir(parents=True, exist_ok=True)
        output_path = args.output
    # Create dummy input used only to trace the graph for export
    dummy_input = to_gpu(torch.randn((1, config.audio.num_mels, 50), dtype=torch.float))
    # Export with dynamic batch and length axes so any input size works
    torch.onnx.export(
        generator,
        dummy_input,
        str(output_path),
        opset_version=12,
        do_constant_folding=True,
        input_names=["mel"],
        output_names=["audio"],
        dynamic_axes={
            "mel": {0: "batch_size", 2: "mel_length"},
            "audio": {0: "batch_size", 1: "audio_length"},
        },
    )
    _LOGGER.info("Exported model to %s", args.output)
def do_request(base_url, api_path, key, session_id, extra_params=''):
    """
    Issue a request against the KNVB API (e.g. /teams to fetch all team
    data).

    The API authenticates each call with an MD5 digest over
    "key#path#session_id", passed as the ``hash`` query parameter.
    """
    import hashlib

    # hashlib replaces the Python-2-only ``md5`` module; encode first
    # because hashlib requires bytes input.
    hash_input = '{0}#{1}#{2}'.format(key, api_path, session_id)
    hashStr = hashlib.md5(hash_input.encode('utf-8')).hexdigest()
    url = '{0}{1}?PHPSESSID={2}&hash={3}&{4}'.format(base_url,
                                                     api_path,
                                                     session_id,
                                                     hashStr,
                                                     extra_params)
    headers = {
        'HTTP_X_APIKEY': key,
        'Content-Type': 'application/json'
    }
    click.echo('URL: {0}'.format(url))
    r = requests.get(url, headers=headers)
    json_data = r.json()
    return json_data
def write_code():
    """
    Return the [Code] section of the installer script that checks the
    existing PATH and %SASVIEWPATH% in the environment variables.
    """
    # The Pascal-script helpers below are emitted verbatim into the
    # installer script.
    msg = """\n\n[Code]\n"""
    # True when the VC90 CRT runtime directory is absent and must be installed.
    msg += """function InstallVC90CRT(): Boolean;\n"""
    msg += """begin\n"""
    msg += """    Result := not DirExists('C:\WINDOWS\WinSxS\\x86_Microsoft.VC90."""
    msg += """CRT_1fc8b3b9a1e18e3b_9.0.21022.8_x-ww_d08d0375');\n"""
    msg += """end;\n\n"""
    # True when %SASVIEWPATH% is not yet one of the PATH entries.
    msg += """function NeedsAddPath(): boolean;\n"""
    msg += """var\n"""
    msg += """  oldpath: string;\n"""
    msg += """  newpath: string;\n"""
    msg += """  pathArr: TArrayOfString;\n"""
    msg += """  i: Integer;\n"""
    msg += """begin\n"""
    msg += """  RegQueryStringValue(HKEY_CURRENT_USER,'Environment',"""
    msg += """'PATH', oldpath)\n"""
    msg += """  oldpath := oldpath + ';';\n"""
    msg += """  newpath := '%SASVIEWPATH%';\n"""
    msg += """  i := 0;\n"""
    msg += """  while (Pos(';', oldpath) > 0) do begin\n"""
    msg += """    SetArrayLength(pathArr, i+1);\n"""
    msg += """    pathArr[i] := Copy(oldpath, 0, Pos(';', oldpath)-1);\n"""
    msg += """    oldpath := Copy(oldpath, Pos(';', oldpath)+1,"""
    msg += """ Length(oldpath));\n"""
    msg += """    i := i + 1;\n"""
    msg += """    // Check if current directory matches app dir\n"""
    msg += """    if newpath = pathArr[i-1] \n"""
    msg += """    then begin\n"""
    msg += """      Result := False;\n"""
    msg += """      exit;\n"""
    msg += """    end;\n"""
    msg += """  end;\n"""
    msg += """  Result := True;\n"""
    msg += """end;\n"""
    msg += """\n"""
    return msg
def get_filenames(filename):
    """
    Return the unique file references mentioned inside *filename*.

    The file content is split on whitespace and common punctuation, and
    every token ending with a known data-file extension is collected.

    :param filename: path of the text file to scan.
    :return: list of unique file references; empty list when the file
        cannot be read or decoded.
    """
    # Extensions that count as file references.
    known_exts = ('.csv', '.zip', '.pdf', '.txt', '.tsv', '.cfg', '.ini')
    try:
        with open(filename, 'r', encoding='utf8') as file:
            words = re.split("[\n\\, \-!?;'//]", file.read())
        files = {word for word in words if word.endswith(known_exts)}
        return list(files)
    except (OSError, UnicodeDecodeError) as e:
        # Best effort: report the problem but keep the caller running
        # (previously any Exception was swallowed here).
        print(e)
        return []
def format_filename_gen(prefix, seq_len, tgt_len, bi_data, suffix,
                        src_lang, tgt_lang, uncased=False):
    """Build the generation-data file name encoding the preprocessing
    options (language pair, sequence lengths, casing and direction)."""
    uncased_str = "uncased." if uncased else ""
    bi_data_str = "bi" if bi_data else "uni"
    return "{}-{}_{}.seqlen-{}.tgtlen-{}.{}{}.gen.{}".format(
        src_lang[:2], tgt_lang[:2],
        prefix, seq_len, tgt_len, uncased_str,
        bi_data_str, suffix)
def restore_modifiers(obj: Object) -> None:
    """Restore modifier state after tension computation.

    Args:
        obj -- The blender object whose suspended modifiers are re-enabled
    """
    for suspended in obj.data.tension_props.suspended_modifiers:
        # A modifier may have been removed while suspended; skip those.
        if suspended.name not in obj.modifiers:
            continue
        modifier = obj.modifiers[suspended.name]
        modifier.show_viewport = suspended.viewport
        modifier.show_render = suspended.render
def upsert_game(cur, session, guess):
    """
    Record or overwrite a user's guess for the latest quake game.

    :param cur: the database cursor
    :param session: the current session
    :param guess: the users guess (-1 for lower, 1 for higher)
    :return:
    """
    row = cur.execute(
        "SELECT QuakeID FROM Quake ORDER BY Timestamp DESC LIMIT 1"
    ).fetchone()
    latest_quake = row[0]
    user_id = auth.get_user(session)["id"]
    if user_id is None:
        raise web.HTTPUnauthorized
    if guess not in ["-1", "1"]:
        raise web.HTTPBadRequest
    # INSERT OR REPLACE keeps at most one guess per (user, quake).
    cur.execute(
        "INSERT OR REPLACE INTO Game (UserID, QuakeID, Guess) "
        "VALUES (?, ?, ?)",
        (user_id, latest_quake, guess)
    )
    raise web.HTTPOk
def app_get_cmd(args):
    """Extract an application description from an execution."""
    api = ZoeExecutionsAPI(utils.zoe_url(), utils.zoe_user(), utils.zoe_pass())
    execution = api.get(args.id)
    if execution is None:
        print("no such execution")
        return
    json.dump(execution['description'], sys.stdout, sort_keys=True, indent=4)
def make_file_list_for_RTI_Rwanda(folder='/home/sr365/Gaia/Rwanda_RTI/RTI_data_set/all_train_5_percent/train_patches'):
    """
    Write the training/testing file list for RTI Rwanda imagery.

    Each output line pairs a .jpg patch with its .png label
    ("name.jpg name.png"). The list file name depends on whether the
    folder path contains 'test' or 'train'.

    :param folder: dataset folder containing a 'patches' subdirectory.
        Defaults to the previously hard-coded path for backward
        compatibility.
    """
    if 'test' in folder:
        save_file = os.path.join(folder, 'file_list_test.txt')
    elif 'train' in folder:
        save_file = os.path.join(folder, 'file_list_train.txt')
    else:
        print("Your make_file_for_RTI_dataset does not have train or test in your folder name")
        save_file = os.path.join(folder, 'file_list_raw.txt')
    # NOTE(review): mode 'a' appends, so re-running duplicates entries;
    # kept as-is for backward compatibility.
    with open(save_file, 'a') as f:
        for file in os.listdir(os.path.join(folder, 'patches')):
            if not file.endswith('.jpg'):
                continue
            f.write(file)
            f.write(' ')
            f.write(file.replace('.jpg', '.png'))
            f.write('\n')
def is_holiday(date) -> bool:
    """
    Tell whether *date* falls on a recognized holiday.
    """
    raw_name = penn_holidays.get(date)
    if raw_name:
        # Observed holidays count the same as the holiday itself.
        return raw_name.replace(' (Observed)', '') in holiday_names
    return False
def tokenize_text(text):
    """
    Split *text* into tokens: maximal runs of characters matching the
    module-level ``alphanumeric`` pattern, plus each individual
    non-matching character that is not listed in ``filter_character``.

    :param text: String
    :return: Tokens
    """
    tokens = []
    word = ""
    for ch in text:
        if re.match(alphanumeric, ch):
            # Extend the current alphanumeric run.
            word += ch
            continue
        # Non-alphanumeric character ends the current run.
        if word:
            tokens.append(word)
            word = ""
        if ch not in filter_character:
            tokens.append(ch)
    if word:
        tokens.append(word)
    return tokens
def generate_dummy_probe(elec_shapes='circle'):
    """
    Generate a 3-column, 32-channel dummy probe.

    Mainly used for testing and examples.

    Parameters
    ----------
    elec_shapes : str, default 'circle'
        Electrode shape: 'circle', 'square' or 'rect'.

    Raises
    ------
    ValueError
        If *elec_shapes* is not a recognized shape name (previously an
        unknown shape caused an UnboundLocalError further down).
    """
    if elec_shapes == 'circle':
        electrode_shape_params = {'radius': 6}
    elif elec_shapes == 'square':
        electrode_shape_params = {'width': 7}
    elif elec_shapes == 'rect':
        electrode_shape_params = {'width': 6, 'height': 4.5}
    else:
        raise ValueError("Unknown elec_shapes: {!r}".format(elec_shapes))
    probe = generate_multi_columns_probe(num_columns=3,
                                         num_elec_per_column=[10, 12, 10],
                                         xpitch=25, ypitch=25, y_shift_per_column=[0, -12.5, 0],
                                         electrode_shapes=elec_shapes, electrode_shape_params=electrode_shape_params)
    return probe
def try_decode(message):
    """Attempt to decode *message* with each known message class.

    Returns the first successful decode, or None when every class
    rejects the message with a ValueError.
    """
    for message_cls in MESSAGE_CLASSES:
        try:
            decoded = message_cls.decode(message)
        except ValueError:
            continue  # Wrong message type; try the next class.
        return decoded
    return None
def build_where_clause(args: dict) -> str:
    """
    This function transforms the relevant entries of dict into the where part of a SQL query

    Args:
        args: The arguments dict. A raw 'query' entry, when present,
            short-circuits everything else and is returned verbatim
            (stripped). 'ip' and 'port' are convenience arguments that
            expand to source/dest pairs and are mutually exclusive with
            the explicit source_*/dest_* arguments.

    Returns:
        A string represents the where part of a SQL query

    Raises:
        DemistoException: when 'ip' is combined with source_ip/dest_ip,
            or 'port' with source_port/dest_port.
    """
    # Maps the incoming argument names to the SQL field they filter on.
    args_dict = {
        'source_ip': 'source_ip.value',
        'dest_ip': 'dest_ip.value',
        'rule_matched': 'rule_matched',
        'from_zone': 'from_zone',
        'to_zone': 'to_zone',
        'source_port': 'source_port',
        'dest_port': 'dest_port',
        'action': 'action.value',
        'file_sha_256': 'file_sha_256',
        'file_name': 'file_name',
        'app': 'app',
        'app_category': 'app_category',
        'dest_device_port': 'dest_device_port',
        'dest_edl': 'dest_edl',
        'dest_dynamic_address_group': 'dest_dynamic_address_group',
        'dest_location': 'dest_location',
        'dest_user': 'dest_user',
        'file_type': 'file_type',
        'is_server_to_client': 'is_server_to_client',
        'is_url_denied': 'is_url_denied',
        'log_type': 'log_type',
        'nat_dest': 'nat_dest',
        'nat_dest_port': 'nat_dest_port',
        'nat_source': 'nat_source',
        'nat_source_port': 'nat_source_port',
        'rule_matched_uuid': 'rule_matched_uuid',
        'severity': 'severity',
        'source_device_host': 'source_device_host',
        'source_edl': 'source_edl',
        'source_dynamic_address_group': 'source_dynamic_address_group',
        'source_location': 'source_location',
        'source_user': 'source_user',
        'sub_type': 'sub_type.value',
        'time_generated': 'time_generated',
        'url_category': 'url_category',
        'url_domain': 'url_domain'
    }
    if args.get('ip') and (args.get('source_ip') or args.get('dest_ip')):
        raise DemistoException('Error: "ip" argument cannot appear with either "source_ip" nor "dest_ip"')
    if args.get('port') and (args.get('source_port') or args.get('dest_port')):
        raise DemistoException('Error: "port" argument cannot appear with either "source_port" nor "dest_port"')
    # Ports are emitted as bare integers, everything else as quoted strings.
    non_string_keys = {'dest_port', 'source_port'}
    if 'query' in args:
        # if query arg is supplied than we just need to parse it and only it
        return args['query'].strip()
    where_clause = ''
    if args.get('ip'):
        ips = argToList(args.pop('ip'))
        # Creating a query for ip argument using source ip and dest ip
        where_clause += '(' + ' OR '.join(f'source_ip.value = "{ip}" OR dest_ip.value = "{ip}"' for ip in ips) + ')'
        # Only join with AND if any further condition will follow.
        if any(args.get(key) for key in args_dict) or args.get('port') or args.get('url'):
            where_clause += ' AND '
    if args.get('port'):
        ports = argToList(args.pop('port'))
        # Creating a query for port argument using source port and dest port
        where_clause += '(' + ' OR '.join(f'source_port = {port} OR dest_port = {port}' for port in ports) + ')'
        if any(args.get(key) for key in args_dict):
            where_clause += ' AND '
    if args.get('url'):
        urls = argToList(args.pop('url'))
        # Creating a query for url argument using uri and referer
        where_clause += '(' + ' OR '.join(f'uri LIKE "%{url}%" OR referer LIKE "%{url}%"' for url in urls) + ')'
        if any(args.get(key) for key in args_dict):
            where_clause += ' AND '
    # We want to add only keys that are part of the query
    string_query_fields = {key: value for key, value in args.items() if key in args_dict and key not in non_string_keys}
    or_statements = []
    for key, values in string_query_fields.items():
        string_values_list: list = argToList(values)
        field = args_dict[key]
        or_statements.append(' OR '.join([f'{field} = "{value}"' for value in string_values_list]))
    # ports are digested as ints and cannot be sent as strings
    non_string_query_fields = {key: value for key, value in args.items() if key in non_string_keys}
    for key, values in non_string_query_fields.items():
        non_string_values_list: list = argToList(values)
        field = args_dict[key]
        or_statements.append(' OR '.join([f'{field} = {value}' for value in non_string_values_list]))
    where_clause += ' AND '.join([f'({or_statement})' for or_statement in or_statements if or_statement])
    return where_clause
def matrix_scale(s):
    """Return a transform matrix scaling uniformly by *s* along all 3 axes."""
    result = matrix_ident()
    # Overwrite the upper-left 3x3 block with s times the identity.
    result[0:3, 0:3] = np.eye(3, dtype=np.float64) * s
    return result
def arm(seconds):
    """Arm HW watchdog"""
    armed_for = int(platform_watchdog.arm(seconds))
    if armed_for < 0:
        # Negative return value signals failure from the platform layer.
        click.echo("Failed to arm Watchdog for {} seconds".format(seconds))
        return
    click.echo("Watchdog armed for {} seconds".format(armed_for))
def magnitude_datapoints(data: DataPoint) -> List:
    """
    Compute the per-point vector magnitude of a list of data points.

    :param data: sequence of points, each carrying a ``sample`` vector
        (assumed 2D once stacked — one row per point).
    :return: list of L2 norms, one per point; empty list for no input.
    """
    if data is None or len(data) == 0:
        return []
    samples = np.array([point.sample for point in data])
    return norm(samples, axis=1).tolist()
def capture_output():
    """Capture standard output and error during the context. Yields a
    tuple of two lists that receive the captured lines (split on
    newlines) once the context has finished.

    >>> with capture_output() as (out, err):
    ...     print('Captured')
    ...
    >>> out
    ['Captured']
    """
    real_out, real_err = sys.stdout, sys.stderr
    sys.stdout, sys.stderr = StringIO(), StringIO()
    captured_out, captured_err = [], []
    try:
        yield captured_out, captured_err
    finally:
        # Fill the lists only after the context body has run, then
        # always restore the real streams.
        captured_out.extend(sys.stdout.getvalue().splitlines())
        captured_err.extend(sys.stderr.getvalue().splitlines())
        sys.stdout, sys.stderr = real_out, real_err
def test_load_submission_file_c_fain(mock_db_cursor):
    """
    Test load submission management command for File C records with only a FAIN.

    Seeds the minimal reference rows the loader needs (a TAS, a
    transaction and an award carrying the FAIN), runs the command, and
    checks the File C record was linked to the seeded award.
    """
    # Reference rows the loader is expected to match against.
    models_to_mock = [
        {
            "model": TreasuryAppropriationAccount,
            "treasury_account_identifier": -1111,
            "allocation_transfer_agency_id": "999",
            "agency_id": "999",
            "beginning_period_of_availability": "1700-01-01",
            "ending_period_of_availability": "1700-12-31",
            "availability_type_code": "000",
            "main_account_code": "0000",
            "sub_account_code": "0000",
            "tas_rendering_label": "999-999-000-0000-0000",
        },
        {"model": TransactionNormalized, "id": -997},
        {"model": Award, "id": -997, "fain": "RANDOM_LOAD_SUB_FAIN", "latest_transaction_id": -997},
    ]
    mommy.make(ObjectClass, id=0, major_object_class="00", object_class="000", direct_reimbursable=None)
    for entry in models_to_mock:
        # pop("model") leaves only the field kwargs for mommy.make.
        mommy.make(entry.pop("model"), **entry)
    call_command("load_submission", "-9999")
    # The loaded File C record should link back to the seeded award only.
    expected_results = {"award_ids": [-997]}
    actual_results = {
        "award_ids": list(
            FinancialAccountsByAwards.objects.filter(award_id__isnull=False).values_list("award_id", flat=True)
        )
    }
    assert expected_results == actual_results
def prediction(pdb_filename):
    """Run the CheShift CS prediction routine.

    Loads the pose from *pdb_filename*, runs the raw chemical-shift
    prediction against the database, and cleans up the PyMOL selections
    used along the way.
    """
    cmd.set('suspend_updates', 'on')
    pose, residues, total_residues, states = pose_from_pdb(pdb_filename)
    Db = load(path)
    raw(pose, residues, total_residues, states, Db)
    # print() with a single argument is valid in both Python 2 and 3;
    # the original used a Python-2-only print statement.
    print('<'*80 + '\nYou didn`t provide a file with chemical Shifts, hence CheShift-2 assumed you\n only wanted the predicted CS. The predicted chemical shifts can be found in the file %s.txt\n' % pose + '>'*80)
    for sel in ['A', 'B', 'C', 'D']:
        cmd.delete(sel)
    cmd.set('suspend_updates', 'off')
def generate_config(data, config_name=None, validate=True, base_config=None):
    """
    Generates the *OpenColorIO* config from given data.

    Parameters
    ----------
    data : ConfigData
        *OpenColorIO* config data.
    config_name : unicode, optional
        *OpenColorIO* config file name, if given the config will be written to
        disk.
    validate : bool, optional
        Whether to validate the config.
    base_config : bool, optional
        *OpenColorIO* base config inherited for initial data.

    Returns
    -------
    Config
        *OpenColorIO* config.
    """
    import PyOpenColorIO as ocio
    # Start from the inherited base config when given, otherwise empty.
    if base_config is not None:
        config = base_config
    else:
        config = ocio.Config()
        config.setMajorVersion(data.profile_version)
    if data.description is not None:
        config.setDescription(data.description)
    for search_path in data.search_path:
        logging.debug(f'Adding "{search_path}".')
        config.addSearchPath(search_path)
    for role, colorspace in data.roles.items():
        logging.debug(f'Adding "{colorspace}" colorspace as "{role}" role.')
        config.setRole(role, colorspace)
    # Mapping entries are factory kwargs; anything else is assumed to be
    # an already-built OCIO object.
    for colorspace in data.colorspaces:
        if isinstance(colorspace, Mapping):
            colorspace = colorspace_factory(**colorspace)
        logging.debug(f'Adding "{colorspace.getName()}" colorspace.')
        config.addColorSpace(colorspace)
    for named_transform in data.named_transforms:
        if isinstance(named_transform, Mapping):
            named_transform = named_transform_factory(**named_transform)
        logging.debug(f'Adding "{named_transform.getName()}" named transform.')
        config.addNamedTransform(named_transform)
    for view_transform in data.view_transforms:
        if isinstance(view_transform, Mapping):
            view_transform = view_transform_factory(**view_transform)
        logging.debug(f'Adding "{view_transform.getName()}" view transform.')
        config.addViewTransform(view_transform)
    for look in data.looks:
        if isinstance(look, Mapping):
            look = look_factory(**look)
        logging.debug(f'Adding "{look.getName()}" look.')
        config.addLook(look)
    # Inactive colorspaces are an OCIO v2 feature.
    if data.profile_version >= 2:
        logging.debug(f'Disabling "{data.inactive_colorspaces}" colorspaces.')
        config.setInactiveColorSpaces(','.join(data.inactive_colorspaces))
    for shared_view in data.shared_views:
        display_colorspace = shared_view.get('display_colorspace',
                                             '<USE_DISPLAY_NAME>')
        looks = shared_view.get('looks')
        view_transform = shared_view.get('view_transform')
        rule = shared_view.get('rule')
        description = shared_view.get('description')
        view = shared_view['view']
        logging.debug(
            f'Adding "{view}" shared view using "{view_transform}" '
            f'view transform, "{display_colorspace}" display colorspace, '
            f'"{looks}" looks, "{rule}" rule and "{description}"'
            f'description.')
        config.addSharedView(view, view_transform, display_colorspace, looks,
                             rule, description)
    for view in data.views:
        display = view['display']
        colorspace = view.get('colorspace')
        looks = view.get('looks')
        view_transform = view.get('view_transform')
        display_colorspace = view.get('display_colorspace')
        rule = view.get('rule')
        description = view.get('description')
        view = view['view']
        # Three flavors of view: colorspace-based, view-transform-based,
        # or a reference to an already-declared shared view.
        if colorspace is not None:
            logging.debug(f'Adding "{view}" view to "{display}" display '
                          f'using "{colorspace}" colorspace.')
            config.addDisplayView(display, view, colorspace, looks)
        elif view_transform is not None and display_colorspace is not None:
            logging.debug(f'Adding "{view}" view to "{display}" display '
                          f'using "{view_transform}" view transform, '
                          f'"{display_colorspace}" display colorspace, '
                          f'"{rule}" rule and "{description}" description.')
            config.addDisplayView(display, view, view_transform,
                                  display_colorspace, looks, rule, description)
        else:
            logging.debug(f'Adding "{view}" view to "{display}" display.')
            config.addDisplaySharedView(display, view)
    if data.active_displays:
        logging.debug(f'Activating "{data.active_displays}" displays.')
        config.setActiveDisplays(','.join(data.active_displays))
    if data.active_views:
        logging.debug(f'Activating "{data.active_views}" views.')
        config.setActiveViews(','.join(data.active_views))
    if data.file_rules:
        file_rules = ocio.FileRules()
        rule_index = 0
        # Reversed so that the first declared rule ends with the highest
        # priority after repeated insertions at increasing indices.
        for file_rule in reversed(data.file_rules):
            name = file_rule['name']
            colorspace = file_rule['colorspace']
            regex = file_rule.get('regex')
            pattern = file_rule.get('pattern')
            extension = file_rule.get('extension')
            if name == 'Default':
                logging.debug(f'Setting "{name}" file rule with '
                              f'"{colorspace}" colorspace.')
                file_rules.setDefaultRuleColorSpace(colorspace)
            elif regex:
                logging.debug(f'Adding "{name}" file rule with '
                              f'"{regex}" regex pattern for '
                              f'"{colorspace}" colorspace.')
                file_rules.insertRule(rule_index, name, colorspace, regex)
                rule_index += 1
            else:
                logging.debug(
                    f'Adding "{name}" file rule with '
                    f'"{pattern}" pattern and "{extension}" extension '
                    f'for "{colorspace}" colorspace.')
                file_rules.insertRule(rule_index, name, colorspace, pattern,
                                      extension)
                rule_index += 1
        config.setFileRules(file_rules)
    if data.viewing_rules:
        viewing_rules = ocio.ViewingRules()
        for i, viewing_rule in enumerate(reversed(data.viewing_rules)):
            logging.warning('Inserting a viewing rule is not supported yet!')
            # viewing_rules.insertRule()
        config.setViewingRules(viewing_rules)
    if data.default_view_transform is not None:
        config.setDefaultViewTransformName(data.default_view_transform)
    if validate:
        validate_config(config)
    if config_name is not None:
        with open(config_name, 'w') as file:
            file.write(config.serialize())
    return config
def svn_stringbuf_from_aprfile(*args):
    """svn_stringbuf_from_aprfile(svn_stringbuf_t result, apr_file_t file, apr_pool_t pool) -> svn_error_t"""
    # ``apply`` was removed in Python 3; unpack the arguments directly.
    return _core.svn_stringbuf_from_aprfile(*args)
def _check_iterative_process_compatibility(iterative_process):
  """Checks the compatibility of an iterative process with the training loop.

  Raises:
    IterativeProcessCompatibilityError: If `iterative_process` is not a
      `tff.templates.IterativeProcess` exposing a `get_model_weights`
      `tff.Computation` whose input type is assignable from the process
      state type.
  """
  error_message = (
      'The iterative_process argument must be of '
      'type`tff.templates.IterativeProcess`, and must have an '
      'attribute `get_model_weights`, which must be a `tff.Computation`. This '
      'computation must accept as input the state of `iterative_process`, and '
      'its output must be a nested structure of tensors matching the expected '
      'shape of the first input argument to `evaluation_fn`.')
  compatibility_error = IterativeProcessCompatibilityError(error_message)
  if not isinstance(iterative_process, tff.templates.IterativeProcess):
    raise compatibility_error
  if not hasattr(iterative_process, 'get_model_weights'):
    raise compatibility_error
  elif not callable(iterative_process.get_model_weights):
    raise compatibility_error
  get_model_weights_fn = iterative_process.get_model_weights
  if not isinstance(get_model_weights_fn, tff.Computation):
    raise compatibility_error
  input_type = get_model_weights_fn.type_signature.parameter
  server_state_type = iterative_process.state_type.member
  # Previously the result of this check was silently discarded, so an
  # incompatible state type slipped through; enforce it.
  if not server_state_type.is_assignable_from(input_type):
    raise compatibility_error
  # TODO(b/174268978): Once we enforce federated evaluations, we can check
  # compatibility with `validation_fn` without actually running the function.
def Arrow_Head_A (cls, elid = "SVG:Arrow_Head_A", design_size = 12, ref_x = None, stroke = "black", marker_height = 6, marker_width = 6, fill = "white", fill_opacity = 1, ** kw) :
        """Return a marker that is an arrow head with an A-Shape.
        >>> mrk = Marker.Arrow_Head_A ()
        >>> svg = Document (Root (view_box="0 0 1000 500"))
        >>> svg.add (Defs (mrk))
        >>> svg.add (Rect (x = 5, y = 5, width = 990, height = 490, fill = "none", stroke = "orange", stroke_width = 5))
        >>> svg.add (Path (fill = "none", stroke = "red", stroke_width = 25, marker_end = "url(#SVG:Arrow_Head_A)", d = "M 100 200 L 500 200 900 400"))
        >>> svg.add (Path (fill = "none", stroke = "blue", stroke_width =10, marker_start = "url(#SVG:Arrow_Head_A)", d = "M 100 100 L 500 100 900 50"))
        >>> svg.write_to_xml_stream ()
        <?xml version="1.0" encoding="utf-8" standalone="yes"?>
        <!DOCTYPE svg PUBLIC
          "-//W3C//DTD SVG 1.1//EN"
          "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
        <svg version="1.1" viewBox="0 0 1000 500" xmlns="http://www.w3.org/2000/svg"
          xmlns:xlink="http://www.w3.org/1999/xlink"
        >
          <defs>
            <marker id="SVG:Arrow_Head_A" fill="none" markerHeight="6"
              markerUnits="strokeWidth" markerWidth="6" orient="auto" refX="0"
              refY="6" stroke="black" viewBox="0 0 12 12"
            >
              <path d="m 0,9.0 0,-6.0 6.0,3.0 -6.0,3.0 z" fill="white"
                fill-opacity="1" stroke="none"
              >
              </path>
              <path d="m 0,9.0 5.0,-3.0 -5.0,-3.0">
              </path>
              <path d="m 2.0,4.0 0,4.0">
              </path>
            </marker>
          </defs>
          <rect fill="none" height="490" stroke="orange" stroke-width="5"
            width="990" x="5" y="5"
          />
          <path d="M 100 200 L 500 200 900 400" fill="none"
            marker-end="url(#SVG:Arrow_Head_A)" stroke="red" stroke-width="25"
          >
          </path>
          <path d="M 100 100 L 500 100 900 50" fill="none"
            marker-start="url(#SVG:Arrow_Head_A)" stroke="blue"
            stroke-width="10"
          >
          </path>
        </svg>
        """
        # modifying design size will draw with different line-strength
        # compared to the shape
        size           = design_size
        size_2         = size // 2
        # NOTE(review): `scope` is never used below — confirm whether
        # `Scope ()` has side effects before removing it.
        scope          = Scope ()
        if ref_x is None :
            ref_x      = 0
        # The marker is built from three sub-paths:
        #   1. the filled triangular head (closed path, no stroke),
        #   2. the stroked V outline of the head,
        #   3. the short vertical bar forming the "A" cross-stroke.
        result = cls \
            ( Path
                ( d       = "m %s,%s 0,%s %s,%s %s,%s z" %
                    ( 0
                    , size *  3 /  4.
                    , -(size / 2.)
                    , size / 2.
                    , size / 4.
                    , -(size / 2.)
                    , size / 4.
                    )
                , fill         = fill
                , fill_opacity = fill_opacity
                , stroke       = "none"
                )
            , Path
                ( d       = "m %s,%s %s,%s %s,%s" %
                    ( 0
                    , size *  3 /  4.
                    , size *  5 / 12.
                    , -(size / 4.)
                    , -(size * 5 / 12.)
                    , -(size / 4.)
                    )
                )
            , Path
                ( d       = "m %s,%s 0,%s" %
                    ( size / 6.
                    , size / 3.
                    , size / 3.
                    )
                )
            , elid          = elid
            , fill          = "none"
            , marker_units  = "strokeWidth"
            , marker_height = marker_height
            , marker_width  = marker_width
            , orient        = "auto"
            , ref_x         = ref_x
            , ref_y         = size_2
            , stroke        = stroke
            , view_box      = (0, 0, size, size)
            , ** kw
            )
        return result
def signup() -> Response | str | tuple[dict[str, str | int], int]:
    """Sign up a new user.

    GET renders the signup form; POST validates the submitted credentials,
    creates the user, and logs them in. Validation failures deliberately
    raise ``AssertionError`` to fall through to re-rendering the form.

    NOTE(review): ``assert`` statements are stripped under ``python -O``,
    which would silently disable this validation flow — consider explicit
    raises instead.
    """
    # Bypass if user is logged in
    if current_user.is_authenticated:
        return redirect(url_for("home"))
    # Process user data
    try:
        # Return template if request.method is GET
        assert request.method != "GET"
        # Process form
        username, password, remember = _get_auth_form()
        assert username and password
        # Check if user with given username already exists
        if database.get_instance(models.User, username=username):
            flash(
                f"User with {username} username already exists.<br>"
                f'Go to <a href="{url_for("auth.login")}">login page</a>.'
            )
            assert False
    except AssertionError:
        return render_template("signup.html")
    # Add user to database and login
    user = database.add_instance(
        models.User,
        lambda i: i.set_password(password),  # store a password hash, not plaintext
        username=username,
    )
    login_user(user, remember=remember)
    # Return json response or redirect to home
    if request.form.get("raw"):
        response = {
            "info": f"Successfully signed up as {username}.",
            "status": 200,
        }
        return response, 200
    return redirect(url_for("home"))
def evaluate_all_configs(hparams, agent_model_dir):
  """Evaluate the agent with multiple eval configurations.

  Runs one evaluation per combination of action-selection strategy
  ("mode" vs. "sample") and initial no-op count, and collects both the
  clipped and unclipped mean-reward scores under descriptive metric names.
  """
  policy_lambdas = {
      "sample": lambda policy: policy.sample(),
      "mode": lambda policy: policy.mode(),
  }

  def build_eval_hparams(base_hparams, policy_to_action, max_num_noops):
    # Shallow-copy so the caller's hparams stay untouched.
    eval_hparams = copy.copy(base_hparams)
    eval_hparams.add_hparam("num_agents", base_hparams.eval_num_agents)
    eval_hparams.add_hparam(
        "policy_to_actions_lambda", policy_lambdas[policy_to_action])
    eval_hparams.max_num_noops = max_num_noops
    return eval_hparams

  metrics = {}
  for policy_to_action in ("mode", "sample"):
    for max_num_noops in (hparams.eval_max_num_noops, 0):
      eval_hparams = build_eval_hparams(hparams, policy_to_action, max_num_noops)
      scores = evaluate_single_config(eval_hparams, agent_model_dir)
      # `scores` pairs up with (clipped, unclipped).
      for score, clipped in zip(scores, (True, False)):
        metric_name = "mean_reward/eval/{}_{}_max_noops_{}".format(
            policy_to_action, max_num_noops,
            "clipped" if clipped else "unclipped"
        )
        metrics[metric_name] = score
  return metrics
def showcase_create_non_quaranteed_bit_rate_subscription_for_live_streaming():
    """
    This example showcases how you can create a subscription to the 5G-API in order to establish
    a Non-Guaranteed Bit Rate (NON-GBR) QoS.
    In order to run this example you need to follow the instructions in readme.md in order to a) run the NEF emulator
    and b) run a local webserver that will print the location notifications it retrieves from the emulator.
    A testing local webserver (Flask webserver) can be initiated by running the examples/api.py
    """
    # Create a subscription, that will notify us 1000 times, for the next 1 day starting from now
    netapp_id = "myNetapp"
    # Authenticate against the NEF emulator and build the QoS client.
    host = emulator_utils.get_host_of_the_nef_emulator()
    token = emulator_utils.get_token()
    qos_awereness = QosAwareness(host, token.access_token)
    # The following external identifier was copy pasted by the NEF emulator. Go to the Map and hover over a User icon.
    # There you can retrieve the id address
    equipment_network_identifier = "10.0.0.3"
    network_identifier = QosAwareness.NetworkIdentifier.IP_V4_ADDRESS
    qos_reference = QosAwareness.NonGBRQosReference.LIVE_STREAMING
    gigabyte = 1024 * 1024 * 1024
    # Up to 10 gigabytes. 5 GB downlink, 5gb uplink
    usage_threshold = UsageThreshold(duration=None, # not supported
                                     total_volume=10 * gigabyte, # 10 Gigabytes of total volume
                                     downlink_volume=5 * gigabyte, # 5 Gigabytes for downlink
                                     uplink_volume=5 * gigabyte # 5 Gigabytes for uplink
                                     )
    # In this example we are running flask at http://localhost:5000 with a POST route to (/monitoring/callback) in order to retrieve notifications.
    # If you are running on the NEF emulator, you need to provide a notification_destination with an IP that the
    # NEF emulator docker can understand
    # For latest versions of docker this should be: http://host.docker.internal:5000/monitoring/callback"
    # Alternative you can find the ip of the HOST by running 'ip addr show | grep "\binet\b.*\bdocker0\b" | awk '{print $2}' | cut -d '/' -f 1'
    # See article for details: https://stackoverflow.com/questions/48546124/what-is-linux-equivalent-of-host-docker-internal/61001152
    notification_destination="http://172.17.0.1:5000/monitoring/callback"

    # Create the NON-GBR subscription on the emulator.
    subscription = qos_awereness.create_non_guaranteed_bit_rate_subscription(
        netapp_id=netapp_id,
        equipment_network_identifier=equipment_network_identifier,
        network_identifier=network_identifier,
        notification_destination=notification_destination,
        non_gbr_qos_reference=qos_reference,
        usage_threshold=usage_threshold
    )
    # From now on we should retrieve POST notifications to http://172.17.0.1:5000/monitoring/callback
    print("--- PRINTING THE SUBSCRIPTION WE JUST CREATED ----")
    print(subscription)

    # Request information about a subscription
    # The subscription id is the last path segment of the returned link.
    id = subscription.link.split("/")[-1]
    subscription_info = qos_awereness.get_subscription(netapp_id, id)
    print("--- RETRIEVING INFORMATION ABOUT SUBSCRIPTION " + id + "----")
    print(subscription_info)
def determinize(seed: Optional[int] = None, be_deterministic: bool = True) -> None:
    """
    Seed the available random sources (torch, CUDA, NumPy, the standard
    library) and configure cuDNN for deterministic or nondeterministic
    operation.
    --------------
    @Params: -> seed: Optional[int] = None
                 The number used for manual seeding. Passing a constant
                 greatly improves the reproducibility of a training run;
                 leaving it as ``None`` re-randomizes every source instead
                 (except in deterministic mode, where it falls back to 0).
             -> be_deterministic: bool = True
                 Whether to switch ``torch.backends.cudnn`` into
                 deterministic mode (and disable benchmarking) or not.
    Imports ``random`` from the standard library and tries to import NumPy,
    but does not fail when NumPy is missing.
    """
    import random

    # cuDNN flags are complementary: deterministic mode disables the
    # auto-tuner benchmark and vice versa.
    torch.backends.cudnn.deterministic = be_deterministic
    torch.backends.cudnn.benchmark = not be_deterministic
    if be_deterministic and seed is None:
        # Deterministic runs need *some* fixed seed; default to 0.
        seed = 0

    if seed is None:
        # No seed requested: freshly randomize every source.
        if torch.cuda.is_available():
            torch.cuda.seed_all()
        torch.seed()
        try:
            import numpy as np
            np.random.seed()
        except ImportError:
            pass
        random.seed()
    else:
        # Seed every source with the same value.
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(seed)
        torch.manual_seed(seed)
        try:
            import numpy as np
            np.random.seed(seed)
        except ImportError:
            pass
        random.seed(seed)
def shape(a: Matrix) -> Tuple[int, int]:
    """ Returns the num of rows and columns of A """
    rows = len(a)
    # An empty matrix has no first row to measure, so report zero columns.
    cols = len(a[0]) if rows else 0
    return rows, cols
def solve(A, b):
    """
    :param A: Matrix R x C
    :param b: Vector R
    :return: Vector C 'x' solving Ax=b
    >>> M = Mat(({'a', 'b', 'c', 'd'}, {'A', 'B', 'C', 'D'}), { \
    ('a', 'A'): one, ('a', 'B'): one, ('a', 'D'): one, \
    ('b', 'A'): one, ('b', 'D'): one, \
    ('c', 'A'): one, ('c', 'B'): one, ('c', 'C'): one, ('c', 'D'): one, \
    ('d', 'C'): one, ('d', 'D'): one \
    })
    >>> v = Vec(M.D[0], {'a': one, 'c': one})
    >>> solve(M, v)
    """
    # Bring A into echelon form: M is the row-transformation matrix, U = M*A.
    M = transformation(A)
    U = M*A
    col_label_list = sorted(A.D[1])
    U_rows_dict = mat2rowdict(U)
    row_list = [U_rows_dict[i] for i in sorted(U_rows_dict)]
    # NOTE(review): the intended final step appears to be
    # echelon_solve(row_list, col_label_list, M*b); the function currently
    # returns the intermediate triple instead, contradicting the docstring's
    # ":return: Vector" — confirm which behavior callers expect.
    # return echelon_solve(row_list,col_label_list, M*b)
    # print(row_list, col_label_list, repr(M * b))
    return row_list, col_label_list, M * b
def token_groups(self):
    """The groups the Token owner is a member of."""
    owner = self.created_by
    return owner.groups
def get_html_content_in_text(url, timeout=None):
    """
    Grab all the content in webpage url and return its content in text.
    Arguments:
    url -- a webpage url string.
    timeout -- optional number of seconds to wait for the server before
               raising ``requests.exceptions.Timeout``. The default of
               ``None`` keeps the original wait-indefinitely behaviour.
    Returns:
    r.text -- the content of webpage in text.
    """
    # Without a timeout, requests can block forever on a stalled server;
    # callers can now opt in to a bound without breaking existing calls.
    r = requests.get(url, timeout=timeout)
    return r.text
def subscribe():
    """Subscribe new message.

    Reads a webhook URL plus an optional HTTP header key/value pair from the
    POSTed form and registers a ``NewMessageObserver`` on the shared driver,
    so that incoming messages are forwarded to that webhook.

    Returns:
        JSON ``{"success": true}``.
    """
    webhook_url = request.form.get("webhook_url")
    header_key = request.form.get("header_key")
    header_value = request.form.get("header_value")
    # The driver lives on Flask's per-request `g` context.
    g.driver.subscribe_new_messages(NewMessageObserver(webhook_url, header_key, header_value))
    return jsonify({"success": True})
def evaluate_model_old(
    model_path: str,
    report_output_path: str,
    seg_criterion: torch.nn,
    det_criterion: typing.Callable,
    net: torch.nn.Module,
    data_loaders: typing.Tuple[
        torch.utils.data.DataLoader,
        torch.utils.data.DataLoader,
        torch.utils.data.DataLoader,
    ],
    wandb_key,
    loss_factor: float = 0.5,
    num_classes: int = 3,
    visualize: bool = False,
    input_width: int = 640,
    input_height: int = 480,
    run_name="soccer-robot",
) -> None:
    """
    Evaluate a trained soccer-robot perception model on the test loaders and
    write an evaluation report to ``report.xlsx``.

    Segmentation metrics (precision/recall/F1/accuracy — micro, macro and
    per-class — plus per-class IoU and a normalized confusion matrix) and
    detection metrics (TP/FP/TN/FN-derived scores for ball, robot and
    goalpost blobs) are accumulated per image. Optionally a side-by-side
    visualization of inputs, predictions and targets is saved per image.

    Args:
        model_path: Path of the model checkpoint to load for inference.
        report_output_path: Directory for ``report.xlsx`` and, when
            ``visualize`` is set, the ``output_images`` folder.
        seg_criterion: Loss used for the segmentation head.
        det_criterion: Loss callable used for the detection head.
        net: Network architecture matching the stored checkpoint.
        data_loaders: Tuple of (train, valid, test) loaders; only the test
            loader is consumed here.
        wandb_key: Unused by this function.
        loss_factor: Unused by this function.
        num_classes: Number of segmentation classes.
        visualize: Save per-image prediction/groundtruth figures.
        input_width: Network input width in pixels (detection maps are a
            quarter of this size).
        input_height: Network input height in pixels.
        run_name: Unused by this function.
    """
    # --- prepare output directories ---------------------------------------
    if not os.path.exists(os.path.dirname(report_output_path)):
        LOGGER.info(
            "Output directory does not exist. Creating directory %s",
            os.path.dirname(report_output_path),
        )
        os.makedirs(os.path.dirname(report_output_path))
    if visualize and (
        not os.path.exists(os.path.join(report_output_path, "output_images"))
    ):
        os.makedirs(os.path.join(report_output_path, "output_images"))
        LOGGER.info(
            "Saving images in the directory: %s",
            os.path.join(report_output_path, "output_images"),
        )
    # --- load the checkpoint on CPU and switch to inference mode ----------
    device = torch.device("cpu")
    state_test = torch.load(model_path, map_location=device)
    net.load_state_dict(state_test)
    net.eval()
    # instantiate dataset
    train_loader, valid_loader, test_loader = data_loaders
    LOGGER.info("Evaluating Soccer Robot Perception using the model, %s", model_path)
    LOGGER.info("Results will be written to the path, %s", report_output_path)
    LOGGER.info("Ready to start evaluating!")
    # --- per-image metric accumulators ------------------------------------
    df_seg_columns = [
        "seg loss",
        "precision",
        "recall",
        "f1-score",
        "accuracy",
    ]
    df_det_columns = [
        "det loss",
        "tp",
        "fp",
        "tn",
        "fn",
        "precision",
        "recall",
        "f1-score",
        "accuracy",
        "fdr",
    ]
    df_micro = pd.DataFrame(columns=df_seg_columns)
    df_macro = pd.DataFrame(columns=df_seg_columns)
    df_iou = pd.DataFrame(columns=["bg", "field", "lines"])
    confusion_matrix_array = np.zeros((num_classes, num_classes))
    precision_per_class = np.zeros((num_classes))
    recall_per_class = np.zeros((num_classes))
    f1score_per_class = np.zeros((num_classes))
    accuracy_per_class = np.zeros((num_classes))
    df_det_ball = pd.DataFrame(columns=df_det_columns)
    df_det_robot = pd.DataFrame(columns=df_det_columns)
    df_det_goalpost = pd.DataFrame(columns=df_det_columns)
    for loader in test_loader:
        for data in loader:
            LOGGER.info("Predicting on image: %d", len(df_micro) + 1)
            input_image = data["image"]
            det_out, seg_out = net(input_image)
            det_out_collected = []
            det_target_collected = []
            seg_out_collected = []
            seg_target_collected = []
            # To calculate loss for each data
            # Split the batch by task: each sample is either a detection
            # sample or a segmentation sample.
            for n, i in enumerate(data["dataset_class"]):
                if i == "detection":
                    det_target_collected.append(data["det_target"][n].unsqueeze_(0))
                    det_out_collected.append(det_out[n].unsqueeze_(0))
                else:
                    seg_target_collected.append(data["seg_target"][n].unsqueeze_(0))
                    seg_out_collected.append(seg_out[n].unsqueeze_(0))
            # --- detection branch: loss + blob-center metrics -------------
            if len(det_target_collected) != 0:
                det_target_tensor = torch.cat(det_target_collected, dim=0)
                det_out_tensor = torch.cat(det_out_collected, dim=0)
                det_loss = det_criterion(det_out_tensor, det_target_tensor)
                # Extract predicted blob centers per object class
                # (1 = ball, 2 = robot, 3 = goalpost).
                ball_points = center_of_shape(det_out[0][0].detach().numpy(), 1)
                robot_points = center_of_shape(det_out[0][1].detach().numpy(), 2)
                goalpost_points = center_of_shape(det_out[0][2].detach().numpy(), 3)
                # Render the predicted centers as blobs for visualization.
                # NOTE(review): blob_map is only assigned on this branch, so
                # a segmentation-only batch with visualize=True reuses a
                # stale value (or raises NameError on the first batch).
                blob_map = np.zeros(
                    (3, int(input_height / 4), int(input_width / 4))
                )
                ball_map = plot_blobs(ball_points, 6)
                robot_map = plot_blobs(robot_points, 12)
                goalpost_map = plot_blobs(goalpost_points, 6)
                blob_map[0] = ball_map
                blob_map[1] = robot_map
                blob_map[2] = goalpost_map
            if len(det_target_collected) != 0:
                (
                    tp,
                    fp,
                    tn,
                    fn,
                    precision,
                    recall,
                    f1,
                    accuracy,
                    fdr,
                ) = calculate_det_metrics(ball_points, data["blob_centers"][0], 1)
                df_det_ball.loc[len(df_det_ball)] = [
                    det_loss.detach().numpy(),
                    tp,
                    fp,
                    tn,
                    fn,
                    precision,
                    recall,
                    f1,
                    accuracy,
                    fdr,
                ]
                (
                    tp,
                    fp,
                    tn,
                    fn,
                    precision,
                    recall,
                    f1,
                    accuracy,
                    fdr,
                ) = calculate_det_metrics(robot_points, data["blob_centers"][0], 2)
                df_det_robot.loc[len(df_det_robot)] = [
                    det_loss.detach().numpy(),
                    tp,
                    fp,
                    tn,
                    fn,
                    precision,
                    recall,
                    f1,
                    accuracy,
                    fdr,
                ]
                (
                    tp,
                    fp,
                    tn,
                    fn,
                    precision,
                    recall,
                    f1,
                    accuracy,
                    fdr,
                ) = calculate_det_metrics(goalpost_points, data["blob_centers"][0], 3)
                df_det_goalpost.loc[len(df_det_goalpost)] = [
                    det_loss.detach().numpy(),
                    tp,
                    fp,
                    tn,
                    fn,
                    precision,
                    recall,
                    f1,
                    accuracy,
                    fdr,
                ]
            else:
                # No detection samples in this batch: record zero rows so
                # the detection frames stay aligned with the image count.
                det_loss = torch.tensor(
                    0, dtype=torch.float32, requires_grad=True, device=device
                )
                df_det_ball.loc[len(df_det_ball)] = [
                    0,
                    0,
                    0,
                    0,
                    0,
                    0,
                    0,
                    0,
                    0,
                    0,
                ]
                df_det_robot.loc[len(df_det_robot)] = [
                    0,
                    0,
                    0,
                    0,
                    0,
                    0,
                    0,
                    0,
                    0,
                    0,
                ]
                df_det_goalpost.loc[len(df_det_goalpost)] = [
                    0,
                    0,
                    0,
                    0,
                    0,
                    0,
                    0,
                    0,
                    0,
                    0,
                ]
            # --- segmentation branch: loss, IoU and classification metrics -
            if len(seg_target_collected) != 0:
                seg_target_tensor = torch.cat(seg_target_collected, dim=0)
                seg_out_tensor = torch.cat(seg_out_collected, dim=0)
                seg_loss = seg_criterion(seg_out_tensor, seg_target_tensor.long())
                seg_out_max = torch.argmax(seg_out_tensor, dim=1)
                outputs_seg_flatten = torch.flatten(seg_out_max, start_dim=0).unsqueeze_(0)
                labels_seg_flatten = torch.flatten(
                    seg_target_tensor, start_dim=0
                ).unsqueeze_(0)
                (
                    target_bg_iou_map,
                    target_field_iou_map,
                    target_lines_iou_map,
                ) = iou_metrics_preprocess(seg_target_tensor)
                (
                    output_bg_iou_map,
                    output_field_iou_map,
                    output_lines_iou_map,
                ) = iou_metrics_preprocess(seg_out_max)
                iou_bg = calculate_iou(target_bg_iou_map, output_bg_iou_map)
                iou_field = calculate_iou(target_field_iou_map, output_field_iou_map)
                iou_lines = calculate_iou(target_lines_iou_map, output_lines_iou_map)
                df_iou.loc[len(df_iou)] = [
                    iou_bg.detach().item(),
                    iou_field.detach().item(),
                    iou_lines.detach().item(),
                ]
                precision, recall, f1score, accuracy = calculate_metrics(
                    labels_seg_flatten.detach().numpy(),
                    outputs_seg_flatten.detach().numpy(),
                    False,
                    "micro",
                )
                df_micro.loc[len(df_micro)] = [
                    seg_loss.detach().numpy(),
                    precision,
                    recall,
                    f1score,
                    accuracy,
                ]
                precision, recall, f1score, accuracy = calculate_metrics(
                    labels_seg_flatten.detach().numpy(),
                    outputs_seg_flatten.detach().numpy(),
                    False,
                    "macro",
                )
                df_macro.loc[len(df_macro)] = [
                    seg_loss.detach().numpy(),
                    precision,
                    recall,
                    f1score,
                    accuracy,
                ]
                image_precision, image_recall, image_f1score, _ = calculate_metrics(
                    labels_seg_flatten.detach().numpy(),
                    outputs_seg_flatten.detach().numpy(),
                    True,
                )
                precision_per_class = precision_per_class + image_precision
                recall_per_class = recall_per_class + image_recall
                f1score_per_class = f1score_per_class + image_f1score
                confusion_matrix_array = confusion_matrix_array + get_confusion_matrix(
                    labels_seg_flatten.detach().numpy(),
                    outputs_seg_flatten.detach().numpy(),
                )
                accuracy_per_class = accuracy_per_class + (
                    confusion_matrix_array.diagonal() / confusion_matrix_array.sum(axis=1)
                )
            else:
                # No segmentation samples: record zero rows for alignment.
                seg_loss = torch.tensor(
                    0, dtype=torch.float32, requires_grad=True, device=device
                )
                df_iou.loc[len(df_iou)] = [0, 0, 0]
                df_micro.loc[len(df_micro)] = [
                    seg_loss.detach().numpy(),
                    0,
                    0,
                    0,
                    0,
                ]
                df_macro.loc[len(df_macro)] = [
                    seg_loss.detach().numpy(),
                    0,
                    0,
                    0,
                    0,
                ]
                precision_per_class = precision_per_class + 0
                recall_per_class = recall_per_class + 0
                f1score_per_class = f1score_per_class + 0
                accuracy_per_class = accuracy_per_class + 0
                confusion_matrix_array = confusion_matrix_array + 0
            loss = seg_loss + det_loss
            LOGGER.info(
                "image: %d, loss: %f, segment loss: %f, regression loss: %f",
                len(df_micro),
                loss.item(),
                seg_loss.item(),
                det_loss.item(),
            )
            # --- optional side-by-side visualization -----------------------
            # NOTE(review): `n` below is the leftover index from the
            # enumerate loop above (i.e. the batch's last sample) — confirm
            # that is the intended sample to display.
            if visualize:
                new_image = input_image[0].permute(1, 2, 0).detach().numpy()
                plt.subplot(231)
                plt.imshow(cv2.resize(new_image, (160, 120), cv2.INTER_NEAREST))
                plt.title("Input")
                plt.subplot(232)
                plt.imshow((det_out[0].detach().permute(1, 2, 0).numpy() * 255).astype(np.uint8))
                plt.title("Det out")
                plt.subplot(233)
                plt.imshow((torch.argmax(seg_out, dim=1)[0].detach().numpy()), cmap="gray")
                plt.title("Seg out")
                if len(det_target_collected) != 0:
                    plt.subplot(234)
                    plt.imshow((data["det_target"][n][0].detach().permute(1, 2, 0).numpy() * 255).astype(np.uint8))
                    plt.title("Det tar")
                else:
                    plt.subplot(234)
                    plt.imshow(np.zeros((120, 160)), cmap='gray')
                    plt.title("Det tar")
                if len(seg_target_collected) != 0:
                    plt.subplot(235)
                    plt.imshow(data["seg_target"][n][0].numpy(), cmap="gray")
                    plt.title("Seg tar")
                else:
                    plt.subplot(235)
                    plt.imshow(np.zeros((120, 160)), cmap='gray')
                    plt.title("Seg tar")
                plt.subplot(236)
                plt.imshow((np.transpose(blob_map, (1, 2, 0)) * 255).astype(np.uint8))
                plt.title("Blobs")
                plt.savefig(
                    report_output_path
                    + "/output_images/"
                    + str(len(df_micro) + 1)
                    + "_pred.jpg"
                )
                plt.close()
    # --- aggregate per-image rows and write the Excel report ---------------
    df_iou.loc["mean"] = df_iou.mean()
    df_micro.loc["mean"] = df_micro.mean()
    df_macro.loc["mean"] = df_macro.mean()
    df_confusion_matrix = pd.DataFrame(confusion_matrix_array / len(df_micro))
    df_precision_per_class = pd.DataFrame(precision_per_class / len(df_micro))
    df_recall_per_class = pd.DataFrame(recall_per_class / len(df_micro))
    df_f1score_per_class = pd.DataFrame(f1score_per_class / len(df_micro))
    df_accuracy_per_class = pd.DataFrame(accuracy_per_class / len(df_micro))
    df_det_ball.loc["mean"] = df_det_ball.mean()
    df_det_robot.loc["mean"] = df_det_robot.mean()
    df_det_goalpost.loc["mean"] = df_det_goalpost.mean()
    excel_writer = pd.ExcelWriter(
        os.path.join(report_output_path, "report.xlsx"), engine="xlsxwriter"
    )
    df_micro.to_excel(excel_writer, sheet_name="micro")
    df_macro.to_excel(excel_writer, sheet_name="macro")
    df_iou.to_excel(excel_writer, sheet_name="iou")
    df_confusion_matrix.to_excel(excel_writer, sheet_name="normalized_confusion_matrix")
    df_precision_per_class.to_excel(excel_writer, sheet_name="precision_per_class")
    df_recall_per_class.to_excel(excel_writer, sheet_name="recall_per_class")
    df_f1score_per_class.to_excel(excel_writer, sheet_name="f1score_per_class")
    df_accuracy_per_class.to_excel(excel_writer, sheet_name="accuracy_per_class")
    df_det_ball.to_excel(excel_writer, sheet_name="ball_det")
    df_det_robot.to_excel(excel_writer, sheet_name="robot_det")
    df_det_goalpost.to_excel(excel_writer, sheet_name="goalpost_det")
    excel_writer.save()
    LOGGER.info("Results were written to %s", report_output_path)
def checkAttribute(node, attribute, value):
    """Check that an attribute holds the expected value, and that the
    corresponding accessor method, if provided, returns an equivalent
    value.

    Raises AssertionError with a descriptive message on any mismatch.
    """
    # The Python 2 backtick syntax (`x`) was replaced with repr(x), which
    # behaves identically and is valid on both Python 2 and 3.
    v1 = getattr(node, attribute)
    if v1 != value:
        raise AssertionError(
            "attribute value does not match\n  expected: %s\n  found: %s"
            % (repr(value), repr(v1)))
    if hasattr(node, "_get_" + attribute):
        v2 = getattr(node, "_get_" + attribute)()
        if v2 != value:
            raise AssertionError(
                "accessor result does not match\n  expected: %s\n  found: %s"
                % (repr(value), repr(v2)))
        if v1 != v2:
            raise AssertionError(
                "attribute & accessor result don't compare equal\n"
                "  attribute: %s\n  accessor: %s"
                % (repr(v1), repr(v2)))
def seek_tell(fileobj, offset):
    """ Seek in `fileobj` or check we're in the right place already

    Parameters
    ----------
    fileobj : file-like
        object implementing ``seek`` and (if seek raises an IOError) ``tell``
    offset : int
        position in file to which to seek

    Raises
    ------
    IOError
        If the seek failed and the stream is not already at `offset`.
    """
    try:
        fileobj.seek(offset)
    except IOError as e:
        # The stream may be unseekable; that is acceptable as long as it is
        # already positioned at `offset`.
        if fileobj.tell() != offset:
            # Chain the original exception so the real cause stays visible
            # in the traceback instead of being flattened to a string.
            raise IOError(str(e)) from e
def SegAlign(ea, alignment):
    """
    Change alignment of the segment
    @param ea: any address in the segment
    @param alignment: new alignment of the segment (one of the sa... constants)
    @return: success (boolean)
    """
    # Thin convenience wrapper over the generic segment-attribute setter.
    return SetSegmentAttr(ea, SEGATTR_ALIGN, alignment)
def create_bitlink(logger, headers='', long_url='google.com'):
    """
    Create a short link from a long URL via the Bitly v4 API.

    :param logger: logger object
    :param headers: request headers containing the Generic Access Token
                    generated on the Bitly site
    :param long_url: the link to shorten
    :return: the created short link (bitlink id)
    """
    url_template = 'https://api-ssl.bitly.com/v4/{}'
    user, bit = ['user', 'bitlinks']
    with requests.Session() as s:
        # A bitlink is created inside a group, so first look up the
        # account's default group GUID.
        bitl_user_info = s.get(url_template.format(user), headers=headers)
        logger.info(f'Получаем группу по пользователю ответ: {bitl_user_info.json()}')
        group_guid = bitl_user_info.json()['default_group_guid']
        payload = {'group_guid': group_guid, 'title': 'shortlink', 'long_url': long_url}
        response = s.post(url_template.format(bit), json=payload, headers=headers)
        bitlink = response.json()['id']
    return bitlink
def compute_neq(count_mat):
    """
    Compute the per-residue Neq values from an occurence matrix.

    Parameters
    ----------
    count_mat : numpy array
        an occurence matrix returned by `count_matrix`.

    Returns
    -------
    numpy array
        a 1D array containing the neq values
    """
    # Convert raw counts to frequencies, then evaluate Neq row by row
    # (one row per residue).
    freq_matrix = utils.compute_freq_matrix(count_mat)
    return numpy.apply_along_axis(_neq_per_residue, 1, freq_matrix)
def _GetOptionsParser():
"""Get the options parser."""
parser = optparse.OptionParser(__doc__)
parser.add_option('-i',
'--input',
dest='inputs',
action='append',
default=[],
help='One or more input files to calculate dependencies '
'for. The namespaces in this file will be combined with '
'those given with the -n flag to form the set of '
'namespaces to find dependencies for.')
parser.add_option('-n',
'--namespace',
dest='namespaces',
action='append',
default=[],
help='One or more namespaces to calculate dependencies '
'for. These namespaces will be combined with those given '
'with the -i flag to form the set of namespaces to find '
'dependencies for. A Closure namespace is a '
'dot-delimited path expression declared with a call to '
'goog.provide() (e.g. "goog.array" or "foo.bar").')
parser.add_option('--root',
dest='roots',
action='append',
default=[],
help='The paths that should be traversed to build the '
'dependencies.')
parser.add_option('-o',
'--output_mode',
dest='output_mode',
type='choice',
action='store',
choices=['list', 'script', 'compiled'],
default='list',
help='The type of output to generate from this script. '
'Options are "list" for a list of filenames, "script" '
'for a single script containing the contents of all the '
'files, or "compiled" to produce compiled output with '
'the Closure Compiler. Default is "list".')
parser.add_option('-c',
'--compiler_jar',
dest='compiler_jar',
action='store',
help='The location of the Closure compiler .jar file.')
parser.add_option('-f',
'--compiler_flags',
dest='compiler_flags',
default=[],
action='append',
help='Additional flags to pass to the Closure compiler. '
'To pass multiple flags, --compiler_flags has to be '
'specified multiple times.')
parser.add_option('--output_file',
dest='output_file',
action='store',
help=('If specified, write output to this path instead of '
'writing to standard output.'))
return parser | 5,331,850 |
def spikalize_img(experiment, image, label):
    """
    Transform image to spikes. Spike with poisson distributed rate proportional to pixel brightness.

    :param experiment: configuration object providing ``timesteps`` (number of
        spike frames generated) and ``max_rate`` (firing probability scale for
        a fully bright pixel) — assumed attributes; confirm against callers.
    :param image: image tensor with pixel values in [0, 255].
    :param label: class label, passed through unchanged.
    :return: tuple ``(spiked_img, label)`` where ``spiked_img`` has shape
        ``(timesteps, *image.shape)`` with float 0/1 spike values.
    """
    # One uniform random draw per timestep and pixel; a spike fires whenever
    # the brightness-scaled rate exceeds the draw.
    image_shape = np.append(np.array(experiment.timesteps), np.array(image.shape))
    rand = tf.random.uniform(shape=image_shape)
    spiked_img = tf.cast(image / 255 * experiment.max_rate > rand, tf.float32)
    return spiked_img, label
def retrieve_value(step,standard,entity_property):
    """
    Function attempts to retrieve entity property from datastore

    :param step: lettuce step object (not used by this step body)
    :param standard: Bibliographic standard or schema being tested
        (currently unused here)
    :param entity_property: Entity property being extracted from schema
    """
    # Stash the property key and its Redis value on the shared `world`
    # object for later steps; `world.frbr_entity` is presumably set by a
    # previous step — verify against the feature file.
    world.redis_property_key = entity_property
    world.value = redis_server.get(world.frbr_entity,entity_property)
def update_security_schemes(spec, security, login_headers, security_schemes,
                            unauthorized_schema):
    """Patch OpenAPI spec to include security schemas.

    Installs the given security schemes and global security requirements
    into the spec and attaches a copy of the login response headers to the
    200 response of POST /login.

    Args:
        spec: OpenAPI spec dictionary

    Returns:
        Patched spec
    """
    # NOTE: `unauthorized_schema` is accepted but not applied by this
    # patcher.
    spec["components"]["securitySchemes"] = security_schemes
    spec["security"] = security
    login_responses = spec["paths"]["/login"]["post"]["responses"]
    # Copy so later mutations of the caller's dict don't leak into the spec.
    login_responses[200]["headers"] = login_headers.copy()
    return spec
def get_geckodriver_url(version):
    """
    Build the geckodriver download URL for the current platform and
    architecture at the given version. Supports Linux, MacOS and Windows.

    :param version: the version of geckodriver
    :return: Download URL for geckodriver
    """
    platform, architecture = get_platform_architecture()
    release_root = 'https://github.com/mozilla/geckodriver/releases/download'
    archive_name = f'geckodriver-{version}-{platform}{architecture}.tar.gz'
    return f'{release_root}/{version}/{archive_name}'
def get_graph_size(depth: int):
    """returns how many nodes are in fully-equipped with nodes graph of the given depth"""
    total = 1          # the root node
    level_width = 1    # number of nodes on the current level
    known_levels = len(expand_sizes)
    # Levels with an explicit branching factor.
    for factor in expand_sizes[:depth]:
        level_width *= factor
        total += level_width
    # Remaining levels all expand by the fallback factor.
    if known_levels < depth:
        total += level_width * later_expand_size * (depth - known_levels)
    return total
def inherits_from(obj, parent):
    """
    Takes an object and tries to determine if it inherits at *any*
    distance from parent.
    Args:
        obj (any): Object to analyze. This may be either an instance
            or a class.
        parent (any): Can be either instance, class or python path to class.
    Returns:
        inherits_from (bool): If `parent` is a parent to `obj` or not.
    Notes:
        What differs this function from e.g. `isinstance()` is that `obj`
        may be both an instance and a class, and parent may be an
        instance, a class, or the python path to a class (counting from
        the evennia root directory).
    """
    # Bug fix: the original used callable() to decide class-vs-instance,
    # which misclassifies instances defining __call__ as classes and then
    # crashes on the missing .mro() attribute. isinstance(..., type) is the
    # correct test.
    obj_cls = obj if isinstance(obj, type) else obj.__class__
    obj_paths = ["%s.%s" % (mod.__module__, mod.__name__) for mod in obj_cls.mro()]

    if isinstance(parent, str):
        # a given string path, for direct matching
        parent_path = parent
    elif isinstance(parent, type):
        # this is a class
        parent_path = "%s.%s" % (parent.__module__, parent.__name__)
    else:
        parent_path = "%s.%s" % (parent.__class__.__module__, parent.__class__.__name__)
    return any(1 for obj_path in obj_paths if obj_path == parent_path)
def AddDescriptionFlag(parser):
  """Add the description argument.

  Registers the optional ``--description`` flag (empty string by default)
  on the given parser.

  Args:
    parser: An argparse parser that you can use to add arguments that go
        on the command line after this command. Positional arguments are
        allowed.
  """
  parser.add_argument(
      '--description',
      default='',
      help='Optional description of the composite type.')
def load_df_from_googlesheet(
    url_string: str,
    skiprows: Optional[int] = 0,
    skipfooter: Optional[int] = 0,
) -> pd.DataFrame:
    """Load a Pandas DataFrame from a google sheet.

    Given a file object, try to read the content as a CSV file and transform
    into a data frame. The skiprows and skipfooter are number of lines to skip
    from the top and bottom of the file (see read_csv in pandas).

    It also tries to convert as many columns as possible to date/time format
    (testing the conversion on every string column).

    :param url_string: URL where the file is available
    :param skiprows: Number of lines to skip at the top of the document
    :param skipfooter: Number of lines to skip at the bottom of the document
    :return: Resulting data frame, or an Exception.
    """
    # Process the URL provided by google. If the URL is obtained using the
    # GUI, it has as suffix /edit?[parameters]. This part needs to be
    # replaced by the suffix /export?format=csv
    # For example from:
    # https://docs.google.com/spreadsheets/d/DOCID/edit?usp=sharing
    # to
    # https://docs.google.com/spreadsheets/d/DOCID/export?format=csv&gid=0
    parse_res = urlparse(url_string)
    if parse_res.path.endswith('/edit'):
        qs_dict = parse_qs(parse_res.query)
        qs_dict['format'] = 'csv'

        new_fragment = parse_res.fragment
        # The sheet (tab) id may live in the URL fragment (e.g. "#gid=0");
        # move it into the query string so the export picks the right tab.
        if 'gid=' in parse_res.fragment:
            qs_dict['gid'] = parse_res.fragment.split('=')[1]
            new_fragment = ''

        url_string = urlunparse([
            parse_res.scheme,
            parse_res.netloc,
            parse_res.path.replace('/edit', '/export'),
            parse_res.params,
            urlencode(qs_dict, doseq=True),
            new_fragment,
        ])

    # Process the link using pandas read_csv
    return load_df_from_csvfile(url_string, skiprows, skipfooter)
def decomp(bits, dummies=default_dummies, width=default_width):
    """Translate 0s and 1s to dummies[0] and dummies[1]."""
    # Map every bit to its dummy word, join with single spaces, then wrap.
    flat = ' '.join(dummies[bit] for bit in bits)
    return wrap_source(flat, width=width)
def get_word_node_attrs(word: Word) -> WordNodeAttrs:
    """Build the node-attribute dict for a stanza-annotated `Word`.

    The returned dict carries the word's features for networkx's feature
    graph, plus `label`/`color` entries consumed by PyVis. Note that the
    term `Word` (rather than `Token`) mirrors stanza's data structures: a
    stanza `Token` may hold more than one word for multi-word tokens, see
    'https://stanfordnlp.github.io/stanza/data_objects.html#token'.

    Arguments:
        word: Word
            A stanza-annotated word.

    Return: WordNodeAttrs
        A dictionary with the word's features for the feature graph.
    """
    # The sentence head (head == 0) gets a distinguishing color.
    node_color = (GraphNodeColor.HEAD.value if word.head == 0
                  else GraphNodeColor.TOKEN.value)
    return {
        'fname': word.sent.doc.fname,
        'start_idx': word.parent.start_char,
        'end_idx': word.parent.end_char,
        'text': word.text,
        'upos': word.upos,
        'lemma': word.lemma,
        'label': word.text,  # for PyVis
        'color': node_color,  # for PyVis
    }
def numpy_bbox_to_image(image, bbox_list, labels=None, scores=None, class_name=[], config=None):
    """ Numpy function used to display the bbox (target or prediction).

    :param image: float32 HxWxC image, normalized according to `config`.
    :param bbox_list: bboxes in normalized (xc, yc, w, h) format.
    :param labels: optional class id per bbox (defaults to all zeros).
    :param scores: optional confidence per bbox, appended to the label text.
    :param class_name: list mapping class id -> display name (read-only;
        the empty-list default is never mutated).
    :param config: optional config with `normalized_method`, used to undo
        the input normalization before drawing.
    :return: the image with boxes and labels drawn onto it.
    """
    # Original assert checked `image.dtype == np.float32` twice; the
    # duplicate clause was a defect and has been removed.
    assert image.dtype == np.float32 and len(image.shape) == 3
    # Undo the input normalization so the image can be displayed.
    if config is not None and config.normalized_method == "torch_resnet":
        channel_avg = np.array([0.485, 0.456, 0.406])
        channel_std = np.array([0.229, 0.224, 0.225])
        image = (image * channel_std) + channel_avg
        image = (image*255).astype(np.uint8)
    elif config is not None and config.normalized_method == "tf_resnet":
        image = image[..., ::-1]
        image = image / 255
    # Rescale normalized boxes to pixel space and convert to corner form.
    bbox_xcycwh = bbox.np_rescale_bbox_xcycwh(bbox_list, (image.shape[0], image.shape[1]))
    bbox_x1y1x2y2 = bbox.np_xcycwh_to_xy_min_xy_max(bbox_xcycwh)
    # Set the labels if not defined
    if labels is None:
        labels = np.zeros((bbox_x1y1x2y2.shape[0]))
    # Compute each box's area so larger boxes can be drawn first.
    bbox_area = []
    for b in range(0, bbox_x1y1x2y2.shape[0]):
        x1, y1, x2, y2 = bbox_x1y1x2y2[b]
        bbox_area.append((x2-x1)*(y2-y1))
    # Draw from largest to smallest so small boxes stay visible on top.
    for b in np.argsort(bbox_area)[::-1]:
        x1, y1, x2, y2 = bbox_x1y1x2y2[b]
        x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
        # Clip the box to the image bounds.
        x1, y1, x2, y2 = max(0, x1), max(0, y1), min(image.shape[1], x2), min(image.shape[0], y2)
        # Select the class associated with this bbox
        class_id = labels[int(b)]
        if scores is not None and len(scores) > 0:
            label_name = class_name[int(class_id)]
            label_name = "%s:%.2f" % (label_name, scores[b])
        else:
            label_name = class_name[int(class_id)]
        # Per-class color; an unused random per-instance color was removed.
        class_color = CLASS_COLOR_MAP[int(class_id)]
        multiplier = image.shape[0] / 500
        # Filled label background, label text, then the box outline.
        cv2.rectangle(image, (x1, y1), (x1 + int(multiplier*15)*len(label_name), y1 + 20), class_color.tolist(), -10)
        cv2.putText(image, label_name, (x1+2, y1 + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6 * multiplier, (0, 0, 0), 1)
        cv2.rectangle(image, (x1, y1), (x2, y2), tuple(class_color.tolist()), 2)
    return image
def loads(text, template=None, colored=None, comments=None, **kwargs):
    """
    Deserialize *text* (a `str` or `unicode` instance containing a JSON
    document supporting template references `{$.key}`) to a Python object.

    :param text: serialized JSON string
    :type text: str
    :param template: (optional) None, str, dict, list, io.IOBase - causes
        template values to be sourced from this object
    :type template: dict
    :type template: list
    :param kwargs: all the arguments that `json.loads <http://docs.python.org/
        2/library/json.html#json.loads>`_ accepts.
    :returns: dict or list.
    """
    acceptable = (str, bytes, bytearray)
    if not isinstance(text, acceptable):
        # Let the stdlib raise its usual TypeError for unsupported input.
        json.loads(text)
    return _loads(text=text, template=template, colored=colored,
                  comments=comments, **kwargs)
def redact_tiff_tags(ifds, redactList, title):
    """
    Redact any tags of the form *;tiff.<tiff tag name> from all IFDs.

    :param ifds: a list of ifd info records.  Tags may be removed or modified.
    :param redactList: the list of redactions (see get_redact_list).
    :param title: the new title for the item.  If any of a list of title tags
        exist, they are replaced with this value.
    """
    # redactedTags maps IFD index -> {tag value -> replacement or None}.
    redactedTags = {}
    for key, value in redactList['metadata'].items():
        tiffkey = key.rsplit(';tiff.', 1)[-1]
        tiffdir = 0
        if ';tiff;' in key:
            tiffkey = key.rsplit(';tiff;', 1)[-1]
        # A ":N" suffix selects a specific IFD (directory) index.
        if ':' in tiffkey:
            tiffkey, tiffdir = tiffkey.rsplit(':', 1)
            tiffdir = int(tiffdir)
        if tiffkey in tifftools.Tag:
            tag = tifftools.Tag[tiffkey].value
            redactedTags.setdefault(tiffdir, {})
            redactedTags[tiffdir][tag] = value['value']
    # Title tags are replaced wherever they occur, in any IFD.  Previously
    # these were stored at the top level of redactedTags keyed by tag value
    # instead of IFD index, so the per-IFD lookup below never matched them
    # and titles were silently never applied.
    titleTags = {tifftools.Tag[titleKey].value: title
                 for titleKey in ('DocumentName', 'NDPI_REFERENCE')}
    for idx, ifd in enumerate(ifds):
        # convert to a list since we may mutate the tag dictionary
        for tag, taginfo in list(ifd['tags'].items()):
            if tag in redactedTags.get(idx, {}):
                newValue = redactedTags[idx][tag]
            elif tag in titleTags:
                newValue = titleTags[tag]
            else:
                continue
            if newValue is None:
                # A None replacement means remove the tag entirely.
                del ifd['tags'][tag]
            else:
                taginfo['datatype'] = tifftools.Datatype.ASCII
                taginfo['data'] = newValue
def AsyncSleep(delay, name=None):
    """Pause for `delay` seconds (which need not be an integer).

    This is an asynchronous (non-blocking) version of a sleep op. It includes
    any time spent being blocked by another thread in `delay`. If it is blocked
    for a fraction of the time specified by `delay`, it calls `sleep`
    (actually `usleep`) only for the remainder. If it is blocked for the full
    time specified by `delay` or more, it returns without explicitly calling
    `sleep`.

    Args:
      delay: tf.Tensor which is a scalar of type float.
      name: An optional name for the op.

    Returns:
      The `delay` value.
    """
    # Thin wrapper over the generated op binding (defined elsewhere).
    return examples_async_sleep(delay=delay, name=name)
def palide(string, length, ellipsis="...", pad=" ", position=1.0, left=False):
    """
    A combination of `elide` and `pad`: elide `string` to `length`, then
    pad the result back out to `length`.

    The `pad` parameter shadows the module-level `pad` function, which is
    why the function object is fetched via `globals()`.
    """
    pad_function = globals()["pad"]
    elided = elide(string, length, ellipsis=ellipsis, position=position)
    return pad_function(elided, length, pad=pad, left=left)
def get_email_adderess(email_addr):
    """ Return dict from opalstack for given email address, or None """
    # NOTE: the function name keeps its historical spelling for API
    # compatibility with existing callers.
    records = get_request("mail/list/")['mails']
    match = next((r for r in records if r['address'] == email_addr), None)
    if match is None:
        return None
    return get_request("mail/read/{}".format(match['id']))
def main(app, env_file, api_key, set_alt, dump):
    """
    CLI tool to manipulate environment variables on Heroku
    with local .env files, through the Heroku API.
    It is recommended for security purposes that you set API
    key as an environment variable like this:
    export HEROKU_API_KEY=a1b12c24-ab1d-123f-5678-1234b12a0a1b
    Example usages:
    heroku.env --app swimming-briskly-123 --env-file dot.env
    heroku.env --app swimming-briskly-123 --env-file dot.env
    --api-key a1b12c24-ab1d-123f-5678-1234b12a0a1b
    """
    # Expose the key to the Heroku client library if not already set.
    if not os.getenv('HEROKU_API_KEY'):
        os.environ['HEROKU_API_KEY'] = api_key
    try:
        if dump:
            dump_env(app, env_file)
        else:
            upload_env(app, env_file, set_alt)
    except IndexError:
        raise click.ClickException("The entries in your .env file are not of the form KEY=VALUE")
    except HerokuRunError as e:
        # launch Heroku troubleshooting page for a failed run.
        click.launch(HEROKU_TROUBLESHOOT_URL)
        raise click.ClickException(str(e))
    except InvalidAPIKeyError as e:
        # launch API key doc
        click.launch(HEROKU_API_KEY_HELP_URL)
        raise click.ClickException(str(e))
    except (
        # HerokuRunError was also listed here, but it is already handled
        # above and could never reach this clause; it has been removed.
        InvalidHerokuAppError,
        EnvFileNotFoundError,
        EnvFileNotWritableError
    ) as e:
        raise click.ClickException(str(e))
    except Exception as e:
        # all other exceptions
        click.echo(e)
        raise click.ClickException("An unknown error occurred. Please open an issue with the log.")
def GK3toUTM(ea, no=None, zone=32):
    """Transform Gauss-Krueger zone 3 into UTM (for backward compatibility).

    Thin wrapper around ``GKtoUTM`` with ``gkzone`` fixed to 3.  See
    ``GKtoUTM`` for the accepted forms of ``ea``/``no`` and for the
    meaning of ``zone``.
    """
    return GKtoUTM(ea, no, zone, gkzone=3)
def test_command():
    """Smoke test: build a VIIRS 'mollweide' projection and print its type."""
    projection = __create_viirs_projection("mollweide")
    # Only inspects the resulting type; there is no assertion here.
    print(type(projection))
def test_bad_username(client: TestClient, session: db.Session):
    """Logging in with invalid username must generate an error.

    Creates a real user/password pair, then requests a token with a
    non-existent email; the API must answer 401 Unauthorized.
    """
    user, password = utils.create_user_password(session)
    # Prefixing the real address guarantees the email matches no user.
    bad_email = f"nope_{user.email}"
    response = client.post("/v2/token", {"username": bad_email, "password": password})
    assert response.status_code == status.HTTP_401_UNAUTHORIZED, response.json()
def get_all_predictions(
    model: nn.Module,
    dataloader: DataLoader,
    device: _Device,
    threshold_prob: Optional[float] = None,
    decouple_fn: Optional[_DecoupleFnTest] = None,
) -> _TestResult:
    """
    Make predictions on entire dataset and return raw outputs
    and optionally class predictions and probabilities if it's
    a classification model.

    Thin wrapper that delegates to `perform_one_epoch()` with
    phase="test", forwarding all other arguments unchanged.
    See `perform_one_epoch()` for more details.
    """
    return perform_one_epoch(
        phase="test",
        model=model,
        dataloader=dataloader,
        device=device,
        threshold_prob=threshold_prob,
        decouple_fn=decouple_fn,
    )
def plot_perf_stats(returns, factor_returns):
    """
    Compute bootstrapped performance metrics of the strategy.

    NOTE(review): despite the name and the original docstring, this
    function does not draw anything and does not return a matplotlib
    Axes — it returns the bootstrap samples as a DataFrame (with the
    'Kurtosis' column dropped).  Confirm whether a plotting step was
    intended here.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in tears.create_full_tear_sheet.
    factor_returns : pd.Series
        Daily noncumulative returns of the benchmark factor to which betas are
        computed. Usually a benchmark such as market returns.
        - This is in the same style as returns.

    Returns
    -------
    pd.DataFrame
        Bootstrap samples of the performance statistics, one column per
        statistic ('Kurtosis' removed).
    """
    bootstrap_values = timeseries.perf_stats_bootstrap(returns,
                                                       factor_returns,
                                                       return_stats=False)
    # Kurtosis is excluded from the reported statistics.
    bootstrap_values = bootstrap_values.drop('Kurtosis', axis='columns')
    return bootstrap_values
def create(user):
    """
    This function creates a new user in the database
    based on the passed-in user data.

    :param user: User to create in database; a mapping with "username"
        and "postalcode" keys
    :return: 201 on success, 400 on bad postal code, 406 on user exists
    """
    username = user.get("username", None)
    postalcode = user.get("postalcode", None)
    # Resolve the postal code to a city name (None if lookup fails).
    cityname = _get_cityname(postalcode)
    # Does the user already exist?
    existing_master = Master.query.filter(Master.username == username).one_or_none()
    if existing_master is None and cityname is not None:
        # Create a user instance using the schema and the passed-in user.
        user_master = Master(username=username)
        db.session.add(user_master)
        user_detail = Detail(postalcode=postalcode, cityname=cityname)
        db.session.add(user_detail)
        # Save changes to database (one commit covers both rows).
        db.session.commit()
        return make_response(
            "{username} successfully created".format(username=username), 201,
        )
    # If the Postal Code doesn't return any hits in Geonames
    elif cityname is None:
        abort(
            400, "Postal code {postalcode} is invalid".format(postalcode=postalcode),
        )
    # Otherwise, they exist, and that's an error
    else:
        abort(
            406,
            "User with username {username} already exists".format(username=username),
        )
def show_profile(uid):
    """
    Return serializable users data

    :param uid: primary key of the user to look up
    :return String: (JSON) flask response built from the user's
        `serialize` property
    """
    user = get_user_by_id(uid)
    return jsonify(user.serialize)
def degree_correlation(coeffs_1, coeffs_2):
    """
    Correlation per spherical harmonic degree between two models 1 and 2.

    Parameters
    ----------
    coeffs_1, coeffs_2 : ndarray, shape (N,)
        Two sets of coefficients of equal length `N`.

    Returns
    -------
    C_n : ndarray, shape (nmax,)
        Degree correlation of the two models. There are `N = nmax(nmax+2)`
        coefficients.
    """
    if coeffs_1.ndim != 1:
        raise ValueError(f'Only 1-D input allowed {coeffs_1.ndim} != 1')
    if coeffs_2.ndim != 1:
        raise ValueError(f'Only 1-D input allowed {coeffs_2.ndim} != 1')
    if coeffs_1.size != coeffs_2.size:
        # f-string for consistency with the checks above (was str.format).
        raise ValueError(
            f'Number of coefficients is '
            f'not equal ({coeffs_1.size} != {coeffs_2.size}).')
    # N = nmax(nmax+2)  =>  nmax = sqrt(N+1) - 1
    nmax = int(np.sqrt(coeffs_1.size + 1) - 1)
    C_n = np.zeros((nmax,))
    R_n = np.zeros((nmax,))  # elements are prop. to power spectrum of coeffs_1
    S_n = np.zeros((nmax,))  # elements are prop. to power spectrum of coeffs_2
    coeffs_12 = coeffs_1*coeffs_2
    for n in range(1, nmax+1):
        # Degree-n coefficients occupy indices [n^2-1, n^2-1 + 2n+1).
        # (Renamed from `min`/`max`, which shadowed the builtins.)
        lo = n**2 - 1
        hi = lo + (2*n + 1)
        R_n[n-1] = np.sum(coeffs_1[lo:hi]**2)
        S_n[n-1] = np.sum(coeffs_2[lo:hi]**2)
        C_n[n-1] = (np.sum(coeffs_12[lo:hi]) / np.sqrt(R_n[n-1]*S_n[n-1]))
    return C_n
def parse_args():
    """
    Add file_path to the default cdr_cleaner.args_parser argument list

    :return: an expanded argument list object
    """
    # Local import: cdr_cleaner is only needed when arguments are parsed.
    import cdr_cleaner.args_parser as parser
    help_text = 'path to csv file (with header row) containing pids whose observation records are to be removed'
    # Describe the extra required -f/--file_path argument using the
    # parser's own key constants.
    additional_argument_1 = {parser.SHORT_ARGUMENT: '-f',
                             parser.LONG_ARGUMENT: '--file_path',
                             parser.ACTION: 'store',
                             parser.DEST: 'file_path',
                             parser.HELP: help_text,
                             parser.REQUIRED: True}
    args = parser.default_parse_args([additional_argument_1])
    return args
def name(model):
    """A repeatable way to get the formatted model name."""
    # Lowercase and strip underscores from the class name.
    raw = model.__name__
    return raw.lower().replace('_', '')
def render_fields(
    fields: List[Field], instance_name: Optional[str] = None
) -> List[str]:
    """Render fields to strings, required fields first.

    Arguments:
        fields:
            The fields to render.
        instance_name:
            The name of the model instance for which the fields are
            written.  If given, the value for FK fields is inserted
            automatically, assuming the FK variables are defined before
            this class and follow the `column_name1_column_name2_...`
            convention.

    Non-nullable fields are listed before nullable (optional) ones.
    """
    required: List[str] = []
    optional: List[str] = []
    for field in fields:
        rendered = render_field(field, instance_name=instance_name)
        # Route each rendered field to the proper bucket.
        bucket = optional if field.null else required
        bucket.append(rendered)
    return required + optional
def get_primary_tasks_for_service(service_arn):
    """Get the task ARN of the primary service

    :param service_arn: ARN (or name) of the ECS service to inspect.
    :return: result of `get_tasks_for_task_definition` for the PRIMARY
        deployment's task definition, or None when no PRIMARY deployment
        is found.
    """
    # `ecs` and `cluster` are module-level objects defined elsewhere.
    response = ecs.describe_services(cluster=cluster, services=[service_arn])
    for deployment in response['services'][0]['deployments']:
        if deployment['status'] == 'PRIMARY':
            return get_tasks_for_task_definition(deployment['taskDefinition'])
    return None
def post_to_slack(alarm_name, reason, config):
    """ Send message text to slack channel

    INPUTS:
    * alarm_name - subject of the message
    * reason - message text
    * config - mapping with 'proxy_server' and 'slack_webhook_url' keys

    Returns the text of the Slack webhook response.

    Side effect: when a proxy server is configured, HTTP_PROXY/HTTPS_PROXY
    environment variables are set for the whole process.
    """
    # get params from config file
    proxy_server = config['proxy_server']
    if proxy_server != '':
        os.environ['HTTP_PROXY'] = proxy_server
        os.environ['HTTPS_PROXY'] = proxy_server
    slack_webhook_url = config['slack_webhook_url']
    slack_message = build_pr_message(alarm_name, reason)
    data = {"text": slack_message['text'], "attachments": slack_message['attachments']}
    # Fixed misspelled local variable (`reponse` -> `response`).
    response = requests.post(slack_webhook_url, json=data)
    return response.text
def load_data(csv_file):
    """
    @type csv_file: string
    @param csv_file: path to csv file
    Loads data from specified csv file and drops the Month column
    @rtype: pandas.DataFrame
    @return: DataFrame from csv file without Month column
    """
    # `drop('Month', 1)` relied on the positional `axis` argument, which
    # was deprecated in pandas 1.0 and removed in pandas 2.0; use the
    # explicit `columns=` keyword instead.
    return pd.read_csv(csv_file).drop(columns='Month')
def nearest(array, value):
    """
    Find the index of the array element that is closest to value

    Args:
        array (array): array to be tested
        value (float): value to be tested
    Returns:
        int: index
    """
    # Distance of every element from the target, then the smallest one.
    distances = np.abs(array - value)
    return np.argmin(distances)
async def test_read_current_mode(hass, utcnow):
    """Test that Ecobee mode can be correctly read and show as human readable text.

    Pushes each vendor mode value (0/1/2) into the thermostat service and
    checks the select entity maps them to home/sleep/away.
    """
    helper = await setup_test_component(hass, create_service_with_ecobee_mode)
    # Helper will be for the primary entity, which is the service. Make a helper for the sensor.
    ecobee_mode = Helper(
        hass,
        "select.testdevice_current_mode",
        helper.pairing,
        helper.accessory,
        helper.config_entry,
    )
    # Vendor value 0 -> "home"
    state = await ecobee_mode.async_update(
        ServicesTypes.THERMOSTAT,
        {
            CharacteristicsTypes.VENDOR_ECOBEE_CURRENT_MODE: 0,
        },
    )
    assert state.state == "home"
    # Vendor value 1 -> "sleep"
    state = await ecobee_mode.async_update(
        ServicesTypes.THERMOSTAT,
        {
            CharacteristicsTypes.VENDOR_ECOBEE_CURRENT_MODE: 1,
        },
    )
    assert state.state == "sleep"
    # Vendor value 2 -> "away"
    state = await ecobee_mode.async_update(
        ServicesTypes.THERMOSTAT,
        {
            CharacteristicsTypes.VENDOR_ECOBEE_CURRENT_MODE: 2,
        },
    )
    assert state.state == "away"
def fine_tune_model(trainX: np.ndarray, trainy: np.ndarray, cv: int = 5) -> SVC:
    """Grid-search SVC hyperparameters on the training set and return the
    best model, already trained.

    Args:
        trainX (np.ndarray): train array containing embedding images.
        trainy (np.ndarray): train array containing labels.
        cv (int, optional): Number of folds to apply in cross validation.
            Defaults to 5.

    Returns:
        SVC: Trained model.
    """
    search_space = {
        'C': [0.1, 1, 10, 100, 1000],
        'gamma': ['auto', 'scale'],
        'kernel': ['linear', 'poly', 'rbf', 'sigmoid'],
        'probability': [True],
    }
    # refit=True retrains the best parameter combination on all the data.
    searcher = GridSearchCV(SVC(), search_space, refit=True, verbose=1,
                            return_train_score=True, cv=cv)
    searcher.fit(trainX, trainy)
    return searcher.best_estimator_
def get_empty_array_year(year=None, start_end=True, variable_list=None, variable_list_dtype=None, record_interval='HH'):
    """
    Allocates and returns new empty record array for given year using list of dtypes
    (or variable labels as 8byte floats if no dtype list provided) for variables plus
    TIMESTAMP_START and TIMESTAMP_END at beginning

    :param year: year to be represented in array (current year if not provided)
    :type year: int
    :param start_end: if True, uses TIMESTAMP_START and TIMESTAMP_END, if not, uses only TIMESTAMP (end)
    :type start_end: bool
    :param variable_list: list of strings to be used as variable labels, assumed f8
                          type (defaults to ['TEST'])
    :type variable_list: list (of str)
    :param variable_list_dtype: list of dtype tuples (label, data type) to be used as variables
    :type variable_list_dtype: list (of (str, str)-tuples)
    :param record_interval: resolution to be used for record ['HR' for hourly, 'HH' for half-hourly (default)]
    :type record_interval: str
    :raises ONEFluxError: if record_interval is not 'HH' or 'HR'
    """
    # Resolve defaults at call time: the previous `year=datetime.now().year`
    # default was evaluated once at module import time (wrong for
    # long-running processes), and `variable_list=['TEST']` was a shared
    # mutable default argument.
    if year is None:
        year = datetime.now().year
    if variable_list is None:
        variable_list = ['TEST']
    # record_interval
    if record_interval.lower() == 'hh':
        step = timedelta(minutes=30)
    elif record_interval.lower() == 'hr':
        step = timedelta(minutes=60)
    else:
        msg = 'Unknown record_interval: {r}'.format(r=record_interval)
        log.critical(msg)
        raise ONEFluxError(msg)
    # timestamp list covering the whole year, plus one extra entry so the
    # begin/end lists below can be offset by one step
    timestamp_list = []
    current_timestamp = datetime(int(year), 1, 1, 0, 0, 0)
    while current_timestamp.year < int(year) + 1:
        timestamp_list.append(current_timestamp)
        current_timestamp += step
    timestamp_list.append(current_timestamp)
    timestamp_list_begin = timestamp_list[:-1]
    timestamp_list_end = timestamp_list[1:]
    # array dtype
    dtype = ([(var, 'f8') for var in variable_list] if variable_list_dtype is None else variable_list_dtype)
    if start_end:
        dtype = [('TIMESTAMP_START', 'a25'), ('TIMESTAMP_END', 'a25')] + dtype
    else:
        dtype = [('TIMESTAMP', 'a25'), ] + dtype
    # record array, initialized to the -9999 missing-data marker
    data = numpy.zeros(len(timestamp_list_begin), dtype=dtype)
    data[:] = -9999.0
    if start_end:
        data['TIMESTAMP_START'][:] = [i.strftime('%Y%m%d%H%M') for i in timestamp_list_begin]
        data['TIMESTAMP_END'][:] = [i.strftime('%Y%m%d%H%M') for i in timestamp_list_end]
    else:
        data['TIMESTAMP'][:] = [i.strftime('%Y%m%d%H%M') for i in timestamp_list_end]
    return data
def service_account_update(sid, groups):
    """Update service_account groups

    Adds the DC/OS service account to each of the given IAM groups by
    running one `dcos security org groups add_user` command per group.

    :param sid: service account id
    :param groups: iterable of group names to add the account to
    """
    display.vvv("DC/OS: IAM update service_account {}".format(sid))
    for g in groups:
        display.vvv("Assigning service_account {} to group {}".format(
            sid,g))
        cmd = [
            'dcos',
            'security',
            'org',
            'groups',
            'add_user',
            g,
            sid
        ]
        # Failures for one group do not stop the remaining assignments.
        run_command(cmd, 'update service_account', stop_on_error=False)
def validate_geojson(data):
    """
    Validate geojson

    Returns False unless `data` is a dict whose 'features' key holds a
    list; otherwise wraps the features in a FeatureCollection and returns
    its validity flag.
    """
    if not (isinstance(data, dict)):
        return False
    if not isinstance(data.get('features'), list):
        return False
    # NOTE(review): geojson.Feature's first positional parameter is `id`,
    # not the feature mapping — passing `f` positionally may not build the
    # intended Feature objects; verify against the geojson package API.
    gj = geojson.FeatureCollection([geojson.Feature(f) for f in data['features']])
    return gj.is_valid
def run_wps(conn, config_wpsprocess, **kwargs):
    """
    primary function to orchestrate running the wps job from submission to download (if required)

    Parameters:
    -----------
    conn: dict,
        Connection parameters
        Example: conn = {'domain': 'https://earthobs.defra.gov.uk',
        'username': '<insert-username>',
        'access_token': '<insert-access-token>'}
    config_wpsprocess: list or dict,
        list of dictionaries for individual wps submission requests.
        users can generate a list of multiple dictionaries, one dict per wps job
        with "xml_config", this is dict of variables that templated into the xml
        payload for the WPS request submission
        Example:
        config_wpsprocess = [{'template_xml':'gsdownload_template.xml',
            'xml_config':{
                'template_layer_name':lyr,
                'template_outputformat':'image/tiff',
                'template_mimetype':'application/zip'},
            'dl_bool':True
            }]
    output_dir: str or Pathlib object, optional,
        user specified output directory
    verify: str, optional:
        add custom path to any organisation certificate stores that the
        environment needs
        Default Value:
        * True
        Possible Value:
        * 'dir/dir/cert.file'

    Returns:
    -----------
    list_download_paths: list,
        list of pathlib objects for downloaded output for further reuse
    """
    # set output path if not specified
    if 'output_dir' not in kwargs:
        kwargs['output_dir'] = Path.cwd()
    if 'verify' not in kwargs:
        kwargs['verify'] = True
    # set the request config dictionary
    request_config = {
        'wps_server':conn['domain'] + '/geoserver/ows',
        'access_token':conn['access_token'],
        'headers':{'Content-type': 'application/xml','User-Agent': 'python'},
        'verify':kwargs['verify']
    }
    # submit wps jobs
    try:
        execution_dict = submit_wps_queue(request_config, config_wpsprocess)
    except Exception as error:
        # Submission failed: report and fall through (no polling happens).
        print(error.args)
        print('The WPS submission has failed')
    else:
        # INITIALISE VARIABLES and drop the wps log file if it exists
        path_output = make_output_dir(kwargs['output_dir'])
        # keep calling the wps job status until 'continue_process' = False
        while True:
            execution_dict = poll_api_status(execution_dict, request_config, path_output)
            if execution_dict['continue_process']:
                # back off between polls to avoid hammering the server
                time.sleep(15)
            else:
                break
        # after download is complete, process downloaded files (eg renames and extracting zips)
        if execution_dict['job_status'] == 'DOWNLOAD-SUCCESSFUL':
            execution_dict = process_wps_downloaded_files(execution_dict)
        # set log file and job duration (in minutes) in dict
        execution_dict['log_file_path'] = path_output / 'wps-log.csv'
        execution_dict['total_job_duration'] = (execution_dict['timestamp_job_end'] - execution_dict['timestamp_job_start']).total_seconds() / 60
        return execution_dict
def save_episode_to_db(identifier, season, episode, provider, lang, meta):
    """
    save episode's meta info to database

    Acts as an upsert: any existing row for the same (identifier,
    provider, season, episode, lang) key is removed before inserting.

    Args:
        identifier: identifier of the item
        season: season number of the episode
        episode: episode number of the episode
        provider: metadata provider to save info for
        lang: language to save info for
        meta: metadata
    """
    import time
    import koding
    # Delete any previous entry for this composite key.
    koding.Remove_From_Table("episode_meta", {
        "identifier": identifier,
        "provider": provider,
        "season": season,
        "episode": episode,
        "lang": lang
    })
    # NOTE(review): pickle.dumps returns bytes on Python 3, where
    # .replace("\"", "'") would fail — this appears to assume Python 2
    # (Kodi); confirm before porting.
    koding.Add_To_Table("episode_meta", {
        "identifier": identifier,
        "provider": provider,
        "season": season,
        "episode": episode,
        "lang": lang,
        "meta": pickle.dumps(meta).replace("\"", "'"),
        "created": time.time()
    })
def test_award_category_endpoint(client, award_spending_data):
    """Test the award_category endpoint.

    Checks both a valid filtered query and the missing-parameter error
    case.
    """
    # Test that like results are combined and results are output in descending obligated_amount order
    resp = client.get('/api/v2/award_spending/award_category/?fiscal_year=2017&awarding_agency_id=111')
    assert resp.status_code == status.HTTP_200_OK
    assert len(resp.data['results']) == 3
    assert float(resp.data['results'][0]['obligated_amount']) == 40
    # Test for missing entries: no query parameters must yield a 400.
    resp = client.get('/api/v2/award_spending/award_category/')
    assert resp.status_code == status.HTTP_400_BAD_REQUEST
def get_redirect_url(user):
    """
    Analyse user and redirect:
     Instructor:
      onboarding is disabled - to /ctms/
      onboarding is enabled and not achieved needed percent - to /ctms/onboarding/
      onboarding is enabled and achieved needed percent - to /ctms/
     Student:
      Depends on type of chat student took part of and redirect to:
      /lms/courses/<course_id> or /lms/tester/courses/<course_pk>
      If user doesn't have any chat:
       look at user's role and get lms type whether from invite or course of role
    Arguments:
        user (obj): User model of django.contrib.auth.models
    Return:
        redirect_url (str), or None when no user is given
    """
    from chat.models import Chat
    from ct.models import Role
    redirect_url = reverse('ct:home')  # default
    # NOTE: a falsy user returns implicitly (None), not the default URL.
    if not user:
        return
    if getattr(user, 'instructor', None):
        # Instructors route based on onboarding progress (waffle switch).
        if waffle.switch_is_active('ctms_onboarding_enabled') and \
                get_onboarding_percentage(user.id) < settings.ONBOARDING_PERCENTAGE_DONE:
            redirect_url = reverse('ctms:onboarding')
        else:
            redirect_url = reverse('ctms:my_courses')
    else:
        # Students: prefer the most recent chat to pick the LMS view.
        chat = Chat.objects.filter(user=user).order_by('-timestamp').first()
        if chat:
            view_identificator = ''
            if chat.is_test:
                view_identificator = 'tester_'
            course = chat.enroll_code.courseUnit.course
            redirect_url = reverse(
                'lms:{}course_view'.format(view_identificator),
                kwargs={'course_id': course.id}
            )
        else:
            # No chat: fall back to the latest enrolled/self-study role.
            view_identificator = ''
            role = user.role_set.filter(role__in=[Role.ENROLLED, Role.SELFSTUDY]).last()
            if role:
                last_invite = role.course.invite_set.filter(status='joined', user=user, type='tester').last()
                if last_invite:
                    view_identificator = 'tester_'
                redirect_url = reverse(
                    'lms:{}course_view'.format(view_identificator),
                    kwargs={'course_id': role.course.id}
                )
    return redirect_url
def _ds_to_arrraylist(
    ds, bands, time_dim, x_dim, y_dim, percentile_stretch, image_proc_func=None
):
    """
    Converts an xarray dataset to a list of numpy arrays for plt.imshow plotting

    :param ds: xarray dataset with the given time/x/y dims and band variables
    :param bands: list of 1 (greyscale) or 3 (RGB) band names to render
    :param percentile_stretch: (low, high) quantiles used for contrast
        stretching, computed across all bands and timesteps
    :param image_proc_func: optional callable applied to each frame; its
        result is clipped to [0, 1]
    :return: (list of per-timestep arrays, low percentile value,
        high percentile value)
    """
    # Compute percents: global stretch bounds over all bands and times.
    p_low, p_high = ds[bands].to_array().quantile(percentile_stretch).values
    array_list = []
    for i, timestep in enumerate(ds[time_dim]):
        # Select single timestep from the data array
        ds_i = ds[{time_dim: i}]
        # Get shape of array
        x = len(ds[x_dim])
        y = len(ds[y_dim])
        if len(bands) == 1:
            # Create new one band array
            img_toshow = exposure.rescale_intensity(
                ds_i[bands[0]].values, in_range=(
                    p_low, p_high), out_range="image"
            )
        else:
            # Create new three band array
            rawimg = np.zeros((y, x, 3), dtype=np.float32)
            # Add xarray bands into three dimensional numpy array
            for band, colour in enumerate(bands):
                rawimg[:, :, band] = ds_i[colour].values
            # Stretch contrast using percentile values
            img_toshow = exposure.rescale_intensity(
                rawimg, in_range=(p_low, p_high), out_range=(0, 1.0)
            )
        # Optionally image processing
        if image_proc_func:
            img_toshow = image_proc_func(img_toshow).clip(0, 1)
        array_list.append(img_toshow)
    return array_list, p_low, p_high
def derive_key(secret, salt, iterations=1000, keylen=32):
    """
    Computes a derived cryptographic key from a password according to PBKDF2.

    .. seealso:: http://en.wikipedia.org/wiki/PBKDF2

    :param secret: The secret.
    :type secret: bytes or unicode
    :param salt: The salt to be used.
    :type salt: bytes or unicode
    :param iterations: Number of iterations of derivation algorithm to run.
    :type iterations: int
    :param keylen: Length of the key to derive in bytes.
    :type keylen: int
    :return: The derived key in Base64 encoding.
    :rtype: bytes
    """
    # NOTE(review): assert-based validation is stripped under `python -O`;
    # raising TypeError explicitly would be more robust.
    assert(type(secret) in [six.text_type, six.binary_type])
    assert(type(salt) in [six.text_type, six.binary_type])
    assert(type(iterations) in six.integer_types)
    assert(type(keylen) in six.integer_types)
    # Normalize text inputs to UTF-8 bytes before key derivation.
    if type(secret) == six.text_type:
        secret = secret.encode('utf8')
    if type(salt) == six.text_type:
        salt = salt.encode('utf8')
    key = pbkdf2(secret, salt, iterations, keylen)
    return binascii.b2a_base64(key).strip()
def copy_ecu(ecu_or_glob, source_db, target_db):
    # type: (typing.Union[canmatrix.Ecu, str], canmatrix.CanMatrix, canmatrix.CanMatrix) -> None
    """
    Copy ECU(s) identified by Name or as Object from source CAN matrix to target CAN matrix.
    This function additionally copy all relevant Defines.

    :param ecu_or_glob: Ecu instance or glob pattern for Ecu name
    :param source_db: Source CAN matrix
    :param target_db: Destination CAN matrix
    """
    # check whether ecu_or_glob is object or symbolic name
    if isinstance(ecu_or_glob, canmatrix.Ecu):
        ecu_list = [ecu_or_glob]
    else:
        ecu_list = source_db.glob_ecus(ecu_or_glob)
    for ecu in ecu_list:
        # deepcopy so later edits in one matrix do not affect the other
        target_db.add_ecu(copy.deepcopy(ecu))
        # copy all ecu-defines
        for attribute in ecu.attributes:
            if attribute not in target_db.ecu_defines:
                target_db.add_ecu_defines(
                    copy.deepcopy(attribute), copy.deepcopy(source_db.ecu_defines[attribute].definition))
                target_db.add_define_default(
                    copy.deepcopy(attribute), copy.deepcopy(source_db.ecu_defines[attribute].defaultValue))
            # update enum data types if needed:
            if source_db.ecu_defines[attribute].type == 'ENUM':
                temp_attr = ecu.attribute(attribute, db=source_db)
                # append this ECU's value to the target define if missing
                if temp_attr not in target_db.ecu_defines[attribute].values:
                    target_db.ecu_defines[attribute].values.append(copy.deepcopy(temp_attr))
                    target_db.ecu_defines[attribute].update()
def pseudo_import( pkg_name ):
    """
    return a new module that contains the variables of pkg_name.__init__

    Only lines starting with ``__`` (dunder assignments such as
    ``__version__``) are executed; imports and other statements in the
    package's __init__ are skipped.

    :param pkg_name: path to the package directory (and name of the
        returned module)
    """
    init = os.path.join( pkg_name, '__init__.py' )
    # remove imports and 'from foo import'; close the file deterministically
    with open(init, 'r') as init_file:
        lines = [line for line in init_file if line.startswith('__')]
    code = '\n'.join(lines)
    # `imp` was deprecated since Python 3.4 and removed in 3.12;
    # types.ModuleType is the supported replacement for imp.new_module.
    import types
    module = types.ModuleType(pkg_name)
    exec(code, module.__dict__)
    return module
def calc_params_l2_norm(model: torch.nn.Module, bf16: bool):
    """Calculate l2 norm of parameters

    Deduplicates shared and tensor-parallel-duplicated parameters, squares
    the fused per-rank L2 norm, then all-reduces the square across the
    model-parallel group before taking the square root.

    :param model: a module, or a list of modules, whose parameters are measured
    :param bf16: when True, parameter data is upcast to float32 first
    :return: global parameter L2 norm as a Python float
    """
    # args = get_args()
    if not isinstance(model, list):
        model = [model]
    # Remove duplicate params.
    params_data = []
    for model_ in model:
        for param in model_.parameters():
            is_not_shared = param_is_not_shared(param)
            is_not_tp_duplicate = parallel_state.param_is_not_tensor_parallel_duplicate(param)
            if is_not_shared and is_not_tp_duplicate:
                if bf16:
                    params_data.append(param.data.float())
                else:
                    params_data.append(param.data)
    # Calculate norm with apex's fused multi-tensor kernel.
    dummy_overflow_buf = torch.cuda.IntTensor([0])
    norm, _ = multi_tensor_applier(
        amp_C.multi_tensor_l2norm, dummy_overflow_buf, [params_data], False  # no per-parameter norm
    )
    norm_2 = norm * norm
    # Sum across all model-parallel GPUs.
    torch.distributed.all_reduce(
        norm_2, op=torch.distributed.ReduceOp.SUM, group=parallel_state.get_model_parallel_group()
    )
    return norm_2.item() ** 0.5
def gradient_descent(f, init_val_dict, learning_rate=0.001, max_iter=1000, stop_stepsize=1e-6, return_history=False):
    """
    Gradient Descent finding minimum for a
    single expression

    INPUTS
    =======
    f: expression exposing a `gradient_at(val_dict)` method that returns
       the gradient (as an array) at the given point
    init_val_dict: dictionary containing initial value of variables
    learning_rate: the step size between iterations
    max_iter: maximum iteration before the algorithm stops
    stop_stepsize: tolerance; iteration stops once the Euclidean distance
       between consecutive points falls below this value
    return_history: default set to False. If True, return the trajectory
       of the algorithm including the final answer

    RETURNS
    ========
    If return_history = False: dict mapping each variable name to its
    value at the point found
    If return_history = True: list of visited points (as lists),
    including the initial point and the final answer
    """
    # The original code also evaluated the gradient at the initial point
    # here, but that value was always recomputed inside the loop before
    # use — a dead (and potentially expensive) call, now removed.
    variables = list(init_val_dict.keys())
    curr_point = np.array([v for v in init_val_dict.values()])
    history = [curr_point.tolist()]
    for _ in range(max_iter):
        prev_point = curr_point
        prev_val_dict = {var: val for var, val in zip(variables, prev_point)}
        grad = f.gradient_at(prev_val_dict)
        curr_point = curr_point - learning_rate * grad
        history.append(curr_point.tolist())
        # Stop once the update step is small enough.
        if np.linalg.norm(curr_point - prev_point, ord=2) < stop_stepsize:
            break
    if return_history:
        return history
    return {var: val for var, val in zip(variables, curr_point)}
def my_email(request, recipients, subject, body, sender=None):
    """ Sends email message

    :param request: current request, used to locate the configured mailer
    :param recipients: single recipient address (wrapped in a list)
    :param subject: message subject
    :param body: plain-text message body
    :param sender: optional from-address; falls back to a placeholder
    """
    mailer = get_mailer(request)
    if not sender:
        # Historically this consulted apex_settings('sender_email'); the
        # redundant nested `if not sender:` left over from that code has
        # been collapsed into this single fallback.
        sender = 'nobody@example.com'
    message = Message(subject=subject,
                      sender=sender,
                      recipients=[recipients],
                      body=body)
    mailer.send(message)
def replace_literal_nulls(layer_name):
    """Replaces literal string representation of null, '<Null>', with a true null value
    (None in Python).

    Only string-typed fields are inspected; matching is a substring check
    (`'<Null>' in value`), not an exact-equality check.

    Parameters:
        layer_name - The name of the layer to replace literal nulls.

    Returns:
        None
    """
    logger.debug('Start replacing literal nulls.')
    fields, row, rows = None, None, None
    try:
        # Create a list of field objects.
        fields = arcpy.ListFields(layer_name)
        # Create an update cursor that will loop through and update each row.
        rows = arcpy.UpdateCursor(layer_name)
        # Loop through each row and field and replace literal nulls.
        for row in rows:
            for field in fields:
                if field.type == 'String':
                    value = row.getValue(field.name)
                    # Ignore null/empty fields
                    if (value != None):
                        # Check for '<Null>' string
                        if (value.find('<Null>') > -1):
                            logger.debug('Found a "<Null>" string to nullify in field: {0}.'.format(field.name))
                            logger.debug('Replacing null string')
                            row.setValue(field.name, None)
                            # NOTE(review): this logs the pre-replacement
                            # value, not the new (None) value.
                            logger.debug('Replaced with {0}'.format(value))
            # Update row
            rows.updateRow(row)
        logger.debug('Done replacing literal nulls in {0}.'.format(layer_name))
    finally:  # Clean up: release arcpy cursor/row references explicitly.
        if row:
            del row
        if rows:
            del rows
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.