| content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M) |
|---|---|---|
import os
def dwritef2(obj, path):
"""The dwritef2() function writes the object @p obj to the Python
pickle file whose path is pointed to by @p path. Non-existent
directories of @p path are created as necessary.
@param obj Object to write, as created by e.g. dpack()
@param path Path of output file
@return Path of output file
"""
dirname = os.path.dirname(path)
if dirname != "" and not os.path.isdir(dirname):
os.makedirs(dirname)
easy_pickle.dump(path, obj)
return path
|
5816a8bcacbc45c3931ee9b217c1c579883caf12
| 3,647,600
|
def calculate_area(geometry):
"""
Calculate geometry area
:param geometry: GeoJSON geometry
:return: the geometry area
"""
coords = get_coords_from_geometry(
geometry, ["Polygon", "MultiPolygon"], raise_exception=False
)
if get_input_dimensions(coords) >= 4:
areas = list(map(lambda sub_item: calculate_area(sub_item), coords))
return sum(areas)
elif get_input_dimensions(coords) == 3:
polygon(coords)
return polygon_area(coords)
else:
return 0
|
6bc08b57c3416c14f5eca00acbe914a06053b81e
| 3,647,601
|
from pathlib import Path
import re
import numpy as np
def read_prb(file):
"""
Read a PRB file and return a ProbeGroup object.
    Since the PRB format does not handle contact shapes, circles of 5 um are used.
    Likewise, since it does not handle probe shapes, a dummy tip shape is used.
    The PRB format does not contain any information about the channels of the probe;
    only the channel index on the device is given.
"""
file = Path(file).absolute()
assert file.is_file()
with file.open('r') as f:
contents = f.read()
contents = re.sub(r'range\(([\d,]*)\)', r'list(range(\1))', contents)
prb = {}
exec(contents, None, prb)
prb = {k.lower(): v for (k, v) in prb.items()}
if 'channel_groups' not in prb:
raise ValueError('This file is not a standard PRB file')
probegroup = ProbeGroup()
for i, group in prb['channel_groups'].items():
probe = Probe(ndim=2, si_units='um')
chans = np.array(group['channels'], dtype='int64')
positions = np.array([group['geometry'][c] for c in chans],
dtype='float64')
probe.set_contacts(positions=positions, shapes='circle',
shape_params={'radius': 5})
probe.create_auto_shape(probe_type='tip')
probe.set_device_channel_indices(chans)
probegroup.add_probe(probe)
return probegroup
|
f46c8befbd348c1473867d5c7475911ce960830c
| 3,647,602
|
import os
import pathlib
def alt_credits():
""" Route for alt credits page. Uses json list to generate page body """
alternate_credits = tasks.json_list(os.path.join(pathlib.Path(__file__).parent.absolute(),'static/alt_credits.json'))
return render_template('alt_credits.html',title='collegeSMART - Alternative College Credits',alt_credits=alternate_credits)
|
2ad6c1b374153c79efbd0b71d36c1c5771e30795
| 3,647,603
|
def CreateNode(parent, node_type, position, wx_id):
""" Create an instance of a node associated with the specified name.
:param parent: parent of the node object (usually a wx.Window)
:param node_type: type of node from registry - the IDName
:param position: default position for the node
:param wx_id: id for the node. Usually an id generated by wxPython.
:returns: Node object
:raises: NodeNotFoundError if the node is not registered in the Node Registry
"""
if node_type in REGISTERED_NODES:
# Initialize the base class here so that a new instance
# is created for each node. We also set some important
# values for the position and type of the node.
node = REGISTERED_NODES[node_type]
node = node(wx_id)
node.SetPosition(position)
node.Model.SetType(node_type)
node.Model.SetParent(parent)
return node
else:
raise exceptions.NodeNotFoundError(node_type)
|
d0a68d584bde29ef47b8e1a1a3129261cbdb6df4
| 3,647,604
|
import json
from json import JSONDecodeError
def decode_json_content(content):
"""
Decodes a given string content to a JSON object
:param str content: content to be decoded to JSON.
:return: A JSON object if the string could be successfully decoded and None otherwise
:rtype: json or None
"""
try:
return json.loads(content) if content is not None else None
except JSONDecodeError:
print("The given content could not be decoded as a JSON file")
return None
|
b0a65734876fd012feb89c606a9c7a0dced866b6
| 3,647,605
|
import seaborn as sns
import matplotlib.pyplot as plt
def plot_dist(noise_feats, label=None, ymax=1.1, color=None, title=None, save_path=None):
"""
Kernel density plot of the number of noisy features included in explanations,
for a certain number of test samples
"""
if not any(noise_feats): # handle special case where noise_feats=0
noise_feats[0] = 0.5
# plt.switch_backend("agg")
sns.set_style('darkgrid')
ax = sns.distplot(noise_feats, hist=False, kde=True,
kde_kws={'label': label}, color=color)
plt.xlim(-3, 11)
plt.ylim(ymin=0.0, ymax=ymax)
if title:
plt.title(title)
if save_path:
plt.savefig(save_path)
return ax
|
33623c77434b936a730064890a80d34d1f5ac143
| 3,647,606
|
import chunk
import pandas as pd
def simple_simulate(choosers, spec, nest_spec,
skims=None, locals_d=None,
chunk_size=0, custom_chooser=None,
log_alt_losers=False,
want_logsums=False,
estimator=None,
trace_label=None, trace_choice_name=None, trace_column_names=None):
"""
Run an MNL or NL simulation for when the model spec does not involve alternative
specific data, e.g. there are no interactions with alternative
properties and no need to sample from alternatives.
"""
trace_label = tracing.extend_trace_label(trace_label, 'simple_simulate')
assert len(choosers) > 0
result_list = []
# segment by person type and pick the right spec for each person type
for i, chooser_chunk, chunk_trace_label \
in chunk.adaptive_chunked_choosers(choosers, chunk_size, trace_label):
choices = _simple_simulate(
chooser_chunk, spec, nest_spec,
skims=skims,
locals_d=locals_d,
custom_chooser=custom_chooser,
log_alt_losers=log_alt_losers,
want_logsums=want_logsums,
estimator=estimator,
trace_label=chunk_trace_label,
trace_choice_name=trace_choice_name,
trace_column_names=trace_column_names)
result_list.append(choices)
chunk.log_df(trace_label, f'result_list', result_list)
if len(result_list) > 1:
choices = pd.concat(result_list)
    assert len(choices.index) == len(choosers.index)
return choices
|
92e06a6b57add21e0b3bd1fcd537d6818786d19c
| 3,647,607
|
from functools import wraps
def state(predicate):
"""DBC helper for reusable, simple predicates for object-state tests used in both preconditions and postconditions"""
@wraps(predicate)
def wrapped_predicate(s, *args, **kwargs):
return predicate(s)
return wrapped_predicate
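A minimal usage sketch (not part of the original source; the Stack class below is hypothetical): the wrapped predicate only inspects the object itself and ignores whatever extra arguments a contract checker passes along, so the same check can serve as both a precondition and a postcondition.
class Stack:
    """Hypothetical class used only to illustrate the helper."""
    def __init__(self):
        self._items = []
    def push(self, item):
        self._items.append(item)

@state
def not_empty(s):
    # looks only at object state, never at method arguments
    return len(s._items) > 0

s = Stack()
s.push(42)
# extra positional/keyword arguments, as a contract checker would pass, are ignored
assert not_empty(s, "push", item=42)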
|
0c9116ccd3fba1b431ce0a492bc6337406954cd8
| 3,647,608
|
import math
import numpy as np
def dpp(kernel_matrix, max_length, epsilon=1E-10):
"""
Our proposed fast implementation of the greedy algorithm
:param kernel_matrix: 2-d array
:param max_length: positive int
:param epsilon: small positive scalar
:return: list
"""
item_size = kernel_matrix.shape[0]
cis = np.zeros((max_length, item_size))
di2s = np.copy(np.diag(kernel_matrix))
selected_items = list()
selected_item = np.argmax(di2s)
selected_items.append(selected_item)
while len(selected_items) < max_length:
k = len(selected_items) - 1
ci_optimal = cis[:k, selected_item]
di_optimal = math.sqrt(di2s[selected_item])
elements = kernel_matrix[selected_item, :]
eis = (elements - np.dot(ci_optimal, cis[:k, :])) / di_optimal
cis[k, :] = eis
di2s -= np.square(eis)
di2s[selected_item] = -np.inf
selected_item = np.argmax(di2s)
if di2s[selected_item] < epsilon:
break
selected_items.append(selected_item)
return selected_items
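A short usage sketch (illustrative, not from the original source): the kernel is built as diag(q) * S * diag(q) from random unit feature vectors, which guarantees a positive semi-definite input for the greedy selection.
np.random.seed(0)
n, d = 100, 8
quality = np.exp(0.1 * np.random.randn(n))                    # per-item quality scores
feats = np.random.randn(n, d)
feats /= np.linalg.norm(feats, axis=1, keepdims=True)         # unit-norm feature vectors
similarity = feats @ feats.T                                  # cosine similarity matrix
kernel = quality[:, None] * similarity * quality[None, :]     # DPP kernel L = diag(q) S diag(q)
print(dpp(kernel, max_length=10))                             # indices of up to 10 diverse items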
|
fd6c141f1a2f80971ed8e6e5d36b0d074bcdc4b9
| 3,647,609
|
import cStringIO
from PIL import Image
def adjust_image_resolution(data):
    """Given image data, shrink it to no greater than 240 pixels for its larger
    dimension and re-encode it as JPEG."""
inputbytes = cStringIO.StringIO(data)
output = cStringIO.StringIO()
try:
im = Image.open(inputbytes)
im.thumbnail((240, 240), Image.ANTIALIAS)
# could run entropy check to see if GIF makes more sense given an item.
im.save(output, 'JPEG')
except IOError:
return None
return output.getvalue()
|
d2fedb68e79b1aed0ce0a209d43bb6b16d492f16
| 3,647,610
|
import sys
import time
def timing_function():
"""
There's a better timing function available in Python 3.3+
Otherwise use the old one.
TODO: This could be a static analysis at the top of the module
"""
    if sys.version_info >= (3, 3):
return time.monotonic()
else:
return time.time()
|
0de5d8ed8eb93617cec6feef47bf51441ef9a73e
| 3,647,611
|
from datetime import datetime
def parse_date(txt):
""" Returns None or parsed date as {h, m, D, M, Y}. """
date = None
clock = None
for word in txt.split(' '):
if date is None:
try:
date = datetime.strptime(word, "%d-%m-%Y")
continue
except ValueError:
pass
try:
date = datetime.strptime(word, "%d.%m.%Y")
continue
except ValueError:
pass
if clock is None:
try:
clock = datetime.strptime(word, "%H:%M")
continue
except ValueError:
pass
if date is not None and clock is not None:
return {'h': clock.hour,
'm': clock.minute,
'D': date.day,
'M': date.month,
'Y': date.year}
return None
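Illustrative calls (not from the original source), showing the accepted DD-MM-YYYY / DD.MM.YYYY date formats and the HH:MM clock format:
print(parse_date("Deadline 24-12-2021 at 18:30"))
# {'h': 18, 'm': 30, 'D': 24, 'M': 12, 'Y': 2021}
print(parse_date("24.12.2021"))  # no clock token present, so the result is None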
|
80660673d6b4179fa7b4907983ed84bc41c4189b
| 3,647,612
|
import os
import six
def diff_configurations(model_config, bench_config, model_bundle, bench_bundle):
"""
    Compares the model and benchmark configuration data and records the differences.
Args:
model_config: a dictionary with the model configuration data
bench_config: a dictionary with the benchmark configuration data
model_bundle: a LIVVkit model bundle object
bench_bundle: a LIVVkit model bundle object
Returns:
A dictionary created by the elements object corresponding to
the results of the bit for bit testing
"""
diff_dict = LIVVDict()
model_data = model_bundle.parse_config(model_config)
bench_data = bench_bundle.parse_config(bench_config)
if model_data == {} and bench_data == {}:
return elements.error("Configuration Comparison",
"Could not open file: " + model_config.split(os.path.sep)[-1])
model_sections = set(six.iterkeys(model_data))
bench_sections = set(six.iterkeys(bench_data))
all_sections = set(model_sections.union(bench_sections))
for s in all_sections:
model_vars = set(six.iterkeys(model_data[s])) if s in model_sections else set()
bench_vars = set(six.iterkeys(bench_data[s])) if s in bench_sections else set()
all_vars = set(model_vars.union(bench_vars))
for v in all_vars:
model_val = model_data[s][v] if s in model_sections and v in model_vars else 'NA'
bench_val = bench_data[s][v] if s in bench_sections and v in bench_vars else 'NA'
same = True if model_val == bench_val and model_val != 'NA' else False
diff_dict[s][v] = (same, model_val, bench_val)
return elements.file_diff("Configuration Comparison", diff_dict)
|
a756c9694e50a8ace29e6c518e22fa11f3744b89
| 3,647,613
|
import six
import tarfile
import zipfile
import os
import shutil
def _extract_archive(file_path, path='.', archive_format='auto'):
"""Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.
Arguments:
file_path: path to the archive file
path: path to extract the archive file
archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
Returns:
True if a match was found and an archive extraction was completed,
False otherwise.
"""
if archive_format is None:
return False
if archive_format == 'auto':
archive_format = ['tar', 'zip']
if isinstance(archive_format, six.string_types):
archive_format = [archive_format]
for archive_type in archive_format:
if archive_type == 'tar':
open_fn = tarfile.open
is_match_fn = tarfile.is_tarfile
if archive_type == 'zip':
open_fn = zipfile.ZipFile
is_match_fn = zipfile.is_zipfile
if is_match_fn(file_path):
with open_fn(file_path) as archive:
try:
archive.extractall(path)
except (tarfile.TarError, RuntimeError, KeyboardInterrupt):
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
raise
return True
return False
|
aba0d2e47c19c7d11fc8eefbd4c1f110df0a5f4e
| 3,647,614
|
def calc_angle(m, n):
"""
Calculate the cosθ,
where θ is the angle between 2 vectors, m and n.
"""
if inner_p_s(m, n) == -1:
print('Error! The 2 vectors should belong on the same space Rn!')
elif inner_p_s(m,n) == 0:
print('The cosine of the two vectors is 0, so these vectors are orthogonal!')
else:
angle = (inner_p_s(m, n))/(calc_norm(m) * calc_norm(n))
return angle
|
e0361370a9479eaf7e706673d71c88d25c110473
| 3,647,615
|
import numpy as np
def Seuil_var(img):
    """
    This function computes a threshold value. First the image's histogram is calculated. The threshold is set to the first index of the histogram which satisfies the following criteria: DH > 0, DH(i)/H(i) > 0.1, H(i) < 0.01% of the Norm.
    In : img : ipl Image : image to be processed
    Out: seuil : Int : value of the threshold
"""
dim=255
MaxValue=np.amax(np.asarray(img[:]))
Norm = np.asarray(img[:]).shape[0]*np.asarray(img[:]).shape[1]
scale=MaxValue/dim
Wdim=dim*scale
MaxValue=np.amax(np.asarray(img[:]))
bins= [float(x) for x in range(dim)]
hist,bin_edges = np.histogram(np.asarray(img[:]), bins)
Norm = Norm -hist[0]
median=np.median(hist)
mean=0
var=0
i=1
som = 0
while (som < 0.8*Norm and i <len(hist)-1):
som = som + hist[i]
i=i+1
while ((hist[i]-hist[i-1] < 0 or (hist[i]-hist[i-1])/hist[i-1]>0.1 or hist[i]> 0.01*Norm ) and i < len(hist)-1):
i=i+1
if( i == len(hist)-1):
seuil=0
seuil = i
var = 0
return seuil
|
435e8eeca0ddff618a2491b0529f1252d8566721
| 3,647,616
|
def convert_numpy(file_path, dst=None, orient='row', hold=False, axisf=False, *arg):
"""
Extract an array of data stored in a .npy file or DATABLOCK
Parameters
---------
file_path : path (str)
Full path to the file to be extracted.
dst : str
Full path to the file where data will be appended as bytes.
In the case of None value, a temporary file is created and the path is returned.
orient : str
orientation of the spectra in the file. Defaults to spectra as row.
hold : bool
If true, limits parts of the code to only get data type and parameters. (faster)
axisf : bool
Extracts the 1st axis and set it as the file axis as it is being converted.
Return
------
Asgard_param : dict
Stores update to Asgard parameters (i.e. spec_amount, spec_len, Axis for sif)
dst : path (str)
        Full path to the file where data were written, be it temporary or user selected
"""
if dst is None and hold is False:
dst = TemporaryFile('wb', delete=False).name
try :
arr = load(file_path, allow_pickle=True, mmap_mode='r')
except ValueError :
raise Exc.FileFormatError('Selected file is not a valid numpy array')
if orient != 'row' :
arr = arr.T
if len(arr.shape) == 1:
arr = arr.reshape([1, arr.shape[0]])
if len(arr.shape) != 2 :
raise Exc.FileFormatError('Selected file contains an array with more than 2 dimensions')
Asgard_param = {'Spec len':arr.shape[1], 'Spec amount':arr.shape[0]}
if hold is True :
if axisf is True :
Asgard_param['Spec amount'] -= 1
axis = arr[0,:]
return Asgard_param, axis
else :
return Asgard_param
else :
with open(dst,'ab') as f :
for spec in range(arr.shape[0]):
if axisf is True :
Asgard_param['Spec amount'] -= 1
axis = arr[spec,:]
else :
for pix in arr[spec,:]:
f.write(bytearray(pack('f',pix)))
if axisf is True :
return dst, Asgard_param, axis
else :
return dst, Asgard_param
|
2ac1b25277b466cdcd5c6d78844a7bccee9817a6
| 3,647,617
|
def index():
"""Every time the html page refreshes this function is called.
Checks for any activity from the user (setting an alarm, deleting an alarm,
or deleting a notification)
:return: The html template with alarms and notifications added
"""
notification_scheduler.run(blocking=False)
# get the inputs from the users alarm submission
alarm_time = request.args.get("alarm")
alarm_title = request.args.get("two")
alarm_news = request.args.get("news")
alarm_weather = request.args.get("weather")
check_for_delete_request()
if alarm_title and alarm_time:
alarm = {"alarm time": str(alarm_time), "title": str(alarm_title), "content": "",
"weather": alarm_weather is not None, "news": alarm_news is not None}
notification_delay = get_notification_delay(alarm["alarm time"])
        # if the notification delay is negative then the alarm is set in the past, which is invalid
if notification_delay > 0 and valid_alarm_title(alarm["title"]):
alarm_date_time = alarm_time.split("T")
alarm["content"] = format_alarm_content(alarm_date_time, alarm_news, alarm_weather)
notification_scheduler.enter(notification_delay, len(notifications),
set_off_alarms, (alarm,))
log.info("Alarm set: %s", alarm)
log.info("Delay for alarm: %d seconds", notification_delay)
alarms.append(alarm)
else:
log.error("INVALID ALARM: %s", alarm)
return render_template('index.html', title='Daily update', alarms=alarms,
notifications=notifications, image="alarm_clock.jpg",
favicon="static/images/favicon.jpg")
|
845ba53918bb44d3170a2e93e93346212ccc1247
| 3,647,618
|
import json
import time
def check_icinga_should_run(state_file: str) -> bool:
"""Return True if the script should continue to update the state file, False if the state file is fresh enough."""
try:
with open(state_file) as f:
state = json.load(f)
except Exception as e:
logger.error('Failed to read Icinga state from %s: %s', state_file, e)
return True
delta = time.time() - state['timestamp']
logger.info('Last run was %d seconds ago with exit code %d', delta, state['exit_code'])
if state['exit_code'] == 0:
if delta > ICINGA_RUN_EVERY_MINUTES * 60:
return True
logger.info('Skipping')
return False
if delta > ICINGA_RETRY_ON_FAILURE_MINUTES * 60:
return True
logger.info('Skipping')
return False
|
d508f000eb28da42b43049f49ac180702d49bdc7
| 3,647,619
|
def ln_new_model_to_gll(py, new_flag_dir, output_dir):
"""
make up the new gll directory based on the OUTPUT_MODEL.
"""
script = f"{py} -m seisflow.scripts.structure_inversion.ln_new_model_to_gll --new_flag_dir {new_flag_dir} --output_dir {output_dir}; \n"
return script
|
acdf28cbc2231bd2f33ae418136ce7da0fce421f
| 3,647,620
|
def deserialize_item(item: dict):
"""Deserialize DynamoDB item to Python types.
Args:
item: item to deserialize
Return: deserialized item
"""
return {k: DDB_DESERIALIZER.deserialize(v) for k, v in item.items()}
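A usage sketch (illustrative only; it assumes DDB_DESERIALIZER is a boto3 TypeDeserializer, which is not shown in the snippet itself):
from boto3.dynamodb.types import TypeDeserializer

DDB_DESERIALIZER = TypeDeserializer()  # assumed definition of the module-level deserializer

raw_item = {"pk": {"S": "user#1"}, "age": {"N": "42"}, "active": {"BOOL": True}}
print(deserialize_item(raw_item))
# {'pk': 'user#1', 'age': Decimal('42'), 'active': True}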
|
451d97ed656982b5b8df4fb2178051560cb5d8bd
| 3,647,621
|
def good_result(path_value, pred, source=None, target_path=''):
"""Constructs a JsonFoundValueResult where pred returns value as valid."""
source = path_value.value if source is None else source
return jp.PathValueResult(pred=pred, source=source, target_path=target_path,
path_value=path_value, valid=True)
|
2cfeab7df8b52d64cabad973bffeb1723d9e3215
| 3,647,622
|
def bot_properties(bot_id):
"""
Return all available properties for the given bot. The bot id should be
available in the `app.config` dictionary.
"""
bot_config = app.config['BOTS'][bot_id]
return [pd[0] for pd in bot_config['properties']]
|
a7922173d31fbb0d6b20ef1112cef6f88fe4749a
| 3,647,623
|
import networkx as nx
def find_path(ph_tok_list, dep_parse, link_anchor, ans_anchor, edge_dict, ph_dict):
"""
:param dep_parse: dependency graph
:param link_anchor: token index of the focus word (0-based)
:param ans_anchor: token index of the answer (0-based)
:param link_category: the category of the current focus link
:param edge_dict: <head-dep, rel> dict
:param ph_dict: <token_idx, ph> dict
:return:
"""
if ans_anchor != link_anchor:
edges = []
for head, rel, dep in triples(dep_parse=dep_parse):
edges.append((head, dep))
graph = nx.Graph(edges)
path_nodes = nx.shortest_path(graph, source=ans_anchor+1, target=link_anchor+1) #[0, 1, 2, 3, 4]
else:
path_nodes = [link_anchor]
path_tok_list = []
path_len = len(path_nodes)
if path_len > 0:
for position in range(path_len-1):
edge = edge_dict['%d-%d' % (path_nodes[position], path_nodes[position+1])]
cur_token_idx = path_nodes[position] - 1
if cur_token_idx in ph_dict:
path_tok_list.append(ph_dict[cur_token_idx])
else:
path_tok_list.append(ph_tok_list[cur_token_idx])
path_tok_list.append(edge)
if link_anchor in ph_dict:
path_tok_list.append(ph_dict[link_anchor])
else:
path_tok_list.append('<E>')
return path_tok_list
|
51b621f1f1cdffd645b1528884603a383abf12a5
| 3,647,624
|
import os
def get_theme_section_directories(theme_folder:str, sections:list = []) -> list:
"""Gets a list of the available sections for a theme
Explanation
-----------
Essentially this function goes into a theme folder (full path to a theme), looks for a folder
called sections and returns a list of all the .jinja files available stripped of the extension
so i.e. if `<theme folder>/sections` had 3 files `education.jinja`, `work_experience.jinja` and
`volunteering_experience.jinja` this function would return ['education', 'work_experience', 'volunteering_experience']
Parameters
----------
sections : (list, optional)
A list of sections names, or an empty list if they need to be searched for
theme_folder : str
The full path to the theme folder (typically from calling locate_theme_directory() )
Returns
-------
list
The name(s) of the section templates that exist within the sections list without extensions
"""
if sections:
return sections
if not sections and os.path.exists(os.path.join(theme_folder, "sections")):
for section in os.listdir(os.path.join(theme_folder, "sections")):
if section.endswith(".jinja"):
section = section.replace(".jinja", "")
sections.append(section)
return sections
|
5e024546bbf878e0954660d4bd5adb765ffd7e43
| 3,647,625
|
import logging
def download_video_url(
video_url: str,
pipeline: PipelineContext,
destination="%(title)s.%(ext)s",
progress=ProgressMonitor.NULL,
):
"""Download a single video from the ."""
config = pipeline.config
logger = logging.getLogger(__name__)
logger.info("Starting video download from URL: %s", video_url)
# Setup progress-tracking
progress.scale(total_work=1.0)
progress_tracker = YDLProgressTracker(show_progress_bar=True)
# Resolve destination path template
output_template = complete_template(config.sources.root, destination)
logger.info("Output template: %s", output_template)
ydl_opts = {
"format": "mp4",
"logger": YDLLogger(logger),
"progress_hooks": [progress_tracker.hook],
"outtmpl": output_template,
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
# Determine destination file name
video_info = ydl.extract_info(video_url, download=False)
file_name = ydl.prepare_filename(video_info)
logger.info("Downloading file: %s", file_name)
# Download file
with progress_tracker.track(progress):
ydl.download([video_url])
progress.complete()
return file_name
|
f3546d929fa6c976479fe86b945bb87279a22341
| 3,647,626
|
from urllib.parse import urlparse, urlunparse
def get_block_name(source):
"""Get block name version from source."""
url_parts = urlparse(source)
file_name = url_parts.path
extension = file_name.split(".")[-1]
new_path = file_name.replace("." + extension, "_block." + extension)
new_file_name = urlunparse(
(
url_parts.scheme,
url_parts.netloc,
new_path,
url_parts.params,
url_parts.query,
url_parts.fragment,
)
)
return new_file_name
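For illustration (the example URL is made up): the transformation inserts a _block suffix before the file extension while preserving the rest of the URL. Note that str.replace rewrites every occurrence of the extension substring in the path, not just the final one.
print(get_block_name("https://example.com/media/video.mp4?dl=1"))
# https://example.com/media/video_block.mp4?dl=1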
|
ae2792a4c56baaa9045ed49961ad1c5029191d3d
| 3,647,627
|
import symbol
import token
def int_to_symbol(i):
    """ Convert numeric symbol or token to a descriptive name.
"""
try:
return symbol.sym_name[i]
except KeyError:
return token.tok_name[i]
|
6f939d359dd92961f199dfd412dced3ecaef3a60
| 3,647,628
|
import pudb
def debugger(parser, token):
"""
Activates a debugger session in both passes of the template renderer
"""
pudb.set_trace()
return DebuggerNode()
|
a1ab924ee2ccb1e2389c7432444a829e70a7392b
| 3,647,629
|
def cranimp(i, s, m, N):
"""
Calculates the result of c_i,s^dag a_s acting on an integer m. Returns the new basis state and the fermionic prefactor.
Spin: UP - s=0, DOWN - s=1.
"""
offi = 2*(N-i)-1-s
offimp = 2*(N+1)-1-s
m1 = flipBit(m, offimp)
if m1<m:
m2=flipBit(m1, offi)
if m2>m1:
prefactor = prefactor_offset(m1, offi, N)
prefactor *= prefactor_offset_imp(m, s, N)
return prefactor, m2
return 0, 0
|
aa64f6f5e9d0e596a801d854baf4e222e2f2192e
| 3,647,630
|
from random import randint
def _can_beeify():
    """ Determines if the random chance to beeify has occurred """
return randint(0, 12) == 0
|
c79a116a6d1529d69f88c35a1264735d475b26d4
| 3,647,631
|
def get_object_classes(db):
"""return a list of all object classes"""
    # build the result without shadowing the built-in ``list``
    return [item for item in classinfo]
|
e95676f19f3bf042a5f531d708f2e12a0ab3813f
| 3,647,632
|
import os
import _sha256
import itertools
def load_arviz_data(dataset=None, data_home=None):
"""Load a local or remote pre-made dataset.
Run with no parameters to get a list of all available models.
    The directory to save to can also be set with the environment
variable `ARVIZ_HOME`. The checksum of the dataset is checked against a
hardcoded value to watch for data corruption.
Run `az.clear_data_home` to clear the data directory.
Parameters
----------
dataset : str
Name of dataset to load.
data_home : str, optional
Where to save remote datasets
Returns
-------
xarray.Dataset
"""
if dataset in LOCAL_DATASETS:
resource = LOCAL_DATASETS[dataset]
return from_netcdf(resource.filename)
elif dataset in REMOTE_DATASETS:
remote = REMOTE_DATASETS[dataset]
home_dir = get_data_home(data_home=data_home)
file_path = os.path.join(home_dir, remote.filename)
if not os.path.exists(file_path):
http_type = rcParams["data.http_protocol"]
# Replaces http type. Redundant if http_type is http, useful if http_type is https
url = remote.url.replace("http", http_type)
urlretrieve(url, file_path)
checksum = _sha256(file_path)
if remote.checksum != checksum:
raise IOError(
"{} has an SHA256 checksum ({}) differing from expected ({}), "
"file may be corrupted. Run `arviz.clear_data_home()` and try "
"again, or please open an issue.".format(file_path, checksum, remote.checksum)
)
return from_netcdf(file_path)
else:
if dataset is None:
return dict(itertools.chain(LOCAL_DATASETS.items(), REMOTE_DATASETS.items()))
else:
raise ValueError(
"Dataset {} not found! The following are available:\n{}".format(
dataset, list_datasets()
)
)
|
8cf1020c5e9e9aaa8dbd184ddc9f400a7caaef5f
| 3,647,633
|
import matplotlib.pyplot as plt
def get_ax(rows=1, cols=1, size=8):
"""Return a Matplotlib Axes array to be used in
all visualizations in the notebook. Provide a
central point to control graph sizes.
Change the default size attribute to control the size
of rendered images
"""
_, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))
return ax
|
0a79458ad335856198d5208071581685cd7c34a0
| 3,647,634
|
from typing import Mapping
from typing import Any
import os
import json
import itertools as itt
from copy import deepcopy
def prepare_ablation_from_config(config: Mapping[str, Any], directory: str, save_artifacts: bool):
"""Prepare a set of ablation study directories."""
metadata = config['metadata']
optuna_config = config['optuna']
ablation_config = config['ablation']
evaluator = ablation_config['evaluator']
evaluator_kwargs = ablation_config['evaluator_kwargs']
evaluation_kwargs = ablation_config['evaluation_kwargs']
it = itt.product(
ablation_config['datasets'],
ablation_config['create_inverse_triples'],
ablation_config['models'],
ablation_config['loss_functions'],
ablation_config['regularizers'],
ablation_config['optimizers'],
ablation_config['training_loops'],
)
directories = []
for counter, (
dataset,
create_inverse_triples,
model,
loss,
regularizer,
optimizer,
training_loop,
) in enumerate(it):
experiment_name = f'{counter:04d}_{normalize_string(dataset)}_{normalize_string(model)}'
output_directory = os.path.join(directory, experiment_name)
os.makedirs(output_directory, exist_ok=True)
# TODO what happens if already exists?
_experiment_optuna_config = optuna_config.copy()
_experiment_optuna_config['storage'] = f'sqlite:///{output_directory}/optuna_results.db'
if save_artifacts:
save_model_directory = os.path.join(output_directory, 'artifacts')
os.makedirs(save_model_directory, exist_ok=True)
_experiment_optuna_config['save_model_directory'] = save_model_directory
hpo_config = dict()
for retain_key in ('stopper', 'stopper_kwargs'):
if retain_key in ablation_config:
logger.info(f'Retaining {retain_key} configuration in HPO')
hpo_config[retain_key] = deepcopy(ablation_config[retain_key])
for error_key in ('early_stopping', 'early_stopping_kwargs'):
if error_key in ablation_config:
raise ValueError(f'Outdated key: {error_key}. Please update')
# TODO incorporate setting of random seed
# pipeline_kwargs=dict(
# random_seed=random.randint(1, 2 ** 32 - 1),
# ),
def _set_arguments(key: str, value: str) -> None:
"""Set argument and its values."""
d = {key: value}
kwargs = ablation_config[f'{key}_kwargs'][model][value]
if kwargs:
d[f'{key}_kwargs'] = kwargs
kwargs_ranges = ablation_config[f'{key}_kwargs_ranges'][model][value]
if kwargs_ranges:
d[f'{key}_kwargs_ranges'] = kwargs_ranges
hpo_config.update(d)
# Add dataset to current_pipeline
hpo_config['dataset'] = dataset
logger.info(f"Dataset: {dataset}")
hpo_config['dataset_kwargs'] = dict(create_inverse_triples=create_inverse_triples)
logger.info(f"Add inverse triples: {create_inverse_triples}")
hpo_config['model'] = model
model_kwargs = ablation_config['model_kwargs'][model]
if model_kwargs:
hpo_config['model_kwargs'] = ablation_config['model_kwargs'][model]
hpo_config['model_kwargs_ranges'] = ablation_config['model_kwargs_ranges'][model]
logger.info(f"Model: {model}")
# Add loss function to current_pipeline
_set_arguments(key='loss', value=loss)
logger.info(f"Loss function: {loss}")
# Add regularizer to current_pipeline
_set_arguments(key='regularizer', value=regularizer)
logger.info(f"Regularizer: {regularizer}")
# Add optimizer to current_pipeline
_set_arguments(key='optimizer', value=optimizer)
logger.info(f"Optimizer: {optimizer}")
# Add training approach to current_pipeline
hpo_config['training_loop'] = training_loop
logger.info(f"Training loop: {training_loop}")
if normalize_string(training_loop, suffix=_TRAINING_LOOP_SUFFIX) == 'slcwa':
negative_sampler = ablation_config['negative_sampler']
_set_arguments(key='negative_sampler', value=negative_sampler)
logger.info(f"Negative sampler: {negative_sampler}")
# Add training kwargs and kwargs_ranges
training_kwargs = ablation_config['training_kwargs'][model][training_loop]
if training_kwargs:
hpo_config['training_kwargs'] = training_kwargs
hpo_config['training_kwargs_ranges'] = ablation_config['training_kwargs_ranges'][model][training_loop]
# Add evaluation
hpo_config['evaluator'] = evaluator
if evaluator_kwargs:
hpo_config['evaluator_kwargs'] = evaluator_kwargs
hpo_config['evaluation_kwargs'] = evaluation_kwargs
logger.info(f"Evaluator: {evaluator}")
rv_config = dict(
type='hpo',
metadata=metadata,
pipeline=hpo_config,
optuna=_experiment_optuna_config,
)
rv_config_path = os.path.join(output_directory, 'hpo_config.json')
with open(rv_config_path, 'w') as file:
json.dump(rv_config, file, indent=2, ensure_ascii=True)
directories.append((output_directory, rv_config_path))
return directories
|
c084361ac51102eaf84e3016f3a0c50f1ef9313f
| 3,647,635
|
def spin_coherent(j, theta, phi, type='ket'):
"""Generates the spin state |j, m>, i.e. the eigenstate
of the spin-j Sz operator with eigenvalue m.
Parameters
----------
j : float
The spin of the state.
theta : float
Angle from z axis.
phi : float
Angle from x axis.
type : string {'ket', 'bra', 'dm'}
Type of state to generate.
Returns
-------
state : qobj
Qobj quantum object for spin coherent state
"""
Sp = jmat(j, '+')
Sm = jmat(j, '-')
psi = (0.5 * theta * np.exp(1j * phi) * Sm -
0.5 * theta * np.exp(-1j * phi) * Sp).expm() * spin_state(j, j)
if type == 'ket':
return psi
elif type == 'bra':
return psi.dag()
elif type == 'dm':
return ket2dm(psi)
else:
raise ValueError("invalid value keyword argument 'type'")
|
e64d207aeb27a5cf2ccdb1dff13da52be294c903
| 3,647,636
|
from keras.layers import add  # Keras merge layer; ``operator.add`` cannot sum a list of tensors
def vgg_upsampling(classes, target_shape=None, scale=1, weight_decay=0., block_name='featx'):
"""A VGG convolutional block with bilinear upsampling for decoding.
:param classes: Integer, number of classes
    :param scale: Float, scale factor to the input feature, varying from 0 to 1
:param target_shape: 4D Tuples with targe_height, target_width as
the 2nd, 3rd elements if `channels_last` or as the 3rd, 4th elements if
`channels_first`.
>>> from keras_fcn.blocks import vgg_upsampling
>>> feat1, feat2, feat3 = feat_pyramid[:3]
>>> y = vgg_upsampling(classes=21, target_shape=(None, 14, 14, None),
>>> scale=1, block_name='feat1')(feat1, None)
>>> y = vgg_upsampling(classes=21, target_shape=(None, 28, 28, None),
>>> scale=1e-2, block_name='feat2')(feat2, y)
>>> y = vgg_upsampling(classes=21, target_shape=(None, 224, 224, None),
>>> scale=1e-4, block_name='feat3')(feat3, y)
"""
def f(x, y):
score = Conv2D(filters=classes, kernel_size=(1, 1),
activation='linear',
padding='valid',
kernel_initializer='he_normal',
kernel_regularizer=l2(weight_decay),
name='score_{}'.format(block_name))(x)
if y is not None:
def scaling(xx, ss=1):
return xx * ss
scaled = Lambda(scaling, arguments={'ss': scale},
name='scale_{}'.format(block_name))(score)
score = add([y, scaled])
upscore = BilinearUpSampling2D(
target_shape=target_shape,
name='upscore_{}'.format(block_name))(score)
return upscore
return f
|
9c372520adc3185a8b61b57ed73cc303f47c8275
| 3,647,637
|
def show_toolbar(request):
"""Determine if toolbar will be displayed."""
return settings.DEBUG
|
d29dfd9c6e29509a882c0654802d993c0928bd22
| 3,647,638
|
def compute_metrics(logits, labels, weights):
"""Compute summary metrics."""
loss, weight_sum = compute_weighted_cross_entropy(logits, labels, weights)
acc, _ = compute_weighted_accuracy(logits, labels, weights)
metrics = {
'loss': loss,
'accuracy': acc,
'denominator': weight_sum,
}
return metrics
|
c969b2aadf9b16b1c26755dc1db4f1f24faa2c11
| 3,647,639
|
def start_session(web_session=None):
"""Starts a SQL Editor Session
Args:
web_session (object): The web_session object this session will belong to
Returns:
A dict holding the result message
"""
new_session = SqleditorModuleSession(web_session)
result = Response.ok("New SQL Editor session created successfully.", {
"module_session_id": new_session.module_session_id
})
return result
|
596603e5bc1d21df95728b4797a64cb4ff78fa2a
| 3,647,640
|
import logging
import typing as t
from aiohttp.web import HTTPInternalServerError, Request, Response, json_response  # assumed aiohttp-based handler types
logger = logging.getLogger(__name__)
async def error_middleware(request: Request, handler: t.Callable[[Request], t.Awaitable[Response]]) -> Response:
    """Logs an exception and returns an error message to the client
"""
try:
return await handler(request)
except Exception as e:
logger.exception(e)
return json_response(text=str(e), status=HTTPInternalServerError.status_code)
|
28748bd2018a0527ef740d8bed9c74983900e655
| 3,647,641
|
def init_mobility_accordion():
"""
Initialize the accordion for mobility tab.
Args: None
Returns:
mobility_accordion (object): dash html.Div that contains individual accordions
"""
accord_1 = init_accordion_element(
title="Mobility Index",
id='id_mobility_index',
tab_n=4,
group_n=1
)
accord_2 = init_accordion_element(
title="Comming Soon!",
id='id_metro_accordion',
tab_n=4,
group_n=2
)
accord_3 = init_accordion_element(
title="Comming Soon!",
id='id_tram_accordion',
tab_n=4,
group_n=3
)
accord_4 = init_accordion_element(
title="Comming Soon!",
id='id_bikes_accordion',
tab_n=4,
group_n=4
)
mobility_accordion = [
accord_1,
accord_2,
accord_3,
accord_4
]
return assemble_accordion(mobility_accordion)
|
25c5475e8ea972d057d230526d8dcc82b27d8ee0
| 3,647,642
|
import numpy as np
def per_image_whiten(X):
""" Subtracts the mean of each image in X and renormalizes them to unit norm.
"""
num_examples, height, width, depth = X.shape
X_flat = X.reshape((num_examples, -1))
X_mean = X_flat.mean(axis=1)
X_cent = X_flat - X_mean[:, None]
X_norm = np.sqrt( np.sum( X_cent * X_cent, axis=1) )
X_out = X_cent / X_norm[:, None]
X_out = X_out.reshape(X.shape)
return X_out
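A quick illustrative check (not from the original source) on a random batch in (N, H, W, C) layout:
X = np.random.rand(4, 8, 8, 3)
flat = per_image_whiten(X).reshape(4, -1)
print(np.allclose(flat.mean(axis=1), 0.0))             # True: zero mean per image
print(np.allclose(np.linalg.norm(flat, axis=1), 1.0))  # True: unit norm per image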
|
f831860c3697e6eac637b2fb3e502570fa4f31af
| 3,647,643
|
def fill_defaults(data, vals) -> dict:
"""Fill defaults if source is not present"""
for val in vals:
_name = val['name']
_type = val['type'] if 'type' in val else 'str'
_source = val['source'] if 'source' in val else _name
if _type == 'str':
_default = val['default'] if 'default' in val else ''
if 'default_val' in val and val['default_val'] in val:
_default = val[val['default_val']]
if _name not in data:
data[_name] = from_entry([], _source, default=_default)
elif _type == 'bool':
_default = val['default'] if 'default' in val else False
_reverse = val['reverse'] if 'reverse' in val else False
if _name not in data:
data[_name] = from_entry_bool([], _source, default=_default, reverse=_reverse)
return data
|
aa5df5bca76f1eaa426bf4e416a540fb725eb730
| 3,647,644
|
def static_shuttle_between():
"""
Route endpoint to show real shuttle data within a certain time range at once.
Returns:
rendered website displaying all points at once.
Example:
http://127.0.0.1:5000/?start_time=2018-02-14%2015:40:00&end_time=2018-02-14%2016:02:00
"""
start_time = request.args.get('start_time', default="2018-02-14 13:00:00")
end_time = request.args.get('end_time', default="2018-02-14 17:00:00")
return render_to_static(start_time, end_time)
|
eea24bb0abe90fe7b708ff8a9c73c2795f07865a
| 3,647,645
|
def read_data(inargs, infiles, ref_cube=None):
"""Read data."""
clim_dict = {}
trend_dict = {}
for filenum, infile in enumerate(infiles):
cube = iris.load_cube(infile, gio.check_iris_var(inargs.var))
if ref_cube:
branch_time = None if inargs.branch_times[filenum] == 'default' else str(inargs.branch_times[filenum])
time_constraint = timeseries.get_control_time_constraint(cube, ref_cube, inargs.time, branch_time=branch_time)
cube = cube.extract(time_constraint)
iris.util.unify_time_units([ref_cube, cube])
cube.coord('time').units = ref_cube.coord('time').units
cube.replace_coord(ref_cube.coord('time'))
else:
time_constraint = gio.get_time_constraint(inargs.time)
cube = cube.extract(time_constraint)
#cube = uconv.convert_to_joules(cube)
if inargs.perlat:
grid_spacing = grids.get_grid_spacing(cube)
cube.data = cube.data / grid_spacing
trend_cube = calc_trend_cube(cube.copy())
clim_cube = cube.collapsed('time', iris.analysis.MEAN)
clim_cube.remove_coord('time')
model = cube.attributes['model_id']
realization = 'r' + str(cube.attributes['realization'])
physics = 'p' + str(cube.attributes['physics_version'])
key = (model, physics, realization)
trend_dict[key] = trend_cube
clim_dict[key] = clim_cube
experiment = cube.attributes['experiment_id']
experiment = 'historicalAA' if experiment == "historicalMisc" else experiment
trend_ylabel = get_ylabel(cube, 'trend', inargs)
clim_ylabel = get_ylabel(cube, 'climatology', inargs)
metadata_dict = {infile: cube.attributes['history']}
return cube, trend_dict, clim_dict, experiment, trend_ylabel, clim_ylabel, metadata_dict
|
a3ffc2172394fe5a44e8239152a3f7b7ee660559
| 3,647,646
|
import json
async def create_account(*, user):
"""
Open an account for a user
Save account details in json file
"""
with open("mainbank.json", "r") as f:
users = json.load(f)
if str(user.id) in users:
return False
else:
users[str(user.id)] = {"wallet": 0, "bank": 0}
with open("mainbank.json", "w") as f:
json.dump(users, f)
|
0e1aaccfd0c9cda6238ba8caa90e80979540f2e8
| 3,647,647
|
import ntpath
import genericpath
def commonpath(paths):
"""Given a sequence of path names, returns the longest common sub-path."""
if not paths:
raise ValueError('commonpath() arg is an empty sequence')
if isinstance(paths[0], bytes):
sep = b'\\'
altsep = b'/'
curdir = b'.'
else:
sep = '\\'
altsep = '/'
curdir = '.'
try:
drivesplits = [ntpath.splitdrive(p.replace(altsep, sep).lower()) for p in paths]
split_paths = [p.split(sep) for d, p in drivesplits]
try:
isabs, = set(p[:1] == sep for d, p in drivesplits)
except ValueError:
raise ValueError("Can't mix absolute and relative paths")
# Check that all drive letters or UNC paths match. The check is made only
# now otherwise type errors for mixing strings and bytes would not be
# caught.
if len(set(d for d, p in drivesplits)) != 1:
raise ValueError("Paths don't have the same drive")
drive, path = ntpath.splitdrive(paths[0].replace(altsep, sep))
common = path.split(sep)
common = [c for c in common if c and c != curdir]
split_paths = [[c for c in s if c and c != curdir] for s in split_paths]
s1 = min(split_paths)
s2 = max(split_paths)
for i, c in enumerate(s1):
if c != s2[i]:
common = common[:i]
break
else:
common = common[:len(s1)]
prefix = drive + sep if isabs else drive
return prefix + sep.join(common)
except (TypeError, AttributeError):
genericpath._check_arg_types('commonpath', *paths)
raise
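Illustrative calls (the paths are made up); because the implementation mirrors ntpath.commonpath, mixed separators and case differences are normalized while the returned prefix keeps the spelling of the first path:
print(commonpath([r"C:\Users\Alice\docs\a.txt", "c:/users/alice/music/b.mp3"]))
# C:\Users\Alice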
|
a8ef082e2944138ea08d409e273d724fd5d489eb
| 3,647,648
|
from functools import reduce
from itertools import product
from operator import mul
import numpy as np
from .slicing import sanitize_index
from ..base import tokenize  # dask's tokenize, not the stdlib ``tokenize`` module
from ..utils import M  # dask's methodcaller shorthand, not ``re.M``
def reshape(x, shape):
""" Reshape array to new shape
This is a parallelized version of the ``np.reshape`` function with the
following limitations:
1. It assumes that the array is stored in `row-major order`_
2. It only allows for reshapings that collapse or merge dimensions like
``(1, 2, 3, 4) -> (1, 6, 4)`` or ``(64,) -> (4, 4, 4)``
.. _`row-major order`: https://en.wikipedia.org/wiki/Row-_and_column-major_order
When communication is necessary this algorithm depends on the logic within
rechunk. It endeavors to keep chunk sizes roughly the same when possible.
See Also
--------
dask.array.rechunk
numpy.reshape
"""
# Sanitize inputs, look for -1 in shape
shape = tuple(map(sanitize_index, shape))
known_sizes = [s for s in shape if s != -1]
if len(known_sizes) < len(shape):
if len(known_sizes) - len(shape) > 1:
raise ValueError("can only specify one unknown dimension")
# Fastpath for x.reshape(-1) on 1D arrays, allows unknown shape in x
# for this case only.
if len(shape) == 1 and x.ndim == 1:
return x
missing_size = sanitize_index(x.size / reduce(mul, known_sizes, 1))
shape = tuple(missing_size if s == -1 else s for s in shape)
if np.isnan(sum(x.shape)):
raise ValueError("Array chunk size or shape is unknown. shape: %s", x.shape)
if reduce(mul, shape, 1) != x.size:
raise ValueError("total size of new array must be unchanged")
if x.shape == shape:
return x
meta = meta_from_array(x, len(shape))
name = "reshape-" + tokenize(x, shape)
if x.npartitions == 1:
key = next(flatten(x.__dask_keys__()))
dsk = {(name,) + (0,) * len(shape): (M.reshape, key, shape)}
chunks = tuple((d,) for d in shape)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])
return Array(graph, name, chunks, meta=meta)
# Logic for how to rechunk
inchunks, outchunks = reshape_rechunk(x.shape, shape, x.chunks)
x2 = x.rechunk(inchunks)
# Construct graph
in_keys = list(product([x2.name], *[range(len(c)) for c in inchunks]))
out_keys = list(product([name], *[range(len(c)) for c in outchunks]))
shapes = list(product(*outchunks))
dsk = {a: (M.reshape, b, shape) for a, b, shape in zip(out_keys, in_keys, shapes)}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x2])
return Array(graph, name, outchunks, meta=meta)
|
2e8ed79f95319e02cacf78ce790b6dc550ac4e29
| 3,647,649
|
def get_state(module_instance, incremental_state, key_postfix):
""" Helper for extracting incremental state """
if incremental_state is None:
return None
full_key = _get_full_key(module_instance, key_postfix)
return incremental_state.get(full_key, None)
|
b3ba8f10fd26ed8878cb608076873cad52a19841
| 3,647,650
|
def get_lenovo_urls(from_date, to_date):
"""
Extracts URL on which the data about vulnerabilities are available.
:param from_date: start of date interval
:param to_date: end of date interval
:return: urls
"""
lenovo_url = config['vendor-cve']['lenovo_url']
len_p = LenovoMainPageParser(lenovo_url, from_date, to_date)
len_p.parse()
return len_p.entities
|
503f078d9a4b78d60792a2019553f65432c21320
| 3,647,651
|
def normalize_batch_in_training(x, gamma, beta,
reduction_axes, epsilon=1e-3):
"""
Computes mean and std for batch then apply batch_normalization on batch.
# Arguments
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
# Returns
A tuple length of 3, `(normalized_tensor, mean, variance)`.
"""
if gamma is None:
gamma = ones_like(x)
if beta is None:
beta = zeros_like(x)
mean = av.ops.reduce_mean(x, reduction_axes, True)
variance = av.ops.reduce_mean(av.ops.square(x - mean), reduction_axes, True)
normalized_tensor = batch_normalization(
x, mean, variance, beta, gamma, axis=reduction_axes, epsilon=epsilon)
return normalized_tensor, mean, variance
|
2c1cc9368438cbd62d48c71da013848068a7664e
| 3,647,652
|
from ase.build import get_deviation_from_optimal_cell_shape, find_optimal_cell_shape
def supercell_scaling_by_target_atoms(structure, min_atoms=60, max_atoms=120,
target_shape='sc', lower_search_limit=-2, upper_search_limit=2,
verbose=False):
"""
    Find the supercell scaling matrix that gives the most cubic supercell for a
    structure, where the supercell has between the minimum and maximum number of atoms.
Parameters
----------
structure : pymatgen.Structure
Unitcell of a structure
    min_atoms : int
        Minimum number of atoms allowed in the supercell. Defaults to 60.
max_atoms : int
Maximum number of atoms allowed in the supercell
target_shape : str
Target shape of supercell. Could choose 'sc' for simple cubic or 'fcc' for face centered
cubic. Default is 'sc'.
lower_search_limit : int
How far to search below the 'ideal' cubic scaling. Default is -2.
upper_search_limit : int
        How far to search above the 'ideal' cubic scaling. Default is 2.
verbose : bool
Whether to print extra details on the cell shapes and scores. Useful for debugging.
Returns
-------
numpy.ndarray
2d array of a scaling matrix, e.g. [[3,0,0],[0,3,0],[0,0,3]]
Notes
-----
    The motivation for this is for use in phonon calculations and defect calculations.
It is important that defect atoms are far enough apart that they do not interact.
Scaling unit cells that are not cubic by even dimensions might result in interacting
defects. An example would be a tetragonal cell with 2x8x8 Ang lattice vectors being
made into a 2x2x2 supercell. Atoms along the first dimension would not be very far
apart.
We are using a pure Python implementation from ASE, which is not very fast for a given
supercell size. This allows for a variable supercell size, so it's going to be slow
for a large range of atoms.
    The search limits are passed directly to ``find_optimal_cell_shape``.
They define the search space for each individual supercell based on the "ideal" scaling.
For example, a cell with 4 atoms and a target size of 110 atoms might have an ideal scaling
of 3x3x3. The search space for a lower and upper limit of -2/+2 would be 1-5. Since the
calculations are based on the cartesian product of 3x3 matrices, large search ranges are
very expensive.
"""
# range of supercell sizes in number of unitcells
supercell_sizes = range(min_atoms//len(structure), max_atoms//len(structure) + 1)
optimal_supercell_shapes = [] # numpy arrays of optimal shapes
optimal_supercell_scores = [] # will correspond to supercell size
# find the target shapes
for sc_size in supercell_sizes:
optimal_shape = find_optimal_cell_shape(structure.lattice.matrix, sc_size, target_shape, upper_limit=upper_search_limit, lower_limit=lower_search_limit, verbose = True)
optimal_supercell_shapes.append(optimal_shape)
optimal_supercell_scores.append(get_deviation_from_optimal_cell_shape(optimal_shape, target_shape))
if verbose:
for i in range(len(supercell_sizes)):
print('{} {:0.4f} {}'.format(supercell_sizes[i], optimal_supercell_scores[i], optimal_supercell_shapes[i].tolist()))
# find the most optimal cell shape along the range of sizes
optimal_sc_shape = optimal_supercell_shapes[np.argmin(optimal_supercell_scores)]
return optimal_sc_shape
|
24d7db41a0f270b037eac411fca3f5a6d9a4d8a7
| 3,647,653
|
def itemAPIEndpoint(categoryid):
"""Return page to display JSON formatted information of item."""
items = session.query(Item).filter_by(category_id=categoryid).all()
return jsonify(Items=[i.serialize for i in items])
|
33abd39d7d7270fe3b040c228d11b0017a8b7f83
| 3,647,654
|
def command(settings_module,
command,
bin_env=None,
pythonpath=None,
*args, **kwargs):
"""
run arbitrary django management command
"""
da = _get_django_admin(bin_env)
cmd = "{0} {1} --settings={2}".format(da, command, settings_module)
if pythonpath:
cmd = "{0} --pythonpath={1}".format(cmd, pythonpath)
for arg in args:
cmd = "{0} --{1}".format(cmd, arg)
for key, value in kwargs.iteritems():
if not key.startswith("__"):
cmd = '{0} --{1}={2}'.format(cmd, key, value)
return __salt__['cmd.run'](cmd)
|
6f7f4193b95df786d6c1540f4c687dec89cf01a6
| 3,647,655
|
def read_input(fpath):
"""
Read an input file, and return a list of tuples, each item
containing a single line.
Args:
fpath (str): File path of the file to read.
Returns:
        tuple:
            (rows, columns), where ``rows`` is a list of int tuples (one per line)
            and ``columns`` is the column-wise regrouping produced by ``format_into_columns``.
"""
with open(fpath, 'r') as f:
data = [line.strip() for line in f.readlines()]
rows = [tuple(map(int, d.split())) for d in data]
columns = format_into_columns(data)
return rows, columns
|
ceeb418403bef286eda82ba18cd0ac8e4899ea4f
| 3,647,656
|
import os
def get_parquet_lists():
"""
Load all .parquet files and get train and test splits
"""
parquet_files = [f for f in os.listdir(
Config.data_dir) if f.endswith(".parquet")]
train_files = [f for f in parquet_files if 'train' in f]
test_files = [f for f in parquet_files if 'test' in f]
return train_files, test_files
|
2e533b4526562d70aab1c2ee79f1bebb3e3652af
| 3,647,657
|
def find_level(key):
"""
Find the last 15 bits of a key, corresponding to a level.
"""
return key & LEVEL_MASK
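A small sketch (the LEVEL_MASK constant is not defined in this snippet; a 15-bit mask is assumed here to match the docstring):
LEVEL_MASK = (1 << 15) - 1   # assumed definition: the lowest 15 bits hold the level

key = (12345 << 15) | 7      # identifier in the high bits, level 7 in the low bits
print(find_level(key))       # 7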
|
30c454220e6dac36c1612b5a1a5abf53a7a2911c
| 3,647,658
|
def _whctrs(anchor):
"""return width, height, x center, and y center for an anchor (window)."""
w = anchor[2] - anchor[0] + 1
h = anchor[3] - anchor[1] + 1
x_ctr = anchor[0] + 0.5 * (w - 1)
y_ctr = anchor[1] + 0.5 * (h - 1)
return w, h, x_ctr, y_ctr
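For example (illustrative only), the canonical 16x16 base anchor [0, 0, 15, 15] decomposes as follows:
w, h, x_ctr, y_ctr = _whctrs([0, 0, 15, 15])
print(w, h, x_ctr, y_ctr)  # 16 16 7.5 7.5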
|
e1a6ff1745aac77e80996bfbb98f42c18af059d7
| 3,647,659
|
def filter_tiddlers(tiddlers, filters, environ=None):
"""
Return a generator of tiddlers resulting from filtering the provided
iterator of tiddlers by the provided filters.
If filters is a string, it will be parsed for filters.
"""
if isinstance(filters, basestring):
filters, _ = parse_for_filters(filters, environ)
return recursive_filter(filters, tiddlers)
|
25c86fdcb6f924ce8349d45b999ebe491c4b6299
| 3,647,660
|
def apply_move(board_state, move, side):
"""Returns a copy of the given board_state with the desired move applied.
Args:
board_state (3x3 tuple of int): The given board_state we want to apply the move to.
move (int, int): The position we want to make the move in.
side (int): The side we are making this move for, 1 for the first player, -1 for the second player.
Returns:
(3x3 tuple of int): A copy of the board_state with the given move applied for the given side.
"""
move_x, move_y = move
def get_tuples():
for x in range(3):
if move_x == x:
temp = list(board_state[x])
temp[move_y] = side
yield tuple(temp)
else:
yield board_state[x]
return tuple(get_tuples())
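An illustrative game step (not from the original source), starting from an empty 3x3 board where 0 marks a free cell:
empty_board = ((0, 0, 0), (0, 0, 0), (0, 0, 0))
board = apply_move(empty_board, (1, 1), 1)    # first player takes the centre
board = apply_move(board, (0, 2), -1)         # second player answers in a corner
print(board)  # ((0, 0, -1), (0, 1, 0), (0, 0, 0))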
|
b47da6ddab3bd1abf99ee558471a3696e46b8352
| 3,647,661
|
import copy
from functools import reduce
def merge(dicts, overwrite=False, append=False, list_of_dicts=False):
""" merge dicts,
starting with dicts[1] into dicts[0]
Parameters
----------
dicts : list[dict]
list of dictionaries
overwrite : bool
if true allow overwriting of current data
append : bool
if true and items are both lists, then add them
list_of_dicts: bool
treat list of dicts as additional branches
Examples
--------
>>> from pprint import pprint
>>> d1 = {1:{"a":"A"},2:{"b":"B"}}
>>> d2 = {1:{"a":"A"},2:{"c":"C"}}
>>> pprint(merge([d1,d2]))
{1: {'a': 'A'}, 2: {'b': 'B', 'c': 'C'}}
>>> d1 = {1:{"a":["A"]}}
>>> d2 = {1:{"a":["D"]}}
>>> pprint(merge([d1,d2],append=True))
{1: {'a': ['A', 'D']}}
>>> d1 = {1:{"a":"A"},2:{"b":"B"}}
>>> d2 = {1:{"a":"X"},2:{"c":"C"}}
>>> merge([d1,d2],overwrite=False)
Traceback (most recent call last):
...
ValueError: different data already exists at "1.a": old: A, new: X
>>> merge([{},{}],overwrite=False)
{}
>>> merge([{},{'a':1}],overwrite=False)
{'a': 1}
>>> pprint(merge([{},{'a':1},{'a':1},{'b':2}]))
{'a': 1, 'b': 2}
>>> pprint(merge([{'a':[{"b": 1}, {"c": 2}]}, {'a':[{"d": 3}]}]))
Traceback (most recent call last):
...
ValueError: different data already exists at "a": old: [{'b': 1}, {'c': 2}], new: [{'d': 3}]
>>> pprint(merge([{'a':[{"b": 1}, {"c": 2}]}, {'a':[{"d": 3}]}], list_of_dicts=True))
Traceback (most recent call last):
...
ValueError: list of dicts are of different lengths at "a": old: [{'b': 1}, {'c': 2}], new: [{'d': 3}]
>>> pprint(merge([{'a':[{"b": 1}, {"c": 2}]}, {'a':[{"d": 3}, {"e": 4}]}], list_of_dicts=True))
{'a': [{'b': 1, 'd': 3}, {'c': 2, 'e': 4}]}
""" # noqa: E501
outdict = copy.deepcopy(dicts[0])
def single_merge(a, b):
return _single_merge(a, b, overwrite=overwrite, append=append,
list_of_dicts=list_of_dicts)
reduce(single_merge, [outdict] + dicts[1:])
return outdict
|
fdbde1c83f2fbcb74be5c4fb1376af4981655ad7
| 3,647,662
|
import re
def compute_delivery_period_index(frequency = None,
delivery_begin_dt_local = None,
delivery_end_date_local = None,
tz_local = None,
profile = None,
):
"""
Computes the delivery period index of a given contract.
:param frequency: The type of delivery contract (year, month, etc.)
:param delivery_begin_dt_local: The beginning datetime of the delivery
:param delivery_end_date_local: The end date of the delivery
    :param tz_local: The local timezone
:param profile: The profile of the contract
:type frequency: string
:type delivery_begin_dt_local: pd.Timestamp
:type delivery_end_date_local: pd.Timestamp
    :type tz_local: pytz.tzfile
:type profile: string
:return: The delivery period index
:rtype: int
"""
if ( pd.isnull(delivery_begin_dt_local)
or frequency == global_var.contract_frequency_unknown
or frequency == global_var.contract_frequency_spread
):
return global_var.contract_delivery_period_index_unknown
assert tz_local
assert delivery_begin_dt_local.tz.zone == (tz_local
if type(tz_local) == str
else
tz_local.zone
), (delivery_begin_dt_local.tz.zone,
tz_local,
)
if frequency == global_var.contract_frequency_half_hour:
        ans = int('{0:0>2}{1:0>2}{2:0>2}{3:0>2}'.format(delivery_begin_dt_local.month,
delivery_begin_dt_local.day,
delivery_begin_dt_local.hour,
delivery_begin_dt_local.minute,
))
elif frequency == global_var.contract_frequency_hour:
ans = int('{0:0>2}{1:0>2}{2:0>2}'.format(delivery_begin_dt_local.month,
delivery_begin_dt_local.day,
delivery_begin_dt_local.hour,
))
elif frequency == global_var.contract_frequency_bloc:
bloc_match = re.compile(global_var.contract_profile_bloc_pattern).match(profile)
hour1 = int(bloc_match.group(1))
hour2 = int(bloc_match.group(2))
assert hour1 < hour2
ans = int('{0:0>2}{1:0>2}{2:0>2}{3:0>2}'.format(delivery_begin_dt_local.month,
delivery_begin_dt_local.day,
hour1,
hour2,
))
elif frequency == global_var.contract_frequency_day:
ans = int('{0:0>2}{1:0>2}'.format(delivery_begin_dt_local.month,
delivery_begin_dt_local.day,
))
elif frequency == global_var.contract_frequency_days:
ans = int('{0:0>2}{1:0>2}{2}'.format(delivery_begin_dt_local.month,
delivery_begin_dt_local.day,
int(( delivery_end_date_local
- delivery_begin_dt_local.replace(hour = 0, minute = 0)
).total_seconds()/(3600*24)),
))
elif frequency == global_var.contract_frequency_weekend:
ans = int('{0:0>2}{1:0>2}'.format(delivery_begin_dt_local.month,
delivery_begin_dt_local.day,
))
elif frequency == global_var.contract_frequency_week:
ans = int('{0:0>2}{1:0>2}'.format(delivery_begin_dt_local.month,
delivery_begin_dt_local.day,
))
elif frequency == global_var.contract_frequency_bow:
ans = int('{0:0>2}{1:0>2}'.format(delivery_begin_dt_local.month,
delivery_begin_dt_local.day,
))
elif frequency == global_var.contract_frequency_month:
ans = delivery_begin_dt_local.month
elif frequency == global_var.contract_frequency_bom:
ans = int('{0:0>2}{1:0>2}'.format(delivery_begin_dt_local.month,
delivery_begin_dt_local.day,
))
elif frequency == global_var.contract_frequency_quarter:
ans = (delivery_begin_dt_local.month//3)+1
elif frequency == global_var.contract_frequency_season:
if delivery_begin_dt_local.month == 4:
ans = global_var.contract_delivery_period_index_summer
elif delivery_begin_dt_local.month == 10:
ans = global_var.contract_delivery_period_index_winter
else:
raise ValueError(frequency, delivery_begin_dt_local)
elif frequency == global_var.contract_frequency_year:
ans = global_var.contract_delivery_period_index_year
else:
raise NotImplementedError(frequency, delivery_begin_dt_local)
return ans
|
4eb47c857a235a7db31624dc78c83f291f0ba67a
| 3,647,663
|
def make_proxy(global_conf, address, allowed_request_methods="",
suppress_http_headers=""):
"""
Make a WSGI application that proxies to another address:
``address``
the full URL ending with a trailing ``/``
``allowed_request_methods``:
        a space separated list of request methods (e.g., ``GET POST``)
``suppress_http_headers``
        a space separated list of http headers (lower case, without
the leading ``http_``) that should not be passed on to target
host
"""
allowed_request_methods = aslist(allowed_request_methods)
suppress_http_headers = aslist(suppress_http_headers)
return Proxy(
address,
allowed_request_methods=allowed_request_methods,
suppress_http_headers=suppress_http_headers)
|
054bcce2d10db2947d5322283e4e3c87328688cb
| 3,647,664
|
import unittest
def test():
"""Runs the unit tests without test coverage."""
tests = unittest.TestLoader().discover('eachday/tests',
pattern='test*.py')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
return 0
return 1
|
3d8fcfada7309e62215fd2b3a1913ed51d5f14f8
| 3,647,665
|
async def save_training_result(r: dependency.TrainingResultHttpBody):
"""
Saves the model training statistics to the database. This method is called only by registered dataset
microservices.
:param r: Training Result with updated fields sent by dataset microservice
:return: {'status': 'success'} if successful update, else http error.
"""
tr = get_training_result_by_training_id(r.training_id)
tr.training_accuracy = r.results['training_accuracy']
tr.validation_accuracy = r.results['validation_accuracy']
tr.training_loss = r.results['training_loss']
tr.validation_loss = r.results['validation_loss']
tr.loss_config = r.results['loss_config']
tr.optimizer_config = r.results['optimizer_config']
tr.complete = True
update_training_result_db(tr)
return {
'status': 'success',
'detail': 'Training data successfully updated.'
}
|
60302a3053a8be3781ad8da9e75b05aed85a5b06
| 3,647,666
|
def sort2nd(xs):
"""Returns a list containing the same elements as xs, but sorted by their
second elements."""
    xs.sort(key=lambda x: x[1])  # Python 3: sort in place by the second element
return xs
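# Hedged usage sketch (not part of the original snippet): the input list is
# sorted in place and also returned.
assert sort2nd([("a", 3), ("b", 1), ("c", 2)]) == [("b", 1), ("c", 2), ("a", 3)]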
|
9b9ccef6794db2cfaa31492eeddb0d6344ff30e5
| 3,647,667
|
def is_one_of_type(val, types):
"""Returns whether the given value is one of the given types.
:param val: The value to evaluate
:param types: A sequence of types to check against.
:return: Whether the given value is one of the given types.
"""
result = False
val_type = type(val)
    for tt in types:
        if val_type is tt:
            result = True
            break
    return result
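# Hedged usage sketch (not part of the original snippet): the comparison is on
# exact types, so subclasses such as bool do not match int.
assert is_one_of_type(5, [int, float]) is True
assert is_one_of_type(True, [int, float]) is False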
|
4bda5ebc41aa7377a93fdb02ce85c50b9042e2c1
| 3,647,668
|
import os
import fnmatch
import re
def load_qadata(qa_dir):
"""
:param qa_dir: the file path of the provided QA dataset, eg: /data/preprocessed_data_10k/test;
:return: the dictionary of the QA dataset, for instance QA_1_;
"""
print("begin_load_qadata")
qa_set = {}
# os.walk: generates the file names in a directory tree by walking the tree.
# default: top, which is used to yield 3-tuples,
# i.e., (dirpath, dirnames, filenames) for each directory rooted at directory
for root, dirnames, filenames in os.walk(qa_dir):
if(dirnames == []):
qa_id = root[root.rfind("_")+1:]
qa_dict ={}
for filename in fnmatch.filter(filenames, '*.txt'):
                pattern = re.compile(r'QA_\d+_')  # raw string avoids invalid escape-sequence warnings
                # re.sub: substitute the pattern with "" in filename.
                keystr = re.sub(pattern, "", filename).replace(".txt", "")
                with open(os.path.join(root, filename)) as qa_file:
                    qa_dict[keystr] = qa_file.readlines()
qa_set[qa_id] = qa_dict
print("load_qadata_success")
return qa_set
|
3f0060d34ca47951efb29388f723fe75bfaa875a
| 3,647,669
|
def get_online(cheat_id):
    """Get the current online count for a cheat
    ---
    consumes:
      - application/json
    parameters:
      - in: path
        name: cheat_id
        type: string
        description: Cheat ObjectId as a string
    responses:
      200:
        description: Successful request
      400:
        schema:
          $ref: '#/definitions/Error'
    """
    count = 0
    if cheat_id in online_counter_dict:
        count = sum(1 for _ in online_counter_dict[cheat_id])
    return make_response({'online': count}), 200
|
bcfc0c44a0284ad298f533bdb8afd1e415be13b8
| 3,647,670
|
def grpc_client_connection(svc: str = None, target: str = None, session: Session = None) -> Channel:
"""
Create a new GRPC client connection from a service name, target endpoint and session
@param svc: The name of the service to which we're trying to connect (ex. blue)
@param target: The endpoint, associated with the service, to which the connection
should direct its GRPC requests
@param session: The session to associate with the connection. This object will be
used to authenticate with the service
"""
# First, set the session and target to default values if they weren't provided
session = session if session else Session()
target = target if target else BLUE_ENDPOINT
# Next, get the access token from the session and then embed
# it into credentials we can send to the GRPC service
token = session.access_token()
credentials = composite_channel_credentials(
ssl_channel_credentials(),
access_token_call_credentials(token))
# Now, create a secure channel from the target and credentials
if svc:
conn = secure_channel(
target = target,
credentials = credentials,
options = (('grpc.enable_http_proxy', 0),),
interceptors = [
_header_adder_interceptor("service-name", svc),
_header_adder_interceptor("x-agent", "blue-sdk-python")])
else:
conn = secure_channel(target = target, credentials = credentials)
# Return the connection
return conn
|
fc805b15c1d94bcde5ac4eacfc72d854f860f95f
| 3,647,671
|
import numpy as np
def aqi(pm25):
"""AQI Calculator
Calculates AQI from PM2.5 using EPA formula and breakpoints from:
https://www.airnow.gov/sites/default/files/2018-05/aqi-technical
-assistance-document-may2016.pdf
Args:
- pm25 (int or float): PM2.5 in ug/m3
"""
if pm25 < 0:
raise ValueError("PM2.5 must be positive.")
else:
# round PM2.5 to nearest tenth for categorization
pm25 = np.round(pm25, 1)
green = {
"aqi_low": 0,
"aqi_hi": 50,
"pm_low": 0.0,
"pm_hi": 12.0
}
yellow = {
"aqi_low": 51,
"aqi_hi": 100,
"pm_low": 12.1,
"pm_hi": 35.4
}
orange = {
"aqi_low": 101,
"aqi_hi": 150,
"pm_low": 35.5,
"pm_hi": 55.4
}
red = {
"aqi_low": 151,
"aqi_hi": 200,
"pm_low": 55.5,
"pm_hi": 150.4
}
purple = {
"aqi_low": 201,
"aqi_hi": 300,
"pm_low": 150.5,
"pm_hi": 250.4
}
maroon = {
"aqi_low": 301,
"aqi_hi": 500,
"pm_low": 250.5,
"pm_hi": 500.4
}
colors = [green, yellow, orange, red, purple, maroon]
categorized = False
# Assign measurement to AQI category.
for color in colors:
if pm25 >= color["pm_low"] and pm25 <= color["pm_hi"]:
cat = color
categorized = True
break
# else:
# pass
# Put in highest category if still not assigned.
if not categorized:
cat = colors[-1]
# EPA formula for AQI.
aqi_num = (cat["aqi_hi"] - cat["aqi_low"]) / \
(cat["pm_hi"] - cat["pm_low"]) * \
(pm25 - cat["pm_low"]) + cat["aqi_low"]
return aqi_num
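# Hedged usage sketch (not part of the original snippet): 35.9 ug/m3 falls in
# the "orange" breakpoint band, 10.0 ug/m3 in the "green" band.
print(round(aqi(35.9)))  # ~102
print(round(aqi(10.0)))  # ~42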
|
199066221d91a527ea3c2f3f67a994eb13b7a708
| 3,647,672
|
from django.db import connection
def require_lock(model, lock='ACCESS EXCLUSIVE'):
"""
Decorator for PostgreSQL's table-level lock functionality
Example:
@transaction.commit_on_success
@require_lock(MyModel, 'ACCESS EXCLUSIVE')
def myview(request)
...
PostgreSQL's LOCK Documentation:
http://www.postgresql.org/docs/8.3/interactive/sql-lock.html
"""
def require_lock_decorator(view_func):
def wrapper(*args, **kwargs):
if lock not in LOCK_MODES:
                raise ValueError('%s is not a PostgreSQL supported lock mode.' % lock)
cursor = connection.cursor()
cursor.execute(
'LOCK TABLE %s IN %s MODE' % (model._meta.db_table, lock)
)
return view_func(*args, **kwargs)
return wrapper
return require_lock_decorator
|
1cfa74246ddbde9840f5e519e1481cd8773fb038
| 3,647,673
|
def welcome():
"""List all available api routes."""
# Set the app.route() decorator for the "/api/v1.0/precipitation" route
return (
f"Available Routes:<br/>"
f"/api/v1.0/names<br/>"
f"/api/v1.0/precipitation"
)
|
74d6509fede66bf4243b9e4a4e107391b13aef16
| 3,647,674
|
import numpy as np
from joblib import Parallel, delayed  # assumed provider of Parallel/delayed
def pbootstrap(data, R, fun, initval = None, ncpus = 1):
"""
:func pbootstrap: Calls boot method for R iteration in parallel and gets estimates of y-intercept
and slope
:param data: data - contains dataset
:param R: number of iterations
    :param fun: optim - function to get estimate of y-intercept and slope
:param initval: initial guess of y-intercept and slope can be passed - optional
:param ncpus: number of physical cores to run the pbootstrap method - optional
:return: estimates of y-intercept and slope
"""
N = data.shape[0]
thetas = Parallel(ncpus) (delayed(boot) (data, N, fun, initval) for _ in range(R))
return np.asarray(thetas)
|
f5d1b523969735ef30873f593472e79f8399622c
| 3,647,675
|
def _el_orb(string):
"""Parse the element and orbital argument strings.
The presence of an element without any orbitals means that we want to plot
all of its orbitals.
Args:
string (str): The element and orbitals as a string, in the form
``"C.s.p,O"``.
Returns:
dict: The elements and orbitals as a :obj:`dict`. For example::
{'Bi': ['s', 'px', 'py', 'd']}.
If an element symbol is included with an empty list, then all orbitals
for that species are considered.
"""
el_orbs = {}
for split in string.split(','):
orbs = split.split('.')
orbs = [orbs[0], 's', 'p', 'd', 'f'] if len(orbs) == 1 else orbs
el_orbs[orbs.pop(0)] = orbs
return el_orbs
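# Hedged usage sketch (not part of the original snippet): a bare element symbol
# expands to all of the s, p, d and f orbitals.
assert _el_orb("C.s.p,O") == {"C": ["s", "p"], "O": ["s", "p", "d", "f"]}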
|
654d085347913bca2fd2834816b988ea81ab7164
| 3,647,676
|
import numpy
def create_LOFAR_configuration(antfile: str, meta: dict = None) -> Configuration:
""" Define from the LOFAR configuration file
:param antfile:
:param meta:
:return: Configuration
"""
antxyz = numpy.genfromtxt(antfile, skip_header=2, usecols=[1, 2, 3], delimiter=",")
nants = antxyz.shape[0]
assert antxyz.shape[1] == 3, "Antenna array has wrong shape %s" % antxyz.shape
anames = numpy.genfromtxt(antfile, dtype='str', skip_header=2, usecols=[0], delimiter=",")
mounts = numpy.repeat('XY', nants)
location = EarthLocation(x=[3826923.9] * u.m, y=[460915.1] * u.m, z=[5064643.2] * u.m)
fc = Configuration(location=location, names=anames, mount=mounts, xyz=antxyz, frame='global',
diameter=35.0)
return fc
|
0ed07f1cdd0ef193e51cf88d336cbb421f6ea248
| 3,647,677
|
def fmla_for_filt(filt):
"""
transform a set of column filters
from a dictionary like
{ 'varX':['lv11','lvl2'],...}
into an R selector expression like
'varX %in% c("lvl1","lvl2")' & ...
"""
return ' & '.join([
'{var} %in% c({lvls})'.format(
var=k,
lvls=','.join(map(lambda x:'"%s"' % x, v)) if
type(v) == list else '"%s"' % v
) for k, v in filt.items()
])
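# Hedged usage sketch (not part of the original snippet): scalar values are
# wrapped in a single-element c(...) just like lists (dict order is preserved
# on Python 3.7+).
_fmla = fmla_for_filt({"varX": ["lvl1", "lvl2"], "varY": "a"})
assert _fmla == 'varX %in% c("lvl1","lvl2") & varY %in% c("a")'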
|
149d23822a408ad0d96d7cefd393b489b4b7ecfa
| 3,647,678
|
def sfen_board(ban):
"""Convert ban (nrow*nrow array) to sfen string
"""
s = ''
num = 0
for iy in range(nrow):
for ix in range(nrow):
i = iy*nrow + ix
if ban[i]:
if num:
s += str(num)
num = 0
s += ban[i]
            else:
                num += 1
        # Flush any trailing empty-square count before closing the rank.
        if num:
            s += str(num)
            num = 0
        if iy < nrow - 1:
            s += '/'
    return s
|
55bf08c39457278ff8aaca35f1dd5f3fd6955590
| 3,647,679
|
import time
def join_simple_tables(G_df_dict, G_data_info, G_hist, is_train, remain_time):
"""
    Build G_df_dict['BIG'] by joining the simple (1-1) tables onto the main table.
"""
start = time.time()
if is_train:
if 'relations' in G_data_info:
G_hist['join_simple_tables'] = [x for x in G_data_info['relations'] if
x['type'] == '1-1' and x['related_to_main_table'] == 'true']
else:
G_hist['join_simple_tables'] = []
time_budget = G_data_info['time_budget']
Id = G_data_info['target_id']
target = G_data_info['target_label']
main_table_name = G_data_info['target_entity']
log('[+] join simple tables')
G_df_dict['BIG'] = G_df_dict[main_table_name]
    # If the data are a time series, sort the BIG table by the target time column
    if G_data_info['target_time'] != '':
        G_df_dict['BIG'] = G_df_dict['BIG'].sort_values(by=G_data_info['target_time'])
for relation in G_hist['join_simple_tables']:
left_table_name = relation['left_entity']
right_table_name = relation['right_entity']
left_on = relation['left_on']
right_on = relation['right_on']
if main_table_name == left_table_name:
merge_table_name = right_table_name
skip_name = right_on
else:
merge_table_name = left_table_name
left_on, right_on = right_on, left_on
skip_name = left_on
log(merge_table_name)
merge_table = G_df_dict[merge_table_name].copy()
merge_table.columns = [x if x in skip_name else merge_table_name + "_" + x for x in merge_table.columns]
G_df_dict['BIG'] = G_df_dict['BIG'].merge(merge_table, left_on=left_on, right_on=right_on, how='left')
log(f"G_df_dict['BIG'].shape: {G_df_dict['BIG'].shape}")
end = time.time()
remain_time -= (end - start)
log("remain_time: {} s".format(remain_time))
return remain_time
|
ba625cee3d4ede6939b8e12ce734f85325044349
| 3,647,680
|
def create_epochs(data, events_onsets, sampling_rate=1000, duration=1, onset=0, index=None):
"""
Epoching a dataframe.
Parameters
----------
data : pandas.DataFrame
Data*time.
events_onsets : list
A list of event onsets indices.
sampling_rate : int
Sampling rate (samples/second).
duration : int or list
Duration(s) of each epoch(s) (in seconds).
onset : int
Epoch onset(s) relative to events_onsets (in seconds).
index : list
        Event names, in order, to be used as the index. Must contain unique names. If not provided, event numbers are used instead.
Returns
----------
epochs : dict
dict containing all epochs.
Example
----------
>>> import neurokit as nk
>>> epochs = nk.create_epochs(data, events_onsets)
Notes
----------
*Authors*
- Dominique Makowski (https://github.com/DominiqueMakowski)
*Dependencies*
- numpy
"""
# Convert ints to arrays if needed
if isinstance(duration, list) or isinstance(duration, np.ndarray):
duration = np.array(duration)
else:
duration = np.array([duration]*len(events_onsets))
if isinstance(onset, list) or isinstance(onset, np.ndarray):
onset = np.array(onset)
else:
onset = np.array([onset]*len(events_onsets))
if isinstance(data, list) or isinstance(data, np.ndarray) or isinstance(data, pd.Series):
data = pd.DataFrame({"Signal": list(data)})
# Store durations
duration_in_s = duration.copy()
onset_in_s = onset.copy()
# Convert to timepoints
duration = duration*sampling_rate
onset = onset*sampling_rate
# Create the index
if index is None:
index = list(range(len(events_onsets)))
else:
if len(list(set(index))) != len(index):
print("NeuroKit Warning: create_epochs(): events_names does not contain uniques names, replacing them by numbers.")
index = list(range(len(events_onsets)))
else:
index = list(index)
# Create epochs
epochs = {}
for event, event_onset in enumerate(events_onsets):
epoch_onset = int(event_onset + onset[event])
epoch_end = int(event_onset+duration[event]+1)
epoch = data[epoch_onset:epoch_end].copy()
epoch.index = np.linspace(start=onset_in_s[event], stop=duration_in_s[event], num=len(epoch), endpoint=True)
relative_time = np.linspace(start=onset[event], stop=duration[event], num=len(epoch), endpoint=True).astype(int).tolist()
absolute_time = np.linspace(start=epoch_onset, stop=epoch_end, num=len(epoch), endpoint=True).astype(int).tolist()
epoch["Epoch_Relative_Time"] = relative_time
epoch["Epoch_Absolute_Time"] = absolute_time
epochs[index[event]] = epoch
return(epochs)
|
d173b04d5e5835509a41b3ac2288d0d01ff54784
| 3,647,681
|
from typing import Type
def is_equal_limit_site(
    site: SiteToUse, limit_site: SiteToUse, site_class: Type[Site]
) -> bool:
    """Return whether ``site`` coincides with the given limit site."""
if site_class == Site:
return site.point.x == limit_site.x and site.point.y == limit_site.y
elif site_class == WeightedSite:
return (
site.point.x == limit_site[0].x
and site.point.y == limit_site[0].y
and site.weight == limit_site[1]
)
|
1ebe8b18749bb42cf1e55e89a1e861b687f8881b
| 3,647,682
|
def get_header(filename):
"""retrieves the header of an image
Args:
filename (str): file name
Returns:
(str): header
"""
im = fabio.open(filename)
return im.header
|
a3c195d23b671179bc765c081c0a1e6b9119a71d
| 3,647,683
|
def gaussian_ll_pdf(x, mu, sigma):
"""Evaluates the (unnormalized) log of the normal PDF at point x
Parameters
----------
x : float or array-like
point at which to evaluate the log pdf
mu : float or array-like
mean of the normal on a linear scale
sigma : float or array-like
standard deviation of the normal on a linear scale
"""
log_pdf = -0.5*(x - mu)**2.0/sigma**2.0 #- np.log(sigma) - 0.5*np.log(2.0*np.pi)
return log_pdf
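# Hedged worked example (not part of the original snippet): the unnormalized
# log-pdf is 0 at the mean and -0.5 one standard deviation away.
assert gaussian_ll_pdf(0.0, 0.0, 1.0) == 0.0
assert gaussian_ll_pdf(1.0, 0.0, 1.0) == -0.5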
|
dbf1e389ad8349093c6262b2c595a2e511f2cb28
| 3,647,684
|
def _show_traceback(method):
"""decorator for showing tracebacks in IPython"""
def m(self, *args, **kwargs):
try:
return(method(self, *args, **kwargs))
except Exception as e:
ip = get_ipython()
if ip is None:
self.log.warn("Exception in widget method %s: %s", method, e, exc_info=True)
else:
ip.showtraceback()
return m
|
28909d57247d68200adf1e658ed4d3f7c36f0221
| 3,647,685
|
def ecdf(data):
"""Compute ECDF for a one-dimensional array of measurements."""
# Number of data points
n = len(data)
# x-data for the ECDF
x = np.sort(data)
# y-data for the ECDF
y = np.arange(1, len(x)+1) / n
return x, y
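# Hedged usage sketch (not part of the original snippet); numpy is assumed to
# be available as np, as the function body already requires.
import numpy as np
_x, _y = ecdf(np.array([3.0, 1.0, 2.0]))
assert np.allclose(_x, [1.0, 2.0, 3.0])
assert np.allclose(_y, [1 / 3, 2 / 3, 1.0])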
|
e0271f87e2c031a55c84de94dbfed34ec34d34f1
| 3,647,686
|
def import_module_part(request, pk):
"""Module part import. Use an .xlsx file to submit grades to a module part
On GET the user is presented with a file upload form.
On POST, the submitted .xlsx file is processed by the system, registering Grade object for each grade in the excel
file. It dynamically detects the tests that are submitted (by exact name match or database ID), and omits extra
columns silently. Also, lines that do not have a filled in student number are ignored. Students that are not
declared as part of the module (def:import_student_to_module) raise an import error.
:param request: Django request
:param pk: Module part that grades should be submitted to
:return: A redirect to the Grades course view on success. Otherwise a 404 (module does not exist), 403
(no permissions) or 400 (bad excel file or other import error)
"""
module_part = get_object_or_404(ModulePart, pk=pk)
module_edition = get_object_or_404(ModuleEdition, modulepart=module_part)
person = Person.objects.filter(user=request.user).filter(
Q(coordinator__module_edition__modulepart=module_part) | Q(teacher__module_part=module_part)
).first()
if not ModuleEdition.objects.filter(modulepart=module_part):
raise Http404('Module does not exist.')
if not (is_coordinator_or_assistant_of_module(person, module_edition) or is_coordinator_or_teacher_of_module_part(person, module_part)):
raise PermissionDenied('You are not allowed to do this.')
if request.method == "POST":
form = GradeUploadForm(request.POST, request.FILES)
if form.is_valid():
title_row = form.cleaned_data.get('title_row') - 1
# Check if /any/ tests and/or grades are imported.
any_tests = False
# List of all tests that are imported.
all_tests = []
sheet = request.FILES['file'].get_book_dict()
for table in sheet:
# Check if the sheet has enough rows
if title_row >= len(sheet[table]):
return bad_request(request, {'message': 'The file that was uploaded was not recognised as a grade'
' excel file. Are you sure the file is an .xlsx file, and'
' that all fields are present? Otherwise, download a new'
' gradesheet and try using that instead.'})
test_rows = dict()
university_number_field = None
# Detect university_number and test columns
for title_index in range(0, len(sheet[table][title_row])):
# This is the university number column
if ('number' in str(sheet[table][title_row][title_index]).lower()) or \
('nummer' in str(sheet[table][title_row][title_index]).lower()):
university_number_field = title_index
else:
# Attempt to find a Test
# search by ID
try:
test = Test.objects.filter(
pk=sheet[table][title_row][title_index])
if test and test.filter(module_part=module_part):
test_rows[title_index] = sheet[table][title_row][title_index] # pk of Test
any_tests = True
except (ValueError, TypeError):
pass # Not an int.
# search by name
if Test.objects.filter(module_part=module_part).filter(
name=sheet[table][title_row][title_index]):
test_rows[title_index] = Test.objects.filter(
name=sheet[table][title_row][title_index]
).filter(module_part=module_part)[0].pk # pk of Test
any_tests = True
# Attempt to ignore test altogether.
else:
pass
if university_number_field is None:
continue # Ignore this sheet
if len(test_rows.keys()) == 0:
continue # Ignore this sheet
# The current user's Person is the corrector of the grades.
teacher = Person.objects.filter(user=request.user).first()
grades = []
# Retrieve Test object beforehand to validate permissions on tests and speed up Grade creation
tests = dict()
for test_column in test_rows.keys():
tests[test_column] = Test.objects.get(pk=test_rows[test_column])
[all_tests.append(test) for test in tests.values() if test]
# Check excel file for invalid students
invalid_students = []
for row in sheet[table][(title_row + 1):]:
if not Studying.objects.filter(person__university_number__contains=row[university_number_field]).filter(
module_edition=module_edition):
invalid_students.append(row[university_number_field])
# Check for invalid student numbers in the university_number column, but ignore empty fields.
                if [student for student in invalid_students if student != '']:
return bad_request(request, {'message': 'Students {} are not enrolled in this module.\n '
'Enroll these students first before retrying'
.format(invalid_students)})
# Make Grades
for row in sheet[table][(title_row + 1):]: # Walk horizontally over table
student = Person.objects.filter(university_number__contains=row[university_number_field]).first()
# check if this is not an empty line, else continue.
if student:
for test_column in test_rows.keys():
try:
grades.append(make_grade(
student=student,
corrector=teacher,
test=tests[test_column],
grade=row[test_column]
))
except GradeException as e: # Called for either: bad grade, grade out of bounds
return bad_request(request, {'message': e})
save_grades(grades) # Bulk-save grades. Also prevents a partial import of the sheet.
# Check if anything was imported.
if not any_tests:
return bad_request(request, {'message': 'There were no tests recognized to import.'})
return render(request=request,
template_name='importer/successfully_imported.html',
context={'tests': all_tests})
else:
return bad_request(request, {'message': 'The file uploaded was not recognised as a grade excel file.'
' Are you sure the file is an .xlsx file? Otherwise, download a new'
' gradesheet and try using that instead'})
else: # GET request
form = GradeUploadForm()
return render(request, 'importer/importmodulepart.html', {'form': form, 'pk': pk, 'module_part': module_part})
|
a915b426b8c870ee62a154a1370080e87a7de42f
| 3,647,687
|
from typing import Tuple
def ordered_pair(x: complex) -> Tuple[float, float]:
"""
Returns the tuple (a, b), like the ordered pair
in the complex plane
"""
return (x.real, x.imag)
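# Hedged usage sketch (not part of the original snippet):
assert ordered_pair(3 + 4j) == (3.0, 4.0)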
|
c67e43cf80194f7a5c7c5fd20f2e52464816d056
| 3,647,688
|
import os
def find_fits_file(plate_dir_list, fits_partial_path):
"""
Returns a path
:rtype : basestring
"""
for plate_dir in plate_dir_list:
fits_path = os.path.join(plate_dir, fits_partial_path)
if os.path.exists(fits_path):
return fits_path
return None
|
24c5c0e8a42cc5f91e3935c8250b217ac2becd3f
| 3,647,689
|
from FuXi.Rete.RuleStore import SetupRuleStore
def HornFromDL(owlGraph, safety=DATALOG_SAFETY_NONE, derivedPreds=[], complSkip=[]):
"""
Takes an OWL RDF graph, an indication of what level of ruleset safety
(see: http://code.google.com/p/fuxi/wiki/FuXiUserManual#Rule_Safety) to apply,
and a list of derived predicates and returns a Ruleset instance comprised of
the rules extracted from the OWL RDF graph (using a variation of the OWL 2 RL transformation)
"""
ruleStore, ruleGraph, network = SetupRuleStore(makeNetwork=True)
return network.setupDescriptionLogicProgramming(
owlGraph,
derivedPreds=derivedPreds,
expanded=complSkip,
addPDSemantics=False,
constructNetwork=False,
safety=safety)
|
37dfe479dd0f150956261197b47cfbd468285f92
| 3,647,690
|
from scipy import sparse
def _assembleMatrix(data, indices, indptr, shape):
"""
Generic assemble matrix function to create a CSR matrix
Parameters
----------
data : array
Data values for matrix
indices : int array
CSR type indices
indptr : int array
Row pointer
shape : tuple-like
Actual shape of matrix
Returns
-------
M : scipy csr sparse matrix
The assembled matrix
"""
M = sparse.csr_matrix((data, indices, indptr), shape)
return M
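# Hedged usage sketch (not part of the original snippet): assembles a small
# 2x2 CSR matrix; numpy is only used here for the comparison.
import numpy as np
_M = _assembleMatrix(data=np.array([1.0, 2.0, 3.0]),
                     indices=np.array([0, 1, 1]),
                     indptr=np.array([0, 2, 3]),
                     shape=(2, 2))
assert np.array_equal(_M.toarray(), [[1.0, 2.0], [0.0, 3.0]])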
|
6ada37b14270b314bcc6ba1ef55da10c07619731
| 3,647,691
|
def mock_state_store(decoy: Decoy) -> StateStore:
"""Get a mocked out StateStore."""
return decoy.mock(cls=StateStore)
|
db8e9e99dcd4bbc37094b09febb63c849550bc81
| 3,647,692
|
from typing import Callable
from typing import List
def beam_search_runner_range(output_series: str,
decoder: BeamSearchDecoder,
max_rank: int = None,
postprocess: Callable[
[List[str]], List[str]]=None
) -> List[BeamSearchRunner]:
"""Return beam search runners for a range of ranks from 1 to max_rank.
This means there is max_rank output series where the n-th series contains
the n-th best hypothesis from the beam search.
Args:
output_series: Prefix of output series.
decoder: Beam search decoder shared by all runners.
max_rank: Maximum rank of the hypotheses.
postprocess: Series-level postprocess applied on output.
Returns:
List of beam search runners getting hypotheses with rank from 1 to
max_rank.
"""
check_argument_types()
if max_rank is None:
max_rank = decoder.beam_size
if max_rank > decoder.beam_size:
raise ValueError(
("The maximum rank ({}) cannot be "
"bigger than beam size {}.").format(
max_rank, decoder.beam_size))
return [BeamSearchRunner("{}.rank{:03d}".format(output_series, r),
decoder, r, postprocess)
for r in range(1, max_rank + 1)]
|
01c63368219f4e1c95a7557df585893d68134478
| 3,647,693
|
def read_variants(
pipeline, # type: beam.Pipeline
all_patterns, # type: List[str]
pipeline_mode, # type: PipelineModes
allow_malformed_records, # type: bool
representative_header_lines=None, # type: List[str]
pre_infer_headers=False, # type: bool
sample_name_encoding=SampleNameEncoding.WITHOUT_FILE_PATH, # type: int
use_1_based_coordinate=False # type: bool
):
# type: (...) -> pvalue.PCollection
"""Returns a PCollection of Variants by reading VCFs."""
compression_type = get_compression_type(all_patterns)
if compression_type == filesystem.CompressionTypes.GZIP:
splittable_bgzf = _get_splittable_bgzf(all_patterns)
if splittable_bgzf:
return (pipeline
| 'ReadVariants'
>> vcfio.ReadFromBGZF(splittable_bgzf,
representative_header_lines,
allow_malformed_records,
pre_infer_headers,
sample_name_encoding,
use_1_based_coordinate))
if pipeline_mode == PipelineModes.LARGE:
variants = (pipeline
| 'InputFilePattern' >> beam.Create(all_patterns)
| 'ReadAllFromVcf' >> vcfio.ReadAllFromVcf(
representative_header_lines=representative_header_lines,
compression_type=compression_type,
allow_malformed_records=allow_malformed_records,
pre_infer_headers=pre_infer_headers,
sample_name_encoding=sample_name_encoding,
use_1_based_coordinate=use_1_based_coordinate))
else:
variants = pipeline | 'ReadFromVcf' >> vcfio.ReadFromVcf(
all_patterns[0],
representative_header_lines=representative_header_lines,
compression_type=compression_type,
allow_malformed_records=allow_malformed_records,
pre_infer_headers=pre_infer_headers,
sample_name_encoding=sample_name_encoding,
use_1_based_coordinate=use_1_based_coordinate)
if compression_type == filesystem.CompressionTypes.GZIP:
variants |= 'FusionBreak' >> fusion_break.FusionBreak()
return variants
|
5f706219ccc5a5f59980122b4fdac93e35056f5d
| 3,647,694
|
import numpy
def carla_location_to_numpy_vector(carla_location):
"""
Convert a carla location to a icv vector3
Considers the conversion from left-handed system (unreal) to right-handed
system (icv)
:param carla_location: the carla location
:type carla_location: carla.Location
:return: a numpy.array with 3 elements
:rtype: numpy.array
"""
return numpy.array([
carla_location.x,
-carla_location.y,
carla_location.z
])
|
a207ec5d878a07e62f96f21cd33c980cb1e5dacc
| 3,647,695
|
def prev_cur_next(lst):
"""
Returns list of tuples (prev, cur, next) for each item in list, where
"prev" and "next" are the previous and next items in the list,
respectively, or None if they do not exist.
"""
    # zip() is an iterator in Python 3, so build the list directly; padding
    # lst[1:] with None makes the final tuple (lst[-2], lst[-1], None).
    return list(zip([None] + lst[:-1], lst, lst[1:] + [None]))
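# Hedged usage sketch (not part of the original snippet):
assert prev_cur_next([1, 2, 3]) == [(None, 1, 2), (1, 2, 3), (2, 3, None)]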
|
c00cd27e1eaeffd335a44ac625cb740f126a06e5
| 3,647,696
|
import pathlib
def vet_input_path(filename):
"""
Check if the given input file exists.
Returns a pathlib.Path object if everything is OK, raises
InputFileException if not.
"""
putative_path = pathlib.Path(filename)
if putative_path.exists():
if not putative_path.is_file():
            msg = ('A given input file is not in fact a file. ' + \
'You input {}.'.format(putative_path))
raise InputFileException(msg)
else:
msg = ('Could not find a specified input file. You input {}.'.format(
putative_path))
raise InputFileException(msg)
return putative_path
|
9c517cf9e56781b995d7109ea0983171760cf58c
| 3,647,697
|
import requests
def check_for_updates(repo: str = REPO) -> str:
"""
Check for updates to the current version.
"""
message = ""
url = f"https://api.github.com/repos/{repo}/releases/latest"
response = requests.get(url)
if response.status_code != 200:
raise RuntimeError(
f"Failed to get commit count. Status code: {response.status_code}"
)
data = response.json()
latest_version = data["name"] # returns "vx.x.x"
current_version = f"v{_version.__version__}" # returns "vx.x.x"
if latest_version != current_version:
message = f"New version available: {latest_version}.\n\n"
else:
message = "No updates available.\n\n"
master = get_status(current_version, "master")
dev = get_status(current_version, "dev")
for branch in ["master", "dev"]:
name = branch.capitalize()
if branch == "master":
status, ahead_by, behind_by = master
else:
status, ahead_by, behind_by = dev
if status == "behind":
message += f"{name} is {status} by {behind_by} commits.\n"
elif status == "ahead":
message += f"{name} is {status} by {ahead_by} commits.\n"
else:
message += f"{name} is up to date.\n"
return message
|
0d3b37a74e252552f1e912a0d7072b60a34de86d
| 3,647,698
|
def _process_image(record, training):
"""Decodes the image and performs data augmentation if training."""
image = tf.io.decode_raw(record, tf.uint8)
image = tf.cast(image, tf.float32)
image = tf.reshape(image, [32, 32, 3])
image = image * (1. / 255) - 0.5
if training:
padding = 4
image = tf.image.resize_with_crop_or_pad(image, 32 + padding, 32 + padding)
image = tf.image.random_crop(image, [32, 32, 3])
image = tf.image.random_flip_left_right(image)
return image
|
0a255d954c7ca537f10be6ac5c077fd99aaf72cd
| 3,647,699
|