content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def get_id(asset, **kwargs):
    """Look up an asset by its unique id.

    The id must be passed as a keyword argument whose name contains 'id'.
    Example::
        get_id(Foo, foo_id=1)  # works
        get_id(Foo, foo=1)     # TypeError
    """
    id_key = next(_parse_id(kwargs), None)
    if id_key is None:
        raise TypeError('Could not parse id key:{}'.format(kwargs))
    instance = asset.get_id(kwargs[id_key])
    if instance is None:
        return NoContent, 404
    return instance.dump()
def put_study_document(request):
    """PUT handler for editing an existing study.

    Injects ``resource_type = 'study'`` into the request match dict, then
    delegates to the generic `put_document`; see `finish_write_operation`
    for a description of the response.
    """
    matchdict = request.matchdict
    matchdict['resource_type'] = 'study'
    return put_document(request)
def getDataByHash(sha256):
    """Grab all data Viper has. Save."""
    # Query the Viper REST API for the file matching this SHA-256 digest.
    payload = {'sha256': sha256}
    resp = requests.post(viper_url + "file/find", payload,
                         headers=post_headers)
    resp.raise_for_status()  # fail loudly on any HTTP error status
    result = resp.json()
    data = {}
    # NOTE(review): the empty dict above is immediately overwritten; only the
    # 'default' project's results are persisted.
    data = result['results']['default']
    # Persist the analysis under the "viperData" key for this hash.
    saveAnalysis(sha256=sha256, key="viperData", data=data)
def create_command(input_file, columns_to_use, column_separator, output_file):
    """
    Build the linux shell command that extracts the requested columns into a file
    :param input_file: A valid file path to raw data file
    :param columns_to_use: Indexes of the columns that needs to be filtered out (index starts from 1)
    :param column_separator: Column separator in input/output file (default is ',' [comma])
    :param output_file: A valid file path where the output will be stored
    :return: A linux shell command
    """
    print('Creating text filter command.....', log_type='info')
    column_indexes = columns_to_use.split(',')
    # awk prints one "$N" per requested column; consecutive fields are
    # separated by a literal space in the output.
    command_segment = '" "'.join('$' + item for item in column_indexes)
    if column_separator is None:
        delimiter = ''
    else:
        delimiter = ' -F "' + column_separator + '"'
    command = "awk" + delimiter + " '{print " + command_segment + "}' " + input_file + " > " + output_file
    print('Command creation complete!', log_type='info')
    # Return command
    return command
def create():
    """Creates all tables defined in models.py"""
    # SQLAlchemy creates every mapped table that does not yet exist.
    db.create_all()
    # NOTE(review): only the configured provider table is logged, even though
    # create_all() may have created several tables.
    log.info("Table {} was created.".format(SETTINGS['provider_parameters']['table_name']))
def vprint(message, stream=DEFAULT_STREAM, flush=False):
    """
    Easily handle verbose printing. Newline characters are automatically
    appended if they are not already present.
    Arguments:
        message (str|unicode|list|tuple): A single or multi-line message to be
                                          written to the specified stream. If
                                          the input datatype is a list or tuple,
                                          each element is assumed to be a line
                                          of the message and are written
                                          separately.
        stream (file): An open file or other object with a callable "write()"
                       [default: sys.stdout]
        flush (bool): When True, drop one trailing line separator and flush
                      the stream after writing.
    """
    global VERBOSE_MODE
    global DEFAULT_STREAM
    if not VERBOSE_MODE:
        return
    # Configure the stream: None or the literal string 'default' selects the
    # module-wide default stream.
    if stream is None or (isinstance(stream, (str, unicode)) and stream.lower() == 'default'):
        stream = DEFAULT_STREAM
    # Wrap single line messages so the assembly loop always sees a sequence.
    if not isinstance(message, (list, tuple)):
        message = [message]
    # Assemble message into a single block of text.
    o_message = ''
    for line in message:
        # Check if line is just supposed to be a return
        if line is not None:
            # BUG FIX: compare with ==, not 'is' (int identity is an
            # implementation detail and unreliable for equality tests).
            if len(line) == 0:
                line = linesep
            elif line[-1] != linesep:
                line += linesep
            o_message += line
    # Write to stream
    if flush:
        # BUG FIX: guard against an empty assembled message, which previously
        # raised IndexError on o_message[-1].
        if o_message and o_message[-1] == linesep:
            o_message = o_message[:-1]
        stream.write(o_message)
        stream.flush()
    else:
        stream.write(o_message)
def _new_data_generated(dataset, datagen):
    """
    Function to put augmented data in directories
    :param dataset: The path for the specified directory
    :param datagen: The augmented data (presumably a Keras
                    ImageDataGenerator-like object -- confirm with callers)
    :return: The new data to use for model
    """
    # Stream images from the directory in batches of 32, resized to the
    # globally configured dimensions, with one-hot ("categorical") labels.
    new_data = datagen.flow_from_directory(
        dataset,
        target_size=(IMG_HEIGHT, IMG_WIDTH),
        batch_size=32,
        class_mode="categorical")
    return new_data
def findOutNode( node, testFunc, fallback=... ):
    """Walk the node and its parents (inner to outer) and return the first
    one accepted by testFunc; return fallback if supplied, else raise."""
    for candidate in getOutNodes( node ):
        if testFunc( candidate ):
            return candidate
    if fallback is ...:
        raise Exception( 'cannot find out node' )
    return fallback
def lookup_cpe(vendor, product, cpe_type, cpe_table, remap):
    """Resolve (vendor, product) to the official NIST CPE values.

    Attempts, in order: the original pair, the original vendor with a
    remapped product, the remapped vendor with the original product, and
    finally the remapped vendor/product pair.  For example, the remapping
    might tell us that vendor 'alpine' should be 'alpinelinux', or product
    'solaris' should be 'sunos'.  Only values present in the official NIST
    list (cpe_table) are ever returned.

    Args:
        vendor (str): vendor name
        product (str): product name
        cpe_type (str): CPE type - o, a, h, etc.
        cpe_table (dict): dict containing the official NIST CPE data
        remap (dict): dict containing the remapping values
    Returns:
        success, vendor, product
    """
    known = cpe_table[cpe_type]
    # 1. Hot path: both values are already official.
    if vendor in known and product in known[vendor]:
        return True, vendor, product
    # Everything below requires a remap entry for this vendor string.
    mapping = remap.get(vendor)
    if mapping:
        # Remapped product for this vendor, if one is defined.
        products_map = mapping.get('products') or {}
        alt_product = products_map.get(product)
        # 2. Original vendor with the remapped product.
        if alt_product and vendor in known and alt_product in known[vendor]:
            return True, vendor, alt_product
        # 3/4. Remapped vendor with the original, then the remapped, product.
        alt_vendor = mapping.get('vendor')
        if alt_vendor and alt_vendor in known:
            if product in known[alt_vendor]:
                return True, alt_vendor, product
            if alt_product and alt_product in known[alt_vendor]:
                return True, alt_vendor, alt_product
    logging.error("Product %s from vendor %s invalid for CPE %s and no mapping",
                  product, vendor, cpe_type)
    return False, None, None
def main():
    """Main entry point of the git-learning example.

    Returns:
        str: a fixed greeting string.
    """
    greeting = 'Google git'
    return greeting
def tf_split_v_infer(node: Node):
    """
    Partial infer of split node similar to SplitV op of TF.

    Supports two configurations: a single input with 'axis'/'size_splits'
    attributes, or three inputs [input, size_splits, split_dim]; in the
    latter case the auxiliary inputs are folded into attributes and their
    edges removed from the graph.
    """
    # Bail out when the node is in an inconsistent state: attributes must be
    # present for the 1-input form and absent for the 3-input form.
    if len(node.in_nodes()) == 1 and not (node.has_valid('axis') and node.has_valid('size_splits')):
        return
    if len(node.in_nodes()) == 3 and (node.has_valid('axis') or node.has_valid('size_splits')):
        return
    # Three inputs: [input, size_splits, split_dim)
    if len(node.in_nodes()) == 3:
        split_dim = node.in_node(2).value
        assert split_dim.ndim == 0
        split_dim = split_dim.item()
        size_splits = node.in_node(1).value
        # Auxiliary inputs have been consumed; drop their edges.
        node.graph.remove_edge(node.in_node(1).id, node.id)
        node.graph.remove_edge(node.in_node(2).id, node.id)
    else:
        split_dim = node.axis
        size_splits = node.size_splits
    if split_dim is None:
        # BUG FIX: the '{}' placeholder was previously never filled in.
        log.error('split_dim value for node {} is None. Cannot do shape inference.'.format(
            node.soft_get('name')))
        return
    input_node = node.in_node(0)  # renamed from 'input' to avoid shadowing the builtin
    if input_node.shape is None or size_splits is None:
        log.error('input shape or size of splits are not defined for node {}'.format(node.soft_get('name')))
        return
    node['size_splits'] = size_splits
    log.debug('split_dim = {}, input.shape = {}, size_splits.value = {}'.format(split_dim, input_node.shape,
                                                                                size_splits))
    # split_dim is a numpy array, axis is split_dim
    split(input_node, node, split_dim, size_splits)
    PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')])
def SSIM(img1, img2, cs_map=False):
    """Return the Structural Similarity Map corresponding to input images img1
    and img2 (images are assumed to be uint8)

    Mirrors the reference MATLAB implementation (ssim.m) published by the
    SSIM authors:
    https://ece.uwaterloo.ca/~z70wang/research/ssim/ssim_index.m
    """
    im1 = img1.astype(numpy.float64)
    im2 = img2.astype(numpy.float64)
    # 11x11 Gaussian window with sigma 1.5, as in the reference code.
    window = fspecial_gauss(11, 1.5)
    K1 = 0.01
    K2 = 0.03
    L = 255  # bitdepth of image
    C1 = (K1*L)**2
    C2 = (K2*L)**2
    mu1 = signal.fftconvolve(window, im1, mode='valid')
    mu2 = signal.fftconvolve(window, im2, mode='valid')
    mu1_sq = mu1*mu1
    mu2_sq = mu2*mu2
    mu1_mu2 = mu1*mu2
    sigma1_sq = signal.fftconvolve(window, im1*im1, mode='valid') - mu1_sq
    sigma2_sq = signal.fftconvolve(window, im2*im2, mode='valid') - mu2_sq
    sigma12 = signal.fftconvolve(window, im1*im2, mode='valid') - mu1_mu2
    ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
                                                      (sigma1_sq + sigma2_sq + C2))
    if not cs_map:
        return ssim_map
    contrast_structure = (2.0*sigma12 + C2)/(sigma1_sq + sigma2_sq + C2)
    return (ssim_map, contrast_structure)
def covariance(prices: np.ndarray) -> np.ndarray:
    """Compute the population covariance matrix of the given price data.

    Args:
        prices: Prices of market data, observations along the first axis.

    Returns:
        Covariance matrix as an ndarray.
    """
    # ddof=0 gives the population (biased) estimator.
    return np.array(np.cov(prices.T, ddof=0))
def resume_scrape(db, tf):
    """
    Resume a unfinished scrape. Need to import a task file
    :param db: File descriptor of the JSON task file holding saved state
               (read first, then reopened for writing under path ``tf``)
    :param tf: Path of the task file to re-open for checkpoint writes
    """
    # Load the persisted crawler state, then reopen the task file for writing
    # so progress checkpoints below can overwrite it in place.
    store = json.load(db)
    db.close()
    db = open(tf, 'w')
    rosie = crawler.Crawler(db=db, dictionary=store)
    # Restore variables from persistent file
    try:
        scrape_nodes = store['scrape_nodes']
        scrape_registrations = store['scrape_registrations']
        scrape_users = store['scrape_users']
        scrape_institutions = store['scrape_institutions']
        nodes_finished = store['nodes_finished']
        registrations_finished = store['registrations_finished']
        users_finished = store['users_finished']
        institutions_finished = store['institutions_finished']
        scrape_finished = store['scrape_finished']
        milestone_url = store['milestone']
        rosie.node_urls = store['node_urls']
        rosie.registration_urls = store['registration_urls']
        rosie.user_urls = store['user_urls']
        rosie.institution_urls = store['institution_urls']
        if store['error_list'] is not None:
            rosie.error_list = store['error_list']
    except KeyError:
        click.echo('Cannot restore variables from file')
        return
    if scrape_finished:
        click.echo("The scrape to resume was already finished")
        return
    # Each phase: trim the URL list back to the last milestone, scrape, mark
    # the phase finished, and checkpoint the whole store to disk.
    # NOTE(review): 'async' is a reserved keyword from Python 3.7 on, so the
    # scrape_nodes(async=True) calls only parse on Python <= 3.6 -- confirm
    # the target runtime / Crawler API.
    if scrape_nodes and not nodes_finished:
        if milestone_url in rosie.node_urls:
            rosie.node_urls = rosie.node_urls[rosie.node_urls.index(milestone_url):]
        rosie.scrape_nodes(async=True)
        store['nodes_finished'] = True
        db.seek(0)
        db.truncate()
        json.dump(store, db, indent=4)
        db.flush()
    if scrape_registrations and not registrations_finished:
        if milestone_url in rosie.registration_urls:
            rosie.registration_urls = rosie.registration_urls[rosie.registration_urls.index(milestone_url):]
        rosie.scrape_registrations(async=True)
        store['registrations_finished'] = True
        db.seek(0)
        db.truncate()
        json.dump(store, db, indent=4)
        db.flush()
    if scrape_users and not users_finished:
        if milestone_url in rosie.user_urls:
            rosie.user_urls = \
                rosie.user_urls[rosie.user_urls.index(milestone_url):]
        rosie.scrape_users()
        store['users_finished'] = True
        db.seek(0)
        db.truncate()
        json.dump(store, db, indent=4)
        db.flush()
    if scrape_institutions and not institutions_finished:
        if milestone_url in rosie.institution_urls:
            rosie.institution_urls = rosie.institution_urls[rosie.institution_urls.index(milestone_url):]
        rosie.scrape_institutions()
        store['institutions_finished'] = True
        db.seek(0)
        db.truncate()
        json.dump(store, db, indent=4)
        db.flush()
    # Final checkpoint: everything completed.
    store['scrape_finished'] = True
    db.seek(0)
    db.truncate()
    json.dump(store, db, indent=4)
    db.flush()
def fiveplates_clean_design_file(field, designID):
    """
    Name of the targets_clean file for a field inside the
    fiveplates_field_files zip archive.

    Parameters
    ----------
    field : str
        identifier of field, e.g. 'GG_010'
    designID : int or str
        design identifier embedded in the filename
    """
    return '{0}_des{1}_targets_clean.txt'.format(field, designID)
def setup_figure(diff=False):
    """Build the accuracy figure and its panel axes.

    Set diff to True for an extra row of panels showing pair-wise
    differences in accuracy.
    """
    fig = plt.figure(figsize=(2*3.385, 2*3))  # two column figure for bio-informatics
    plt.subplots_adjust(left=0.15, bottom=0.1, right=0.98, top=0.93, wspace=0.05, hspace=0.01)
    row_labels = ['A', 'Ad', 'B', 'C'] if diff else ['A', 'B', 'C']
    col_labels = ['-DEL', '-REF/SNP', '-INS']
    gs = plt.GridSpec(4 if diff else 3, 3,
                      width_ratios=[6, 0.9, 6],
                      height_ratios=[3.5, 2, 1, 1] if diff else [3.5, 1, 1])
    panel_names = [row + col for row, col in itertools.product(row_labels, col_labels)]
    ax = {panel: plt.subplot(cell) for panel, cell in zip(panel_names, gs)}
    return fig, ax
def main():
    """Connects to the stream and starts threads to write them to a file."""
    listener = QueueListener()
    # SECURITY(review): OAuth credentials are hardcoded here; they should be
    # moved to environment variables or a config file and these rotated.
    ckey="i9m8JABm5zmB1scZDUg94SLf4"
    csecret="Og7AXk99eXMF4v6sc0FTj3fEvfdDhkATzr3HZnNJksXCBPClpF"
    atoken="258676850-2TSMkNpJq3mkp6SXXIDoHZvTtthnPkOgR3utaxME"
    asecret="vz22ULf6FKowaDq2GIh0tL8K47t0nAXTeGRTjJg7txgig"
    auth = OAuthHandler(ckey, csecret)
    auth.set_access_token(atoken, asecret)
    # Background thread drains the listener queue to disk.
    writer_thread = threading.Thread(target=worker, args=(listener,))
    writer_thread.start()
    # Information-security related track keywords for the filtered stream.
    stream = Stream(auth, listener).filter(track=["Desk Information Security", "Access Information Security",
        "Fire Extinguisher Information Security", "Emergency Information Security", "Lightning resister Information Security",
        "Lock Information Security", "Power Information Security", "Location Information Security", "Surveillance Information Security",
        "Monitor Information Security", "Heating Ventillation Airconditioning Information Security",
        "Alarm Information Security", "Floor Information Security", "Ceiling Information Security", "Rack Information Security",
        "Server Security", "Storage Security",
        "Alert Information Security", "Monitor Information Security", "Asset Information Security", "Incident Information Security",
        "Policy Information Security", "People Information Security",
        "Standard Information Security", "Procedure Information Security", "Governance Information Security",
        "Contract Information Security", "Law Information Security", "Intellectual Property Rights Information Security",
        "Metrics Information Security", "Testing Information Security", "Certificate Information Security",
        "Compliance Information Security", "Regulation Information Security", "Business Continuity Information Security",
        "Firewall ", "Network Time Protocol Security","Virtual Private Network ", "VPN", "Open Systems Interconnect Security", "Topology Security",
        "Throughput Security", "Bandwidth Security", "Local Area Network Security", "LAN Security",
        "Wide Area Network Security", "WAN Security", "Virtual Local Area Network Security",
        "Demilitarized zone Network Security", "Domain Name System Security", "Internet Protocol V4 Security",
        "Internet Protocol V6 Security", "IP Security", "IPV4 Security", "IPV6 Security", "Wireless Security",
        "Internet Security", "Switch Network Security", "Router Network Security", "Multiplexer Network Security",
        "Operating System Security", "Data Security", "Web Security", "Code Application Security", "Web Application Firewall", "Middle Tier Security",
        "Account Security", "Authorization Security", "Authentication Security", "Cryptography",
        "Computer Information Security", "Desktop Information Security",
        "Laptop Information Security", "Thin Client Information Security", "Mobile Device Security",
        "Projector Information Security", "Printer Information Security", "Keyboard Information Security",
        "Mouse Information Security", "USB Information Security", "Anti-virus",
        "IaaS Security", "PaaS Security", "SaaS Security","Virtualization Security", "Virtual Private Cloud", "VPC Security ",
        "Crime Cyber", "Cyber Squatter", "Cyber Security", "Social Engineering Cyber", "Safety Cyber",
        "deceptive software", "Injection Information",
        "Tampering Information", "Repudiation Security", "Information disclosure", "hacking", "hactivism",
        "adware", "spyware", "trojan Security", "zombie Security", "denial of service", "DOS attack",
        "Distributed Denial of Service", "DDOS attack", "Cross site scripting", "XSS", "Cross Site Request Forgery",
        "CSRF", "Buffer overflow", "sniffer Information Security", "spam", "spoofing", "Groupware", "Phishing",
        "Smishing", "Vishing", "ransomware", "malware", "botnet"]
        )
    print_status(listener)
    # Main loop: sample the stream, restarting with growing backoff on
    # transient network errors.
    # NOTE(review): filter() above already consumed the connection; calling
    # stream.sample() on its return value looks suspicious -- confirm against
    # the tweepy version in use.
    try:
        while True:
            try:
                stream.sample()  # blocking!
            except KeyboardInterrupt:
                print('KEYBOARD INTERRUPT', file=sys.stderr)
                return
            except (socket.error, httplib.HTTPException):
                # Backoff grows by 0.25s per failure, capped at MAX_TCPIP_TIMEOUT.
                global tcpip_delay
                print(
                    'TCP/IP Error: Restarting after {delay} seconds.'.format(
                        delay=tcpip_delay,
                    ),
                    file=sys.stderr,
                )
                time.sleep(min(tcpip_delay, MAX_TCPIP_TIMEOUT))
                tcpip_delay += 0.25
    finally:
        print('Disconnecting stream', file=sys.stderr)
        stream.disconnect()
        print('Waiting for last tweets to finish processing', file=sys.stderr)
        # Send poison pill to writer thread and wait for it to exit
        listener.queue.put(None)
        listener.queue.join()
        print('Waiting for writer thread to finish', file=sys.stderr)
        writer_thread.join()
        print('Exit successful', file=sys.stderr)
def load_axon_morphometrics(morphometrics_file):
    """
    Load a morphometrics table from disk.

    :param morphometrics_file: absolute path of file containing the morphometrics (must be .csv, .xlsx or pickle format)
    :return: stats_dataframe: dataframe containing the morphometrics
    """
    # Accept both str and Path inputs.
    morphometrics_file = convert_path(morphometrics_file)
    if morphometrics_file.suffix == "":
        raise ValueError("File not specified. Please provide the full path of the file, including its extension")
    suffix = morphometrics_file.suffix.lower()
    try:
        # Use the appropriate loader depending on the extension
        if suffix == ".csv":
            stats_dataframe = pd.read_csv(morphometrics_file, na_values='NaN')
        elif suffix == ".xlsx":
            stats_dataframe = pd.read_excel(morphometrics_file, na_values='NaN')
        else:
            stats_dataframe = pd.read_pickle(morphometrics_file)
    except IOError:
        logger.error(f"Error: Could not load file {str(morphometrics_file)}")
        raise
    stats_dataframe = rename_column_names_after_loading(stats_dataframe)
    # csv and excel exports often carry an "Unnamed: N" column holding the
    # saved index; drop any such columns here.
    unnamed_columns = [column for column in stats_dataframe.columns
                       if "unnamed" in column.lower()]
    if unnamed_columns:
        stats_dataframe = stats_dataframe.drop(columns=unnamed_columns)
    return stats_dataframe
def register_network(key, module):
    """
    Register a customized GNN model.
    After registration, the module can be directly called by GraphGym.
    Args:
        key (string): Name of the module
        module: PyTorch module
    """
    # Delegates to the generic register() helper, storing the module in the
    # module-level network_dict registry.
    register(key, module, network_dict)
def fatal(message: str) -> None:
    """
    Sends a message with the FATAL prefix.
    :param message: The message that must be printed.
    """
    # Honours the module-level 'log' switch; printf and fatal_prefix are
    # defined elsewhere in this module.
    if log:
        printf(fatal_prefix + message)
def get_input_costs(
    inputs, cost_class="monetary", unit="billion_2015eur", mapping=COST_NAME_MAPPING,
    **kwargs
):
    """
    Get costs used as model inputs.

    Iterates over every data variable with a "costs" dimension whose name
    starts with "cost", attaches the appropriate unit string, aggregates
    over locations/techs (mean) and returns a dict mapping the human
    readable name to a Series stacked by unit.
    """
    costs = {}
    for var_name, var_data in inputs.data_vars.items():
        if "costs" not in var_data.dims or not var_name.startswith("cost"):
            continue
        # Pick the unit label for this cost variable.
        if "cap" in var_name:
            _unit = f"{unit}_per_tw"
        elif var_name == "cost_om_annual":
            _unit = f"{unit}_per_tw_per_year"
        elif var_name == "cost_om_annual_investment_fraction":
            _unit = "fraction_of_total_investment"
        elif var_name == "cost_depreciation_rate":
            _unit = "fraction"
        elif "om_" in var_name:
            _unit = f"{unit}_per_twh"
        else:
            # BUG FIX: previously an unmatched variable silently reused the
            # unit from the preceding loop iteration (or crashed with
            # UnboundLocalError on the first one). Fail explicitly instead.
            raise ValueError(f"Cannot determine unit for cost variable {var_name}")
        _name = mapping[var_name]
        mapped_da = map_da(var_data.loc[{"costs": cost_class}], loc_tech_agg="mean", **kwargs)
        series = clean_series(mapped_da)
        if series is not None:
            costs[_name] = (
                series
                .to_frame(_unit)
                .rename_axis(columns="unit")
                .stack()
            )
            # NOTE(review): the factor of 10 presumably rescales per-TW units
            # from the source data's native unit -- confirm against inputs.
            costs[_name].loc[costs[_name].index.get_level_values("unit").str.find("per_tw") > -1] *= 10
    return costs
def render_template(language, context, data, template):
    """Renders HTML display of metadata XML"""
    # i18n-aware Jinja2 environment rooted at the plugin path.
    env = Environment(extensions=['jinja2.ext.i18n'],
                      loader=FileSystemLoader(context.ppath))
    env.install_gettext_callables(gettext, ngettext, newstyle=True)
    template_obj = env.get_template('resources/templates/%s' % template)
    return template_obj.render(language=language, obj=data)
def ht_26():
    """Build a HashTable instance preloaded with the 26 letters.

    Each letter maps to its 1-based position in the `letters` sequence.
    """
    table = HashTable()
    for position, char in enumerate(letters, start=1):
        table.set(char, position)
    return table
def get_session(uuid):
    """
    Api.get_session method
    returns: [uuid, users, payload, state, ts]
    200 -- session created
    400 -- wrong arguments
    403 -- wrong authorization
    404 -- session not found
    500 -- internal error
    """
    conn = conn_get()
    session = database.get_session(conn, uuid)
    if session is None:
        abort(404)
    # Running or finished sessions are only visible to their members.
    if session['state'] in ['Started', 'Finished']:
        if AUTH.username() not in session['users']:
            abort(404)
        return jsonify(session)
    # Lobby state: a GET from a new user joins them to the session.
    if AUTH.username() not in session['users']:
        session['users'].append(AUTH.username())
    session['ts'] = int(time())
    # Once the expected number of players has joined, start the game and
    # open round 1.
    if len(session['users']) == session['players']:
        session['state'] = 'Started'
        session['round'] = 1
        database.add_round(conn, {
            'uuid': uuid,
            'round': 1,
            'user_moves': {},
        })
    database.update_session(conn, session)
    conn.commit()
    return jsonify(session)
def fetch_data(
    indicator: WorldBankIndicators, country_names: Iterable[str], fill_missing=None
) -> pd.DataFrame:
    """
    Fetch data from the market_data_cache collection (not to be confused with the market_quote_cache collection)
    and keep only rows for the specified countries (if present). Optionally apply a callable to
    fill in gaps eg. resample.  Returns None when nothing matches.
    """
    if indicator is None:
        return None
    with io.BytesIO(indicator.fetch_data()) as fp:
        df = pd.read_parquet(fp)
    # Guard clauses replace the original nested if/else.
    if df is None or len(df) == 0:
        return None
    plot_df = df[df["country"].isin(country_names)]
    if len(plot_df) == 0:
        return None
    if fill_missing:
        # Index by date so callers don't have to supply an 'on' keyword --
        # they may not know which column holds the dates.
        plot_df.index = pd.to_datetime(plot_df["date"], format="%Y-%m-%d")
        plot_df = fill_missing(plot_df)
    return plot_df
def _(dbmodel, backend):
    """
    get_backend_entity for Django DbAuthInfo

    NOTE(review): named '_' because it is presumably registered via a
    singledispatch-style decorator not visible in this chunk -- confirm at
    the registration site.
    """
    from . import authinfos
    return authinfos.DjangoAuthInfo.from_dbmodel(dbmodel, backend)
def wavelen_diversity_doppler_est(echo, prf, samprate, bandwidth,
                                  centerfreq):
    """Estimate Doppler based on wavelength diversity.
    It uses slope of phase of range frequency along with single-lag
    time-domain correlator approach proposed by [BAMLER1991]_.
    Parameters
    ----------
    echo : np.ndarray(complex)
        2-D complex basebanded echo, azimuth by range in time domain.
    prf : float
        Pulse repetition frequency in (Hz)
    samprate : float
        Sampling rate in range , second dim, in (Hz)
    bandwidth : float
        RF/chirp bandiwdth in (Hz)
    centerfreq : float
        RF center frequency of chirp in (Hz)
    Returns
    -------
    float
        Unambiguous Doppler centroid at center frequency in (Hz)
    Raises
    ------
    ValueError
        For bad input
    TypeError
        If echo is not numpy array
    See Also
    --------
    corr_doppler_est : Correlation Doppler Estimator (CDE)
    sign_doppler_est : Sign-Doppler estimator (SDE)
    References
    ----------
    .. [BAMLER1991] R. Bamler and H. Runge, 'PRF-Ambiguity Resolving by
        Wavelength Diversity', IEEE Transaction on GeoSci and Remote Sensing,
        November 1991.
    """
    if prf <= 0:
        raise ValueError('PRF must be positive value!')
    if samprate <= 0:
        raise ValueError('samprate must be positive value!')
    if bandwidth <= 0 or bandwidth >= samprate:
        raise ValueError('badnwidth must be positive less than samprate!')
    if centerfreq <= 0.0:
        raise ValueError('centerfreq must be positive value!')
    if not isinstance(echo, np.ndarray):
        raise TypeError('echo must be a numpy array')
    if echo.ndim != 2:
        raise ValueError('echo must have two dimensions')
    num_azb, num_rgb = echo.shape
    if num_azb <= 2:
        raise ValueError('The first dimension of echo must be larger than 2')
    # BUG FIX: condition was inverted ("> 2"), rejecting every valid echo and
    # accepting the degenerate ones the message forbids.
    if num_rgb <= 2:
        raise ValueError('The second dimension of echo must be larger than 2!')
    # FFT along range
    nfft = fft.next_fast_len(num_rgb)
    echo_fft = fft.fft(echo, nfft, axis=1)
    # one-lag correlator along azimuth
    az_corr = (echo_fft[1:] * echo_fft[:-1].conj()).mean(axis=0)
    # Get the unwrapped phase of range spectrum within +/-bandwidth/2.
    df = samprate / nfft
    half_bw = 0.5 * bandwidth
    idx_hbw = nfft // 2 - int(half_bw / df)
    unwrap_phs_rg = np.unwrap(np.angle(fft.fftshift(az_corr)
                                       [idx_hbw: -idx_hbw]))  # (rad)
    # perform linear regression in range freq within bandwidth
    freq_bw = -half_bw + df * np.arange(nfft - 2 * idx_hbw)
    pf_coef = np.polyfit(freq_bw, unwrap_phs_rg, deg=1)
    # get the doppler centroid at center freq based on slope
    dop_slope = prf / (2. * np.pi) * pf_coef[0]
    return centerfreq * dop_slope
def _parse_line(line):
    """
    Parse node string representation and return a dict with appropriate node values.
    """
    # Leaf nodes carry a value; decision nodes carry a feature/threshold pair.
    if 'leaf' in line:
        return {'is_leaf': 1, 'leaf_val': _parse_leaf_node_line(line)}
    feature, threshold = _parse_decision_node_line(line)
    return {'is_leaf': 0, 'feature': feature, 'threshold': threshold}
def generate_richcompare_wrapper(cl: ClassIR, emitter: Emitter) -> Optional[str]:
    """Generates a wrapper for richcompare dunder methods.

    Emits a single C-level tp_richcompare function that switches on `op`
    and dispatches to whichever rich-comparison dunders the class defines,
    falling back to Py_NotImplemented for all other operators.

    Returns the generated C function's name, or None when the class
    defines no rich-comparison methods.
    """
    # Sort for determinism on Python 3.5
    matches = sorted([name for name in RICHCOMPARE_OPS if cl.has_method(name)])
    if not matches:
        return None
    name = '{}_RichCompare_{}'.format(DUNDER_PREFIX, cl.name_prefix(emitter.names))
    emitter.emit_line(
        'static PyObject *{name}(PyObject *obj_lhs, PyObject *obj_rhs, int op) {{'.format(
            name=name)
    )
    emitter.emit_line('switch (op) {')
    for func in matches:
        emitter.emit_line('case {}: {{'.format(RICHCOMPARE_OPS[func]))
        method = cl.get_method(func)
        assert method is not None
        # generate_wrapper_core emits the case body -- presumably including
        # its own return, so the case does not fall through; confirm there.
        generate_wrapper_core(method, emitter, arg_names=['lhs', 'rhs'])
        emitter.emit_line('}')
    emitter.emit_line('}')
    # Default path for unimplemented comparison operators.
    emitter.emit_line('Py_INCREF(Py_NotImplemented);')
    emitter.emit_line('return Py_NotImplemented;')
    emitter.emit_line('}')
    return name
def display_credentials():
    """
    Function that displays all saved credentials

    Returns whatever Credentials.display_credentials() yields; the
    Credentials class is defined elsewhere in this module.
    """
    return Credentials.display_credentials()
def render_diff_report():
    """
    Render a summary of the diffs found and/or changed.
    Returns a string.
    Dependencies:
        config settings: action, templates, report_order
        globals: diff_dict, T_NAME_KEY
        modules: nori
    """
    # Report title depends on whether we only diffed or also synced.
    # NOTE(review): any action other than 'diff'/'sync' leaves diff_report
    # unassigned and raises UnboundLocalError below -- confirm that callers
    # guarantee one of these two values.
    if nori.core.cfg['action'] == 'diff':
        diff_report = ' Diff Report '
    elif nori.core.cfg['action'] == 'sync':
        diff_report = ' Diff / Sync Report '
    diff_report = ('#' * len(diff_report) + '\n' +
                   diff_report + '\n' +
                   '#' * len(diff_report) + '\n\n')
    if nori.core.cfg['report_order'] == 'template':
        # One section per template; diff tuples are
        # (exists_in_source, source_row, exists_in_dest, dest_row, changed).
        for template_index in diff_dict:
            template = nori.core.cfg['templates'][template_index]
            section_header = ('Template {0} ({1}):' .
                              format(template_index,
                                     nori.pps(template[T_NAME_KEY])))
            section_header += '\n' + ('-' * len(section_header)) + '\n\n'
            diff_report += section_header
            for diff_t in diff_dict[template_index]:
                exists_in_source = diff_t[0]
                source_row = diff_t[1]
                exists_in_dest = diff_t[2]
                dest_row = diff_t[3]
                has_been_changed = diff_t[4]
                # Tri-state existence flags: True = found, None = key matched
                # but value didn't, False = no key match at all.
                if exists_in_source:
                    source_str = nori.pps(source_row[1])
                elif exists_in_source is None:
                    source_str = '[no value match in source database]'
                else:
                    source_str = '[no key match in source database]'
                if exists_in_dest:
                    dest_str = nori.pps(dest_row[1])
                elif exists_in_dest is None:
                    dest_str = '[no value match in destination database]'
                else:
                    dest_str = '[no key match in destination database]'
                # Tri-state change flag: None = untouched, False = partial,
                # True = fully changed.
                if has_been_changed is None:
                    changed_str = 'unchanged'
                elif not has_been_changed:
                    changed_str = (
                        'partially changed - action may be needed!'
                    )
                else:
                    changed_str = 'changed'
                diff_report += (
                    'Source: {0}\nDest: {1}\nStatus: {2}\n\n' .
                    format(source_str, dest_str, changed_str)
                )
            diff_report += '\n'
    elif nori.core.cfg['report_order'] == 'keys':
        # One section per key tuple; diff tuples additionally carry the
        # template index in position 0.
        for key_str in diff_dict:
            section_header = ('Key tuple {0}:' .
                              format(nori.pps(key_str)))
            section_header += '\n' + ('-' * len(section_header)) + '\n\n'
            diff_report += section_header
            for diff_t in diff_dict[key_str]:
                template_index = diff_t[0]
                exists_in_source = diff_t[1]
                source_row = diff_t[2]
                exists_in_dest = diff_t[3]
                dest_row = diff_t[4]
                has_been_changed = diff_t[5]
                template = nori.core.cfg['templates'][template_index]
                # Rows are (num_keys, data); only the non-key columns are
                # printed in this ordering.
                if exists_in_source:
                    num_keys = source_row[0]
                    source_data = source_row[1]
                    source_str = nori.pps(source_data[num_keys:])
                elif exists_in_source is None:
                    source_str = '[no value match in source database]'
                else:
                    source_str = '[no key match in source database]'
                if exists_in_dest:
                    num_keys = dest_row[0]
                    dest_data = dest_row[1]
                    dest_str = nori.pps(dest_data[num_keys:])
                elif exists_in_dest is None:
                    dest_str = '[no value match in destination database]'
                else:
                    dest_str = '[no key match in destination database]'
                if has_been_changed is None:
                    changed_str = 'unchanged'
                elif not has_been_changed:
                    changed_str = (
                        'partially changed - action may be needed!'
                    )
                else:
                    changed_str = 'changed'
                diff_report += (
                    'Template: {0}\nSource: {1}\nDest: {2}\n'
                    'Status: {3}\n\n' .
                    format(template[T_NAME_KEY], source_str, dest_str,
                           changed_str)
                )
            diff_report += '\n'
    return diff_report.strip()
def get_step_handler_for_gym_env(gym_env_name: str, cfg: Configuration) -> StepRewardDoneHandler:
    """Return an example step handler for the given gym environment name,
    constructed with the given configuration."""
    # Dispatch table instead of an if/elif chain.
    handler_classes = {
        'Acrobot-v1': AcrobotStepHandler,
        'CartPole-v1': CartPoleStepHandler,
        'MountainCarContinuous-v0': ContinuousMountainCarStepHandler,
        'MountainCar-v0': MountainCarStepHandler,
        'Pendulum-v0': PendulumStepHandler,
    }
    if gym_env_name not in handler_classes:
        raise NotImplementedError(f'No support for this gym env: {gym_env_name}')
    return handler_classes[gym_env_name](cfg)
def archive(ts):
    """Reprocess an older date
    Currently, we only support the METAR database :(

    :param ts: datetime marking the start of the hour to rebuild --
        presumably timezone-aware; confirm with callers.
    """
    # Two connections: read hourly precip maxima from ASOS, write into IEM.
    asos = get_dbconn('asos', user='nobody')
    acursor = asos.cursor()
    iem = get_dbconn('iem')
    icursor = iem.cursor()
    # Table names are derived from an integer year, so this string
    # interpolation is not an injection risk; row values use placeholders.
    table = "t%s" % (ts.year,)
    acursor.execute("""WITH data as (
    SELECT station, max(p01i) from """ + table + """
    WHERE valid > %s and valid <= %s and p01i is not null
    GROUP by station)
    SELECT station, network, max, iemid from data d JOIN stations s on
    (d.station = s.id) WHERE (s.network ~* 'ASOS' or s.network = 'AWOS')
    """, (ts, ts + datetime.timedelta(minutes=60)))
    table = "hourly_%s" % (ts.year,)
    # One phour row per station for this hour.
    for row in acursor:
        icursor.execute("""INSERT into """ + table + """
        (station, network, valid, phour, iemid)
        VALUES (%s, %s, %s, %s, %s)
        """, (row[0], row[1], ts, row[2], row[3]))
    icursor.close()
    iem.commit()
def get_post_ids() -> list:
    """Return the positive integer post ids found as sub-directory names of
    WORK_PATH (the directory is created first if missing)."""
    create_directory(WORK_PATH)
    entries = os.listdir(WORK_PATH)
    folders = [entry for entry in entries
               if os.path.isdir(f'{WORK_PATH}/{entry}')]
    return [int(folder) for folder in folders if int(folder) > 0]
def get_module(mod_name):
    """Import the module named mod_name and return it; log and re-raise on
    failure."""
    try:
        module = import_module(mod_name)
    except ImportError:
        logger.error('Failed to import module "%s".' % mod_name)
        logger.error(traceback.format_exc())
        raise
    return module
def sort_ranks(ranks):
    """Sort ranks by MAIN_RANKS order.
    Parameters
    ----------
    ranks
        Ranks to sort
    Returns
    -------
    Sorted ranks, or False when the input is empty (a quirk of the original
    contract that callers may rely on).
    """
    rank_list = ranks if isinstance(ranks, list) else list(ranks)
    if not rank_list:
        return False
    return [rank for rank in VALID_RANKS if rank in rank_list]
def formalize_rules(list_rules):
    """Render rules (facts separated by commas) into readable
    'If ... Then ...' text, one line per fact."""
    lines = []
    for rule in list_rules:
        facts = [fact for fact in rule.split(',') if fact]
        lines.append('If %s,\n' % facts[0])
        lines.extend(' %s,\n' % fact for fact in facts[1:-1])
        lines.append('Then: %s.\n' % facts[-1])
    return ''.join(lines)
def load_bib(config):
    """Read bibliography file if there is one.

    When ``config["bib"]`` names a BibTeX file, its parsed entries are
    stored under ``config["bib_data"]``; otherwise an empty value is
    stored there.
    """
    if "bib" in config:
        with open(config["bib"], "r") as reader:
            config["bib_data"] = bibtexparser.load(reader).entries
    else:
        # NOTE(review): bibtexparser's `.entries` is a list, but this
        # fallback is a dict — confirm downstream consumers accept both,
        # or change the default to [].
        config["bib_data"] = {}
def filenames_to_labels(filenames, filename_label_dict):
    """Map image filenames to their integer labels.

    Args:
        filenames (List[str]): The filenames of the images.
        filename_label_dict (Dict[str, int]): Maps each filename to an
            integer label.

    Returns:
        ndarray: The labels, in the same order as *filenames*.
    """
    labels = [int(filename_label_dict[name]) for name in filenames]
    return np.asarray(labels)
def test_get_init_3():
    """get_init can't find __init__ in empty testdir"""
    # An empty directory contains no __init__ module, so get_init must
    # report the absence with None rather than raising.
    assert mp.get_init(TMP_TEST_DIR) is None
def load_model():
    """
    Load the previously saved model and tokenizer.

    Returns
    ----------
    model
        The provided (pre-trained) model.
    tokenizer
        The tokenizer that accompanies the model.
    """
    # Both artifacts were pickled into MODEL_DIR ('pick' is the pickle module).
    with open(MODEL_DIR + 'model.pickle', 'rb') as f:
        model = pick.load(f)
    with open(MODEL_DIR + 'tokenizer.pickle', 'rb') as f:
        tokenizer = pick.load(f)
    return model, tokenizer
def json_write(data, fname) -> bool:
    """Serialize *data* to *fname* as pretty-printed UTF-8 JSON.

    Args:
        data: Any JSON-serializable object.
        fname: Destination file path.

    Returns:
        bool: ``True`` on success.  (The signature was already annotated
        ``-> bool`` but the function previously returned ``None``.)

    Raises:
        TypeError: If *data* is not JSON serializable.
        OSError: If the file cannot be written.
    """
    with open(fname, 'w', encoding='utf-8') as f:
        json.dump(
            data,
            f,
            ensure_ascii=False,
            indent=4
        )
    return True
def post(post_id):
    """View function for post page.

    On a valid POST, persists a new comment for the post; in all cases
    renders the post page with its tags, comments and sidebar data.
    """
    # Form object: `Comment`
    form = CommentForm()
    # form.validate_on_submit() will be true and return the
    # data object to form instance from user enter,
    # when the HTTP request is POST
    if form.validate_on_submit():
        new_comment = Comment(id=str(uuid4()),
                              name=form.name.data)
        new_comment.text = form.text.data
        new_comment.date = datetime.now()
        new_comment.post_id = post_id
        db.session.add(new_comment)
        db.session.commit()
        # NOTE(review): no redirect after a successful POST — refreshing
        # the page will resubmit the comment (PRG pattern not applied).
    post = db.session.query(Post).get_or_404(post_id)
    tags = post.tags
    comments = post.comments.order_by(Comment.date.desc()).all()
    recent, top_tags = sidebar_data()
    return render_template('post.html',
                           post=post,
                           tags=tags,
                           comments=comments,
                           form=form,
                           recent=recent,
                           top_tags=top_tags)
def pipe(val, *funcs):
    """Pipe a value through a sequence of functions.

    ``pipe(val, f, g, h)`` is equivalent to ``h(g(f(val)))``.

    >>> double = lambda i: 2 * i
    >>> pipe(3, double, str)
    '6'
    """
    if not funcs:
        raise PipeNotGivenAnyFunctions
    if any_is_async(funcs):
        # Async flavour: compose right-to-left, then apply once.
        return async_functions.compose(*reversed(funcs))(val)
    result = val
    for func in funcs:
        result = func(result)
    return result
def do_query(method, query, values):
    """Executes a query on a DFP API method, returning a list of results."""
    # Trap exceptions here instead of in caller?
    statement = dfp.FilterStatement(query, values)
    data = []
    # Page through the API; a response without 'results' marks the end.
    while True:
        response = method(statement.ToStatement())
        if 'results' in response:
            data += response['results']
            # Advance the offset by the page size for the next request.
            statement.offset += dfp.SUGGESTED_PAGE_LIMIT
        else:
            break
    return data
def markdown_format(text):
    """Render *text* from Markdown to HTML and mark it safe for templates.

    Intended for use as a template filter, e.g.
    ``{{ post.body | markdown_format }}``.

    ``mark_safe`` tells the template engine not to escape the generated
    HTML, so callers must only pass trusted input through this filter.
    """
    return mark_safe(markdown.markdown(text))
def _find_tex_env_recursive(original_s: str, s: str, offset: int = 0, depth: int = 0) -> List:
    """
    Find all commands that are not begin/end environment markers,
    recursing into command arguments that contain environments.

    :param original_s: The complete, original LaTeX source (context only)
    :param s: Latex string code currently being scanned
    :param offset: Offset applied to the search
    :param depth: Current recursion depth
    :return: List of command tuples ``(start, end, args_start, args_end, _)``
    """
    tags = find_tex_commands(s, offset=offset)
    new_tags = []
    for t in tags:
        a, b, c, d, _ = t
        # Text of the command itself (offsets are absolute; rebase into s).
        source_cmd = s[a - offset:b - offset + 1]
        if 'begin' not in source_cmd and 'end' not in source_cmd:
            # Get the arguments of the command, and check more environments there
            cmd_args = s[c - offset:d - offset + 1]
            if 'begin' in cmd_args or 'end' in cmd_args:
                if 'newenvironment' in source_cmd or 'newcommand' in source_cmd:  # Prone to bugs
                    continue
                for tr in _find_tex_env_recursive(original_s, cmd_args, offset=c, depth=depth + 1):
                    new_tags.append(tr)
            else:
                new_tags.append(t)
    return new_tags
def group_by_instance_type(
    jobs: "Iterable[JobConfiguration]",
) -> "List[List[JobConfiguration]]":
    """
    Group job-configurations into different queues depending on which
    instance each job should be run on.  Returns the list of queues.

    First-seen order of instance types and the relative order of jobs
    inside each queue are preserved (matching the toolz ``groupby``
    behaviour this previously depended on, now stdlib-only).

    >>> group_by_instance_type( # doctest: +SKIP
    ... [
    ... {"ResourceConfig": {"InstanceType": 1}, "name": 1},
    ... {"ResourceConfig": {"InstanceType": 2}, "name": 2},
    ... {"ResourceConfig": {"InstanceType": 2}, "name": 3},
    ... ]
    ... )
    [
    [
    {"ResourceConfig": {"InstanceType": 1}, "name": 1}
    ],
    [
    {"ResourceConfig": {"InstanceType": 2}, "name": 2},
    {"ResourceConfig": {"InstanceType": 2}, "name": 3},
    ],
    ]
    """
    # dict preserves insertion order, giving the same grouping semantics
    # as toolz.groupby without the third-party dependency.
    queues: dict = {}
    for job_config in jobs:
        instance_type = job_config["ResourceConfig"]["InstanceType"]
        queues.setdefault(instance_type, []).append(job_config)
    return list(queues.values())
def integral_func(phi, th1, n):
    """Integrand used for the continuous hypersphere cap intersection below."""
    tan_ratio_sq = (np.tan(th1) / np.tan(phi)) ** 2
    regularized = scipy.special.betainc((n - 2) / 2, 1 / 2, 1 - tan_ratio_sq)
    return np.sin(phi) ** (n - 2) * regularized
def remove_first_item(nested_list):
    """
    Removes the first element of each list, recursing into sublists.

    >>> tuple(remove_first_item([['abc','def','ghi'],['123','456','789'],[['000','999'],['AAA','BBB']]]))
    (['def', 'ghi'], ['456', '789'], [['999'], ['BBB']])
    """
    # Iterate with positions: the previous implementation used
    # nested_list.index(item), which skipped *every* element equal to the
    # first one (and was O(n^2)); enumerate drops only position 0.
    for position, item in enumerate(nested_list):
        if isinstance(item, list):
            yield list(remove_first_item(item))
        elif position != 0:
            yield item
def test_model(image_path, class_names, img_height, img_width):
    """Run the trained model on one image; print and return the top class."""
    img = keras.preprocessing.image.load_img(image_path, target_size=(img_height, img_width))  # Load the image in PIL format
    input_array = keras.preprocessing.image.img_to_array(img)  # Convert the PIL image instance to a NumPy array
    input_array = np.array([input_array])  # Wrap in a batch of one (see load_img description)
    # input_array = tf.expand_dims(input_array, 0) # Create a batch # alternative: expand_dims adds the batch axis
    # print('input_array: ',input_array)
    input_array = preprocess_input(input_array)
    predictions = model.predict(input_array)[0]  # Run inference; take the single result from the batch
    class_index = int(np.argmax(predictions))  # Index of the highest-scoring class
    max_value = predictions[class_index]  # Top score; used below to avoid scaling scores already > 1 by 100
    class_score = 100 * np.max(predictions) if max_value <= 1 else np.max(predictions)  # Confidence as a percentage
    print("这个图像最有可能是: {} 置信度: {:.2f} %".format(class_names[class_index], class_score))
    return class_names[class_index]
def asbytes(s: Literal["101 101 1.0e-05\n"]):
    """
    usage.scipy: 1
    """
    # Auto-generated API-usage stub: records one observed call signature
    # (the Literal value) from scipy usage data; intentionally has no body.
    ...
def addList():
    """
    Add a list. Needs custom function to create its internal meta object
    """
    # TODO: unimplemented — per the docstring, creating a list requires a
    # bespoke internal meta object before this can be filled in.
    pass
def order(order_id,complete):
    """
    Charge completion return URL. Once the customer is redirected
    back to this site from the authorization page, we search for the
    charge based on the provided `order_id`.

    :param order_id: Identifier of the order/charge being completed.
    :param complete: Completion status passed back from the processor.
    """
    # Render the confirmation page, echoing both values into the template.
    return render_template(
        "complete.html",
        order_id=order_id,
        complete=complete
    )
def load_dimension_subdag(
        parent_dag_name,
        task_id,
        redshift_conn_id,
        *args, **kwargs):
    """
    Build a sub-DAG that stages all dimension data into Redshift.

    :param parent_dag_name: imp ({parent_dag_name}.{task_id})
    :param task_id: imp {task_id}
    :param redshift_conn_id: {any connection id}
    :param args: {verbose}
    :param kwargs: {verbose and context variables}
    :return: the configured sub-DAG
    """
    dag = DAG(
        f"{parent_dag_name}.{task_id}",
        **kwargs
    )

    def _stage_csv(task_id, file, delimiter, table):
        """Create one StageToRedshiftOperator that copies a CSV from S3 into *table*."""
        # NOTE(review): the connection ids are hard-coded ("redshift",
        # "aws_default"), ignoring the redshift_conn_id parameter — this
        # preserves the previous behaviour; confirm whether the parameter
        # should be honoured instead.
        return StageToRedshiftOperator(
            task_id=task_id,
            dag=dag,
            redshift_conn_id="redshift",
            aws_credentials_id="aws_default",
            file=file,
            delimiter=delimiter,
            table=table,
            s3_bucket=Variable.get("s3_bucket"),
            s3_key="csv",
            sql_stmt=SqlQueries.copy_csv_cmd,
            provide_context=True)

    # Instantiating an operator registers it with `dag`; no task
    # dependencies were declared previously, so none are declared here.
    _stage_csv('copy_ports', 'i94port.csv', ',', 'i94ports')
    _stage_csv('copy_visa', 'i94visa.csv', ',', 'i94visa')
    _stage_csv('copy_modes', 'i94mode.csv', ',', 'i94mode')
    _stage_csv('copy_addr', 'i94addr.csv', ',', 'i94addr')
    _stage_csv('copy_country_codes', 'i94cit&i94res.csv', ',', 'i94res')
    _stage_csv('copy_cities_demographics', 'us-cities-demographics.csv', ';',
               'us_cities_demographics')
    _stage_csv('copy_airports', 'airport-codes_csv.csv', ',', 'airport_codes')

    def parquet_to_redshift(table, s3_bucket, s3_key, iam_role,
                            sql_stmt, redshift_conn_id, **kwargs):
        """
        Read parquet files from S3, COPY them into a Redshift table, then
        delete the staged parquet objects from the bucket.

        :param table: destination Redshift table
        :param s3_bucket: bucket holding the parquet files
        :param s3_key: key prefix of the parquet files
        :param iam_role: IAM role ARN authorised for the COPY
        :param sql_stmt: COPY statement template (table, s3 path, role)
        :param redshift_conn_id: Airflow connection id for Redshift
        :param kwargs: Airflow context variables
        :return: None
        """
        redshift = PostgresHook(postgres_conn_id=redshift_conn_id)
        logging.info("Copying data from S3 to Redshift")
        s3_path = "s3://{}/{}".format(s3_bucket, s3_key)
        formatted_sql = sql_stmt.format(
            table,
            s3_path,
            iam_role
        )
        redshift.run(formatted_sql)
        # Clean up the staged parquet files once the COPY has finished.
        aws_hook = AwsHook("aws_default")
        credentials = aws_hook.get_credentials()
        client = boto3.client('s3',
                              aws_access_key_id=credentials.access_key,
                              aws_secret_access_key=credentials.secret_key)
        objects_to_delete = client.list_objects(
            Bucket=Variable.get("s3_bucket"), Prefix="parquet")
        delete_keys = {'Objects': [
            {'Key': obj['Key']}
            for obj in objects_to_delete.get('Contents', [])]}
        client.delete_objects(Bucket=Variable.get("s3_bucket"),
                              Delete=delete_keys)

    PythonOperator(
        task_id='copy_immigration',
        python_callable=parquet_to_redshift,
        provide_context=True,
        op_kwargs={'table': "immigration",
                   's3_bucket': Variable.get("s3_bucket"),
                   's3_key': 'parquet',
                   'iam_role': Variable.get('iam_role'),
                   'sql_stmt': SqlQueries.copy_parquet_cmd,
                   'redshift_conn_id': 'redshift'},
        dag=dag
    )
    return dag
def get_ns_lns_ids_config_file():
    """Reads node_id to host name mapping from one of the config files in the map"""
    folder = exp_config.node_config_folder
    assert folder is not None and os.path.exists(folder)
    # Every config file carries the same mapping, so any one of them will do.
    first_file = os.listdir(folder)[0]
    return read_node_to_hostname_mapping(os.path.join(folder, first_file))
def find(name, environment=None, guess=None):
    """Finds a particular binary on this system.

    Attempts to find the binary given by ``name``, first checking the value of
    the environment variable named ``environment`` (if provided), then by
    checking the system path, then finally checking hardcoded paths in
    ``guess`` (if provided). This function is cross-platform compatible - it
    works on Windows, Linux, and Mac. If there are spaces in the path found,
    this function will wrap its return value in double quotes.

    Args:
        name (str): Binary name.
        environment (str): An optional environment variable to check.
        guess (iterable): An optional list of hardcoded paths to check.

    Returns:
        A string with the absolute path to the binary if found, otherwise
        ``None``.
    """
    def sanitize(path):
        # Wrap paths containing spaces in double quotes, unless already
        # quoted.  (Bug fix: the tuple previously held two single quotes,
        # so already-double-quoted paths were wrapped a second time.)
        quotes = ("'", '"')
        if " " in path and path[0] not in quotes and path[-1] not in quotes:
            path = '"{}"'.format(path)
        return path
    # 1) Explicit override via environment variable.
    if environment:
        path = os.environ.get(environment)
        if path is not None:
            path = os.path.abspath(os.path.expanduser(path))
            if os.path.isfile(path):
                return sanitize(path)
    # 2) System path, via the platform's lookup utility.
    if os.name == "posix":
        search = "which"
    elif os.name == "nt":
        search = "where.exe"
    else:
        raise EnvironmentError("unknown platform: {}".format(os.name))
    try:
        with open(os.devnull, "w") as output:
            path = subprocess.check_output([search, name], stderr=output).decode(
                "utf-8"
            )
        return sanitize(os.path.abspath(path.strip()))
    except subprocess.CalledProcessError:
        # Lookup utility exited non-zero: not on the system path.
        pass
    # 3) Hardcoded fallback locations.
    if guess:
        for path in guess:
            if os.path.isfile(path):
                return sanitize(path)
    return None
def upload_to(path):
    """
    Generates unique ascii filename before saving. Supports strftime()
    formatting as django.db.models.FileField.upload_to does.

    Example:
    class SomeModel(models.Model):
    picture = models.ImageField(upload_to=upload_to('my_model_uploads/'))

    It is possible to define `upload_to` folder depending on model.
    Declare dict `IMAGE_UPLOAD_TO` in settings:
    {
    'ModelName': 'path for upload_to"',
    }
    And provide None to upload_to func as path.
    """
    def upload_callback(instance, filename):
        # 16 random alphanumeric characters plus the original extension.
        random_fname = ''.join(
            random.choice(string.ascii_uppercase + string.digits) for x in range(16))
        random_fname += os.path.splitext(filename)[-1]
        if path is None:
            # NOTE(review): the docstring mentions settings.IMAGE_UPLOAD_TO,
            # but the lookup actually reads the module-level `path_dict`
            # keyed by model class name — confirm which is intended.
            img_path = path_dict.get(instance.__class__.__name__, "images")
        else:
            img_path = path
        # Expand strftime() placeholders against the current time.
        img_path = os.path.normpath(force_text(
            datetime.datetime.now().strftime(force_str(img_path))))
        return '%s/%s' % (img_path.rstrip('/'), random_fname)
    return upload_callback
def standardize_measurements_lastref(measurements: List[Measurement], remove_ref: bool = True) \
        -> List[Measurement]:
    """ Sets the standardization of all measurement to the Reference Measurement before """
    last_null_meas = None  # most recent reference measurement seen so far
    clean_measurements = []
    for measurement in measurements:
        isref = measurement.is_reference()
        if isref:
            last_null_meas = measurement
        # Every measurement (references included) is tied to the latest reference.
        measurement.set_reference(last_null_meas, StandardizationType.LAST_REFERENCE)
        # References are kept in the output only when remove_ref is False.
        if not isref or not remove_ref:
            # A non-reference arriving before any reference cannot be standardized.
            if last_null_meas is None:
                raise ValueError("ERROR - NO NULL MEASUREMENT FOUND")
            clean_measurements.append(measurement)
    return clean_measurements
def blg2texkey(filename):
    """Extract TeX keys from a .blg file."""
    if not os.path.exists(filename):
        LOGGER.error("File %s not found.", filename)
        return []
    with open(filename, "r") as blg:
        lines = blg.readlines()
    # regexp to match 'Warning--I didn\'t find a database entry for "..."' (bibtex)
    # or 'WARN - I didn\'t find a database entry for '...'' (biber)
    pattern = re.compile(
        r".*I didn\'t find a database entry for [\"\'](?P<keys>[^\"]+)[\"\'].*"
    )
    # Collect keys into a set: splits comma-separated key lists and
    # de-duplicates in a single pass.
    found = set()
    for line in lines:
        match = pattern.match(line)
        if match:
            found.update(match.group("keys").split(","))
    # Drop blacklisted keys before returning.
    return [key for key in found if key not in BLACKLISTED_KEYS]
def build_train_valid_test_data_iterators(
        build_train_valid_test_datasets_provider):
    """Build train/validation/test data iterators for pretraining.

    Datasets and dataloaders are constructed only on rank 0 of each
    tensor-model-parallel group; the do_train/do_valid/do_test flags are
    then broadcast to the remaining ranks of the group.

    Args:
        build_train_valid_test_datasets_provider: callable taking a list
            of [train, valid, test] sample counts and returning the
            three datasets.

    Returns:
        Tuple of (train, valid, test) data iterators; an entry is None
        when the corresponding dataloader is unavailable.
    """
    args = get_args()
    (train_dataloader, valid_dataloader, test_dataloader) = (None, None, None)
    print_rank_0('> building train, validation, and test datasets ...')
    # Backward compatibility, assume fixed batch size.
    if args.iteration > 0 and args.consumed_train_samples == 0:
        assert args.train_samples is None, \
            'only backward compatiblity support for iteration-based training'
        args.consumed_train_samples = args.iteration * args.global_batch_size
    if args.iteration > 0 and args.consumed_valid_samples == 0:
        if args.train_samples is None:
            args.consumed_valid_samples = (args.iteration // args.eval_interval) * \
                args.eval_iters * args.global_batch_size
    # Data loader only on rank 0 of each model parallel group.
    if mpu.get_tensor_model_parallel_rank() == 0:
        # Number of train/valid/test samples.
        if args.train_samples:
            train_samples = args.train_samples
        else:
            train_samples = args.train_iters * args.global_batch_size
        # One eval pass per eval_interval plus the final one.
        eval_iters = (args.train_iters // args.eval_interval + 1) * \
                     args.eval_iters
        test_iters = args.eval_iters
        train_val_test_num_samples = [train_samples,
                                      eval_iters * args.global_batch_size,
                                      test_iters * args.global_batch_size]
        print_rank_0(' > datasets target sizes (minimum size):')
        print_rank_0('    train:      {}'.format(train_val_test_num_samples[0]))
        print_rank_0('    validation: {}'.format(train_val_test_num_samples[1]))
        print_rank_0('    test:       {}'.format(train_val_test_num_samples[2]))
        # Build the datasets.
        train_ds, valid_ds, test_ds = build_train_valid_test_datasets_provider(
            train_val_test_num_samples)
        # Build dataloders.
        train_dataloader = build_pretraining_data_loader(
            train_ds, args.consumed_train_samples)
        valid_dataloader = build_pretraining_data_loader(
            valid_ds, args.consumed_valid_samples)
        test_dataloader = build_pretraining_data_loader(test_ds, 0)
        # Flags to know if we need to do training/validation/testing.
        do_train = train_dataloader is not None and args.train_iters > 0
        do_valid = valid_dataloader is not None and args.eval_iters > 0
        do_test = test_dataloader is not None and args.eval_iters > 0
        # Need to broadcast num_tokens and num_type_tokens.
        flags = torch.cuda.LongTensor(
            [int(do_train), int(do_valid), int(do_test)])
    else:
        flags = torch.cuda.LongTensor([0, 0, 0])
    # Broadcast num tokens.
    torch.distributed.broadcast(flags,
                                mpu.get_tensor_model_parallel_src_rank(),
                                group=mpu.get_tensor_model_parallel_group())
    args.do_train = flags[0].item()
    args.do_valid = flags[1].item()
    args.do_test = flags[2].item()
    # Build iterators.
    dl_type = args.dataloader_type
    assert dl_type in ['single', 'cyclic']
    if train_dataloader is not None:
        train_data_iterator = iter(train_dataloader) if dl_type == 'single' \
            else iter(cyclic_iter(train_dataloader))
    else:
        train_data_iterator = None
    if valid_dataloader is not None:
        valid_data_iterator = iter(valid_dataloader) if dl_type == 'single' \
            else iter(cyclic_iter(valid_dataloader))
    else:
        valid_data_iterator = None
    if test_dataloader is not None:
        test_data_iterator = iter(test_dataloader) if dl_type == 'single' \
            else iter(cyclic_iter(test_dataloader))
    else:
        test_data_iterator = None
    return train_data_iterator, valid_data_iterator, test_data_iterator
def get_lib_ver(library_path=""):
"""Returns the version of the Minipresto library.
### Parameters
- `library_path`: The Minipresto library directory."""
version_file = os.path.join(library_path, "version")
try:
with open(version_file, "r") as f:
for line in f:
line = line.strip()
if line:
return line
return "NOT FOUND"
except:
return "NOT FOUND" | 5,334,661 |
def test_validate_email():
    """Confirm that "only" valid emails are accepted."""
    validator = validate.VALIDATION_MAPPER["email"]
    # NOTE(review): the empty string is accepted as valid here — confirm
    # this is intentional ("optional field" semantics).
    assert validator("")
    assert validator("test@example.com")
    assert validator("test.name@sub.example.com")
    # Dotless hosts, double '@', non-strings, and plain text must all be
    # rejected with ValueError.
    with pytest.raises(ValueError):
        validator("test@localhost")
    with pytest.raises(ValueError):
        validator("test@localhost@localhost.com")
    with pytest.raises(ValueError):
        validator(5)
    with pytest.raises(ValueError):
        validator("asd")
    with pytest.raises(ValueError):
        validator([1, 2, 3, 4])
    with pytest.raises(ValueError):
        validator(4.5)
def width():
    """Get console width."""
    # get() reports (columns, rows); only the horizontal size is needed.
    columns, _rows = get()
    return columns
def load_yaml_config(path):
    """Parse the YAML file at *path* and return the resulting config object."""
    with open(path) as config_file:
        return yaml.load(config_file, Loader=yaml.FullLoader)
def convert_year(years, debug=False):
    """Coerce a Series of year values to ints, NaN for unparseable entries.

    Example usage: db['date'] = cln.convert_year(db['date'])

    Parameters
    ----------
    years : pandas.Series
        Raw year values (ints, floats, strings, or None).
    debug : bool
        When True, print each value and its type while converting.

    Returns
    -------
    pandas.Series
        The same Series, mutated in place and returned.
    """
    # Series.iteritems() and Series.set_value() were removed from pandas
    # (2.0 and 1.0 respectively); use items() and .at[] instead.
    for i, yr in years.items():
        if debug:
            print(yr)
            print(type(yr))
        if yr is None:
            years.at[i] = np.nan
            continue
        if is_int(yr):
            continue
        if isinstance(yr, float):
            if np.isnan(yr):
                continue
        # Strip HTML artifacts, quoting, and dash/slash ranges before retrying.
        yr = q_html(yr)
        yr = q_all(yr)
        yr = dedashslash(yr)
        if is_int(yr):
            years.at[i] = int(yr)
        else:
            years.at[i] = np.nan
    return years
def home(request):
    """View function for home page of site."""
    # The home page simply reuses the laboratory list view.
    return laboratorio_list(request)
def scan_armatures(context):
    """
    scans the selected objects or the scene for a source (regular)
    armature and a destination (Make Human) armature
    """
    selected = context.selected_objects
    everything = context.scene.objects
    # Selection takes priority; fall back to the whole scene.
    source = scan_for_armature(selected) or scan_for_armature(everything)
    target = (scan_for_armature(selected, look_for_mhx=True)
              or scan_for_armature(everything, look_for_mhx=True))
    if not source or not target:
        raise LookupError("Couldn't find source or target armatures")
    return source, target
def populate_instance(msg, inst):
    """
    :param msg: contains the values to use to populate inst.
    :param inst: message class instance to populate.
    :return: an instance of the provided message class, with its fields populated according to the values in msg
    """
    inst_type_name = type(inst).__name__
    return _to_inst(msg, inst_type_name, inst_type_name, inst)
def setup_args():
    """Setup and return the command line argument parser"""
    arg_parser = argparse.ArgumentParser(description='')
    # parser.add_argument('csv', type=str, help='CSV file to load')
    arg_parser.add_argument(
        '-clang-tidy-binary', metavar='PATH', required=True,
        help='Path to the clang-tidy executable.')
    arg_parser.add_argument(
        '-clang-apply-replacements-binary',
        help='Path to the clang-apply-replacements binary. Required when using -fix and -runner-py'
             ' arguments.')
    arg_parser.add_argument(
        '-runner-py', metavar='PATH',
        help='Python script wrapping clang-tidy with support for multiple jobs. run-clang-tidy.py ships'
             ' with clang-tidy. Without this clang-tidy is run directly.')
    arg_parser.add_argument(
        '-fix', action='store_true',
        help='Apply automatic fixes. Passes -fix to clang-tidy. When using -runner-py'
             ' (run-clang-tidy.py), the argument -clang-apply-replacements-binary must also be set to the'
             ' clang-apply-fixes binary.')
    arg_parser.add_argument(
        '-config-file',
        help='clang-tidy configuration file. Extracted and passed as the -config argument to'
             ' clang-tidy.')
    arg_parser.add_argument(
        '-p', required=False,
        help='clang-tidy build path (path to compile_commands.json). Extracted and passed as the -p argument to'
             ' clang-tidy.')
    arg_parser.add_argument(
        '-j', required=False,
        help='Number of parallel jobs to run. Only supported when using the -runner-py script. Ignored '
             'otherwise.')
    arg_parser.add_argument(
        '-relative-to', required=False,
        help='Modify clang-tidy message paths to be relative to this directory. Intended for CI'
             ' builds to report portable paths.')
    return arg_parser
def create_event(type_, source):
    """Build an event of the given type from *source*, defaulting to UnknownEvent.

    Re-raises construction TypeErrors with the event class name attached.
    """
    event_cls = _events.get(type_, UnknownEvent)
    try:
        return event_cls(type=type_, **source)
    except TypeError as e:
        raise TypeError(f'Error at creating {event_cls.__name__}: {e}')
def _execute(script, prefix=None, path=None):
    """
    Execute a shell script.

    Setting prefix will add the environment variable
    COLCON_BUNDLE_INSTALL_PREFIX equal to the passed in value

    :param str script: script to execute
    :param str prefix: the installation prefix
    :param str path: (optional) path to temp directory, or ``None`` to use
    default temp directory, ``str``
    :return bool: True if the script exited with status 0
    """
    path = tempfile.gettempdir() if path is None else path
    result = None
    try:
        fh = tempfile.NamedTemporaryFile('w', delete=False)
        fh.write(script)
        fh.close()
        print('Executing script below with cwd=%s\n{{{\n%s\n}}}\n' %
              (path, script))
        try:
            os.chmod(fh.name, stat.S_IRWXU)
            env = os.environ.copy()
            if prefix is not None:
                env['COLCON_BUNDLE_INSTALL_PREFIX'] = prefix
            result = subprocess.run(
                fh.name, cwd=path, env=env, stdout=PIPE, stderr=PIPE,
                universal_newlines=True)
            if result.stdout is not None:
                logger.debug('stdout output: \n' + result.stdout)
            if result.stderr is not None:
                # logger.warn is a deprecated alias for logger.warning.
                logger.warning('stderr output: \n' + result.stderr)
        except OSError as ex:
            print('Execution failed with OSError: %s' % ex)
    finally:
        if os.path.exists(fh.name):
            os.remove(fh.name)
    logger.info('Return code was: %s' % result)
    # Bug fix: previously `result` was initialised to the int 1, so an
    # OSError path reached `result.returncode` and raised AttributeError.
    if result is None:
        return False
    return result.returncode == 0
def main(event, context):
    """
    Gets layer arns for each region and publish to S3

    :param event: API Gateway event; ``pathParameters`` supplies region,
        python_version and response format.
    :param context: Lambda context object (unused).
    :return: API Gateway proxy response dict (statusCode/headers/body).
    """
    dynamodb = boto3.resource("dynamodb")
    table = dynamodb.Table(os.environ["DB_NAME"])
    region = event.get("pathParameters").get("region")
    # The second .get() argument is a fallback when the key is absent.
    python_version = event.get("pathParameters").get("python_version", "p3.8")
    format = event.get("pathParameters").get("format", "json")
    api_response = query_table(
        table=table, region=region, python_version=python_version
    )
    body, headers = return_format(
        data=api_response, format=format, region=region, python_version=python_version
    )
    return {
        "statusCode": 200,
        "headers": headers,
        "body": body,
    }
def rescore_and_rerank_by_num_inliers(test_image_id,
                                      train_ids_labels_and_scores):
    """Returns rescored and sorted training images by local feature extraction."""
    test_image_path = get_image_path(test_image_id)
    # Prefer cached local features; fall back to extracting them on the fly.
    try:
        name = os.path.basename(test_image_path).split('.')[0]
        with open(f'{TEST_LF}/{name}.pkl', 'rb') as fp:
            test_keypoints, test_descriptors = pickle.load(fp)
    except FileNotFoundError:
        test_keypoints, test_descriptors = extract_local_features(test_image_path)
    # Re-score each candidate in place using geometric-verification inliers.
    for i in range(len(train_ids_labels_and_scores)):
        train_image_id, label, global_score = train_ids_labels_and_scores[i]
        train_image_path = get_image_path(train_image_id)
        name = os.path.basename(train_image_path).split('.')[0]
        with open(os.path.join(TRAIN_LF, f"{name}.pkl"), 'rb') as fp:
            train_keypoints, train_descriptors = pickle.load(fp)
        num_inliers = get_num_inliers(test_keypoints, test_descriptors,
                                      train_keypoints, train_descriptors)
        total_score = get_total_score(num_inliers, global_score)
        train_ids_labels_and_scores[i] = (train_image_id, label, total_score)
    # Highest total score first; the input list is mutated and returned.
    train_ids_labels_and_scores.sort(key=lambda x: x[2], reverse=True)
    return train_ids_labels_and_scores
def aggregate_policy(
    policies: Iterable[PermissionPolicy_T],
    aggregator: Callable[[Iterable[object]], bool] = all
) -> PermissionPolicy_T:
    """
    With the default arguments, combine multiple permission-checking
    policies with AND and return a single policy.  The policies are fed
    to the built-in ``all``: synchronous policies run first, and the
    check short-circuits where possible while still synchronous.

    Under the combined policy, an event passes only when it satisfies
    every policy in *policies*.

    *aggregator* may be any other reducing function, e.g. ``any`` to
    combine the policies with OR instead.

    If all given policies are synchronous the returned policy is
    synchronous; otherwise the returned policy is an async function.

    Version: 1.9.0+

    Args:
        policies: The permission policies to combine
        aggregator: The function used to combine them

    Returns:
        PermissionPolicy_T: The new permission policy

    Usage:
        ```python
        # The following two are equivalent
        policy1 = lambda sender: sender.is_groupchat and sender.from_group(123456789)
        policy2 = aggregate_policy(lambda sender: sender.is_groupchat,
                                   lambda sender: sender.from_group(123456789))
        ```
    """
    syncs: List[Callable[[SenderRoles], bool]]
    asyncs: List[Callable[[SenderRoles], Awaitable[bool]]]
    syncs, asyncs = separate_async_funcs(policies)
    def checker_sync(sender: SenderRoles) -> bool:
        return aggregator(f(sender) for f in syncs)
    if len(asyncs) == 0:
        return checker_sync
    async def checker_async(sender: SenderRoles) -> bool:
        # Run the cheap synchronous policies first; bail out early.
        if not checker_sync(sender):
            return False
        # no short circuiting currently :-(
        coros = [f(sender) for f in asyncs]
        return aggregator(await asyncio.gather(*coros))
    return checker_async
def optionally_load_system_paasta_config(
    path: str = PATH_TO_SYSTEM_PAASTA_CONFIG_DIR,
) -> "SystemPaastaConfig":
    """
    Tries to load the system paasta config, but will return an empty configuration if not available,
    without raising.
    """
    try:
        return load_system_paasta_config(path=path)
    except PaastaNotConfiguredError:
        # Fall back to an empty config (empty dict, blank directory).
        return SystemPaastaConfig({}, "")
def get_sage_bank_accounts(company_id: int) -> list:
    """
    Retrieves the bank accounts for a company in Sage One

    **company_id** The Company ID
    """
    config = get_config() # Get the config
    # Build an authenticated client from the [sageone] config section.
    sage_client = SageOneAPIClient(config.get("sageone", "url"), config.get("sageone", "api_key"), config.get("sageone", "username"), config.get("sageone", "password"))
    return sage_client.get_company_bank_accounts(company_id)
def compute_fstar(tarr, mstar, index_select, index_high, fstar_tdelay):
    """Time averaged SFH that has ocurred over some previous time period

    fstar = (mstar(t) - mstar(t-fstar_tdelay)) / fstar_tdelay

    Parameters
    ----------
    tarr : ndarray of shape (n_times, )
        Cosmic time of each simulated snapshot in Gyr

    mstar : ndarray of shape (n_times, )
        Stellar mass history in Msun units.

    index_select: ndarray of shape (n_times_fstar, )
        Snapshot indices used in fstar computation.

    index_high: ndarray of shape (n_times_fstar, )
        Indices of np.searchsorted(t, t - fstar_tdelay)[index_select]

    fstar_tdelay: float
        Time interval in Gyr units for fstar definition.
        fstar = (mstar(t) - mstar(t-fstar_tdelay)) / fstar_tdelay

    Returns
    -------
    fstar : ndarray of shape (n_times)
        SFH averaged over timescale fstar_tdelay in units of Msun/yr assuming h=1.
    """
    mstar_high = mstar[index_select]  # mass at the later time t
    # Mass interpolated at t - fstar_tdelay, using the precomputed indices.
    mstar_low = jax_np_interp(
        tarr[index_select] - fstar_tdelay, tarr, mstar, index_high
    )
    # Divide by 1e9 to convert the Gyr timescale to yr.
    fstar = (mstar_high - mstar_low) / fstar_tdelay / 1e9
    return fstar
def workaround():
    """contextually invoke this when loading *.src.py"""
    # Temporarily rebind the wrapped function's global `metadata` to ours
    # for the duration of the with-block.
    __metadata_to_wrap.__globals__["metadata"] = metadata
    try:
        yield
    finally:
        # NOTE(review): restoration rebinds `metadata` to the wrapped
        # function object itself rather than the previous value — confirm
        # this is intentional.
        __metadata_to_wrap.__globals__["metadata"] = __metadata_to_wrap
def notification_error(code: str, search_id: str, status_code, message: str = None):
    """Return to the event listener a notification error response based on the status code."""
    error = CALLBACK_MESSAGES[code].format(search_id=search_id)
    if message:
        error += ' ' + message
    current_app.logger.error(error)
    # Track event here.
    EventTracking.create(search_id, EventTracking.EventTrackingTypes.API_NOTIFICATION, status_code, message)
    # Retry everything except bad requests and the non-retryable codes.
    if status_code != HTTPStatus.BAD_REQUEST and code not in (resource_utils.CallbackExceptionCodes.MAX_RETRIES,
                                                              resource_utils.CallbackExceptionCodes.UNKNOWN_ID):
        # set up retry
        enqueue_notification(search_id)
    return resource_utils.error_response(status_code, error)
async def send_dumplings_from_queue_to_hub(
        kitchen_name: str,
        hub: str,
        dumpling_queue: multiprocessing.Queue,
        kitchen_info: dict,
        log: logging.Logger,
):
    """
    Grabs dumplings from the dumpling queue and sends them to ``nd-hub``.

    Returns early (after logging) if the initial websocket connection to
    the hub cannot be established.

    :param kitchen_name: The name of the kitchen.
    :param hub: The address where ``nd-hub`` is receiving dumplings.
    :param dumpling_queue: Queue to grab dumplings from.
    :param kitchen_info: Dict describing the kitchen.
    :param log: Logger.
    """
    hub_ws = 'ws://{0}'.format(hub)
    log.info("{0}: Connecting to the dumpling hub at {1}".format(
        kitchen_name, hub_ws)
    )
    try:
        websocket = await websockets.client.connect(hub_ws)
    except OSError as e:
        log.error(
            "{0}: There was a problem with the dumpling hub connection. "
            "Is nd-hub available?".format(kitchen_name))
        log.error("{0}: {1}".format(kitchen_name, e))
        return
    try:
        # Register our kitchen information with the dumpling hub.
        await websocket.send(json.dumps(kitchen_info))
        # Send dumplings to the hub when they come in from the chefs.
        while True:
            # This is a bit hacky. We have a multiprocessing queue to get from,
            # but we're running in a coroutine. The get() blocks, which I think
            # is inferfering with websockets' ability to manage its heartbeat
            # with the hub. This only seems to affect Windows. The workaround
            # implemented here is to put a 1-second timeout on the queue get,
            # ignore empty gets, and await asyncio.sleep() which appears to
            # allow the run loop to continue (presumably allowing the keepalives
            # to work).
            try:
                dumpling = dumpling_queue.get(timeout=1)
                await websocket.send(dumpling)
            except queue.Empty:
                pass
            await asyncio.sleep(0)
    except asyncio.CancelledError:
        # Graceful shutdown: try to close the connection, but the socket
        # may already be in an invalid state.
        log.warning(
            "{0}: Connection to dumpling hub cancelled; closing...".format(
                kitchen_name))
        try:
            await websocket.close(*ND_CLOSE_MSGS['conn_cancelled'])
        except websockets.exceptions.InvalidState:
            pass
    except websockets.exceptions.ConnectionClosed as e:
        log.warning("{0}: Lost connection to dumpling hub: {1}".format(
            kitchen_name, e))
    except OSError as e:
        log.exception(
            "{0}: Error talking to dumpling hub: {1}".format(kitchen_name, e)
        )
def get_avg(feature_name, default_value):
    """Fetch the stored average of a numeric feature from the environment.

    Args:
        feature_name: String, feature name (column name) used to build
            the environment-variable name via ``AnalysisEnvTemplate.AVG_ENV``.
        default_value: Float returned when no statistic is stored.

    Return:
        Float: the stored mean, or ``default_value`` when the environment
        variable is absent.
    """
    env_name = AnalysisEnvTemplate.AVG_ENV.format(feature_name)
    raw_mean = os.getenv(env_name)
    return default_value if raw_mean is None else float(raw_mean)
def arccos(x: REAL) -> float:
    """Arc cosine, via the identity acos(x) == pi/2 - asin(x)."""
    half_pi = pi / 2
    return half_pi - arcsin(x)
def isLinkValid(test_video_link):
    """Return True if ``test_video_link`` points at a valid YouTube video.

    Queries YouTube's oEmbed endpoint. Any failure — network error,
    non-JSON reply, missing ``requests`` install, or an explicit
    "Not Found" response — is treated as an invalid link.
    """
    try:
        import requests
        data = requests.get(
            "https://www.youtube.com/oembed?format=json&url=" + test_video_link
        ).json()
        # oEmbed answers "Not Found" for unknown videos; anything else
        # (a metadata dict) means the video exists.
        return data != "Not Found"
    except Exception:
        # Was a bare `except:` — narrowed so KeyboardInterrupt/SystemExit
        # are no longer swallowed. ImportError and request/JSON errors
        # still mean the link cannot be validated.
        return False
def _strptime(data_string, format='%a %b %d %H:%M:%S %Y'):
    """Return a 2-tuple consisting of a time struct and an int containing
    the number of microseconds based on the input string and the
    format string."""
    # Validate argument types up front so the error names the offending
    # positional argument.
    for index, arg in enumerate([data_string, format]):
        if not isinstance(arg, str):
            msg = 'strptime() argument {} must be str, not {}'
            raise TypeError(msg.format(index, type(arg)))
    global _TimeRE_cache, _regex_cache
    with _cache_lock:
        locale_time = _TimeRE_cache.locale_time
        # Rebuild the cached TimeRE (and drop compiled patterns) when the
        # locale or timezone settings changed since the cache was built.
        if (_getlang() != locale_time.lang or time.tzname != locale_time.
            tzname or time.daylight != locale_time.daylight):
            _TimeRE_cache = TimeRE()
            _regex_cache.clear()
            locale_time = _TimeRE_cache.locale_time
        if len(_regex_cache) > _CACHE_MAX_SIZE:
            _regex_cache.clear()
        format_regex = _regex_cache.get(format)
        if not format_regex:
            try:
                format_regex = _TimeRE_cache.compile(format)
            except KeyError as err:
                # KeyError from TimeRE.compile means an unknown directive.
                bad_directive = err.args[0]
                if bad_directive == '\\':
                    bad_directive = '%'
                del err
                raise ValueError("'%s' is a bad directive in format '%s'" %
                    (bad_directive, format)) from None
            except IndexError:
                # IndexError means the format ended with a lone '%'.
                raise ValueError("stray %% in format '%s'" % format) from None
            _regex_cache[format] = format_regex
    found = format_regex.match(data_string)
    if not found:
        raise ValueError('time data %r does not match format %r' % (
            data_string, format))
    if len(data_string) != found.end():
        raise ValueError('unconverted data remains: %s' % data_string[found
            .end():])
    # Defaults for every field a directive may set.
    iso_year = year = None
    month = day = 1
    hour = minute = second = fraction = 0
    tz = -1
    tzoffset = None
    iso_week = week_of_year = None
    week_of_year_start = None
    weekday = julian = None
    found_dict = found.groupdict()
    for group_key in found_dict.keys():
        if group_key == 'y':
            year = int(found_dict['y'])
            # Two-digit years: 00-68 -> 2000-2068, 69-99 -> 1969-1999.
            if year <= 68:
                year += 2000
            else:
                year += 1900
        elif group_key == 'Y':
            year = int(found_dict['Y'])
        elif group_key == 'G':
            iso_year = int(found_dict['G'])
        elif group_key == 'm':
            month = int(found_dict['m'])
        elif group_key == 'B':
            month = locale_time.f_month.index(found_dict['B'].lower())
        elif group_key == 'b':
            month = locale_time.a_month.index(found_dict['b'].lower())
        elif group_key == 'd':
            day = int(found_dict['d'])
        elif group_key == 'H':
            hour = int(found_dict['H'])
        elif group_key == 'I':
            hour = int(found_dict['I'])
            ampm = found_dict.get('p', '').lower()
            # 12-hour clock: 12 AM is midnight (0); 12 PM stays 12.
            if ampm in ('', locale_time.am_pm[0]):
                if hour == 12:
                    hour = 0
            elif ampm == locale_time.am_pm[1]:
                if hour != 12:
                    hour += 12
        elif group_key == 'M':
            minute = int(found_dict['M'])
        elif group_key == 'S':
            second = int(found_dict['S'])
        elif group_key == 'f':
            s = found_dict['f']
            # Right-pad to six digits so e.g. '5' means 500000 microseconds.
            s += '0' * (6 - len(s))
            fraction = int(s)
        elif group_key == 'A':
            weekday = locale_time.f_weekday.index(found_dict['A'].lower())
        elif group_key == 'a':
            weekday = locale_time.a_weekday.index(found_dict['a'].lower())
        elif group_key == 'w':
            # %w is 0=Sunday..6=Saturday; convert to 0=Monday..6=Sunday.
            weekday = int(found_dict['w'])
            if weekday == 0:
                weekday = 6
            else:
                weekday -= 1
        elif group_key == 'u':
            # %u is 1=Monday..7=Sunday; convert to 0-based.
            weekday = int(found_dict['u'])
            weekday -= 1
        elif group_key == 'j':
            julian = int(found_dict['j'])
        elif group_key in ('U', 'W'):
            week_of_year = int(found_dict[group_key])
            # %U weeks start on Sunday (6), %W weeks on Monday (0).
            if group_key == 'U':
                week_of_year_start = 6
            else:
                week_of_year_start = 0
        elif group_key == 'V':
            iso_week = int(found_dict['V'])
        elif group_key == 'z':
            # Fixed offset like +0130: convert to signed minutes east of UTC.
            z = found_dict['z']
            tzoffset = int(z[1:3]) * 60 + int(z[3:5])
            if z.startswith('-'):
                tzoffset = -tzoffset
        elif group_key == 'Z':
            found_zone = found_dict['Z'].lower()
            for value, tz_values in enumerate(locale_time.timezone):
                if found_zone in tz_values:
                    # Ambiguous zone names (identical std/DST names with DST
                    # in effect) are deliberately left as tz == -1.
                    if time.tzname[0] == time.tzname[1
                        ] and time.daylight and found_zone not in ('utc', 'gmt'
                        ):
                        break
                    else:
                        tz = value
                        break
    # Cross-check ISO directives (%G/%V) against the Gregorian ones.
    if year is None and iso_year is not None:
        if iso_week is None or weekday is None:
            raise ValueError(
                "ISO year directive '%G' must be used with the ISO week directive '%V' and a weekday directive ('%A', '%a', '%w', or '%u')."
                )
        if julian is not None:
            raise ValueError(
                "Day of the year directive '%j' is not compatible with ISO year directive '%G'. Use '%Y' instead."
                )
    elif week_of_year is None and iso_week is not None:
        if weekday is None:
            raise ValueError(
                "ISO week directive '%V' must be used with the ISO year directive '%G' and a weekday directive ('%A', '%a', '%w', or '%u')."
                )
        else:
            raise ValueError(
                "ISO week directive '%V' is incompatible with the year directive '%Y'. Use the ISO year '%G' instead."
                )
    # If only Feb 29 was parsed with no year, temporarily use 1904 (a leap
    # year) so the date is valid; 1900 is restored at the end.
    leap_year_fix = False
    if year is None and month == 2 and day == 29:
        year = 1904
        leap_year_fix = True
    elif year is None:
        year = 1900
    # Derive the day-of-year (julian) from week/weekday information.
    if julian is None and weekday is not None:
        if week_of_year is not None:
            week_starts_Mon = True if week_of_year_start == 0 else False
            julian = _calc_julian_from_U_or_W(year, week_of_year, weekday,
                week_starts_Mon)
        elif iso_year is not None and iso_week is not None:
            year, julian = _calc_julian_from_V(iso_year, iso_week, weekday + 1)
            if julian is not None and julian <= 0:
                # The ISO week spilled into the previous year.
                year -= 1
                yday = 366 if calendar.isleap(year) else 365
                julian += yday
    if julian is None:
        # No day-of-year directive: compute it from year/month/day.
        julian = datetime_date(year, month, day).toordinal() - datetime_date(
            year, 1, 1).toordinal() + 1
    else:
        # Day-of-year given: recompute year/month/day from it.
        datetime_result = datetime_date.fromordinal(julian - 1 +
            datetime_date(year, 1, 1).toordinal())
        year = datetime_result.year
        month = datetime_result.month
        day = datetime_result.day
    if weekday is None:
        weekday = datetime_date(year, month, day).weekday()
    tzname = found_dict.get('Z')
    if tzoffset is not None:
        gmtoff = tzoffset * 60
    else:
        gmtoff = None
    if leap_year_fix:
        # Restore the default year for the Feb-29-without-year case.
        year = 1900
    return (year, month, day, hour, minute, second, weekday, julian, tz,
        tzname, gmtoff), fraction
def to_simple_rdd(sc, features, labels):
    """Convert numpy arrays of features and labels into
    an RDD of pairs.

    :param sc: Spark context
    :param features: numpy array with features
    :param labels: numpy array with labels
    :return: cached Spark RDD with (feature, label) pairs
    """
    pairs = list(zip(features, labels))
    # `custom_hash` and `NUM_PARTITION` are module-level settings;
    # presumably configured before this is called -- TODO confirm.
    if custom_hash:  # was `== True`; truthiness is the idiomatic test
        # Key each pair by its label's partition, shuffle with the custom
        # hash, then strip the key so downstream sees plain pairs again.
        rdd = (sc.parallelize(pairs)
                 .map(lambda pair: (data_partitioner(pair[1]), pair))
                 .partitionBy(NUM_PARTITION, label_hash)
                 .map(lambda composite_pair: composite_pair[1])
                 .cache())
    else:
        rdd = sc.parallelize(pairs, NUM_PARTITION).cache()
    return rdd
def delete():
    """Remove the currently located record (if any) and clear the locator."""
    current = g[REC]
    if current:
        records.remove(current)
        g[REC] = None
def add_decimal(op1: Decimal, op2: Decimal)-> Decimal:
    """
    Add two decimals.

    NOTE(review): sums greater than 999 are returned as ``float`` rather
    than ``Decimal`` (despite the annotation) — preserved as-is since
    callers may rely on it; confirm whether this is intentional.

    :param op1: first addend
    :param op2: second addend
    :return: the sum (``float`` when > 999, ``Decimal`` otherwise)
    """
    total = op1 + op2
    return float(total) if total > 999 else total
def get_data_file_path(project, filename):
    """
    Build the path to a data file stored for a project.

    :param project: project directory name under the data folder
    :param filename: file name inside that project's data directory
    :return: path string rooted at ``BASE_DIR``
    """
    data_root = os.path.join(BASE_DIR, "waterspout_api", "data")
    return os.path.join(data_root, project, filename)
def test_PoliteBufferedConsumer(flush: mock.MagicMock) -> None:
    """Test that PoliteBufferedConsumer logs errors and continues.

    NOTE(review): ``flush`` arrives as a MagicMock — presumably injected by
    a ``mock.patch`` decorator defined above this function; confirm.
    """
    # Route structlog output through stdlib logging so LogCapture sees it.
    structlog.configure(
        processors=[structlog.processors.KeyValueRenderer(sort_keys=True)],
        logger_factory=structlog.stdlib.LoggerFactory(),
    )
    consumer = PoliteBufferedConsumer(use_structlog=True)
    consumer.send(endpoint="events", json_message='{"foo":"Foo"}')
    consumer.send(endpoint="events", json_message='{"bar":"Bar"}')
    # Messages are buffered per endpoint until flush() is called.
    assert consumer._buffers == {  # noqa: SF01
        "events": ['{"foo":"Foo"}', '{"bar":"Bar"}'],
        "people": [],
        "groups": [],
        "imports": [],
    }
    consumer.flush()
    flush.assert_called_with()
    # A network failure during flush must be logged (structlog style),
    # not raised.
    with LogCapture() as logs:
        flush.side_effect = URLError("foo")
        consumer.flush()
        logs.check(
            (
                "pyramid_mixpanel.consumer",
                "ERROR",
                "event='It seems like Mixpanel is down.' exc_info=True",
            )
        )
    # Same failure path with plain stdlib logging (use_structlog defaults off).
    consumer = PoliteBufferedConsumer()
    with LogCapture() as logs:
        flush.side_effect = URLError("foo")
        consumer.flush()
        logs.check(
            ("pyramid_mixpanel.consumer", "ERROR", "It seems like Mixpanel is down.")
        )
def freenas_spec(**kwargs):
    """FreeNAS specs.

    Updates the builder spec found under ``kwargs['data']['builder_spec']``
    in place with the FreeNAS boot/shutdown commands, and returns it along
    with the (always ``None``) bootstrap config.
    """
    builder_spec = kwargs['data']['builder_spec']
    # Scripted installer keystrokes, followed by two curl calls that turn
    # on SSH root login and enable the SSH service via the FreeNAS API.
    boot_sequence = [
        '<enter>',
        '<wait30>1<enter>',
        'y',
        '<wait5><spacebar>o<enter>',
        '<enter>',
        '{{ user `password` }}<tab>{{ user `password` }}<tab><enter>',
        '<enter>',
        '<wait60><wait60><wait60>',
        '<enter>',
        '3<enter>',
        '<wait60><wait60><wait60><wait60><wait60>',
        '9<enter>',
        'curl -X PUT -u {{ user `username` }}:{{ user `password` }} -H \'Content-Type: application/json\' -d \'{\"ssh_rootlogin\": true}\' http://localhost/api/v1.0/services/ssh/<enter>',  # noqa: E501
        'curl -X PUT -u {{ user `username` }}:{{ user `password` }} -H \'Content-Type: application/json\' -d \'{\"srv_enable\": true}\' http://localhost/api/v1.0/services/services/ssh/<enter>'  # noqa: E501
    ]
    builder_spec.update(
        {
            'boot_command': boot_sequence,
            'boot_wait': '30s',
            'shutdown_command': 'shutdown -p now',
        }
    )
    # FreeNAS requires no separate bootstrap configuration.
    return None, builder_spec
def test_id_g017_id_g017_v(mode, save_output, output_format):
    """
    TEST :Identity-constraint Definition Schema Component : key category,
    selector points to element outside of targetNamespace in a non-
    imported schema
    """
    # Fixed test-case parameters plus the runner-provided fixtures.
    binding_args = dict(
        schema="msData/identityConstraint/idG017.xsd",
        instance="msData/identityConstraint/idG017.xml",
        class_name="Root",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
    assert_bindings(**binding_args)
def checksum(routine):
    """
    Compute the M routine checksum used by ``CHECK1^XTSUMBLD``,
    implemented in ``^%ZOSF("RSUM1")`` and ``SUMB^XPDRSUM``.
    """
    total = 0
    with open(routine, 'r') as source:
        for line_no, raw_line in enumerate(source, start=1):
            # The second line never participates in the checksum.
            if line_no == 2:
                continue
            total += routineLineCheckSum(raw_line.rstrip('\r\n'), line_no)
    return total
def nusdas_parameter_change(param, value):
    """
    Call the ``NuSDaS_parameter_change`` C entry point via ctypes.

    Raises ``Exception`` on any non-zero status code; returns the
    (zero) status otherwise.
    """
    # Declare the C signature before calling.
    c_func = libnus.NuSDaS_parameter_change
    c_func.restype = c_int32
    c_func.argtypes = (c_int32, POINTER(c_int32))
    status = c_func(c_int32(param), byref(c_int32(value)))
    if status != 0:
        raise Exception("nusdas_parameter_change Error: Unsupported parameter" + str(status))
    return status
def validate_retention_time(retention_time):
    # type: (str) -> str
    """Validate retention_time. If "-1", return it unchanged, else convert to ms.

    Keyword arguments:
    retention_time -- user configured retention-ms, pattern: %d%h%m%s%ms
    Return:
    retention_time -- "-1" passed through verbatim, otherwise the
    millisecond conversion from ``convert_time_ms``
    """
    if retention_time != "-1":
        return convert_time_ms(retention_time, "retention_time")
    # "-1" is the sentinel for unlimited retention and is kept as-is.
    return retention_time
def is_sale(this_line):
    """Determine whether a given line describes a sale of cattle."""
    # A sale line carries a dollars-and-cents price and more than
    # three words of description.
    price_found = re.search(r'[0-9]+\.[0-9]{2}', this_line)
    wordy_enough = len(this_line.split()) > 3
    if price_found and wordy_enough:
        return True
    return False
def map_copy(source: tcod.map.Map, dest: tcod.map.Map) -> None:
    """Copy map data from `source` to `dest`.
    .. deprecated:: 4.5
        Use Python's copy module, or see :any:`tcod.map.Map` and assign between
        array attributes manually.
    """
    # Re-initialize `dest` when its dimensions differ so the internal
    # buffers end up the same size before the bulk copy below.
    if source.width != dest.width or source.height != dest.height:
        dest.__init__(  # type: ignore
            source.width, source.height, source._order
        )
    # Bulk-copy the name-mangled private buffer of tcod.map.Map.
    dest._Map__buffer[:] = source._Map__buffer[:]
def transformRoot():
    """Bind trackball to the root object.

    After this call, mouse input transforms the whole scene.
    """
    # `self` is a free variable — this function is defined inside a
    # method and closes over its instance.
    viewer = self.GUI.VIEWER
    viewer.TransformRootOnly(yesno=1)
    viewer.SetCurrentObject(viewer.rootObject)
def extract_policy(env, v, gamma = 1.0):
    """Extract the greedy policy for a given value function.

    For each state, evaluates the expected return of every action under
    the transition model ``env.env.P`` and picks the argmax.
    """
    n_states = env.env.nS
    n_actions = env.env.nA
    policy = np.zeros(n_states)
    for state in range(n_states):
        action_values = np.zeros(n_actions)
        for action in range(n_actions):
            expected = 0.0
            # Each transition is (probability, next_state, reward, done).
            for prob, next_state, reward, _ in env.env.P[state][action]:
                expected += prob * (reward + gamma * v[next_state])
            action_values[action] = expected
        policy[state] = np.argmax(action_values)
    return policy
def generate_parameters(var):
    """
    Defines a distribution of parameters
    Returns a settings dictionary
    var is an iterable of variables in the range [0,1) which
    we can make use of.

    NOTE(review): the order of ``next(var)`` calls defines what each draw
    means — do not reorder. Relies on module globals ``mul_coeff`` and
    ``lmax_max`` being defined elsewhere in this file.
    """
    var = iter(var)
    model={}
    training={}
    settings = {'model':model, 'training':training}
    # max_radius is exponential from 3 to 8
    model['max_radius'] = 1.5 * 2 ** (1 + 1.4 * next(var))
    # number of radial basis funcs is exponential from 8 to 31
    model['number_of_basis'] = int( 2 ** (3 + 2 * next(var)) )
    # radial_layers is from 1 to 15
    model['radial_layers'] = int(2 ** (4 * next(var)) )
    # radial_h from 10 to 79
    model['radial_h'] = int( 5 * 2 ** (1 + 3 * next(var)) )
    # numlayers from exp from 2 to 12
    numlayers = int( 2 ** (1 + 2.584963 * next(var)))
    # lmax is a polynomial on [0,1), of x = layer/numlayers
    # lmax = l0 + l1 x + l2 x^2
    # where l0 is whatever gives this min of lmin on [0,1)
    l2 = 6 * next(var) - 3 # l2 in [-3, 3]
    l1 = 6 * next(var) - 3 # l1 in [-3, 3]
    lmin = int(6 * (next(var) ** 2)) # lmin integer in [0,5] inclusive
    ns = [l / numlayers for l in range(numlayers)]
    # Cap each layer's lmax at the global ceiling, then shift the whole
    # profile up so its minimum equals lmin.
    lmaxes = [min(lmax_max, int(round(l1 * n + l2 * n**2))) for n in ns]
    bump = -min(lmaxes)
    lmaxes = [l + bump + lmin for l in lmaxes]
    model['lmaxes'] = lmaxes
    global mul_coeff
    print(f"Using mul_coeff = {mul_coeff}.")
    # multiplicities are a fn of both n = layer/numlayers and x = 10/(2l+1)
    # m = m0 + m01 x + m10 n + m11 xn
    # where m0 is whatever gives this min of mmin
    m01 = mul_coeff * (40 * next(var) - 10) # m01 in [-10, 30]
    m11 = mul_coeff * (40 * next(var) - 10) # m11 in [-10, 30]
    m10 = mul_coeff * (80 * next(var) - 40) # m10 in [-40, 40]
    #mmin = int(16 * (next(var) ** 2)) # mmin integer in [1,16] incl.
    mmin = int(mul_coeff * 2 ** (next(var) * 6)) + 1 # mmin integer in [2,64] incl.
    xs = [[10 / (2*l + 1) for l in range(lmaxes[n]+1)] for n in range(numlayers)]
    muls = [[int(m01 * x + m10 * n + m11 * x * n) for x in xl] for n,xl in zip(ns,xs)]
    # Shift multiplicities up so the global minimum equals mmin.
    bump = -min([min(lmul) for lmul in muls])
    muls = [[m + bump + mmin for m in lmul] for lmul in muls]
    model['muls'] = muls
    return settings
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.