content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def validate(model, model_name: str, dataloader_valid, class_weights, epoch: int,
             validations_dir: str, save_oof=True):
    """Validate the model at the end of an epoch.

    Args:
        model: current model
        model_name: model identifier, used when saving visualisations
        dataloader_valid: dataloader for the validation fold
        class_weights: per-class weights for both cross-entropy losses
        epoch: current epoch number
        validations_dir: directory for saving prediction visualisations
        save_oof: if True, save out-of-fold predictions
            (currently unused in this function)

    Returns:
        Total validation loss (mean segmentation + mean classification).
    """
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print(device)
    with torch.no_grad():
        model.eval()
        val_cls_losses, val_seg_losses = [], []
        progress_bar = tqdm(dataloader_valid, total=len(dataloader_valid))
        for iter_num, (img, target, sample_ids) in enumerate(progress_bar):
            img = img.to(device)        # [N, 3, H, W]
            target = target.to(device)  # [N, H, W] with class indices (0, 1)
            (mask_pred, cls_pred) = model(img)  # [N, 2, H, W] | prediction
            # NOTE(review): the original referenced an undefined `labels`;
            # image-level labels are derived here from the masks (an image is
            # "positive" when its mask has any foreground) — confirm this
            # matches how the classification head was trained.
            labels = (target.sum(dim=(1, 2)) > 0).long()
            loss_seg = F.cross_entropy(mask_pred, target, weight=class_weights)
            loss_cls = F.cross_entropy(cls_pred, labels, weight=class_weights)
            # Original had comma typos: `loss_seg.detach(),cpu(),numpy()`.
            val_seg_losses.append(loss_seg.detach().cpu().numpy())
            val_cls_losses.append(loss_cls.detach().cpu().numpy())
            # Visualize the first prediction of the epoch.
            if iter_num == 0:
                visualize_predictions(img, mask_pred, target, validations_dir, model_name, epoch)
    print(f"Epoch {epoch}, Valid Classification Loss: {np.mean(val_cls_losses)}, "
          f"Valid Segmentation Loss: {np.mean(val_seg_losses)}")
    # Original returned undefined `val_losses`; return the total loss promised
    # by the docstring instead.
    return np.mean(val_seg_losses) + np.mean(val_cls_losses)
def shared_empty(dim=2, dtype=None):
    """
    Shortcut to create a zero-initialised Theano shared variable with
    the specified number of dimensions (each axis has size 1).

    :param dim: number of dimensions of the shared variable
    :param dtype: element dtype; defaults to ``theano.config.floatX``
    :return: a Theano shared variable wrapping ``np.zeros((1,) * dim)``
    """
    if dtype is None:
        dtype = theano.config.floatX
    # (1,) * dim builds the all-ones shape tuple directly.
    shp = (1,) * dim
    return theano.shared(np.zeros(shp, dtype=dtype))
def _cleanup_session_var(request, var):
"""Cleanup Session var helper
@param request: is a Django request object
@param var: is a variable to cleanup session for"""
try:
request.session[var] = None
del request.session[var]
except KeyError:
pass | 5,326,602 |
def add_kwds(dictionary, key, value):
    """
    Return a copy of *dictionary* with a single keyword added.

    The input dictionary is never mutated. If it is None a new dictionary
    is created. The (key, value) pair is added only when BOTH key and
    value are not None.

    Parameters
    ----------
    dictionary: dict (or None)
        A dictionary to copy and update. If None a new dictionary is
        instantiated.
    key: str (or None)
        The key to add to the dictionary.
    value: object (or None)
        The value to add. If None, no pair is added at all.

    Returns
    -------
    dict
        A copy of *dictionary* with (key, value) added, or a new
        dictionary containing only (key, value).
    """
    kwds = {} if dictionary is None else dictionary.copy()
    if (value is not None) and (key is not None):
        kwds[key] = value
    return kwds
def parse_private_key(data, password):
    """
    Identify, decrypt and return a ``cryptography`` private key object.

    Tries PEM, then PKCS12, then DER parsing in order.

    :param data: key material as bytes
    :param password: password for encrypted keys, or None
    :raises TypeError: if the key is encrypted but no password was given
    :raises ValueError: if decryption fails or no format could parse the key
    """
    # PEM
    if is_pem(data):
        if b"ENCRYPTED" in data:
            if password is None:
                raise TypeError("No password provided for encrypted key.")
        try:
            return serialization.load_pem_private_key(
                data, password, backend=default_backend()
            )
        except ValueError:
            # Cryptography raises ValueError if decryption fails —
            # propagate that to the caller instead of falling through.
            raise
        except Exception as e:
            logger.debug("Failed to parse PEM private key ", exc_info=e)
    # PKCS12
    if is_pkcs12(data):
        try:
            # Round-trip through PEM so a single loader handles the key.
            p12 = crypto.load_pkcs12(data, password)
            data = crypto.dump_privatekey(crypto.FILETYPE_PEM, p12.get_privatekey())
            return serialization.load_pem_private_key(
                data, password=None, backend=default_backend()
            )
        except crypto.Error as e:
            raise ValueError(e)
    # DER
    try:
        return serialization.load_der_private_key(
            data, password, backend=default_backend()
        )
    except Exception as e:
        logger.debug("Failed to parse private key as DER", exc_info=e)
    # All parsing failed
    raise ValueError("Could not parse private key.")
def _get_cindex(circ, name, index):
"""
Find the classical bit index.
Args:
circ: The Qiskit QuantumCircuit in question
name: The name of the classical register
index: The qubit's relative index inside the register
Returns:
The classical bit's absolute index if all registers are concatenated.
"""
ret = 0
for reg in circ.cregs:
if name != reg.name:
ret += reg.size
else:
return ret + index
return ret + index | 5,326,605 |
def test_store_public_key_for_other_economy_is_not_enabled():
    """
    Case: send transaction request to store certificate public key for other, when economy isn't enabled.
    Expect: public key information is stored to blockchain linked to owner address. Owner hasn't paid for storing.
    """
    # --- Arrange: payload signed by the owner, transaction signed by the payer.
    new_public_key_payload = generate_rsa_payload(key=CERTIFICATE_PUBLIC_KEY)
    serialized_new_public_key_payload = new_public_key_payload.SerializeToString()
    private_key = Secp256k1PrivateKey.from_hex(OWNER_PRIVATE_KEY)
    signature_by_owner = Secp256k1Context().sign(serialized_new_public_key_payload, private_key)
    new_public_key_store_and_pay_payload = NewPubKeyStoreAndPayPayload(
        pub_key_payload=new_public_key_payload,
        owner_public_key=bytes.fromhex(OWNER_PUBLIC_KEY),
        signature_by_owner=bytes.fromhex(signature_by_owner),
    )
    transaction_payload = TransactionPayload()
    transaction_payload.method = PubKeyMethod.STORE_AND_PAY
    transaction_payload.data = new_public_key_store_and_pay_payload.SerializeToString()
    serialized_transaction_payload = transaction_payload.SerializeToString()
    transaction_header = generate_header(
        serialized_transaction_payload, INPUTS, OUTPUTS, signer_public_key=PAYER_PUBLIC_KEY,
    )
    serialized_header = transaction_header.SerializeToString()
    transaction_request = TpProcessRequest(
        header=transaction_header,
        payload=serialized_transaction_payload,
        signature=create_signer(private_key=PAYER_PRIVATE_KEY).sign(serialized_header),
    )
    # Initial state: payer with a balance, owner with one stored key, zero account.
    payer_account = Account()
    payer_account.balance = PAYER_INITIAL_BALANCE
    serialized_payer_account = payer_account.SerializeToString()
    owner_account = Account()
    owner_account.pub_keys.append(RANDOM_ALREADY_STORED_OWNER_PUBLIC_KEY_ADDRESS)
    serialized_owner_account = owner_account.SerializeToString()
    zero_account = Account()
    zero_account.balance = 0
    serialized_zero_account = zero_account.SerializeToString()
    # Economy is explicitly disabled for this case.
    is_economy_enabled_setting = Setting()
    is_economy_enabled_setting.entries.add(key='remme.economy_enabled', value='false')
    serialized_is_economy_enabled_setting = is_economy_enabled_setting.SerializeToString()
    mock_context = StubContext(inputs=INPUTS, outputs=OUTPUTS, initial_state={
        OWNER_ADDRESS: serialized_owner_account,
        PAYER_ADDRESS: serialized_payer_account,
        ZERO_ADDRESS: serialized_zero_account,
        IS_NODE_ECONOMY_ENABLED_ADDRESS: serialized_is_economy_enabled_setting,
    })
    # Expected state: new key linked to owner, balances unchanged (no payment).
    expected_public_key_storage = PubKeyStorage()
    expected_public_key_storage.owner = OWNER_PUBLIC_KEY
    expected_public_key_storage.payload.CopyFrom(new_public_key_payload)
    expected_public_key_storage.is_revoked = False
    expected_serialized_public_key_storage = expected_public_key_storage.SerializeToString()
    expected_payer_account = Account()
    expected_payer_account.balance = PAYER_INITIAL_BALANCE
    serialized_expected_payer_account = expected_payer_account.SerializeToString()
    expected_owner_account = Account()
    expected_owner_account.pub_keys.append(RANDOM_ALREADY_STORED_OWNER_PUBLIC_KEY_ADDRESS)
    expected_owner_account.pub_keys.append(ADDRESS_FROM_CERTIFICATE_PUBLIC_KEY)
    serialized_expected_owner_account = expected_owner_account.SerializeToString()
    expected_zero_account = Account()
    expected_zero_account.balance = 0
    expected_serialized_zero_account = expected_zero_account.SerializeToString()
    expected_state = {
        OWNER_ADDRESS: serialized_expected_owner_account,
        PAYER_ADDRESS: serialized_expected_payer_account,
        ADDRESS_FROM_CERTIFICATE_PUBLIC_KEY: expected_serialized_public_key_storage,
        ZERO_ADDRESS: expected_serialized_zero_account,
    }
    # --- Act.
    PubKeyHandler().apply(transaction=transaction_request, context=mock_context)
    # --- Assert.
    state_as_list = mock_context.get_state(addresses=[
        OWNER_ADDRESS, PAYER_ADDRESS, ADDRESS_FROM_CERTIFICATE_PUBLIC_KEY, ZERO_ADDRESS,
    ])
    state_as_dict = {entry.address: entry.data for entry in state_as_list}
    assert expected_state == state_as_dict
def sys_firewall_disallow_incoming_postgresql():
    """Disallow incoming PostgreSQL (port 5432) requests to this server - Ex: (cmd)"""
    sudo('ufw delete allow postgresql')
    # ufw must be disabled and re-enabled for the rule change to apply.
    sudo('ufw disable; echo "y" | ufw enable; sudo ufw status verbose')
def get_bin(pdf: str) -> str:
    """
    Return the two-level bin prefix of a pdf path, e.g.
    './00/02/Br_J_Cancer_1977_Jan_35(1)_78-86.tar.gz' yields '00/02/'.

    :param pdf: a '/'-separated path with at least two directory levels
    :return: the last two directory components joined with trailing '/'
    """
    parts = pdf.split('/')
    return parts[-3] + '/' + parts[-2] + '/'
def to_symbol(text):
    """Return the canonical symbol for a website name.

    :param str text: website name or alias (case-insensitive)
    :returns: 'bgm', 'mal', or None when the name is unknown
    """
    # Dict lookup replaces the if/elif chain; dict.get returns None
    # for unknown names, matching the original's else branch.
    aliases = {
        "BGM": "bgm", "BANGUMI": "bgm",
        "MAL": "mal", "MYANIMELIST": "mal",
    }
    return aliases.get(text.upper())
def burkert_density(r, r_s, rho_o):
    """
    Burkert dark matter density profile.

    :param r: radius (unit-aware quantity)
    :param r_s: scale radius (same units as r)
    :param rho_o: central density; must be a unit-aware quantity, since
        the result is converted with ``.to('g/cm**3')``
    :return: density in g/cm**3
    """
    x = r / r_s
    density = rho_o / ((x) * (1.0 + x) ** 2)
    return density.to('g/cm**3')
def create_engine(user, password, database, host='127.0.0.1', port=5432, **kwargs):
    """
    Create the engine that connects to the database.
    Uses a PostgreSQL database via psycopg2.

    :raises DBError: if the module-level engine is already initialized
    """
    import psycopg2
    global engine
    # set psycopg2 module select unicode result
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE, None)
    if engine is not None:
        raise DBError('Engine is already initialized.')
    params = dict(user=user, password=password, database=database, host=host, port=port)
    # 'async' became a reserved keyword in Python 3.7, so the defaults must
    # use string keys (dict(async=False) is a SyntaxError). Passing the key
    # through **params to psycopg2.connect still works.
    defaults = {
        'client_encoding': 'UTF8',
        'connection_factory': None,
        'cursor_factory': None,
        'async': False,
    }
    for k, v in defaults.items():
        params[k] = kwargs.pop(k, v)
    params.update(kwargs)
    engine = _Engine(lambda: psycopg2.connect(**params))
    logging.info('Initialize postgreSQL engine <%s>' % hex(id(engine)))
def send_file(path):
    """
    Route for file downloads.

    :param path: file path relative to the document directory
    :return: a Flask response serving the requested file
    """
    # If the document path has a tilde, expand it to the user's home dir.
    path_prefix = expanduser(DOCUMENT_DIRECTORY_PATH)
    return send_from_directory(path_prefix, path)
def locations__single(request, location_id: int):
    """
    Render the locations page when a single location has been selected.

    Redirects back to /locations when the id is invalid or unknown.
    """
    context = {'geolocation': request.session.get('geolocation'),
               'location_error': request.session.get('location_error')}
    try:
        location_id = validators.validate_integer(location_id)
    except validators.ValidationError as e:
        # Invalid id: remember the failure in the session and bounce back.
        context['validation_error'] = True
        context['invalid_data'] = e.invalid_data
        request.session['no_matching_location'] = True
        return redirect('/locations')
    loc = location_service.get_single_location(location_id)
    if loc is None:
        request.session['no_matching_location'] = True
        return redirect('/locations')
    context['current_location'] = loc
    context['AQI_colors'] = location_service.get_air_pollution_colors()
    return render(request, 'climate/locations.html', context)
def test_ignoredInClass():
    """A C{__all__} definition does not suppress unused import warnings in a class scope."""
    flakes('''
    class foo:
        import bar
        __all__ = ["bar"]
    ''', m.UnusedImport)
def page_not_found(e):
    """
    Application-wide 404 error handler.

    :param e: the exception raised by Flask for the missing page
    :return: rendered 404 template with HTTP status 404
    """
    return render_template('404.html',
                           base_template=appbuilder.base_template,
                           appbuilder=appbuilder), 404
def licols(A, tol=1e-10):
    """
    Extract a linearly independent set of columns from a given matrix A.

    Solution found at
    https://nl.mathworks.com/matlabcentral/answers/108835-how-to-get-only-linearly-independent-rows-in-a-matrix-or-to-remove-linear-dependency-b-w-rows-in-a-m

    :param A: matrix
    :param tol: rank estimation tolerance
    :return: indices of linearly independent columns (list for the trivial
        shapes, sorted ndarray otherwise — original behaviour preserved)
    """
    if np.min(A.shape) == 0:
        # Degenerate matrix: no columns to pick.
        return []
    elif np.min(A.shape) == 1:
        # A single row/column is trivially independent.
        return [0]
    else:
        # Pivoted QR orders columns by decreasing "importance".
        Q, R, E = qr(A, mode='economic', pivoting=True)
        diagr = np.abs(np.diagonal(R))
        # Estimate rank from the decay of |diag(R)|.
        rank = np.where(diagr >= tol * diagr[0])[0][-1] + 1
        # The first `rank` pivot columns are the independent ones.
        col_ids = np.sort(E[:rank])
        return col_ids
def GetArchivePath():
    """Return the archive path as defined by the L{PathEnvironmentVariable},
    or C{None} if that variable is not defined."""
    import os
    return os.environ.get(PathEnvironmentVariable)
def area_description(area, theory_expt):
    """Generate a plain-language name of a research area from database codes.

    :param area: research-area code (e.g. "CM", "HE"); "" maps to no name
    :param theory_expt: "Theory", "Experimental", or anything else
        (no qualifier)
    :return: e.g. "Condensed matter theory"; astrophysics experiment is
        described as "observation"
    """
    area_name_by_area = {
        "As": "Astrophysics",
        "BP": "Biophysics",
        "CM": "Condensed matter",
        "HE": "High energy",
        "NS": "Network science",
        "NUC": "Nuclear",
        "": None,
    }
    area_name = area_name_by_area[area]
    if area == "As" and theory_expt == "Experimental":
        qualifier = "observation"
    elif theory_expt == "Experimental":
        qualifier = "experiment"
    elif theory_expt == "Theory":
        qualifier = "theory"
    else:
        qualifier = ""
    # Join only the non-empty parts: the original format string produced a
    # stray trailing space (or the literal "None") when a part was missing.
    return " ".join(part for part in (area_name, qualifier) if part)
def parse_username_password_hostname(remote_url):
    """
    Parse a command line string and return username, password, remote hostname and remote path.

    Expected form: ``[user[:password]@]hostname:path``.

    :param remote_url: A command line string.
    :return: A tuple (username, password, hostname, remote_path); username
        and password are None when absent.
    """
    assert remote_url
    assert ':' in remote_url
    if '@' in remote_url:
        # rsplit so that '@' inside the password is kept with the user part.
        username, hostname = remote_url.rsplit('@', 1)
    else:
        username, hostname = None, remote_url
    hostname, remote_path = hostname.split(':', 1)
    password = None
    if username and ':' in username:
        username, password = username.split(':', 1)
    assert hostname
    assert remote_path
    return username, password, hostname, remote_path
def main(api_endpoint, credentials, project_id,
         device_model_id, device_id, device_config, lang, verbose,
         input_audio_file, output_audio_file,
         audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size,
         grpc_deadline, once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:
        $ python -m googlesamples.assistant
      Run the sample with file input and speaker output:
        $ python -m googlesamples.assistant -i <input file>
      Run the sample with file input and output:
        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_source = audio_helpers.SoundDeviceStream(
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width,
            block_size=audio_block_size,
            flush_size=audio_flush_size,
            direction=0
        )
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_sink = audio_helpers.SoundDeviceStream(
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width,
            block_size=audio_block_size,
            flush_size=audio_flush_size,
            direction=1
        )
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    # Determine the device id/model id, registering a new device if needed.
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id,
                             device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = (
                'https://%s/v1alpha2/projects/%s/devices' % (api_endpoint,
                                                             project_id)
            )
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials
            )
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                click.pause(info='Press Enter to send a new request...')
            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation
            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
def _random_inverse_gaussian_no_gradient(shape, loc, concentration, seed):
    """Sample from the Inverse Gaussian distribution (no gradient).

    See https://en.wikipedia.org/wiki/Inverse_Gaussian_distribution or
    https://www.jstor.org/stable/2683801 for the sampling scheme.
    """
    dtype = dtype_util.common_dtype([loc, concentration], tf.float32)
    concentration = tf.convert_to_tensor(concentration)
    loc = tf.convert_to_tensor(loc)
    chi2_seed, unif_seed = samplers.split_seed(seed, salt='inverse_gaussian')
    sampled_chi2 = tf.square(samplers.normal(shape, seed=chi2_seed, dtype=dtype))
    sampled_uniform = samplers.uniform(shape, seed=unif_seed, dtype=dtype)
    # Wikipedia defines an intermediate x with the formula
    #   x = loc + loc ** 2 * y / (2 * conc)
    #       - loc / (2 * conc) * sqrt(4 * loc * conc * y + loc ** 2 * y ** 2)
    # where y ~ N(0, 1)**2 (sampled_chi2 above) and conc is the concentration.
    # Let us write
    #   w = loc * y / (2 * conc)
    # Then we can extract the common factor in the last two terms to obtain
    #   x = loc + loc * w * (1 - sqrt(2 / w + 1))
    # Now we see that the Wikipedia formula suffers from catastrophic
    # cancellation for large w (e.g., if conc << loc).
    #
    # Fortunately, we can fix this by multiplying both sides
    # by 1 + sqrt(2 / w + 1). We get
    #   x * (1 + sqrt(2 / w + 1)) =
    #     = loc * (1 + sqrt(2 / w + 1)) + loc * w * (1 - (2 / w + 1))
    #     = loc * (sqrt(2 / w + 1) - 1)
    # The term sqrt(2 / w + 1) + 1 no longer presents numerical
    # difficulties for large w, and sqrt(2 / w + 1) - 1 is just
    # sqrt1pm1(2 / w), which we know how to compute accurately.
    # This just leaves the matter of small w, where 2 / w may
    # overflow. In the limit as w -> 0, x -> loc, so we just mask
    # that case.
    sqrt1pm1_arg = 4 * concentration / (loc * sampled_chi2)  # 2 / w above
    safe_sqrt1pm1_arg = tf.where(sqrt1pm1_arg < np.inf, sqrt1pm1_arg, 1.0)
    denominator = 1.0 + tf.sqrt(safe_sqrt1pm1_arg + 1.0)
    ratio = tfp_math.sqrt1pm1(safe_sqrt1pm1_arg) / denominator
    sampled = loc * tf.where(sqrt1pm1_arg < np.inf, ratio, 1.0)  # x above
    # Accept x with probability loc / (loc + x); otherwise use loc**2 / x.
    return tf.where(sampled_uniform <= loc / (loc + sampled),
                    sampled, tf.square(loc) / sampled)
def optParse():
    """Parse commandline options.

    :return: the input filename given on the command line; exits with an
        error message on stderr when no filename was provided
    """
    parser = OptionParser(prog=__applicationName__, version="%prog " + __version__)
    parser.set_usage("%prog [options] filename")
    parser.add_option("--autobrief",
                      action="store_true", dest="autobrief",
                      help="use the docstring summary line as \\brief description"
                      )
    parser.add_option("--debug",
                      action="store_true", dest="debug",
                      help="enable debug output on stderr"
                      )
    ## parse options
    global options
    (options, filename) = parser.parse_args()
    if not filename:
        # The original used the Python 2 `print >>sys.stderr` statement,
        # which is a SyntaxError under Python 3.
        print("No filename given.", file=sys.stderr)
        sys.exit(-1)
    return filename[0]
def create_options_example_switcher(sw):
    """Add example actions to the given switcher widget."""
    sw.clear()
    sw.set_placeholder_text('Select Action')
    # First section: view-changing actions.
    section = _('change view')
    sw.add_item(title=_('Indent Using Spaces'), description='Test',
                section=section, shortcut='Ctrl+I')
    sw.add_item(title=_('Indent Using Tabs'), description='Test',
                section=section)
    sw.add_item(title=_('Detect Indentation from Content'), section=section)
    sw.add_separator()
    # Second section: file-conversion actions.
    section = _('convert file')
    sw.add_item(title=_('Convert Indentation to Spaces'), description='Test',
                section=section)
    sw.add_item(title=_('Convert Indentation to Tabs'), section=section)
    sw.add_item(title=_('Trim Trailing Whitespace'), section=section)
def test_data_arithmetic():
    """
    Tests arithmetic operations between :class:`Data` objects and values.
    """
    grid = Grid(shape=(16, 16, 16))
    u = Function(name='yu3D', grid=grid, space_order=0)
    u.data[:] = 1
    # Simple arithmetic
    assert np.all(u.data == 1)
    assert np.all(u.data + 2. == 3.)
    assert np.all(u.data - 2. == -1.)
    assert np.all(u.data * 2. == 2.)
    assert np.all(u.data / 2. == 0.5)
    assert np.all(u.data % 2 == 1.)
    # Increments and partial increments
    u.data[:] += 2.
    assert np.all(u.data == 3.)
    u.data[9, :, :] += 1.
    assert all(np.all(u.data[i, :, :] == 3.) for i in range(9))
    assert np.all(u.data[9, :, :] == 4.)
    # Right operations __rOP__
    u.data[:] = 1.
    arr = np.ndarray(shape=(16, 16, 16), dtype=np.float32)
    arr.fill(2.)
    assert np.all(arr - u.data == 1.)
def resolve(path):
    """
    Fully resolve a path:
    resolve env vars ($HOME etc.) -> expand user (~) -> make absolute.

    :param path: path-like or string
    :return: pathlib.Path: resolved absolute path
    """
    return Path(expandvars(str(path))).expanduser().resolve()
def i_select_database():
    """
    Select a Balsam database by setting the index of the BALSAM_DB_PATH
    environment variable from the dropdown list.
    """
    from ipywidgets import interact
    databasepaths = get_databases()
    # interact builds a dropdown of (index, path) choices.
    interact(activate_database, db=[(i, db) for i, db in enumerate(databasepaths)])
    return
def human_turn(board, hum_token):
    """
    Make a human player's turn, changing ``board`` accordingly.

    Reads a cell number from stdin until a legal move (or "quit") is given.
    Relies on the module-level ``legal_moves`` and ``total_cells``.
    """
    while True:
        try:
            choice = input('\nEnter the number of the gameboard cell you want to \n'
                           'place your token in (or enter "quit" to exit):\n')
            # "quit" must be handled before int() — the original re-checked
            # it after the conversion, where it was unreachable.
            if choice == 'quit':
                print('\nGoodbye!')
                sys.exit()
            cell = int(choice)
            if cell in legal_moves:
                board[cell - 1] = hum_token
                legal_moves.remove(cell)
                break
            # Cells are numbered 1..total_cells: the original tested
            # range(total_cells), i.e. 0..total_cells-1 — an off-by-one that
            # misreported the highest occupied cell as "no such cell".
            elif cell not in range(1, total_cells + 1):
                print('\nThere is no such cell.', end='')
                continue
            elif board[cell - 1] in ('X', 'O'):
                print('\nCell already occupied.', end='')
                continue
        except ValueError:
            print('\nImpossible choice value!', end='')
            continue
def sas_2J1x_x(x):
    """Return 2*J1(x)/x, with the x == 0 limit evaluated as 1."""
    if np.isscalar(x):
        retvalue = 2 * sas_J1(x) / x if x != 0 else 1.
    else:
        # Suppress the divide-by-zero warning, then patch the limit value.
        with np.errstate(all='ignore'):
            retvalue = 2 * sas_J1(x) / x
        retvalue[x == 0] = 1.
    return retvalue
def get_alpha_given_w(M, last_wrong):
    """
    Compute AdaBoost-style alpha coefficients from round weights.

    :param M: Matrix of weights, shape (n_observations, n_rounds).
    :param last_wrong: Binary vector of wrongly classified observations at
        the last round.
    :return: list of alphas, one per round.
    """
    alpha = []
    for i in range(M.shape[1] - 1):
        # An observation was wrong in round i iff its weight increased.
        er = np.dot(M[:, i], (M[:, i + 1] > M[:, i]))
        alpha.append(1 / 2 * np.log((1 - er) / er))
    er = np.dot(M[:, M.shape[1] - 1], last_wrong)
    alpha.append(1 / 2 * np.log((1 - er) / er))
    print(alpha)
    # The docstring promises a return value, but the original only printed
    # (callers received None) — return the alphas as well.
    return alpha
def get_cross_matrix(vec: ndarray) -> ndarray:
    """Get the matrix equivalent of the cross product. S() in (10.68).

    cross_product_matrix(vec1) @ vec2 == np.cross(vec1, vec2)
    Hint: see (10.5)

    Args:
        vec (ndarray[3]): vector
    Returns:
        S (ndarray[3,3]): skew-symmetric cross product matrix equivalent
    """
    S = np.array([
        [0, -vec[2], vec[1]],
        [vec[2], 0, -vec[0]],
        [-vec[1], vec[0], 0]
    ])
    return S
def apply_proteomics_elastic_relaxation(
    original_model: Model,
    objective_rule: Objective_rule = Objective_rule.MIN_ELASTIC_SUM_OBJECTIVE,
) -> Tuple[Model, Set]:
    """Relax the problem by relaxing the protein concentration constraints.

    The relaxed problems will be determined via Elastic filtering, returning
    the model and an irreducibly inconsistent set of functional constraints
    ([Chinnek and Dravnieks, 1990]
    (https://pubsonline.informs.org/doi/abs/10.1287/ijoc.3.2.157)).

    Parameters
    ----------
    original_model: geckopy.Model
        Geckopy model. It won't be modified but copied.
    objective_rule: Objective_rule
        The IIS is selected by minimizing an objective as defined in
        :class:`Objective_rule`.

    Returns
    -------
    tuple: (geckopy.Model, set)
        Copy of the model with the relaxed variables applied, and the IIS.
    """
    model = original_model.copy()
    # The model is inspected for an IIS over proteins with a finite
    # concentration constraint.
    elastics = elastic_upper_relaxation(
        model,
        [
            prot.id
            for prot in model.proteins
            if isinstance(prot.concentration, float) and not isnan(prot.concentration)
        ],
        objective_rule,
    )
    # The model is modified in place given the elastic candidates found.
    iis, _ = get_upper_relaxation(model, elastics, objective_rule)
    return model, iis
def skip_node(node):
    """Return True if this step should be skipped in the traceback, based on
    the AST node type (control-flow statements: if / while / for)."""
    return isinstance(node, (ast.If, ast.While, ast.For))
def lorentzian_sim(xdata, Amp, Width, Center):
    """
    Estimate a sum of Lorentzian functions where:
        Amp    = 1 x N Lorentzian amplitudes
        Width  = 1 x N Lorentzian widths
        Center = 1 x N Lorentzian centers
        xdata  = 1 x N independent variable
    """
    # Convert to arrays (just in case):
    Amp = np.array(Amp)
    Width = np.array(Width)
    Center = np.array(Center)
    # Estimate number of pools (len() instead of __len__()).
    Num_variables = len(Amp) + len(Width) + len(Center)
    # Make sure it is divisible by 3.
    assert (Num_variables % 3 == 0), "Please provide 3 variables per pool"
    num_pools = int(Num_variables / 3)
    # Preallocate output
    Lsum = np.zeros((xdata.shape[0]))
    for idx in range(num_pools):
        # Assign each pool's variables
        amp = Amp[idx]
        width = Width[idx]
        center = Center[idx]
        # Estimate signal and accumulate
        Lsum += Lorentzian([amp, width, center], xdata)
    return Lsum
def test_ignore_unknown_properties():
    """Test that Resource drops unknown base properties."""
    data = resource_data()
    data['prop1'] = 'property1'
    data['prop2'] = 'property2'
    assert len(data) == 4
    res = Resource(**data)
    # Only the two known properties survive.
    assert len(res.data) == 2
    assert res.whitelist is False
    with pytest.raises(KeyError):
        _ = res.data['prop1']
    with pytest.raises(KeyError):
        _ = res.data['prop2']
    assert res.data['name'] == 'test_resource'
    assert res.data['partition'] == 'Common'
def convertASTtoThreeAddrForm(ast):
    """Convert an AST to a three address form.

    Three address form is (op, reg1, reg2, reg3), where reg1 is the
    destination of the result of the instruction.
    I suppose this should be called three register form, but three
    address form is found in compiler theory.
    """
    return [(node.value, node.reg) + tuple(c.reg for c in node.children)
            for node in ast.allOf('op')]
def aggregate_corrupt_metrics(metrics,
                              corruption_types,
                              max_intensity,
                              alexnet_errors_path=None,
                              fine_metrics=False):
    """Aggregates metrics across intensities and corruption types.

    Args:
        metrics: dict mapping metric names (e.g. 'test/nll_<type>_<level>')
            to objects exposing a ``result()`` method.
        corruption_types: list of corruption-type names.
        max_intensity: highest corruption intensity level (inclusive, 1-based).
        alexnet_errors_path: optional CSV of AlexNet per-corruption errors,
            used to compute the normalized mCE.
        fine_metrics: if True, also copy per-dataset metrics into the result.

    Returns:
        dict of aggregated metric values.
    """
    results = {
        'test/nll_mean_corrupted': 0.,
        'test/kl_mean_corrupted': 0.,
        'test/elbo_mean_corrupted': 0.,
        'test/accuracy_mean_corrupted': 0.,
        'test/ece_mean_corrupted': 0.,
        'test/member_acc_mean_corrupted': 0.,
        'test/member_ece_mean_corrupted': 0.
    }
    for intensity in range(1, max_intensity + 1):
        nll = np.zeros(len(corruption_types))
        kl = np.zeros(len(corruption_types))
        elbo = np.zeros(len(corruption_types))
        acc = np.zeros(len(corruption_types))
        ece = np.zeros(len(corruption_types))
        member_acc = np.zeros(len(corruption_types))
        member_ece = np.zeros(len(corruption_types))
        for i in range(len(corruption_types)):
            dataset_name = '{0}_{1}'.format(corruption_types[i], intensity)
            nll[i] = metrics['test/nll_{}'.format(dataset_name)].result()
            # kl / elbo / member metrics are optional; default to 0.
            if 'test/kl_{}'.format(dataset_name) in metrics.keys():
                kl[i] = metrics['test/kl_{}'.format(dataset_name)].result()
            else:
                kl[i] = 0.
            if 'test/elbo_{}'.format(dataset_name) in metrics.keys():
                elbo[i] = metrics['test/elbo_{}'.format(dataset_name)].result()
            else:
                elbo[i] = 0.
            acc[i] = metrics['test/accuracy_{}'.format(dataset_name)].result()
            ece[i] = metrics['test/ece_{}'.format(dataset_name)].result()
            if 'test/member_acc_mean_{}'.format(dataset_name) in metrics.keys():
                member_acc[i] = metrics['test/member_acc_mean_{}'.format(
                    dataset_name)].result()
            else:
                member_acc[i] = 0.
            if 'test/member_ece_mean_{}'.format(dataset_name) in metrics.keys():
                member_ece[i] = list(metrics['test/member_ece_mean_{}'.format(
                    dataset_name)].result().values())[0]
            else:
                member_ece[i] = 0.
            if fine_metrics:
                results['test/nll_{}'.format(dataset_name)] = nll[i]
                results['test/kl_{}'.format(dataset_name)] = kl[i]
                results['test/elbo_{}'.format(dataset_name)] = elbo[i]
                results['test/accuracy_{}'.format(dataset_name)] = acc[i]
                results['test/ece_{}'.format(dataset_name)] = ece[i]
        avg_nll = np.mean(nll)
        avg_kl = np.mean(kl)
        avg_elbo = np.mean(elbo)
        avg_accuracy = np.mean(acc)
        avg_ece = np.mean(ece)
        avg_member_acc = np.mean(member_acc)
        avg_member_ece = np.mean(member_ece)
        results['test/nll_mean_{}'.format(intensity)] = avg_nll
        results['test/kl_mean_{}'.format(intensity)] = avg_kl
        results['test/elbo_mean_{}'.format(intensity)] = avg_elbo
        results['test/accuracy_mean_{}'.format(intensity)] = avg_accuracy
        results['test/ece_mean_{}'.format(intensity)] = avg_ece
        results['test/nll_median_{}'.format(intensity)] = np.median(nll)
        results['test/kl_median_{}'.format(intensity)] = np.median(kl)
        results['test/elbo_median_{}'.format(intensity)] = np.median(elbo)
        results['test/accuracy_median_{}'.format(intensity)] = np.median(acc)
        results['test/ece_median_{}'.format(intensity)] = np.median(ece)
        results['test/nll_mean_corrupted'] += avg_nll
        results['test/kl_mean_corrupted'] += avg_kl
        results['test/elbo_mean_corrupted'] += avg_elbo
        results['test/accuracy_mean_corrupted'] += avg_accuracy
        results['test/ece_mean_corrupted'] += avg_ece
        results['test/member_acc_mean_{}'.format(intensity)] = avg_member_acc
        results['test/member_ece_mean_{}'.format(intensity)] = avg_member_ece
        results['test/member_acc_mean_corrupted'] += avg_member_acc
        results['test/member_ece_mean_corrupted'] += avg_member_ece
    # Convert the running sums into means over intensities.
    results['test/nll_mean_corrupted'] /= max_intensity
    results['test/kl_mean_corrupted'] /= max_intensity
    results['test/elbo_mean_corrupted'] /= max_intensity
    results['test/accuracy_mean_corrupted'] /= max_intensity
    results['test/ece_mean_corrupted'] /= max_intensity
    results['test/member_acc_mean_corrupted'] /= max_intensity
    results['test/member_ece_mean_corrupted'] /= max_intensity
    if alexnet_errors_path:
        # mCE: mean corruption error normalized by AlexNet's error.
        with tf.io.gfile.GFile(alexnet_errors_path, 'r') as f:
            df = pd.read_csv(f, index_col='intensity').transpose()
            alexnet_errors = df.to_dict()
            corrupt_error = {}
            for corruption in corruption_types:
                alexnet_normalization = alexnet_errors[corruption]['average']
                errors = np.zeros(max_intensity)
                for index in range(max_intensity):
                    dataset_name = '{0}_{1}'.format(corruption, index + 1)
                    errors[index] = 1. - metrics['test/accuracy_{}'.format(
                        dataset_name)].result()
                average_error = np.mean(errors)
                corrupt_error[corruption] = average_error / alexnet_normalization
                results['test/corruption_error_{}'.format(
                    corruption)] = 100 * corrupt_error[corruption]
            results['test/mCE'] = 100 * np.mean(list(corrupt_error.values()))
    return results
def lc_reverse_integer(n):
    """
    Given a 32-bit signed integer, reverse digits of an integer. Assume we are dealing with an environment which could
    only hold integers within the 32-bit signed integer range. For the purpose of this problem, assume that your
    function returns 0 when the reversed integer overflows.
    Examples:
    >>> lc_reverse_integer(123)
    321
    >>> lc_reverse_integer(-123)
    -321
    >>> lc_reverse_integer(120)
    21
    """
    class Solution(object):
        @staticmethod
        def reverse(x):
            neg = x < 0
            if neg:
                x = -x
            result = 0
            while x:
                result = result * 10 + x % 10
                # floor division: `x /= 10` yields a float in Python 3 and
                # the digit loop never terminates correctly
                x //= 10
            # 32-bit signed range is [-2**31, 2**31 - 1]; the positive bound
            # is one smaller than the magnitude of the negative bound
            if result > 2 ** 31 - (0 if neg else 1):
                return 0
            return -result if neg else result
    return Solution.reverse(n)
def hard_swish(x_tens: Tensor, inplace: bool = False):
    """
    | Hardswish activation:
    | 0 for x <= -3
    | x for x >= 3
    | x * (x + 3) / 6 otherwise
    More information can be found in the paper
    `here <https://arxiv.org/abs/1905.02244>`__.
    :param x_tens: the input tensor to perform the swish op on
    :param inplace: True to run the operation in place in memory, False otherwise
    :return: 0 for x <= -3, x for x >= 3, x * (x + 3) / 6 otherwise
    """
    # gate = clamp(x + 3, 0, 6) implements the "hard" sigmoid piece
    gate = clamp(x_tens + 3, 0, 6)
    if inplace:
        x_tens.mul_(gate)
        x_tens.div_(6)
        return x_tens
    return x_tens * gate / 6
def assert_equal_containers_state(expected_containers_list=None,
                                  timeout=120, interval=2,
                                  recreate_expected=False):
    """compare all overcloud container states with using two lists:
    one is current , the other some past list
    first time this method runs it creates a file holding overcloud
    containers' states: /home/stack/expected_containers_list_df.csv'
    second time it creates a current containers states list and
    compares them, they must be identical

    :param expected_containers_list: explicit expected containers; when
        omitted, the saved CSV baseline (expected_containers_file) is used
    :param timeout: seconds to keep retrying the comparison
    :param interval: seconds to sleep between retries
    :param recreate_expected: True to overwrite the baseline with the
        current state and return immediately
    """
    # if we have a file or an explicit variable use that , otherwise create
    # and return
    if recreate_expected or (not expected_containers_list and
                             not os.path.exists(expected_containers_file)):
        save_containers_state_to_file(list_containers())
        return
    elif expected_containers_list:
        expected_containers_list_df = pandas.DataFrame(
            get_container_states_list(expected_containers_list),
            columns=['container_host', 'container_name', 'container_state'])
    elif os.path.exists(expected_containers_file):
        expected_containers_list_df = pandas.read_csv(
            expected_containers_file)
    failures = []
    start = time.time()
    error_info = 'Output explanation: left_only is the original state, ' \
                 'right_only is the new state'
    # Retry loop: container states may converge (e.g. after restarts), so
    # keep comparing until they match or the timeout expires.
    while time.time() - start < timeout:
        failures = []
        actual_containers_list_df = list_containers_df()
        LOG.info('expected_containers_list_df: {} '.format(
            expected_containers_list_df.to_string(index=False)))
        LOG.info('actual_containers_list_df: {} '.format(
            actual_containers_list_df.to_string(index=False)))
        # execute a `dataframe` diff between the expected and actual containers
        expected_containers_state_changed = \
            dataframe_difference(expected_containers_list_df,
                                 actual_containers_list_df)
        # check for changed state containers topology; an empty diff means
        # every container matches the expected state
        if not expected_containers_state_changed.empty:
            failures.append('expected containers changed state ! : '
                            '\n\n{}\n{}'.format(
                                expected_containers_state_changed.
                                to_string(index=False), error_info))
            LOG.info('container states mismatched:\n{}\n'.format(failures))
            time.sleep(interval)
            # clear cache to obtain new data
            list_node_containers.cache_clear()
        else:
            LOG.info("assert_equal_containers_state :"
                     " OK, all containers are on the same state")
            return
    if failures:
        tobiko.fail('container states mismatched:\n{!s}', '\n'.join(
            failures))
def init_pin_callback(box, nb):  # required by Whand
    """
    Register an interrupt callback for one input pin.
    This function is always called by whand_io.py; it only needs to do
    something if input pins are accessed through interrupts, and
    interrupts are not used in this implementation.
    box is one instance of the hardware
    nb is an input pin number for this box
    Whand calls this function but does not expect any returned value.
    """
    return None  # interrupts are not used, so there is nothing to register
def get_buttons():
    """ renders the ok and cancel buttons. Called from get_body() """
    def make_button(label, token):
        # pressing either button unwinds the main loop via ExitPasterDemo
        def on_press(button):
            raise ExitPasterDemo(exit_token=token)
        widget = urwid.Button(label, on_press=on_press)
        return urwid.AttrWrap(widget, 'button', 'buttonfocus')
    # leading spaces to center it....seems like there should be a better way
    ok_widget = make_button(' OK', 'ok')
    cancel_widget = make_button('Cancel', 'cancel')
    return urwid.GridFlow([ok_widget, cancel_widget], 10, 7, 1, 'center')
def dist_weights(distfile, weight_type, ids, cutoff, inverse=False):
    """
    Returns a distance-based weights object using user-defined options
    Parameters
    ----------
    distfile: string, a path to distance csv file
    weight_type: string, either 'threshold' or 'knn'
    ids: a numpy array of id values
    cutoff: float or integer; float for 'threshold' weight type and integer for knn type
    inverse: boolean; true if inversed weights required
    Raises
    ------
    ValueError: if an id in ids has no row in the distance file
    """
    try:
        # csv.Sniffer needs a text sample, not a file path
        with open(distfile) as f:
            sample = f.read(1024)
        data_csv = csv.reader(open(distfile))
        if csv.Sniffer().has_header(sample):
            next(data_csv)  # skip the header row (was Py2 `.next()`)
    except Exception:
        data_csv = None
    if weight_type == 'threshold':
        def neighbor_func(dists, threshold):
            # keep every neighbor within the distance threshold
            return [d for d in dists if d[0] <= threshold]
    else:
        def neighbor_func(dists, k):
            # keep the k nearest neighbors (tuples sort by distance first)
            dists.sort()
            return dists[:k]
    if inverse:
        def weight_func(dists, alpha=-1.0):
            # inverse-distance weights, rounded for stability
            return list((np.array(dists) ** alpha).round(decimals=6))
    else:
        def weight_func(dists, binary=False):
            return [1] * len(dists)
    # map: origin id -> {destination id: distance}, skipping self-distances
    dist_src = {}
    for row in data_csv:
        des = dist_src.setdefault(row[0], {})
        if row[0] != row[1]:
            des[row[1]] = float(row[2])
    neighbors, weights = {}, {}
    for id_val in ids:
        if id_val not in dist_src:
            # Py3 raise syntax (was `raise ValueError, '...'`)
            raise ValueError('An ID value doest not exist in distance file')
        # materialize so neighbor_func can sort/len (Py2 zip returned a list)
        dists = list(zip(dist_src[id_val].values(), dist_src[id_val].keys()))
        ngh, wgt = [], []
        if len(dists) > 0:
            nghs = neighbor_func(dists, cutoff)
            for d, i in nghs:
                ngh.append(i)
                wgt.append(d)
        neighbors[id_val] = ngh
        weights[id_val] = weight_func(wgt)
    w = W(neighbors, weights)
    w.id_order = ids
    return w
def install_to_temp_directory(pip_dependency: str,
                              temp_dir: Optional[str] = None) -> str:
    """Install the given pip dependency specifier to a temporary directory.
    Args:
      pip_dependency: Path to a wheel file or a pip dependency specifier (e.g.
        "setuptools==18.0").
      temp_dir: Path to temporary installation location (optional).
    Returns:
      Temporary directory where the package was installed, that should be added
      to the Python import path.
    """
    logging.info('Installing %r to a temporary directory.', pip_dependency)
    # create a fresh directory unless the caller supplied one
    target = temp_dir if temp_dir else tempfile.mkdtemp()
    cmd = [
        sys.executable, '-m', 'pip', 'install', '--target', target,
        pip_dependency
    ]
    logging.info('Executing: %s', cmd)
    subprocess.check_call(cmd)
    logging.info('Successfully installed %r.', pip_dependency)
    return target
def alias_phased_obs_with_phase(x, y, start, end):
    """
    Replicate phased observations cycle-by-cycle so they cover the
    phase window [start, end].
    :param x: a list containing phases
    :param y: a list containing observations
    :param start: start phase
    :param end: end phase
    :return: aliased phases and observations (two lists)
    """
    phases = [float(value) for value in x]
    obs = [float(value) for value in y]
    if start > end:
        raise ValueError("Start phase can't be larger than stop phase.")
    if len(phases) != len(obs):
        raise ValueError("x and y must be the same size.")
    lowest = min(phases)
    shift = int(start - lowest)
    # nudge one cycle left when int() truncation lands us past `start`
    if (shift == 0 and lowest > start) or (shift < 0 < lowest):
        shift = shift - 1
    phases = [p + shift for p in phases]
    all_phases = phases[:]
    all_obs = obs[:]
    cycle = 1
    # replicate whole cycles until the window end is covered
    while max(all_phases) < end:
        all_phases = all_phases + [p + cycle for p in phases]
        all_obs = all_obs + obs[:]
        cycle = cycle + 1
    kept_phases, kept_obs = [], []
    for p, value in zip(all_phases, all_obs):
        if start <= p <= end:
            kept_phases.append(p)
            kept_obs.append(value)
    return kept_phases, kept_obs
def create_storage_account(subscription: str, resource_group: str, name: str) -> None:
    """Creates an Azure storage account. Also adds upload access, as well
    as possibility to list/generate access keys, to the user creating it
    (i.e. the currently logged in user).
    Note that Azure documentation states that it can take up to five minutes
    after the command has finished until the added access is enabled in practice.
    """
    storage_client = StorageManagementClient(
        _credential(), _subscription_id(subscription)
    )
    # only open the PIM activation page once, even across retries
    azure_pim_already_open = False
    # Retry until creation succeeds or a non-authorization error occurs.
    # NOTE(review): this loops indefinitely while authorization keeps
    # failing — confirm that is the intended UX.
    while True:
        try:
            # zone-redundant StorageV2 account with account-scoped blob encryption
            return storage_client.storage_accounts.begin_create(
                resource_group,
                name,
                {
                    "sku": {"name": "Standard_ZRS"},
                    "kind": "StorageV2",
                    "location": "northeurope",
                    "encryption": {
                        "key_source": "Microsoft.Storage",
                        "services": {"blob": {"key_type": "Account", "enabled": True}},
                    },
                },
            ).result()
        except HttpResponseError as exc:
            if "AuthorizationFailed" in str(exc):
                # likely missing PIM activation — point the user at it
                if not azure_pim_already_open:
                    webbrowser.open(f"{PIMCOMMON_URL}/azurerbac")
                    print(
                        "Not able to create new storage account. Do you have "
                        "enough priviliges to do it? We automatically opened the URL "
                        "to where you activate Azure PIM. Please activate/add necessary "
                        "priviliges."
                    )
                    azure_pim_already_open = True
                print("New attempt of creating storage account in 30 seconds.")
                time.sleep(30)
            else:
                raise RuntimeError("Not able to create new storage account.") from exc
def get_os():
    """
    Checks the OS of the system running and alters the directory structure accordingly
    :return: The directory location of the Wordlists folder
    """
    # Windows uses backslash path separators; everything else uses forward slash
    on_windows = platform.system() == "Windows"
    return "Wordlists\\" if on_windows else "Wordlists/"
def _try_warp(image, transform_, large_warp_dim, dsize, max_dsize, new_origin,
              flags, borderMode, borderValue):
    """
    Helper for warp_affine

    Attempts a single cv2.warpAffine call; when the image's largest
    dimension reaches ``large_warp_dim`` it falls back to piecewise
    warping via ``_large_warp`` instead.
    """
    if large_warp_dim == 'auto':
        # this is as close as we can get to actually discovering SHRT_MAX since
        # it's not introspectable through cv2. numpy and cv2 could be pointing
        # to a different limits.h, but otherwise this is correct
        # https://stackoverflow.com/a/44123354
        SHRT_MAX = np.iinfo(np.short).max
        large_warp_dim = SHRT_MAX
    max_dim = max(image.shape[0:2])
    if large_warp_dim is None or max_dim < large_warp_dim:
        try:
            # only the top 2x3 rows of the matrix are used by warpAffine
            M = np.asarray(transform_)
            return cv2.warpAffine(image, M[0:2], dsize=dsize, flags=flags,
                                  borderMode=borderMode,
                                  borderValue=borderValue)
        except cv2.error as e:
            # match OpenCV's exact size-limit assertion message to give a
            # more actionable hint before re-raising
            if e.err == 'dst.cols < SHRT_MAX && dst.rows < SHRT_MAX && src.cols < SHRT_MAX && src.rows < SHRT_MAX':
                print(
                    'Image too large for warp_affine. Bypass this error by setting '
                    'kwimage.warp_affine(large_warp_dim="auto")')
            raise e
    else:
        # make these pieces as large as possible for efficiency
        pieces_per_dim = 1 + max_dim // (large_warp_dim - 1)
        return _large_warp(image, transform_, dsize, max_dsize,
                           new_origin, flags, borderMode,
                           borderValue, pieces_per_dim)
def read_leaderboard(
    *,
    db: Session = Depends(deps.get_db),
    rank_type: schemas.RankType,
    skip: int = None,
    limit: int = 10,
    min_studied: int = 10,
    deck_id: int = None,
    date_start: datetime = None,
    date_end: datetime = None,
    current_user: models.User = Depends(deps.get_current_active_user),
) -> Any:
    """
    Retrieves leaderboard of users since the specified start time, or all time otherwise

    Query parameters: `skip`/`limit` paginate the ranking, `min_studied`
    filters out users below that study count, and `deck_id`/`date_start`/
    `date_end` optionally narrow the scope.
    """
    top_users = interface.statistics.get_leaderboard(db=db, user=current_user, rank_type=rank_type, skip=skip,
                                                     limit=limit,
                                                     min_studied=min_studied, deck_id=deck_id, date_start=date_start,
                                                     date_end=date_end)
    # the interface returns the underlying exception object on failure;
    # translate each case into a custom HTTP status for the client
    if isinstance(top_users, requests.exceptions.RequestException):
        raise HTTPException(status_code=555, detail="Connection to scheduler is down")
    if isinstance(top_users, json.decoder.JSONDecodeError):
        raise HTTPException(status_code=556, detail="Scheduler malfunction")
    return top_users
def getMultiFeatureOnsets(XAudio, Fs, hopSize):
    """
    Call Essentia's implemtation of multi feature
    beat tracking
    :param XAudio: Numpy array of raw audio samples
    :param Fs: Sample rate
    :param hopSize: Hop size of each onset function value
    :returns (tempo, beats): Average tempo, numpy array
        of beat locations in hop-size frame indices
    """
    from essentia import Pool, array
    import essentia.standard as ess
    X = array(XAudio)
    b = ess.BeatTrackerMultiFeature()
    # BeatTrackerMultiFeature returns (beat ticks in seconds, confidence)
    beats = b(X)
    print("Beat confidence: ", beats[1])
    beats = beats[0]
    # tempo (BPM) from the mean inter-beat interval in seconds
    tempo = 60/np.mean(beats[1::] - beats[0:-1])
    # convert beat times (seconds) to onset-function frame indices
    beats = np.array(np.round(beats*Fs/hopSize), dtype=np.int64)
    return (tempo, beats)
def execute(
    cool: str = typer.Option(..., "--cool",help="Path to cool file"),
    out: str = typer.Option(..., "--out", "-o", help="Path to output directory"),
    norm: str = typer.Option('KR',"--norm",help="Normalization method. It should be one of the column name of Cooler.bin(). Check it with Cooler.bins().columns (e.g., KR, VC, VC_SQRT)"),
    chrom: str = typer.Option('all', "--chrom", "-k", help="Set of chromosomes. e.g., 'chr1,chr2,chr3', 'all' will generate stripes from all chromosomes"),
    canny: float = typer.Option(2.5, "--canny", "-c", help="Canny edge detection parameter."),
    minL: int = typer.Option(10,'-l','--minL', help="Minimum length of stripe."),
    maxW: int = typer.Option(8, '-w','--maxW', help="Maximum width of stripe."),
    maxpixel: str = typer.Option('0.95,0.96,0.97,0.98,0.99','-m','--maxpixel', help="Percentiles of the contact frequency data to saturate the image. Separated by comma"),
    numcores: int = typer.Option(multiprocessing.cpu_count(), '-n','--numcores', help='The number of cores will be used.'),
    pvalue: float = typer.Option(0.1, '-p', '--pvalue', help='P-value cutoff for stripe.')
    ):
    """Finds stripe coordinates from 3D genomic data

    Typer CLI entry point: all parsing/validation is declared via the
    typer.Option defaults above; the work is delegated to stripenn.compute.
    """
    stripenn.compute(cool, out, norm, chrom, canny, minL, maxW, maxpixel, numcores, pvalue)
def check_derivation(derivation, premises, conclusion):
    """Checks if a derivation is ok. If it is, returns an empty list, otherwise returns [step, error]
    Does not check if the conclusion and premises are ok, for that there is another function"""
    for step in sorted(derivation):
        try:
            # See that the on steps are all between 1 and the current step
            for s in derivation[step]['on_steps']:
                if not 0 < s < step:
                    raise ValueError("Incorrect 'on steps' specification")
            current_sups = derivation[step]['open_sups'][:]
            previous_sups = list()
            if step > 1:
                previous_sups = derivation[step-1]['open_sups'][:]
            # If the step does not open or close any previous supossitions, or closes the last open one
            if (current_sups == previous_sups or current_sups == previous_sups[:-1]) and \
                    derivation[step]['rule'] != 'SUP':
                if derivation[step]['rule'] == 'PREM':
                    # Check that the formula is a premise
                    if derivation[step]['formula'] not in premises:
                        raise ValueError("Formula given is not among the premises")
                    # Check that this is the first step or that the previous step is also a premise
                    # And that the steps field is empty
                    if (step == 1 or derivation[step-1]['rule'] == 'PREM') and derivation[step]['on_steps'] == list():
                        pass
                    else:
                        raise ValueError("Premises go at the beggining of the derivation and have empty 'on steps'")
                else:
                    # A rule is being applied
                    prev_steps = list()
                    for s in derivation[step]['on_steps']:
                        if s not in derivation:
                            raise ValueError(f"Non existent step {s}")
                        prev_steps.append(derivation[s])
                    # NOTE(review): 'rule' is compared against strings
                    # ('PREM', 'SUP') above but *called* here — presumably
                    # non-premise rules are callables; confirm the rule
                    # representation used by callers
                    results = derivation[step]['rule'](derivation[step], prev_steps)
                    is_ok = False
                    for result in results:
                        if derivation[step]['formula'] == result:
                            is_ok = True
                    if not is_ok:
                        raise ValueError("Rule incorrectly applied")
                    # And finally, check whether the step's formula matches
                    # one of the results returned by the rule (done above)
                    pass
            # If it contains one more supposition (the current step opens one)
            elif current_sups[:-1] == previous_sups and current_sups[-1] == step:
                # The rule must be SUP and the on_steps must be empty
                if derivation[step]['rule'] == 'SUP' and derivation[step]['on_steps'] == list():
                    pass
                else:
                    raise ValueError("Only SUP can open suppositions, and it must have empty 'on steps'")
            else:
                raise ValueError("Incorrect handling of suppositions")
        except ValueError as e:
            return [step, str(e)]
    # Lastly, see that the derivation does not contain open suppositions at the last step,
    # and that the conclusion is the last step
    last_step = max(derivation)
    if derivation[last_step]['open_sups'] != list():
        return [last_step, 'The derivation ends with open suppositions']
    elif derivation[last_step]['formula'] != conclusion:
        return [last_step, 'The rules are correctly applied but the final formula is not the conclusion']
    return []
def plot_psha_psa_sigma(
    vs30_values: List,
    mag_dict: Dict,
    period_values: List,
    result_dict: Dict,
    plot_directory: pathlib.PosixPath,
):
    """Plots for pSA sigma versus T
    Parameters
    ----------
    vs30_values: List
        list of Vs30s
    mag_dict: Dict
        Dictionary with a different Mw lists for a different tectonic type
    period_values: List
        list of Periods
    result_dict: Dict
        nested dictionary with a different Vs30 and Magnitude
    plot_directory: pathlib.PosixPath
        absolute path for a directory to store plot image
    """
    # one figure per tectonic type: rows = Vs30 values, cols = magnitudes
    for tect_type, im_models in const.MODELS_DICT.items():
        x_position = 0
        fig, ax = plt.subplots(
            len(vs30_values), len(mag_dict[tect_type]), figsize=(18, 13.5), dpi=300
        )
        for vs30 in vs30_values:
            y_position = 0
            for mag in mag_dict[tect_type]:
                color_index = 0
                for model in im_models[const.PSA_IM_NAME]:
                    # To match the color with global version
                    if model.endswith("NZ"):
                        color_index -= 1
                    # NZ-specific models are drawn dashed, others solid
                    ax[x_position, y_position].plot(
                        period_values,
                        result_dict[tect_type][const.PSA_IM_NAME][vs30][mag][model],
                        label=model,
                        color=const.DEFAULT_LABEL_COLOR[color_index],
                        linestyle="dashed" if model.endswith("NZ") else "solid",
                    )
                    color_index += 1
                ax[x_position, y_position].set_title(
                    f"Sigma versus T - Mw{mag}, Vs30-{vs30}"
                )
                ax[x_position, y_position].legend(im_models[const.PSA_IM_NAME])
                ax[x_position, y_position].xaxis.set_label_text("Period [sec]")
                ax[x_position, y_position].yaxis.set_label_text("Sigma [Ln Units]")
                ax[x_position, y_position].set_xscale("log")
                ax[x_position, y_position].set_ylim([0, 1])
                ax[x_position, y_position].xaxis.grid(
                    True, which="both", linestyle="dotted"
                )
                ax[x_position, y_position].yaxis.grid(
                    True, which="both", linestyle="dotted"
                )
                y_position += 1
            x_position += 1
        fig.tight_layout()
        plt.savefig(f"{plot_directory}/{tect_type}_pSA_sigma_versus_T.png")
        plt.close()
def __trunc__(self,l) :
    """Return a bitstring formed by truncating self to length |l|; if l < 0, from left"""
    # NOTE(review): self._B appears to be the per-chunk bit width, self._l the
    # total bit length, and self._x either a bare int (short strings) or a
    # list of int chunks — inferred from the branches below; confirm against
    # the class definition.
    if not isint(l) :
        raise IndexError('length not an integer');
    if l < 0 :
        # negative l: keep the rightmost -l bits (truncate from the left)
        B = self._B;
        if self._l <= max(B,-l) :
            # short string or no actual shortening needed: delegate
            return __itrunc__(type(self)(self),l);
        s = type(self)();
        if -l <= B :
            # result fits in a single chunk: splice the last two chunks
            s._x = (((self._x[-2]<<B)|self._x[-1]) >> ((B-self._l)%B))&((1<<-l)-1);
        else :
            o = (self._l+l)%B
            nl = o-l; # new length in best case
            s._x = x = self._x[-((nl+B-1)//B):];
            if o : # have to shift
                m = (1<<B)-1;
                for i in xrange(len(x)-1) :
                    x[i] = (x[i]<<o)&m | (x[i+1]>>(B-o));
                if (B-1-l)//B < len(x) :
                    del x[-1];
                else :
                    x[-1] = (x[-1]<<o)&m;
        s._l = -l;
        return s;
    if l >= self._l : # extend with zeroes
        return __itrunc__(type(self)(self),l);
    # positive l shorter than current length: keep the leftmost l bits
    B = self._B;
    x = self._x;
    r = type(self)();
    if l <= B:
        # single-chunk result; _x may be an int when self._l <= B
        r._x = x[0]>>(B-l) if self._l > B else x>>(self._l-l);
    else :
        r._x = x[:(l+B-1)//B];
        # mask off the unused low bits of the final partial chunk
        if l%B : r._x[-1] &= -1<<(B-l%B);
    r._l = l;
    return r;
def transpose(expr: Expr,
              params: Dict[str, np.ndarray],
              schedule: Schedule,
              net: Dict[Expr, Expr],
              op_idx: Dict[str, int],
              RELAY_2_XLAYER: Dict[str, Callable],
              **kwargs) -> XLayer:
    """
    Relay Transpose to XLayer converter
    Relay
    -----
    Type: tvm.relay.op.transform.transpose
    Ref: https://docs.tvm.ai/api/python/relay/nn.html
    Parameters:
        - data (relay.Expr)
            The input data to the operator.
        - axes (None or List[int])
            The target axes order, reverse order if not specified.
    """
    if expr in net:
        logger.debug("MEMORY: TRANSPOSE")
        # This expressions is already transformed so we reuse that one
        return net[expr]
    expr_axes = expr.attrs.axes
    axes = [int(e) for e in list(expr_axes)] if expr_axes is not None else None
    data_expr, data_expr_class = expr.args[0], expr.args[0].__class__.__name__
    # recursively convert the input expression first
    data_layer = RELAY_2_XLAYER[data_expr_class](data_expr, params, schedule,
                                                 net, op_idx, RELAY_2_XLAYER,
                                                 **kwargs)
    logger.debug("transpose")
    if 'Constant' in data_layer.type:
        logger.debug("-- constant")
        # constant input: fold the transpose into the constant data
        # TODO: TEST
        data = np.transpose(data_layer.data[0], tuple(axes))
        dtype = data_layer.attrs['dtype']
        op_name = 'constant-' + str(hash(expr))
        # Merge relay ids
        relay_idx = data_layer.attrs['relay_id'][:]
        relay_idx.append(hash(expr))
        X = xlf.get_xop_factory_func('Constant')(op_name,
                                                 data,
                                                 relay_id=relay_idx)
    else:
        # Update schedule with input data layer
        if data_expr not in net:
            schedule.append(data_expr)
            net[data_expr] = data_layer
        # Create XLayer
        # Relay converts a NHWC conv2d_transpose layer into a
        # transpose -> conv2d_transpose (NCHW) -> transpose. For partitioning we
        # keep track of those relay ids inside the conv2d_transpose operation
        if 'Conv2DTranspose' in data_layer.type:
            data_layer.attrs['relay_id'].append(hash(expr))
        # Create name
        op_name = 'transpose-' + str(hash(expr))
        X = xlf.get_xop_factory_func('Transpose')(op_name, data_layer,
                                                  axes,
                                                  relay_id=[hash(expr)])
        logger.debug("-- outshape: {}".format(list(X.shapes)))
    # !Important: set input layer tops:
    data_layer.tops.append(op_name)
    return X
def version_list_url(content):
    """Returns a URL to list of content model versions,
    filtered by `content`'s grouper
    """
    content_cls = content.__class__
    versionable = _cms_extension().versionables_by_content[content_cls]
    grouping = versionable.grouping_values(content, relation_suffix=False)
    return _version_list_url(versionable, **grouping)
def as_ops(xs):
    """
    Converts an iterable of values to a tuple of Ops using as_op.
    Arguments:
        xs: An iterable of values.
    Returns:
        A tuple of Ops.
    """
    # map applies as_op lazily; tuple() materializes the result
    return tuple(map(as_op, xs))
def send_neighbors():
    """
    The node sends its neighbors to the requesting node.
    :return: <json> This node's neighbors.
    """
    # guard access to the shared blockchain node set
    bc_nodes_mutex.acquire()
    neighbors = blockchain.nodes
    # enumerate the neighbors into an index-keyed dict for the JSON body
    neighbors_dict = {}
    for i, node in enumerate(neighbors):
        neighbors_dict[i] = node
    bc_nodes_mutex.release()
    return jsonify(neighbors_dict), 200
def get_db_home(db_home_id: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDbHomeResult:
    """
    This data source provides details about a specific Db Home resource in Oracle Cloud Infrastructure Database service.
    Gets information about the specified Database Home.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_oci as oci
    test_db_home = oci.database.get_db_home(db_home_id=var["db_home_id"])
    ```
    :param str db_home_id: The Database Home [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
    """
    # build the invoke arguments expected by the provider
    __args__ = dict()
    __args__['dbHomeId'] = db_home_id
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # synchronous provider invoke; result fields are mapped one-to-one below
    __ret__ = pulumi.runtime.invoke('oci:database/getDbHome:getDbHome', __args__, opts=opts, typ=GetDbHomeResult).value
    return AwaitableGetDbHomeResult(
        compartment_id=__ret__.compartment_id,
        database=__ret__.database,
        database_software_image_id=__ret__.database_software_image_id,
        db_home_id=__ret__.db_home_id,
        db_home_location=__ret__.db_home_location,
        db_system_id=__ret__.db_system_id,
        db_version=__ret__.db_version,
        defined_tags=__ret__.defined_tags,
        display_name=__ret__.display_name,
        freeform_tags=__ret__.freeform_tags,
        id=__ret__.id,
        is_desupported_version=__ret__.is_desupported_version,
        kms_key_id=__ret__.kms_key_id,
        kms_key_version_id=__ret__.kms_key_version_id,
        last_patch_history_entry_id=__ret__.last_patch_history_entry_id,
        lifecycle_details=__ret__.lifecycle_details,
        source=__ret__.source,
        state=__ret__.state,
        time_created=__ret__.time_created,
        vm_cluster_id=__ret__.vm_cluster_id)
def alpha_080(code, end_date=None, fq="pre"):
    """
    Formula:
        (VOLUME-DELAY(VOLUME,5))/DELAY(VOLUME,5)*100
    Inputs:
        code: stock pool
        end_date: query date
        fq: price-adjustment mode (default "pre")
    Outputs:
        value of the factor
    """
    end_date = to_date_str(end_date)
    # NOTE: all locals (code, end_date, fq, func_name) are forwarded via
    # **locals(), so these variable names must not be renamed — they must
    # match the keyword arguments expected by get_alpha_191
    func_name = sys._getframe().f_code.co_name
    return JQDataClient.instance().get_alpha_191(**locals())
def sync(clone, fetch):
    """Sync repositories in current profile.

    :param clone: when True, clone repositories that are not present locally
        (otherwise report them as errors)
    :param fetch: when True, fetch instead of pull for existing repositories
    """
    def do_sync(manifest: RepositoryManifest):
        # sync a single repository according to the clone/fetch flags
        repo = manifest.handle
        if not repo.exists():
            if clone:
                with operation('Cloning', repo):
                    Repo.clone_from(manifest.remote_url, manifest.location)
            else:
                error('Repository is not cloned:', repo)
        else:
            if fetch:
                repo.vcs.fetch()
            else:
                repo.vcs.pull()
    repos = list(current_repositories())
    for_each(repos, do_sync, desc='Syncing', item_desc=lambda r: r.handle)
    success('Successfully synced', len(repos), 'repositories.')
def all_daily_file_paths_for_month(hemisphere, year, month, search_paths):
    """Return a list of all the filenames available for the given year and month.
    """
    # span the whole calendar month: day 1 through the month's last day
    _, last_day = monthrange(year, month)
    first_date = dt.date(year, month, 1)
    last_date = dt.date(year, month, last_day)
    return daily_file_paths_in_date_range(hemisphere, first_date, last_date, search_paths)
def uniques_only(iterable):
    """
    Return a list of the first occurrence of each element, preserving order.

    Works for any iterable (not just sequences) and supports unhashable
    elements: membership is checked against the uniques collected so far,
    which is equivalent to scanning a slice of the input but avoids
    re-scanning duplicates and does not require slicing support.
    """
    items = []
    for n in iterable:
        if n not in items:
            items.append(n)
    return items
def CreateHardlink(target, output):
    """Recursively create a hardlink named output pointing to target.
    Args:
      target: path to an existing file or directory
      output: path to the newly created hardlink
    If output already exists, it is first removed. In all cases, the
    parent directory containing output is created.
    """
    # lexists also catches dangling symlinks, which plain exists() misses
    if os.path.lexists(output):
        # shutil.rmtree only works on real directories; an existing file or
        # symlink must be unlinked instead (rmtree would raise on it)
        if os.path.isdir(output) and not os.path.islink(output):
            shutil.rmtree(output)
        else:
            os.remove(output)
    parent_dir = os.path.dirname(os.path.abspath(output))
    if not os.path.isdir(parent_dir):
        os.makedirs(parent_dir)
    CreateHardlinkHelper(target, output)
def test_unquotefromdtd_unimplemented_cases():
    """Test unimplemented unquoting DTD cases."""
    # Entity references and numeric character references are passed through
    # verbatim rather than being expanded.
    assert dtd.unquotefromdtd('"Color & Light"') == "Color & Light"
    assert dtd.unquotefromdtd('"Color & █"') == "Color & █"
    # Markup inside the quoted value is likewise left untouched.
    assert dtd.unquotefromdtd('"<p> and </p>"') == "<p> and </p>"
def init_rooms():
    """
    Creates a directory for rooms ('r') if it does not exist.
    """
    # exist_ok avoids the check-then-create race of exists() + mkdir()
    os.makedirs('r', exist_ok=True)
def str2date(start_time_str):
    """
    Convert Chinese-numeral characters in an expiry-date string into a
    standard numeric date string, e.g. 2020年1月1日 --> 2020-1-1
    """
    list_s = [i for i in start_time_str]
    num_list = []
    # map each Chinese numeral to its integer via CN_NUM and remember the
    # positions of the resulting digits
    for index, s in enumerate(list_s):
        list_s[index] = CN_NUM.get(s, s)
        if isinstance(list_s[index], int):
            num_list.append((index, str(list_s[index])))
    # Group consecutive digit positions into runs; separate runs are the
    # year/month/day components and get joined with '-'
    str_num = ''
    flag = 0
    if num_list:
        str_num = num_list[0][1]
        str_flag = num_list[0][0]
        for num in num_list[1:]:
            if num[0] - str_flag == 1:
                str_flag = num[0]
                str_num += num[1]
            else:
                str_flag = num[0]
                str_num = str_num + '-' + num[1]
                flag += 1
    # single run longer than 4 digits: try to parse it as a full date string
    if flag == 0 and len(str_num) > 4:
        try:
            str_n = str(parse(str_num))
            str_num = str_n.split(' ')[0]
        except Exception as e:
            print(e)
            str_num = ''
    if len(str_num):  # non-empty: normalise the date format
        if validate_date(str_num):
            str_num, flag = match_date(str_num)
        else:
            str_num = None  # default time
    return str_num
def load_base_qb():
    """
    load base QB components
    """
    # print was a Python 2 statement here; use the Python 3 function form
    print("base QB4ST loaded via static ontology file")
def load_omnical_metrics(filename):
    """Load an omnical metrics file.
    Parameters
    ----------
    filename : str
        Path to an omnical metrics file.
    Returns
    -------
    metrics : dict
        A dictionary containing omnical metrics.
    Raises
    ------
    IOError:
        If the filetype inferred from the filename is not "json" or "pkl",
        an IOError is raised.
    """
    # infer filetype from the extension
    filetype = filename.split('.')[-1]
    # load json
    if filetype == 'json':
        with open(filename, 'r') as f:
            metrics = json.load(f, object_pairs_hook=odict)
        # JSON stringifies dict keys and turns arrays into lists; restore
        # integer keys and numpy arrays.
        # loop over pols
        for key, metric in metrics.items():
            # loop over items in each pol metric dict
            for key2 in metric.keys():
                if isinstance(metric[key2], (dict, odict)):
                    if isinstance(list(metric[key2].values())[0], list):
                        metric[key2] = odict([(int(i), np.array(metric[key2][i])) for i in metric[key2]])
                    elif isinstance(list(metric[key2].values())[0], str):
                        # np.unicode_ was an alias of str and was removed in
                        # NumPy 2.0, so checking str alone is equivalent
                        metric[key2] = odict([(int(i), metric[key2][i].astype(np.complex128)) for i in metric[key2]])
                elif isinstance(metric[key2], list):
                    metric[key2] = np.array(metric[key2])
    # load pickle
    elif filetype == 'pkl':
        with open(filename, 'rb') as f:
            inp = pkl.Unpickler(f)
            metrics = inp.load()
    else:
        raise IOError("Filetype not recognized, try a json or pkl file")
    return metrics
def indexDocElement(es_Url, awsauth, docData):
    """
    Loads completed document to Elasticsearch index.
    PARAMS:
        es_Url - Elasticsearch Url for PUT requests
        awsauth - AWS credentials for Elasticsearch
        docData - formated dict like object to update elasticsearch record.
    Raises on any request failure or unexpected status code (after logging).
    """
    try:
        headers = {"Content-Type": "application/json"}
        resp = requests.put(es_Url, auth=awsauth,
                            headers=headers, json=docData)
        print(resp.content)
        # 201 = created, 200 = updated; anything else is a failure
        if resp.status_code == 201:
            logger.info('INFO: Successfully created element into ES')
        elif resp.status_code == 200:
            logger.info('INFO: Successfully updated element into ES')
        else:
            logger.error(f'FAILURE: Unable to index element {resp.content}')
            # the original bare `raise` had no active exception and produced
            # "RuntimeError: No active exception to re-raise"; raise a
            # meaningful error instead (still logged and re-raised below)
            raise RuntimeError(
                f'Unexpected status code {resp.status_code} from Elasticsearch')
    except Exception as e:
        logger.error(f'ERROR: {str(e)}')
        logger.error(f"ERROR: Unable to index line:{docData['content']}")
        raise
def getexpdata(madobj,code,odir):
    """
    gets all experiments for all available times (between years 1900 and 2100)
    note month and day canNOT be 0 or you'll get an empty list in return
    code: instrument code from Pandas Series
    madobj: Madrigal connection-like object exposing getExperiments
    odir: output directory (unused in the visible body)

    NOTE(review): the function builds `experiments` but never returns it,
    so callers currently receive None — possibly a truncated/unfinished
    implementation; confirm intent.
    """
    # this is list of all experiments for this instrument for all time
    # iterate over this to get data files for each experiment
    print('downloading experiment codes')
    exps = madobj.getExperiments(code,1900,1,1,0,0,0,2100,1,1,0,0,0,local=0)
    # let's put this experiment list in a user-friendly format
    # there might be a better way to do this to allow better querying vs. time
    experiments = DataFrame(index=[e.id for e in exps],columns=['start','end','fn'])
    for e in exps:
        # assemble start/end datetimes from the experiment's split fields
        experiments.at[e.id,'start'] = datetime(year=e.startyear, month=e.startmonth,
                                                day=e.startday,hour=e.starthour,minute=e.startmin,
                                                second=e.startsec)
        experiments.at[e.id,'end'] = datetime(year=e.endyear, month=e.endmonth,
                                              day=e.endday,hour=e.endhour,minute=e.endmin,
                                              second=e.endsec)
def tsallis(ion_temp, avg_temp, n):
    """
    Non-normalized probability of an ion at ion-temp using a Tsallis distribution
    :param ion_temp: temperature of ion (K)
    :param avg_temp: average temperature of ions (K)
    :param n: average harmonic oscillator level
    :return: value
    """
    boltzmann = 1.38e-23  # Boltzmann constant (J/K)
    energy = ion_temp * boltzmann
    thermal = n * boltzmann * avg_temp
    numerator = (n - 3) * (n - 2) * (n - 1) * energy ** 2
    denominator = 2 * thermal ** 3 * (1 + energy / thermal) ** n
    return numerator / denominator
def nasnet_dual_path_sequential(return_two=True,
                                first_ordinals=0,
                                last_ordinals=0,
                                can_skip_input=False):
    """
    NASNet specific dual path sequential container.
    Parameters:
    ----------
    return_two : bool, default True
        Whether to return two output after execution.
    first_ordinals : int, default 0
        Number of the first modules with single input/output.
    last_ordinals : int, default 0
        Number of the final modules with single input/output.
    can_skip_input : bool, default False
        Whether can skip input for some modules.
    Returns:
    -------
    DualPathSequential
        Container wired with the NASNet dual-path schemes.
    """
    # factory shim: everything is delegated to DualPathSequential, with the
    # NASNet-specific scheme for the dual-path modules and the default
    # ordinal scheme for single-path head/tail modules
    return DualPathSequential(
        return_two=return_two,
        first_ordinals=first_ordinals,
        last_ordinals=last_ordinals,
        dual_path_scheme=NasDualPathScheme(can_skip_input=can_skip_input),
        dual_path_scheme_ordinal=nasnet_dual_path_scheme_ordinal)
def _extractRGBFromHex(hexCode):
"""
Extract RGB information from an hexadecimal color code
Parameters:
hexCode (string): an hexadecimal color code
Returns:
A tuple containing Red, Green and Blue information
"""
hexCode = hexCode.lstrip('#') # Remove the '#' from the string
# Convert each byte into decimal, store it in a tuple and return
return tuple(int(hexCode[i:i+2], 16) for i in (0, 2, 4)) | 5,326,673 |
def summarise(indices, fields, **kwargs):
    """Summarise taxonomy."""
    RANKS = ('superkingdom', 'kingdom', 'phylum', 'class', 'order',
             'family', 'genus', 'species')
    summary = {}
    meta = kwargs['meta']
    # Best-effort: any missing attribute/key aborts silently, returning
    # whatever was collected so far (mirrors the original behaviour).
    try:
        taxon = meta.taxon
        if 'taxid' in taxon:
            summary['taxid'] = taxon['taxid']
        lineage = [taxon[rank] for rank in RANKS if rank in taxon]
        if lineage:
            summary['lineage'] = '; '.join(lineage)
        if 'cat' in meta.plot:
            target_rank = meta.plot['cat'].split('_')[-1]
            summary['targetRank'] = target_rank
            summary['target'] = taxon[target_rank]
    except Exception:
        pass
    return summary
def main():
    """Entry point: forward the command-line arguments to the MNN converter.

    Returns:
        int: 0 on normal completion (exit status for the caller).
    """
    # Tools.mnnconvert parses sys.argv itself, including the program name.
    Tools.mnnconvert(sys.argv)
    return 0
def _get_active_user_p_by_date(start_date, end_date, *, seed=None):
"""Generate a trajectory for % daily active users (DAU).
The series is a random walk with drift.
DAU % starts between approx. 20-30% range and then follows random walk
with drift over time. In bad cases, DAU % might halve within a year.
Args:
start_date (str): Y-m-d date string for the start of the series.
end_date (str): Y-m-d date string for the end of the series.
approx_yoy_growth_rate (int, optional): YoY growth rate for the
user count (>=1), e.g. 2 for +100%, 3 for +200%. Defaults to 3.
seed (int, optional): Random seed. Defaults to None.
Returns:
[type]: [description]
"""
if seed:
np.random.seed(seed)
date_index = pd.date_range(start_date, end_date,
closed='left')
# set up random walk (steps cumsum for drift)
ACTIVE_P = np.random.normal(0.25, 0.03)
steps = np.random.normal(0, 0.002, size=len(date_index))
DAU_p_draw = ((np.zeros(len(date_index))
+ ACTIVE_P + steps.cumsum())
.clip(0, 1))
return pd.Series(DAU_p_draw, index=date_index) | 5,326,676 |
def get_unknow_opttrans_attr(path):
    """Utility method that gives a `dict` of unknown optional transitive
    path attributes of `path`.

    Returns dict: <key> - attribute type code, <value> - unknown path-attr.
    """
    path_attrs = path.pathattr_map
    unknown_opt_tran_attrs = {}
    # `dict.iteritems()` is Python 2 only and raises AttributeError on
    # Python 3; only the values are needed here anyway.
    for attr in path_attrs.values():
        if (isinstance(attr, BGPPathAttributeUnknown) and
                attr.is_optional_transitive()):
            unknown_opt_tran_attrs[attr.type_code] = attr
    return unknown_opt_tran_attrs
def get_user_partition_groups(course_key, user_partitions, user, partition_dict_key='name'):
    """
    Collect the group for each partition in this course for this user.

    Arguments:
        course_key (CourseKey)
        user_partitions (list[UserPartition])
        user (User)
        partition_dict_key - partition attribute to use as the dict key,
            e.g. 'id' or 'name'.

    Returns:
        dict[partition_dict_key: Group]: Mapping from each partition (keyed by
        the chosen attribute) to the group the user belongs to in it.
        Partitions in which the user has no group are omitted.
    """
    partition_groups = {}
    for partition in user_partitions:
        user_group = partition.scheme.get_group_for_user(
            course_key,
            user,
            partition,
        )
        if user_group is None:
            continue  # user is not in any group for this partition
        partition_groups[getattr(partition, partition_dict_key)] = user_group
    return partition_groups
def getemptybracket(league, testyear):
    """
    Generates an empty bracket for the current year. Does not work for any other year due to the url only containing
    info for the current year. Prefer to use the output yaml as a guide and correct as needed each year
    :param league: str, mens or womens league
    :param testyear: int, requested empty bracket year
    :return: None, creates a yaml file with the seeds for the current year
    """
    # URL for bracket
    urlbracket = f'http://www.espn.com/{league}-college-basketball/tournament/bracket'
    bracket_teams = {}
    seed_list = []
    results = requests.get(urlbracket, headers=headers)
    soup = BeautifulSoup(results.text, "html.parser")
    # Gets year for the bracket (scraped value is a string)
    year = soup.find(class_="h2")
    year = str(year).replace('<h1 class="h2">NCAA Tournament Bracket - ', '').replace('</h1>', '')
    # Exits if year selected is not the current year. `year` is a string and
    # `testyear` is documented as an int, so normalize before comparing;
    # the original `testyear != year` always mismatched on types.
    if str(testyear) != year:
        log.error('No empty bracket for selected year')
        raise ValueError
    # Split regions
    region_div = soup.find_all(class_="region")
    for region in region_div:
        # Splits region into list of details
        regionlist = str(region).split('>')
        for i in regionlist:
            # If regionlist item starts with a number(seed) and is longer than 25 chars(team link) adds element to list
            if len(i) > 25 and i[:1].isnumeric():
                # Does not add element if it already exists
                if i not in seed_list:
                    seed_list.append(i)
    # Creates dictionary with seed as key for team
    for element in seed_list:
        bracket_teams[f'd1r{-(-(seed_list.index(element)+1)//16)}seed{element.split(" ")[0]}'] = \
            nameCheck(element.split('title=')[-1].replace('"', ''))
    # Dump teams dict into yaml
    if league == 'mens':
        with open(f'{bracketpath}NCAAMBracket{year}.yaml', 'w') as f:
            yaml.dump(bracket_teams, f)
    elif league == 'womens':
        with open(f'{bracketpath}NCAAWBracket{year}.yaml', 'w') as f:
            yaml.dump(bracket_teams, f)
    return None
def test_skip_multiple_dates():
    """Test multiple skip date entries, with a skip date range"""
    cal = models.Calendar()
    cal.title = "Test"
    cal.start_date = date(2021, 1, 1)
    cal.end_date = date(2021, 12, 31)
    cal.save()
    # One single-day skip plus one five-day skip range.
    models.SkipDate.objects.create(calendar=cal, date=date(2021, 1, 5))
    models.SkipDate.objects.create(calendar=cal, date=date(2021, 2, 1), end_date=date(2021, 2, 5))
    expected_days = {date(2021, 1, 5)} | {date(2021, 2, day) for day in range(1, 6)}
    assert cal.get_all_skip_days() == expected_days
def kendall_tau(solar_ts, wind_ts):
    """Kendall's tau rank correlation between solar and wind time series.

    Parameters
    ----------
    solar_ts : ndarray
        Solar time-series vector for a single site.
    wind_ts : ndarray
        Wind time-series vector for a single site.

    Returns
    -------
    float
        Kendall's tau coefficient only; the p-value is discarded.
    """
    correlation, _pvalue = kendalltau(solar_ts, wind_ts)
    return correlation
def register_all(keras_objects: bool = True, custom_kernels: bool = True) -> None:
    """Register TensorFlow Addons' objects in TensorFlow global dictionaries.

    When loading a Keras model that uses a TF Addons function, the Keras
    deserialization process must know about it. Either pass the object in
    ``custom_objects`` to ``tf.keras.models.load_model``, or call this
    function once beforehand:

    ```python
    tfa.register_all()
    tf.keras.models.load_model("my_model.tf")
    ```

    If the model contains custom (compiled) ops of TensorFlow Addons and the
    graph is loaded with `tf.saved_model.load`, the custom ops must be
    registered first, otherwise loading fails with a ``NotFoundError``
    ("Op type not registered ... in binary"). Calling this function is the
    only way to guarantee that registration:

    ```python
    tfa.register_all()
    tf.saved_model.load("my_model.tf")
    ```

    Calling this function multiple times in the same process is safe: only
    the first call has an effect, later calls are no-ops.

    Args:
        keras_objects: boolean, `True` by default. If `True`, register all
            Keras objects with
            `tf.keras.utils.register_keras_serializable(package="Addons")`.
            If `False`, no Keras objects of Addons are registered.
        custom_kernels: boolean, `True` by default. If `True`, load all
            custom kernels of TensorFlow Addons via
            `tf.load_op_library("path/to/so/file.so")`, which registers them
            automatically. If `False`, the shared object files are neither
            loaded nor registered; useful if your installation of Addons
            doesn't work well with custom ops.

    Returns:
        None
    """
    # Keras objects are registered before custom kernels, matching the
    # historical behaviour.
    for enabled, register in ((keras_objects, register_keras_objects),
                              (custom_kernels, register_custom_kernels)):
        if enabled:
            register()
def get_single_intersight_object(intersight_api_key_id,
                                 intersight_api_key,
                                 intersight_api_path,
                                 object_moid,
                                 object_type="object",
                                 intersight_base_url="https://www.intersight.com/api/v1",
                                 preconfigured_api_client=None
                                 ):
    """This is a function to perform an HTTP GET on a single object under an
    available Intersight API type.

    Args:
        intersight_api_key_id (str):
            The ID of the Intersight API key.
        intersight_api_key (str):
            The system file path of the Intersight API key.
        intersight_api_path (str):
            The path to the targeted Intersight API object type. For example,
            to specify the Intersight API type for adapter configuration
            policies, enter "adapter/ConfigPolicies". More API types can be
            found in the Intersight API reference library at
            https://intersight.com/apidocs/introduction/overview/.
        object_moid (str):
            The MOID of the single Intersight object.
        object_type (str):
            Optional; The type of Intersight object, used only in error
            messages. The default value is "object".
        intersight_base_url (str):
            Optional; The base URL for Intersight API paths. The default value
            is "https://www.intersight.com/api/v1". This value typically only
            needs to be changed if using the Intersight Virtual Appliance.
        preconfigured_api_client ("ApiClient"):
            Optional; An ApiClient class instance which handles
            Intersight client-server communication through the use of API keys.
            The default value is None. If a preconfigured_api_client argument
            is provided, empty strings ("") or None can be provided for the
            intersight_api_key_id, intersight_api_key, and intersight_base_url
            arguments.

    Returns:
        A dictionary with the retrieved object. On failure the process exits
        after printing the error, so no value is returned.

    Raises:
        Exception:
            An exception occurred due to an issue accessing the Intersight API
            path. The status code or error message will be specified.
    """
    # Define Intersight SDK ApiClient variable
    if preconfigured_api_client is None:
        api_client = get_api_client(api_key_id=intersight_api_key_id,
                                    api_secret_file=intersight_api_key,
                                    endpoint=intersight_base_url
                                    )
    else:
        api_client = preconfigured_api_client
    # Retrieving the provided object from Intersight...
    full_intersight_api_path = f"/{intersight_api_path}/{object_moid}"
    try:
        # The original auth_settings listed 'oAuth2' twice; the duplicate
        # entry was redundant and has been removed.
        api_client.call_api(resource_path=full_intersight_api_path,
                            method="GET",
                            auth_settings=['cookieAuth', 'http_signature', 'oAuth2']
                            )
        response = api_client.last_response.data
        single_intersight_object = json.loads(response)
        # The Intersight API resource path has been accessed successfully.
        return single_intersight_object
    except Exception:
        print("\nA configuration error has occurred!\n")
        print(f"There was an issue retrieving the requested {object_type} "
              "instance from Intersight.")
        print("Unable to access the provided Intersight API resource path "
              f"'{intersight_api_path}'.")
        print("Please review and resolve any error messages, then re-attempt "
              "execution.\n")
        print("Exception Message: ")
        traceback.print_exc()
        # NOTE(review): exits with status 0 even though this is an error
        # path; preserved for backward compatibility with existing scripts.
        sys.exit(0)
def test_osx_memdata_with_comma():
    """
    test osx memdata method when comma returns
    """
    # Canned sysctl outputs, keyed by the substring looked up in the command.
    fake_sysctl = {
        "hw.memsize": "4294967296",
        "vm.swapusage": "total = 1024,00M used = 160,75M free = 863,25M (encrypted)",
    }

    def _cmd_side_effect(cmd):
        for key, output in fake_sysctl.items():
            if key in cmd:
                return output

    with patch.dict(
        core.__salt__, {"cmd.run": MagicMock(side_effect=_cmd_side_effect)}
    ), patch("salt.utils.path.which", MagicMock(return_value="/usr/sbin/sysctl")):
        ret = core._osx_memdata()
        assert ret["swap_total"] == 1024
        assert ret["mem_total"] == 4096
def convert_html_to_dash(el, style=None):
    """[Quite] Conveniently auto-converts whole input HTML
    into the corresponding Python Dash HTML components. Uses
    Beautiful Soup to auto-parse into required HTML elements
    ('el') if given str input.

    Parameters
    ----------
    el : bs.element.NavigableString, str
        Accepts bs4 HTML 'element' object or raw html as string.
        (Input condition checked and converted by inner function)
        Beautiful Soup-parsed HTML element, by the tag (e.g., "<p>Hello</p>").
        If not already in bs4-format (just str instead), recursion is employed to simply
        auto bs4-parse the HTML into a `NavigableString` which can then be passed
        into the included Dash conversion.
    style : None, optional
        Style params for the HTML element.

    Returns
    -------
    Dash.html.Div()
        Where content (i.e. via attr 'children') is a list of Dash `html` components
        precisely mirroring the elements input as standard-format HTML.
    """
    # Tags with a corresponding Dash html component that we allow through.
    ALLOWED_TAGS = {
        "a",
        "address",
        "b",
        "big",
        "blockquote",
        "br",
        "caption",
        "center",
        "cite",
        "div",
        "em",
        "font",
        "footer",
        "header",
        "h1",
        "h2",
        "h3",
        "h4",
        "h5",
        "h6",
        "hr",
        "i",
        "img",
        "li",
        "ol",
        "option",
        "p",
        "pre",
        "s",
        "small",
        "span",
        "strong",
        "table",
        "td",
        "textarea",
        "th",
        "tr",
        "tt",
        "u",
        "ul",
    }

    def _extract_style(el):
        """Convert an HTML-formatted style attribute into the dict form
        Dash html components expect at instantiation.

        Parameters
        ----------
        el : bs.element.NavigableString

        Returns
        -------
        dict
            Dash-compatible style params, or None if the element has no
            style attribute.
        """
        if not el.attrs.get("style"):
            return None
        # "k1: v1; k2: v2" -> {"k1": "v1", "k2": "v2"}
        return {
            k.strip(): v.strip()
            for k, v in [
                x.split(": ") for x in el.attrs["style"].split(";")
                if len(x) > 0
            ]
        }

    # NOTE: exact `type(...)` checks are deliberate here: NavigableString
    # subclasses str, so isinstance(el, str) would wrongly match it.
    if type(el) is str:
        return convert_html_to_dash(bs.BeautifulSoup(el, "html.parser"))
    if type(el) == bs.element.NavigableString:
        return str(el)
    else:
        name = el.name
        style = _extract_style(el) if style is None else style
        contents = [convert_html_to_dash(x) for x in el.contents]
        # `name.title().lower()` in the original was redundant: title-casing
        # followed by lower-casing is simply lower-casing.
        if name.lower() not in ALLOWED_TAGS:
            return contents[0] if len(contents) == 1 else html.Div(contents)
        return getattr(html, name.title())(contents, style=style)
def _ice_d2gdt2(temp,pres):
    """Calculate ice Gibbs free energy TT-derivative.

    Calculate the second derivative of the specific Gibbs free energy of
    ice with respect to temperature. Relies on module-level constants:
    _TTP/_PTPE/_PATM are presumably the triple-point temperature/pressure
    and atmospheric pressure used for nondimensionalization, and _GCOEFFS
    holds the (partly complex-valued) series coefficients of the ice Gibbs
    function -- confirm against their definitions elsewhere in the module.

    :arg float temp: Temperature in K.
    :arg float pres: Pressure in Pa.
    :returns: Gibbs free energy derivative in J/kg/K^2.
    """
    # Reduced variables
    tn = temp/_TTP
    pn = pres/_PTPE
    _PI0 = _PATM/_PTPE
    g_tt = 0.
    # Residual terms including complex numbers.
    # sr[0] is a fixed coefficient; sr[1] is a polynomial in the reduced
    # pressure offset (pn - _PI0) built from _GCOEFFS[2].
    sr = [_GCOEFFS[1], complex(0.0,0.0)]
    for (k,rk) in enumerate(_GCOEFFS[2]):
        sr[1] += rk * (pn-_PI0)**k
    # Accumulate the real part of each complex term; only the real part
    # is physical for the derivative.
    for (tk,s) in zip(_GCOEFFS[3],sr):
        term = 1./(tk-tn) + 1./(tk+tn) - 2./tk
        g_tt += (s*term).real / _TTP
    return g_tt
def runTrainingCases(client, caseNums, start_time, end_time, endPerc = 99):
    """ Main function to run the training cases.

    Args:
        client: XPlane Client used to drive the simulator
        caseNums: indices of cases to run (passed to getParams)
        start_time: earliest local hour for the randomized time of day
        end_time: latest local hour for the randomized time of day
        -------------------
        endPerc: percentage down runway to end trajectory and reset
    """
    for i in caseNums:
        # Set time of day (random hour within [start_time, end_time]).
        time_of_day = np.random.uniform(start_time, end_time)
        # Convert hours to seconds; the extra 8 * 3600 is presumably a
        # fixed UTC offset for the sim location -- confirm.
        client.sendDREF("sim/time/zulu_time_sec", time_of_day * 3600 + 8 * 3600)
        # Case-specific trajectory parameters, then fly the sinusoidal run.
        angLimit, turn, centerCTE = getParams(i)
        runSinusoidal(client, angLimit, turn, centerCTE, endPerc=endPerc)
        # Brief pause between cases so the sim can settle before reset.
        time.sleep(1)
def ifht(A, dln, mu, offset=0.0, bias=0.0):
    """ifht multimethod.

    This is a uarray-style dispatch stub: it does not compute the inverse
    fast Hankel transform itself, it only declares which argument (`A`, as
    an ndarray) participates in backend dispatch. The actual implementation
    is supplied by the registered backend.
    """
    return (Dispatchable(A, np.ndarray),)
def make_import_names_callback(library_calls, library_addr):
    """ Return a callback function used by idaapi.enum_import_names(). """
    def callback(ea, name, ordinal):
        """ Callback function to retrieve code references to library calls. """
        library_addr[name] = ea
        # Collect every code reference to this import's address.
        library_calls[name] = [ref for ref in idautils.CodeRefsTo(ea, 0)]
        return True  # True -> Continue enumeration
    return callback
def test_many(h3):
    """Manage many buckets."""
    count = 100  # More than 10
    names = ['bucket%d' % i for i in range(count)]
    assert h3.list_buckets() == []
    # Create every bucket once; re-creating a sample of them must fail.
    for name in names:
        assert h3.create_bucket(name) == True
    for i in random.sample(range(count), 10):
        with pytest.raises(pyh3lib.H3ExistsError):
            h3.create_bucket('bucket%d' % i)
    # Every bucket is listable and has sane info.
    for name in names:
        bucket_info = h3.info_bucket(name)
        assert bucket_info.stats == None
        assert type(bucket_info.creation) == float
    assert h3.list_buckets() == names
    # Delete every bucket; deleting or inspecting a sample again must fail.
    for name in names:
        assert h3.delete_bucket(name) == True
    for i in random.sample(range(count), 10):
        with pytest.raises(pyh3lib.H3NotExistsError):
            h3.delete_bucket('bucket%d' % i)
    for i in random.sample(range(count), 10):
        with pytest.raises(pyh3lib.H3NotExistsError):
            h3.info_bucket('bucket%d' % i)
    assert h3.list_buckets() == []
def is_leap_year_test():
    """Test the is_leap_year(year) function.

    Checks century/400-year rules (1800, 1900, 1600, 2000) plus ordinary
    leap and non-leap years.
    """
    test_cases = [1800, 1900, 1600, 2000, 1324, 2020, 3001, 2029]
    expected_results = [False, False, True, True, True, True, False, False]
    # zip replaces the index-based range(len(...)) loop of the original.
    for year, expected in zip(test_cases, expected_results):
        tested = is_leap_year(year)
        assert tested == expected, (
            "for {} is {}, should be {}".format(year, tested, expected)
        )
    print("All tests have been passed.")
def test_param(param_def):
    """Test that parameter definitions are correct.

    `param_def` is presumably a fixture built from a parameter-definition
    file; each assertion below pins one section of that definition
    (options, fixed/free/dependent/dynamic params, sublayers, weights).
    """
    assert param_def.options == {'scope': 'list'}
    assert param_def.fixed == {'a': 1, 'b': 2, 'c': 3}
    assert param_def.free == {'f': [0, 1]}
    # Dependent/dynamic values are stored as expression strings, evaluated
    # later by the model code.
    assert param_def.dependent == {'d': '2 + mean([a, b])'}
    assert param_def.dynamic == {'study': {'e': 'distract / c'}}
    assert param_def.sublayers == {'f': ['task'], 'c': ['loc', 'cat']}
    # Weight keys are ((layer, unit), (layer, unit)) connection pairs.
    assert param_def.weights['fc'] == {
        (('task', 'item'), ('loc', 'item')): 'loc',
        (('task', 'item'), ('cat', 'item')): 'cat',
    }
    assert param_def.weights['cf'] == {
        (('task', 'item'), ('loc', 'item')): 'loc',
        (('task', 'item'), ('cat', 'item')): 'cat',
    }
    assert param_def.weights['ff'] == {('task', 'item'): 'loc + cat'}
    assert param_def.sublayer_param['c'] == {
        'loc': {'B_enc': 'B_enc_loc'},
        'cat': {'B_enc': 'B_enc_cat'},
    }
def has_reuse_port(*, use_env=True):
    """Return true if the platform indicates support for SO_REUSEPORT.

    Can be overridden by explicitly setting ``AIOCOAP_REUSE_PORT`` to 1 or
    0."""
    if use_env:
        override = os.environ.get('AIOCOAP_REUSE_PORT')
        if override:  # empty string falls through to platform detection
            return bool(int(override))
    return hasattr(socket, 'SO_REUSEPORT')
def unNormalizeData(normalizedData, data_mean, data_std, dimensions_to_ignore, actions, one_hot):
    """Borrowed from SRNN code. Undo per-dimension normalization.
    https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/generateMotionData.py#L12

    Args
        normalizedData: nxd matrix with normalized data
        data_mean: vector of mean used to normalize the data
        data_std: vector of standard deviation used to normalize the data
        dimensions_to_ignore: vector with dimensions not used by the model
        actions: list of strings with the encoded actions
        one_hot: whether the data comes with one-hot encoding
    Returns
        origData: de-normalized data in the original (full) dimensionality
    """
    n_frames = normalizedData.shape[0]
    full_dim = data_mean.shape[0]
    origData = np.zeros((n_frames, full_dim), dtype=np.float32)
    # The used dimensions are simply all dimensions minus the ignored ones.
    dimensions_to_use = np.setdiff1d(np.arange(full_dim), dimensions_to_ignore)
    if one_hot:
        # Strip the trailing one-hot action columns before scattering.
        origData[:, dimensions_to_use] = normalizedData[:, :-len(actions)]
    else:
        origData[:, dimensions_to_use] = normalizedData
    # Undo the z-score normalization: x * std + mean, broadcast over frames.
    return origData * np.expand_dims(data_std, 0) + np.expand_dims(data_mean, 0)
def rbf_approx(t, y, centers, eps, C):
    """Evaluate the RBF-approximated vector field at a single point.

    :param t: time (unused; present for the solve_ivp signature)
    :param y: single point
    :param centers: all RBF centers
    :param eps: radius of the Gaussians
    :param C: coefficient matrix, found with least squares
    :return: derivative (vector field value) at point y
    """
    point = y.reshape(1, y.shape[-1])
    # Gaussian radial basis evaluated against every center.
    basis = np.exp(-cdist(point, centers) ** 2 / eps ** 2)
    return basis @ C
def show(ctx):
    """show Freeldep variables"""
    # Intentionally empty: presumably a Click group/command stub whose
    # subcommands (registered elsewhere) do the work. The docstring doubles
    # as the CLI help text, so it is left unchanged.
    pass
def get_preview_fragment(request, descriptor, context):
    """
    Returns the HTML returned by the XModule's student_view or author_view (if available),
    specified by the descriptor and idx.

    Any rendering error is caught and replaced by an inline error fragment
    so a single broken block cannot break the whole preview page.
    """
    module = _load_preview_module(request, descriptor)
    # Prefer the author-facing view when the module provides one.
    preview_view = AUTHOR_VIEW if has_author_view(module) else STUDENT_VIEW
    try:
        fragment = module.render(preview_view, context)
    except Exception as exc:  # pylint: disable=broad-except
        # Deliberately broad: user-authored XBlocks can raise anything.
        log.warning("Unable to render %s for %r", preview_view, module, exc_info=True)
        fragment = Fragment(render_to_string('html_error.html', {'message': str(exc)}))
    return fragment
def expr_close(src, size = 5):
    """
    Morphological close: dilation followed by erosion.
    Same result as core.morpho.Close(), faster and workable in 32 bit.
    """
    dilated = expr_dilate(src, size)
    return expr_erode(dilated, size)
def recent_records(request):
    """
    The landing view, at /.
    """
    interval = request.GET.get('interval', 'month')
    # Only 'week' narrows the window; anything else means a month.
    days = 7 if interval == 'week' else 31
    records, last_processed = _get_recent_records_range(0, NR_OF_RECENT_RECORDS, days)
    context = {
        'active': 'home',
        'records_recent': records,
        # not very pretty but good enough for now
        'interval': interval,
        'last_processed': last_processed,
    }
    return render(request, 'isisdata/recent_records.html', context=context)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.