content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def gps_link_init(session_id):
    """Bring down all wireless interfaces and initialize the emulated
    nodes' latitude/longitude/altitude from the t=0 mobility snapshots.

    :param session_id: identifier of the emulation session; selects the
        per-session iface table and addresses CORE nodes.
    """
    # NOTE(review): table name is built by f-string interpolation; assumes
    # session_id is an internal integer, not attacker-controlled -- confirm.
    sql = f'SELECT * FROM session_{session_id}_iface'
    temp_data = nest_data.mysql_cmd1(sql, True)
    for i in temp_data:
        # An empty node2_eth means the interface has no wired peer, i.e. it
        # is a wireless interface: take it down on the node.
        if i['node2_eth'] == '':
            core.node_command(session_id, i['node1_id'], f"ip link set {i['node1_eth']} down", False)
            logger.debug(f"会话{session_id}, 节点id{i['node1_id']}, 初始化失效网卡{i['node1_eth']}")
    # Apply the initial (sequence 00000) link state and GPS positions.
    Links_Mobility(session_id, f"{link_path}-00000.csv", 'init')
    Nodes_Mobility(session_id, f"{gps_path}-00000.csv")
def starting():
    """Begin a deployment and ensure the server(s) are ready."""
def cache_fun(fname_cache, fun):
    """Return pickled data from a cache file, computing and caching on a miss.

    Parameters
    ----------
    fname_cache: string
        name of cache file to look for
    fun: function
        zero-argument callable (often a lambda) invoked when the cache
        does not exist or is empty; its result is pickled for next time

    Returns
    -------
    The cached or freshly computed value.
    """
    print("checking cache for", fname_cache)
    try:
        cache_handle = open(fname_cache, 'rb')
    except FileNotFoundError:
        pass
    else:
        with cache_handle:
            print("found cache")
            try:
                return pickle.load(cache_handle)
            except EOFError:
                # Empty/truncated cache file: fall through and recompute.
                pass
    print("cache not found, running function")
    result = fun()
    with open(fname_cache, 'wb') as cache_handle:
        pickle.dump(result, cache_handle)
    return result
def find_server_storage_UUIDs(serveruuid):
    """Return the UUIDs of all Storage rows belonging to the given server.

    @param serveruuid: UUID string of the server whose storage to list.
    @rtype : list
    @return: list of storage UUID strings (possibly empty).
    """
    storageuuids = []
    db = dbconnect()
    try:
        cursor = db.cursor()
        # Parameterized query: the previous "'%s'" % serveruuid string
        # interpolation was vulnerable to SQL injection.
        cursor.execute(
            "SELECT UUID, ServerUUID FROM Storage WHERE ServerUUID = %s",
            (serveruuid,))
        for row in cursor.fetchall():
            storageuuids.append(row[0])
    finally:
        # Close the connection even if the query raises.
        db.close()
    return storageuuids
def unpack_bidirectional_lstm_state(state, num_directions=2):
    """Reshape a packed BiLSTM hidden state so that the first dimension is
    num_layers * num_directions (layer-major, direction-minor order).
    """
    batch = state.size(1)
    per_direction = state.size(2) // num_directions
    # Split the hidden dim into per-direction chunks, stack them next to
    # the layer dim, then flatten (layer, direction) into one axis.
    chunks = torch.split(state, per_direction, dim=2)
    stacked = torch.stack(chunks, dim=1)
    return stacked.view(-1, batch, per_direction)
def isna(obj: Literal["0"]):
    """API-surface stub recorded from usage analysis; intentionally empty.

    usage.dask: 1
    """
    ...
def macd_diff(close, window_slow=26, window_fast=12, window_sign=9, fillna=False):
    """Moving Average Convergence Divergence (MACD Diff).

    The difference between the MACD line and its signal line.
    https://en.wikipedia.org/wiki/MACD

    Args:
        close(pandas.Series): dataset 'Close' column.
        window_slow(int): n period long-term.
        window_fast(int): n period short-term.
        window_sign(int): n period to signal.
        fillna(bool): if True, fill nan values.

    Returns:
        pandas.Series: New feature generated.
    """
    indicator = MACD(
        close=close,
        window_slow=window_slow,
        window_fast=window_fast,
        window_sign=window_sign,
        fillna=fillna,
    )
    return indicator.macd_diff()
def spike_histogram(series, merge_spikes=True, window_duration=60, n_bins=8):
    """Histogram of absolute power step changes, one histogram per time window.

    Args:
      * series (pd.Series): watts
      * merge_spikes (bool): Default = True
      * window_duration (float): Width of each window in seconds
      * n_bins (int): number of bins per window.
    Returns:
      spike_hist, bin_edges:
      spike_hist (pd.DataFrame):
        index is pd.DateTimeIndex of start of each time window
        columns are 2-tuples of the bin edges in watts (int)
      bin_edges (list of ints):
    """
    # First difference = step change between consecutive samples.
    fdiff = series.diff()
    if merge_spikes:
        # Collapse multi-sample ramps into single spikes (project helper).
        fdiff = get_merged_spikes_pandas(fdiff)
    abs_fdiff = np.fabs(fdiff)
    # freq as (seconds, unit) tuple -- assumes _indicies_of_periods accepts
    # this form; TODO confirm against its definition.
    freq = (window_duration, 'S')
    date_range, boundaries = _indicies_of_periods(fdiff.index,
                                                  freq=freq)
    # Log-spaced edges [0, e^1, e^2, ..., e^n_bins], rounded to int watts.
    bin_edges = np.concatenate(([0], np.exp(np.arange(1, n_bins + 1))))
    bin_edges = np.round(bin_edges).astype(int)
    cols = zip(bin_edges[:-1], bin_edges[1:])
    spike_hist = pd.DataFrame(index=date_range, columns=cols)
    for date_i, date in enumerate(date_range):
        # boundaries maps each window to [start, end) sample positions.
        start_i, end_i = boundaries[date_i]
        chunk = abs_fdiff[start_i:end_i]
        spike_hist.loc[date] = np.histogram(chunk, bins=bin_edges)[0]
    return spike_hist, bin_edges
def data_context_connectivity_context_connectivity_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_peak_information_rate_post(uuid, local_id, tapi_common_capacity_value=None):  # noqa: E501
    """data_context_connectivity_context_connectivity_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_peak_information_rate_post

    creates tapi.common.CapacityValue # noqa: E501

    :param uuid: Id of connectivity-service
    :type uuid: str
    :param local_id: Id of end-point
    :type local_id: str
    :param tapi_common_capacity_value: tapi.common.CapacityValue to be added to list
    :type tapi_common_capacity_value: dict | bytes

    :rtype: None
    """
    # Connexion-generated handler: the request body is only deserialized
    # when it arrives as JSON; the parsed value is currently unused.
    if connexion.request.is_json:
        tapi_common_capacity_value = TapiCommonCapacityValue.from_dict(connexion.request.get_json())  # noqa: E501
    # Placeholder response from the code generator -- the real logic is
    # still to be implemented.
    return 'do some magic!'
def any(*args, span=None):
    """Create a new expression that is the logical OR of all argument conditions.

    Parameters
    ----------
    args : list
        List of symbolic boolean expressions
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    expr: Expr
        Expression
    """
    if not args:
        raise ValueError("Any must take at least 1 argument")
    # Left-fold the conditions with _OpOr; a single argument is returned
    # unchanged, matching the original special case.
    result = args[0]
    for cond in args[1:]:
        result = _ffi_api._OpOr(result, cond, span)  # type: ignore
    return result
def firstOrNone(list: List[Any]) -> Any:
    """
    Return the first element of a list or None if it is not set
    """
    # Delegates to nthOrNone with index 0 -- presumably that helper returns
    # None for an out-of-range index; confirm against its definition.
    return nthOrNone(list, 0)
def ellipse_points(xy=(0., -5.), ex=254., ez=190., n=1000):
    """Sample points along an axis-aligned ellipse.

    :param xy: (x, z) center of the ellipse (the previous docstring named
        this ``ec`` although the parameter is ``xy``)
    :param ex: semi-axis of the ellipse along the first coordinate
    :param ez: semi-axis of the ellipse along the second coordinate
    :param n: number of points
    :return e: array of shape (n,2) of points on the ellipse; the first
        and last points coincide because t spans the closed range [0, 2*pi]
    """
    # Tuple default replaces the old mutable list default (shared across
    # calls); any sequence of two numbers is still accepted.
    t = np.linspace(0.0, 2.0 * np.pi, n)
    return np.column_stack((ex * np.cos(t) + xy[0],
                            ez * np.sin(t) + xy[1]))
def sum_dose_maps(dose_maps):
    """Sum a collection of dose maps into a single total-dose map.

    NaNs in individual maps are treated as zero by nansum.
    """
    ps.logger.debug('Summing %s dose_maps', len(dose_maps))
    return np.nansum(np.stack(dose_maps), axis=0)
def ion_list():
    """Return the ions that have pre-computed CLOUDY ionization fractions."""
    names = ['al2', 'c2', 'c3', 'c4', 'fe2', 'h1', 'mg2',
             'n1', 'n2', 'n3', 'n4', 'n5', 'ne8', 'o1', 'o6',
             'o7', 'o8', 'si2', 'si3', 'si4']
    return np.array(names)
def test():
    """
    For bug testing. Changes often.
    """
    # Ad-hoc scratch function: sets up a game, a board, and a handful of
    # pieces, then teleports a rook. Not part of the production flow.
    g = Game()
    Chessboard.new_board('default')
    r1 = Rook(color='w')
    n1 = Knight('a5', color='b')
    p1 = Pawn('e1', color='w')
    p2 = Pawn('e8', color='b')
    p3 = Pawn('f7', color='w')
    r1.teleport('b3')
    # p1.teleport('f4')
    # p1 = p1.promote(Bishop)
    # print(Config.white_pieces['Pawn'], Config.black_pieces['Pawn'])
def is_valid_filename(filename):
    """Determine whether a filename carries one of the supported video extensions."""
    allowed = {'mp4', 'webm', 'ogg'}
    ext = get_extension(filename)
    return bool(ext) and ext in allowed
def read(line_str, line_pos, pattern='[0-9a-zA-Z_:?!><=&]'):
    """Consume consecutive matching characters from a code line.

    Starting at ``line_pos``, characters of ``line_str`` that match the
    single-character regular expression ``pattern`` are collected until the
    first non-matching character or the end of the line.

    Args:
        line_str (str): The code line.
        line_pos (int): The position to start reading from.
        pattern (str): Regular expression matching a single character.

    Returns:
        tuple: (literal, line_pos) where ``literal`` is the string that was
        read and ``line_pos`` is the updated position.
    """
    matcher = re.compile(pattern)
    end = len(line_str)
    chars = []
    while line_pos < end and matcher.match(line_str[line_pos]):
        chars.append(line_str[line_pos])
        line_pos += 1
    return ''.join(chars), line_pos
def demographic(population: int, highest_lvl_ratio: int = ONE_MILLION, num_levels: int = NUM_LEVELS) -> Dict[int, int]:
    """
    Calculate the number of levelled NPCs in a given population.

    Args:
        population:
            The population to consider these levelled NPCs in.
        highest_lvl_ratio:
            The fraction of the population that should be of the highest level.
        num_levels:
            The number of levels to consider.

    Returns:
        A dict mapping the levels (0-highest) to the number of NPCs at each level.
        Level 0 absorbs the remainder so the values always sum to ``population``.
    """
    # Generate the proportions of each level and scale to the desired population
    fractions = generate_per_level_fractions(highest_lvl_ratio, num_levels)
    rough_numbers = {(k + 1): (v * population) for k, v in enumerate(fractions)}
    # Take the rough numbers use the whole number part and probabilistically add the remainder
    final_numbers = dict()
    for level, rough_num in rough_numbers.items():
        # divmod(x, 1) splits into integer part and fractional remainder;
        # the remainder becomes the probability of rounding up (stochastic
        # rounding, so results vary between calls).
        num, extra_prob = divmod(rough_num, 1)
        if random.random() < extra_prob:
            num += 1
        final_numbers[level] = int(num)
    # Everyone not assigned a level >= 1 is a level-0 commoner.
    final_numbers[0] = population - sum(final_numbers.values())
    return final_numbers
def gencpppxd(env, exceptions=True, ts=None):
    """Generate all cpp_*.pxd Cython header file strings for an environment.

    Parameters
    ----------
    env : dict
        Environment dictonary mapping target module names to module description
        dictionaries.
    exceptions : bool or str, optional
        Cython exception annotation. Set to True to automatically detect exception
        types, False for when exceptions should not be included, and a str (such as
        '+' or '-1') to apply to everywhere.
    ts : TypeSystem, optional
        A type system instance.

    Returns
    -------
    cpppxds : dict
        Maps environment target names to Cython cpp_*.pxd header files strings.
    """
    ts = ts or TypeSystem()
    # Modules without a srcpxd filename do not get a cpp_*.pxd header.
    return {
        name: modcpppxd(mod, exceptions, ts=ts)
        for name, mod in env.items()
        if mod['srcpxd_filename'] is not None
    }
def configure_mongo_connection(
    key: str, host: str, port: int, dbname: str, username: str, password: str
):
    """
    Configure the connection with the given `key` in fidesops with your MongoDB
    database credentials (the previous docstring said PostgreSQL, but this
    endpoint payload uses `defaultauthdb`, which is Mongo-specific).

    Returns the response JSON if successful, or throws an error otherwise.
    See http://localhost:8000/docs#/Connections/put_connection_config_secrets_api_v1_connection__connection_key__secret_put
    """
    connection_secrets_data = {
        "host": host,
        "port": port,
        "defaultauthdb": dbname,
        "username": username,
        "password": password,
    }
    response = requests.put(
        f"{FIDESOPS_URL}/api/v1/connection/{key}/secret",
        headers=oauth_header,
        json=connection_secrets_data,
    )
    if response.ok:
        # The API reports connectivity test results in "test_status"; only a
        # non-"failed" status counts as success.
        if (response.json())["test_status"] != "failed":
            logger.info(
                f"Configured fidesops mongo connection secrets via /api/v1/connection/{key}/secret"
            )
            return response.json()
    raise RuntimeError(
        f"fidesops connection configuration failed! response.status_code={response.status_code}, response.json()={response.json()}"
    )
def fully_random(entries, count):
    """Pick ``count`` entries uniformly at random, without replacement."""
    return random.sample(entries, k=count)
def _get_sets_grp(grpName="controllers_grp"):
    """Get set group

    Args:
        grpName (str, optional): group name (substring-matched against the
            connected objectSet names)

    Returns:
        PyNode: Set, or None if no connected objectSet name contains grpName.
    """
    rig = _get_simple_rig_root()
    sets = rig.listConnections(type="objectSet")
    controllersGrp = None
    for oSet in sets:
        # No break: if several sets match, the LAST one wins -- presumably
        # intentional, but confirm if multiple matches are possible.
        if grpName in oSet.name():
            controllersGrp = oSet
    return controllersGrp
def clean_text(text, cvt_to_lowercase=True, norm_whitespaces=True):
    """Prepare a text for language detection.

    Optionally lowercases the text, strips characters listed in the
    module-level ``unwanted_chars``, and optionally collapses whitespace
    runs into single spaces.

    :rtype : string
    :param text: Text to clean
    :param cvt_to_lowercase: Convert text to lowercase
    :param norm_whitespaces: Normalize whitespaces
    """
    result = text.lower() if cvt_to_lowercase else text
    # Drop every character present in the unwanted set.
    result = ''.join(c for c in result if c not in unwanted_chars)
    if norm_whitespaces:
        result = re.sub(r'\s+', ' ', result)
    return result
def test_v1_5_3_migration(
    tmp_path: Path, cloned_template: Path, supported_odoo_version: float
):
    """Test migration to v1.5.3."""
    auto_addons = tmp_path / "odoo" / "auto" / "addons"
    # This part makes sense only when v1.5.3 is not yet released: retag the
    # current test HEAD as v1.5.3 so copier can target it.
    with local.cwd(cloned_template):
        if "v1.5.3" not in git("tag").split():
            git("tag", "-d", "test")
            git("tag", "v1.5.3")
    with local.cwd(tmp_path):
        # Copy v1.5.2
        copy(src_path=str(cloned_template), vcs_ref="v1.5.2", force=True)
        assert not auto_addons.exists()
        git("add", ".")
        # retcode=1 tolerates pre-commit reformatting failing the first commit.
        git("commit", "-am", "reformat", retcode=1)
        git("commit", "-am", "copied from template in v1.5.2")
        # Update to v1.5.3
        copy(vcs_ref="v1.5.3", force=True)
        assert not auto_addons.exists()
        invoke("develop")
        assert auto_addons.is_dir()
        # odoo/auto/addons dir must be writable
        (auto_addons / "sample").touch()
def print(*args, **kwargs) -> None:
    """Proxy that forwards all arguments to the shared Console's print."""
    return get_console().print(*args, **kwargs)
def student_dashboard(lti=lti, user_id=None):
    # def student_dashboard(user_id=None):
    """
    Dashboard froms a student view. Used for students, parents and advisors

    :param lti: pylti
    :param user_id: users Canvas ID
    :return: template or error message
    """
    # TODO REMOVE ME - not using records anymore
    record = Record.query.order_by(Record.id.desc()).first()
    # get current term
    current_term = EnrollmentTerm.query.filter(EnrollmentTerm.current_term).first()
    # Prefer the explicit cut-off date; fall back to the term's end date.
    if current_term.cut_off_date:
        cut_off_date = current_term.cut_off_date
    else:
        cut_off_date = current_term.end_at
    # format as a string
    cut_off_date = cut_off_date.strftime("%Y-%m-%d")
    if user_id:  # Todo - this probably isn't needed
        # check user is NOT authorized to access this file: the requested
        # user must be in the session's user list, unless the caller holds
        # an admin/instructor LTI role.
        auth_users_id = [user["id"] for user in session["users"]]
        if not (
            int(user_id) in auth_users_id
            or lti.is_role("admin")
            or lti.is_role("instructor")
        ):  # TODO - OR role = 'admin'
            return "You are not authorized to view this users information"
        alignments, grades, user = get_user_dash_data(user_id)
        # calculation dictionaries
        calculation_dictionaries = get_calculation_dictionaries()
        if grades:
            return render_template(
                "users/dashboard.html",
                record=record,
                user=user,
                cut_off_date=cut_off_date,
                students=session["users"],
                grades=grades,
                calculation_dict=calculation_dictionaries,
                alignments=alignments,
                current_term=current_term
            )
        return "You currently don't have any grades!"
def _yaml_to_dict(yaml_string):
    """Parse a YAML string into the corresponding Python object.

    Args:
        yaml_string: String containing YAML

    Returns:
        The deserialized object (typically a dictionary)
    """
    parsed = yaml.safe_load(yaml_string)
    return parsed
def room_urls_for_search_url(url):
    """
    the urls of all rooms that are yieled in a search url

    Returns a dict mapping the room's display name (the <strong> text of
    the result link) to its room URL.
    """
    with urllib.request.urlopen(url) as response:
        html = response.read()
    soup = BeautifulSoup(html, 'html.parser')
    # Each result row is a div.erg_list_entry; only rows labelled 'Raum:'
    # are rooms. The room id is extracted from the link's '.rgid=' query
    # parameter and converted to a canonical room URL.
    room_urls = {erg_list_entry.find('a').find('strong').get_text():
                 room_url_for_room_id(erg_list_entry.find('a').get('href').split('.rgid=')[1].split('&')[0])
                 for erg_list_entry in soup.find_all('div', {'class': 'erg_list_entry'})
                 if erg_list_entry.find('div', {'class': 'erg_list_label'}).get_text() == 'Raum:'}
    return room_urls
def bittrex_get_balance(api_key, api_secret):
    """Fetch the total balances of a Bittrex account.

    args:
        required:
            api_key (str)
            api_secret (str)

    return:
        results (DataFrame) of balance information for each crypto
    """
    nonce = int(time.time() * 1000)
    url = "https://bittrex.com/api/v1.1/account/getbalances?apikey={}&nonce={}".format(api_key, nonce)
    # Bittrex v1.1 signs the full request URL with HMAC-SHA512.
    sign = hmac.new(api_secret.encode('utf-8'), url.encode('utf-8'), hashlib.sha512).hexdigest()
    response = requests.get(url, headers={'apisign': sign})
    payload = json.loads(response.text)
    return pd.DataFrame.from_dict(payload['result'])
def check_vm_snapshot_sanity(vm_id):
    """
    Checks if the snapshot information of VM is in sync with actual snapshots of the VM.

    Compares the snapshots recorded in the database against the snapshots
    libvirt reports for the domain, and classifies each as present, missing
    ('Undefined'), or existing only on the hypervisor ('Orphan').

    Returns a tuple (vm_db_id, vm_name, snapshot_check) where snapshot_check
    is a list of per-snapshot status dicts (empty if the libvirt query fails).
    """
    vm_data = db.vm_data[vm_id]
    snapshot_check = []
    try:
        # Read-only libvirt connection to the VM's host over SSH.
        conn = libvirt.openReadOnly('qemu+ssh://root@' + vm_data.host_id.host_ip.private_ip + '/system')
        domain = conn.lookupByName(vm_data.vm_identity)
        dom_snapshot_names = domain.snapshotListNames(0)
        logger.debug(dom_snapshot_names)
        conn.close()
        snapshots = db(db.snapshot.vm_id == vm_id).select()
        for snapshot in snapshots:
            if snapshot.snapshot_name in dom_snapshot_names:
                snapshot_check.append({'snapshot_name': snapshot.snapshot_name,
                                       'snapshot_type': get_snapshot_type(snapshot.type),
                                       'message': 'Snapshot present',
                                       'operation': 'None'})
                # Remove matched names so whatever remains is an orphan.
                dom_snapshot_names.remove(snapshot.snapshot_name)
            else:
                snapshot_check.append({'snapshot_id': snapshot.id,
                                       'snapshot_name': snapshot.snapshot_name,
                                       'snapshot_type': get_snapshot_type(snapshot.type),
                                       'message': 'Snapshot not present',
                                       'operation': 'Undefined'})
        # Hypervisor snapshots with no DB record are orphans.
        for dom_snapshot_name in dom_snapshot_names:
            snapshot_check.append({'vm_name': vm_data.vm_identity,
                                   'snapshot_name': dom_snapshot_name,
                                   'snapshot_type': 'Unknown',
                                   'message': 'Orphan Snapshot',
                                   'operation': 'Orphan'})
    except Exception:
        # Best-effort check: log and return whatever was collected so far.
        log_exception()
    logger.debug(snapshot_check)
    return (vm_data.id, vm_data.vm_name, snapshot_check)
def get_device(device_path: str) -> Device:
    """Safely get an evdev device handle.

    NOTE(review): this is a generator function (it yields), so calling it
    returns a generator, not a Device; it looks intended for use as
    ``with get_device(path) as dev:`` and is presumably missing a
    ``@contextlib.contextmanager`` decorator -- confirm with the callers
    before changing. The finally clause guarantees the fd is closed.
    """
    fd = open(device_path, 'rb')
    evdev = Device(fd)
    try:
        yield evdev
    finally:
        fd.close()
def evaluate_functions(payload, context, get_node_instances_method, get_node_instance_method, get_node_method):
    """
    Evaluate functions in payload.

    :param payload: The payload to evaluate.
    :param context: Context used during evaluation.
    :param get_node_instances_method: A method for getting node instances.
    :param get_node_instance_method: A method for getting a node instance.
    :param get_node_method: A method for getting a node.
    :return: payload.
    """
    processing_context = PostProcessingContext(
        None,
        context,
        get_node_instances_method,
        get_node_instance_method,
        get_node_method,
    )
    return processing_context.evaluate(payload)
def rollout(render=False):
    """ Execute a rollout and returns minus cumulative reward.

    Load :params: into the controller and execute a single rollout. This
    is the main API of this class.

    :args params: parameters as a single 1D np array
    :returns: minus cumulative reward
    # Why is this the minus cumulative reward?!?!!?

    NOTE(review): relies on module-level globals (a_rollout, env, vae,
    mdrnn, device, transform, dream_point, time_limit, mdir,
    LATENT_RECURRENT_SIZE) -- confirm they are defined before calling.
    Before dream_point steps the latent z is re-encoded from the real
    observation; afterwards it is sampled from the MDRNN mixture, with a
    5% chance of re-grounding on the real observation.
    """
    print('a rollout dims', len(a_rollout))
    #env.seed(int(rand_env_seed)) # ensuring that each rollout has a differnet random seed.
    obs = env.reset()
    # This first render is required !
    env.render()
    # Two zero tensors: LSTM hidden and cell state.
    next_hidden = [
        torch.zeros(1, LATENT_RECURRENT_SIZE).to(device)
        for _ in range(2)]
    cumulative = 0
    i = 0
    rollout_dict = {k: [] for k in ['obs', 'rew', 'act', 'term']}
    # Encode the initial observation into a latent sample.
    obs = transform(obs).unsqueeze(0).to(device)
    mu, logsigma = vae.encoder(obs)
    next_z = mu + logsigma.exp() * torch.randn_like(mu)
    while True:
        #print(i)
        action = torch.Tensor(a_rollout[i]).to(device).unsqueeze(0)
        #print('into mdrnn',action.shape, next_z.shape, next_hidden[0].shape)
        # commented out reward and done.
        mus, sigmas, logpi, _, _, next_hidden = mdrnn(action, next_z, next_hidden)
        # decode current z to see what it looks like.
        recon_obs = vae.decoder(next_z)
        if i > dream_point:
            # Save real-vs-reconstructed image pairs for inspection.
            if type(obs) != torch.Tensor:
                obs = transform(obs).unsqueeze(0)
            to_save = torch.cat([obs, recon_obs.cpu()], dim=0)
            #print(to_save.shape)
            # .view(args.batch_size*2, 3, IMAGE_RESIZE_DIM, IMAGE_RESIZE_DIM)
            save_image(to_save,
                       join(mdir, 'dream/sample_' + str(i) + '.png'))
        obs, reward, done, _ = env.step(a_rollout[i])
        if i < dream_point or np.random.random() > 0.95:
            print('using real obs at point:', i)
            obs = transform(obs).unsqueeze(0).to(device)
            mu, logsigma = vae.encoder(obs)
            next_z = mu + logsigma.exp() * torch.randn_like(mu)
        else:
            # sample the next z from the MDRNN's Gaussian mixture.
            g_probs = Categorical(probs=torch.exp(logpi).permute(0, 2, 1))
            which_g = g_probs.sample()
            #print(logpi.shape, mus.permute(0,2,1)[:,which_g].shape ,mus[:,:,which_g].shape, which_g, mus.shape )
            #print(mus.squeeze().permute(1,0).shape, which_g.permute(1,0))
            mus_g, sigs_g = torch.gather(mus.squeeze(), 0, which_g), torch.gather(sigmas.squeeze(), 0, which_g)
            #print(mus_g.shape)
            next_z = mus_g + sigs_g * torch.randn_like(mus_g)
            #print(next_z.shape)
        #for key, var in zip(['obs', 'rew', 'act', 'term'], [obs,reward, action, done]):
        #    rollout_dict[key].append(var)
        if render:
            env.render()
        cumulative += reward
        if done or i >= time_limit:
            return - cumulative
        i += 1
def _update_bcbiovm():
    """Update or install a local bcbiovm install with tools and dependencies.

    Creates/reuses a conda environment named "bcbiovm" pinned to Python 3.6
    and installs bcbio-nextgen plus bcbio-nextgen-vm into it via subprocess
    calls to conda. Raises CalledProcessError if any install step fails.
    """
    print("## CWL support with bcbio-vm")
    python_env = "python=3.6"
    conda_bin, env_name = _add_environment("bcbiovm", python_env)
    base_cmd = [conda_bin, "install", "--yes", "--name", env_name]
    subprocess.check_call(base_cmd + [python_env, "nomkl", "bcbio-nextgen"])
    # Packages that should always be brought to their latest version.
    extra_uptodate = ["cromwell"]
    subprocess.check_call(base_cmd + [python_env, "bcbio-nextgen-vm"] + extra_uptodate)
def get_request(url, access_token, origin_address: str = None):
    """
    Create a HTTP get request.

    Sends a bearer-authenticated GET (forwarding the caller's address via
    X-Forwarded-For) and returns the decoded JSON body; any non-200 status
    raises with the response text.
    """
    headers = {
        'Authorization': 'Bearer {0}'.format(access_token),
        'X-Forwarded-For': origin_address,
    }
    response = requests.get(url, headers=headers)
    if response.status_code != 200:
        raise Exception(response.text)
    return json.loads(response.text)
async def test_emissions_nursery_wraps(is_async):
    """Emissions nursery wraps callbacks as requested."""

    class SignalHost(QtCore.QObject):
        signal = QtCore.Signal()

    # Unique exception type so the assertion can't match anything else.
    class LocalUniqueException(Exception):
        pass

    result: outcome.Outcome
    event = trio.Event()
    signal_host = SignalHost()

    async def wrapper(asyncfn, *args):
        # Captures the outcome of the wrapped slot and signals completion.
        nonlocal result
        try:
            await asyncfn(*args)
        except Exception as e:
            result = outcome.Error(e)
        event.set()

    def slot():
        raise LocalUniqueException()

    async with qtrio.open_emissions_nursery(wrapper=wrapper) as emissions_nursery:
        emissions_nursery_connect_maybe_async(
            is_async=is_async,
            nursery=emissions_nursery,
            signal=signal_host.signal,
            slot=slot,
        )
        signal_host.signal.emit()
        await event.wait()
    # The wrapper must have observed the slot's exception.
    with pytest.raises(LocalUniqueException):
        result.unwrap()
def join_metadata(df: pd.DataFrame) -> pd.DataFrame:
    """Join agent sweep settings onto a frame keyed by 'agent_id'."""
    assert 'agent_id' in df.columns
    # One row per agent: its id plus all of its constructor settings.
    rows = [{'agent_id': idx, **config.settings}
            for idx, config in enumerate(make_agent_sweep())]
    agent_df = pd.DataFrame(rows)
    # Suffixes should not be needed... but added to be safe in case of clash.
    return pd.merge(df, agent_df, on='agent_id', suffixes=('', '_agent'))
def mocked_get_release_by_id(id_, includes=[], release_status=[],
                             release_type=[]):
    """Mimic musicbrainzngs.get_release_by_id, accepting only a restricted list
    of MB ids (ID_RELEASE_0, ID_RELEASE_1). The returned dict differs only in
    the release title and artist name, so that ID_RELEASE_0 is a closer match
    to the items created by ImportHelper._create_import_dir().

    Raises KeyError for any other id. NOTE: the mutable list defaults are
    never mutated here, so they are harmless in this test fixture.
    """
    # Map IDs to (release title, artist), so the distances are different.
    releases = {ImportMusicBrainzIdTest.ID_RELEASE_0: ('VALID_RELEASE_0',
                                                       'TAG ARTIST'),
                ImportMusicBrainzIdTest.ID_RELEASE_1: ('VALID_RELEASE_1',
                                                       'DISTANT_MATCH')}
    # Minimal release structure matching what the importer reads: one
    # medium with one track, one artist credit, and a release group.
    return {
        'release': {
            'title': releases[id_][0],
            'id': id_,
            'medium-list': [{
                'track-list': [{
                    'id': 'baz',
                    'recording': {
                        'title': 'foo',
                        'id': 'bar',
                        'length': 59,
                    },
                    'position': 9,
                    'number': 'A2'
                }],
                'position': 5,
            }],
            'artist-credit': [{
                'artist': {
                    'name': releases[id_][1],
                    'id': 'some-id',
                },
            }],
            'release-group': {
                'id': 'another-id',
            }
        }
    }
def test_taxii20_collection(mocker, taxii2_server_v20):
    """
    Given
        TAXII Server v2.0, collection_id
    When
        Calling collection by id api request
    Then
        Validate that right collection returned
    """
    collections = util_load_json('test_files/collections20.json')
    # Swap in the v2.0 server instance for the module-level SERVER.
    mocker.patch('TAXII2Server.SERVER', taxii2_server_v20)
    with APP.test_client() as test_client:
        response = test_client.get('/threatintel/collections/4c649e16-2bb7-50f5-8826-2a2d0a0b9631/', headers=HEADERS)
        assert response.status_code == 200
        # TAXII 2.0 requires this exact media type on responses.
        assert response.content_type == 'application/vnd.oasis.taxii+json; version=2.0'
        assert response.json == collections.get('collections')[0]
def mp_rf_optimizer_func(fn_tuple):
    """Executes in parallel creation of random forrest creation.

    fn_tuple is (train_file_path, flags_dict, file_suffix); trains a
    scikit-learn random forest (regressor or classifier per flags), emits
    generated code next to the input file (suffix `file_suffix`), pickles
    the model as *.rb.bin, and returns that pickle path.
    """
    fn, flags, file_suffix = fn_tuple
    n_trees = flags["n_trees"]
    is_regressor = flags["is_regressor"]
    sample_size = flags["sample_size"]
    n_features = flags["n_features"]
    max_depth = flags["max_depth"]
    if not file_suffix:
        file_suffix = "none"
    # Derive the output filenames from the input path: drop the penultimate
    # extension component and swap in the code/model suffixes.
    path_split = fn.split("/")
    path = "/".join(path_split[:-1]) + "/"
    fn_split = path_split[-1].split(".")
    # o_file = path + ".".join(fn_split[0:-2] + [fn_split[-1]])
    cv_file = path + ".".join(fn_split[0:-2] + [file_suffix])
    rfb_file = path + ".".join(fn_split[0:-2] + ["rb", "bin"])
    # let's compress the table first to make the job easier for random forest.
    # compression can usually achieve a ratio of 50x or more.
    # compress(fn, o_file)
    train = load(fn)
    n_features = "auto" if not n_features else float(n_features)
    # min_size = 1
    if max_depth:
        max_depth = int(max_depth)
    print("... creating random forrest for " + os.path.basename(fn) + " with " +
          str(sample_size) + " samples")
    # Identical hyperparameters for both estimators; only the estimator
    # class differs.
    if is_regressor:
        rf = RandomForestRegressor(
            n_estimators=n_trees,
            max_depth=max_depth,
            # min_samples_split=2,
            # min_samples_leaf=min_size,
            max_features=n_features,
            # max_leaf_nodes=100,
            # oob_score=True,
            # warm_start=True,
            bootstrap=True,
            random_state=42,
            n_jobs=1)
    else:
        rf = RandomForestClassifier(
            n_estimators=n_trees,
            max_depth=max_depth,
            # min_samples_split=2,
            # min_samples_leaf=min_size,
            max_features=n_features,
            # max_leaf_nodes=100,
            # oob_score=True,
            # warm_start=True,
            bootstrap=True,
            random_state=42,
            n_jobs=1)
    # Hold out the first `sample_size` shuffled rows for testing when the
    # table is large enough; otherwise train and test on everything.
    if sample_size and train.shape[0] >= 10000:
        sample_size = int(sample_size)
        np.random.seed(42)
        idx = np.random.choice(train.shape[0], train.shape[0], replace=False)
        x = train[idx[sample_size:], 0:-1]
        y = train[idx[sample_size:], -1]
        x_test = train[idx[0:sample_size], 0:-1]
        y_test = train[idx[0:sample_size], -1]
    else:
        x = train[:, 0:-1]
        y = train[:, -1]
        x_test = x
        y_test = y
    estimators = []
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        rf.fit(x, y)
    func_name = fn_split[0]
    # Bit widths / sign flags for each input column and for the output,
    # used by the code generator to size integer types.
    bits = np.ceil(
        np.log2(
            np.abs(
                np.amax(x, axis=0) -
                np.amin(x, axis=0) + 1))).astype(np.int32)
    is_neg = (np.amin(x, axis=0) < 0).astype(np.int8)
    o_bits = np.ceil(
        np.log2(
            np.abs(
                np.amax(y, axis=0) -
                np.amin(y, axis=0) + 1))).astype(np.int32)
    o_is_neg = (np.amin(y, axis=0) < 0).astype(np.int8)
    rf.bits = bits
    rf.is_neg = is_neg
    rf.o_bits = o_bits
    rf.o_is_neg = o_is_neg
    code = gen_random_forest(
        rf, func_name, bits, is_neg, o_bits, o_is_neg,
        is_regressor=is_regressor, is_top_level=False,
        is_cc=file_suffix == "cc")
    open(cv_file, "w").write("\n".join(code))
    # Rounded predictions so classification accuracy is meaningful.
    p = 1.0 * np.round(rf.predict(x_test))
    dy = np.max(train[:, -1]) - np.min(train[:, -1])
    error = np.sum(np.abs(y_test - p)) / (1.0 * p.shape[0] * dy)
    score = np.sum(y_test == p) / p.shape[0]
    print("y:", np.max(y_test), y_test[0:30].astype(np.int32))
    print("p:", np.max(p), p[0:30].astype(np.int32))
    print("... model {} with score of {:.2f}% and error of {:.2f}%".format(
        func_name, 100.0*score, 100.0*error))
    print("... saving model in {}".format(rfb_file))
    pickle.dump(rf, open(rfb_file, "wb"))
    return rfb_file
def GetTracePaths(bucket):
    """Returns a list of trace files in a bucket.

    Finds and loads the trace databases, and returns their content as a list of
    paths.

    This function assumes a specific structure for the files in the bucket. These
    assumptions must match the behavior of the backend:
    - The trace databases are located in the bucket.
    - The trace databases files are the only objects with the
      TRACE_DATABASE_PREFIX prefix in their name.

    Returns:
      list: The list of paths to traces, as strings.
    """
    traces = []
    prefix = os.path.join('/', bucket, common.clovis_paths.TRACE_DATABASE_PREFIX)
    file_stats = cloudstorage.listbucket(prefix)
    for file_stat in file_stats:
        database_file = file_stat.filename
        clovis_logger.info('Loading trace database: ' + database_file)
        with cloudstorage.open(database_file) as remote_file:
            json_string = remote_file.read()
        # Unreadable or unparsable databases are skipped, not fatal.
        if not json_string:
            clovis_logger.warning('Failed to download: ' + database_file)
            continue
        database = LoadingTraceDatabase.FromJsonString(json_string)
        if not database:
            clovis_logger.warning('Failed to parse: ' + database_file)
            continue
        # The database's JSON dict is keyed by trace path.
        for path in database.ToJsonDict():
            traces.append(path)
    return traces
def FindRunfilesDirectory() -> typing.Optional[pathlib.Path]:
    """Find the '.runfiles' directory, if there is one.

    Returns:
      The absolute path of the runfiles directory, else None if not found.
    """
    # Follow symlinks, looking for my module space. Resolve the path once;
    # the previous code recomputed os.path.abspath(__file__) a second time
    # for the regex fallback.
    stub_filename = os.path.abspath(__file__)
    module_space = stub_filename + '.runfiles'
    if os.path.isdir(module_space):
        return pathlib.Path(module_space)
    # Fall back to matching an enclosing *.runfiles directory in the path.
    match = RUNFILES_PATTERN.match(stub_filename)
    if match:
        return pathlib.Path(match.group(1))
    return None
def get_name_with_template_specialization(node):
    """
    node is a class

    returns the name, possibly added with the <..> of the specialisation,
    or None when the cursor is not a class/struct/partial specialization.
    """
    if not node.kind in (
            CursorKind.CLASS_DECL, CursorKind.STRUCT_DECL, CursorKind.CLASS_TEMPLATE_PARTIAL_SPECIALIZATION):
        return None
    tokens = get_tokens(node)
    name = node.spelling
    if tokens and tokens[0] == 'template':
        # Skip past 'template', the bracketed parameter list, and the
        # class/struct keyword plus name (hence +3) -- presumably leaving
        # the specialization's own '<...>' at the front; confirm against
        # extract_bracketed's contract.
        t = tokens[len(extract_bracketed(tokens[1:])) + 3:]
        if t and t[0] == '<':
            name = name + ''.join(extract_bracketed(t))
    return name
def linear_timeseries(
    start_value: float = 0,
    end_value: float = 1,
    start: Optional[Union[pd.Timestamp, int]] = pd.Timestamp("2000-01-01"),
    end: Optional[Union[pd.Timestamp, int]] = None,
    length: Optional[int] = None,
    freq: str = "D",
    column_name: Optional[str] = "linear",
    dtype: np.dtype = np.float64,
) -> TimeSeries:
    """Create a univariate TimeSeries that ramps linearly from `start_value`
    to `end_value`, so adjacent entries differ by
    (`end_value` - `start_value`) / (`length` - 1).

    Parameters
    ----------
    start_value
        The value of the first entry in the TimeSeries.
    end_value
        The value of the last entry in the TimeSeries.
    start
        The start of the returned TimeSeries' index. A pandas Timestamp yields a
        DatetimeIndex; an integer yields an Int64Index. Works only with either
        `length` or `end`.
    end
        Optionally, the end of the returned index. Works only with either `start`
        or `length`. Must match `start`'s type when `start` is set.
    length
        Optionally, the length of the returned index. Works only with either
        `start` or `end`.
    freq
        The time difference between two adjacent entries. Only effective when
        `start` is a pandas Timestamp; expects a DateOffset alias.
    column_name
        Optionally, the name of the value column for the returned TimeSeries
    dtype
        The desired NumPy dtype (np.float32 or np.float64) for the resulting series

    Returns
    -------
    TimeSeries
        A linear TimeSeries created as indicated above.
    """
    times = _generate_index(start=start, end=end, freq=freq, length=length)
    data = np.linspace(start_value, end_value, len(times), dtype=dtype)
    columns = pd.Index([column_name])
    return TimeSeries.from_times_and_values(times, data, freq=freq, columns=columns)
def sendpfast(x, pps=None, mbps=None, realtime=None, loop=0, iface=None):
    """Send packets at layer 2 using tcpreplay for performance

    pps: packets per second
    mpbs: MBits per second
    realtime: use packet's timestamp, bending time with realtime value
    loop: number of times to process the packet list
    iface: output interface
    """
    import tempfile
    if iface is None:
        iface = conf.iface
    options = ["--intf1=%s" % iface]
    # pps/mbps/realtime are mutually exclusive; first one set wins.
    if pps is not None:
        options.append("--pps=%i" % pps)
    elif mbps is not None:
        options.append("--mbps=%i" % mbps)
    elif realtime is not None:
        options.append("--multiplier=%i" % realtime)
    else:
        options.append("--topspeed")
    if loop:
        options.append("--loop=%i" % loop)
    # os.tempnam() was race-prone (a warning even in Python 2) and was
    # removed in Python 3; mkstemp creates the file atomically instead.
    fd, f = tempfile.mkstemp(prefix="scapy")
    os.close(fd)
    options.append(f)
    wrpcap(f, x)
    try:
        try:
            os.spawnlp(os.P_WAIT, conf.prog.tcpreplay, conf.prog.tcpreplay, *options)
        except KeyboardInterrupt:
            log_interactive.info("Interrupted by user")
    finally:
        # Always remove the temporary pcap file.
        os.unlink(f)
def outlierBySd(X: Matrix,
                max_iterations: int,
                **kwargs: Dict[str, VALID_INPUT_TYPES]):
    """
    Builtin function for detecting and repairing outliers using standard deviation

    :param X: Matrix X
    :param k: threshold values 1, 2, 3 for 68%, 95%, 99.7% respectively (3-sigma rule)
    :param repairMethod: values: 0 = delete rows having outliers, 1 = replace outliers as zeros
        2 = replace outliers as missing values
    :param max_iterations: values: 0 = arbitrary number of iteration until all outliers are removed,
        n = any constant defined by user
    :return: Matrix X with no outliers
    """
    arguments = {'X': X, 'max_iterations': max_iterations, **kwargs}
    ctx = X.sds_context
    # The DML builtin returns three matrices followed by two scalars.
    outputs = [Matrix(ctx, '') for _ in range(3)] + [Scalar(ctx, '') for _ in range(2)]
    op = MultiReturn(ctx, 'outlierBySd', outputs, named_input_nodes=arguments)
    for node in outputs:
        node._unnamed_input_nodes = [op]
    return op
def stop():
    """Stop S3 sync service by delegating to the implementation module."""
    from .s3_sync_impl import stop as _impl_stop
    _impl_stop()
def _DownloadStatusHook(a, b, c):
"""Shows progress of download."""
print '% 3.1f%% of %d bytes\r' % (min(100, float(a * b) / c * 100), c) | 33,247 |
def encode_address(address: Dict) -> bytes:
    """
    Create the bytes representation of address data.

    args:
        address: Dictionary containing the address data.
    returns:
        Bytes to be saved as address value in DB.
    """
    # Each field is NUL-terminated; join with NUL and add the trailing one.
    parts = [
        address['balance'],
        address['code'],
        str(address['inputTxIndex']),
        str(address['outputTxIndex']),
        str(address['minedIndex']),
        address['tokenContract'],
        str(address['inputTokenTxIndex']),
        str(address['outputTokenTxIndex']),
        str(address['inputIntTxIndex']),
        str(address['outputIntTxIndex']),
    ]
    return ('\0'.join(parts) + '\0').encode()
def analyze_image(image_url, tag_limit=10):
    """
    Given an image_url and a tag_limit, make requests to both the Clarifai API
    and the Microsoft Cognitive Services API to return two things:
    (1) A list of tags, limited by tag_limit,
    (2) A description of the image.

    Tags occurring in both services' results are ranked first, then the
    remaining Clarifai tags, then the remaining Microsoft tags.
    """
    # Bug fix: the original stored map() iterators; on Python 3 these are
    # single-pass, so the later membership tests silently exhausted them.
    # It also discarded the result of set(ms_tags) and left an unused set.
    clarifai_tags = [s.lower() for s in clarifai_analysis(image_url)]
    ms_tags, ms_caption = oxford_project_analysis(image_url)
    ms_tags = [s.lower() for s in ms_tags]
    ms_tag_set = set(ms_tags)  # O(1) membership tests
    # Tags present in both services come first.
    merged_tags = [tag for tag in clarifai_tags if tag in ms_tag_set]
    merged_tags += [tag for tag in clarifai_tags if tag not in merged_tags]
    merged_tags += [tag for tag in ms_tags if tag not in merged_tags]
    # Limit the tags
    return merged_tags[:tag_limit], ms_caption
def _decision_function(scope, operator, container, model, proto_type):
    """Emit ONNX nodes computing the linear-model decision function.

    score = X * coefficient + intercept

    Returns the name of the ONNX variable holding the score.
    """
    coefficients = model.coef_.T
    # Variable names are allocated in the same order as before to keep the
    # generated graph deterministic.
    name_coef = scope.get_unique_variable_name('coef')
    name_intercept = scope.get_unique_variable_name('intercept')
    name_matmul = scope.get_unique_variable_name('matmul_result')
    name_score = scope.get_unique_variable_name('score')

    container.add_initializer(name_coef, proto_type,
                              coefficients.shape, coefficients.ravel())
    container.add_initializer(name_intercept, proto_type,
                              model.intercept_.shape, model.intercept_)

    input_name = operator.inputs[0].full_name
    # Boolean/int64 inputs must be cast to the float proto type before MatMul.
    if type(operator.inputs[0].type) in (BooleanTensorType, Int64TensorType):
        name_cast = scope.get_unique_variable_name('cast_input')
        apply_cast(scope, operator.input_full_names, name_cast,
                   container, to=proto_type)
        input_name = name_cast

    container.add_node(
        'MatMul', [input_name, name_coef], name_matmul,
        name=scope.get_unique_operator_name('MatMul'))
    apply_add(scope, [name_matmul, name_intercept],
              name_score, container, broadcast=0)
    return name_score
def create_mapping(dico):
    """
    Build a two-way mapping (item -> ID and ID -> item) from a frequency
    dictionary. IDs are assigned by decreasing frequency, ties broken by item.
    """
    ordered = sorted(dico.items(), key=lambda kv: (-kv[1], kv[0]))
    id_to_item = {index: item for index, (item, _count) in enumerate(ordered)}
    item_to_id = {item: index for index, item in id_to_item.items()}
    return item_to_id, id_to_item
def _splitall(path):
"""
This function splits a path /a/b/c into a list [/,a,b,c]
"""
allparts = []
while True:
parts = os.path.split(path)
if parts[0] == path:
allparts.insert(0, parts[0])
break
if parts[1] == path:
allparts.insert(0, parts[1])
break
path = parts[0]
allparts.insert(0, parts[1])
return allparts | 33,252 |
def read_library(args):
    """Read in a haplotype library. Returns a HaplotypeLibrary() and allele coding array.

    :param args: parsed command-line arguments; exactly one of ``args.library``
        or ``args.libphase`` is expected to be set.
    """
    assert args.library or args.libphase
    filename = args.library if args.library else args.libphase
    # Bug fix: the f-string did not interpolate the file name.
    print(f'Reading haplotype library from: {filename}')
    library = Pedigree.Pedigree()
    if args.library:
        library.readInPed(args.library, args.startsnp, args.stopsnp, haps=True, update_coding=True)
    elif args.libphase:
        library.readInPhase(args.libphase, args.startsnp, args.stopsnp)
    else:
        # Unreachable thanks to the assert above; kept as a safety net.
        raise ValueError('No library specified')
    print(f'Haplotype library contains {len(library)} individuals with {library.nLoci} markers')
    haplotype_library = HaplotypeLibrary.HaplotypeLibrary(library.nLoci)
    for individual in library:
        for haplotype in individual.haplotypes:
            haplotype_library.append(haplotype, individual.idx)
    haplotype_library.freeze()
    return haplotype_library, library.allele_coding
def to_async(func: Callable, scheduler=None) -> Callable:
    """Convert a synchronous function into an asynchronous one.

    Each invocation of the returned function schedules an invocation of the
    original synchronous function on the specified scheduler and yields an
    observable of the result.

    Example:
        res = Observable.to_async(lambda x, y: x + y)(4, 3)
        res = Observable.to_async(lambda x, y: x + y, Scheduler.timeout)(4, 3)
        res = Observable.to_async(lambda x: log.debug(x),
                                Scheduler.timeout)('hello')

    Keyword arguments:
    func -- Function to convert to an asynchronous function.
    scheduler -- [Optional] Scheduler to run the function on. If not
        specified, defaults to Scheduler.timeout.

    Returns asynchronous function.
    """
    scheduler = scheduler or timeout_scheduler

    def wrapper(*args) -> ObservableBase:
        subject = AsyncSubject()

        def action(sched, state):
            # Forward either the result or the raised exception to the subject.
            try:
                outcome = func(*args)
            except Exception as ex:
                subject.on_error(ex)
                return
            subject.on_next(outcome)
            subject.on_completed()

        scheduler.schedule(action)
        return subject.as_observable()

    return wrapper
def error(message): #pragma: no cover
    """ Utility error function to ease logging: forwards *message* to the
    module-level leverage logger at ERROR level. """
    _leverage_logger.error(message)
def get_symbolic_quaternion_from_axis_angle(axis, angle, convention='xyzw'):
    """Get the symbolic quaternion associated from the axis/angle representation.

    Args:
        axis (np.array[float[3]], np.array[sympy.Symbol[3]]): 3d axis vector.
        angle (float, sympy.Symbol): angle.
        convention (str): convention to be adopted when representing the quaternion. You can choose between 'xyzw' or
            'wxyz'.

    Returns:
        np.array[float[4]]: symbolic quaternion.
    """
    half_angle = angle / 2.
    scalar = sympy.cos(half_angle)
    vx, vy, vz = sympy.sin(half_angle) * axis
    if convention == 'xyzw':
        return np.array([vx, vy, vz, scalar])
    if convention == 'wxyz':
        return np.array([scalar, vx, vy, vz])
    raise NotImplementedError("Asking for a convention that has not been implemented")
def get_session(role_arn, session_name, duration_seconds=900):
    """
    Return a boto3 session holding temporary credentials for the given role.

    The role is assumed via STS under *session_name* for *duration_seconds*
    (default 900, the STS minimum).
    """
    assumed = sts_client.assume_role(
        RoleArn=role_arn,
        RoleSessionName=session_name,
        DurationSeconds=duration_seconds,
    )["Credentials"]
    return boto3.Session(
        aws_access_key_id=assumed["AccessKeyId"],
        aws_secret_access_key=assumed["SecretAccessKey"],
        aws_session_token=assumed["SessionToken"],
    )
def _format_status(filename, current, total):
"""Wrapper to print progress while uploading.
"""
progress = 0
if total > 0:
progress = float(current)/float(total)*100
sys.stdout.write("\r%(filename)s %(percent)d %% "
"(%(current)s B of %(total)s B)" % {
'filename': filename,
'percent': int(progress),
'current': str(current),
'total': str(total)})
sys.stdout.flush() | 33,258 |
def base_kinesis_role(construct, resource_name: str, principal_resource: str, **kwargs):
    """
    Function that generates an IAM Role with a Policy for Kinesis PutRecord on
    the construct's Kinesis stream.

    :param construct: Custom construct that will use this function. From the external construct is usually 'self'.
    :param resource_name: Name of the resource. Used for naming purposes.
    :param principal_resource: Resource used to define a Service Principal. Has to match an AWS Resource. For example, 'iot' -> 'iot.amazonaws.com'.
    :param kwargs: Other parameters that could be used by the construct.
    :return: IAM Role with an IAM Policy attached, or None if creation failed.
    """
    try:
        actions = ["kinesis:PutRecord"]
        resources = [construct._kinesis_stream.stream_arn]
        role = base_service_role(construct, resource_name, principal_resource, actions=actions, resources=resources)
    except Exception:
        # NOTE(review): best-effort — the traceback is printed and the
        # function falls through, implicitly returning None.
        print(traceback.format_exc())
    else:
        return role
def set_age_distribution_default(dic, value=None, drop=False):
    """
    Set the ages_distribution key of dictionary to the given value or to the
    World's age distribution.

    :param dic: dictionary whose "age_distribution" entry is read (popped) and,
        unless *drop* is true, written back.
    :param value: optional fallback/lookup key used when the dict has no entry.
    :param drop: when True, do not store the resolved distribution back in *dic*.
    :return: the resolved age distribution.
    """
    ages = dic.pop("age_distribution", None)
    if ages is None:
        # Nothing supplied in the dict: use the world distribution, or the
        # explicit value when one was given.
        ages = world_age_distribution() if value is None else value
    if isinstance(ages, str):
        # NOTE(review): passes `value` (not `ages`) to mdm.age_distribution —
        # if the string came from `dic`, `value` may be None here; confirm
        # this is intended against the callers.
        ages = mdm.age_distribution(value)
    elif not isinstance(ages, (pd.Series, pd.DataFrame)):
        ages = get_param("age_distribution", value)
    if not drop:
        dic["age_distribution"] = ages
    return ages
async def on_message(message : discord.Message):
    """
    Entry point for every incoming Discord message; delegates directly to
    cmdHandler. HTTP errors raised by the Discord API are deliberately
    swallowed so a failed reply does not crash the bot.

    :param message: the received Discord message
    :return: None
    """
    try:
        await cmdHandler(message)
    except discord.errors.HTTPException:
        pass
def home():
    """Render and return the home page template (home.html)."""
    return render_template('home.html')
def csv2dict(file_csv, delimiter=','):
    """
    Load a csv file and return a dict mapping each column name to a copy of
    that column's values. The first row of the csv file supplies the column
    names.

    Parameters
    ----------
    file_csv : str
        The input filename including path of the csv file.
    delimiter : str, default ','
        Field delimiter used in the csv file.

    Returns
    -------
    dict
        Column name -> array of that column's values.
    """
    # Load station information: SED COSEISMIQ CSV format, temporary format.
    frame = pd.read_csv(file_csv, delimiter=delimiter, header="infer",
                        skipinitialspace=True, encoding='utf-8')
    return {name: copy.deepcopy(frame[name].values) for name in frame}
def make_bb_coord_l(contour_l, img, IMG_HEIGHT):
    """
    Convert each contour array in *contour_l* into bounding-box coordinates.

    A contour may yield a single bounding box or, when split, a list of
    boxes; all boxes are collected into one flat list.
    """
    assert isinstance(contour_l, list)
    boxes = []
    for contour in contour_l:
        bb = get_bb_coord(contour=contour, img=img, IMG_HEIGHT=IMG_HEIGHT)
        # A list means the bounding box was split into several boxes.
        if isinstance(bb, list):
            boxes.extend(bb)
        else:
            boxes.append(bb)
    return boxes
def calc_pair_scale(seqs, obs1, obs2, weights1, weights2):
    """Return entropies and weights for comparable alignment.
    A comparable alignment is one in which, for each paired state ij, all
    alternate observable paired symbols are created. For instance, let the
    symbols {A,C} be observed at position i and {A,C} at position j. If we
    observe the paired types {AC, AA}. A comparable alignment would involve
    replacing an AC pair with a CC pair.

    Returns a list of (pair, entropy, weight) tuples.

    NOTE: obs1, obs2 and the pair counter are mutated in place during the
    computation and restored before each step completes, so the ordering of
    the +=/-= operations below is behavior-critical.
    """
    # scale is calculated as the product of mi from col1 with alternate
    # characters. This means the number of states is changed by swapping
    # between the original and selected alternate, calculating the new mi
    pair_freqs = CategoryCounter(seqs)
    weights1 = dict(weights1)
    weights2 = dict(weights2)
    scales = []
    for a, b in list(pair_freqs.keys()):
        weights = weights1[a]
        pr = a + b
        # Temporarily remove the observed pair / symbol before substituting
        # each alternate; both are restored at the end of the iteration.
        pair_freqs -= pr
        obs1 -= a
        # make comparable alignments by mods to col 1
        for c, w in list(weights.items()):
            new_pr = c + b
            pair_freqs += new_pr
            obs1 += c
            entropy = mi(obs1.entropy, obs2.entropy, pair_freqs.entropy)
            scales += [(pr, entropy, w)]
            pair_freqs -= new_pr
            obs1 -= c
        obs1 += a
        # make comparable alignments by mods to col 2
        weights = weights2[b]
        obs2 -= b
        for c, w in list(weights.items()):
            new_pr = a + c
            pair_freqs += new_pr
            obs2 += c
            entropy = mi(obs1.entropy, obs2.entropy, pair_freqs.entropy)
            scales += [(pr, entropy, w)]
            obs2 -= c
            pair_freqs -= new_pr
        obs2 += b
        pair_freqs += pr
    return scales
def distance_to_line(pt, line_pt_pair):
    """
    Return the perpendicular distance of point *pt* from the line defined by
    the pair of points in the second argument. The result is signed by which
    side of the line the point lies on.
    """
    p, q = line_pt_pair
    dx = q[0] - p[0]
    dy = q[1] - p[1]
    length = sqrt(dx * dx + dy * dy)
    return (dx * p[1] - dy * p[0] - dx * pt[1] + dy * pt[0]) / length
def extension_suffixes(*args, **kwargs): # real signature unknown
    """ Returns the list of file suffixes used to identify extension modules. """
    # Auto-generated stub: the real implementation lives in a C extension;
    # this placeholder only documents the signature for IDEs/type checkers.
    pass
def test_noderoledimension_construction_item():
    """Check that we construct node role dimension when sub-props are not a list."""
    role = NodeRole("a", "b")
    expected_roles_dim = NodeRoleDimension("g1", roles=[role])
    # A bare (non-list) role should be coerced to a one-element list.
    assert NodeRoleDimension("g1", roles=role) == expected_roles_dim
    mapping = RoleDimensionMapping("a", [], {})
    expected_map_dim = NodeRoleDimension("g1", roleDimensionMappings=[mapping])
    assert NodeRoleDimension("g1", roleDimensionMappings=mapping) == expected_map_dim
def _paste(bg: Image, p_conf: PasteConf) -> None:
    """
    Paste the image named in the paste configuration onto *bg*, centered at
    the configured position, after resizing per the configured mode.

    :param bg: background image (modified in place)
    :param p_conf: paste configuration (path, target size, center position,
        resize mode 'scale' or 'trim')
    :return: None
    """
    with Image.open(p_conf.im_path) as im:
        im = im.convert('RGBA')
        im_w, im_h = im.size
        if p_conf.target_w <= 0 or p_conf.target_h <= 0:
            # NOTE(review): exit() terminates the whole process from library
            # code; raising ValueError would be friendlier to callers.
            print('Target size(w, h) must > 0!')
            exit()
        if p_conf.resize_mode == 'scale':
            # Scale mode: width is fixed to target_w and height follows
            # proportionally; may enlarge or shrink the image.
            p_conf.target_h = int(p_conf.target_w / im_w * im_h)
            # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; newer
            # Pillow requires Image.LANCZOS — confirm the pinned version.
            im = im.resize((p_conf.target_w, p_conf.target_h), Image.ANTIALIAS)
        elif p_conf.resize_mode == 'trim':
            # Trim mode: crop around the image center; can only shrink.
            if p_conf.target_w > im_w:
                p_conf.target_w = im_w
            if p_conf.target_h > im_h:
                p_conf.target_h = im_h
            crop1_x = int((im_w - p_conf.target_w) // 2)
            crop1_y = int((im_h - p_conf.target_h) // 2)
            crop2_x = int((im_w + p_conf.target_w) // 2)
            crop2_y = int((im_h + p_conf.target_h) // 2)
            crop_box = (crop1_x, crop1_y, crop2_x, crop2_y)
            im = im.crop(crop_box)
        # Paste centered at (c_pos_x, c_pos_y), using the image itself as mask
        # so the RGBA alpha channel is respected.
        bg.paste(im, (p_conf.c_pos_x - p_conf.target_w // 2, p_conf.c_pos_y - p_conf.target_h // 2), im)
def define_wfr(ekev):
    """
    Define the wavefront in the plane prior to the mirror, i.e. after d1.

    :param ekev: energy of the source
    """
    instrument = Instrument()
    instrument.build_elements(focus='nano')
    instrument.build_beamline(focus='nano')
    # Only propagate up to (and including) element d1.
    instrument.crop_beamline(element1="d1")
    beamline = instrument.get_beamline()
    wfr = construct_SA1_wavefront(512, 512, ekev, 0.25)
    beamline.propagate(wfr)
    return wfr
def ratlab(top="K+", bottom="H+", molality=False):
    """
    Python wrapper for the ratlab() function in CHNOSZ.
    Produces a expression for the activity ratio between the ions in the top and
    bottom arguments. The default is a ratio with H+, i.e.
    (activity of the ion) / [(activity of H+) ^ (charge of the ion)]

    Parameters
    ----------
    top : str, default "K+"
        The ion in the numerator of the ratio.

    bottom : str, default "H+"
        The ion in the denominator of the ratio.

    molality : bool, default False
        Use molality (m) instead of activity (a) for aqueous species?

    Returns
    -------
    A formatted string representing the activity ratio.
    """
    def _ion_charge(formula):
        # Extract the charge magnitude from a parsed formula; neutral species
        # are not allowed in an ion ratio.
        parsed = chemparse.parse_formula(formula)
        if "+" in parsed.keys():
            charge = parsed["+"]
        elif "-" in parsed.keys():
            charge = parsed["-"]
        else:
            raise Exception("Cannot create an ion ratio involving one or more neutral species.")
        return int(charge) if charge.is_integer() else charge

    # Bug fix: the original assigned the bottom ion's negative charge to
    # top_charge, leaving bottom_charge undefined (NameError) for anions.
    top_charge = _ion_charge(top)
    bottom_charge = _ion_charge(bottom)
    # A charge of 1 is rendered without an exponent.
    top_exp = "" if top_charge == 1 else "<sup>" + str(top_charge) + "</sup>"
    bottom_exp = "" if bottom_charge == 1 else "<sup>" + str(bottom_charge) + "</sup>"
    sym = "m" if molality else "a"
    return ("log(" + sym + bottom_exp + "<sub>" + html_chemname_format(top) + "</sub>/"
            + sym + top_exp + "<sub>" + html_chemname_format(bottom) + "</sub>)")
def demean_dataframe_two_cat(df_copy, consist_var, category_col, is_unbalance):
    """
    reference: Baltagi http://library.wbi.ac.id/repository/27.pdf page 176, equation (9.30)

    :param df_copy: Dataframe (modified in place: sorted and consist_var columns demeaned)
    :param consist_var: List of columns need centering on fixed effects
    :param category_col: List of the two fixed-effect (categorical) columns
    :param is_unbalance: True when the panel is unbalanced (uses the Baltagi
        two-way within transformation); False applies a simple one-pass demean
        per category.
    :return: Demeaned dataframe
    """
    if is_unbalance:
        # first determine which is uid or the category that has the most items
        max_ncat = df_copy[category_col[0]].nunique()
        max_cat = category_col[0]
        for cat in category_col:
            if df_copy[cat].nunique() >= max_ncat:
                max_ncat = df_copy[cat].nunique()
                max_cat = cat
        min_cat = category_col.copy()
        min_cat.remove(max_cat)
        min_cat = min_cat[0]
        df_copy.sort_values(by=[max_cat, min_cat], inplace=True)
        # demean on the first category variable, max_cat
        for consist in consist_var:
            df_copy[consist] = df_copy[consist] - df_copy.groupby(max_cat)[consist].transform('mean')
        dummies = get_dummies(df_copy[min_cat])  # time dummies
        dummies[max_cat] = df_copy[max_cat]
        dummies[min_cat] = df_copy[min_cat]
        dummies[max_cat] = dummies[max_cat].apply(str)
        dummies[min_cat] = dummies[min_cat].apply(str)
        dummies.set_index([max_cat, min_cat], inplace = True)
        # Center the dummies within each max_cat group (Q_delta1 projection).
        group_mu = dummies.groupby(level=max_cat).transform("mean")
        out = dummies - group_mu  # q_delta_1 @ delta_2
        # Residualize the (already demeaned) variables on the centered dummies
        # via least squares: e - d (d^+ e).
        e = df_copy[consist_var].values
        d = out.values
        resid = e - d @ lstsq(d, e, rcond=None)[0]
        df_out = pd.DataFrame(data=resid, columns=consist_var)
        df_out[max_cat] = df_copy[max_cat]
        df_out[min_cat] = df_copy[min_cat]
    else:  # balance
        # Balanced panel: sequentially subtract each category's group means.
        for consist in consist_var:
            for cat in category_col:
                df_copy[consist] = df_copy[consist] - df_copy.groupby(cat)[consist].transform('mean')
        df_out = df_copy
    return df_out
def get_time_string(time_obj=None):
    """The canonical time string format (in UTC).

    :param time_obj: an optional datetime.datetime or timestruct (defaults to
        gm_time)

    Note: Changing this function will change all times that this project uses
    in the returned data.
    """
    if isinstance(time_obj, datetime.datetime):
        if time_obj.tzinfo:
            # Bug fix: converting an aware datetime to UTC requires
            # *subtracting* the UTC offset (local = UTC + offset), not adding.
            offset = time_obj.tzinfo.utcoffset(time_obj)
            utc_dt = time_obj - offset
            return datetime.datetime.strftime(utc_dt, STRING_FORMAT)
        return datetime.datetime.strftime(time_obj, STRING_FORMAT)
    elif isinstance(time_obj, time.struct_time):
        return time.strftime(STRING_FORMAT, time_obj)
    elif time_obj is not None:
        raise TypeError("get_time_string takes only a time_struct, none, or a "
                        "datetime. It was given a %s" % type(time_obj))
    return time.strftime(STRING_FORMAT, time.gmtime())
def apply_activation_checkpointing_wrapper(
    model, checkpoint_wrapper_fn=checkpoint_wrapper, check_fn=lambda _: True
):
    """
    Wrap submodules of ``model`` with activation checkpointing based on a
    user-defined policy: each submodule for which ``check_fn`` returns
    ``True`` is replaced by ``checkpoint_wrapper_fn(submodule)``.

    Note::
        ``model`` is modified in place; the root module itself is never
        wrapped (use :class:`CheckpointWrapper` directly for that).

    Usage::
        model = nn.Sequential(
            nn.Linear(10, 10), nn.Linear(10, 10), nn.Linear(10, 10)
        )
        check_fn = lambda l: isinstance(l, nn.Linear)
        apply_activation_checkpointing(model, checkpoint_wrapper_fn=checkpoint_wrapper, check_fn=check_fn)

    Args:
        model (nn.Module):
            The model whose submodules should be wrapped with activation checkpointing.
        checkpoint_wrapper_fn (Optional[Callable[nn.Module]]):
            A `Callable` which will wrap modules.
        check_fn (Optional[Callable[nn.Module, nn.Module]]):
            Receives each submodule and returns ``True``/``False`` to decide
            whether that submodule is wrapped.

    Returns: None (`model` is modified inplace)
    """
    wrap_policy = partial(lambda_auto_wrap_policy, lambda_fn=check_fn)
    return _recursive_wrap(
        module=model,
        auto_wrap_policy=wrap_policy,
        wrapper_cls=checkpoint_wrapper_fn,
        ignored_modules=set(),
        ignored_params=set(),
        only_wrap_children=True,
    )
def train_validation_split(x, y):
    """
    Split the data into train and validation sets, sizing the validation
    share by dataset size.

    Args:
        x: (pandas.DataFrame) Feature set / Affecting features
        y: (pandas.Dataframe) Target set / dependent feature

    Returns:
        x_train: (pandas.DataFrame) Feature set / Affecting features for training
        y_train: (pandas.Dataframe) Target set / dependent feature for training
        x_val: (pandas.DataFrame) Feature set / Affecting features for validation
        y_val: (pandas.Dataframe) Target set / dependent feature for validation
    """
    n_rows = x.shape[0]
    if n_rows > 100000:       # large datasets
        val_ratio = 0.2
    elif n_rows > 1000:       # medium size datasets
        val_ratio = 0.15
    else:                     # small datasets
        val_ratio = 0.1
    # Fixed random_state keeps the split reproducible across runs.
    x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=val_ratio, random_state=42)
    print(f"Validation data prepared."
          f" Train - Validation ratio taken {int(100 - val_ratio * 100)} % - {int(val_ratio * 100)} % .")
    return x_train, y_train, x_val, y_val
def gen_data_set(file_name, num_of_var, num_of_clause, atom_count_set):
    """Write a random CNF formula in DIMACS format to *file_name*.

    Each of *num_of_clause* clauses has its size drawn uniformly from
    *atom_count_set*; literals are drawn without repetition and negated
    with probability 1/2.
    """
    with open(file_name, "w") as out_file:
        out_file.write("p cnf " + str(num_of_var) + " " + str(num_of_clause) + "\n")
        for _ in range(num_of_clause):
            clause_size = random.choice(atom_count_set)
            clause = []
            for _ in range(clause_size):
                atom = random.randint(1, num_of_var)
                # Re-draw until the (signed) literal list doesn't contain it.
                while atom in clause:
                    atom = random.randint(1, num_of_var)
                if random.randint(0, 1) == 0:
                    atom = -atom
                clause.append(atom)
                out_file.write(str(atom) + " ")
            out_file.write("0\n")
def test_record_default_with_long() -> None:
    """Confirm that record defaults are respected."""
    tool_path = get_data("tests/wf/paramref_arguments_roundtrip.cwl")
    err_code, stdout, stderr = get_main_output([tool_path])
    # The workflow must succeed and echo back the record unchanged.
    assert err_code == 0
    result = json.loads(stdout)["same_record"]
    assert result["first"] == "y"
    assert result["second"] == 23
    assert result["third"] == 2.3
    # Values beyond 32-bit (and beyond 64-bit) int range must round-trip.
    assert result["fourth"] == 4242424242
    assert result["fifth"] == 4200000000000000000000000000000000000000000
    assert result["sixth"]["class"] == "File"
    assert result["sixth"]["basename"] == "whale.txt"
    assert result["sixth"]["size"] == 1111
    assert (
        result["sixth"]["checksum"] == "sha1$327fc7aedf4f6b69a42a7c8b808dc5a7aff61376"
    )
def topk_accuracy(
    rankings: np.ndarray, labels: np.ndarray, ks: Union[Tuple[int, ...], int] = (1, 5)
) -> List[float]:
    """Compute top-k accuracy for each requested value of k.

    Args:
        rankings: 2D rankings array: shape = (instance_count, label_count)
        labels: 1D correct labels array: shape = (instance_count,)
        ks: The k values in top-k, either an int or a list of ints.

    Returns:
        list of float: TOP-K accuracy for each k in ks

    Raises:
        ValueError
            If the dimensionality of the rankings or labels is incorrect,
            if the length of rankings and labels aren't equal, or if any
            computed accuracy is NaN.
    """
    if isinstance(ks, int):
        ks = (ks,)
    _check_label_predictions_preconditions(rankings, labels)
    # Only the first max(ks) ranked predictions can matter for any k.
    max_k = np.max(ks)
    hits = rankings[:, :max_k] == labels.reshape(-1, 1)
    accuracies = [hits[:, :k].max(1).mean() for k in ks]
    if any(np.isnan(accuracies)):
        raise ValueError(f"NaN present in accuracies {accuracies}")
    return accuracies
def has_substr(line, chars):
    """Return True if *line* contains at least one of the given substrings."""
    return any(substr in line for substr in chars)
def multifiltertestmethod(testmethod, strfilters):
    """returns a version of the testmethod that operates on filtered strings using strfilter

    The returned function applies every filter in *strfilters* to both
    arguments before delegating to *testmethod*; it inherits the original
    method's docstring and (when present) its ``name`` attribute.
    """
    def filteredmethod(str1, str2):
        return testmethod(multifilter(str1, strfilters), multifilter(str2, strfilters))
    filteredmethod.__doc__ = testmethod.__doc__
    filteredmethod.name = getattr(testmethod, 'name', testmethod.__name__)
    return filteredmethod
def add_node_set(structure, guids, name):
    """
    Adds node set information from Rhino point guids.

    Parameters
    ----------
    structure : obj
        Structure object to update.
    guids : list
        Rhino point guids.
    name : str
        Name of the new node set.

    Returns
    -------
    None
    """
    nodes = []
    for guid in guids:
        # Skip anything that is not a Rhino point.
        if not rs.IsPoint(guid):
            continue
        node = structure.check_node_exists(rs.PointCoordinates(guid))
        if node is not None:
            nodes.append(node)
    structure.add_set(name=name, type='node', selection=nodes)
def getbasins(basin, Nx, Ny, Nz, S1, S2, S3):
    """Tile Bader basin labels over an (S1, S2, S3) supercell.

    Args:
        basin (numpy array): basin labels of the unit cell, indexed
            ``basin[z, y, x]`` with shape (Nz, Ny, Nx); labels start at 1.
        Nx, Ny, Nz (int): unit-cell grid dimensions.
        S1, S2, S3 (int): supercell multipliers along x, y, z.

    Returns:
        numpy array: flat (C-order) array of length Nx*Ny*Nz*S1*S2*S3 with a
        unique basin id per (unit-cell basin, image-counter) pair.
    """
    cells_per_image = Nx * Ny * Nz
    images = S1 * S2 * S3
    basins = np.zeros((Nz * S3, Ny * S2, Nx * S1))
    # `count` walks the supercell grid in C order; every cells_per_image
    # points the image counter `image` (1-based) advances.
    count = 0
    image = 1
    for kss in range(Nz * S3):
        for jss in range(Ny * S2):
            for iss in range(Nx * S1):
                count += 1
                if count == cells_per_image + 1:
                    image += 1
                    count = 1
                # Offset the unit-cell label by the image counter so each
                # (basin, image) pair gets a unique id.
                basins[kss, jss, iss] = int(images * (basin[kss % Nz, jss % Ny, iss % Nx] - 1)) + image
    return np.resize(basins, cells_per_image * images)
def test_ExogenousParameters_init():
    """Test initialization and sampling of ExogenousParameters objects."""
    # Set a PRNG key to use
    key = jax.random.PRNGKey(0)
    # Construction and sampling should work for a range of sizes.
    for size in range(1, 10):
        ep = ExogenousParameters(size)
        assert ep is not None
        assert ep.sample(key).shape == (size,)
def makeframefromhumanstring(s):
    """Create a frame from a human readable string

    Strings have the form:

        <request-id> <stream-id> <stream-flags> <type> <flags> <payload>

    This can be used by user-facing applications and tests for creating
    frames easily without having to type out a bunch of constants.

    Request ID and stream IDs are integers.

    Stream flags, frame type, and flags can be specified by integer or
    named constant.

    Flags can be delimited by `|` to bitwise OR them together.

    If the payload begins with ``cbor:``, the following string will be
    evaluated as Python literal and the resulting object will be fed into
    a CBOR encoder. Otherwise, the payload is interpreted as a Python
    byte string literal.
    """
    # Split on at most 5 spaces so the payload may itself contain spaces.
    fields = s.split(b' ', 5)
    requestid, streamid, streamflags, frametype, frameflags, payload = fields
    requestid = int(requestid)
    streamid = int(streamid)
    # Each flag token may be a named constant or a raw integer; OR them all.
    finalstreamflags = 0
    for flag in streamflags.split(b'|'):
        if flag in STREAM_FLAGS:
            finalstreamflags |= STREAM_FLAGS[flag]
        else:
            finalstreamflags |= int(flag)
    if frametype in FRAME_TYPES:
        frametype = FRAME_TYPES[frametype]
    else:
        frametype = int(frametype)
    # Frame flags are validated against the set allowed for this frame type.
    finalflags = 0
    validflags = FRAME_TYPE_FLAGS[frametype]
    for flag in frameflags.split(b'|'):
        if flag in validflags:
            finalflags |= validflags[flag]
        else:
            finalflags |= int(flag)
    if payload.startswith(b'cbor:'):
        payload = b''.join(
            cborutil.streamencode(stringutil.evalpythonliteral(payload[5:]))
        )
    else:
        payload = stringutil.unescapestr(payload)
    return makeframe(
        requestid=requestid,
        streamid=streamid,
        streamflags=finalstreamflags,
        typeid=frametype,
        flags=finalflags,
        payload=payload,
    )
def display_full_name_with_correct_capitalization(full_name):
    """
    Normalize the capitalization of a person's full name.

    See documentation here: https://github.com/derek73/python-nameparser

    :param full_name: raw full-name string
    :return: the name with each part correctly capitalized
    """
    # Bug fix: str.strip() returns a new string; the original discarded it.
    full_name = full_name.strip()
    full_name_parsed = HumanName(full_name)
    full_name_parsed.capitalize()
    full_name_capitalized = str(full_name_parsed)
    return full_name_capitalized
def staging():
    """
    Use the staging server.

    Populates the global Fabric ``env`` with connection settings
    (user/host/port), project paths, database credentials, and process names
    drawn from GLUE_SETTINGS['staging']. Mutates ``env`` in place; returns None.
    """
    # the flavor of the django environment
    env.flavor = 'staging'
    # the process name, also the base name
    env.procname = GLUE_SETTINGS['staging']['process_name']
    # mix_env
    env.mix_env = 'prod'
    # the dockerfile
    env.dockerfile = 'Dockerfile'
    # username for the ssh connection
    env.user = GLUE_SETTINGS['ssh_user']
    # hostname for the ssh connection
    env.host = GLUE_SETTINGS['ssh_host']
    # port for the ssh connection
    env.port = GLUE_SETTINGS['ssh_port']
    # here we build the hosts string
    env.hosts = ['%s@%s:%s' % (env.user, env.host, env.port)]
    # password to use (only when one was provided at module level)
    if SSH_PASS != '':
        env.passwords = {'%s@%s:%s' % (env.user, env.host, env.port): SSH_PASS}
    # project base
    env.project_base = GLUE_SETTINGS['staging']['project_base']
    # the path to work with
    env.path = os.path.join(GLUE_SETTINGS['staging']['project_base'],
                            GLUE_SETTINGS['project_name'])
    # the user we will create on host, also runs manage.py tasks etc.
    env.project_user = GLUE_SETTINGS['project_name']
    # the group we add the user to. this is what the project path
    # gets chowned to
    env.project_group = GLUE_SETTINGS['project_group']
    # the postgres database username we create
    env.db_user = GLUE_SETTINGS['staging']['db_user']
    # name of the postgres database we create
    env.db_name = GLUE_SETTINGS['staging']['db_name']
    # password to the postgres database user
    env.db_pass = GLUE_SETTINGS['staging']['db_pass']
    # full path to our project's public/ directory
    env.public_path = os.path.join(env.path,
                                   GLUE_SETTINGS['staging']['public_path'])
    # full path to our project's media directory
    env.media_path = os.path.join(env.public_path,
                                  GLUE_SETTINGS['staging']['media_path'])
    # application name
    env.project_name = GLUE_SETTINGS['project_name']
def load_experiment_artifacts(
    src_dir: str, file_name: str, selected_idxs: Optional[Iterable[int]] = None
) -> Dict[int, Any]:
    """
    Load all the files in dirs under `src_dir` that match `file_name`.

    This function assumes subdirectories within `dst_dir` have the following
    structure:
    ```
    {dst_dir}/result_{idx}/{file_name}
    ```
    where `idx` denotes an integer encoded in the subdirectory name.

    The function returns the contents of the files, indexed by the integer
    extracted from the subdirectory index name.

    :param src_dir: directory containing subdirectories of experiment results
        It is the directory that was specified as `--dst_dir` in `run_experiment.py`
        and `run_notebook.py`
    :param file_name: the file name within each run results subdirectory to load
        E.g., `result_bundle.pkl`
    :param selected_idxs: specific experiment indices to load
        - `None` (default) loads all available indices
    """
    pairs = yield_experiment_artifacts(src_dir, file_name, selected_idxs)
    # OrderedDict(iterable-of-pairs) preserves the generator's ordering.
    return collections.OrderedDict(pairs)
def upgrade_oozie_database_and_sharelib():
    """
    Performs the creation and upload of the sharelib and the upgrade of the
    database. This method will also perform a kinit if necessary.
    It is run before the upgrade of oozie begins exactly once as part of the
    upgrade orchestration.

    Since this runs before the upgrade has occurred, it should not use any
    "current" directories since they will still be pointing to the older
    version of Oozie. Instead, it should use versioned directories to ensure
    that the commands running are from the oozie version about to be upgraded to.

    :return: None

    :raises Fail: when the target stack version of the upgrade/downgrade
        cannot be determined.
    """
    import params
    # get the kerberos token if necessary to execute commands as oozie
    if params.security_enabled:
        oozie_principal_with_host = params.oozie_principal.replace("_HOST", params.hostname)
        command = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal_with_host}")
        Execute(command, user=params.oozie_user)
    # upgrade_stack is (from_version, to_version); index 1 is the target.
    upgrade_stack = stack_select._get_upgrade_stack()
    if upgrade_stack is None or len(upgrade_stack) < 2 or upgrade_stack[1] is None:
        raise Fail("Unable to determine the stack that is being upgraded to or downgraded to.")
    stack_version = upgrade_stack[1]
    # upgrade oozie DB
    Logger.info('Upgrading the Oozie database...')
    oozie.download_database_library_if_needed()
    # Use the versioned (not "current") oozie path, per the note above.
    database_upgrade_command = "/usr/iop/{0}/oozie/bin/ooziedb.sh upgrade -run".format(stack_version)
    Execute(database_upgrade_command, user=params.oozie_user, logoutput=True)
    create_sharelib()
def get_market_metrics(market_portfolio: pd.DataFrame, t_costs: float, index_id: str, index_name: str,
                       test_data_start_date: datetime.date, test_data_end_date: datetime.date, market_logs=False) -> \
        Tuple[pd.Series, pd.Series, pd.Series]:
    """
    Get performance metrics for full equal-weighted market portfolio
    :param market_logs: Write log data for market portfolio
    :param test_data_start_date: Start date (with regard to test set)
    :param test_data_end_date: End date (with regard to test set)
    :param index_name: Index name
    :param index_id: Index ID
    :param t_costs: Transaction costs per half-turn
    :param market_portfolio: DataFrame including full test set (market portfolio)
    :return: Tuple of market portfolio metrics (Series) and cumulative returns series (Series)
    """
    # NOTE(review): pd.Series([]) without an explicit dtype emits a
    # DeprecationWarning on modern pandas; consider pd.Series(dtype=float).
    market_portfolio_metrics = pd.Series([]).rename('Market')
    market_portfolio_metrics.index.name = 'Metrics'
    # Equal-weighted daily market return (cross-sectional mean per datadate),
    # converted into a daily excess-return series.
    excess_return_series = calc_excess_returns(
        market_portfolio.loc[:, 'daily_return'].groupby(level=['datadate']).mean()).rename('daily_excess_return')
    excess_return_series = excess_return_series.reset_index()
    # Re-index by ISO-formatted date strings so downstream logging/serialization
    # gets plain string keys.
    excess_return_series.loc[:, 'datadate'] = excess_return_series['datadate'].dt.strftime(
        '%Y-%m-%d')
    excess_return_series.set_index('datadate', inplace=True)
    # Compound daily excess returns into a cumulative return curve.
    cumulative_excess_return = (excess_return_series.get('daily_excess_return') + 1).cumprod().rename(
        'Cumulative Market Return')
    cumulative_excess_return.index.name = 'Time'
    # cumulative_return.plot(title='Cumulative Market Performance')
    # plt.legend(loc='best')
    # plt.show()
    # JOB: Calculate metrics
    # noinspection DuplicatedCode
    annualized_sharpe = calc_sharpe(market_portfolio.loc[:, ['daily_return']].groupby(level=['datadate']).mean(),
                                    annualize=True)
    # NOTE(review): the '- 4 * t_costs' terms below charge four half-turns of
    # transaction costs per day -- confirm this matches the turnover model.
    annualized_sharpe_atc = calc_sharpe(
        market_portfolio.loc[:, ['daily_return']].groupby(level=['datadate']).mean() - 4 * t_costs,
        annualize=True)
    annualized_sortino = calc_sortino(market_portfolio.loc[:, ['daily_return']].groupby(level=['datadate']).mean(),
                                      annualize=True)
    annualized_sortino_atc = calc_sortino(
        market_portfolio.loc[:, ['daily_return']].groupby(level=['datadate']).mean() - 4 * t_costs,
        annualize=True)
    mean_daily_return = market_portfolio.groupby(level=['datadate'])['daily_return'].mean().mean()
    mean_daily_excess_return = calc_excess_returns(
        market_portfolio.groupby(level=['datadate'])['daily_return'].mean().rename('daily_return')).mean()
    market_portfolio_metrics.loc['Mean Daily Return'] = mean_daily_return
    market_portfolio_metrics.loc['Annualized Return'] = annualize_metric(mean_daily_return)
    market_portfolio_metrics.loc['Mean Daily Excess Return'] = mean_daily_excess_return
    market_portfolio_metrics.loc['Annualized Excess Return'] = annualize_metric(mean_daily_excess_return)
    market_portfolio_metrics.loc['Annualized Sharpe'] = annualized_sharpe
    market_portfolio_metrics.loc['Annualized Sortino'] = annualized_sortino
    # JOB: Add metrics incl. transaction costs of 5 bps per half-turn
    market_portfolio_metrics.loc['Mean Daily Return_atc'] = mean_daily_return - 4 * t_costs
    market_portfolio_metrics.loc['Annualized Return_atc'] = annualize_metric(mean_daily_return - 4 * t_costs)
    market_portfolio_metrics.loc['Mean Daily Excess Return_atc'] = mean_daily_excess_return - 4 * t_costs
    market_portfolio_metrics.loc['Annualized Excess Return_atc'] = annualize_metric(
        mean_daily_excess_return - 4 * t_costs)
    market_portfolio_metrics.loc['Annualized Sharpe_atc'] = annualized_sharpe_atc
    market_portfolio_metrics.loc['Annualized Sortino_atc'] = annualized_sortino_atc
    # Assemble a log record mirroring the model-run schema; model-specific
    # fields are None because this row describes the market benchmark.
    data_record = {
        'ID': config.run_id,
        'Experiment Run End': datetime.datetime.now().isoformat(),
        'Parent Model Type': 'Market',
        'Model Type': 'Market',
        'Index ID': index_id,
        'Index Name': index_name,
        'Study Period ID': config.study_period_id,
        'Study Period Length': None,
        'Period Range': None,
        'Study Period Start Date': None,
        'Study Period End Date': None,
        'Test Set Size': None,
        'Days Test Set': None,
        'Constituent Number': None,
        'Average Cross Section Size': None,
        'Test Set Start Date': test_data_start_date.isoformat(),
        'Test Set End Date': test_data_end_date.isoformat(),
        'Total Accuracy': None,
        'Top-k Accuracy Scores': None,
        'Top-k Mean Daily Return': market_portfolio_metrics['Mean Daily Return'],
        'Top-k Mean Daily Excess Return': market_portfolio_metrics['Mean Daily Excess Return'],
        'Top-k Annualized Excess Return': market_portfolio_metrics['Annualized Excess Return'],
        'Top-k Annualized Return': market_portfolio_metrics['Annualized Return'],
        'Top-k Annualized Sharpe': market_portfolio_metrics['Annualized Sharpe'],
        'Top-k Annualized Sortino': market_portfolio_metrics['Annualized Sortino'],
        'Mean Daily Return (Short)': None,
        'Mean Daily Return (Long)': None,
        'Top-k Mean Daily Return_atc': market_portfolio_metrics['Mean Daily Return_atc'],
        'Top-k Annualized Return_atc': market_portfolio_metrics['Annualized Return_atc'],
        'Top-k Mean Daily Excess Return_atc': market_portfolio_metrics['Mean Daily Excess Return_atc'],
        'Top-k Annualized Excess Return_atc': market_portfolio_metrics['Annualized Excess Return_atc'],
        'Top-k Annualized Sharpe_atc': market_portfolio_metrics['Annualized Sharpe_atc'],
        'Top-k Annualized Sortino_atc': market_portfolio_metrics['Annualized Sortino_atc'],
        'Top-k Mean Daily Return (Short)_atc': None,
        'Top-k Mean Daily Return (Long)_atc': None,
        'Model Configs': None,
        'Total Epochs': None,
        'Return Series': excess_return_series['daily_excess_return'].to_dict(),
        'Prediction Error': None
    }
    if market_logs:
        write_to_logs(data_record)
    return market_portfolio_metrics, excess_return_series, cumulative_excess_return | 33,289 |
def get_puf_columns(seed=True, categorical=True, calculated=True):
    """Get a list of columns.

    Args:
        seed: Whether to include standard seed columns: ['MARS', 'XTOT', 'S006']
        categorical: Whether to include categorical columns: ['F6251', 'MIDR', 'FDED', 'DSI']
        calculated: Whether to include calculated columns: ['E00100', 'E09600']

    Returns: List of columns.
    """
    cols = []
    # Append each group in the same fixed order as the flags.
    for wanted, group in (
        (seed, SEED_COLS),
        (categorical, CATEGORICAL_COLS),
        (calculated, CALCULATED_COLS),
    ):
        if wanted:
            cols.extend(group)
    return cols
def list_all_vms(osvars):
    """Return all VM objects across every tenant, as reported by Nova.

    :param osvars: mapping with OpenStack credentials
        (OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, OS_AUTH_URL).
    :return: list of server objects from the Nova compute service.
    """
    client = novaclient.Client(
        '2',
        osvars['OS_USERNAME'],
        osvars['OS_PASSWORD'],
        osvars['OS_TENANT_NAME'],
        osvars['OS_AUTH_URL'],
        service_type="compute")
    # detailed=True plus all_tenants so the listing is not scoped to one tenant.
    search_opts = {'all_tenants': '1'}
    return client.servers.list(True, search_opts)
def test_highest4():
    """First new test."""
    sentence = 'making up sentences is very hard'
    expected = 'sentences'
    assert high(sentence) == expected
def exec_cmd(cmd, path):
    """Execute |cmd| in |path| and return its captured output.

    Fixes: the original used Python-2-only ``except IOError, (errno,
    strerror)`` syntax (a SyntaxError on Python 3), and both ``except``
    handlers only re-raised, so the try/except was dead code; exceptions
    now simply propagate, which is the same observable behavior.

    :param cmd: command line to run; it is split on whitespace, so
        individual arguments must not contain embedded spaces.
    :param path: working directory in which to run the command.
    :return: dict with 'out' (captured stdout) and 'err' (captured stderr).
    :raises OSError: if the command cannot be started.
    """
    sys.stdout.write("-------- Running \"%s\" in \"%s\"...\n" % (cmd, path))
    parts = cmd.split()
    # shell=True only on Windows so builtin/.bat commands resolve there.
    process = subprocess.Popen(parts, cwd=path,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               shell=(sys.platform == 'win32'))
    out, err = process.communicate()
    return {'out': out, 'err': err}
def test_upload_retry(tmpdir, default_repo, capsys):
    """Print retry messages when the upload response indicates a server error."""
    default_repo.disable_progress_bar = True
    # Stub the HTTP session so every POST returns a 500, forcing the
    # repository's retry loop to run to exhaustion.
    default_repo.session = pretend.stub(
        post=lambda url, data, allow_redirects, headers: response_with(
            status_code=500, reason="Internal server error"
        )
    )
    # Minimal on-disk wheel plus a stub package object with just the
    # attributes upload() reads.
    fakefile = tmpdir.join("fake.whl")
    fakefile.write(".")
    package = pretend.stub(
        safe_name="fake",
        metadata=pretend.stub(version="2.12.0"),
        basefilename="fake.whl",
        filename=str(fakefile),
        metadata_dictionary=lambda: {"name": "fake"},
    )
    # Upload with default max_redirects of 5
    default_repo.upload(package)
    # One message per attempt; note there is deliberately no newline between
    # the 'Received ...' line and the 'Package upload ...' line.
    msg = [
        (
            "Uploading fake.whl\n"
            'Received "500: Internal server error" '
            f"Package upload appears to have failed. Retry {i} of 5"
        )
        for i in range(1, 6)
    ]
    captured = capsys.readouterr()
    assert captured.out == "\n".join(msg) + "\n"
    # Upload with custom max_redirects of 3
    default_repo.upload(package, 3)
    msg = [
        (
            "Uploading fake.whl\n"
            'Received "500: Internal server error" '
            f"Package upload appears to have failed. Retry {i} of 3"
        )
        for i in range(1, 4)
    ]
    captured = capsys.readouterr()
    assert captured.out == "\n".join(msg) + "\n" | 33,294 |
def test_orientation_error5():
    """Yaw angles of +3 and -3 degrees should differ by 6 degrees."""
    angle_a = np.deg2rad(3)
    angle_b = np.deg2rad(-3)
    err = get_orientation_error_deg(angle_a, angle_b)
    assert np.allclose(err, 6.0, atol=1e-2)
def contains_numbers(iterable):
    """Return True if the first item of *iterable* is a number.

    Robustness fix: an empty sequence now returns False instead of
    raising IndexError.

    :param iterable: an indexable sequence (only element 0 is inspected).
    :return: True if the sequence is non-empty and its first element is a
        :class:`numbers.Number`, else False.
    """
    return bool(iterable) and isinstance(iterable[0], Number)
def get_image(img: PathStr) -> PILImage:
    """Get picture from either a path or URL"""
    if not str(img).startswith("http"):
        # Local path: load directly.
        return PILImage.create(img)
    # URL: download into a temporary directory, then load from disk.
    url = str(img)
    filename = url.split("?")[0].rpartition("/")[-1]
    with tempfile.TemporaryDirectory() as tmpdirname:
        dest = Path(tmpdirname) / filename
        # NOTE: to be replaced by download(url, dest=dest) [from unpackai.utils]
        with requests.get(url) as resp:
            resp.raise_for_status()
            dest.write_bytes(resp.content)
        return PILImage.create(dest)
def FAIMSNETNN_model(train_df, train_y, val_df, val_y, model_args, cv=3):
    """FIT neuralnetwork model.

    Runs a GridSearchCV over feed-forward network hyperparameters, then
    refits the best configuration on the training data.

    :param train_df: training features (rows x features).
    :param train_y: training targets.
    :param val_df: validation features.
    :param val_y: validation targets.
    :param model_args: dict with at least 'grid' ('tiny' or full) and
        'jobs' (n_jobs for the grid search).
    :param cv: number of cross-validation folds.
    :return: (df_results, cv_res, fitted GridSearchCV, refit model).
    """
    input_dim = train_df.shape[1]
    # 'tiny' grid for quick runs; otherwise search the full space.
    # Epochs are fixed at 50 during the search; the final refit uses 100.
    if model_args["grid"] == "tiny":
        param_grid = {"n1": [100], "d1": [0.3, 0.1], "lr": [0.001, 0.01], "epochs": [50],
                      "batch_size": [32, 128], "input_dim": [input_dim]}
    else:
        param_grid = {"n1": [100, 200, 500], "d1": [0.5, 0.3, 0.1],
                      "lr": [0.0001, 0.001, 0.01], "epochs": [50],
                      "batch_size": [32, 64, 128], "input_dim": [input_dim]}
    model = keras.wrappers.scikit_learn.KerasRegressor(build_fn=create_model, verbose=0)
    gs = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=model_args["jobs"], cv=cv,
                      return_train_score=True, verbose=2)
    gsresults = gs.fit(train_df, train_y)
    # history = model.fit(train_df, train_y, validation_split=0.1, epochs=200, batch_size=16)
    print(gs.best_params_)
    # Overwrite the CV epoch count in place before refitting the best config;
    # note this mutation also affects str(gs.best_params_) recorded below.
    gs.best_params_["epochs"] = 100
    model = create_model(**gs.best_params_)
    # NOTE(review): 'history' is assigned but never returned or inspected.
    history = model.fit(train_df, train_y, validation_split=0.1, epochs=gs.best_params_["epochs"],
                        batch_size=gs.best_params_["batch_size"])
    df_results, cv_res = format_summary(train_df, val_df, train_y, val_y, model, "FNN", gsresults)
    cv_res["params"] = str(gs.best_params_)
    return df_results, cv_res, gs, model | 33,298 |
def assign_exam_blocks(data, departments, splitted_departments, number_exam_days):
    """
    Assign departments to exam blocks and optimize this schedule to reduce conflicts.
    data (pandas.DataFrame): Course enrollments data
    departments (dict): Departments (str key) and courses in departments (list value)
    splitted_departments (iterable): Departments split into "-1"/"-2" halves in `departments`
    number_exam_days (int): The number of days for exams
    returns (list): Departments for each exam block
    """
    # create two exam blocks per day
    exam_blocks = [[] for i in range(2*number_exam_days)]
    # sequentially fill exam_blocks with departments in random order
    # (round-robin over blocks so sizes stay balanced)
    i = 0
    department_list = list(departments)
    index = np.random.permutation(np.arange(len(department_list)))
    for j in range(len(department_list)):
        department = department_list[index[j]]
        exam_blocks[i%(2*number_exam_days)].append(department)
        i += 1
    # swap exam blocks until this swap method can no longer reduce conflicts
    total_conflicts = sum([count_conflicts(data, departments, block) for block in exam_blocks])
    conflicts_reduced = True
    while conflicts_reduced:
        conflicts_reduced = False
        # do swapping between departments in exam block i and exam block j
        # (do_swapping mutates the two block lists in place)
        for i in range(len(exam_blocks)-1):
            for j in range(i+1, len(exam_blocks)):
                do_swapping(data, departments, exam_blocks[i], exam_blocks[j])
        # do swapping between the two blocks of split departments to try to minimize
        # conflicts on a course basis
        course_conflicts_reduced = True
        while course_conflicts_reduced:
            current_conflicts = sum([count_conflicts(data, departments, block) for block in exam_blocks])
            course_conflicts_reduced = False
            # do this for every splitted department
            for department in splitted_departments:
                courses = [departments[department+"-1"], departments[department+"-2"]]
                # this allows us to swap from first department exam block to other department exam block
                for course_index in (0, 1):
                    # swap any course from one department section to the other if it reduces conflicts
                    i = 0
                    while i < len(courses[course_index]):
                        # For a 2-element list, courses[~course_index] is "the
                        # other half" (~0 == -1, ~1 == -2): try moving course i
                        # across, keep the move only if it reduces conflicts.
                        courses[~course_index].append(courses[course_index].pop(i))
                        tmp_conflicts = sum([count_conflicts(data, departments, block) for block in exam_blocks])
                        if tmp_conflicts >= current_conflicts:
                            # Not an improvement: undo by moving it back to slot i.
                            courses[course_index].insert(i, courses[~course_index].pop())
                        else:
                            course_conflicts_reduced = True
                        i += 1
                        current_conflicts = sum([count_conflicts(data, departments, block) for block in exam_blocks])
        if current_conflicts < total_conflicts:
            total_conflicts = current_conflicts
            conflicts_reduced = True
    return exam_blocks | 33,299 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.