content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import hashlib
def get_sign(data_dict, key):
    """Compute an uppercase MD5 signature for a parameter dictionary.

    :param data_dict: parameters to sign, as a dict
    :param key: merchant secret (the API_KEY), appended as the final
        ``key=...`` pair before hashing
    :return: uppercase hex digest string
    """
    # Sort parameters by key name in ascending order before joining.
    sorted_items = sorted(data_dict.items())
    # Build "k1=v1&k2=v2...&key=<secret>" and hash it with MD5.
    payload = "&".join(u"{}={}".format(name, value) for name, value in sorted_items) + "&key=" + key
    digest = hashlib.md5(payload.encode("utf-8"))
    return digest.hexdigest().upper()
def string():
    """String representation (a fixed literal)."""
    template = "{:s}"
    return template.format('something')
def find_films_in_location(films: pd.DataFrame) -> pd.DataFrame:
    """Find films filmed in a certain location.

    Args:
        films (pd.DataFrame): films with their locations; must contain a
            "Location" column (and optionally a precomputed "Coordinates" one).
            NOTE: rows with any NaN are dropped from *films* in place.

    Returns:
        pd.DataFrame: films whose "Location" mentions the target location,
            with a "Coordinates" column attached.
    """
    films.dropna(inplace=True)
    # change for more precise address for better performance
    # .copy() avoids pandas SettingWithCopy issues when adding the column below.
    local_films = films.loc[films["Location"].str.contains("Ukraine")].copy()
    # Fix: the original tested the misspelled "Cooridinates" column, so the
    # (potentially expensive) geocoding ran even when coordinates existed.
    if "Coordinates" not in local_films.columns:
        local_films["Coordinates"] = local_films["Location"].apply(find_location)
    return local_films
from typing import Any
import yaml
import os
def get_saved_schemas() -> Any:
    """Lists file and display names of all saved schemas.

    File extensions are stripped, and the names are sorted.

    Returns:
        A JSON-serializable dict with a "schemaFiles" list.
    """
    # NOTE(review): relies on module-level SCHEMA_DIR / EVENT_REC_DIR Path
    # constants defined elsewhere in this file.
    listing = []
    for path in sorted(SCHEMA_DIR.glob("*.yaml")):
        with open(path) as handle:
            # Only the first YAML document in each file is used.
            meta = yaml.safe_load(handle)[0]
        version_parts = meta["schema_version"].split("-")
        # e.g. "2021-01-02-10-30-00" -> "2021-01-02, 10:30:00"
        stamp = "-".join(version_parts[:3]) + ", " + ":".join(version_parts[3:6])
        listing.append(
            {
                "file": path.stem,
                "schema_id": meta["schema_id"],
                "schema_name": meta["schema_name"],
                "schema_dscpt": meta["schema_dscpt"],
                "timestamp": stamp,
                # Flag schemas that already have recorded events on disk.
                "augmentation_flag": os.path.exists(EVENT_REC_DIR / f"{path.stem}.json"),
            }
        )
    return {"schemaFiles": listing}
def _if_installed(pname):
    """Run if the given program name is installed.

    Returns a decorator factory: the wrapped function executes only when
    ``pname`` can be located by ``shared.which``; otherwise the call is a
    silent no-op that returns ``None``.

    :param pname: name of the executable to look for.
    """
    def argcatcher(func):
        def decorator(*args, **kwargs):
            # Look for an argument carrying install-environment info
            # (duck-typed via a ``system_install`` attribute).
            envs = [x for x in args if hasattr(x, "system_install")]
            env = envs[0] if envs else None
            # NOTE(review): depends on a module-level `shared` helper defined
            # elsewhere in this file.
            if shared.which(pname, env):
                return func(*args, **kwargs)
        return decorator
    return argcatcher
def check_gym_environments(env: gym.Env) -> None:
    """Checking for common errors in gym environments.

    Args:
        env: Environment to be checked.

    Warning:
        If env has no attribute spec with a sub attribute,
        max_episode_steps.

    Raises:
        AttributeError: If env has no observation space.
        AttributeError: If env has no action space.
        ValueError: Observation space must be a gym.spaces.Space.
        ValueError: Action space must be a gym.spaces.Space.
        ValueError: Observation sampled from observation space must be
            contained in the observation space.
        ValueError: Action sampled from action space must be
            contained in the observation space.
        ValueError: If env cannot be reset.
        ValueError: If an observation collected from a call to env.reset()
            is not contained in the observation_space.
        ValueError: If env cannot be stepped via a call to env.step().
        ValueError: If the observation collected from env.step() is not
            contained in the observation_space.
        AssertionError: If env.step() returns a reward that is not an
            int or float.
        AssertionError: If env.step() returns a done that is not a bool.
        AssertionError: If env.step() returns an env_info that is not a dict.
    """
    # NOTE(review): relies on module-level helpers defined elsewhere in this
    # file: log_once, logger, convert_element_to_space_type, _check_done,
    # _check_reward and _check_info.
    # check that env has observation and action spaces
    if not hasattr(env, "observation_space"):
        raise AttributeError("Env must have observation_space.")
    if not hasattr(env, "action_space"):
        raise AttributeError("Env must have action_space.")
    # check that observation and action spaces are gym.spaces
    if not isinstance(env.observation_space, gym.spaces.Space):
        raise ValueError("Observation space must be a gym.space")
    if not isinstance(env.action_space, gym.spaces.Space):
        raise ValueError("Action space must be a gym.space")
    # Raise a warning if there isn't a max_episode_steps attribute.
    if not hasattr(env, "spec") or not hasattr(env.spec, "max_episode_steps"):
        if log_once("max_episode_steps"):
            logger.warning(
                "Your env doesn't have a .spec.max_episode_steps "
                "attribute. This is fine if you have set 'horizon' "
                "in your config dictionary, or `soft_horizon`. "
                "However, if you haven't, 'horizon' will default "
                "to infinity, and your environment will not be "
                "reset."
            )
    # check if sampled actions and observations are contained within their
    # respective action and observation spaces.
    def get_type(var):
        # Prefer the numpy dtype when available; fall back to the Python type.
        return var.dtype if hasattr(var, "dtype") else type(var)
    sampled_action = env.action_space.sample()
    sampled_observation = env.observation_space.sample()
    # check if observation generated from stepping the environment is
    # contained within the observation space
    reset_obs = env.reset()
    if not env.observation_space.contains(reset_obs):
        reset_obs_type = get_type(reset_obs)
        space_type = env.observation_space.dtype
        error = (
            f"The observation collected from env.reset() was not "
            f"contained within your env's observation space. Its possible "
            f"that There was a type mismatch, or that one of the "
            f"sub-observations was out of bounds: \n\n reset_obs: "
            f"{reset_obs}\n\n env.observation_space: "
            f"{env.observation_space}\n\n reset_obs's dtype: "
            f"{reset_obs_type}\n\n env.observation_space's dtype: "
            f"{space_type}"
        )
        # Retry the containment check after coercing the observation to the
        # space's element type, to distinguish dtype mismatches from genuine
        # out-of-bounds values.
        temp_sampled_reset_obs = convert_element_to_space_type(
            reset_obs, sampled_observation
        )
        if not env.observation_space.contains(temp_sampled_reset_obs):
            raise ValueError(error)
    # check if env.step can run, and generates observations rewards, done
    # signals and infos that are within their respective spaces and are of
    # the correct dtypes
    next_obs, reward, done, info = env.step(sampled_action)
    if not env.observation_space.contains(next_obs):
        next_obs_type = get_type(next_obs)
        space_type = env.observation_space.dtype
        error = (
            f"The observation collected from env.step(sampled_action) was "
            f"not contained within your env's observation space. Its "
            f"possible that There was a type mismatch, or that one of the "
            f"sub-observations was out of bounds:\n\n next_obs: {next_obs}"
            f"\n\n env.observation_space: {env.observation_space}"
            f"\n\n next_obs's dtype: {next_obs_type}"
            f"\n\n env.observation_space's dtype: {space_type}"
        )
        # Same dtype-coercion retry as for the reset observation above.
        temp_sampled_next_obs = convert_element_to_space_type(
            next_obs, sampled_observation
        )
        if not env.observation_space.contains(temp_sampled_next_obs):
            raise ValueError(error)
    _check_done(done)
    _check_reward(reward)
    _check_info(info)
def WI(bands: dict) -> xr.DataArray:
    """
    Water Index (2015): Fisher et al. (2016)

    Linear combination of the green/red/NIR/SWIR reflectance bands with the
    published WI2015 coefficients.

    Args:
        bands (dict): Bands as {band_name: xr.DataArray}, keyed by the
            ``obn`` band-name constants (GREEN, RED, NIR, SWIR_1, SWIR_2).

    Returns:
        xr.DataArray: Computed index
    """
    return (
        1.7204
        + 171 * bands[obn.GREEN]
        + 3 * bands[obn.RED]
        - 70 * bands[obn.NIR]
        - 45 * bands[obn.SWIR_1]
        - 71 * bands[obn.SWIR_2]
    )
import io
def readZipData(filePath):
    """
    Open a zip file in READ mode and load its Scalars.csv into a data frame.

    :param filePath: path to zip-file (a pathlib.Path)
    :return: data frame with scalars.csv content; an empty frame if the
        archive contains no member ending in ``Scalars.csv``
    """
    with ZipFile(filePath.as_posix(), 'r') as archive:
        # Find the first member whose name ends with 'Scalars.csv'.
        # (Renamed from `zip`, which shadowed the builtin.)
        scalars = next(
            (name for name in archive.namelist() if name.endswith('Scalars.csv')),
            None,
        )
        if scalars is None:
            # Fix: the original printed "Reading None" before this check.
            print('No scalars file exists in zip file!')
            return pd.DataFrame()
        print('Reading', scalars)
        raw = archive.read(scalars)
    # allow comma and semicolon as separators; a regex separator requires
    # the python engine (the default C engine does not support it).
    df = pd.read_csv(io.BytesIO(raw), sep=',|;', engine='python')
    return df
def calc_relative_scale(skeleton, ref_bone_lengths, joint_tree) -> (float, float):
    """Calculate the factor by which the reference is larger than the query skeleton.

    Args:
        skeleton (torch.DoubleTensor): The query skeleton.
        ref_bone_lengths (torch.DoubleTensor): The reference skeleton bone lengths.
        joint_tree (list of int): Parent joint index for each joint.

    Returns:
        The median scale factor (a float), or 0 when the query skeleton has
        no bones of measurable length.
        NOTE(review): the annotated return type ``(float, float)`` does not
        match the single value actually returned — confirm intent.
    """
    # Bone lengths = radial component of each joint's parent-relative
    # position in spherical coordinates.
    bone_lengths = cartesian_to_spherical(
        absolute_to_parent_relative(ensure_cartesian(skeleton, d=3), joint_tree)
    )[:, 0]
    # Mask out degenerate (near-zero) bones to avoid division blow-ups.
    non_zero = bone_lengths.gt(1e-6)
    if non_zero.sum() == 0: return 0
    ratio = (ref_bone_lengths / bone_lengths).masked_select(non_zero)
    return ratio.median().item()
def vertical_move(t, v_speed=2/320):
    """Probe moves vertically at v_speed [cm/s].

    Returns the (x, y, z) position at time ``t``: x and y stay at zero while
    z grows linearly with the vertical speed.
    """
    x = 0. * t
    y = 0 * t
    z = v_speed * t
    return x, y, z
def get_favored_peaks(rama_key):
    """
    returns exact favored peaks with their score value

    :param rama_key: integer in [0, 5] selecting a Ramachandran residue class
        (one of the RAMA_* module constants defined elsewhere in this file).
    :return: list of ((phi, psi), score) tuples, or None for an unmapped key.
    """
    # NOTE(review): assert is stripped under `python -O`; consider raising
    # ValueError for input validation instead.
    assert rama_key in range(6)
    if rama_key == RAMA_GENERAL:
        return [((-115.0, 131.0), 0.57068),
                ((-63.0, -43.0), 1.0),
                ((53.0, 43.0), 0.323004),
                ((53.0, -127.0), 0.0246619)]
    if rama_key == RAMA_GLYCINE:
        return [((63.0, 41.0), 1.0),
                ((-63.0, -41.0), 1.0),
                ((79.0, -173.0), 0.553852),
                # ((-79.0, 173.0), 0.553852),
                ]
    if rama_key == RAMA_CISPRO:
        return [((-75.0, 155.0), 1.0),
                ((-89.0, 5.0), 0.701149)]
    if rama_key == RAMA_TRANSPRO:
        return [((-57.0, -37.0), 0.99566),
                ((-59.0, 143.0), 1.0),
                ((-81.0, 65.0), 0.0896269)]
    if rama_key == RAMA_PREPRO:
        return [((-57.0, -45.0), 1.0),
                ((-67.0, 147.0), 0.992025),
                ((49.0, 57.0), 0.185259)]
    if rama_key == RAMA_ILE_VAL:
        return [((-63.0, -45.0), 1.0),
                ((-121.0, 129.0), 0.76163)]
    return None
def get_all_tablespace_acls(conn):
    """Fetch ACL information for every tablespace.

    Args:
        conn: A connection object exposing ``execute``.

    Returns:
        List of :class:`~.types.RelationInfo` objects.
    """
    results = []
    for row in conn.execute(_pg_tablespace_stmt):
        results.append(RelationInfo(**row))
    return results
import regex
def chunk_pars(content):
    """Given the context contained between `\\beginnumbering` and
    `\\endnumbering`, return list of paragraphs.

    This is able to handle paragraphs demarcated by `\\pstart` and `\\pend` as
    well as when `\\autopar` is used (see §5.2.2 of the reledmac
    documentation). The use of `\\autopar` assumes that the `\\autopar` command
    is given right after the `\\beginnumbering` as in the documentation.

    NOTE(review): assumes at least one delimiter is present; otherwise the
    ``positions[0]`` access raises IndexError (pre-existing behavior).
    """
    # Fix: the original used `content.find(...) is not -1`, an identity
    # comparison on an int (unreliable, and a SyntaxWarning on modern Python).
    if r"\autopar" in content:
        # With \autopar, paragraphs are delimited by blank lines.
        positions = [idx.start() for idx in regex.finditer("\n\n", content)]
    else:
        positions = [idx.start() for idx in regex.finditer(r"\\pstart", content)]
    paragraphs = []
    paragraphs.append(content[: positions[0]])
    for index, par in enumerate(positions):
        try:
            paragraphs.append(content[par : positions[index + 1]])
        except IndexError:
            # Last delimiter: take everything to the end of the content.
            paragraphs.append(content[par:])
    return paragraphs
def bound():
    """Generate a boundary fixture for testing: a cubic curve along the
    x-axis with five control points."""
    boundary = data.Boundary()
    boundary.degree = 3
    boundary.num_ctrlpts = 5
    boundary.start = np.array([0.0, 0.0, 0.0])
    boundary.end = np.array([1.0, 0.0, 0.0])
    return boundary
from typing import Union
def to_tensor(pic: Union[Image, np.ndarray]) -> Tensor:
    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.

    PIL images come back as CHW tensors divided by 255; numpy arrays are
    transposed from HWC to CHW.
    NOTE(review): the ndarray branch does NOT rescale to [0, 1], unlike the
    PIL branch — confirm that asymmetry is intended.

    Raises:
        TypeError: if ``pic`` is neither a PIL image nor a numpy array.
        ValueError: if a numpy ``pic`` is not 2- or 3-dimensional.
    """
    if not (F_pil._is_pil_image(pic) or _is_numpy(pic)):
        raise TypeError(f"input pic should be PIL image or numpy.ndarray, Got {type(pic)}")
    if _is_numpy(pic) and not _is_numpy_image(pic):
        raise ValueError(f"input pic should be 2 or 3 dimensional. Got {pic.ndim} dimensions")
    # handle np.ndarray
    if isinstance(pic, np.ndarray):
        if pic.ndim == 2:
            # promote grayscale HxW to HxWx1 so the transpose below works
            pic = pic[:, :, None]
        img = cranet.tensor(pic.transpose(2, 0, 1))
        return img
    # handle PIL Image
    # map high-bit-depth PIL modes to matching numpy dtypes; default uint8
    mode_to_nptype = {'I': np.int32, 'I;16': np.int16, 'F': np.float32}
    img = cranet.tensor(
        np.array(pic, mode_to_nptype.get(pic.mode, np.uint8))
    )
    if pic.mode == '1':
        # binary images: expand {0, 1} to {0, 255} before the /255 below
        img = 255 * img
    img = img.reshape(pic.size[1], pic.size[0], len(pic.getbands()))
    # (H x W x C) -> (C x H x W)
    img = img.permute((2, 0, 1))
    return img / 255
import numpy
def dummy_image():
    """Create a dummy image: a 1024x1024 anisotropic Gaussian peak with
    uniform random noise added on top."""
    axis = numpy.linspace(-1.5, 1.5, 1024)
    grid_x, grid_y = numpy.meshgrid(axis, axis)
    image = numpy.exp(- (grid_x ** 2 / 0.15 ** 2 + grid_y ** 2 / 0.25 ** 2))
    # add noise
    image += 0.3 * numpy.random.random(size=image.shape)
    return image
import os
def GetPicList(basedir):
    """Return image names (``.tif`` extension stripped) from the first
    non-hidden entry of ``basedir``.

    base_dir
       -> batch1
            -> we
            -> want
            -> these
            -> images
       -> batch2

    :raises ValueError: if ``basedir`` contains only hidden entries.
    """
    filename = ''
    for name in os.listdir(basedir):
        if not name.startswith('.'):
            filename = name
            break
    if not filename:
        raise ValueError("Couldn't find any non-hidden directories in basedir")
    # Fix: the original loop reassigned its loop variable, so the '.tif'
    # suffix was never actually removed from the returned names.
    pic_list = [pic.replace('.tif', '')
                for pic in os.listdir(os.path.join(basedir, filename))]
    return pic_list
def text_coloured_errors(tree,
                         gold=None,
                         depth=0,
                         single_line=False,
                         missing=None,
                         extra=None,
                         compressed=True,
                         POS=True):
    """Pretty print, with errors marked using colour.

    Renders *tree* as bracketed text, colouring extra brackets red, missing
    brackets cyan, and crossing brackets yellow (ANSI escapes).

    'missing' should contain tuples (or be None):
        (start, end, label, crossing-T/F)
    When 'missing'/'extra' are None they are derived by diffing against
    *gold* via ``parse_errors.get_errors`` (defined elsewhere in this file).
    """
    # TODO: Add the ability to compress the same parts consistently (even after
    # errors are no longer present). This would need to be span based as
    # structure could change.
    ans = ''
    if missing is None or extra is None:
        if gold is None:
            return "Error - no gold tree and no missing list for colour repr"
        # look at gold and work out what missing should be
        errors = parse_errors.get_errors(tree, gold, POS)
        extra = [e[3] for e in errors if e[0] == 'extra' and e[3].word is None]
        extra = set(extra)
        missing = [(e[1][0], e[1][1], e[2], False) for e in errors
                   if e[0] == 'missing' and e[3].word is None]
        missing += [(e[1][0], e[1][1], e[2], True) for e in errors
                    if e[0] == 'crossing' and e[3].word is None]
        # note: from here on POS is repurposed from a flag to an error list
        POS = [e for e in errors if e[0] == 'diff POS']
    # ANSI colour escapes: cyan=missing, red=extra, yellow=crossing
    start_missing = "\033[01;36m"
    start_extra = "\033[01;31m"
    start_crossing = "\033[01;33m"
    end_colour = "\033[00m"
    if not single_line:
        ans += '\n' + depth * '\t'
    # start of this
    if tree in extra:
        ans += start_extra + '(' + tree.label + end_colour
    elif tree.word is not None and POS is not None:
        # leaf with a possible POS-tag mismatch: show gold tag then ours
        found = False
        for error in POS:
            if error[3] == tree:
                found = True
                ans += '(' + start_missing + error[4] + end_colour
                ans += ' ' + start_extra + tree.label + end_colour
                break
        if not found:
            ans += '(' + tree.label
    else:
        ans += '(' + tree.label
    # If we are compressing, check for correctness and then just print words
    sub_done = False
    if compressed and tree not in extra and tree.word is None:
        all_right = True
        for error in extra:
            if tree.span[0] <= error.span[0] and error.span[1] <= tree.span[1]:
                all_right = False
                break
        for error in missing:
            if error[3]:
                if tree.span[0] < error[0] < tree.span[1]:
                    all_right = False
                    break
                if tree.span[0] < error[1] < tree.span[1]:
                    all_right = False
                    break
            elif tree.span[0] <= error[0] and error[1] <= tree.span[1]:
                all_right = False
                break
        if POS is not None:
            for error in POS:
                if tree.span[0] <= error[1][0] and error[1][1] <= tree.span[1]:
                    all_right = False
                    break
        if all_right:
            # whole subtree is error-free: collapse it to its word yield
            ans += ' ' + text_words(tree) + ')'
            sub_done = True
    # crossing brackets starting
    if tree.parent is None or tree.parent.subtrees[0] != tree:
        # these are marked as high as possible
        labels = []
        for error in missing:
            if error[0] == tree.span[0] and error[3]:
                labels.append((error[1], error[2]))
        labels.sort(reverse=True)
        if len(labels) > 0:
            to_add = start_crossing + ' '.join(
                ['(' + label[1] for label in labels]) + end_colour
            if sub_done:
                # splice the crossing-open markers in after the leading
                # whitespace of the already-collapsed text
                nans = ''
                for char in ans:
                    if char in '\t\n':
                        nans += char
                clen = len(nans)
                nans += to_add
                nans += ' ' + ans[clen:]
                ans = nans
            else:
                ans += ' ' + to_add
    if not sub_done:
        # word
        if tree.word is not None:
            ans += ' ' + tree.word
        # subtrees
        below = []
        for subtree in tree.subtrees:
            text = text_coloured_errors(subtree, gold, depth + 1, single_line,
                                        missing, extra, compressed, POS)
            if single_line:
                text = ' ' + text
            below.append([subtree.span[0], subtree.span[1], text])
        # add missing brackets that surround subtrees
        for length in range(1, len(below)):
            for i in range(len(below)):
                j = i + length
                if i == 0 and j == len(below) - 1:
                    continue
                if j >= len(below):
                    break
                for error in missing:
                    if below[i][0] == error[0] and below[j][1] == error[
                            1] and not error[3]:
                        # take the leading whitespace of the first child...
                        start = ''
                        for char in below[i][2]:
                            if char not in '\n\t':
                                break
                            start += char
                        # ...indent the wrapped children one level deeper...
                        for k in range(i, j + 1):
                            below[k][2] = '\n\t'.join(below[k][2].split('\n'))
                        # ...and wrap them in the missing bracket markers
                        below[i][2] = start + start_missing + '(' + error[
                            2] + end_colour + below[i][2]
                        below[j][2] += start_missing + ')' + end_colour
        ans += ''.join([part[2] for part in below])
    # end of this
    if tree in extra:
        ans += start_extra + ')' + end_colour
    else:
        ans += ')'
    if tree.parent is None or tree.parent.subtrees[-1] != tree:
        # if there are crossing brackets that end here, mark that
        labels = []
        for error in missing:
            if error[1] == tree.span[1] and error[3]:
                labels.append((-error[0], error[2]))
        labels.sort()
        if len(labels) > 0:
            ans += ' ' + start_crossing + ' '.join(
                [label[1] + ')' for label in labels]) + end_colour
    # TODO: Change so that at the top level,
    # FRAG etc isn't printed outside of ROOT
    # Actually, just have a canonical ordering for unaries
    # (so that NPs end up under FRAGs)
    if tree.parent is None or len(tree.parent.subtrees) > 1:
        # check for missing brackets that go around this node
        for error in missing:
            if (error[0] == tree.span[0]
                    and error[1] == tree.span[1] and not error[3]):
                if tree not in extra:
                    # Put them on a new level
                    extra_text = ''
                    if not single_line:
                        ans = '\n\t'.join(ans.split('\n'))
                        extra_text = '\n' + depth * '\t'
                    extra_text += start_missing + '(' + error[2] + end_colour
                    if single_line:
                        ans = ' ' + ans
                    ans = extra_text + ans
                    ans += start_missing + ')' + end_colour
                else:
                    # Put them on the same line
                    start = 0
                    for char in ans:
                        if char not in '\n\t':
                            break
                        start += 1
                    pretext = ans[:start]
                    ans = ans[start:]
                    extra_text = start_missing + '(' + error[
                        2] + end_colour + ' '
                    ans = pretext + extra_text + ans
                    ans += start_missing + ')' + end_colour
    return ans
def update_user(uid, **kwargs):
    """Updates an existing user account with the specified properties.

    Args:
        uid: A user ID string.
        kwargs: A series of keyword arguments (optional).

    Keyword Args:
        display_name: The user's display name (optional). Can be removed by explicitly passing
            None.
        email: The user's primary email (optional).
        email_verified: A boolean indicating whether or not the user's primary email is
            verified (optional).
        phone_number: The user's primary phone number (optional). Can be removed by explicitly
            passing None.
        photo_url: The user's photo URL (optional). Can be removed by explicitly passing None.
        password: The user's raw, unhashed password. (optional).
        disabled: A boolean indicating whether or not the user account is disabled (optional).
        custom_claims: A dictionary or a JSON string containing the custom claims to be set on
            the user account (optional).
        valid_since: An integer signifying the seconds since the epoch. This field is set by
            ``revoke_refresh_tokens`` and it is discouraged to set this field directly.

    Returns:
        UserRecord: An updated UserRecord instance for the user.

    Raises:
        ValueError: If the specified user ID or properties are invalid.
        AuthError: If an error occurs while updating the user account.
    """
    # 'app' selects which app's auth service to use; it is not a user property.
    app = kwargs.pop('app', None)
    user_manager = _get_auth_service(app).user_manager
    try:
        user_manager.update_user(uid, **kwargs)
        # Re-fetch so the returned record reflects the applied updates.
        return UserRecord(user_manager.get_user(uid=uid))
    except _user_mgt.ApiCallError as error:
        # Normalize backend API failures into the public AuthError type.
        raise AuthError(error.code, str(error), error.detail)
def add_center_dist(nusc: NuScenes,
                    eval_boxes: EvalBoxes):
    """
    Adds the cylindrical (xy) center distance from ego vehicle to each box.

    :param nusc: The NuScenes instance.
    :param eval_boxes: A set of boxes, either GT or predictions.
    :return: eval_boxes augmented with center distances (mutated in place
        and also returned).
    """
    for sample_token in eval_boxes.sample_tokens:
        sample_rec = nusc.get('sample', sample_token)
        # Use the LIDAR_TOP sample's ego pose as the reference position.
        sd_record = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP'])
        pose_record = nusc.get('ego_pose', sd_record['ego_pose_token'])
        for box in eval_boxes[sample_token]:
            # Both boxes and ego pose are given in global coord system, so distance can be calculated directly.
            # Note that the z component of the ego pose is 0.
            ego_translation = (box.translation[0] - pose_record['translation'][0],
                               box.translation[1] - pose_record['translation'][1],
                               box.translation[2] - pose_record['translation'][2])
            if isinstance(box, DetectionBox):
                # detection eval only needs the scalar xy distance
                box.ego_dist = np.sqrt(np.sum(np.array(ego_translation[:2]) ** 2))
            elif isinstance(box, TrackingBox):
                # tracking eval keeps the full translation vector
                box.ego_translation = ego_translation
            else:
                raise NotImplementedError
    return eval_boxes
def check_address(btc_addr, network='test'):
    """ Checks if a given string is a Bitcoin address for a given network (or at least if it is formatted as if it is).

    :param btc_addr: Bitcoin address to be checked.
    :type btc_addr: str
    :param network: Network to be checked (either mainnet or testnet).
    :type network: str
    :return: True if the Bitcoin address matches the format, raise exception otherwise.
    """
    # Validate the network name first so an unknown network never produces a
    # misleading address-format error.
    if network not in ['test', 'testnet', 'main', 'mainnet']:
        raise Exception("Network must be test/testnet or main/mainnet")
    elif network in ['test', 'testnet'] and btc_addr[0] not in ['m', 'n']:
        raise Exception("Wrong testnet address format.")
    elif network in ['main', 'mainnet'] and btc_addr[0] != '1':
        raise Exception("Wrong mainnet address format.")
    elif len(btc_addr) not in range(26, 35 + 1):
        # Fix: the message previously said 27-35 while the check allows 26-35,
        # and Base58Check addresses are not hex.
        raise Exception("Wrong address format, Bitcoin addresses should be 26-35 chars long.")
    else:
        return True
import time
def format_time(record):
    """Format a log record's creation time to ISO 8601.

    https://en.wikipedia.org/wiki/ISO_8601
    """
    base = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(record.created))
    # Append the millisecond component and a UTC 'Z' suffix.
    return '%s.%03dZ' % (base, record.msecs)
def _cb_decode(s, maxsize=8192):
"""Decode a list of IDs from storage in a cookie.
``s`` is text as encoded by ``_cb_encode``.
``maxsize`` is the maximum size of uncompressed data. ``0`` means no limit.
Return a list of text IDs.
"""
dec = decompressobj()
squashed = unquote(s).encode('latin-1')
data = dec.decompress(squashed, maxsize)
if dec.unconsumed_tail:
raise ValueError
json_bytes = data.decode('utf-8')
return loads(json_bytes) | bf1239cf33bf83b1163d96641a20e4adc3e83221 | 29,122 |
def validate_model(df, fix=False):
    """
    Validates the form of a model dataframe. A model dataframe must look something like this:

    pos     val_A   val_C   val_G   val_T
    3       1.1     4.3     -6.19   5.2
    4       0.01    3.40    -10.5   5.3
    5       0       1.4     10.9    231.0

    A 'pos' column reports the position within a sequence to which this model applies. 'val_X' then describe the values of the model parameters.

    Specifications:
    0. The dataframe must have at least one row and one column.
    1. A 'pos' column is mandatory and must occur first. Values must be nonnegative integers in sequential order.
    2. 'val_X' columns must conform to one of the accepted model types. These columns must be arranged in alphabetical order. Parameter values must be finite float values.

    Arguments:
        df (pd.DataFrame): Dataset in dataframe format
        fix (bool): A flag saying whether to fix the dataframe into shape if possible.

    Returns:
        if fix=True:
            df_valid: a valid dataframe that has been fixed by the function
        if fix=False:
            Nothing

    Function:
        Raises a SortSeqError if the data frame violates the specifications (if fix=False) or if these violations cannot be fixed (fix=True).
    """
    # NOTE(review): relies on is_col_type, model_parameters_dict and
    # _validate_cols defined elsewhere in this file.
    # Verify dataframe has at least one row and one column
    if not df.shape[0] >= 1:
        raise SortSeqError(\
            'Dataframe must contain at least one row')
    # Validate column names
    for col in df.columns:
        if not is_col_type(col,['pos','vals']):
            raise SortSeqError('Invalid column in dataframe: %s.'%col)
    for col in ['pos']:
        if not col in df.columns:
            raise SortSeqError('%s column missing'%col)
    # Validate parameter column names
    val_cols = sorted([c for c in df.columns if is_col_type(c,'vals')])
    ok = False
    for cols in model_parameters_dict.values():
        # Check if cols and df.columns are identical
        if len(cols)==len(val_cols):
            if all([a==b for a,b in zip(cols,val_cols)]):
                ok = True
    if not ok:
        raise SortSeqError('Dataframe represents model with invalid columns: %s'%str(val_cols))
    # Validate contents of all columns
    df = _validate_cols(df,fix=fix)
    return df
def create_training_instances(input_files, tokenizer, max_seq_length,
                              dupe_factor, short_seq_prob, masked_lm_prob,
                              max_predictions_per_seq, rng):
    """Create `TrainingInstance`s from raw text.

    Args:
        input_files: Paths of text files (one sentence per line, blank lines
            separating documents).
        tokenizer: Tokenizer exposing ``.tokenize()`` and ``.vocab``.
        max_seq_length: Maximum sequence length per instance.
        dupe_factor: Number of passes over the corpus with different masks.
        short_seq_prob: Probability of generating shorter sequences.
        masked_lm_prob: Fraction of tokens to mask.
        max_predictions_per_seq: Cap on masked positions per instance.
        rng: A ``random.Random`` instance used for shuffling/sampling.

    Returns:
        A shuffled list of training instances.
    """
    all_documents = [[]]
    # Input file format:
    # (1) One sentence per line. These should ideally be actual sentences, not
    # entire paragraphs or arbitrary spans of text. (Because we use the
    # sentence boundaries for the "next sentence prediction" task).
    # (2) Blank lines between documents. Document boundaries are needed so
    # that the "next sentence prediction" task doesn't span between documents.
    #
    # In tf >= 2, tf.gfile moved to tf.compat.v1.gfile; select the right
    # module once instead of duplicating the whole read loop per TF version
    # (the original had two identical copies of this loop).
    gfile = tf.gfile if tf.__version__[0] == '1' else tf.compat.v1.gfile
    for input_file in input_files:
        with gfile.GFile(input_file, "r") as reader:
            while True:
                line = tokenization.convert_to_unicode(reader.readline())
                if not line:
                    break
                line = line.strip()
                # Empty lines are used as document delimiters
                if not line:
                    all_documents.append([])
                tokens = tokenizer.tokenize(line)
                if tokens:
                    all_documents[-1].append(tokens)
    # Remove empty documents
    all_documents = [x for x in all_documents if x]
    rng.shuffle(all_documents)
    vocab_words = list(tokenizer.vocab.keys())
    instances = []
    for _ in range(dupe_factor):
        for document_index in range(len(all_documents)):
            instances.extend(
                create_instances_from_document(
                    all_documents, document_index, max_seq_length, short_seq_prob,
                    masked_lm_prob, max_predictions_per_seq, vocab_words, rng))
    rng.shuffle(instances)
    return instances
def cdgmm(A, B, inplace=False):
    """Complex pointwise multiplication.

    Complex pointwise multiplication between (batched) tensor A and tensor B.

    Parameters
    ----------
    A : tensor
        A is a complex tensor of size (B, C, M, N, 2).
    B : tensor
        B is a complex tensor of size (M, N, 2) or real tensor of (M, N, 1).
    inplace : boolean, optional
        If set to True, all the operations are performed inplace.

    Returns
    -------
    C : tensor
        Output tensor of size (B, C, M, N, 2) such that:
        C[b, c, m, n, :] = A[b, c, m, n, :] * B[m, n, :].
    """
    # NOTE(review): iscomplex/isreal are project helpers defined elsewhere in
    # this file (complexness is encoded in the trailing dimension, not dtype).
    if not iscomplex(A):
        raise TypeError('The input must be complex, indicated by a last '
                        'dimension of size 2.')
    if B.ndimension() != 3:
        raise RuntimeError('The filter must be a 3-tensor, with a last '
                           'dimension of size 1 or 2 to indicate it is real '
                           'or complex, respectively.')
    if not iscomplex(B) and not isreal(B):
        raise TypeError('The filter must be complex or real, indicated by a '
                        'last dimension of size 2 or 1, respectively.')
    if A.size()[-3:-1] != B.size()[-3:-1]:
        raise RuntimeError('The filters are not compatible for multiplication!')
    if A.dtype is not B.dtype:
        raise TypeError('A and B must be of the same dtype.')
    if A.device.type != B.device.type:
        raise TypeError('A and B must be both on GPU or both on CPU.')
    if A.device.type == 'cuda':
        if A.device.index != B.device.index:
            raise TypeError('A and B must be on the same GPU!')
    if isreal(B):
        # Real filter: plain broadcasted multiply suffices.
        if inplace:
            return A.mul_(B)
        else:
            return A * B
    else:
        # Complex filter: manual (a+bi)(c+di) = (ac-bd) + (ad+bc)i, computed
        # on flattened views of the real/imaginary planes.
        C = A.new(A.size())
        A_r = A[..., 0].contiguous().view(-1, A.size(-2)*A.size(-3))
        A_i = A[..., 1].contiguous().view(-1, A.size(-2)*A.size(-3))
        B_r = B[...,0].contiguous().view(B.size(-2)*B.size(-3)).unsqueeze(0).expand_as(A_i)
        B_i = B[..., 1].contiguous().view(B.size(-2)*B.size(-3)).unsqueeze(0).expand_as(A_r)
        C[..., 0].view(-1, C.size(-2)*C.size(-3))[:] = A_r * B_r - A_i * B_i
        C[..., 1].view(-1, C.size(-2)*C.size(-3))[:] = A_r * B_i + A_i * B_r
        return C if not inplace else A.copy_(C)
def InvocationStartEncKeyVector(builder, numElems):
    """This method is deprecated. Please switch to Start.

    Thin legacy alias kept for backward compatibility; forwards directly to
    ``StartEncKeyVector``.
    """
    return StartEncKeyVector(builder, numElems)
from functools import reduce
def dynamic_partial_sum_product(
    sum_op, prod_op, factors, eliminate=frozenset(), plate_to_step=dict()
):
    """
    Generalization of the tensor variable elimination algorithm of
    :func:`funsor.sum_product.partial_sum_product` to handle higher-order markov
    dimensions in addition to plate dimensions. Markov dimensions in transition
    factors are eliminated efficiently using the parallel-scan algorithm in
    :func:`funsor.sum_product.sarkka_bilmes_product`. The resulting factors are then
    combined with the initial factors and final states are eliminated. Therefore,
    when Markov dimension is eliminated ``factors`` has to contain
    initial factors and transition factors.

    :param ~funsor.ops.AssociativeOp sum_op: A semiring sum operation.
    :param ~funsor.ops.AssociativeOp prod_op: A semiring product operation.
    :param factors: A collection of funsors.
    :type factors: tuple or list
    :param frozenset eliminate: A set of free variables to eliminate,
        including both sum variables and product variable.
    :param dict plate_to_step: A dict mapping markov dimensions to
        ``step`` collections that contain ordered sequences of Markov variable names
        (e.g., ``{"time": frozenset({("x_0", "x_prev", "x_curr")})}``).
        Plates are passed with an empty ``step``.
    :return: a list of partially contracted Funsors.
    :rtype: list
    """
    assert callable(sum_op)
    assert callable(prod_op)
    assert isinstance(factors, (tuple, list))
    assert all(isinstance(f, Funsor) for f in factors)
    assert isinstance(eliminate, frozenset)
    assert isinstance(plate_to_step, dict)
    # process plate_to_step
    plate_to_step = plate_to_step.copy()
    prev_to_init = {}
    markov_to_sarkka = {}
    markov_sum_vars = set()
    for key, step in plate_to_step.items():
        for chain in step:
            # map old markov step names to sarkka_bilmes format step names
            # Case 1
            # x_slice(0, 5, None) -> _PREV__PREV_x_slice(2, 7, None)
            # x_slice(1, 6, None) -> _PREV_x_slice(2, 7, None)
            # x_slice(2, 7, None) -> x_slice(2, 7, None)
            # Case 2
            # x_prev - > _PREV_x_curr
            # x_curr -> x_curr
            history = len(chain) // 2
            base_name = chain[-1]
            for t, name in enumerate(reversed(chain[history:-1])):
                markov_to_sarkka[name] = _shift_name(base_name, t + 1)
            markov_sum_vars.add(base_name)
            markov_sum_vars.update(markov_to_sarkka)
            # map prev to init; works for any history > 0
            init, prev = chain[: len(chain) // 2], chain[len(chain) // 2 : -1]
            prev = tuple(markov_to_sarkka[name] for name in prev)
            prev_to_init.update(zip(prev, init))
    markov_sum_vars = frozenset(markov_sum_vars)
    # Partition eliminate into sum (non-plate) and product (plate) variables.
    plates = frozenset(plate_to_step.keys())
    sum_vars = eliminate - plates
    prod_vars = eliminate.intersection(plates)
    markov_prod_vars = frozenset(
        k for k, v in plate_to_step.items() if v and k in eliminate
    )
    markov_sum_to_prod = defaultdict(set)
    for markov_prod in markov_prod_vars:
        for chain in plate_to_step[markov_prod]:
            for name in chain[len(chain) // 2 :]:
                markov_sum_to_prod[name].add(markov_prod)
    # For each sum variable, track the minimal ordinal (set of plates) in
    # which it appears across all factors.
    var_to_ordinal = {}
    ordinal_to_factors = defaultdict(list)
    for f in factors:
        ordinal = plates.intersection(f.inputs)
        ordinal_to_factors[ordinal].append(f)
        for var in sum_vars.intersection(f.inputs):
            var_to_ordinal[var] = var_to_ordinal.get(var, ordinal) & ordinal
    ordinal_to_vars = defaultdict(set)
    for var, ordinal in var_to_ordinal.items():
        ordinal_to_vars[ordinal].add(var)
    results = []
    # Contract from the deepest ordinal (most plates) outwards.
    while ordinal_to_factors:
        leaf = max(ordinal_to_factors, key=len)
        leaf_factors = ordinal_to_factors.pop(leaf)
        leaf_reduce_vars = ordinal_to_vars[leaf]
        for (group_factors, group_vars) in _partition(
            leaf_factors, leaf_reduce_vars | markov_prod_vars
        ):
            # eliminate non markov vars
            nonmarkov_vars = group_vars - markov_sum_vars - markov_prod_vars
            f = reduce(prod_op, group_factors).reduce(sum_op, nonmarkov_vars)
            # eliminate markov vars
            markov_vars = group_vars.intersection(markov_sum_vars)
            if markov_vars:
                markov_prod_var = [markov_sum_to_prod[var] for var in markov_vars]
                assert all(p == markov_prod_var[0] for p in markov_prod_var)
                if len(markov_prod_var[0]) != 1:
                    raise ValueError("intractable!")
                time = next(iter(markov_prod_var[0]))
                for v in sum_vars.intersection(f.inputs):
                    if time in var_to_ordinal[v] and var_to_ordinal[v] < leaf:
                        raise ValueError("intractable!")
                time_var = Variable(time, f.inputs[time])
                # markov_to_sarkka renames variables in MarkovProduct format
                # to sarkka_bilmes_product format
                base_names = markov_vars.intersection(
                    _shift_name(name, -_get_shift(name))
                    for name in markov_to_sarkka.values()
                )
                f = f(**markov_to_sarkka)
                global_vars = frozenset(
                    set(f.inputs)
                    - {time_var.name}
                    - set(markov_to_sarkka.values())
                    - base_names
                )
                with funsor.terms.eager:
                    f = funsor.optimizer.apply_optimizer(f)
                # Parallel-scan elimination of the markov time dimension.
                f = sarkka_bilmes_product(sum_op, prod_op, f, time_var, global_vars)
                f = f.reduce(sum_op, base_names)
                # Substitute initial states for the remaining _PREV_ slots.
                f = f(**prev_to_init)
            remaining_sum_vars = sum_vars.intersection(f.inputs)
            if not remaining_sum_vars:
                results.append(f.reduce(prod_op, leaf & prod_vars - markov_prod_vars))
            else:
                # Push the partially-reduced factor up to the shallowest
                # ordinal that still covers its remaining sum variables.
                new_plates = frozenset().union(
                    *(var_to_ordinal[v] for v in remaining_sum_vars)
                )
                if new_plates == leaf:
                    raise ValueError("intractable!")
                f = f.reduce(prod_op, leaf - new_plates - markov_prod_vars)
                ordinal_to_factors[new_plates].append(f)
    return results
def range_to_number(interval_str):
    """Convert an interval string "X-Y" to its numeric lower bound X.

    Handles negative lower bounds ("-5-10" -> -5), "M"/"B" suffixes for
    millions/billions, and decimal values. A string without "-" is parsed
    directly as an integer.

    :param interval_str: interval such as "5-10", "-5-10", "1.5M-2M", or a
        plain integer string
    :return: int (or float when the bound contains a decimal point)
    """
    if '-' not in interval_str:
        return int(interval_str)
    # If the first character is '-', the lower bound X is a negative number.
    if interval_str.startswith('-'):
        number = '-' + interval_str.split('-')[1]
    else:
        number = interval_str.split('-')[0]
    if number.endswith('M'):
        return int(round(float(number[:-1]) * 1_000_000))
    if number.endswith('B'):
        return int(round(float(number[:-1]) * 1_000_000_000))
    if '.' in number:
        return float(number)
    return int(number)
import warnings
def load_wav_file_with_wavio(
    file_path, sample_rate, mono=True, resample_type="kaiser_best"
):
    """Load a wav file (including 24-bit PCM) as a float32 time series.

    Significantly faster than load_sound_file. Optionally downmixes to mono
    and resamples to the requested sample rate (with a warning, since
    resampling hurts execution time).
    """
    wav = wavio.read(str(file_path))
    samples = wav.data
    source_rate = wav.rate
    # Scale integer PCM samples to roughly [-1, 1] float32.
    if samples.dtype != np.float32:
        if wav.sampwidth == 3:
            samples = np.true_divide(samples, 8388608, dtype=np.float32)
        elif wav.sampwidth == 2:
            samples = np.true_divide(samples, 32768, dtype=np.float32)
        else:
            raise Exception("Unknown sampwidth")
    # Downmix multi-channel audio when mono output was requested.
    if mono and len(samples.shape) > 1:
        if samples.shape[1] == 1:
            samples = samples[:, 0]
        else:
            samples = np.mean(samples, axis=1)
    if sample_rate is not None and source_rate != sample_rate:
        if resample_type == "auto":
            resample_type = (
                "kaiser_fast" if source_rate < sample_rate else "kaiser_best"
            )
        samples = librosa.resample(
            samples, source_rate, sample_rate, res_type=resample_type
        )
        warnings.warn(
            "{} had to be resampled from {} hz to {} hz. This hurt execution time.".format(
                str(file_path), source_rate, sample_rate
            )
        )
    return samples, (source_rate if sample_rate is None else sample_rate)
import types
def make_proxy_cls(
    remote_cls: netref.BaseNetref,
    origin_cls: type,
    override: type,
    cls_name: str = None,
):
    """
    Makes a new class type which inherits from <origin_cls> (for isinstance() and issubtype()),
    takes methods from <override> as-is and proxy all requests for other members to <remote_cls>.
    Note that origin_cls and remote_cls are assumed to be the same class types, but one is local
    and other is obtained from RPyC.
    Effectively implements subclassing, but without subclassing. This is needed because it is
    impossible to subclass a remote-obtained class, something in the very internals of RPyC bugs out.
    Parameters
    ----------
    remote_cls: netref.BaseNetref
        Type obtained from RPyC connection, expected to mirror origin_cls
    origin_cls: type
        The class to prepare a proxying wrapping for
    override: type
        The mixin providing methods and attributes to overlay on top of remote values and methods.
    cls_name: str, optional
        The name to give to the resulting class.
    Returns
    -------
    type
        New wrapper that takes attributes from override and relays requests to all other
        attributes to remote_cls
    """
    class ProxyMeta(RemoteMeta):
        """
        This metaclass deals with printing a telling repr() to assist in debugging,
        and to actually implement the "subclass without subclassing" thing by
        directly adding references to attributes of "override" and by making proxy methods
        for other functions of origin_cls. Class-level attributes being proxied is managed
        by RemoteMeta parent.
        Do note that we cannot do the same for certain special members like __getitem__
        because CPython for optimization doesn't do a lookup of "type(obj).__getitem__(foo)" when
        "obj[foo]" is called, but it effectively does "type(obj).__dict__['__getitem__'](foo)"
        (but even without checking for __dict__), so all present methods must be declared
        beforehand.
        """
        def __repr__(self):
            return f"<proxy for {origin_cls.__module__}.{origin_cls.__name__}:{cls_name or origin_cls.__name__}>"
        def __prepare__(*args, **kw):
            """
            Cooks the __dict__ of the type being constructed. Takes attributes from <override> as is
            and adds proxying wrappers for other attributes of <origin_cls>.
            This "manual inheritance" is needed for RemoteMeta.__getattribute__ which first looks into
            type(obj).__dict__ (EXCLUDING parent classes) and then goes to proxy type.
            """
            namespace = type.__prepare__(*args, **kw)
            # try computing overridden differently to allow subclassing one override from another
            no_override = set(_NO_OVERRIDE)
            # Copy attributes from <override> and its ancestors (nearest base
            # wins), skipping plain object attributes and _NO_OVERRIDE names.
            for base in override.__mro__:
                if base == object:
                    continue
                for attr_name, attr_value in base.__dict__.items():
                    if (
                        attr_name not in namespace
                        and attr_name not in no_override
                        and getattr(object, attr_name, None) != attr_value
                    ):
                        namespace[
                            attr_name
                        ] = attr_value  # force-inherit an attribute manually
                        no_override.add(attr_name)
            # For every remaining plain function on origin_cls, synthesize a
            # method that forwards the call to the remote end.
            for base in origin_cls.__mro__:
                if base == object:
                    continue
                # try unwrapping a dual-nature class first
                while True:
                    try:
                        sub_base = object.__getattribute__(base, "__real_cls__")
                    except AttributeError:
                        break
                    if sub_base is base:
                        break
                    base = sub_base
                for name, entry in base.__dict__.items():
                    if (
                        name not in namespace
                        and name not in no_override
                        and isinstance(entry, types.FunctionType)
                    ):
                        # __method_name__ keyword default binds `name` at
                        # definition time, avoiding the late-binding closure
                        # pitfall inside this loop.
                        def method(_self, *_args, __method_name__=name, **_kw):
                            return getattr(_self.__remote_end__, __method_name__)(
                                *_args, **_kw
                            )
                        method.__name__ = name
                        namespace[name] = method
            return namespace
    class Wrapper(override, origin_cls, metaclass=ProxyMeta):
        """
        Subclass origin_cls replacing attributes with what is defined in override while
        relaying requests for all other attributes to remote_cls.
        """
        __name__ = cls_name or origin_cls.__name__
        # Class-level reference to the RPyC-obtained class; new instances are
        # constructed from it in __init__.
        __wrapper_remote__ = remote_cls
        def __new__(cls, *a, **kw):
            # Allocation is purely local; the remote object is attached in __init__.
            return override.__new__(cls)
        def __init__(self, *a, __remote_end__=None, **kw):
            if __remote_end__ is None:
                __remote_end__ = remote_cls(*a, **kw)
            while True:
                # unwrap the object if it's a wrapper
                try:
                    __remote_end__ = object.__getattribute__(
                        __remote_end__, "__remote_end__"
                    )
                except AttributeError:
                    break
            # Bypass our own __setattr__ so the reference is stored locally.
            object.__setattr__(self, "__remote_end__", __remote_end__)
        @classmethod
        def from_remote_end(cls, remote_inst):
            # Wrap an already-existing remote instance without constructing a new one.
            return cls(__remote_end__=remote_inst)
        def __getattribute__(self, name):
            """
            Implement "default" resolution order to override whatever __getattribute__
            a parent being wrapped may have defined, but only look up on own __dict__
            without looking into ancestors' ones, because we copy them in __prepare__.
            Effectively, any attributes not currently known to Wrapper (i.e. not defined here
            or in override class) will be retrieved from the remote end.
            Algorithm (mimicking default Python behaviour):
            1) check if type(self).__dict__[name] exists and is a get/set data descriptor
            2) check if self.__dict__[name] exists
            3) check if type(self).__dict__[name] is a non-data descriptor
            4) check if type(self).__dict__[name] exists
            5) pass through to remote end
            """
            dct = object.__getattribute__(self, "__dict__")
            if name == "__dict__":
                return dct
            cls_dct = object.__getattribute__(type(self), "__dict__")
            try:
                cls_attr, has_cls_attr = cls_dct[name], True
            except KeyError:
                has_cls_attr = False
            else:
                oget = None
                try:
                    oget = object.__getattribute__(cls_attr, "__get__")
                    object.__getattribute__(cls_attr, "__set__")
                except AttributeError:
                    pass  # not a get/set data descriptor, go next
                else:
                    return oget(self, type(self))
            # type(self).name is not a get/set data descriptor
            try:
                return dct[name]
            except KeyError:
                # instance doesn't have an attribute
                if has_cls_attr:
                    # type(self) has this attribute, but it's not a get/set descriptor
                    if oget:
                        # this attribute is a get data descriptor
                        return oget(self, type(self))
                    return cls_attr  # not a data descriptor whatsoever
                # this instance/class does not have this attribute, pass it through to remote end
                return getattr(dct["__remote_end__"], name)
        if override.__setattr__ == object.__setattr__:
            # no custom attribute setting, define our own relaying to remote end
            def __setattr__(self, name, value):
                if name not in _PROXY_LOCAL_ATTRS:
                    setattr(self.__remote_end__, name, value)
                else:
                    object.__setattr__(self, name, value)
        if override.__delattr__ == object.__delattr__:
            # no custom __delattr__, define our own
            def __delattr__(self, name):
                if name not in _PROXY_LOCAL_ATTRS:
                    delattr(self.__remote_end__, name)
return Wrapper | fa48a656ed4fee4a4d1d44b7b97aad32139e644e | 29,130 |
def as_cidr(cr: CidrRepr) -> Cidr:
    """
    Coerce *cr* to a strict network address in CIDR form.

    A ``Cidr`` object is returned unaltered; anything else (e.g. a string of
    the form ``"<network number><zeros>/<mask bits>"``) is parsed with
    ``ip_network``.
    """
    return cr if isinstance(cr, _BaseNetwork) else ip_network(cr)
from vivofoundation import get_triples
def get_authorship(authorship_uri):
    """
    Given a URI, return a dict describing the authorship it represents.

    Queries the triple store for all triples of the authorship and maps the
    known VIVO predicates (author rank, linked author, linked publication,
    corresponding-author flag) onto dictionary keys.

    :param authorship_uri: URI of the authorship individual
    :return: dict with 'authorship_uri' plus any predicate-derived keys found
    """
    authorship = {'authorship_uri': authorship_uri}
    triples = get_triples(authorship_uri)
    try:
        bindings = triples["results"]["bindings"]
    except (KeyError, TypeError):
        # Malformed or empty SPARQL response: treat as "no triples found"
        # (the original code silently swallowed every exception here).
        bindings = []
    # Map each known VIVO predicate onto its output dict key.
    predicate_to_key = {
        "http://vivoweb.org/ontology/core#authorRank": 'author_rank',
        "http://vivoweb.org/ontology/core#linkedAuthor": 'author_uri',
        "http://vivoweb.org/ontology/core#linkedInformationResource": 'publication_uri',
        "http://vivoweb.org/ontology/core#isCorrespondingAuthor": 'corresponding_author',
    }
    for binding in bindings:
        key = predicate_to_key.get(binding['p']['value'])
        if key is not None:
            authorship[key] = binding['o']['value']
    return authorship
def Init():
    """Initialise the key spreadsheet variables.

    Fetches the spreadsheet metadata and returns the sheet ids of the users
    sheet and the score ("SW") sheet, plus the last filled row of the score
    sheet, i.e. the first row where a newcomer may be appended.
    """
    # Fetch the list of sheets with their ids and titles.
    spreadsheet = service.spreadsheets().get(spreadsheetId=spreadsheet_id).execute()
    sheets = spreadsheet.get('sheets')
    users_sheet_id = sheets[0]['properties']['sheetId']
    sw_sheet_id = sheets[1]['properties']['sheetId']
    # Last occupied row of the score sheet, taken from its bottom-most merged range.
    last_row_in_sw = sorted(sheets[1]['merges'], key=lambda m: m['endRowIndex'], reverse=True)[0]['endRowIndex']
    return users_sheet_id, sw_sheet_id, last_row_in_sw
def Nlam_to_Flam(wave, zeropoint, zp_min=5.0, zp_max=30.0):
    """
    Factor S_lam that converts N_lam to F_lam, i.e. S_lam \equiv F_lam/N_lam.

    Parameters
    ----------
    wave (`numpy.ndarray`_):
        Wavelength vector for zeropoint
    zeropoint (`numpy.ndarray`_):
        zeropoint
    zp_min (float, optional):
        Minimum allowed zeropoint value; where smaller, the factor is zero
    zp_max (float, optional):
        Maximum allowed zeropoint value; where larger, the factor is zero

    Returns
    -------
    `numpy.ndarray`_ of conversion factors, zero wherever the inputs are
    outside the allowed ranges.
    """
    factor = np.zeros_like(wave)
    # Only evaluate where the wavelength and zeropoint are within bounds.
    good = (wave > 1.0) & (zeropoint > zp_min) & (zeropoint < zp_max)
    factor[good] = np.power(10.0, -0.4 * (zeropoint[good] - ZP_UNIT_CONST)) / np.square(wave[good])
    return factor
def base64_values_validate(name, description, color_set):
    """Re-validate palette fields decoded from base64.

    Guards against maliciously fabricated strings feeding corrupted data into
    the app even when the b64 payload itself decoded into a valid string.
    Returns ``{'error': True}`` on the first failing field, ``None`` otherwise.
    """
    # Validators are called lazily, one at a time, mirroring `or` short-circuiting.
    validators = (
        (custom_palette_name_validate, name),
        (custom_palette_description_validate, description),
        (custom_palette_color_set_validate, color_set),
    )
    for validate, value in validators:
        if validate(value):
            return {'error': True}
def isIndepFromTarget(df, attr, x):
    """
    Decide whether attribute ``attr`` is independent of the ``target`` column.

    :param df: dataframe to analyse (must contain a binary ``target`` column)
    :param attr: name of the attribute column to test
    :param x: significance threshold for the chi-squared test
    :return: True when ``attr`` is independent of ``target`` at level ``x``
    """
    value_index = {}   # attribute value -> column index in the contingency table
    counts = [[], []]  # one row of counts per target class (0 / 1)
    for row in df.itertuples():
        record = row._asdict()
        value = record[attr]
        if value not in value_index:
            value_index[value] = len(value_index)
            counts[0].append(0)
            counts[1].append(0)
        counts[record['target']][value_index[value]] += 1
    _, p_value, _, _ = chi2_contingency(counts)
    if p_value < x:
        return False
    return True
def read_docs_md(filename, root=None):
    """
    Retrieves an apidoc markdown file to be implemented in swagger_auto_schema
    :param(str) root: root base dir, settings.BASE_DIR as default
    :param(str) filename: the filename to be retrieved without the .md file type
    :return: the content of the md file, None if not found
    """
    base = root or settings.BASE_DIR
    try:
        # BUG FIX: the path previously hard-coded "(unknown)" instead of using
        # the `filename` parameter. Also use a context manager so the file
        # handle is always closed.
        with open(f"{base}/apidocs/{filename}.md", "r") as f:
            return f.read()
    except FileNotFoundError:
        return None
def unrotate(points, posor):
    """Rotate the matrix of column vectors ``points`` according to ``posor``,
    i.e. transform from absolute coordinates into camera coordinates by
    applying the inverse of the posor rotation matrix."""
    return calc_rot_matrix(posor).I * points
def exp_tail(d, x):
    """Tail of the exponential series starting at term d, i.e. exp(x) minus
    the first d Taylor terms. Needed in the set sampler.

    Parameters
    ----------
    d: int
    x: float

    Returns
    -------
    float
    """
    tail = exp(x)
    for k in range(d):
        # Strip the k-th Taylor term x^k / k! from the full sum.
        tail -= pow(x, k) / factorial(k)
    return tail
def check_projects_scores(request, hackathon_id):
    """ When a judge submits the score, check if all projects in the Hackathon
    were scored by all the judges in all the categories by comparing the
    number of objects in HackProjectScore for each projects to the required
    number of objects.
    If all projects weren't scored, render final_score.html without the
    score table.
    If all the projects were scored, calculate the total score for each team,
    sort the teams by scores
    and render final_score.html with the score table.

    On POST: validate and atomically save the submitted award formset, then
    redirect back to the final-score page.
    On GET: build the judges/teams score table and render it together with
    the award formset.
    """
    hackathon = get_object_or_404(Hackathon, pk=hackathon_id)
    # Formset over this hackathon's existing awards; extra=0 means no blank forms.
    HackAwardFormSet = modelformset_factory(
        HackAward, fields=('id', 'hack_award_category',
                           'winning_project'),
        form=HackAwardForm, extra=0)
    if request.method == 'POST':
        hack_awards_formset = HackAwardFormSet(
            request.POST,
            form_kwargs={'hackathon_id': hackathon_id},
            queryset=HackAward.objects.filter(hackathon=hackathon))
        if hack_awards_formset.is_valid():
            try:
                # Save all award rows in one transaction so a failure rolls
                # back every row, not just the offending one.
                with transaction.atomic():
                    hack_awards_formset.save()
            except IntegrityError as e:
                # A UNIQUE violation means the same category was assigned twice.
                if 'UNIQUE' in str(e):
                    messages.error(request,
                                   ("Each award category can only be added "
                                    "once to a hackathon."))
                else:
                    logger.exception(e)
                    messages.error(request,
                                   ("An unexpected error occurred. Please "
                                    "try again."))
        else:
            messages.error(request,
                           "An unexpected error occurred. Please try again.")
        # Always redirect after POST (post/redirect/get pattern).
        return redirect(reverse('hackathon:final_score',
                                kwargs={'hackathon_id': hackathon_id}))
    else:
        judges = [judge.slack_display_name for judge in hackathon.judges.all()]
        # Only teams that actually submitted a project appear in the table.
        teams = [team.display_name for team in hackathon.teams.all()
                 if team.project]
        scores = query_scores(hackathon_id)
        scores_table = create_judges_scores_table(scores, judges, teams)
        hack_awards_formset = HackAwardFormSet(
            form_kwargs={'hackathon_id': hackathon_id},
            queryset=HackAward.objects.filter(hackathon=hackathon))
        return render(request, 'hackathon/final_score.html', {
            'hackathon': hackathon.display_name,
            'hack_awards_formset': hack_awards_formset,
            'scores_table': scores_table,
            'teams_without_projects': '\n'+'\n'.join([
                team.display_name
                for team in hackathon.teams.all()
                if not team.project]),
}) | edfb52db396a984e10a437a1b6561a3e493d9e0e | 29,140 |
import os
import shutil


def remove_and_create_dir(path):
    """Delete ``path`` recursively (if it exists) and recreate it empty.

    :param path: directory path to reset
    :return: the same ``path``, now an existing empty directory
    """
    print('attempting to delete ', os.path.dirname(path), ' path ', path)
    # shutil.rmtree / os.makedirs replace the original
    # os.system("rm -rf " + path) and os.system("mkdir -p " + path), which
    # were shell-injection prone and non-portable.
    if os.path.exists(path):
        shutil.rmtree(path)
    os.makedirs(path, exist_ok=True)
    return path
import datetime  # module import: the code below uses datetime.timedelta
import pytz


def _get_week_comfortband(building, zone, date, interval):
    """
    Gets the whole comfortband from the zone configuration file. Correctly resamples the data according to interval.

    :param building: building name used to look up the zone configuration.
    :param zone: zone name within the building.
    :param date: The date for which we want to start the week. Timezone aware.
    :param interval: int:seconds. The interval/frequency of resampling. Has to be such that 60 % interval == 0
    :return: (pd.df, err) where the df has columns "t_low", "t_high" and a timezone-aware
        time-series index (in the timezone of ``date``) covering the week from ``date``;
        on failure returns (None, error message).

    NOTE: the snippet originally did ``from datetime import datetime`` while
    calling ``datetime.timedelta``, which raises AttributeError; the module
    import above fixes that.
    """
    config, err = _get_temperature_band_config(building, zone)
    if config is None:
        return None, err
    # Set the date to the controller timezone.
    building_date = date.astimezone(tz=pytz.timezone(config["tz"]))
    weekday = building_date.weekday()
    list_data = []
    comfortband_data = config["comfortband"]
    df_do_not_exceed, err = _get_week_do_not_exceed(building, zone, building_date, interval)
    if df_do_not_exceed is None:
        return None, err
    # Note, we need to get a day before the start and after the end of the week to correctly resample due to timezones.
    for i in range(DAYS_IN_WEEK + 2):
        curr_weekday = (weekday + i - 1) % DAYS_IN_WEEK
        curr_day = building_date + datetime.timedelta(days=i - 1)
        curr_idx = []
        curr_comfortband = []
        weekday_comfortband = np.array(comfortband_data[curr_weekday])
        for interval_comfortband in weekday_comfortband:
            start, end, t_low, t_high = interval_comfortband
            start = utils.combine_date_time(start, curr_day)
            # A None bound means "fall back to the do-not-exceed safety band".
            if t_low is None or t_low == "None":
                interval_safety = df_do_not_exceed[start-datetime.timedelta(seconds=interval):start]
                t_low = interval_safety["t_low"].mean()  # TODO We want mean weighter by duration. Fine approximation for now
            if t_high is None or t_high == "None":
                interval_safety = df_do_not_exceed[start-datetime.timedelta(seconds=interval):start]
                t_high = interval_safety["t_high"].mean()
            curr_idx.append(start)
            curr_comfortband.append({"t_low": float(t_low),
                                     "t_high": float(t_high)})
        list_data.append(pd.DataFrame(index=curr_idx, data=curr_comfortband))
    df_comfortband = pd.concat(list_data)
    df_comfortband = df_comfortband.tz_convert(date.tzinfo)
    rounded_date = utils.decrement_to_start_of_day(date, interval)
    df_comfortband = utils.smart_resample(df_comfortband, rounded_date, rounded_date+datetime.timedelta(days=7), interval, "pad")
    return df_comfortband, None
import time
import os
import shutil
def setup_testrun_dir():
    """
    Set up a testrun_* directory in the cwd and return the path to it.

    The directory gets an empty ``runfolders`` subdirectory and copies of the
    ``app.config`` / ``logger.config`` templates from the integration config
    directory next to this file.
    """
    run_dir = "testrun_{}".format(int(time.time()))
    os.mkdir(run_dir)
    here = os.path.dirname(os.path.realpath(__file__))
    template_dir = os.path.join(here, "integration", "config")
    os.mkdir(os.path.join(run_dir, "runfolders"))
    for config_name in ("app.config", "logger.config"):
        shutil.copy2(os.path.join(template_dir, config_name), run_dir)
    return os.path.realpath(run_dir)
def get_path(obj, path, default=None):
    """Resolve ``path`` against a nested list/dict structure.

    Walks ``obj`` one key at a time; whenever a step cannot be resolved the
    ``default`` value is substituted and the walk stops on ``None``.

    Args:
        obj (list|dict): Object to process.
        path (str|list): List or ``.`` delimited string describing the path.

    Keyword Arguments:
        default (mixed): Value returned when the path doesn't exist.
            Defaults to ``None``.

    Returns:
        mixed: Value of `obj` at path.

    Example:

        >>> get_path({}, 'a.b.c') is None
        True
        >>> get_path({'a': {'b': {'c': [1, 2, 3, 4]}}}, 'a.b.c.1')
        2

    .. versionadded:: 2.0.0

    .. versionchanged:: 2.2.0
        Support escaping "." delimiter in single string path key.
    """
    current = obj
    for segment in path_keys(path):
        current = get_item(current, segment, default=default)
        if current is None:
            break
    return current
def get_label_set_args():
    """
    Build the ArgParser for the "Label Set" experiment: general and explainer
    arguments plus the experiment-specific I/O directories and fractions.
    Return ArgParser object.
    """
    parser = get_explainer_args(get_general_args())
    parser.add('--in_dir', type=str, default='output/influence_set/')
    parser.add('--out_dir', type=str, default='output/label_set/')
    parser.add('--val_frac', type=float, default=0.1)
    edit_fracs = [0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5]
    parser.add('--edit_frac', type=float, nargs='+', default=edit_fracs)
    return parser
def literal_label(lit):
    """Invent a readable label name for the given literal:
    ``<function name>_<literal name>``."""
    return f"{lit.function.name}_{lit.name}"
from typing import Optional
def key(element: DOMElement) -> Optional[str]:
    """
    Look up the key under which a :class:`.DOMElement` is stored in its parent.

    :param element: A DOM element
    :return: The element's key in its parent, or None if it has no key (i.e.
        its parent is not a :class:`collections.abc.Mapping`)
    """
    wrapped = dom(element)
    return wrapped.element_key
def absolute_reverse(view_name, query_kwargs=None, args=None, kwargs=None):
    """Like django's `reverse`, except returns an absolute URL with the given
    query parameters appended."""
    relative = reverse(view_name, kwargs=kwargs)
    return website_util.api_v2_url(relative, params=query_kwargs)
def convert(chinese):
    """Converts a Chinese numeral string to an int.
    in: string of Chinese numeral characters
    out: int
    """
    # Digit characters: standard, financial and colloquial forms; the final
    # entries map single characters standing for whole multiples of ten.
    numbers = {'零':0, '一':1, '二':2, '三':3, '四':4, '五':5, '六':6, '七':7, '八':8, '九':9, '壹':1, '贰':2, '叁':3, '肆':4, '伍':5, '陆':6, '柒':7, '捌':8, '玖':9, '两':2, '廿':20, '卅':30, '卌':40, '虚':50, '圆':60, '近':70, '枯':80, '无':90}
    # Multiplier characters: standard and financial forms.
    units = {'个':1, '十':10, '百':100, '千':1000, '万':10000, '亿':100000000, '拾':10, '佰':100, '仟':1000}
    number, pureNumber = 0, True
    # Pass 1: if no unit/multiple-of-ten character appears, the string is a
    # plain positional number (each digit shifts the accumulator by ten).
    for i in range(len(chinese)):
        if chinese[i] in units or chinese[i] in ['廿', '卅', '卌', '虚', '圆', '近', '枯', '无']:
            pureNumber = False
            break
        if chinese[i] in numbers:
            number = number * 10 + numbers[chinese[i]]
    if pureNumber:
        return number
    number = 0
    # Pass 2: accumulate digit * unit products, letting each digit absorb the
    # unit characters that follow it; a leading '十' with no preceding digit
    # contributes a base of 10.
    for i in range(len(chinese)):
        if chinese[i] in numbers or chinese[i] == '十' and (i == 0 or chinese[i - 1] not in numbers or chinese[i - 1] == '零'):
            # NOTE(review): the re-test of "chinese[i] == '十'" inside the
            # parenthesised condition below looks redundant with the outer
            # check — confirm the intended operator precedence.
            base, currentUnit = 10 if chinese[i] == '十' and (i == 0 or chinese[i] == '十' and chinese[i - 1] not in numbers or chinese[i - 1] == '零') else numbers[chinese[i]], '个'
            # Promote the base through successively larger trailing units.
            for j in range(i + 1, len(chinese)):
                if chinese[j] in units:
                    if units[chinese[j]] >= units[currentUnit]:
                        base, currentUnit = base * units[chinese[j]], chinese[j]
            number = number + base
return number | c08b9e01f0981afd09d2d9537ec1e98f2af46c06 | 29,149 |
def method_not_raises(UnexpectedException):
    """Decorator factory ensuring the wrapped method does not raise the
    given UnexpectedException."""
    # The inner function keeps the same name on purpose — Decorators.decorator
    # may rely on it when building the wrapper.
    @Decorators.decorator
    def method_not_raises(target, *args, **kwargs):
        # Delegate to the function-level checker with the same exception type.
        return not_raises(UnexpectedException, target, *args, **kwargs)
    return method_not_raises
def create_random_data(n_randoms, stomp_map):
    """Create randomly positioned unknown objects on the considered geometry.

    These randoms normalize the output PDF and properly estimate the
    "zero point" of the correlation amplitude. The points are returned in a
    spatially searchable quad tree.
    ----------------------------------------------------------------------------
    Args:
        n_randoms: int number of random points to generate
        stomp_map: STOMP.Map object specifying the survey geometry
    Returns:
        STOMP::TreeMap object
    """
    print("Creating %i randoms..." % n_randoms)
    angular_positions = stomp.AngularVector()
    stomp_map.GenerateRandomPoints(angular_positions, n_randoms)
    # Tree resolution is at least 128, or the map's region resolution if coarser.
    resolution = int(np.max((128, stomp_map.RegionResolution())))
    tree = stomp.TreeMap(resolution, 200)
    print("\tLoading randoms into tree map...")
    for angular_position in angular_positions:
        tree.AddPoint(angular_position, 1.0)
    return tree
def _rotate_flag(flag, rotation):
    """Rotate the square flag by the given number of degrees, preserving size."""
    original_size = flag.size
    side = original_size[0]
    # Enlarge by sqrt(2) before rotating so the rotated square still covers
    # the original area; `offset` is how much to crop back on each edge.
    offset = int((side * (2 ** 0.5) - side) / 2)
    enlarged = int(side * (2 ** 0.5)) + 1
    flag = flag.resize((enlarged,) * 2, resample=Image.BICUBIC)
    flag = flag.rotate(rotation, resample=Image.BICUBIC)
    if rotation % 90 == 0:  # TODO: Actually calculate crop amount needed
        return ImageOps.fit(flag, original_size)
    return flag.crop([offset, offset, offset + side, offset + side])
def mobilenetv1():
    """Handler for the MobileNet V1 model landing page.

    :return: rendered HTML response
    """
    template = "mobilenetv1.html"
    return render_template(template)
def move(board):
    """Query the user for a move and apply it to ``board``.

    A legal move jumps a peg ("!") over an adjacent peg into an empty hole
    ("O"): the position difference must be 2 or 18 and the midpoint must hold
    a peg, which is removed.

    :param board: dict mapping position -> "!" (peg) or "O" (empty)
    :return: False on invalid input or an illegal move, True when the move
        was applied
    """
    start_input = input("MOVE WHICH PIECE? ")
    if not start_input.isdigit():
        return False
    start = int(start_input)
    if start not in board or board[start] != "!":
        return False
    end_input = input("TO WHERE? ")
    if not end_input.isdigit():
        return False
    end = int(end_input)
    if end not in board or board[end] != "O":
        return False
    difference = abs(start - end)
    # Integer division: valid jumps have an even position difference so the
    # midpoint is exact. The original `/` produced a float key and relied on
    # float/int dict-key equality.
    center = (end + start) // 2
    if difference in (2, 18) and board[end] == "O" and board[center] == "!":
        board[start] = "O"
        board[center] = "O"
        board[end] = "!"
        return True
    return False
import re
def _read_reaction_kinetic_law_from_sbml(reaction, mass_reaction, f_replace, **kwargs):
    """Read the SBML reaction kinetic law and return it.

    Parses the kinetic law formula into a sympy expression (rewriting SBML
    ``pow(...)`` calls into ``**``), applies the ID replacements used
    elsewhere during import, and collects the law's local parameters. When
    the reaction has no kinetic law, a mass action rate law is generated
    instead. (Fix: the original fetched ``reaction.getKineticLaw()`` twice.)

    Parameters
    ----------
    reaction : libsbml.Reaction
        SBML reaction whose kinetic law is read.
    mass_reaction : MassReaction
        Corresponding mass reaction being built.
    f_replace : dict
        ID replacement functions used during import.
    **kwargs
        Supports ``remove_char`` for ID sanitising.

    Returns
    -------
    tuple
        ``(rate_equation, local_parameters)``.

    Warnings
    --------
    This method is intended for internal use only.
    """
    mass_rid = mass_reaction.id
    sbml_species = (
        list(reaction.getListOfReactants())
        + list(reaction.getListOfProducts())
        + list(reaction.getListOfModifiers())
    )
    sbml_species = [sref.getSpecies() for sref in sbml_species]
    local_parameters = {}
    if reaction.isSetKineticLaw():
        sbml_rid = reaction.getIdAttribute()
        # Get the kinetic law and the rate equation as a string.
        kinetic_law = reaction.getKineticLaw()
        rate_eq = _check_required(kinetic_law, kinetic_law.getFormula(), "formula")
        # Perform substitution for power law operations to sympify rate
        for match in _KLAW_POW_RE.finditer(rate_eq):
            old = match.group(0)
            new = "(({0})**{1})".format(match.group("arg"), match.group("exp"))
            rate_eq = rate_eq.replace(old, new)
        # Try to sympify the reaction rate
        try:
            rate_eq = sympify(rate_eq)
        except SympifyError as e:
            raise MassSBMLError(e)
        # If ID replacements were performed earlier then apply the ID
        # replacements for metabolite and parameter arguments in rate law also.
        id_subs = {}
        for arg in list(rate_eq.atoms(Symbol)):
            arg = str(arg)
            new_arg = arg
            # Check if reaction is in the name of the parameter
            if re.search(sbml_rid, arg) and sbml_rid != mass_rid:
                new_arg = _get_corrected_id(
                    new_arg,
                    (sbml_rid, mass_rid, arg),
                    "Parameter",
                    kwargs.get("remove_char"),
                )
            elif arg in sbml_species:
                new_arg = _get_corrected_id(
                    new_arg, (f_replace, F_SPECIE), None, kwargs.get("remove_char")
                )
            else:
                if kwargs.get("remove_char"):
                    new_arg = _remove_char_from_id(new_arg)
            id_subs[arg] = new_arg
        # Make rate equation
        rate_eq = rate_eq.subs(id_subs)
        for local_parameter in kinetic_law.getListOfLocalParameters():
            pid = _check_required(
                local_parameter, local_parameter.getIdAttribute(), "id"
            )
            value = local_parameter.getValue()
            if re.search(sbml_rid, pid) and sbml_rid != mass_rid:
                pid = _get_corrected_id(
                    pid,
                    (sbml_rid, mass_rid, pid),
                    "Parameter",
                    kwargs.get("remove_char"),
                )
            elif kwargs.get("remove_char"):
                pid = _remove_char_from_id(pid)
            local_parameters[pid] = value
    else:
        LOGGER.warning(
            "No kinetic law found for SBML reaction '%s'. Therefore, assigning"
            " the MassReaction '%s' a rate law based on Mass Action Kinetics.",
            reaction,
            mass_rid,
        )
        rate_eq = mass_reaction.get_mass_action_rate(1)
    return rate_eq, local_parameters
def _total_probe_count_without_interp(params, probe_counts):
"""Calculate a total probe count without interpolation.
This assumes that params are keys in the datasets of probe_counts.
The result of ic._make_total_probe_count_across_datasets_fn should give
the same count as this function (if params are keys in the datasets
of probe_counts). But this uses probe_counts directly and can be
used as a sanity check -- i.e., it does not do any interpolation.
Args:
params: parameter values to use when determining probe counts;
params[i] is the (i % N)'th parameter of the (i/N)'th dataset,
where N is the number of datasets
probe_counts: dict giving number of probes for each dataset and
choice of parameters
Returns:
total number of probes across all datasets, according to the
given values of params
"""
num_datasets = len(probe_counts)
# The total number of parameters must be a multiple of the number
# of datasets
assert len(params) % num_datasets == 0
num_params = int(len(params) / num_datasets)
s = 0
for i, dataset in enumerate(sorted(probe_counts.keys())):
p = tuple(params[num_params * i + j] for j in range(num_params))
s += probe_counts[dataset][p]
return s | 0973e667dbf1fc3bdf476791cbf709549230f94b | 29,156 |
import os
import argparse
def existing_file(path):
    """Check that ``path`` names an existing file (argparse type callable).

    Returns:
        str: The path to the file.

    Raises:
        argparse.ArgumentTypeError: If the path argument does not exist.
    """
    if os.path.isfile(path):
        return path
    raise argparse.ArgumentTypeError(
        'No such file or directory: "%s"' % path)
from typing import Any
import math
def make_divisible(x: Any, divisor: int):
    """Return the smallest multiple of ``divisor`` that is >= ``x``."""
    quotient = math.ceil(x / divisor)
    return quotient * divisor
import csv
import os
import tarfile
import tempfile
import glob
import shutil
import html
import uuid
def run(tarfolder,outfolder,typeformat="sbml",choice="2",selenzyme_table="N",filenames=''):
"""Main function that runs the tool"""
print(typeformat)
#Initialization
scores={} #scores (thermodynamics values, FBA...)
scores_col={} #scores colors (for gradient mapping)
RdfG_o={}
RdfG_m={}
RdfG_uncert={}
Path_flux_value={}
Length={}
dict_net={} #dictionary if the user provides a file to match ID and name of intermediate compounds
#CREATE DICT WITH MNX COMPOUNDS ID->NAMES FROM METANETX DB
reader = csv.reader(open(os.path.join(os.path.dirname(__file__),"chem_prop.tsv")),delimiter="\t")
d={}
for i in range(385): #skip 1st rows
next(reader)
for row in reader:
d[row[0]]=list(row[1:])[0] #1st column = CMPD..., 2nd column=name
#IF THERE IS A FILE FOR PRODUCTS NAMES
try:
namesdict={}
with open(filenames, 'r') as csvFile:
reader = csv.reader(csvFile,delimiter=';')
for row in reader:
namesdict[row[0]]=row[1] #row 0 : id (CMPD...), row 1 : name
except:
namesdict={}
def readoutput(f,output,outfolder):
"""either from libsbml, or from readcsv"""
G=nx.DiGraph() #new pathway = new network
LR=output[0]
Lreact=output[1]
Lprod=output[2]
name=output[3]
species_smiles=output[4]
reac_smiles=output[5]
images=output[6]
images2=output[7] #small reaction image
species_names=output[8]
species_links=output[9] #link to metanetx for metabolic compounds
roots=output[10] #for target reaction and target molecule
dic_types=output[11]
image2big=output[12] #zoom on reaction image
data_tab=output[13] #selenzyme table 5st rows
dfG_prime_o=output[14]
dfG_prime_m=output[15]
dfG_uncert=output[16]
flux_value=output[17]
rule_id=output[18] #RetroRules ID
rule_score=output[19]
fba_obj_name=output[20]
RdfG_o[f]=output[21]
RdfG_m[f]=output[22]
RdfG_uncert[f]=output[23]
if flux_value !={}:
Path_flux_value[f]=list(flux_value.values())[-1]
if 'target_reaction' in roots.keys():
Length[f]=len(LR)-1 #pathway length doesn't include the "target reaction" node
else :
Length[f]=len(LR)
revers=output[24]
G=network2(G,LR,Lreact,Lprod,name,species_smiles,reac_smiles,images,\
images2,species_names,species_links,roots,dic_types,\
image2big,data_tab, dfG_prime_o,dfG_prime_m, dfG_uncert,\
flux_value, rule_id,rule_score, fba_obj_name,revers)
#CREATE NETWORK DICTIONNARY
js = nx.readwrite.json_graph.cytoscape_data(G)
elements=js['elements']
dict_net[name]=elements #dictionary with list of nodes and edges
downloadcsv(outfolder,f,LR,reac_smiles,Lreact,Lprod,species_names,dfG_prime_o,dfG_prime_m,dfG_uncert,flux_value,\
rule_id,rule_score,RdfG_o,RdfG_m, RdfG_uncert,Path_flux_value,roots)
return(G,name,RdfG_o,RdfG_m,RdfG_uncert,Path_flux_value,Length)
#READ AND EXTRACT TARFILE
try:
tar = tarfile.open(tarfolder) ##read tar file
isFolder = False #the input is not a folder but a tar file
except:
isFolder = True
with tempfile.TemporaryDirectory() as tmpdirname:
if not isFolder:
print('created temporary directory', tmpdirname) #create a temporary folder
tar.extractall(path=tmpdirname)
tar.close()
infolder=tmpdirname
else:
infolder=tarfolder
tmpdirname=tarfolder #the folder is directly the input, not temporary
#DEPEND ON THE FORMAT
if typeformat=='sbml':
pathways=os.listdir(infolder) #1 sbml file per pathway
for f in pathways:
print(f)
file=os.path.join(infolder,f)
output=sbml2list(file, selenzyme_table,d,namesdict) #extract info from sbml
data=readoutput(f, output,outfolder)
RdfG_o=data[2]
RdfG_m=data[3]
RdfG_uncert=data[4]
Path_flux_value=data[5]
Length=data[6]
if typeformat=='csv':
"""Input = output folder from RP2paths"""
# READ CSV FILE WITH PATHWAYS (out_path.csv)
csvfilepath=os.path.join(tmpdirname,"path","out1","out_paths.csv")
datapath=[]
with open(csvfilepath, 'r') as csvFile:
reader = csv.reader(csvFile)
for row in reader:
datapath.append(row)
csvFile.close()
nbpath=int(datapath[-1][0])
for path in range(1,nbpath+1): #for each pathway
print(path)
output=csv2list2(tmpdirname,path, datapath, selenzyme_table,d,namesdict)
data=readoutput(path, output,outfolder)
RdfG_o=data[2]
RdfG_m=data[3]
RdfG_uncert=data[4]
Path_flux_value=data[5]
Length=data[6]
pathways=range(1,nbpath+1)
scores["dfG_prime_o (kJ/mol)"]=RdfG_o
scores["dfG_prime_m (kJ/mol)"]=RdfG_m
scores["dfG_uncert (kJ/mol)"]=RdfG_uncert
scores["flux_value (mmol/gDW/h)"]=Path_flux_value
scores["length"]=Length
if choice=="2":#view in separated files
for f in glob.glob(os.path.join(os.path.dirname(__file__),'new_html','*')): #to copy the required files in the outfolder
shutil.copy(f,outfolder)
html(outfolder,pathways,scores,scores_col,dict_net)
os.chdir( outfolder )
return (os.path.join(os.path.abspath(outfolder), 'index.html'))
elif choice=="5": #provide a tar file as output
for f in glob.glob(os.path.join(os.path.dirname(__file__),'new_html','*')):
shutil.copy(f,outfolder)
html(outfolder,pathways,scores,scores_col,dict_net)
#CREATE TAR FILE AS OUTPUT
fid = str(uuid.uuid4())
newtarfile = os.path.join(os.path.abspath(outfolder),fid+'.tar')
files = os.listdir(outfolder)
os.chdir( outfolder )
tFile = tarfile.open(newtarfile, 'w')
for f in files:
tFile.add(f)
tFile.close()
return(newtarfile) | 38c036a4acc086d783fc3a1c33bc394fd585c05c | 29,159 |
def one_vehicle_xml():
    """Return a canned XML instant (as bytes) emulating a response that
    describes a single vehicle trajectory."""
    # Fixture payload: one creation, one trajectory, one waiting entry point.
    stream = b'<INST nbVeh="1" val="2.00"><CREATIONS><CREATION entree="Ext_In" id="1" sortie="Ext_Out" type="VL"/></CREATIONS><SORTIES/><TRAJS><TRAJ abs="25.00" acc="0.00" dst="25.00" id="0" ord="0.00" tron="Zone_001" type="VL" vit="25.00" voie="1" z="0.00"/></TRAJS><STREAMS/><LINKS/><SGTS/><FEUX/><ENTREES><ENTREE id="Ext_In" nb_veh_en_attente="1"/></ENTREES><REGULATIONS/></INST>'
    return stream
def select_data(all_tetrode_data, index):
    """
    Restrict every tetrode array to the trials given by `index`.
    :param all_tetrode_data: (list of 4d numpy arrays) each of format [trial, 1, neuron + tetrode, time]
    :param index: (1d numpy array) trial indices to keep
    :return: (list of 4d numpy arrays) the same arrays restricted to those trials
    """
    return [tetrode[index, :, :, :] for tetrode in all_tetrode_data]
from datetime import datetime
def normalize(ds_train, ds_cv, ds_test):
    """
    Normalization of datasets.

    The scaler is fitted on the training set only and then applied unchanged
    to the cross-validation and test sets, so no information leaks from them.

    Parameters
    ----------
    ds_train: Dataset
        Training set
    ds_cv: Dataset
        Cross-validation set
    ds_test: Dataset
        Test set

    Returns
    -------
    norm_train: Dataset
        Normalized training set
    norm_cv: Dataset
        Normalized cross-validation set
    norm_test: Dataset
        Normalized test set
    """
    def _log(msg):
        # Timestamped progress line; keeps the original "[INFO] <ts> - <msg> ... " format.
        t = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        print("[INFO] {} - {} ... ".format(t, msg))

    _log("Normalizing training set")
    normalizer = dl.Normalization()
    # Fit the scaler on training data only (z-score, one scaler per column).
    ds_train.data = normalizer.fit_and_transform(ds_train.data,
                                                 method='z_score_std',
                                                 per_col_scaler=True)
    _log("Normalizing crossval set")
    ds_cv.data = normalizer.transform(ds_cv.data)
    _log("Normalizing test set")
    ds_test.data = normalizer.transform(ds_test.data)
    return ds_train, ds_cv, ds_test
def generate_sequential(num_users=100,
                        num_items=1000,
                        num_interactions=10000,
                        concentration_parameter=0.1,
                        order=3,
                        random_state=None):
    """
    Generate a dataset of user-item interactions where sequential
    information matters.

    Item sequences come from an n-th order Markov chain with a uniform
    stationary distribution; the doubly-stochastic transition matrix is
    sampled from a Dirichlet distribution with a constant concentration
    parameter (values closer to zero give more predictable sequences).

    Parameters
    ----------
    num_users: int, optional
        number of users in the dataset
    num_items: int, optional
        number of items (Markov states) in the dataset
    num_interactions: int, optional
        number of interactions to generate
    concentration_parameter: float, optional
        Controls how predictable the sequence is; closer to zero
        means more predictable.
    order: int, optional
        order of the Markov chain
    random_state: numpy.random.RandomState, optional
        random state used to generate the data

    Returns
    -------
    Interactions: :class:`spotlight.interactions.Interactions`
        instance of the interactions class
    """
    rng = np.random.RandomState() if random_state is None else random_state
    transitions = _build_transition_matrix(
        num_items - 1,
        concentration_parameter,
        rng)
    # Sorting makes each user's interactions contiguous in the output arrays.
    user_ids = np.sort(rng.randint(0,
                                   num_users,
                                   num_interactions,
                                   dtype=np.int32))
    # +1 offset keeps generated item ids >= 1 (presumably id 0 is reserved — verify).
    item_ids = _generate_sequences(num_interactions,
                                   transitions,
                                   order,
                                   rng) + 1
    timestamps = np.arange(len(user_ids), dtype=np.int32)
    ratings = np.ones(len(user_ids), dtype=np.float32)
    return Interactions(user_ids,
                        item_ids,
                        ratings=ratings,
                        timestamps=timestamps,
                        num_users=num_users,
                        num_items=num_items)
def expand(fluid, pfinal, eta):
    """Adiabatically expand `fluid` down to pressure `pfinal` through a
    turbine of isentropic efficiency `eta`.

    Mutates `fluid` to its real outlet state and returns the work
    actually extracted (per unit mass, via the enthalpy balance).
    """
    h_in = fluid.enthalpy_mass()
    s_in = fluid.entropy_mass()
    # Ideal outlet state: same entropy, final pressure.
    fluid.set(S = s_in, P = pfinal)
    w_ideal = h_in - fluid.enthalpy_mass()
    w_real = w_ideal * eta
    # Real outlet state from the energy balance h_out = h_in - w_real.
    fluid.set(H = h_in - w_real, P = pfinal)
    return w_real
def ADOSC(
    frame,
    fast=3,
    slow=10,
    high_col="high",
    low_col="low",
    close_col="close",
    vol_col="Volume",
):
    """Chaikin A/D oscillator.

    Delegates to talib.ADOSC through _frame_to_series using the named
    high/low/close/volume columns of `frame`.
    """
    columns = [high_col, low_col, close_col, vol_col]
    return _frame_to_series(frame, columns, talib.ADOSC, fast, slow)
def op_scr(
    gep: pd.DataFrame,
    gross_tp: pd.DataFrame,
    ul_exp: float,
    bscr: float
):
    """
    SCR Op Risk module.

    Inputs:
    - gep: gross earned premiums, last 12m and the 12m prior
    - gross_tp: gross technical provisions (BEL should be positive)
    - ul_exp: unit-linked expenses
    - bscr: basic SCR (caps the operational charge at 30%)

    Returns (scr_op, op).
    """
    # Earned-premium components, net of unit-linked business for life.
    life_last = gep.at['life_all', 'gep_last12m'] - gep.at['life_ul', 'gep_last12m']
    life_prior = gep.at['life_all', 'gep_prior12m'] - gep.at['life_ul', 'gep_prior12m']
    nl_last = gep.at['nl', 'gep_last12m']
    nl_prior = gep.at['nl', 'gep_prior12m']
    # Premium-based charge, with an add-on when premiums grew by more than 20%.
    op_premiums = (0.04 * life_last
                   + 0.04 * max(0., life_last - 1.2 * life_prior)
                   + 0.03 * nl_last
                   + 0.03 * max(0., nl_last - 1.2 * nl_prior))
    # Provision-based charge (floored at zero per line of business).
    op_provisions = (0.0045 * max(0., gross_tp.at['life_all'] - gross_tp.at['life_ul'])
                     + 0.03 * max(0., gross_tp.at['nl']))
    op = max(op_premiums, op_provisions)
    # Cap at 30% of BSCR, then add 25% of unit-linked expenses.
    scr_op = min(op, 0.3 * bscr) + 0.25 * ul_exp
    return scr_op, op
def parse_voyager_sclk(sclk, planet=None):
    """Convert a Voyager clock string (FDS) to a numeric value.

    Typically, a partition number is not specified for FDS counts. However, if
    it is, it must be compatible with the planetary flyby. The partition number
    is 2 for Jupiter and Saturn, 3 for Uranus, and 4 for Neptune.

    If the planet is not specified (planet = None), then any partition value in
    the range 2-4 is allowed and its value is ignored. If the planet is given as
    input (5 for Jupiter, 6 for Saturn, 7 for Uranus, 8 for Neptune), then an
    explicitly stated partition number must be compatible with the associated
    planetary flyby.

    Returns the clock value in units of FDS "hours" (hours plus fractional
    minutes/seconds). Raises ValueError on any malformed or out-of-range input.
    """
    assert planet in (None, 5, 6, 7, 8), 'Invalid planet value: ' + str(planet)

    # Check the partition number before ignoring it
    parts = sclk.split('/')
    if len(parts) > 2:
        raise ValueError('Invalid FDS format, extraneous "/": ' + sclk)

    if len(parts) == 2:
        try:
            partition = int(parts[0])
        except ValueError:
            raise ValueError('Partition number is not an integer: ' + sclk)

        # NOTE(review): VOYAGER_PLANET_PARTITIONS / VOYAGER_PLANET_NAMES are
        # module-level lookup tables defined elsewhere in this file.
        if planet is None:
            if partition not in VOYAGER_PLANET_PARTITIONS.values():
                raise ValueError('Partition number out of range 2-4: ' + sclk)
        else:
            required_partition = VOYAGER_PLANET_PARTITIONS[planet]
            if partition != required_partition:
                name = VOYAGER_PLANET_NAMES[planet]
                raise ValueError('Partition number for %s flyby ' % name +
                                 'must be %d: ' % required_partition + sclk)

        sclk = parts[1]

    # Separator can be '.' or ':'
    if '.' in sclk:
        parts = sclk.split('.')
    elif ':' in sclk:
        parts = sclk.split(':')
    else:
        parts = [sclk]

    if len(parts) > 3:
        raise ValueError('More than three fields in Voyager clock: ' + sclk)

    # Make sure field are integers
    ints = []
    try:
        for part in parts:
            ints.append(int(part))
    except ValueError:
        raise ValueError('Voyager clock fields must be integers: ' + sclk)

    # If we have just a single six- or seven-digit number, maybe the separator
    # was omitted. This is how Voyager image names are handled.
    if len(ints) == 1 and ints[0] >= 100000:
        ints = [ints[0] // 100, ints[0] % 100]

    # Append fields to make three (missing "minutes" -> 0, missing "seconds" -> 1)
    if len(ints) == 1:
        ints.append(0)

    if len(ints) == 2:
        ints.append(1)

    # Check fields for valid ranges
    if ints[0] > 65535 or ints[0] < 0:
        raise ValueError('Voyager clock "hours" out of range 0-65535: ' + sclk)

    if ints[1] > 59 or ints[1] < 0:
        raise ValueError('Voyager clock "minutes" out of range 0-59: ' + sclk)

    if ints[2] > 800 or ints[2] < 1:
        raise ValueError('Voyager clock "seconds" out of range 1-800: ' + sclk)

    # Return in units of FDS hours (seconds are 1-based, hence ints[2]-1)
    return ints[0] + (ints[1] + (ints[2]-1) / 800.) / 60.
def format_url(url):
    """
    Normalize a url: force the 'http' scheme and strip 'www.'.
    :param url: url to an article or a domain
    :return: formatted url, e.g. the following urls:
        'http://www.google.pl/', 'google.pl/', 'www.google.pl/',
        'http://google.pl/', 'https://www.google.pl/'
        are all formatted to: http://google.pl/
    :raises ValueError: when the normalized url fails validation
    """
    parsed = urlparse(url, 'http')
    # Scheme-less input ends up with the host in .path rather than .netloc.
    host = parsed.netloc or parsed.path
    path = parsed.path if parsed.netloc else ''
    host = host.replace('www.', '')
    rebuilt = ParseResult('http', host, path, *parsed[3:])
    if not validators.url(rebuilt.geturl()):
        raise ValueError('Provided url=' + url + ' is not valid')
    return rebuilt.geturl()
def append(arr, values, axis=None):
    """Append `values` to the end of `arr` along `axis`.

    When `axis` is None both operands are flattened first and the
    result is one-dimensional.
    """
    base = asanyarray(arr)
    if axis is not None:
        return concatenate((base, values), axis=axis)
    # Flattened mode: join the 1-D views end to end.
    if base.ndim != 1:
        base = base.ravel()
    return concatenate((base, ravel(values)), axis=base.ndim - 1)
def volo_d4_448(pretrained=False, **kwargs):
    """ VOLO-D4 model, Params: 193M """
    model_args = dict(
        layers=(8, 8, 16, 4),
        embed_dims=(384, 768, 768, 768),
        num_heads=(12, 16, 16, 16),
        **kwargs,
    )
    # Build (and optionally load pretrained weights for) the registered variant.
    return _create_volo('volo_d4_448', pretrained=pretrained, **model_args)
from functools import reduce
def rec_hasattr(obj, attr):
    """
    Recursive hasattr.

    :param obj:
        The top-level object to check for attributes on
    :param attr:
        Dot delimited attribute name, e.g. rec_hasattr(obj, 'a.b.c')
    :return: True when every attribute in the chain resolves, else False
    """
    try:
        # Walk the attribute chain; any missing link raises AttributeError.
        reduce(getattr, attr.split('.'), obj)
        return True
    except AttributeError:
        return False
def find_svos(tokens):
    """
    Extract all subject-verb-object triples from a parsed token list.
    :param tokens: the parsed list.
    :return: list of (subject, verb, object) lowercased triples; the verb is
        prefixed with '!' when either the verb or the object is negated.
    """
    svos = []
    candidate_verbs = [t for t in tokens if t.pos_ == "VERB" and t.dep_ != "aux"]
    for verb in candidate_verbs:
        subs, verb_negated = get_all_subs(verb)
        if not subs:
            # no subject found: nothing to pair this verb with
            continue
        verb, objs = get_all_objs(verb)
        for sub in subs:
            for obj in objs:
                negated = verb_negated or is_negated(obj)
                verb_text = ("!" + verb.lower_) if negated else verb.lower_
                svos.append((sub.lower_, verb_text, obj.lower_))
    return svos
def def_axiom(arg1):
    """
    def-axiom rule prove propositional tautologies axioms.

    Proving these requires a propositional-logic decision procedure; until
    one is wired in, the goal is discharged with ProofTerm.sorry. The
    commented-out code below is a previous attempt via rewriting plus a
    SAT solver, kept for reference.
    """
    # Ts = analyze_type(arg1)
    # if IntType in Ts:
    #     pt = refl(arg1).on_rhs(
    #         top_conv(rewr_conv('int_ite01')),
    #         bottom_conv(rewr_conv('eq_mean_true')),
    #         bottom_conv(integer.int_norm_eq()),
    #         bottom_conv(integer.int_neq_false_conv()),
    #         proplogic.norm_full()
    #     )
    #     pt = pt.symmetric()
    #     try:
    #         basic.load_theory('sat')
    #         pt_cnf = solve_cnf(pt.lhs)
    #         basic.load_theory('smt')
    #         return pt.equal_elim(pt_cnf)
    #     except:
    #         pass
    # try:
    #     return solve_cnf(arg1)
    # except:
    return ProofTerm.sorry(Thm([], arg1))
def not_shiptoast_check(self, message):
    """Return True when `message` was NOT sent in a shiptoast channel.

    A channel counts as shiptoast when either its id or its name appears
    in self.settings["shiptoast"].
    """
    # Direct boolean expression replaces the redundant if/return-False/else/return-True.
    shiptoast = self.settings["shiptoast"]
    return message.channel.id not in shiptoast and message.channel.name not in shiptoast
def jaccard_distance_loss(y_true, y_pred, smooth=100):
    """
    Smoothed Jaccard (IoU) distance loss:
        Jaccard = (|X & Y|)/ (|X|+ |Y| - |X & Y|)
                = sum(|A*B|)/(sum(|A|)+sum(|B|)-sum(|A*B|))
    The returned loss is (1 - jaccard) * smooth, summed over the last axis
    reductions; `smooth` avoids division by zero and scales the gradient.
    """
    overlap = tf.reduce_sum(tf.math.abs(y_true * y_pred), axis=-1)
    totals = tf.reduce_sum(tf.math.abs(y_true) + tf.math.abs(y_pred), axis=-1)
    jaccard = (overlap + smooth) / (totals - overlap + smooth)
    return tf.reduce_sum((1 - jaccard) * smooth)
def DiscoverConnect(sources, target_attributes, allowed_links, chain_attribute, chain_filters, link_type, max_length, connect_function = None, debug = None):
    """
    Run DiscoverConnectOne for every source element and combine the results.

    sources - ModelList of elements to start from
    target_attributes - list of attribute pairs used to detect target elements
    allowed_links - list of attribute pairs filtering links during traversal
    chain_attribute - attribute used to build the '-'-joined path string
        (e.g. "ib_node-ib_port-ib_port-ib_switch_element-ib_switch")
    chain_filters - list of regular expressions; all must match the path
        string or the candidate is discarded
    link_type - type assigned to each new link
    max_length - maximum chain length to explore

    example: DiscoverConnect(list_of_switches, [("type", "ib_switch"), ("type", "node")], [("type", "ib")], "type", ["^.*-ib_port-ib_port-[^p]*$"], "logical_ib")
    """
    # NOTE(review): ModelLinkList.append appears to return the combined list
    # (the result is reassigned) — verify against its definition.
    combined = ModelLinkList()
    for src in sources:
        combined = combined.append(
            DiscoverConnectOne(src, target_attributes, allowed_links,
                               chain_attribute, chain_filters, link_type,
                               max_length, connect_function, debug))
    return combined
def unshare_document(token, docid, userid):
    """
    Unshares a document from another user.

    :param token: The user JWT token.
    :type token: str
    :param docid: The DocID of the document.
    :type docid: str
    :param userid: The UserID of the user to be unshared from.
    :type userid: str
    :raises grpc.RpcError:
        Raised by the gRPC library to indicate non-OK-status RPC termination.
    :returns: The number of users unshared with.
    :rtype: int
    """
    # Open an authenticated channel, issue the RPC, and return the server's count.
    with client.connect_to_server_with_auth(token) as channel:
        stub = strongdoc_pb2_grpc.StrongDocServiceStub(channel)
        req = document_pb2.UnshareDocumentReq(docID=docid, userID=userid)
        resp = stub.UnshareDocument(req, timeout=constants.GRPC_TIMEOUT)
        return resp.count
import copy
import torch
def AlterNChannels(layer2alter_id, new_n_channels, old_model):
    """
    Function to increase number of channels of a conv/sepconv layer.

    Args:
        layer2alter_id: id (in the model descriptor) of the layer to change
        new_n_channels: number of channels for the altered layer (must be
            larger than the current count)
        old_model: dict with keys 'model_descriptor' and 'pytorch_model'
    Returns:
        Returns mutated model: dict with 'pytorch_model', 'model_descriptor'
        and 'topo_ordering'.

    NOTE(review): requires a CUDA device — the new model and copied weights
    are moved with .cuda(). Existing weights are inherited; new channels are
    zero-initialized in the subsequent layer so the function output is
    initially unchanged.
    """
    new_model_descriptor = copy.deepcopy(old_model['model_descriptor'])
    old_pytorch_model = old_model['pytorch_model']
    # Get layer where altering number of channels and also subsequent layers (as input of subsequent layer is changed)
    layer2alter_conv = [layer for layer in new_model_descriptor['layers'] if layer['id'] == layer2alter_id][0]
    layer2alter_bn = [layer for layer in new_model_descriptor['layers'] if layer['input'] == [layer2alter_id]][0]
    layer2alter_acti = [layer for layer in new_model_descriptor['layers'] if layer['input'] == [layer2alter_bn['id']]][0]
    subsequentlayer2alter = [layer for layer in new_model_descriptor['layers'] if
                             layer2alter_acti['id'] in layer['input']]
    layer_type = layer2alter_conv['type']
    # Check some constraints
    assert ((layer2alter_conv['type'] == 'conv') or (layer2alter_conv['type'] == 'sep')), 'Error: Layer hast to be conv or sepconv layer.'
    assert layer2alter_conv['params']['channels'] < new_n_channels, 'Error: Can only increase number of channels.'
    assert len(subsequentlayer2alter) == 1, 'Error, more than one outgoing connection not allowed'
    assert ((subsequentlayer2alter[0]['type'] == 'conv') or (
            subsequentlayer2alter[0]['type'] == 'dense')), 'Error, subsequent layer has to be conv or dense layer'
    # Make necessary changes to new descriptor
    layer2alter_conv['params']['channels'] = new_n_channels
    # For new architecture
    layer2alter_bn['params']['in_channels'] = new_n_channels
    # Re-number the touched layers so the new architecture gets fresh ids.
    old_id_conv = layer2alter_conv['id']
    old_id_bn = layer2alter_bn['id']
    old_id_sub = subsequentlayer2alter[0]['id']
    new_id_conv = utils.GetUnusedID(new_model_descriptor)
    new_id_bn = new_id_conv + 1
    new_id_acti = new_id_conv + 2
    new_id_sub = new_id_conv + 3
    layer2alter_conv['id'] = new_id_conv
    layer2alter_bn['id'] = new_id_bn
    layer2alter_bn['input'] = [new_id_conv]
    layer2alter_acti['id'] = new_id_acti
    layer2alter_acti['input'] = [new_id_bn]
    subsequentlayer2alter[0]['input'] = [new_id_acti]
    subsequentlayer2alter[0]['id'] = new_id_sub
    subsubsequentlayers = [layer for layer in new_model_descriptor['layers'] if old_id_sub in layer['input']]
    # For new architecture
    for layer in subsequentlayer2alter:
        layer['params']['in_channels'] = new_n_channels
    utils.ReplaceInput(subsubsequentlayers, old_id_sub, new_id_sub)
    # Instantiate the widened network and inherit all unchanged weights.
    new_pytorch_model = ConvNet(new_model_descriptor)
    new_pytorch_model.cuda()
    new_pytorch_model = utils.InheritWeights(old_model['pytorch_model'], new_pytorch_model)
    # Modify weights of changed layers
    if layer_type == 'conv':
        # Conv layer where number of channels have been changed
        new_weights_conv = copy.deepcopy(new_pytorch_model._modules[str(new_id_conv)].weight)
        new_bias_conv = copy.deepcopy(new_pytorch_model._modules[str(new_id_conv)].bias)
        old_weights_conv = copy.deepcopy(old_pytorch_model._modules[str(old_id_conv)].weight)
        old_bias_conv = copy.deepcopy(old_pytorch_model._modules[str(old_id_conv)].bias)
        # Recalculate: copy old filters into the first output channels.
        new_weights_conv[0:old_weights_conv.shape[0], :, :, :] = nn.Parameter(old_weights_conv)
        new_bias_conv[0:old_bias_conv.shape[0]] = nn.Parameter(old_bias_conv)
        state_dict = {"weight": new_weights_conv.cuda(),
                      "bias": new_bias_conv.cuda()}
        new_pytorch_model._modules[str(new_id_conv)].load_state_dict(state_dict)
    elif layer_type == 'sep':
        # Depthwise part is copied unchanged (its channel count is the input count).
        old_weights_conv = copy.deepcopy(old_pytorch_model._modules[str(old_id_conv)].depthwise.weight)
        old_bias_conv = copy.deepcopy(old_pytorch_model._modules[str(old_id_conv)].depthwise.bias)
        state_dict = {"weight": nn.Parameter(old_weights_conv).cuda(),
                      "bias": nn.Parameter(old_bias_conv).cuda()}
        new_pytorch_model._modules[str(new_id_conv)].depthwise.load_state_dict(state_dict)
        # Pointwise part grows in output channels, like a plain conv.
        new_weights_conv = copy.deepcopy(new_pytorch_model._modules[str(new_id_conv)].pointwise.weight)
        new_bias_conv = copy.deepcopy(new_pytorch_model._modules[str(new_id_conv)].pointwise.bias)
        old_weights_conv = copy.deepcopy(old_pytorch_model._modules[str(old_id_conv)].pointwise.weight)
        old_bias_conv = copy.deepcopy(old_pytorch_model._modules[str(old_id_conv)].pointwise.bias)
        # Recalculate
        new_weights_conv[0:old_weights_conv.shape[0], :, :, :] = nn.Parameter(old_weights_conv)
        new_bias_conv[0:old_bias_conv.shape[0]] = nn.Parameter(old_bias_conv)
        state_dict = {"weight": new_weights_conv.cuda(),
                      "bias": new_bias_conv.cuda()}
        new_pytorch_model._modules[str(new_id_conv)].pointwise.load_state_dict(state_dict)
    # Copy old weights for BN layer (weight, bias, running_mean, running_var)
    new_weights_bn = []
    new_weights_bn.append(copy.deepcopy(new_pytorch_model._modules[str(new_id_bn)].weight))
    new_weights_bn.append(copy.deepcopy(new_pytorch_model._modules[str(new_id_bn)].bias))
    new_weights_bn.append(copy.deepcopy(new_pytorch_model._modules[str(new_id_bn)].running_mean))
    new_weights_bn.append(copy.deepcopy(new_pytorch_model._modules[str(new_id_bn)].running_var))
    old_weights_bn = []
    old_weights_bn.append(copy.deepcopy(old_pytorch_model._modules[str(old_id_bn)].weight))
    old_weights_bn.append(copy.deepcopy(old_pytorch_model._modules[str(old_id_bn)].bias))
    old_weights_bn.append(copy.deepcopy(old_pytorch_model._modules[str(old_id_bn)].running_mean))
    old_weights_bn.append(copy.deepcopy(old_pytorch_model._modules[str(old_id_bn)].running_var))
    for weight_idx, weight in enumerate(new_weights_bn):
        # First two entries are learnable parameters, last two are buffers.
        if weight_idx < 2:
            new_weights_bn[weight_idx][0:old_weights_bn[weight_idx].shape[0]] = nn.Parameter(
                old_weights_bn[weight_idx])
        else:
            new_weights_bn[weight_idx][0:old_weights_bn[weight_idx].shape[0]] = old_weights_bn[weight_idx]
    state_dict = {"weight": new_weights_bn[0].cuda(),
                  "bias": new_weights_bn[1].cuda(),
                  "running_mean": new_weights_bn[2].cuda(),
                  "running_var": new_weights_bn[3].cuda()}
    new_pytorch_model._modules[str(new_id_bn)].load_state_dict(state_dict)
    new_weights_sub = copy.deepcopy(new_pytorch_model._modules[str(new_id_sub)].weight)
    old_weights_sub = copy.deepcopy(old_pytorch_model._modules[str(old_id_sub)].weight)
    old_bias_sub = copy.deepcopy(old_pytorch_model._modules[str(old_id_sub)].bias)
    # Copy old weights
    new_weights_sub[:, 0:old_weights_sub.shape[1], :, :] = old_weights_sub
    # Fill up new channels with 0's so the widened layer initially contributes nothing new
    new_weights_sub[:, old_weights_sub.shape[1]:, :, :] = torch.from_numpy(
        np.zeros(shape=new_weights_sub[:, old_weights_sub.shape[1]:, :, :].shape))
    new_bias_sub = copy.deepcopy(old_bias_sub)
    state_dict = {"weight": nn.Parameter(new_weights_sub.cuda()),
                  "bias": nn.Parameter(new_bias_sub.cuda())}
    new_pytorch_model._modules[str(new_id_sub)].load_state_dict(state_dict)
    new_model = {'pytorch_model': new_pytorch_model,
                 'model_descriptor': new_model_descriptor,
                 'topo_ordering': new_pytorch_model.topo_ordering}
    return new_model
def generate_bins(bins, values=None):
    """Compute bin edges for numpy.histogram based on values and a requested bin parameters
    Unlike `range`, the largest value is included within the range of the last, largest value,
    so generate_bins(N) with produce a sequence with length N+1
    Arguments:
      bins (int or 2-tuple of floats or sequence of floats) s or the first pair of bin edges
    >>> generate_bins(0, [])
    [0]
    >>> generate_bins(3, [])
    [0, 1, 2, 3]
    >>> generate_bins(0)
    [0]
    >>> generate_bins(10)
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    >>> generate_bins(10, range(21))
    [0.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0]
    >>> generate_bins((0, 3), range(21))
    [0, 3, 6, 9, 12, 15, 18, 21]
    """
    # Normalize `bins` to a sequence: int -> (count,), float -> (0, upper edge).
    if isinstance(bins, int):
        bins = (bins,)
    if isinstance(bins, float):
        bins = (0, bins)
    if not len(bins) in (1, 2):
        return bins
    # NOTE(review): an all-zero `values` sequence is treated like "no values"
    # here, because any() is False for it — confirm this is intended.
    if values is None or not hasattr(values, '__iter__') or not any(values) or not hasattr(values, '__len__') or len(values) < 1:
        values = [0]
    # NOTE(review): pd.np was removed in pandas >= 2.0; this relies on the
    # legacy numpy alias and should import numpy directly when modernized.
    value_min, value_max = pd.np.min(values), pd.np.max(values)
    value_range = value_max - value_min
    if len(bins) == 1:
        if not value_range:
            return range(int(bins[0]) + 1)
        bins = (0, value_range / float(bins[0]))
    if len(bins) == 2:
        if not value_range:
            return bins
        binwidth = ((bins[1] - bins[0]) or 1)
        bin0 = bins[0] or pd.np.min(values)
        # Snap the lower edge to 0 when it is small relative to the spread.
        if (bin0 / value_range) <= .3:
            bin0 = 0
        numbins = int(value_range / float(binwidth))
        bins = list(pd.np.arange(numbins + 1) * binwidth + bin0)
    else:
        # Full edge sequence given: extend it with the smallest step until it
        # covers the largest value.
        binwidth = pd.np.min(pd.np.diff(bins)) or pd.np.mean(pd.np.diff(bins)) or 1.
        bins = list(bins)
        while bins[-1] < value_max:
            bins.append(bins[-1] + binwidth)
    return bins
def makeRollAnswerStr( roll_res, mention_str ):
    """Formats an answer string depending on the roll result.

    roll_res: None, a 2-tuple (threshold/success roll), or a 3-tuple
        (default roll: value, min, max)
    mention_str: mention of the user the answer addresses
    Returns the formatted string, or None (after logging a warning) when
    `roll_res` has an unrecognized shape.
    """
    answer = None
    if roll_res is None:  # `is None`, not `== None`
        answer = "Invalid dice expression !"
    elif len(roll_res) == 2:  # either threshold or success roll
        res, aux = roll_res
        if isinstance(res, bool):  # threshold roll
            # care: bool must be checked before int, since bool subclasses int
            if res:
                answer = "{} succeeded ! (Roll value was: `{}`)".format(mention_str, aux)
            else:
                answer = "{} failed ! (Roll value was: `{}`)".format(mention_str, aux)
        elif isinstance(res, int):  # success roll
            answer = "{} succeeded `{}` times ! (Number of attempts: `{}`)".format(mention_str, res, aux)
    elif len(roll_res) == 3:  # default roll
        res, minVal, maxVal = roll_res
        answer = "{} rolled a `{}`! (Possible values between `{}` and `{}`)".format(mention_str, res, minVal, maxVal)
    if answer is None:
        loc_log.warning("makeRollAnswerStr: The 'roll_res' argument '{}' is invalid !".format(roll_res))
    return answer
import math
def ECSPower(min, max, size):
    """
    Domestic hot water (ECS) heating power profile.

    Mains water temperature is modelled as a sinusoid with a one-year period,
    aligned on a weather file that starts on January 1st (the file may span
    several years).

    min : minimum temperature of mains water injected into the tank
    max : maximum temperature of mains water injected into the tank
    size : number of timesteps to generate

    NOTE(review): `min` and `max` shadow the builtins, and the function relies
    on module-level globals (npy, Volume_ballon, Npers, Tballon, Cpf) defined
    elsewhere in the file — verify their values/units before reuse.
    """
    T_water=np.zeros(size)
    ## angular frequency for a one-year period (npy timesteps per year)
    w = 2*math.pi/npy
    for i in range(size):
        # timestep number within the current year
        siy = i - npy*(i//npy)
        T_water[i]= 0.5 * ( (min-max)* math.cos(w*siy) + max + min )
    # the demand is per day, i.e. 24*3600 seconds, so divide by 24*3600
    # to convert from J to W (Cpf is expressed in J/kg/K)
    return Volume_ballon*Npers*(Tballon-T_water)*Cpf/(24*3600)
from typing import Dict
from typing import Any
def __create_notification(title: str, content: str) -> Dict[str, Any]:
    """
    Build a notification "object" as a plain dict.
    :params title: The title of the notification.
    :params content: The content of the notification.
    :returns A dictionary with "title" and "content" keys.
    """
    notification: Dict[str, Any] = {}
    notification["title"] = title
    notification["content"] = content
    return notification
def readme():
    """Read and patch README for PyPI."""
    text = read('README.rst')
    # PyPI's reST renderer rejects :class: roles, so downgrade to a literal.
    return text.replace(':class:`base64io.Base64IO`', '``base64io.Base64IO``')
def generator_dcgan(noise_dim, img_source_dim,img_dest_dim, bn_mode,deterministic,pureGAN,inject_noise,wd, model_name="generator_dcgan"):
    """DCGAN generator based on Upsampling and Conv2D

    Args:
        noise_dim: Dimension of the noise input
        img_source_dim: shape of the source image input (channels-first or -last
            depending on the Keras backend ordering)
        img_dest_dim: shape of the generated output image
        bn_mode: keras batchnorm mode
        deterministic, pureGAN, inject_noise, wd: accepted but unused in this
            body — presumably kept for signature compatibility with sibling
            generators (verify before removing)
        model_name: model name (default: {"generator_dcgan"})
    Returns:
        keras model

    NOTE(review): this uses the Keras 1.x API (Convolution2D, mode= kwarg,
    weight_norm) and a custom conv layer; several early values (f, the first
    start_dim, nb_upconv, nb_filters, reshape_shape) are overwritten or unused
    further down.
    """
    s = img_source_dim[1]
    f = 512
    # shp = np.expand_dims(img_dim[1:],1) # to make shp= (None, 1, 28, 28) but is not working
    start_dim = int(s / 4)
    nb_upconv = 2
    nb_filters = 64
    if K.image_dim_ordering() == "th":
        bn_axis = 1
        input_channels = img_source_dim[0]
        output_channels = img_dest_dim[0]
        reshape_shape = (input_channels, s, s)
        shp=reshape_shape
    else:
        bn_axis = -1
        input_channels = img_source_dim[-1]
        output_channels = img_dest_dim[-1]
        reshape_shape = (s, s, input_channels)
        shp=reshape_shape
    gen_noise_input = Input(shape=noise_dim, name="generator_input")
    gen_image_input = Input(shape=shp, name="generator_image_input")
    # NOTE(review): gen_image_input is declared as a model input but never
    # used in the graph below.
    start_dim = int(s / 16)
    n_fc_filters = 16
    x = Dense(n_fc_filters * 16 * 16, input_dim=noise_dim, weight_norm=True,init="he_normal")(gen_noise_input) #WN = True in AFFINE
    x = Activation("relu")(x)
    # x = Dense(n_fc_filters * 16 * 16, input_dim=noise_dim)(x)
    # x = BatchNormalization(mode=bn_mode, axis=bn_axis)(x)
    # x = Activation("relu")(x)
    x = Reshape((n_fc_filters,16,16))(x)
    # Upscaling blocks: Upsampling2D->Conv2D->ReLU->BN->Conv2D->ReLU
    for i in range(nb_upconv):
        x = UpSampling2D(size=(2, 2))(x)
        nb_filters = int(f / (2 ** (i + 1)))
        x = Convolution2D(nb_filters, 3, 3, border_mode="same",weight_norm=True, kernel_initializer="he_normal")(x)
        # x = BatchNormalization(mode=bn_mode, axis=bn_axis)(x)
        x = Activation("relu")(x)
        x = Convolution2D(nb_filters, 3, 3, border_mode="same",weight_norm=True, kernel_initializer="he_normal")(x)
        x = Activation("relu")(x)
    # Last Conv to get the output image
    x = Convolution2D(output_channels, 3, 3, name="gen_conv2d_final",
                      border_mode="same", activation='tanh', kernel_initializer="he_normal")(x) #W_constraint=unitnorm()
    generator_model = Model(input=[gen_noise_input,gen_image_input], output=[x], name=model_name)
    visualize_model(generator_model)
    return generator_model
import sys
def get_ironic_client():
    """Get Ironic client instance.

    Credentials come from the [ironic] section of the service configuration;
    exits the process with an explanatory message when they are incomplete.
    """
    creds = {'os_password': CONF.ironic.os_password,
             'os_username': CONF.ironic.os_username,
             'os_tenant_name': CONF.ironic.os_tenant_name,
             'os_auth_url': CONF.ironic.os_auth_url,
             'os_endpoint_type': 'internal'}
    try:
        return client.get_client(1, **creds)
    except AmbiguousAuthSystem:
        err_msg = ("Some credentials are missing from the [ironic] section of "
                   "the configuration. The following configuration files were "
                   "searched: (%s)." % ', '.join(CONF.config_file))
        sys.exit(err_msg)
def emg21(peak_index, x_pos, amp, init_pars=pars_dict,
          vary_shape_pars=True, index_first_peak=None):
    """
    Hyper-EMG(2,1) lmfit model (single-peak fit model with two exponential tails
    on the left and one exponential tail on the right)
    Parameters
    ----------
    peak_index : int
        Index of peak to fit.
    x_pos : float
        Initial guess of peak centroid.
    amp : float
        Initial guess of peak amplitude.
    init_pars : dict
        Initial parameters for fit ('amp' and 'mu' parameters in `init_pars`
        dictionary are overwritten by the given `amp` and `x_pos` arguments)
    vary_shape_pars : bool
        Whether to vary or fix peak shape parameters (i.e. sigma, theta,
        eta's and tau's).
    index_first_peak : int
        Index of the first peak to be fit in a multi-peak-fit. Only use this
        during peak shape determination to enforce common shape parameters
        for all peaks to be fitted. (For a regular fit with
        ``vary_shape_pars = False`` this is irrelevant.)
    Returns
    -------
    :class:`lmfit.model.Model`
        `lmfit` model object
    """
    # NOTE(review): `init_pars` defaults to the module-level dict `pars_dict`;
    # it is only read here, but callers should avoid mutating it in place.
    # Define model function
    # (The nested function deliberately reuses the name `emg21`; lmfit derives
    # the model's parameter names from this function's signature, and the
    # shadowing is confined to this scope.)
    def emg21(x, amp, mu, sigma, theta, eta_m1,eta_m2,tau_m1,tau_m2,tau_p1):
        return amp*h_emg(x, mu, sigma, theta, (eta_m1,eta_m2),(tau_m1,tau_m2),(1,),(tau_p1,)) # from emg_funcs.py
    pref = 'p{0}_'.format(peak_index) # set prefix for respective peak (e.g. 'p0' for peak with index 0)
    model = fit.Model(emg21, prefix = pref, nan_policy='propagate')
    # Add parameters bounds or restrictions and define starting values
    # `mu` is constrained to a small relative window around the initial guess
    # (rel_var_mus is a module-level constant).
    model.set_param_hint(pref+'amp', value=amp, min=1e-20)
    model.set_param_hint(pref+'mu', value=x_pos, min=x_pos*(1-rel_var_mus), max=x_pos*(1+rel_var_mus))
    model.set_param_hint(pref+'sigma', value= init_pars['sigma'], min=0, max=init_pars['sigma']+0.005, vary=vary_shape_pars)
    model.set_param_hint(pref+'theta', value= init_pars['theta'], min=0, max=1, vary=vary_shape_pars)
    model.set_param_hint(pref+'eta_m1', value= init_pars['eta_m1'], min=0, max=1, vary=vary_shape_pars)
    model.set_param_hint(pref+'eta_m2', value= init_pars['eta_m2'], min=0, max=1, expr='1-'+pref+'eta_m1') # ensures normalization of eta_m's
    model.set_param_hint(pref+'tau_m1', value= init_pars['tau_m1'], min=1e-12, max=upper_bound_taus, vary=vary_shape_pars)
    model.set_param_hint(pref+'tau_m2', value= init_pars['tau_m2'], min=1e-12, max=upper_bound_taus, vary=vary_shape_pars)
    model.set_param_hint(pref+'tau_p1', value= init_pars['tau_p1'], min=1e-12, max=upper_bound_taus, vary=vary_shape_pars)
    # Enfore common shape parameters for all peaks
    # (only needed during peak shape calibration)
    if index_first_peak != None and (peak_index != index_first_peak):
        first_pref = 'p{0}_'.format(index_first_peak)
        # NOTE(review): with `expr` set, lmfit evaluates these parameters from
        # the first peak's values; the value/min/max given here act only as
        # initial hints -- confirm against the lmfit Parameters documentation.
        model.set_param_hint(pref+'sigma', value= init_pars['sigma'], min=0, max=init_pars['sigma']+0.005, expr=first_pref+'sigma')
        model.set_param_hint(pref+'theta', value= init_pars['theta'], min=0, max=1, expr=first_pref+'theta')
        model.set_param_hint(pref+'eta_m1', value= init_pars['eta_m1'], min=0, max=1, expr=first_pref+'eta_m1' )
        model.set_param_hint(pref+'eta_m2', value= init_pars['eta_m2'], min=0, max=1, expr='1-'+pref+'eta_m1') # ensures normalization of eta_m's
        model.set_param_hint(pref+'tau_m1', value= init_pars['tau_m1'], min=1e-12, max=upper_bound_taus, expr=first_pref+'tau_m1')
        model.set_param_hint(pref+'tau_m2', value= init_pars['tau_m2'], min=1e-12, max=upper_bound_taus, expr=first_pref+'tau_m2')
        model.set_param_hint(pref+'tau_p1', value= init_pars['tau_p1'], min=1e-12, max=upper_bound_taus, expr=first_pref+'tau_p1')
    return model
def _wait_and_retry(provider, job_id, poll_interval, retries, job_descriptor,
                    summary):
  """Wait for job and retry any tasks that fail.
  Stops retrying an individual task when: it succeeds, is canceled, or has been
  retried "retries" times.
  This function exits when there are no tasks running and there are no tasks
  eligible to be retried.
  Args:
    provider: job service provider
    job_id: a single job ID (string) to wait for
    poll_interval: integer seconds to wait between iterations
    retries: number of retries
    job_descriptor: job descriptor used to originally submit job
    summary: whether to output summary messages
  Returns:
    Empty list if there was no error,
    a list containing an error message from a failed task otherwise.
  """
  # Poll forever; the only exit is the `return` below once no task is
  # running and none remains eligible for retry.
  while True:
    formatted_tasks = []
    tasks = provider.lookup_job_tasks({'*'}, job_ids=[job_id])
    # Bucket every task attempt seen this poll by its reported status.
    running_tasks = set()
    completed_tasks = set()
    canceled_tasks = set()
    fully_failed_tasks = set()
    # task_id -> number of FAILURE attempts observed for that task.
    task_fail_count = dict()
    # This is an arbitrary task that is either fully failed or canceled (with
    # preference for the former).
    message_task = None
    task_dict = dict()
    for t in tasks:
      task_id = t.get_field('task-id')
      if task_id is not None:
        task_id = int(task_id)
      task_dict[task_id] = t
      status = t.get_field('task-status')
      if status == 'FAILURE':
        # Could compute this from task-attempt as well.
        task_fail_count[task_id] = task_fail_count.get(task_id, 0) + 1
        if task_fail_count[task_id] > retries:
          fully_failed_tasks.add(task_id)
          message_task = t
      elif status == 'CANCELED':
        canceled_tasks.add(task_id)
        if not message_task:
          message_task = t
      elif status == 'SUCCESS':
        completed_tasks.add(task_id)
      elif status == 'RUNNING':
        running_tasks.add(task_id)
      if summary:
        formatted_tasks.append(
            output_formatter.prepare_row(t, full=False, summary=True))
    if summary:
      formatter = output_formatter.TextOutput(full=False)
      formatter.prepare_and_print_table(formatted_tasks, summary)
    # Tasks that have failed at least once but are not terminal (success,
    # canceled, out of retries) and are not currently running.
    retry_tasks = (
        set(task_fail_count).difference(fully_failed_tasks)
        .difference(running_tasks).difference(completed_tasks)
        .difference(canceled_tasks))
    # job completed.
    if not retry_tasks and not running_tasks:
      # If there are any fully failed tasks, return the completion message of an
      # arbitrary one.
      # If not, but there are canceled tasks, return the completion message of
      # an arbitrary one.
      if message_task:
        return [provider.get_tasks_completion_messages([message_task])]
      # Otherwise successful completion.
      return []
    for task_id in retry_tasks:
      # NOTE(review): a task_id of 0 is falsy, so it would be printed as the
      # bare job_id here -- confirm task ids are 1-based or None.
      identifier = '{}.{}'.format(job_id, task_id) if task_id else job_id
      print('  {} (attempt {}) failed. Retrying.'.format(
          identifier, task_fail_count[task_id]))
      msg = task_dict[task_id].get_field('status-message')
      print('  Failure message: ' + msg)
      _retry_task(provider, job_descriptor, task_id,
                  task_fail_count[task_id] + 1)
    SLEEP_FUNCTION(poll_interval)
def summarize_2_dual_3(package_list):
    """
    Given list of packages, return counts of (py3-only, dual-support, py2-only)
    """
    # Single pass over the packages, classifying each by its 'status' field.
    counts = {'py3': 0, 'dual': 0, 'py2': 0}
    for pkg in package_list:
        status = pkg['status']
        if status == 'py3-only':
            counts['py3'] += 1
        elif status in PY2_STATUSES:
            counts['dual'] += 1
        else:
            counts['py2'] += 1
    return counts['py3'], counts['dual'], counts['py2']
import os
import json
def get_invalid_resumes():
    """Load invalid JSON resumes for unit tests.

    Returns:
        list of TestSchemaFile: one entry per file in ``<RESUME_DIR>/invalid``,
        each carrying the parsed JSON, the file's basename (sans extension)
        as remarks, and ``valid=False``.
    """
    folder = os.path.join(RESUME_DIR, 'invalid')
    resumes = []
    for filename in os.listdir(folder):
        # FIX: the original called open() inside a list comprehension without
        # ever closing the handle; use a context manager so each file is
        # closed as soon as it is parsed.
        with open(os.path.join(folder, filename), 'r') as fp:
            contents = json.load(fp)
        resumes.append(
            TestSchemaFile(
                filename,
                contents,
                remarks=os.path.splitext(filename)[0],
                valid=False
            )
        )
    return resumes
import os
import shutil
def get_exppath(tag, name=None, override=False, prompt=False, root='~'):
    """ Specific path for experiments results
    Parameters
    ----------
    tag: string
        specific tag for the task you are working on
    name: string
        name of the folder contains all the results (NOTE: the
        name can has subfolder)
    override: bool
        if True, remove exist folder
    prompt: bool
        if True, display prompt and require (Y) input before
        delete old folder, if (N), the program exit.
    root: string
        root path for the results (default: "~/.odin")
    """
    path = _get_managed_path('exp',
                             tag,
                             False,
                             is_folder=True,
                             root=root,
                             odin_base=False)
    # Descend into (and create, if needed) each component of the optional
    # sub-folder name; `name` may contain '/' separators.
    if name is not None:
        for part in str(name).split('/'):
            path = os.path.join(path, part)
            if not os.path.exists(path):
                os.mkdir(path)
    # ====== check if override ====== #
    if override and len(os.listdir(path)) > 0:
        if prompt:
            # FIX: the original never filled the %s placeholder, so the user
            # was shown a literal '%s' instead of the folder being deleted.
            user_cmd = raw_input('Do you want to delete "%s" (Y for yes):' % path).lower()
            if user_cmd != 'y':
                exit()
        # Wipe the existing results folder and recreate it empty.
        shutil.rmtree(path)
        os.mkdir(path)
    return path
def isPTSF(p, T=[]):
    """
    >>> from common.production import Production
    >>> p = Production(['A'], [['\\"a\\"', '\\"b\\"'],['\\"cde\\"']])
    >>> isPTSF(p)
    True
    >>> p = Production(['A'], [['\\"a\\"', '\\"b\\"'],['\\"cde\\"']])
    >>> isPTSF(p, ['a', 'b', 'c', 'd', 'e'])
    True
    >>> p = Production(['A'], [['\\"a\\"', '\\"b\\"'],['\\"cde\\"']])
    >>> isPTSF(p, ['a'])
    False
    >>> p = Production(['A'], [['a', 'b'],['Ade']])
    >>> isPTSF(p)
    False
    """
    # True iff every symbol of every right-hand-side alternative is a
    # literal value (short-circuits on the first non-literal, like the
    # original nested loops).
    return all(isLiteralValue(symbol, T)
               for option in p.right
               for symbol in option)
def new_getvalue( state, name, p):
    """
    Called every time a node value is used in an expression.
    It will override the value for the current step only.
    Returns random values for the node states
    """
    global TARGETS
    current = util.default_get_value( state, name, p )
    # Non-targeted nodes keep their original value.
    if name not in TARGETS:
        return current
    # Targeted nodes are perturbed: pick uniformly among True, False,
    # and the unmodified value.
    return choice( [True, False, current] )
import os
import csv
def csv_find(filein, data):
    """Find *data* in the second column of a CSV file.

    Args:
        filein: path to the CSV file.
        data: value (e.g. a stored hash) to look for in column index 1.

    Returns:
        The first column of the first matching row (a string), or -1 when
        the file does not exist or no row matches.
    """
    if not os.path.isfile(filein):
        return(-1)
    with open(filein, 'rt') as fi:
        reader = csv.reader(fi, delimiter=',')
        for row in reader:
            # FIX: blank lines yield empty rows and short rows lack column 1;
            # skip them instead of raising IndexError.
            if len(row) < 2:
                continue
            hashout = row[1]  # location of hash
            if hashout == data:
                return(row[0])
    return(-1)
def informe_ministerios():
    """Render the ministries report (members holding ministry roles)."""
    check_edit_or_admin()
    # Ministry roles (tipo_rol == 'M') joined to the members holding them;
    # passed to the template as a lazy query object, like the original.
    ministry_roles = db.session.query(Rol).filter(Rol.tipo_rol == 'M')\
        .join(relacion_miembros_roles,
              relacion_miembros_roles.c.id_rol == Rol.id)\
        .join(Miembro,
              Miembro.id == relacion_miembros_roles.c.id_miembro)\
        .add_columns(Miembro.id, Rol.nombre_rol)
    # Full member details (address, member type, marital status) for every
    # member holding a ministry role. Outer joins keep members even when an
    # address / type / status record is missing.
    detail_query = db.session.query(Miembro)\
        .outerjoin(relacion_miembros_roles,
                   Miembro.id == relacion_miembros_roles.c.id_miembro)\
        .outerjoin(Rol,
                   Rol.id == relacion_miembros_roles.c.id_rol)\
        .outerjoin(Direccion,
                   Miembro.id_direccion == Direccion.id)\
        .outerjoin(TipoMiembro,
                   Miembro.id_tipomiembro == TipoMiembro.id)\
        .outerjoin(EstadoCivil,
                   Miembro.id_estadocivil == EstadoCivil.id)\
        .filter(Rol.tipo_rol == 'M')\
        .add_columns(Miembro.id,
                     Miembro.fullname,
                     Miembro.email,
                     Miembro.telefono_fijo,
                     Miembro.telefono_movil,
                     EstadoCivil.nombre_estado,
                     TipoMiembro.nombre_tipomiembro,
                     Direccion.tipo_via,
                     Direccion.nombre_via,
                     Direccion.nro_via,
                     Direccion.portalescalotros_via,
                     Direccion.cp_via,
                     Direccion.ciudad_via,
                     Direccion.provincia_via,
                     Direccion.pais_via)
    return render_template('informes/informe_ministerios.html',
                           informes=detail_query.all(),
                           roles=ministry_roles)
def merge_regions_and_departments(regions, departments):
    """Merge regions and departments in one DataFrame.

    The columns in the final DataFrame are:
    ['code_reg', 'name_reg', 'code_dep', 'name_dep']
    """
    # Left join so every region is kept even without departments; the
    # overlapping 'code'/'name' columns get the _reg/_dep suffixes.
    left = regions[["code", "name"]]
    right = departments[['region_code', "code", "name"]]
    merged = left.merge(right,
                        how='left',
                        left_on='code',
                        right_on='region_code',
                        suffixes=('_reg', '_dep'))
    # The join key from the right side is redundant after the merge.
    return merged.drop('region_code', axis=1)
import json
def updateResourceJsons(swagger,examplesDict,dirName):
    """
    Update the Resource JSON file to include examples in other folder

    For every resource listed in ``swagger['tags']`` (except
    CapabilityStatement), load the per-resource swagger subset from
    ``./output/<resource>.json``, attach all of the resource's example
    payloads found in ``dirName`` to the POST request body, write the
    subset back, and finally write the combined spec to
    ``./output/openapi3.json``.

    Note: ``swagger`` is mutated in place. Returns "SUCCESS" on completion
    or "ERROR" if any step raised.
    """
    # Broad try/except is deliberate: any I/O or key error during the batch
    # update is reported and signalled via the "ERROR" return value.
    try:
        # Iterate through all resources in the output folder
        # (`id` shadows the builtin here; harmless but worth noting.)
        for id in range(len(swagger['tags'])):
            resourceName = swagger['tags'][id]['name']
            if resourceName == 'CapabilityStatement':
                continue
            # create swagger subset which was initially created in 'AnnotateFiles.py'
            with open('./output/'+resourceName+'.json',encoding='utf8') as f:
                swaggerSubset = json.load(f)
            resourceExamples = {}
            # Iterate through all examples for the resource
            for example in examplesDict[resourceName]:
                with open(dirName+"/"+example,encoding='utf8') as f:
                    exampleContents = json.load(f)
                # Add the example keyed by the file name
                resourceExamples[example] = {"value":exampleContents}
            # Attach the examples to both the per-resource subset and the
            # combined spec (assumes a POST path '/<resourceName>' exists in
            # both -- a KeyError here is caught by the outer except).
            swaggerSubset['paths']['/'+resourceName]['post']['requestBody']['content']['application/fhir+json']['examples'] = resourceExamples
            swagger['paths']['/'+resourceName]['post']['requestBody']['content']['application/fhir+json']['examples'] = resourceExamples
            # Save the file with 'w' to overwrite current outputted file
            with open('./output/'+resourceName+'.json','w',encoding='utf8') as f:
                json.dump(swaggerSubset,f)
        # Return status
        with open('./output/openapi3.json','w',encoding='utf8') as f:
            json.dump(swagger,f)
        return "SUCCESS"
    except Exception as e:
        # NOTE(review): "duing" is a typo in the log message; left as-is since
        # changing it would alter runtime output.
        print("Error duing saving")
        print(e)
        return "ERROR"
def sign(x):
    """Sign function.

    :return -1 if x < 0, else return 1
    """
    # Note: zero maps to +1, matching the original branch structure.
    return -1 if x < 0 else 1
def set_default_dataseg(*args):
    """
    set_default_dataseg(ds_sel)

    Set default value of DS register for all segments.

    Thin SWIG wrapper: forwards directly to the native IDA implementation
    in ``_ida_segregs``.

    @param ds_sel (C++: sel_t) selector to install as the default DS
    """
    return _ida_segregs.set_default_dataseg(*args)
def get_sms_history(key: str):
    """
    Get SMS history.

    :param str key: Authentication key.
    :return: List of SMSHistoryItems.
    """
    session = get_session(key)
    url = f"{SITE_BASE_URL}/index.php?page=10&lang=en"
    first_response = session.get(url)
    # The pager spans on the first page tell us how many pages exist; the
    # first entry is the page we already fetched.
    page_links = bs(first_response.text, "html.parser").find_all(
        "span", {"class": "page_number"})
    items = _parse_sms_history_items(first_response)
    for page in page_links[1:]:
        items += _parse_sms_history_items(
            session.post(url, {"cur_page": page.text}))
    return items
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.