| content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
|---|---|---|
import argparse
def setup_args():
"""Setup and return the command line argument parser"""
parser = argparse.ArgumentParser(description='')
# parser.add_argument('csv', type=str, help='CSV file to load')
parser.add_argument(
'-clang-tidy-binary', help='Path to the clang-tidy executable.', metavar='PATH', required=True)
parser.add_argument('-clang-apply-replacements-binary',
help='Path to the clang-apply-replacements binary. Required when using -fix and -runner-py' +
' arguments.')
parser.add_argument(
'-runner-py', help='Python script wrapping clang-tidy with support for multiple jobs. run-clang-tidy.py ships' +
' with clang-tidy. Without this clang-tidy is run directly.', metavar='PATH')
parser.add_argument('-fix', action='store_true',
help='Apply automatic fixes. Passes -fix to clang-tidy. When using -runner-py' +
' (run-clang-tidy.py), the argument -clang-apply-replacements-binary must also be set to the' +
' clang-apply-fixes binary.')
parser.add_argument(
'-config-file', help='clang-tidy configuration file. Extracted and passed as the -config argument to' +
' clang-tidy.')
parser.add_argument(
'-p', help='clang-tidy build path (path to compile_commands.json). Extracted and passed as the -p argument to' +
' clang-tidy.', required=False)
parser.add_argument(
'-j', help='Number of parallel jobs to run. Only supported when using the -runner-py script. Ignored ' +
'otherwise.', required=False)
parser.add_argument(
'-relative-to', help='Modify clang-tidy message paths to be relative to this directory. Intended for CI' +
' builds to report portable paths.', required=False)
return parser
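# Usage sketch (illustrative, not part of the original script): build the parser
# and parse a hypothetical command line; the paths and values below are placeholders.
if __name__ == '__main__':
    parser = setup_args()
    args = parser.parse_args(['-clang-tidy-binary', '/usr/bin/clang-tidy',
                              '-p', 'build', '-j', '4'])
    print(args.clang_tidy_binary, args.p, args.j)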
|
477da4faf063a461a77791f372f50e0e105b8ac7
| 3,642,900
|
def create_event(type_, source):
"""Create Event"""
cls = _events.get(type_, UnknownEvent)
try:
return cls(type=type_, **source)
    except TypeError as e:
        raise TypeError(f'Error creating {cls.__name__}: {e}') from e
|
263fc768d94db5ae9cb0acb8565c01337ffb56c6
| 3,642,901
|
import logging
import os
import stat
import subprocess
import tempfile
from subprocess import PIPE
logger = logging.getLogger(__name__)  # stand-in for the package's own logger
def _execute(script, prefix=None, path=None):
"""
Execute a shell script.
Setting prefix will add the environment variable
    COLCON_BUNDLE_INSTALL_PREFIX equal to the passed in value
:param str script: script to execute
:param str prefix: the installation prefix
:param str path: (optional) path to temp directory, or ``None`` to use
default temp directory, ``str``
"""
path = tempfile.gettempdir() if path is None else path
    result = None
try:
fh = tempfile.NamedTemporaryFile('w', delete=False)
fh.write(script)
fh.close()
print('Executing script below with cwd=%s\n{{{\n%s\n}}}\n' %
(path, script))
try:
os.chmod(fh.name, stat.S_IRWXU)
env = os.environ.copy()
if prefix is not None:
env['COLCON_BUNDLE_INSTALL_PREFIX'] = prefix
result = subprocess.run(
fh.name, cwd=path, env=env, stdout=PIPE, stderr=PIPE,
universal_newlines=True)
if result.stdout is not None:
logger.debug('stdout output: \n' + result.stdout)
if result.stderr is not None:
                logger.warning('stderr output: \n' + result.stderr)
except OSError as ex:
print('Execution failed with OSError: %s' % ex)
finally:
if os.path.exists(fh.name):
os.remove(fh.name)
    if result is None:
        return False
    logger.info('Return code was: %s' % result.returncode)
    return result.returncode == 0
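# Minimal usage sketch (assumes a POSIX shell is available; the script body and
# prefix below are placeholders, not taken from the original package):
if __name__ == '__main__':
    ok = _execute('#!/bin/sh\necho "hello from bundle"\n', prefix='/opt/install')
    print('script succeeded:', ok)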
|
e7e34a0da2acee1193f4511688aea43685b359e8
| 3,642,902
|
import os
import boto3
def main(event, context):
"""
Gets layer arns for each region and publish to S3
"""
dynamodb = boto3.resource("dynamodb")
table = dynamodb.Table(os.environ["DB_NAME"])
region = event.get("pathParameters").get("region")
python_version = event.get("pathParameters").get("python_version", "p3.8")
format = event.get("pathParameters").get("format", "json")
api_response = query_table(
table=table, region=region, python_version=python_version
)
body, headers = return_format(
data=api_response, format=format, region=region, python_version=python_version
)
return {
"statusCode": 200,
"headers": headers,
"body": body,
}
|
392b8a7cc5c13efc489ce1c331d4e457e9da994b
| 3,642,903
|
import os
import pickle
def rescore_and_rerank_by_num_inliers(test_image_id,
train_ids_labels_and_scores):
"""Returns rescored and sorted training images by local feature extraction."""
test_image_path = get_image_path(test_image_id)
try:
name = os.path.basename(test_image_path).split('.')[0]
with open(f'{TEST_LF}/{name}.pkl', 'rb') as fp:
test_keypoints, test_descriptors = pickle.load(fp)
except FileNotFoundError:
test_keypoints, test_descriptors = extract_local_features(test_image_path)
for i in range(len(train_ids_labels_and_scores)):
train_image_id, label, global_score = train_ids_labels_and_scores[i]
train_image_path = get_image_path(train_image_id)
name = os.path.basename(train_image_path).split('.')[0]
with open(os.path.join(TRAIN_LF, f"{name}.pkl"), 'rb') as fp:
train_keypoints, train_descriptors = pickle.load(fp)
num_inliers = get_num_inliers(test_keypoints, test_descriptors,
train_keypoints, train_descriptors)
total_score = get_total_score(num_inliers, global_score)
train_ids_labels_and_scores[i] = (train_image_id, label, total_score)
train_ids_labels_and_scores.sort(key=lambda x: x[2], reverse=True)
return train_ids_labels_and_scores
|
837718c2d3d206485651a2dc4ee16682747a0889
| 3,642,904
|
from typing import Iterable
from typing import Callable
from typing import List
from typing import Awaitable
import asyncio
def aggregate_policy(
policies: Iterable[PermissionPolicy_T],
aggregator: Callable[[Iterable[object]], bool] = all
) -> PermissionPolicy_T:
"""
在默认参数下,将多个权限检查策略函数使用 AND 操作符连接并返回单个权限检查策略。在实现中对这几个策略使用内置 `all` 函数,会优先执行同步函数而且尽可能在同步模式的情况下短路。
在新的策略下,只有事件满足了 `policies` 中所有的原策略,才会返回 `True`。
`aggregator` 参数也可以设置为其他函数,例如 `any`: 在此情况下会使用 `OR` 操作符连接。
如果参数中所有的策略都是同步的,则返回值是同步的,否则返回值是异步函数。
版本: 1.9.0+
参数:
policies: 要合并的权限检查策略
aggregator: 用于合并策略的函数
返回:
PermissionPolicy_T: 新的权限检查策略
用法:
```python
# 以下两种方式在效果上等同
policy1 = lambda sender: sender.is_groupchat and sender.from_group(123456789)
policy2 = aggregate_policy(lambda sender: sender.is_groupchat,
lambda sender: sender.from_group(123456789))
```
"""
syncs: List[Callable[[SenderRoles], bool]]
asyncs: List[Callable[[SenderRoles], Awaitable[bool]]]
syncs, asyncs = separate_async_funcs(policies)
def checker_sync(sender: SenderRoles) -> bool:
return aggregator(f(sender) for f in syncs)
if len(asyncs) == 0:
return checker_sync
async def checker_async(sender: SenderRoles) -> bool:
if not checker_sync(sender):
return False
# no short circuiting currently :-(
coros = [f(sender) for f in asyncs]
return aggregator(await asyncio.gather(*coros))
return checker_async
|
c0fa2ef66ba71deba88ca4e9debbb521ba49fe82
| 3,642,905
|
def optionally_load_system_paasta_config(
path: str = PATH_TO_SYSTEM_PAASTA_CONFIG_DIR,
) -> "SystemPaastaConfig":
"""
Tries to load the system paasta config, but will return an empty configuration if not available,
without raising.
"""
try:
return load_system_paasta_config(path=path)
except PaastaNotConfiguredError:
return SystemPaastaConfig({}, "")
|
7309d28ab156572d1b3cdbf347d4979f3ee607d8
| 3,642,906
|
def get_sage_bank_accounts(company_id: int) -> list:
"""
Retrieves the bank accounts for a company in Sage One
**company_id** The Company ID
"""
config = get_config() # Get the config
    sage_client = SageOneAPIClient(
        config.get("sageone", "url"),
        config.get("sageone", "api_key"),
        config.get("sageone", "username"),
        config.get("sageone", "password"),
    )
return sage_client.get_company_bank_accounts(company_id)
|
a3684d5a3b06944ba297f95b5498d61f7281c40c
| 3,642,907
|
def compute_fstar(tarr, mstar, index_select, index_high, fstar_tdelay):
"""Time averaged SFH that has ocurred over some previous time period
fstar = (mstar(t) - mstar(t-fstar_tdelay)) / fstar_tdelay
Parameters
----------
tarr : ndarray of shape (n_times, )
Cosmic time of each simulated snapshot in Gyr
mstar : ndarray of shape (n_times, )
Stellar mass history in Msun units.
index_select: ndarray of shape (n_times_fstar, )
Snapshot indices used in fstar computation.
index_high: ndarray of shape (n_times_fstar, )
Indices of np.searchsorted(t, t - fstar_tdelay)[index_select]
fstar_tdelay: float
Time interval in Gyr units for fstar definition.
fstar = (mstar(t) - mstar(t-fstar_tdelay)) / fstar_tdelay
Returns
-------
fstar : ndarray of shape (n_times)
SFH averaged over timescale fstar_tdelay in units of Msun/yr assuming h=1.
"""
mstar_high = mstar[index_select]
mstar_low = jax_np_interp(
tarr[index_select] - fstar_tdelay, tarr, mstar, index_high
)
fstar = (mstar_high - mstar_low) / fstar_tdelay / 1e9
return fstar
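# Illustrative check of the fstar definition with plain numpy; np.interp stands in
# for the module's jax_np_interp and the toy mass history below is made up.
import numpy as np
tarr_demo = np.linspace(0.5, 13.8, 100)                    # Gyr
mstar_demo = 1e10 * (tarr_demo / tarr_demo[-1]) ** 2       # toy stellar mass history, Msun
fstar_tdelay_demo = 1.0                                    # Gyr
sel = np.where(tarr_demo - fstar_tdelay_demo > tarr_demo[0])[0]
mstar_low_demo = np.interp(tarr_demo[sel] - fstar_tdelay_demo, tarr_demo, mstar_demo)
fstar_demo = (mstar_demo[sel] - mstar_low_demo) / fstar_tdelay_demo / 1e9  # Msun/yr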
|
89ef08ee08f41fa6b7931ebdbbb3611bda6346ae
| 3,642,908
|
def notification_error(code: str, search_id: str, status_code, message: str = None):
"""Return to the event listener a notification error response based on the status code."""
error = CALLBACK_MESSAGES[code].format(search_id=search_id)
if message:
error += ' ' + message
current_app.logger.error(error)
# Track event here.
EventTracking.create(search_id, EventTracking.EventTrackingTypes.API_NOTIFICATION, status_code, message)
if status_code != HTTPStatus.BAD_REQUEST and code not in (resource_utils.CallbackExceptionCodes.MAX_RETRIES,
resource_utils.CallbackExceptionCodes.UNKNOWN_ID):
# set up retry
enqueue_notification(search_id)
return resource_utils.error_response(status_code, error)
|
6522023ad36f7164a2c721493d38d3cc0b5d4690
| 3,642,909
|
import os
def get_avg(feature_name, default_value):
"""Get the average of numeric feature from the environment.
Return the default value if there is no the statistics in
the environment.
Args:
feature_name: String, feature name or column name in a table
default_value: Float.
Return:
Float
"""
env_name = AnalysisEnvTemplate.AVG_ENV.format(feature_name)
mean = os.getenv(env_name, None)
if mean is None:
return default_value
else:
return float(mean)
|
5d1b7270dc0021f5b4a28ad3203a65860b6a05b5
| 3,642,910
|
def arccos(x: REAL) -> float:
"""Arc cosine."""
return pi/2 - arcsin(x)
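# Quick sanity check of the identity arccos(x) = pi/2 - arcsin(x), using the
# standard library in place of the module's own pi/arcsin:
import math
assert abs((math.pi / 2 - math.asin(0.5)) - math.acos(0.5)) < 1e-12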
|
1829e6d777c32172afee7e8608d5d1034458660f
| 3,642,911
|
import requests
def isLinkValid(test_video_link):
"""def isLinkValid(test_video_link): -> test_video_link
check if youtube video link is valid."""
try:
data = requests.get("https://www.youtube.com/oembed?format=json&url=" + test_video_link).json()
if data == "Not Found":
return False
else:
return True
    except Exception:
return False
|
0af4f8c1d05f2b98d046d63d5eaf39f679a37818
| 3,642,912
|
import time
import calendar
def _strptime(data_string, format='%a %b %d %H:%M:%S %Y'):
"""Return a 2-tuple consisting of a time struct and an int containing
the number of microseconds based on the input string and the
format string."""
for index, arg in enumerate([data_string, format]):
if not isinstance(arg, str):
msg = 'strptime() argument {} must be str, not {}'
raise TypeError(msg.format(index, type(arg)))
global _TimeRE_cache, _regex_cache
with _cache_lock:
locale_time = _TimeRE_cache.locale_time
        if (_getlang() != locale_time.lang or
                time.tzname != locale_time.tzname or
                time.daylight != locale_time.daylight):
_TimeRE_cache = TimeRE()
_regex_cache.clear()
locale_time = _TimeRE_cache.locale_time
if len(_regex_cache) > _CACHE_MAX_SIZE:
_regex_cache.clear()
format_regex = _regex_cache.get(format)
if not format_regex:
try:
format_regex = _TimeRE_cache.compile(format)
except KeyError as err:
bad_directive = err.args[0]
if bad_directive == '\\':
bad_directive = '%'
del err
raise ValueError("'%s' is a bad directive in format '%s'" %
(bad_directive, format)) from None
except IndexError:
raise ValueError("stray %% in format '%s'" % format) from None
_regex_cache[format] = format_regex
found = format_regex.match(data_string)
if not found:
raise ValueError('time data %r does not match format %r' % (
data_string, format))
if len(data_string) != found.end():
        raise ValueError('unconverted data remains: %s' %
                         data_string[found.end():])
iso_year = year = None
month = day = 1
hour = minute = second = fraction = 0
tz = -1
tzoffset = None
iso_week = week_of_year = None
week_of_year_start = None
weekday = julian = None
found_dict = found.groupdict()
for group_key in found_dict.keys():
if group_key == 'y':
year = int(found_dict['y'])
if year <= 68:
year += 2000
else:
year += 1900
elif group_key == 'Y':
year = int(found_dict['Y'])
elif group_key == 'G':
iso_year = int(found_dict['G'])
elif group_key == 'm':
month = int(found_dict['m'])
elif group_key == 'B':
month = locale_time.f_month.index(found_dict['B'].lower())
elif group_key == 'b':
month = locale_time.a_month.index(found_dict['b'].lower())
elif group_key == 'd':
day = int(found_dict['d'])
elif group_key == 'H':
hour = int(found_dict['H'])
elif group_key == 'I':
hour = int(found_dict['I'])
ampm = found_dict.get('p', '').lower()
if ampm in ('', locale_time.am_pm[0]):
if hour == 12:
hour = 0
elif ampm == locale_time.am_pm[1]:
if hour != 12:
hour += 12
elif group_key == 'M':
minute = int(found_dict['M'])
elif group_key == 'S':
second = int(found_dict['S'])
elif group_key == 'f':
s = found_dict['f']
s += '0' * (6 - len(s))
fraction = int(s)
elif group_key == 'A':
weekday = locale_time.f_weekday.index(found_dict['A'].lower())
elif group_key == 'a':
weekday = locale_time.a_weekday.index(found_dict['a'].lower())
elif group_key == 'w':
weekday = int(found_dict['w'])
if weekday == 0:
weekday = 6
else:
weekday -= 1
elif group_key == 'u':
weekday = int(found_dict['u'])
weekday -= 1
elif group_key == 'j':
julian = int(found_dict['j'])
elif group_key in ('U', 'W'):
week_of_year = int(found_dict[group_key])
if group_key == 'U':
week_of_year_start = 6
else:
week_of_year_start = 0
elif group_key == 'V':
iso_week = int(found_dict['V'])
elif group_key == 'z':
z = found_dict['z']
tzoffset = int(z[1:3]) * 60 + int(z[3:5])
if z.startswith('-'):
tzoffset = -tzoffset
elif group_key == 'Z':
found_zone = found_dict['Z'].lower()
for value, tz_values in enumerate(locale_time.timezone):
if found_zone in tz_values:
                    if (time.tzname[0] == time.tzname[1] and time.daylight and
                            found_zone not in ('utc', 'gmt')):
break
else:
tz = value
break
if year is None and iso_year is not None:
if iso_week is None or weekday is None:
raise ValueError(
"ISO year directive '%G' must be used with the ISO week directive '%V' and a weekday directive ('%A', '%a', '%w', or '%u')."
)
if julian is not None:
raise ValueError(
"Day of the year directive '%j' is not compatible with ISO year directive '%G'. Use '%Y' instead."
)
elif week_of_year is None and iso_week is not None:
if weekday is None:
raise ValueError(
"ISO week directive '%V' must be used with the ISO year directive '%G' and a weekday directive ('%A', '%a', '%w', or '%u')."
)
else:
raise ValueError(
"ISO week directive '%V' is incompatible with the year directive '%Y'. Use the ISO year '%G' instead."
)
leap_year_fix = False
if year is None and month == 2 and day == 29:
year = 1904
leap_year_fix = True
elif year is None:
year = 1900
if julian is None and weekday is not None:
if week_of_year is not None:
week_starts_Mon = True if week_of_year_start == 0 else False
julian = _calc_julian_from_U_or_W(year, week_of_year, weekday,
week_starts_Mon)
elif iso_year is not None and iso_week is not None:
year, julian = _calc_julian_from_V(iso_year, iso_week, weekday + 1)
if julian is not None and julian <= 0:
year -= 1
yday = 366 if calendar.isleap(year) else 365
julian += yday
if julian is None:
julian = datetime_date(year, month, day).toordinal() - datetime_date(
year, 1, 1).toordinal() + 1
else:
datetime_result = datetime_date.fromordinal(julian - 1 +
datetime_date(year, 1, 1).toordinal())
year = datetime_result.year
month = datetime_result.month
day = datetime_result.day
if weekday is None:
weekday = datetime_date(year, month, day).weekday()
tzname = found_dict.get('Z')
if tzoffset is not None:
gmtoff = tzoffset * 60
else:
gmtoff = None
if leap_year_fix:
year = 1900
return (year, month, day, hour, minute, second, weekday, julian, tz,
tzname, gmtoff), fraction
|
bd222fde85a3db2bdad28394f001ed74b1d68622
| 3,642,913
|
def to_simple_rdd(sc, features, labels):
"""Convert numpy arrays of features and labels into
an RDD of pairs.
:param sc: Spark context
:param features: numpy array with features
:param labels: numpy array with labels
:return: Spark RDD with feature-label pairs
"""
pairs = [(x, y) for x, y in zip(features, labels)]
if custom_hash == True:
rdd = sc.parallelize(pairs).map(lambda pair: (data_partitioner(pair[1]), pair)).partitionBy(NUM_PARTITION,
label_hash)
rdd = rdd.map(lambda composite_pair: composite_pair[1]).cache()
else:
rdd = sc.parallelize(pairs, NUM_PARTITION).cache()
return rdd
|
87afaae6214bedbde60d46e21d8ea82d644a0ca1
| 3,642,914
|
from decimal import Decimal
def add_decimal(op1: Decimal, op2: Decimal) -> Decimal:
    """
    Add two decimals.
    :param op1: first operand
    :param op2: second operand
    :return: the sum as a Decimal, or as a float once the sum exceeds 999
    """
result = op1 + op2
if result > 999:
return float(result)
return result
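# Illustrative calls; note the float fallback once the sum exceeds 999:
from decimal import Decimal
assert add_decimal(Decimal('1.5'), Decimal('2.5')) == Decimal('4.0')
assert isinstance(add_decimal(Decimal('600'), Decimal('500')), float)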
|
d05501daf67845eb339c7103fad02c7d0e2f8dc9
| 3,642,915
|
import os
def get_data_file_path(project, filename):
"""
Gets the path of data files we've stored for each project
    :param project: project name (subdirectory under the data directory)
    :param filename: name of the data file within the project directory
    :return: full path to the data file
"""
return os.path.join(BASE_DIR, "waterspout_api", "data", project, filename)
|
cd64f6c96671e4ea9e28dd906d551a5edf35ca41
| 3,642,916
|
def freenas_spec(**kwargs):
"""FreeNAS specs."""
# Setup vars from kwargs
builder_spec = kwargs['data']['builder_spec']
bootstrap_cfg = None
builder_spec.update(
{
'boot_command': [
'<enter>',
'<wait30>1<enter>',
'y',
'<wait5><spacebar>o<enter>',
'<enter>',
'{{ user `password` }}<tab>{{ user `password` }}<tab><enter>',
'<enter>',
'<wait60><wait60><wait60>',
'<enter>',
'3<enter>',
'<wait60><wait60><wait60><wait60><wait60>',
'9<enter>',
'curl -X PUT -u {{ user `username` }}:{{ user `password` }} -H \'Content-Type: application/json\' -d \'{\"ssh_rootlogin\": true}\' http://localhost/api/v1.0/services/ssh/<enter>', # noqa: E501
'curl -X PUT -u {{ user `username` }}:{{ user `password` }} -H \'Content-Type: application/json\' -d \'{\"srv_enable\": true}\' http://localhost/api/v1.0/services/services/ssh/<enter>' # noqa: E501
],
'boot_wait': '30s',
'shutdown_command': 'shutdown -p now',
}
)
return bootstrap_cfg, builder_spec
|
ffe666fd48b6d545e44389ae0413bc1f0c29c44e
| 3,642,917
|
def checksum(routine):
"""
Compute the M routine checksum used by ``CHECK1^XTSUMBLD``,
implemented in ``^%ZOSF("RSUM1")`` and ``SUMB^XPDRSUM``.
"""
checksum = 0
lineNumber = 0
with open(routine, 'r') as f:
for line in f:
line = line.rstrip('\r\n')
lineNumber += 1
# ignore the second line
if lineNumber == 2:
continue
checksum += routineLineCheckSum(line, lineNumber)
return checksum
|
ca93bbf29967a90b22de007f84ba5ec3898a4f1a
| 3,642,918
|
from ctypes import POINTER, byref, c_int32
def nusdas_parameter_change(param, value):
    """
    Call NuSDaS_parameter_change from libnus and raise if it reports an error.
    """
    # Set argtypes and restype
    nusdas_parameter_change_ct = libnus.NuSDaS_parameter_change
    nusdas_parameter_change_ct.restype = c_int32
    nusdas_parameter_change_ct.argtypes = (c_int32, POINTER(c_int32))
    icond = nusdas_parameter_change_ct(c_int32(param), byref(c_int32(value)))
    if icond != 0:
        raise Exception("nusdas_parameter_change Error: Unsupported parameter: " + str(icond))
    return icond
|
cea9011eb807c6281c9e3d07b640860ee085d1ad
| 3,642,919
|
def validate_retention_time(retention_time):
# type: (str) -> str
"""Validate retention_time. If -1, return string, else convert to ms.
Keyword arguments:
retention_time -- user configured retention-ms, pattern: %d%h%m%s%ms
Return:
retention_time -- If set to "-1", return it
"""
if retention_time == "-1": # sets retention-time to unlimited
return retention_time
return convert_time_ms(retention_time, "retention_time")
|
8f7c701f7e2f2e8e5fa708fef80f04804964928c
| 3,642,920
|
import re
def is_sale(this_line):
"""Determine whether a given line describes a sale of cattle."""
is_not_succinct = len(this_line.split()) > 3
has_price = re.search(r'[0-9]+\.[0-9]{2}', this_line)
return bool(has_price and is_not_succinct)
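# Illustrative behavior (the example lines are made up):
assert is_sale("5 black steers 1200 lbs 142.50") is True   # has a price and more than 3 tokens
assert is_sale("Total 142.50") is False                    # too succinct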
|
382da3d9a1690950e64a29c6f2fcd54e062eb600
| 3,642,921
|
import numpy as np
def extract_policy(env, v, gamma = 1.0):
""" Extract the policy given a value-function """
policy = np.zeros(env.env.nS)
for s in range(env.env.nS):
q_sa = np.zeros(env.env.nA)
for a in range(env.env.nA):
q_sa[a] = sum([p * (r + gamma * v[s_]) for p, s_, r, _ in env.env.P[s][a]])
policy[s] = np.argmax(q_sa)
return policy
|
2342a531e0fa29e4b7bb1946aa30cbe8b739b688
| 3,642,922
|
def generate_parameters(var):
"""
Defines a distribution of parameters
Returns a settings dictionary
var is an iterable of variables in the range [0,1) which
we can make use of.
"""
var = iter(var)
model={}
training={}
settings = {'model':model, 'training':training}
# max_radius is exponential from 3 to 8
model['max_radius'] = 1.5 * 2 ** (1 + 1.4 * next(var))
# number of radial basis funcs is exponential from 8 to 31
model['number_of_basis'] = int( 2 ** (3 + 2 * next(var)) )
# radial_layers is from 1 to 15
model['radial_layers'] = int(2 ** (4 * next(var)) )
# radial_h from 10 to 79
model['radial_h'] = int( 5 * 2 ** (1 + 3 * next(var)) )
# numlayers from exp from 2 to 12
numlayers = int( 2 ** (1 + 2.584963 * next(var)))
# lmax is a polynomial on [0,1), of x = layer/numlayers
# lmax = l0 + l1 x + l2 x^2
# where l0 is whatever gives this min of lmin on [0,1)
l2 = 6 * next(var) - 3 # l2 in [-3, 3]
l1 = 6 * next(var) - 3 # l1 in [-3, 3]
lmin = int(6 * (next(var) ** 2)) # lmin integer in [0,5] inclusive
ns = [l / numlayers for l in range(numlayers)]
lmaxes = [min(lmax_max, int(round(l1 * n + l2 * n**2))) for n in ns]
bump = -min(lmaxes)
lmaxes = [l + bump + lmin for l in lmaxes]
model['lmaxes'] = lmaxes
global mul_coeff
print(f"Using mul_coeff = {mul_coeff}.")
# multiplicities are a fn of both n = layer/numlayers and x = 10/(2l+1)
# m = m0 + m01 x + m10 n + m11 xn
# where m0 is whatever gives this min of mmin
m01 = mul_coeff * (40 * next(var) - 10) # m01 in [-10, 30]
m11 = mul_coeff * (40 * next(var) - 10) # m11 in [-10, 30]
m10 = mul_coeff * (80 * next(var) - 40) # m10 in [-40, 40]
#mmin = int(16 * (next(var) ** 2)) # mmin integer in [1,16] incl.
mmin = int(mul_coeff * 2 ** (next(var) * 6)) + 1 # mmin integer in [2,64] incl.
xs = [[10 / (2*l + 1) for l in range(lmaxes[n]+1)] for n in range(numlayers)]
muls = [[int(m01 * x + m10 * n + m11 * x * n) for x in xl] for n,xl in zip(ns,xs)]
bump = -min([min(lmul) for lmul in muls])
muls = [[m + bump + mmin for m in lmul] for lmul in muls]
model['muls'] = muls
return settings
|
8336a7cb19db62fa95b3b0d131f2ca6f0e919e39
| 3,642,923
|
import numpy as np
import sep
def sourceExtractImage(data, bkgArr=None, sortType='centre', verbose=False,
**kwargs):
"""Extract sources from data array and return enumerated objects sorted
smallest to largest, and the segmentation map provided by source extractor
"""
data = np.array(data).byteswap().newbyteorder()
if bkgArr is None:
bkgArr = np.zeros(data.shape)
o = sep.extract(data, kwargs.pop('threshold', 0.05), segmentation_map=True,
**kwargs)
if sortType == 'size':
if verbose:
print('Sorting extracted objects by radius from size')
sizeSortedObjects = sorted(
enumerate(o[0]), key=lambda src: src[1]['npix']
)
return sizeSortedObjects, o[1]
elif sortType == 'centre':
if verbose:
print('Sorting extracted objects by radius from centre')
centreSortedObjects = sorted(
enumerate(o[0]),
key=lambda src: (
(src[1]['x'] - data.shape[0] / 2)**2
+ (src[1]['y'] - data.shape[1] / 2)**2
)
)[::-1]
return centreSortedObjects, o[1]
|
6fec63cc6e154f874ae3a46a373fb2d7ceff2423
| 3,642,924
|
import os
def _check_resource(resource_path: str) -> bool:
"""
Checks if the resource is file and accessible, or checks that all resources in directory are files and accessible
:param resource_path: A path to the resource
:return: True if resource is OK to upload, False otherwise
"""
if os.path.isfile(resource_path):
        try:
            # use a context manager so the file handle is closed again
            with open(resource_path, 'rb'):
                return True
        except (PermissionError, FileNotFoundError):
            return False
return True
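# Illustrative usage with a temporary file (POSIX-style check; on Windows a
# NamedTemporaryFile cannot be reopened while it is still open):
import tempfile
with tempfile.NamedTemporaryFile() as tmp:
    assert _check_resource(tmp.name) is True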
|
39f8109054367fe2c7f3f5dc61b24564f81160d7
| 3,642,925
|
from typing import Optional
def convert_one_fmt_off_pair(node: Node) -> bool:
"""Convert content of a single `# fmt: off`/`# fmt: on` into a standalone comment.
Returns True if a pair was converted.
"""
for leaf in node.leaves():
previous_consumed = 0
for comment in list_comments(leaf.prefix, is_endmarker=False):
if comment.value not in FMT_PASS:
previous_consumed = comment.consumed
continue
# We only want standalone comments. If there's no previous leaf or
# the previous leaf is indentation, it's a standalone comment in
# disguise.
if comment.value in FMT_PASS and comment.type != STANDALONE_COMMENT:
prev = preceding_leaf(leaf)
if prev:
if comment.value in FMT_OFF and prev.type not in WHITESPACE:
continue
if comment.value in FMT_SKIP and prev.type in WHITESPACE:
continue
ignored_nodes = list(generate_ignored_nodes(leaf, comment))
if not ignored_nodes:
continue
first = ignored_nodes[0] # Can be a container node with the `leaf`.
parent = first.parent
prefix = first.prefix
if comment.value in FMT_OFF:
first.prefix = prefix[comment.consumed :]
if comment.value in FMT_SKIP:
first.prefix = ""
hidden_value = "".join(str(n) for n in ignored_nodes)
if comment.value in FMT_OFF:
hidden_value = comment.value + "\n" + hidden_value
if comment.value in FMT_SKIP:
hidden_value += " " + comment.value
if hidden_value.endswith("\n"):
# That happens when one of the `ignored_nodes` ended with a NEWLINE
# leaf (possibly followed by a DEDENT).
hidden_value = hidden_value[:-1]
first_idx: Optional[int] = None
for ignored in ignored_nodes:
index = ignored.remove()
if first_idx is None:
first_idx = index
assert parent is not None, "INTERNAL ERROR: fmt: on/off handling (1)"
assert first_idx is not None, "INTERNAL ERROR: fmt: on/off handling (2)"
parent.insert_child(
first_idx,
Leaf(
STANDALONE_COMMENT,
hidden_value,
prefix=prefix[:previous_consumed] + "\n" * comment.newlines,
),
)
return True
return False
|
1ebbb67406a5d1de4e51c5a516b15750b0205567
| 3,642,926
|
from uuid import UUID
def validate_uuid4(uuid_string):
    """
    Source: https://gist.github.com/ShawnMilo/7777304
    Validate that a UUID string is in fact a valid uuid4. Luckily, the uuid module
    does the actual checking for us. It is vital that the 'version' kwarg be
    passed to the UUID() call, otherwise any 32-character hex string is considered valid.
"""
try:
val = UUID(uuid_string, version=4)
except ValueError:
# If ValueError, then the string is not a valid hex code for a UUID.
return False
# If the uuid_string is a valid hex code, but an invalid uuid4,
# the UUID.__init__ will convert it to a valid uuid4.
# This is bad for validation purposes.
return val.hex == uuid_string.replace('-','')
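# Illustrative checks:
import uuid
assert validate_uuid4(str(uuid.uuid4())) is True
assert validate_uuid4('not-a-uuid') is False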
|
56bf751cddd412ddc234f371a17019ee9192aefe
| 3,642,927
|
def check_sp(sp):
"""Validate seasonal periodicity.
Parameters
----------
sp : int
Seasonal periodicity
Returns
-------
sp : int
Validated seasonal periodicity
"""
if sp is not None:
if not is_int(sp) or sp < 1:
raise ValueError("`sp` must be a positive integer >= 1 or None")
return sp
|
475a56584915bc4b67663b3460959ca5e807ae06
| 3,642,928
|
from functools import wraps
def api_error_handler(func):
"""
Handy decorator that catches any exception from the Media Cloud API and
sends it back to the browser as a nicely formatted JSON error. The idea is
that the client code can catch these at a low level and display error messages.
"""
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except MCException as e:
logger.exception(e)
return json_error_response(e.message, e.status_code)
return wrapper
|
6e03a5dc081a5aed7436a948194965d1e61504d4
| 3,642,929
|
import numpy as np
def gen_data(data_format, dtype, shape):
"""Generate data for testing the op"""
input = random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)
head_np = input
if data_format == "NC1HWC0":
channel_dims = [1, 4]
elif data_format == DEFAULT:
channel_dims = [1]
else:
channel_dims = [len(shape) - 1]
reduce_axis = [i for i in range(len(shape)) if i not in channel_dims]
if dtype == "float16":
expect = np_bisect_sum(input, axis=tuple(reduce_axis), keepdims=True)
else:
expect = np.sum(input, axis=tuple(reduce_axis), keepdims=True)
output = np.full(expect.shape, np.nan, dtype)
return expect, head_np, input, output
|
6fbc40b4879abec7a2f30c7f763102f51215e079
| 3,642,930
|
from typing import Any
import pydantic
import functools
import inspect
import pathlib
import copy
import numpy as np
import toolz
def clean_value_name(value: Any) -> str:
"""Returns a string representation of an object."""
if isinstance(value, pydantic.BaseModel):
value = str(value)
elif isinstance(value, float) and int(value) == value:
value = int(value)
value = str(value)
elif isinstance(value, (np.int64, np.int32)):
value = int(value)
value = str(value)
elif isinstance(value, np.ndarray):
value = np.round(value, 3)
value = get_string(value)
elif callable(value) and isinstance(value, functools.partial):
sig = inspect.signature(value.func)
args_as_kwargs = dict(zip(sig.parameters.keys(), value.args))
args_as_kwargs.update(**value.keywords)
clean_dict(args_as_kwargs)
args_as_kwargs.pop("function", None)
func = value.func
while hasattr(func, "func"):
func = func.func
value = dict(function=func.__name__, **args_as_kwargs)
value = get_string(value)
elif hasattr(value, "to_dict"):
value = value.to_dict()
value = get_string(value)
elif isinstance(value, np.float64):
value = float(value)
value = str(value)
elif type(value) in [int, float, str, bool]:
pass
elif callable(value) and isinstance(value, toolz.functoolz.Compose):
value = [clean_value_name(value.first)] + [
clean_value_name(func) for func in value.funcs
]
value = get_string(value)
elif callable(value) and hasattr(value, "__name__"):
value = value.__name__
elif isinstance(value, PathPhidl):
value = value.hash_geometry()
elif isinstance(value, pathlib.Path):
value = value.stem
elif isinstance(value, dict):
d = copy.deepcopy(value)
for k, v in d.items():
if isinstance(v, dict):
d[k] = clean_dict(v)
else:
d[k] = clean_value_name(v)
value = get_string(value)
else:
value = get_string(value)
return value
|
24635521ee8bd94324c0b29384ba0f0b39060244
| 3,642,931
|
def add_line_analyzer(func):
"""A simple decorator that adds a function to the list
of all functions that analyze a single line of code."""
LINE_ANALYZERS.append(func)
def wrapper(tokens):
return func(tokens)
return wrapper
|
538b6495be88d47b49efcd3ac28bd0b291810587
| 3,642,932
|
import hashlib
def decode_account(source_a):
"""
Take a string of the form "xrb_..." of length 64 and return
the associated public key (as a bytes object)
"""
assert len(source_a) == 64
assert source_a.startswith('xrb_') or source_a.startswith('xrb-')
number_l = 0
for character in source_a[4:]:
if ord(character) < 0x30 or ord(character) >= 0x80:
raise ValueError('Character out of range')
byte = account_decode(character)
if byte == '~':
raise ValueError('Invalid character')
number_l <<= 5
number_l += ord(byte)
account = (number_l >> 40).to_bytes(length=32, byteorder='big')
# The digest to check is in the lowest 40 bits of the address
check = number_l & 0xffffffffff
hash = hashlib.blake2b(digest_size=5)
hash.update(account)
validation = hash.digest()
assert check.to_bytes(length=5, byteorder='little') == validation
"""
if (!result)
{
*this = (number_l >> 40).convert_to <rai::uint256_t> ();
uint64_t check (number_l.convert_to <uint64_t> ());
check &= 0xffffffffff;
uint64_t validation (0);
blake2b_state hash;
blake2b_init (&hash, 5);
blake2b_update (&hash, bytes.data (), bytes.size ());
blake2b_final (&hash, reinterpret_cast <uint8_t *> (&validation), 5);
result = check != validation;
}
"""
return account
|
5d083adcdd2f64c03c6a2e454b74b4d911381132
| 3,642,933
|
def update_epics_order_in_bulk(bulk_data: list, field: str, project: object):
"""
Update the order of some epics.
`bulk_data` should be a list of tuples with the following format:
[{'epic_id': <value>, 'order': <value>}, ...]
"""
epics = project.epics.all()
epic_orders = {e.id: getattr(e, field) for e in epics}
new_epic_orders = {d["epic_id"]: d["order"] for d in bulk_data}
apply_order_updates(epic_orders, new_epic_orders)
epic_ids = epic_orders.keys()
events.emit_event_for_ids(ids=epic_ids,
content_type="epics.epic",
projectid=project.pk)
db.update_attr_in_bulk_for_ids(epic_orders, field, models.Epic)
return epic_orders
|
948bfaa6e165ac401cfa0244ee6c1b0bcd813493
| 3,642,934
|
def calc_precision(output, target):
"""calculate precision from tensor(b,c,x,y) for every category c"""
precs = []
for c in range(target.size(1)):
true_positives = ((output[:, c] - (output[:, c] != 1).int()) == target[:, c]).int().sum().item()
# print(true_positives)
false_positives = ((output[:, c] - (output[:, c] != 1).int()) == (target[:, c] != 1).int()).int().sum().item()
# print(false_positives)
if (true_positives == 0):
precs.append(1.0)
else:
precs.append(true_positives / (true_positives + false_positives))
return precs
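# Tiny illustration with one channel of binary masks (assumes torch is installed;
# the tensors are made up). Two true positives and one false positive -> 2/3.
import torch
target_demo = torch.tensor([[[[1, 0], [0, 1]]]])   # shape (b=1, c=1, x=2, y=2)
output_demo = torch.tensor([[[[1, 1], [0, 1]]]])
print(calc_precision(output_demo, target_demo))    # [0.666...]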
|
c35c500c786539578c46a8e8c4f6517bf30b4525
| 3,642,935
|
import numpy as np
def upper_credible_choice(self):
    """pick the bandit with the best UPPER BOUND. See chapter 5"""
    def ub(a, b):
        # approximate upper credible bound of a Beta(a, b) posterior (mean + 1.65 std)
        return a/(a+b) + 1.65*np.sqrt((a*b)/((a+b)**2*(a+b+1)))
    a = self.wins + 1
    b = self.trials - self.wins + 1
    return np.argmax(ub(a, b))
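# Numeric illustration of the bound used above (arbitrary win/trial counts):
import numpy as np
a_demo = np.array([11.0, 3.0])    # wins + 1
b_demo = np.array([5.0, 13.0])    # trials - wins + 1
bound = a_demo/(a_demo+b_demo) + 1.65*np.sqrt((a_demo*b_demo)/((a_demo+b_demo)**2*(a_demo+b_demo+1)))
best = int(np.argmax(bound))      # index of the bandit with the largest bound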
|
20cefd0796f52a78d03b2d38cb74c532a07ec20c
| 3,642,936
|
import os
def download(bell, evnt):
"""
Download the current event from the given doorbell.
If the video is already in the download history or
successfully downloaded then return True otherwise False.
"""
event_id = evnt.get("id")
event_time = evnt.get("created_at")
filename = "".join(
(
f"{DOWNLOADFOLDER}/",
f"{bell.name}-",
f'{event_time.strftime("%Y%m%d_%H%M%S")}-',
f"{event_id}.mp4",
)
)
filename = filename.replace(" ", "_")
print(filename)
status = evnt.get("recording", {}).get("status")
if status == "ready":
try:
bell.recording_download(event_id, filename=filename)
os.utime(
filename, (event_time.timestamp(), event_time.timestamp())
)
return True
except Exception as ex: # pylint: disable=broad-except
print(ex)
return False
else:
print(f"Event: {event_id} is {status}")
return False
|
437ba7167af2e5540fae317fbaab376f36723a80
| 3,642,937
|
import uuid
def get_unique_id():
"""
for unique random docname
:return: length 32 string
"""
_id = str(uuid.uuid4()).replace("-", "")
return _id
|
4cf99a919bd0e9672f0b186626df0532cacebaf4
| 3,642,938
|
from flask import jsonify, redirect, request, session, url_for
def callback():
""" Step 3: Retrieving an access token.
The user has been redirected back from the provider to your registered
callback URL. With this redirection comes an authorization code included
in the redirect URL. We will use that to obtain an access token.
"""
# Grab the Refresh and Access Token.
token_dict = app.config['auth_client'].grab_access_token_and_refresh_token(url=request.url)
# Store it in the Session.
session['oauth_token'] = token_dict
if app.config['call_close']:
return redirect(url_for('shutdown'))
return jsonify(token_dict)
|
76c4bec2c7c2a3433d4ab7665fca9d6829083626
| 3,642,939
|
def load_azure_auth() -> AzureSSOClientConfig:
"""
Load config for Azure Auth
"""
return AzureSSOClientConfig(
clientSecret=conf.get(LINEAGE, "client_secret"),
authority=conf.get(LINEAGE, "authority"),
clientId=conf.get(LINEAGE, "client_id"),
scopes=conf.getjson(LINEAGE, "scopes", fallback=[]),
)
|
4e7eb1886da496c465db95aa2cee2aec4a107d78
| 3,642,940
|
def get_map_zones(map_id):
"""Get map zones.
.. :quickref: Zones; Get map zones.
**Example request**:
.. sourcecode:: http
GET /zones/map/1 HTTP/1.1
**Example response**:
.. sourcecode:: json
[
{
"id": 1,
"p1": [0, 0, 0],
"p2": [256, 256, 256],
"zone_type": "start"
},
{
"id": 2,
"p1": [1000, 1000, 1000],
"p2": [1256, 1256, 1256],
"zone_type": "end"
},
{
"id": 1,
"zone_type": "cp",
"map_id": 1,
"cp_index": 1,
"zone": {
"id": 3,
"p1": [500, 500, 500],
"p2": [756, 756, 756]
}
}
]
:query map_id: map id.
:status 200: Success.
:status 404: Map not found.
:returns: List of zones
"""
map_ = Map.query.filter_by(id_=map_id).first()
if map_ is None:
error = {"message": "Map not found."}
return make_response(jsonify(error), 404)
zones = []
if map_.start_zone is not None:
zone = Zone.query.filter_by(id_=map_.start_zone).first()
if zone:
zone_dict = zone.json
zone_dict["zone_type"] = "start"
zones.append(zone_dict)
if map_.end_zone is not None:
zone = Zone.query.filter_by(id_=map_.end_zone).first()
if zone:
zone_dict = zone.json
zone_dict["zone_type"] = "end"
zones.append(zone_dict)
checkpoints = MapCheckpoint.query.filter_by(map_id=map_id).all()
if checkpoints:
for checkpoint in checkpoints:
zones.append(checkpoint.json)
return make_response(jsonify(zones), 200)
|
89e60a0fd2e0e2b743aa54cf1debe67e5f860a13
| 3,642,941
|
import numpy as np
from skimage.exposure import rescale_intensity
def rgb2he_macenko(img, D=None, alpha=1.0, beta=0.15, white=255.0,
return_deconvolution_matrix=False):
"""
Performs stain separation from RGB images using the method in
M Macenko, et al. "A method for normalizing histology slides for quantitative analysis",
IEEE ISBI, 2009. dx.doi.org/10.1109/ISBI.2009.5193250
Args:
img (numpy.ndarray): RGB input image
D (numpy.ndarray): a deconvolution matrix. If None, one will be computed from the image
alpha (float): tolerance for pseudo-min/-max
beta (float): OD threshold for transparent pixels
white (float): white level (in each channel)
return_deconvolution_matrix (bool): if True, the deconvolution matrix is also returned
Returns:
three 2d arrays for H-, E- and remainder channels, respectively.
If return_deconvolution_matrix is True, the deconvolution matrix is also returned.
"""
assert (img.ndim == 3)
assert (img.shape[2] == 3)
I = img.reshape((img.shape[0] * img.shape[1], 3))
OD = -np.log((I + 1.0) / white) # optical density
if D is None:
# the deconvolution matrix is not provided so one has to be estimated from the
# image
rows = (OD >= beta).all(axis=1)
if not any(rows):
# no rows with all pixels above the threshold
raise RuntimeError('optical density below threshold')
ODhat = OD[rows, :] # discard transparent pixels
u, V, _ = eig(np.cov(ODhat.T))
idx = np.argsort(u) # get a permutation to sort eigenvalues increasingly
V = V[:, idx] # sort eigenvectors
theta = np.dot(ODhat, V[:, 1:3]) # project optical density onto the eigenvectors
# corresponding to the largest eigenvalues
phi = np.arctan2(theta[:,1], theta[:,0])
min_phi, max_phi = np.percentile(phi, [alpha, 100.0-alpha], axis=None)
u1 = np.dot(V[:,1:3], np.array([[np.cos(min_phi)],[np.sin(min_phi)]]))
u2 = np.dot(V[:,1:3], np.array([[np.cos(max_phi)],[np.sin(max_phi)]]))
if u1[0] > u2[0]:
D = np.hstack((u1, u2)).T
else:
D = np.hstack((u2, u1)).T
D = np.vstack((D, np.cross(D[0,],D[1,])))
D = D / np.reshape(np.repeat(np.linalg.norm(D, axis=1), 3), (3,3), order=str('C'))
img_res = np.linalg.solve(D.T, OD.T).T
img_res = np.reshape(img_res, img.shape, order=str('C'))
if not return_deconvolution_matrix:
D = None
return rescale_intensity(img_res[:,:,0], out_range=(0,1)), \
rescale_intensity(img_res[:,:,1], out_range=(0,1)), \
rescale_intensity(img_res[:,:,2], out_range=(0,1)), \
D
|
bfe19ef7882ac713534d28c0d636bda086cf95c6
| 3,642,942
|
import inspect
from typing import Any
import functools
from typing import OrderedDict
import torch
def validated(base_model=None):
"""
Decorates an ``__init__`` method with typed parameters with validation
and auto-conversion logic.
>>> class ComplexNumber:
... @validated()
... def __init__(self, x: float = 0.0, y: float = 0.0) -> None:
... self.x = x
... self.y = y
Classes with decorated initializers can be instantiated using arguments of
    another type (e.g. a ``y`` argument of type ``str``). The decorator
handles the type conversion logic.
>>> c = ComplexNumber(y='42')
>>> (c.x, c.y)
(0.0, 42.0)
If the bound argument cannot be converted, the decorator throws an error.
>>> c = ComplexNumber(y=None)
Traceback (most recent call last):
...
pydantic.error_wrappers.ValidationError: 1 validation error for ComplexNumberModel
y
none is not an allowed value (type=type_error.none.not_allowed)
Internally, the decorator delegates all validation and conversion logic to
`a Pydantic model <https://pydantic-docs.helpmanual.io/>`_, which can be
    accessed through the ``Model`` attribute of the decorated initializer.
>>> ComplexNumber.__init__.Model
<class 'ComplexNumberModel'>
    The Pydantic model is synthesized automatically from the parameter
names and types of the decorated initializer. In the ``ComplexNumber``
example, the synthesized Pydantic model corresponds to the following
definition.
>>> class ComplexNumberModel(BaseValidatedInitializerModel):
... x: float = 0.0
... y: float = 0.0
Clients can optionally customize the base class of the synthesized
Pydantic model using the ``base_model`` decorator parameter. The default
behavior uses :class:`BaseValidatedInitializerModel` and its
`model config <https://pydantic-docs.helpmanual.io/#config>`_.
See Also
--------
BaseValidatedInitializerModel
Default base class for all synthesized Pydantic models.
"""
def validator(init):
init_qualname = dict(inspect.getmembers(init))["__qualname__"]
init_clsnme = init_qualname.split(".")[0]
init_params = inspect.signature(init).parameters
init_fields = {
param.name: (
param.annotation
if param.annotation != inspect.Parameter.empty
else Any,
param.default
if param.default != inspect.Parameter.empty
else ...,
)
for param in init_params.values()
if param.name != "self"
and param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
}
if base_model is None:
PydanticModel = create_model(
model_name=f"{init_clsnme}Model",
__config__=BaseValidatedInitializerModel.Config,
**init_fields,
)
else:
PydanticModel = create_model(
model_name=f"{init_clsnme}Model",
__base__=base_model,
**init_fields,
)
def validated_repr(self) -> str:
return dump_code(self)
def validated_getnewargs_ex(self):
return (), self.__init_args__
@functools.wraps(init)
def init_wrapper(*args, **kwargs):
self, *args = args
nmargs = {
name: arg
for (name, param), arg in zip(
list(init_params.items()), [self] + args
)
if name != "self"
}
model = PydanticModel(**{**nmargs, **kwargs})
# merge nmargs, kwargs, and the model fields into a single dict
all_args = {**nmargs, **kwargs, **model.__dict__}
# save the merged dictionary for Representable use, but only of the
# __init_args__ is not already set in order to avoid overriding a
# value set by a subclass initializer in super().__init__ calls
if not getattr(self, "__init_args__", {}):
self.__init_args__ = OrderedDict(
{
name: arg
for name, arg in sorted(all_args.items())
if type(arg) != torch.nn.ParameterDict
}
)
self.__class__.__getnewargs_ex__ = validated_getnewargs_ex
self.__class__.__repr__ = validated_repr
return init(self, **all_args)
# attach the Pydantic model as the attribute of the initializer wrapper
setattr(init_wrapper, "Model", PydanticModel)
return init_wrapper
return validator
|
af599bff5aa5d1efeb44297c117685609842a212
| 3,642,943
|
from pathlib import Path
from multiprocessing import Pool, cpu_count
import numpy as np
import pandas as pd
def make_header_table(fitsdir, search_string='*fl?.fits'):
"""Construct a table of key-value pairs from FITS headers of images
used in dolphot run. Columns are the set of all keywords that appear
in any header, and rows are per image.
Inputs
------
fitsdir : string or Path
directory of FITS files
search_string : string or regex pattern, optional
        string to search for FITS images with. Default is
        '*fl?.fits'
Returns
-------
df : DataFrame
A table of header key-value pairs indexed by image name.
"""
keys = []
headers = {}
# force fitsdir to Path
if type(fitsdir) == str:
fitsdir = Path(fitsdir)
fitslist = list(fitsdir.glob(search_string))
if len(fitslist) == 0: # this shouldn't happen
print('No fits files found in {}!'.format(fitsdir))
return pd.DataFrame()
# get headers from each image
with Pool(cpu_count()-1) as p:
all_headers = p.map(combine_headers, fitslist)
for name, head in all_headers:
headers.update({name:head})
keys += [k for k in head]
unique_keys = np.unique(keys).tolist()
remove_keys = ['COMMENT', 'HISTORY', '']
[unique_keys.remove(key) for key in remove_keys if key in unique_keys]
# construct dataframe
df = pd.DataFrame(columns=unique_keys)
for fitsname, head in headers.items():
row = pd.Series(dict(head.items()))
df.loc[fitsname.split('.fits')[0]] = row.T
# I do not know why dask is so bad at mixed types
# but here is my hacky solution
try:
df = df.infer_objects()
except Exception:
print("Could not infer objects")
df_obj = df.select_dtypes(['object'])
# iterate over columns and force types
for c in df_obj:
dtype = pd.api.types.infer_dtype(df[c], skipna=True)
if dtype == 'string':
df.loc[:,c] = df.loc[:,c].astype(str)
elif dtype in ['float', 'mixed-integer-float']:
df.loc[:,c] = df.loc[:,c].astype(float)
elif dtype == 'integer':
df.loc[:,c] = df.loc[:,c].astype(int)
elif dtype == 'boolean':
df.loc[:,c] = df.loc[:,c].astype(bool)
else:
print('Unrecognized datatype "{}" for column {}; coercing to string'.format(dtype, c))
df.loc[:,c] = df.loc[:,c].astype(str)
# lambda function to construct detector-filter pairs
lamfunc = lambda x: '-'.join(x[~(x.str.startswith('CLEAR') | x.str.startswith('nan'))])
df['FILT_DET'] = df.filter(regex='(DETECTOR)|(FILTER)').astype(str).apply(lamfunc, axis=1)
return df
|
3d5d10b73a8e76abedcf85ef97a9854920996a0a
| 3,642,944
|
import re
def parse_head_final_tags(ctx, lang, form):
"""Parses tags that are allowed at the end of a form head from the end
of the form. This can also be used for parsing the final gender etc tags
from translations and linkages."""
assert isinstance(ctx, Wtp)
assert isinstance(lang, str) # Should be language that "form" is for
assert isinstance(form, str)
# print("parse_head_final_tags: lang={} form={!r}".format(lang, form))
# Make sure there are no double spaces in the form as this code does not
# handle them otherwise.
form = re.sub(r"\s+", " ", form.strip())
if not form:
return form, []
origform = form
tags = []
# If parsing for certain Bantu languages (e.g., Swahili), handle
# some extra head-final tags first
if lang in head_final_bantu_langs:
m = re.search(head_final_bantu_re, form)
if m is not None:
tagkeys = m.group(1)
if not ctx.title.endswith(tagkeys):
form = form[:m.start()]
v = head_final_bantu_map[tagkeys]
if v.startswith("?"):
v = v[1:]
ctx.debug("suspicious suffix {!r} in language {}: {}"
.format(tagkeys, lang, origform))
tags.extend(v.split())
# If parsing for certain Semitic languages (e.g., Arabic), handle
# some extra head-final tags first
if lang in head_final_semitic_langs:
m = re.search(head_final_semitic_re, form)
if m is not None:
tagkeys = m.group(1)
if not ctx.title.endswith(tagkeys):
form = form[:m.start()]
v = head_final_semitic_map[tagkeys]
if v.startswith("?"):
v = v[1:]
ctx.debug("suspicious suffix {!r} in language {}: {}"
.format(tagkeys, lang, origform))
tags.extend(v.split())
# If parsing for certain other languages (e.g., Lithuanian,
# French, Finnish), handle some extra head-final tags first
if lang in head_final_other_langs:
m = re.search(head_final_other_re, form)
if m is not None:
tagkeys = m.group(1)
if not ctx.title.endswith(tagkeys):
form = form[:m.start()]
tags.extend(head_final_other_map[tagkeys].split(" "))
# Handle normal head-final tags
m = re.search(head_final_re, form)
if m is not None:
tagkeys = m.group(3)
# Only replace tags ending with numbers in languages that have
# head-final numeric tags (e.g., Bantu classes); also, don't replace
# tags if the main title ends with them (then presume they are part
# of the word)
# print("head_final_tags form={!r} tagkeys={!r} lang={}"
# .format(form, tagkeys, lang))
tagkeys_contains_digit = re.search(r"\d", tagkeys)
if ((not tagkeys_contains_digit or
lang in head_final_numeric_langs) and
not ctx.title.endswith(" " + tagkeys)):
if not tagkeys_contains_digit or lang in head_final_numeric_langs:
form = form[:m.start()]
v = xlat_head_map[tagkeys]
if v.startswith("?"):
v = v[1:]
ctx.debug("suspicious suffix {!r} in language {}: {}"
.format(tagkeys, lang, origform))
tags.extend(v.split())
# Generate warnings about words ending in " or" after processing
if ((form.endswith(" or") and not origform.endswith(" or")) or
re.search(r" (1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|"
r"1a|2a|9a|10a|m1|f1|f2|m2|f3|m3|f4|m4|f5|m5|or|\?)"
r"($|/| (f|m|sg|pl|anim|inan))", form) or
form.endswith(" du")):
if form not in ok_suspicious_forms:
ctx.debug("suspicious unhandled suffix in {}: {!r}, originally {!r}"
.format(lang, form, origform))
# print("parse_head_final_tags: form={!r} tags={}".format(form, tags))
return form, tags
|
38891b08fa2223e90f73c732ff497606ab1c729b
| 3,642,945
|
import pandas as pd
def cars_to_people(df, peoplePerCar=1.7, percentOfTransit=.005):
"""
args: demand dataframe, people/car float, % of transit floats
returns: people demand dataframe by terminal and arrival/departure
"""
columns = ['Arrive_A','Arrive_B','Arrive_C','Arrive_D','Arrive_E',
'Depart_A','Depart_B','Depart_C','Depart_D','Depart_E']
tmp_df = pd.DataFrame()
for col in columns:
tmp_people = []
for row in df[col]:
tmp_people.append((row * peoplePerCar/(1-percentOfTransit)))
tmp_df[col + "_people"] = tmp_people
depart_columns = []
arrive_columns = []
for col in tmp_df.columns:
if col.startswith('Depart'):
depart_columns.append(col)
elif col.startswith('Arrive'):
arrive_columns.append(col)
tmp_df['Depart_total'] = tmp_df[depart_columns].sum(axis=1)
tmp_df['Arrival_total'] = tmp_df[arrive_columns].sum(axis=1)
tmp_df['pass_thru'] = df['pass_thru']
tmp_df['Total'] = tmp_df[['Depart_total','Arrival_total']].sum(axis=1)
return tmp_df
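# Minimal illustration with a one-row frame; the column names follow the
# function's expectations and the counts are made up.
import pandas as pd
demo = pd.DataFrame({col: [100] for col in
                     ['Arrive_A', 'Arrive_B', 'Arrive_C', 'Arrive_D', 'Arrive_E',
                      'Depart_A', 'Depart_B', 'Depart_C', 'Depart_D', 'Depart_E']})
demo['pass_thru'] = [50]
people = cars_to_people(demo)   # each column becomes 100 * 1.7 / 0.995 ≈ 170.85 people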
|
54672baacf10683a3d7224ee8672e08ee1574b30
| 3,642,946
|
import re
def get_dup_key_val(errmsg):
"""Return the duplicate key referenced in an error message.
Parameters
----------
errmsg : |str|
A pymongo `DuplicateKeyError` message.
Returns
-------
|dict|
The key(s) and value(s) of the duplicate key.
Example
-------
>>> errmsg = ('insertDocument :: caused by :: 11000 E11000 duplicate '
>>> 'key error collection: cyphon.posts index: '
    >>> '_platform_1_doc_id_1 dup key: { : "twitter", : '
>>> '"ObjectId(\'5543769ef861c942838c7ee9\') }')
>>> get_dup_key_val(errmsg)
{'_platform': 'twitter', '_doc_id': ObjectId('5543769ef861c942838c7ee9')}
"""
msg = errmsg.split(' dup key: { ')
key = extract_substring(msg[0], 'index: ', '_', 'right').strip()
val = extract_substring(msg[1], ':', '}').strip()
# parse compound indexes
keys = re.split(r'_[0-9]+_', key)
values = val.split(', : ')
if len(keys) != len(values): # pragma: no cover
raise ValueError('cannot match index keys with values')
key_val = {}
for index, value in enumerate(values):
key_val[keys[index]] = restore_type_from_str(values[index])
return key_val
|
14cbf0f51a89c4b76c5a1d363e2ef1dfe994ede6
| 3,642,947
|
def worker(vac_flag,cache_dict,mylock): # Used in multiprocess_traditional_evaluate() #20220204
"""thread worker function"""
this_key = tuple(vac_flag.squeeze().cpu().numpy())
if(this_key in cache_dict):
print('Found in cache_dict')
[total_cases, case_rate_std] = cache_dict[this_key]
elif(this_key in combined_dict):
print('Found in combined_dict')
[total_cases, case_rate_std] = combined_dict[this_key]
else:
print('Not found in cache')
total_cases, case_rate_std = traditional_evaluate(vac_flag)
cache_dict[this_key] = [total_cases, case_rate_std]
print(len(list(cache_dict.keys())))
return total_cases
|
d88e92c7cd3c5bf85389e83440dc7752719d66c0
| 3,642,948
|
from kubernetes import client as k8s_client
from typing import Optional
from typing import Dict
def use_k8s_secret(
secret_name: str = 'k8s-secret',
k8s_secret_key_to_env: Optional[Dict] = None,
):
"""An operator that configures the container to use k8s credentials.
k8s_secret_key_to_env specifies a mapping from the name of the keys in the k8s secret to the name of the
environment variables where the values will be added.
The secret needs to be deployed manually a priori.
Example:
::
train = train_op(...)
train.apply(use_k8s_secret(secret_name='s3-secret',
k8s_secret_key_to_env={'secret_key': 'AWS_SECRET_ACCESS_KEY'}))
This will load the value in secret 's3-secret' at key 'secret_key' and source it as the environment variable
'AWS_SECRET_ACCESS_KEY'. I.e. it will produce the following section on the pod:
env:
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: s3-secret
key: secret_key
"""
k8s_secret_key_to_env = k8s_secret_key_to_env or {}
def _use_k8s_secret(task):
for secret_key, env_var in k8s_secret_key_to_env.items():
task.container \
.add_env_variable(
k8s_client.V1EnvVar(
name=env_var,
value_from=k8s_client.V1EnvVarSource(
secret_key_ref=k8s_client.V1SecretKeySelector(
name=secret_name,
key=secret_key
)
)
)
)
return task
return _use_k8s_secret
|
2e88ad765322752ba7417d865f0ea60879c4bafe
| 3,642,949
|
import pandas as pd
def get_recommendations(artists = tuple(), genres = tuple(), limit = 20, features = True, client = None):
"""Return DataFrame of recommended tracks.
Arguments:
artists: an optional sequence of artists to seed recommendation
genres: an optional sequence of genres to seed recommendation
limit: number of tracks to return
features: whether to include track features in output
"""
recs = client.recommendations(seed_artists = artists, seed_genres = genres, limit = limit)
tracks = recs['tracks']
# TODO: need a compose function...
to_keep = (
'album_name', 'artist_name', 'name', 'popularity', 'duration_ms',
'explicit', 'id'
)
rows = list(map(row_filter(to_keep, False), map(_hoist_track_info, tracks)))
out = pd.DataFrame(rows)
track_ids = [row['id'] for row in rows]
if features:
extra_cols = ['uri', 'type', 'duration_ms', 'analysis_url', 'track_href']
return out.merge(
get_track_features(track_ids).drop(columns = extra_cols),
on = "id"
)
return out
|
6c9c4c44b7c5269fbb9718b22dba74632e80b092
| 3,642,950
|
def reorder_point(max_units_sold_daily, avg_units_sold_daily, max_lead_time, avg_lead_time, lead_time):
"""Returns the reorder point for a given product based on sales and lead time.
The reorder point is the stock level at which a new order should be placed in order to avoid stock outs.
Args:
max_units_sold_daily (int): Maximum number of units sold daily in previous period.
avg_units_sold_daily (float): Average number of units sold daily in previous period.
max_lead_time (int): Maximum number of days required to obtain stock.
avg_lead_time (int): Average number of days required to obtain stock.
lead_time (int): Number of days required to obtain stock.
Returns:
        Reorder point for the product, in units, based on sales and lead time.
"""
safety = safety_stock(max_units_sold_daily, avg_units_sold_daily, max_lead_time, avg_lead_time)
return (lead_time * avg_units_sold_daily) + safety
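# Worked example in comments only, since safety_stock() is defined elsewhere.
# Assuming the common formula safety = max_units_sold_daily * max_lead_time
#                                      - avg_units_sold_daily * avg_lead_time:
#   safety        = 20 * 10 - 15 * 7          = 95
#   reorder point = lead_time * avg + safety  = 7 * 15 + 95 = 200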
|
876544b5bce39342fb753f6a1bf33913fae6e33d
| 3,642,951
|
def get_total_value_report(total_value):
""""TBD"""
# Total value report
currency = CURRENCY
slack_str = "*" + "Total value report" + "*\n>>>\n"
slack_str = slack_str + make_slack_etf_chain_total(total_value, currency)
"""
sendSlackNotification('etf', slack_str, "ETF Notification", ':chart_with_upwards_trend:')
"""
return total_value
|
be1a9ff18c2faf37ae7b58be34f15eb454c72d62
| 3,642,952
|
def verify_count_responses(responses):
""" Verifies that the responses given are well formed.
Parameters
----------
responses : int OR list-like
If an int, the exact number of responses targeted.
If list-like, the first two elements are the minimum and maximum
(inclusive) range of responses targeted.
If a third item is in the list it must be a list of values from
which the range of target responses is being restricted.
Returns
-------
    responses : list or tuple
        The validated responses (an int argument is wrapped in a list).
"""
if isinstance(responses, int):
responses = [responses]
elif isinstance(responses, (list, tuple)):
if not len(responses) in [2, 3]:
            raise IndexError(
                "The responses list given to has_count() must have "
                "either 2 or 3 items in the form: "
                "[min, max, [values subset]]. Found %s." % (responses)
)
valid_types = [int, int, (list, tuple)]
for r, response in enumerate(responses):
if not isinstance(response, valid_types[r]):
raise TypeError (
"The responses list given to has_count() has "
"incorrectly typed items. It must be either 2 or 3 "
"items in the form: [int, int, list/tuple]. "
"Found %s." % (responses)
)
            if r == 2:  # the optional values subset is the third item (index 2)
for value in response:
if not isinstance(value, int):
raise TypeError (
"The values subset given as the third item "
"in has_count(responses) is not correctly "
"typed. Each value must be int. "
"Found %s." % (response)
)
return responses
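# Illustrative calls: an int is wrapped in a list, and a well-formed
# [min, max, values-subset] passes through unchanged.
assert verify_count_responses(3) == [3]
assert verify_count_responses([1, 5, [1, 2, 3]]) == [1, 5, [1, 2, 3]]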
|
63fbd00bc26fee8eb960f389d5d56178e90ff7ae
| 3,642,953
|
def _subtract(supernet, subnets, subnet_idx, ranges):
"""Calculate IPSet([supernet]) - IPSet(subnets).
Assumptions: subnets is sorted, subnet_idx points to the first
element in subnets that is a subnet of supernet.
Results are appended to the ranges parameter as tuples of in format
(version, first, last). Return value is the first subnet_idx that
does not point to a subnet of supernet (or len(subnets) if all
subsequents items are a subnet of supernet).
"""
version = supernet._module.version
subnet = subnets[subnet_idx]
if subnet.first > supernet.first:
ranges.append((version, supernet.first, subnet.first - 1))
subnet_idx += 1
prev_subnet = subnet
while subnet_idx < len(subnets):
cur_subnet = subnets[subnet_idx]
if cur_subnet not in supernet:
break
if prev_subnet.last + 1 == cur_subnet.first:
# two adjacent, non-mergable IPNetworks
pass
else:
ranges.append((version, prev_subnet.last + 1, cur_subnet.first - 1))
subnet_idx += 1
prev_subnet = cur_subnet
first = prev_subnet.last + 1
last = supernet.last
if first <= last:
ranges.append((version, first, last))
return subnet_idx
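_subtract is an internal helper; the same result is reachable through netaddr's public IPSet arithmetic, as in this small sketch (assumes the netaddr package is installed).
from netaddr import IPNetwork, IPSet

supernet = IPSet([IPNetwork("10.0.0.0/24")])
subnets = IPSet([IPNetwork("10.0.0.0/26"), IPNetwork("10.0.0.128/26")])

# Set difference collapses the leftover ranges back into minimal CIDRs.
remaining = supernet - subnets
print(remaining.iter_cidrs())  # [IPNetwork('10.0.0.64/26'), IPNetwork('10.0.0.192/26')]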
|
a7c738b5ddab1ed896677a011029a00af5779bcd
| 3,642,954
|
def commiter_factory(config: dict) -> BaseCommitizen:
"""Return the correct commitizen existing in the registry."""
name: str = config["name"]
try:
_cz = registry[name](config)
except KeyError:
msg_error = (
"The commiter has not been found in the system.\n\n"
f"Try running 'pip install {name}'\n"
)
out.error(msg_error)
raise SystemExit(NO_COMMITIZEN_FOUND)
else:
return _cz
|
0e001652e0698efe981bf7dfe0cc69ce337e6f97
| 3,642,955
|
from pathlib import Path
import os
def faceshq(output_folder):
"""faceshq.
src yaml: 'https://app.koofr.net/links/a04deec9-0c59-4673-8b37-3d696fe63a5d?path=%2F2020-11-13T21-41-45_faceshq_transformer%2Fconfigs%2F2020-11-13T21-41-45-project.yaml'
src ckpt: 'https://app.koofr.net/content/links/a04deec9-0c59-4673-8b37-3d696fe63a5d/files/get/last.ckpt?path=%2F2020-11-13T21-41-45_faceshq_transformer%2Fcheckpoints%2Flast.ckpt'
"""
filename = "faceshq"
yaml_file = 'https://app.koofr.net/links/a04deec9-0c59-4673-8b37-3d696fe63a5d?path=%2F2020-11-13T21-41-45_faceshq_transformer%2Fconfigs%2F2020-11-13T21-41-45-project.yaml'
ckpt_file = 'https://app.koofr.net/content/links/a04deec9-0c59-4673-8b37-3d696fe63a5d/files/get/last.ckpt?path=%2F2020-11-13T21-41-45_faceshq_transformer%2Fcheckpoints%2Flast.ckpt'
output_yaml_file = Path(output_folder)/ f"{filename}.yaml"
output_ckpt_file = Path(output_folder)/ f"{filename}.ckpt"
os.makedirs(Path(output_folder), exist_ok=True)
return (__download(yaml_file, output_yaml_file), __download(ckpt_file, output_ckpt_file))
|
15e17e7217bd7a03e56b508633249c24197caf7f
| 3,642,956
|
import re
from datetime import datetime
import json
async def apiAccountEditPhaaze(cls:"WebIndex", WebRequest:Request) -> Response:
"""
Default url: /api/account/phaaze/edit
"""
WebUser:WebUserInfo = await cls.getWebUserInfo(WebRequest)
if not WebUser.found:
return await apiMissingAuthorisation(cls, WebRequest)
Data:WebRequestContent = WebRequestContent(WebRequest)
await Data.load()
# get required stuff
current_password:str = Data.getStr("password", "")
new_username:str = Data.getStr("username", "")
new_email:str = Data.getStr("email", "")
new_password:str = Data.getStr("newpassword", "")
new_password2:str = Data.getStr("newpassword2", "")
# checks
if not current_password or WebUser.password != password_function(current_password):
return await apiAccountPasswordsDontMatch(cls, WebRequest, msg="Current password is not correct")
	changed_email:bool = False # if yes, reset validated and send mail
update:dict = dict()
# if new_password is set, check all and set to update
if new_password:
if new_password != new_password2:
return await apiAccountPasswordsDontMatch(cls, WebRequest)
if len(new_password) < 8:
return await apiAccountPasswordToShort(cls, WebRequest, min_length=8)
update["password"] = password_function(new_password)
if new_username:
# want a new username
if new_username.lower() != WebUser.username.lower():
is_occupied:list = await getWebUsers(cls, "LOWER(`user`.`username`) = LOWER(%s)", (new_username,))
if is_occupied:
# already taken
return await apiAccountTaken(cls, WebRequest)
else:
# username is free, add to update and add one to username_changed,
# maybe i do something later with it
update["username_changed"] = WebUser.username_changed + 1
update["username"] = new_username
		# else, it's just a different capitalisation of the same name
elif new_username != WebUser.username:
update["username"] = new_username
if new_email and new_email.lower() != WebUser.email:
		if re.match(IsEmail, new_email) is None:
			# does not look like an email address
return await apiAccountEmailWrong(cls, WebRequest, email=new_email)
is_occupied:list = await getWebUsers(cls, "user.email LIKE %s", (new_email,))
if is_occupied:
# already taken
return await apiAccountTaken(cls, WebRequest)
else:
changed_email = True
update["email"] = new_email
if not update:
return await apiWrongData(cls, WebRequest, msg=f"No changes, please add at least one")
# verification mail
if changed_email:
cls.Web.BASE.Logger.warning(f"(API) New Email, send new verification mail: {new_email}", require="api:account")
# TODO: SEND MAIL
update["edited_at"] = str(datetime.datetime.now())
cls.Web.BASE.PhaazeDB.updateQuery(
table = "user",
content = update,
where = "`user`.`id` = %s",
where_values = (WebUser.user_id,)
)
cls.Web.BASE.Logger.debug(f"(API) Account edit ({WebUser.user_id}) : {str(update)}", require="api:account")
return cls.response(
status=200,
		text=json.dumps( dict(error="successfull_edited", msg="Your account has been successfully edited", update=update, status=200) ),
content_type="application/json"
)
|
5ac51d1c70031858294283c1f000af80e8838c99
| 3,642,957
|
def get_s3_items_by_type_from_queue(volume_folder):
"""
Load redis queue named "volume:<volume_folder>", and return dict of keys and md5s sorted by file type.
Queue will contain items consisting of newline and tab-delimited lists of files.
Returned value:
{'alto': [[s3_key, md5], [s3_key, md5]], 'jp2': ..., 'tiff': ..., 'casemets': ..., 'volmets': ..., 'md5': ...}
"""
# Get all entries from volume:<volume_folder> queue, splitting tab-delimited strings back into tuples:
s3_items = [line.split("\t") for files_str in spop_all('volume:' + volume_folder) for line in
force_str(files_str).split("\n")]
return sort_s3_items_by_type(s3_items, volume_folder)
|
7afece0e2f1f8d0863873f1ef987790c2eb1dad3
| 3,642,958
|
def strip_spectral_type(series, return_mask=False):
"""
    Strip spectral type from a series of strings
Args:
series (pd.Series): series of object names (strings)
return_mask (bool): returns boolean mask True where there is a type
Returns:
no_type (pd.Series): series without spectral types
type_mask (pd.Series): boolean mask where type is given
"""
type_mask = series.str.match('\\([OBAFGKM]\\)')
no_type = series.copy()
no_type[type_mask] = series[type_mask].str.slice(start=4)
return (no_type, type_mask) if return_mask else no_type
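An illustrative call to strip_spectral_type (assumes pandas is imported as pd and the function above is in scope); the '(G) ' prefix is four characters long, which is why the slice starts at 4.
import pandas as pd

names = pd.Series(["(G) HD 10700", "(M) Proxima Centauri", "Vega"])
cleaned, mask = strip_spectral_type(names, return_mask=True)
print(cleaned.tolist())  # ['HD 10700', 'Proxima Centauri', 'Vega']
print(mask.tolist())     # [True, True, False]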
|
65b91749742b229637819582b1158554b1a457ea
| 3,642,959
|
def already_in_bioconda(recipe, meta, df):
"""
Does the package exist in bioconda?
"""
results = _subset_df(recipe, meta, df)
build_number = int(meta.get_value('build/number', 0))
build_results = results[results.build_number == build_number]
channels = set(build_results.channel)
if 'bioconda' in channels:
return {
'already_in_bioconda': True,
'fix': 'bump version or build number'
}
|
0a7402e85a36f2f97a36a91bc379077f01ab22f5
| 3,642,960
|
def expand_groups(node_id, groups):
"""
node_id: a node ID that may be a group
groups: store group IDs and list of sub-ids
return value: a list that contains all group IDs deconvoluted
"""
node_list = []
if node_id in groups.keys():
for component_id in groups[node_id]:
node_list.extend(expand_groups(component_id, groups))
else:
node_list.extend([node_id])
return node_list
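A small self-contained illustration of how expand_groups recursively flattens nested groups; the example data is made up.
groups = {
    "grp_all": ["grp_web", "db1"],
    "grp_web": ["web1", "web2"],
}

print(expand_groups("grp_all", groups))  # ['web1', 'web2', 'db1']
print(expand_groups("web1", groups))     # ['web1']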
|
4c4b9c569a85396f201c589635b6ecea3807ddc2
| 3,642,961
|
def _preservation_derivatives_query(storage_service_id, storage_location_id, aip_uuid):
"""Fetch information on preservation derivatives from db.
:param storage_service_id: Storage Service ID (int)
:param storage_location_id: Storage Location ID (int)
:param aip_uuid: AIP UUID (str)
:returns: SQLAlchemy query results
"""
files = (
File.query.join(AIP)
.join(StorageLocation)
.join(StorageService)
.filter(StorageService.id == storage_service_id)
.filter(File.file_type == FileType.preservation)
.order_by(AIP.uuid, File.file_format)
)
if storage_location_id:
files = files.filter(StorageLocation.id == storage_location_id)
if aip_uuid:
files = files.filter(AIP.uuid == aip_uuid)
return files
|
a4ab7d6fc011c3ffc3678388b221514f38ecb5db
| 3,642,962
|
def euler2rot_symbolic(angle1='ϕ', angle2='θ', angle3='ψ', order='X-Y-Z', ertype='extrinsic'):
"""returns symbolic expression for the composition of elementary rotation matrices
Parameters
----------
angle1 : string or sympy.Symbol
angle representing first rotation
angle2 : string or sympy.Symbol
angle representing second rotation
angle3 : string or sympy.Symbol
angle representing third rotation
order : string
valid string sequence that specifies the order of rotation. See `euler2rot()`
for details
    ertype : string ('extrinsic' or 'intrinsic')
        the type of elemental rotations. See `euler2rot()` for details.
Example
-------
>>> R = euler2rot_symbolic('1', '2', '3', 'X-Y-Z' , 'intrinsic')
>>> c, s = sy.symbols('c, s', cls=sy.Function)
>>> R.subs({sy.cos:c, sy.sin:s})
Matrix([
[ c(2)*c(3), -c(2)*s(3), s(2)],
[ c(1)*s(3) + c(3)*s(1)*s(2), c(1)*c(3) - s(1)*s(2)*s(3), -c(2)*s(1)],
[-c(1)*c(3)*s(2) + s(1)*s(3), c(1)*s(2)*s(3) + c(3)*s(1), c(1)*c(2)]])
Note
----
The order of the input angles are specified in the order of rotations (corresponding
to the `order`). They are not specified with respect to any particular axis.
"""
X = rotX_symbolic
Y = rotY_symbolic
Z = rotZ_symbolic
order = order.split('-')
if ertype == 'extrinsic':
order.reverse()
composition = '{}(angle3)*{}(angle2)*{}(angle1)'.format(*order)
elif ertype == 'intrinsic':
composition = '{}(angle1)*{}(angle2)*{}(angle3)'.format(*order)
else:
raise ValueError('Incorrect elemental rotation parameter.')
#print(composition)
return eval(composition)
|
07069fc6c543acb9960f8203130cabcd04a762f4
| 3,642,963
|
import ctypes
def k4a_playback_get_next_imu_sample(playback_handle, imu_sample):
"""
K4ARECORD_EXPORT k4a_stream_result_t k4a_playback_get_next_imu_sample(k4a_playback_t playback_handle,
k4a_imu_sample_t *imu_sample);
"""
_k4a_playback_get_next_imu_sample = record_dll.k4a_playback_get_next_imu_sample
_k4a_playback_get_next_imu_sample.restype = k4a_stream_result_t
_k4a_playback_get_next_imu_sample.argtypes = (k4a_playback_t, \
ctypes.POINTER(k4a_imu_sample_t),)
return _k4a_playback_get_next_imu_sample(playback_handle, imu_sample)
|
faa127b8788163de209863adee1419c349852827
| 3,642,964
|
def knapsack_iterative_numpy(items, maxweight):
"""
Iterative knapsack method
maximize \sum_{i \in T} v_i
subject to \sum_{i \in T} w_i \leq W
Notes:
        dpmat is the dynamic programming memoization matrix.
        dpmat[i, w] is the maximum total value achievable using the first i items
        with total weight at most w.
        T is the set of indices in the optimal solution
"""
#import numpy as np
items = np.array(items)
weights = items.T[1]
# Find maximum decimal place (this problem is in NP)
max_exp = max([number_of_decimals(w_) for w_ in weights])
coeff = 10 ** max_exp
# Adjust weights to be integral
    weights = (weights * coeff).astype(int)
values = items.T[0]
MAXWEIGHT = int(maxweight * coeff)
W_SIZE = MAXWEIGHT + 1
dpmat = np.full((len(items), W_SIZE), np.inf)
    kmat = np.full((len(items), W_SIZE), 0, dtype=bool)
idx_subset = []
for w in range(W_SIZE):
dpmat[0][w] = 0
for idx in range(1, len(items)):
item_val = values[idx]
item_weight = weights[idx]
for w in range(W_SIZE):
valid_item = item_weight <= w
prev_val = dpmat[idx - 1][w]
if valid_item:
prev_noitem_val = dpmat[idx - 1][w - item_weight]
withitem_val = item_val + prev_noitem_val
more_valuable = withitem_val > prev_val
else:
more_valuable = False
dpmat[idx][w] = withitem_val if more_valuable else prev_val
kmat[idx][w] = more_valuable
K = MAXWEIGHT
for idx in reversed(range(1, len(items))):
if kmat[idx, K]:
idx_subset.append(idx)
K = K - weights[idx]
idx_subset = sorted(idx_subset)
items_subset = [items[i] for i in idx_subset]
total_value = dpmat[len(items) - 1][MAXWEIGHT]
return total_value, items_subset
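A usage sketch for the knapsack routine above. It assumes numpy is imported as np; the number_of_decimals helper it calls is not shown in this snippet, so a hypothetical stand-in is provided here purely to make the sketch runnable.
import numpy as np

def number_of_decimals(x):
    # Hypothetical stand-in for the helper the function expects:
    # counts the number of decimal places of x.
    text = ("%f" % x).rstrip("0").split(".")
    return len(text[1]) if len(text) > 1 else 0

# Each item is a (value, weight) pair; capacity is 5.
items = [(60, 1.0), (100, 2.0), (120, 3.0)]
total_value, chosen = knapsack_iterative_numpy(items, 5)
print(total_value, chosen)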
|
7ef8ab10b91e72b7625fdfc9445501c4ae8e5554
| 3,642,965
|
import torch
def gram_matrix(image: torch.Tensor):
"""https://pytorch.org/tutorials/
advanced/neural_style_tutorial.html#style-loss"""
n, c, h, w = image.shape
x = image.view(n * c, w * h)
gram_m = torch.mm(x, x.t()).div(n * c * w * h)
return gram_m
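A quick shape check for gram_matrix above (assumes PyTorch is installed).
import torch

image = torch.rand(1, 3, 64, 64)   # a batch of one RGB 64x64 image
g = gram_matrix(image)
print(g.shape)                     # torch.Size([3, 3]), i.e. (n*c, n*c)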
|
5912cfec026cba26a77131c3b52a8e751c0f575e
| 3,642,966
|
def photos_of_user(request, user_id):
"""Displaying user's photo gallery and adding new photos to user's gellery
view.
"""
template = 'accounts/profile/photos_gallery.html'
user_acc = get_object_or_404(TLAccount, id=user_id)
photos = user_acc.photos_of_user.all() # Custom related name
context = {
'photos': photos,
'user_acc': user_acc
}
if request.method == 'POST':
if request.user.email != user_acc.email:
return HttpResponseBadRequest()
initial = {
'photo': request.FILES['user_gallery_photo']
}
form = AddPhotoToUserGalleryForm(request.POST, initial)
if form.is_valid():
final_form = form.save(commit=False)
# final_form.place = place
final_form.author = user_acc
final_form.save()
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
else:
return render(request, template, context)
# If HTTP method is GET...
else:
return render(request, template, context)
|
3fd3cdfac7f1af4de13c464a3fe2bea26f72e6c2
| 3,642,967
|
def sw_update_opts_w_name_db_model_to_dict(sw_update_opts, subcloud_name):
"""Convert sw update options db model plus subcloud name to dictionary."""
result = {"id": sw_update_opts.id,
"name": subcloud_name,
"subcloud-id": sw_update_opts.subcloud_id,
"storage-apply-type": sw_update_opts.storage_apply_type,
"compute-apply-type": sw_update_opts.compute_apply_type,
"max-parallel-computes": sw_update_opts.max_parallel_computes,
"alarm-restriction-type": sw_update_opts.alarm_restriction_type,
"default-instance-action":
sw_update_opts.default_instance_action,
"created-at": sw_update_opts.created_at,
"updated-at": sw_update_opts.updated_at}
return result
|
c9c1703d9e4d0b69920d3ab06e5bf19fbb622103
| 3,642,968
|
def imf_binary_primary(m, imf, binary_fraction=constants.BIN_FRACTION):
"""
Initial mass function for primary stars of binary systems
Integrated between m' and m'' using Newton-Cotes
Returns 0 unless m is in (1.5, 16)
"""
m_inf = max(constants.B_MIN, m)
m_sup = min(constants.B_MAX, 2 * m)
if m <= 0 or m_sup <= m_inf:
return 0.0
return binary_fraction * newton_cotes(m_inf, m_sup, phi_primary(m, imf))
|
ae49a298d66a7ee844b252b5e736ea1c1846c31b
| 3,642,969
|
import torch
def compute_scene_graph_similarity(ade20k_split, threshold=None,
recall_funct=compute_recall_johnson_feiefei):
"""
    :param ade20k_split: dict mapping image ids to their scene-graph dicts
    :param threshold: similarity threshold used when computing recall
    :param recall_funct: function used to compute recall and mean rank
    :return: dict of averaged recall metrics, mean rank and mean similarity
"""
model = get_scene_graph_encoder()
model.eval()
test_results = []
with torch.no_grad():
for k, graph_dict in ade20k_split.items():
res = model(graph_dict)
test_results.append(res)
stacked_vectors = torch.stack(test_results)
category = get_categories(ade20k_split)
num_captions = stacked_vectors.shape[1]
index_inferred_caption = num_captions - 1
index_range_human_captions = index_inferred_caption
caption_dim = 1
recall_list = []
mean_rank_list = []
similarity_list = []
for index_caption in range(index_range_human_captions):
comparison = torch.cat((stacked_vectors[:, index_caption, :].unsqueeze(caption_dim),
stacked_vectors[:, index_inferred_caption, :].unsqueeze(caption_dim)),
dim=caption_dim)
similarity_caption = calculate_normalized_cosine_similarity_on_tensor(comparison)
recall_val, mean_rank = recall_funct(similarity_caption, threshold, category)
similarity_list.append(similarity_caption.diag().mean().to("cpu").numpy())
recall_list.append(recall_val)
mean_rank_list.append(mean_rank)
print(f"Threshold for retrieval: {threshold}")
recall_mean = pd.DataFrame(recall_list).mean().to_dict()
average_mean_rank = pd.DataFrame(mean_rank_list).mean()[0]
average_similarity = pd.DataFrame(similarity_list).mean()[0]
for k in recall_mean.keys():
print(f"Average {k}: {recall_mean[k]}")
recall_mean["mean_rank"] = average_mean_rank
print(f"Average Mean Rank: {average_mean_rank}")
print(f"Average Similarity{average_similarity}")
recall_mean["average_similarity"] = average_similarity
recall_mean["threshold"] = threshold
return recall_mean
|
061435209baa2c8d03af93ce09d00fcaf02adf8a
| 3,642,970
|
import importlib_resources
from itertools import islice
def load_cmudict():
"""Loads the CMU Pronouncing Dictionary"""
dict_ref = importlib_resources.files("tacotron").joinpath("cmudict-0.7b.txt")
with open(dict_ref, encoding="ISO-8859-1") as file:
cmudict = (line.strip().split(" ") for line in islice(file, 126, 133905))
cmudict = {
format_alt_entry(word): pronunciation for word, pronunciation in cmudict
}
return cmudict
|
76f3ed592cb3709d4f073c42ee7229ac0142b77a
| 3,642,971
|
def evalasm(d, text, r0 = 0, defines = defines, address = pad, thumb = False):
"""Compile and remotely execute an assembly snippet.
32-bit ARM instruction set by default.
Saves and restores r2-r12 and lr.
Returns (r0, r1).
"""
if thumb:
# In Thumb mode, we still use ARM code to save/restore registers.
assemble(d, address, '''\
push { r2-r12, lr }
adr lr, link
adr r8, text+1
bx r8
link:
pop { r2-r12, pc }
.pool
.thumb
.align 5
text:
%(text)s
bx lr
''' % locals(), defines=defines, thumb=False)
return d.blx(address, r0)
else:
# ARM mode (default)
assemble(d, address, '''\
push { r2-r12, lr }
%(text)s
pop { r2-r12, pc }
''' % locals(), defines=defines, thumb=False)
return d.blx(address, r0)
|
c5bf3f5728fc9e85dbfd2540083ae7b2b87cd452
| 3,642,972
|
import multiprocessing
def _get_thread_count():
"""Gets a thread_count based on the multiprocessing.cpu_count()."""
try:
thread_count = multiprocessing.cpu_count()
# cpu_count only gets the physical core count. There doesn't appear to be a
# simple way of determining whether a CPU supports simultaneous
# multithreading in Python, so assume that anything with 6 or more cores
# supports it.
if thread_count >= 6:
thread_count *= 2
except NotImplementedError:
# Assume a quad core if we can't get the actual core count.
thread_count = 4
return thread_count
|
f7c4959734e49a70412d87ebc1f03b811b600600
| 3,642,973
|
import os
import argparse
def is_dir(dir_name):
"""Checks if a path is an actual directory"""
if not os.path.isdir(dir_name):
msg = "{0} does not exist".format(dir_name)
raise argparse.ArgumentTypeError(msg)
else:
return dir_name
|
fc24ce57394cd0d854a2d5f054faaea65339ae80
| 3,642,974
|
def weighted_avg(x, weights): # used in lego_reader.py
""" x = batch * len * d
weights = batch * len
"""
return weights.unsqueeze(1).bmm(x).squeeze(1)
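A shape sketch for weighted_avg (assumes PyTorch); in practice the weights are typically attention scores that sum to 1 along the length dimension.
import torch

batch, length, d = 4, 7, 16
x = torch.rand(batch, length, d)
weights = torch.softmax(torch.rand(batch, length), dim=1)  # batch * len, rows sum to 1
out = weighted_avg(x, weights)
print(out.shape)  # torch.Size([4, 16])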
|
efa08d9719ccbcc727cb7349888f0a26140521e9
| 3,642,975
|
def CYR(df, N=5, M=5):
"""
    Market strength (CYR) indicator.
    :param df: DataFrame with 'volume' and 'amount' columns
    :param N: EMA period
    :param M: moving-average period for the CYR line
    :return: DataFrame with 'CRY' and 'MACYR' columns
"""
VOL = df['volume']
AMOUNT = df['amount']
DIVE = 0.01 * EMA(AMOUNT, N) / EMA(VOL, N)
CRY = (DIVE / REF(DIVE, 1) - 1) * 100
MACYR = MA(CRY, M)
return pd.DataFrame({
'CRY': CRY, 'MACYR': MACYR
})
|
7d5f31064d8eb3e4aaed8f6694226760a656f4d7
| 3,642,976
|
def packCode(code):
"""Packs the given code by passing it to the compression engine"""
if code in packCache:
return packCache[code]
packed = compressor.compress(parse(code))
packCache[code] = packed
return packed
|
b20714a022e73cbec38819d515c1cb89b8157d8c
| 3,642,977
|
def cus_excepthook(logger):
"""
Custom excepthook function to log exception information.
logger will log exception information automatically.
    This doesn't work in IPython (including Jupyter). Use `get_ipython().set_custom_exc((Exception,), your_exception_function)` instead in an IPython environment.
Parameters
----------
logger: a logger object.
Examples
--------
import sys
sys.excepthook = cus_excepthook(logger)
"""
def _excepthook(etype, value, tb):
sys_excepthook(etype, value, tb)
logger.debug("Got exception.\n", exc_info = (etype, value, tb))
return _excepthook
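A short wiring example for cus_excepthook. It assumes the script lives alongside the function above, where the sys_excepthook name it references is expected to be a module-level alias for the original hook.
import logging
import sys

logging.basicConfig(level=logging.DEBUG, filename="errors.log")
logger = logging.getLogger(__name__)

sys_excepthook = sys.__excepthook__        # assumed module-level alias used by cus_excepthook
sys.excepthook = cus_excepthook(logger)

# From here on, any uncaught exception is printed as usual and also logged with its traceback.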
|
15e13567af3584a970cc78549c222b9d1ef921d9
| 3,642,978
|
def langpack_submission_allowed(user, parsed_addon_data):
"""Language packs can only be submitted by people with the right
permission.
See https://github.com/mozilla/addons-server/issues/11788 and
https://github.com/mozilla/addons-server/issues/11793
"""
return (
not parsed_addon_data.get('type') == amo.ADDON_LPAPP or
action_allowed_user(user, amo.permissions.LANGPACK_SUBMIT))
|
5d26aaff3089a4e4ba6b2325f25d7ad5d759bcd9
| 3,642,979
|
def ht_edge_probabilities(p):
"""
Given the probability of sampling an edge, returns the probabilities of sampling two-stars and triangles
Parameters
---------------------
p: float
"""
pi_twostars = 0
pi_triangles = 0
    ###TIP: #TODO write the probabilities of sampling two-stars and triangles under edge sampling
# YOUR CODE HERE
return pi_twostars, pi_triangles
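The function above is deliberately left as an exercise. For reference, a commonly taught answer under independent edge sampling with probability p is sketched below; this is an assumption about the intended solution, not part of the original code: a two-star survives only if both of its edges are sampled, a triangle only if all three are.
def ht_edge_probabilities_sketch(p):
    # Each edge is retained independently with probability p.
    pi_twostars = p ** 2    # both edges of a two-star must be sampled
    pi_triangles = p ** 3   # all three edges of a triangle must be sampled
    return pi_twostars, pi_triangles

print(ht_edge_probabilities_sketch(0.5))  # (0.25, 0.125)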
|
47711a464e07583736348c7245caafbfd9f89abc
| 3,642,980
|
def get_tag_color_name(colorid):
""" Return name of the Finder color based on ID """
# TODO: need to figure out how to do this in locale/language name
try:
colorname = _COLORIDS[colorid]
except:
raise ValueError(f"Invalid colorid: {colorid}")
return colorname
|
baa8519d1d3379a45ee79469f060fc9913e3a73c
| 3,642,981
|
import re
def process_derived_core_properties(derived_core_properties):
"""Parse DerivedCoreProperties.txt and returns its version,
and set of characters with ID_Start and ID_Continue. """
id_start = set()
id_continue = set()
m = re.match('# DerivedCoreProperties-([0-9\.]+).txt', derived_core_properties)
assert m
version = m.group(1)
for (char, prop) in read_derived_core_properties(derived_core_properties):
if prop == 'ID_Start':
id_start.add(char)
if prop == 'ID_Continue':
id_continue.add(char)
return (version, id_start, id_continue)
|
cb15993eb84e3d1e7a1f65528f2f677e1e596668
| 3,642,982
|
def error_500(error):
"""Route function for handling 500 error pages
"""
return flask.templating.render_template("errors/500.html.j2"), 500
|
8d93367e21e855c672de50901de9793a326867e6
| 3,642,983
|
def poormax(X : np.ndarray, feature_axis = 1) -> np.ndarray:
"""
    Min-max (range) normalisation of the data.
    :param X: input data array
    :param feature_axis: the axis along which the features lie;
        feature_axis = 1 means each column is a separate feature
"""
if not feature_axis:
X = X.T
_min = np.min(X, axis = 0)
_max = np.max(X, axis = 0)
across = _max - _min
X = (X - _min) / across
if not feature_axis:
X = X.T
return X
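A small usage example for poormax (assumes numpy is imported as np and the function above is in scope): each feature is rescaled to [0, 1].
import numpy as np

X = np.array([[1.0, 10.0],
              [2.0, 20.0],
              [4.0, 40.0]])
print(poormax(X))                      # columns are features; each rescaled to [0, 1]
print(poormax(X.T, feature_axis=0))    # same data with features on the rows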
|
8d2c45b225d05f36951eb6fac2fc19214b6e3f31
| 3,642,984
|
def login_form(request):
"""
    Render the login form. The request must be a GET request.
"""
menu = MenuService.visitor_menu()
requestContext = RequestContext(request, {'menu':menu,
'page_title': 'Login'} )
return render_to_response('login.html', requestContext)
|
596273f8925a4d6aa39584f94262fc0f1d53657d
| 3,642,985
|
def tz_from_dd(points):
"""Get the timezone for a coordinate pair
Args:
points: (lat, lon) | [(lat, lon),] | pd.DataFrame w/lat and lon as columns
Returns:
np.array
"""
if isinstance(points, pd.DataFrame):
points = points.values.tolist()
if not isinstance(points, list):
points = [points]
x = ztree.query(points)
x = zips.iloc[x[1]].timezone.values
return x
|
5a6b05f1bf88c3a016cc5beae024a99873715904
| 3,642,986
|
def create_volume(devstack_node, ceph_node, vol_name, size):
"""
:param size: The size of the volume, in GB
"""
size = str(size)
log.info("Creating a {size}GB volume named {name}...".format(
name=vol_name,
size=size))
args = ['source', 'devstack/openrc', run.Raw('&&'), 'cinder', 'create',
'--display-name', vol_name, size]
cinder_create = devstack_node.sh(args, wait=True)
vol_info = parse_os_table(cinder_create)
log.debug("Volume info: %s", str(vol_info))
try:
rbd_output = ceph_node.sh("rbd --id cinder ls -l volumes", wait=True)
except run.CommandFailedError:
log.debug("Original rbd call failed; retrying without '--id cinder'")
rbd_output = ceph_node.sh("rbd ls -l volumes", wait=True)
assert vol_info['id'] in rbd_output, \
"Volume not found on Ceph cluster"
assert vol_info['size'] == size, \
"Volume size on Ceph cluster is different than specified"
return vol_info['id']
|
0cc8949bb18bcd5f71c50ea44b439d5ce63ef6f4
| 3,642,987
|
def find_touching_pixels(label_img, distance=1, selem=None):
"""
    Returns a mask indicating touching regions. Either provide a distance (a disk-shaped
    neighbourhood of that radius) or an explicit selem mask.
    :param label_img: a label image with integer labels
    :param distance: =1: touching pixels, >1: labels up to that many pixels apart
    :param selem: optional selection mask, e.g. skimage.morphology.disk(1) (if this is
        bigger than 1, the 'distance' no longer holds)
    :return: a mask of the regions that touch or come within the given distance
"""
if selem is None:
selem = morphology.disk(1)
touch_mask = np.zeros(label_img.shape)
not_bg = label_img > 0
for i in np.unique(label_img):
if i != 0:
cur_lab = (label_img == i)
# touch_mask[ndi.filters.maximum_filter(cur_lab, footprint=selem) &
# not_bg & (cur_lab == False)] = 1
touch_mask[ndi.binary_dilation(cur_lab, structure=selem, iterations=distance, mask=not_bg) &
(cur_lab == False)] = 1
return touch_mask
|
a69b2b89be2df9660f1016c008c266de7932bb90
| 3,642,988
|
def draw_boxes_on_image(img, boxes, labels_index, labelmap_dict,
**kwargs):
"""Short summary.
Parameters
----------
img : ndarray
Input image.
boxes : ndarray-like
        It must have shape (n, 4) where n is the number of
bounding boxes.
labels_index : ndarray-like
An array containing index of labels of bounding boxes. If None, only
bounding boxes will be drawn.
labelmap_dict : dict
A dictionary mapping labels with its index.
Returns
-------
img
Return annotated image.
"""
# When no box is detected
if boxes is None:
return img
try:
boxes = convert(boxes,
lambda x: np.asarray(x, dtype=np.int32),
np.ndarray)
except TypeError:
raise_type_error(type(boxes), [np.ndarray])
# When no box is detected
if boxes.shape[0] == 0:
return img
if boxes.shape[1] != 4 or boxes.ndim != 2:
raise ValueError("Input bounding box must be of shape (n, 4), "
"got shape {} instead".format(boxes.shape))
else:
return _draw_boxes_on_image(img, boxes, labels_index,
labelmap_dict, **kwargs)
|
1ee1d7b4e04e8646dd4e986e1a7e72d42d3f9685
| 3,642,989
|
def guess_locations(location):
"""Convenience function to guess where other Strongholds are located."""
location = Point(*location)
return (location,
rotate(location, CLOCKWISE),
rotate(location, COUNTERCLOCKWISE))
|
34c6824d63dbd99e4b09c6bb588298add404d87a
| 3,642,990
|
def get_centroid(mol, conformer=-1):
"""
Returns the centroid of the molecule.
Parameters
---------
conformer : :class:`int`, optional
The id of the conformer to use.
Returns
-------
:class:`numpy.array`
A numpy array holding the position of the centroid.
"""
centroid = sum(x for _, x in all_atom_coords(mol, conformer))
return np.divide(centroid, mol.GetNumAtoms())
|
393b5e27a5fa1779f98c2455c88d36027036e5f2
| 3,642,991
|
import torch
def idct(X, norm=None):
"""
The inverse to DCT-II, which is a scaled Discrete Cosine Transform, Type III
Our definition of idct is that idct(dct(x)) == x
For the meaning of the parameter `norm`, see:
https://docs.scipy.org/doc/ scipy.fftpack.dct.html
:param X: the input signal
:param norm: the normalization, None or 'ortho'
:return: the inverse DCT-II of the signal over the last dimension
"""
x_shape = X.shape
N = x_shape[-1]
X_v = X.contiguous().view(-1, x_shape[-1]) / 2
if norm == 'ortho':
X_v[:, 0] *= np.sqrt(N) * 2
X_v[:, 1:] *= np.sqrt(N / 2) * 2
k = torch.arange(x_shape[-1], dtype=X.dtype,
device=X.device)[None, :] * np.pi / (2 * N)
W_r = torch.cos(k)
W_i = torch.sin(k)
V_t_r = X_v
V_t_i = torch.cat([X_v[:, :1] * 0, -X_v.flip([1])[:, :-1]], dim=1)
V_r = V_t_r * W_r - V_t_i * W_i
V_i = V_t_r * W_i + V_t_i * W_r
V = torch.cat([V_r.unsqueeze(2), V_i.unsqueeze(2)], dim=2)
v = torch.irfft(V, 1, onesided=False)
x = v.new_zeros(v.shape)
x[:, ::2] += v[:, :N - (N // 2)]
x[:, 1::2] += v.flip([1])[:, :N // 2]
return x.view(*x_shape)
|
f0b86dbbe80fe9b2e4b442f55ea67b82f7eaa019
| 3,642,992
|
def div25():
"""
Returns the divider 44444444444444444444444
:return: divider25
"""
return divider25
|
6bb38e50a6cd7fe80c9aef5dbb2d829c0c5a6fb5
| 3,642,993
|
def comp_periodicity(self, wind_mat=None):
"""Computes the winding matrix (anti-)periodicity
Parameters
----------
self : Winding
A Winding object
wind_mat : ndarray
Winding connection matrix
Returns
-------
per_a: int
Number of spatial periods of the winding
is_aper_a: bool
True if the winding is anti-periodic over space
"""
if wind_mat is None:
wind_mat = self.get_connection_mat()
assert len(wind_mat.shape) == 4, "dim 4 expected for wind_mat"
# Summing on all the layers (Nlay_r and Nlay_theta)
wind_mat2 = squeeze(np_sum(np_sum(wind_mat, axis=1), axis=0))
qs = wind_mat.shape[3] # Number of phase
Zs = wind_mat.shape[2] # Number of Slot
Nperw = 1 # Number of electrical period of the winding
Nperslot = 1 # Periodicity of the winding in number of slots
# Looking for the periodicity of each phase
for q in range(0, qs):
k = 1
is_per = False
while k <= Zs and not is_per:
            # We shift the array around the slot and check if it's the same
if array_equal(wind_mat2[:, q], roll(wind_mat2[:, q], shift=k)):
is_per = True
else:
k += 1
# least common multiple to find common periodicity between different phase
Nperslot = lcm(Nperslot, k)
# If Nperslot > Zs no symmetry
if Nperslot > 0 and Nperslot < Zs:
# nb of periods of the winding (2 means 180°)
Nperw = Zs / float(Nperslot)
# if Zs cannot be divided by Nperslot (non integer)
if Nperw % 1 != 0:
Nperw = 1
# Check for anti symmetries in the elementary winding pattern
if (
Nperslot % 2 == 0
and norm(
wind_mat2[0 : Nperslot // 2, :] + wind_mat2[Nperslot // 2 : Nperslot, :]
)
== 0
):
is_aper_a = True
Nperw = Nperw * 2
else:
is_aper_a = False
return int(Nperw), is_aper_a
|
f1c7074cdc55be6af3c5511a071a1df0835e666e
| 3,642,994
|
def _is_valid_target(target, target_name, target_ports, is_pair):
"""Return True if the specified target is valid, False otherwise."""
if is_pair:
return (target[:utils.PORT_ID_LENGTH] in target_ports and
target_name == _PAIR_TARGET_NAME)
if (target[:utils.PORT_ID_LENGTH] not in target_ports or
not target_name.startswith(utils.TARGET_PREFIX) or
target_name == _PAIR_TARGET_NAME):
return False
return True
|
58a7c2ceb7b3206777c01122b0c3ef01a5887b65
| 3,642,995
|
def _get_span_name(servicer_context):
"""Generates a span name based off of the gRPC server rpc_request_info"""
method_name = servicer_context._rpc_event.call_details.method[1:]
if isinstance(method_name, bytes):
method_name = method_name.decode('utf-8')
method_name = method_name.replace('/', '.')
return '{}.{}'.format(RECV_PREFIX, method_name)
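The string transformation performed by _get_span_name, shown on a standalone gRPC method name; the servicer_context plumbing is omitted and the RECV_PREFIX value is an assumption for illustration.
RECV_PREFIX = "Recv"  # assumed value, for illustration only

def span_name_from_method(method: bytes) -> str:
    # gRPC call details carry method names like b"/package.Service/Method".
    name = method[1:].decode("utf-8").replace("/", ".")
    return "{}.{}".format(RECV_PREFIX, name)

print(span_name_from_method(b"/helloworld.Greeter/SayHello"))  # Recv.helloworld.Greeter.SayHello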
|
5527820fa766fe29009e6fe060e76c01a75e3c37
| 3,642,996
|
def calculateNDFairnessPara(_ranking, _protected_group, _cut_point, _gf_measure, _normalizer, items_n, proItems_n ):
"""
Calculate group fairness value of the whole ranking.
Calls function 'calculateFairness' in the calculation.
:param _ranking: A permutation of N numbers (0..N-1) that represents a ranking of N individuals,
e.g., [0, 3, 5, 2, 1, 4]. Each number is an identifier of an individual.
Stored as a python array.
:param _protected_group: A set of identifiers from _ranking that represent members of the protected group
e.g., [0, 2, 3]. Stored as a python array for convenience, order does not matter.
:param _cut_point: Cut range for the calculation of group fairness, e.g., 10, 20, 30,...
:param _gf_measure: Group fairness measure to be used in the calculation,
one of 'rKL', 'rND', 'rRD'.
:param _normalizer: The normalizer of the input _gf_measure that is computed externally for efficiency.
    :param items_n: Total number of items (users) in the full ranking.
    :param proItems_n: Total number of protected items in the full ranking.
:return: returns fairness value of _ranking, a float, normalized to [0, 1]
"""
#print("calculateNDFairnessPara")
#user_N=len(_ranking)
#pro_N=len(_protected_group)
if _normalizer==0:
raise ValueError("Normalizer equals to zero")
# error handling for input type
if not isinstance(_ranking, (list, tuple, np.ndarray)) and not isinstance( _ranking, str ):
raise TypeError("Input ranking must be a list-wise structure defined by '[]' symbol")
if not isinstance(_protected_group, (list, tuple, np.ndarray)) and not isinstance( _protected_group, str ):
raise TypeError("Input protected group must be a list-wise structure defined by '[]' symbol")
if not isinstance( _cut_point, ( int ) ):
raise TypeError("Input batch size must be an integer larger than 0")
if not isinstance( _normalizer, (int, float, complex) ):
raise TypeError("Input normalizer must be a number larger than 0")
if not isinstance( _gf_measure, str ):
raise TypeError("Input group fairness measure must be a string that choose from ['rKL', 'rND', 'rRD']")
discounted_gf=0 #initialize the returned gf value
for countni in range(len(_ranking)):
countni=countni+1
if(countni%_cut_point ==0):
ranking_cutpoint=_ranking[0:countni]
pro_cutpoint=set(ranking_cutpoint).intersection(_protected_group)
gf=calculateFairness(ranking_cutpoint,pro_cutpoint,items_n, proItems_n,_gf_measure)
#discounted_gf+=gf/math.log(countni+1,LOG_BASE) # log base -> global variable
#print("counttni : ", countni)
discounted_gf+=gf/(1.1**(countni-10/1000)) # log base -> global variable
# make a call to compute, or look up, the normalizer; make sure to check that it's not 0!
# generally, think about error handling
return discounted_gf/_normalizer
|
b1c0dfa53d1842f8d93a6ed6d2ae2ddd9ebafd7b
| 3,642,997
|
import os
def getCategories(blog_id, username, password):
"""
Parameters
int blog_id
string username
string password
Return Values
array
struct
int categoryId
int parentId
string description
string categoryName
string htmlUrl
string rssUrl
example from wordpress.com
[{'categoryDescription': '',
'categoryId': 1356,
'categoryName': 'Blogroll',
'description': 'Blogroll',
'htmlUrl': 'https://rubelongfellow.wordpress.com/category/blogroll/',
'parentId': 0,
'rssUrl': 'https://rubelongfellow.wordpress.com/category/blogroll/feed/'},
{'categoryDescription': '',
'categoryId': 42431,
'categoryName': 'Gearhead',
'description': 'Gearhead',
'htmlUrl': 'https://rubelongfellow.wordpress.com/category/gearhead/',
'parentId': 0,
'rssUrl': 'https://rubelongfellow.wordpress.com/category/gearhead/feed/'},
{'categoryDescription': '',
'categoryId': 1,
'categoryName': 'Uncategorized',
'description': 'Uncategorized',
'htmlUrl': 'https://rubelongfellow.wordpress.com/category/uncategorized/',
'parentId': 0,
'rssUrl': 'https://rubelongfellow.wordpress.com/category/uncategorized/feed/'}]
"""
logger.debug("%s.getCategories entered" % __name__)
res = []
user = get_user(username, password)
blog = Blog.objects.get(pk=blog_id)
check_perms(user, blog)
logger.debug("getting categories for %s" % blog)
for cat in Category.objects.filter(blog=blog):
res.append({
'categoryDescription': cat.description,
'categoryId': cat.id,
'categoryName': cat.title,
'description': cat.description,
'htmlUrl': cat.blog.get_absolute_url(),
'parentId': 0,
'rssUrl': os.path.join(cat.blog.get_absolute_url(), "feed"),
})
return res
|
f299cccbcc35b43029fd60c5dc459deed0a60906
| 3,642,998
|
import json
import requests
def change_server(name: str = None, description: str = None, repo_url: str = None, main_status: int = None, components: dict = None, password: str = None):
"""Change server according to arguments (using package config).
This will automatically change the config so it has the right credentials."""
check_config()
global server_name
global server_password
payload = {"name": server_name, "password": server_password}
if name != None:
if type(name) != str:
raise TypeError("name expected to be of type str.")
payload["newName"] = name
if description != None:
if type(description) != str:
raise TypeError("description expected to be of type str.")
payload["description"] = description
if repo_url != None:
if type(repo_url) != str:
raise TypeError("repo_url expected to be of type str.")
payload["repoURL"] = repo_url
if main_status != None:
if type(main_status) != int:
raise TypeError("main_status expected to be of type int.")
payload["mainStatus"] = main_status
if components != None:
if type(components) != dict:
raise TypeError("components expected to be of type dict.")
payload["components"] = json.dumps(components)
if password != None:
if type(password) != str:
raise TypeError("password expected to be of type str.")
payload["newPassword"] = password
try:
r = requests.post(_url + "api/changeserver",
json.dumps(payload), timeout=3.05)
if r.status_code == 200:
if name != None:
server_name = name
if password != None:
server_password = password
return True
else:
return (False, r.status_code, r.text)
except requests.exceptions.ConnectTimeout:
raise ConnectionTimeout
|
f7a5334da8ef011969c8ffb5c31c1b4f477ed2a5
| 3,642,999
|