content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def get_proto_messages(protocol: str) -> set:
    """Return the set of message names recorded for a protocol.

    Args:
        protocol: Protocol name; matched against the first column of each
            metadata row.

    Returns:
        Set of message identifiers (second column) for matching rows.
        (The original annotation said ``list`` but a set was always returned;
        the annotation is corrected rather than the behavior.)
    """
    db = MetaDB()
    try:
        rows = db.get_all_meta()
    finally:
        # Always release the connection, even if get_all_meta() raises.
        db.close_conn()
    return {row[1] for row in rows if row[0] == protocol}
def TokenAMarkdownReference(href, reference, title=None):
    """Build a Characters token holding a markdown link-reference definition.

    Produces the definition line used by reference-style links, e.g.::

        [link text][1]
        [1]: <https://example.com> "Title"

    which corresponds to ``<a href="..." title="Title">link text</a>``.
    """
    if title:
        suffix = ' "%s"' % title
    else:
        suffix = ""
    return {
        "type": "Characters",
        "data": "[%s]: %s%s" % (reference, href, suffix),
        "_md_type": mdTokenTypes["TokenAMarkdownReference"],
    }
def inverse_document_frequency(word_occurrence, num_texts):
    """Return log(num_texts / word_occurrence), the inverse document frequency.

    A word that occurs in no text (word_occurrence == 0) yields 0 instead of
    raising on division by zero.
    """
    if word_occurrence == 0:
        # Same outcome as the original ZeroDivisionError handler.
        return 0
    return math.log(float(num_texts) / float(word_occurrence))
def bip32_mprv_from_seed(seed: octets, version: octets) -> bytes:
    """Derive the BIP32 master extended private key (xprv) from a seed.

    Args:
        seed: Seed bytes, or a hex string which is decoded first.
        version: 4-byte version prefix (bytes or hex string); must be one of
            the private-key versions in PRIVATE.

    Returns:
        Base58Check-encoded extended master private key.

    Raises:
        ValueError: if the version is not a private-key version.
    """
    if isinstance(version, str):  # hex string
        version = bytes.fromhex(version)
    # Explicit raise instead of `assert`: asserts are stripped under -O,
    # which would silently accept a public version prefix.
    if version not in PRIVATE:
        raise ValueError("wrong version, master key must be private")
    # serialization data
    xmprv = version                    # version
    xmprv += b'\x00'                   # depth: the master key is at depth 0
    xmprv += b'\x00\x00\x00\x00'       # parent pubkey fingerprint (none)
    xmprv += b'\x00\x00\x00\x00'       # child index
    # actual extended key (key + chain code) derivation
    if isinstance(seed, str):  # hex string
        seed = bytes.fromhex(seed)
    hd = HMAC(b"Bitcoin seed", seed, sha512).digest()
    mprv = octets2int(hd[:32])
    xmprv += hd[32:]                              # chain code (right half)
    xmprv += b'\x00' + mprv.to_bytes(32, 'big')   # 0x00-prefixed private key
    return b58encode_check(xmprv)
def cmd_ml_load_model(current_board : board.Board, flags, player_dict):
    """ cmd loadmodel <filename> : load a machine learning model from file into MLPlayer """
    if "default" not in flags:
        error_message("Incorrectly formatted arguments")
        return
    model_name = flags["default"]
    # NOTE(review): the lambda only *captures* model_name; MLPlayer is not
    # constructed until a player is created, so a missing model file is not
    # detected here — confirm MLPlayer validates the path on construction.
    try:
        factory = lambda: ml_player.MLPlayer("MachineLearning", model_name)
        # All three aliases share one factory; the original built three
        # identical lambdas.
        player_dict["ml"] = factory
        player_dict["machinelearning"] = factory
        player_dict["nn"] = factory
    except Exception:  # narrowed from bare `except:` so SystemExit/KeyboardInterrupt propagate
        error_message("The specified ML model does not exist")
        return
    print(f"Loaded model {model_name} into MLPlayer")
def filterCombineCount(input, output):
    """Write only the 'Counts' columns of the first input CSV to a TSV file.

    Keeps every column whose name contains 'Counts', makes 'Counts' the
    index, and saves the result tab-separated to *output*.
    """
    frame = pd.read_csv(input[0])
    counts_only = frame.filter(like='Counts', axis=1).set_index("Counts")
    counts_only.to_csv(output, index=True, sep='\t')
def create_menu_node(context: WxRenderingContext) -> WxNode:
    """Create a WxNode from an xml node: the namespace names the module and
    the tag names the class to instantiate."""
    node_class = get_type(context.xml_node)
    init_kwargs = get_attr_args(context.xml_node, 'init', context.node_globals)
    instance = node_class(**init_kwargs)
    return WxNode(instance, context.xml_node, node_globals=context.node_globals)
def show_loading(screen):
    """Render a 'now loading...' splash; drawn independently of the game loop."""
    label = gfx.Text(screen, CFG().font_main, 16, (255, 255, 255))
    screen.fill((0, 0, 0))
    center = screen.get_rect()
    label.write('now loading...', center.centerx, center.centery,
                (255, 255, 255), origin='center')
    pygame.display.flip()
def attach_calibration_pattern(ax, **calibration_pattern_kwargs):
    """Attach a calibration pattern to axes.
    This function uses calibration_pattern to generate a figure.
    Args:
        calibration_pattern_kwargs: kwargs, optional
            Parameters to be given to the calibration_pattern function.
    Returns:
        image_axes: matplotlib.AxesImage
            See matplotlib.imshow documentation
            Useful for changing the image dynamically
        circle_artist: matplotlib.artist
            See matplotlib.circle documentation
            Useful for removing the circle from the figure
    """
    # Generate the pattern image and its underlying flow field.
    pattern, flow = calibration_pattern(**calibration_pattern_kwargs)
    # Mirror calibration_pattern's default radius when not supplied.
    flow_max_radius = calibration_pattern_kwargs.get("flow_max_radius", 1)
    # Symmetric extent (-r, r) on both axes so the origin is centered.
    extent = (-flow_max_radius, flow_max_radius) * 2
    image = ax.imshow(pattern, extent=extent)
    # Hide the outer frame and draw the remaining spines through the origin,
    # giving centered cross-hair axes.
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    for spine in ("bottom", "left"):
        ax.spines[spine].set_position("zero")
        ax.spines[spine].set_linewidth(1)
    ax.xaxis.set_minor_locator(AutoMinorLocator())
    ax.yaxis.set_minor_locator(AutoMinorLocator())
    # Hook up coordinate reporting against the flow field.
    attach_coord(ax, flow, extent=extent)
    # Outline the maximum-radius boundary of the pattern.
    circle = plt.Circle((0, 0), flow_max_radius, fill=False, lw=1)
    ax.add_artist(circle)
    return image, circle
def alignplot(align_data, en_tokens = None, es_tokens = None, annot = False):
    """
    Plot the alignment/attention matrix and return it as a base64 JPEG string.
    :params: annot: whether to annotate each element in the matrix
    :params: align_data: attention matrix, array-like
    :params: en_tokens: english tokens (list, array), optional
    :params: es_tokens: spanish tokens (list, array), optional
    """
    align_data_shape = align_data.shape
    if en_tokens is not None and es_tokens is not None:
        if annot:
            fig = plt.figure(figsize = (align_data_shape[0]/3,align_data_shape[1]/3))
            sns.heatmap(align_data, cmap = "Reds", annot=annot, fmt=".1f", cbar = True, linewidths=.5, linecolor='gray', xticklabels = en_tokens, yticklabels = es_tokens)
        else:
            fig = plt.figure()
            sns.heatmap(align_data, cmap = "Reds", annot=annot, fmt=".1f", cbar = True, linewidths=.5, xticklabels = en_tokens, yticklabels = es_tokens)
    else:
        # BUG FIX: previously `fig` was never assigned on this path, so
        # fig.tight_layout() below raised UnboundLocalError. Plot the matrix
        # without token tick labels instead.
        fig = plt.figure()
        sns.heatmap(align_data, cmap = "Reds", annot=annot, fmt=".1f", cbar = True, linewidths=.5)
    plt.xticks(rotation=45)
    image = BytesIO()
    fig.tight_layout()
    fig.savefig(image, format='jpeg')
    # Strip newlines for safe embedding of the base64 payload.
    return base64.b64encode(image.getvalue()).decode('utf-8').replace('\n', '')
def GetModuleObjectAndName(globals_dict):
    """Returns the module that defines a global environment, and its name.
    Args:
        globals_dict: A dictionary that should correspond to an environment
            providing the values of the globals.
    Returns:
        _ModuleObjectAndName - pair of module object & module name.
        Returns (None, None) if the module could not be identified.
    """
    module_name = globals_dict.get('__name__', None)
    module_obj = sys.modules.get(module_name, None)
    if module_name == '__main__':
        # The script path is a more informative name for the main module.
        display_name = sys.argv[0]
    else:
        display_name = module_name
    return _ModuleObjectAndName(module_obj, display_name)
def to_datetime(timestamp: str) -> datetime.datetime:
    """Convert a timestamp string in ISO format into a datetime object.

    Tries the fixed ISO layouts (with and without microseconds) first and
    falls back to the permissive dateutil parser.

    Parameters
    ----------
    timestamp : string
        Timestamp in ISO format

    Returns
    -------
    datetime.datetime
        Datetime object
    """
    for fmt in ('%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M:%S'):
        try:
            return datetime.datetime.strptime(timestamp, fmt)
        except ValueError:
            continue
    # Neither fixed layout matched; let dateutil handle the variant.
    return isoparse(timestamp)
def paint_box(stdscr, width, start_y, start_x, heading, fieldsets):
    """Paint a number of (text) fields in a box on the screen.

    The heading is drawn reverse-video and padded to *width*; each fieldset
    follows after a blank line, its first field bulleted and bold, the rest
    indented.
    """
    indent = 2
    full_heading = " " + heading
    pad = width - (len(heading) + 1)
    if pad > 0:
        full_heading += " " * pad
    stdscr.addstr(start_y, start_x, full_heading, curses.A_REVERSE)
    y = start_y + 1
    for fieldset in fieldsets:
        y += 1  # blank line between fieldsets
        for line_no, field in enumerate(fieldset):
            if line_no == 0:
                stdscr.addstr(y, start_x, "- " + str(field), curses.A_BOLD)
            else:
                stdscr.addstr(y, start_x + indent, str(field))
            y += 1
def test_pwd_removal_preserve_reserved_word(regexes, config_line):
    """Test that reserved words are preserved even if they appear in password lines."""
    # No passwords collected yet — start with an empty lookup table.
    pwd_lookup = {}
    # The sanitizer must return the line unchanged: reserved words are not secrets.
    assert config_line == replace_matching_item(regexes, config_line, pwd_lookup)
def print_packages(ctx, param, value):
    """
    Print the list of supported package manifests and datafile formats.

    Click eager-option callback: when the flag is set, prints every
    registered package datafile handler and exits the CLI.
    """
    # Standard click eager-callback guard: do nothing unless the flag was
    # actually passed (and not during completion/resilient parsing).
    if not value or ctx.resilient_parsing:
        return
    from packagedcode import PACKAGE_DATAFILE_HANDLERS
    # Sort by package type then datasource id for stable, readable output.
    for cls in sorted(
        PACKAGE_DATAFILE_HANDLERS, key=lambda pc: (pc.default_package_type or '', pc.datasource_id)
    ):
        pp = ', '.join(repr(p) for p in cls.path_patterns)
        click.echo('--------------------------------------------')
        click.echo(f'Package type: {cls.default_package_type}')
        # A handler without a datasource_id is a registration bug; fail loudly.
        if cls.datasource_id is None:
            raise Exception(cls)
        click.echo(f'  datasource_id: {cls.datasource_id}')
        click.echo(f'  documentation URL: {cls.documentation_url}')
        click.echo(f'  primary language: {cls.default_primary_language}')
        click.echo(f'  description: {cls.description}')
        click.echo(f'  path_patterns: {pp}')
    ctx.exit()
async def handle_arg(
    ctx: SlashContext,
    key: str,
    value: Any,
    type_: Union[Callable, commands.Converter]
) -> Any:
    """
    Handle an argument and deal with typing.Optional modifiers
    Parameters
    ----------
    ctx : SlashContext
        The context of the argument
    key : str
        The argument name
    value : any
        The value of the argument
    type_ : callable or commands.Converter
        The type, instance of type converter or type converter class
    Returns
    -------
    any
        The handled argument
    Raises
    ------
    BadSlashArgument
        Invalid argument, this occurs when no converter is able to provide a
        non-None response or not raise an error while converting. If a
        typing.Optional is provided, or any object with .__args__ containing
        NoneType, then this method will never raise and will instead return
        None.
    """
    # Is a typing.Optional, typing.Union, etc class
    if hasattr(type_, "__args__"):
        optional = type(None) in type_.__args__
        for item in type_.__args__:  # Iterate through possible types
            # BUG FIX: typing stores NoneType (not None) in __args__, so the
            # original `item is None` check never matched and NoneType was
            # pointlessly attempted as a converter.
            if item is type(None):
                continue
            try:
                # Attempt to convert; this also allows nesting of
                # typing.Optional etc. Return the first success (left to right).
                return await handle_arg(ctx, key, value, item)
            except Exception:
                # This member failed; move on to the next candidate type.
                continue
        if not optional:
            raise BadSlashArgument(message=f"Argument {key} is not of any valid type")
        return None
    if hasattr(type_, "convert"):
        # Check item is instantiated
        if isinstance(type_, type):
            # Converter class given: instantiate it first.
            converter = type_().convert
        else:
            # Already-instantiated converter: use its bound method.
            converter = type_.convert
        try:
            return await converter(ctx, value)
        except Exception as exc:
            raise BadSlashArgument(f"Failed to convert argument {key}") from exc
    # Probably not a converter — treat as a plain callable (e.g. int, str).
    return type_(value)
def parse_command_arguments():
    """Parse and return the command-line arguments for the converter."""
    arg_parser = argparse.ArgumentParser(description="svg-to-swift converter")
    arg_parser.add_argument("--input_file", required=True,
                            help="SVG file to convert.")
    arg_parser.add_argument("--output_file", default="svg.swift",
                            help="File to save in swift code.")
    return arg_parser.parse_args()
def get_module_source_path(modname, basename=None):
    """Return module *modname* source path
    If *basename* is specified, return *modname.basename* path where
    *modname* is a package containing the module *basename*
    *basename* is a filename (not a module name), so it must include the
    file extension: .py or .pyw
    Handles py2exe/cx_Freeze distributions"""
    srcpath = get_module_path(modname)
    parentdir = osp.join(srcpath, osp.pardir)
    if osp.isfile(parentdir):
        # Parent directory is not a directory but the 'library.zip' file:
        # this is either a py2exe or a cx_Freeze distribution
        # Re-root the source path next to the archive, under the module name.
        srcpath = osp.abspath(osp.join(osp.join(parentdir, osp.pardir),
                                       modname))
    if basename is not None:
        # Append the requested file inside the (package) directory.
        srcpath = osp.abspath(osp.join(srcpath, basename))
    return srcpath
def drop_useless_columns(data):
    """Return *data* without its duplicate or useless columns."""
    # Columns that carry no information for this dataset.
    useless = [
        # we stay in a given city
        "agency_id",
        "agency_name",
        "agency_short_name",
        # we stay on a given transportation network
        "transportation_type",
        "transportation_subtype",
        # we already have stop id
        "stop_name_unofficial",
        # we already have line name
        "line_id",
        # we don't need this
        "circuit_transfer",
    ]
    return data.drop(labels=useless, axis=1)
def arg_to_timestamp(arg: Any, arg_name: str, required: bool = False) -> Optional[int]:
    """Converts an XSOAR argument to a timestamp (seconds from epoch)
    This function is used to quickly validate an argument provided to XSOAR
    via ``demisto.args()`` into an ``int`` containing a timestamp (seconds
    since epoch). It will throw a ValueError if the input is invalid.
    If the input is None, it will throw a ValueError if required is ``True``,
    or ``None`` if required is ``False.
    :type arg: ``Any``
    :param arg: argument to convert
    :type arg_name: ``str``
    :param arg_name: argument name
    :type required: ``bool``
    :param required:
        throws exception if ``True`` and argument provided is None
    :return:
        returns an ``int`` containing a timestamp (seconds from epoch) if conversion works
        returns ``None`` if arg is ``None`` and required is set to ``False``
        otherwise throws an Exception
    :rtype: ``Optional[int]``
    """
    if arg is None:
        if required:
            raise ValueError(f'Missing "{arg_name}"')
        return None
    if isinstance(arg, str):
        if arg.isdigit():
            # A digit-only string is already an epoch timestamp.
            return int(arg)
        # dateparser handles ISO8601 strings ("2019-10-23T00:00:00") as well
        # as relative expressions ("3 days").
        parsed = dateparser.parse(arg, settings={'TIMEZONE': 'UTC'})
        if parsed is None:
            # dateparser could not make sense of the string.
            raise ValueError(f'Invalid date: {arg_name}')
        return int(parsed.timestamp())
    if isinstance(arg, (int, float)):
        # Truncate floats to whole seconds.
        return int(arg)
    raise ValueError(f'Invalid date: "{arg_name}"')
def expfloats(floats):
    """Exponentiate each value so that tile sizes scale logarithmically,
    large to small."""
    return list(map(math.exp, floats))
def typeChecker(obj, *types, error=True):
    """
    Check type(s) of an object.
    The first type correlates to the first layer of obj and so on.
    Each type can be a (tuple that holds) type, string or literal object such as `None`.
    :param obj: Generic obj, iterable or not
    :param types: lists or tuples if obj at that level can be multiple types, single type if only one
    :param error: Raise error if true, otherwise returns False when fail
    :return: True on success; False on failure when error is falsy
    """
    # Objects compared by identity/equality rather than by type.
    literalObjects = [None]
    try:
        if not types:
            raise TypeError("No types were given as args")
        # Normalize the user-supplied type spec, then validate obj against it.
        types = _typeChecker_prepareTypesList(types, literalObjects)
        _typeChecker_checkObject(obj, types, literalObjects)
    except TypeError as e:
        # Either re-raise (default) or signal failure with False.
        if error:
            raise e
        else:
            return False
    else:
        return True
def post_profile_identifier_chunks_token(identifier, token):
    """
    Updates a public chunk.

    Looks the chunk up by token, verifies there is exactly one participant
    other than the sender, and optionally marks the chunk played on behalf
    of that receiver.
    """
    chunk = models.Chunk.get_by_token(identifier, token)
    if not chunk:
        raise errors.ResourceNotFound('That chunk does not exist')
    stream_entity = chunk.key.parent().get()
    # BUG FIX: `filter(...)` returns a lazy iterator on Python 3, so the
    # len() call below raised TypeError. Materialize the result as a list.
    others = [p for p in stream_entity.participants if p.account != chunk.sender]
    if len(others) != 1:
        raise errors.ForbiddenAction('Cannot modify that chunk')
    stream = streams.MutableStream(others[0], stream_entity)
    # Update played state on behalf of the receiver.
    if flask_extras.get_flag('played'):
        was_unplayed = not stream.is_played
        stream.set_played_until(chunk.end, report=False)
        # Duration is stored in milliseconds; report in seconds.
        stream._report('played', duration=chunk.duration / 1000.0, unplayed=was_unplayed)
    return {'success': True}
def assert_res_props(res, exp_props, ignore_values=None, prop_names=None):
    """
    Check the properties of a resource object.

    Asserts that every expected property (optionally restricted to
    prop_names) exists on the resource, and — unless listed in
    ignore_values — that its actual value matches the expected one.
    """
    res_props = dict(res.properties)
    # checked_prop_names = set()
    for prop_name in exp_props:
        if prop_names is not None and prop_name not in prop_names:
            continue  # Only check properties in prop_names
        # Presence check first, with a descriptive failure message.
        assert prop_name in res_props, \
            "Property '{p}' not found in {k} object '{o}'". \
            format(p=prop_name, k=res.prop('class'), o=res.name)
        # Value check, skipped for properties whose values are ignored.
        if ignore_values is not None and prop_name not in ignore_values:
            act_value = res_props[prop_name]
            exp_value = exp_props[prop_name]
            assert_res_prop(act_value, exp_value, prop_name, res)
        # checked_prop_names.add(prop_name)
    # extra_prop_names = set(res_props.keys()) - checked_prop_names
    # TODO: Decide whether we want to check the exact set, or the minimum set.
    # assert not extra_prop_names, \
    #     "The following properties were unexpectedly present in {k} object " \
    #     "'{o}' : {e}". \
    #     format(k=res.prop('class'), o=res.name, e=', '.join(extra_prop_names))
def get_latest_checkpoint(checkpoint_dir: str) -> int:
    """Find the episode ID of the latest checkpoint, if any.

    Returns -1 when the directory is unreadable or holds no checkpoints.
    """
    glob = osp.join(checkpoint_dir, 'checkpoint_*.pkl')
    def extract_episode(x):
        # Slice out the integer between 'checkpoint_' (11 chars) and '.pkl'.
        return int(x[x.rfind('checkpoint_') + 11:-4])
    try:
        checkpoint_files = tf.io.gfile.glob(glob)
    except tf.errors.NotFoundError:
        logging.warning('Unable to reload checkpoint at %s', checkpoint_dir)
        return -1
    try:
        latest_episode = max(extract_episode(x) for x in checkpoint_files)
    except ValueError:
        # max() on an empty sequence: no checkpoint files found.
        return -1
    return latest_episode
def lazy(f):
    """Decorator: wrap *f* so calling it yields (rather than returns) its result."""
    @wraps(f)
    def generator_wrapper(*args):
        yield f(*args)
    return generator_wrapper
def build_source_test_raw_sql(test_namespace, source, table, test_type,
                              test_args):
    """Build the raw SQL from a source test definition.
    :param test_namespace: The test's namespace, if one exists
    :param source: The source under test.
    :param table: The table under test
    :param test_type: The type of the test (unique_id, etc)
    :param test_args: The arguments passed to the test as a list of `key=value`
        strings
    :return: A string of raw sql for the test node.
    """
    # sort the dict so the keys are rendered deterministically (for tests)
    kwargs = [as_kwarg(key, test_args[key]) for key in sorted(test_args)]
    # Namespaced tests are invoked as "<namespace>.test_<type>".
    if test_namespace is None:
        macro_name = "test_{}".format(test_type)
    else:
        macro_name = "{}.test_{}".format(test_namespace, test_type)
    # Doubled braces render literal {{ }} so the output is a Jinja macro call.
    raw_sql = (
        "{{{{ {macro}(model=source('{source}', '{table}'), {kwargs}) }}}}"
        .format(
            source=source['name'],
            table=table['name'],
            macro=macro_name,
            kwargs=", ".join(kwargs))
    )
    return raw_sql
def get_random_word(used_words: List[int]) -> Tuple[Optional[str], List[int]]:
    """Select a random word from a list and pass on a list of used words.
    Args:
        used_words (list): A list of the indexes of every already used word.
    Returns:
        Tuple[Optional[str], list]: The random word that is selected and a list
        of the index of every random word that has been selected.
    """
    list_of_words: List[str] = [
        "hangman", "kanban", "evidence", "problem", "decomposed", "components",
        "developed", "trialled", "assembled", "tested", "create", "final",
        "working", "outcome"
    ]
    if len(used_words) == len(list_of_words):
        # Every word has been used; return None to signal exhaustion instead
        # of trying (and failing) to pick from an empty pool.
        return None, used_words
    # Pool of candidates: every word whose index is not yet used.
    remaining: List[str] = [
        w for i, w in enumerate(list_of_words) if i not in used_words
    ]
    word = remaining[randint(0, len(remaining) - 1)]
    # Record the chosen word by its index in the *full* list, since indices
    # in the trimmed pool don't line up with list_of_words.
    used_words.append(list_of_words.index(word))
    return word, used_words
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size, height, shuffle, channels_last=True):
    """Construct a queued batch of images and labels.
    Args:
        image: 3-D Tensor of [height, width, 3] of type.float32.
        label: 1-D Tensor of type.int32
        min_queue_examples: int32, minimum number of samples to retain
            in the queue that provides of batches of examples.
        batch_size: Number of images per batch.
        height: Image height, recorded in the output feature dict.
        shuffle: boolean indicating whether to use a shuffling queue.
        channels_last: if False, transpose images to NCHW layout.
    Returns:
        images: Images. 4D tensor of [batch_size, height, width, 3] size.
        labels: Labels. 1D tensor of [batch_size] size.
    """
    # Create a queue that shuffles the examples, and then
    # read 'batch_size' images + labels from the example queue.
    if not channels_last:
        # HWC -> CHW for NCHW consumers.
        image = tf.transpose(image, [2, 0, 1])
    # 10 classes hard-coded throughout (one_hot depth, reshape, num_classes).
    features = {
        'images': image,
        'labels': tf.one_hot(label, 10),
        'recons_image': image,
        'recons_label': label,
    }
    if shuffle:
        batched_features = tf.train.shuffle_batch(
            features,
            batch_size=batch_size,
            num_threads=16,
            capacity=min_queue_examples + 3 * batch_size,
            min_after_dequeue=min_queue_examples)
    else:
        # Deterministic order requires a single reader thread.
        batched_features = tf.train.batch(
            features,
            batch_size=batch_size,
            num_threads=1,
            capacity=min_queue_examples + 3 * batch_size)
    # Fix the static shapes the queue runners may have lost.
    batched_features['labels'] = tf.reshape(batched_features['labels'],
                                            [batch_size, 10])
    batched_features['recons_label'] = tf.reshape(
        batched_features['recons_label'], [batch_size])
    # Metadata consumed by downstream model code.
    batched_features['height'] = height
    batched_features['depth'] = 3
    batched_features['num_targets'] = 1
    batched_features['num_classes'] = 10
    # Display the training images in the visualizer.
    tf.summary.image('images', batched_features['images'])
    return batched_features
def test_create_user(api_client, user_data):
    """Test user creation via the v1 users endpoint."""
    response = api_client.post("/v1/users/", user_data)
    # API must report creation and the user must actually be persisted.
    assert response.status_code == status.HTTP_201_CREATED
    assert User.objects.filter(username=user_data["username"]).exists()
def update_attributes(cloudnet_variables: dict, attributes: dict) -> None:
    """Overrides existing CloudnetArray-attributes.

    Existing attributes are overridden with hard-coded values; new
    attributes are added. Product-specific attributes are applied first,
    then the shared COMMON_ATTRIBUTES for the same key.

    Args:
        cloudnet_variables: CloudnetArray instances.
        attributes: Product-specific attributes.
    """
    for name, variable in cloudnet_variables.items():
        if name in attributes:
            variable.set_attributes(attributes[name])
        if name in COMMON_ATTRIBUTES:
            variable.set_attributes(COMMON_ATTRIBUTES[name])
def riskscoreci(x1, n1, x2, n2, alpha=0.05, correction=True):
    """Compute CI for the ratio of two binomial rates.
    Implements the non-iterative method of Nam (1995).
    It has better properties than Wald/Katz intervals,
    especially with small samples and rare events.
    Translated from R-package 'PropCIs':
    https://github.com/shearer/PropCIs
    Nam, J. M. (1995) Confidence limits for the ratio of two binomial proportions based on likelihood
    scores: Non-iterative method. Biom. J. 37 (3), 375-379.
    Koopman PAR. (1984) Confidence limits for the ratio of two binomial proportions. Biometrics 40,
    513-517.
    Miettinen OS, Nurminen M. (1985) Comparative analysis of two rates. Statistics in Medicine 4,
    213-226.
    Nurminen, M. (1986) Analysis of trends in proportions with an ordinally scaled determinant. Biometrical
    J 28, 965-974
    Agresti, A. (2002) Categorical Data Analysis. Wiley, 2nd Edition.
    Parameters
    ----------
    xi : int
        Number of events in group i
    ni : int
        Number of trials/subjects in group i
    alpha : float
        Specifies coverage of the confidence interval
    correction : bool
        A corrected estimate of RR can be returned by adding 0.5 to each cell
        of the contingency table.
    Returns
    -------
    ci : array
        Confidence interval array [LL, UL, RR_est]"""
    # Point estimate of the relative risk, optionally continuity-corrected.
    if correction:
        rr_est = ((x1+0.5) / (n1+1)) / ((x2+0.5) / (n2+1))
    else:
        rr_est = (x1 / n1) / (x2 / n2)
    z = np.abs(stats.norm.ppf(alpha/2))
    if x2==0 and x1 == 0:
        # No events in either group: interval is uninformative.
        ul = np.inf
        ll = 0
    else:
        # Coefficients of the cubic whose roots give the score bounds
        # (Nam 1995, non-iterative method).
        a1 = n2*(n2*(n2+n1)*x1+n1*(n2+x1)*(z**2))
        a2 = -n2*(n2*n1*(x2+x1)+2*(n2+n1)*x2*x1+n1*(n2+x2+2*x1)*(z**2))
        a3 = 2*n2*n1*x2*(x2+x1)+(n2+n1)*(x2**2)*x1+n2*n1*(x2+x1)*(z**2)
        a4 = -n1*(x2**2)*(x2+x1)
        b1 = a2/a1
        b2 = a3/a1
        b3 = a4/a1
        # Solve the depressed cubic trigonometrically (three real roots).
        c1 = b2-(b1**2)/3
        c2 = b3-b1*b2/3+2*(b1**3)/27
        ceta = np.arccos(np.sqrt(27)*c2/(2*c1*np.sqrt(-c1)))
        t1 = -2*np.sqrt(-c1/3)*np.cos(np.pi/3-ceta/3)
        t2 = -2*np.sqrt(-c1/3)*np.cos(np.pi/3+ceta/3)
        t3 = 2*np.sqrt(-c1/3)*np.cos(ceta/3)
        p01 = t1-b1/3
        p02 = t2-b1/3
        p03 = t3-b1/3
        # Order the roots: largest and smallest are the candidate bounds.
        p0sum = p01+p02+p03
        p0up = np.min([p01,p02,p03])
        p0low = p0sum-p0up-np.max([p01,p02,p03])
        # Boundary cases (zero events or all events in a group) need
        # special handling; see Nam (1995).
        if x2 == 0 and x1 != 0:
            ll = (1-(n1-x1)*(1-p0low)/(x2+n1-(n2+n1)*p0low))/p0low
            ul = np.inf
        elif x2 != n2 and x1==0:
            ul = (1-(n1-x1)*(1-p0up)/(x2+n1-(n2+n1)*p0up))/p0up
            ll = 0
        elif x2 == n2 and x1 == n1:
            ul = (n2+z**2)/n2
            ll = n1/(n1+z**2)
        elif x1 == n1 or x2 == n2:
            if x2 == n2 and x1 == 0:
                ll = 0
            if x2 == n2 and x1 != 0:
                # Iterative search (shrinking phil) for the lower limit.
                phat1 = x2/n2
                phat2 = x1/n1
                phihat = phat2/phat1
                phil = 0.95*phihat
                chi2 = 0
                while chi2 <= z:
                    a = (n2+n1)*phil
                    b = -((x2+n1)*phil+x1+n2)
                    c = x2+x1
                    p1hat = (-b-np.sqrt(b**2-4*a*c))/(2*a)
                    p2hat = p1hat*phil
                    q2hat = 1-p2hat
                    var = (n2*n1*p2hat)/(n1*(phil-p2hat)+n2*q2hat)
                    chi2 = ((x1-n1*p2hat)/q2hat)/np.sqrt(var)
                    ll = phil
                    phil = ll/1.0001
            # Swap group roles when all of group 1 responded.
            i = x2
            j = x1
            ni = n2
            nj = n1
            if x1 == n1:
                i = x1
                j = x2
                ni = n1
                nj = n2
            phat1 = i/ni
            phat2 = j/nj
            phihat = phat2/phat1
            phiu = 1.1*phihat
            if x2 == n2 and x1 == 0:
                # Start the search from a small positive value instead of 0.
                if n2<100:
                    phiu = .01
                else:
                    phiu = 0.001
            # Iterative search (growing phiu) for the upper limit.
            chi1 = 0
            while chi1 >= -z:
                a = (ni+nj)*phiu
                b = -((i+nj)*phiu+j+ni)
                c = i+j
                p1hat = (-b-np.sqrt(b**2-4*a*c))/(2*a)
                p2hat = p1hat*phiu
                q2hat = 1-p2hat
                var = (ni*nj*p2hat)/(nj*(phiu-p2hat)+ni*q2hat)
                chi1 = ((j-nj*p2hat)/q2hat)/np.sqrt(var)
                phiu1 = phiu
                phiu = 1.0001*phiu1
            if x1 == n1:
                # Roles were swapped above, so invert the found limit.
                ul = (1-(n1-x1)*(1-p0up)/(x2+n1-(n2+n1)*p0up))/p0up
                ll = 1/phiu1
            else:
                ul = phiu1
        else:
            # Regular case: both limits come directly from the cubic roots.
            ul = (1-(n1-x1)*(1-p0up)/(x2+n1-(n2+n1)*p0up))/p0up
            ll = (1-(n1-x1)*(1-p0low)/(x2+n1-(n2+n1)*p0low))/p0low
    return np.array([ll, ul, rr_est])
def blend(a, b, alpha=0.5):
    """
    Alpha blend two images.
    Parameters
    ----------
    a, b : numpy.ndarray
        Images to blend.
    alpha : float
        Blending factor; weight of image *a*.
    Returns
    -------
    result : numpy.ndarray
        Blended image: alpha * a + (1 - alpha) * b.
    """
    img_a = skimage.img_as_float(a)
    img_b = skimage.img_as_float(b)
    return alpha * img_a + (1 - alpha) * img_b
def GetWsdlMethod(ns, wsdlName):
    """Return the wsdl method registered for (ns, wsdlName).

    Raises:
        KeyError: if no method is registered for the pair.
    """
    with _lazyLock:
        method = _wsdlMethodMap[(ns, wsdlName)]
        if isinstance(method, ManagedMethod):
            # The type corresponding to the method is loaded,
            # just return the method object
            return method
        elif method:
            # The type is not loaded, the map contains the info
            # to load the type. Load the actual type and
            # return the method object
            LoadManagedType(*method)
            return _wsdlMethodMap[(ns, wsdlName)]
        else:
            # BUG FIX: the message referenced an undefined `name`, which
            # raised NameError instead of the intended KeyError.
            raise KeyError("{0} {1}".format(ns, wsdlName))
def displayBoard(board):
    """Displays the board on the screen.

    *board* maps (column, row) tuples to single-character cell strings;
    the grid is BOARD_WIDTH x BOARD_HEIGHT with numeric labels on all
    four sides.
    """
    tensDigitsLine = '    ' # Indentation for the number labels.
    for i in range(1, 6):
        # Place a tens digit every 10 columns (columns 10, 20, ..., 50).
        tensDigitsLine += (' ' * 9) + str(i)
    # Print the numbers across the top of the board.
    print(tensDigitsLine)
    print('   ' + ('0123456789' * 6))
    print()
    # Print each of the 15 rows.
    for row in range(BOARD_HEIGHT):
        # Single-digit numbers need to be padded with an extra space.
        if row < 10:
            extraSpace = ' '
        else:
            extraSpace = ''
        # Create the string for this row on the board.
        boardRow = ''
        for column in range(BOARD_WIDTH):
            boardRow += board[(column, row)]
        # Row number on both sides so the grid is readable from either edge.
        print('{}{} {} {}'.format(extraSpace, row, boardRow, row))
    # Print the numbers across the bottom of the board.
    print()
    print('   ' + ('0123456789' * 6))
    print(tensDigitsLine)
def plot_feature_wise(indicators, plot=False, show=True, ax=None, nf_max=40):
    """Plot the statistics feature-wise.

    Optionally prints the feature-wise missing-value table (show) and/or
    draws a stacked barplot of missing-value counts per feature (plot).
    Returns (fig, ax) when plotting; None otherwise.
    """
    n_mv_fw = indicators['feature-wise']
    n_rows = indicators['global'].at[0, 'n_rows']
    if show:
        # Print the full table without pandas row truncation.
        with pd.option_context('display.max_rows', None):
            print(
                f'\n'
                f'Statistics feature-wise:\n'
                f'------------------------\n'
                f'\n'
                f'{n_mv_fw}'
            )
    if plot:
        # Plot proportion of missing values in each feature
        # Copy index in a column for the barplot method
        n_mv_fw['feature'] = n_mv_fw.index
        n_mv_fw['feature_shortened'] = n_mv_fw['id'].astype(str) + ': ' + n_mv_fw.index
        # Truncate
        if n_mv_fw.shape[0] <= nf_max:
            # NOTE(review): truncates at 27 chars but the guard checks 20 —
            # looks inconsistent; confirm the intended cutoff.
            def truncate(string):
                if len(string) <= 20:
                    return string
                return string[:27]+'...'
            n_mv_fw['feature_shortened'] = n_mv_fw['feature_shortened'].apply(truncate)
        # Add the total number of values for each feature
        n_mv_fw['N V'] = n_rows
        # Get rid of the features with no missing values
        n_mv_fw_l = n_mv_fw[(n_mv_fw['N MV1'] != 0) | (n_mv_fw['N MV2'] != 0)]
        n_mv_fw_l = n_mv_fw_l.head(20)
        if ax is None:
            fig, ax = plt.subplots(figsize=(10, 8))
        else:
            fig = plt.gcf()
        if n_mv_fw_l.empty:
            # Nothing to draw; return the empty figure/axes unchanged.
            return fig, ax
        # Three stacked bars per feature: total, MV type 1, MV type 2.
        sns.set_color_codes('pastel')
        sns.barplot(x='N V', y='feature_shortened', data=n_mv_fw_l, ax=ax,
                    color='lightgray', label=f'Not missing', dodge=False)
        sns.set_color_codes('muted')
        sns.barplot(x='N MV', y='feature_shortened', data=n_mv_fw_l, ax=ax,
                    color='b', label=f'Missing - Not applicable')
        sns.set_color_codes("dark")
        sns.barplot(x='N MV2', y='feature_shortened', data=n_mv_fw_l, ax=ax,
                    color="b", label=f'Missing - Not available')
        ax.legend(ncol=1, loc='lower right', frameon=True,
                  title='Type of values')
        ax.set(ylabel='Features', xlabel='Number of values')
        ax.tick_params(labelsize=7)
        sns.despine(left=True, bottom=True, ax=ax)
        # Remove y labels if more than 40
        if n_mv_fw_l.shape[0] > nf_max:
            ax.tick_params(axis='y', which='both', left=False, labelleft=False)
            fig.tight_layout(rect=(0, 0, 1, .92))
        else:
            fig.tight_layout(rect=(0., 0, 1, .92))
        return fig, ax
def normalize(string: str) -> str:
    """
    Normalize a text string.
    :param string: input string
    :return: normalized string
    """
    # Strip byte-order marks that survived decoding (latin-1 BOM & UTF-16 BOM).
    string = string.replace("\xef\xbb\xbf", "").replace("\ufeff", "")
    # NFKD unicode normalization intentionally disabled:
    # string = unicodedata.normalize("NFKD", string)
    string = re.sub(r"[0-9]", "0", string)                   # map all numbers to "0"
    string = re.sub(r"(?:''|``|[\"„“”‘’«»])", "'", string)   # normalize quotes
    string = re.sub(r"(?:[‒–—―]+|-{2,})", "--", string)      # normalize dashes
    string = re.sub(r"\s+", " ", string)                     # collapse whitespace
    return string.strip()
def get_images(image_dir: str, image_url: str = DEFAULT_IMAGE_URL):
    """Return image paths from *image_dir*, downloading a fallback if empty.

    Args:
        image_dir (str): Directory to scan for images.
        image_url (str): URL to download from when no local images exist.

    Returns:
        list: Image file paths.

    Raises:
        RuntimeError: when no images are found and the download fails.
    """
    found = list_images(image_dir)
    if not found and image_url is not None:
        print("No images found. Downloading ...")
        try:
            found = [download_file(image_url)]
        except RuntimeError:
            print("Unable to download file ({0}).".format(image_url))
    if not found:
        raise RuntimeError("Unable to provide images.")
    return found
def generate_key(length=128):
    """Generate a suitable client secret: *length* characters drawn
    uniformly from letters, digits and punctuation using a CSPRNG."""
    alphabet = string.ascii_letters + string.digits + string.punctuation
    rng = SystemRandom()
    return "".join(rng.choice(alphabet) for _ in range(length))
def export(mv_file, idx_file, dst_path, start=0, end=0, max_workers=5):
    """Export frames [start, end) from an mv archive to image files.

    :param mv_file: directory of the mv file
    :param idx_file: directory of the idx file
    :param dst_path: destination directory for the exported images
    :param start: index of the first frame to export
    :param end: one past the last frame; 0 means export everything
    :param max_workers: number of worker threads
    :return: number of frames exported (end - start)
    """
    mv_op = resolvemv_ope(mv_file, idx_file)  # instantiate the resolver
    nums = mv_op.getFrameNum()  # total number of frames in the archive
    end = end or nums  # no end given means export all frames
    logging.warning("总共有:{}张图片".format(end - start))
    if not os.path.exists(dst_path):  # create the destination dir if missing
        os.makedirs(dst_path)
    thread_pool = ThreadPool(max_workers=max_workers)  # thread-pool based export
    # Split the range into roughly equal chunks, one per worker.
    width = (end - start) // max_workers or 1  # frames per chunk
    logging.warning("总计{},每组{}个".format(end - start, width))
    for i in range(start, end, width):
        logging.warning("拆分提图:[{},{})".format(i, min(end, i + width)))
        thread_pool.submit(mv_op.save_img_list, i, i + width, dst_path, 3)
    # Wait for all chunks to finish before reporting.
    thread_pool.shutdown()
    return end - start
def correlateFFT(FFTdata_i, FFTdata_j, out):
    """given two FFT data arrays, upsample, correlate, and inverse fourier transform.

    Writes A * conj(B) into *out* (length >= input length), zero-padding the
    high frequencies to upsample, then inverse-FFTs *out* in place.
    """
    #### upsample, and multiply A*conj(B), using as little memory as possible
    in_len = FFTdata_i.shape[0]
    out_len = out.shape[0]
    # Lower positive frequencies: bins [0, ceil(in_len/2)).
    A = 0
    B = (in_len + 1) // 2
    ##set lower positive
    np.conjugate(FFTdata_j[A:B], out=out[A:B])
    out[A:B] *= FFTdata_i[A:B]
    ### all upper frequencies
    # Zero-fill the inserted middle band — this is the upsampling step.
    A = (in_len + 1) // 2
    B = out_len - (in_len - 1) // 2
    out[A:B] = 0.0
    ## set lower negative
    # Negative frequencies map to the tail of the output array.
    A = out_len - (in_len - 1) // 2
    B = in_len - (in_len - 1) // 2
    np.conjugate(FFTdata_j[B:], out=out[A:])
    out[A:] *= FFTdata_i[B:]
    # special treatment if low number of points is even. So far we have set Y[-N/2]=X[-N/2]
    out[out_len-in_len//2] *= 0.5  # halve the component at -N/2
    temp = out[out_len-in_len//2]
    # NOTE(review): comment says this mirrors -N/2 to +N/2; the slice
    # assignment below relies on that index arithmetic — confirm for odd in_len.
    out[in_len//2:in_len//2+1] = temp  # set that equal to the component at -Nx/2
    ## ifft in place
    fftpack.ifft(out, n=out_len, overwrite_x=True)
def do_service_list(cc, args):
    """List Services.

    Fetches services from the SMC API, hides entries still in the 'initial'
    state, annotates each with its status and hostname, and prints a table.
    """
    try:
        services = cc.smc_service.list()
    except exc.Forbidden:
        raise exc.CommandError("Not authorized. The requested action "
                               "requires 'admin' level")
    # Hide services that are still in the initial state.
    visible = [svc for svc in services if svc.state != 'initial']
    for svc in visible:
        if svc.status:
            svc.state = '-'.join((svc.state, svc.status))
        if getattr(svc, 'node_name', None) is None:
            # Fall back to this machine's hostname when none is reported.
            svc.node_name = socket.gethostname()
    fields = ['id', 'name', 'node_name', 'state']
    field_labels = ['id', 'service_name', 'hostname', 'state']
    utils.print_list(visible, fields, field_labels, sortby=1)
def upload_tosca_template(file):  # noqa: E501
    """upload a tosca template description file

    upload and validate a tosca template description file # noqa: E501

    :param file: tosca Template description
    :type file: werkzeug.datastructures.FileStorage

    :rtype: str
    """
    result = tosca_template_service.save(file)
    # A falsy result means the service rejected the template.
    if not result:
        return 'Bad Request', 400
    return result
def extract_data(mask, dra, ddc, dra_err, ddc_err, ra_rad, dc_rad, ra_dc_cor=None):
    """Select a clean sample of astrometric data according to a boolean mask.

    Parameters
    ----------
    mask : array of boolean
        selection mask applied to every input array
    dra/ddc : array of float
        R.A.(*cos(Dec.))/Dec. differences
    dra_err/ddc_err : array of float
        formal uncertainties of dra(*cos(dc_rad))/ddc
    ra_rad/dc_rad : array of float
        Right ascension/Declination in radian
    ra_dc_cor : array of float, optional
        covariance/correlation coefficient between dra and ddc

    Returns
    ----------
    tuple of arrays
        (dra, ddc, dra_err, ddc_err, ra_rad, dc_rad, ra_dc_cor) restricted to
        the masked entries; the last element is None when ra_dc_cor is None.
    """
    picked = [values[mask] for values in (dra, ddc, dra_err, ddc_err, ra_rad, dc_rad)]
    cor = None if ra_dc_cor is None else ra_dc_cor[mask]
    return (*picked, cor)
def holes_filler(arr_segm_with_holes, holes_label=-1, labels_sequence=(), verbose=1):
    """Fill the holes of a segmentation with the closest surrounding labels.

    Holes are voxels carrying the special value ``holes_label``. The function
    repeatedly applies multi_lab_segmentation_dilate_1_above_selected_label
    until no hole remains.

    :param arr_segm_with_holes: segmentation array containing holes.
    :param holes_label: label value that marks a hole.
    :param labels_sequence: dilation order; the underlying dilation is not
        invariant under the chosen sequence.
    :param verbose: if truthy, print progress information.
    :return: a new array with all holes filled (input left untouched).
    """
    if verbose:
        print('Filling holes in the segmentation:')
    filled = np.copy(arr_segm_with_holes)
    rounds = 0
    # Keep dilating neighbouring labels into the holes until none is left.
    while holes_label in filled:
        filled = multi_lab_segmentation_dilate_1_above_selected_label(
            filled, selected_label=holes_label, labels_to_dilate=labels_sequence)
        rounds += 1
    if verbose:
        print('Number of dilations required to remove the holes: {}'.format(rounds))
    return filled
def isAbsolute(uri : str) -> bool:
    """Check whether a URI is Absolute (i.e. starts with '//'); None is not."""
    if uri is None:
        return False
    return uri.startswith('//')
def token_converter(tokens: Iterable[str]) -> Iterable[str]:
    """Lazily normalize tokens: lower-case them and map hyphens to underscores."""
    return (token.lower().replace("-", "_") for token in tokens)
def update_instance(instance, validated_data):
    """Copy every entry of validated_data onto instance, then persist it.

    Returns whatever ``instance.save()`` returns.
    """
    for field, value in validated_data.items():
        setattr(instance, field, value)
    return instance.save()
def pupilresponse_nnls(tx, sy, event_onsets, fs, npar=10.1, tmax=930):
    """
    Estimate single-event pupil responses from the canonical PRF
    (`pupil_kernel()`) via non-negative least-squares (NNLS).

    Parameters
    -----------
    tx : np.ndarray
        time-vector in milliseconds
    sy : np.ndarray
        (baseline-corrected) pupil signal
    event_onsets : list
        onsets of events (stimuli/responses) in seconds
    fs : float
        sampling rate in Hz
    npar,tmax: float
        parameters for :py:func:`pypillometry.pupil.pupil_kernel()`

    Returns
    --------
    (coef,pred,resid): tuple
        coef: purely-positive regression coefficients
        pred: predicted signal
        resid: residuals (sy-pred)
    """
    # Event-wise design matrix built from the canonical pupil kernel.
    design = pupil_build_design_matrix(tx, event_onsets, fs, npar, tmax, "estimate")
    # NNLS forces every per-event PRF coefficient to be non-negative.
    coef = scipy.optimize.nnls(design.T, sy)[0]
    pred = np.dot(design.T, coef)  # reconstructed (predicted) signal
    resid = sy - pred              # what the PRF model cannot explain
    return coef, pred, resid
def _eval_expression(
    expression,
    params,
    x,
    ind_var="x",
    aux_params=None,
    domain=DEFAULT_DOMAIN,
    rng=DEFAULT_RANGE,
):
    """Evaluate the calibration expression at x.

    Parameters
    ----------
    expression : string
        The expression that defines the calibration function.
    params : array_like
        Floating point parameters of the calibration function, referred to
        by the symbol "p" inside the expression.
    x : float or array_like
        The argument at which to evaluate the expression.
    ind_var : str
        Symbol of the independent variable ("x", or "y" for inverse use).
    aux_params : array_like
        Auxiliary parameters, referred to by the symbol "a". Defaults to an
        empty array.
    domain : array_like
        Finite interval the independent variable must lie in; violations
        raise CalibrationError. Defaults to DEFAULT_DOMAIN.
    rng : array_like
        Finite interval the output is clipped to. Defaults to DEFAULT_RANGE.

    Returns
    -------
    y : float or array_like
        Result of evaluating the expression for x, clipped to ``rng``.

    Raises
    ------
    CalibrationError
        If x lies outside the domain, ind_var is invalid, evaluation fails,
        or the result is complex.
    """
    _validate_domain_range(domain, rng)
    x = np.asarray(x)
    # Reject any evaluation point outside the declared domain.
    if not np.all(x >= domain[0]):
        raise CalibrationError(f"{ind_var} must be >= {domain[0]}: {x}")
    if not np.all(x <= domain[1]):
        raise CalibrationError(f"{ind_var} must be <= {domain[1]}: {x}")
    if ind_var not in ("x", "y"):
        raise CalibrationError(f"Independent variable {ind_var} must be 'x' or 'y'")
    aux_params = np.array([]) if aux_params is None else np.asarray(aux_params)
    # Expose parameters and the independent variable to the sandboxed evaluator.
    safe_eval.symtable["p"] = params
    safe_eval.symtable["a"] = aux_params
    safe_eval.symtable[ind_var] = x
    y = safe_eval(expression)
    if safe_eval.error:
        raise CalibrationError(
            "asteval failed with errors:\n"
            + "\n".join(str(err.get_error()) for err in safe_eval.error)
        )
    if not np.all(np.isreal(y)):
        raise CalibrationError(f"Function evaluation resulted in complex values: {y}")
    # Clip the output to the declared range.
    return np.clip(y, rng[0], rng[1])
def too_few_peaks_or_valleys(
    peak_indices: NDArray[int],
    valley_indices: NDArray[int],
    min_number_peaks: int = MIN_NUMBER_PEAKS,
    min_number_valleys: int = MIN_NUMBER_VALLEYS,
) -> None:
    """Validate that enough peaks and valleys were detected.

    Args:
        peak_indices: 1D integer array of peak indices
        valley_indices: 1D integer array of valley indices
        min_number_peaks: minimum number of required peaks
        min_number_valleys: minimum number of required valleys

    Raises:
        TooFewPeaksDetectedError: when either count is below its minimum.
    """
    num_peaks = len(peak_indices)
    if num_peaks < min_number_peaks:
        raise TooFewPeaksDetectedError(
            f"A minimum of {min_number_peaks} peaks is required to extract twitch metrics, however only {num_peaks} peak(s) were detected."
        )
    num_valleys = len(valley_indices)
    if num_valleys < min_number_valleys:
        raise TooFewPeaksDetectedError(
            f"A minimum of {min_number_valleys} valleys is required to extract twitch metrics, however only {num_valleys} valley(s) were detected."
        )
def negative_frequency(P_m):
    """Split a count dict into its keys and their normalized probabilities.

    :param P_m: mapping sample -> raw count/weight.
    :return: (list of samples, numpy array of probabilities summing to 1).
    """
    sample_num = list(P_m)
    weights = np.array(list(P_m.values()))
    return sample_num, weights / weights.sum()
def _ShellQuote(command_part):
"""Escape a part of a command to enable copy/pasting it into a shell.
"""
return pipes.quote(command_part) | 28,352 |
def evaluate_coco(model, dataset, coco, eval_type="bbox", limit=0, image_ids=None):
    """Runs official COCO evaluation.

    dataset: A Dataset object with validation data
    eval_type: "bbox" or "segm" for bounding box or segmentation evaluation
    limit: if not 0, it's the number of images to use for evaluation
    image_ids: optional explicit list of dataset image ids to evaluate
    """
    # Pick the images to evaluate, optionally capped at `limit`.
    image_ids = image_ids or dataset.image_ids
    if limit:
        image_ids = image_ids[:limit]
    # Map dataset ids to the corresponding COCO image ids.
    coco_image_ids = [dataset.image_info[img_id]["id"] for img_id in image_ids]
    t_start = time.time()
    t_prediction = 0
    results = []
    for idx, img_id in enumerate(image_ids):
        image = dataset.load_image(img_id)
        # Time only the detection itself.
        t0 = time.time()
        detection = model.detect([image])[0]
        t_prediction += (time.time() - t0)
        # Accumulate detections in COCO result format.
        results.extend(build_coco_results(dataset, coco_image_ids[idx:idx + 1],
                                          detection["rois"], detection["class_ids"],
                                          detection["scores"], detection["masks"]))
    if not results:
        return
    # loadRes modifies results with additional attributes.
    coco_results = coco.loadRes(results)
    coco_eval = COCOeval(coco, coco_results, eval_type)
    coco_eval.params.imgIds = coco_image_ids
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    print("Prediction time: {}. Average {}/image".format(
        t_prediction, t_prediction / len(image_ids)))
    print("Total time: ", time.time() - t_start)
    print_result(coco_eval)
def LogLEVEL(val):
    """
    Return a sane loglevel given some value.

    Numbers are truncated to int; strings are resolved against the logging
    module's level names (case-insensitive); anything else passes through.
    """
    if isinstance(val, (float, int)):
        return int(val)
    # BUG FIX: `basestring` only exists on Python 2 and raises NameError on
    # Python 3; `str` is the correct check there.
    if isinstance(val, str):
        return getattr(logging, val.upper())
    return val
def decoMakerApiCallChangePosToOptArg(argPos, argName):
    """ creates a decorator, which change the positional argument ARGPOS into
    an optional argument ARGNAME.

    argPos=1 is the first positional arg
    """
    # Decorator factory: see the classic "chain of decorators with arguments"
    # pattern (stackoverflow.com/questions/739654).
    def decoApiCallChangePosToOptArg(methodAPI):
        """ Decorator to change a positional argument into an optional ARGNAME """
        @wraps(methodAPI)
        def wrapperChangePosToOptArg(self, *argsPositional, **argsOptional):
            remaining = list(argsPositional)
            # Move the argument only when it was actually supplied positionally.
            if 0 < argPos <= len(remaining):
                argsOptional[argName] = remaining.pop(argPos - 1)
            return methodAPI(self, *remaining, **argsOptional)
        return wrapperChangePosToOptArg
    return decoApiCallChangePosToOptArg
def wrangle_address_dist_matrix(ba_rel, ba_asso):
    """ Get all buffer level derived variables based on "AH, Relatives and Associates" file
    Note: we have some participants that list relatives but no associates (or vice versa), so we have to fill NA values

    Keyword Arguments:
    - ba_rel: pairwise address/relative distance rows; assumed to carry
      'ssn_altkey', 'timeseries1' and 'distance_spheroid_m' columns -- TODO confirm
    - ba_asso: same layout for associates

    Returns:
    - DataFrame indexed by (ssn_altkey, best_address_num) with per-address
      counts of relatives/associates at the exact same location (distance 0),
      missing counts filled with 0
    """
    # lex_bestAddressSame_*_rel_c
    # 'zero' is a placeholder column so groups with no zero-distance rows
    # still appear (with count 0) after combine_first below.
    ba_rel['zero'] = 0
    g = ba_rel.groupby(['ssn_altkey', 'timeseries1'])
    g2 = ba_rel[ba_rel['distance_spheroid_m'] == 0].groupby(['ssn_altkey', 'timeseries1'])
    # Count of relatives at distance 0, defaulting to the zero placeholder.
    concordant_rel = g2.size().combine_first(g.first().zero)
    concordant_rel.index.names = ['ssn_altkey', 'best_address_num']
    concordant_rel.name = 'adr_lex_bestAddressSameRel_c'
    # lex_bestAddressSame_*_asso_c -- same construction for associates.
    ba_asso['zero'] = 0
    g = ba_asso.groupby(['ssn_altkey', 'timeseries1'])
    g2 = ba_asso[ba_asso['distance_spheroid_m'] == 0].groupby(['ssn_altkey', 'timeseries1'])
    concordant_asso = g2.size().combine_first(g.first().zero)
    concordant_asso.index.names = ['ssn_altkey', 'best_address_num']
    concordant_asso.name = 'adr_lex_bestAddressSameAsso_c_2014'
    # Participants missing on one side get NaN from the outer concat; fill with 0.
    return pd.concat([concordant_rel, concordant_asso], axis=1).fillna(0)
def circlePoints(x, r, cx, cy):
    """Return the y coordinate of the upper-semicircle point at abscissa x.

    :x: x coordinate of the point.
    :r: length of the radius.
    :cx: x coordinate of the center.
    :cy: y coordinate of the center.
    """
    dx = x - cx
    return math.sqrt(r ** 2 - dx ** 2) + cy
def aaa():
    """AAA command line"""
    # Intentionally empty: serves as the entry point/stub for the AAA CLI group.
    pass
def get_thread_info():
    """
    Returns a pair of:
    - map of LWP -> thread ID
    - map of blocked threads LWP -> potential mutex type
    """
    lwp_to_thread_id = {}   # LWP -> gdb thread ID
    blocked_threads = {}    # LWP -> potential mutex type it is blocked on
    output = gdb.execute("info threads", from_tty=False, to_string=True)
    lines = output.strip().split("\n")[1:]  # skip the header row
    regex = re.compile(r"[\s\*]*(\d+).*Thread.*\(LWP (\d+)\).*")
    for line in lines:
        # FIX: match once per line instead of twice, and skip non-matching
        # lines explicitly rather than via an AttributeError on .group().
        match = regex.match(line)
        if match is None:
            continue
        try:
            thread_id = int(match.group(1))
            thread_lwp = int(match.group(2))
            lwp_to_thread_id[thread_lwp] = thread_id
            mutex_type = MutexType.get_mutex_type(thread_id, line)
            if mutex_type:
                blocked_threads[thread_lwp] = mutex_type
        except Exception:
            # Best-effort: skip lines we cannot interpret rather than abort.
            continue
    return (lwp_to_thread_id, blocked_threads)
def _maybe_convert_labels(y_true):
    """Converts binary labels into -1/1.

    When every element of ``y_true`` is exactly 0 or 1, remaps {0, 1} to
    {-1, 1}; otherwise the labels are returned unchanged. The branch is taken
    via ``smart_cond`` so it works whether ``is_binary`` is a static Python
    bool or a graph-mode tensor.
    """
    are_zeros = math_ops.equal(y_true, 0)
    are_ones = math_ops.equal(y_true, 1)
    # True only if *all* entries are 0 or 1.
    is_binary = math_ops.reduce_all(math_ops.logical_or(are_zeros, are_ones))
    def _convert_binary_labels():
        # Convert the binary labels to -1 or 1.
        return 2. * y_true - 1.
    updated_y_true = smart_cond.smart_cond(is_binary,
                                           _convert_binary_labels, lambda: y_true)
    return updated_y_true
def test_currency_deepcopy():
    """test_currency_deepcopy.

    Regression test: __deepcopy__ must return an equal but distinct Currency
    whose every attribute and repr/str match the module-level `currency`
    fixture (a EUR amount of 1/7).
    """
    new_currency = currency.__deepcopy__()
    # Equal in value but a different object identity.
    assert new_currency == currency
    assert new_currency is not currency
    assert new_currency.numeric_code == '978'
    assert new_currency.alpha_code == 'EUR'
    assert new_currency.decimal_places == 2
    assert new_currency.decimal_sign == ','
    assert new_currency.grouping_places == 3
    assert new_currency.grouping_sign == '.'
    assert new_currency.international
    assert new_currency.symbol == '€'
    assert new_currency.symbol_ahead
    assert new_currency.symbol_separator == ''
    assert new_currency.localized_symbol == '€'
    assert new_currency.convertion == ''
    assert new_currency.__repr__() == (
        'Currency(amount: 0.1428571428571428571428571429, '
        'alpha_code: "EUR", '
        'symbol: "€", '
        'symbol_ahead: True, '
        'symbol_separator: "", '
        'localized_symbol: "€", '
        'numeric_code: "978", '
        'decimal_places: "2", '
        'decimal_sign: ",", '
        'grouping_places: "3", '
        'grouping_sign: ".", '
        'convertion: "", '
        'international: True)')
    assert new_currency.__str__() == 'EUR 0.14'
def trim_listing(obj):
    """Remove 'listing' field from Directory objects that are keep references.

    When a Directory object is a Keep reference, a fully enumerated 'listing'
    is redundant and potentially very expensive to pass between cwl-runner
    instances (e.g. when submitting a job, or with RunInSingleContainer),
    so it is deleted in place when safe to do so.
    """
    is_keep_ref = obj.get("location", "").startswith("keep:")
    if is_keep_ref and "listing" in obj:
        del obj["listing"]
def create_plot(feature="bar"):
    """Build a randomly generated Plotly trace list and return it as JSON.

    :param feature: "bar" produces a bar chart; any other value a scatter.
    :return: JSON string encoding the list of Plotly traces.
    """
    if feature == "bar":
        count = 40
        xs = np.linspace(0, 1, count)
        ys = np.random.randn(count)
        frame = pd.DataFrame({'x': xs, 'y': ys})  # sample dataframe
        data = [
            go.Bar(
                x=frame['x'],
                y=frame['y']
            )
        ]
    else:
        count = 1000
        # Random scatter cloud.
        data = [go.Scatter(
            x=np.random.randn(count),
            y=np.random.randn(count),
            mode='markers'
        )]
    return json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)
def get_gram_matrix(tensor):
    """
    Return the Gram matrix G of the 2-D input, where G[i, j] is the inner
    product between vectorised feature maps i and j of a layer
    (shape: distinct_filter_count x distinct_filter_count).
    """
    return tensor.mm(tensor.t())
def calc_stat_moments(ds, dim_aggregator='time', time_constraint=None):
    """Calculate mean, standard deviation and a dispersion coefficient
    along the specified dimension.

    Parameters:
    -----------
    ds : xr.Dataset
    dim_aggregator : str
        'spatial' aggregates over latitude/longitude; anything else over time
    time_constraint : str
        'seasonally' or 'monthly' groups by season/month before aggregating

    Returns
    -------
    xr.DataArray
        concatenation of the three moments along a new 'stat_moments' dim
    """
    if dim_aggregator == 'spatial':
        agg_dims = ['latitude', 'longitude']
    else:
        agg_dims = 'time'
    if time_constraint in ('seasonally', 'monthly'):
        key = 'time.season' if time_constraint == 'seasonally' else 'time.month'
        grouped = ds.groupby(key)
        mu = grouped.mean(dim=agg_dims)
        sig = grouped.std(dim=agg_dims)
    else:
        mu = ds.mean(dim=agg_dims)
        sig = ds.std(dim=agg_dims)
    # NOTE(review): sig**2/mu is the index of dispersion; a true coefficient
    # of variation would be sig/mu -- confirm which is intended.
    vc = sig**2 / mu
    moments = xr.concat([mu, sig, vc], dim='stat_moments')
    moments.coords['stat_moments'] = ['mean', 'std', 'vc']
    return moments
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
    """Checks whether the casing config is consistent with the checkpoint name."""
    # The casing is supplied by the user and is not recorded inside the
    # checkpoint (it really belongs in bert_config.json), so we heuristically
    # infer it from well-known published model names.
    if not init_checkpoint:
        return
    m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
    if m is None:
        return
    model_name = m.group(1)
    lower_models = [
        "uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
        "multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
    ]
    cased_models = [
        "cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
        "multi_cased_L-12_H-768_A-12"
    ]
    if model_name in lower_models and not do_lower_case:
        actual_flag, case_name, opposite_flag = "False", "lowercased", "True"
    elif model_name in cased_models and do_lower_case:
        actual_flag, case_name, opposite_flag = "True", "cased", "False"
    else:
        return
    raise ValueError(
        "You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
        "However, `%s` seems to be a %s model, so you "
        "should pass in `--do_lower_case=%s` so that the fine-tuning matches "
        "how the model was pre-training. If this error is wrong, please "
        "just comment out this check." % (actual_flag, init_checkpoint,
                                          model_name, case_name, opposite_flag))
def test_refreshes_token_with_valid_refresh_token_cookie(
    app: Flask, client: FlaskClient
) -> None:
    """POST /auth/refresh with a valid "refresh_token" cookie must succeed.

    The GifSync API should respond 200 OK with a fresh auth token (and its
    max age) in the body.

    Args:
        app (:obj:`~flask.Flask`): The Flask app fixture.
        client (:obj:`~flask.testing.FlaskClient`): The Client fixture.
    """
    username = create_random_username()
    token = create_refresh_token(username)
    assert token.signed is not None
    add_refresh_token_cookie_to_client(app, client, token.signed)
    response = post_refresh(client)
    # The API must accept the refresh request and return a new auth token.
    assert response.status_code == HTTPStatus.OK
    assert_token_in_response(response)
def move_cursor_left() -> "None":
    """Move back a character, or up a line."""
    buffer = get_app().current_buffer
    buffer.cursor_position -= 1
def submit_ride_request():
    """
    submit a ride request with the following required fields:
     - netId (string) - netId of requester
     - date (date object) - date of travel
     - time (time object) - time of travel
     - origin (string) - chosen from dropdown of origins
     - destination (string) - chosen from dropdown of destinations
     - preferred_car_type (string) - "regular" or "XL"
     - preferred_group_size (int) - number of desired riders in group

    Returns 403 on a missing/invalid API key, 400 on a missing field,
    otherwise stores the request in DynamoDB and echoes it back as JSON.
    """
    if REQUIRE_KEY:
        try:
            key = request.headers["api-key"]
        except KeyError:  # header absent
            return jsonify({"error": "No API key provided - permission denied"}), 403
        if not is_key_valid(key):
            return jsonify({"error": "Invalid API key - permission denied"}), 403
    # All required fields share the same presence check and error-message
    # shape, so validate them in one loop instead of seven repeated blocks.
    required_fields = (
        "netId",
        "date",
        "time",
        "origin",
        "destination",
        "preferred_car_type",
        "preferred_group_size",
    )
    values = {}
    for field in required_fields:
        value = request.json.get(field)
        if not value:
            return (
                jsonify({"error": "Please provide {} to request a ride".format(field)}),
                400,
            )
        values[field] = value
    uid = str(uuid.uuid4())  # generate unique identifier for this request
    tz_est = timezone("EST")
    curr_time = datetime.now(tz_est).strftime("%Y-%m-%d %H:%M:%S")
    client.put_item(
        TableName=REQUESTS_TABLE,
        Item={
            "requestId": {"S": uid},
            "netId": {"S": values["netId"]},
            "request_time": {"S": curr_time},
            "date": {"S": values["date"]},
            "time": {"S": values["time"]},
            "origin": {"S": values["origin"]},
            "destination": {"S": values["destination"]},
            "preferred_car_type": {"S": values["preferred_car_type"]},
            "preferred_group_size": {"N": values["preferred_group_size"]},
            "matched": {"BOOL": False},
            "groupId": {"S": ""},
            "confirmed": {"BOOL": False},
            "allConfirmed": {"BOOL": False},
            "rematch": {"BOOL": False},
        },
    )
    return jsonify(
        {
            "requestId": uid,
            "netId": values["netId"],
            # NOTE: "reesponse_time" is a long-standing key typo kept for
            # backward compatibility with existing API clients.
            "reesponse_time": curr_time,
            "date": values["date"],
            "time": values["time"],
            "origin": values["origin"],
            "destination": values["destination"],
            "preferred_car_type": values["preferred_car_type"],
            "preferred_group_size": values["preferred_group_size"],
            "matched": False,
            "groupId": "",
            "confirmed": False,
            "allConfirmed": False,
        }
    )
def update_youtube_dl():
    """This block for updating youtube-dl module in the freezed application folder in windows

    Downloads the latest youtube-dl source from GitHub into ./temp, compiles
    it to .pyc with a small thread pool, then swaps it into lib/youtube_dl
    (keeping a backup in lib/youtube_dl_bkup). When not running frozen, it
    simply pip-upgrades the installed package and returns.
    """
    current_directory = config.current_directory
    # check if the application runs from a windows cx_freeze executable
    # if run from source code, we will update system installed package and exit
    if not config.FROZEN:
        cmd = f'"{sys.executable}" -m pip install youtube_dl --upgrade'
        success, output = run_command(cmd)
        if success:
            log('successfully updated youtube_dl')
        return
    # make temp folder
    log('making temp folder in:', current_directory)
    if 'temp' not in os.listdir(current_directory):
        os.mkdir(os.path.join(current_directory, 'temp'))
    # paths
    target_module = os.path.join(current_directory, 'lib/youtube_dl')
    bkup_module = os.path.join(current_directory, 'lib/youtube_dl_bkup')
    new_module = os.path.join(current_directory, 'temp/youtube-dl-master/youtube_dl')
    def bkup():
        # backup current youtube-dl module folder
        log('delete previous backup')
        delete_folder(bkup_module)
        log('backup current youtube-dl module')
        shutil.copytree(target_module, bkup_module)
    def unzip():
        # extract zipped module
        with zipfile.ZipFile('temp/youtube-dl.zip', 'r') as zip_ref:
            zip_ref.extractall(path=os.path.join(current_directory, 'temp'))
    def compile_file(q):
        # worker: pop files off the queue, byte-compile .py files in place
        # (.py -> .pyc), then delete the source file
        while q.qsize():
            file = q.get()
            if file.endswith('.py'):
                try:
                    py_compile.compile(file, cfile=file + 'c')
                    os.remove(file)
                except Exception as e:
                    log('compile_file()> error', e)
            else:
                print(file, 'not .py file')
    def compile_all():
        # byte-compile the freshly downloaded module using 10 worker threads,
        # printing '#' progress marks as the queue drains
        q = queue.Queue()
        # get files list and add it to queue
        for item in os.listdir(new_module):
            item = os.path.join(new_module, item)
            if os.path.isfile(item):
                file = item
                # compile_file(file)
                q.put(file)
            else:
                folder = item
                for file in os.listdir(folder):
                    file = os.path.join(folder, file)
                    # compile_file(file)
                    q.put(file)
        tot_files_count = q.qsize()
        last_percent_value = 0
        # create 10 worker threads
        threads = []
        for _ in range(10):
            t = Thread(target=compile_file, args=(q,), daemon=True)
            threads.append(t)
            t.start()
        # watch threads until finished
        while True:
            live_threads = [t for t in threads if t.is_alive()]
            processed_files_count = tot_files_count - q.qsize()
            percent = processed_files_count * 100 // tot_files_count
            if percent != last_percent_value:
                last_percent_value = percent
                log('#', start='', end='' if percent < 100 else '\n')
            if not live_threads and not q.qsize():
                break
            time.sleep(0.1)
        log('Finished compiling to .pyc files')
    def overwrite_module():
        # replace the live module folder with the newly compiled one
        delete_folder(target_module)
        shutil.move(new_module, target_module)
        log('new module copied to:', target_module)
    # start processing -------------------------------------------------------
    log('start updating youtube-dl please wait ...')
    try:
        # backup
        bkup()
        # download from github
        log('step 1 of 4: downloading youtube-dl raw files')
        url = 'https://github.com/ytdl-org/youtube-dl/archive/master.zip'
        response = download(url, 'temp/youtube-dl.zip')
        if response is False:
            log('failed to download youtube-dl, abort update')
            return
        # extract zip file
        log('step 2 of 4: extracting youtube-dl.zip')
        # use a thread to show some progress while unzipping
        t = Thread(target=unzip)
        t.start()
        while t.is_alive():
            log('#', start='', end='')
            time.sleep(0.5)
        log('\n', start='')
        log('youtube-dl.zip extracted to: ', current_directory + '/temp')
        # compile files from py to pyc
        log('step 3 of 4: compiling files, please wait')
        compile_all()
        # delete old youtube-dl module and replace it with new one
        log('step 4 of 4: overwrite old youtube-dl module')
        overwrite_module()
        # clean old files
        delete_folder('temp')
        log('delete temp folder')
        log('youtube_dl module ..... done updating')
        log('please restart Application now .................................')
    except Exception as e:
        log('update_youtube_dl()> error', e)
def _heatmap(data,
             row_ticks=None,
             col_ticks=None,
             row_labels=None,
             col_labels=None,
             ax=None,
             cbar_kw={},
             cbarlabel="",
             **kwargs):
    """Create a heatmap from a numpy array and two lists of labels.

    (Code from `matplotlib documentation <https://matplotlib.org/stable/gallery/images_contours_and_fields/image_annotated_heatmap.html>`_)

    Args:
        data (np.ndarray): A 2D numpy array of shape ``[H, W]``.
        row_ticks (list[float]): List of row (y-axis) tick locations.
        col_ticks (list[float]): List of column (x-axis) tick locations.
        row_labels (list[str]): A list labels for the rows. Its length
            should be equal to that of ``row_ticks`` if ``row_ticks`` is not None.
            Otherwise, it should have a length of ``H``.
        col_labels (list[str]): A list of labels for the columns. Its length
            should be equal to that of ``col_ticks`` if ``col_ticks`` is not None.
            Otherwise, it should have a length of ``W``.
        ax (matplotlib.axes.Axes): instance to which the heatmap is plotted.
            If None, use current axes or create a new one.
        cbar_kw (dict): A dictionary with arguments to ``matplotlib.Figure.colorbar``.
            (Mutable default is safe here: the dict is only read, never mutated.)
        cbarlabel (str): The label for the colorbar.
        **kwargs: All other arguments that are forwarded to ``ax.imshow``.

    Returns:
        tuple:
        - matplotlib.image.AxesImage: the heatmap image
        - matplotlib.pyplot.colorbar: the colorbar of the heatmap
    """
    if not ax:
        ax = plt.gca()
    # Plot the heatmap
    im = ax.imshow(data, **kwargs)
    # Create colorbar
    cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
    cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")
    if col_ticks is None:
        # show all the ticks by default
        col_ticks = np.arange(data.shape[1] + 1) - .5
    ax.set_xticks(col_ticks, minor=True)
    if row_ticks is None:
        # show all the ticks by default
        row_ticks = np.arange(data.shape[0] + 1) - .5
    ax.set_yticks(row_ticks, minor=True)
    # ... and label them with the respective list entries.
    if col_labels is not None:
        assert len(col_ticks) == len(col_labels), (
            "'col_ticks' should have the "
            "same length as 'col_labels'")
        ax.set_xticklabels(col_labels)
    if row_labels is not None:
        assert len(row_ticks) == len(row_labels), (
            "'row_ticks' should have the "
            "same length as 'row_labels'")
        ax.set_yticklabels(row_labels)
    # Let the horizontal axes labeling appear on top.
    ax.tick_params(top=True, bottom=False, labeltop=True, labelbottom=False)
    # Rotate the tick labels and set their alignment.
    plt.setp(
        ax.get_xticklabels(), rotation=-30, ha="right", rotation_mode="anchor")
    # Turn spines off and create white grid.
    # NOTE: the spines[:] slice syntax requires matplotlib >= 3.4.
    ax.spines[:].set_visible(False)
    ax.grid(which="minor", color="w", linestyle='-', linewidth=3)
    ax.tick_params(which="minor", bottom=False, left=False)
    return im, cbar
def clustermap(gene_values, out_pdf, color=None, table=False):
    """ Generate a clustered heatmap using seaborn.

    :param gene_values: matrix of genes x experiments to cluster and plot.
    :param out_pdf: output figure path (expected to end in '.pdf').
    :param color: colormap passed to seaborn.
    :param table: if True, also save the raw matrix next to the figure.
    """
    if table:
        # Save the underlying matrix alongside the figure (strip '.pdf').
        np.save(out_pdf[:-4], gene_values)
    plt.figure()
    grid = sns.clustermap(
        gene_values,
        metric='euclidean',
        cmap=color,
        xticklabels=False,
        yticklabels=False)
    grid.ax_heatmap.set_xlabel('Experiments')
    grid.ax_heatmap.set_ylabel('Genes')
    plt.savefig(out_pdf)
    plt.close()
def test_similarityAccuracyAggregate(mongoURI):
    """
    Tests for basic accuracy against a brute-force constructed Python 'database'
    at thresholds 0.2, 0.4, 0.6, 0.8, and 1. This test is relatively long and
    will modify your local MongoDB instance.
    """
    db_python = utils.setupPythonDB('data/test_data/first_200.props.sdf')
    # 'local' means connect with default settings; otherwise use the URI.
    db_mongo = utils.setupMongoDB() if mongoURI == 'local' else utils.setupMongoDB(mongoURI)
    write.WriteFromSDF(db_mongo.molecules, 'data/test_data/first_200.props.sdf')
    similarity.AddMorganFingerprints(db_mongo.molecules, db_mongo.mfp_counts)
    counter = 0
    for threshold in (0.2, 0.4, 0.6, 0.8, 1):
        for idx in range(200):
            mol = Chem.Mol(db_python[idx]['rdmol'])
            expected = utils.similaritySearchPython(mol, db_python, threshold)
            actual = similarity.SimSearchAggregate(mol, db_mongo.molecules, db_mongo.mfp_counts, threshold)
            # Brute-force and aggregate searches must agree exactly.
            assert sorted(expected) == sorted(actual)
            print(counter)
            counter += 1
def html_colour_to_rgba(html_colour: str) -> tuple:
    """Convert an HTML colour string ("#RRGGBB" or "RRGGBB") to RGBA ints.

    The alpha channel is always 0, matching this helper's historical output.

    :param html_colour: colour string, optionally prefixed with '#'.
    :return: tuple (red, green, blue, alpha) of ints with alpha fixed at 0.
    """
    # FIX: the return annotation was `-> ()` (a tuple *instance*, not a type).
    html_colour = html_colour.strip()
    if html_colour.startswith('#'):
        html_colour = html_colour[1:]
    parts = (html_colour[:2], html_colour[2:4], html_colour[4:], '0')
    return tuple(int(part, 16) for part in parts)
def index(request):
    """Displays form.

    GET renders an empty paste form with project defaults; POST creates a
    paste either from a raw file upload (field name selects the language)
    or from the submitted form, then redirects to the paste's short URL.
    """
    data = {"menu": "index", "max_characters": settings.PASTE["max_characters"]}
    if request.method == "POST":
        paste = Paste(slug=random_id(Paste))
        if request.FILES:
            # Take the first uploaded file; its form-field name is the language.
            for language_name, uploaded in request.FILES.items():
                break
            language = Language.by_name(language_name)
            form = PasteForm(
                {
                    "language": language.id,
                    "title": uploaded.name,
                    "private": settings.PASTE["private_by_default"],
                    "lifetime": settings.PASTE["default_lifetime"],
                    "content": uploaded.read().decode(),
                },
                instance=paste,
            )
        else:
            form = PasteForm(request.POST, instance=paste)
        if form.is_valid():
            form.save()  # Some logic added to overrided method, see forms.py
            location = request.build_absolute_uri(
                reverse("short_paste", kwargs={"slug": paste.slug})
            )
            # Plain-text body so CLI clients get the URL back directly.
            return HttpResponseRedirect(
                location, content=location + "\n", content_type="text/plain"
            )
        data["form"] = form
        return render(request, "paste/index.html", data)
    data["form"] = PasteForm(
        initial={
            "private": settings.PASTE["private_by_default"],
            "lifetime": settings.PASTE["default_lifetime"],
            "language": Language.by_name(settings.PASTE["default_language"]).id,
        }
    )
    data["absolute_index_url"] = request.build_absolute_uri(reverse("index"))
    return render(request, "paste/index.html", data)
def setup(bot):
    """Set up the RoyaleRant cog: ensure its data folder/file exist, then register it."""
    check_folder()
    check_file()
    bot.add_cog(RoyaleRant(bot))
def lambda_handler(event, context):
    """
    Assesses instance reservations and produces a report on them

    :param event: Lambda event dict; overrides LAMBDA_DEFAULTS entries.
    :param context: Lambda context object (unused).
    """
    import json
    event_settings = copy(LAMBDA_DEFAULTS)
    event_settings.update(event)
    # Set the local timezone for datetime formatting if the parameter was
    # passed to the Lambda function
    if 'Local_TZ' in event_settings:
        # BUG FIX: the original assignment created a function-local LOCAL_TZ
        # that was never read; declare it global so the module-level setting
        # (presumably used for datetime formatting elsewhere) is updated.
        global LOCAL_TZ
        LOCAL_TZ = tz.gettz(event_settings['Local_TZ'])
    # Instantiate checker
    rc = ReservationChecker(
        unreserved_days=int(event_settings['UnreservedDays']),
        region_name=event_settings['Region'])
    # Should a report be generated?
    report_text = None
    if 'ReportOn' in event_settings:
        # Instantiate display class
        rcd = ReservationDisplay(rc)
        # Compose the report header
        report_text = "\n".join([
            '###############################',
            '# Instance Reservation Report #',
            '###############################',
            '',
            '',
        ])
        for report_item in event_settings['ReportOn']:
            # Section banner sized to the item name.
            report_text += "\n".join([
                ":" * (4 + len(report_item)),
                ": " + report_item + " :",
                ":" * (4 + len(report_item)),
                "",
            ])
            # Each report item maps to a `<name>_data` attribute on the display.
            for data_row in getattr(rcd, report_item + '_data'):
                report_text += str(data_row) + "\n"
            report_text += "\n"
        print(report_text)
    if report_text is not None and event_settings['SNS_Send']:
        print("\n\nSending report to SNS:" + event_settings['SNS_Topic'])
        sns = boto3.client('sns', region_name=event_settings['Region'])
        resp = sns.publish(
            TargetArn=event_settings['SNS_Topic'],
            Subject=event_settings.get('SNS_Subject', DEFAULT_SNS_SUBJECT).format(
                account=get_aws_account_id(),
                region=event_settings['Region'] or sns.meta.region_name),
            Message=report_text)
        print(json.dumps(resp,
                         indent=4,
                         separators=(',', ': '),
                         sort_keys=False))
def trafficking_service():
    """Connect to Google's DFA Reporting service with oauth2 using the discovery service."""
    # Delegates to the shared helper with the trafficking-specific scope.
    service = google_service(DDM_TRAFFICKING_SCOPE)
    return service
def question_freq(freq_range):
    """Page for individual question's word frequency."""
    # drop columns with all na
    chosen_questions = st.multiselect(
        label="Select specific questions below:",
        options=selected_nan_df.columns[2:],
    )
    plots_per_row = st.sidebar.slider(
        "Select the number of plots per row", 1, 5, value=1
    )
    question_subset = ut.make_questions_df(chosen_questions, main_df)
    if chosen_questions:
        freq_question_df = ut.compute_quest_df(
            chosen_questions, freq_range, question_subset
        )
        chart = vis.facet_freq_barplot(
            freq_question_df,
            chosen_questions,
            "question",
            plots_per_row=plots_per_row,
        )
        st.altair_chart(chart)
def launch_testing(model_epoch, input_type=0):
    """Evaluate a saved model checkpoint over the MSCOCO test dataset.

    Args:
        model_epoch: checkpoint filename, relative to MAIN_FOLDER.
        input_type: dataset/model input variant forwarded to MSCOCO and Model.

    Side effects:
        Prints a running MSE loss every 500 batches and the final average
        loss over the whole test set.
    """
    testset = MSCOCO(IMAGES_FOLDER_TEST, ANNOTATION_FILE_TEST, input_type=input_type)
    # Load the trained weights into a fresh model instance.
    checkpoint = torch.load(os.path.join(MAIN_FOLDER, model_epoch))
    net = Model(input_type=input_type)
    net.load_state_dict(checkpoint['state_dict'])
    # Loss
    criterion = nn.MSELoss()
    # Batch sizes
    batch_size_test = 1
    # TestLoader
    evaloader = torch.utils.data.DataLoader(testset,
                                            batch_size=batch_size_test,
                                            shuffle=True,
                                            num_workers=4
                                            )
    loss_test = 0.0
    for i, data in enumerate(evaloader):
        inputs, labels = data[0], data[1]
        inputs, labels = Variable(inputs), Variable(labels)
        outputs = net(inputs)
        # BUG FIX: the original computed criterion(y, outputs) where `y` is
        # undefined (NameError); the loss compares predictions and labels.
        loss = criterion(outputs, labels)
        # .item() extracts the scalar loss; the original loss.data[0] is
        # deprecated and raises on 0-dim tensors in modern PyTorch.
        loss_test += loss.item()
        if i % 500 == 0:
            print("Current loss over the test dataset: {0} after {1}ème iteration".format(loss_test/(i+1), i+1))
    loss_test = loss_test / len(testset)
    print("Average loss over the test dataset: {}".format(loss_test))
class cd:
    """
    A Fabric-inspired cd context manager that temporarily changes the working
    directory for performing some tasks and returns to the original working
    directory afterwards, even if the block raises. E.g.,

        with cd("/my/path/"):
            do_something()

    Args:
        path: Path to cd to.

    Note: as stored, this was a bare generator function; without a
    @contextlib.contextmanager decorator such a function has no
    __enter__/__exit__ and cannot be used in a ``with`` statement, so it is
    implemented here as a context-manager class with the same call syntax.
    """

    def __init__(self, path):
        self.path = path
        self._saved_cwd = None

    def __enter__(self):
        # Remember where we were, then switch.
        self._saved_cwd = os.getcwd()
        os.chdir(self.path)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Always restore the original directory (mirrors try/finally).
        os.chdir(self._saved_cwd)
        return False  # do not suppress exceptions
def deduplicate(s, ch):
    """
    From http://stackoverflow.com/q/42216559/610569
    Collapse runs of the separator *ch* in *s* (after stripping surrounding
    whitespace) down to a single occurrence.

    s = 'this  is   an   irritating string with  random spacing  .'
    deduplicate(s)
    'this is an irritating string with random spacing .'
    """
    pieces = (piece for piece in s.strip().split(ch) if piece)
    return ch.join(pieces)
def optimize(mod):
    """Optimize all the functions in a module.

    Modules are the only mutable piece of Relay; the pass destructively
    updates each function in the module while optimizing.
    """
    optimized = pass_set(mod)
    return optimized
def sum_series(n):
    """Calculate sum of n+(n-2)+(n-4)... iteratively, stopping once n < 2."""
    total = 0
    while n >= 2:
        total += n
        n -= 2
    # The final (sub-2) value is included, matching the recursive base case.
    return total + n
def get_preconditioner():
    """Compute the preconditioner M for the 2-D Laplacian Jacobian."""
    def _lap1d(n, h):
        # Three-band second-difference stencil (1, -2, 1) / h^2.
        bands = zeros((3, n))
        bands[0, :] = 1/h/h
        bands[1, :] = -2/h/h
        bands[2, :] = 1/h/h
        return spdiags(bands, [-1, 0, 1], n, n)

    Lx = _lap1d(nx, hx)
    Ly = _lap1d(ny, hy)
    # 2-D operator via Kronecker sums of the 1-D operators.
    J1 = kron(Lx, eye(ny)) + kron(eye(nx), Ly)
    # Now we have the matrix `J_1`. We need to find its inverse `M` --
    # however, since an approximate inverse is enough, we can use
    # the *incomplete LU* decomposition
    J1_ilu = spilu(J1)
    # spilu returns an object whose .solve() evaluates the matrix-vector
    # product; wrap it into a LinearOperator so Krylov methods can use it.
    return LinearOperator(shape=(nx*ny, nx*ny), matvec=J1_ilu.solve)
def compute_noise_ceiling(y, scoring=roc_auc_score, K=None, soft=True, return_pred=False, doubles_only=False):
    """ Computes the noise ceiling for data with repetitions.
    Parameters
    ----------
    y : pd.Series
        Series with numeric values and index corresponding to stim ID.
    scoring : callable
        Metric invoked as scoring(y_true_onehot, labels, average=None).
    K : int
        Number of classes (if categorical; does not work for regression yet)
    soft : bool
        Whether to use a probabilistic estimate (True) or "hard" label (False)
    return_pred : bool
        Whether to also return the per-stimulus label estimates.
    doubles_only : bool
        Whether to restrict the estimate to stimuli with repetitions.
    Returns
    -------
    ceiling : ndarray
        Numpy ndarray (shape: K,) with ceiling estimates
    """
    labels = convert_doubles_to_single_labels(
        y, K, soft, keepdims=True, doubles_only=doubles_only
    ).sort_index()
    y_flat = y.loc[labels.index.unique()].sort_index()
    # One-hot encode the true classes; the metric expects a 2-D indicator array.
    encoder = OneHotEncoder(categories='auto', sparse=False)
    onehot = encoder.fit_transform(y_flat.values[:, np.newaxis])
    # Best achievable per-class score given label consistency ("ceiling").
    ceiling = scoring(onehot, labels.values, average=None)
    return (ceiling, labels) if return_pred else ceiling
def main(clear_cache, debug, zwift_user, zwift_pass, tplvars, output_file, rider_list, template):
    """
    Output some sort of rider list with data downloaded from ZwiftPower
    \b
    Arguments:
    - RIDERLIST: Source of riders. Supported sources:
        - team:13264
        - riders:514482,399078
        - race_results:2692522
        - race_unfiltered:2692522
        - race_signups:2692522
    - TEMPLATE: Jinja2 template to use to generate the output. Will be
      searched for either in CWD or from builtin templates.
    """
    log_level = logging.DEBUG if debug else logging.INFO
    logging.basicConfig(level=log_level)
    source, id_ = rider_list
    # Cache ZwiftPower responses on disk for 12 hours.
    cache_dir = Path(user_cache_dir('riderlist'))
    cache_dir.mkdir(parents=True, exist_ok=True)
    cached = CachedSession(str(cache_dir / 'zp_cache'),
                           expire_after=timedelta(hours=12))
    if clear_cache:
        cached.cache.clear()
    scraper = Scraper(username=zwift_user, password=zwift_pass, sleep=2.0,
                      session=cached)
    # Templates are resolved from the current directory first, then from the
    # package's builtin template folder.
    searchpaths = [
        Path(os.getcwd()),
        Path(__file__).parent / 'templates',
    ]
    env = Environment(loader=FileSystemLoader(searchpaths),
                      undefined=StrictUndefined)
    # Register the custom Jinja2 filters used by the bundled templates.
    for filter_name, filter_fn in (
        ('catstr', filter_catstr),
        ('ttts', filter_ttts),
        ('races', filter_races),
        ('flag2unicode', flag_unicode),
        ('cp_svg', filter_cp_svg),
        ('sdur', filter_sdur),
        ('powerbars_svg', filter_power_bars),
        ('csv_dict', filter_csv_dict),
    ):
        env.filters[filter_name] = filter_fn
    tpl = env.get_template(template)
    ctx = {
        'scraper': scraper,
        'now': pendulum.now()
    }
    # Dispatch to the Getters method named by the rider-list source.
    ctx.update(getattr(Getters, source)(scraper, id_))
    rendered = tpl.render(args=dict(tplvars), **ctx)
    with click.open_file(output_file, mode='w') as f:
        f.write(rendered)
def maskw(m, edge, xray_image, imax, jmax, maxr, roundR, listcounter):
    """
    Mask outlier pixels in a diffraction image, ring by ring.

    A pixel is masked when its intensity deviates from its ring's mean by
    more than ``m`` ring standard deviations, when its ring has fewer than
    10 pixels, or when it lies within ``edge`` pixels of the image border.

    Parameters
    ----------
    m : float
        Number of ring standard deviations used as the outlier threshold.
    edge : int
        Width in pixels of the border region to mask unconditionally.
    xray_image : ndarray
        2-D detector image.
    imax, jmax : int
        Image dimensions (currently unused; kept for interface compatibility).
    maxr : int
        Maximum ring radius to consider.
    roundR : ndarray
        Integer ring index of each pixel, same shape as xray_image.
    listcounter : ndarray
        Number of pixels in each ring, indexed by radius.

    Returns
    -------
    highper, lowper, totalper : ndarray
        Per-ring percentages of pixels masked as too high / too low / either.
    Mask : ndarray
        Boolean array, True where a pixel is masked.
    iMask : ndarray
        Integer inverse of Mask (1 where a pixel is kept).
    """
    mean = mean1d(xray_image, max=maxr, step=1, position_matrix=roundR)
    std = ringstd(xray_image, max=maxr, step=1, position_matrix=roundR)
    # Diagnostic plot of the per-ring standard deviation (blocks until closed).
    plt.plot(std)
    plt.show()
    threshold = m * std
    lower = mean - threshold
    upper = mean + threshold
    print('start masking')
    # Start masking based on too high / too low intensity and sparse rings.
    # NOTE(review): indexing with roundR - 1 shifts every pixel's ring by one;
    # confirm whether mean/std are 0- or 1-indexed by radius.
    toolow = xray_image < lower[roundR - 1]
    toohi = xray_image > upper[roundR - 1]
    lowcounter = np.bincount(roundR[toolow], minlength=maxr + 1)
    highcounter = np.bincount(roundR[toohi], minlength=maxr + 1)
    # TODO: maybe read the minimum pixel count from a config file or calculate
    # it from detector info (currently a magic number, 10).
    # Combine too-low, too-high, and rings with too few pixels.
    Mask = toolow | toohi | (listcounter[roundR] < 10)
    # Mask the image border unconditionally.
    Mask[:, :edge] = 1
    Mask[:, -edge:] = 1
    Mask[:edge, :] = 1
    Mask[-edge:, :] = 1
    # Per-ring percentages for plotting/reporting.
    highper = (highcounter[:] / listcounter[:]) * 100
    lowper = (lowcounter[:] / listcounter[:]) * 100
    totalper = highper + lowper
    # BUG FIX: np.abs(Mask - 1) raises TypeError on boolean arrays in modern
    # NumPy; compute the 0/1 inverse mask explicitly instead.
    iMask = np.logical_not(Mask).astype(int)
    # BUG FIX: the original used a Python 2 print statement here, which is a
    # SyntaxError alongside the Python 3 print() call above.
    print('finished masking')
    return highper, lowper, totalper, Mask, iMask
def check_shadow_dirs(processes, cwd):
    """
    Assures that tmp0, tmp1 etc. exist

    Checks that cwd/tmp0 .. cwd/tmp<processes-1> all exist and raises an
    Exception naming the missing director(y/ies) otherwise.
    """
    missing = [
        cwd + os.sep + 'tmp' + str(iprocess)
        for iprocess in range(processes)
        if not os.path.exists(cwd + os.sep + 'tmp' + str(iprocess))
    ]
    if not missing:
        return
    if len(missing) == 1:
        raise Exception("directory %s must exist!" % missing[0])
    raise Exception("directories %s must exist!" % missing)
def check_regularizer(layer_config, class_name='L1L2', l1=None, l2=None):
    """
    Verify a layer's kernel-regularizer class and coefficients.

    Keras stores regularizer coefficients with small numerical drift, so each
    coefficient is compared as a ratio rounded to one decimal place rather
    than by exact equality.

    :type layer_config: dict
    :param layer_config: the layer configuraion dictionary.
    :type class_name: str
    :param class_name: The expected class name of the regularizer
    :type l1,l2: float
    :param l1,l2: the expected coefficent values of the l1 and/or l2
        coefficients (a None value skips that check).
    """
    regularizer = layer_config['kernel_regularizer']
    assert regularizer['class_name'] == class_name
    config = regularizer['config']
    for key, expected in (('l1', l1), ('l2', l2)):
        if expected is not None:
            assert np.round(config[key] / expected, 1) == 1.
def get_all_contained_items(item, stoptest=None):
    """
    Recursively retrieve all items contained in another item

    :param text_game_maker.game_objects.items.Item item: item to retrieve items\
        from
    :param stoptest: callback to call on each sub-item to test whether\
        recursion should continue. If stoptest() == True, recursion will\
        continue
    :return: list of retrieved items
    :rtype: [text_game_maker.game_objects.items.Item]
    """
    collected = []
    if not item.is_container:
        return collected
    # Breadth-first walk over nested containers; an index cursor replaces
    # popping from the front of the queue but yields the same visit order.
    queue = [item]
    cursor = 0
    while cursor < len(queue):
        container = queue[cursor]
        cursor += 1
        for child in container.items:
            collected.append(child)
            if child.is_container and not (stoptest and stoptest(child)):
                queue.append(child)
    return collected
def get_br_el_sub_name_list(year, dep, sem_num, br_dict, el_dict, dep_dict, content = ''):
    """ Return list of name of breadth and elective subjects for this batch
    Called by get_br_el_sub_name_list_helper for every roll number in the dep.

    Scans the HTML grade-card lines in ``content`` for the semester
    ``sem_num`` and tallies subject grades into the supplied dicts:

    - el_dict:  {subject name: {grade letter: count}} for Elective subjects
    - br_dict:  {subject name: {grade letter: count}} for Breadth/HSS subjects
    - dep_dict: {subject name: [count per grade, EX..X]} for Depth subjects

    NOTE(review): the ``year`` and ``dep`` parameters are unused here, and
    the grade-count increments after first initialisation are commented out
    below, so newly-seen subjects are only zero-initialised — confirm whether
    that was deliberate.
    """
    grade_list = ['EX', 'A', 'B', 'C', 'D', 'P', 'F', 'X']
    # Maps a grade letter to its index in dep_dict's per-subject count list.
    sub_grade_dict = {'EX' : 0, 'A' : 1, 'B' : 2, 'C' : 3, 'D' : 4, 'P' : 5, 'F' : 6, 'X' : 7}
    index = 0
    sem_found = False
    for line in content:
        # Subject rows alternate between two background colours; only rows
        # inside the requested semester are processed.
        if (line.find("<tr bgcolor=\"#FFF3FF\">") != -1 or line.find("<tr bgcolor=\"pink\">") != -1) and sem_found:
            # Subject name is two lines after the row opener; rows whose name
            # cell is bold (<b>) are headers and are skipped.
            currentLine = content[index + 2]
            matchObj = re.match( r'<td>(.*)</td>', currentLine, re.M|re.I)
            if matchObj and str(matchObj.group(1)).find("<b>") == -1:
                sub_name = str(matchObj.group(1))
                # Subject type (Elective/Breadth/HSS/Depth) is six lines down.
                currentLine = content[index + 6]
                matchObj = re.match(r'<td align="center">(.*)</td>', currentLine, re.M|re.I)
                sub_type = str(matchObj.group(1))
                # Grade letter is five lines down, before the first tag.
                currentLine = content[index + 5]
                matchObj = re.match(r'<td align="center">(.*?)<(.+)', currentLine, re.M|re.I)
                sub_grade = str(matchObj.group(1))
                if sub_type.find('Elective') != -1:
                    # EAFP: bare lookup raises KeyError for unseen subjects,
                    # which triggers zero-initialisation of all grade counts.
                    try:
                        el_dict[sub_name]
                        el_dict[sub_name][sub_grade] += 1
                    except KeyError:
                        el_dict[sub_name] = {}
                        for item in grade_list:
                            el_dict[sub_name][item] = 0
                        #el_dict[sub_name][sub_grade] += 1
                elif sub_type.find('Breadth') != -1 or sub_type.find('HSS') != -1:
                    try:
                        br_dict[sub_name]
                        br_dict[sub_name][sub_grade] += 1
                    except KeyError:
                        br_dict[sub_name] = {}
                        for item in grade_list:
                            br_dict[sub_name][item] = 0
                        #br_dict[sub_name][sub_grade] += 1
                # NOTE(review): Depth is an `if`, not `elif`, so a subject
                # typed e.g. "Elective Depth" would be counted twice —
                # confirm against the grade-card format.
                if sub_type.find('Depth') != -1:
                    try:
                        dep_dict[sub_name]
                        dep_dict[sub_name][sub_grade_dict[sub_grade]] += 1
                    except KeyError:
                        dep_dict[sub_name] = {}
                        for item in grade_list:
                            dep_dict[sub_name] = [0] * 8
                        #dep_dict[sub_name][sub_grade_dict[sub_grade]] += 1
        elif line.find("<tr><td bgcolor=\"#FFF3FF\" colspan=\"2\"><h3 align=\"center\">Semester no:") != -1:
            # Semester header row: start tallying at the requested semester
            # and stop at the start of the next one.
            matchObj = re.match(r'<tr><td bgcolor="#FFF3FF" colspan="2"><h3 align="center">Semester no: ([1-9]).*', line, re.M|re.I)
            if matchObj:
                if sem_found:
                    break
                elif int(matchObj.group(1)) == sem_num:
                    sem_found = True
        index += 1
def validate_context_for_visiting_vertex_field(location, context):
    """Ensure that the current context allows for visiting a vertex field."""
    # Guard clause: anything outside the innermost @fold scope is fine.
    if not is_in_fold_innermost_scope_scope(context):
        return
    raise GraphQLCompilationError(u'Traversing inside a @fold block after output is '
                                  u'not supported! Location: {}'.format(location))
def make_from_file(fn: str, robotModel: 'RobotModel', *args, **kwargs) -> 'RobotInterfaceBase':
    """Create a RobotInterfaceBase from a Python file or module containing the
    ``make()`` function.

    args and kwargs will be passed to ``make``.

    Example::

        iface = make_from_file('klampt.control.simrobotcontroller', robot)

    Args:
        fn: either a path to a ``.py``/``.pyc`` file or a dotted module name.
        robotModel: passed through as the first argument of ``make()``.

    Raises:
        AttributeError: if the loaded module has no ``make`` attribute.
    """
    import importlib
    # BUG FIX: the original tested fn.endswith('py') / fn.endswith('pyc'),
    # which also matched module names that merely *end* in "py" (e.g.
    # "kinpy"); require a real file extension instead.
    if fn.endswith(('.py', '.pyc')):
        import os
        import sys
        path, base = os.path.split(fn)
        mod_name, file_ext = os.path.splitext(base)
        # Temporarily put the file's directory on sys.path so the file can be
        # imported by module name; try/finally guarantees the path is removed
        # even if the import fails.
        sys.path.append(os.path.abspath(path))
        try:
            mod = importlib.import_module(mod_name, base)
        finally:
            sys.path.pop(-1)
    else:
        mod = importlib.import_module(fn)
    try:
        maker = mod.make
    except AttributeError:
        print("Module", mod.__name__, "must have a make() method")
        raise
    return maker(robotModel, *args, **kwargs)
def page_id_url(context, reverse_id, lang=None):
    """
    Show the url of a page with a reverse id in the right language
    This is mostly used if you want to have a static link in a template to a page

    Returns a dict with a single 'content' key holding the URL, or an empty
    string when the request or page cannot be resolved.
    """
    request = context.get('request', False)
    if not request:
        return {'content': ''}
    if lang is None:
        lang = get_language_from_request(request)

    def _lookup_url():
        # Resolve the page by reverse_id. On failure: re-raise under DEBUG,
        # otherwise notify the managers by mail and return None.
        try:
            page = Page.objects.get(reverse_id=reverse_id)
        except Exception:
            if settings.DEBUG:
                raise
            site = request.site
            send_mail(_('Reverse ID not found on %(domain)s') % {'domain': site.domain},
                      _("A page_url template tag didn't found a page with the reverse_id %(reverse_id)s\nThe url of the page was: http://%(host)s%(path)s") % {'reverse_id': reverse_id, 'host': request.host, 'path': request.path},
                      settings.DEFAULT_FROM_EMAIL,
                      settings.MANAGERS,
                      fail_silently=True)
            return None
        return page.get_absolute_url(language=lang)

    if hasattr(settings, 'CMS_CONTENT_CACHE_DURATION'):
        key = 'page_url_pid:' + reverse_id + '_l:' + str(lang) + '_type:absolute_url'
        url = cache.get(key)
        if not url:
            url = _lookup_url()
            if url:
                cache.set(key, url, settings.CMS_CONTENT_CACHE_DURATION)
    else:
        # BUG FIX: the original referenced `page` here without ever looking
        # it up, raising NameError whenever caching was disabled; the same
        # NameError also followed the non-DEBUG lookup-failure path above.
        url = _lookup_url()
    if url:
        return {'content': url}
    return {'content': ''}
def test_calculate_spend_cost_basis1_buy_used_by_2_sells_taxable(accountant):
    """ Make sure that when 1 buy is used by 2 sells bought cost is correct
    Regression test for taxable part of:
    https://github.com/rotki/rotki/issues/223
    """
    asset = A_BTC
    cost_basis = accountant.pots[0].cost_basis
    asset_events = cost_basis.get_events(asset)
    # Single acquisition of 5 BTC at 268.1/BTC that both spends draw from.
    asset_events.acquisitions.append(
        AssetAcquisitionEvent(
            amount=FVal(5),
            timestamp=1446979735,  # 08/11/2015
            rate=FVal(268.1),
            index=1,
        ),
    )
    # First spend: 3 of the 5 BTC, within a year of acquisition -> taxable.
    spending_amount = FVal(3)
    cinfo = cost_basis.calculate_spend_cost_basis(
        spending_amount=spending_amount,
        spending_asset=asset,
        timestamp=1467378304,  # 31/06/2016
    )
    assert cinfo.taxable_amount == 3, '3 out of 3 should be taxable (within a year)'
    assert cinfo.taxfree_bought_cost.is_close(FVal('0'))
    # 3 * 268.1 = 804.3
    assert cinfo.taxable_bought_cost.is_close(FVal('804.3'))
    assert len(cinfo.matched_acquisitions) == 1
    assert sum(x.amount for x in cinfo.matched_acquisitions) == spending_amount
    assert cinfo.is_complete is True
    assert cinfo.matched_acquisitions[0].amount == spending_amount
    assert cinfo.matched_acquisitions[0].event.amount == FVal(5)
    assert cinfo.matched_acquisitions[0].event.remaining_amount == FVal(2)
    acquisitions_num = len(asset_events.acquisitions)
    assert acquisitions_num == 1, 'whole buy was not used'
    remaining_amount = asset_events.acquisitions[0].remaining_amount
    assert remaining_amount == FVal(2), '3 of 5 should have been consumed'
    # now eat up all the rest
    spending_amount = FVal(2)
    cinfo = cost_basis.calculate_spend_cost_basis(
        spending_amount=spending_amount,
        spending_asset=asset,
        timestamp=1467378404,  # bit after previous sell
    )
    assert cinfo.taxable_amount == 2, '2 out of 2 should be taxable (within a year)'
    assert cinfo.taxfree_bought_cost.is_close(FVal('0'))
    # 2 * 268.1 = 536.2
    assert cinfo.taxable_bought_cost.is_close(FVal('536.2'))
    assert len(cinfo.matched_acquisitions) == 1
    assert sum(x.amount for x in cinfo.matched_acquisitions) == spending_amount
    assert cinfo.is_complete is True
    assert cinfo.matched_acquisitions[0].amount == spending_amount
    assert cinfo.matched_acquisitions[0].event.amount == FVal(5)
    assert cinfo.matched_acquisitions[0].event.remaining_amount == ZERO
    # The fully-consumed acquisition must be removed from the pool.
    acquisitions_num = len(asset_events.acquisitions)
    assert acquisitions_num == 0, 'the buy should have been used'
def convert(string):
    """ the convert() function takes simple-formatted string and returns a 'lingtree description string' suitable for pasting into the SIL LingTree program directly
    the string should contain multiple lines, one line per 'node relationship'. E.G. :
    S = NP VP
    NP = \L Juan
    Juan = \G John
    VP = V
    V = \L duerme
    duerme = \G sleeps
    The left and right side are separated by an equals sign ( = ). ( => ) and ( -> ) also work fine. The right side may contain special backslash codes, but the left side should not contain any special codes.

    Returns the description string on success, or a human-readable error
    message string if a line cannot be processed.
    """
    ref = {}  # maps node label -> Node instance
    top = 0   # root Node; stays 0 until the first relationship line is seen
    for line in string.split('\n'):
        # BUG FIX: the original called line.strip() without assigning the
        # result (str.strip returns a new string), so surrounding whitespace
        # and trailing '\r' were never actually removed; whitespace-only
        # lines then fell into the error path below. Stripping also makes
        # the original `line == '\r'` check redundant.
        line = line.strip()
        if line == '':
            continue
        if line.startswith('#'):
            continue  # ignore comment lines
        # normalize 'equals' syntax
        line = line.replace('->', '=')
        line = line.replace('=>', '=')
        try:
            leftside, rightside = line.split('=')
            leftside = leftside.strip()
            rightside = rightside.strip()
            if top == 0:  # remember the top node
                ref[leftside] = Node(leftside)
                top = ref[leftside]
            # the leftside must always already exist in the ref
            if not leftside in ref:
                raise NameError
            # right side contains a special code
            # support multiple codes on right side, like '\L lexical \G gloss'
            if rightside.find('\\') != -1:
                c_last = ''
                for c in rightside.split('\\'):
                    if c == '':
                        continue
                    code = c[0]  # the letter after the backslash, e.g. L or G
                    c = c[1:].strip()
                    ref[c] = Node(c)
                    ref[c].setCode(code)
                    # Chain coded nodes: the first hangs off the left side,
                    # each subsequent one off the previous coded node.
                    if c_last == '':
                        ref[leftside].addChild(ref[c])
                    else:
                        ref[c_last].addChild(ref[c])
                    c_last = c
            # normal right side
            else:
                for r in rightside.split():
                    r = r.strip()
                    if r == '':
                        continue
                    ref[r] = Node(r)
                    ref[leftside].addChild(ref[r])
        except NameError:
            return "Line may not be in top-down order: %s" % line
        except:
            return "Error occurred processing line: %s" % line
    # tell the top node to print itself
    return top.tell()
def Hij_to_cijkl(H):
    """Convert a Hooke's matrix to the corresponding rigidity tensor

    Parameters
    ----------
    H: 6x6 iterable or dict
        The Hooke's matrix to be converted. If not already a np.ndarray, an
        iterable must be castable to one. A dict is read as 1-indexed rows
        and columns, using only the upper triangle (H[a][b] with a <= b).

    Returns
    -------
    c: 3x3x3x3 np.ndarray
        The corresponding 4th order rigidity tensor
    """
    # Voigt notation: map each (i, j) index pair (both orders) to its
    # 6-component index: 00->0, 11->1, 22->2, 12/21->3, 02/20->4, 01/10->5.
    pair_to_voigt = {}
    for v, (a, b) in enumerate([(0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1)]):
        pair_to_voigt[(a, b)] = v
        pair_to_voigt[(b, a)] = v
    if type(H) == dict:
        c = np.empty((3, 3, 3, 3), dtype=type(H[1][1]))
        for (i, j), p in pair_to_voigt.items():
            for (k, l), q in pair_to_voigt.items():
                # Dict input is 1-indexed and upper-triangular: read
                # H[min+1][max+1] so symmetry is applied implicitly.
                lo, hi = (p, q) if p <= q else (q, p)
                c[i, j, k, l] = H[lo + 1][hi + 1]
    else:
        if not type(H) == np.ndarray:
            H = np.array(H)
        if H.shape[0] != 6 or H.shape[1] != 6:
            raise ValueError('H must be a 6x6 iterable and castable to np.ndarray')
        c = np.empty((3, 3, 3, 3), dtype=H.dtype)
        # Array input is read in full: c[i,j,k,l] = H[V(i,j), V(k,l)].
        for (i, j), p in pair_to_voigt.items():
            for (k, l), q in pair_to_voigt.items():
                c[i, j, k, l] = H[p, q]
    return c
def get_cymon_feed_size(jwt, feed_id):
    """Determine the number of results a feed will return (max: 1000).

    Params:
    - jwt: (type: string) JWT token.
    - feed_id: (type: string) Cymon feed ID.

    Returns:
    - total: (type: int) feed size, or 0 on any failure.
    """
    try:
        today = datetime.utcnow()
        threshold = today - timedelta(days=BASECONFIG.malware_days)
        LOGGING.info('Determining feed size...')
        # Ask for a single result; the response's 'total' carries the size.
        request = requests.get(
            'https://api.cymon.io/v2/ioc/search/feed/{0}'.format(feed_id),
            params={
                'startDate': threshold.strftime('%Y-%m-%d'),
                'endDate': today.strftime('%Y-%m-%d'),
                'size': 1},
            headers={'Authorization': 'Bearer {0}'.format(jwt)},
            verify=False)  # NOTE(review): TLS verification disabled — confirm this is intentional
        if request.status_code != 200:
            LOGGING.error(
                'Problem connecting to Cymon. Status code:{0}. Please try again later.'.format(
                    request.status_code))
        else:
            LOGGING.info('Request successful!')
            response = json.loads(request.text)
            if 'total' in response:
                # The API caps searchable results at 1000.
                total = int(response['total'])
                if total > 1000:
                    LOGGING.warning(
                        'API request returned more than 1000 results.')
                    total = 1000
                return total
    except requests.exceptions.ConnectionError as e:
        LOGGING.warning('Problem connecting to Cymon. Error: {0}'.format(e))
    except Exception as e:
        LOGGING.warning('Problem connecting to Cymon. Aborting task.')
        LOGGING.exception(sys.exc_info())
        LOGGING.exception(type(e))
        LOGGING.exception(e.args)
        LOGGING.exception(e)
    return 0
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.