content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def process_mailbox(M):
    """
    Dump all emails in the folder to files in output directory.

    Args:
        M: an authenticated imaplib IMAP4/IMAP4_SSL connection with a
           mailbox already selected.

    Side effects:
        Writes one '<num>.eml' file per message into the module-level
        OUTPUT_DIRECTORY. Returns early on the first fetch error.
    """
    rv, data = M.search(None, "ALL")
    if rv != 'OK':
        print("No messages found!")
        return
    for num in data[0].split():
        # Use a distinct name so the search result 'data' is not clobbered.
        rv, msg_data = M.fetch(num, '(RFC822)')
        if rv != 'OK':
            print("ERROR getting message", num)
            return
        print("Writing message ", num)
        # 'with' guarantees the handle is closed even if the write fails.
        with open('%s/%s.eml' % (OUTPUT_DIRECTORY, num), 'wb') as f:
            f.write(msg_data[0][1])
def mkdir_p(path):
    """Create *path* like ``mkdir -p``: parents included, no error if present.

    :param path: the path to create.
    """
    try:
        os.makedirs(path)
    except OSError as err:
        # A pre-existing directory is fine; anything else is fatal.
        already_a_dir = err.errno == errno.EEXIST and os.path.isdir(path)
        if not already_a_dir:
            bot.error("Error creating path %s, exiting." % path)
            sys.exit(1)
def sync_local(operations, sync_dir):
    """Transfer snapshots to a local target directory.

    Seeds the target with the oldest local snapshot when no common parent
    exists, then sends every newer snapshot incrementally.
    """
    trace = operations.trace
    # Snapshots already present on the target vs. those present locally.
    target_snaps = set(operations.listdir_path(sync_dir))
    local_snaps = set(operations.listdir())
    if not local_snaps:
        return  # nothing local, nothing to transfer
    parents = target_snaps & local_snaps
    if not parents:
        # No shared snapshot: transfer the oldest local one in full so all
        # later snapshots can be sent incrementally on top of it.
        oldest = min(local_snaps)
        operations.sync_single(oldest, sync_dir)
        parents.add(oldest)
    # Newest shared snapshot becomes the first incremental parent.
    last_parent = max(parents)
    parent = last_parent
    trace(LOG_LOCAL + "Sync: last possible parent = %s", last_parent)
    for snap in sorted(local_snaps):
        if snap <= last_parent:
            continue  # already available on the target
        trace(LOG_LOCAL + "transfer: parent=%s snap=%s", parent, snap)
        operations.sync_withparent(parent, snap, sync_dir)
        parent = snap
def remove_element(list, remove):
    """Remove the first object whose ``_id`` equals ``remove[0]``.

    Args:
        list (list): List of objects, each carrying an ``_id`` attribute.
        remove: Sequence whose first element is the id to remove.

    Returns:
        list: The same list, with the first matching element removed
        (returned unchanged when nothing matches — the original returned
        None in that case, contradicting its own docstring).
    """
    # NOTE: parameter names shadow the builtins `list`/`object`; kept for
    # backward compatibility with existing keyword callers.
    for object in list:
        if object._id == remove[0]:
            # break immediately: removing while iterating further would
            # skip elements.
            list.remove(object)
            break
    return list
def warmup_cosine_decay_schedule(
    init_value: float,
    peak_value: float,
    warmup_steps: int,
    decay_steps: int,
    end_value: float = 0.0
) -> base.Schedule:
    """Linear warmup followed by cosine decay.
    Args:
      init_value: Initial value for the scalar to be annealed.
      peak_value: Peak value for scalar to be annealed at end of warmup.
      warmup_steps: Positive integer, the length of the linear warmup.
      decay_steps: Positive integer, the total length of the schedule. This
        includes the warmup time, so cosine annealing runs for
        `decay_steps - warmup_steps` steps.
      end_value: End value of the scalar to be annealed.
    Returns:
      schedule: A function that maps step counts to values.
    """
    # Ramp linearly from init_value up to peak_value over warmup_steps ...
    warmup = linear_schedule(
        init_value=init_value,
        end_value=peak_value,
        transition_steps=warmup_steps)
    # ... then cosine-decay from peak_value towards end_value.
    decay = cosine_decay_schedule(
        init_value=peak_value,
        decay_steps=decay_steps - warmup_steps,
        alpha=end_value / peak_value)
    return join_schedules([warmup, decay], [warmup_steps])
def download_mission(vehicle):
    """Download the current mission from the vehicle, blocking until done."""
    commands = vehicle.commands
    commands.download()
    # Block until the download has completed.
    commands.wait_ready()
def test_add_list_returns_unprocessable_entity(client, token):
    """POSTing an invalid (empty) body to /lists must yield 422."""
    response = client.post(
        '/lists',
        headers={'Authorization': f'Bearer {token}'},
        json={},
        allow_redirects=True
    )
    assert response.status_code == 422
def test_input_json(dict_srv):
    """Validate that every server entry in the "info_in.json" data carries
    all required keys.

    Args:
        dict_srv (dict): Mapping of server name -> dict of settings loaded
            from "info_in.json".

    Raises:
        KeyError: re-raised after printing a hint, when a required key is
            missing from a server entry.
    """
    # Template of every key a server entry is expected to provide.
    dict_srv_template = {"host": "", \
                    "user": "", \
                    "password": "", \
                    "port": "", \
                    "http_interface": "", \
                    "http_port": "", \
                    "servername": "", \
                    "serveradmin": "", \
                    "documentroot": "", \
                    "file_vhost": "", \
                    "url_source_website": ""}
    for item in dict_srv.keys():
        for key in dict_srv_template.keys():
            if not key in dict_srv[item]:
                # NOTE(review): dict_srv is keyed by server names, so
                # dict_srv[key] presumably raises KeyError here, which is
                # what routes control to the message below — confirm this
                # indirection is intentional rather than a typo for
                # dict_srv[item].
                try:
                    print(dict_srv[key])
                    break
                except KeyError:
                    print(f"""The "{key}" for server "{item}": this information is missing or incorrectly entered in the "info_in.json" file.
                    Please consult the documentation...""")
                    raise
def save_url(url, path, filename, session):
    """Download *url* and write its body to ``path + filename``.

    Args:
        session: requests.Session() used to issue the GET.
        url: str, URL to fetch.
        path: str, destination directory (caller includes the separator).
        filename: str, destination file name.

    Returns: None. On HTTP 200 the body is written to path+filename; any
        other status code is reported and nothing is written.
    """
    result = session.get(url)
    if result.status_code == 200:
        # 'with' guarantees the file is closed even if the write fails.
        with open(path + filename, 'wb') as f:
            f.write(result.content)
        print('contents of URL written to ' + path + filename)
    else:
        print('requests.get() returned an error code ' + str(result.status_code))
def cleanup():
    """
    deletes all primaryFsets that matches pattern pvc-
    used for cleanup in case of parallel pvc.

    For each matching fileset: unlink it, delete it, then verify it no
    longer appears in the fileset listing (asserting on failure).
    Args:
        None
    Returns:
        None
    Raises:
        None
    """
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    get_link = f'https://{test["guiHost"]}:{test["port"]}/scalemgmt/v2/filesystems/{test["primaryFs"]}/filesets/'
    response = requests.get(get_link, verify=False, auth=(test["username"], test["password"]))
    # Fileset names appear quoted in the listing; strip the surrounding
    # characters that the regex captured.
    lst = re.findall(r'\S+pvc-\S+', response.text)
    lst2 = []
    for la in lst:
        lst2.append(la[1:-2])
    for res in lst2:
        volume_name = res
        # Step 1: unlink the fileset (DELETE on the .../link sub-resource).
        unlink_link = f'https://{test["guiHost"]}:{test["port"]}/scalemgmt/v2/filesystems/{test["primaryFs"]}/filesets/{volume_name}/link'
        response = requests.delete(
            unlink_link, verify=False, auth=(test["username"], test["password"]))
        LOGGER.debug(response.text)
        LOGGER.info(f"Fileset {volume_name} unlinked")
        time.sleep(5)
        # Step 2: delete the fileset itself. BUGFIX: the delete URL used to
        # end in '/link' (identical to the unlink URL), which merely
        # repeated the unlink and left the fileset in place — the
        # verification below would then always fail.
        delete_link = f'https://{test["guiHost"]}:{test["port"]}/scalemgmt/v2/filesystems/{test["primaryFs"]}/filesets/{volume_name}'
        response = requests.delete(
            delete_link, verify=False, auth=(test["username"], test["password"]))
        LOGGER.debug(response.text)
        time.sleep(10)
        # Step 3: verify the fileset is gone from the listing.
        get_link = f'https://{test["guiHost"]}:{test["port"]}/scalemgmt/v2/filesystems/{test["primaryFs"]}/filesets/'
        response = requests.get(get_link, verify=False,
                                auth=(test["username"], test["password"]))
        LOGGER.debug(response.text)
        search_result = re.search(volume_name, str(response.text))
        if search_result is None:
            LOGGER.info(f'Fileset {volume_name} deleted successfully')
        else:
            LOGGER.error(f'Fileset {volume_name} is not deleted')
            assert False
def plot_corr(data, figsize=None, xrot=0, yrot=0):
    """Plot a correlation heatmap for the columns of *data*.

    Args:
        data: DataFrame whose ``.corr()`` matrix is visualized.
        figsize: Overall figure size for the overall plot.
        xrot: (Default = 0) Rotation of x-labels, in degrees.
        yrot: (Default = 0) Rotation of y-labels, in degrees.
    """
    fig, axis = plt.subplots(1, 1, figsize=figsize)
    corr_mat = data.corr()
    # Draw explicitly on the created axis (instead of relying on the
    # implicit current axes) so the tick-rotation calls below are
    # guaranteed to act on the same subplot as the heatmap.
    sns.heatmap(round(corr_mat, 2), annot=True, cmap='RdBu', vmin=-1, vmax=1,
                ax=axis)
    axis.set_xticklabels(axis.get_xticklabels(), rotation=xrot)
    axis.set_yticklabels(axis.get_yticklabels(), rotation=yrot)
def uninstall():
    """Remove the import and regex compile timing hooks.

    Restores the original ``__import__`` builtin and ``re._compile`` from
    the module-level ``_real_import`` / ``_real_compile`` saved when the
    hooks were installed.
    """
    # NOTE(review): assumes __builtins__ is a dict here (true when this
    # module is imported, not run as __main__) — confirm the matching
    # install() uses the same access pattern.
    __builtins__['__import__'] = _real_import
    re._compile = _real_compile
def __transform_template_to_graph(j):
    """
    Build a directed graph from the simple node/edge template format.
    :param j: dict with "nodes" ([key, label] pairs) and "edges"
        ([source, target, label] triples).
    :return: the populated DiGraph.
    """
    graph = nx.DiGraph()
    # Every node gets a fresh UUID as its 'id' attribute.
    for node in j["nodes"]:
        graph.add_node(node[0], label=node[1], id=str(uuid.uuid4()))
    for source, target, edge_label in ((e[0], e[1], e[2]) for e in j["edges"]):
        graph.add_edge(source, target, label=edge_label)
    return graph
def datetime_to_string(dt):
    """
    Convert a datetime object to the preferred format for the shopify api. (2016-01-01T11:00:00-05:00)
    :param dt: Datetime object to convert to timestamp.
    :return: Timestamp string for the datetime object, or None when dt is falsy.
    :raises ValueError: if dt is not a datetime instance.
    """
    if not dt:
        return None
    if not isinstance(dt, datetime.datetime):
        raise ValueError('Must supply an instance of `datetime`.')
    # Derive the local UTC offset from an aware local datetime instead of
    # subtracting utcnow() from now(): the old subtraction needed a fudge
    # second and mis-rendered east-of-UTC and half-hour zones
    # (e.g. UTC+05:30 came out as '--5:00').
    local_offset = datetime.datetime.now().astimezone().utcoffset()
    total_minutes = int(local_offset.total_seconds() // 60)
    sign = '+' if total_minutes >= 0 else '-'
    hours, minutes = divmod(abs(total_minutes), 60)
    offset_str = '%s%02d:%02d' % (sign, hours, minutes)
    dt_str = dt.strftime('%Y-%m-%dT%H:%M:%S')
    return dt_str + offset_str
def names(package: str) -> List[str]:
    """List all plug-ins in one package, ordered by their sort value."""
    _import_all(package)
    registered = _PLUGINS[package].keys()
    return sorted(registered, key=lambda plugin: info(package, plugin).sort_value)
def _get_inner_text(html_node):
    """Returns the plaintext of an HTML node.
    This turns out to do exactly what we want:
    - strips out <br>s and other markup
    - replace <a> tags with just their text
    - converts HTML entities like &nbsp; and smart quotes into their
      unicode equivalents

    :param html_node: an lxml.html element.
    :return: the node's text content as a unicode str (tail text excluded
        via with_tail=False).
    """
    # method='text' makes lxml serialize only the text content; the result
    # comes back UTF-8 encoded, so decode it back to str.
    return lxml.html.tostring(html_node, encoding='utf-8',
                              method='text', with_tail=False).decode('utf-8')
def test_hbonds():
    """H-Bonds test.

    Computes hydrogen-bond counts between the module-level receptor ``rec``
    and each ligand in ``mols`` (index [2] of the hbonds() result is summed
    per molecule — presumably a per-contact boolean/strict array; confirm
    against the hbonds implementation) and compares them to known-good
    reference counts.
    """
    hbonds_count = [hbonds(rec, mol)[2].sum() for mol in mols]
    # Reference values: one expected count per molecule in `mols`.
    assert_array_equal(hbonds_count,
                       [6, 7, 5, 5, 6, 5, 6, 4, 6, 5, 4, 6, 6, 5, 8, 5, 6, 6,
                        6, 7, 6, 6, 5, 6, 7, 5, 5, 7, 6, 6, 7, 6, 6, 6, 6, 6,
                        6, 5, 5, 6, 4, 5, 5, 6, 6, 3, 5, 5, 4, 6, 4, 8, 6, 6,
                        6, 4, 6, 6, 6, 6, 7, 6, 7, 6, 6, 7, 6, 6, 6, 5, 4, 5,
                        5, 6, 6, 6, 6, 6, 6, 4, 7, 5, 6, 6, 5, 6, 6, 5, 6, 5,
                        6, 5, 5, 7, 7, 6, 8, 6, 4, 5])
def get_all_commands() -> Iterable[Type[Cog]]:
    """
    List all applications.

    Imports every module under ``figtag.apps`` so each Cog subclass gets
    registered, then returns the registered subclasses.
    """
    loader = pkgutil.get_loader('figtag.apps')
    pkg_dir = os.path.dirname(cast(Any, loader).get_filename())
    for _loader, module_name, _ispkg in pkgutil.iter_modules([pkg_dir]):
        importlib.import_module('.apps.' + module_name, __package__)
    return Cog.__subclasses__()
def zern_normalisation(nmodes=30):
    """
    Calculate normalisation vector.
    This function calculates a **nmodes** element vector with normalisation constants for Zernike modes that have not already been normalised.
    @param [in] nmodes Size of normalisation vector.
    @see <http://research.opt.indiana.edu/Library/VSIA/VSIA-2000_taskforce/TOPS4_2.html> and <http://research.opt.indiana.edu/Library/HVO/Handbook.html>.
    """
    # range(), not the Python-2-only xrange() — the latter is a NameError
    # on Python 3.
    nolls = (noll_to_zern(j+1) for j in range(nmodes))
    # Normalisation constant sqrt(2(n+1)/(1+delta_{m,0})) per (n, m) mode.
    norms = [(2*(n+1)/(1+(m==0)))**0.5 for n, m in nolls]
    return np.asanyarray(norms)
def sample_without_replacement(n, N, dtype=np.int64):
    """Returns uniform samples in [0, N-1] without replacement. It will use
    Knuth sampling or rejection sampling depending on the parameters n and N.
    .. note::
        the values 0.6 and 100 are based on empirical tests of the
        functions and would need to be changed if the functions are
        changed
    """
    # Rejection sampling pays off only for a small sample from a
    # reasonably large population; otherwise fall back to Knuth's method.
    use_rejection = N > 100 and n / float(N) < 0.6
    if use_rejection:
        return rejection_sampling(n, N, dtype)
    return Knuth_sampling(n, N, dtype)
def bustypes(bus, gen):
    """Builds index lists of each type of bus (C{REF}, C{PV}, C{PQ}).
    Generators with "out-of-service" status are treated as L{PQ} buses with
    zero generation (regardless of C{Pg}/C{Qg} values in gen). Expects C{bus}
    and C{gen} have been converted to use internal consecutive bus numbering.
    @param bus: bus data
    @param gen: generator data
    @return: index lists of each bus type
    @author: Ray Zimmerman (PSERC Cornell)
    @author: Richard Lincoln
    changes by Uni Kassel (Florian Schaefer): If new ref bus is chosen -> Init as numpy array
    """
    # Classify every bus by its BUS_TYPE column.
    bus_type_col = bus[:, BUS_TYPE]
    ref = find(bus_type_col == REF)  # slack/reference bus index
    pv = find(bus_type_col == PV)    # PV bus indices
    pq = find(bus_type_col == PQ)    # PQ bus indices
    return ref, pv, pq
def update_and_return_dict(
    dict_to_update: dict, update_values: Union[Mapping, Iterable[Tuple[Any, Any]]]
) -> dict:
    """Update a dictionary in place and hand back that same dictionary.

    Args:
        dict_to_update (dict): the dict to update.
        update_values: mapping, or iterable of (key, value) pairs, to merge
            into ``dict_to_update``.

    Returns:
        dict: ``dict_to_update`` itself, after the update (not a copy).
    """
    dict_to_update.update(update_values)
    return dict_to_update
def get_max_value_key(dic):
    """Gets the key for the maximum value in a dict.

    Ties are broken deterministically by returning the smallest key.
    """
    values = np.array(list(dic.values()))
    keys = np.array(list(dic.keys()))
    max_positions = np.where(values == np.max(values))[0]
    if len(max_positions) == 1:
        return keys[max_positions[0]]
    tied_keys = keys[max_positions]
    return tied_keys[np.argmin(tied_keys)]
def load(*path: str):
    """
    Load multiple instances of fruit configurations into the current running instance of fruit.
    Parameters
    ----------
    *path: str
        List of paths to load
    """
    # TODO: Implement loading of multiple fruit configs
    # TODO: Add load local option
    for candidate in path:
        compile_config(obtain_config(candidate))
def test_datetime():
    """Test serialization of datetime.
    Procedure:
        - Create a new serializer and a proto DateTime.
        - Populate date (year/month/day) and time (hours/minutes/seconds).
        - Serialize once without a timezone and once with one.
    Verification:
        - The returned strings equal the expected ISO8601 strings.
        - The timezone suffix is appended correctly.
    """
    json_serializer = serializer.JSONLDSerializer()
    proto_dt = schema.DateTime()
    date_part = proto_dt.date
    date_part.year, date_part.month, date_part.day = 2000, 2, 9
    time_part = proto_dt.time
    time_part.hours, time_part.minutes, time_part.seconds = 6, 30, 15
    # Case 1: no timezone set.
    expected = '2000-02-09T06:30:15'
    output = json_serializer.serialize_proto(proto_dt, schema)
    assert output == expected, 'DateTime(without timezone) serialization failed.'
    # Case 2: with an explicit timezone offset.
    time_part.timezone = '+05:30'
    expected = '2000-02-09T06:30:15+05:30'
    output = json_serializer.serialize_proto(proto_dt, schema)
    assert output == expected, 'DateTime(with timezone) serialization failed.'
def main(event, context):
    """Linearly interpolate a time series onto a new time axis and compute
    a linear-sense reliability score for each interpolated point.
    """
    value_axis = event["valueAxis"]
    reliable_distance = event["reliable_distance"]
    # Parse ISO timestamps into epoch seconds for both axes.
    time_axis = [totimestamp(parser.parse(t)) for t in event["timeAxis"]]
    new_time_axis = [totimestamp(parser.parse(t)) for t in event["timeAxisNew"]]
    interpolated = linear_interpolate(time_axis, value_axis, new_time_axis)
    reliability = exam_reliability(time_axis, new_time_axis, reliable_distance)
    return {
        "valueAxisNew": interpolated.tolist(),
        "reliabAxis": reliability,
    }
def Addon_Info(id='',addon_id=''):
    """
    Retrieve details about an add-on, lots of built-in values are available
    such as path, version, name etc.
    CODE: Addon_Setting(id, [addon_id])
    AVAILABLE PARAMS:
    (*) id  -  This is the name of the id you want to retrieve.
    The list of built in id's you can use (current as of 15th April 2017)
    are: author, changelog, description, disclaimer, fanart, icon, id, name,
    path, profile, stars, summary, type, version
    addon_id  -  By default this will use your current add-on id but you
    can access any add-on you want by entering an id in here.
    EXAMPLE CODE:
    dialog.ok('ADD-ON INFO','We will now try and pull name and version details for our current running add-on.')
    version = koding.Addon_Info(id='version')
    name = koding.Addon_Info(id='name')
    dialog.ok('NAME AND VERSION','[COLOR=dodgerblue]Add-on Name:[/COLOR] %s' % name,'[COLOR=dodgerblue]Version:[/COLOR] %s' % version)
    ~"""
    import xbmcaddon
    # Default to the add-on that invoked this function.
    if addon_id == '':
        addon_id = Caller()
    ADDON = xbmcaddon.Addon(id=addon_id)
    if id == '':
        # No property requested: warn the developer via a dialog and fall
        # through (implicitly returns None).
        dialog.ok('ENTER A VALID ID','You\'ve called the Addon_Info function but forgot to add an ID. Please correct your code and enter a valid id to pull info on (e.g. "version")')
    else:
        return ADDON.getAddonInfo(id=id)
def _hash_string_to_color(string):
    """
    Hash a string to color (using hashlib and not the built-in hash for
    consistency between runs)
    """
    digest = hashlib.sha1(string.encode("utf-8")).hexdigest()
    return COLOR_ARRAY[int(digest, 16) % len(COLOR_ARRAY)]
def get_build(id):
    """Show metadata for a single build.
    **Example request**
    .. code-block:: http
       GET /builds/1 HTTP/1.1
    **Example response**
    .. code-block:: http
       HTTP/1.0 200 OK
       Content-Length: 367
       Content-Type: application/json
       Date: Tue, 01 Mar 2016 17:21:28 GMT
       Server: Werkzeug/0.11.3 Python/3.5.0
       {
           "bucket_name": "an-s3-bucket",
           "bucket_root_dir": "lsst_apps/builds/b1",
           "date_created": "2016-03-01T10:21:27.583795Z",
           "date_ended": null,
           "git_refs": [
               "master"
           ],
           "github_requester": "jonathansick",
           "product_url": "http://localhost:5000/products/lsst_apps",
           "self_url": "http://localhost:5000/builds/1",
           "slug": "b1",
           "surrogate_key": "d290d35e579141e889e954a0b1f8b611",
           "uploaded": true
       }
    :param id: ID of the Build.
    :>json string bucket_name: Name of the S3 bucket hosting the built
        documentation.
    :>json string bucket_root_dir: Directory (path prefix) in the S3 bucket
        where this documentation build is located.
    :>json string date_created: UTC date time when the build was created.
    :>json string date_ended: UTC date time when the build was deprecated;
        will be ``null`` for builds that are *not deprecated*.
    :>json array git_refs: Git ref array that describes the version of the
        documentation being built. Typically this array will be a single
        string, e.g. ``['master']`` but may be a list of several refs for
        multi-package builds with ltd-mason.
    :>json string github_requester: GitHub username handle of person
        who triggered the build (null is not available).
    :>json string slug: slug of build; URL-safe slug.
    :>json string product_url: URL of parent product entity.
    :>json string published_url: Full URL where this build is published to
        the reader.
    :>json string self_url: URL of this build entity.
    :>json string surrogate_key: The surrogate key attached to the headers
        of all files on S3 belonging to this build. This allows LTD Keeper
        to notify Fastly when an Edition is being re-pointed to a new build.
        The client is responsible for uploading files with this value as
        the ``x-amz-meta-surrogate-key`` value.
    :>json bool uploaded: True if the built documentation has been uploaded
        to the S3 bucket. Use :http:patch:`/builds/(int:id)` to
        set this to `True`.
    :statuscode 200: No error.
    :statuscode 404: Build not found.
    """
    # get_or_404 aborts with HTTP 404 when no Build has this primary key;
    # otherwise serialize the record to the JSON shape documented above.
    return jsonify(Build.query.get_or_404(id).export_data())
def get_unity_snapshotschedule_parameters():
    """Build the argument-spec dict for the Unity snapshot schedule
    Ansible module.

    Returns:
        dict: parameter name -> Ansible argument specification.
    """
    schedule_types = ['every_n_hours', 'every_day', 'every_n_days',
                      'every_week', 'every_month']
    weekdays = ['SUNDAY', 'MONDAY', 'TUESDAY', 'WEDNESDAY',
                'THURSDAY', 'FRIDAY', 'SATURDAY']
    return dict(
        name=dict(type='str'),
        id=dict(type='str'),
        type=dict(type='str', choices=schedule_types),
        interval=dict(type='int'),
        hours_of_day=dict(type='list', elements='int'),
        day_interval=dict(type='int'),
        days_of_week=dict(type='list', elements='str', choices=weekdays),
        day_of_month=dict(type='int'),
        hour=dict(type='int'),
        minute=dict(type='int'),
        desired_retention=dict(type='int'),
        retention_unit=dict(type='str', choices=['hours', 'days'],
                            default='hours'),
        auto_delete=dict(type='bool'),
        state=dict(required=True, type='str', choices=['present', 'absent'])
    )
def create_bootloader_win(interpreter_zip, executable, argv):
    """
    Prepares executable for execution on target machine. Appends client code to `interpreter_zip` archive. Embeds new
    archive into `executable`.
    :param interpreter_zip: Zip file containing python runtime, stdlib and essential dependencies.
    :param executable: Loader executable.
    :param argv: list of arguments passed to client.
    :return: binary string containing payload ready for execution.
    """
    # Load the base interpreter archive into memory so it can be extended
    # without touching the original file on disk.
    with open(interpreter_zip, 'rb') as fp:
        zip_data = io.BytesIO(fp.read())
    with zipfile.ZipFile(zip_data, 'a', zipfile.ZIP_DEFLATED, False) as fp:
        # Client arguments travel inside the archive, one per line.
        fp.writestr('argv.txt', '\n'.join(argv))
        # Compile and embed the shared 'common' package as .pyc entries.
        base_path = settings.SOURCE_DIR / 'common'
        for archive_path in enumerate_files(base_path, '.py'):
            file_path = base_path / archive_path
            archive_path = 'common/' + archive_path
            code = compile_file(file_path, archive_path)
            fp.writestr(archive_path + 'c', code)
        # Compile and embed the 'client' package; its main.py becomes the
        # archive's __main__.py so the zip is directly runnable.
        base_path = settings.SOURCE_DIR / 'client'
        for archive_path in enumerate_files(base_path, '.py'):
            file_path = base_path / archive_path
            if archive_path == 'main.py':
                archive_path = '__main__.py'
            else:
                archive_path = 'client/' + archive_path
            code = compile_file(file_path, archive_path)
            fp.writestr(archive_path + 'c', code)
    # Rewind and snapshot the finished archive bytes.
    zip_data.seek(0, os.SEEK_SET)
    zip_data = zip_data.read()
    # Embed the archive into the loader executable as a new '.py' PE section.
    pe = pefile.PE(executable)
    pe_add_section(pe, zip_data, '.py')
    return pe.write()
def gaussian_kernel(X, kernel_type="gaussian", sigma=3.0, k=5):
    """gaussian_kernel: Build an adjacency matrix for data using a Gaussian kernel
    Args:
        X (N x d np.ndarray): Input data
        kernel_type: "gaussian" or "adaptive". Controls bandwidth
        sigma (float): Scalar kernel bandwidth (used when kernel_type="gaussian")
        k (integer): nearest neighbor kernel bandwidth (used when kernel_type="adaptive")
    Returns:
        W (N x N np.ndarray): Weight/adjacency matrix induced from X
    Raises:
        ValueError: if kernel_type is neither "gaussian" nor "adaptive".
    """
    kernel_type = kernel_type.lower()
    D = squareform(pdist(X))  # pairwise Euclidean distance matrix
    if kernel_type == "gaussian":  # fixed (global) bandwidth
        print("fixed bandwidth specified")
        if not (isinstance(sigma, float) and sigma > 0):
            print("invalid gaussian bandwidth, using sigma = max(min(D)) as bandwidth")
            # Mask the diagonal with a huge value so each row's minimum is
            # the distance to the nearest *other* point.
            D_find = D + np.eye(np.size(D, 1)) * 1e15
            sigma = np.max(np.min(D_find, 1))
            del D_find
        sigma = np.ones(np.size(D, 1)) * sigma
    elif kernel_type == "adaptive":  # per-point (k-NN) bandwidth
        print("adaptive bandwidth specified")
        # k must be a positive integer smaller than the sample count.
        if not (isinstance(k, int) and 0 < k < np.size(D, 1)):
            print("invalid adaptive bandwidth, using k=5 as bandwidth")
            k = 5
        knnDST = np.sort(D, axis=1)  # sorted neighbor distances per row
        sigma = knnDST[:, k]  # distance to the k-th neighbor (0 is self)
        del knnDST
    else:
        raise ValueError(
            "kernel_type must be 'gaussian' or 'adaptive', got %r" % kernel_type)
    W = ((D**2) / sigma[:, np.newaxis]**2).T
    W = np.exp(-1 * (W))
    W = (W + W.T) / 2  # symmetrize
    W = W - np.eye(W.shape[0])  # zero the diagonal (no self-loops)
    return W
def get_all_camera_shapes(full_path=True):
    """
    Returns all camera shapes available in the current scene
    :param full_path: bool, Whether to return full paths to camera nodes or short names
    :return: list(str)
    """
    cameras = maya.cmds.ls(type='camera', long=full_path)
    # ls() may return None when nothing matches; normalize to a list.
    return cameras or list()
def CreateVGGishNetwork(hop_size=0.96):  # Hop size is in seconds.
    """Define VGGish model, load the checkpoint, and return a dictionary that points
    to the different tensors defined by the model.

    Args:
        hop_size: example hop size in seconds, written into vggish_params.

    Returns:
        dict with keys 'features' (input tensor), 'embedding' (output
        tensor) and 'layers' (name -> intermediate tensor mapping).
    """
    # NOTE(review): relies on a module-level TF session `sess` and a
    # 'vggish_model.ckpt' file in the working directory — confirm both
    # exist before calling.
    vggish_slim.define_vggish_slim()
    checkpoint_path = 'vggish_model.ckpt'
    vggish_params.EXAMPLE_HOP_SECONDS = hop_size
    vggish_slim.load_vggish_slim_checkpoint(sess, checkpoint_path)
    features_tensor = sess.graph.get_tensor_by_name(
        vggish_params.INPUT_TENSOR_NAME)
    embedding_tensor = sess.graph.get_tensor_by_name(
        vggish_params.OUTPUT_TENSOR_NAME)
    # Graph node names of the interesting intermediate activations.
    layers = {'conv1': 'vggish/conv1/Relu',
              'pool1': 'vggish/pool1/MaxPool',
              'conv2': 'vggish/conv2/Relu',
              'pool2': 'vggish/pool2/MaxPool',
              'conv3': 'vggish/conv3/conv3_2/Relu',
              'pool3': 'vggish/pool3/MaxPool',
              'conv4': 'vggish/conv4/conv4_2/Relu',
              'pool4': 'vggish/pool4/MaxPool',
              'fc1': 'vggish/fc1/fc1_2/Relu',
              'fc2': 'vggish/fc2/Relu',
              'embedding': 'vggish/embedding',
              'features': 'vggish/input_features',
             }
    g = tf.get_default_graph()
    # Resolve each node name to its output tensor (':0').
    for k in layers:
        layers[k] = g.get_tensor_by_name( layers[k] + ':0')
    return {'features': features_tensor,
            'embedding': embedding_tensor,
            'layers': layers,
           }
def create_self_signed_cert():
    """ Generates self signed SSL certificate.

    Writes the certificate to ssl/server.crt and the private key to
    ssl/server.key (creating the ssl/ directory if needed).
    """
    # Creates key pair.
    # NOTE(review): 1024-bit RSA and SHA-1 (below) are considered weak by
    # modern standards; consider 2048+ bits and sha256 if clients allow.
    k = crypto.PKey()
    k.generate_key(crypto.TYPE_RSA, 1024)
    # Creates self-signed certificate.
    cert = crypto.X509()
    cert.get_subject().C = "US"
    cert.get_subject().ST = "New York"
    cert.get_subject().L = "New York"
    cert.get_subject().O = "."
    cert.get_subject().OU = "."
    cert.get_subject().CN = gethostname()
    cert.set_serial_number(1000)
    # Valid from now for ten years.
    cert.gmtime_adj_notBefore(0)
    cert.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60)
    # Self-signed: issuer == subject, signed with its own key.
    cert.set_issuer(cert.get_subject())
    cert.set_pubkey(k)
    cert.sign(k, "sha1")
    if not os.path.exists("ssl"):
        os.makedirs("ssl")
    with open("ssl/server.crt", "wb") as f:
        f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
    with open("ssl/server.key", "wb") as f:
        f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k))
def _inserir_dados() -> None:
    """Render the Streamlit form used to enter new shift data destined for
    the database.

    Builds a shift selector plus a form with per-line (571/572) text areas
    for Lavadora/SOS/UVBC, general notes, and a submit button wired to
    ``_submit_callback``. Widget values are exposed via their ``key``s in
    ``st.session_state``.
    """
    # Header: shift selector (narrow column) next to an empty filler column.
    st.header("Inserir Dados")
    col_shift, col_empty = st.columns([1,5])
    with col_shift:
        st.selectbox(label="Turno: ", options=["Selecione", "A", "B", "C"], key="sft")
    with col_empty:
        st.empty()
    # Form: two columns, one per production line, then shared fields.
    with st.form(key='form_in', clear_on_submit=False):
        col1, col2 = st.columns(2)
        with col1:
            st.subheader("Linha 571")
            st.text_area("Lavadora", placeholder="Lavadora da Linha 571", key="w1")
            st.text_area("SOS", placeholder="SOS da Linha 571", key="s1")
            st.text_area("UVBC", placeholder="UVBC da Linha 571", key="u1")
        with col2:
            st.subheader("Linha 572")
            st.text_area("Lavadora", placeholder="Lavadora da Linha 572", key="w2")
            st.text_area("SOS", placeholder="SOS da Linha 572", key="s2")
            st.text_area("UVBC", placeholder="UVBC da Linha 572", key="u2")
        st.subheader("Geral")
        st.text_area("Pendências", placeholder="Pendências", key="pends")
        st.text_area("Observações", placeholder="Observações", key="obs")
        st.form_submit_button(label="Enviar", on_click=_submit_callback)
def ping_observing_task(ext_io_connection, ping_ip):
    """
    Watch a ping stream for a network-down event, then block until the
    network comes back up, logging both transitions.

    Here external-IO connection is abstract - we don't know its type.
    What we know is just that it has .moler_connection attribute.

    :param ext_io_connection: external IO connection exposing
        ``.moler_connection``; used as a context manager while observing.
    :param ping_ip: IP address whose ping output is being observed.
    """
    logger = logging.getLogger('moler.user.app-code')
    conn_addr = str(ext_io_connection)
    # Layer 2 of Moler's usage (ext_io_connection + runner):
    # 3. create observers on Moler's connection
    net_down_detector = NetworkDownDetector(ping_ip,
                                            connection=ext_io_connection.moler_connection,
                                            runner=get_runner(variant="asyncio-in-thread"))
    net_up_detector = NetworkUpDetector(ping_ip,
                                        connection=ext_io_connection.moler_connection,
                                        runner=get_runner(variant="asyncio-in-thread"))
    info = '{} on {} using {}'.format(ping_ip, conn_addr, net_down_detector)
    logger.debug('observe ' + info)
    # 4. start observer (nonblocking, using as future)
    net_down_detector.start()  # should be started before we open connection
    # to not loose first data on connection
    with ext_io_connection:
        # 5. await that observer to complete
        try:
            # await_done returns the (epoch) time of the down event.
            net_down_time = net_down_detector.await_done(timeout=10)  # =2 --> TimeoutError
            timestamp = time.strftime("%H:%M:%S", time.localtime(net_down_time))
            logger.debug('Network {} is down from {}'.format(ping_ip, timestamp))
        except ConnectionObserverTimeout:
            # No down event within the timeout; proceed to the up-detector
            # anyway (it may complete immediately if the network is up).
            logger.debug('Network down detector timed out')
        # 6. call next observer (blocking till completes)
        info = '{} on {} using {}'.format(ping_ip, conn_addr, net_up_detector)
        logger.debug('observe ' + info)
        # using as synchronous function (so we want verb to express action)
        detect_network_up = net_up_detector
        net_up_time = detect_network_up()  # if you want timeout - see code above
        timestamp = time.strftime("%H:%M:%S", time.localtime(net_up_time))
        logger.debug('Network {} is back "up" from {}'.format(ping_ip, timestamp))
    logger.debug('exiting ping_observing_task({})'.format(ping_ip))
def YumInstall(vm):
    """Installs the php package on the VM (yum-based distributions).

    Delegates to the shared _Install helper; exists so the benchmark
    framework can dispatch per package manager by function name.
    """
    _Install(vm)
def create_directory_structure():
    """Generates the output mod directory structure under ./out.

    Raises:
        OSError: if a directory cannot be created (pre-existing
        directories are tolerated).
    """
    def _mkdir(target):
        # Tolerate already-existing paths; re-raise any other failure.
        try:
            os.makedirs(target)
        except OSError as err:
            if err.errno != errno.EEXIST:
                raise
    for subdir in ('./out/textures', './out/data'):
        _mkdir(subdir)
def polyMergeFacetCtx(q=1,e=1,anq=1,ex=1,i1="string",i2="string",i3="string",im=1,n="string",pv=1,rs=1,tnq=1,cch=1,ch=1,ff="int",mm="int",nds="int",sf="int"):
    """
    http://help.autodesk.com/cloudhelp/2019/ENU/Maya-Tech-Docs/CommandsPython/polyMergeFacetCtx.html
    -----------------------------------------
    polyMergeFacetCtx is undoable, queryable, and editable.
    The second face becomes a hole in the first face.
    The new holed face is located either on the first, last, or between both
    selected faces, depending on the mode.
    Both faces must belong to the same object.
    Facet flags are mandatory.
    Create a new context to merge facets on polygonal objects
    -----------------------------------------
    Return Value:
    string The node name.
    In query mode, return type is based on queried flag.
    -----------------------------------------
    Flags:
    -----------------------------------------
    anq : activeNodes            [boolean] ['query']
        Return the active nodes in the tool
    -----------------------------------------
    ex : exists                  [boolean] []
        Returns true or false depending upon whether the specified object exists. Other flags are ignored.
    -----------------------------------------
    i1 : image1                  [string] ['query', 'edit']
        First of three possible icons representing the tool associated with the context.
    -----------------------------------------
    i2 : image2                  [string] ['query', 'edit']
        Second of three possible icons representing the tool associated with the context.
    -----------------------------------------
    i3 : image3                  [string] ['query', 'edit']
        Third of three possible icons representing the tool associated with the context.
    -----------------------------------------
    im : immediate               [boolean] ['edit']
        Acts on the object not the tool defaults
    -----------------------------------------
    n : name                     [string] []
        If this is a tool command, name the tool appropriately.
    -----------------------------------------
    pv : previous                [boolean] ['edit']
        Reset to previously stored values
    -----------------------------------------
    rs : reset                   [boolean] ['edit']
        Reset to default values
    -----------------------------------------
    tnq : toolNode               [boolean] ['query']
        Return the node used for tool defaults
    -----------------------------------------
    cch : caching                [boolean] ['query', 'edit']
        Toggle caching for all attributes so that no recomputation is needed
    -----------------------------------------
    ch : constructionHistory     [boolean] ['query']
        Turn the construction history on or off (where applicable). If construction history is on then the corresponding node will be inserted into the history chain for the mesh. If construction history is off then the operation will be performed directly on the object. Note: If the object already has construction history then this flag is ignored and the node will always be inserted into the history chain.
    -----------------------------------------
    ff : firstFacet              [int] ['query', 'edit']
        The number of the first (outer) face to merge.
    -----------------------------------------
    mm : mergeMode               [int] ['query', 'edit']
        This flag specifies how faces are merged: 0: moves second face to first one 1: moves both faces to average 2: moves first face to second one 3, 4, 5: same as above, except faces are projected but not centred 6: Nothing moves. C: Default is None (6).
    -----------------------------------------
    nds : nodeState              [int] ['query', 'edit']
        Maya dependency nodes have 6 possible states. The Normal (0), HasNoEffect (1), and Blocking (2) states can be used to alter how the graph is evaluated. The Waiting-Normal (3), Waiting-HasNoEffect (4), Waiting-Blocking (5) are for internal use only. They temporarily shut off parts of the graph during interaction (e.g., manipulation). The understanding is that once the operation is done, the state will be reset appropriately, e.g. Waiting-Blocking will reset back to Blocking. The Normal and Blocking cases apply to all nodes, while HasNoEffect is node specific; many nodes do not support this option. Plug-ins store state in the MPxNode::state attribute. Anyone can set it or check this attribute.  Additional details about each of these 3 states follow. | State | Description
    -----------------------------------------
    sf : secondFacet             [int]
        The number of the second (hole) face to merge.
    """
    # Auto-generated documentation stub for the Maya command; the actual
    # implementation is provided by Maya at runtime (no body, returns None).
def display_missing_info_OWNERS_files(stats, num_output_depth):
    """Print OWNERS files lacking team/component info, level by level.

    Args:
        stats (dict): Statistics as produced by the owners_file_tags
            module; must contain 'OWNERS-count-by-depth' and
            'OWNERS-missing-info-by-depth'.
        num_output_depth (int): Number of depth levels to display.
            Values outside [0, max depth] mean "show every level".
    """
    print("OWNERS files that have missing team and component by depth:")
    max_output_depth = len(stats['OWNERS-count-by-depth'])
    # Out-of-range requests (negative or too deep) fall back to full depth.
    levels = (num_output_depth
              if 0 <= num_output_depth <= max_output_depth
              else max_output_depth)
    for level in range(levels):
        print('at depth %(depth)d' % {'depth': level})
        print(stats['OWNERS-missing-info-by-depth'][level])
def test_invalid_token(request, login, navigator, access_token):
    """
    Send request via API docs `Authentication Providers Admin Portal List` endpoint
    should return status code 200 for valid access token and 403 for invalid one.
    """
    api_docs = navigator.navigate(APIDocsView)
    # Fetch the (token, expected status) fixture pair once instead of
    # resolving the same fixture twice.
    token, expected_code = request.getfixturevalue(access_token)
    status_code = api_docs\
        .endpoint("Authentication Providers Admin Portal List")\
        .send_request(rawobj.ApiDocParams(token))
    assert status_code == expected_code
def estimate_mpk_parms_1d(
        pk_pos_0, x, f,
        pktype='pvoigt', bgtype='linear',
        fwhm_guess=0.07, center_bnd=0.02
        ):
    """
    Generate function-specific estimates for multi-peak fit parameters.

    Parameters
    ----------
    pk_pos_0 : array_like
        Initial guesses for the peak positions (same units as ``x``).
    x : array_like
        Ordinate values (e.g. two-theta), length ``npts``.
    f : array_like
        Intensity data sampled at ``x``; must have the same length as ``x``.
    pktype : str, optional
        Peak shape: 'gaussian', 'lorentzian', 'pvoigt' (default),
        or 'split_pvoigt'.
    bgtype : str, optional
        Background model: 'linear' (default), 'constant', or 'quadratic'.
    fwhm_guess : float, optional
        Initial guess for each peak's FWHM. The default is 0.07.
    center_bnd : float, optional
        Half-width of the bound placed on each peak center. The default
        is 0.02.

    Returns
    -------
    p0 : numpy.ndarray
        Flattened initial-parameter vector: per-peak parameters first,
        background parameters last.
    bnds : tuple of numpy.ndarray
        ``(lower, upper)`` bound vectors matching ``p0``.
    """
    # NOTE(review): an unrecognized pktype (or bgtype) leaves p0tmp (or p0)
    # unbound and raises NameError at the end -- consider an explicit
    # ValueError for unknown options.
    npts = len(x)
    assert len(f) == npts, "ordinate and data must be same length!"
    num_pks = len(pk_pos_0)
    min_val = np.min(f)
    # estimate background with SNIP1d
    bkg = snip1d(np.atleast_2d(f),
                 w=int(np.floor(0.25*len(f)))).flatten()
    # fit linear bg and grab params
    bp, _ = optimize.curve_fit(lin_fit_obj, x, bkg, jac=lin_fit_jac)
    bg0 = bp[-1]
    bg1 = bp[0]
    if pktype == 'gaussian' or pktype == 'lorentzian':
        # 3 params per peak: amplitude, center, FWHM
        p0tmp = np.zeros([num_pks, 3])
        p0tmp_lb = np.zeros([num_pks, 3])
        p0tmp_ub = np.zeros([num_pks, 3])
        # x is just 2theta values
        # make guess for the initital parameters
        for ii in np.arange(num_pks):
            # nearest sample to the requested peak position
            pt = np.argmin(np.abs(x - pk_pos_0[ii]))
            p0tmp[ii, :] = [
                (f[pt] - min_val),
                pk_pos_0[ii],
                fwhm_guess
            ]
            p0tmp_lb[ii, :] = [
                (f[pt] - min_val)*0.1,
                pk_pos_0[ii] - center_bnd,
                fwhm_guess*0.5
            ]
            p0tmp_ub[ii, :] = [
                (f[pt] - min_val)*10.0,
                pk_pos_0[ii] + center_bnd,
                fwhm_guess*2.0
            ]
    elif pktype == 'pvoigt':
        # 4 params per peak: amplitude, center, FWHM, mixing fraction
        p0tmp = np.zeros([num_pks, 4])
        p0tmp_lb = np.zeros([num_pks, 4])
        p0tmp_ub = np.zeros([num_pks, 4])
        # x is just 2theta values
        # make guess for the initital parameters
        for ii in np.arange(num_pks):
            pt = np.argmin(np.abs(x - pk_pos_0[ii]))
            p0tmp[ii, :] = [
                (f[pt] - min_val),
                pk_pos_0[ii],
                fwhm_guess,
                0.5
            ]
            p0tmp_lb[ii, :] = [
                (f[pt] - min_val)*0.1,
                pk_pos_0[ii] - center_bnd,
                fwhm_guess*0.5,
                0.0
            ]
            # +1. keeps the amplitude upper bound strictly positive even for
            # flat data -- presumably intentional; differs from other branches
            p0tmp_ub[ii, :] = [
                (f[pt] - min_val+1.)*10.0,
                pk_pos_0[ii] + center_bnd,
                fwhm_guess*2.0,
                1.0
            ]
    elif pktype == 'split_pvoigt':
        # 6 params per peak: amplitude, center, left/right FWHM,
        # left/right mixing fractions
        p0tmp = np.zeros([num_pks, 6])
        p0tmp_lb = np.zeros([num_pks, 6])
        p0tmp_ub = np.zeros([num_pks, 6])
        # x is just 2theta values
        # make guess for the initital parameters
        for ii in np.arange(num_pks):
            pt = np.argmin(np.abs(x - pk_pos_0[ii]))
            p0tmp[ii, :] = [
                (f[pt] - min_val),
                pk_pos_0[ii],
                fwhm_guess,
                fwhm_guess,
                0.5,
                0.5
            ]
            p0tmp_lb[ii, :] = [
                (f[pt] - min_val)*0.1,
                pk_pos_0[ii] - center_bnd,
                fwhm_guess*0.5,
                fwhm_guess*0.5,
                0.0,
                0.0
            ]
            p0tmp_ub[ii, :] = [
                (f[pt] - min_val)*10.0,
                pk_pos_0[ii] + center_bnd,
                fwhm_guess*2.0,
                fwhm_guess*2.0,
                1.0,
                1.0
            ]
    # Append background parameters (unbounded; minf/inf are presumably
    # module-level -inf/+inf constants -- confirm).
    if bgtype == 'linear':
        num_pk_parms = len(p0tmp.ravel())
        p0 = np.zeros(num_pk_parms+2)
        lb = np.zeros(num_pk_parms+2)
        ub = np.zeros(num_pk_parms+2)
        p0[:num_pk_parms] = p0tmp.ravel()
        lb[:num_pk_parms] = p0tmp_lb.ravel()
        ub[:num_pk_parms] = p0tmp_ub.ravel()
        p0[-2] = bg0
        p0[-1] = bg1
        lb[-2] = minf
        lb[-1] = minf
        ub[-2] = inf
        ub[-1] = inf
    elif bgtype == 'constant':
        num_pk_parms = len(p0tmp.ravel())
        p0 = np.zeros(num_pk_parms+1)
        lb = np.zeros(num_pk_parms+1)
        ub = np.zeros(num_pk_parms+1)
        p0[:num_pk_parms] = p0tmp.ravel()
        lb[:num_pk_parms] = p0tmp_lb.ravel()
        ub[:num_pk_parms] = p0tmp_ub.ravel()
        p0[-1] = np.average(bkg)
        lb[-1] = minf
        ub[-1] = inf
    elif bgtype == 'quadratic':
        num_pk_parms = len(p0tmp.ravel())
        p0 = np.zeros(num_pk_parms+3)
        lb = np.zeros(num_pk_parms+3)
        ub = np.zeros(num_pk_parms+3)
        p0[:num_pk_parms] = p0tmp.ravel()
        lb[:num_pk_parms] = p0tmp_lb.ravel()
        ub[:num_pk_parms] = p0tmp_ub.ravel()
        # note: only two of the three quadratic coefficients get estimates;
        # the highest-order term starts at 0
        p0[-3] = bg0
        p0[-2] = bg1
        lb[-3] = minf
        lb[-2] = minf
        lb[-1] = minf
        ub[-3] = inf
        ub[-2] = inf
        ub[-1] = inf
    return p0, (lb, ub)
def raise_(exc):
    """Raise *exc*.

    Helper so exceptions can be raised from expression contexts such as
    lambdas, where a ``raise`` statement is not allowed.
    """
    raise exc
async def _parse_collection_from_search(
    request: Request,
) -> Tuple[Optional[str], Optional[str]]:
    """
    Extract the (collection id, item id) pair from a search request.

    The search endpoint is special-cased: GET requests carry both ids in
    the querystring, while POST requests may carry them in a CQL-JSON or
    CQL2-JSON ``filter`` body, or a query/stac-ql body. Returns
    ``(None, None)`` when nothing can be determined.
    """
    method = request.method.lower()
    if method == "get":
        return (
            request.query_params.get("collections"),
            request.query_params.get("ids"),
        )
    if method == "post":
        try:
            body = await request.json()
            if "collections" in body:
                return _parse_queryjson(body)
            if "filter" in body:
                return _parse_cqljson(body["filter"])
        except json.JSONDecodeError:
            logger.warning(
                "Unable to parse search body as JSON. Ignoring collection parameter."
            )
    return (None, None)
def make_cnf_clauses_by_group(N_, board_group, varboard_group):
    """
    Build CNF clauses constraining one sudoku group (e.g. a row).

    :param N_: number of possible symbols per cell
    :param board_group: e.g. a row of sudoku board, of shape (M...)
    :param varboard_group: e.g. a row of sudoku variable id,
         of shape (M..., N_)
    :return: list of CNF clauses (lists of signed variable ids)
    """
    cclauses_local = []
    # Flatten so cells are indexed linearly regardless of group shape.
    board_group = board_group.reshape(-1)
    varboard_group = varboard_group.reshape((board_group.shape[0], -1))
    # inv_oh is a project helper -- presumably an (inverse) one-hot over
    # the symbols already placed in this group (cells with value > 0).
    oh = inv_oh(N_, board_group[board_group > 0])
    # Variable ids belonging to the still-empty cells only.
    vidx = varboard_group[np.where(board_group == 0)[0]]
    poh = (oh > 0)
    # Split variable columns by whether the symbol is flagged in oh.
    ohpvidx = vidx[:, np.where(poh)[0]].T
    ohnvidx = vidx[:, np.where(~poh)[0]].reshape(-1)
    # Flagged symbols: emit precomputed XOR-CNF over each symbol's vars.
    cclauses_local.extend(itertools.chain.from_iterable(
        (cnf.load_precomputed_xorcnf(x.tolist()) for x in ohpvidx)))
    # Unflagged symbols: each variable is forced false (unit clauses).
    cclauses_local.extend((-ohnvidx[:, np.newaxis]).tolist())
    return cclauses_local
def cacheFeatures():
    """
    Utility function to get training data and parse into desired format,
    writing it to file.

    Reads TRAINING_LOCATION (one raw sample per line), converts each
    sample to a feature vector via extract_features, and writes the
    vector plus its SHAPE label to CACHED_LOCATION as
    ``f1,f2,...,fn;label`` lines.
    """
    # 'with' guarantees both files are closed even on error (the original
    # never closed them).
    with open(TRAINING_LOCATION) as training_file:
        raw_lines = training_file.readlines()
    # NOTE(review): "r+" overwrites from the start without truncating, so
    # stale tail content from a previous longer cache may survive --
    # confirm "w" is not actually intended.
    with open(CACHED_LOCATION, "r+") as cached_features:
        cached_features.write("cache ready!\n")
        for line in raw_lines:
            # SECURITY: eval() executes arbitrary code from the training
            # file -- only use with trusted data; prefer ast.literal_eval.
            sample = eval(line.strip())
            label = SHAPE[sample[-1]]
            points = np.array(sample[:-1])
            features = extract_features(points)
            if features:
                # comma-separated features, 9 decimal places, then label
                cached_features.write(
                    ",".join("{:.9f}".format(value) for value in features))
                cached_features.write(";" + str(label) + "\n")
def get_operations(
    archive_action: str, archive_type: str, compression_type: str
) -> Tuple[Operation]:
    """
    Look up the archive handler and (optional) compression handler
    matching the requested action, archive format and compression format.
    """
    archive_handlers = {
        "zip": {"extract": extract_zip_archive, "archive": make_zip_archive},
        "tar": {"extract": extract_tar_archive, "archive": make_tar_archive},
    }
    compression_handlers = {
        "gzip": {"compress": gz_compress, "decompress": gz_decompress},
        "xz": {"compress": xz_compress, "decompress": xz_decompress},
        "bzip2": {"compress": bz2_compress, "decompress": bz2_decompress},
    }
    archive_op = archive_handlers[archive_type][archive_action]
    compression_op = None
    if compression_type is not None:
        # "archive" pairs with compression, "extract" with decompression.
        compression_action = (
            "compress" if archive_action == "archive" else "decompress"
        )
        compression_op = compression_handlers[compression_type][
            compression_action
        ]
    return archive_op, compression_op
def import_spyview_dat(data_dir, filename):
    """Load a SpyView .dat file.

    Returns a np.array in the same shape as the raw .dat file.
    """
    full_path = os.path.join(data_dir, filename)
    with open(full_path) as dat_file:
        return np.loadtxt(dat_file)
def Norm(norm, *args, **kwargs):
    """
    Return an arbitrary `~matplotlib.colors.Normalize` instance, used to
    interpret the `norm` and `norm_kw` arguments passed to plotting
    methods wrapped by `~proplot.axes.cmap_changer`.

    Parameters
    ----------
    norm : str or `~matplotlib.colors.Normalize`
        The normalizer specification. `~matplotlib.colors.Normalize`
        instances are returned unchanged. Otherwise `norm` must name a
        registered normalizer: ``'null'``/``'none'``, ``'diverging'``/
        ``'div'``, ``'segmented'``, ``'linear'``, ``'log'``, ``'power'``,
        or ``'symlog'``. A list or tuple whose first element is a
        registered name supplies its remaining elements as positional
        arguments to the normalizer class.

    Other parameters
    ----------------
    *args, **kwargs
        Passed to the `~matplotlib.colors.Normalize` initializer.

    Returns
    -------
    `~matplotlib.colors.Normalize`
        A `~matplotlib.colors.Normalize` instance.
    """
    if isinstance(norm, mcolors.Normalize):
        return norm
    # A (name, arg1, arg2, ...) sequence carries extra positional args.
    if np.iterable(norm) and not isinstance(norm, str):
        norm, args = norm[0], (*norm[1:], *args)
    if not isinstance(norm, str):
        raise ValueError(f'Invalid norm name {norm!r}. Must be string.')
    if norm not in NORMS:
        raise ValueError(
            f'Unknown normalizer {norm!r}. Options are: '
            + ', '.join(map(repr, NORMS.keys())) + '.'
        )
    # SymLogNorm cannot be constructed without a linear threshold.
    if norm == 'symlog' and not args and 'linthresh' not in kwargs:
        kwargs['linthresh'] = 1  # special case, needs argument
    return NORMS[norm](*args, **kwargs)
def run_trajectory(
        model, time_stop, time_step, initial_state,
        seed, n_points=500, docker=None):
    """
    Run one trajectory using the given model and initial state.

    Parameters
    ----------
    model: str
        smoldyn model description
    time_stop: float
        Simulation duration
    time_step: Float
        Interval between two timesteps
    initial_state: [Mol]
        list of molecules at t=0
    seed: int
        seed used to run smoldyn (a random one is drawn when None)
    n_points: int
        number of time samples
    docker: str
        name of docker container to be used
    """
    effective_seed = seed if seed is not None else npr.randint(10**9)
    input_string = fill_model(
        model, time_stop, time_step, initial_state,
        effective_seed, n_points)
    raw_data = run_smoldyn(input_string, docker)
    # Output is "<history> --Simulation ends-- <final state>".
    history_lines, state_lines = [
        section.strip().split("\n")
        for section in raw_data.split("--Simulation ends--\n")]
    return (parse_history(history_lines), parse_last_state(state_lines))
def get_project_root_dir() -> Path:
    """
    Get the root path of the project.

    Returns:
        Path: parent directory of the script file.
    """
    return _get_script_file().parent
def get_deployment_statuses() -> Dict[str, DeploymentStatusInfo]:
    """Return the status of every running deployment.

    A deployment's status is one of {UPDATING, UNHEALTHY, and HEALTHY}.

    Example:
        >>> from ray.serve.api import get_deployment_statuses
        >>> statuses = get_deployment_statuses() # doctest: +SKIP
        >>> status_info = statuses["deployment_name"] # doctest: +SKIP
        >>> status = status_info.status # doctest: +SKIP
        >>> message = status_info.message # doctest: +SKIP

    Returns:
        Dict[str, DeploymentStatus]: Maps each running deployment's name
        to a DeploymentStatusInfo carrying its status and an explanatory
        message.
    """
    client = get_global_client()
    return client.get_deployment_statuses()
def dumps_tikz(g, scale='0.5em'):
    """Return TikZ code as `str` for `networkx` graph `g`.

    Node attributes used: ``pos`` (required), ``label``, ``angle``,
    ``shape``, ``loop``, ``loop_width``. Edge attributes used:
    ``edge_text``, ``handed``, ``dist``, ``label``, ``color``, ``bend``,
    ``suppress``.
    """
    s = []
    s.append(padding_remove(r"""
    \begin{{tikzpicture}}[
    signal flow,
    pin distance=1pt,
    label distance=-2pt,
    x={scale}, y={scale},
    baseline=(current bounding box.center),
    ]""").format(scale=scale))
    def fix(n):
        # TikZ node names cannot contain dots; wrap in braces too.
        n = str(n)
        return "{" + n.replace('.', '/') + "}"
    for n, d in g.nodes(data=True):
        n = fix(n)
        # label
        label = d.get('label', None)
        angle = d.get('angle', '-45')
        X, Y = d['pos']
        if label is not None:
            label = 'pin={{{ang}: {label}}}'.format(ang=angle, label=label)
        # geometry
        # NOTE(review): node 'color' is read but never used in the node
        # style -- confirm whether it should be included.
        color = d.get('color', None)
        shape = d.get('shape', 'nodeS')
        # style
        style = r', '.join(filter(None, [shape, label]))
        s.append(r'\node[{style}] ({n}) at ({X}, {Y}) {{}};'.format(style=style, n=n, X=X, Y=Y))
    s.append('')
    s.append(r'\path')
    for u, v, d in g.edges(data=True):
        u2 = fix(u)
        v2 = fix(v)
        edge_text = d.get('edge_text', None)
        handed = d.get('handed', 'l')
        # BUG FIX: the distance was read from the 'handed' key, so any
        # edge specifying handedness got 'l'/'r' as its distance value.
        dist = d.get('dist', 0.5)
        label = d.get('label', '')
        color = d.get('color', '')
        bend = d.get('bend', 0)
        suppress = d.get('suppress', False)
        if suppress:
            continue
        if edge_text is None:
            if label:
                label = ' node {{{label}}}'.format(label=label)
            if handed == 'l':
                etype = "sflow={}".format(dist)
            elif handed == 'r':
                etype = "sflow'={}".format(dist)
            else:
                raise NotImplementedError("unknown handedness")
            if bend != 0:
                bend = 'bend right={}'.format(bend)
            else:
                bend = None
            if u == v:
                # self-loop: route in/out around the node's 'loop' angle
                loop = g.nodes[u].get('loop', 70)
                loop_width = g.nodes[u].get('loop_width', 70)
                loop = 'min distance=5mm, in={i}, out={o}, looseness=25'.format(i=loop + loop_width/2, o=loop - loop_width/2)
                bend = None
            else:
                loop = None
            style = r', '.join(filter(None, [etype, bend, loop, color]))
            s.append(r'({u}) edge[{style}]{label} ({v})'.format(style=style, label=label, u=u2, v=v2))
        else:
            # raw edge text overrides all generated styling
            s.append("({u}) {etext} ({v})".format(u=u2, v=v2, etext=edge_text))
    s.append(';')
    s.append(r'\end{tikzpicture}')
    return '\n'.join(s)
def drawModel(ax, model):
    """
    Visualize the model's separating hyperplane on the given axes.

    Evaluates ``model.predict_proba`` over a 100x100 grid spanning the
    current axis limits and shades the region where the positive-class
    probability is below 0.5.
    """
    xs = np.linspace(ax.get_xlim()[0], ax.get_xlim()[1], 100)
    ys = np.linspace(ax.get_ylim()[0], ax.get_ylim()[1], 100)
    grid_x, grid_y = np.meshgrid(xs, ys)
    proba = model.predict_proba(np.c_[grid_x.ravel(), grid_y.ravel()])[:, 1]
    proba = proba.reshape(grid_x.shape)
    ax.contourf(grid_x, grid_y, proba, levels=[0, 0.5], colors=["gray"], alpha=0.4)
    return ax
def keyboard_interrupt(func):
    """Decorator for methods: exit the process cleanly on Ctrl-C.

    When the wrapped method raises KeyboardInterrupt, the instance's
    ``close()`` is called (e.g. to shut down a visualizer) before
    exiting with status 0.
    """
    import functools  # local import keeps the decorator self-contained

    @functools.wraps(func)  # preserve the wrapped method's metadata
    def wrap(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except KeyboardInterrupt:
            self.close()  # this will close the visualizer if necessary
            sys.exit(0)
    return wrap
def correct_mpl(obj):
    """
    This procedure corrects MPL data:
    1.) Throw out data before laser firing (heights < 0).
    2.) Remove background signal.
    3.) Afterpulse Correction - Subtraction of (afterpulse-darkcount).
        NOTE: Currently the Darkcount in VAPS is being calculated as
        the afterpulse at ~30km. But that might not be absolutely
        correct and we will likely start providing darkcount profiles
        ourselves along with other corrections.
    4.) Range Correction.
    5.) Overlap Correction (Multiply).
    Note: Deadtime and darkcount corrections are not being applied yet.
    Parameters
    ----------
    obj : Dataset object
        The ACT object.
    Returns
    -------
    obj : Dataset object
        The ACT Object containing the corrected values.
    """
    # Get some variables before processing begins
    act = obj.act
    # Overlap Correction Variable
    op = obj['overlap_correction'].values[0, :]
    op_height = obj['overlap_correction_heights'].values[0, :]
    # 1 - Remove negative height data
    obj = obj.where(obj.height > 0, drop=True)
    height = obj['height'].values
    # The drop strips out the ACT data so re-populating
    obj.act = act
    # Get indices for calculating background
    var_names = ['signal_return_co_pol', 'signal_return_cross_pol']
    # background is estimated from the last ~50 range gates
    ind = [obj.height.shape[1] - 50, obj.height.shape[1] - 2]
    # Subset last gates into new dataset
    dummy = obj.isel(range_bins=xr.DataArray(np.arange(ind[0], ind[1])))
    # Turn off warnings
    warnings.filterwarnings("ignore")
    # Run through co and cross pol data for corrections
    # (-9998. acts as a missing-value sentinel here)
    co_bg = dummy[var_names[0]]
    co_bg = co_bg.where(co_bg > -9998.)
    co_bg = co_bg.mean(dim='dim_0').values
    x_bg = dummy[var_names[1]]
    x_bg = x_bg.where(x_bg > -9998.)
    x_bg = x_bg.mean(dim='dim_0').values
    # Seems to be the fastest way of removing background signal at the moment
    co_data = obj[var_names[0]].where(obj[var_names[0]] > 0).values
    x_data = obj[var_names[1]].where(obj[var_names[1]] > 0).values
    for i in range(len(obj['time'].values)):
        co_data[i, :] = co_data[i, :] - co_bg[i]
        x_data[i, :] = x_data[i, :] - x_bg[i]
    # After Pulse Correction Variable
    co_ap = obj['afterpulse_correction_co_pol'].values
    x_ap = obj['afterpulse_correction_cross_pol'].values
    for j in range(len(obj['range_bins'].values)):
        # Afterpulse Correction
        co_ap_j = None  # noqa placeholder comment removed
        co_data[:, j] = co_data[:, j] - co_ap[:, j]
        x_data[:, j] = x_data[:, j] - x_ap[:, j]
        # R-Squared Correction
        co_data[:, j] = co_data[:, j] * height[:, j] ** 2.
        x_data[:, j] = x_data[:, j] * height[:, j] ** 2.
        # Overlap Correction (nearest overlap-profile height)
        idx = (np.abs(op_height - height[0, j])).argmin()
        co_data[:, j] = co_data[:, j] * op[idx]
        x_data[:, j] = x_data[:, j] * op[idx]
    # Create the co/cross ratio variable
    ratio = (x_data / co_data) * 100.
    obj['cross_co_ratio'] = obj[var_names[0]].copy(data=ratio)
    # Convert data to decibels
    co_data = 10. * np.log10(co_data)
    x_data = 10. * np.log10(x_data)
    # Write data to object
    obj[var_names[0]].values = co_data
    obj[var_names[1]].values = x_data
    return obj
def main():
    """Export FamPlex relations as BEL statements to ``famplex.bel``."""
    relations = get_relations_df()
    graph = build_relations_graph(relations)
    with open("famplex.bel", "w") as bel_file:
        to_bel(graph, bel_file)
def cutoff_depth(d: int):
    """A cutoff function that searches to depth d."""
    def cutoff(game, state, depth):
        return depth > d
    return cutoff
def players_age_in_days():
    """
    Read players.csv and write players_age_in_days.csv, converting each
    player's age from years to days.

    Uses DictReader/DictWriter. The source file is fully read into a
    list inside its own ``with`` block, so it is closed before the
    output file is opened (the reader iterator is no longer usable after
    the file closes -- hence the materialized list).
    """
    with open("players.csv") as original_file:
        players = list(DictReader(original_file))
    headers = ("name", "position", "batting average", "age in days")
    with open("players_age_in_days.csv", "w") as new_file:
        csv_writer = DictWriter(new_file, fieldnames=headers)
        csv_writer.writeheader()
        for player in players:
            csv_writer.writerow({
                "name": player["name"],
                "position": player["position"],
                "batting average": player["batting average"],
                # header name must match the fieldnames above
                "age in days": years_to_days(player["age"]),
            })
    print("*** New file: players_age_in_days.csv")
def create_contributor_node(d: Dict, label: str = "Contributor") -> Node:
    """ Using the k, v pairs in `d`, create a Node object with those properties.
    Takes k, as-is except for 'uuid', which is cast to int.
    Args:
        d (dict): property k, v pairs
        label (str): The py2neo.Node.__primarylabel__ to assign
    Returns:
        (Node): py2neo.Node instance of type `label`
    """
    uuid = int(d.get('uuid', -1))
    # BUG FIX: the `label` parameter was ignored and "Contributor" was
    # hard-coded, contradicting the documented contract.
    contributor = Node(label,
                       uuid=uuid,
                       name=d.get('name'),
                       github_id=d.get('github_id'),
                       login=d.get('login'),
                       host_type=d.get('host_type'))
    return contributor
def extractLipsHaarCascade(haarDetector, frame):
    """Extract the lip region (lower third of the first detected face).

    Falls back to the resized full grayscale frame when no face is
    detected. The result is always a 150x100 grayscale image.
    """
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = haarDetector.detectMultiScale(gray, 1.3, 5)
    if len(faces) == 0:
        return cv2.resize(gray, (150, 100))
    # Only the first detected face is used.
    x, y, w, h = faces[0]
    lips = gray[y + (2 * h // 3):y + h, x:x + w]
    return cv2.resize(lips, (150, 100))
def we_are_frozen():
    """Return True when running as a py2exe-frozen executable.

    This affects how the program locates its own files.
    """
    return getattr(sys, "frozen", None) is not None
def rgb_to_hex(red, green, blue):
    """Return color as #rrggbb for the given RGB color values."""
    return '#{:02x}{:02x}{:02x}'.format(int(red), int(green), int(blue))
def rescue(
    function: Callable[
        [_SecondType],
        KindN[_RescuableKind, _FirstType, _UpdatedType, _ThirdType],
    ],
) -> Kinded[Callable[
    [KindN[_RescuableKind, _FirstType, _SecondType, _ThirdType]],
    KindN[_RescuableKind, _FirstType, _UpdatedType, _ThirdType],
]]:
    """
    Turns function's input parameter from a regular value to a container.
    In other words, it modifies the function
    signature from: ``a -> Container[b]`` to: ``Container[a] -> Container[b]``
    Similar to :func:`returns.pointfree.bind`, but works for failed containers.
    This is how it should be used:
    .. code:: python
      >>> from returns.pointfree import rescue
      >>> from returns.result import Success, Failure, Result
      >>> def example(argument: int) -> Result[str, int]:
      ...     return Success(argument + 1)
      >>> assert rescue(example)(Success('a')) == Success('a')
      >>> assert rescue(example)(Failure(1)) == Success(2)
    Note, that this function works for all containers with ``.rescue`` method.
    See :class:`returns.interfaces.rescuable.Rescuable` for more info.
    """
    # @kinded restores precise higher-kinded types for the returned callable.
    @kinded
    def factory(
        container: KindN[_RescuableKind, _FirstType, _SecondType, _ThirdType],
    ) -> KindN[_RescuableKind, _FirstType, _UpdatedType, _ThirdType]:
        # Delegates to the container's own ``.rescue`` implementation.
        return internal_rescue(container, function)
    return factory
def disable():
    """
    Disable logging to the FreeCAD console (or STDOUT).

    Only undoes a previous enable performed by this module; another
    imported module may still have logging enabled, so this is not a
    guarantee that all logging stops.
    """
    global _logging_handler
    if _logging_handler is None:
        return
    logging.getLogger().handlers.remove(_logging_handler)
    _logging_handler = None
def test_one_mw_failing(client: FlaskClient):
    """test GET method with one middlewares"""
    response = client.get('/get-with-auth')
    assert response.status_code == 403
    assert not response.json.get('success')
def try_get_resource(_xmlroot, parent_node: str, child_node: str, _lang: str):
    """ Получить ресурс (решение / условия) """
    # BUG FIX: found/path/encoding were never initialized, so when no
    # matching <child_node> existed the final return raised
    # UnboundLocalError. Initialize the "not found" defaults up front.
    found = False
    path = None
    encoding = None
    for tutorial in _xmlroot.find(parent_node).iter(child_node):
        lang = tutorial.attrib['language']
        _type = tutorial.attrib['type']
        # take the first LaTeX resource in the requested language
        if lang == _lang and _type == 'application/x-tex':
            found = True
            path = tutorial.attrib['path']
            encoding = tutorial.attrib['charset']
            break
    return ResourceSearchResult(found, path, encoding)
def main(pprmeta, finder, sorter, assembly, outdir):
    """Parse VirSorter, VirFinder and PPR-Meta outputs and merge the results.
    """
    (hc_contigs, lc_contigs, prophage_contigs,
     sorter_hc, sorter_lc, sorter_prophages) = merge_annotations(
        pprmeta, finder, sorter, assembly)
    # Write each non-empty category to its own FASTA file.
    fasta_outputs = [
        (hc_contigs, "high_confidence_putative_viral_contigs.fna"),
        (lc_contigs, "low_confidence_putative_viral_contigs.fna"),
        (prophage_contigs, "putative_prophages.fna"),
    ]
    at_least_one = False
    for records, fasta_name in fasta_outputs:
        if len(records):
            SeqIO.write(records, join(outdir, fasta_name), "fasta")
            at_least_one = True
    # VirSorter provides some metadata on each annotation
    # - is circular
    # - prophage start and end within a contig
    if sorter_hc or sorter_lc or sorter_prophages:
        with open(join(outdir, "virsorter_metadata.tsv"), "w") as pm_tsv_file:
            tsv_writer = csv.writer(pm_tsv_file, delimiter="\t")
            tsv_writer.writerow(["contig", "category", "circular",
                                 "prophage_start", "prophage_end"])
            tsv_writer.writerows(entry.to_tsv() for entry in sorter_hc.values())
            tsv_writer.writerows(entry.to_tsv() for entry in sorter_lc.values())
            for prophage_list in sorter_prophages.values():
                tsv_writer.writerows(ph.to_tsv() for ph in prophage_list)
    if not at_least_one:
        print("Overall, no putative _viral contigs or prophages were detected"
              " in the analysed metagenomic assembly", file=sys.stderr)
        exit(1)
def test_fail_missing_api(mock_va, config):
    """
    Configuration must raise RuntimeError when the API entry is missing.
    """
    vault = mock_va.return_value
    vault.auth_from_file = Mock(return_value=True)
    vault.load = Mock(return_value=config)
    with pytest.raises(RuntimeError):
        bot.configure("test_config.yaml", "test_vault.yaml", "test_creds.yaml")
def download_song(file_name, content):
    """ Download the audio file from YouTube. """
    _, extension = os.path.splitext(file_name)
    # Only webm/m4a audio streams are supported.
    if extension not in ('.webm', '.m4a'):
        log.debug('No audio streams available for {} type'.format(extension))
        return False
    link = content.getbestaudio(preftype=extension[1:])
    if not link:
        log.debug('No audio streams available')
        return False
    log.debug('Downloading from URL: ' + link.url)
    filepath = os.path.join(const.args.folder, file_name)
    log.debug('Saving to: ' + filepath)
    link.download(filepath=filepath)
    return True
def load_distributed_dataset(split,
                             batch_size,
                             name,
                             drop_remainder,
                             use_bfloat16,
                             normalize=False,
                             with_info=False,
                             proportion=1.0):
  """Loads CIFAR dataset for training or testing.
  Args:
    split: tfds.Split.
    batch_size: The global batch size to use.
    name: A string indicates whether it is cifar10 or cifar100.
    drop_remainder: A boolean indicates whether to drop the remainder of the
      batches. If True, the batch dimension will be static.
    use_bfloat16: data type, bfloat16 precision or float32.
    normalize: Whether to apply mean-std normalization on features.
    with_info: bool.
    proportion: float, the proportion of dataset to be used.
  Returns:
    Tuple of (tf.data.Dataset, tf.data.DatasetInfo) if with_info else only
    the dataset.
  """
  if use_bfloat16:
    dtype = tf.bfloat16
  else:
    dtype = tf.float32
  if proportion == 1.0:
    dataset, ds_info = tfds.load(name,
                                 split=split,
                                 with_info=True,
                                 as_supervised=True)
  else:
    # Pin the TFDS version so the percentage slicing is reproducible.
    name = '{}:3.*.*'.format(name)
    # TODO(ywenxu): consider the case where we have splits of train, val, test.
    if split == tfds.Split.TRAIN:
      split_str = 'train[:{}%]'.format(int(100 * proportion))
    else:
      split_str = 'test[:{}%]'.format(int(100 * proportion))
    dataset, ds_info = tfds.load(name,
                                 split=split_str,
                                 with_info=True,
                                 as_supervised=True)
  # Disable intra-op parallelism to optimize for throughput instead of
  # latency.
  options = tf.data.Options()
  options.experimental_threading.max_intra_op_parallelism = 1
  dataset = dataset.with_options(options)
  # Prefetches a batch at a time to smooth out the time taken to load input
  # files for shuffling and processing.
  if split == tfds.Split.TRAIN:
    dataset_size = ds_info.splits['train'].num_examples
    # Full-dataset shuffle buffer; repeat indefinitely for training.
    dataset = dataset.shuffle(buffer_size=dataset_size).repeat()
  image_shape = ds_info.features['image'].shape
  def preprocess(image, label):
    """Image preprocessing function."""
    if split == tfds.Split.TRAIN:
      # Standard CIFAR augmentation: pad by 4, random crop, random flip.
      image = tf.image.resize_with_crop_or_pad(
          image, image_shape[0] + 4, image_shape[1] + 4)
      image = tf.image.random_crop(image, image_shape)
      image = tf.image.random_flip_left_right(image)
    image = tf.image.convert_image_dtype(image, dtype)
    if normalize:
      # Per-channel CIFAR mean/std constants.
      mean = tf.constant([0.4914, 0.4822, 0.4465])
      std = tf.constant([0.2023, 0.1994, 0.2010])
      image = (image - mean) / std
    label = tf.cast(label, dtype)
    return image, label
  dataset = dataset.map(preprocess,
                        num_parallel_calls=tf.data.experimental.AUTOTUNE)
  dataset = dataset.batch(batch_size, drop_remainder=drop_remainder)
  # Operations between the final prefetch and the get_next call to the
  # iterator will happen synchronously during run time. We prefetch here again
  # to background all of the above processing work and keep it out of the
  # critical training path. Setting buffer_size to tf.contrib.data.AUTOTUNE
  # allows DistributionStrategies to adjust how many batches to fetch based on
  # how many devices are present.
  dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
  if with_info:
    return dataset, ds_info
  return dataset
def get_sample_media():
    """Serve the sample media file named by the ``path`` query argument.

    Returns:
        bytes
    """
    # NOTE(review): `path` comes straight from the request and is passed
    # to send_file unvalidated -- confirm path traversal is mitigated
    # elsewhere.
    media_path = request.args.get("path")
    # `conditional`: support partial content
    return send_file(media_path, conditional=True)
def _is_swiftmodule(path):
"""Predicate to identify Swift modules/interfaces."""
return path.endswith((".swiftmodule", ".swiftinterface")) | 5,323,873 |
def _unpack_array(fmt, buff, offset, count):
    """Unpack an array of items.

    :param fmt: The struct format string
    :type fmt: str
    :param buff: The buffer into which to unpack
    :type buff: buffer
    :param offset: The offset at which to start unpacking
    :type offset: int
    :param count: The number of items in the array
    :type count: int
    """
    items = []
    for _ in range(count):
        item, offset = _unpack(fmt, buff, offset)
        items.append(item)
    if len(fmt) == 1:
        # Single-field formats unpack to 1-tuples; flatten to scalars.
        items = list(itertools.chain.from_iterable(items))
    return items, offset
def _add_column_and_sort_table(sources, pointing_position):
    """Add "separation" and "phi" columns and sort by separation.

    Parameters
    ----------
    sources : `~astropy.table.Table`
        Table of excluded sources.
    pointing_position : `~astropy.coordinates.SkyCoord`
        Coordinates of the pointing position

    Returns
    -------
    sources : `~astropy.table.Table`
        Copy of the input table with "separation" (offset from the
        source) and "phi" (position angle from the source) columns,
        sorted by "separation".
    """
    table = sources.copy()
    positions = SkyCoord(table["RA"], table["DEC"], unit="deg")
    table["separation"] = pointing_position.separation(positions)
    table["phi"] = pointing_position.position_angle(positions)
    table.sort("separation")
    return table
async def test_get_config_parameters(hass, multisensor_6, integration, hass_ws_client):
    """Test the get config parameters websocket command."""
    entry = integration
    ws_client = await hass_ws_client(hass)
    node = multisensor_6
    # Test getting configuration parameter values
    await ws_client.send_json(
        {
            ID: 4,
            TYPE: "zwave_js/get_config_parameters",
            ENTRY_ID: entry.entry_id,
            NODE_ID: node.node_id,
        }
    )
    msg = await ws_client.receive_json()
    result = msg["result"]
    # the multisensor_6 fixture exposes 61 config parameters
    assert len(result) == 61
    # result keys look like "<node>-<command class>-<endpoint>-<property>[-<property key>]"
    key = "52-112-0-2"
    assert result[key]["property"] == 2
    assert result[key]["property_key"] is None
    assert result[key]["metadata"]["type"] == "number"
    assert result[key]["configuration_value_type"] == "enumerated"
    assert result[key]["metadata"]["states"]
    key = "52-112-0-201-255"
    assert result[key]["property_key"] == 255
    # Test getting non-existent node config params fails
    await ws_client.send_json(
        {
            ID: 5,
            TYPE: "zwave_js/get_config_parameters",
            ENTRY_ID: entry.entry_id,
            NODE_ID: 99999,
        }
    )
    msg = await ws_client.receive_json()
    assert not msg["success"]
    assert msg["error"]["code"] == ERR_NOT_FOUND
    # Test sending command with not loaded entry fails
    await hass.config_entries.async_unload(entry.entry_id)
    await hass.async_block_till_done()
    await ws_client.send_json(
        {
            ID: 6,
            TYPE: "zwave_js/get_config_parameters",
            ENTRY_ID: entry.entry_id,
            NODE_ID: node.node_id,
        }
    )
    msg = await ws_client.receive_json()
    assert not msg["success"]
    assert msg["error"]["code"] == ERR_NOT_LOADED
async def test_wifi_hotspot_start_stop(
    lifecycle: WiFiHotspotLifeCycle,
) -> None:
    """Test that we can start and stop the hotspot."""
    assert not lifecycle._running
    # Cycle the hotspot several times to verify start/stop is repeatable.
    for _ in range(3):
        # Kick the hotspot off in the background and give it time to spin up.
        asyncio.ensure_future(lifecycle.run_hotspot())
        await asyncio.sleep(0.02)

        assert lifecycle._proc is not None
        # Starting should have generated a config file on disk.
        assert lifecycle._config_file is not None
        generated_config = Path(lifecycle._config_file.name)
        assert lifecycle._running

        # Shut it down; the process handle and config file must be cleaned up.
        await lifecycle.stop_hotspot()
        assert not lifecycle._running
        assert lifecycle._proc is None
        assert lifecycle._config_file is not None
        assert not generated_config.exists()
def jsmin(content):
    """ Minify your JavaScript code.

    Use `jsmin <https://pypi.python.org/pypi/jsmin>`_ to compress JavaScript.
    You must manually install jsmin if you want to use this processor.

    Args:
        content: your JavaScript code

    Returns:
        the minified version of your JavaScript code, or the original content
        if the Flask application is in Debug mode

    Raises:
        CompressorProcessorException: if jsmin is not installed.
    """
    # Imported lazily so the dependency is only required when this
    # processor is actually used.
    try:
        from jsmin import jsmin as jsmin_processor
    except ImportError:
        raise CompressorProcessorException("'jsmin' is not installed. Please"
                                           " install it if you want to use "
                                           "the 'jsmin' processor.")

    # Skip minification in debug mode so line numbers and stack traces in
    # the browser remain readable.  (Truthiness test instead of the
    # non-idiomatic `is True` identity comparison.)
    if current_app.debug:
        return content
    return jsmin_processor(content)
def quit_app():
    """Terminate the running script by raising ``SystemExit``."""
    # Equivalent to sys.exit(): raises SystemExit with a None exit code.
    raise SystemExit()
def generate_server_config() -> IO[bytes]:
    """Returns a temporary generated file for use as the server config.

    Detects attached STM32F429I boards and writes one test-runner entry per
    board into a NamedTemporaryFile.  Exits the process with status 1 when
    no boards are found.

    Returns:
        The open temporary file object.  NamedTemporaryFile deletes the file
        when closed, so the caller must keep the returned object alive for
        as long as the config is needed.
    """
    boards = stm32f429i_detector.detect_boards()
    if not boards:
        # No hardware to run against; abort rather than emit an empty config.
        _LOG.critical('No attached boards detected')
        sys.exit(1)
    config_file = tempfile.NamedTemporaryFile()
    _LOG.debug('Generating test server config at %s', config_file.name)
    _LOG.debug('Found %d attached devices', len(boards))
    for board in boards:
        # Each board gets its own runner entry, addressed by its ST-Link
        # serial number and serial device path.
        test_runner_args = [
            '--stlink-serial', board.serial_number, '--port', board.dev_name
        ]
        config_file.write(
            generate_runner(_TEST_RUNNER_COMMAND,
                            test_runner_args).encode('utf-8'))
    config_file.flush()
    return config_file
def smooth_l1_loss_detectron2(input, target, beta: float, reduction: str = "none"):
    """Compute the Smooth L1 loss from the Fast R-CNN paper.

    Per-element:
        0.5 * x**2 / beta   if |x| < beta
        |x| - 0.5 * beta    otherwise,
    where x = input - target.  Equal to huber(x) / beta; unlike Huber loss,
    the linear segment always has slope 1 regardless of beta.  For
    beta < 1e-5 this degenerates to plain L1 loss.

    Args:
        input (Tensor): input tensor of any shape.
        target (Tensor): target tensor with the same shape as input.
        beta (float): the L1-to-L2 change point; values below 1e-5 give L1.
        reduction: 'none' (elementwise), 'mean', or 'sum'.

    Returns:
        The loss tensor with the requested reduction applied.

    Note:
        torch.nn.SmoothL1Loss implements only the beta=1 special case.
    """
    abs_diff = torch.abs(input - target)
    if beta < 1e-5:
        # torch.where with beta == 0 would yield NaN gradients from the
        # quadratic branch ("0.5 * n ** 2 / 0" receives zero incoming
        # gradient rather than no gradient), so tiny betas use exact L1.
        loss = abs_diff
    else:
        quadratic = 0.5 * abs_diff ** 2 / beta
        linear = abs_diff - 0.5 * beta
        loss = torch.where(abs_diff < beta, quadratic, linear)

    if reduction == "sum":
        return loss.sum()
    if reduction == "mean":
        return loss.mean()
    return loss
def png_to_jpg(png_path, jpg_path):
    """ convert image format: png -> jpg, then save picture with jpg

    Args:
        png_path (str): path of the source PNG image
        jpg_path (str): path the JPEG is written to
    Return:
        True on success, False on any decode/encode failure (bool)
    """
    try:
        # Context manager closes the image file handle even on failure;
        # the original left it open.
        with Image.open(png_path) as img:
            # convert('RGB') drops an alpha channel if present, so the
            # original's explicit split/merge branch for RGBA images
            # was redundant.  quality=100 keeps maximum JPEG fidelity.
            img.convert('RGB').save(jpg_path, quality=100)
        return True
    except Exception:
        # Best-effort conversion: any failure (unreadable file, bad image
        # data, write error) maps to False, matching the documented contract.
        return False
def split_lines_to_df(in_lines_trunc_df):
    """
    Parse a column of raw CSV lines into a DataFrame.

    Each string in column `0` represents one CSV line, and lines may have
    different numbers of separators; the column count is sized to the
    widest line.

    in_lines_trunc_df: Assumes that the relevant column is `0`
    Returns: The resulting DataFrame
    """
    raw_lines = in_lines_trunc_df[0]
    # The widest row determines how many columns pandas must allocate.
    n_columns = raw_lines.str.count(INPUT_SEPARATOR).max() + 1
    with warnings.catch_warnings():
        # Ignore dtype warnings at this point, because we check them later on (after casting)
        warnings.filterwarnings(
            "ignore", message='.*Specify dtype option on import or set low_memory=False',
            category=pd.errors.DtypeWarning,
        )
        with io.StringIO('\n'.join(raw_lines)) as line_stream:
            parsed = pd.read_csv(
                line_stream, header=None, index_col=0, sep=INPUT_SEPARATOR,
                names=range(n_columns),
            ).rename_axis(index=ROW_ID_NAME)
    return parsed
def run_front(url_prefix='', port=None):
    """Launch the application with the front-end view set registered."""
    front_view_registrars = [add_views_front]
    run_app(front_view_registrars, url_prefix, port)
def init_seed(seed):
    """Seed all RNGs (Python, NumPy, PyTorch CPU/CUDA) for reproducibility.

    Also disables cuDNN and forces deterministic behavior.  The original
    code assigned ``torch.cuda.cudnn_enabled``, which is not a real PyTorch
    flag (it merely created an unused attribute); the actual switch is
    ``torch.backends.cudnn.enabled``.

    Args:
        seed: the seed value applied to every random number generator.
    """
    # cuDNN picks non-deterministic algorithms (its benchmark mode searches
    # for the fastest convolution per layer at startup).  Disabling cuDNN and
    # forcing deterministic kernels trades speed for run-to-run
    # reproducibility of the forward pass.
    torch.backends.cudnn.enabled = False
    torch.backends.cudnn.deterministic = True
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Safe no-op on machines without CUDA.
    torch.cuda.manual_seed(seed)
def generate_full_vast_beleg_ids_request_xml(form_data, th_fields=None, use_testmerker=False):
    """ Generates the full xml for the Verfahren "ElsterDatenabholung" and the Datenart "ElsterVaStDaten",
    including "Anfrage" field.
    An example xml can be found in the Eric documentation under
    common/Schnittstellenbeschreibungen/Sonstige/ElsterDatenabholung/Beispiele/1_ElsterDatenabholung_Liste_Anfrage.xml """
    # Fall back to the default transfer-header fields when none are supplied.
    effective_th_fields = th_fields or get_vast_beleg_ids_request_th_fields(use_testmerker)
    return generate_full_xml(
        effective_th_fields,
        _add_vast_xml_nutzdaten_header,
        _add_vast_beleg_ids_request_nutzdaten,
        form_data,
    )
def apply_recursive(dic, func):
    """Recursively apply ``func`` to every dict nested inside ``dic``.

    Args:
        dic: usually a dictionary, e.g. a 'target' or 'condition' child
            node.  It can also be a child dict or child list of these
            nodes/dicts.
        func: a callable applied to each dictionary encountered, taking
            the dictionary as its only argument.
    """
    if isinstance(dic, dict):
        func(dic)
        # Descend into any nested containers held as values (the keys
        # themselves are never containers, so only values are inspected).
        for val in dic.values():
            if isinstance(val, (dict, list)):
                apply_recursive(val, func)
    elif isinstance(dic, list):
        for elem in dic:
            apply_recursive(elem, func)
def get_q_vocab(ques, count_thr=0, insert_unk=False):
    """Build a vocabulary from tokenized questions.

    Fix: the original used Python-2-only ``iteritems``/``itervalues``,
    which raise AttributeError on Python 3 (this file uses f-strings, so
    it targets Python 3); replaced with ``items``/``values``.

    Args:
        ques: ques[qid] = {tokenized_question, ...}
        count_thr: int, minimum count threshold (words with count <= thr
            are excluded)
        insert_unk: bool, whether to append the special '<UNK>' token
    Return:
        vocab: list of vocab words
    """
    # Count word frequencies across all tokenized questions.
    counts = {}
    for qid, content in ques.items():
        for word in content['tokenized_question']:
            counts[word] = counts.get(word, 0) + 1

    cw = sorted([(count, w) for w, count in counts.items()], reverse=True)
    print('top words and their counts:')
    print('\n'.join(map(str, cw[:20])))

    total_words = sum(counts.values())
    print('total words:', total_words)
    # Words at or below the threshold are dropped (they will map to UNK).
    bad_words = [w for w, n in counts.items() if n <= count_thr]
    vocab = [w for w, n in counts.items() if n > count_thr]
    bad_count = sum(counts[w] for w in bad_words)
    print('number of bad words: %d/%d = %.2f%%' %
          (len(bad_words), len(counts), len(bad_words)*100.0/len(counts)))
    print('number of words in vocab would be %d' % (len(vocab), ))
    print('number of UNKs: %d/%d = %.2f%%' %
          (bad_count, total_words, bad_count*100.0/total_words))

    if insert_unk:
        print('inserting the special UNK token')
        vocab.append('<UNK>')
    return vocab
def update_callable(buildable: Buildable,
                    new_callable: TypeOrCallableProducingT):
  """Updates `config` to build `new_callable` instead.

  When extending a base configuration, it can often be useful to swap one class
  for another. For example, an experiment may want to swap in a subclass that
  has augmented functionality.

  `update_callable` updates `config` in-place (preserving argument history).

  Args:
    buildable: A `Buildable` (e.g. a `fdl.Config`) to mutate.
    new_callable: The new callable `config` should call when built.

  Raises:
    TypeError: if `new_callable` has varargs, or if there are arguments set on
      `config` that are invalid to pass to `new_callable`.
  """
  # TODO: Consider adding a "drop_invalid_args: bool = False" argument.

  # Note: can't just call config.__init__(new_callable, **config.__arguments__)
  # to preserve history.
  #
  # Note: can't call `setattr` on all the args to validate them, because that
  # will result in duplicate history entries.
  original_args = buildable.__arguments__
  signature = inspect.signature(new_callable)
  if any(param.kind == param.VAR_POSITIONAL
         for param in signature.parameters.values()):
    raise NotImplementedError(
        'Variable positional arguments (aka `*args`) not supported.')
  has_var_keyword = any(param.kind == param.VAR_KEYWORD
                        for param in signature.parameters.values())
  if not has_var_keyword:
    # Without a **kwargs catch-all, every argument already set on the
    # buildable must exist by name in the new signature, or building
    # would fail later.
    invalid_args = [
        arg for arg in original_args.keys() if arg not in signature.parameters
    ]
    if invalid_args:
      raise TypeError(f'Cannot switch to {new_callable} (from '
                      f'{buildable.__fn_or_cls__}) because the Buildable would '
                      f'have invalid arguments {invalid_args}.')

  # object.__setattr__ bypasses Buildable's own __setattr__, so these
  # internal fields are swapped without triggering argument
  # validation/history side effects.
  object.__setattr__(buildable, '__fn_or_cls__', new_callable)
  object.__setattr__(buildable, '__signature__', signature)
  object.__setattr__(buildable, '_has_var_keyword', has_var_keyword)
  # Record the swap in the argument history so provenance is preserved.
  buildable.__argument_history__['__fn_or_cls__'].append(
      history.entry('__fn_or_cls__', new_callable))
def get_desc_dist(descriptors1, descriptors2):
    """ Given two lists of descriptors compute the descriptor distance
    between each pair of feature.

    Note: the returned value is the *negative* dot product between each
    descriptor pair, so smaller values mean more similar descriptors.
    """
    return -descriptors1 @ descriptors2.transpose()
def test_multiple_speakers():
    """
    Test output exists with multiple speaker input

    # GIVEN a sample file containing multiple speakers
    # WHEN calling tscribe.write(...)
    # THEN produce the .docx without errors
    """
    # Setup: verify the multi-speaker fixture is present before running.
    input_file = "sample_material/03-speaker-identification.json"
    output_file = "sample_material/03-speaker-identification.docx"
    assert os.access(input_file, os.F_OK), "Input file not found"

    # Exercise the conversion and confirm the document was produced.
    tscribe.write(input_file)
    assert os.access(output_file, os.F_OK), "Output file not found"

    # Teardown: remove generated artifacts so the test is repeatable.
    for artifact in (output_file, "sample_material/chart.png"):
        os.remove(artifact)
def _parse_cells_icdar(xml_table):
    """
    Gets the table cells from a table in ICDAR-XML format.

    Args:
        xml_table: an XML element containing ``cell`` descendant elements.

    Returns:
        list of ``Cell`` objects, one per ``cell`` element, with ids
        assigned in document order starting at 0.
    """
    cells = []
    # enumerate replaces the original's manually-incremented cell_id counter.
    for cell_id, xml_cell in enumerate(xml_table.findall(".//cell")):
        text = get_text(xml_cell)
        start_row = get_attribute(xml_cell, "start-row")
        start_col = get_attribute(xml_cell, "start-col")
        end_row = get_attribute(xml_cell, "end-row")
        end_col = get_attribute(xml_cell, "end-col")
        cells.append(Cell(cell_id, text, start_row, start_col, end_row, end_col))
    return cells
def download(link: str,
             method: str = "GET",
             to_file: Optional[BinaryIO] = None,
             headers: Optional[dict] = None,
             allow_redirects: bool = True,
             max_retries: int = 3) -> "Response":
    """
    Return Response named tuple
    Response.response - requests.Response object
    Response.size - size of downloaded file, 0 if to_file is None
    Response.hash - md5 hash of the downloaded file, empty string if to_file is None

    Retries up to ``max_retries`` times with exponential backoff on HTTP
    errors (other than 404) and timeouts; connection errors and other
    request exceptions are raised immediately without retrying.
    """
    # Backoff schedule: 2s, 4s, 8s, ... one entry per allowed retry.
    exp_delay = [2**(x+1) for x in range(max_retries)]
    retry_count = 0
    query = requests.Request(method, link)
    query = TWITTER_SESSION.prepare_request(query)
    LOGGER.debug("Making %s request to %s", method, link)
    if headers:
        query.headers.update(headers)
    while True:
        try:
            response = TWITTER_SESSION.send(query, allow_redirects=allow_redirects, stream=True, timeout=15)
            response.raise_for_status()
            if to_file:
                # Stream the body to the file while accumulating the size
                # and the MD5 digest in a single pass (3 MiB chunks).
                size = 0
                md5_hash = md5()
                for chunk in response.iter_content(chunk_size=(1024**2)*3):
                    to_file.write(chunk)
                    md5_hash.update(chunk)
                    size += len(chunk)
                #LOGGER.info("left=%s right=%s", size, response.headers["content-length"])
                # NOTE(review): this assert is stripped under `python -O`
                # and assumes the server sent a Content-Length header —
                # confirm this validation is intentional.
                assert size == int(response.headers["content-length"])
                return Response(response=response, size=size, hash=md5_hash.hexdigest())
            return Response(response)
        except requests.HTTPError:
            LOGGER.error("Received HTTP error code %s", response.status_code)
            # 404 is treated as permanent: retrying would not help.
            if response.status_code in [404] or retry_count >= max_retries:
                raise
        except requests.Timeout:
            LOGGER.error("Connection timed out")
            if retry_count >= max_retries:
                raise
        except requests.ConnectionError:
            LOGGER.error("Could not establish a new connection")
            #most likely a client-side connection error, do not retry
            raise
        except requests.RequestException as err:
            # Unknown failure mode: log the full request for debugging,
            # then propagate without retrying.
            LOGGER.error("Unexpected request exception")
            LOGGER.error("request url = %s", query.url)
            LOGGER.error("request method = %s", query.method)
            LOGGER.error("request headers = %s", query.headers)
            LOGGER.error("request body = %s", query.body)
            raise err
        retry_count += 1
        delay = exp_delay[retry_count-1]
        print(f"Retrying ({retry_count}/{max_retries}) in {delay}s")
        LOGGER.error("Retrying (%s/%s) in %ss", retry_count, max_retries, delay)
        time.sleep(delay)
def get_mem_usage():
    """returns percentage and vsz mem usage of this script"""
    ps_output = os.popen("ps -p %s u" % os.getpid()).read()
    # First line is the ps header; the second holds this process's stats.
    fields = ps_output.split("\n")[1].split()
    # In `ps u` output, field 3 is %MEM and field 4 is VSZ.
    return float(fields[3]), int(fields[4])
def download_file(url, local_folder=None):
    """Downloads file pointed to by `url`.

    If `local_folder` is not supplied, downloads to the current folder.

    Args:
        url: the URL to fetch; the basename of its path becomes the filename.
        local_folder: optional directory to place the downloaded file in.

    Returns:
        The path of the written file.

    Raises:
        Exception: if the server responds with a status other than 200.
    """
    filename = os.path.basename(url)
    if local_folder:
        filename = os.path.join(local_folder, filename)

    # Download the file
    print("Downloading: " + url)
    # The context manager guarantees the connection is released; the
    # original never closed the response.
    with requests.get(url, stream=True) as response:
        if response.status_code != 200:
            raise Exception("download file failed with status code: %d, fetching url '%s'"
                            % (response.status_code, url))
        # Write the file to disk in chunks.  The original accessed
        # response.content, which buffers the entire body in memory and
        # defeats the stream=True request.
        with open(filename, "wb") as handle:
            for chunk in response.iter_content(chunk_size=64 * 1024):
                handle.write(chunk)
    return filename
def test_ap_wpa2_psk_supp_proto_unexpected_group_msg(dev, apdev):
    """WPA2-PSK supplicant protocol testing: unexpected group message"""
    (bssid,ssid,hapd,snonce,pmk,addr,rsne) = eapol_test(apdev[0], dev[0])

    # Wait for EAPOL-Key msg 1/4 from hostapd to determine when associated
    msg = recv_eapol(hapd)
    dev[0].dump_monitor()

    # Build own EAPOL-Key msg 1/4 with a fixed ANonce so the PTK derived
    # below matches what the supplicant computes.
    anonce = binascii.unhexlify('2222222222222222222222222222222222222222222222222222222222222222')
    counter = 1
    msg = build_eapol_key_1_4(anonce, replay_counter=counter)
    counter += 1
    send_eapol(dev[0], bssid, build_eapol(msg))
    msg = recv_eapol(dev[0])
    snonce = msg['rsn_key_nonce']
    # Derive PTK (and its KCK/KEK parts) so the forged frame below can be
    # properly MIC'ed and its key data encrypted.
    (ptk, kck, kek) = pmk_to_ptk(pmk, addr, bssid, snonce, anonce)

    logger.debug("Group key 1/2 instead of msg 3/4")
    dev[0].dump_monitor()
    wrapped = aes_wrap(kek, binascii.unhexlify('dd16000fac010100dc11188831bf4aa4a8678d2b41498618'))
    # key_info=0x13c2 marks this as a group-key handshake message, sent
    # before the 4-way handshake has completed.
    msg = build_eapol_key_3_4(anonce, kck, wrapped, replay_counter=counter,
                              key_info=0x13c2)
    counter += 1
    send_eapol(dev[0], bssid, build_eapol(msg))
    ev = dev[0].wait_event(["WPA: Group Key Handshake started prior to completion of 4-way handshake"])
    if ev is None:
        raise Exception("Unexpected group key message not reported")
    # The supplicant must reject the premature group message and disconnect.
    dev[0].wait_disconnected(timeout=1)
def gist_ncar(range, **traits):
""" Generator for the 'gist_ncar' colormap from GIST.
"""
_data = dict(
red = [(0.0, 0.0, 0.0),
(0.0050505050458014011, 0.0, 0.0),
(0.010101010091602802, 0.0, 0.0),
(0.015151515603065491, 0.0, 0.0),
(0.020202020183205605, 0.0, 0.0),
(0.025252524763345718, 0.0, 0.0),
(0.030303031206130981, 0.0, 0.0),
(0.035353533923625946, 0.0, 0.0),
(0.040404040366411209, 0.0, 0.0),
(0.045454546809196472, 0.0, 0.0),
(0.050505049526691437, 0.0, 0.0),
(0.0555555559694767, 0.0, 0.0),
(0.060606062412261963, 0.0, 0.0),
(0.065656565129756927, 0.0, 0.0),
(0.070707067847251892, 0.0, 0.0),
(0.075757578015327454, 0.0, 0.0),
(0.080808080732822418, 0.0, 0.0),
(0.085858583450317383, 0.0, 0.0),
(0.090909093618392944, 0.0, 0.0),
(0.095959596335887909, 0.0, 0.0),
(0.10101009905338287, 0.0, 0.0),
(0.10606060922145844, 0.0, 0.0),
(0.1111111119389534, 0.0, 0.0),
(0.11616161465644836, 0.0, 0.0),
(0.12121212482452393, 0.0, 0.0),
(0.12626262009143829, 0.0, 0.0),
(0.13131313025951385, 0.0, 0.0),
(0.13636364042758942, 0.0, 0.0),
(0.14141413569450378, 0.0, 0.0),
(0.14646464586257935, 0.0, 0.0),
(0.15151515603065491, 0.0, 0.0),
(0.15656565129756927, 0.0, 0.0),
(0.16161616146564484, 0.0, 0.0),
(0.1666666716337204, 0.0, 0.0),
(0.17171716690063477, 0.0, 0.0),
(0.17676767706871033, 0.0, 0.0),
(0.18181818723678589, 0.0, 0.0),
(0.18686868250370026, 0.0, 0.0),
(0.19191919267177582, 0.0, 0.0),
(0.19696970283985138, 0.0, 0.0),
(0.20202019810676575, 0.0, 0.0),
(0.20707070827484131, 0.0, 0.0),
(0.21212121844291687, 0.0, 0.0),
(0.21717171370983124, 0.0, 0.0),
(0.2222222238779068, 0.0, 0.0),
(0.22727273404598236, 0.0, 0.0),
(0.23232322931289673, 0.0, 0.0),
(0.23737373948097229, 0.0, 0.0),
(0.24242424964904785, 0.0, 0.0),
(0.24747474491596222, 0.0, 0.0),
(0.25252524018287659, 0.0, 0.0),
(0.25757575035095215, 0.0, 0.0),
(0.26262626051902771, 0.0, 0.0),
(0.26767677068710327, 0.0, 0.0),
(0.27272728085517883, 0.0, 0.0),
(0.27777779102325439, 0.0, 0.0),
(0.28282827138900757, 0.0, 0.0),
(0.28787878155708313, 0.0, 0.0),
(0.29292929172515869, 0.0, 0.0),
(0.29797980189323425, 0.0, 0.0),
(0.30303031206130981, 0.0, 0.0),
(0.30808082222938538, 0.0, 0.0),
(0.31313130259513855, 0.0, 0.0),
(0.31818181276321411, 0.0039215688593685627, 0.0039215688593685627),
(0.32323232293128967, 0.043137256056070328, 0.043137256056070328),
(0.32828283309936523, 0.08235294371843338, 0.08235294371843338),
(0.3333333432674408, 0.11764705926179886, 0.11764705926179886),
(0.33838382363319397, 0.15686275064945221, 0.15686275064945221),
(0.34343433380126953, 0.19607843458652496, 0.19607843458652496),
(0.34848484396934509, 0.23137255012989044, 0.23137255012989044),
(0.35353535413742065, 0.27058824896812439, 0.27058824896812439),
(0.35858586430549622, 0.30980393290519714, 0.30980393290519714),
(0.36363637447357178, 0.3490196168422699, 0.3490196168422699),
(0.36868685483932495, 0.38431373238563538, 0.38431373238563538),
(0.37373736500740051, 0.40392157435417175, 0.40392157435417175),
(0.37878787517547607, 0.41568627953529358, 0.41568627953529358),
(0.38383838534355164, 0.42352941632270813, 0.42352941632270813),
(0.3888888955116272, 0.43137255311012268, 0.43137255311012268),
(0.39393940567970276, 0.44313725829124451, 0.44313725829124451),
(0.39898988604545593, 0.45098039507865906, 0.45098039507865906),
(0.40404039621353149, 0.45882353186607361, 0.45882353186607361),
(0.40909090638160706, 0.47058823704719543, 0.47058823704719543),
(0.41414141654968262, 0.47843137383460999, 0.47843137383460999),
(0.41919192671775818, 0.49019607901573181, 0.49019607901573181),
(0.42424243688583374, 0.50196081399917603, 0.50196081399917603),
(0.42929291725158691, 0.52549022436141968, 0.52549022436141968),
(0.43434342741966248, 0.54901963472366333, 0.54901963472366333),
(0.43939393758773804, 0.57254904508590698, 0.57254904508590698),
(0.4444444477558136, 0.60000002384185791, 0.60000002384185791),
(0.44949495792388916, 0.62352943420410156, 0.62352943420410156),
(0.45454546809196472, 0.64705884456634521, 0.64705884456634521),
(0.4595959484577179, 0.67058825492858887, 0.67058825492858887),
(0.46464645862579346, 0.69411766529083252, 0.69411766529083252),
(0.46969696879386902, 0.72156864404678345, 0.72156864404678345),
(0.47474747896194458, 0.7450980544090271, 0.7450980544090271),
(0.47979798913002014, 0.76862746477127075, 0.76862746477127075),
(0.4848484992980957, 0.7921568751335144, 0.7921568751335144),
(0.48989897966384888, 0.81568628549575806, 0.81568628549575806),
(0.49494948983192444, 0.83921569585800171, 0.83921569585800171),
(0.5, 0.86274510622024536, 0.86274510622024536),
(0.50505048036575317, 0.88627451658248901, 0.88627451658248901),
(0.51010102033615112, 0.90980392694473267, 0.90980392694473267),
(0.5151515007019043, 0.93333333730697632, 0.93333333730697632),
(0.52020204067230225, 0.95686274766921997, 0.95686274766921997),
(0.52525252103805542, 0.98039215803146362, 0.98039215803146362),
(0.53030300140380859, 1.0, 1.0),
(0.53535354137420654, 1.0, 1.0),
(0.54040402173995972, 1.0, 1.0),
(0.54545456171035767, 1.0, 1.0),
(0.55050504207611084, 1.0, 1.0),
(0.55555558204650879, 1.0, 1.0),
(0.56060606241226196, 1.0, 1.0),
(0.56565654277801514, 1.0, 1.0),
(0.57070708274841309, 1.0, 1.0),
(0.57575756311416626, 1.0, 1.0),
(0.58080810308456421, 1.0, 1.0),
(0.58585858345031738, 1.0, 1.0),
(0.59090906381607056, 1.0, 1.0),
(0.59595960378646851, 1.0, 1.0),
(0.60101008415222168, 1.0, 1.0),
(0.60606062412261963, 1.0, 1.0),
(0.6111111044883728, 1.0, 1.0),
(0.61616164445877075, 1.0, 1.0),
(0.62121212482452393, 1.0, 1.0),
(0.6262626051902771, 1.0, 1.0),
(0.63131314516067505, 1.0, 1.0),
(0.63636362552642822, 1.0, 1.0),
(0.64141416549682617, 1.0, 1.0),
(0.64646464586257935, 1.0, 1.0),
(0.65151512622833252, 1.0, 1.0),
(0.65656566619873047, 1.0, 1.0),
(0.66161614656448364, 1.0, 1.0),
(0.66666668653488159, 1.0, 1.0),
(0.67171716690063477, 1.0, 1.0),
(0.67676764726638794, 1.0, 1.0),
(0.68181818723678589, 1.0, 1.0),
(0.68686866760253906, 1.0, 1.0),
(0.69191920757293701, 1.0, 1.0),
(0.69696968793869019, 1.0, 1.0),
(0.70202022790908813, 1.0, 1.0),
(0.70707070827484131, 1.0, 1.0),
(0.71212118864059448, 1.0, 1.0),
(0.71717172861099243, 1.0, 1.0),
(0.72222220897674561, 1.0, 1.0),
(0.72727274894714355, 1.0, 1.0),
(0.73232322931289673, 1.0, 1.0),
(0.7373737096786499, 1.0, 1.0),
(0.74242424964904785, 1.0, 1.0),
(0.74747473001480103, 1.0, 1.0),
(0.75252526998519897, 1.0, 1.0),
(0.75757575035095215, 1.0, 1.0),
(0.7626262903213501, 1.0, 1.0),
(0.76767677068710327, 1.0, 1.0),
(0.77272725105285645, 1.0, 1.0),
(0.77777779102325439, 1.0, 1.0),
(0.78282827138900757, 1.0, 1.0),
(0.78787881135940552, 1.0, 1.0),
(0.79292929172515869, 1.0, 1.0),
(0.79797977209091187, 0.96470588445663452, 0.96470588445663452),
(0.80303031206130981, 0.92549020051956177, 0.92549020051956177),
(0.80808079242706299, 0.89019608497619629, 0.89019608497619629),
(0.81313133239746094, 0.85098040103912354, 0.85098040103912354),
(0.81818181276321411, 0.81568628549575806, 0.81568628549575806),
(0.82323235273361206, 0.7764706015586853, 0.7764706015586853),
(0.82828283309936523, 0.74117648601531982, 0.74117648601531982),
(0.83333331346511841, 0.70196080207824707, 0.70196080207824707),
(0.83838385343551636, 0.66666668653488159, 0.66666668653488159),
(0.84343433380126953, 0.62745100259780884, 0.62745100259780884),
(0.84848487377166748, 0.61960786581039429, 0.61960786581039429),
(0.85353535413742065, 0.65098041296005249, 0.65098041296005249),
(0.85858583450317383, 0.68235296010971069, 0.68235296010971069),
(0.86363637447357178, 0.7137255072593689, 0.7137255072593689),
(0.86868685483932495, 0.7450980544090271, 0.7450980544090271),
(0.8737373948097229, 0.77254903316497803, 0.77254903316497803),
(0.87878787517547607, 0.80392158031463623, 0.80392158031463623),
(0.88383835554122925, 0.83529412746429443, 0.83529412746429443),
(0.8888888955116272, 0.86666667461395264, 0.86666667461395264),
(0.89393937587738037, 0.89803922176361084, 0.89803922176361084),
(0.89898991584777832, 0.92941176891326904, 0.92941176891326904),
(0.90404039621353149, 0.93333333730697632, 0.93333333730697632),
(0.90909093618392944, 0.93725490570068359, 0.93725490570068359),
(0.91414141654968262, 0.93725490570068359, 0.93725490570068359),
(0.91919189691543579, 0.94117647409439087, 0.94117647409439087),
(0.92424243688583374, 0.94509804248809814, 0.94509804248809814),
(0.92929291725158691, 0.94509804248809814, 0.94509804248809814),
(0.93434345722198486, 0.94901961088180542, 0.94901961088180542),
(0.93939393758773804, 0.9529411792755127, 0.9529411792755127),
(0.94444441795349121, 0.9529411792755127, 0.9529411792755127),
(0.94949495792388916, 0.95686274766921997, 0.95686274766921997),
(0.95454543828964233, 0.96078431606292725, 0.96078431606292725),
(0.95959597826004028, 0.96470588445663452, 0.96470588445663452),
(0.96464645862579346, 0.9686274528503418, 0.9686274528503418),
(0.96969699859619141, 0.97254902124404907, 0.97254902124404907),
(0.97474747896194458, 0.97647058963775635, 0.97647058963775635),
(0.97979795932769775, 0.98039215803146362, 0.98039215803146362),
(0.9848484992980957, 0.9843137264251709, 0.9843137264251709),
(0.98989897966384888, 0.98823529481887817, 0.98823529481887817),
(0.99494951963424683, 0.99215686321258545, 0.99215686321258545),
(1.0, 0.99607843160629272, 0.99607843160629272)],
green = [(0.0, 0.0, 0.0),
(0.0050505050458014011, 0.035294119268655777, 0.035294119268655777),
(0.010101010091602802, 0.074509806931018829, 0.074509806931018829),
(0.015151515603065491, 0.10980392247438431, 0.10980392247438431),
(0.020202020183205605, 0.14901961386203766, 0.14901961386203766),
(0.025252524763345718, 0.18431372940540314, 0.18431372940540314),
(0.030303031206130981, 0.22352941334247589, 0.22352941334247589),
(0.035353533923625946, 0.25882354378700256, 0.25882354378700256),
(0.040404040366411209, 0.29803922772407532, 0.29803922772407532),
(0.045454546809196472, 0.3333333432674408, 0.3333333432674408),
(0.050505049526691437, 0.37254902720451355, 0.37254902720451355),
(0.0555555559694767, 0.36862745881080627, 0.36862745881080627),
(0.060606062412261963, 0.3333333432674408, 0.3333333432674408),
(0.065656565129756927, 0.29411765933036804, 0.29411765933036804),
(0.070707067847251892, 0.25882354378700256, 0.25882354378700256),
(0.075757578015327454, 0.21960784494876862, 0.21960784494876862),
(0.080808080732822418, 0.18431372940540314, 0.18431372940540314),
(0.085858583450317383, 0.14509804546833038, 0.14509804546833038),
(0.090909093618392944, 0.10980392247438431, 0.10980392247438431),
(0.095959596335887909, 0.070588238537311554, 0.070588238537311554),
(0.10101009905338287, 0.035294119268655777, 0.035294119268655777),
(0.10606060922145844, 0.0, 0.0),
(0.1111111119389534, 0.074509806931018829, 0.074509806931018829),
(0.11616161465644836, 0.14509804546833038, 0.14509804546833038),
(0.12121212482452393, 0.21568627655506134, 0.21568627655506134),
(0.12626262009143829, 0.28627452254295349, 0.28627452254295349),
(0.13131313025951385, 0.36078432202339172, 0.36078432202339172),
(0.13636364042758942, 0.43137255311012268, 0.43137255311012268),
(0.14141413569450378, 0.50196081399917603, 0.50196081399917603),
(0.14646464586257935, 0.57254904508590698, 0.57254904508590698),
(0.15151515603065491, 0.64705884456634521, 0.64705884456634521),
(0.15656565129756927, 0.71764707565307617, 0.71764707565307617),
(0.16161616146564484, 0.7607843279838562, 0.7607843279838562),
(0.1666666716337204, 0.78431373834609985, 0.78431373834609985),
(0.17171716690063477, 0.80784314870834351, 0.80784314870834351),
(0.17676767706871033, 0.83137255907058716, 0.83137255907058716),
(0.18181818723678589, 0.85490196943283081, 0.85490196943283081),
(0.18686868250370026, 0.88235294818878174, 0.88235294818878174),
(0.19191919267177582, 0.90588235855102539, 0.90588235855102539),
(0.19696970283985138, 0.92941176891326904, 0.92941176891326904),
(0.20202019810676575, 0.9529411792755127, 0.9529411792755127),
(0.20707070827484131, 0.97647058963775635, 0.97647058963775635),
(0.21212121844291687, 0.99607843160629272, 0.99607843160629272),
(0.21717171370983124, 0.99607843160629272, 0.99607843160629272),
(0.2222222238779068, 0.99215686321258545, 0.99215686321258545),
(0.22727273404598236, 0.99215686321258545, 0.99215686321258545),
(0.23232322931289673, 0.99215686321258545, 0.99215686321258545),
(0.23737373948097229, 0.98823529481887817, 0.98823529481887817),
(0.24242424964904785, 0.98823529481887817, 0.98823529481887817),
(0.24747474491596222, 0.9843137264251709, 0.9843137264251709),
(0.25252524018287659, 0.9843137264251709, 0.9843137264251709),
(0.25757575035095215, 0.98039215803146362, 0.98039215803146362),
(0.26262626051902771, 0.98039215803146362, 0.98039215803146362),
(0.26767677068710327, 0.98039215803146362, 0.98039215803146362),
(0.27272728085517883, 0.98039215803146362, 0.98039215803146362),
(0.27777779102325439, 0.9843137264251709, 0.9843137264251709),
(0.28282827138900757, 0.9843137264251709, 0.9843137264251709),
(0.28787878155708313, 0.98823529481887817, 0.98823529481887817),
(0.29292929172515869, 0.98823529481887817, 0.98823529481887817),
(0.29797980189323425, 0.99215686321258545, 0.99215686321258545),
(0.30303031206130981, 0.99215686321258545, 0.99215686321258545),
(0.30808082222938538, 0.99607843160629272, 0.99607843160629272),
(0.31313130259513855, 0.99607843160629272, 0.99607843160629272),
(0.31818181276321411, 0.99607843160629272, 0.99607843160629272),
(0.32323232293128967, 0.97647058963775635, 0.97647058963775635),
(0.32828283309936523, 0.95686274766921997, 0.95686274766921997),
(0.3333333432674408, 0.93725490570068359, 0.93725490570068359),
(0.33838382363319397, 0.92156863212585449, 0.92156863212585449),
(0.34343433380126953, 0.90196079015731812, 0.90196079015731812),
(0.34848484396934509, 0.88235294818878174, 0.88235294818878174),
(0.35353535413742065, 0.86274510622024536, 0.86274510622024536),
(0.35858586430549622, 0.84705883264541626, 0.84705883264541626),
(0.36363637447357178, 0.82745099067687988, 0.82745099067687988),
(0.36868685483932495, 0.80784314870834351, 0.80784314870834351),
(0.37373736500740051, 0.81568628549575806, 0.81568628549575806),
(0.37878787517547607, 0.83529412746429443, 0.83529412746429443),
(0.38383838534355164, 0.85098040103912354, 0.85098040103912354),
(0.3888888955116272, 0.87058824300765991, 0.87058824300765991),
(0.39393940567970276, 0.89019608497619629, 0.89019608497619629),
(0.39898988604545593, 0.90980392694473267, 0.90980392694473267),
(0.40404039621353149, 0.92549020051956177, 0.92549020051956177),
(0.40909090638160706, 0.94509804248809814, 0.94509804248809814),
(0.41414141654968262, 0.96470588445663452, 0.96470588445663452),
(0.41919192671775818, 0.9843137264251709, 0.9843137264251709),
(0.42424243688583374, 1.0, 1.0),
(0.42929291725158691, 1.0, 1.0),
(0.43434342741966248, 1.0, 1.0),
(0.43939393758773804, 1.0, 1.0),
(0.4444444477558136, 1.0, 1.0),
(0.44949495792388916, 1.0, 1.0),
(0.45454546809196472, 1.0, 1.0),
(0.4595959484577179, 1.0, 1.0),
(0.46464645862579346, 1.0, 1.0),
(0.46969696879386902, 1.0, 1.0),
(0.47474747896194458, 1.0, 1.0),
(0.47979798913002014, 1.0, 1.0),
(0.4848484992980957, 1.0, 1.0),
(0.48989897966384888, 1.0, 1.0),
(0.49494948983192444, 1.0, 1.0),
(0.5, 1.0, 1.0),
(0.50505048036575317, 1.0, 1.0),
(0.51010102033615112, 1.0, 1.0),
(0.5151515007019043, 1.0, 1.0),
(0.52020204067230225, 1.0, 1.0),
(0.52525252103805542, 1.0, 1.0),
(0.53030300140380859, 0.99215686321258545, 0.99215686321258545),
(0.53535354137420654, 0.98039215803146362, 0.98039215803146362),
(0.54040402173995972, 0.96470588445663452, 0.96470588445663452),
(0.54545456171035767, 0.94901961088180542, 0.94901961088180542),
(0.55050504207611084, 0.93333333730697632, 0.93333333730697632),
(0.55555558204650879, 0.91764706373214722, 0.91764706373214722),
(0.56060606241226196, 0.90588235855102539, 0.90588235855102539),
(0.56565654277801514, 0.89019608497619629, 0.89019608497619629),
(0.57070708274841309, 0.87450981140136719, 0.87450981140136719),
(0.57575756311416626, 0.85882353782653809, 0.85882353782653809),
(0.58080810308456421, 0.84313726425170898, 0.84313726425170898),
(0.58585858345031738, 0.83137255907058716, 0.83137255907058716),
(0.59090906381607056, 0.81960785388946533, 0.81960785388946533),
(0.59595960378646851, 0.81176471710205078, 0.81176471710205078),
(0.60101008415222168, 0.80000001192092896, 0.80000001192092896),
(0.60606062412261963, 0.78823530673980713, 0.78823530673980713),
(0.6111111044883728, 0.7764706015586853, 0.7764706015586853),
(0.61616164445877075, 0.76470589637756348, 0.76470589637756348),
(0.62121212482452393, 0.75294119119644165, 0.75294119119644165),
(0.6262626051902771, 0.74117648601531982, 0.74117648601531982),
(0.63131314516067505, 0.729411780834198, 0.729411780834198),
(0.63636362552642822, 0.70980393886566162, 0.70980393886566162),
(0.64141416549682617, 0.66666668653488159, 0.66666668653488159),
(0.64646464586257935, 0.62352943420410156, 0.62352943420410156),
(0.65151512622833252, 0.58039218187332153, 0.58039218187332153),
(0.65656566619873047, 0.5372549295425415, 0.5372549295425415),
(0.66161614656448364, 0.49411764740943909, 0.49411764740943909),
(0.66666668653488159, 0.45098039507865906, 0.45098039507865906),
(0.67171716690063477, 0.40392157435417175, 0.40392157435417175),
(0.67676764726638794, 0.36078432202339172, 0.36078432202339172),
(0.68181818723678589, 0.31764706969261169, 0.31764706969261169),
(0.68686866760253906, 0.27450981736183167, 0.27450981736183167),
(0.69191920757293701, 0.24705882370471954, 0.24705882370471954),
(0.69696968793869019, 0.21960784494876862, 0.21960784494876862),
(0.70202022790908813, 0.19607843458652496, 0.19607843458652496),
(0.70707070827484131, 0.16862745583057404, 0.16862745583057404),
(0.71212118864059448, 0.14509804546833038, 0.14509804546833038),
(0.71717172861099243, 0.11764705926179886, 0.11764705926179886),
(0.72222220897674561, 0.090196080505847931, 0.090196080505847931),
(0.72727274894714355, 0.066666670143604279, 0.066666670143604279),
(0.73232322931289673, 0.039215687662363052, 0.039215687662363052),
(0.7373737096786499, 0.015686275437474251, 0.015686275437474251),
(0.74242424964904785, 0.0, 0.0),
(0.74747473001480103, 0.0, 0.0),
(0.75252526998519897, 0.0, 0.0),
(0.75757575035095215, 0.0, 0.0),
(0.7626262903213501, 0.0, 0.0),
(0.76767677068710327, 0.0, 0.0),
(0.77272725105285645, 0.0, 0.0),
(0.77777779102325439, 0.0, 0.0),
(0.78282827138900757, 0.0, 0.0),
(0.78787881135940552, 0.0, 0.0),
(0.79292929172515869, 0.0, 0.0),
(0.79797977209091187, 0.015686275437474251, 0.015686275437474251),
(0.80303031206130981, 0.031372550874948502, 0.031372550874948502),
(0.80808079242706299, 0.050980392843484879, 0.050980392843484879),
(0.81313133239746094, 0.066666670143604279, 0.066666670143604279),
(0.81818181276321411, 0.086274512112140656, 0.086274512112140656),
(0.82323235273361206, 0.10588235408067703, 0.10588235408067703),
(0.82828283309936523, 0.12156862765550613, 0.12156862765550613),
(0.83333331346511841, 0.14117647707462311, 0.14117647707462311),
(0.83838385343551636, 0.15686275064945221, 0.15686275064945221),
(0.84343433380126953, 0.17647059261798859, 0.17647059261798859),
(0.84848487377166748, 0.20000000298023224, 0.20000000298023224),
(0.85353535413742065, 0.23137255012989044, 0.23137255012989044),
(0.85858583450317383, 0.25882354378700256, 0.25882354378700256),
(0.86363637447357178, 0.29019609093666077, 0.29019609093666077),
(0.86868685483932495, 0.32156863808631897, 0.32156863808631897),
(0.8737373948097229, 0.35294118523597717, 0.35294118523597717),
(0.87878787517547607, 0.38431373238563538, 0.38431373238563538),
(0.88383835554122925, 0.41568627953529358, 0.41568627953529358),
(0.8888888955116272, 0.44313725829124451, 0.44313725829124451),
(0.89393937587738037, 0.47450980544090271, 0.47450980544090271),
(0.89898991584777832, 0.5058823823928833, 0.5058823823928833),
(0.90404039621353149, 0.52941179275512695, 0.52941179275512695),
(0.90909093618392944, 0.55294120311737061, 0.55294120311737061),
(0.91414141654968262, 0.57254904508590698, 0.57254904508590698),
(0.91919189691543579, 0.59607845544815063, 0.59607845544815063),
(0.92424243688583374, 0.61960786581039429, 0.61960786581039429),
(0.92929291725158691, 0.64313727617263794, 0.64313727617263794),
(0.93434345722198486, 0.66274511814117432, 0.66274511814117432),
(0.93939393758773804, 0.68627452850341797, 0.68627452850341797),
(0.94444441795349121, 0.70980393886566162, 0.70980393886566162),
(0.94949495792388916, 0.729411780834198, 0.729411780834198),
(0.95454543828964233, 0.75294119119644165, 0.75294119119644165),
(0.95959597826004028, 0.78039216995239258, 0.78039216995239258),
(0.96464645862579346, 0.80392158031463623, 0.80392158031463623),
(0.96969699859619141, 0.82745099067687988, 0.82745099067687988),
(0.97474747896194458, 0.85098040103912354, 0.85098040103912354),
(0.97979795932769775, 0.87450981140136719, 0.87450981140136719),
(0.9848484992980957, 0.90196079015731812, 0.90196079015731812),
(0.98989897966384888, 0.92549020051956177, 0.92549020051956177),
(0.99494951963424683, 0.94901961088180542, 0.94901961088180542),
(1.0, 0.97254902124404907, 0.97254902124404907)],
blue = [(0.0, 0.50196081399917603, 0.50196081399917603),
(0.0050505050458014011, 0.45098039507865906, 0.45098039507865906),
(0.010101010091602802, 0.40392157435417175, 0.40392157435417175),
(0.015151515603065491, 0.35686275362968445, 0.35686275362968445),
(0.020202020183205605, 0.30980393290519714, 0.30980393290519714),
(0.025252524763345718, 0.25882354378700256, 0.25882354378700256),
(0.030303031206130981, 0.21176470816135406, 0.21176470816135406),
(0.035353533923625946, 0.16470588743686676, 0.16470588743686676),
(0.040404040366411209, 0.11764705926179886, 0.11764705926179886),
(0.045454546809196472, 0.070588238537311554, 0.070588238537311554),
(0.050505049526691437, 0.019607843831181526, 0.019607843831181526),
(0.0555555559694767, 0.047058824449777603, 0.047058824449777603),
(0.060606062412261963, 0.14509804546833038, 0.14509804546833038),
(0.065656565129756927, 0.23921568691730499, 0.23921568691730499),
(0.070707067847251892, 0.3333333432674408, 0.3333333432674408),
(0.075757578015327454, 0.43137255311012268, 0.43137255311012268),
(0.080808080732822418, 0.52549022436141968, 0.52549022436141968),
(0.085858583450317383, 0.61960786581039429, 0.61960786581039429),
(0.090909093618392944, 0.71764707565307617, 0.71764707565307617),
(0.095959596335887909, 0.81176471710205078, 0.81176471710205078),
(0.10101009905338287, 0.90588235855102539, 0.90588235855102539),
(0.10606060922145844, 1.0, 1.0),
(0.1111111119389534, 1.0, 1.0),
(0.11616161465644836, 1.0, 1.0),
(0.12121212482452393, 1.0, 1.0),
(0.12626262009143829, 1.0, 1.0),
(0.13131313025951385, 1.0, 1.0),
(0.13636364042758942, 1.0, 1.0),
(0.14141413569450378, 1.0, 1.0),
(0.14646464586257935, 1.0, 1.0),
(0.15151515603065491, 1.0, 1.0),
(0.15656565129756927, 1.0, 1.0),
(0.16161616146564484, 1.0, 1.0),
(0.1666666716337204, 1.0, 1.0),
(0.17171716690063477, 1.0, 1.0),
(0.17676767706871033, 1.0, 1.0),
(0.18181818723678589, 1.0, 1.0),
(0.18686868250370026, 1.0, 1.0),
(0.19191919267177582, 1.0, 1.0),
(0.19696970283985138, 1.0, 1.0),
(0.20202019810676575, 1.0, 1.0),
(0.20707070827484131, 1.0, 1.0),
(0.21212121844291687, 0.99215686321258545, 0.99215686321258545),
(0.21717171370983124, 0.95686274766921997, 0.95686274766921997),
(0.2222222238779068, 0.91764706373214722, 0.91764706373214722),
(0.22727273404598236, 0.88235294818878174, 0.88235294818878174),
(0.23232322931289673, 0.84313726425170898, 0.84313726425170898),
(0.23737373948097229, 0.80392158031463623, 0.80392158031463623),
(0.24242424964904785, 0.76862746477127075, 0.76862746477127075),
(0.24747474491596222, 0.729411780834198, 0.729411780834198),
(0.25252524018287659, 0.69019609689712524, 0.69019609689712524),
(0.25757575035095215, 0.65490198135375977, 0.65490198135375977),
(0.26262626051902771, 0.61568629741668701, 0.61568629741668701),
(0.26767677068710327, 0.56470590829849243, 0.56470590829849243),
(0.27272728085517883, 0.50980395078659058, 0.50980395078659058),
(0.27777779102325439, 0.45098039507865906, 0.45098039507865906),
(0.28282827138900757, 0.39215686917304993, 0.39215686917304993),
(0.28787878155708313, 0.3333333432674408, 0.3333333432674408),
(0.29292929172515869, 0.27843138575553894, 0.27843138575553894),
(0.29797980189323425, 0.21960784494876862, 0.21960784494876862),
(0.30303031206130981, 0.16078431904315948, 0.16078431904315948),
(0.30808082222938538, 0.10588235408067703, 0.10588235408067703),
(0.31313130259513855, 0.047058824449777603, 0.047058824449777603),
(0.31818181276321411, 0.0, 0.0),
(0.32323232293128967, 0.0, 0.0),
(0.32828283309936523, 0.0, 0.0),
(0.3333333432674408, 0.0, 0.0),
(0.33838382363319397, 0.0, 0.0),
(0.34343433380126953, 0.0, 0.0),
(0.34848484396934509, 0.0, 0.0),
(0.35353535413742065, 0.0, 0.0),
(0.35858586430549622, 0.0, 0.0),
(0.36363637447357178, 0.0, 0.0),
(0.36868685483932495, 0.0, 0.0),
(0.37373736500740051, 0.0, 0.0),
(0.37878787517547607, 0.0, 0.0),
(0.38383838534355164, 0.0, 0.0),
(0.3888888955116272, 0.0, 0.0),
(0.39393940567970276, 0.0, 0.0),
(0.39898988604545593, 0.0, 0.0),
(0.40404039621353149, 0.0, 0.0),
(0.40909090638160706, 0.0, 0.0),
(0.41414141654968262, 0.0, 0.0),
(0.41919192671775818, 0.0, 0.0),
(0.42424243688583374, 0.0039215688593685627, 0.0039215688593685627),
(0.42929291725158691, 0.027450980618596077, 0.027450980618596077),
(0.43434342741966248, 0.050980392843484879, 0.050980392843484879),
(0.43939393758773804, 0.074509806931018829, 0.074509806931018829),
(0.4444444477558136, 0.094117648899555206, 0.094117648899555206),
(0.44949495792388916, 0.11764705926179886, 0.11764705926179886),
(0.45454546809196472, 0.14117647707462311, 0.14117647707462311),
(0.4595959484577179, 0.16470588743686676, 0.16470588743686676),
(0.46464645862579346, 0.18823529779911041, 0.18823529779911041),
(0.46969696879386902, 0.21176470816135406, 0.21176470816135406),
(0.47474747896194458, 0.23529411852359772, 0.23529411852359772),
(0.47979798913002014, 0.22352941334247589, 0.22352941334247589),
(0.4848484992980957, 0.20000000298023224, 0.20000000298023224),
(0.48989897966384888, 0.17647059261798859, 0.17647059261798859),
(0.49494948983192444, 0.15294118225574493, 0.15294118225574493),
(0.5, 0.12941177189350128, 0.12941177189350128),
(0.50505048036575317, 0.10980392247438431, 0.10980392247438431),
(0.51010102033615112, 0.086274512112140656, 0.086274512112140656),
(0.5151515007019043, 0.062745101749897003, 0.062745101749897003),
(0.52020204067230225, 0.039215687662363052, 0.039215687662363052),
(0.52525252103805542, 0.015686275437474251, 0.015686275437474251),
(0.53030300140380859, 0.0, 0.0),
(0.53535354137420654, 0.0, 0.0),
(0.54040402173995972, 0.0, 0.0),
(0.54545456171035767, 0.0, 0.0),
(0.55050504207611084, 0.0, 0.0),
(0.55555558204650879, 0.0, 0.0),
(0.56060606241226196, 0.0, 0.0),
(0.56565654277801514, 0.0, 0.0),
(0.57070708274841309, 0.0, 0.0),
(0.57575756311416626, 0.0, 0.0),
(0.58080810308456421, 0.0, 0.0),
(0.58585858345031738, 0.0039215688593685627, 0.0039215688593685627),
(0.59090906381607056, 0.0078431377187371254, 0.0078431377187371254),
(0.59595960378646851, 0.011764706112444401, 0.011764706112444401),
(0.60101008415222168, 0.019607843831181526, 0.019607843831181526),
(0.60606062412261963, 0.023529412224888802, 0.023529412224888802),
(0.6111111044883728, 0.031372550874948502, 0.031372550874948502),
(0.61616164445877075, 0.035294119268655777, 0.035294119268655777),
(0.62121212482452393, 0.043137256056070328, 0.043137256056070328),
(0.6262626051902771, 0.047058824449777603, 0.047058824449777603),
(0.63131314516067505, 0.054901961237192154, 0.054901961237192154),
(0.63636362552642822, 0.054901961237192154, 0.054901961237192154),
(0.64141416549682617, 0.050980392843484879, 0.050980392843484879),
(0.64646464586257935, 0.043137256056070328, 0.043137256056070328),
(0.65151512622833252, 0.039215687662363052, 0.039215687662363052),
(0.65656566619873047, 0.031372550874948502, 0.031372550874948502),
(0.66161614656448364, 0.027450980618596077, 0.027450980618596077),
(0.66666668653488159, 0.019607843831181526, 0.019607843831181526),
(0.67171716690063477, 0.015686275437474251, 0.015686275437474251),
(0.67676764726638794, 0.011764706112444401, 0.011764706112444401),
(0.68181818723678589, 0.0039215688593685627, 0.0039215688593685627),
(0.68686866760253906, 0.0, 0.0),
(0.69191920757293701, 0.0, 0.0),
(0.69696968793869019, 0.0, 0.0),
(0.70202022790908813, 0.0, 0.0),
(0.70707070827484131, 0.0, 0.0),
(0.71212118864059448, 0.0, 0.0),
(0.71717172861099243, 0.0, 0.0),
(0.72222220897674561, 0.0, 0.0),
(0.72727274894714355, 0.0, 0.0),
(0.73232322931289673, 0.0, 0.0),
(0.7373737096786499, 0.0, 0.0),
(0.74242424964904785, 0.031372550874948502, 0.031372550874948502),
(0.74747473001480103, 0.12941177189350128, 0.12941177189350128),
(0.75252526998519897, 0.22352941334247589, 0.22352941334247589),
(0.75757575035095215, 0.32156863808631897, 0.32156863808631897),
(0.7626262903213501, 0.41568627953529358, 0.41568627953529358),
(0.76767677068710327, 0.50980395078659058, 0.50980395078659058),
(0.77272725105285645, 0.60784316062927246, 0.60784316062927246),
(0.77777779102325439, 0.70196080207824707, 0.70196080207824707),
(0.78282827138900757, 0.79607844352722168, 0.79607844352722168),
(0.78787881135940552, 0.89411765336990356, 0.89411765336990356),
(0.79292929172515869, 0.98823529481887817, 0.98823529481887817),
(0.79797977209091187, 1.0, 1.0),
(0.80303031206130981, 1.0, 1.0),
(0.80808079242706299, 1.0, 1.0),
(0.81313133239746094, 1.0, 1.0),
(0.81818181276321411, 1.0, 1.0),
(0.82323235273361206, 1.0, 1.0),
(0.82828283309936523, 1.0, 1.0),
(0.83333331346511841, 1.0, 1.0),
(0.83838385343551636, 1.0, 1.0),
(0.84343433380126953, 1.0, 1.0),
(0.84848487377166748, 0.99607843160629272, 0.99607843160629272),
(0.85353535413742065, 0.98823529481887817, 0.98823529481887817),
(0.85858583450317383, 0.9843137264251709, 0.9843137264251709),
(0.86363637447357178, 0.97647058963775635, 0.97647058963775635),
(0.86868685483932495, 0.9686274528503418, 0.9686274528503418),
(0.8737373948097229, 0.96470588445663452, 0.96470588445663452),
(0.87878787517547607, 0.95686274766921997, 0.95686274766921997),
(0.88383835554122925, 0.94901961088180542, 0.94901961088180542),
(0.8888888955116272, 0.94509804248809814, 0.94509804248809814),
(0.89393937587738037, 0.93725490570068359, 0.93725490570068359),
(0.89898991584777832, 0.93333333730697632, 0.93333333730697632),
(0.90404039621353149, 0.93333333730697632, 0.93333333730697632),
(0.90909093618392944, 0.93725490570068359, 0.93725490570068359),
(0.91414141654968262, 0.93725490570068359, 0.93725490570068359),
(0.91919189691543579, 0.94117647409439087, 0.94117647409439087),
(0.92424243688583374, 0.94509804248809814, 0.94509804248809814),
(0.92929291725158691, 0.94509804248809814, 0.94509804248809814),
(0.93434345722198486, 0.94901961088180542, 0.94901961088180542),
(0.93939393758773804, 0.9529411792755127, 0.9529411792755127),
(0.94444441795349121, 0.9529411792755127, 0.9529411792755127),
(0.94949495792388916, 0.95686274766921997, 0.95686274766921997),
(0.95454543828964233, 0.96078431606292725, 0.96078431606292725),
(0.95959597826004028, 0.96470588445663452, 0.96470588445663452),
(0.96464645862579346, 0.9686274528503418, 0.9686274528503418),
(0.96969699859619141, 0.97254902124404907, 0.97254902124404907),
(0.97474747896194458, 0.97647058963775635, 0.97647058963775635),
(0.97979795932769775, 0.98039215803146362, 0.98039215803146362),
(0.9848484992980957, 0.9843137264251709, 0.9843137264251709),
(0.98989897966384888, 0.98823529481887817, 0.98823529481887817),
(0.99494951963424683, 0.99215686321258545, 0.99215686321258545),
(1.0, 0.99607843160629272, 0.99607843160629272)],
)
return ColorMapper.from_segment_map(_data, range=range, **traits) | 5,323,897 |
def main(request, username):
    """
    User > Main

    Build the context for a user's main profile page and render it,
    serving the assembled context from cache when available.

    :param request: incoming Django HTTP request.
    :param username: username whose profile page is being viewed.
    :raises Http404: when the profile helper reports the user was not found.
    :return: rendered ``user/user_main.jade`` response.
    """
    # Cache key is scoped per-username so each profile caches independently.
    namespace = CacheHelper.ns('user:views:main', username=username)
    response_data = CacheHelper.io.get(namespace)
    if response_data is None:
        # Cache miss: build the base profile context, then merge in the
        # user's timeline data.
        response_data, user = MainUserHelper.build_response(request, username)
        if response_data['status'] == 'not_found':
            # NOTE(review): 404 is raised before the cache set below, so
            # missing users are re-resolved on every request — confirm this
            # is intentional (no negative caching).
            raise Http404
        response_data.update(
            UserTimelineHelper.build_response(
                request=request,
                user=user,
            )
        )
        # Cache the fully assembled context for 30 seconds.
        CacheHelper.io.set(namespace, response_data, 30)
    return render(request, 'user/user_main.jade', response_data) | 5,323,898 |
def test_quiet(capsys: CaptureFixture, logger_name: str):
"""Test quiet.
:param capsys: pytest fixture.
:param logger_name: conftest fixture.
"""
log = setup_logging(logger_name=logger_name, verbose=-1)
assert not generate_log_statements(log)
stdout, stderr = [i.splitlines() for i in capsys.readouterr()]
assert not stdout
assert not stderr | 5,323,899 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.