content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def list_to_string_with_comma(thing):
    """Return every item of *thing* as a string, joined by commas.

    Args:
        thing: Any iterable whose items are convertible with ``str``.

    Returns:
        str: Comma-separated string; empty string for an empty iterable.
    """
    # str.join is O(n), unlike the old quadratic += loop, and it also
    # handles the empty case without the string[:-1] trick.
    return ','.join(str(item) for item in thing)
|
a7fff928137c3f7041b030c1fe4dd099f0ff8ca2
| 14,743
|
def try_section(line):
    """
    Check whether the given line is a section header like ``[name](a, !)``.

    :param line: a configuration line (leading spaces already stripped)
    :return: tuple ``(section_name_or_None, has_bang, templates)`` where
        ``has_bang`` is True when the template list contained ``'!'``.
    """
    # leading spaces were stripped when checking for comments
    if not line.startswith('['):
        return None, False, []
    section, delim, templates = line.partition(']')
    if not templates:
        return section[1:], False, []
    # strip out the parens and parse into an array
    templates = templates.replace('(', "").replace(')', "").split(',')
    # go ahead and remove extra whitespace
    templates = [i.strip() for i in templates]
    try:
        # list.remove raises ValueError when '!' is absent; catch only that
        # instead of the old bare except, which would also hide real bugs.
        templates.remove('!')
        return section[1:], True, templates
    except ValueError:
        return section[1:], False, templates
|
30a1043651e4a8ce3949d2d13d64c8310d5d8336
| 14,745
|
import json
def read_geo_info(file_path):
    """
    Read geographic point and region information from a JSON file
    containing individual locations and regions.

    Parameters:
        file_path (str) : path to the json file
    Return:
        dictionary with geographic info (without the "information" key)
    """
    with open(file_path) as json_file:
        geo_dict = json.load(json_file)
    # Drop the useless metadata key; the default keeps this safe
    # (the old code raised KeyError when the key was absent).
    geo_dict.pop('information', None)
    return geo_dict
|
d4e8b34bee16183ac535c146e4f9b6026ed57b7d
| 14,746
|
import click
def name_argument(func):
    """Decorate *func* with a machine NAME click argument.

    When the argument is omitted (empty string), the callback falls back
    to the currently selected machine from the context's config object.
    """
    def _resolve(ctx, unused_param, value):
        # Empty value means "use the currently selected machine".
        return value if value else ctx.obj['config'].get_selected()
    decorator = click.argument('name', metavar='NAME', default='',
                               callback=_resolve)
    return decorator(func)
|
39e82c4836c196d48a29b370a9bd5310927d5da1
| 14,748
|
def maximum(lst):
    """(list) -> int
    Compute the maximum element in a list of integers.

    Returns 0 for an empty list (mirrors the old stub's fallback value).
    """
    # Bug fix: the previous body was an unimplemented stub that always
    # returned 0 regardless of input.
    return max(lst) if lst else 0
|
645ffb1dede15e1848eec67f581e23ef6a071233
| 14,749
|
def adjust_index_pair(index_pair, increment):
    """Return a list with every index in *index_pair* shifted by *increment*."""
    return [position + increment for position in index_pair]
|
43968980998f6d12457e922c3c70d2ceba6d6b2e
| 14,750
|
def paragraph_detokenizer(sentences, breaks):
    """Rebuild a paragraph from sentence indexes and newline markers.

    Args:
        sentences (list): Sentence strings.
        breaks (list): Sequence of sentence indexes and literal "\n" markers.

    Returns:
        str: Text with the original formatting restored.
    """
    pieces = [
        "\n" if marker == "\n" else sentences[marker] + " "
        for marker in breaks
    ]
    return "".join(pieces)
|
fbba0099326156bff3ba5d76dfb3b9bc197c3269
| 14,751
|
def is_start_with_vowel(string):
    """Return True if the string starts with a vowel (either case).

    :param string: Str
    :return: bool; False for an empty string (the old code raised
        IndexError on "").
    """
    # Membership on a string tests single characters; bool() guards "".
    return bool(string) and string[0] in 'aeiouAEIOU'
|
902a893eae5a554e530127ba43bc79ea977da6f7
| 14,752
|
def custom_lineplot(ax, x, y, error, xlims, ylims, color='red'):
    """Draw a dashed, dotted line plot with black error bars on *ax*.

    Fixes the x/y limits afterwards and returns the same axes object
    for chaining.
    """
    ax.errorbar(
        x, y,
        yerr=error,
        color=color,
        ls='--',
        marker='o',
        capsize=5,
        capthick=1,
        ecolor='black',
    )
    ax.set_xlim(xlims)
    ax.set_ylim(ylims)
    return ax
|
f80b76b8f9f60a44bf62bc64829c40793747ada2
| 14,753
|
def generate_role_with_colon_format(content, defined_role, generated_role):
    """Generate role data with input as Compute:ComputeA
    In Compute:ComputeA, the defined role 'Compute' can be added to
    roles_data.yaml by changing the name to 'ComputeA'. This allows duplicating
    the defined roles so that hardware specific nodes can be targeted with
    specific roles.
    :param content defined role file's content
    :param defined_role defined role's name
    :param generated_role role's name to generate from defined role
    :return content with the role name replaced
    :exception ValueError if generated role name is of invalid format
        or the defined role name is not found in the content
    """
    # "Compute:Compute" is invalid format
    if generated_role == defined_role:
        msg = ("Generated role name cannot be same as existing role name ({}) "
               "with colon format".format(defined_role))
        raise ValueError(msg)
    # "Compute:A" is invalid format
    if not generated_role.startswith(defined_role):
        msg = ("Generated role name ({}) name should start with existing role "
               "name ({})".format(generated_role, defined_role))
        raise ValueError(msg)
    name_line = "name:%s" % defined_role
    name_line_match = False
    processed = []
    for line in content.split('\n'):
        # Collapse spaces so "name: Compute" and "name:Compute" both match.
        stripped_line = line.replace(' ', '')
        # Only 'name' need to be replaced in the existing role
        if name_line in stripped_line:
            line = line.replace(defined_role, generated_role)
            name_line_match = True
        processed.append(line)
    if not name_line_match:
        # Bug fix: the old message was just " error"; say what went wrong.
        msg = ("Defined role name ({}) not found in the given role file "
               "content".format(defined_role))
        raise ValueError(msg)
    return '\n'.join(processed)
|
a303bf17a8416b2b4df410095d76faaa7d2466ca
| 14,755
|
import os
def get_ids():  # might be useful later on
    """Return a list of the ids (file names) in the ``dataset/`` directory."""
    # Bug fix: the old version returned a one-shot generator although the
    # docstring promises a list; a list can be iterated repeatedly.
    return os.listdir('dataset/')
|
b78bdc314933f3132ec9cc72c54e4a5fde02e074
| 14,756
|
import os
def remove(filepath):
    """
    Delete the file at the given path.

    Parameters
    ----------
    filepath : str
        Path of the file to delete.

    Raises
    ------
    FileNotFoundError
        If no file exists at *filepath*.
    """
    result = os.remove(filepath)
    return result
|
1235fe9fb61cdbe730fc8c4bc5236b967e7f122f
| 14,757
|
import locale
def decode_byte_str(byte_str):
    """
    Decode a byte string into a unicode string.

    Args:
        byte_str(byte): Byte string.

    Returns:
        Unicode string; falls back to ``str()`` of the input when neither
        UTF-8 nor the system's preferred encoding can decode it.
    """
    # Try UTF-8 first, then the platform's preferred encoding.
    for encoding in ("utf-8", locale.getpreferredencoding()):
        try:
            return byte_str.decode(encoding)
        except Exception:
            pass
    return str(byte_str)
|
1ae790ef9fee9ce5053f55e1d8757a4a385dc56d
| 14,759
|
import re
def find_tilesets(page_text):
    """
    Extract tileset image names from a page's wiki markup.

    Uses a simple heuristic: find ``{{...tileset...}}`` templates and pull
    the first ``file:``/``image:`` link out of each one.

    :param page_text: The Wiki markup of a page.
    :return: A list of image names (empty string for a template with none).
    """
    # Reformat for easy regex: collapse newlines so each template matches
    # as a single span. (The unused `global min_tile_size` was removed.)
    page_text = page_text.replace("\n", " ")
    # Get tileset templates; raw strings keep the regex escapes literal.
    tilesets = re.findall(r"\{\{[^{}]*?tileset.*?\}\}", page_text, re.IGNORECASE)
    # Search templates for image files; the trailing |$ makes findall
    # always yield at least one (possibly empty) match per template.
    tileset_filenames = [
        re.findall(r"\[\[[^\]]*((file:|image:)[^\]]*)\]\]|$",
                   snippet, re.IGNORECASE)[0][0]
        for snippet in tilesets
    ]
    # Drop wiki link parameters ("|...") and surrounding whitespace.
    tileset_filenames = [name.split("|")[0].strip() for name in tileset_filenames]
    return tileset_filenames
|
5a080ca8fe5894e73aafe866d11b01f3a691f165
| 14,761
|
import subprocess
import yaml
from typing import List
def _get_service_port(service: str, namespace: str, target_port: int) -> int:
    """Given a K8s service and a port targetted by the service, returns the
    corresponding port exposed by the service.

    Args:
        service: Name of a K8s service.
        namespace: Namespace to which the service belongs.
        target_port: Port targeted by the service.

    Returns:
        service_port: The port exposed by the service.
    """
    # Ask kubectl for the service definition and parse it as YAML.
    raw = subprocess.check_output(
        ["kubectl", "-n", namespace, "get", "service", service, "-o", "yaml"]
    )
    service_dict = yaml.safe_load(raw.decode().strip())
    ports: List = service_dict["spec"]["ports"]
    matches = [entry for entry in ports if entry["targetPort"] == target_port]
    assert matches
    return matches[0]["port"]
|
274c610a6c70ac4650619154445eae532c88a71f
| 14,762
|
def make_options(obs_settings, obs_names):
    """Construct a dict of configuration options for named observables.

    Args:
        obs_settings: An `ObservationSettings` instance.
        obs_names: An `ObservableNames` instance.

    Returns:
        A nested dict containing `{observable_name: {option_name: value}}`.
    """
    return {
        observable_name: spec._asdict()
        for category, spec in obs_settings._asdict().items()
        for observable_name in getattr(obs_names, category)
    }
|
a9c0d605835533b0e41cf8df4c934c42f5601a4a
| 14,764
|
import time
def time2hide(starttime):
    """A helper for blinking things.

    Returns a Boolean that flips twice per second: True during the first
    half of each elapsed second, False during the second half.
    """
    elapsed = time.time() - starttime
    return elapsed % 1 < 0.5
|
2990bd0387b7c3e7b0f7d966b8210ef1ab6bca89
| 14,765
|
def decode_val(val_text, hexnum):
    """
    Decode tile value text to its numerical equivalent.

    @param val_text: Value text to decode.
    @type  val_text: C{str}

    @param hexnum: Value is a hexadecimal number.
    @type  hexnum: C{bool}

    @return: The numeric value if it can be decoded, C{None} otherwise.
    @rtype: C{int} or C{None}
    """
    base = 16 if hexnum else 10
    try:
        return int(val_text, base)
    except ValueError:
        return None
|
d68ad3c3dcac9d023a430c4a17aa045742f3ae87
| 14,766
|
import random
def generate_bdays(num_bdays):
    """Generate *num_bdays* random birthdays (day-of-year 1..365) as a list."""
    return [random.randint(1, 365) for _ in range(num_bdays)]
|
ef2c5988db714bebea81301edc9f2aa3e979766d
| 14,767
|
def get_colors(img):
    """Pull the color values of every color present in *img*.

    ``getcolors`` yields ``(count, color)`` pairs; the pixel-count cap is
    the full image area so no color is ever dropped.
    """
    max_colors = img.size[0] * img.size[1]
    return [color for count, color in img.getcolors(max_colors)]
|
a8bc950dd365742c38784ce4e75d37e27b63c0c3
| 14,768
|
def decode_subs(s, mapping):
    """Reverse substitutions: swap each placeholder back to its original text.

    >>> decode_subs('hi there', [])
    'hi there'
    >>> decode_subs('g0SUB;there', [('hi ', 'g0SUB;')])
    'hi there'
    >>> decode_subs('g0SUB; theg1SUB;', [('hi', 'g0SUB;'), ('re', 'g1SUB;')])
    'hi there'
    """
    for original, placeholder in mapping:
        s = s.replace(placeholder, original)
    return s
|
e0be61cae24956557c3d40348a0445cd237a2a0c
| 14,769
|
def shared_area(polygon1, polygon2, normalized=False):
    """ Get the area shared by two polygons.

    :param polygon1: first polygon (its area is the normalization base)
    :param polygon2: second polygon
    :param normalized: when True, divide the shared area by polygon1's area
    :return: shared area, or 0 when the polygons do not intersect
    """
    if not polygon1.intersects(polygon2):
        return 0
    overlap = polygon1.intersection(polygon2).area
    return overlap / polygon1.area if normalized else overlap
|
63cb169cd2a7f408b9298342b6830674fb5a43ea
| 14,770
|
import argparse
def get_parser():
    """
    Build the command-line argument parser.

    :return: parser with integer ``--code_size`` and ``--number`` options
    :rtype: argparse.ArgumentParser
    """
    parser = argparse.ArgumentParser()
    for flag in ('--code_size', '--number'):
        parser.add_argument(flag, type=int)
    return parser
|
2fb4d8399e75579b8bd0454af47434e3e61cba3f
| 14,771
|
import inspect
def inline_help(func):
    """Return the function's signature plus the first line of its docstring."""
    signature = '%s%s' % (func.__name__, inspect.signature(func))
    doc = func.__doc__
    summary = doc.strip().split('\n')[0] if doc else ""
    return "%s\n\t%s" % (signature, summary)
|
dcc19fa82dbb21735ef2199dbc4f136d722a1621
| 14,772
|
def MissingNumber3(array, n):
    """ Find the number missing from 1..n using XOR.

    *array* holds the n-1 present values; XOR of the array cancels
    against XOR of the full range, leaving the missing value.
    """
    seen = array[0]
    for idx in range(1, n - 1):
        seen ^= array[idx]
    expected = 1
    for value in range(2, n + 1):
        expected ^= value
    return seen ^ expected
|
2c9602c035af9f0269707ce3aab9af3ad2d0e156
| 14,775
|
def unescapeEndpointDescription(desc):
    """
    Take an escaped endpoint description from our configuration and unescape
    it to pass as a Twisted endpoint description.

    A backslash escapes the next character; ``{...}`` groups are recursively
    unescaped and then re-escaped (``\\``, ``:``, ``=``) so each group stays
    a single value in the outer description.

    :raises ValueError: on a trailing backslash or mismatched braces.
    """
    result = []
    escape = []
    depth = 0
    desc = iter(desc)
    for char in desc:
        if char == "\\":
            try:
                # Bug fix: advance with next(desc); `.next()` was Python 2.
                char = next(desc)
            except StopIteration:
                raise ValueError("Endpoint description not valid: escaped end of string")
            if char not in "{}":
                char = "\\{}".format(char)
            if depth == 0:
                result.extend(char)
            else:
                escape.extend(char)
        elif char == "{":
            if depth > 0:
                escape.append("{")
            depth += 1
        elif char == "}":
            depth -= 1
            if depth < 0:
                raise ValueError("Endpoint description not valid: mismatched end brace")
            if depth == 0:
                result.extend(unescapeEndpointDescription("".join(escape)).replace("\\", "\\\\").replace(":", "\\:").replace("=", "\\="))
                # Bug fix: reset the buffer, otherwise a second sibling
                # {...} group would re-process this group's content too.
                escape = []
            else:
                escape.append("}")
        else:
            if depth == 0:
                result.append(char)
            else:
                escape.append(char)
    if depth != 0:
        raise ValueError("Endpoint description not valid: mismatched opening brace")
    return "".join(result)
|
6197fded9a1366eeab3da93fd26cd22557f11267
| 14,776
|
from typing import List
from typing import Tuple
def append_per_tuple(
    dataset_2tuples: List[Tuple[str,str]],
    new_val: int
) -> List[Tuple[str,str,int]]:
    """
    Widen each 2-tuple of the input list into a 3-tuple by appending the
    fixed item *new_val*, and return the resulting list.
    """
    return [(first, second, new_val) for first, second in dataset_2tuples]
|
f0518a29f9d4d4219a19d41d1dd2dee4d271ee30
| 14,778
|
import os
def renumber_file(file_path, max_previous):
    """
    Given a file path, renumber the img_num portion of the filename
    (the third underscore-separated field) to avoid naming collisions
    when moving training data over to the test directory.

    Parameters:
    ------------
    file_path: string
        file path to rename
    max_previous: int
        maximum img_num of the existing test data

    Returns:
    --------
    string: the path with the img_num field shifted by max_previous
    """
    prefix = os.sep.join(file_path.split(os.sep)[:-1])
    final_path = file_path.split(os.sep)[-1]
    split_final = final_path.split("_")
    new_img_num = int(split_final[2]) + max_previous
    # Bug fix: str() is required -- "_".join raises TypeError when the
    # list contains a non-string item.
    split_final[2] = str(new_img_num)
    final_joined = "_".join(split_final)
    return os.path.join(prefix, final_joined)
|
1906d89498ce3818407d3f5de523e5a4e85c33df
| 14,779
|
def cidr_mask_to_ip_int(mask_num):
    """Convert a CIDR prefix length to its integer netmask value.

    :param mask_num: prefix length, e.g. 16
    :return: the netmask as an integer, e.g. 0xFFFF0000 for /16
    :raises ValueError: if the value is not in 1..32
    """
    cidr_num = int(mask_num)
    if 0 < cidr_num <= 32:
        # cidr_num one-bits followed by (32 - cidr_num) zero-bits.
        return ((1 << cidr_num) - 1) << (32 - cidr_num)
    # Bug fix: the placeholder was '%' instead of '%s', which broke the
    # error-message formatting.
    raise ValueError('%s is not valid cidr code.' % cidr_num)
|
d62cd2c52f3a4fdbbceccb0f3fc1b1988754d410
| 14,780
|
import argparse
import sys
def parse_args():
    """
    Parse input arguments for testing a Fast R-CNN network on video.

    Positional arguments: vid_file, box_file, annot_file, save_file.
    Options tune the job slot (GPU id + 1), propagation length, prediction
    window, temporal subsampling, sampling offset and tracking overlap.

    Prints help and exits with status 1 when invoked with no arguments.
    """
    parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
    parser.add_argument('vid_file')
    parser.add_argument('box_file')
    # NOTE(review): default= on a positional without nargs='?' has no
    # effect -- the argument is still required; confirm intent.
    parser.add_argument('annot_file', default=None,
                        help='Ground truth annotation file. [None]')
    parser.add_argument('save_file', help='Save zip file')
    parser.add_argument('--job', dest='job_id', help='Job slot, GPU ID + 1. [1]',
                        default=1, type=int)
    parser.add_argument('--length', type=int, default=20,
                        help='Propagation length. [20]')
    parser.add_argument('--window', type=int, default=5,
                        help='Prediction window. [5]')
    parser.add_argument('--sample_rate', type=int, default=1,
                        help='Temporal subsampling rate. [1]')
    parser.add_argument('--offset', type=int, default=0,
                        help='Offset of sampling. [0]')
    parser.add_argument('--overlap', type=float, default=0.5,
                        help='GT overlap threshold for tracking. [0.5]')
    # NOTE(review): type=bool is a known argparse pitfall -- bool('False')
    # is True, so any non-empty value enables waiting; confirm intent.
    parser.add_argument('--wait', dest='wait',
                        help='wait until net file exists',
                        default=True, type=bool)
    parser.set_defaults(vis=False, zip=False, keep_feat=False)
    # With no CLI arguments at all, show usage instead of an error trace.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()
    return args
|
a53befdfeca33f8eed631b7f6a5336ac4d83c2d7
| 14,781
|
def index(array, i, j):
    """
    Return the index of the minimum element of ``array[i..j]`` (inclusive).

    Ties resolve to the highest index, matching the recursive formulation
    this replaces (strictly-smaller comparison scanning right to left).
    """
    best = j
    pos = j - 1
    while pos >= i:
        if array[pos] < array[best]:
            best = pos
        pos -= 1
    return best
|
0c1ab5bb1498002ead3271e4dd414592f4f37cd4
| 14,782
|
from pathlib import Path
import argparse
def get_path_to_application(path_and_appname):
    """
    Return the path to the given application if it exists at that location.

    Parameters
    ----------
    path_and_appname: str
        Location and application name

    Returns
    ------
    path_to_app: Path
        Path to the application if it exists.

    Raises
    ------
    argparse.ArgumentTypeError
        If there is no file at the given location.
    """
    candidate = Path(path_and_appname)
    if not candidate.is_file():
        raise argparse.ArgumentTypeError(f'Does not exist: <{candidate}>')
    return candidate
|
d4082ba6f8f09ccacf0fe35a4eed05b593b69ed8
| 14,783
|
from bs4 import BeautifulSoup
def get_img_url(offer_markup):
    """ Search for images in offer markup.

    :param offer_markup: Class "offerbody" from offer page markup
    :type offer_markup: str
    :return: "src" attributes of all elements with class "bigImage"
    :rtype: list
    """
    parsed = BeautifulSoup(offer_markup, "html.parser")
    return [img.attrs["src"] for img in parsed.find_all(class_="bigImage")]
|
b04a1e886fb520f33a325c425dc8afcb4ee58950
| 14,784
|
def sorted(iterable, key=None, reverse=False):
    """Return a new list containing all items of *iterable* in ascending order.

    A custom *key* function customizes the sort order; *reverse* requests
    descending order. (Intentionally shadows the builtin of the same name.)
    """
    ordered = list(iterable)
    ordered.sort(key=key, reverse=reverse)
    return ordered
|
da629fec69583e7d4e56c1c25d710304d2e2e800
| 14,785
|
def filter_by_title(combined_list, pref_titles):
    """Keep entries whose lowercased title contains any preferred title.

    An entry is appended once per matching preferred substring, mirroring
    the nested scan over *pref_titles*.
    """
    kept = []
    for entry in combined_list:
        lowered = entry['title'].lower()
        for wanted in pref_titles:
            if wanted in lowered:
                kept.append(entry)
    return kept
|
3230dd06ed0e8f256cfa86643bde1e45518c9b5b
| 14,787
|
def clean_file_record(raw_record):
    # type: (str) -> str
    """
    Return *raw_record* with every NUL character stripped out.
    """
    nul_char = '\x00'
    return raw_record.replace(nul_char, '')
|
6eaad2c7e8e687ea038fe17c7f10618be6ebf4c1
| 14,788
|
import os
def ckin_path():
    """ Return the ``ckin`` directory under the CWD, creating it if needed.

    :return: absolute path of the (possibly freshly created) directory
    """
    starting_path = os.getcwd()
    path = os.path.join(starting_path, 'ckin')
    # exist_ok avoids the check-then-create race of the old version.
    os.makedirs(path, exist_ok=True)
    return path
|
36605cfc2baa469cad1076ab0c0fda6ffcf43e52
| 14,789
|
import binascii
import hmac
import hashlib
def sign_message(message: str, key: str) -> str:
    """Sign a message with a key.

    Args:
        message:
            String of the message we want to sign.
        key:
            String of the SHA256 hex encoded key we want to use to sign
            the message.

    Returns:
        A string containing the SHA256 hex encoded signature of the message.
    """
    digest = hmac.new(binascii.unhexlify(key), message.encode(), hashlib.sha256)
    return digest.hexdigest()
|
20eb1a354c9ec452a1f705f6f808082d55b4e331
| 14,790
|
import os
import subprocess
def play(path, cmd='play', shell=False, wait=True):
    """
    Execute a custom command with the given .wav file path as its argument.

    Parameters
    ----------
    path : string
        File path with or without the .wav extension.
    cmd : string
        Command to execute, e.g. play (http://sox.sourceforge.net).
    shell : bool
        If true, the command will be executed through the shell.
    wait : bool
        If true, wait for child process to terminate.

    Returns
    -------
    int : the exit status when waiting, otherwise the child's pid.
    """
    expanded = os.path.expanduser(path) if path.startswith('~') else path
    if not expanded.lower().endswith('.wav'):
        expanded += '.wav'
    child = subprocess.Popen([cmd, expanded], shell=shell)
    return child.wait() if wait else child.pid
|
c36d876c8864ffb82f11138b21bfe1b1e576783c
| 14,792
|
import time
def get_local_stamp():
    """Produce a millisecond-precision timestamp from local time.

    :return: timestamp string, e.g. "1535596350123"

    Examples:
    | ${time_stamp}= | Get Utc Stamp |
    =>
    | ${time_stamp} = 1535596350123
    """
    # Bug fix: the old code rounded time.time() to whole seconds BEFORE
    # multiplying by 1000, so the "millisecond" stamp always ended in 000.
    return str(int(round(time.time() * 1000)))
|
68fc419d79158286e82015374f1c7d1f2c102f91
| 14,793
|
def conv_vec_test(x_init, x_new, be):
    """ A simple convergence test using vector values.

    Computes |x_new - x_init| / max(1, |x_init|): a relative change that
    degrades to an absolute change when |x_init| is small.

    :param x_init: previous iterate (backend tensor-like value)
    :param x_new: current iterate
    :param be: compute backend providing empty/sqrt/absolute and tensors
        with .get() -- presumably a neon-style backend; confirm with callers
    :return: the convergence measure as a host-side value
    """
    convergence = be.empty((1, 1))
    # sqrt(x*x) == |x_init|, computed on the backend.
    convergence[:] = be.sqrt(x_init * x_init)
    # Avoids numerical error:
    convergence[:] = max(1, convergence.get())
    convergence[:] = be.absolute(x_new - x_init) / convergence
    return convergence.get()[0]
|
4c5c91b317d2dbc0eeffcfa26096906fae6cc77e
| 14,794
|
import os
def is_exists(path):
    """Return True if *path* exists; print a notice and return False otherwise."""
    if os.path.exists(path):
        return True
    print("Not exists: {}".format(path))
    return False
|
b5149e92d6a1f7cde6957e41bacb5aad00081889
| 14,795
|
import warnings
def are_basal_rates_valid(start_times, rates, minutes_active):
    """ Check that scheduled basal rates are reasonable.

    Negative rates or durations over a day fail the check (with a warning);
    rates above 35 U/hr only warn and continue.
    """
    if any(rate < 0 for rate in rates):
        warnings.warn(
            "Error: data contains negative scheduled basal rates; stopping run"
        )
        return False
    if any(rate > 35 for rate in rates):
        warnings.warn(
            "Warning: data contains scheduled basal rates > 35 U/hr;"
            + " continuing anyway"
        )
    if any(duration > 1440 for duration in minutes_active):
        warnings.warn(
            "Error: data contains basal rates with scheduled duration greater"
            + " than a day (1440 mins); stopping run"
        )
        return False
    return True
|
5bc7aa35383dad8cdf98fb637dcb456a6421bb1e
| 14,796
|
def succ(B, T):
    """Return the successor of the tuple T over the base represented by B.

    Returns None if T is the last tuple in the ordering.
    See the library description for how base representations work.

    Each position idx of B is either an int n (digit range 0..n-1) or a
    pair (basis, lookup) where basis is the ordered list of elements and
    lookup maps an element to its position in basis -- presumably; confirm
    against the library description.
    """
    # Work on a copy so the caller's tuple/list is never mutated.
    Tnew = T[:]
    for i in range(len(B)):
        # Increase the entry in position len(B)-i-1 if possible (i.e. we work
        # right-to-left).
        idx = len(B)-i-1
        if type(B[idx]) == int:
            Tnew[idx] = (Tnew[idx] + 1) % B[idx]
            # If we are not 0, then the increment was successful and we
            # can stop. Otherwise, we reached the end of the base set and
            # had to loop back to the beginning, so we must continue.
            if Tnew[idx] > 0:
                return Tnew
        else:
            # Move to the next position in the list.
            basis, lookup = B[idx]
            Tnew[idx] = basis[(lookup[Tnew[idx]] + 1) % len(basis)]
            # If we re not the first element in the base set, the increment
            # was successful and we can stop. otherwise, we reached the end
            # of the base set and had to loop back to the beginning, so we
            # must continue.
            if Tnew[idx] != basis[0]:
                return Tnew
    # We could not increment anything, and thus were at the end of the list.
    return None
|
4cd072b762551887ee7f4ad64b0f7b574bd58eea
| 14,797
|
def _validate_argument(arg, argname, valid_args):
"""
Validate interpolation method for quantile function.
"""
if arg not in valid_args:
msg = 'Invalid value for {} ({}). Must be on of {}.'
raise ValueError(msg.format(argname, arg, valid_args))
return arg
|
ef0f2a56b2253624f5f00b7d3183db3ba12bcece
| 14,798
|
def find_approximate_rt(peaklist, rt):
    """
    Find a peak with the closest retention time if it's within a tolerance.

    :param peaklist: list, of Peaks
    :param rt: float, retention time to match to
    :return: the first Peak within +/-0.011 of *rt*, or None
    """
    lo, hi = rt - .011, rt + .011
    # Skip peaks without a retention time, then take the first in range.
    with_rt = (peak for peak in peaklist if peak.rt)
    return next((peak for peak in with_rt if lo < peak.rt < hi), None)
|
4cc7e933b02892fd41ff250764b67e7b2a75ff6e
| 14,799
|
import re
def dataset_labels(dataset):
    """
    Return a sorted list of label names (first field of each sample).

    Purely numeric label sets are sorted numerically so that "10" comes
    after "2" rather than before it.
    """
    unique = {sample[0] for sample in dataset}
    if all(re.match("^[0-9]+$", label) for label in unique):
        # All-integer labels: order by numeric value, keep string type.
        return [str(num) for num in sorted(int(label) for label in unique)]
    return sorted(unique)
|
5b97eab5447bbf1c09af3887197077db99638ab6
| 14,800
|
def find_image_center(xsize,ysize):
    """
    Find the pixel coordinates of the image center.

    :param xsize: image width in pixels
    :param ysize: image height in pixels
    :return: (cen_x, cen_y) tuple of center coordinates

    NOTE(review): the even branch returns (size+1)/2 while the odd branch
    returns size/2; under either a 0-indexed or 1-indexed convention one
    of the two branches looks off -- confirm the intended convention.
    """
    if xsize % 2 == 0:
        cen_x = (xsize+1)/2.0
    else:
        cen_x = xsize/2.0
    if ysize % 2 == 0:
        cen_y = (ysize+1)/2.0
    else:
        cen_y = ysize/2.0
    return (cen_x, cen_y)
|
95b70c27352a514433be28fdd2ee1a42334cd183
| 14,801
|
def retrieve(model, primary_keys):
    """Fetch an object from the model's repository by its primary keys."""
    repository = model.repository
    return repository.get(**primary_keys)
|
114bbb5c06cc9e92c064652611a3b5e48b5b933b
| 14,802
|
def health_check():
    """Health-check handler for the "/" endpoint: always responds with "OK"."""
    status = "OK"
    return status
|
75882214faea621a5b066826b19be57a65617129
| 14,804
|
from os import popen
def detect_process_by_name(proc_name, exec_path, port):
    """Check if a process of the given name runs with the exec path and port.

    Args:
        proc_name -- process name,
        exec_path -- path to executable,
        port -- process port.
    """
    matched_pids = []
    listing = popen("ps ax | grep " + proc_name + " | grep -v grep")
    for entry in listing:
        if exec_path in entry and str(port) in entry:
            matched_pids.append(entry.strip().split()[0])
    return bool(matched_pids)
|
93aa7d63e07869b16325333d90a793587d7fe8dc
| 14,805
|
import socket
def get_ip():
    """This method returns the "primary" IP on the local box
    (the one with a default route).

    It connects a UDP socket toward a non-routable address -- no packet is
    actually sent -- and reads back the source address the OS picked.
    Falls back to 127.0.0.1 on any failure, so it works with NAT, without
    connectivity, and on Linux, Windows and OSX.

    Source: Jamieson Becker:
    https://stackoverflow.com/questions/166506/finding-local-ip-addresses-using-pythons-stdlib
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # noinspection PyBroadException
    try:
        # doesn't even have to be reachable
        probe.connect(('10.255.255.255', 1))
        return probe.getsockname()[0]
    except Exception:
        return '127.0.0.1'
    finally:
        probe.close()
|
3cc712baaa7006e9ebd17af4069e3f6a08e0518f
| 14,806
|
def int_to_letter(i: int) -> str:
    """Map an integer in range [0, 25] to a letter from 'A' to 'Z'.

    Behavior is unspecified for other inputs!
    """
    base = ord("A")
    return chr(base + i)
|
593896ad2b9ca3cfa592b87d6a04f7ffaaf7dd21
| 14,808
|
from typing import List
def sanitize_args(cmd: List[str]) -> List[str]:
    """ Filter the command so that it no longer contains passwords.

    A field is masked as '****' when the PREVIOUS field mentions
    'wifi-sec.psk' (case-sensitive) or 'password' (case-insensitive).
    """
    def _is_password(cmdstr: str) -> bool:
        # Hoisted out of the loop: the old code re-created this closure on
        # every iteration.
        return 'wifi-sec.psk' in cmdstr \
            or 'password' in cmdstr.lower()

    sanitized = []
    for idx, fieldname in enumerate(cmd):
        if idx > 0 and _is_password(cmd[idx - 1]):
            sanitized.append('****')
        else:
            sanitized.append(fieldname)
    return sanitized
|
bca8b5820198bf682de3c08bcbfd45ba61d961f4
| 14,810
|
import logging
def auto_start(config):
    """ Connect to Moodle and create the course using command-line/config.

    :param config: object exposing login_callable(), cache_location and
        course -- shapes inferred from usage here; confirm with callers.
    :return: (moodle, course) tuple of the session and the course object
    """
    logging.info("Logging into Moodle")
    # login_callable() returns a callable that performs the actual login.
    call = config.login_callable()
    moodle = call()
    # Point the session's cache at the configured location before use.
    moodle.cache = config.cache_location
    course = moodle.course(config.course)
    return moodle, course
|
b464450a0e580318ec5a1deae0e42df2fee613fa
| 14,811
|
def _translate_ip_xml_json(ip):
"""
Convert the address version to int.
"""
ip = dict(ip)
version = ip.get('version')
if version:
ip['version'] = int(version)
if ip.get('type'):
ip['type'] = ip.get('type')
if ip.get('mac_addr'):
ip['mac_addr'] = ip.get('mac_addr')
return ip
|
6c6656e27fe8e67103783f714cb12e75acf82329
| 14,812
|
import os
def get_version_path() -> str:
    """Return the path of ``version.py`` next to this module.

    Returns:
        str: Absolute directory of this file plus '/version.py'.
    """
    here = os.path.dirname(os.path.abspath(__file__))
    return here + '/version.py'
|
e9dc3286b0783967c69b49c705fc64c5934d1429
| 14,814
|
import numpy as np
def mean_and_error(_data):
    """A helper for creating error bars: return (mean, 2*std), printing both."""
    values = np.array(_data)
    mean_val = np.mean(values)
    two_sigma = 2 * np.std(values)
    print(f"{mean_val:.0f} +- {two_sigma:.0f}")
    return mean_val, two_sigma
|
a26cde22d1197e964297e04473df2a309b567f05
| 14,815
|
def _explicit_module_name(tags):
"""Returns an explicit module name specified by a tag of the form `"swift_module=Foo"`.
Since tags are unprocessed strings, nothing prevents the `swift_module` tag from being listed
multiple times on the same target with different values. For this reason, the aspect uses the
_last_ occurrence that it finds in the list.
Args:
tags: The list of tags from the `cc_library` target to which the aspect is being applied.
Returns:
The desired module name if it was present in `tags`, or `None`.
"""
module_name = None
for tag in tags:
if tag.startswith("swift_module="):
_, _, module_name = tag.partition("=")
return module_name
|
88cfba0731e3d1503d6a377150ed12534d7c8688
| 14,816
|
def value_index_map(array):
    """
    Map each value of *array* to the 0-based index where it appears.

    Assumes array elements are unique. Used to get a mapping from pk's of
    a query set axis to the 0-index.

    :param array: iterable of unique values
    :return: dict {value: index}
    """
    return dict((value, position) for position, value in enumerate(array))
|
aa623b0a6fed762caa005506f4729b079f97fbb2
| 14,817
|
def strip_quotes(string: str) -> str:
    """
    Remove leading/trailing double quotes; used as a get_parser for
    various parameters in this driver.
    """
    quote_char = '"'
    return string.strip(quote_char)
|
4eba25b52d7959bf88e9059663201086a3a00335
| 14,818
|
import subprocess
def _guess_openci_devices():
    """ Guess which OpenCI device to use.

    Assumes they're ranked and the first is the "best".
    May need a better strategy.

    Runs `cas-offinder` and parses its device listing; the first
    "Type:" line wins. Returns e.g. "G0" for a GPU or "C1" for a CPU.
    Implicitly returns None when no "Type:" line follows the header.

    Returns
    -------
    str
    """
    out = subprocess.check_output(['cas-offinder'], stderr = subprocess.STDOUT)
    out = out.decode('ascii')
    pos = out.find('Available device list:')
    assert pos != -1
    for line in out[pos:].split('\n'):
        if line.startswith('Type:'):
            # Line format assumed to be "Type: ..., ID: n, Name" --
            # inferred from the parsing below; confirm against cas-offinder.
            _pu, _id, _name = line.split(', ')
            pu = 'G' if 'GPU' in _pu else 'C'
            id = _id.split(' ')[1]
            return pu + id
|
dc401d58b261e725efcde15cfa44c0b0cc1383de
| 14,819
|
import os
import itertools
def check_redo(
        in_filepaths,
        out_filepaths,
        force=False,
        make_out_dirpaths=False,
        no_empty_input=False):
    """
    Check if input files are newer than output files, to force calculation.

    Args:
        in_filepaths (iterable[str]|None): Input filepaths for computation.
        out_filepaths (iterable[str]): Output filepaths for computation.
        force (bool): Force computation to be re-done.
        make_out_dirpaths (bool): Create output dirpaths if not existing.
        no_empty_input (bool): Check if the input filepath list is empty.

    Returns:
        force (bool): True if the computation is to be re-done.

    Raises:
        IndexError: If the input filepath list is empty.
            Only if `no_empty_input` is True.
        IOError: If any of the input files do not exist.
    """
    # check if output exists: any missing output forces recomputation
    if not force:
        for out_filepath in out_filepaths:
            if out_filepath and not os.path.exists(out_filepath):
                force = True
                break
    # create output directories (only once recomputation is decided)
    if force and make_out_dirpaths:
        for out_filepath in out_filepaths:
            out_dirpath = os.path.dirname(out_filepath)
            if not os.path.isdir(out_dirpath):
                os.makedirs(out_dirpath)
    # check if input is older than output
    if not force:
        # check if input is not empty
        if in_filepaths:
            # check if input exists
            for in_filepath in in_filepaths:
                if not os.path.exists(in_filepath):
                    raise IOError('Input file does not exists.')
            # any input newer than any output forces recomputation
            for in_filepath, out_filepath in \
                    itertools.product(in_filepaths, out_filepaths):
                if os.path.getmtime(in_filepath) > os.path.getmtime(
                        out_filepath):
                    force = True
                    break
        elif no_empty_input:
            raise IOError('Input file list is empty.')
    return force
|
3dd4b56625ec81693f08405f0a0879aa30f37e90
| 14,821
|
def functional_domains(gene_descriptors, location_descriptors):
    """Provide possible functional_domains input (two sample domain dicts)."""
    preserved = {
        "status": "preserved",
        "name": "WW domain",
        "id": "interpro:IPR001202",
        "gene_descriptor": gene_descriptors[5],
        "location_descriptor": location_descriptors[6],
    }
    lost = {
        "status": "lost",
        "name": "Tyrosine-protein kinase, catalytic domain",
        "id": "interpro:IPR020635",
        "gene_descriptor": gene_descriptors[3],
        "location_descriptor": location_descriptors[7],
    }
    return [preserved, lost]
|
a6153f34c1ee241542696ce9638ca7667769af97
| 14,822
|
def get_list_index(imageList, index):
    """
    Return the element of *imageList* at the given index.

    :param imageList: sequence to read from
    :param index: position to fetch
    :return: the element at that position
    """
    element = imageList[index]
    return element
|
d27319ee73a250421d7f4a68158dcd796c438ba4
| 14,823
|
import logging
import subprocess
def _call(args, check=True):
"""Wrapper for calling a subprocess.
args is the first argument of subprocess.Popen, typically
an array, e.g., ["echo", "hi"].
If check is set, raise an exception on failure.
"""
logging.info("Calling: %s", args)
if check:
subprocess.check_call(args, stdin=None)
else:
return subprocess.call(args, stdin=None)
|
06681d6b0f3a06e248b3395d7eaa3e59f6d9e84e
| 14,824
|
def question_cleaner(df_query):
    """
    Used in the "model_funetuning" notebook for the insuranceQA dataset to
    extract those questions which have answers.

    Keeps a row when its ground-truth answer ids (column 2, space-separated)
    intersect its knowledge-base ids (column 3, space-separated).

    Parameters:
        df_query: pandas DataFrame of all the questions and answers
    Returns:
        DataFrame with only questions that have answers
    """
    # NOTE(review): these two pre-loop assignments are dead -- both are
    # overwritten on the first loop iteration below. `ct` is also unused.
    kb=([int(xx) for xx in (df_query[3].iloc[0]).split(' ')])
    gt = [int(xx) for xx in (df_query[2].iloc[0]).split(' ')]
    ct=0
    negg=0
    withans=[]
    for ii in range(len(df_query)):
        kb=[int(xx) for xx in (df_query[3].iloc[ii]).split(' ')]
        gt = [int(xx) for xx in (df_query[2].iloc[ii]).split(' ')]
        # Keep the row when ground truth and knowledge base ids overlap.
        if bool(set(gt) & set(kb)):
            withans.append(ii)
        else:
            negg+=1
    print('total:{}, removed:{}, remainder:{}'.format(len(df_query), negg, len(withans)))
    return df_query.iloc[withans]
|
ba294e7adebf9ddc56249406ee3c6ce2e7fc8068
| 14,829
|
import os
import shutil
def create_egg(
    path_to_recommenders_repo_root=os.getcwd(),
    local_eggname="Recommenders.egg",
    overwrite=False,
):
    """
    Packages files in the reco_utils directory as a .egg file that can be uploaded to dbfs and installed as a library on a databricks cluster.

    Args:
        path_to_recommenders_repo_root (str): the (relative or absolute) path to the root of the recommenders repository
        local_eggname (str): the basename of the egg you want to create (NOTE: must have .egg extension)
        overwrite (bool): whether to overwrite local_eggname if it already exists.

    Returns:
        the path to the created egg file.
    """
    # create the zip archive:
    myzipfile = shutil.make_archive(
        "reco_utils",
        "zip",
        root_dir=path_to_recommenders_repo_root,
        base_dir="reco_utils",
    )
    # overwrite egg if it previously existed
    # NOTE(review): when the egg exists and overwrite is False, os.rename
    # silently replaces it on POSIX but raises on Windows -- confirm intent.
    if os.path.exists(local_eggname) and overwrite:
        os.unlink(local_eggname)
    os.rename(myzipfile, local_eggname)
    return local_eggname
|
90e9e062e4183220450dddbbb362469cd0d9c747
| 14,830
|
import torch
def argmax_mae(output, target):
    """Mean absolute error between predicted and true age-range indices.

    Age ranges are categorized by [0-12, 13-18, 19-25, 26-35, 36-45,
    46-55, 56-65, >= 66]; predictions are the argmax over the class
    dimension of ``output``.
    """
    with torch.no_grad():
        n = output.size(0)
        preds = output.argmax(dim=1)
        return torch.abs(preds - target).sum().float() / n
|
eafb7c069c4ac37875bea25cca442126c2c6e08f
| 14,833
|
import re
def escape_ansi(line):
    """Strip ANSI escape/control sequences from *line* and return the result."""
    pattern = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]')
    return pattern.sub('', line)
|
ae9383b102a07a811211a88327c63e169438bc19
| 14,834
|
def count_unique_peaks(peaks):
    """Return how many peaks across all chromosomes are not flagged common."""
    total = 0
    for chrom in peaks.chroms:
        # A peak counts as unique when its iscommon flag is falsy.
        total += sum(1 for peak in peaks.fetch(chrom) if not peak.iscommon)
    return total
|
53da151e1e3c95ecd0bf73234878d5d1e63cce33
| 14,835
|
import requests
def _get_latest_version():
    """Return current ELK stack major version as an integer.

    Probes the Elastic documentation site for successive major versions
    until a version's page stops responding with HTTP 200.
    """
    URL = 'https://www.elastic.co/guide/en/elasticsearch/reference/{}.x/deb.html'
    candidate = 5
    while True:
        if requests.get(URL.format(candidate + 1)).status_code != 200:
            return candidate
        candidate += 1
|
4b5de5ad1d5305a88e9d24854e7aa77b4b4d58fa
| 14,837
|
from random import randint
def quick_sort(array):
    """Sort a list of numbers with a randomized-pivot quicksort.

    Returns a new sorted list built by three-way partitioning around a
    randomly chosen pivot; the input list is left untouched.
    """
    if len(array) <= 1:
        return array
    pivot = array[randint(0, len(array) - 1)]
    below = [value for value in array if value < pivot]
    same = [value for value in array if value == pivot]
    above = [value for value in array if value > pivot]
    return quick_sort(below) + same + quick_sort(above)
|
5b9ff6782874094a9cd997e05747e4ae9988fa53
| 14,838
|
def hex_str_to_int(hex_str: str):
    """Parse a hex color string: '#ffffff' -> 0xffffff."""
    # Drop the first character when a '#' is present anywhere in the string
    # (matches the original leading-hash convention).
    digits = hex_str[1:] if "#" in hex_str else hex_str
    return int(digits, 16)
|
65fc72fcec909a5062a4881f032d9f1bbe4aeecb
| 14,840
|
def get_page_url(page_num):
    """Build the javbus page URL for *page_num* (pages are sharded across threads)."""
    return 'https://www.javbus2.pw/genre/hd/' + str(page_num)
|
1fef6cd0971ee1b6bfa6d143f937976dc9b5e715
| 14,841
|
def xlsw_write_row(ws, row_idx, row, fmt=None):
    """Write one row of values into worksheet *ws* at *row_idx*.

    Args:
        ws: worksheet exposing ``write(row, col, value, fmt)``
            (xlsxwriter-style — TODO confirm against caller).
        row_idx: row number to start writing at.
        row: a list, data to write.
        fmt: format applied to every cell.

    Returns:
        The next row number (``row_idx + 1``).
    """
    for col_idx, value in enumerate(row):
        ws.write(row_idx, col_idx, value, fmt)
    return row_idx + 1
|
4d6da85ff95c97a30c05511cb6995e946b7ccb80
| 14,842
|
def get_repository_metadata_by_id(app, id):
    """Fetch a RepositoryMetadata record from the database by encoded id.

    The id is first decoded via ``app.security.decode_id`` and then used as
    the primary key for the SQLAlchemy ``get`` lookup.
    """
    decoded_id = app.security.decode_id(id)
    return app.model.session.query(app.model.RepositoryMetadata).get(decoded_id)
|
ec7d532e3463b29f2e9dfb72cec71212f0c49f85
| 14,845
|
import base64
def numpy_to_json(np_arr):
    """Encode a numpy array as a json-serializable dict.

    TFJS only supports float32, int32, bool, string and complex64, so the
    array's dtype kind is first mapped onto one of those names.
    """
    # numpy dtype "kind" character -> TFJS dtype name
    dtype_map = {
        'b': 'bool',
        'i': 'int32',
        'u': 'int32',
        'S': 'string',
        'O': 'string',
        'U': 'string',
        'c': 'complex64',
        'f': 'float32',
    }
    tfjs_dtype = dtype_map[np_arr.dtype.kind]
    encoded = {
        'shape': list(np_arr.shape),
        'dtype': tfjs_dtype,
    }
    if tfjs_dtype == 'string':
        encoded['data'] = np_arr.flatten().tolist()
    else:
        # Base64-encoded raw bytes are faster for JSON to parse and can
        # represent inf/nan values.
        encoded['dataEnc'] = base64.encodebytes(np_arr.astype(tfjs_dtype).tobytes()).decode()
    return encoded
|
426b6c38388008ed2ca02e119ea07169dd39d38e
| 14,846
|
def read_features(path):
    """
    Read a list of features in feature-per-line format, where each
    feature is a repr and needs to be evaled.
    @param path path to read from
    @return list of evaluated features
    """
    # Materialize inside the `with` block: the previous lazy map(eval, f)
    # only read the file when iterated — by then it was closed, raising
    # "I/O operation on closed file".
    # NOTE(review): eval of file contents is unsafe on untrusted input.
    with open(path) as f:
        return [eval(line) for line in f]
|
8bc32f064b31cef4f5d3e4ebfe0ecda5da729e1f
| 14,847
|
def PGCD(m, n):
    """
    Compute the GCD of two positive integers by successive subtractions.

    @param m first integer
    @param n second integer
    @return greatest common divisor of *m* and *n*

    Raises an ``Exception`` when either argument is not strictly positive.
    Iterative form of the subtraction method: the larger operand is
    repeatedly replaced by the difference of the two until they are equal
    (or one of them reaches 1).
    """
    if m <= 0 or n <= 0:
        raise Exception("impossible de calculer le PGCD")
    while True:
        if m == 1 or n == 1:
            return 1
        if m == n:
            return m
        if m < n:
            n -= m
        else:
            m -= n
|
a77ee9a5e83b1748bff30a1d7db672456b408907
| 14,848
|
import torch
def gram_matrix(input_data):
    """Gram matrix for the style-loss module.

    For a 2d-convolution activation of size (a, b, c, d) — batch size a
    (typically 1), b feature maps with c*d features each — the feature
    maps are flattened, their pairwise inner products are taken, and the
    result is normalized by the total element count.
    """
    batch, maps, height, width = input_data.size()
    flat = input_data.view(maps, batch * height * width)
    gram = flat @ flat.t()
    return gram / (batch * maps * height * width)
|
6bf808534a52576a2559789c8d9080363e564a0c
| 14,850
|
def humanize(memory, suffix="B", kilo=1024):
    """
    Scale memory to its proper format
    e.g:
        1253656 => '1.20 MiB'
        1253656678 => '1.17 GiB'

    Args:
        memory: quantity (in base units) to format.
        suffix: unit suffix appended after the scale prefix.
        kilo: scaling base — 1000 (SI prefixes) or 1024 (binary prefixes).

    Returns:
        str: the value formatted with two decimals and the best-fit prefix.

    Raises:
        ValueError: if ``kilo`` is neither 1000 nor 1024.
    """
    if kilo == 1000:
        units = ["", "k", "M", "G", "T", "P"]
    elif kilo == 1024:
        units = ["", "Ki", "Mi", "Gi", "Ti", "Pi"]
    else:
        raise ValueError("kilo must be 1000 or 1024!")
    for unit in units[:-1]:
        if memory < kilo:
            return f"{memory:.2f} {unit}{suffix}"
        memory /= kilo
    # Bug fix: the original fell off the loop and returned None for values
    # >= kilo**6; clamp such values to the largest available prefix instead.
    return f"{memory:.2f} {units[-1]}{suffix}"
|
7afd033e2ead94ca8853a347ad9c8d1fa47a5e64
| 14,851
|
def split_decimal(flt):
    """
    Split a float into its whole and decimal parts.

    params:
        flt : <float> float to convert
    returns: <list> [whole number, decimal part]

    NOTE(review): leading zeros in the fractional digits are lost
    (1.05 -> [1, 5]) and only the whole part keeps the sign — confirm
    callers expect this.
    """
    whole_part, frac_part = str(flt).split('.')
    return [int(whole_part), int(frac_part)]
|
4175b3370591028bb96b5bf3eaceee3391c0b1f3
| 14,852
|
import six
def get_val(root, items, sep='.', **kwargs):
    """
    Swagger client objects don't behave like dicts, so need a custom func
    to step down through keys when defined as string vars etc.
    Warnings:
        If you try to retrieve a key that doesn't exist you will get None
        instead of an Attribute Error. Code defensively, or abuse it, whatever.
    Args:
        root [dict, client obj]: The dict or Object to recurse through
        items (list, str): either list or dot notation string of keys to walk
            through
        sep (str): The character expected as a separator when parsing strings
    Returns (varies): The target val at the last key
    """
    assert isinstance(items, (list, six.string_types))
    # Walk one key at a time; a string path is split on `sep` first.
    for key in items if isinstance(items, list) else items.split(sep):
        if root is None:
            # A previous step already dead-ended; propagate the None.
            return root
        elif isinstance(root, list):
            # Lists require a 'field|value' key to select one element.
            if '|' not in key:
                raise ValueError("Found list but key {0} does not match list "
                                 "filter format 'x|y'".format(key))
            field, value = key.split('|')
            # NOTE(review): assumes list elements are dicts (uses .get);
            # when no element matches, root is left as the whole list.
            list_filter = [x for x in root if x.get(field) == value]
            if list_filter:
                root = list_filter[0]
        elif isinstance(root, dict):
            # dict.get returns None for missing keys (see Warnings above).
            root = root.get(key)
        else:
            # Fall back to attribute access for swagger client objects.
            root = root.__getattribute__(key)
    return root
|
82c13dba6a93374ae1f8595f9dec49557cdb5651
| 14,853
|
def lines():
    """Return sample lines of test data, each newline-terminated."""
    return ["abc\n", "def\n", "ghi\n"]
|
0e00c0bb925ed3a9cc945578e5f879bbff7899f2
| 14,854
|
import operator
def find_n_maximum(items, n, skip=False):
    """
    Find the n largest elements of a list.

    :param items: list of items
    :param n: how many maxima to take
    :param skip: if True, duplicates of the n largest distinct values are all
        returned (in original list order), not just the first n positions
    :return: two lists — the indices of the selected elements and the
        elements themselves
    """
    if not skip:
        # Sort (index, value) pairs by value, descending, and take the top n.
        ordered = sorted(enumerate(items), key=operator.itemgetter(1), reverse=True)
        top_n_index = [ordered[i][0] for i in range(n)]
        top_n_elem = [ordered[i][1] for i in range(n)]
    else:
        # Find the n largest distinct values, then collect every occurrence.
        distinct = list(dict.fromkeys(items))
        _, best_values = find_n_maximum(distinct, min(n, len(distinct)))
        top_n_index = []
        top_n_elem = []
        for idx, value in enumerate(items):
            if value in best_values:
                top_n_index.append(idx)
                top_n_elem.append(value)
    return top_n_index, top_n_elem
|
ee31b4194ba5ad7c56544efd1ef18e663750680b
| 14,855
|
def team_rm_from_repository_payload(team_default_payload):
    """Provide a team payload for removing a team from a repository.

    Mutates the given default payload in place (its ``action`` is set to
    ``"removed_from_repository"``) and returns it.
    """
    payload = team_default_payload
    payload["action"] = "removed_from_repository"
    return payload
|
9ffd60f86fa0e4daee0b5c4d73890653e64a47c7
| 14,856
|
def extract_parts(note):
    """Search a given chunk of markup for specific references.

    :param note: the `BeautifulSoup` version of the markup to parse
    :returns: a tuple of the title, headers, and content; content is ''
        when only two divs are found, and divs beyond the third are ignored
    """
    divs = note.find_all('div')
    if len(divs) == 2:
        title, headers = divs
        content = ''
    else:
        # Three or more divs: first two are title/headers, third is content.
        title, headers = divs[:2]
        content = divs[2]
    return title, headers, content
|
cfda6dc2a8ebe51dd5c72e4bdcf2125bc1e9f60a
| 14,857
|
def get_diffs(routes1, routes2, route_ids):
    """Get difference in number of routes using each segment.

    params
    - routes1: Dict{str : List[(lon, lat)]} - first set of routes
    - routes2: Dict{str : List[(lon, lat)]} - second set of routes
    - route_ids: List[str] - IDs of routes to consider

    return
    - Dict{((lon, lat), (lon, lat)) : int} - per consecutive-point segment,
      the count of uses in routes1 minus the count of uses in routes2
    """
    def consecutive_pairs(points):
        # Sliding window of adjacent points -> segment endpoints.
        return zip(points, points[1:])

    diffs = {}
    for rid in route_ids:
        for segment in consecutive_pairs(routes1[rid]):
            diffs[segment] = diffs.get(segment, 0) + 1
        for segment in consecutive_pairs(routes2[rid]):
            diffs[segment] = diffs.get(segment, 0) - 1
    return diffs
|
08853ca746adc459d5d094a4d81af4358f58607f
| 14,858
|
from pathlib import Path
from typing import List
def get_packages_from_requirements(basedirpath: Path,) -> List:
    """Extract packages from requirements.txt as a Python list.

    Args:
        basedirpath: path to the requirements file.

    Returns:
        List[str]: the requirement specifiers, stripped of surrounding
        whitespace/newlines, with blank lines and '#' comment lines skipped.
        (The previous version returned raw newline-terminated lines,
        including comments, despite the docstring promising packages.)
    """
    with open(basedirpath, "r") as f:
        return [
            line.strip()
            for line in f
            if line.strip() and not line.lstrip().startswith("#")
        ]
|
ca0c85e6175f7026781607bce06ed42d6344db1a
| 14,859
|
def get_attributes_and_labels(data):
    """
    Split a dataset into its class column and attribute columns.

    :param data: The dataset to be divided (class is the last column)
    :return: A two-element list: [class column (last), attribute columns (rest)]
    """
    labels = data.iloc[:, -1]
    attributes = data.iloc[:, :-1]
    return [labels, attributes]
|
c383dad4720093b4002415b48d793bab5834c3fd
| 14,860
|
def _code_snippet(snippet: str) -> str:
"""Generates a markdown code snippet based on python code.
Args:
snippet (str): Python code.
Returns:
str: Markdown code snippet.
"""
result = "```python\n"
result += snippet + "\n"
result += "```\n\n"
return result
|
5c44bccba225dbd6d60f19c712393b291fe065b5
| 14,861
|
def registration(father, gender):
    """Return a naming function bound to a father's surname.

    Daughters (gender 'f' or 'F') get '-Elizabeth ' inserted before the
    surname; everyone else just gets the surname appended.

    >>> f = registration('Donkey', 'F')
    >>> f("coco")
    'coco-Elizabeth Donkey'
    >>> f = registration('Donkey', 'm')
    >>> f('Will')
    'Will Donkey'
    """
    is_daughter = gender.lower() == 'f'

    def full_name(name):
        middle = '-Elizabeth ' if is_daughter else ' '
        return name + middle + father

    return full_name
|
6b529d556d5c8cb8658f7a7086ee6905d7e53976
| 14,862
|
def remove_conflicts(applicant, potential_editors):
    """
    Remove editors from potential editors who might be sources of conflict of
    interest. These are typically by name or university.
    """
    def no_conflict(editor):
        # De Morgan of the original exclusion test: keep an editor only if
        # neither the name nor the universities value matches a conflict.
        return (editor["name"] not in applicant["conflicts-faculty"]
                and editor["universities"] not in applicant["conflicts-university"])

    return [editor for editor in potential_editors if no_conflict(editor)]
|
49fe7dc482e7f53114f78cf9ed353eded87ecba4
| 14,863
|
import math
def solve(X, Y, D):
    """Minimal number of fixed-length jumps to reach at least Y from X.

    https://app.codility.com/demo/results/trainingP7B888-UFF/
    The frog starts at position X, every jump covers exactly D, and it must
    end at a position >= Y. E.g. X=10, Y=85, D=30 -> 3 jumps (40, 70, 100).
    """
    remaining = Y - X
    # Round up: any leftover fraction of a jump still costs one full jump.
    return math.ceil(remaining / D)
|
842d18b2dd1b93e0edeaf1f39937abbcba464084
| 14,864
|
def create_lifetime_tech(connector, technology_list):
    """
    This function writes the lifetime tech table in Temoa: it creates the
    "LifetimeTech" table and inserts one row per (region, technology) pair.

    TO DO: Update this function to handle technologies with
    technology lifetimes that vary by region.
    """
    table_command = """CREATE TABLE "LifetimeTech" (
    "regions"	text,
    "tech"	text,
    "life"	real,
    "life_notes"	text,
    PRIMARY KEY("regions","tech"),
    FOREIGN KEY("tech") REFERENCES "technologies"("tech")
    );"""
    insert_command = """
    INSERT INTO "LifetimeTech" VALUES (?,?,?,?)
    """
    # One row per region of each technology.
    rows = [
        (region, tech.tech_name, tech.tech_lifetime[region], 'NULL')
        for tech in technology_list
        for region in tech.regions
    ]
    cursor = connector.cursor()
    cursor.execute(table_command)
    cursor.executemany(insert_command, rows)
    connector.commit()
    return table_command
|
86f33ecdd2a8db4a0bdceccd0ab8131b175aba14
| 14,865
|
def remove_glyph_from_jp_font(_font, jp_font_name):
    """Delete from the Japanese font the glyphs that JuliaMono should supply.

    The end of the private-use range depends on the specific
    GenJyuuGothic weight being processed.
    """
    if jp_font_name == "GenJyuuGothicL-Monospace-Heavy.ttf":
        last_private = 0x110618
    else:
        last_private = 0x11061A
    codepoints = (
        list(range(0x0000, 0x2E7F + 1))
        + list(range(0x1D400, 0x1D7FF + 1))
        + list(range(0x1F100, 0x1F1A0 + 1))
        + list(range(0x110000, last_private + 1))
    )
    for codepoint in codepoints:
        _font.selection.select(codepoint)
        _font.clear()
    return _font
|
0770eae9187a370e6631adf35adcefd9ea0e45ee
| 14,867
|
def mat_diff(mat_a, mat_b):
    """
    Subtract two matrices element-wise: c_ij = a_ij - b_ij.

    The subtraction requires both matrices to have the same number of rows
    and columns; on a mismatch an explanatory string is returned instead of
    a result matrix (behavior kept from the original implementation).

    :param mat_a: list of lists with user defined a_ij elements
    :param mat_b: list of lists with user defined b_ij elements
    :return: mat_c = mat_a - mat_b, or an error string on dimension mismatch
    """
    same_shape = len(mat_a) == len(mat_b) and len(mat_a[0]) == len(mat_b[0])
    if not same_shape:
        return "You cannot subtract these matrices! They need to have same dimensions!\n"
    print("The subtraction of the two matrices is:")
    return [
        [a_ij - b_ij for a_ij, b_ij in zip(row_a, row_b)]
        for row_a, row_b in zip(mat_a, mat_b)
    ]
|
0441ea751a3a6c9fb64e9b3a33420fd4b0f8aa3a
| 14,868
|
from typing import Optional
import mimetypes
import os
def guess_mime(path: str) -> Optional[str]:
    """Guess the mime type for a given path.

    Arguments:
        path (str): the filesystem path to inspect

    Returns:
        str: "directory" for directories, the guessed mime-type for
        existing files, and "application/octet-stream" otherwise.
    """
    if os.path.isdir(path):
        return "directory"
    guessed, _encoding = mimetypes.guess_type(path)
    # Only trust the guess when the file actually exists.
    if guessed and os.access(path, os.F_OK):
        return guessed
    return "application/octet-stream"
|
0a1b7d4d3212230f7491d244b01f8a9f1caa0ef5
| 14,872
|
import sys
def pyDecoder(text):
    """Python 2/3 `utf-8` decoder.

    On Python 3, ``bytes`` input is decoded to ``str``; on Python 2,
    ``str`` (byte string) input is decoded to ``unicode``. Any other type
    is returned unchanged.

    Args:
        (str) text: text to be decoded.

    Returns:
        (str|unicode): decoded utf-8 unicode text
    """
    running_py3 = sys.version_info > (3, 0)
    if running_py3:
        return text.decode('utf-8') if isinstance(text, bytes) else text
    # Python 2: native str is a byte string and needs decoding.
    return text.decode('utf-8') if isinstance(text, str) else text
|
6844020c9d1f8f8d0310fbc00ed57c4a9d1b5481
| 14,874
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.