| content (string, 35–416k chars) | sha1 (string, 40 chars) | id (int64, 0–710k) |
|---|---|---|
import copy
def findNDDProportionalAllocation(prefProfile):
    """
    INPUT:
    prefProfile: a PrefProfile object.
    OUTPUT:
    If an NDDPR allocation exists - it is returned as a dictionary that maps agents to their bundles.
    Otherwise - None is returned.
    >>> prefProfile = PrefProfile({"Alice":Pref([6,5,4,3,2,1]), "Bob":Pref([6,5,4,3,2,1]), "Carl":Pref([6,5,4,3,2,1])})
    >>> findNDDProportionalAllocation(prefProfile) is None
    True
    >>> prefProfile = PrefProfile({"Alice":Pref([6,5,4,3,2,1]), "Bob":Pref([5,6,4,3,2,1]), "Carl":Pref([4,5,6,3,2,1])})
    >>> allocation = findNDDProportionalAllocation(prefProfile)
    >>> dicttools.stringify(allocation)
    '{Alice:[6, 1], Bob:[5, 2], Carl:[4, 3]}'
    """
    itemsPerAgent = prefProfile.itemCount // prefProfile.agentCount
    # First necessary condition for NDDPR allocation: it is possible to give each agent the same num of items:
    if itemsPerAgent * prefProfile.agentCount < prefProfile.itemCount:
        return None
    # Second necessary condition for NDDPR allocation: it is possible to give each agent its best item.
    bestItems = {pref.bestItem() for pref in prefProfile.prefs}
    if len(bestItems) < prefProfile.agentCount:
        return None
    allocation = {agent: list() for agent in prefProfile.agents}
    prefProfile = copy.deepcopy(prefProfile)
    for iteration in range(itemsPerAgent):
        for agent in prefProfile.agents:
            item = prefProfile.agentsToPrefs[agent].bestItem()
            allocation[agent].append(item)
            prefProfile.removeItem(item)
        prefProfile.agents.reverse()
    return allocation
|
a25e74afa571929a0c567c9ff38ca9045a9f85ab
| 21,674
|
def concat_fm(fm):
    """
    Concatenates directional feature maps as shown in the original paper.
    This function is used for visualization purposes only.
    :param fm: 12 ltrp feature maps
    :return: list of 4 concatenated feature maps
    """
    d1 = fm[0] + fm[1] + fm[2]
    d2 = fm[3] + fm[4] + fm[5]
    d3 = fm[6] + fm[7] + fm[8]
    d4 = fm[9] + fm[10] + fm[11]
    return [d1, d2, d3, d4]
|
bb6d6b1f9c6d0441be78e1b04d31e1eb24a6ce28
| 21,676
|
def _bound(color_component: float, minimum: float = 0,
           maximum: float = 255) -> float:
    """
    Bound the given color component value between the given min and max values.
    The minimum and maximum values will be included in the valid output.
    i.e. Given a color_component of 0 and a minimum of 10, the returned value
    will be 10.
    """
    color_component_out = max(color_component, minimum)
    return min(color_component_out, maximum)
|
15f335051d228069e7ca0be3c0768e99b009b3e6
| 21,677
|
def orange(text):
    """ Return this text formatted orange (olive) """
    return '\x0307%s\x03' % text
|
aff6e5495b8e9c8f8b763539c6d9308236e313fe
| 21,678
|
def merge_sort(items):
    """Merge sort.
    :param items: List of things to sort
    :return: New sorted list
    """
    if len(items) > 1:
        middle = len(items) // 2
        left = merge_sort(items[:middle])   # Sort left partition
        right = merge_sort(items[middle:])  # Sort right partition
        items = []
        # As long as both partitions still have items, merge the smaller head
        while len(left) > 0 and len(right) > 0:
            if left[0] < right[0]:
                items.append(left[0])
                left.pop(0)
            else:
                items.append(right[0])
                right.pop(0)
        # Merge remaining left partition items to the list
        for item in left:
            items.append(item)
        # and then the right
        for item in right:
            items.append(item)
    return items
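# A minimal usage sketch (made-up data), assuming the function above:
print(merge_sort([5, 2, 4, 6, 1, 3]))  # [1, 2, 3, 4, 5, 6]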
|
3ddff642f8bd3a00b744c0af953cb8bd3da9c30d
| 21,679
|
import csv
def csv_file(dict_list: list, filename: str):
    """Write a CSV file with the given filename and data.
    Parameters
    ----------
    dict_list : list
        List of dicts.
    filename : str
        Filename.
    Returns
    -------
    dict
        Result {status, csv_filename}.
    """
    result = {
        'status': 0,
        'csv_filename': None
    }
    try:
        fieldnames = dict_list[0].keys() if dict_list else []
        with open(filename, 'w', newline='') as csvfile:
            writer = csv.DictWriter(
                csvfile,
                fieldnames=fieldnames,
                quoting=csv.QUOTE_MINIMAL
            )
            writer.writeheader()
            for row in dict_list:
                writer.writerow(row)
        result['csv_filename'] = filename
    except Exception as err:
        result['status'] = 1
        result['error'] = err
    return result
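# A minimal usage sketch; the filename and rows below are made up:
rows = [{"name": "Ada", "id": 1}, {"name": "Grace", "id": 2}]
print(csv_file(rows, "people.csv"))  # {'status': 0, 'csv_filename': 'people.csv'}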
|
cd8fe9285a92bdbb7f9f80c7f15dbe8e222b2227
| 21,680
|
def position(table, _seat, postflop=False):
    """ Returns how many seats from the button the seat is. """
    if postflop:
        seats = table.get_players(hascards=True)
    else:
        seats = table.get_players()
    return len(seats) - seats.index(_seat) - 1
|
253b7685a42db9c4b344f8492fdd9710c8f24585
| 21,681
|
def __repr__(self):
    """Override of default repr() implementation."""
    return "<%s %s at 0x%x>" % (self.__class__.__name__, self.id, id(self))
|
eafa71a99ca628c326a07752d3ee81714c90f6d0
| 21,682
|
def get_default_compartments():
    """Returns default compartments.
    Returns
    -------
    list of str
        Default compartments.
    """
    return ["cells", "cytoplasm", "nuclei"]
|
92c5e2c71928cc176cb6c1c8db6c91768e16291f
| 21,684
|
from configparser import ConfigParser
import os
def read_local_settings(log):
    """reads settings from local config file,
    returns ConfigParser object
    """
    local_config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "config_local.ini")
    log.info("Reading local settings from {}...".format(local_config_file))
    if os.path.exists(local_config_file):
        cf = ConfigParser()
        cf.read(local_config_file)
    else:
        return False
    return cf
|
9cc233690b8d95918f9a65212d246a323132d283
| 21,686
|
def Candidate_1(dataSet):
    """
    To create the first array of candidates
    :param dataSet: dataset
    :return: the first layer of candidates
    """
    C1 = []
    # Candidates in the first layer are all the single items
    for transaction in dataSet:
        for item in transaction:
            if not [item] in C1:
                C1.append([item])
    # we freeze the itemsets to use them as dict keys
    return list(map(frozenset, C1))
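# A minimal usage sketch on a toy transaction dataset (made up); candidates
# come out in first-seen order:
dataset = [[1, 3, 4], [2, 3, 5], [1, 2, 3, 5]]
print(Candidate_1(dataset))
# [frozenset({1}), frozenset({3}), frozenset({4}), frozenset({2}), frozenset({5})]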
|
88fe32e771d57d7ee34c4c321e4439c9670fb526
| 21,687
|
def __calculate_moving_sums(points, window):
    """ Calculates hr moving sums of the window len """
    time, hrs = zip(*points)
    moving_sum = sum(hrs[0:window])
    sums = [(time[0], moving_sum)]
    for i, t in enumerate(time[1:-1 * window]):
        moving_sum += hrs[i + window] - hrs[i]
        sums.append((t, moving_sum))
    return sums
|
f2c29fdb17c1a88c80a1f4ab06db546597ea57b0
| 21,688
|
def subat(orig, index, replace):
    """Substitutes the replacement string/character at the given index in the
    given string, returns the modified string.
    **Examples**:
    ::
        auxly.stringy.subat("bit", 2, "n")
    """
    return "".join([(orig[x] if x != index else replace) for x in range(len(orig))])
|
e419785dab05b0626454c9dd8e8b59aa45162a41
| 21,691
|
def fact(number):
    """
    Calculate the factorial of a number.
    """
    result = 1
    for i in range(1, number + 1):
        result *= i
    return result
|
59f766f57ca71c487b640cd6d08841884657c8fd
| 21,692
|
import os
import glob
def data_files(globname):
    """retrieves filenames under the data directory, matching the given file glob pattern (relative to the data dir)"""
    here = os.path.dirname(__file__)
    data_dir = os.path.join(here, "data")
    matches = glob.glob(os.path.join(data_dir, globname))
    return matches
|
97d0424d09a675d8ef3e664980c0cfd075c79ab8
| 21,694
|
import copy
def get_term_indegree(graph):
    """
    Gets a dictionary of words and indegrees for the @graph.
    Returns an empty dictionary for an empty graph.
    """
    # work on a clone of graph to preserve it
    graph_copy = copy.deepcopy(graph)
    indegrees = graph_copy.indegree()
    if indegrees:
        terms = graph.vs['name']
    else:
        terms = []
    return dict(zip(terms, indegrees))
|
4cb3b78c02a729b35d9fa0e95995ad045417101e
| 21,695
|
import random
def one_run(wr: float, fr: float, ranks: dict, start_rank: int, end_rank: int,
            max_battles: int) -> int:
    """
    Simulate battles required to complete a ranked season.
    :param wr: win rate
    :param fr: first place rate
    :param ranks: information on stars in each rank
    :param start_rank: initial rank for the simulation
    :param end_rank: final rank (simulation ends when reaching this rank)
    :param max_battles: maximum battles before simulation ends (prevents infinite loops).
    :return: number of battles
    """
    battles = 0
    stars = 0
    simulated_rank = start_rank
    while simulated_rank != end_rank:
        battles += 1
        if battles > max_battles:
            break
        # Determine battle outcome
        if random.random() < wr:
            stars += 1  # win
        elif random.random() >= fr:  # best player doesn't lose star
            stars -= 1  # loss and no star saved
        # Check if player moved up a rank
        if stars == ranks[simulated_rank]['stars']:
            simulated_rank -= 1  # move "up" a rank
            if ranks[simulated_rank]['free-star']:
                stars = 1  # get a free star for next rank
            else:
                stars = 0  # no free star
        # Check if a player moved down a rank
        if stars < 0:
            if ranks[simulated_rank]['irrevocable']:
                stars = 0
            else:
                simulated_rank += 1  # move "down" a rank
                stars = ranks[simulated_rank]['stars'] - 1  # 1 star away from next rank
    return battles
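# A minimal usage sketch with a hypothetical two-rank ladder; the 'ranks'
# layout (keys 'stars', 'free-star', 'irrevocable') mirrors the lookups above:
example_ranks = {
    2: {"stars": 3, "free-star": False, "irrevocable": True},
    1: {"stars": 5, "free-star": False, "irrevocable": False},
}
random.seed(0)
print(one_run(wr=0.55, fr=0.1, ranks=example_ranks, start_rank=2, end_rank=1, max_battles=500))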
|
fb655a374696430d80668ae79423b7400cfc94c8
| 21,696
|
import pandas
def _get_node_data(ZZ, leaf_data, groups):
    """Return data of nodes.
    Parameters
    ----------
    ZZ : pandas.DataFrame
    leaf_data : pandas.DataFrame
    groups : list[int]
    Returns
    -------
    df : pandas.DataFrame
    """
    df = leaf_data["breadth"].to_frame()
    df["height"] = 0.
    df["children"] = [[] for _ in range(len(df))]
    df = pandas.concat([df, ZZ[["breadth", "height", "children"]]])
    df.index.name = "cluster"
    df["side"] = [
        "first" if is_first else "last"
        for is_first in df.index.isin(ZZ["children"]
                                      .apply(lambda children: children[0]))]
    df["is_group"] = df.index.isin(groups)
    return df
|
b05df743dfce7616c1103c461dc38c483150a074
| 21,697
|
def sparkAggCollect_MakeAllMetricsIntoLists(node):
    """
    Notes:
        Used in a Spark map operation
    """
    for k in node.error_metrics.keys():
        node.error_metrics[k] = [(node.geocode, node.error_metrics[k])]
    return node
|
96a856cc9f168f5284f332fa19153f670054b3d0
| 21,699
|
import os
def generate_FA_map(dti_file, output_fa):
    """ Use DTI-TK TVtool command to generate the FA map of a dti file.
    Parameters
    ----------
    dti_file: str
        path to the dti volume file.
    output_fa: str
        path to the output fa file.
    Returns
    -------
    output_fa_file: str
        path to the output fa file.
    """
    # Generate FA maps
    cmd = ["TVtool", "-in", dti_file, "-fa"]
    cmd = " ".join(cmd)
    os.system(cmd)
    fa_file = dti_file.replace(".nii.gz", "_fa.nii.gz")
    output_fa_file = os.path.join(os.path.dirname(fa_file), output_fa)
    # Rename the FA map to be consistent with the TBSS pipeline
    cmd = ["mv", fa_file, output_fa_file]
    cmd = " ".join(cmd)
    os.system(cmd)
    return output_fa_file
|
a1a2f93060236866fdc91cc75d51d28efc423a83
| 21,700
|
def _shift_parts(family_parts, subfamily_parts, stop_fn):
    """Iterate over subfamily parts, removing from
    subfamily and appending to family, until stop_fn(part)
    returns true. If subfamily_parts is empty, add
    'Regular'. This works because for both original and
    wws subfamilies the order of parts is such that all
    parts that fail the stop_fn precede any that pass.
    Does not modify the input parts lists."""
    result_family_parts = family_parts[:]
    limit = len(subfamily_parts)
    i = 0
    while i < limit:
        part = subfamily_parts[i]
        if stop_fn(part):
            break
        result_family_parts.append(part)
        i += 1
    result_subfamily_parts = subfamily_parts[i:]
    if not result_subfamily_parts:
        result_subfamily_parts.append('Regular')
    return result_family_parts, result_subfamily_parts
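# A minimal usage sketch with a made-up stop_fn that treats weight/width
# keywords as the start of the subfamily:
stop_fn = lambda part: part in {"Bold", "Italic", "Condensed"}
print(_shift_parts(["Noto", "Sans"], ["Display", "Bold"], stop_fn))
# (['Noto', 'Sans', 'Display'], ['Bold'])
print(_shift_parts(["Noto", "Sans"], [], stop_fn))
# (['Noto', 'Sans'], ['Regular'])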
|
29c45bcaf667e42d5edf1602302649fb58661359
| 21,701
|
from typing import Dict
import gzip
def read_fasta(filename: str) -> Dict[str, str]:
    """
    Reads a file containing multiple FASTA sequences and returns a dictionary of header: sequence
    :param str filename: should be the name of the file to be read
    :return: dict containing header: sequence
    """
    seq_dict = {}
    with gzip.open(filename, 'rt') as fc:
        all_lines = str(fc.read())
        seqs = all_lines.split('>')
        for seq in seqs[1:]:
            seq = seq.strip('"').split('\n')
            ref_id, prot_name = seq[0].replace(' [Homo sapiens]', '').split(' ', 1)
            if "NP_" in ref_id:
                seq_dict[ref_id] = ''.join(seq[1:])
    return seq_dict
|
2e14ec124ec0a9c3273fe8f6d3d23da0a7da9b2a
| 21,702
|
def char_replacement(list_smiles):
    """
    Replace two-character tokens with single characters in a list of SMILES strings.
    Parameters
    ----------
    list_smiles: list
        list of SMILES strings, each describing a compound.
    Returns
    -------
    list
        list of SMILES with characters replaced.
    """
    return [
        smile.replace("Cl", "L")
        .replace("Br", "R")
        .replace("Se", "E")
        .replace("Zn", "Z")
        .replace("Si", "T")
        .replace("@@", "$")
        for smile in list_smiles
    ]
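# A minimal usage sketch (made-up SMILES strings); two-character tokens
# collapse to single letters so each position maps to one token:
print(char_replacement(["C(Cl)Br", "[Si]C@@H"]))
# ['C(L)R', '[T]C$H']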
|
b0a7e2a09cb966b826ee1cb9cf5afa734a2d2ed1
| 21,704
|
import tempfile
import os
def _get_dump_file():
    """Returns file object and its path."""
    fd, path = tempfile.mkstemp(prefix='record_', suffix='.json')
    # Will never be `close`d because we don't know when user stops the program.
    # We'll live with this.
    file = os.fdopen(fd, 'w')
    return file, path
|
a3c3b1b212813415fef20a073ce89ad661dca2f2
| 21,705
|
def _edge_match(edge_a_attr, edge_b_attr):
    """
    Compares attributes of the edges for equality.
    :param edge_a_attr: Attributes of first edge.
    :param edge_b_attr: Attributes of second edge.
    :return: True if equal - otherwise False
    """
    return edge_a_attr == edge_b_attr
|
0afa7dd6402f1c954753c27e0ab4244740eb5ffe
| 21,708
|
import re
from datetime import datetime
def modify_log(config, log):
    """Format log entries from Kibana."""
    for i in range(len(log)):
        # regex to match type err
        type_err = re.search(config.REG_ERR, log[i]['_source']['message'])
        for pattern in config.REG_PIC:
            # regex to match fpc_slot, pic_slot
            re_pic = re.search(pattern, log[i]['_source']['message'])
            try:
                fpc_slot, pic_slot = re_pic.group(1), re_pic.group(2)
                time_stamp = ' '.join([log[i]['_source']['@timestamp'].split('T')[0],
                                       log[i]['_source']['syslog_timestamp'].split(' ')[-1]])
                # '@timestamp' is ISO 8601, so the date part is year-month-day
                time_stamp = datetime.strptime(time_stamp, '%Y-%m-%d %H:%M:%S')
                log[i] = {
                    'device_ip': log[i]['_source']['host'],
                    'device_name': log[i]['_source']['logsource'],
                    'fpc_slot': fpc_slot,
                    'pic_slot': pic_slot,
                    'log_message': type_err.group(),
                    'time_stamp': time_stamp.strftime("%Y-%m-%d %H:%M:%S"),
                    'card': '{}/{}/0'.format(fpc_slot, pic_slot)
                }
                break
            except (AttributeError, ValueError):
                # no match for this pattern, or an unparsable timestamp
                pass
    return log
|
653b634000d0f0c4d339cb6f14cdb8a134c01bfb
| 21,709
|
from typing import List
def _encode_strings(strings: List[str]) -> List[bytes]:
    """
    Encodes a list of strings as bytes
    Parameters
    ----------
    strings : list
        List of strings in any encoding
    Returns
    -------
    strings : list
        List of ASCII-encoded bytes
    """
    return [s.encode("ascii", "ignore") for s in strings]
|
58e4aafa7aca4a65f1d62fde37023f7f1a638b33
| 21,710
|
def box_sizing(keyword):
    """Validation for the ``box-sizing`` property from css3-ui"""
    return keyword in ('padding-box', 'border-box', 'content-box')
|
42666cee49ba77d3089633a923872d2064e8f080
| 21,711
|
import random
def RANDBETWEEN(low, high):
    """
    Returns a uniformly random integer between two values, inclusive.
    """
    return random.randrange(low, high + 1)
|
6382801f41af2b05304b7fbf1c8d22b6f10b90a8
| 21,712
|
import re
def unindent(value):
    """Remove indentation from string."""
    return re.sub(r"\n +", "\n", value)
|
f455d1d7b24d708e73a0307af6ee333cfffe91f2
| 21,713
|
def generate_mapping(length, positions):
    """generate mapping"""
    start_mapping = [0] * length
    end_mapping = [0] * length
    for start, end in positions:
        start_mapping[start] = 1
        end_mapping[end] = 1
    return start_mapping, end_mapping
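# A minimal usage sketch: spans (0, 2) and (3, 4) over a length-5 sequence
# flag span starts and span ends:
print(generate_mapping(5, [(0, 2), (3, 4)]))
# ([1, 0, 0, 1, 0], [0, 0, 1, 0, 1])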
|
4337bc52023641d3a1ba0408d46cc039cf1c6f0b
| 21,714
|
def append_search_info_to_docstring(cls):
    """Class decorator for viewsets which adds documentation on search filtering.
    Documentation is displayed either via the Browsable API or upon receiving an OPTIONS request.
    """
    if cls.__doc__ is not None:
        cls.__doc__ = '{}\n' \
                      'Specify "?search=<search terms here>" query parameter to search items.\n' \
                      .format(cls.__doc__)
    return cls
|
85193e1c05b32c520b2da3ecc2b4bb723ec0f7d9
| 21,715
|
from typing import List
from typing import Tuple
def bono_tasa_fija(
        start_time: float,
        yf: float,
        num_cupones: int,
        valor_tasa: float) -> List[Tuple[float, float]]:
    """
    Returns the payment times and cash flows of a fixed-rate bullet bond with a nominal of 100.
    params:
    - start_time: time (expressed as a year fraction) at which the first coupon starts accruing.
    - yf: year fraction representing the periodicity of the bond (yf = .5 -> semiannual bond).
    - num_cupones: number of coupons of the bond
    - valor_tasa: value of the bond's fixed rate. Interest accrues linearly.
    return:
    - A `list` of `tuple`s with the coupon payment time (as an instant) and the coupon amount.
    """
    result = []
    nominal = 100.0
    flujo = nominal * valor_tasa * yf
    for i in range(1, num_cupones + 1):
        if i == num_cupones:
            flujo += nominal
        result.append((i * yf + start_time, flujo))
    return result
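# A minimal usage sketch: a two-coupon semiannual bond at a 5% linear rate
# starting at t=0; the last flow adds back the nominal of 100:
print(bono_tasa_fija(start_time=0.0, yf=0.5, num_cupones=2, valor_tasa=0.05))
# [(0.5, 2.5), (1.0, 102.5)]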
|
a3d792762458b7b30facf7ead74e2899e545425f
| 21,716
|
import os
import subprocess
def get_git_revision():
    """Return the git revision."""
    if os.path.exists('PKG-INFO'):
        with open('PKG-INFO') as package_info:
            for key, value in (line.split(':', 1) for line in package_info):
                if key.startswith('Version'):
                    return value.strip()
    # decode so both branches return str
    return subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip()
|
96798570d9ecbf477208ae3c1e20c5359e368510
| 21,717
|
from datetime import datetime
def find_todays_file(dir_list=None):
    """Identifies filename of most recent Sierra report"""
    fhdate = datetime.strftime(datetime.now(), '%Y%m%d')
    if dir_list is not None:
        # grab the latest file with date
        for fh in sorted(dir_list, reverse=True):
            if 'BookOpsQC.{}'.format(fhdate) in fh:
                return fh
    return None
|
789040c743375bc85c5ac2f8eaed4fec11c46158
| 21,718
|
def mock_uri_hit(uri: str) -> dict:
    """
    Returns a mock hit with the desired uri
    :param uri:
    :return:
    """
    return {
        "_id": uri,
        "_type": "ghostbuster",
        "_source": {
            "name": "Egon Spengler",
            "occupation": "Ghostbuster",
            "location": "New York City, New York",
            "description": {
                "keywords": ["Test"]
            }
        }
    }
|
1d4a449a01670ab345cebc024cb42b4fffdd5df6
| 21,719
|
def get_testif_lines_generator():
    """Returns a generator containing testifs (conditional tests)."""
    testif_line = "t%d=testif(t%d, executable = my_checker, " \
                  "clas = t%d.outname, nosrun=True)\n"
    return (testif_line % (testif_num, testif_num - 1, testif_num - 1)
            for testif_num in range(2, 45, 2))
|
8b12553ad45403dc05e4e57de630035882873b32
| 21,720
|
def format_location(text):
    """Replace all the spaces with plus signs."""
    return text.replace(' ', '+')
|
e95002ae4b385e346628cf363f27b4db2475465d
| 21,721
|
def parse_ebi_search_entry(entry, fields):
    """Convert an EBI Search JSON entry to a row of field values.
    """
    row = []
    entry_fields = entry.get("fields", {})
    for f in fields:
        value = entry_fields.get(f, [])
        # always append one cell per requested field so rows stay aligned
        row.append(value[0] if value else "")
    return row
|
4fd8970395b7cc6097ea0b56e2ccaac2b0f69afe
| 21,722
|
from typing import Counter
def get_adjacent_diff_counts(num_list, max_diff=3):
    """Returns counts of differences between adjacent numbers in sorted num_list"""
    prev_num = 0
    diff_counts = Counter()
    for num in num_list:
        diff = num - prev_num
        diff_counts[diff] += 1
        if diff > max_diff:
            raise ValueError(
                f"List has gap that is too big: {diff} = {num} - {prev_num}"
            )
        prev_num = num
    return diff_counts
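# A minimal usage sketch: the input must be sorted and is measured against
# an implicit starting value of 0:
print(get_adjacent_diff_counts([1, 2, 3, 6]))
# Counter({1: 3, 3: 1})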
|
2d5f2d6839e4cb015126bd9ba6136339ef01d38e
| 21,723
|
def retorna_repeticao():
    """Returns the repetition count, i.e. the number of items."""
    while True:  # loop only exits once a number is entered
        try:
            repetiçaos = int(input("Numero de itens para comprar? "))
            # number of times for the loop >:)
            break
        except ValueError:
            continue
    return repetiçaos
|
3a8c4083478f3a5dc209cf1bba18a2c6063daf61
| 21,724
|
import os
def get_files_with_extension(dir, names, ext):
    """ Get all the files in a folder with a specific extension.
    :param dir: Folder of data
    :param names: Names of all the files in the folder
    :param ext: Extension.
    :return: All files with the specific extension.
    """
    files = []
    for name in names:
        e = os.path.splitext(name)[1]
        if e == ext:
            files.append(name)
    files.sort()
    return [os.path.join(dir, n) for n in files]
|
cae0d654b0a78e7940da73e27f16bdaf5fb0b41e
| 21,726
|
def url_remove_user_pwd(url):
    """
    Given a parsed URL (as returned by urllib.parse.urlparse), remove the username and password if any::
        print(url_remove_user_pwd(urlparse("https://user:password@host:port/path")))
        https://host:port/path
    """
    _url = url.scheme + "://" + url.hostname
    if url.port:
        _url += ":%d" % url.port
    if url.path:
        _url += url.path
    return _url
|
0713a0973e6fac666198462145eab3d15179b6e0
| 21,728
|
import numpy
def prepare_lattice_for_MD(lattice):
    """ Allocate arrays ready for a local MD simulation. """
    lattice.V = numpy.zeros((3 * lattice.NAtoms), dtype=float)
    lattice.KE = numpy.zeros((lattice.NAtoms), dtype=float)
    lattice.Time = 0.0
    lattice.Temp = 0.0
    return lattice
|
9473c58208f231af9dbc3a332117ed03b20adc50
| 21,730
|
import json
import random
def create_prices():
    """Creates some fake prices from the existing files."""
    prices = []
    with open("users.json") as f:
        users = json.load(f)
    with open("businesses.json") as f:
        businesses = json.load(f)
    with open("products.json") as f:
        products = json.load(f)
    for _ in range(0, 1000):
        user = random.choice(users)
        business = random.choice(businesses)
        product = random.choice(products)
        new_price = {"product": product["description"],
                     "business": business["name"],
                     "user": user["username"],
                     "price": random.randint(1000, 2000)}
        prices.append(new_price)
    with open("prices.json", "w") as f:
        f.write(json.dumps(prices))
    return prices
|
e8f1d295214d249cde8d5b54df7e92831b790477
| 21,731
|
def list_mounts(root_mount_point):
    """ List mount points.
    :param root_mount_point: path prefix to filter mount points by.
    :return: list of (device, mount_point) tuples.
    """
    lst = []
    with open("/etc/mtab") as mtab:
        for line in mtab.readlines():
            device, mount_point, _ = line.split(" ", 2)
            if mount_point.startswith(root_mount_point):
                lst.append((device, mount_point))
    return lst
|
b2a01e131f6db0aa0e1cc2a31e0e9aba0a3cfe7a
| 21,732
|
def _remove_unwanted(args):
    """
    Lower-cases tokens and removes unwanted words and stopwords.
    Parameters
    ----------
    args : list of tuples
        The following arguments zipped.
        text : list
            The text to clean.
        words_to_ignore : str or list
            Strings that should be removed from the text body.
        stop_words : str or list
            Stopwords for the given language.
    Returns
    -------
    text_words_removed : list
        The text without unwanted tokens.
    """
    text, words_to_ignore, stop_words = args
    return [
        token.lower()
        for token in text
        if token not in words_to_ignore and token not in stop_words
    ]
|
3031b24e4581adf3ed701c999e995e2779e48cf2
| 21,733
|
def retrieve_cached_citations(citations_list):
    """ loads cached citations """
    citations_ids_list = "/".join(map(str, [c.pk for c in citations_list]))
    return {"citations_ids_list": citations_ids_list}
|
d70879d1896830823223087c4c8284a8480621b2
| 21,734
|
def dict_to_cols(dict_obj, cols):
    """Converts a dict to a cliff-compatible value list.
    For cliff lister.Lister objects, you should use list_to_cols() instead
    of this function.
    'cols' should be a list of (key, Name) tuples.
    """
    values = []
    for col in cols:
        values.append(dict_obj.get(col[0]))
    return values
|
85ce6809ead9f9543c49004da668366caa4fe9c6
| 21,735
|
import logging
import subprocess
def GetCmdStatusAndOutput(args, cwd=None, shell=False):
    """Executes a subprocess and returns its exit code and output.
    Args:
        args: A string or a sequence of program arguments. The program to execute is
            the string or the first item in the args sequence.
        cwd: If not None, the subprocess's current directory will be changed to
            |cwd| before it's executed.
        shell: Whether to execute args as a shell command.
    Returns:
        The tuple (exit code, output).
    """
    logging.info(str(args) + ' ' + (cwd or ''))
    p = subprocess.Popen(args=args, cwd=cwd, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, shell=shell)
    stdout, stderr = p.communicate()
    exit_code = p.returncode
    if stderr:
        logging.critical(stderr)
    logging.info(stdout[:4096])  # Truncate output longer than 4k.
    return (exit_code, stdout)
|
2463494c6b4cea3e8c46d1e0bc1cda04bca8c062
| 21,737
|
def merge_none(a, b):
    """
    Compare two sequences elementwise and merge them discarding None entries.
    Raises a ValueError exception if the two sequences do not have the same
    length or if they have different non-None elements.
    Parameters
    ----------
    a, b : sequences
        The sequences to be compared.
    Example
    -------
    >>> merge_none([1,None,3],[None,2,3])
    (1, 2, 3)
    """
    if a is b is None:
        return None
    if len(a) != len(b):
        raise ValueError('The input sequences do not have the same length.')
    if any(p != q for p, q in zip(a, b) if None not in (p, q)):
        raise ValueError('The input sequences have incompatible values.')
    return tuple(p if p is not None else q for p, q in zip(a, b))
|
e25747cb2c8aeaa647ed9f3b19fbf311a7b0b701
| 21,738
|
import os
def script_based_path(relative_path):
    """Convert a path relative to this script file into an absolute path.
    Input files are specified relative to this file so they can be read
    regardless of where the script is run from. Relative paths in Python
    are otherwise resolved against the shell's current working directory
    at run time, so this ensures the same file is read from any run location.
    Args:
        relative_path: path relative to this script file
    Returns:
        str: absolute path of the location given relative to this script file
    """
    dir_path = os.path.dirname(os.path.abspath(__file__))
    script_based_relative_path = os.path.normpath(os.path.join(dir_path, relative_path))
    return script_based_relative_path
|
339a44143e6562c9cc05eeefbee421453b3fa3f0
| 21,739
|
import sys
def add_is_cough_symptom(filename):
    """ Returns whether cough is a symptom, using the filename (index of the dataframe). """
    # Filename convention comes useful here.
    is_symptom = filename.split('_')[2]
    if 'with' in is_symptom:
        return 1
    elif 'no' in is_symptom:
        return 0
    else:
        print('Make sure the filename convention is followed.')
        sys.exit(1)
|
393e022335a85878b4772ef9d4911f4d44944f06
| 21,740
|
def _identifier_split(identifier):
    """Return (name, start, end, strand) string tuple from an identifier (PRIVATE)."""
    id, loc, strand = identifier.split(":")
    start, end = map(int, loc.split("-"))
    start -= 1
    return id, start, end, strand
|
9fa8f1850fe628b7a26c5813b6d738e72c0d7ae5
| 21,741
|
import torch
import os
def get_cuda_device_list():
    """Returns the list of available cuda devices"""
    if torch.cuda.is_available():
        hpo_env = os.environ.copy()
        cuda_visible_devices = hpo_env.get("CUDA_VISIBLE_DEVICES", None)
        if cuda_visible_devices is None:
            return list(range(0, torch.cuda.device_count()))
        return [int(i) for i in cuda_visible_devices.split(",")]
    return None
|
aecfa3b338dba6f0266ca3aa948b008481f27153
| 21,746
|
import re
import time
import requests
def check(url, regexp=None):
    """
    Make a get request to a given url and return some metrics about the request.
    If the regexp parameter is present,
    check if this regular expression is present within the page returned by the request.
    :param url: url to be checked. if it does not start with http:// or https://, it will be prefixed with http://
    :param regexp: (compiled) regular expression, or None
    :return: if the connection is successful, it returns a dictionary with the following keys:
        'timestamp': of the check,
        'url': the actual url checked (which may have been prefixed with 'http://'),
        'response_time': in seconds, see note below
        'status_code': from the response, as string
        'matched': True/False if the regular expression was matched within the page returned by the url
    if the connection is unsuccessful, it returns a dictionary with the following keys:
        'timestamp': of the check,
        'url': the actual url checked (which may have been prefixed with 'http://'),
        'error_msg': the message explaining the connection failure
    Note that as the HTTP response time, the "elapsed" time provided by the requests library is used,
    that is, the amount of time elapsed between sending the request and the arrival of the response (as a timedelta).
    This property specifically measures the time taken between sending
    the first byte of the request and finishing parsing the headers.
    It is therefore unaffected by consuming the response content or the value of the stream keyword argument.
    See also https://2.python-requests.org/en/master/api/#requests.Response.elapsed
    """
    if isinstance(regexp, str):
        regexp = re.compile(regexp)
    if not (url.startswith('http://') or url.startswith('https://')):
        url = 'http://' + url
    timestamp = time.time()
    metrics = {
        'timestamp': timestamp,
        'url': url,
    }
    try:
        resp = requests.get(url)
        metrics['response_time'] = resp.elapsed.total_seconds()
        metrics['status_code'] = str(resp.status_code)
        if regexp:
            metrics['matched'] = bool(regexp.search(resp.text))
    except requests.exceptions.RequestException:
        # we catch with this all exceptions explicitly raised from requests
        metrics['error_msg'] = "connection error"
    return metrics
|
879069e2763e3be793e77bdcc540005adcec1435
| 21,750
|
import inspect
def get_for_param_by_type(dct, *, param, kind):
    """Grab the appropriate element out of dict based on param type.
    Ordering:
    1. param.name (i.e., something custom specified by user)
    2. param.annotation
    3. underlying type if typing.Optional
    """
    if elem := dct.get(param.name, dct.get(param.annotation, dct.get(kind))):
        return elem
    for k, v in dct.items():
        if inspect.isclass(k) and issubclass(kind, k) or k == kind:
            return v
|
690ef7aad8c6b365112e32af47c54fa96bc52737
| 21,751
|
import numpy
def handle_time(ods, time_location, time_index, time):
    """
    Given either time_index or time, returns both time_index and time consistent with one another
    NOTE: time takes precedence over time_index
    :param ods: ODS instance from which to take the time
    :param time_location: location of which to get the time
    :param time_index: int or list of ints
    :param time: float or list of floats
    :return: time_index, time
    """
    if time is not None:
        tds = ods.time(time_location)
        time_index = []
        for t in numpy.atleast_1d(time):
            time_index.append(numpy.argmin(abs(tds - t)))
    if time_index is None:
        time = ods.time(time_location)
        if time is None:
            time_index = 0
        else:
            time_index = numpy.arange(len(time))
    return time_index, numpy.atleast_1d(time)
|
e1adeb89590d9ae1581663da8456add98c4e85a6
| 21,752
|
def generate_index(modulename, docstring):
    """Creates the index page for the documentation"""
    html = '<h3><i>{title}:</i></h3><hr/>\n<p class="bodytext">{doc}</p>\n'
    # format docstring so it appears with spaces/newlines as added by documentor
    docstring = "<pre>" + docstring + "</pre>"
    return html.format(title=modulename, doc=docstring)
|
c4d182c2e59d7e0c77420114e5453ccdcaaf4a9f
| 21,753
|
def load_adr_lexicon(ard_lexicon_file):
    """loads the ADR Lexicon from the provided file into a list
    # Arguments
        ard_lexicon_file - path to ADR Lexicon file
    # Returns
        list with ADR Lexicon concept names
    """
    print("Loading ADRMine Lexicon from {}...".format(ard_lexicon_file))
    adr_lexicon_dict = []
    with open(ard_lexicon_file) as f:
        for line in f:
            # Each line contains the UMLS (Unified Medical Language System) concept ID,
            # concept name and the source that the concept and the ID are taken from (tab separated).
            # e.g. c1328274 infection vascular SIDER
            try:
                (UMLS_id, concept_name, source) = line.rstrip().split('\t')
                adr_lexicon_dict.append(concept_name)
            except ValueError:
                # ignore lines without exactly three tab-separated fields
                pass
    print("  {} entries loaded".format(len(adr_lexicon_dict)))
    return adr_lexicon_dict
|
65fb3c345f71023f68b0e6144bae8e2449964e06
| 21,758
|
import typing
import shlex
def chroot_command(command: str, chroot_path: typing.Optional[str] = None) -> str:
    """Prepare command for chroot execution.
    :param command: original command.
    :type command: str
    :param chroot_path: chroot path
    :type chroot_path: typing.Optional[str]
    :return: command to be executed with chroot rules if applicable
    :rtype: str
    """
    if chroot_path and chroot_path != "/":
        chroot_dst: str = shlex.quote(chroot_path.strip())
        quoted_command = shlex.quote(command)
        return f'chroot {chroot_dst} sh -c {shlex.quote(f"eval {quoted_command}")}'
    return command
|
1cab686e35d8f0e9c88c41b87f548a487cf77ac3
| 21,761
|
def partition(arr, low, high):
    """ Partition is a helper function for the quicksort. It takes a pivot and
    places lower values to the left and higher values to the right
    """
    i = (low - 1)
    pivot = arr[high]
    for j in range(low, high):
        if arr[j] <= pivot:
            i = i + 1
            arr[i], arr[j] = arr[j], arr[i]
    arr[i + 1], arr[high] = arr[high], arr[i + 1]
    return (i + 1)
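# A minimal quicksort driver around the partition helper; the driver itself
# is an assumption, not part of the original snippet:
def quicksort(arr, low=0, high=None):
    if high is None:
        high = len(arr) - 1
    if low < high:
        p = partition(arr, low, high)  # place the pivot, then sort both sides
        quicksort(arr, low, p - 1)
        quicksort(arr, p + 1, high)
    return arr

print(quicksort([10, 7, 8, 9, 1, 5]))  # [1, 5, 7, 8, 9, 10]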
|
1bacd1c407542087bd161253e129f4fff098f05d
| 21,762
|
def build_hypo(hypo_indicator, nhypo):
    """ input: hypo_indicator: val = [ihypo]; ind = ipatt
        output: hypo (nested list): ind = ihypo, val = patterns """
    n_pattern = len(hypo_indicator)
    hypo = [[] for i in range(nhypo)]
    for ipatt in range(n_pattern):
        for hypo_ind in hypo_indicator[ipatt]:
            hypo[hypo_ind].append(ipatt)
    return hypo
|
dfb1d2426c55d0ff5343acba13fec8b0740f8dc0
| 21,763
|
def fixHTML(text):
    """escapes HTML special characters into their entities"""
    return str(text).replace("&", "&amp;") \
        .replace("<", "&lt;") \
        .replace(">", "&gt;")
|
ed9d79281b173e8460fc375e7abe83843ec2a8e4
| 21,764
|
def deep_update_dict(origin_dict, override_dict):
    """ update origin dict with override dict recursively
    e.g. origin_dict = {'a': 1, 'b': {'c': 2, 'd': 4}}
         override_dict = {'b': {'c': 3}}
         return: {'a': 1, 'b': {'c': 3, 'd': 4}}
    """
    for key, val in override_dict.items():
        if isinstance(val, dict):
            tmp = deep_update_dict(origin_dict.get(key, {}), val)
            origin_dict[key] = tmp
        else:
            origin_dict[key] = override_dict[key]
    return origin_dict
|
09c0f8bb1656aef387a4a8a96f756a520d1dc23d
| 21,766
|
def getBFromString(x, A):
    """
    :param x: binary vector (sequence of 0/1 values): it represents a node of the tree used to represent a subset B of A.
    :param A: list of vectors of floats: set of alternatives.
    :return: list of vectors: set of alternatives corresponding to the set B represented by x.
    """
    B = []
    indexes_B = []
    for i in range(len(A)):
        if x[i] == 1:
            B.append(A[i])
            indexes_B.append(i)
    return B, indexes_B
|
27e33a21bc86777c1e43cefcf6cba32fe90ce48c
| 21,768
|
def get_coordinates_from_kml(coordinates):
    """Returns list of tuples of coordinates.
    Args:
        coordinates: coordinates element from KML.
    """
    if coordinates:
        return [tuple(float(x.strip()) for x in c.split(',')) for c in str(coordinates[0]).split(' ') if c.strip()]
|
921741e0e157a7d635f59cf75800fb32fcdd4ba2
| 21,769
|
def isArray(v, N=None):
    """Check if v is an array or a vector, with optional size.
    Examples
    --------
    >>> import pygimli as pg
    >>> print(pg.isArray([0, 1]))
    True
    >>> print(pg.isArray(np.array(5)))
    True
    >>> print(pg.isArray(pg.Vector(5)))
    True
    >>> print(pg.isArray(pg.Vector(5), N=5))
    True
    >>> print(pg.isArray(pg.Vector(5), N=2))
    False
    >>> print(pg.isArray('foo'))
    False
    """
    if N is None:
        return hasattr(v, '__iter__') and not isinstance(v, (str))
    return isArray(v) and len(v) == N
|
32b05b6810a9cfc2d97fbfcfdbdc2da0c1b47104
| 21,770
|
def get_chain_terminals(filepath):
    """
    get the terminal indexes in the case of complexes with multiple protein chains
    """
    idxes = []
    idx = 0
    with open(filepath, 'r') as f:
        for line in f:
            if line.startswith('TER'):
                idxes.append(idx)
            elif line.startswith('ENDMDL'):
                break
            idx += 1
    return idxes
|
4fcd34d590904406ef27c552f3eceb572970699e
| 21,771
|
def build_connection_between_hosts_id(srcIP, dstIP):
    """
    Creates an identification of flows from the IP addresses.
    :param srcIP: source IP address
    :param dstIP: destination IP address
    :return: flow identifier string
    """
    return '{}_{}'.format(srcIP, dstIP)
|
56f7d8be7588c9a4cafa8cda9ffd276648519b2f
| 21,772
|
def filter_collections_exist(hsp, collection_names):
    """
    Filters a list of collections to return only those that do exist
    """
    filtered = []
    for entry in hsp.collections.apiget('list'):
        if entry['name'] in collection_names:
            filtered.append(entry['name'])
    return filtered
|
54122ca77d6cb4acd5bd1492d1dcd15d6406c1a7
| 21,773
|
def prepare_aligned_crop():
    """ Prepare for aligned crop. """
    # Re-implement the logic in deploy.prototxt and
    # /hed/src/caffe/layers/crop_layer.cpp of official repo.
    # Other reference materials:
    # hed/include/caffe/layer.hpp
    # hed/include/caffe/vision_layers.hpp
    # hed/include/caffe/util/coords.hpp
    # https://groups.google.com/forum/#!topic/caffe-users/YSRYy7Nd9J8
    def map_inv(m):
        """ Mapping inverse. """
        a, b = m
        return 1 / a, -b / a

    def map_compose(m1, m2):
        """ Mapping compose. """
        a1, b1 = m1
        a2, b2 = m2
        return a1 * a2, a1 * b2 + b1

    def deconv_map(kernel_h, stride_h, pad_h):
        """ Deconvolution coordinates mapping. """
        return stride_h, (kernel_h - 1) / 2 - pad_h

    def conv_map(kernel_h, stride_h, pad_h):
        """ Convolution coordinates mapping. """
        return map_inv(deconv_map(kernel_h, stride_h, pad_h))

    def pool_map(kernel_h, stride_h, pad_h):
        """ Pooling coordinates mapping. """
        return conv_map(kernel_h, stride_h, pad_h)

    x_map = (1, 0)
    conv1_1_map = map_compose(conv_map(3, 1, 35), x_map)
    conv1_2_map = map_compose(conv_map(3, 1, 1), conv1_1_map)
    pool1_map = map_compose(pool_map(2, 2, 0), conv1_2_map)
    conv2_1_map = map_compose(conv_map(3, 1, 1), pool1_map)
    conv2_2_map = map_compose(conv_map(3, 1, 1), conv2_1_map)
    pool2_map = map_compose(pool_map(2, 2, 0), conv2_2_map)
    conv3_1_map = map_compose(conv_map(3, 1, 1), pool2_map)
    conv3_2_map = map_compose(conv_map(3, 1, 1), conv3_1_map)
    conv3_3_map = map_compose(conv_map(3, 1, 1), conv3_2_map)
    pool3_map = map_compose(pool_map(2, 2, 0), conv3_3_map)
    conv4_1_map = map_compose(conv_map(3, 1, 1), pool3_map)
    conv4_2_map = map_compose(conv_map(3, 1, 1), conv4_1_map)
    conv4_3_map = map_compose(conv_map(3, 1, 1), conv4_2_map)
    pool4_map = map_compose(pool_map(2, 2, 0), conv4_3_map)
    conv5_1_map = map_compose(conv_map(3, 1, 1), pool4_map)
    conv5_2_map = map_compose(conv_map(3, 1, 1), conv5_1_map)
    conv5_3_map = map_compose(conv_map(3, 1, 1), conv5_2_map)
    score_dsn1_map = conv1_2_map
    score_dsn2_map = conv2_2_map
    score_dsn3_map = conv3_3_map
    score_dsn4_map = conv4_3_map
    score_dsn5_map = conv5_3_map
    upsample2_map = map_compose(deconv_map(4, 2, 0), score_dsn2_map)
    upsample3_map = map_compose(deconv_map(8, 4, 0), score_dsn3_map)
    upsample4_map = map_compose(deconv_map(16, 8, 0), score_dsn4_map)
    upsample5_map = map_compose(deconv_map(32, 16, 0), score_dsn5_map)
    crop1_margin = int(score_dsn1_map[1])
    crop2_margin = int(upsample2_map[1])
    crop3_margin = int(upsample3_map[1])
    crop4_margin = int(upsample4_map[1])
    crop5_margin = int(upsample5_map[1])
    return crop1_margin, crop2_margin, crop3_margin, crop4_margin, crop5_margin
|
48a0b04976ef6033603a89e4e8ddeb5fe7e06642
| 21,774
|
def anagram_solution_1(s1, s2):
    """
    Solution 1: checking off.
    Check whether every character of the first string appears in the second;
    if every character can be checked off, the two strings must be anagrams.
    A character can be marked as checked by replacing it with None.
    However, since Python strings are immutable, the first step is to convert
    the second string to a list.
    For each character of the first string, check whether it is present in the
    second list and, if so, replace it with None.
    T = O(n^2)
    """
    if len(s1) != len(s2):
        return False
    alist = list(s2)
    pos1 = 0
    still_ok = True
    while pos1 < len(s1) and still_ok:
        pos2 = 0
        found = False
        while pos2 < len(alist) and not found:
            if s1[pos1] == alist[pos2]:
                found = True
            else:
                pos2 = pos2 + 1
        if found:
            alist[pos2] = None
        else:
            still_ok = False
        pos1 = pos1 + 1
    return still_ok
|
e45ba0e0607c57a9b21ca1dd59a86b98dcac6f89
| 21,776
|
import os
def makeOutput(opts, filename):
    """Create an output filename with the requested filename, in the -C directory if requested."""
    outdir = opts.get('--C', '')
    if not outdir:
        return filename
    elif filename.startswith(outdir):
        return filename
    else:
        return os.path.join(outdir, filename)
|
dd2fd52f49178252137090daff586a93a38ec68f
| 21,779
|
def has_argument(command: str) -> bool:
    """
    Check if command has an argument.
    This is a helper function for process_command
    :param command: an alphabetic string
    :precondition: command must be an alphabetic string and part of the list returned by get_command_list
    :postcondition: returns True if command has an argument, else False
    :return: True if command has an argument, otherwise False
    >>> has_argument("h")
    False
    >>> has_argument("b")
    True
    """
    commands_dictionary = {
        "q": False,
        "h": False,
        "b": True,
        "s": True,
        "i": True,
        "c": True
    }
    return commands_dictionary[command]
|
50bc475fa910ab0637f19bb07819fcc4fb78e325
| 21,780
|
def ParseMySQL(mysql, callback="dict"):
"""解析MYSQL配置段"""
if not mysql:return None
protocol, dburl = mysql.split("://")
if "?" in mysql:
dbinfo, dbargs = dburl.split("?")
else:
dbinfo, dbargs = dburl, "charset=utf8&timezone=+8:00"
host,port,user,password,database = dbinfo.split(":")
charset, timezone = dbargs.split("&")[0].split("charset=")[-1] or "utf8", dbargs.split("&")[-1].split("timezone=")[-1] or "+8:00"
if callback in ("list", "tuple"):
return protocol,host,port,user,password,database,charset, timezone
else:
return {"Protocol": protocol, "Host": host, "Port": port, "Database": database, "User": user, "Password": password, "Charset": charset, "Timezone": timezone}
|
d99d2da71f1a8e246a33581f7a1bf144f1a40766
| 21,781
|
from datetime import datetime
def convert_date(timestamp):
    """Converts API timestamp to publication-ready dateline"""
    day = timestamp[5:7]
    month = datetime.strptime(timestamp[8:11], '%b').strftime('%B')
    year = timestamp[12:16]
    date = month + ' ' + day + ", " + year
    return date
|
661e91a9c037d65db7ea9621bb47e0230a833c31
| 21,782
|
def _check_before_comment(commented_map, key_to_check, first=False):
    """Check if commented_map has a comment before key_to_check or not.
    All our default comments are before a key, so we just check for that.
    :param commented_map:
    :type commented_map: ruamel.yaml.comments.CommentedMap
    :param key_to_check:
    :type key_to_check: str
    :param first: True if the key is the first key in the yaml file, as that comment is associated with the file
        and not with the key.
    :type first: bool
    :return: True if there is a comment before a key.
    :rtype: bool
    """
    if first:
        # In the first case, the comment is associated to the CommentedMap, and not to the key.
        comments_list = commented_map.ca.comment
        if not comments_list:
            return False
        # This is the comment structure in ruamel. They don't have any good method for us to check.
        return len(comments_list) > 1 and comments_list[1] and len(comments_list[1]) > 0
    else:
        comments_dict = commented_map.ca.items
        if not comments_dict:
            return False
        if key_to_check in comments_dict:
            comments_list = comments_dict[key_to_check]
            # This is the comment structure in ruamel. They don't have any good method for us to check.
            if len(comments_list) > 1 and comments_list[1] and len(comments_list[1]) > 0:
                return True
            else:
                return False
        else:
            # No key exists, so no comments.
            return False
|
92997028120026e3ae604704510c082f9b201543
| 21,783
|
def timeflip(path):
    """
    timeflip transform described around the end of the article
    """
    new_path = path.copy()
    for e in new_path:
        e.reverse_gear()
    return new_path
|
c6e84174ead36e4a5b04078982216389a6e4fb4c
| 21,785
|
import os
def buildNumber():
    """
    The Jenkins build number, if defined, else None.
    """
    return os.getenv('BUILD_NUMBER')
|
49f46769a7290efbae26fe18002300fe27c2d9d2
| 21,786
|
import re
def is_comment(line):
    """Determines if a string is entirely a fortran comment."""
    return bool(re.match(r'\A\s*!', line))
|
942bf9b780f7c890c75a18aac0a4380d96825c04
| 21,789
|
import pathlib
def validate_filepath(value):
    """Argument verifier: a file path.
    """
    path = pathlib.Path(value)
    if not path.exists():
        raise ValueError(f"Invalid file path, the file does not exist: {value}")
    return value
|
f2b906c1f078ae213f25314a287665adf871f34c
| 21,790
|
def header(table):
    """
    Return the header row for the given table. E.g.::
        >>> from petl import header
        >>> table = [['foo', 'bar'], ['a', 1], ['b', 2]]
        >>> header(table)
        ['foo', 'bar']
    See also :func:`fieldnames`.
    """
    it = iter(table)
    return next(it)
|
c4e772b40cdda2ffcbaf383dc4787f2e146a97e1
| 21,791
|
def makeChanges(today_data, yesterday_data):
    """
    Get the day-over-day change values.
    """
    changes = []
    for i in range(len(today_data)):
        change = int(today_data[i]) - int(yesterday_data[i])
        if change >= 0:
            change = "+" + str(change)
        else:
            change = str(change)
        changes.append(str(today_data[i]))
        changes.append(str(change))
    return changes
|
a74513a7f8b22fe72bdfa6471a06c0ddeb5864c9
| 21,792
|
import six
import base64
def is_base64(s):
    """
    is_base64 tests whether a string is valid base64 by testing that it round-trips accurately.
    This is required because python 2.7 does not have a Validate option to the decoder.
    """
    try:
        s = six.ensure_binary(s, "utf-8")
        return base64.b64encode(base64.b64decode(s)) == s
    except Exception:
        return False
|
8c869fd96217e70dd896d8309719d3b0c754c388
| 21,793
|
def add_line_props_to_dict(line, visual_properties_dict):
    """Extracts the visual properties of the line and adds them to the dictionary.
    Returns the dictionary where the values have been "updated" with the current line's values."""
    for prop in ('font_size', 'font_color', 'left_margin', 'font_family'):
        counts = visual_properties_dict[prop]
        counts[line[prop]] = counts.get(line[prop], 0) + 1
    return visual_properties_dict
|
def52a6b49caa3a733a46c8f404ae613f68af9c8
| 21,794
|
def _n2(a, b):
    """Return (a - b).evalf(2) if a and b are comparable, else None.
    This should only be used when a and b are already sympified.
    """
    # /!\ it is very important (see issue 8245) not to
    # use a re-evaluated number in the calculation of dif
    if a.is_comparable and b.is_comparable:
        dif = (a - b).evalf(2)
        if dif.is_comparable:
            return dif
|
bb1c3ebcea8af5ddf248bd90ef9810093840bbcf
| 21,795
|
def create_non_dupe(base_name: str, opt_num: int, comparison) -> str:
    """Makes sure base_name is not in comparison, and if it is it's renamed.
    :param base_name: Name to check/make unique.
    :param opt_num: Number of the option base_name belongs to, used in making it unique.
    :param comparison: Dictionary or set to search for base_name in.
    :return: Unique name.
    """
    h = base_name
    if h in comparison:
        n = 0
        h = h + '_O' + str(opt_num)
        h_end = len(h)
        while h in comparison:
            h = h[:h_end] + '_' + str(n)
            n += 1
    return h
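# A minimal usage sketch: a clashing name first gains the option suffix,
# then a numeric counter:
taken = {"name", "name_O2"}
print(create_non_dupe("name", 2, taken))  # name_O2_0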
|
cb12092838d6e0482f28b7b00682a3390fcac790
| 21,797
|
def get_min_value_from_matrix(matrix_filename):
    """
    Returns the minimum value of a matrix file
    :param matrix_filename: str
    :rtype: float
    """
    matrix = []
    with open(matrix_filename) as file:
        for line in file:
            matrix.extend([float(val) for val in line.rstrip().split()])
    return min(matrix)
|
69413010834b4e4fb903e164e41677619ac88bb3
| 21,799
|
def interpret_instruction(instruction, parameter):
    """ Interprets an instruction and returns offset to next command and accumulator value.
    :param instruction: acc, jmp or nop
    :param parameter: signed integer
    :return: (jump_offset, accumulator_offset)
    """
    if instruction == 'acc':
        return 1, parameter
    elif instruction == 'jmp':
        return parameter, 0
    else:
        return 1, 0
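# A minimal driver sketch (made-up program) that runs (instruction, parameter)
# pairs until the instruction pointer leaves the program:
program = [("nop", 0), ("acc", 1), ("jmp", 2), ("acc", 99), ("acc", 3)]
ip = acc = 0
while ip < len(program):
    jump, delta = interpret_instruction(*program[ip])
    ip += jump
    acc += delta
print(acc)  # 4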
|
4f1b0ba7b1d92256e299a3f2dab9eb6c42a5314b
| 21,800
|
import re
def _contains_expression(repr_str: str) -> bool:
    """
    Checks whether or not a `repr_str` contains an expression. (Single unary expressions are excluded)
    """
    repr_str = re.sub(r"\s+", "", repr_str)
    repr_str = repr_str.replace("(", "")
    repr_str = repr_str.replace(")", "")
    symbols = re.findall(r"[\w']+", repr_str)
    non_symbols = re.findall(r"[^\w']+", repr_str)
    if len(non_symbols) == 0:
        return False
    if len(non_symbols) == 1 and len(symbols) == 1:
        return False
    return True
|
af758c8874d22ea1cfa25bb73c7605ed5e4d2d75
| 21,801
|
def issafe(arg):
    """Returns False if arg contains ';' or '|'."""
    return ';' not in arg and '|' not in arg
|
f6746d5290e21eb84d7343792d277bce4c1871ff
| 21,804
|
def crf_viterbi_accuracy(y_true, y_pred):
    """
    Use Viterbi algorithm to get best path, and compute its accuracy.
    `y_pred` must be an output from CRF.
    """
    crf, idx = y_pred._keras_history[:2]
    # X = crf._inbound_nodes[idx].input_tensors[0]
    # mask = crf._inbound_nodes[idx].input_masks[0]
    # y_pred = crf.viterbi_decoding(X, mask)
    # return _get_accuracy(y_true, y_pred, mask, crf.sparse_target)
    return crf.get_accuracy(y_true, y_pred)
|
97e79a77c35376a61feba97d7f99d86da3eda146
| 21,806
|
def _get_resampled_data_size(xscale, yscale, data):
    """convenience function mimicking the Legacy output size"""
    xscale = int(xscale)
    yscale = int(yscale)
    ysize, xsize = data.shape
    xres, yres = int(xsize / xscale), int(ysize / yscale)
    return xres, yres
|
9d361fa089e140ac8699ece9178640ba80597cef
| 21,807
|
def _bisect_blocks(web3, timestamp, use_left_bound=True):
    """
    Perform a binary search on the blockchain for the block that matches the
    given timestamp. The `use_left_bound` parameter determines whether to
    return the block to the left or right of the timestamp in the event that no
    block matches the timestamp exactly.
    """
    left_bound = 1
    right_bound = web3.eth.blockNumber
    left_block = web3.eth.getBlock(left_bound)
    if left_block['timestamp'] >= timestamp:
        return 'earliest'
    right_block = web3.eth.getBlock(right_bound)
    if right_block['timestamp'] <= timestamp:
        return 'latest'
    while left_bound < right_bound - 1:
        middle = (left_bound + right_bound) // 2
        middle_block = web3.eth.getBlock(middle)
        if middle_block['timestamp'] < timestamp:
            left_bound = middle
        elif middle_block['timestamp'] > timestamp:
            right_bound = middle
        else:
            return middle
    else:
        if use_left_bound:
            return left_bound
        else:
            return right_bound
|
9eb011ca488b7262e78efd29fe11f3c0136a5933
| 21,809
|
def menu_principal(request):
    """
    Function containing the configuration of the system administration menu.
    Returns a dictionary with the configuration.
    """
    menu_buttons = [
        {'link': '/admin', 'text': 'Administração Django'},
        {'link': '/administracao/acesso', 'text': 'Controle de acessos'},
        {'link': '/administracao/usuarios', 'text': 'Usuários'},
        {'link': '/administracao/cadastro', 'text': 'Cadastros'},
    ]
    button_return = {'link': '/', 'text': 'Voltar'}
    context = {
        'app': 'Administração',
        'menu': 'Menu principal',
        'menu_buttons': menu_buttons,
        'button_return': button_return,
    }
    return context
|
05e7b8f7e9c822dc95fb34dc2151e06a55d7b7a6
| 21,810
|
def __get_app_config(app_cfg):
    """
    Convert the application configuration stored in the database into an easy-to-use dict().
    :param app_cfg: application configuration as stored in the database
    :return: application configuration as an easy-to-use dict
    """
    cfg = dict()
    cfg['app_template_id'] = app_cfg.app_template.id
    cfg['app_type'] = app_cfg.app_type
    cfg['app_name'] = app_cfg.app_name
    cfg['app_alias'] = app_cfg.app_alias
    cfg['base_path'] = app_cfg.base_path
    cfg['host_ip'] = app_cfg.server.host_ip
    cfg['host_name'] = app_cfg.server.host_name
    cfg['root_user'] = app_cfg.root_user.login_name
    cfg['root_password'] = app_cfg.root_user.pass_word
    cfg['app_user'] = app_cfg.app_user.login_name
    cfg['app_user_password'] = app_cfg.app_user.pass_word
    cfg['app_cfg_id'] = app_cfg.id
    cfg['server_id'] = app_cfg.server.id
    cfg['root_user_id'] = app_cfg.root_user.id
    cfg['app_user_id'] = app_cfg.app_user.id
    cfg['service_name'] = app_cfg.service_name
    cfg['ssh_port'] = app_cfg.server.ssh_port
    return cfg
|
d19e4a810dd644e2f8ce8654451cc50b15c0408d
| 21,813
|
import glob
import os
def get_configlet(src_folder=str(), prefix='AVD', extension='cfg'):
    """
    Get available configlets to deploy to CVP.
    Parameters
    ----------
    src_folder : str, optional
        Path where to find configlets, by default str()
    prefix : str, optional
        Prefix to append to configlet name, by default 'AVD'
    extension : str, optional
        File extension to look up configlet files, by default 'cfg'
    Returns
    -------
    dict
        Dictionary of configlets found in the source folder.
    """
    src_configlets = glob.glob(src_folder + '/*.' + extension)
    configlets = dict()
    for file in src_configlets:
        if prefix != 'none':
            name = prefix + '_' + os.path.splitext(os.path.basename(file))[0]
        else:
            name = os.path.splitext(os.path.basename(file))[0]
        with open(file, 'r') as fh:
            data = fh.read()
        configlets[name] = data
    return configlets
|
2b0331aa44fa353ea147b993eb60e9ae146e98ee
| 21,814
|
def chunks(l, n):
    """
    Successive n-sized chunks from l.
    """
    res = []
    for i in range(0, len(l), n):
        assert len(l[i:i + n]) == n
        res += [l[i:i + n]]
    return res
|
148467d681e545487ea1a52c3b4d548726c77f6c
| 21,815
|
def cli(ctx, stdout, stderr):
    """Get job output
    Output:
        Output information
    """
    return ctx.gi.job.get_logs(stdout, stderr)
|
c897c46f024d0fa21debfd96e4372c263cf2b80e
| 21,816
|