content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def test_download_vcs_link(script):
    """Regression test for issue #798: `pip download -d` must accept VCS links."""
    vcs_url = 'git+git://github.com/pypa/pip-test-package.git'
    result = script.pip('download', '-d', '.', vcs_url)
    # the archive is downloaded into the scratch dir, but nothing is installed
    result.did_create(Path('scratch') / 'pip-test-package-0.1.1.zip')
    result.did_not_create(script.site_packages / 'piptestpackage')
def test_size():
    """Exercise the size() expression function against a sample master file."""
    master = MasterFile(join("..", "sample", "cds.mst"))
    sample_record = master[1]
    # (expression, expected string result) pairs evaluated by run_list
    cases = (
        ("size(v24)", '68'),
        ("size(v70)", '27'),
        ("size('teste')", '5'),
        ("size('')", '0'),
        ("size('\n')", '1'),
        ("if size(v69) > 10 then 'OK' fi", 'OK'),
        ("size((v70))", '27'),
        ("size((|AU|v70|AU|))", '35'),
    )
    run_list(cases, sample_record)
def find_first_in_register_stop(seq):
    """
    Find first stop codon on lowercase seq that starts at an index
    that is divisible by three.

    Returns the end index of that codon, or -1 when no in-register
    stop codon exists.
    """
    stop_pattern = re.compile('(taa|tag|tga)')
    # A codon is "in register" when its end index is a multiple of 3
    # (equivalently its start index is, since codons are 3 bases long).
    return next(
        (match.end() for match in stop_pattern.finditer(seq)
         if match.end() % 3 == 0),
        -1,
    )
def computeScaling(filt1, filt2, camera1=None, camera2=None):
    """determine the flux scaling factor that should be multiplied to
    filt1 to match the throughput of filt2. This returns just a
    single number, effectively assuming the source SED is flat across
    the bandpass, so that we just need to correct for total
    throughput, not for the shape of the filter.

    :param filt1: filter name (e.g. 'F160W') or transmission-curve .dat file
    :param filt2: filter name or .dat file whose throughput is the target
    :param camera1: camera used to resolve filt1's name to a .dat file
    :param camera2: camera used to resolve filt2's name to a .dat file
    :return: int2/int1 scaling factor, or None on bad input
    """
    from scipy import integrate as scint
    # scipy renamed `simps` to `simpson` (the old alias was removed in
    # scipy 1.14); support both old and new versions.
    simpson = getattr(scint, 'simpson', None) or scint.simps
    if filt1.lower().startswith('f'):
        filt1 = filtername2datfile(filt1, camera=camera1)
    if filt2.lower().startswith('f'):
        filt2 = filtername2datfile(filt2, camera=camera2)
    if not filt1.endswith('.dat') or not filt2.endswith('.dat'):
        print("Must specify a filter name (e.g. F160W) or a .dat file.")
        return None
    # read in the transmission curves for filters 1 and 2
    topdir = os.path.abspath('.')
    sndataroot = os.environ['SNDATA_ROOT']
    try:
        os.chdir(sndataroot + '/filters/HST')
        w1, f1 = np.loadtxt(filt1, unpack=True)
        w2, f2 = np.loadtxt(filt2, unpack=True)
    finally:
        # always restore the caller's working directory, even if loadtxt fails
        os.chdir(topdir)
    # integrate each transmission curve and return the throughput ratio
    int1 = simpson(f1, x=w1)
    int2 = simpson(f2, x=w2)
    return int2 / int1
def test_ch_set_with_invalid_settingpatch_find_ports():
    """Test '--ch-set with a_bad_setting'."""
    cmd = 'meshtastic --host localhost --ch-set invalid_setting foo --ch-index 0'
    exit_status, output = subprocess.getstatusoutput(cmd)
    # an invalid setting should make the CLI print the valid choices,
    # and the command still exits successfully
    assert re.search(r'Choices in sorted order', output)
    assert exit_status == 0
def setup_filters():
    """Registers the voldemort filters
    """
    log.info('Initializing voldemort filters')
    # publish every name listed in filters.__all__ into the template env
    env.filters.update(
        {name: getattr(filters, name) for name in filters.__all__}
    )
def greet_person(person: Person) -> str:
    """Return a greeting message for the given person.
    The message should have the form 'Hello, <given_name> <family_name>!'
    >>> david = Person('David', 'Liu', 110, '110 St. George Street')
    >>> greet_person(david)
    'Hello, David Liu!'
    """
    full_name = f'{person.given_name} {person.family_name}'
    return 'Hello, ' + full_name + '!'
def sw(s1, s2, pen, matrix):
    """
    Smith-Waterman local alignment tables.

    Takes two sequences, a gap penalty and a substitution dictionary
    (BLOSUM/PAM style, keyed by concatenated residue pairs) and returns
    the scoring matrix F and the traceback matrix P.
    """
    cols = len(s1) + 1
    rows = len(s2) + 1
    # F holds the best local score for each cell, P the direction it came from
    F = [[0] * cols for _ in range(rows)]
    P = [[0] * cols for _ in range(rows)]
    for i in range(1, rows):
        P[i][0] = 'u'
    for j in range(1, cols):
        P[0][j] = 'l'
    for i in range(1, rows):
        for j in range(1, cols):
            # candidate scores: gap from above, gap from the left,
            # or a diagonal step scored by the substitution matrix
            up = F[i - 1][j] + pen
            left = F[i][j - 1] + pen
            diag = F[i - 1][j - 1] + int(matrix[s1[j - 1] + s2[i - 1]])
            # later keys overwrite earlier ones on ties, so the tie-break
            # preference is diagonal > left > up (same as before)
            direction = {up: 'u', left: 'l', diag: 'd'}
            best = max(up, left, diag)
            # local alignment: negative scores are clamped to zero
            F[i][j] = best if best >= 0 else 0
            P[i][j] = direction.get(best)
    return (F, P)
def open_data(num=None, folder=None, groupname="main", datasetname="data", date=None):
    """Convenience load of data from an `AuspexDataContainer` given a file number and folder.
    Assumes that files are named with the convention `ExperimentName-NNNNN.auspex`
    Parameters:
        num (int)
            File number to be loaded.
        folder (string)
            Base folder where file is stored. If the `date` parameter is not None, assumes file is in a dated folder. If no folder is specified, open a dialogue box. Open the folder with the desired ExperimentName-NNNN.auspex, then press OK
        groupname (string)
            Group name of data to be loaded.
        datasetname (string, optional)
            Data set name to be loaded. Default is "data".
        date (string, optional)
            Date folder from which data is to be loaded. Format is "YYMMDD". Defaults to today's date.
    Returns:
        data (numpy.array)
            Data loaded from file.
        desc (DataSetDescriptor)
            Dataset descriptor loaded from file.
    Examples:
        Loading a data container
        >>> data, desc = open_data(42, '/path/to/my/data', "q1-main", date="190301")
    """
    # Fall back to the interactive loader only when the file cannot be
    # located from the arguments. (Previously `date is None` also forced
    # the dialogue, which made the documented "defaults to today's date"
    # branch below unreachable.)
    if num is None or folder is None:
        return load_data()
    if date is None:
        date = datetime.date.today().strftime('%y%m%d')
    folder = path.join(folder, date)
    assert path.isdir(folder), f"Could not find data folder: {folder}"
    # Match e.g. "ExperimentName-00042.auspex" and capture the file number.
    p = re.compile(r".+-(\d+)\.auspex")
    files = [x.name for x in os.scandir(folder) if x.is_dir()]
    data_file = [x for x in files if p.match(x) and int(p.match(x).groups()[0]) == num]
    if len(data_file) == 0:
        raise ValueError("Could not find file!")
    elif len(data_file) > 1:
        raise ValueError(f"Ambiguous file information: found {data_file}")
    data_container = AuspexDataContainer(path.join(folder, data_file[0]))
    return data_container.open_dataset(groupname, datasetname)
def read_item() -> Any:
    """
    Get item by ID.

    Placeholder endpoint: currently just logs the call and returns None.
    """
    print("read item")
def random_multiplex_ER(n, l, p, directed=False):
    """ random multilayer ER """
    # One Erdos-Renyi layer per level; nodes are tagged with their layer index.
    G = nx.MultiDiGraph() if directed else nx.MultiGraph()
    for layer in range(l):
        er_layer = nx.fast_gnp_random_graph(n, p, seed=None, directed=directed)
        for u, v in er_layer.edges():
            G.add_edge((u, layer), (v, layer), type="default")
    ## construct the ppx object
    return multi_layer_network(network_type="multiplex").load_network(
        G, input_type="nx", directed=directed)
def download(
    districts: Optional[str], areas: Optional[str], surnames: Tuple[str], headless: bool, debug: bool, console: bool
):
    """Scrape owner records from the cadastre portal and append them to a CSV.

    Iterates district -> cadastral area -> initial letter -> surname, calling
    ``get_owners`` for every scraped surname that matches one of the requested
    ``surnames``.  Progress is logged to ``cica-<timestamp>.log`` and results
    are written to ``cica-<timestamp>.csv``.

    NOTE(review): ``districts``/``areas`` are annotated ``Optional[str]`` but
    are passed to ``list(...)`` below, which raises TypeError for None —
    presumably they are actually (possibly empty) tuples from the CLI; confirm
    against the caller.
    """
    output = f"cica-{datetime.now():%Y%m%d-%H%M}"
    set_up_logger(debug, console, log_file=f"{output}.log")  # set up logger
    districts, areas = list(districts), list(areas)  # convert tuple to list
    logger.info(f"will be searched in districts {districts}")
    logger.info(f"will be searched in areas {areas}")
    # Normalise the requested surnames: accent-stripped upper-case first
    # letters drive the letter-index navigation; full surnames are matched
    # lower-case.  Sets de-duplicate.
    letters = list(set([remove_slovak_alphabet(surname[0].upper()) for surname in surnames]))
    logger.info(f"will be searched letters {letters}")
    surnames = list(set([remove_slovak_alphabet(surname.lower()) for surname in surnames]))
    logger.info(f"will be searched surnames {surnames}")
    with cica(headless) as driver:
        # An empty selection falls back to the full list scraped from the site
        # (`x or fetch(...)` idiom) at each nesting level.
        for district in tqdm(districts or get_districts(driver), desc="district"):
            logger.info(f"district {district}")
            for area in tqdm(areas or get_cadastral_areas(driver, district), desc="area", leave=False, position=1):
                logger.info(f"area {area}")
                for letter in tqdm(
                    letters or get_letters(driver, district, area), desc="letter", leave=False, position=2,
                ):
                    logger.info(f"letter {letter}")
                    for surname in tqdm(
                        get_surnames(driver, district, area, letter), desc="surname", leave=False, position=3,
                    ):
                        if verify_name(surname, surnames):
                            logger.info(f"surname {surname}")
                            get_owners(f"{output}.csv", driver, district, area, letter, surname)
                        else:
                            logger.debug(f"omit the surname {surname}")
def _save_current_regression_range_indices(testcase_id, regression_range_start,
                                           regression_range_end):
    """Save current regression range indices in case we die in middle of task."""
    testcase = data_handler.get_testcase_by_id(testcase_id)
    # write both bounds without triggering a testcase update per call,
    # then persist once
    metadata = {
        'last_regression_min': regression_range_start,
        'last_regression_max': regression_range_end,
    }
    for key, value in metadata.items():
        testcase.set_metadata(key, value, update_testcase=False)
    testcase.put()
def activation_summary(x):
    """Helper to create summaries for activations.

    Creates a histogram summary and a sparsity (zero-fraction) scalar
    summary for the given activation tensor, but only when verbose
    output is enabled via FLAGS.

    Args:
        x: Tensor
    Returns:
        nothing
    """
    # guard clause: summaries are only emitted in verbose mode
    if not FLAGS.verbose:
        return
    tf.summary.histogram('activation', x)
    tf.summary.scalar('sparsity', tf.nn.zero_fraction(x))
def testTaskRunnerSuspend(databases, data):
    """Test suspend functionality."""
    record = databases.resourceDB.factory.newTaskRunner('runner1', '', set())
    record.sync(databases.jobDB, data)
    # A freshly created runner starts out unsuspended.
    assert not record.isSuspended()
    # Drive every transition of the suspended flag and check the state after
    # each one: False->True, True->True, True->False, False->False.
    for suspend in (True, True, False, False):
        record.setSuspend(suspend, None)
        if suspend:
            assert record.isSuspended()
        else:
            assert not record.isSuspended()
def compare_structures(structure_a, structure_b):
    """Compare two StructureData objects A, B and return a delta (A - B) of the relevant properties."""
    delta = AttributeDict()
    delta.absolute = AttributeDict()
    delta.relative = AttributeDict()

    # cell volume difference, absolute and relative to A
    volume_a = structure_a.get_cell_volume()
    volume_b = structure_b.get_cell_volume()
    abs_volume = np.absolute(volume_a - volume_b)
    delta.absolute.volume = abs_volume
    delta.relative.volume = abs_volume / volume_a

    # per-site position differences and their Euclidean norms
    pos_a = np.array([site.position for site in structure_a.sites])
    pos_b = np.array([site.position for site in structure_b.sites])
    delta.absolute.pos = pos_a - pos_b
    displacement_norms = np.linalg.norm(delta.absolute.pos, axis=1)
    delta.absolute.pos_lengths = displacement_norms
    delta.relative.pos_lengths = displacement_norms / np.linalg.norm(pos_a, axis=1)

    # cell lengths, absolute and relative to A
    cell_lengths_a = np.array(structure_a.cell_lengths)
    abs_lengths = np.absolute(cell_lengths_a - np.array(structure_b.cell_lengths))
    delta.absolute.cell_lengths = abs_lengths
    delta.relative.cell_lengths = abs_lengths / cell_lengths_a

    # cell angles, absolute and relative to A
    cell_angles_a = np.array(structure_a.cell_angles)
    abs_angles = np.absolute(cell_angles_a - np.array(structure_b.cell_angles))
    delta.absolute.cell_angles = abs_angles
    delta.relative.cell_angles = abs_angles / cell_angles_a
    return delta
def parse_response(expected: str) -> Callable:
    """
    Decorator for a function that returns a requests.Response object.
    This decorator parses that response depending on the value of <expected>.
    If the response indicates the request failed (status >= 400) a dictionary
    containing the response status and message will be returned. Otherwise,
    the content will be parsed and a dictionary or list will be returned if
    expected == 'json', a string will be returned if expected == 'text' and
    a binary string will be returned if expected == 'content'.
    This also updates the return annotation for the wrapped function according
    to the expected return value type.
    """
    def _parser(f):
        @wraps(f)
        def _f(*args, **kwargs):
            response = f(*args, **kwargs)
            # failures and explicit 'json' are both parsed as JSON
            if not response.ok or expected == "json":
                return response.json()
            extractors = {
                "content": lambda r: r.content,
                "text": lambda r: r.text,
            }
            # unknown `expected` values fall back to JSON parsing
            extract = extractors.get(expected, lambda r: r.json())
            return extract(response)
        f.__annotations__["return"] = _get_expected_return(expected)
        return _f
    return _parser
def find_parents(candidate, branches):
    """Find parents genre of a given genre, ordered from the closest to
    the further parent.
    """
    needle = candidate.lower()
    for branch in branches:
        if needle in branch:
            idx = branch.index(needle)
            # closest parent first: walk the branch backwards from the match
            return branch[idx::-1]
    # unknown genre: it is its own (only) "parent"
    return [candidate]
def swig_ptr_from_FloatTensor(x):
    """ gets a Faiss SWIG pointer from a pytorch tensor (on CPU or GPU) """
    # Faiss wraps the buffer in place, so it must be flat and contiguous
    # and hold exactly float32 values.
    assert x.is_contiguous()
    assert x.dtype == torch.float32
    # storage_offset() is in elements; multiply by 4 (bytes per float32)
    # to get the byte address of the tensor's first element.
    return faiss.cast_integer_to_float_ptr(
        x.storage().data_ptr() + x.storage_offset() * 4)
def generate_reference_user_status(user, references):
    """Generate reference user status instances for a given set of references.
    WARNING: the new instances are not saved in the database!
    """
    statuses = []
    for ref in references:
        source_query = ref.sources.filter(userprofile=user.userprofile)\
                                  .distinct().order_by("pub_date")
        # prefer the single matching source; on several take the earliest,
        # and fall back to the "unknown" sentinel when there is none
        try:
            source = source_query.get()
        except MultipleObjectsReturned:
            source = source_query.all()[0]
        except ObjectDoesNotExist:
            source = get_unknown_reference()
        status = ReferenceUserStatus()
        status.main_source = source
        status.owner = user
        status.reference = ref
        status.reference_pub_date = ref.pub_date
        statuses.append(status)
    return statuses
def binned_bitsets_by_chrom(f, chrom, chrom_col=0, start_col=1, end_col=2):
    """Read a file by chrom name into a bitset"""
    bitset = BinnedBitSet(MAX)
    for line in f:
        # skip comment lines
        if line.startswith("#"):
            continue
        fields = line.split()
        # only intervals on the requested chromosome contribute bits
        if fields[chrom_col] != chrom:
            continue
        start = int(fields[start_col])
        end = int(fields[end_col])
        # set_range takes (start, size)
        bitset.set_range(start, end - start)
    return bitset
def timeperiod_contains(
    timeperiod: spec.Timeperiod,
    other_timeperiod: spec.Timeperiod,
) -> bool:
    """return bool of whether timeperiod contains other timeperiod"""
    outer_start, outer_end = timeperiod_crud.compute_timeperiod_start_end(
        timeperiod
    )
    inner_start, inner_end = timeperiod_crud.compute_timeperiod_start_end(
        other_timeperiod
    )
    # containment: the outer period starts no later and ends no earlier
    return outer_start <= inner_start and outer_end >= inner_end
def sort_vertices_by_degree(G, vs, within=None):
    """
    Stable inplace sort of vs by degree in G, optionally
    only considering neighbors in `within`.

    :param G: graph providing a ``degree(v)`` method
    :param vs: list of vertices, sorted in place (ascending degree)
    :param within: optional vertex subset restricting which neighbors count
    :return: None (``vs`` is mutated)
    """
    # `list.sort(cmp=...)` and the builtin `cmp` were removed in Python 3;
    # a key function gives the same stable ascending order.
    if within is None:
        vs.sort(key=lambda v: G.degree(v))
    else:
        vs.sort(key=lambda v: degree_within(G, v, within))
def PlotColumns(data_list, ax, args):
    """ Plot one dimensional data as column / bar plot.
    Args:
        data_list: a list of Data objects.
        ax: a matplotlib axis object.
        args: an argparse arguments object.

    Side effects: mutates each Data object's ``x`` attribute and writes the
    final axis limits back onto ``args`` (xmin/xmax/ymin/ymax).
    """
    # bar width chosen so a full group of bars fills 2/3 of one unit slot
    width = 2.0 / 3.0 / len(data_list)
    data_min = min(map(numpy.min, map(lambda x: x.y, data_list)))
    # NOTE(review): data_max is computed but never used below
    data_max = max(map(numpy.max, map(lambda x: x.y, data_list)))
    args.xmin = 0
    args.xmax = max(map(len, map(lambda data: data.y, data_list)))
    for i, data in enumerate(data_list, 0):
        data.x = range(0, len(data.y))
        data.x = numpy.add(data.x, width * i)  # offset each series' bars
        rects = ax.bar(data.x,
                       data.y,
                       width,
                       color=ColorPicker(i, args),
                       linewidth=0.0,
                       alpha=1.0)
    ax.xaxis.set_ticklabels([])
    xmin, xmax, ymin, ymax = ax.axis()
    # user-supplied limits override the automatically computed ones
    xmin, xmax = HandleLimits(xmin, xmax, args.user_xmin, args.user_xmax)
    # anchor the baseline at or below zero so bars hang from a sensible origin
    ymin, ymax = HandleLimits(min(0.0, data_min), ymax,
                              args.user_ymin, args.user_ymax)
    args.ymin = ymin
    args.ymax = ymax
    ax.set_ylim([ymin, ymax])
    if args.xtick_label_column is not None:
        # center the tick labels under each group of bars
        # (uses `data` from the last loop iteration)
        ax.xaxis.set_ticks(numpy.arange(0, len(data.xtick_labels)) + width / 2.)
        ax.xaxis.set_ticklabels(data.xtick_labels, rotation=35,
                                horizontalalignment='right')
    args.xmin = xmin
    args.xmax = xmax
    ax.set_xlim([xmin, xmax])
def preprocess_all(num_beats):
    """
    Preprocess all patients.

    Runs ``preprocess_sum`` over both control and non-control patient files,
    printing the id of any patient whose preprocessing fails.

    :param num_beats: number of beats per generated datapoint
    :return: None
    """
    for control_patients in (True, False):
        indicies = get_patient_ids(control=control_patients)
        filenames = get_filenames(original=(not control_patients), control=control_patients)
        for idx, filename in zip(indicies, filenames):
            idx = str(idx)
            try:
                preprocess_sum(filename, idx, beats_per_datapoint=num_beats, control=control_patients)
            except Exception:
                # one bad patient file should not abort the whole run
                # (was a bare `except:`, which also swallowed KeyboardInterrupt)
                print(idx + 'bad')
            # preprocess_seperate(filename, idx)
def asdict(obj, dict_factory=dict, filter_field_type=None):
    """
    Version of dataclasses.asdict that can use field type information.

    Recursively converts dataclass instances (and namedtuples, lists, tuples
    and dicts containing them) into mappings built with `dict_factory`.

    :param obj: object to convert
    :param dict_factory: callable building the mapping from (name, value) pairs
    :param filter_field_type: when given, dataclass fields whose
        ``metadata['type']`` is set and differs from this value are omitted;
        when None, every field is included
    :return: converted copy of `obj`
    """
    if _is_dataclass_instance(obj):
        result = []
        for f in fields(obj):
            # Apply the metadata filter only when one was requested.
            # (Previously `filter_field_type is None` triggered `continue`,
            # which skipped every field and made unfiltered calls return an
            # empty dict.)
            if filter_field_type is not None:
                field_type_from_metadata = f.metadata.get('type', None)
                if field_type_from_metadata is not None and field_type_from_metadata != filter_field_type:
                    continue
            value = asdict(getattr(obj, f.name), dict_factory, filter_field_type)
            result.append((f.name, value))
        return dict_factory(result)
    elif isinstance(obj, tuple) and hasattr(obj, '_fields'):
        # namedtuple: rebuild the same type from converted members
        return type(obj)(*[asdict(v, dict_factory, filter_field_type) for v in obj])
    elif isinstance(obj, (list, tuple)):
        return type(obj)(asdict(v, dict_factory, filter_field_type) for v in obj)
    elif isinstance(obj, dict):
        return type(obj)((asdict(k, dict_factory, filter_field_type),
                          asdict(v, dict_factory, filter_field_type))
                         for k, v in obj.items())
    else:
        return copy.deepcopy(obj)
def resolve(jira_issue, resolution_name, resolve_comment):
    """
    Given a JIRA ticket, mark the ticket as resolved.
    :param jira_issue: the specified JIRA ticket
    :param resolution_name: the specified type of resolution for this ticket
    :param resolve_comment: comment to attach to the resolving transition
    """
    # Workflow transition name comes from config; its id must be looked up
    # per-issue since transition ids are workflow-specific.
    state_name = config.get('jira', 'resolve_transition_name')
    state_id = None
    resolution_id = None
    # substring match: the configured name only needs to appear in the
    # transition/resolution name
    for transition in jira.transitions(jira_issue):
        if state_name in transition['name']:
            state_id = transition['id']
            break
    for resolution in jira.resolutions():
        if resolution_name in resolution.name:
            resolution_id = resolution.id
            break
    # Only transition when both lookups succeeded; otherwise do nothing
    # silently (no error is raised or logged for a failed lookup).
    if state_id and resolution_id:
        fields_dict = config_get_dict(config, 'jira', 'resolve_fields')
        fields_dict['resolution'] = { 'id': resolution_id }
        logger.info('Resolving ticket (' + jira_issue.key + ')')
        syslog.syslog(syslog.LOG_INFO, 'Resolving ticket (' + jira_issue.key + ')')
        jira.transition_issue(jira_issue, state_id, fields=fields_dict, comment=resolve_comment)
def process_pdb_file(pdb_file, atom_info_only=False):
    """
    Reads pdb_file data and returns in a dictionary format
    :param pdb_file: str, the location of the file to be read
    :param atom_info_only: boolean, whether to read the atom coordinates only or all atom data
    :return: pdb_data, dict organizing pdb data by section
    """
    pdb_data = {NUM_ATOMS: 0, SEC_HEAD: [], SEC_ATOMS: [], SEC_TAIL: []}
    if atom_info_only:
        # atom-info-only mode keys atoms by id instead of listing raw fields
        pdb_data[SEC_ATOMS] = {}
    atom_id = 0
    with open(pdb_file) as f:
        for line in f:
            line = line.strip()
            if len(line) == 0:
                continue
            # PDB record type occupies the first fixed-width columns
            line_head = line[:PDB_LINE_TYPE_LAST_CHAR]
            # head_content to contain Everything before 'Atoms' section
            # also capture the number of atoms
            # match 5 letters so don't need to set up regex for the ones that have numbers following the letters
            # noinspection SpellCheckingInspection
            if line_head[:-1] in ['HEADE', 'TITLE', 'REMAR', 'CRYST', 'MODEL', 'COMPN',
                                  'NUMMD', 'ORIGX', 'SCALE', 'SOURC', 'AUTHO', 'CAVEA',
                                  'EXPDT', 'MDLTY', 'KEYWD', 'OBSLT', 'SPLIT', 'SPRSD',
                                  'REVDA', 'JRNL ', 'DBREF', 'SEQRE', 'HET  ', 'HETNA',
                                  'HETSY', 'FORMU', 'HELIX', 'SHEET', 'SSBON', 'LINK ',
                                  'CISPE', 'SITE ', ]:
                # noinspection PyTypeChecker
                pdb_data[SEC_HEAD].append(line)
            # atoms_content to contain everything but the xyz
            elif line_head == 'ATOM  ' or line_head == 'HETATM':
                # By renumbering, handles the case when a PDB template has ***** after atom_id 99999.
                # For renumbering, making sure prints in the correct format, including num of characters:
                atom_id += 1
                if atom_id > 99999:
                    # ids beyond the 5-char field are emitted as hex
                    atom_num = format(atom_id, 'x')
                else:
                    atom_num = '{:5d}'.format(atom_id)
                # Alternately, use this:
                # atom_num = line[cfg[PDB_LINE_TYPE_LAST_CHAR]:cfg[PDB_ATOM_NUM_LAST_CHAR]]
                # fixed-column slices per the PDB format; boundaries are
                # module-level *_LAST_CHAR constants defined elsewhere
                atom_type = line[PDB_ATOM_NUM_LAST_CHAR:PDB_ATOM_TYPE_LAST_CHAR]
                res_type = line[PDB_ATOM_TYPE_LAST_CHAR:PDB_RES_TYPE_LAST_CHAR]
                mol_num = int(line[PDB_RES_TYPE_LAST_CHAR:PDB_MOL_NUM_LAST_CHAR])
                pdb_x = float(line[PDB_MOL_NUM_LAST_CHAR:PDB_X_LAST_CHAR])
                pdb_y = float(line[PDB_X_LAST_CHAR:PDB_Y_LAST_CHAR])
                pdb_z = float(line[PDB_Y_LAST_CHAR:PDB_Z_LAST_CHAR])
                last_cols = line[PDB_Z_LAST_CHAR:]
                element_type = line[PDB_BEFORE_ELE_LAST_CHAR:PDB_ELE_LAST_CHAR]
                if atom_info_only:
                    atom_xyz = np.array([pdb_x, pdb_y, pdb_z])
                    pdb_data[SEC_ATOMS][atom_id] = {ATOM_TYPE: element_type, ATOM_COORDS: atom_xyz}
                else:
                    line_struct = [line_head, atom_num, atom_type, res_type, mol_num, pdb_x, pdb_y, pdb_z, last_cols]
                    # noinspection PyTypeChecker
                    pdb_data[SEC_ATOMS].append(line_struct)
            elif line_head == 'END':
                # stop at END: anything after it is ignored entirely
                pdb_data[SEC_TAIL].append(line)
                break
            # tail_content to contain everything after the 'Atoms' section
            else:
                # noinspection PyTypeChecker
                pdb_data[SEC_TAIL].append(line)
    pdb_data[NUM_ATOMS] = len(pdb_data[SEC_ATOMS])
    return pdb_data
def get_insns(*, cls=None, variant: Variant = RV32I):
    """
    Get all Instructions. This is based on all known subclasses of `cls`. If none
    is given, all Instructions are returned. Only such instructions are returned
    that can be generated, i.e., that have a mnemonic, opcode, etc. So other
    classes in the hierarchy are not matched.
    :param cls: Base class to get list :type cls: Instruction :return: List of
    instruction classes
    """
    if cls is None:
        cls = Instruction
    insns = []
    # Abstract helper classes have no mnemonic and are filtered out here;
    # a class is also skipped when it does not fit the requested variant.
    if cls.mnemonic and (variant is None or cls.variant <= variant):
        insns.append(cls)
    # Recurse into the whole subclass tree.
    for subcls in cls.__subclasses__():
        insns += get_insns(cls=subcls, variant=variant)
    # De-duplicate while preserving discovery order.
    return list(dict.fromkeys(insns))
def get_review(cursor, hpage_html, movie_title):
    """Retrieves reviews from hpage_html.
    All reviews are stored in database.
    Args:
        cursor: cursor of a connection to sqlite.
        hpage_html: string, homepage of a movie, e.g. HTML text of URL 'https://www.imdb.com/title/tt2015381/'
        movie_title: movie title extracted from hpage_html.
    """
    print('获取用户评论...')
    # 1. locate the "See all ... user reviews" link and fetch that page
    result = etree.HTML(hpage_html).xpath('//a[starts-with(text(), "See all") and contains(text(), "user reviews")]/@href')
    review_url = imdb_url + str(result[0])
    logger.debug('review_url: ' + review_url)
    # 2. extract and persist reviews from the first page
    review_page = pyquery.PyQuery(requests.get(review_url).text)
    reviews = review_page('.lister-list .review-container')
    count = save_reviews(cursor, reviews, movie_title)
    logger.debug('# of reviews in this page: ' + str(count))
    data_ajaxurl = review_page('.load-more-data').attr('data-ajaxurl')
    noticed = False
    # 3. follow the "load more" AJAX pagination until exhausted
    while True:
        data_url = load_more_reviews_url(review_page, data_ajaxurl)
        logger.debug('next review_page url: ' + str(data_url))
        if data_url is None:
            break
        review_page = pyquery.PyQuery(requests.get(data_url).text)
        reviews = review_page('.lister-list .review-container')
        count_of_this_page = save_reviews(cursor, reviews, movie_title)
        # log the per-page count (previously this logged the stale running
        # total `count` instead of the count for this page)
        logger.debug('# of reviews in this page: ' + str(count_of_this_page))
        count += count_of_this_page
        if count > 600 and not noticed:
            # warn the user once when the review set is large
            noticed = True
            print('评论较多,稍等')
    print('{}条'.format(count))
    logger.debug('# of reviews: ' + str(count))
def _assert_all_equal_and_return(tensors, name=None):
    """Asserts that all tensors are equal and returns the first one."""
    with backend.name_scope(name or 'assert_all_equal'):
        first = tensors[0]
        # single tensor: nothing to compare against
        if len(tensors) == 1:
            return first
        # one equality assertion per remaining tensor, all gating the output
        assert_equal_ops = [check_ops.assert_equal(first, t) for t in tensors[1:]]
        with ops.control_dependencies(assert_equal_ops):
            return array_ops.identity(first)
def _get_igraph(G, edge_weights=None, node_weights=None):
    """
    Transforms a NetworkX graph into an iGraph graph.
    Parameters
    ----------
    G : NetworkX DiGraph or Graph
        The graph to be converted.
    edge_weights: list or string
        weights stored in edges in the original graph to be kept in new graph.
        If None, no weight will be carried. See get_full_igraph to get all
        weights and attributes into the graph.
    node_weights: list or string
        weights stored in nodes in the original graph to be kept in new graph.
        If None, no weight will be carried. See get_full_igraph to get all
        weights and attributes into the graph.
    Returns
    -------
    iGraph graph
    """
    # allow a single attribute name as shorthand for a one-element list
    if isinstance(edge_weights, str):
        edge_weights = [edge_weights]
    if isinstance(node_weights, str):
        node_weights = [node_weights]
    G = G.copy()
    G = nx.relabel.convert_node_labels_to_integers(G)
    Gig = ig.Graph(directed=True)
    Gig.add_vertices(list(G.nodes()))
    Gig.add_edges(list(G.edges()))
    if 'kind' not in G.graph.keys():
        # if not specified, assume the graph is primal
        # (was the bare name `primal`, which raised NameError)
        G.graph['kind'] = 'primal'
    if G.graph['kind'] == 'primal':
        Gig.vs['osmid'] = list(nx.get_node_attributes(G, 'osmid').values())
    elif G.graph['kind'] == 'dual':
        Gig.vs['osmid'] = list(G.edges)
    if edge_weights is not None:
        for weight in edge_weights:
            Gig.es[weight] = [n for _, _, n in G.edges(data=weight)]
    if node_weights is not None:
        for weight in node_weights:
            Gig.vs[weight] = [n for _, n in G.nodes(data=weight)]
    for v in Gig.vs:
        v['name'] = v['osmid']
    return Gig
def add_extra_args(parser, files):
    """Add additional arguments defined in other files."""
    for module_name in files:
        module = importlib.import_module(module_name)
        # only modules defining their own hook contribute arguments
        if 'setup_extra_args' in module.__dict__:
            module.setup_extra_args(parser)
def rename(isamAppliance, instance_id, id, new_name, check_mode=False, force=False):
    """
    Renaming a directory in the administration pages root
    (docstring previously said "Deleting", which described the wrong operation)

    :param isamAppliance: appliance connection object
    :param instance_id: reverse proxy instance id
    :param id: current id (path) of the directory to rename
    :param new_name: new name for the directory
    :param check_mode: when True, only report whether a change would be made
    :param force: when True, skip the existence check and rename unconditionally
    :return: return object from the appliance call
    """
    dir_id = None
    if force is False:
        dir_id = _check(isamAppliance, instance_id, id, '')
    # NOTE(review): with force=True dir_id stays None and is sent as the id
    # payload below — presumably the appliance resolves it; confirm.
    if force is True or dir_id is not None:
        if check_mode is True:
            return isamAppliance.create_return_object(changed=True)
        else:
            return isamAppliance.invoke_put(
                "Renaming a directory in the administration pages root",
                "/wga/reverseproxy/{0}/management_root/{1}".format(instance_id, id),
                {
                    'id': dir_id,
                    'new_name': new_name,
                    'type': 'directory'
                })
    # nothing to do: target does not exist and force was not requested
    return isamAppliance.create_return_object()
def write_hdf5(filename, data, dataset_name='dataset', dtype='f'):
    """
    Create hdf5 file
    Inputs
        filename : str
            hdf5 filename
        data : array, shape (n_vertices, n_times)
            raw data for which the dataset is created
        dataset_name : str
            name of dataset to create
        dtype : str
            data type for new dataset
    """
    # context manager guarantees the file handle is closed even if
    # create_dataset raises (the previous explicit open/close did not)
    with h5py.File(filename, 'w') as hf:
        hf.create_dataset(dataset_name, data=data, dtype=dtype)
def deepset_update_global_fn(feats: jnp.ndarray) -> jnp.ndarray:
    """Global update function for graph net."""
    # MLP head mapping the node features to the 11 output variabilities
    mlp = hk.Sequential([
        hk.Linear(128), jax.nn.elu,
        hk.Linear(30), jax.nn.elu,
        hk.Linear(11),  # number of variabilities
    ])
    return mlp(feats)
def max_sum_naive(arr: list, length: int, index: int, prev_max: int) -> int:
    """
    Maximum sum of an increasing subsequence, naive recursion.

    At each position we either take the current number (allowed only when it
    exceeds prev_max) or skip it, keeping the better of the two options.
    """
    if index >= length:
        return 0
    # option 1: skip the current element
    best_skipping = max_sum_naive(arr, length, index + 1, prev_max)
    if arr[index] <= prev_max:
        # taking is not allowed; skipping is the only option
        # (recursive results are always >= 0, so max(0, skip) == skip)
        return best_skipping
    # option 2: take it, raising the bar for subsequent picks
    best_taking = arr[index] + max_sum_naive(arr, length, index + 1, arr[index])
    return max(best_taking, best_skipping)
def accuracy(X, X_ref):
    """ Compute classification accuracy.
    Parameters
    ----------
    X : torch.Tensor
        The classification score tensor of shape [..., num_classes]
    X_ref : torch.Tensor
        The target integer labels of shape [...]
    Returns
    -------
    The average accuracy
    """
    # predicted class = argmax over the trailing (class) dimension
    predictions = torch.argmax(X, dim=-1)
    num_correct = (predictions == X_ref).sum()
    # total number of predictions = product of all non-class dimensions
    num_total = np.prod(X.shape[:-1])
    return num_correct / num_total
def download_dataset(filepath=DATASET_PATH, url=DATASET_URL, **kwargs) -> pd.DataFrame:
    """
    Download dataset
    :param filepath: output CSV filename (written with index label INDEX)
    :param url: URL of the zip archive containing DATASET_NAME
    :param kwargs: optional `session` (requests.Session) used for the download
    :return: the dataset as a pandas DataFrame
    """
    kwargs.setdefault("session", requests.Session())
    r = kwargs['session'].get(url)
    # If the download failed, fail now.
    # `raise_for_status()` checks whether the web server returned the
    # file or not, and whether it was downloaded successfully.
    # As the name says, it raises an exception if an error was detected.
    r.raise_for_status()
    if r.headers['content-type'] != "application/zip":
        raise ConnectionError(f"Expected zip file, received {r.headers['content-type']}")
    with ZipFile(BytesIO(r.content)) as pakattu_tiedosto:
        # Open the received data as bytes, so we do not need to write the
        # zip file to the filesystem.
        # Extract the wanted member and wrap it in a pandas DataFrame.
        data = pd.read_csv(BytesIO(pakattu_tiedosto.read(DATASET_NAME)))
    # Add names, if data has none.
    if "name" not in data.columns:
        logger.debug("Names are missing. Generating fake names.")
        names = pd.Series(generate_names(data.shape[0]), name="name")
        data = data.assign(name=names)
    data.to_csv(filepath, index_label=INDEX)
    logger.debug("File downloaded as: %s", filepath)
    # Scrape extra bits.
    if Config.getboolean("build", "allow_dirty", fallback=False):
        hae_dataa()
    return data
def create_cert_builder(subject, issuer_name, public_key, days=365, is_ca=False):
    """
    The method to create a builder for all types of certificates.
    :param subject: The subject of the certificate.
    :param issuer_name: The name of the issuer.
    :param public_key: The public key of the certificate.
    :param days: The number of days for which the certificate is valid. The default is 1 year or 365 days.
    :param is_ca: Boolean to indicate if a cert is ca or non ca.
    :return: The certificate builder.
    :rtype: :class `x509.CertificateBuilder`
    """
    # CertificateBuilder is immutable; every call returns a new builder,
    # so the whole configuration can be expressed as one chain.
    return (
        x509.CertificateBuilder()
        .subject_name(subject)
        .issuer_name(issuer_name)
        .public_key(public_key)
        .not_valid_before(datetime.today())
        .not_valid_after(datetime.today() + timedelta(days=days))
        .serial_number(int(uuid.uuid4()))
        .add_extension(
            x509.BasicConstraints(ca=is_ca, path_length=None), critical=True
        )
    )
def _loadvid_test_vanilla(filename, width, height):
    """Tests the usual loadvid call.
    The input file, an encoded video corresponding to `filename`, is repeatedly
    decoded (with a random seek). The first and last of the returned frames are
    plotted using `matplotlib.pyplot`.
    """
    with open(filename, 'rb') as f:
        encoded_video = f.read()
    num_frames = 32
    # decode the same clip 10 times, timing each decode
    for _ in range(10):
        start = time.perf_counter()
        result = lintel.loadvid(encoded_video,
                                should_random_seek=True,
                                width=width,
                                height=height,
                                num_frames=num_frames)
        # NOTE(brendan): dynamic size returns (frames, width, height,
        # seek_distance).
        if (width == 0) and (height == 0):
            decoded_frames, width, height, _ = result
        else:
            decoded_frames, _ = result
        # raw byte buffer -> (frames, H, W, RGB) uint8 array
        decoded_frames = np.frombuffer(decoded_frames, dtype=np.uint8)
        decoded_frames = np.reshape(decoded_frames,
                                    newshape=(num_frames, height, width, 3))
        end = time.perf_counter()
        print('time: {}'.format(end - start))
    # visually inspect the first and last decoded frames
    plt.imshow(decoded_frames[0, ...])
    plt.show()
    plt.imshow(decoded_frames[-1, ...])
    plt.show()
def _table_row(line):
"""
Return all elements of a data line.
Return all elements of a data line. Simply splits it.
Parameters
----------
line: string
A stats line.
Returns
-------
list of strings
A list of strings, containing the data on the line, split at white space.
"""
return line.split() | 5,325,141 |
def ack_alert_alarm_definition(definition_id):
    """ Acknowledge all alert(s) or alarm(s) associated with the definition identified by definition_id.

    The definition must already be disabled (``active`` False). Alarm
    instances are acknowledged upstream in uframe first; every instance is
    then marked acknowledged locally. Returns JSON ``{'result': 'ok'}`` on
    success, or a bad_request/conflict response describing the failure.
    """
    try:
        # Get definition identified in request
        definition = SystemEventDefinition.query.get(definition_id)
        if definition is None:
            message = 'Failed to retrieve SystemEventDefinition for id provided: %d' % definition_id
            return bad_request(message)
        # Verify definition is not in active state; otherwise error
        if definition.active:
            message = '%s definition must be disabled before clearing any associated instances.' % definition.event_type
            return bad_request(message)
        # Determine current user who is auto clearing alert or alarm instances (written to log)
        assigned_user = User.query.get(g.current_user.id)
        if assigned_user is not None:
            name = assigned_user.first_name + ' ' + assigned_user.last_name
        else:
            name = 'Unknown/unassigned user with g.current_user.id: %s' % str(g.current_user.id)
        # Identify default user and message for auto acknowledgment; log activity
        ack_by = 1
        ack_value = 'Log: Auto acknowledge (ooi-ui-services) OBO user \'%s\'; %s definition id: %d' % \
                    (name,definition.event_type, definition.id)
        current_app.logger.info(ack_value)
        # Get all active instances for this definition which have not been acknowledged.
        instances = SystemEvent.query.filter_by(system_event_definition_id=definition.id,acknowledged=False).all()
        for instance in instances:
            # Alarms must be acknowledged in uframe before the local record
            # is updated; abort on the first upstream failure.
            if instance.event_type == 'alarm':
                if not (uframe_acknowledge_alert_alarm(instance.uframe_event_id, ack_value)):
                    message = 'Failed to acknowledge alarm (id:%d) in uframe, prior to clearing instance.' % instance.id
                    current_app.logger.info('[clear_alert_alarm] %s ' % message)
                    return bad_request(message)
            # Update alert_alarm acknowledged, ack_by and ts_acknowledged
            instance.acknowledged = True
            instance.ack_by = ack_by
            instance.ts_acknowledged = dt.datetime.strftime(dt.datetime.now(), "%Y-%m-%dT%H:%M:%S")
            try:
                db.session.add(instance)
                db.session.commit()
            except Exception:
                # Was a bare `except:`; narrowed so system-exiting signals
                # (KeyboardInterrupt, SystemExit) are not swallowed here.
                db.session.rollback()
                return bad_request('IntegrityError during auto-acknowledgment of %s by %s.' %
                                   (instance.event_type, str(ack_by)))
        result = 'ok'
        return jsonify( {'result' : result }), 200
    except Exception as err:
        # NOTE(review): err.message is Python 2 only; use str(err) if this
        # module is ever ported to Python 3.
        message = 'Insufficient data, or bad data format. %s' % str(err.message)
        current_app.logger.info(message)
        return conflict(message)
def range2d(range_x, range_y):
    """Return all (x, y) coordinate pairs of a 2D range, row by row.

    ``range_x`` is materialized first so that a one-shot iterable can be
    re-iterated for every value of ``range_y``.
    """
    xs = list(range_x)
    pairs = []
    for y in range_y:
        for x in xs:
            pairs.append((x, y))
    return pairs
def _auto_run(args):
    """This function executes when the script is called as a stand-alone
    executable. It is used both for development/testing as well as the
    primary executable for generating the cross-run-ID PDF report.

    To use as a stand-alone script (primarily for development purposes)
    the script has to replicate some of the functionality in
    "cross_run_id.py" to generate json_results for use here.

    A more complete description of this code can be found in the
    docstring at the beginning of this file.

    Args:
        '-r' or '--benchmark_results_dir' - Path of top-level folder
        that contains the benchmark results folders/files to be
        processed. For the purposes of testing a folder with at least
        two run IDs must be indicated.

    Returns:
        (nothing)
    """
    # TDH (2020-01-14) For development testing the following section
    # replicates the functionality of "cross_run_id.py" so that
    # json_results can be created and used to create the graph image
    # files.
    import cross_run_id as cri
    import standard_analysis as sa
    import benchmark_postprocessing as bmpp
    import make_dataframe as md
    # TDH (2020-01-14)
    # Hard-coding some inputs for development purposes
    # The "benchmark_results_name" folder and the values in "run_id_list"
    # must be manually made to match.
    benchmark_results_name = '2019-12-02'
    run_id_list = ['aUZF6', 'Zu60n']
    dir_name = 'aUZF6_Zu60n_report'
    comparison_results_root = 'cross_case_comparison'
    delete_report = True
    comparison_parameter = 'mhz_per_cpu'
    # Metadata fields shown for each run ID in the generated PDF report.
    parameter_list = [
        'date', 'helics_version', 'generator',
        'system', 'system_version', 'platform',
        'cxx_compiler', 'cxx_compiler_version', 'build_flags_string',
        'host_name', 'host_processor', 'num_cpus',
        'mhz_per_cpu'
    ]
    # TDH (2020-01-14)
    # Generating results path and output path relative to this script's
    # parent directory.
    script_path = os.path.dirname(os.path.realpath(__file__))
    head, tail = os.path.split(script_path)
    output_dir = os.path.join(head, comparison_results_root)
    output_path = os.path.join(output_dir, dir_name)
    benchmark_results_dir = os.path.join(head,
                                         'benchmark_results',
                                         benchmark_results_name)
    run_id_dict = cri.find_specific_run_id(
        benchmark_results_dir, run_id_list)
    cri.create_output_path(output_path, delete_report)
    # Flatten the per-run-ID file lists into one list of result files.
    file_list = []
    for run_id in run_id_dict:
        file_list.extend(run_id_dict[run_id]['files'])
    # Only the "bm" result files are graphed; "bmk" files are discarded.
    bm_files, bmk_files = sa.sort_results_files(file_list)
    file_list = bm_files
    json_results = bmpp.parse_files(file_list)
    json_results = bmpp.parse_and_add_benchmark_metadata(json_results)
    meta_bmk_df = md.make_dataframe(json_results)
    # Graph only benchmarks that every run ID has in common.
    bm_list = cri.find_common_bm_to_graph(json_results, run_id_dict)
    for bm in bm_list:
        cri.make_cross_run_id_graphs(
            meta_bmk_df, bm['bm_name'], list(run_id_dict.keys()),
            output_path, comparison_parameter)
    create_cross_run_id_report(
        json_results, run_id_list, output_path, parameter_list) | 5,325,144 |
def test_sch_t3_sch_t3_v(mode, save_output, output_format):
    """
    TEST :schema collection and schema location : redefine with a
    attributeGroup, attribute group's content items are a subset of the
    redefined group, (SRC 7.2.2)
    """
    binding_kwargs = dict(
        schema="msData/schema/schT3_a.xsd",
        instance="msData/schema/schT3.xml",
        class_name="Root",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
    assert_bindings(**binding_kwargs)
def kmeans_(X, sample_weights, n_clusters, init='kmeans++', max_iter=300):
    """
    Weighted K-Means implementation (Lloyd's Algorithm).

    :param X: (n_samples, n_features) array of points to cluster
    :param sample_weights: per-sample weights; negative values are clipped
        to zero for the k-means++ seeding step only
    :param n_clusters: number of cluster centers to produce
    :param init: string in {'random', 'kmeans++'}, default 'kmeans++', or an
        ndarray of initial centers
    :param max_iter: maximum number of iterations
    :return cluster_centers_: array of n_clusters cluster centers
    """
    n_samples, n_features = X.shape
    # TODO: find a better way to handle negative weights
    cluster_centers_ = None
    if init == 'kmeans++':
        # Seeding ignores negative weights by clipping them at zero.
        cluster_centers_ = kmeans_pp_(X, np.clip(sample_weights, 0, np.inf), n_clusters)
    elif init == 'random':
        centers_idxs = np.random.choice(n_samples, n_clusters, replace=False)
        cluster_centers_ = X[centers_idxs]
    elif isinstance(init, np.ndarray):
        cluster_centers_ = init
    # NOTE(review): if `init` matches none of the cases above,
    # cluster_centers_ stays None and the loop below will fail.
    diff = np.inf
    i = 0
    # Lloyd iterations: assign points to the nearest center, recompute the
    # weighted centers, and stop once the centers move < 1e-3 in total norm
    # or max_iter is reached.
    while diff > 1e-3 and i < max_iter:
        clusters = update_clusters_(X, cluster_centers_)
        new_centers = update_centers_(X, sample_weights, clusters)
        # Movement is only comparable when no cluster was dropped this round.
        if len(new_centers) == len(cluster_centers_):
            diff = np.linalg.norm(new_centers - cluster_centers_)
        cluster_centers_ = new_centers
        i += 1
    # if the program finishes before finding k'<k centers, we use the FarthestNeighbor
    # method to produce the remained k-k' centers
    if len(cluster_centers_) < n_clusters:
        centers = [c for c in cluster_centers_]
        _, dists_to_centers = pairwise_distances_argmin_min(X, np.atleast_2d(centers))
        for i in range(0, n_clusters - len(cluster_centers_)):
            # Greedily add the point farthest from every current center,
            # then fold its distances into the running minimum.
            next_idx = np.argmax(dists_to_centers)
            centers.append(X[next_idx])
            _, next_dist = pairwise_distances_argmin_min(X, np.atleast_2d(centers[-1]))
            dists_to_centers = np.minimum(dists_to_centers, next_dist)
        cluster_centers_ = np.array(centers)
    return cluster_centers_ | 5,325,146 |
def display_cim_objects(context, cim_objects, output_format, summary=False,
                        sort=False, property_list=None, quote_strings=True):
    """
    Display CIM objects in form determined by input parameters.

    Input is either a list of cim objects or a single object. It may be
    any of the CIM types. This is used to display:

      * CIMClass
      * CIMClassName:
      * CIMInstance
      * CIMInstanceName
      * CIMQualifierDeclaration
      * Or list of the above

    This function may override output type choice in cases where the output
    choice is not available for the object type. Thus, for example,
    mof output makes no sense for class names. In that case, the output is
    the str of the type.

    Parameters:

      context (:class:`ContextObj`):
        Click context contained in ContextObj object.

      objects (iterable of :class:`~pywbem.CIMInstance`,
        :class:`~pywbem.CIMInstanceName`, :class:`~pywbem.CIMClass`,
        :class:`~pywbem.CIMClassName`,
        or :class:`~pywbem.CIMQualifierDeclaration`):
        Iterable of zero or more CIM objects to be displayed.

      output_format (:term:`string`):
        String defining the preferred output format. Must not be None since
        the correct output_format must have been selected before this call.
        Note that the output formats allowed may depend on a) whether
        summary is True, b)the specific type because we do not have a table
        output format for CIMClass.

      summary (:class:`py:bool`):
        Boolean that defines whether the data in objects should be displayed
        or just a summary of the objects (ex. count of number of objects).

      sort (:class:`py:bool`):
        Boolean that defines whether the objects will be sorted.

      property_list (iterable of :term:`string`):
        List of property names to be displayed, in the desired order, when the
        output format is a table, or None to use the default of sorting
        the properties alphabetically within key and non-key groups.

      quote_strings (:class:`py.bool`):
        If False, strings are not encased by quote marks in the table view for
        instance displays. The default is True so that strings are encased in
        quotes in all views.
    """
    # Note: In the docstring above, the line for parameter 'objects' was way too
    # long. Since we are not putting it into docmentation, we folded it.
    context.spinner_stop()

    # Summary mode short-circuits all per-object display logic.
    if summary:
        display_cim_objects_summary(context, cim_objects, output_format)
        return

    if not cim_objects and context.verbose:
        click.echo("No objects returned")
        return

    if sort:
        cim_objects = sort_cimobjects(cim_objects)

    # default when displaying cim objects is mof
    assert output_format

    if isinstance(cim_objects, (list, tuple)):
        # Table format output is processed as a group
        if output_format_is_table(output_format):
            _display_objects_as_table(cim_objects, output_format,
                                      context=context,
                                      property_list=property_list,
                                      quote_strings=quote_strings)
        else:
            # Call to display each object (recurses with a single object)
            for obj in cim_objects:
                display_cim_objects(context, obj, output_format=output_format)
        return

    # Display a single item.
    object_ = cim_objects
    # This allows passing single objects to the table formatter (i.e. not lists)
    if output_format_is_table(output_format):
        _display_objects_as_table([object_], output_format, context=context,
                                  property_list=property_list,
                                  quote_strings=quote_strings)
    elif output_format == 'mof':
        # Objects without a tomof() method fall back to their str() form.
        try:
            click.echo(object_.tomof())
        except AttributeError:
            # insert NL between instance names for readability
            if isinstance(object_, CIMInstanceName):
                click.echo("")
                click.echo(object_)
            elif isinstance(object_, (CIMClassName, six.string_types)):
                click.echo(object_)
            else:
                raise click.ClickException('output_format {} invalid for {} '
                                           .format(output_format,
                                                   type(object_)))
    elif output_format == 'xml':
        try:
            click.echo(object_.tocimxmlstr(indent=4))
        except AttributeError:
            # no tocimxmlstr functionality
            raise click.ClickException('Output Format {} not supported. '
                                       'Default to\n{!r}'
                                       .format(output_format, object_))
    elif output_format == 'repr':
        try:
            click.echo(repr(object_))
        except AttributeError:
            raise click.ClickException('"repr" display of {!r} failed'
                                       .format(object_))
    elif output_format == 'txt':
        try:
            click.echo(object_)
        except AttributeError:
            raise click.ClickException('"txt" display of {!r} failed'
                                       .format(object_))
    # elif output_format == 'tree':
    #     raise click.ClickException('Tree output format not allowed')
    else:
        raise click.ClickException('Invalid output format {}'
                                   .format(output_format)) | 5,325,147 |
def CPCT_LambdaPitch(refdir,main_fastfile,Lambda=None,Pitch=np.linspace(-10,40,5),WS=None,Omega=None, # operating conditions
                     TMax=20,bStiff=True,bNoGen=True,bSteadyAero=True, # simulation options
                     reRun=True,
                     fastExe=None,showOutputs=True,nCores=4): # execution options
    """ Computes CP and CT as function of tip speed ratio (lambda) and pitch.
    There are two main ways to define the inputs:
      - Option 1: provide Lambda and Pitch (deg)
      - Option 2: provide WS (m/s), Omega (in rpm) and Pitch (deg), in which case len(WS)==len(Omega)

    Returns (MCP, MCT, Lambda, Pitch, MaxVal, result) where MCP/MCT are
    (len(Lambda), len(Pitch)) matrices and MaxVal holds CP_max with the
    corresponding optimal lambda and pitch.

    NOTE(review): the final reshape uses len(Lambda); with Option 2 only
    (Lambda left as None) this will fail — TODO confirm and derive Lambda
    from Omega/WS in that case. Also, when both Omega and Lambda are given,
    Lambda is silently ignored and WS is reset to the default.
    """
    WS_default=5 # If user does not provide a wind speed vector, wind speed used

    # if the user provided a full path to the main file, we scrap the directory. TODO, should be cleaner
    if len(os.path.dirname(main_fastfile))>0:
        main_fastfile=os.path.basename(main_fastfile)

    # --- Reading main fast file to get rotor radius
    fst = fi.FASTInputFile(os.path.join(refdir,main_fastfile))
    ed = fi.FASTInputFile(os.path.join(refdir,fst['EDFile'].replace('"','')))
    R = ed['TipRad']

    # --- Making sure we have consistent WS/Omega operating-point vectors
    if (Omega is not None):
        if (Lambda is not None):
            WS = np.ones(Omega.shape)*WS_default
        elif (WS is not None):
            if len(WS)!=len(Omega):
                raise Exception('When providing Omega and WS, both vectors should have the same dimension')
        else:
            WS = np.ones(Omega.shape)*WS_default
    else:
        # Derive Omega (rpm) from the requested tip speed ratios at WS_default.
        Omega = WS_default * Lambda/R*60/(2*np.pi) # TODO, use more realistic combinations of WS and Omega
        WS = np.ones(Omega.shape)*WS_default

    # --- Defining flat vectors of operating conditions (pitch-major order,
    # matching the reshape to (len(Lambda), len(Pitch)) further below)
    WS_flat = []
    RPM_flat = []
    Pitch_flat = []
    for pitch in Pitch:
        for (rpm,ws) in zip(Omega,WS):
            WS_flat.append(ws)
            RPM_flat.append(rpm)
            Pitch_flat.append(pitch)
    # --- Setting up default options
    baseDict={'TMax': TMax, 'DT': 0.01, 'DT_Out': 0.1} # NOTE: Tmax should be at least 2pi/Omega
    baseDict = paramsNoController(baseDict)
    if bStiff:
        baseDict = paramsStiff(baseDict)
    if bNoGen:
        baseDict = paramsNoGen(baseDict)
    if bSteadyAero:
        baseDict = paramsSteadyAero(baseDict)
    # --- Creating set of parameters to be changed
    # TODO: verify that RtAeroCp and RtAeroCt are present in AeroDyn outlist
    PARAMS = paramsWS_RPM_Pitch(WS_flat,RPM_flat,Pitch_flat,baseDict=baseDict, FlatInputs=True)
    # --- Generating all files in a workDir
    workDir = refdir.strip('/').strip('\\')+'_CPLambdaPitch'
    print('>>> Generating inputs files in {}'.format(workDir))
    RemoveAllowed=reRun # If the user want to rerun, we can remove, otherwise we keep existing simulations
    fastFiles=templateReplace(PARAMS, refdir, outputDir=workDir,removeRefSubFiles=True,removeAllowed=RemoveAllowed,main_file=main_fastfile)
    # --- Running fast simulations
    print('>>> Running {} simulations...'.format(len(fastFiles)))
    runner.run_fastfiles(fastFiles, showOutputs=showOutputs, fastExe=fastExe, nCores=nCores, reRun=reRun)
    # --- Postpro - Computing averages at the end of the simluation
    print('>>> Postprocessing...')
    outFiles = [os.path.splitext(f)[0]+'.outb' for f in fastFiles]
    # outFiles = glob.glob(os.path.join(workDir,'*.outb'))
    ColKeepStats = ['RotSpeed_[rpm]','BldPitch1_[deg]','RtAeroCp_[-]','RtAeroCt_[-]','Wind1VelX_[m/s]']
    result = postpro.averagePostPro(outFiles,avgMethod='periods',avgParam=1,ColKeep=ColKeepStats,ColSort='RotSpeed_[rpm]')
    # print(result)
    # --- Adding lambda, sorting and keeping only few columns
    result['lambda_[-]'] = result['RotSpeed_[rpm]']*R*2*np.pi/60/result['Wind1VelX_[m/s]']
    result.sort_values(['lambda_[-]','BldPitch1_[deg]'],ascending=[True,True],inplace=True)
    ColKeepFinal=['lambda_[-]','BldPitch1_[deg]','RtAeroCp_[-]','RtAeroCt_[-]']
    result=result[ColKeepFinal]
    print('>>> Done')
    # --- Converting to a matrices
    # NOTE(review): fails if Lambda is None (Option 2 inputs) — see docstring.
    CP = result['RtAeroCp_[-]'].values
    CT = result['RtAeroCt_[-]'].values
    MCP =CP.reshape((len(Lambda),len(Pitch)))
    MCT =CT.reshape((len(Lambda),len(Pitch)))
    LAMBDA, PITCH = np.meshgrid(Lambda, Pitch)
    # --- CP max: MCP is (lambda, pitch); the meshgrid arrays are transposed,
    # hence the [j,i] indexing below.
    i,j = np.unravel_index(MCP.argmax(), MCP.shape)
    MaxVal={'CP_max':MCP[i,j],'lambda_opt':LAMBDA[j,i],'pitch_opt':PITCH[j,i]}
    return MCP,MCT,Lambda,Pitch,MaxVal,result | 5,325,148 |
def cylinders_instantiate(part, dims, dest):
    """Create a series of polygon cylinders for a body part.

    :param part: name prefix for the created cylinders; also used as the
        name of the Maya set that groups them.
    :param dims: iterable of (radius, height) pairs, one per cylinder.
    :param dest: list to which each ``cmds.polyCylinder`` result is appended.
    """
    names = []
    for i, d in enumerate(dims):
        n = "{}_cylinder_{}".format(part, i)
        names.append(n)
        print("\t\t<Creation du cylindre : {} de dimension {}>".format(n, d))
        # subDivisionsX/Y/Z are module-level globals shared by all primitives;
        # d[0] is the radius and d[1] the height.
        dest.append(cmds.polyCylinder(n=n, sx=subDivisionsX, sy=subDivisionsY, sz=subDivisionsZ, r=d[0], h=d[1]))
    # Group every created cylinder into a Maya set named after the part;
    # `sets` is a module-level list collecting those set handles.
    sets.append(cmds.sets(names, n=part)) | 5,325,149 |
def segment(im, pad=0, caffemodel=None):
    """Segment an input image.

    The image is padded, rescaled to a pyramid of sizes, classified at each
    scale, and the per-scale probability maps are upsampled back to the
    original resolution and averaged.

    :param im: image to segment
    :param pad: number of pixels of padding to add
    :param caffemodel: path to caffemodel file
    :return: averaged probability maps at the input image's resolution
    """
    caffe.set_mode_gpu()
    # Build the scale pyramid from the padded image and classify each level.
    pyramid = resize_images(add_padding(im, pad))
    outputs = []
    for scaled in pyramid:
        outputs.append(classify(scaled, caffemodel=caffemodel))
    started = time.time()
    prob_maps = get_average_prob_maps(outputs, im.shape, pad)
    print("Total segmenting time: {:.3f} ms".format((time.time() - started) * 1000))
    return prob_maps
async def test_call_with_no_args_async():
    """
    It should be able to invoke functions with no-args.
    """
    cb = CircuitBreaker()
    result = await cb.call_async(func_succeed_async)
    assert result
def morpheme_tokenizer(st):
    """
    Tokenize a string, splitting it on typical morpheme boundaries:
    whitespace and the characters - . : / ( ) =

    :param st: string to tokenize
    :return: generator of Morph objects, one per token, carrying the token
        text and its start/stop offsets within ``st``.
    """
    # Raw string: the previous non-raw literal contained invalid escape
    # sequences (\s, \-, ...) which warn on modern Python 3.
    pieces = re.finditer(r'[^\s\-\.:/\(\)=]+', st)
    for match in pieces:
        # The pattern excludes whitespace, so a match is never blank; the
        # strip() guard is kept as a cheap defensive check.
        if match.group().strip():
            yield Morph(match.group(0), start=match.start(), stop=match.end())
def register(handlerclass):
    """
    Adds a custom handler.

    Raises TypeError if the given class is not a Handler subclass.
    """
    if issubclass(handlerclass, Handler):
        __HANDLERS.append(handlerclass)
        return
    message = u"parameter it's not a Handler class"
    raise TypeError(message)
async def test_read_write(dut):
    """Test handle inheritance.

    Walks the DUT hierarchy checking generics/parameters and constants,
    then writes signals (whole arrays, records, and individual
    sub-indices) and verifies the values propagate to the output ports.
    Several checks are gated on the HDL language and on simulator
    name/version, reflecting known simulator limitations.
    """
    tlog = logging.getLogger("cocotb.test")

    cocotb.start_soon(Clock(dut.clk, 10, "ns").start())

    await Timer(10, "ns")

    # --- Read-only checks: generics/parameters -------------------------
    tlog.info("Checking Generics/Parameters:")
    _check_logic(tlog, dut.param_logic, 1)
    _check_logic(tlog, dut.param_logic_vec, 0xDA)

    if cocotb.LANGUAGE in ["vhdl"]:
        _check_int(tlog, dut.param_bool, 1)
        _check_int(tlog, dut.param_int, 6)
        _check_real(tlog, dut.param_real, 3.14)
        _check_int(tlog, dut.param_char, ord("p"))
        _check_str(tlog, dut.param_str, b"ARRAYMOD")

        # Record/complex generics are skipped on Riviera (not accessible).
        if not cocotb.SIM_NAME.lower().startswith("riviera"):
            _check_logic(tlog, dut.param_rec.a, 0)
            _check_logic(tlog, dut.param_rec.b[0], 0)
            _check_logic(tlog, dut.param_rec.b[1], 0)
            _check_logic(tlog, dut.param_rec.b[2], 0)
            _check_logic(tlog, dut.param_cmplx[0].a, 0)
            _check_logic(tlog, dut.param_cmplx[0].b[0], 0)
            _check_logic(tlog, dut.param_cmplx[0].b[1], 0)
            _check_logic(tlog, dut.param_cmplx[0].b[2], 0)
            _check_logic(tlog, dut.param_cmplx[1].a, 0)
            _check_logic(tlog, dut.param_cmplx[1].b[0], 0)
            _check_logic(tlog, dut.param_cmplx[1].b[1], 0)
            _check_logic(tlog, dut.param_cmplx[1].b[2], 0)

    # --- Read-only checks: constants -----------------------------------
    tlog.info("Checking Constants:")
    _check_logic(tlog, dut.const_logic, 0)
    _check_logic(tlog, dut.const_logic_vec, 0x3D)

    if cocotb.LANGUAGE in ["vhdl"]:
        _check_int(tlog, dut.const_bool, 0)
        _check_int(tlog, dut.const_int, 12)
        _check_real(tlog, dut.const_real, 6.28)
        _check_int(tlog, dut.const_char, ord("c"))
        _check_str(tlog, dut.const_str, b"MODARRAY")

        if not cocotb.SIM_NAME.lower().startswith("riviera"):
            _check_logic(tlog, dut.const_rec.a, 1)
            _check_logic(tlog, dut.const_rec.b[0], 0xFF)
            _check_logic(tlog, dut.const_rec.b[1], 0xFF)
            _check_logic(tlog, dut.const_rec.b[2], 0xFF)
            _check_logic(tlog, dut.const_cmplx[1].a, 1)
            _check_logic(tlog, dut.const_cmplx[1].b[0], 0xFF)
            _check_logic(tlog, dut.const_cmplx[1].b[1], 0xFF)
            _check_logic(tlog, dut.const_cmplx[1].b[2], 0xFF)
            _check_logic(tlog, dut.const_cmplx[2].a, 1)
            _check_logic(tlog, dut.const_cmplx[2].b[0], 0xFF)
            _check_logic(tlog, dut.const_cmplx[2].b[1], 0xFF)
            _check_logic(tlog, dut.const_cmplx[2].b[2], 0xFF)

    dut.select_in.value = 2

    await Timer(10, "ns")

    # --- Whole-signal writes -------------------------------------------
    tlog.info("Writing the signals!!!")
    dut.sig_logic.value = 1
    dut.sig_logic_vec.value = 0xCC
    dut.sig_t2.value = [0xCC, 0xDD, 0xEE, 0xFF]
    dut.sig_t4.value = [
        [0x00, 0x11, 0x22, 0x33],
        [0x44, 0x55, 0x66, 0x77],
        [0x88, 0x99, 0xAA, 0xBB],
        [0xCC, 0xDD, 0xEE, 0xFF],
    ]

    if cocotb.LANGUAGE in ["vhdl"]:
        dut.sig_bool.value = 1
        dut.sig_int.value = 5000
        dut.sig_real.value = 22.54
        dut.sig_char.value = ord("Z")
        dut.sig_str.value = "Testing"
        dut.sig_rec.a.value = 1
        dut.sig_rec.b[0].value = 0x01
        dut.sig_rec.b[1].value = 0x23
        dut.sig_rec.b[2].value = 0x45
        dut.sig_cmplx[0].a.value = 0
        dut.sig_cmplx[0].b[0].value = 0x67
        dut.sig_cmplx[0].b[1].value = 0x89
        dut.sig_cmplx[0].b[2].value = 0xAB
        dut.sig_cmplx[1].a.value = 1
        dut.sig_cmplx[1].b[0].value = 0xCD
        dut.sig_cmplx[1].b[1].value = 0xEF
        dut.sig_cmplx[1].b[2].value = 0x55

    await Timer(10, "ns")

    # --- Verify the whole-signal writes reached the output ports -------
    tlog.info("Checking writes:")
    _check_logic(tlog, dut.port_logic_out, 1)
    _check_logic(tlog, dut.port_logic_vec_out, 0xCC)
    _check_value(tlog, dut.sig_t2, [0xCC, 0xDD, 0xEE, 0xFF])
    _check_logic(tlog, dut.sig_t2[7], 0xCC)
    _check_logic(tlog, dut.sig_t2[4], 0xFF)
    _check_logic(tlog, dut.sig_t4[1][5], 0x66)
    _check_logic(tlog, dut.sig_t4[3][7], 0xCC)

    if cocotb.LANGUAGE in ["vhdl"]:
        _check_int(tlog, dut.port_bool_out, 1)
        _check_int(tlog, dut.port_int_out, 5000)
        _check_real(tlog, dut.port_real_out, 22.54)
        _check_int(tlog, dut.port_char_out, ord("Z"))
        _check_str(tlog, dut.port_str_out, b"Testing")

        _check_logic(tlog, dut.port_rec_out.a, 1)
        _check_logic(tlog, dut.port_rec_out.b[0], 0x01)
        _check_logic(tlog, dut.port_rec_out.b[1], 0x23)
        _check_logic(tlog, dut.port_rec_out.b[2], 0x45)
        _check_logic(tlog, dut.port_cmplx_out[0].a, 0)
        _check_logic(tlog, dut.port_cmplx_out[0].b[0], 0x67)
        _check_logic(tlog, dut.port_cmplx_out[0].b[1], 0x89)
        _check_logic(tlog, dut.port_cmplx_out[0].b[2], 0xAB)
        _check_logic(tlog, dut.port_cmplx_out[1].a, 1)
        _check_logic(tlog, dut.port_cmplx_out[1].b[0], 0xCD)
        _check_logic(tlog, dut.port_cmplx_out[1].b[1], 0xEF)
        _check_logic(tlog, dut.port_cmplx_out[1].b[2], 0x55)

    # --- Sub-index writes (bit/element granularity) --------------------
    tlog.info("Writing a few signal sub-indices!!!")
    dut.sig_logic_vec[2].value = 0
    # Deep sub-index writes are known-broken on ncsim/xmsim and on
    # certain Riviera releases; skip them there.
    if cocotb.LANGUAGE in ["vhdl"] or not (
        cocotb.SIM_NAME.lower().startswith(("ncsim", "xmsim"))
        or (
            cocotb.SIM_NAME.lower().startswith("riviera")
            and cocotb.SIM_VERSION.startswith(("2016.06", "2016.10", "2017.02"))
        )
    ):
        dut.sig_t6[1][3][2].value = 1
        dut.sig_t6[0][2][7].value = 0

    if cocotb.LANGUAGE in ["vhdl"]:
        dut.sig_str[2].value = ord("E")
        dut.sig_rec.b[1][7].value = 1
        dut.sig_cmplx[1].b[1][0].value = 0

    await Timer(10, "ns")

    # --- Verify the sub-index writes -----------------------------------
    tlog.info("Checking writes (2):")
    _check_logic(tlog, dut.port_logic_vec_out, 0xC8)
    if cocotb.LANGUAGE in ["vhdl"] or not (
        cocotb.SIM_NAME.lower().startswith(("ncsim", "xmsim"))
        or (
            cocotb.SIM_NAME.lower().startswith("riviera")
            and cocotb.SIM_VERSION.startswith(("2016.06", "2016.10", "2017.02"))
        )
    ):
        _check_logic(tlog, dut.sig_t6[1][3][2], 1)
        _check_logic(tlog, dut.sig_t6[0][2][7], 0)

    if cocotb.LANGUAGE in ["vhdl"]:
        _check_str(
            tlog, dut.port_str_out, b"TEsting"
        )  # the uppercase "E" from a few lines before

        _check_logic(tlog, dut.port_rec_out.b[1], 0xA3)
        _check_logic(tlog, dut.port_cmplx_out[1].b[1], 0xEE) | 5,325,154 |
def check_consistency(  # pylint: disable=too-many-arguments
    num_users=None,
    num_items=None,
    users_hat=None,
    items_hat=None,
    users=None,
    items=None,
    user_item_scores=None,
    default_num_users=None,
    default_num_items=None,
    default_num_attributes=None,
    num_attributes=None,
    attributes_must_match=True,
):
    """Validate that the inputs to the recommender system are consistent
    based on their dimensions. Furthermore, if all of the inputs
    are consistent, we return the number of users and items that are inferred
    from the inputs, or fall back to a provided default number.

    Parameters
    -----------

        num_users: int, optional
            An integer representing the number of users in the system

        num_items: int, optional
            An integer representing the number of items in the system

        users_hat: :obj:`numpy.ndarray`, optional
            A 2D matrix whose first dimension should be equal to the number of
            users in the system. Typically this matrix refers to the system's
            internal representation of user profiles, not the "true" underlying
            user profiles, which are unknown to the system.

        items_hat: :obj:`numpy.ndarray`, optional
            A 2D matrix whose second dimension should be equal to the number of
            items in the system. Typically this matrix refers to the system's
            internal representation of item attributes, not the "true" underlying
            item attributes, which are unknown to the system.

        users: :obj:`numpy.ndarray`, optional
            A 2D matrix whose first dimension should be equal to the number of
            users in the system. This is the "true" underlying user profile
            matrix.

        items: :obj:`numpy.ndarray`, optional
            A 2D matrix whose second dimension should be equal to the number of
            items in the system. This is the "true" underlying item attribute
            matrix.

        user_item_scores: :obj:`numpy.ndarray`, optional
            A 2D matrix whose first dimension is the number of users in the system
            and whose second dimension is the number of items in the system.

        default_num_users: int, optional
            If the number of users is not specified anywhere in the inputs, we return
            this value as the number of users to be returned.

        default_num_items: int, optional
            If the number of items is not specified anywhere in the inputs, we return
            this value as the number of items to be returned.

        default_num_attributes: int, optional
            If the number of attributes in the item/user representations is not
            specified or cannot be inferred, this is the default number
            of attributes that should be used. (This applies only to users_hat
            and items_hat.)

        num_attributes: int, optional
            Check that the number of attributes per user & per item are equal to
            this specified number. (This applies only to users_hat and items_hat.)

        attributes_must_match: bool (optional, default: True)
            Check that the user and item matrices match up on the attribute dimension.
            If False, the number of columns in the user matrix and the number of
            rows in the item matrix are allowed to be different.

    Returns
    --------
        num_users: int
            Number of users, inferred from the inputs (or provided default).

        num_items: int
            Number of items, inferred from the inputs (or provided default).

        num_attributes: int (optional)
            Number of attributes per item/user profile, inferred from inputs
            (or provided default). Only returned when attributes_must_match.
    """
    # Basic shape/type validation before any dimension inference.
    if not is_array_valid_or_none(items_hat, ndim=2):
        raise ValueError("items matrix must be a 2D matrix or None")
    if not is_array_valid_or_none(users_hat, ndim=2):
        raise ValueError("users matrix must be a 2D matrix or None")
    if not is_valid_or_none(num_attributes, int):
        raise TypeError("num_attributes must be an int")

    # Collect every non-None candidate for each dimension; getattr with a
    # [None, ...] default lets plain None inputs contribute nothing.
    num_items_vals = non_none_values(
        getattr(items_hat, "shape", [None, None])[1],
        getattr(items, "shape", [None, None])[1],
        getattr(user_item_scores, "shape", [None, None])[1],
        num_items,
    )

    num_users_vals = non_none_values(
        getattr(users, "shape", [None])[0],
        getattr(users_hat, "shape", [None])[0],
        getattr(user_item_scores, "shape", [None])[0],
        num_users,
    )

    # Candidates must agree on a single value, otherwise these raise with
    # the given message; an empty candidate set falls back to the default.
    num_users = resolve_set_to_value(
        num_users_vals, default_num_users, "Number of users is not the same across inputs"
    )
    num_items = resolve_set_to_value(
        num_items_vals, default_num_items, "Number of items is not the same across inputs"
    )

    if attributes_must_match:
        # check attributes matching for users_hat and items_hat
        num_attrs_vals = non_none_values(
            getattr(users_hat, "shape", [None, None])[1],
            getattr(items_hat, "shape", [None])[0],
            num_attributes,
        )
        num_attrs = resolve_set_to_value(
            num_attrs_vals,
            default_num_attributes,
            "User representation and item representation matrices are not "
            "compatible with each other",
        )
        return num_users, num_items, num_attrs
    else:
        return num_users, num_items | 5,325,155 |
def verify_forgot_password(request):
    """
    Check the forgot-password verification and possibly let the user
    change their password because of it.

    Requires a userid and token in the request; on a valid, unexpired
    token for an active, email-verified user it shows (GET) or processes
    (POST) the change-password form. Any invalid input renders a 404.
    """
    # get form data variables, and specifically check for presence of token
    formdata = _process_for_token(request)
    if not formdata['has_userid_and_token']:
        return render_404(request)

    formdata_token = formdata['vars']['token']
    formdata_userid = formdata['vars']['userid']
    formdata_vars = formdata['vars']

    # check if it's a valid user id
    user = User.query.filter_by(id=formdata_userid).first()
    if not user:
        return render_404(request)

    # check if we have a real user and correct token
    # NOTE(review): `user` is already guaranteed truthy by the early return
    # above; `unicode()` makes this Python 2 only.
    if ((user and user.fp_verification_key and
         user.fp_verification_key == unicode(formdata_token) and
         datetime.datetime.now() < user.fp_token_expire
         and user.email_verified and user.status == 'active')):
        cp_form = auth_forms.ChangePassForm(formdata_vars)
        if request.method == 'POST' and cp_form.validate():
            user.pw_hash = auth_lib.bcrypt_gen_password_hash(
                request.form['password'])
            # Invalidate the one-time token once the password is changed.
            user.fp_verification_key = None
            user.fp_token_expire = None
            user.save()
            messages.add_message(
                request,
                messages.INFO,
                _("You can now log in using your new password."))
            return redirect(request, 'mediagoblin.auth.login')
        else:
            # GET, or POST with validation errors: (re)render the form.
            return render_to_response(
                request,
                'mediagoblin/auth/change_fp.html',
                {'cp_form': cp_form})

    # in case there is a valid id but no user with that id in the db
    # or the token expired
    else:
        return render_404(request) | 5,325,156 |
def test_noop_load() -> None:
    """
    Load a checkpoint
    """
    exp_id = exp.run_basic_test(
        conf.fixtures_path("no_op/single.yaml"), conf.fixtures_path("no_op"), 1
    )
    first_trial = exp.experiment_trials(exp_id)[0]
    ckpt = (
        Determined(conf.make_master_url())
        .get_trial(first_trial.trial.id)
        .top_checkpoint()
    )
    assert ckpt.task_id == first_trial.trial.taskId
def requirement(alpha=0.2, w=2.):
    """
    Plots the requirements, both for PSF and MTF and compares them.

    Saves the comparison figure to 'requirementAlpha.pdf' and prints the
    log-log slopes of the requirement curve and of the MTF-derived points.
    (NOTE: uses Python 2 print statements.)

    :param alpha: power law slope
    :param w: wavenumber
    :return: None
    """
    # from MTF: convert the MTF requirement values at three wavelengths to
    # an equivalent Gaussian FWHM (arbitrary units for the given w)
    wave = [550, 750, 900]
    mtf = np.asarray([0.3, 0.35, 0.4])
    forGaussian = np.sqrt(- np.log(mtf) * 4 * np.log(2) / np.pi**2 / w**2)

    # fit a power law amplitude * wavelength**exponent to the MTF points
    fitfunc = lambda p, x: p[0] * x ** p[1]
    errfunc = lambda p, x, y: fitfunc(p, x) - y
    fit, success = optimize.leastsq(errfunc, [1, -0.2], args=(wave, forGaussian))

    # requirement: pure power law over the 500-950 nm band
    x = np.arange(500, 950, 1)
    y = x**-alpha

    # compute the best fit function from the best fit parameters
    corrfit = fitfunc(fit, x)

    plt.plot(x, y, label=r'PERD: $\alpha = - %.1f$' % alpha)
    plt.plot(wave, forGaussian, 'rs', label='MTF Requirement')
    plt.plot(x, corrfit, 'r--', label=r'Fit: $\alpha \sim %.3f $' % (fit[1]))
    plt.xlabel('Wavelength [nm]')
    plt.ylabel('FWHM [Arbitrary, u=%i]' % w)
    plt.legend(shadow=True, fancybox=True, numpoints=1)
    plt.savefig('requirementAlpha.pdf')
    plt.close()

    # Finite-difference slopes in log-log space (should equal -alpha).
    logx = np.log10(x)
    logy = np.log10(y)
    print 'Slope in log-log:'
    print (logy[1] - logy[0]) / (logx[1] - logx[0]), (logy[11] - logy[10]) / (logx[11] - logx[10])

    # Pairwise slopes between the three MTF-derived points.
    logyy = np.log(forGaussian)
    logxx = np.log(wave)
    print '\nSlope from MTF:'
    print (logyy[1] - logyy[0]) / (logxx[1] - logxx[0])
    print (logyy[2] - logyy[0]) / (logxx[2] - logxx[0])
    print (logyy[2] - logyy[1]) / (logxx[2] - logxx[1]) | 5,325,158 |
def setup(hass, config):
    """Set up for WeMo devices.

    Starts the pywemo subscription registry (stopped again on Home
    Assistant shutdown), registers a discovery dispatcher, collects
    statically configured devices plus (unless disabled) network-discovered
    ones, and fires a discovery event for each.
    """
    import pywemo

    global SUBSCRIPTION_REGISTRY
    SUBSCRIPTION_REGISTRY = pywemo.SubscriptionRegistry()
    SUBSCRIPTION_REGISTRY.start()

    def stop_wemo(event):
        """Shutdown Wemo subscriptions and subscription thread on exit."""
        _LOGGER.debug("Shutting down subscriptions.")
        SUBSCRIPTION_REGISTRY.stop()

    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_wemo)

    def discovery_dispatch(service, discovery_info):
        """Dispatcher for WeMo discovery events."""
        # name, model, location, mac
        model_name = discovery_info.get('model_name')
        serial = discovery_info.get('serial')

        # Only register a device once
        if serial in KNOWN_DEVICES:
            return
        _LOGGER.debug('Discovered unique device %s', serial)
        KNOWN_DEVICES.append(serial)

        # Unknown models fall back to the 'switch' platform.
        component = WEMO_MODEL_DISPATCH.get(model_name, 'switch')

        discovery.load_platform(hass, component, DOMAIN, discovery_info,
                                config)

    discovery.listen(hass, SERVICE_WEMO, discovery_dispatch)

    def setup_url_for_device(device):
        """Determine setup.xml url for given device."""
        return 'http://{}:{}/setup.xml'.format(device.host, device.port)

    def setup_url_for_address(host, port):
        """Determine setup.xml url for given host and port pair."""
        # Probe for the port if the config did not supply one.
        if not port:
            port = pywemo.ouimeaux_device.probe_wemo(host)
        if not port:
            return None
        return 'http://{}:{}/setup.xml'.format(host, port)

    devices = []

    # Statically configured devices: resolve their setup.xml and build a
    # device object; unreachable hosts are logged and skipped.
    for host, port in config.get(DOMAIN, {}).get(CONF_STATIC, []):
        url = setup_url_for_address(host, port)
        if not url:
            _LOGGER.error(
                'Unable to get description url for %s',
                '{}:{}'.format(host, port) if port else host)
            continue

        try:
            device = pywemo.discovery.device_from_description(url, None)
        except (requests.exceptions.ConnectionError,
                requests.exceptions.Timeout) as err:
            _LOGGER.error('Unable to access %s (%s)', url, err)
            continue

        devices.append((url, device))

    disable_discovery = config.get(DOMAIN, {}).get(CONF_DISABLE_DISCOVERY)

    if not disable_discovery:
        _LOGGER.debug("Scanning for WeMo devices.")
        devices.extend(
            (setup_url_for_device(device), device)
            for device in pywemo.discover_devices())

    for url, device in devices:
        _LOGGER.debug('Adding wemo at %s:%i', device.host, device.port)

        discovery_info = {
            'model_name': device.model_name,
            'serial': device.serialnumber,
            'mac_address': device.mac,
            'ssdp_description': url,
        }

        discovery.discover(hass, SERVICE_WEMO, discovery_info)

    return True | 5,325,159 |
def extend_node(node, out_size, axis=-1, value=0):
    """Extend the size of a ``node`` array along one axis.

    This is a thin alias for :func:`extend_arrays_to_size`; the first axis of
    ``node`` is considered the "batch" axis.

    Args:
        node (numpy.ndarray): array whose ``axis`` is to be extended.
        out_size (int): target output size for the specified ``axis``.
        axis (int): feature axis to extend; defaults to the last axis.
        value (int or float): fill value for the newly added positions.

    Returns:
        numpy.ndarray: the extended array, padded with ``value``.
    """
    return extend_arrays_to_size(node, out_size=out_size, axis=axis, value=value)
def add_edge(graph, edge):
    """Adds an edge to a given graph.

    If an edge between the two nodes already exists, the one with the largest
    weight is retained.

    Args:
        graph: A `dict`: source_id -> (target_id -> weight) to be augmented.
        edge: A `list` (or `tuple`) of the form `[source, target, weight]`, where
            `source` and `target` are strings, and `weight` is a numeric value of
            type `string` or `float`. The 'weight' component is optional; if not
            supplied, it defaults to 1.0.

    Returns:
        `None`. Instead, this function has a side-effect on the `graph` argument.
    """
    source = edge[0]
    target = edge[1]
    # Weight may arrive as a string; missing weight defaults to 1.0.
    weight = float(edge[2]) if len(edge) > 2 else 1.0
    # setdefault replaces the original membership-test-then-assign pair.
    t_dict = graph.setdefault(source, {})
    # Keep only the heaviest parallel edge between the two nodes.
    if target not in t_dict or weight > t_dict[target]:
        t_dict[target] = weight
def _post_amplitude(d, method, amp):
"""
Internal small helper function for repeated tests of method - posts the
computed amplitudes to metadata with a different key for each method
used to compute amplitude.
"""
if method == 'rms' or method == 'RMS':
d['rms_amplitude'] = amp
elif method == 'perc':
d['perc_amplitude'] = amp
elif method == 'MAD' or method == 'mad':
d['mad_amplitude'] = amp
else:
d['amplitude'] = amp | 5,325,162 |
def forward(X, weights, bias):
    """
    Simulate the forward pass on one layer.

    :param X: input matrix.
    :param weights: weight matrix.
    :param bias: bias vector.
    :return: binary predictions (sigmoid output thresholded at 0.5).
    """
    pre_activation = np.matmul(weights, np.transpose(X))
    # Repeat the bias once per sample and shape it to match the activations.
    tiled_bias = np.reshape(
        np.repeat(bias, np.shape(X)[0], axis=0), np.shape(pre_activation))
    activations = sigmoid_activation(pre_activation + tiled_bias)
    return np.where(activations < 0.5, 0, 1)
def sanitize_for_params(x: Any) -> Any:
    """Sanitizes the input for a more flexible usage with AllenNLP's `.from_params()` machinery.

    Mainly converts numpy scalar types into plain python types.

    Parameters
    ----------
    x
        The parameter passed on to `allennlp.common.FromParams.from_params()`

    Returns
    -------
    sanitized_x
    """
    # The numpy checks MUST precede the builtin-type checks:
    # isinstance(numpy.float64(1), float) is True, so a float check first
    # would leave numpy scalars unconverted (AllenNLP's own sanitize has
    # exactly that problem).
    if isinstance(x, util.numpy.number):
        return x.item()
    if isinstance(x, util.numpy.bool_):
        # Numpy bool_ need to be converted to python bool.
        return bool(x)
    if isinstance(x, (str, float, int, bool)):
        return x
    if isinstance(x, dict):
        # Dicts need their values sanitized
        return {key: sanitize_for_params(val) for key, val in x.items()}
    # Lists and tuples need their elements sanitized
    if isinstance(x, list):
        return [sanitize_for_params(item) for item in x]
    if isinstance(x, tuple):
        return tuple(sanitize_for_params(item) for item in x)
    # `to_json` lets user-defined classes customize their own sanitization
    if hasattr(x, "to_json"):
        return x.to_json()
    return x
def demo(epd):
    """Draw the clothes-drying forecast screen on the e-paper display `epd`
    and set the status LED from today's value (partial update demo)."""
    # initially set all white background; '1' = 1-bit monochrome image
    image = Image.new('1', epd.size, WHITE)
    # prepare for drawing
    draw = ImageDraw.Draw(image)
    width, height = image.size
    timestamp_font = ImageFont.truetype(FONT_FILE, TIMESTAMP_FONT_SIZE)
    today_font = ImageFont.truetype(FONT_FILE, TODAY_FONT_SIZE)
    tomorrow_font = ImageFont.truetype(FONT_FILE, TOMORROW_FONT_SIZE)
    rest_font = ImageFont.truetype(FONT_FILE, REST_FONT_SIZE)
    # Forecast strings: today, tomorrow, plus five further entries (r1..r5).
    (cloth_dried_today, cloth_dried_tomorrow, cloth_dried_r1, cloth_dried_r2,
     cloth_dried_r3, cloth_dried_r4, cloth_dried_r5) = get_cloth_dried()
    now = datetime.today()
    # clear the display buffer, then draw a border rectangle
    draw.rectangle((0, 0, width, height), fill=WHITE, outline=WHITE)
    draw.rectangle((3, 3, width - 3, height - 3), fill=WHITE, outline=BLACK)
    # print (width - X_OFFSET), " ", (height - Y_OFFSET)
    # draw.rectangle((0,86,264,176), fill=WHITE, outline=WHITE)
    # Fixed layout: timestamp top-left, today/tomorrow on the left column,
    # the remaining five entries stacked on the right at x=200.
    draw.text((4, 4), now.strftime("%Y/%m/%d %H:%M:%S"), fill=BLACK, font=timestamp_font)
    draw.text((5, 15), cloth_dried_today, fill=BLACK, font=today_font)
    draw.text((5, 110), cloth_dried_tomorrow, fill=BLACK, font=tomorrow_font)
    draw.text((200, 20), cloth_dried_r1, fill=BLACK, font=rest_font)
    draw.text((200, 50), cloth_dried_r2, fill=BLACK, font=rest_font)
    draw.text((200, 80), cloth_dried_r3, fill=BLACK, font=rest_font)
    draw.text((200, 110), cloth_dried_r4, fill=BLACK, font=rest_font)
    draw.text((200, 140), cloth_dried_r5, fill=BLACK, font=rest_font)
    # display image on the panel
    # epd.clear()
    epd.display(image)
    epd.update()
    # LED colour reflects today's value; assumes cloth_dried_today is a
    # numeric string (int() raises otherwise) -- TODO confirm upstream format.
    today_value = int(cloth_dried_today)
    if today_value > 80:
        turn_led(LED_RED)
    elif today_value > 50:
        turn_led(LED_GREEN)
    else:
        turn_led(LED_BLUE)
def filename_fixture():
    """Relative path of the cities csv fixture used by the tests."""
    path_parts = ('tests', 'fixtures', 'cities.csv')
    return os.path.join(*path_parts)
def _get_color_context():
    """ Run at beginning of color workflow functions (ex start() or resume()) to orient the function.
    Assumes python current working directory = the relevant AN subdirectory with session.log in place.
    Adapted from package mp_phot, workflow_session.py._get_session_context(). Required for .resume().
    TESTED OK 2021-01-08.
    :return: 3-tuple: (this_directory, mp_string, an_string) [3 strings]
    :raises ColorLogFileError: when the log file is missing, truncated,
        malformed, or refers to a different directory than the cwd.
    """
    this_directory = os.getcwd()
    defaults_dict = ini.make_defaults_dict()
    color_log_filename = defaults_dict['color log filename']
    color_log_fullpath = os.path.join(this_directory, color_log_filename)
    if not os.path.isfile(color_log_fullpath):
        raise ColorLogFileError('No color log file found. You probably need to run start() or resume().')
    with open(color_log_fullpath, mode='r') as log_file:
        lines = log_file.readlines()
    # Expected layout: [0] header, [1] directory, [2] MP id, [3] AN id,
    # [4] definition line (currently unused).
    if len(lines) < 5:
        raise ColorLogFileError('Too few lines.')
    if not lines[0].lower().startswith('color log file'):
        raise ColorLogFileError('Header line cannot be parsed.')
    # Normalize case and slash style on both sides before comparing paths.
    directory_from_color_log = lines[1].strip().lower().replace('\\', '/').replace('//', '/')
    directory_from_cwd = this_directory.strip().lower().replace('\\', '/').replace('//', '/')
    if directory_from_color_log != directory_from_cwd:
        print()
        print(directory_from_color_log, directory_from_cwd)
        raise ColorLogFileError('Header line does not match current working directory.')
    # [3:] drops a 3-character tag prefix on each line (presumably 'MP ' and
    # 'AN ') -- TODO confirm against the log writer.
    mp_string = lines[2][3:].strip().upper()
    an_string = lines[3][3:].strip()
    # definition_string = lines[4][len('Definition:'):].strip()
    return this_directory, mp_string, an_string
def validate_user(doc, method):
    """
    validate user their should be only one department head

    Enforces that at most one user per department carries the
    'Department Head' role; warns when a department has none.
    """
    if doc.name == "Administrator":
        return
    # Parameterized query: `doc.department` is user-controlled data, so it
    # must not be string-interpolated into the SQL (the original was open to
    # SQL injection via the department name).
    query = """ SELECT name FROM `tabUser` WHERE department=%s AND
              name IN (SELECT parent FROM `tabUserRole` WHERE role='Department Head')"""
    record = frappe.db.sql(query, (doc.department,), as_list=True)
    # Roles currently assigned to this user document.
    dept_head = [ch.role for ch in doc.user_roles if ch.role == "Department Head"]
    record = [r[0] for r in record]
    if record and dept_head and doc.name not in record:
        frappe.throw("Their can be only one Department Head for %s"%(doc.department))
    elif not record and not dept_head:
        frappe.msgprint("[Warning] Their is no Department Head for the <b>{0}</b> Department<br>\
            Please set the Department Head for <b>{0}</b>".format(doc.department))
def host_ip():
    """Test fixture to resolve and return host_ip as a string."""
    import dns.resolver

    # NOTE(review): dns.resolver.query() is deprecated in dnspython >= 2.0 in
    # favour of resolve() -- confirm the pinned dnspython version before changing.
    query = dns.resolver.query("scanme.nmap.org")
    # An empty answer means DNS resolution failed for the target host.
    assert len(query) > 0, "could not resolve target host name"
    return query[0].address
def with_behavior(strict=UNSET, extras=UNSET, hook=UNSET):
    """Build a base-class injector configuring Serializable type checking.

    Args:
        strict (bool | Exception | callable): False: skip schema validation;
            True: raise ValidationException on schema violation;
            Exception: raise that exception instead; callable: call it with
            the violation reason.
        extras (bool | Exception | callable | (callable, list)): how to react
            to extra (not-in-schema) fields in deserialized data: False to
            ignore, True to LOG.debug(reason), an Exception to raise,
            a callable to invoke with the reason, or a (callable, list) pair
            to invoke the callable except for extras named in the list.
        hook (callable): if provided, called with the ClassMetaDescription at
            the end of its initialization.

    Returns:
        (type): internal temp class (compatible with the `Serializable`
        metaclass) describing how to handle type checking.
    """
    behavior = DefaultBehavior(strict=strict, extras=extras, hook=hook)
    return BaseMetaInjector("_MBehavior", tuple(), {"behavior": behavior})
def get_subnet_from_list_by_id(subnet_id, subnets_list):
    """Return the Neutron subnet with the given id from a list of subnets.

    :param subnet_id: Neutron subnet ID to look up
    :param subnets_list: list of Neutron subnet dicts to search
    :returns: the matching subnet dict, or None (after logging a warning)
        when no subnet in the list carries that id
    """
    match = next((s for s in subnets_list if s['id'] == subnet_id), None)
    if match is not None:
        return match
    LOG.warning("Cannot obtain subnet with id='%s' from provided subnets "
                "list", subnet_id)
def grafana_logo(dispatcher):
    """Build an image_element pointing at the locally hosted Grafana logo."""
    logo_url = dispatcher.static_url(GRAFANA_LOGO_PATH)
    return dispatcher.image_element(logo_url, alt_text=GRAFANA_LOGO_ALT)
def is_json(payload):
    """Return True when `payload` parses as valid JSON, False otherwise."""
    try:
        json.loads(payload)
        return True
    except (TypeError, ValueError):
        # TypeError: payload is not str/bytes; ValueError covers JSONDecodeError.
        return False
def gram_schmidt(M):
    """Orthogonalize the columns of a matrix via classical Gram-Schmidt.

    @param M:
        A mxn matrix whose columns are to be orthogonalized
    @return:
        Matrix of the same shape whose columns are mutually orthogonal
        (not normalized).
    """
    columns = M.T
    basis = [columns[0]]
    for j in range(1, columns.shape[0]):
        # Accumulate the projection of column j onto every vector found so
        # far. np.zeros replaces the original integer-dtype accumulator built
        # with a list comprehension (which also shadowed the loop variable).
        projection = np.zeros(M.shape[0])
        for vec in basis:
            projection = projection + (np.dot(vec, columns[j]) / np.dot(vec, vec)) * vec
        basis.append(columns[j] - projection)
    return np.array(basis).T
def contract_variation_linear(G, A=None, K=10, r=0.5, mode='neighborhood'):
    """
    Sequential contraction with local variation and general families.
    This is an implemmentation that improves running speed,
    at the expense of being more greedy (and thus having slightly larger error).
    See contract_variation() for documentation.

    Fix: the two `dtype=np.bool` uses were replaced by the builtin `bool`;
    `np.bool` was deprecated in NumPy 1.20 and removed in 1.24, so the
    original crashes on current NumPy. Behavior is otherwise unchanged.
    """
    N, deg, W_lil = G.N, G.dw, G.W.tolil()

    # The following is correct only for a single level of coarsening.
    # Normally, A should be passed as an argument.
    if A is None:
        lk, Uk = sp.sparse.linalg.eigsh(G.L, k=K, which='SM', tol=1e-3)  # this is not optimized!
        lk[0] = 1
        lsinv = lk**(-0.5)
        lsinv[0] = 0
        lk[0] = 0
        D_lsinv = np.diag(lsinv)
        A = Uk @ D_lsinv

    # cost function for the subgraph induced by nodes array
    def subgraph_cost(nodes):
        nc = len(nodes)
        ones = np.ones(nc)
        W = W_lil[nodes, :][:, nodes]  # .tocsc()
        L = np.diag(2 * deg[nodes] - W.dot(ones)) - W
        B = (np.eye(nc) - np.outer(ones, ones) / nc) @ A[nodes, :]
        unnormalized_cost = np.linalg.norm(B.T @ L @ B)
        # A singleton set incurs no variation cost.
        return unnormalized_cost / (nc - 1) if nc != 1 else 0.

    class CandidateSet:
        """Candidate contraction set, ordered by its local-variation cost."""
        def __init__(self, candidate_list):
            self.set = candidate_list
            self.cost = subgraph_cost(candidate_list)

        def __lt__(self, other):
            return self.cost < other.cost

    family = []
    # Adjacency plus self-loops, so each node's candidate set contains itself.
    W_bool = G.A + sp.sparse.eye(G.N, dtype=bool, format='csr')
    if 'neighborhood' in mode:
        for i in range(N):
            # i_set = G.A[i,:].indices  # get_neighbors(G, i)
            # i_set = np.append(i_set, i)
            i_set = W_bool[i, :].indices
            family.append(CandidateSet(i_set))

    if 'cliques' in mode:
        import networkx as nx
        Gnx = nx.from_scipy_sparse_matrix(G.W)
        for clique in nx.find_cliques(Gnx):
            family.append(CandidateSet(np.array(clique)))
    else:
        if 'edges' in mode:
            edges = np.array(G.get_edge_list()[0:2])
            for e in range(0, edges.shape[1]):
                family.append(CandidateSet(edges[:, e]))
        if 'triangles' in mode:
            triangles = set([])
            edges = np.array(G.get_edge_list()[0:2])
            for e in range(0, edges.shape[1]):
                [u, v] = edges[:, e]
                for w in range(G.N):
                    if G.W[u, w] > 0 and G.W[v, w] > 0:
                        triangles.add(frozenset([u, v, w]))
            triangles = list(map(lambda x: np.array(list(x)), triangles))
            for triangle in triangles:
                family.append(CandidateSet(triangle))

    family = SortedList(family)
    marked = np.zeros(G.N, dtype=bool)

    # ----------------------------------------------------------------------------
    # Construct a (minimum weight) independent set.
    # ----------------------------------------------------------------------------
    coarsening_list = []
    # n, n_target = N, (1-r)*N
    n_reduce = np.floor(r * N)  # how many nodes do we need to reduce/eliminate?

    while len(family) > 0:
        # Pop the cheapest candidate set.
        i_cset = family.pop(index=0)
        i_set = i_cset.set

        # check if marked
        i_marked = marked[i_set]

        if not any(i_marked):
            n_gain = len(i_set) - 1
            if n_gain > n_reduce:
                continue  # this helps avoid over-reducing
            # all vertices are unmarked: add i_set to the coarsening list
            marked[i_set] = True
            coarsening_list.append(i_set)
            # n -= len(i_set) - 1
            n_reduce -= n_gain
            # if n <= n_target: break
            if n_reduce <= 0:
                break

        # may be worth to keep this set
        else:
            # Drop already-marked vertices and re-insert with updated cost.
            i_set = i_set[~i_marked]
            if len(i_set) > 1:
                # todo1: check whether to add to coarsening_list before adding to family
                # todo2: currently this will also select contraction sets that are disconnected
                #        should we eliminate those?
                i_cset.set = i_set
                i_cset.cost = subgraph_cost(i_set)
                family.add(i_cset)

    return coarsening_list
def horizon_main_nav(context):
    """ Generates top-level dashboard navigation entries. """
    if 'request' not in context:
        return {}
    request = context['request']
    current_dashboard = request.horizon.get('dashboard', None)
    visible_dashboards = []
    for dash in Horizon.get_dashboards():
        # Note: a dashboard with a callable nav whose call returns a falsy
        # value still passes the elif (the callable itself is truthy) --
        # this mirrors the original control flow exactly.
        if callable(dash.nav) and dash.nav(context):
            visible_dashboards.append(dash)
        elif dash.nav:
            visible_dashboards.append(dash)
    return {'components': visible_dashboards,
            'user': request.user,
            'current': getattr(current_dashboard, 'slug', None)}
def _filter_out_bad_segments(img1, seg1, img2, seg2):
    """
    It's possible for shearing or scaling augmentation to sample
    one segment completely out of the image- use this function
    to filter out those cases
    """
    # A segment sampled entirely out of frame has (near-)zero total mass in
    # at least one of the two masks, driving the product's minimum below 0.5.
    mass1 = tf.reduce_sum(seg1, [0, 1])
    mass2 = tf.reduce_sum(seg2, [0, 1])
    smallest = tf.reduce_min(mass1 * mass2)
    if smallest < 0.5:
        warnings.warn("filtering bad segment")
        return False
    return True
def test_rq_predict_nans():
    """
    Tests that the predictions are not nans
    """
    model = pf.GPNARX(data=data, ar=2, kernel=pf.RationalQuadratic())
    x = model.fit()
    x.summary()
    # Forecast once and reuse: the original called model.predict(h=5) twice,
    # doubling the (expensive) prediction work for the same assertion.
    predictions = model.predict(h=5).values
    assert(len(predictions[np.isnan(predictions)]) == 0)
def _seconds_to_hours(time):
"""Convert time: seconds to hours"""
return time / 3600.0 | 5,325,179 |
def execute_command(api_instance, pod_info, exec_command):
    """
    Execute a command inside a specified pod
    exec_command = list of strings

    Returns the command's streamed output on success, or -1 when the pod
    does not exist. Exits the process on any API error other than 404.
    """
    name = pod_info['name']
    resp = None
    try:
        resp = api_instance.read_namespaced_pod(name=name,
                                                namespace='default')
    except ApiException as excep:
        # 404 (pod missing) is handled below via `resp is None`; any other
        # API error is treated as fatal.
        if excep.status != 404:
            print("Unknown error: %s" % excep)
            sys.exit(0)
    if not resp:
        # NOTE(review): signals failure with a -1 return code rather than an
        # exception; callers must check for it explicitly.
        print("Pod %s does not exist. Creating it..." % name)
        return -1
    # Calling exec and waiting for response
    resp = stream(api_instance.connect_get_namespaced_pod_exec,
                  name,
                  'default',
                  command=exec_command,
                  stderr=True, stdin=False,
                  stdout=True, tty=False)
    print("Response: " + resp)
    return resp
def test_should_detect_consistency_for_notebook_and_script_with_conf(work_dir):
    """
    Test check_script_consistency exit without error when there is consistency
    between notebook and script with conf.
    Without conf there is inconsistency but with the right ignore cell there is consistency
    """
    notebook_path = join(CURRENT_DIR, 'data', 'notebooks', 'notebook_blank_and_comment_diff.ipynb')
    script_path = join(CURRENT_DIR, 'data', 'script_no_blank_no_comment_disable_cell.py')
    # The conf tells the checker to ignore cells tagged '# A Tag'.
    write_conf(work_dir, conf_path=join(work_dir, DEFAULT_CONF_FILENAME), ignore_keys=['# A Tag'])
    # check_call raises CalledProcessError (failing this test) when the
    # consistency checker exits with a non-zero status.
    check_call(['check_script_consistency', '-n', notebook_path, '-s', script_path, '-w', work_dir])
async def process_cmd_entry_erase(guild_id: int, txt_channel: str, bosses: list,
                                  channel=None):
    """Processes boss `entry` `erase` subcommand.

    Args:
        guild_id (int): the id of the Discord guild of the originating message
        txt_channel (str): the id of the channel of the originating message,
            belonging to Discord guild of `guild_id`
        bosses (list): a list of bosses to check
        channel (int, optional): the channel for the record;
            defaults to None

    Returns:
        str: an appropriate message for success or fail of command,
            e.g. confirmation or list of entries
    """
    # Accept a single boss name as well as a list.
    if type(bosses) is str:
        bosses = [bosses]
    vdb = vaivora.db.Database(guild_id)
    # NOTE(review): `bosses in boss_conf['bosses']['world']` tests whether the
    # *list itself* is an element of the world-boss list; if per-boss matching
    # was intended this should likely be an all()/subset check -- confirm.
    if channel and bosses in boss_conf['bosses']['world']:
        records = await vdb.rm_entry_db_boss(bosses=bosses, channel=channel)
    else:
        records = await vdb.rm_entry_db_boss(bosses=bosses)
    if records:
        # Bold each erased record for the Discord reply.
        records = [f'**{record}**' for record in records]
        return cleandoc(
            f"""Your queried records ({len(records)}) have been """
            f"""successfully erased.
            - {bullet_point.join(records)}
            """
        )
    else:
        return '*(But **nothing** happened...)*'
def mle_iid_gamma(n):
    """Perform maximum likelihood estimates for parameters for i.i.d.
    NBinom measurements, parametrized by alpha, b=1/beta"""
    initial_guess = np.array([2, 1/300])
    # Optimizer warnings (e.g. overflow during line search) are suppressed.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        res = scipy.optimize.minimize(
            fun=lambda log_params, samples: -log_like_iid_gamma_log_params(log_params, samples),
            x0=initial_guess,
            args=(n,),
            method='L-BFGS-B',
        )
    if not res.success:
        raise RuntimeError('Convergence failed with message', res.message)
    return res.x
def main():
    """ Main function. """
    set_random_seed(args)
    if args.num_runs == -1:  # run once
        Runner(args).run()
        return
    # Multiple runs: collect the best accuracy of each and report statistics.
    xs = [launch_random_run(args).item() for _ in range(args.num_runs)]
    print(xs)
    print(
        "Mean: {:.2f} Median: {:.2f} Var: {:.2f} Stddev: {:.2f}".format(
            np.mean(xs), np.median(xs), np.var(xs), np.std(xs)
        )
    )
def build_tabnet_results(dataset, image_ids, rois, class_ids, scores, masks):
    """Arrange resutls to match COCO specs in http://cocodataset.org/#format
    """
    # If no results, return an empty list
    if rois is None:
        return []

    results = []
    for image_id in image_ids:
        # Loop through detections
        # NOTE(review): every image_id is paired with the *same* rois/scores/
        # masks arrays, so all detections are repeated per image id; this is
        # only correct if image_ids always has length 1 -- confirm callers.
        for i in range(rois.shape[0]):
            class_id = class_ids[i]
            score = scores[i]
            # Boxes rounded to 0.1 px; COCO bbox format is [x, y, w, h].
            bbox = np.around(rois[i], 1)
            mask = masks[:, :, i]

            result = {
                "image_id": image_id,
                "category_id": dataset.get_source_class_id(class_id, "tab"),
                "bbox":
                    [bbox[1], bbox[0], bbox[3] - bbox[1], bbox[2] - bbox[0]],
                "score": score,
                # COCO RLE encoding requires a Fortran-ordered array.
                "segmentation": maskUtils.encode(np.asfortranarray(mask))
            }
            results.append(result)
    return results
def test_sub_account_deposit_address():
    """Tests the API endpoint to get deposit history.

    NOTE(review): the test name says "deposit_address" but the method under
    test is sub_account_deposit_history -- one of the two looks misnamed.
    """
    client = Client(key, secret)
    response = client.sub_account_deposit_history(**params)
    response.should.equal(mock_item)
def make_raw_serving_input_receiver_fn(
        feature_spec: features_specs_type,
        transform_input_tensor: Callable[[Dict[str, tf.Tensor]], None],
        is_model_canned_estimator: bool = False,
        batched_predictions: bool = True
) -> Callable[[], tf.estimator.export.ServingInputReceiver]:
    """
    Build the serving_input_receiver_fn used for serving/inference.

    transform_input_tensor: method that takes the input tensors and will mutate them so prediction
        will have its correct input. For instance, it could be to generate feature transfo from
        "raw dimensions" tensors. (Mutates in place; its return value is ignored.)
    is_model_canned_estimator: if the model you want to serve is a canned estimator, the serving
        function has to be generated differently
    batched_predictions: whether the placeholders carry a batch dimension.
    """
    def serving_input_receiver_fn() -> Any:
        # generate all tensor placeholders:
        raw_tensors, prediction_input_tensors = featurespec_to_input_placeholders(
            feature_spec, batched_predictions)
        # Add transformations (for instance, feature transfos) to prediction_input_tensors
        transform_input_tensor(prediction_input_tensors)
        if is_model_canned_estimator:
            # Canned-estimator path: raw placeholders are exposed under the
            # "raw_input" receiver alternative instead of receiver_tensors.
            return tf.estimator.export.ServingInputReceiver(
                features=prediction_input_tensors, receiver_tensors={},
                receiver_tensors_alternatives={"raw_input": raw_tensors})
        else:
            return tf.estimator.export.ServingInputReceiver(
                features=prediction_input_tensors, receiver_tensors=raw_tensors)
    return serving_input_receiver_fn
def load_parentheses_dataset(path: str, depths: List[int]) -> torchtext.data.Dataset:
    """
    Load equation verification data as a sequential torchtext Dataset, in infix
    notation with parentheses.
    The Dataset is additionally populated with `leaf_vocab`, `unary_vocab`, and
    `binary_vocab` sets.
    """
    with open(path, "r") as f:
        data_by_depth = json.load(f)

    # Vocabularies accumulated across all examples by the closure below.
    leaf_vocab: Set[str] = set()
    unary_vocab: Set[str] = set()
    binary_vocab: Set[str] = set()

    def make_example(serialized):
        # Parse one serialized equation into (left, right, label) plus the
        # root-token index of each side, while growing the vocab sets.
        tree = ExpressionTree.from_serialized(serialized["equation"])
        label = int(serialized["label"] == "1")
        left_root_index = sequence_root_index(tree.left)
        right_root_index = sequence_root_index(tree.right)
        nonlocal leaf_vocab, unary_vocab, binary_vocab
        leaf_vocab = leaf_vocab.union(tree.leaf_vocab())
        unary_vocab = unary_vocab.union(tree.unary_vocab())
        binary_vocab = binary_vocab.union(tree.binary_vocab())
        return torchtext.data.Example.fromlist(
            [str(tree.left), str(tree.right), label, left_root_index, right_root_index],
            list(_PARENTHESES_FIELD_MAP.items()),
        )

    examples = []
    for depth in depths:
        # data_by_depth is grouped by depth: entry i holds depth i+1.
        examples.extend(list(map(make_example, data_by_depth[depth - 1])))

    dataset = torchtext.data.Dataset(examples, _PARENTHESES_FIELD_MAP)
    # Attach the collected vocabularies as extra attributes on the Dataset.
    dataset.leaf_vocab = leaf_vocab
    dataset.unary_vocab = unary_vocab
    dataset.binary_vocab = binary_vocab
    return dataset
def configure(config, import_error="raise", unknown_key="raise"):
    """Configure xtas.

    Parameters
    ----------
    config : dict
        Dict with keys ``CELERY``, ``ELASTICSEARCH`` and ``EXTRA_MODULES``
        will be used to configure the xtas Celery app.
        ``config.CELERY`` will be passed to Celery's ``config_from_object``
        with the flag ``force=True``.
        ``ELASTICSEARCH`` should be a list of dicts with at least the key
        'host'. These are passed to the Elasticsearch constructor (from the
        official client) unchanged.
        ``EXTRA_MODULES`` should be a list of module names to load.
        Failure to supply ``CELERY`` or ``ELASTICSEARCH`` causes the default
        configuration to be re-set. Extra modules will not be unloaded,
        though.
    import_error : string
        Action to take when one of the ``EXTRA_MODULES`` cannot be imported.
        Either "log", "raise" or "ignore".
    unknown_key : string
        Action to take when a member not matching the ones listed above is
        encountered (except when its name starts with an underscore).
        Either "log", "raise" or "ignore".
    """
    members = {'CELERY', 'ELASTICSEARCH', 'EXTRA_MODULES'}
    if unknown_key != 'ignore':
        unknown_keys = set(config.keys()) - members
        if unknown_keys:
            msg = ("unknown keys %r found on config object %r"
                   % (unknown_keys, config))
            # BUG FIX: this previously read `unknown_action`, a name that does
            # not exist anywhere (NameError at runtime); the parameter is
            # called `unknown_key`.
            if unknown_key == 'raise':
                raise ValueError(msg)
            else:
                logger.warn(msg)
    app.config_from_object(config.get('CELERY', 'xtas._defaultconfig.CELERY'))
    es = config.get('ELASTICSEARCH', _defaultconfig.ELASTICSEARCH)
    _config['ELASTICSEARCH'] = es
    logger.info('Using Elasticsearch at %s' % es)
    for m in config.get('EXTRA_MODULES', []):
        try:
            importlib.import_module(m)
        except ImportError as e:
            if import_error == 'raise':
                raise
            elif import_error == 'log':
                logger.warn(str(e))
async def test_volume_services(hass):
    """Test the volume service."""
    assert await async_setup_component(
        hass, mp.DOMAIN, {"media_player": {"platform": "demo"}}
    )
    await hass.async_block_till_done()

    # Demo player starts at full volume.
    state = hass.states.get(TEST_ENTITY_ID)
    assert state.attributes.get("volume_level") == 1.0

    # Invalid (None) volume must raise and leave the level untouched.
    with pytest.raises(vol.Invalid):
        await common.async_set_volume_level(hass, None, TEST_ENTITY_ID)
    state = hass.states.get(TEST_ENTITY_ID)
    assert state.attributes.get("volume_level") == 1.0

    await common.async_set_volume_level(hass, 0.5, TEST_ENTITY_ID)
    state = hass.states.get(TEST_ENTITY_ID)
    assert state.attributes.get("volume_level") == 0.5

    # volume_down / volume_up step the level by 0.1.
    await common.async_volume_down(hass, TEST_ENTITY_ID)
    state = hass.states.get(TEST_ENTITY_ID)
    assert state.attributes.get("volume_level") == 0.4

    await common.async_volume_up(hass, TEST_ENTITY_ID)
    state = hass.states.get(TEST_ENTITY_ID)
    assert state.attributes.get("volume_level") == 0.5

    assert False is state.attributes.get("is_volume_muted")

    # Invalid (None) mute flag must raise and leave the mute state untouched.
    with pytest.raises(vol.Invalid):
        await common.async_mute_volume(hass, None, TEST_ENTITY_ID)
    state = hass.states.get(TEST_ENTITY_ID)
    assert state.attributes.get("is_volume_muted") is False

    await common.async_mute_volume(hass, True, TEST_ENTITY_ID)
    state = hass.states.get(TEST_ENTITY_ID)
    assert state.attributes.get("is_volume_muted") is True
def get_angle(v1: List[int], v2: List[int]) -> float:
    """
    :param v1: 2D vector
    :param v2: 2D vector
    :return: the angle of v1 and v2 in degree
    """
    dot = np.dot(v1, v2)
    norm = np.linalg.norm(v1) * np.linalg.norm(v2)
    # Guard against floating-point drift pushing |cos| marginally above 1,
    # which would make arccos return NaN for (anti)parallel vectors.
    cos_angle = np.clip(dot / norm, -1.0, 1.0)
    return np.degrees(np.arccos(cos_angle))
def get_plasma_intersection(lon, lat, alt, plasma_alt=300., tx_lon=-75.552,
                            tx_lat=45.403, tx_alt=0.07):
    """
    This function finds where a ray from a transmitter toward a satellite
    intersects the peak plasma in the middle.

    *** PARAMS ***
    Satellite ephemeris point(s): lon, lat, alt (deg, deg, km)
    Transmitter location [optionally]: tx_lon, tx_lat, tx_alt (deg, deg, km)
    Altitude of peak plasma density: plasma_alt (km.)

    ***RETURNS***
    plasma_lon (float): longitude of plasma intersection(s)
    plasma_lat (float): latitude of plasma intersection(s)
    Returns (-1, -1) on validation failure.
    """
    # Vectorized input when ephemeris points arrive as a list/ndarray.
    vec_inp = True if (type(lon) == list or type(lon) == np.ndarray) else False
    #lon = (lon + 360.) % 360.
    #tx_lon = (tx_lon + 360.) % 360.
    dist = haversine(lon, lat, tx_lon, tx_lat)
    # NOTE(review): for vector input `dist` is an array, and a bare
    # `if dist > 2500.` raises "truth value of an array is ambiguous" --
    # confirm whether vector inputs ever exceed this check in practice.
    if dist > 2500.:
        logging.error("This approximation isn't valid for large distances")
        logging.error("dist: {0}".format(dist))
        return (-1, -1)
    if plasma_alt > np.min(alt):
        logging.error("Input altitudes are too low for the plasma")
        logging.error('plasma_alt: {0}'.format(plasma_alt))
        logging.error('alt: {0}'.format(alt))
        return (-1, -1)
    if vec_inp:
        # Broadcast the fixed transmitter location to the input length.
        tx_lon = tx_lon*np.ones(len(lon))
        tx_lat = tx_lat*np.ones(len(lat))
        tx_alt = tx_alt*np.ones(len(alt))
    # Similar-triangles approximation: ground distance to the intersection
    # scales with the altitude ratio plasma_alt/alt.
    x = (plasma_alt/alt)*dist
    #only need initial bearing
    bearing, __ = get_bearing(tx_lon, tx_lat, lon, lat)
    # Decompose the along-ground offset into east-west / north-south parts.
    delta_EW = x*np.sin(np.deg2rad(bearing))
    delta_NS = x*np.cos(np.deg2rad(bearing))
    # convert to longitude (deg):
    delta_lon = delta_EW*360./(2*np.pi*6371.*np.sin(np.deg2rad(lat)))
    delta_lat = delta_NS*360./(2*np.pi*6371.)
    plasma_lon = tx_lon + delta_lon
    plasma_lat = tx_lat + delta_lat
    logging.info('delta_EW, delta_NS: {0},{1}'.format(delta_EW, delta_NS))
    logging.info('delta_lon, delta_lat: {0},{1}'.format(delta_lon, delta_lat))
    logging.info('plasma_lon, plasma_lat: {0},{1}'.format(plasma_lon, plasma_lat))
    return (plasma_lon, plasma_lat)
def send_warnings_to_print(message, category, filename, lineno, file=None, *args):
    """Will be sent to stdout, formatted similar to LogRecords. """
    if isinstance(filename, str):
        # Basename without a trailing '.py'. The original used
        # rstrip('.py'), which strips the *character set* {'.', 'p', 'y'}
        # and mangles names such as 'happy.py' -> 'ha'.
        location = filename.rsplit('/', 1)[-1]
        if location.endswith('.py'):
            location = location[:-3]
    else:
        location = 'UNKNOWN'
    bonus_string = ''
    if file:
        bonus_string += f"file: {file} "
    if args and args[0] is not None:
        bonus_string += f"args: {args} "
    string = '%s:%s: %s:%s' % (location, lineno, category.__name__, message) + bonus_string
    print(string)
    return
def plot_clusters(g, c):
    """
    Draws a given graph g with vertex colours corresponding to clusters c and
    displays the corresponding sizes of the clusters.
    ===========================================================================
    Parameters
    ---------------------------------------------------------------------------
    g : a graph
    c : a list of vertex colours (clusters)
    ---------------------------------------------------------------------------
    """
    # Accept either a list or a vertex -> cluster mapping.
    if type(c) == dict:
        c = list(c.values())
    g.vs['color'] = c
    g.vs['label'] = c
    palette = ig.ClusterColoringPalette(len(g.vs))

    # Tabulate cluster sizes and show them (IPython `display`).
    df = pd.DataFrame(columns=['Frequency'])
    df.index.name = 'Colour'
    for i in set(c):
        df.loc[int(i)] = [c.count(i)]
    display(df)

    visual_style = {}
    # Map each cluster id through the palette for the drawn vertex colour.
    visual_style['vertex_color'] = [palette[col] for col in g.vs['color']]
    visual_style['vertex_label'] = [col for col in g.vs['color']]
    visual_style['vertex_frame_width'] = 0
    visual_style['bbox'] = (300, 300)
    visual_style['margin'] = 10
    return ig.plot(g, **visual_style)
def _check(expression, section, option, message='Not a valid value'):
"""check whether an operation if valid and report error message if invalid.
Parameters
----------
expression : bool
The judgement conditions in main2.
section : string
The section of configuration file.
option : string
The option of the section.
message : string
The error message to be reported.
"""
assert expression, message + '.\noption `%s` in section `%s`' \
% (option, section) | 5,325,195 |
def make_cat_advanced(simple=True, yolo=False):
    """fill the categories manually

    Returns (cat_list, cat_mapping). With simple=True the mapping is the
    two-class benign/malign scheme (a name->index dict for YOLO, else a
    plain index list); otherwise it maps each diagnosis name to its id.
    """
    cat_list = get_cat_list(simple)
    if simple:
        if yolo:
            cat_mapping = {
                "benign": 0,
                "malign": 1,
            }
        else:
            cat_mapping = [0, 1]
        return cat_list, cat_mapping
    # The names from datainfo are used here!
    cat_mapping = {
        # malign
        "Chondrosarkom": 1,
        "Osteosarkom": 2,
        "Ewing-Sarkom": 3,
        "Plasmozytom / Multiples Myelom": 4,
        "NHL vom B-Zell-Typ": 5,
        # benign
        "Osteochondrom": 6,
        "Enchondrom": 7,
        "Chondroblastom": 8,
        "Osteoidosteom": 9,
        "NOF": 10,
        "Riesenzelltumor": 11,
        "Chordom": 12,
        "Hämangiom": 13,
        "Knochenzyste, aneurysmatische": 14,
        "Knochenzyste, solitär": 15,
        "Dysplasie, fibröse": 16,
    }
    return cat_list, cat_mapping
def process_fuel(context):
    """
    Reformats Fuel consumed.

    Expects context['Fuel'] to be an iterable of objects exposing
    opening_meter, closing_meter and unit_price; entry 0 is petrol and
    entry 1 is diesel.

    Returns a dict with:
        'data': per-type single-item lists of detail dicts
        'total': per-type revenue (litres sold * unit price)
    """
    # Index -> display name. 'Desiel' [sic] is kept byte-for-byte: callers
    # may match on the existing spelling.
    fuel = {
        0: 'Petrol',
        1: 'Desiel'
    }
    data = []
    totals = []
    # `fuel_type` avoids the original's shadowing of the builtin `type`;
    # plain arithmetic replaces operator.sub/operator.mul.
    for index, fuel_type in enumerate(context['Fuel']):
        litres_sold = fuel_type.closing_meter - fuel_type.opening_meter
        total = litres_sold * fuel_type.unit_price
        totals.append(total)
        data.append([
            {'type': fuel[index],
             'opening_meter': fuel_type.opening_meter,
             'closing_meter': fuel_type.closing_meter,
             'unit_price': fuel_type.unit_price,
             'litresSold': litres_sold,
             'total': total}])
    return {
        'data': data,
        'total': totals
    }
def prepare_df_financials(
    ticker: str, statement: str, quarter: bool = False
) -> pd.DataFrame:
    """Scrape MarketWatch and build a DataFrame of a company's financials.

    Parameters
    ----------
    ticker : str
        Company's stock ticker
    statement : str
        Either income, balance or cashflow
    quarter : bool, optional
        Return quarterly financial statements instead of annual, by default False

    Returns
    -------
    pd.DataFrame
        A DataFrame with financial info (empty if the page has no table)

    Raises
    ------
    ValueError
        If statement is not income, balance or cashflow
    """
    financial_urls = {
        "income": {
            "quarter": "https://www.marketwatch.com/investing/stock/{}/financials/income/quarter",
            "annual": "https://www.marketwatch.com/investing/stock/{}/financials/income",
        },
        "balance": {
            "quarter": "https://www.marketwatch.com/investing/stock/{}/financials/balance-sheet/quarter",
            "annual": "https://www.marketwatch.com/investing/stock/{}/financials/balance-sheet",
        },
        "cashflow": {
            "quarter": "https://www.marketwatch.com/investing/stock/{}/financials/cash-flow/quarter",
            "annual": "https://www.marketwatch.com/investing/stock/{}/financials/cash-flow",
        },
    }
    if statement not in financial_urls:
        raise ValueError(f"type {statement} is not in {financial_urls.keys()}")

    period = "quarter" if quarter else "annual"
    page = requests.get(
        financial_urls[statement][period].format(ticker),
        headers={"User-Agent": get_user_agent()},
    )
    soup = BeautifulSoup(page.text, "lxml")

    # Column names come from the table's <th> cells; keep only the text
    # before the first newline in each heading.
    header_texts = [
        heading.text.strip("\n").split("\n")[0]
        for heading in soup.findAll("th", {"class": "overflow__heading"})
    ]
    # Everything up to (excluding) the trend column becomes a DataFrame column.
    trend_label = ("5-year trend", "5- qtr trend")[quarter]
    result = pd.DataFrame(columns=header_texts[: header_texts.index(trend_label)])

    tables = soup.findAll(
        "div", {"class": "element element--table table--fixed financials"}
    )
    if not tables:
        # No financials table rendered for this ticker/statement.
        return result

    table_rows = tables[0].findAll(
        "tr", {"class": ["table__row is-highlighted", "table__row"]}
    )
    for table_row in table_rows:
        cells = table_row.findAll(
            "td", {"class": ["overflow__cell", "overflow__cell fixed--column"]}
        )
        if not cells:
            continue
        # Collect non-empty cell texts, truncating at the first newline.
        row_values = []
        for cell in cells:
            cell_text = cell.text.strip()
            if "\n" in cell_text:
                cell_text = cell_text.split("\n")[0]
            if cell_text == "":
                continue
            row_values.append(cell_text)
        result.loc[len(result)] = row_values
    return result
def print_transaction(context, transaction):
    """
    Prints the transaction summary: a coloured title/status line, the
    per-key state diff (initial -> final), and -- at verbosity >= 1 --
    any extra info.

    ``context`` must expose a numeric ``verbose`` attribute; ``transaction``
    must expose ``success``, ``changed``, ``initial_state``, ``final_state``
    and ``extra_info`` (a dict or None).

    NOTE(review): the bracket runs such as "[1;34m" look like ANSI colour
    sequences whose leading ESC control character was stripped somewhere in
    a copy/paste -- confirm against the original source before relying on
    the exact output bytes.
    """
    # Pick title colour and status glyph from the outcome:
    # success+changed, success+unchanged, or failure.
    if transaction.success:
        if transaction.changed:
            title_color = "[1;34m"
            status_char = "[32m+[m"
        else:
            title_color = "[1m"
            status_char = "[37m.[m"
    else:
        title_color = "[1;31m"
        status_char = "[1;31m![m"
    # Print title and name, overwriting the transitive status
    # (the "\r" returns the cursor to the start of the current line).
    print("\r", end="")
    print_transaction_title(transaction, title_color, status_char)
    # Print key: value pairs with changes
    state_infos = []
    for k,final_v in transaction.final_state.items():
        initial_v = transaction.initial_state[k]
        # Add ellipsis on long strings
        str_k = ellipsis(k, 12)
        str_initial_v = ellipsis(str(initial_v), 9)
        # A newly-set key (initial None) may use the full width that the
        # "old -> new" rendering would otherwise occupy (9+3+9).
        str_final_v = ellipsis(str(final_v), 9+3+9 if initial_v is None else 9)
        if initial_v == final_v:
            # Unchanged entries are shown only in verbose mode.
            if context.verbose >= 1:
                entry_str = f"[37m{str_k}: {str_initial_v}[m"
                state_infos.append(entry_str)
        else:
            if initial_v is None:
                # Key was created: show only the new value.
                entry_str = f"[33m{str_k}: [32m{str_final_v}[m"
            else:
                # Key was modified: show old -> new.
                entry_str = f"[33m{str_k}: [31m{str_initial_v}[33m → [32m{str_final_v}[m"
            state_infos.append(entry_str)
    print("[37m,[m ".join(state_infos))
    if context.verbose >= 1 and transaction.extra_info is not None:
        # Verbose-only auxiliary details, indented to align under the diff.
        extra_infos = []
        for k,v in transaction.extra_info.items():
            extra_infos.append(f"[37m{str(k)}: {str(v)}[m")
        print(" " * 15 + "[37m,[m ".join(extra_infos))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.