| content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
|---|---|---|
def tf_box_3d_diagonal_length(boxes_3d):
"""Returns the diagonal length of box_3d
Args:
        boxes_3d: A tensor of shape (N x 7) of boxes in box_3d format.
Returns:
Diagonal of all boxes, a tensor of (N,) shape.
"""
lengths_sqr = tf.square(boxes_3d[:, 3])
width_sqr = tf.square(boxes_3d[:, 4])
height_sqr = tf.square(boxes_3d[:, 5])
lwh_sqr_sums = lengths_sqr + width_sqr + height_sqr
diagonals = tf.sqrt(lwh_sqr_sums)
return diagonals
|
acf1788f8e035a3adf96f3b303f6344bcee0a1f1
| 3,641,500
|
async def employment_plot(current_city:City):
"""
Visualize employment information for city
- see industry breakdown and employment type
### Query Parameters
- city
### Response
JSON string to render with react-plotly.js
"""
city = validate_city(current_city)
city_data = CityData(city)
# Industry
industry_type = city_data.subset[city_data.industry()]
industry_melt = pd.melt(industry_type)
industry_melt.columns = ['industry', 'percentage']
# Employment Type
employment_type = city_data.subset[city_data.employment()]
type_melt = pd.melt(employment_type)
type_melt.columns = ['employment type', 'percentage']
#Create subplots
fig = make_subplots(rows=1, cols=2, subplot_titles = (f'Industry in {city}', f'Employment Types in {city}'))
fig.add_trace(go.Bar(x = industry_melt['industry'], y = industry_melt['percentage'],
marker = dict(color = industry_melt['percentage'], coloraxis = "coloraxis")),
row = 1, col = 1)
fig.add_trace(go.Bar(x =type_melt['employment type'], y =type_melt['percentage'],
marker = dict(color = type_melt['percentage'], coloraxis = "coloraxis")),
row = 1, col = 2)
fig.update_layout(
coloraxis=dict(colorscale = 'Bluered_r'),
coloraxis_showscale = False,
showlegend = False)
fig.show()
# fig.write_html("path/to/file.html")
return fig.to_json()
|
4db7f3f0973391c7294be486088a24c6ffa2770a
| 3,641,501
|
def get_freesurfer_matrix_ras2vox():
"""
Get standard matrix to convert RAS coordinate to voxel index for Freesurfer conformed space volumes.
Get matrix to convert RAS coordinate to voxel index for Freesurfer conformed space volumes. See the documentation for get_freesurfer_matrix_vox2ras for background information.
Returns
-------
2D numpy array
The affine transformation matrix, a float matrix with shape (4, 4).
"""
return npl.inv(get_freesurfer_matrix_vox2ras())
|
5d5ee8d7bec4f632e494f468f6ebc7ff20cdf85c
| 3,641,502
|
def parse_create_table(string):
"""Parse the create table sql query and return metadata
Args:
string(sql): SQL string from a SQL Statement
Returns:
table_data(dict): table_data dictionary for instantiating a table
"""
# Parse the base table definitions
table_data = to_dict(get_base_parser().parseString(string))
# Parse the columns and append to the list
table_data['columns'] = list()
table_data['constraints'] = list()
column_position = 0
for field in table_data['raw_fields']:
try:
column = to_dict(get_column_parser().parseString(field))
# Add position of the column
column['position'] = column_position
column_position += 1
# Change fk_reference_column to string from list
if FK_REFERENCE in column:
column[FK_REFERENCE] = column[FK_REFERENCE][0]
table_data['columns'].append(column)
except ParseException:
try:
constraint = to_dict(
get_constraints_parser().parseString(field))
table_data['constraints'].append(constraint)
except ParseException:
logger.error(field)
raise
return table_data
|
e82875dfcc3cd052aeecac8c38277c26f0d15e8f
| 3,641,503
|
def retrieve_context_connection_connection_by_id(uuid): # noqa: E501
"""Retrieve connection by ID
Retrieve operation of resource: connection # noqa: E501
:param uuid: ID of uuid
:type uuid: str
:rtype: Connection
"""
return 'do some magic!'
|
4de55de3a799f7c41168fa9072b1a03345dd61de
| 3,641,504
|
def read_filenames(path):
"""
Read all file names from `path` and match them against FILENAME_REGEX.
Arguments:
- path: path to the directory containing CSV data files.
Returns:
- list of tuples of every filename and regex match to the CSV filename
format in the specified directory
"""
daily_filenames = [(f, FILENAME_REGEX.match(f))
for f in listdir(path) if isfile(join(path, f))]
return daily_filenames
|
970b00dc5947426960110fa646c9c1c91114ef9f
| 3,641,505
|
def _sp_sleep_for(t: int) -> str:
"""Return the subprocess cmd for sleeping for `t` seconds."""
return 'python -c "import time; time.sleep({})"'.format(t)
|
20ac8022a2438ceb62123f534ba5911b7c560502
| 3,641,506
|
import re
def verify_show_environment(dut, verify_str_list):
"""
To get show environment.
Author: Prudvi Mangadu (prudvi.mangadu@broadcom.com)
"""
command = "show environment"
output = utils.remove_last_line_from_string(st.show(dut, command, skip_tmpl=True))
result = True
for item in verify_str_list:
if not re.findall(item, output, re.IGNORECASE):
st.error("Item '{}' is NOT found".format(item))
result = False
return result
|
9334045f2b4ff2e33085398b871ff7a905b995ee
| 3,641,507
|
def get_labelset_keys():
"""get labelset keys
Given DATA_CFG, return slideviewer labelsets
Args:
none
Returns:
list: a list of labelset names
"""
cfg = ConfigSet()
label_config = cfg.get_value(path=const.DATA_CFG+'::LABEL_SETS')
labelsets = [cfg.get_value(path=const.DATA_CFG+'::USE_LABELSET')]
if cfg.get_value(path=const.DATA_CFG+'::USE_ALL_LABELSETS'):
labelsets = list(label_config.keys())
return labelsets
|
824d15b529bccb576c359fb50614ed1e33aa561c
| 3,641,508
|
from typing import List
def create_instrument_level_pattern(instrument_symbols: List[str]) -> str:
"""Creates a regular expression pattern to target all the instrument symbols in a list.
The function creates a regular expression pattern to target, within a specific DC
message, the portion of the message containing the complete instrument symbol, for
each instrument symbol included in the list passed as an input of the function.
Parameters
----------
instrument_symbols: List[str]
A list of the stable components of the futures instrument symbols.
Returns
-------
str
A regular expression pattern.
"""
specific_instrument_regexes = [
create_specific_instrument_regex(name)
for name in instrument_symbols
]
return rf"({'|'.join(specific_instrument_regexes)})"
|
25e1e9cc52b009e8e4fa95f8502e5b10cad29209
| 3,641,509
|
def localtime(nist_lookup=0,
localtime=DateTime.localtime,utctime=utctime):
""" Returns the current local time as DateTime instance.
Same notes as for utctime().
"""
return localtime(utctime(nist_lookup).gmticks())
|
312bb973edd62b03d2d251e4d8e215cd00bd470d
| 3,641,510
|
from datetime import datetime
def device_now():
"""Return datetime object constructed from 'now' on device."""
cmd = "adb shell date '+%Y:%m:%d:%H:%M:%S'"
lines = u.docmdlines(cmd)
line = lines.pop(0)
if line is None:
u.error("unable to interpret output from '%s'" % cmd)
d = line.split(":")
try:
dt = datetime(int(d[0]), int(d[1]), int(d[2]),
int(d[3]), int(d[4]), int(d[5]))
return dt
except ValueError:
u.error("unable to parse/interpret output "
"from cmd '%s' (value %s)" % (cmd, line))
|
93e927194390e77fcc7b26cb22db2e5d1debd164
| 3,641,511
|
def copy_safe_request(request):
"""
Copy selected attributes from a request object into a new fake request object. This is needed in places where
thread safe pickling of the useful request data is needed.
"""
meta = {
k: request.META[k]
for k in HTTP_REQUEST_META_SAFE_COPY
if k in request.META and isinstance(request.META[k], str)
}
return NautobotFakeRequest(
{
"META": meta,
"POST": request.POST,
"GET": request.GET,
"FILES": request.FILES,
"user": request.user,
"path": request.path,
"id": getattr(request, "id", None), # UUID assigned by middleware
}
)
|
a0e2b670732a2d09ac51678059bac80d115b350b
| 3,641,512
|
import hashlib
def sha256(firmware_filename, firmware_size=None):
"""Returns the sha256 hash of the firmware"""
hasher = hashlib.sha256()
# If firmware size is supplied, then we want a sha256 of the firmware with its header
if firmware_size is not None:
hasher.update(b"\x00" + firmware_size.to_bytes(4, "little"))
with open(firmware_filename, "rb", buffering=0) as file:
while True:
chunk = file.read(128)
if not chunk:
break
hasher.update(chunk)
return hasher.digest()
|
62fabc35796b9fe21ca2489b317550f93f6774ca
| 3,641,513
|
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN][entry.entry_id].stop()
return unload_ok
|
38460ec92c350cdfcae0094039e834c3344369d8
| 3,641,514
|
def is_serial_increased(old, new):
""" Return true if serial number was increased using RFC 1982 logic. """
old, new = (int(n) for n in [old, new])
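    # Work modulo 2**32: the serial counts as increased when the wrapped
    # difference is positive but below half of the serial number space.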
diff = (new - old) % 2**32
return 0 < diff < (2**31 - 1)
|
44a33a1c7e8caebe3b74284002c7c4be6ac29b40
| 3,641,515
|
def svn_relpath_skip_ancestor(parent_relpath, child_relpath):
"""svn_relpath_skip_ancestor(char const * parent_relpath, char const * child_relpath) -> char const *"""
return _core.svn_relpath_skip_ancestor(parent_relpath, child_relpath)
|
23ca0f2e91f0c69e7b410983603ef98e9dea4c13
| 3,641,516
|
def rnn_model(input_dim, units, activation, output_dim=29):
""" Build a recurrent network for speech
"""
# Main acoustic input
input_data = Input(name='the_input', shape=(None, input_dim))
# Add recurrent layer
simp_rnn = GRU(units, activation=activation,
return_sequences=True, implementation=2, name='rnn')(input_data)
bn_rnn = BatchNormalization(name='bn_rnn')(simp_rnn)
time_dense = TimeDistributed(Dense(output_dim))(bn_rnn)
# Add softmax activation layer
y_pred = Activation('softmax', name='softmax')(time_dense)
# Specify the model
model = Model(inputs=input_data, outputs=y_pred)
model.output_length = lambda x: x
print(model.summary())
return model
|
2b0c4614e0e80888db89fcc8e43ef0a6614400cb
| 3,641,517
|
def _pad_statistic(arr, pad_width, stat_length, stat_op):
"""
pads the array with values calculated along the given axis, used in mode: "maximum",
"minimum", "mean"
"""
ndim = arr.ndim
shape = arr.shape
if stat_length is None:
stat_length = _make_stat_length(shape)
else:
stat_length = _convert_pad_to_nd(stat_length, ndim)
stat_length = _limit_stat_length(stat_length, shape)
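    # Pad one axis at a time: take the statistic over the leading/trailing
    # stat_length samples, tile it to the requested pad width, and concatenate.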
for i in range(ndim):
pad_before = stat_op(_slice_along_axis(arr, i, 0, stat_length[i][0]), i)
pad_before = (F.tile(pad_before, _tuple_setitem((1,)*ndim, i, pad_width[i][0])),)
pad_after = stat_op(_slice_along_axis(arr, i, shape[i]-stat_length[i][1], shape[i]), i)
pad_after = (F.tile(pad_after, _tuple_setitem((1,)*ndim, i, pad_width[i][1])),)
tensor_with_pad = pad_before + (arr,) + pad_after
arr = concatenate(tensor_with_pad, axis=i)
return arr
|
4976615e4d41f48d5063ed9af0719801dbe1f9db
| 3,641,518
|
def register_do(mysql, json):
"""
helper function that registers data objects into MySQL DB
@param mysql: a mysql object for MySQL database
@param json: metadata that contains information for data source and device
"""
cnx = mysql.connect()
cursor = cnx.cursor()
dataSource = json["dataSource"]
device = json["device"]
deviceSummary = json["deviceSummary"]
dataSource_arr = [dataSource["name"], int(dataSource["srcID"])]
    device_arr = [int(device["ID"]),
                  int(device["dataSize"]),
                  device["location"],
                  device["name"],
                  int(device["srcID"]),
                  device["type"]]
    deviceSummary_arr = [int(deviceSummary["ID"]),
                         deviceSummary["accessDuration"],
                         int(deviceSummary["deviceID"])]
    # DB-API parameter placeholders are always %s, regardless of the column type
    cursor.execute("INSERT INTO dataSource (name, srcID) VALUES (%s, %s)", dataSource_arr)
    cursor.execute("INSERT INTO device (ID, dataSize, location, name, srcID, type) VALUES (%s, %s, %s, %s, %s, %s)", device_arr)
    cursor.execute("INSERT INTO deviceSummary (ID, accessDuration, deviceID) VALUES (%s, %s, %s)", deviceSummary_arr)
cnx.commit()
return "data object registration success"
|
bfb8aa83e91955691d1b3a15c73c5e36d5e3b6b8
| 3,641,519
|
import decimal
def split_amount(amount, splits, places=2):
"""Return list of ``splits`` amounts where sum of items equals ``amount``.
>>> from decimal import Decimal
>>> split_amount(Decimal('12'), 1)
    [Decimal('12.00')]
>>> split_amount(Decimal('12'), 2)
[Decimal('6.00'), Decimal('6.00')]
Amounts have a max of ``places`` decimal places. Last amount in the list
may not be the same as others (will always be lower than or equal to
others).
>>> split_amount(Decimal('100'), 3)
    [Decimal('33.34'), Decimal('33.34'), Decimal('33.32')]
    >>> split_amount(Decimal('100'), 3, 4)
    [Decimal('33.3334'), Decimal('33.3334'), Decimal('33.3332')]
    >>> split_amount(Decimal('12'), 7) # doctest: +ELLIPSIS
    [Decimal('1.72'), ..., Decimal('1.72'), ..., Decimal('1.68')]
    >>> split_amount(Decimal('12'), 17) # doctest: +ELLIPSIS
[Decimal('0.71'), ..., Decimal('0.71'), Decimal('0.64')]
"""
one = decimal.Decimal(10) ** -places
amount = amount.quantize(one)
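    # Round the first (splits - 1) shares up, then give whatever remains to the
    # last share so that the shares sum to exactly `amount`.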
with decimal.localcontext() as decimal_context:
decimal_context.rounding = decimal.ROUND_UP
upper_split = (amount / splits).quantize(one)
splitted_amounts = [upper_split] * (splits - 1)
lower_split = amount - sum(splitted_amounts)
splitted_amounts.append(lower_split)
return splitted_amounts
|
8c8a17ed9bbcab194550ea78a9b414f51ca5610d
| 3,641,520
|
from datetime import timedelta
def shift_compare_date(df, date_field, smaller_eq_than_days=1, compare_with_next=False):
""" ATENTION: This Dataframe need to be sorted!!!
"""
if compare_with_next:
s = (
(df[date_field].shift(-1) - df[date_field]
) <= timedelta(days=smaller_eq_than_days)
) & (
(df[date_field].shift(-1) - df[date_field]) > timedelta(days=0)
)
else:
s = (
(df[date_field] - df[date_field].shift(1)
) <= timedelta(days=smaller_eq_than_days)
) & (
(df[date_field] - df[date_field].shift(1)) >= timedelta(days=0)
)
return s
|
56d4466f61cb6329ec1e365ad74f349d6043dd0a
| 3,641,521
|
def format_alleles(variant):
"""Gets a string representation of the variant's alleles.
Args:
variant: nucleus.genomics.v1.Variant.
Returns:
A string ref_bases/alt1,alt2 etc.
"""
return '{}/{}'.format(variant.reference_bases, ','.join(
variant.alternate_bases))
|
775fe3e112ff0b7e73780600e0621a8695fa5ad0
| 3,641,522
|
import numbers
import numpy as np
def _validate_inputs(input_list, input_names, method_name):
"""
This method will validate the inputs of other methods.
input_list is a list of the inputs passed to a method.
input_name is a list of the variable names associated with
input_list
method_name is the name of the method whose input is being validated.
_validate_inputs will verify that all of the inputs in input_list are:
1) of the same type
2) either numpy arrays or instances of numbers.Number (floats or ints)
3) if they are numpy arrays, they all have the same length
If any of these criteria are violated, a RuntimeError will be raised
returns True if the inputs are numpy arrays; False if not
"""
if isinstance(input_list[0], np.ndarray):
desired_type = np.ndarray
elif isinstance(input_list[0], numbers.Number):
desired_type = numbers.Number
else:
raise RuntimeError("The arg %s input to method %s " % (input_names[0], method_name) +
"should be either a number or a numpy array")
valid_type = True
bad_names = []
for ii, nn in zip(input_list, input_names):
if not isinstance(ii, desired_type):
valid_type = False
bad_names.append(nn)
if not valid_type:
msg = "The input arguments:\n"
for nn in bad_names:
msg += "%s,\n" % nn
msg += "passed to %s " % method_name
msg += "need to be either numbers or numpy arrays\n"
msg += "and the same type as the argument %s" % input_names[0]
msg += "\n\nTypes of arguments are:\n"
for name, arg in zip(input_names, input_list):
msg += '%s: %s\n' % (name, type(arg))
raise RuntimeError(msg)
if desired_type is np.ndarray:
same_length = True
for ii in input_list:
if len(ii) != len(input_list[0]):
same_length = False
if not same_length:
raise RuntimeError("The arrays input to %s " % method_name +
"all need to have the same length")
if desired_type is np.ndarray:
return True
return False
|
25a72bd99639b4aab23459635fce116e08299bdc
| 3,641,523
|
def server_base_url(environ):
"""
Using information in tiddlyweb.config, construct
the base URL of the server, sans the trailing /.
"""
return '%s%s' % (server_host_url(environ), _server_prefix(environ))
|
3919c9223039929530d6543c13e39b880c657d4f
| 3,641,524
|
def calc_ctrlg_ratio(rpl: sc2reader.resources.Replay,
pid: int) -> dict[str, float]:
"""Calculates the ratio between `ControlGroupEvents` and the union of
the `CommandEvents`, `SelectionEvents` and `ControlGroupCommand` sets
to quantify the players' level of awareness and use of this tactical
feature.
*Args*
- rpl (sc2reader.resources.Replay)
The replay being analysed.
- pid (int)
In-game player ID of the player being considered in the
analysis.
*Returns*
- (dict[str, float])
"""
command_secs = {e.second for e in rpl.events
if isinstance(e, sc2reader.events.game.CommandEvent)
and e.pid == (pid - 1)}
select_secs = {e.second for e in rpl.events
if isinstance(e, sc2reader.events.game.SelectionEvent)
and e.pid == (pid - 1)}
ctrlg_secs = {e.second for e in rpl.events
if isinstance(e, sc2reader.events.game.ControlGroupEvent)
and e.pid == (pid - 1)}
total_counted_events = len(command_secs | select_secs | ctrlg_secs)
if not total_counted_events:
return {"ctrlg_ratio": 0}
return {"ctrlg_ratio": len(ctrlg_secs)/total_counted_events}
|
74d79128bba3584a4966e0bb8f2ce0e4dfdf402e
| 3,641,525
|
import os
import numpy as np
def data_exists(date, hour=None):
"""
Checks if there is a directory with daily data files for given date and hour(s)
Parameters
----------
date: str
Expected date format is yyyy/mm/dd
hour: int or array-like, default None
Specific hour(s) to check, has to be in the range of 0-23
Returns
-------
: bool, or array of bools
If all hours exist returns True.
If some of them do not exist returns a bool array.
"""
if hour is None:
file_path = get_day_folder_path(date)
if os.path.exists(file_path):
return True
else:
# get hours with data for relevant date
data_hours = get_hours_with_data(date)
# bool array - for every hour True data if data exists, otherwise False
mask_hours = np.isin(hour, data_hours)
# if all requested hours exist return True, if only some of them return an array
if mask_hours.all():
return True
else:
return mask_hours
return False
|
b3a97f4bee234ea09c5c40ac1febe43c69f14a54
| 3,641,526
|
import matplotlib.pyplot as plt
def plot_single_roccurve(signals, bkgs, cut_function, cut_values, ax=None):
"""
Main routine for plotting a single roccurve
"""
# Get a default ax if none is given
if ax is None:
fig = plt.figure(figsize=(8,8))
ax = fig.gca()
# Plot the base line
ax.plot([0.0,1.0], [0.0,1.0], linestyle='--', color='xkcd:gray')
# Plot the single roccurve
line = plot_roccurve(signals, bkgs, cut_function, cut_values, ax=ax)
line.set_label(bkgs[0].get_category())
# Plot settings
ax.set_xlim(0.0, 1.05)
ax.set_ylim(0.0, 1.05)
ax.set_ylabel('Signal eff.', fontsize=DEFAULT_FONTSIZE)
ax.set_xlabel('Bkg eff.', fontsize=DEFAULT_FONTSIZE)
ax.legend(fontsize=DEFAULT_FONTSIZE)
return ax
|
13341d3742fa97784cf552a1a7b3a1a5b285180a
| 3,641,527
|
import numpy as np
import open3d as o3d
from tqdm import tqdm
def draw_normal_surface(pcd, scale, estimation_params=None):
"""Draw and return a mesh of arrows of normal vectors for each point
in the given cloud
Parameters
----------
pcd : o3d.geometry.PointCloud
Input point cloud
scale : float
Scale of the default arrow which is 1 meter length
estimation_params : dict, optional
        Normal estimation parameters if input does not contain normals, by default None
Returns
-------
o3d.geometry.TriangleMesh
Collection of normal arrows as a single triangle mesh
"""
if len(pcd.normals) != len(pcd.points):
pcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(**estimation_params))
arrow_params = get_default_arrow(scale)
normal_surface = None
pairs = zip(np.asarray(pcd.points), np.asarray(pcd.normals))
for point, normal in tqdm(pairs, total=len(pcd.points), ncols=100):
arrow = draw_arrow(point, normal, (0, 1, 0), arrow_params)
if normal_surface is None:
normal_surface = arrow
else:
normal_surface += arrow
return normal_surface
|
cb54f2a84febe82b03806b09af0a8c99fecc0669
| 3,641,528
|
def texture_from_clusters(clusters):
""" Compute the GLCM texture properties from image clusters.
:param clusters: clusters of pixels representing sections of the image
:returns: DataFrame -- of texture features for every cluster.
"""
thetas = np.arange(0, np.pi, np.pi/8)
props = ['contrast', 'dissimilarity', 'homogeneity', 'energy']
tex_features = []
for i, cluster in enumerate(clusters):
prop_suffix = '_cluster_%d' % (i+1)
col_names = [name + prop_suffix for name in props]
features = glcm_features(cluster, [1], thetas, props)
# compute mean across all orientations
features = np.mean(features, axis=2)
df = pd.DataFrame(features.T, columns=col_names)
tex_features.append(df)
return pd.concat(tex_features, axis=1)
|
353aa3bbc1fec765fd01e201bd769e00bbf8a1fa
| 3,641,529
|
def parse_dict(input_data):
"""Return a rules dict of the format:
{
'light red': [(1, 'bright white'), (2, 'muted yellow')],
        'dark orange': [(3, 'bright white'), (4, 'muted yellow')],
'faded blue': [(0, 'bags')]
}
"""
bags = dict()
for line in input_data.split('\n'):
outer, inner = line.strip().split(' bags contain ')
inner = [i.split(' ') for i in inner.split(", ")]
if 'no' in inner[0]:
bags[outer] = [(0, 'bags')]
else:
bags[outer] = [(int(i[0]), ' '.join(i[1:3])) for i in inner]
return bags
|
a1aad66a16e4754c35c9b3518d5641096e393530
| 3,641,530
|
import sys
import numpy as np
def extract_vars(samples_file_name,n_burnin,v_names,debug,stride=1):
"""From a file with samples in ascii format, with
the first line containing the label for each column, extract
the columns with the labels in v_names and return them
in a numpy array. Remove n_burnin samples from the top.
Only read one in every stride number of lines after that."""
# Open text file with all samples,
samples_file = open(samples_file_name,"r")
#sample_lines = samples_file.readlines()
#samples_file.close()
# Extract first line with the column labels and find the column
# numbers corresponding to the variables of interest.
#labels_line = sample_lines[0].rstrip('\n')
labels_line = samples_file.readline().rstrip('\n')
col_labels = [lbl for lbl in labels_line.split()]
v_indices = []
for s_v in v_names:
try:
i_v = col_labels.index(s_v)
v_indices.append(i_v)
except ValueError:
print "Variable", s_v, "is not found in the list of labels", col_labels
sys.exit(1)
if (debug > 0):
print "Column labels in file",samples_file_name,"are:",col_labels
for i_v in range(len(v_names)):
print "The column number of",v_names[i_v],"is:",v_indices[i_v]
# Read subsequent lines, leaving out the first n_burnin, and only one
# in every stride lines after that
samples_list = []
line_no = 0
done = 0
while not done:
line = samples_file.readline()
if (line == ""):
done = 1
else:
line_no += 1
if (line_no > n_burnin and (line_no - n_burnin) % stride == 0):
records = line.split()
num_records = [float(s) for s in records]
samples_list.append(num_records)
# Close the file
samples_file.close()
# Remove the last line if is has a value < 0 (i.e. -1) in the acceptance_prob column
try:
i_ap = col_labels.index("acceptance_prob")
except ValueError:
i_ap = -1
# If this is a file with acceptance probabilities
if (i_ap >= 0):
# And the last line has a negative acceptance probability
if(samples_list[-1][i_ap] < 0):
# Remove the last line
del samples_list[-1]
if (debug > 0):
print "The last sample line has been deleted as it contained the MAP values"
# Convert list to array
steady_samples = np.array(samples_list)
# Remove burn-in samples from the top
#if (n_burnin > 0):
# steady_samples = all_samples[n_burnin:,:]
#else:
# steady_samples = all_samples
#if (debug > 0):
# print "Removed", n_burnin, "burn-in samples"
# Extract all columns of interest
samples_cols = []
for i_v in v_indices:
samples_cols.append(steady_samples[:,i_v])
samples = np.array(samples_cols).T
if (debug > 0):
print "Shape of samples array:",samples.shape
n_samples = len(samples[:,0])
n_vars = len(samples[0,:])
if (debug > 0):
print "Read in", n_samples, "regular samples of", n_vars, "variables from file", samples_file_name
return samples
|
e964378501eb32191b2b390e4f9a84d41cef911f
| 3,641,531
|
def distance_without_normalise(bin_image):
"""
Takes a binary image and returns a distance transform version of it.
"""
res = np.zeros_like(bin_image)
for j in range(1, bin_image.max() + 1):
one_cell = np.zeros_like(bin_image)
one_cell[bin_image == j] = 1
one_cell = distance_transform_cdt(one_cell)
res[bin_image == j] = one_cell[bin_image == j]
res = res.astype('uint8')
return res
|
ed4cf85498a74e2f7d030daefceebf66e460e0fd
| 3,641,532
|
def list_inventory():
""" Returns all of the Inventory """
app.logger.info('Request for inventory list')
inventory = []
category = request.args.get('category')
name = request.args.get('name')
condition = request.args.get('condition')
count = request.args.get('count')
available = request.args.get('available')
if category:
inventory = Inventory.find_by_category(category)
elif name:
inventory = Inventory.find_by_name(name)
else:
inventory = Inventory.all()
    results = [item.serialize() for item in inventory]
return make_response(jsonify(results), status.HTTP_200_OK)
|
381de71a10d1626f44710643cd837523e9a930ed
| 3,641,533
|
import argparse
def parse_args():
"""Parse command line arguments"""
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input-dir', help="Directory which contains the input data", required=True)
parser.add_argument('-o', '--output-dir', help="Directory which will hold the output data", required=True)
    parser.add_argument('-p', '--num-processes', default=4, type=int, help="Number of processes to spawn for file conversion")
parser.add_argument('-c', '--compression', default=None, help="Compression Type.")
return parser.parse_args()
|
aeabf459569b5981eba66460ba0bbf15fb4c96f7
| 3,641,534
|
from types import MethodType
def is_instance_method(obj):
    """Checks if an object is a bound method on an instance."""
    if not isinstance(obj, MethodType):
        return False  # Not a method
    elif obj.__self__ is None:
        return False  # Method is not bound
    elif isinstance(obj.__self__, type):
        return False  # Method is bound to a class, i.e. a classmethod
    return True
|
82050391193388cdb6d9466442774e6b0fa6878c
| 3,641,535
|
from typing import List
from typing import Dict
def _clean_empty_and_duplicate_authors_from_grobid_parse(authors: List[Dict]) -> List[Dict]:
"""
Within affiliation, `location` is a dict with fields <settlement>, <region>, <country>, <postCode>, etc.
Too much hassle, so just take the first one that's not empty.
"""
# stripping empties
clean_authors_list = []
for author in authors:
clean_first = author['first'].strip()
clean_last = author['last'].strip()
clean_middle = [m.strip() for m in author['middle']]
clean_suffix = author['suffix'].strip()
if clean_first or clean_last or clean_middle:
author['first'] = clean_first
author['last'] = clean_last
author['middle'] = clean_middle
author['suffix'] = clean_suffix
clean_authors_list.append(author)
# combining duplicates (preserve first occurrence of author name as position)
key_to_author_blobs = {}
ordered_keys_by_author_pos = []
for author in clean_authors_list:
key = (author['first'], author['last'], ' '.join(author['middle']), author['suffix'])
if key not in key_to_author_blobs:
key_to_author_blobs[key] = author
ordered_keys_by_author_pos.append(key)
else:
if author['email']:
key_to_author_blobs[key]['email'] = author['email']
if author['affiliation'] and (author['affiliation']['institution'] or author['affiliation']['laboratory'] or author['affiliation']['location']):
key_to_author_blobs[key]['affiliation'] = author['affiliation']
dedup_authors_list = [key_to_author_blobs[key] for key in ordered_keys_by_author_pos]
return dedup_authors_list
|
5a02b877ee074270c544c7dbb06dd1ceab487e79
| 3,641,536
|
def get_distutils_display_options():
""" Returns a set of all the distutils display options in their long and
short forms. These are the setup.py arguments such as --name or --version
which print the project's metadata and then exit.
Returns
-------
opts : set
The long and short form display option arguments, including the - or --
"""
short_display_opts = set('-' + o[1] for o in Distribution.display_options
if o[1])
long_display_opts = set('--' + o[0] for o in Distribution.display_options)
# Include -h and --help which are not explicitly listed in
# Distribution.display_options (as they are handled by optparse)
short_display_opts.add('-h')
long_display_opts.add('--help')
# This isn't the greatest approach to hardcode these commands.
# However, there doesn't seem to be a good way to determine
# whether build *will be* run as part of the command at this
# phase.
display_commands = set([
'clean', 'register', 'setopt', 'saveopts', 'egg_info',
'alias'])
return short_display_opts.union(long_display_opts.union(display_commands))
|
86e87f22ea97db4a2642ef578999ad1f0cd67a66
| 3,641,537
|
def get_followers(api, user_id):
"""Returns list of followers"""
followers = []
next_max_id = ''
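    # Page through the follower list until the API stops returning a next_max_id cursor.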
while next_max_id is not None:
_ = api.getUserFollowers(user_id, maxid=next_max_id)
followers.extend(api.LastJson.get('users', []))
        next_max_id = api.LastJson.get('next_max_id')  # None when there are no more pages
return followers
|
debfb11fe0b8b22232b82e9a8ea360a4d2a8cdc1
| 3,641,538
|
def map(v, ds, de, ts, te):
"""\
Map the value v, in range [ds, de] to
the corresponding value in range [ts, te]
"""
d1 = de - ds
d2 = te - ts
v2 = v - ds
r = v2 / d1
return ts + d2 * r
|
2c2ba49b2acc283ca25b07c10b7ad717ad6a280d
| 3,641,539
|
def get_Q_body(hs_type, Theta_SW_hs):
"""温水暖房用熱源機の筐体放熱損失 (2)
Args:
hs_type(str): 温水暖房用熱源機の種類
Theta_SW_hs(ndarray): 温水暖房用熱源機の往き温水温度
Returns:
ndarray: 温水暖房用熱源機の筐体放熱損失
"""
if hs_type in ['石油従来型暖房機', '石油従来型温水暖房機', '石油従来型給湯温水暖房機', '不明']:
# (2a)
return [234 * 3600 * 10 ** (-6)] * 24 * 365
elif hs_type in ['石油潜熱回収型暖房機', '石油潜熱回収型温水暖房機', '石油潜熱回収型給湯温水暖房機']:
# (2b)
return (5.3928 * Theta_SW_hs - 71.903) * 3600 * 10 ** (-6)
else:
raise ValueError(hs_type)
|
60e35a31d9c9b2f5d77d3d6f1518b7a20484fad2
| 3,641,540
|
def softmax(inputs):
"""
Calculate the softmax for the give inputs (array)
:param inputs:
:return:
"""
return np.exp(inputs) / float(sum(np.exp(inputs)))
|
eb8e215e24fbc30e08e986d9b9498973a866cb9b
| 3,641,541
|
def get_config_list(ranking, ckpt_path2is_3class):
"""Assemble a model list for a specific task based on the ranking.
In addition to bundling information about the ckpt_path and whether to
model_uncertainty, the config_list also lists the value of the metric to
aid debugging.
Args:
ranking (list): list containing (Path, float), corresponding to
checkpoint-metric pairs ranked from best to worst by metric value
ckpt_path2is_3class (dict): mapping from ckpt_path to is_3class
(whether to model_uncertainty)
Returns:
config_list (list): list bundling information about ckpt_path,
model_uncertainty, and metric value
"""
config_list = []
for ckpt_path, value in ranking:
is3_class = ckpt_path2is_3class[ckpt_path]
ckpt_info = {'ckpt_path': str(ckpt_path),
'is_3class': is3_class,
'value': value}
config_list.append(ckpt_info)
return config_list
|
0c0819f2f4ea844468091fd395390be8038ef4a6
| 3,641,542
|
def _get_turn_angle(start_angle, target_angle):
"""
Difference in angle in the range -180 to +180 (where negative is counter clockwise)
Parameters
----------
start_angle, target_angle : float
Returns
-------
float
difference in angle.
"""
return _map_to_pm180(target_angle - start_angle)
|
7f41482ec69c4d3c4c4b3e1afb674ad46e7d607b
| 3,641,543
|
import ctypes
def load(fname):
"""Load symbol from a JSON file.
You can also use pickle to do the job if you only work on python.
The advantage of load/save is the file is language agnostic.
This means the file saved using save can be loaded by other language binding of mxnet.
You also get the benefit being able to directly load/save from cloud storage(S3, HDFS)
Parameters
----------
fname : str
The name of the file, examples:
- `s3://my-bucket/path/my-s3-symbol`
- `hdfs://my-bucket/path/my-hdfs-symbol`
- `/path-to/my-local-symbol`
Returns
-------
sym : Symbol
The loaded symbol.
See Also
--------
Symbol.save : Used to save symbol into file.
"""
if not isinstance(fname, string_types):
raise TypeError('fname need to be string')
handle = SymbolHandle()
check_call(_LIB.MXSymbolCreateFromFile(c_str(fname), ctypes.byref(handle)))
return Symbol(handle)
|
bbeb4f5eb63a5ad656814d0ded27d7edbd9936d8
| 3,641,544
|
def filter_pairs(pairs):
"""returns pairs of with filter_pair()==True"""
return [pair for pair in pairs if filter_pair(pair)]
|
ce65a6ec84ea8b637771d75a5334af7d90bafa15
| 3,641,545
|
def merge(list_geo, npts=5):
"""
merge a list of cad_geometries and update internal/external faces and connectivities
Args:
list_geo: a list of cad_geometries
Returns:
        the merged cad_geometry
"""
geo_f = list_geo[0]
for geo in list_geo[1:]:
geo_f = geo_f.merge(geo, npts=npts)
return geo_f
|
70db1b52be8ae70d21f689c8f12e051d9c41cd64
| 3,641,546
|
def imshow(image: Imagelike, module: str = None, **kwargs) -> None:
"""Show the given image.
FIXME[todo]:
Showing an image can be done in different ways:
- blocking=True: the execution of the main program is blocked.
The display will run an event loop to guarantee a responsive
      GUI behaviour. Blocking may stop on different occasions
- when the display window is closed
(either by GUI or programmatically)
- after some timeout (the display window may then either close
or switch into non-blocking mode, or stay open and unresponsive.
the last should only be used, if a new image will be shown by
the caller immediately after she regained control)
- blocking=False: the execution of the main program is continued.
The image display may start some background thread to ensure
a responsive GUI behaviour
- unblock: the unblock option specifies what should happen, when
a blocking display ends its block:
'close': close the display window
'show': continue showing the image in non-blocking mode.
'freeze': continue showing the image but without event loop,
leaving a frozen (unresponsive) image display window.
The caller is responsible for dealing with this window
(either closing it or showing some new image).
"""
display = get_display(module=module)
if image is None:
display.close()
else:
display.show(image, **kwargs)
return display
|
be56e4053269c7febd3a528af7e868d44717dcf8
| 3,641,547
|
from typing import Union
from typing import Sequence
from typing import Dict
import warnings
from pathlib import Path
from tqdm import tqdm
import logging
def prepare_commonvoice(
corpus_dir: Pathlike,
output_dir: Pathlike,
languages: Union[str, Sequence[str]] = "auto",
splits: Union[str, Sequence[str]] = COMMONVOICE_DEFAULT_SPLITS,
num_jobs: int = 1,
) -> Dict[str, Dict[str, Dict[str, Union[RecordingSet, SupervisionSet]]]]:
"""
Returns the manifests which consist of the Recordings and Supervisions.
When all the manifests are available in the ``output_dir``, it will simply read and return them.
This function expects the input directory structure of::
>>> metadata_path = corpus_dir / language_code / "{train,dev,test}.tsv"
>>> # e.g. pl_train_metadata_path = "/path/to/cv-corpus-7.0-2021-07-21/pl/train.tsv"
>>> audio_path = corpus_dir / language_code / "clips"
>>> # e.g. pl_audio_path = "/path/to/cv-corpus-7.0-2021-07-21/pl/clips"
Returns a dict with 3-level structure (lang -> split -> manifest-type)::
>>> {'en/fr/pl/...': {'train/dev/test': {'recordings/supervisions': manifest}}}
:param corpus_dir: Pathlike, the path to the downloaded corpus.
:param output_dir: Pathlike, the path where to write the manifests.
:param languages: 'auto' (prepare all discovered data) or a list of language codes.
:param splits: by default ``['train', 'dev', 'test']``, can also include
``'validated'``, ``'invalidated'``, and ``'other'``.
:param num_jobs: How many concurrent workers to use for scanning of the audio files.
    :return: a dict with manifests for all specified languages and their train/dev/test splits.
"""
if not is_module_available("pandas"):
raise ValueError(
"To prepare CommonVoice data, please 'pip install pandas' first."
)
if num_jobs > 1:
warnings.warn(
"num_jobs>1 currently not supported for CommonVoice data prep;"
"setting to 1."
)
corpus_dir = Path(corpus_dir)
assert corpus_dir.is_dir(), f"No such directory: {corpus_dir}"
assert output_dir is not None, (
"CommonVoice recipe requires to specify the output "
"manifest directory (output_dir cannot be None)."
)
output_dir = Path(output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
if languages == "auto":
languages = set(COMMONVOICE_LANGS).intersection(
path.name for path in corpus_dir.glob("*")
)
if not languages:
raise ValueError(
f"Could not find any of CommonVoice languages in: {corpus_dir}"
)
elif isinstance(languages, str):
languages = [languages]
manifests = {}
for lang in tqdm(languages, desc="Processing CommonVoice languages"):
logging.info(f"Language: {lang}")
lang_path = corpus_dir / lang
# Maybe the manifests already exist: we can read them and save a bit of preparation time.
# Pattern: "cv_recordings_en_train.jsonl.gz" / "cv_supervisions_en_train.jsonl.gz"
lang_manifests = read_cv_manifests_if_cached(
output_dir=output_dir, language=lang
)
for part in splits:
logging.info(f"Split: {part}")
if part in lang_manifests:
logging.info(
f"CommonVoice language: {lang} already prepared - skipping."
)
continue
recording_set, supervision_set = prepare_single_commonvoice_tsv(
lang=lang,
part=part,
output_dir=output_dir,
lang_path=lang_path,
)
lang_manifests[part] = {
"supervisions": supervision_set,
"recordings": recording_set,
}
manifests[lang] = lang_manifests
return manifests
|
1f2be866e9003224588a6e2cd4a29500854e9fb9
| 3,641,548
|
def plot_array_trans(pdata,a,copy=False):
"""
Warning!!!
----------
Latest Information: 22/05/2012 this is deprecated and plot_array_transg is used instead.
Purpose:
--------
Transform array according to speficication in list a. return a copy if copy is True.
Example:
--------
>>> b=np.arange(-9,9.1,0.5)
>>> pdata=np.ones((37,37))
>>> for i in range(37):
pdata[i]=b
>>> a=[(-9, -4), (-1, -0.5, 0, 0.5, 1), (4, 9)]
>>> plot_array_trans(pdata,a)
(array([[-2. , -1.95, -1.9 , ..., 1.9 , 1.95, 2. ],
[-2. , -1.95, -1.9 , ..., 1.9 , 1.95, 2. ],
[-2. , -1.95, -1.9 , ..., 1.9 , 1.95, 2. ],
...,
[-2. , -1.95, -1.9 , ..., 1.9 , 1.95, 2. ],
[-2. , -1.95, -1.9 , ..., 1.9 , 1.95, 2. ],
[-2. , -1.95, -1.9 , ..., 1.9 , 1.95, 2. ]]),
[-2.0, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2.0],
[-9, -4, -1, -0.5, 0, 0.5, 1, 4, 9])
"""
if copy:
pdata_trans=pcopy.deepcopy(pdata)
else:
pdata_trans=pdata
low_range=a[0]
mid_range=a[1]
high_range=a[2]
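    # The middle range keeps its linear spacing; the outer ranges are remapped onto
    # plotting positions spaced by the same interval as the middle range.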
if len(mid_range)==1:
raise ValueError('there is only 1 element in middle range!')
else:
interval=mid_range[1]-mid_range[0]
#
if isinstance(low_range,tuple):
low_range_plot=pcopy.deepcopy(list(low_range))
else:
low_range_plot=pcopy.deepcopy(list([low_range]))
for i in range(len(low_range_plot)):
low_range_plot[i]=mid_range[0]-interval*(len(low_range_plot)-i)
if isinstance(high_range,tuple):
high_range_plot=pcopy.deepcopy(list(high_range))
else:
high_range_plot=pcopy.deepcopy(list([high_range]))
for i in range(len(high_range_plot)):
high_range_plot[i]=mid_range[-1]+interval*(i+1)
if len(low_range_plot)==1:
pdata_trans=arraylintrans(pdata_trans,(low_range,mid_range[0]),(low_range_plot[0],mid_range[0]))
else:
for i in range(len(low_range_plot))[::-1]:
if i != len(low_range_plot)-1:
pdata_trans=arraylintrans(pdata_trans,(low_range[i],low_range[i+1]),(low_range_plot[i],low_range_plot[i+1]))
else:
pdata_trans=arraylintrans(pdata_trans,(low_range[i],mid_range[0]),(low_range_plot[i],mid_range[0]))
if len(high_range_plot)==1:
pdata_trans=arraylintrans(pdata_trans,(mid_range[-1],high_range),(mid_range[-1],high_range_plot[0]))
else:
for i in range(len(high_range_plot)):
if i ==0:
pdata_trans=arraylintrans(pdata_trans,(mid_range[-1],high_range[0]),(mid_range[-1],high_range_plot[0]))
else:
pdata_trans=arraylintrans(pdata_trans,(high_range[i-1],high_range[i]),(high_range_plot[i-1],high_range_plot[i]))
if not hasattr(low_range,'__iter__'):
low_range=list([low_range])
if not hasattr(high_range,'__iter__'):
high_range=list([high_range])
levtemp=[low_range_plot,mid_range,high_range_plot]
levels=[j for i in levtemp for j in i]
labtemp=[low_range,mid_range,high_range]
lab=[j for i in labtemp for j in i]
return pdata_trans,levels,lab
|
0b885fba59fa34f567df5f6891ecdbe46d8a8be9
| 3,641,549
|
def process_generate_api_token_data(post_data):
"""
This expects the post_data to contain an array called ``user_to_form``.
Each item in this array is of the form:
.. code-block:: python
'<UserID>.<form_prefix>' (i.e. '1.form-0')
Each form then may add two form data key-value pairs:
.. code-block:: python
'<form_prefix>-expiration_date': '<date>' (i.e. 'form-0-expiration_date': '2021-06-04')
"""
user_to_form_pairs = [pair.split('.') for pair in post_data.getlist('user_to_form')]
user_form_data = []
for user_id, form_prefix in user_to_form_pairs:
user = User.objects.get(UserID=user_id)
form_data = dict_filter_keys_start_with(form_prefix, post_data)
date_str = '-'.join([form_data.get('ExpirationDate_year', ''),
form_data.get('ExpirationDate_month', ''),
form_data.get('ExpirationDate_day', '')])
expiration_date = set_date_from_str(date_str=date_str)
user_form_data.append({'user': user,
'expires': expiration_date})
return user_form_data
|
8da8c2566621bdc8710091daf604a292a30c602a
| 3,641,550
|
import bpy
from bpy import context as C
from bpy import data as D
def add_vcolor(hemis, mesh=None, name='color'):
"""Seems like `hemis` is color you wish to apply to currently selected mesh."""
if mesh is None:
mesh = C.scene.objects.active.data
elif isinstance(mesh, str):
mesh = D.meshes[mesh]
bpy.ops.object.mode_set(mode='OBJECT')
color = hemis
if len(hemis) == 2:
color = hemis[0]
if len(mesh.vertices) == len(hemis[1]):
color = hemis[1]
vcolor = mesh.vertex_colors.new(name)
if hasattr(mesh, "loops"):
loopidx = [0]*len(mesh.loops)
mesh.loops.foreach_get('vertex_index', loopidx)
if not isinstance(color[0], (list, tuple)):
for i, j in enumerate(loopidx):
vcolor.data[i].color = [color[j]]*3
else:
for i, j in enumerate(loopidx):
vcolor.data[i].color = color[j]
else:
# older blender version, need to iterate faces instead
print("older blender found...")
if not isinstance(color[0], (list, tuple)):
for i in range(len(mesh.faces)):
v = mesh.faces[i].vertices
vcolor.data[i].color1 = [color[v[0]]] * 3
vcolor.data[i].color2 = [color[v[1]]] * 3
vcolor.data[i].color3 = [color[v[2]]] * 3
else:
            for i in range(len(mesh.faces)):
v = mesh.faces[i].vertices
vcolor.data[i].color1 = color[v[0]]
vcolor.data[i].color2 = color[v[1]]
vcolor.data[i].color3 = color[v[2]]
print("Successfully added vcolor '%s'"%name)
return vcolor
|
9199411ab0265c8e16e4a8bb2dfa45f9550d5d1a
| 3,641,551
|
import itertools
import pandas as pd
def gridSeach(model, parameters, features, response, train, test):
"""
This function performs a grid search over the parameter space.
It is simplistic and only allows certain range of values. If there
is a parameter in the models that needs to be a list it has to be modified.
"""
names = sorted(parameters)
combinations = list(itertools.product(*(parameters[name] for name in names)))
names.append('r2')
model_matrix = pd.DataFrame(columns=names)
for c in combinations:
dictionary = dict(zip(names, c))
model = model.set_params(**dictionary)
model.fit(features[train], response[train])
if 'hidden_layer_sizes' in dictionary:
dictionary.update({'hidden_layer_sizes':[dictionary['hidden_layer_sizes']],
'r2':model.score(features[test], response[test])})
else:
dictionary.update({'r2':model.score(features[test], response[test])})
model_matrix = model_matrix.append(dictionary, ignore_index=True)
dictionary = dict(model_matrix.ix[model_matrix['r2'].argmax(),:-1])
if 'hidden_layer_sizes' in dictionary:
dictionary.update({'hidden_layer_sizes':dictionary['hidden_layer_sizes'][0]})
if 'n_neighbors' in dictionary:
dictionary.update({'n_neighbors':int(dictionary['n_neighbors'])})
model = model.set_params(**dictionary)
model.fit(features[train], response[train])
return (model, model_matrix)
|
69d406cb16312d2777e7aa0562f77c77b20c44f7
| 3,641,552
|
def cvt_lambdef(node: pytree.Base, ctx: Ctx) -> ast_cooked.Base:
"""lambdef: 'lambda' [varargslist] ':' test"""
assert ctx.is_REF, [node]
name = xcast(ast_cooked.NameBindsNode, cvt(node.children[0], ctx.to_BINDING()))
ctx_func = new_ctx_from(ctx)
if len(node.children) == 4:
parameters = xcast(ast_cooked.BareTypedArgsListNode, cvt(node.children[1], ctx_func))
suite = cvt(node.children[3], ctx_func)
else:
parameters = ast_cooked.BareTypedArgsListNode(args=[])
suite = cvt(node.children[2], ctx_func)
return ast_cooked.FuncDefStmt(name=name,
parameters=parameters.args,
return_type=ast_cooked.OMITTED_NODE,
suite=suite,
scope_bindings=ctx_func.scope_bindings)
|
42ee22a02c2d003afc808bc3e28f18a57e3153fe
| 3,641,553
|
import re
def ischapter_name(text_str):
"""判断是否是章节名"""
if re.match(r'^第(.{1,9})([章节回卷集部篇])(\s*)(.*)', text_str):
return True
else:
return False
|
c89a34408def2c2f9026045925212c2dde88a41d
| 3,641,554
|
def calc_mean_onbit_density(bitsets, number_of_bits):
"""Calculate the mean density of bits that are on in bitsets collection.
Args:
bitsets (list[pyroaring.BitMap]): List of fingerprints
number_of_bits: Number of bits for all fingerprints
Returns:
float: Mean on bit density
"""
all_nr_onbits = [len(v) for v in bitsets]
mean_onbit = fsum(all_nr_onbits) / float(len(all_nr_onbits))
density = mean_onbit / number_of_bits
return float(density)
|
4d68ff5c280708d930d8e1525753804f831fc9da
| 3,641,555
|
import os
import pickle
def logger_client():
"""Authentification and service delivery from gmail API
"""
creds = None
# The file token.pickle stores the user's access and refresh tokens,
# and is created automatically when the authorization flow
# completes for the first time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
CREDENTIALS, SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('gmail', 'v1', credentials=creds)
return service
|
7a0a80a719ea5d5cfea586ead586ed0e6286d0fd
| 3,641,556
|
from typing import List
from typing import Dict
from typing import Any
from typing import Optional
def get_parameters(path: str) -> List[Dict[str, Any]]:
"""
Retrieve parameters from AWS SSM Parameter Store. Decrypts any encrypted parameters.
Relies on the appropriate environment variables to authenticate against AWS:
https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html
"""
ssm = boto3.client("ssm")
next_token: Optional[bool] = True
parameters: List[Dict[str, Any]] = []
while next_token is not None:
kwargs = {"Path": path, "Recursive": False, "WithDecryption": True}
if next_token is not True:
kwargs["NextToken"] = next_token
response = ssm.get_parameters_by_path(**kwargs)
new_parameters = response.get("Parameters", [])
parameters.extend(new_parameters)
next_token = response.get("NextToken")
return parameters
|
0905e9e707dfa45b9dab8137676fac14e496e594
| 3,641,557
|
import re
def normalizeUrl(url):
"""
ParseResult(scheme='https', netloc='www.tWitch.tv', path='/ludwig/clip/MoldyNiceMarjoramCharlieBitMe-6EbApxzSGbjacptE', params='', query='a=b&c=d', fragment='')
Wish I could convert clips like this:
https://www.twitch.tv/ludwig/clip/MoldyNiceMarjoramCharlieBitMe-6EbApxzSGbjacptE
To ones like this:
https://clips.twitch.tv/MoldyNiceMarjoramCharlieBitMe-6EbApxzSGbjacptE
"""
f = furl(url)
f.path.normalize()
if f.host == 'www.twitch.tv' or f.host == 'twitch.tv':
m = re.match('^/[^/]+/clip/[^/]+.*$', str(f.path))
if m is not None:
# TODO: Yeah
pass
return f.url
|
c72572f07dd0755a1cef65ab328a5f6d5dcf774f
| 3,641,558
|
import requests
import pandas as pd
def _getPVGIS(lat, lon):
"""
This function uses the non-interactive version of PVGIS to extract a
tmy dataset to be used to predict VRE yields for future periods.
------ inputs ------
Latitude, in decimal degrees, south is negative.
Longitude, in decimal degrees, west is negative.
------- returns -------
tmy as dataframe with datetime as index, containing 9 timeseries
Temperature, humidity, global horizontal, beam normal, diffuse horizontal,
infrared horizontal, wind speed, wind direction and pressure.
From PVGIS [https://ec.europa.eu/jrc/en/PVGIS/tools/tmy]
"A typical meteorological year (TMY) is a set of meteorological data with
data values for every hour in a year for a given geographical location.
The data are selected from hourly data in a longer time period (normally
10 years or more). The TMY is generated in PVGIS following the procedure
described in ISO 15927-4.
The solar radiation database (DB) used is the default DB for the given
location, either PVGIS-SARAH, PVGIS-NSRDB or PVGIS-ERA5. The other
    meteorological variables are obtained from the ERA-Interim reanalysis."
"""
outputformat = "json"
request_url = f"https://re.jrc.ec.europa.eu/api/tmy?lat={lat}&lon={lon}&outputformat={outputformat}"
response = requests.get(request_url)
if not response.status_code == 200:
raise ValueError("API get request not succesfull, check your input")
# store to private df
df = pd.DataFrame(response.json()['outputs']['tmy_hourly'])
# send to private function to set the date column as index with parser
tmy = _tmy_dateparser(df)
# for dataframe off-line / in-session storage
tmy['lat'] = lat
tmy['lon'] = lon
tmy.columns = ['T', *tmy.columns[1:6].values, 'WS', 'WD', 'SP', 'lat', 'lon']
return tmy
|
e4d47cb3efab61bae1e5d38a87c642c687176ed3
| 3,641,559
|
def get_metric_key_samples(metricDict, metricNames, keyVal="means"):
"""
Returns a dictionary of samples for the given metric name, but only extracts
the samples for the given key
Args:
metricDict (dict): Dictionary of sampled metrics
metricNames (list): Names of the keys of the metric to return
keyVal (str): The value of the key for which data is to be extracted.
            Must be one of {"mins", "maxs", "means", "vars", "sums"}
Returns:
Dictionary of samples of the given {"mins", "maxs", "means", "vars", "sums"}
"""
assert keyVal in ["mins", "maxs", "means", "vars", "sums"]
retDict = get_metric_samples(metricDict, metricNames)
for key in retDict:
retDict[key] = retDict[key][keyVal]
return retDict
|
f6b2bb32218654d90404812654623580ab4425df
| 3,641,560
|
def apply_nonbonded(nodes, scaling=1.0, suffix=""):
""" Nonbonded in nodes. """
# TODO: should this be 9-6 or 12-6?
return {
"u%s"
% suffix: scaling
* esp.mm.nonbonded.lj_9_6(
x=nodes.data["x"],
sigma=nodes.data["sigma%s" % suffix],
epsilon=nodes.data["epsilon%s" % suffix],
)
}
|
e54f96168ea238ee6c799a428e7325063e527d93
| 3,641,561
|
import requests
def swapi_films(episode):
"""
Gets the films listed in the api.
:param episode:
:return: response json
"""
response = requests.get(SWAPI_API + 'films/' + str(episode))
return response
|
fab283eeb2c96db1e509d4262fed79f7f4652fca
| 3,641,562
|
def prepare_qualifications(request, bids=[], lotId=None):
""" creates Qualification for each Bid
"""
new_qualifications = []
tender = request.validated["tender"]
if not bids:
bids = tender.bids
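    # With lots: create one pending Qualification per (bid, active lot) pair;
    # without lots: one pending Qualification per pending bid.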
if tender.lots:
active_lots = [lot.id for lot in tender.lots if lot.status == "active"]
for bid in bids:
if bid.status not in ["invalid", "deleted"]:
for lotValue in bid.lotValues:
if lotValue.status == "pending" and lotValue.relatedLot in active_lots:
if lotId:
if lotValue.relatedLot == lotId:
qualification = Qualification({"bidID": bid.id, "status": "pending", "lotID": lotId})
qualification.date = get_now()
tender.qualifications.append(qualification)
new_qualifications.append(qualification.id)
else:
qualification = Qualification(
{"bidID": bid.id, "status": "pending", "lotID": lotValue.relatedLot}
)
qualification.date = get_now()
tender.qualifications.append(qualification)
new_qualifications.append(qualification.id)
else:
for bid in bids:
if bid.status == "pending":
qualification = Qualification({"bidID": bid.id, "status": "pending"})
qualification.date = get_now()
tender.qualifications.append(qualification)
new_qualifications.append(qualification.id)
return new_qualifications
|
53399716f029d4b7bebc45ddef8e6f39272e33d1
| 3,641,563
|
def int_format(x):
"""
Format an integer:
- upcast to a (u)int64
- determine buffer size
- use snprintf
"""
x = upcast(x)
buf = flypy.runtime.obj.core.newbuffer(flypy.types.char, ndigits(x) + 1)
formatting.sprintf(buf, getformat(x), x)
return flypy.types.String(buf)
|
363b4998bca8c45eb6a5a3b825270ce48bbb237e
| 3,641,564
|
import re
def pyccparser2cbmc(srcfile, libs):
"""
Transforms the result of a parsed file from pycparser to a valid cbmc
input.
"""
fd = open(srcfile, "r")
src = fd.read()
fd.close()
# Replace the definition of __VERIFIER_error with the one for CBMC
if "extern void __VERIFIER_error();" in src:
# print "__VERIFIER_error found"
pos = re.search("extern void __VERIFIER_error\(\);", src).pos
# print "position: " + str(pos)
vererr = "extern void __VERIFIER_error() __attribute__ ((__noreturn__));" + '\n'
src = re.sub("extern void __VERIFIER_error\(\);", vererr, src)
# Remove the strip lines with original libs
if "_____STARTSTRIPPINGFROMHERE_____" in src:
# print "_____STARTSTRIPPINGFROMHERE_____ found"
pos = src.find("typedef int _____STARTSTRIPPINGFROMHERE_____;", 0, len(src) )
# print "position: " + str(pos)
libstr = ""
for lib in reversed(libs):
libstr += '#include <' + lib + '>' + '\n'
src = src[:pos] + libstr + '\n' + src[pos:]
src = strip(src)
newfile = srcfile + "_cbmc.c"
fd = open(newfile, "w")
fd.write(src)
fd.close()
return newfile
|
499208680da71382d652d655a95c227d29129ee5
| 3,641,565
|
import dill
import base64
def check_finished(worker, exec_id):
"""
:param worker:
:param exec_id:
:return:
"""
result = worker.status(exec_id)
status = dill.loads(base64.b64decode(result.data))
if status["status"] == "FAILED":
raise Exception("Remote job execution failed")
elif status["status"] == "INVALID ID":
raise Exception("Invalid Id")
elif status["status"] == "COMPLETED":
return True, status
else:
return False, status
|
285090fd0fcdfce6964aa43f4af0fae836175ab1
| 3,641,566
|
def round_filters(filters, global_params):
""" Calculate and round number of filters based on depth multiplier. """
multiplier = global_params.width_coefficient
if not multiplier:
return filters
divisor = global_params.depth_divisor
min_depth = global_params.min_depth
filters *= multiplier
min_depth = min_depth or divisor
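    # Round to the nearest multiple of `divisor`, but never below `min_depth`.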
new_filters = max(min_depth,
int(filters + divisor / 2) // divisor * divisor)
if new_filters < 0.9 * filters: # prevent rounding by more than 10%
new_filters += divisor
return int(new_filters)
|
b39ca8a0b77ae1c134983e20725297fa6bccdac8
| 3,641,567
|
def admin_user_detail():
"""管理员信息编辑详情页"""
if not g.user.is_admin:
return redirect('/')
if request.method == 'GET':
        # Get request parameters
admin_id = request.args.get('admin_id')
if not admin_id:
abort(404)
try:
admin_id = int(admin_id)
except Exception as e:
current_app.logger.error(e)
return render_template('admin/admin_text_edit.html', data={"errmsg": "参数错误"})
        # Query the user by id
admin_user_dict = None
try:
admin_user_dict = User.query.get(admin_id)
except Exception as e:
current_app.logger.error(e)
if not admin_user_dict:
return render_template('admin/admin_text_edit.html', data={"errmsg": "未查询到此配置信息"})
        # Return the data
data = {
"admin_user_dict": admin_user_dict.to_dict(),
}
return render_template('admin/admin_user_detail.html', data=data)
    # Get POST request parameters
admin_id = request.form.get("admin_id")
nick_name = request.form.get("nick_name")
password = request.form.get("password")
mobile = request.form.get("mobile")
signature = request.form.get("signature")
gender = request.form.get("gender")
avatar_url = request.files.get("avatar_url")
    # 1.1 Check that the required fields are present
if not all([nick_name, admin_id, mobile, gender]):
return jsonify(errno=RET.PARAMERR, errmsg="参数有误")
    # Query the user with the given id
try:
user = User.query.get(admin_id)
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.PARAMERR, errmsg="参数错误")
if not user:
return jsonify(errno=RET.NODATA, errmsg="未查询到新闻数据")
    # 1.2 Try to read the image file
if avatar_url:
try:
wxcode_image = avatar_url.read()
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.PARAMERR, errmsg="参数有误")
        # 2. Upload the image to Qiniu
try:
key = storage(wxcode_image)
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.THIRDERR, errmsg="上传图片错误")
user.avatar_url = constants.QINIU_DOMIN_PREFIX + key
if password:
user.password = password
    # 3. Set the related fields
user.nick_name = nick_name
user.mobile = mobile
user.signature = signature
user.gender = gender
return jsonify(errno=RET.OK, errmsg='OK')
|
2b8ec2201688d0e5fcc49e77fd1a238413d259e3
| 3,641,568
|
def splitBinNum(binNum):
"""Split an alternate block number into latitude and longitude parts.
Args:
binNum (int): Alternative block number
Returns:
:tuple Tuple:
1. (int) Latitude portion of the alternate block number.
Example: ``614123`` => ``614``
2. (int) Longitude portion of the alternate block number.
Example: ``614123`` => ``123``
"""
latBin = int(binNum / 1000)
longBin = binNum - (latBin * 1000)
return (latBin, longBin)
|
da9b9cc67d592e73da842f4b686c0d16985f3457
| 3,641,569
|
def load_model_from_params_file(model):
"""
case 0: CHECKPOINT.CONVERT_MODEL = True:
Convert the model
case 1: CHECKPOINT.RESUME = False and TRAIN.PARAMS_FILE is not none:
load params_file
case 2: CHECKPOINT.RESUME = True and TRAIN.PARAMS_FILE is not none:
case 2a: if checkpoint exist: use checkpoint
case 2b: if checkpoint not exist: use params_file
case 3: CHECKPOINT.RESUME = True and TRAIN.PARAMS_FILE is none:
case 3a: if checkpoint exist: use checkpoint
case 3b: if checkpoint not exist: set start_model_iter = 0
"""
use_checkpoint = cfg.CHECKPOINT.RESUME and find_checkpoint()
logger.info("Resume training: {}". format(cfg.CHECKPOINT.RESUME))
if cfg.TRAIN.PARAMS_FILE and cfg.CHECKPOINT.CONVERT_MODEL:
# After convert model, should use affine layer
assert(cfg.MODEL.USE_AFFINE)
converted_checkpoint = convert_model(cfg.TRAIN.PARAMS_FILE)
logger.info('Checkpoint model converted')
cfg.TRAIN.PARAMS_FILE = converted_checkpoint
if cfg.TRAIN.PARAMS_FILE and not use_checkpoint:
logger.info('Initializing from pre-trained file...')
start_model_iter, prev_lr = initialize_params_from_file(
model=model, weights_file=cfg.TRAIN.PARAMS_FILE,
load_momentum=False, # We don't load momentum if it is pretrained.
)
logger.info(('Loaded: start_model_iter: {}; prev_lr: {:.8f}').format(
start_model_iter, prev_lr))
model.current_lr = prev_lr
# Correct start_model_iter if pretraining uses a different batch size
# (mainly used for 1-node warmup).
if cfg.TRAIN.RESUME_FROM_BATCH_SIZE > 0:
start_model_iter = misc.resume_from(start_model_iter)
# If we only want the weights.
if cfg.TRAIN.RESET_START_ITER:
start_model_iter = 0
elif use_checkpoint:
logger.info('Initializing from checkpoints...')
start_model_iter, prev_lr = initialize_params_from_file(
model=model, weights_file=get_checkpoint_resume_file())
logger.info(('Loaded: start_model_iter: {}; prev_lr: {:.8f}').format(
start_model_iter, prev_lr))
model.current_lr = prev_lr
else:
start_model_iter = 0
logger.info('No checkpoint found; training from scratch...')
return start_model_iter
|
4f7c862829135e8b01038c6c9a540aeb1f55e285
| 3,641,570
|
def getPool(pool_type='avg', gmp_lambda=1e3, lse_r=10):
"""
    # NOTE: this function is not used in writer_ident; see the constructor of
    # ResNet50Encoder
params
pool_type: the allowed pool types
gmp_lambda: the initial regularization parameter for GMP
lse_r: the initial regularization parameter for LSE
"""
if pool_type == 'gmp':
pool_layer = GMP(lamb=gmp_lambda)
elif pool_type == 'avg':
pool_layer = nn.AdaptiveAvgPool2d(1)
elif pool_type == 'max':
pool_layer = nn.AdaptiveMaxPool2d(1)
elif pool_type == 'mixed-pool':
pool_layer = MixedPool(0.5)
elif pool_type == 'lse':
pool_layer = LSEPool(lse_r)
else:
raise RuntimeError('{} is not a valid pooling'
' strategy.'.format(pool_type))
return pool_layer
|
751bd851d57d37f7cf0749ba2183b67d59722c83
| 3,641,571
|
import cv2
import numpy as np
def draw_transperency(image, mask, color_f, color_b):
"""
image (np.uint8)
mask (np.float32) range from 0 to 1
"""
mask = mask.round()
alpha = np.zeros_like(image, dtype=np.uint8)
alpha[mask == 1, :] = color_f
alpha[mask == 0, :] = color_b
image_alpha = cv2.add(image, alpha)
return image_alpha
|
900269f7a36a4daa8c87cb2e2b5adc5b9be8728e
| 3,641,572
|
def split_in_pairs(s, padding = "0"):
"""
Takes a string and splits into an iterable of strings of two characters each.
Made to break up a hex string into octets, so default is to pad an odd length
string with a 0 in front. An alternative character may be specified as the
second argument.
"""
if not isinstance(padding, str) or len(padding) != 1:
raise TypeError("Padding must be a single character.")
s = padding + s if len(s) % 2 else s
v = iter(s)
return (a+b for a,b in zip(v,v))
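# Usage sketch (illustrative addition): an odd-length string is left-padded before pairing.
assert list(split_in_pairs("abc")) == ["0a", "bc"]
assert list(split_in_pairs("1a2b")) == ["1a", "2b"]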
|
8807448bb8125c80fa78ba32f887a54ba9bab1dd
| 3,641,573
|
def make_slicer_query_with_totals_and_references(
database,
table,
joins,
dimensions,
metrics,
operations,
filters,
references,
orders,
share_dimensions=(),
):
"""
:param database:
:param table:
:param joins:
:param dimensions:
:param metrics:
:param operations:
:param filters:
:param references:
:param orders:
:param share_dimensions:
:return:
"""
"""
The following two loops will run over the spread of the two sets including a NULL value in each set:
- reference group (WoW, MoM, etc.)
- dimension with roll up/totals enabled (totals dimension)
This will result in at least one query where the reference group and totals dimension is NULL, which shall be
called base query. The base query will ALWAYS be present, even if there are zero reference groups or totals
dimensions.
For a concrete example, check the test case in :
```
fireant.tests.queries.test_build_dimensions.QueryBuilderDimensionTotalsTests
#test_build_query_with_totals_cat_dimension_with_references
```
"""
totals_dimensions = find_totals_dimensions(dimensions, share_dimensions)
totals_dimensions_and_none = [None] + totals_dimensions[::-1]
reference_groups = find_and_group_references_for_dimensions(dimensions, references)
reference_groups_and_none = [(None, None)] + list(reference_groups.items())
queries = []
for totals_dimension in totals_dimensions_and_none:
(dimensions_with_totals, filters_with_totals) = adapt_for_totals_query(
totals_dimension, dimensions, filters
)
for reference_parts, references in reference_groups_and_none:
dimensions_with_ref, metrics_with_ref, filters_with_ref = adapt_for_reference_query(
reference_parts,
database,
dimensions_with_totals,
metrics,
filters_with_totals,
references,
)
query = make_slicer_query(
database,
table,
joins,
dimensions_with_ref,
metrics_with_ref,
filters_with_ref,
orders,
)
# Add these to the query instance so when the data frames are joined together, the correct references and
# totals can be applied when combining the separate result set from each query.
query._totals = totals_dimension
query._references = references
queries.append(query)
return queries
|
ea77cf6729cc8b677758801d53338d96e67b167f
| 3,641,574
|
import numpy as np
from scipy.stats import pearsonr, spearmanr
def corr_na(array1, array2, corr_method: str = 'spearmanr', **addl_kws):
    """Correlation method that tolerates missing values. Can take pearsonr or spearmanr.
    Args:
        array1: Vector of values
        array2: Vector of values
        corr_method: Which method to use, pearsonr or spearmanr.
        **addl_kws: Additional keyword args to pass to scipy.stats corr methods.
    Returns: R and p-value from correlation of 2 vectors.
    """
    corr_funcs = {'pearsonr': pearsonr, 'spearmanr': spearmanr}
    if corr_method not in corr_funcs:
        raise ValueError(
            'Method %s is not a valid correlation method, must be: %s'
            % (corr_method, ','.join(['pearsonr', 'spearmanr']))
        )
    # not_na is assumed to be a helper (defined elsewhere) returning a boolean
    # mask of non-missing entries.
    nonull = np.logical_and(not_na(array1), not_na(array2))
    if sum(nonull) > 2:
        return corr_funcs[corr_method](array1[nonull], array2[nonull], **addl_kws)
    return np.nan, np.nan
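# Usage sketch (illustrative addition; assumes not_na is the module's missing-value mask
# helper, e.g. equivalent to lambda a: ~np.isnan(a)): only the non-missing pairs are used.
a = np.array([1.0, 2.0, np.nan, 4.0])
b = np.array([2.0, 4.0, 6.0, 8.0])
r, p = corr_na(a, b, corr_method='spearmanr')  # r == 1.0 for this monotone example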
|
b534898dee50b06488514de5b21d6ea7fcf025f6
| 3,641,575
|
from typing import Type
from typing import Callable
def analyze_member_access(name: str,
typ: Type,
node: Context,
is_lvalue: bool,
is_super: bool,
is_operator: bool,
builtin_type: Callable[[str], Instance],
not_ready_callback: Callable[[str, Context], None],
msg: MessageBuilder,
override_info: TypeInfo = None,
report_type: Type = None,
chk: 'mypy.checker.TypeChecker' = None) -> Type:
"""Analyse attribute access.
This is a general operation that supports various different variations:
1. lvalue or non-lvalue access (i.e. setter or getter access)
2. supertype access (when using super(); is_super == True and
override_info should refer to the supertype)
"""
report_type = report_type or typ
if isinstance(typ, Instance):
if name == '__init__' and not is_super:
# Accessing __init__ in statically typed code would compromise
# type safety unless used via super().
msg.fail(messages.CANNOT_ACCESS_INIT, node)
return AnyType()
# The base object has an instance type.
info = typ.type
if override_info:
info = override_info
if (experiments.find_occurrences and
info.name() == experiments.find_occurrences[0] and
name == experiments.find_occurrences[1]):
msg.note("Occurrence of '{}.{}'".format(*experiments.find_occurrences), node)
# Look up the member. First look up the method dictionary.
method = info.get_method(name)
if method:
if method.is_property:
assert isinstance(method, OverloadedFuncDef)
return analyze_var(name, method.items[0].var, typ, info, node, is_lvalue, msg,
not_ready_callback)
if is_lvalue:
msg.cant_assign_to_method(node)
typ = map_instance_to_supertype(typ, method.info)
if name == '__new__':
# __new__ is special and behaves like a static method -- don't strip
# the first argument.
signature = function_type(method, builtin_type('builtins.function'))
else:
signature = method_type_with_fallback(method, builtin_type('builtins.function'))
return expand_type_by_instance(signature, typ)
else:
# Not a method.
return analyze_member_var_access(name, typ, info, node,
is_lvalue, is_super, builtin_type,
not_ready_callback, msg,
report_type=report_type, chk=chk)
elif isinstance(typ, AnyType):
# The base object has dynamic type.
return AnyType()
elif isinstance(typ, NoneTyp):
if chk and chk.should_suppress_optional_error([typ]):
return AnyType()
# The only attribute NoneType has are those it inherits from object
return analyze_member_access(name, builtin_type('builtins.object'), node, is_lvalue,
is_super, is_operator, builtin_type, not_ready_callback, msg,
report_type=report_type, chk=chk)
elif isinstance(typ, UnionType):
        # The base object has a union type.
msg.disable_type_names += 1
results = [analyze_member_access(name, subtype, node, is_lvalue, is_super,
is_operator, builtin_type, not_ready_callback, msg,
chk=chk)
for subtype in typ.items]
msg.disable_type_names -= 1
return UnionType.make_simplified_union(results)
elif isinstance(typ, TupleType):
# Actually look up from the fallback instance type.
return analyze_member_access(name, typ.fallback, node, is_lvalue, is_super,
is_operator, builtin_type, not_ready_callback, msg, chk=chk)
elif isinstance(typ, FunctionLike) and typ.is_type_obj():
# Class attribute.
# TODO super?
ret_type = typ.items()[0].ret_type
if isinstance(ret_type, TupleType):
ret_type = ret_type.fallback
if isinstance(ret_type, Instance):
if not is_operator:
# When Python sees an operator (eg `3 == 4`), it automatically translates that
# into something like `int.__eq__(3, 4)` instead of `(3).__eq__(4)` as an
                # optimization.
#
                # While it normally doesn't matter which of the two versions is used, it
# does cause inconsistencies when working with classes. For example, translating
# `int == int` to `int.__eq__(int)` would not work since `int.__eq__` is meant to
# compare two int _instances_. What we really want is `type(int).__eq__`, which
# is meant to compare two types or classes.
#
# This check makes sure that when we encounter an operator, we skip looking up
# the corresponding method in the current instance to avoid this edge case.
# See https://github.com/python/mypy/pull/1787 for more info.
result = analyze_class_attribute_access(ret_type, name, node, is_lvalue,
builtin_type, not_ready_callback, msg)
if result:
return result
# Look up from the 'type' type.
return analyze_member_access(name, typ.fallback, node, is_lvalue, is_super,
is_operator, builtin_type, not_ready_callback, msg,
report_type=report_type, chk=chk)
else:
assert False, 'Unexpected type {}'.format(repr(ret_type))
elif isinstance(typ, FunctionLike):
# Look up from the 'function' type.
return analyze_member_access(name, typ.fallback, node, is_lvalue, is_super,
is_operator, builtin_type, not_ready_callback, msg,
report_type=report_type, chk=chk)
elif isinstance(typ, TypeVarType):
return analyze_member_access(name, typ.upper_bound, node, is_lvalue, is_super,
is_operator, builtin_type, not_ready_callback, msg,
report_type=report_type, chk=chk)
elif isinstance(typ, DeletedType):
msg.deleted_as_rvalue(typ, node)
return AnyType()
elif isinstance(typ, TypeType):
# Similar to FunctionLike + is_type_obj() above.
item = None
if isinstance(typ.item, Instance):
item = typ.item
elif isinstance(typ.item, TypeVarType):
if isinstance(typ.item.upper_bound, Instance):
item = typ.item.upper_bound
if item and not is_operator:
# See comment above for why operators are skipped
result = analyze_class_attribute_access(item, name, node, is_lvalue,
builtin_type, not_ready_callback, msg)
if result:
return result
fallback = builtin_type('builtins.type')
return analyze_member_access(name, fallback, node, is_lvalue, is_super,
is_operator, builtin_type, not_ready_callback, msg,
report_type=report_type, chk=chk)
if chk and chk.should_suppress_optional_error([typ]):
return AnyType()
return msg.has_no_attr(report_type, name, node)
|
d5fd897785bc857f075f0a50e3f4aef0082a2c84
| 3,641,576
|
def has_global(node, name):
"""
check whether node has name in its globals list
"""
return hasattr(node, "globals") and name in node.globals
|
7a2ef301cb25cba242d8544e2c191a537f63bf19
| 3,641,577
|
import tensorflow as tf
from tensorflow.keras import layers
def make_generator_model(input_dim=100) -> tf.keras.Model:
    """Build the generator model.
    Args:
        input_dim (int, optional): Input dimension. Defaults to 100.
    Returns:
        tf.keras.Model: The generator model
    """
dense_size = (7, 7, 256)
conv2d1_channel = 128
conv2d2_channel = 64
conv2d3_channel = 1
model = tf.keras.Sequential()
model.add(
layers.Dense(
dense_size[0] * dense_size[1] * dense_size[2],
use_bias=False,
input_shape=(input_dim,),
)
)
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
model.add(layers.Reshape(dense_size))
assert model.output_shape == (None, dense_size[0], dense_size[1], dense_size[2])
_add_conv2d_transpose_layer(
model,
conv2d1_channel,
(5, 5),
(1, 1),
(None, dense_size[0], dense_size[1], conv2d1_channel),
)
_add_conv2d_transpose_layer(
model,
conv2d2_channel,
(5, 5),
(2, 2),
(None, dense_size[0] * 2, dense_size[1] * 2, conv2d2_channel),
)
model.add(
layers.Conv2DTranspose(
conv2d3_channel,
(5, 5),
strides=(2, 2),
padding="same",
use_bias=False,
activation="tanh",
)
)
assert model.output_shape == (
None,
dense_size[0] * 4,
dense_size[1] * 4,
conv2d3_channel,
)
return model
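# Usage sketch (illustrative addition; assumes the _add_conv2d_transpose_layer helper is
# defined elsewhere in the same module): generate one 28x28x1 image from random noise.
generator = make_generator_model(input_dim=100)
noise = tf.random.normal([1, 100])
generated_image = generator(noise, training=False)  # shape (1, 28, 28, 1)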
|
3214afc37153471dae0c599a93cb95def1da8971
| 3,641,578
|
from os import environ, makedirs
from os.path import exists, join
from subprocess import call
def deploy_gradle(app, deltas={}):
"""Deploy a Java application using Gradle"""
java_path = join(ENV_ROOT, app)
build_path = join(APP_ROOT, app, 'build')
env_file = join(APP_ROOT, app, 'ENV')
env = {
'VIRTUAL_ENV': java_path,
"PATH": ':'.join([join(java_path, "bin"), join(app, ".bin"), environ['PATH']])
}
if exists(env_file):
env.update(parse_settings(env_file, env))
if not exists(java_path):
makedirs(java_path)
if not exists(build_path):
echo("-----> Building Java Application")
call('gradle build', cwd=join(APP_ROOT, app), env=env, shell=True)
else:
echo("-----> Removing previous builds")
echo("-----> Rebuilding Java Application")
call('gradle clean build', cwd=join(APP_ROOT, app), env=env, shell=True)
return spawn_app(app, deltas)
|
d1be9ecd675389c05324d4e1f0e077414db814a5
| 3,641,579
|
from typing import Optional
def find_badge_by_slug(slug: str) -> Optional[Badge]:
"""Return the badge with that slug, or `None` if not found."""
badge = db.session \
.query(DbBadge) \
.filter_by(slug=slug) \
.one_or_none()
if badge is None:
return None
return _db_entity_to_badge(badge)
|
ec4102cf529b247c0b725e7c32d4b9de9c3a1e98
| 3,641,580
|
import os
from itertools import product
import numpy as np
import pandas as pd
from skimage import exposure, io, morphology  # io here is skimage.io, which provides imread
from skimage.color import hdx_from_rgb, separate_stains
from skimage.feature import greycomatrix, greycoprops
from skimage.util import img_as_float32
from sklearn.preprocessing import minmax_scale
def extract_img_features(
input_path,
input_type,
output_path,
img=None,
img_meta=None,
feature_mask_shape="spot",
):
"""
Extract features from image. Works with IF or HE image from Visium tif files.
For block feature, a square will be drawn around each spot. Since it is bigger than
the spot itself, it is more suitable to extract texture features.
For Spot feature, only area in the actual sequencing spot will be uses.
It is more suitable to extract intensity features.
Parameters
----------
input_path : str
input folder containing all necessary files.
input_type : str
input image type, select from {'if','he'}.
output_path : str
output folder path.
img : None or np.array, optional
alternative input for image, will override input_path.
img_meta : None or np.array, optional
alternative input for image metadata, will override input_path.
feature_mask_shape : {'spot', 'block'}
type of feature extracted.
"""
    texture_fn = os.path.join(
        os.path.abspath(output_path),
        "{}_level_texture_features.csv".format(feature_mask_shape)
    )
    intensity_fn = os.path.join(
        os.path.abspath(output_path),
        "{}_level_intensity_features.csv".format(feature_mask_shape)
    )
if (os.path.exists(intensity_fn)) == (os.path.exists(texture_fn)) == True:
print('Features are already extracted.')
return
if img_meta is None:
img_meta = pd.read_csv(
os.path.join(input_path,"Spot_metadata.csv"), index_col=0)
if img is None:
img_tif = [x for x in os.listdir(input_path) if "tif" in x][0]
img_tif = os.path.join(input_path, img_tif)
if input_type == "if":
# the indexing is a workaround for the strange Visium if image channels.
img = io.imread(img_tif)
img = img_as_float32(img)
img = (255 * img).astype("uint8")
else:
img = io.imread(img_tif)
# normalize image with color deconv
print('Normalizing image...')
img = separate_stains(img, hdx_from_rgb)
img = minmax_scale(img.reshape(-1, 3)).reshape(img.shape)
img = np.clip(img, 0, 1)
img = exposure.equalize_adapthist(img, clip_limit=0.01)
img = (255 * img).astype("uint8")
# Hard coded type of Haralick features and Angles for searching for neighboring pixels
# hard coded number of angles to be 4, meaning horizontal, vertical and two diagonal directions.
# extracting block shaped features
if feature_mask_shape == "block":
tmp = img_meta.sort_values(["Row", "Col"])
block_y = int(np.median(tmp.Y.values[2:-1] - tmp.Y.values[1:-2]) // 2)
tmp = img_meta.sort_values(["Col", "Row"])
block_x = int(np.median(tmp.X.values[2:-1] - tmp.X.values[1:-2]) // 2)
block_r = min(block_x, block_y)
block_x = block_y = block_r
print("Prossessing {}".format(input_path))
feature_set = [
"contrast",
"dissimilarity",
"homogeneity",
"ASM",
"energy",
"correlation",
]
text_features = []
intensity_features = []
for i in range(img_meta.shape[0]):
if (i + 1) % 100 == 0:
print("Processing {} spot out of {} spots".format(i + 1, img_meta.shape[0]))
row = img_meta.iloc[i]
x, y, r = row[["X", "Y", "Spot_radius"]].astype(int)
if feature_mask_shape == "spot":
spot_img = img[x - r : x + r + 1, y - r : y + r + 1]
spot_mask = morphology.disk(r)
# only use the spot, not the bbox
spot_img = np.einsum("ij,ijk->ijk", spot_mask, spot_img)
else:
spot_img = img[x - block_x : x + block_x + 1, y - block_y : y + block_y + 1]
spot_mask = np.ones_like(spot_img[:, :, 0], dtype="bool")
# extract texture features
ith_texture_f = []
for c in range(img.shape[2]):
glcm = greycomatrix(
spot_img[:, :, c],
distances=[1],
# Angles are arranged in a counter clockwise manner, in radian.
angles=[0, np.pi / 4, np.pi / 2, 3 * np.pi / 4],
levels=256,
symmetric=True,
normed=False,
)
glcm = glcm[1:, 1:]
glcm = glcm / np.sum(glcm, axis=(0, 1))
for feature_name in feature_set:
ith_texture_f += greycoprops(glcm, feature_name)[0].tolist()
# The first 6 features are intensity features, and the rest are Haralicks.
text_features.append(ith_texture_f)
# extract intensity features
int_low = 0.2
int_high = 0.8
int_step = 0.1
q_bins = np.arange(int_low, int_high, int_step)
ith_int_f = []
for c in range(img.shape[2]):
for t in q_bins:
ith_int_f.append(np.quantile(spot_img[:, :, c][spot_mask == True], t))
intensity_features.append(ith_int_f)
# Naming the features. f stands for channels, A stands for angles.
# construct texture feature table
channels = ["f" + str(i) for i in range(img.shape[2])]
col_names = product(channels, feature_set, ["A1", "A2", "A3", "A4"])
col_names = ["_".join(x) for x in col_names]
text_features = pd.DataFrame(text_features, index=img_meta.index, columns=col_names)
# construct intensity feature table
intensity_features = pd.DataFrame(
intensity_features,
index=img_meta.index,
columns=[
"_".join(x) for x in product(channels, ["{:.1f}".format(x) for x in q_bins])
],
)
    text_features.to_csv(texture_fn)
    intensity_features.to_csv(intensity_fn)
return text_features, intensity_features
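# Usage sketch (illustrative addition; the folder names are hypothetical): extract
# spot-level features from an H&E Visium folder holding the .tif image and Spot_metadata.csv.
texture_df, intensity_df = extract_img_features(
    input_path="visium_sample/", input_type="he",
    output_path="features_out/", feature_mask_shape="spot")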
|
bad225053293205940928d65ee0cdfadee67fd9a
| 3,641,581
|
import logging
from PIL import ImageColor
def validate_color(color,default,color_type):
"""Validate a color against known PIL values. Return the validated color if valid; otherwise return a default.
Keyword arguments:
color: color to test.
default: default color string value if color is invalid.
color_type: string name for color type, used for alerting users of defaults.
"""
    # Use exception handling: if the color cannot be parsed, fall back to the default.
    try:
        ImageColor.getcolor(color, 'RGB')
        return color
except ValueError as e:
logging.warning('"%s" is not a valid color specifier. Defaulting to "%s" for %s color.',color,default,color_type)
return default
|
2a91a9f5db2cbed3d530af12e8c383b65c5e2fa8
| 3,641,582
|
from numpy import fft  # assumed; any FFT module providing fft2/ifft2 works here
def d_xx_yy_tt(psi):
    """Return the weighted second derivatives of the field psi, computed spectrally via FFT.
    Parameters
    --------------
    psi : array of complex64 for the field
    Returns
    --------------
    cxx psi_xx + cyy psi_yy + ctt psi_tt : weighted second derivatives with respect to
    x, y and t, obtained by multiplying the spectrum of psi by the precomputed operator LAPL
    """
    # this function is to be removed
    global LAPL
    return fft.ifft2(LAPL * fft.fft2(psi))
|
12980ca705f5a1f3f3514d792cfc4e06529d0600
| 3,641,583
|
from typing import Iterable
def negate_objective(objective):
"""Take the negative of the given objective (converts a gain into a loss and vice versa)."""
if isinstance(objective, Iterable):
        return list(map(negate_objective, objective))
else:
return -objective
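# Usage sketch (illustrative addition): works on scalars and on iterables of objectives.
assert negate_objective(3.5) == -3.5
assert negate_objective([1, -2, 3]) == [-1, 2, -3]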
|
e24877d00b7c84e04c0cb38b5facdba85694890f
| 3,641,584
|
from typing import Any
import json
def process_vm_size(file_name: str) -> Any:
"""
Extract VMs instance specification.
:file_name (str) File name
Return VMs specification object
"""
    current_app.logger.info(f'Processing VM Size {file_name}...')
    with open(file_name) as file:
        data = json.load(file)
    return data
|
7afe372fa82769ac6add9e473bce082f0e268318
| 3,641,585
|
from Crypto.Protocol import KDF  # BLOCKSIZE is assumed to be a module-level constant
def gen_key(password, salt, dkLen=BLOCKSIZE):
"""
Implement PBKDF2 to make short passwords match the BLOCKSIZE.
Parameters
---------
password str
salt str
dkLen int
Returns
-------
- str
"""
return KDF.PBKDF2(password, salt, dkLen=BLOCKSIZE)
|
134d6c7b17f2aea869bfb79f72a0126367d44b36
| 3,641,586
|
import six
import tensorflow as tf
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
if isinstance(value, six.string_types):
value = six.binary_type(value, encoding='utf-8')
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
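# Usage sketch (illustrative addition): wrap a string value for use in a tf.train.Example.
feature = _bytes_feature('hello')
example = tf.train.Example(features=tf.train.Features(feature={'text': feature}))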
|
85bdab9a6445ec224f8e5f54be5b775008582d48
| 3,641,587
|
def parse_plot_set(plot_set_string):
"""
Given one of the string arguments to the --plot-sets option, parse out a
data structure representing which conditions ought to be compared against
each other, and what those comparison plots/tables should be called.
The syntax of a plot set is [title:]condition[,condition[,condition...]].
The first condition is the comparison baseline, when applicable.
Returns a tuple of a plot set title, or None if unspecified, and a list of
condition names.
"""
colon_pos = plot_set_string.find(':')
if colon_pos != -1:
# Pull out the title before the colon
title = plot_set_string[0:colon_pos]
# And the rest of the specifier after it
plot_set_string = plot_set_string[colon_pos + 1:]
else:
# No title given
title = None
# Return the title and condition list tuple
return (title, plot_set_string.split(','))
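# Usage sketch (illustrative addition): the optional title precedes the condition list.
assert parse_plot_set("speed:baseline,patched") == ("speed", ["baseline", "patched"])
assert parse_plot_set("baseline,patched") == (None, ["baseline", "patched"])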
|
1df83681aa3110dfd9302bd7918f15dfbfa497ab
| 3,641,588
|
import datetime as dt
import pandas as pd
def check_types_excel(row: tuple) -> bool:
"""Returns true if row from excel file has correct types"""
if not isinstance(row[1], (pd.Timestamp, str)):
return False
if not ((isinstance(row[2], dt.time) and isinstance(row[3], dt.time)) or
(isinstance(row[2], str) and isinstance(row[3], str))):
return False
if not all((isinstance(x, str) for x in row[4:5])):
return False
if not isinstance(row[6], (str, int)):
return False
if not isinstance(row[7], (str, int, float)):
# 3.27, 3.27a and 137 should all be supported
return False
return True
|
80ac33feff968de076bd29f34350bcf518cd34d5
| 3,641,589
|
def add(num1, num2):
""" Adds two numbers
>>> add(2,4)
6
"""
return num1 + num2
|
932981ca91c01817242e57e1be55c35441337fc4
| 3,641,590
|
def is_palindrome1(s):
    """
    Create a slice with negative step and confirm equality with s.
    """
    return s[::-1] == s
|
39dbc19d0d73b956c9af24abc1babae18c816d73
| 3,641,591
|
from datetime import datetime
from uuid import uuid4
def number_generetor(view, form):
    """ Payment number generator (used by default) """
    # is_py2 is assumed to be a module-level Python 2 compatibility flag
    if is_py2:
        uuid_fields = uuid4().get_fields()
    else:
        uuid_fields = uuid4().fields
    return u'{:%Y%m%d}-{:08x}'.format(datetime.now(), uuid_fields[0])
|
005cd8347b903be3adffe56d7c8c53ba79ebf2e8
| 3,641,592
|
def get_underlay_info():
"""
:return:
"""
return underlay_info
|
a48f2ede459a4ca8969e095e94ba09b99e59300d
| 3,641,593
|
async def get_guild_roles(id_: int):
"""
Get the roles of a guild
:param id_: Guild ID
:return: List of roles
"""
guild = await router.bot.rest.fetch_guild(id_)
if guild is None:
return status.HTTP_404_NOT_FOUND
roles = await guild.fetch_roles()
return [to_dict(role) for role in roles]
|
4d5084f62f29a5038dc3111b047b1644a96a958a
| 3,641,594
|
def prior_min_field(field_name, field_value):
"""
    Creates the prior "min" field from the given field definition.
:param field_name: prior name (field name initial)
:param field_value: field initial properties
:return: name of the min field, updated field properties
"""
name = field_name
value = field_value.copy()
value.update({
'label': 'Min',
'required': False,
})
return name + '_min', value
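# Usage sketch (illustrative addition, with a hypothetical field definition): the min
# variant keeps the original properties but relabels the field and makes it optional.
name, props = prior_min_field('mass', {'label': 'Mass', 'required': True, 'type': 'float'})
assert name == 'mass_min'
assert props == {'label': 'Min', 'required': False, 'type': 'float'}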
|
9f331ee58e699318e678d881c0028486b746c05c
| 3,641,595
|
def checkpoint_save_config():
"""Fixture to create a config for saving attributes of a detector."""
toolset = {
"test_id": "Dummy_test",
"saved_attributes": {
"FeatureExtraction": [
"dummy_dict",
"dummy_list",
"dummy_tuple",
"dummy_tensor",
"dummy_val",
],
},
"save_attributes": True,
"attributes": {},
"save_elementwise": True,
}
return toolset
|
6cb7e05a5eb680f6915fc58f40e72403787eea8b
| 3,641,596
|
import numpy as np
def matrix_sum_power(A, T):
"""Take the sum of the powers of a matrix, i.e.,
sum_{t=1} ^T A^t.
:param A: Matrix to be powered
:type A: np.ndarray
:param T: Maximum order for the matrixpower
:type T: int
:return: Powered matrix
:rtype: np.ndarray
"""
At = np.eye(A.shape[0])
As = np.zeros((A.shape[0], A.shape[0]))
for _ in range(T):
At = A @ At
As += At
return As
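# Usage sketch (illustrative addition): for A = 0.5 * I and T = 3,
# A + A^2 + A^3 = (0.5 + 0.25 + 0.125) * I = 0.875 * I.
A = 0.5 * np.eye(2)
np.testing.assert_allclose(matrix_sum_power(A, 3), 0.875 * np.eye(2))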
|
b590f0751c114bd7cfeaa39d3d03a3de49007c62
| 3,641,597
|
def mean_zero_unit_variance(arr, mean_vector=None, std_vector=None, samples_in='row'):
"""
Normalize input data to have zero mean and unit variance.
Return the normalized data, the mean, and the calculated standard
deviation which was used to normalize the data
[normalized, meanvec, stddev] = mean_zero_unit_variance(data)
or
    [normalized, meanvec, stddev] = mean_zero_unit_variance(data, mean_vector=provided_mean_vector)
etc.
"""
samplesIn = 1 if samples_in == 'col' else 0
dimsIn = int(not samplesIn)
nSamples = arr.shape[samplesIn]
nDims = arr.shape[dimsIn]
theshape = [1, 1]
theshape[dimsIn] = nDims
    if mean_vector is None:
        mean_vector = arr.mean(axis=samplesIn).reshape(theshape)
    if std_vector is None:
        std_vector = arr.std(axis=samplesIn).reshape(theshape)
# If you have a row with absolutely no information, you will divide by zero. Hence...
std_vector[std_vector < 1e-6] = 1
norma = (arr - mean_vector) / std_vector
return norma, mean_vector, std_vector
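# Usage sketch (illustrative addition): normalize two row-samples of three features each.
import numpy as np
data = np.array([[1.0, 2.0, 3.0], [3.0, 4.0, 5.0]])
normalized, mean_vec, std_vec = mean_zero_unit_variance(data)
np.testing.assert_allclose(normalized.mean(axis=0), 0.0, atol=1e-12)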
|
38a1ca262362b3f04aed06f3f0d21836eca8d5ad
| 3,641,598
|
import torch
def soft_precision(scores: torch.FloatTensor,
mask: torch.FloatTensor) -> torch.FloatTensor:
"""
Helper function for computing soft precision in batch.
# Parameters
scores : torch.FloatTensor
Tensor of scores with shape: (num_refs, num_cands, max_ref_len, max_cand_len)
mask : torch.FloatTensor
Mask for the candidate tensor with shape: (num_cands, max_cand_len)
"""
max_scores, _ = scores.max(dim=-2)
masked_max_scores = max_scores * mask.unsqueeze(dim=0)
precision = masked_max_scores.sum(dim=-1) / mask.sum(dim=-1).view(1, -1)
return precision
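# Usage sketch (illustrative addition): 1 reference, 2 candidates, second candidate padded.
scores = torch.rand(1, 2, 4, 3)                     # (num_refs, num_cands, max_ref_len, max_cand_len)
mask = torch.tensor([[1., 1., 1.], [1., 1., 0.]])   # (num_cands, max_cand_len)
precision = soft_precision(scores, mask)            # shape (num_refs, num_cands) == (1, 2)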
|
e76552bde3ae58f5b976abbf58e5dac1d4995117
| 3,641,599
|