| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def gradient(v, surf):
"""
:param v: vector of x, y, z coordinates
    :param surf: which implicit surface is being used to approximate the structure of this phase
:return: The gradient vector (which is normal to the surface at x)
"""
x = v[0]
y = v[1]
z = v[2]
if surf == 'Ia3d' or surf == 'gyroid' or surf == 'ia3d':
a = np.cos(x)*np.cos(y) - np.sin(x)*np.sin(z)
b = -np.sin(y)*np.sin(x) + np.cos(y)*np.cos(z)
c = -np.sin(y)*np.sin(z) + np.cos(z)*np.cos(x)
elif surf == 'Pn3m' or surf == 'pn3m':
a = np.cos(x)*np.sin(y)*np.sin(z) + np.cos(x)*np.cos(y)*np.cos(z) - np.sin(x)*np.sin(y)*np.cos(z) - np.sin(x)*np.cos(y)*np.sin(z)
b = np.sin(x)*np.cos(y)*np.sin(z) - np.sin(x)*np.sin(y)*np.cos(z) + np.cos(x)*np.cos(y)*np.cos(z) - np.cos(x)*np.sin(y)*np.sin(z)
c = np.sin(x)*np.sin(y)*np.cos(z) - np.sin(x)*np.cos(y)*np.sin(z) - np.cos(x)*np.sin(y)*np.sin(z) + np.cos(x)*np.cos(y)*np.cos(z)
elif surf == 'sphere':
a = 2*x
b = 2*y
c = 2*z
return np.array([a, b, c])
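# Usage sketch (added for illustration, not part of the original snippet): compute the unit
# surface normal of the gyroid ('Ia3d') level set at an arbitrary point. Assumes numpy is
# available; the point below is made up.
import numpy as np

_point = np.array([0.1, 0.2, 0.3])
_normal = gradient(_point, 'gyroid')
_unit_normal = _normal / np.linalg.norm(_normal)  # unit normal to the surface at _point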
| 5,339,600
|
def authenticated_client(client, user):
"""
"""
client.post(
'/login',
data={'username': user.username, 'password': 'secret'},
follow_redirects=True,
)
return client
| 5,339,601
|
def viterbi(obs, states, start_p, trans_p, emit_p):
"""
請參考李航書中的算法10.5(維特比算法)
HMM共有五個參數,分別是觀察值集合(句子本身, obs),
狀態值集合(all_states, 即trans_p.keys()),
初始機率(start_p),狀態轉移機率矩陣(trans_p),發射機率矩陣(emit_p)
此處的states是為char_state_tab_P,
這是一個用來查詢漢字可能狀態的字典
此處沿用李航書中的符號,令T=len(obs),令N=len(trans_p.keys())
"""
"""
維特比算法第1步:初始化
"""
#V:李航書中的delta,在時刻t狀態為i的所有路徑中之機率最大值
V = [{}] # tabular
#李航書中的Psi,T乘N維的矩陣
#表示在時刻t狀態為i的所有單個路徑(i_1, i_2, ..., i_t-1, i)中概率最大的路徑的第t-1個結點
mem_path = [{}]
#共256種狀態,所謂"狀態"是:"分詞標籤(BMES)及詞性(v, n, nr, d, ...)的組合"
all_states = trans_p.keys()
    # obs[0] is the first character of the sentence
    # states.get(obs[0], all_states) gives the states that could have emitted this character
    for y in states.get(obs[0], all_states): # init
        # at time 0, the log probability of state y is the log probability of starting in y
        # plus the log probability of emitting obs[0] from state y
        V[0][y] = start_p[y] + emit_p[y].get(obs[0], MIN_FLOAT)
        # which state the previous time step was in, given that time 0 is in state y
        mem_path[0][y] = ''
"""
維特比算法第2步:遞推
"""
#obs: 觀察值序列
    for t in range(1, len(obs)):
V.append({})
mem_path.append({})
#prev_states = get_top_states(V[t-1])
        # mem_path[t - 1].keys(): the states at the previous time step, denoted x below
        # keep x in prev_states only if len(trans_p[x]) > 0, i.e. x can transition to some other state
prev_states = [
x for x in mem_path[t - 1].keys() if len(trans_p[x]) > 0]
        # given that the previous state was x (one of prev_states), which states y could we be in now
prev_states_expect_next = set(
(y for x in prev_states for y in trans_p[x].keys()))
        # set(states.get(obs[t], all_states)): the states the t-th character of the sentence could be in
        # prev_states_expect_next: the possible current states as inferred from the previous character
        # obs_states: the intersection of the two
obs_states = set(
states.get(obs[t], all_states)) & prev_states_expect_next
        # if the intersection is empty, fall back to prev_states_expect_next, and then to all_states
if not obs_states:
obs_states = prev_states_expect_next if prev_states_expect_next else all_states
for y in obs_states:
            # equation 10.45 in Li Hang's book
            # y0 is the state at the previous time step
            # the argument of max is a list of tuples: [(prob1, state1), (prob2, state2), ...]
            # V[t - 1][y0]: log probability of being in state y0 at time t-1
            # trans_p[y0].get(y, MIN_INF): log probability of transitioning from y0 to y
            # emit_p[y].get(obs[t], MIN_FLOAT): log probability of emitting obs[t] from state y
            # the sum of the three terms is the log probability of reaching state y at time t via y0
prob, state = max((V[t - 1][y0] + trans_p[y0].get(y, MIN_INF) +
emit_p[y].get(obs[t], MIN_FLOAT), y0) for y0 in prev_states)
            # record the maximum probability and its predecessor in V and mem_path
V[t][y] = prob
            # if we are in state y at time t, the most likely state at time t-1 is `state`
mem_path[t][y] = state
"""
維特比算法第3步:終止
"""
#mem_path[-1].keys():最後一個時間點可能在哪些狀態
#V[-1][y]:最後一個時間點在狀態y的機率
#把mem_path[-1]及V[-1]打包成一個list of tuple
last = [(V[-1][y], y) for y in mem_path[-1].keys()]
# if len(last)==0:
# print obs
    # the last time step is most likely in state `state`, with probability `prob`
    # the viterbi function in jieba/finalseg/__init__.py restricts the segmentation tag of the
    # last character to E or S -- why is that restriction not applied here?
prob, state = max(last)
"""
維特比算法第4步:最優路徑回溯
"""
route = [None] * len(obs)
i = len(obs) - 1
while i >= 0:
route[i] = state
        # if time i is in state `state`, the previous time step was most likely in state mem_path[i][state]
state = mem_path[i][state]
i -= 1
return (prob, route)
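# Usage sketch (added for illustration): a toy two-state HMM pushed through viterbi().
# The function relies on module-level constants MIN_FLOAT and MIN_INF (not shown in the
# snippet); the values below are placeholders, and all probabilities are log probabilities.
MIN_FLOAT = -3.14e100
MIN_INF = float("-inf")

_obs = "ab"
_start_p = {"X": -0.7, "Y": -0.7}
_trans_p = {"X": {"X": -0.4, "Y": -1.1}, "Y": {"X": -1.1, "Y": -0.4}}
_emit_p = {"X": {"a": -0.3, "b": -1.2}, "Y": {"a": -1.2, "b": -0.3}}
_char_states = {"a": ("X", "Y"), "b": ("X", "Y")}

_prob, _route = viterbi(_obs, _char_states, _start_p, _trans_p, _emit_p)
# _route is the most likely state sequence (['X', 'Y'] here) and _prob its log probability.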
| 5,339,602
|
def plot_antFeatureMap_2700ns(uvd, _data_sq, JD, pol='ee'):
"""
Plots the positions of all antennas that have data, colored by feature strength.
Parameters
----------
uvd: UVData object
Observation to extract antenna numbers and positions from
_data_sq: Dict
        Dictionary structured as _data_sq[(antenna number, antenna number, pol)], where the values are the
        feature strengths that will determine the color on the map.
JD: Int
Julian date of the data
pol: String
Polarization to plot
"""
nd = {0: {'pos': [21.427320986820824, -30.722353385032143],
'ants': [0, 1, 2, 11, 12, 13, 14, 23, 24, 25, 26, 39]},
1: {'pos': [21.427906055943357, -30.722367970752067],
'ants': [3, 4, 5, 6, 15, 16, 17, 18, 27, 28, 29, 30]},
2: {'pos': [21.428502498826337, -30.722356438400826],
'ants': [7, 8, 9, 10, 19, 20, 21, 31, 32, 33, 321, 323]},
3: {'pos': [21.427102788863543, -30.72199587048034],
'ants': [36, 37, 38, 50, 51, 52, 53, 65, 66, 67, 68, 320]},
4: {'pos': [21.427671849802184, -30.7220282862175],
'ants': [40, 41, 42, 54, 55, 56, 57, 69, 70, 71, 72, 324]},
5: {'pos': [21.42829977472493, -30.722027118338183],
'ants': [43, 44, 45, 46, 58, 59, 60, 73, 74, 75, 76, 322]},
6: {'pos': [21.428836727299945, -30.72219119740069],
'ants': [22, 34, 35, 47, 48, 49, 61, 62, 63, 64, 77, 78]},
7: {'pos': [21.426862825121685, -30.72169978685838],
'ants': [81, 82, 83, 98, 99, 100, 116, 117, 118, 119, 137, 138]},
8: {'pos': [21.427419087275524, -30.72169615183073],
'ants': [84, 85, 86, 87, 101, 102, 103, 104, 120, 121, 122, 123]},
9: {'pos': [21.42802904166864, -30.721694142092485],
'ants': [88, 89, 90, 91, 105, 106, 107, 108, 124, 125, 126, 325]},
10: {'pos': [21.42863899600041, -30.721692129488424],
'ants': [92, 93, 94, 109, 110, 111, 112, 127, 128, 129, 130, 328]},
11: {'pos': [21.42914035998215, -30.721744794462655],
'ants': [79, 80, 95, 96, 97, 113, 114, 115, 131, 132, 133, 134]},
12: {'pos': [21.426763768223857, -30.72133448059758],
'ants': [135, 136, 155, 156, 157, 158, 176, 177, 178, 179, 329, 333]},
13: {'pos': [21.42734159294201, -30.72141297904905],
'ants': [139, 140, 141, 142, 159, 160, 161, 162, 180, 181, 182, 183]},
14: {'pos': [21.428012089958028, -30.721403280585722],
'ants': [143, 144, 145, 146, 163, 164, 165, 166, 184, 185, 186, 187]},
15: {'pos': [21.428561498114107, -30.721408957468245],
'ants': [147, 148, 149, 150, 167, 168, 169, 170, 188, 189, 190, 191]},
16: {'pos': [21.42914681969319, -30.721434635693182],
'ants': [151, 152, 153, 154, 171, 172, 173, 174, 192, 193, 194, 213]},
17: {'pos': [21.426857989080208, -30.72109992091893],
'ants': [196, 197, 198, 199, 215, 216, 217, 218, 233, 234, 235, 337]},
18: {'pos': [21.427443064426363, -30.7210702936363],
'ants': [200, 201, 202, 203, 219, 220, 221, 222, 236, 237, 238, 239]},
19: {'pos': [21.428053014877808, -30.72106828382215],
'ants': [204, 205, 206, 207, 223, 224, 225, 226, 240, 241, 242, 243]},
20: {'pos': [21.428662965267904, -30.721066271142263],
'ants': [208, 209, 210, 211, 227, 228, 229, 244, 245, 246, 261, 262]},
21: {'pos': [21.429383860959977, -30.721211242305866],
'ants': [175, 195, 212, 214, 231, 232, 326, 327, 331, 332, 336, 340]},
22: {'pos': [21.427060077987438, -30.720670550054763],
'ants': [250, 251, 252, 253, 266, 267, 268, 269, 281, 282, 283, 295]},
23: {'pos': [21.42767002595312, -30.720668542063535],
'ants': [254, 255, 256, 257, 270, 271, 272, 273, 284, 285, 286, 287]},
24: {'pos': [21.42838974031629, -30.720641805595115],
'ants': [258, 259, 260, 274, 275, 276, 288, 289, 290, 291, 302, 303]},
25: {'pos': [21.429052089734615, -30.720798251186455],
'ants': [230, 247, 248, 249, 263, 264, 265, 279, 280, 335, 339]},
26: {'pos': [21.427312432981267, -30.720413813332755],
'ants': [296, 297, 298, 308, 309, 310, 330, 334, 338, 341, 346, 347]},
27: {'pos': [21.42789750442093, -30.72038427427254],
'ants': [299, 300, 301, 311, 312, 313, 314, 342, 343]},
28: {'pos': [21.428507450517774, -30.72038226236355],
'ants': [304, 305, 315, 316, 317, 318, 348]},
29: {'pos': [21.42885912979846, -30.72052728164184],
'ants': [277, 278, 292, 293, 294, 306, 307, 319, 344, 345, 349]}}
freqs = uvd.freq_array[0]
taus = np.fft.fftshift(np.fft.fftfreq(freqs.size, np.diff(freqs)[0]))*1e9
idx_region1 = np.where(np.logical_and(taus > 2500, taus < 3000))[0]
idx_region2 = np.where(np.logical_and(taus > 2000, taus < 2500))[0]
fig = plt.figure(figsize=(14,10))
nodes, antDict, inclNodes = generate_nodeDict(uvd)
antnums = uvd.get_ants()
cmap = plt.get_cmap('inferno')
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=0, vmax=10))
sm._A = []
ampmax = 10
ampmin = 0
rang = ampmax-ampmin
for node in sorted(inclNodes):
ants = sorted(nodes[node]['ants'])
nodeamps = []
points = np.zeros((len(ants),2))
for i,antNum in enumerate(ants):
key = (antNum, antNum, pol)
idx = np.argwhere(uvd.antenna_numbers == antNum)[0][0]
antPos = uvd.antenna_positions[idx]
amp = 10*np.log10(np.sqrt(np.nanmean(np.abs(_data_sq[key][:,idx_region1]))/np.nanmean(np.abs(_data_sq[key][:,idx_region2]))))
nodeamps.append(amp)
points[i,:] = [antPos[1],antPos[2]]
hull = scipy.spatial.ConvexHull(points)
center = np.average(points,axis=0)
hullpoints = np.zeros((len(hull.simplices),2))
namp = np.nanmean(nodeamps)
ncolor = cmap(float((namp-ampmin)/rang))
plt.fill(points[hull.vertices,0], points[hull.vertices,1],alpha=0.5,color=ncolor)
for node in sorted(inclNodes):
ants = sorted(nodes[node]['ants'])
npos = nd[int(node)]['pos']
plt.plot(npos[0],npos[1],marker="s",markersize=15,color="black")
for antNum in ants:
idx = np.argwhere(uvd.antenna_numbers == antNum)[0][0]
antPos = uvd.antenna_positions[idx]
key = (antNum, antNum, pol)
amp = 10*np.log10(np.sqrt(np.nanmean(np.abs(_data_sq[key][:,idx_region1]))/np.nanmean(np.abs(_data_sq[key][:,idx_region2]))))
if math.isnan(amp):
marker="v"
color="r"
markersize=30
coloramp = [0]
else:
coloramp = cmap(float((amp-ampmin)/rang))
color = coloramp
marker="h"
markersize=40
plt.plot(antPos[1],antPos[2],marker=marker,markersize=markersize,color=color)
if coloramp[0]>0.6 or math.isnan(amp):
plt.text(antPos[1]-3,antPos[2],str(antNum),color='black')
else:
plt.text(antPos[1]-3,antPos[2],str(antNum),color='white')
plt.title('Antenna map - {} polarization (JD{})'.format(pol, JD))
cbar = fig.colorbar(sm)
cbar.set_label('2700ns Feature Amplitude (dB)')
| 5,339,603
|
def verifyInstrFomat(expresions, fomatStr, lineNum):
"""
Verify if instruction has a correct format and add errors to error list if not
expresions - array of srings as parts of instruction (letter code followed by parametrs)
fomatStr - format string of the instruction
lineNum - source file line number
return: none
"""
# check number of parametrs
if len(expresions) != len(fomatStr) + 1:
addError("invalid number of instruction parametrs, given: {}, expected: {}".format(len(expresions)-1, len(fomatStr)), lineNum)
# check parametr fomats
for i,ch in enumerate(fomatStr):
if ch == "R":
if re.match(r"[$][0-7]",expresions[i+1]):
continue
elif ch == "U":
if re.match(r"^[0-9]*$",expresions[i+1]):
continue
elif ch == "S":
if re.match(r"^-?[0-9]*$",expresions[i+1]):
continue
addError("invalid parametr number {}".format(i+1), lineNum)
| 5,339,604
|
def add_merge_variants_arguments(parser):
"""
    Add arguments to a parser for sub-command "merge_variants"
    :param parser: argparse object
:return:
"""
parser.add_argument(
"-vp",
"--vcf_pepper",
type=str,
required=True,
help="Path to VCF file from PEPPER SNP."
)
parser.add_argument(
"-vd",
"--vcf_deepvariant",
type=str,
required=True,
help="Path to VCF file from DeepVariant."
)
parser.add_argument(
"-o",
"--output_dir",
type=str,
required=True,
help="Path to output directory."
)
return parser
| 5,339,605
|
def set_time_scale_alias(name: str, target: TimeScale):
"""Sets an alias named **name** of TimeScale **target**
Args:
name (str): name of the alias
target (TimeScale): TimeScale that **name** will refer to
"""
import graph_scheduler
name_aliased_time_scales = list(filter(
lambda e: _time_scale_aliases[e] == name,
_time_scale_aliases
))
if len(name_aliased_time_scales) > 0:
raise ValueError(f"'{name}' is already aliased to {name_aliased_time_scales[0]}")
try:
target = getattr(TimeScale, target)
except TypeError:
pass
except AttributeError as e:
raise ValueError(f'Invalid TimeScale {target}') from e
_time_scale_aliases[target] = name
setattr(TimeScale, name, target)
def getter(self):
return getattr(self, _time_scale_to_attr_str(target))
def setter(self, value):
setattr(self, _time_scale_to_attr_str(target), value)
prop = property(getter).setter(setter)
setattr(Time, name.lower(), prop)
setattr(SimpleTime, name.lower(), prop)
# alias name in style of a class name
new_class_segment_name = _time_scale_to_class_str(name)
for cls_name, cls in graph_scheduler.__dict__.copy().items():
# make aliases of conditions that contain a TimeScale name (e.g. AtEnvironmentStateUpdate)
target_class_segment_name = _time_scale_to_class_str(target)
if isinstance(cls, (type, types.ModuleType)):
if isinstance(cls, types.ModuleType):
try:
if _alias_docs_warning_str not in cls.__doc__:
cls.__doc__ = f'{_alias_docs_warning_str}{cls.__doc__}'
except TypeError:
pass
_multi_substitute_docstring(
cls,
{
target.name: name,
target_class_segment_name: new_class_segment_name,
}
)
if target_class_segment_name in cls_name:
new_cls_name = cls_name.replace(
target_class_segment_name,
new_class_segment_name
)
setattr(graph_scheduler.condition, new_cls_name, cls)
setattr(graph_scheduler, new_cls_name, cls)
graph_scheduler.condition.__all__.append(new_cls_name)
graph_scheduler.__all__.append(new_cls_name)
| 5,339,606
|
def process_shared_misc_options(args, include_remote=False):
"""Process shared miscellaneous options in args.
Parse options that the resync-sync, resync-build and resync-explorer scripts use.
"""
if args.checksum:
args.hash.append('md5')
if include_remote:
if args.access_token:
set_url_or_file_open_config('bearer_token', args.access_token)
if args.delay:
if args.delay < 0.0:
raise argparse.ArgumentTypeError("--delay must be non-negative!")
set_url_or_file_open_config('delay', args.delay)
| 5,339,607
|
def find_bounding_boxes(img):
"""
Find bounding boxes for blobs in the picture
    :param img: numpy array 1xWxH, values 0 to 1
:return: bounding boxes of blobs [x0, y0, x1, y1]
"""
img = util.torch_to_cv(img)
img = np.round(img)
img = img.astype(np.uint8)
contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # params copied from tutorial
bounding_boxes = []
for c in contours:
x, y, w, h = cv2.boundingRect(c)
bounding_boxes.append([x, y, x + w, y + h])
return bounding_boxes
| 5,339,608
|
def resolve_day(day: str, next_week: Optional[bool] = False) -> int:
"""Resolves day to index value."""
week = ['monday',
'tuesday',
'wednesday',
'thursday',
'friday',
'saturday',
'sunday']
today = datetime.now()
today_idx = date.weekday(today)
day_idx = week.index(day)
temp_list = list(islice(cycle(week), today_idx, 2 * today_idx + day_idx))
if next_week:
return len(temp_list) - 1
else:
return temp_list.index(day)
| 5,339,609
|
def write_image(image,
path,
bit_depth='float32',
method='OpenImageIO',
**kwargs):
"""
Writes given image at given path using given method.
Parameters
----------
image : array_like
Image data.
path : unicode
Image path.
bit_depth : unicode, optional
**{'float32', 'uint8', 'uint16', 'float16'}**,
Bit depth to write the image at, for the *Imageio* method, the image
data is converted with :func:`colour.io.convert_bit_depth` definition
prior to writing the image.
method : unicode, optional
**{'OpenImageIO', 'Imageio'}**,
Write method, i.e. the image library used for writing images.
Other Parameters
----------------
attributes : array_like, optional
{:func:`colour.io.write_image_OpenImageIO`},
An array of :class:`colour.io.ImageAttribute_Specification` class
instances used to set attributes of the image.
Returns
-------
bool
Definition success.
Notes
-----
- If the given method is *OpenImageIO* but the library is not available
writing will be performed by *Imageio*.
- If the given method is *Imageio*, ``kwargs`` is passed directly to the
wrapped definition.
Examples
--------
Basic image writing:
>>> import os
>>> import colour
>>> path = os.path.join(colour.__path__[0], 'io', 'tests', 'resources',
... 'CMS_Test_Pattern.exr')
>>> image = read_image(path) # doctest: +SKIP
>>> path = os.path.join(colour.__path__[0], 'io', 'tests', 'resources',
... 'CMSTestPattern.tif')
>>> write_image(image, path) # doctest: +SKIP
True
Advanced image writing while setting attributes using *OpenImageIO*:
>>> compression = ImageAttribute_Specification('Compression', 'none')
>>> write_image(image, path, bit_depth='uint8', attributes=[compression])
... # doctest: +SKIP
True
"""
method = validate_method(method, WRITE_IMAGE_METHODS)
if method == 'openimageio': # pragma: no cover
if not is_openimageio_installed():
usage_warning(
'"OpenImageIO" related API features are not available, '
'switching to "Imageio"!')
method = 'Imageio'
function = WRITE_IMAGE_METHODS[method]
if method == 'openimageio': # pragma: no cover
kwargs = filter_kwargs(function, **kwargs)
return function(image, path, bit_depth, **kwargs)
| 5,339,610
|
def set_seed(seed: int):
"""
Set the random seed for modules torch, numpy and random.
:param seed: random seed
"""
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
| 5,339,611
|
def get_gsheet_data():
"""
    Gets all of the data in the specified Google Sheet.
"""
# Get Credentials from JSON
    logging.info('Attempting to read values from the Google Sheet.')
creds = ServiceAccountCredentials.from_json_keyfile_name('TrackCompounds-1306f02bc0b1.json', SCOPES)
logging.info('Authorizing Google API credentials.')
service = build('sheets', 'v4', credentials=creds)
# Call the Sheets API
sheet = service.spreadsheets()
result = sheet.values().get(spreadsheetId=SPREADSHEET_ID,
range=READ_RANGE).execute()
data = result.get('values')
# Turn data into a DataFrame
df = pd.DataFrame(data[1:], columns=data[0])
logging.info('Successfully read G-Sheet data into a DataFrame.')
return df
| 5,339,612
|
def name_tensor(keras_tensor, name):
"""
Add a layer with this ``name`` that does nothing.
    Useful to mark a tensor.
"""
return Activation('linear', name=name)(keras_tensor)
| 5,339,613
|
def pluck(ind, seqs, default=no_default):
""" plucks an element or several elements from each item in a sequence.
``pluck`` maps ``itertoolz.get`` over a sequence and returns one or more
elements of each item in the sequence.
This is equivalent to running `map(curried.get(ind), seqs)`
``ind`` can be either a single string/index or a sequence of
strings/indices.
    ``seqs`` should be a sequence containing sequences or dicts.
e.g.
>>> data = [{'id': 1, 'name': 'Cheese'}, {'id': 2, 'name': 'Pies'}]
>>> list(pluck('name', data))
['Cheese', 'Pies']
>>> list(pluck([0, 1], [[1, 2, 3], [4, 5, 7]]))
[(1, 2), (4, 5)]
See Also:
get
map
"""
if default is no_default:
if isinstance(ind, list):
return map(operator.itemgetter(*ind), seqs)
return map(operator.itemgetter(ind), seqs)
elif isinstance(ind, list):
return (tuple(_get(item, seq, default) for item in ind)
for seq in seqs)
return (_get(ind, seq, default) for seq in seqs)
| 5,339,614
|
def shape_metrics(model):
"""""
Calculates three different shape metrics of the current graph of the model.
Shape metrics: 1. Density 2. Variance of nodal degree 3. Centrality
The calculations are mainly based on the degree statistics of the current
graph
For more information one is referred to the article 'Geographical
influences of an emerging network of gang rivalries'
(Rachel A. Hegemann et al., 2011)
Input:
model = Model object
Output:
Tuple containing the three shape metrics in the order described above.
"""
# Determine total degree, average degree, max degree and density graph
degrees = [degree[1] for degree in model.gr.degree]
total_degree = sum(degrees)
ave_degree = total_degree / model.config.total_gangs
max_degree = max(degrees)
graph_density = nx.density(model.gr)
# Determine variance of nodal degree and centrality
variance_degree, centrality = 0, 0
for degree in degrees:
variance_degree += ((degree - ave_degree) * (degree - ave_degree))
centrality += max_degree - degree
    # Normalize variance of nodal degree and centrality
variance_degree /= model.config.total_gangs
centrality /= ((model.config.total_gangs - 1) *
(model.config.total_gangs - 2))
    # Returns a tuple containing the three statistics
return graph_density, variance_degree, centrality
| 5,339,615
|
def evaluate(model, k=10, seed=1234, evalcv=True, evaltest=False, use_feats=True):
"""
Run experiment
k: number of CV folds
test: whether to evaluate on test set
"""
print 'Preparing data...'
traintext, testtext, labels = load_data()
print 'Computing training skipthoughts...'
trainA = skipthoughts.encode(model, traintext[0], verbose=False)
trainB = skipthoughts.encode(model, traintext[1], verbose=False)
if evalcv:
print 'Running cross-validation...'
C = eval_kfold(trainA, trainB, traintext, labels[0], shuffle=True, k=10, seed=1234, use_feats=use_feats)
if evaltest:
if not evalcv:
C = 4 # Best parameter found from CV (combine-skip with use_feats=True)
print 'Computing testing skipthoughts...'
testA = skipthoughts.encode(model, testtext[0], verbose=False)
testB = skipthoughts.encode(model, testtext[1], verbose=False)
if use_feats:
train_features = np.c_[np.abs(trainA - trainB), trainA * trainB, feats(traintext[0], traintext[1])]
test_features = np.c_[np.abs(testA - testB), testA * testB, feats(testtext[0], testtext[1])]
else:
train_features = np.c_[np.abs(trainA - trainB), trainA * trainB]
test_features = np.c_[np.abs(testA - testB), testA * testB]
print 'Evaluating...'
clf = LogisticRegression(C=C)
clf.fit(train_features, labels[0])
yhat = clf.predict(test_features)
print 'Test accuracy: ' + str(clf.score(test_features, labels[1]))
print 'Test F1: ' + str(f1(labels[1], yhat))
| 5,339,616
|
def calculate_actual_sensitivity_to_removal(jac, weights, moments_cov, params_cov):
"""calculate the actual sensitivity to removal.
The sensitivity measure is calculated for each parameter wrt each moment.
It answers the following question: How much precision would be lost if the kth
moment was excluded from the estimation if "weights" is used as weighting
matrix?
Args:
        jac (np.ndarray or pandas.DataFrame): The jacobian of the simulated
            moments with respect to the parameters.
weights (np.ndarray or pandas.DataFrame): The weighting matrix used for
msm estimation.
moments_cov (numpy.ndarray or pandas.DataFrame): The covariance matrix of the
empirical moments.
params_cov (numpy.ndarray or pandas.DataFrame): The covariance matrix of the
parameter estimates.
Returns:
np.ndarray or pd.DataFrame: Sensitivity measure with shape (n_params, n_moments)
"""
m4 = []
_jac, _weights, _moments_cov, _params_cov, names = process_pandas_arguments(
jac=jac, weights=weights, moments_cov=moments_cov, params_cov=params_cov
)
for k in range(len(_weights)):
weight_tilde_k = np.copy(_weights)
weight_tilde_k[k, :] = 0
weight_tilde_k[:, k] = 0
sigma_tilde_k = cov_robust(_jac, weight_tilde_k, _moments_cov)
m4k = sigma_tilde_k - _params_cov
m4k = m4k.diagonal()
m4.append(m4k)
m4 = np.array(m4).T
params_variances = np.diagonal(_params_cov)
e4 = m4 / params_variances.reshape(-1, 1)
if names:
e4 = pd.DataFrame(e4, index=names.get("params"), columns=names.get("moments"))
return e4
| 5,339,617
|
def escape_html(text: str) -> str:
"""Replaces all angle brackets with HTML entities."""
    return text.replace('<', '&lt;').replace('>', '&gt;')
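# Quick illustration (added, not part of the original snippet): angle brackets become HTML entities.
assert escape_html("<b>bold</b>") == "&lt;b&gt;bold&lt;/b&gt;"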
| 5,339,618
|
def pptest(n):
"""
Simple implementation of Miller-Rabin test for
determining probable primehood.
"""
bases = [random.randrange(2,50000) for _ in range(90)]
# if any of the primes is a factor, we're done
if n<=1: return 0
for b in bases:
if n%b==0: return 0
tests,s = 0,0
m = n-1
# turning (n-1) into (2**s) * m
while not m&1: # while m is even
m >>= 1
s += 1
for b in bases:
tests += 1
isprob = algP(m,s,b,n)
if not isprob: break
if isprob: return (1-(1./(4**tests)))
else: return 0
| 5,339,619
|
def schedule_conv2d_NCHWc(outs):
"""Schedule for conv2d_NCHW[x]c
Parameters
----------
outs : Array of Tensor
The computation graph description of conv2d_NCHWc
in the format of an array of tensors.
Returns
-------
sch : Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
| 5,339,620
|
def calc_E_E_hs_d_t(W_dash_k_d_t, W_dash_s_d_t, W_dash_w_d_t, W_dash_b1_d_t, W_dash_ba1_d_t, W_dash_b2_d_t,
theta_ex_d_Ave_d, L_dashdash_ba2_d_t):
"""1時間当たりの給湯機の消費電力量 (1)
Args:
W_dash_k_d_t(ndarray): 1時間当たりの台所水栓における節湯補正給湯量 (L/h)
W_dash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における節湯補正給湯量 (L/h)
W_dash_w_d_t(ndarray): 1時間当たりの洗面水栓における節湯補正給湯量 (L/h)
W_dash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における節湯補正給湯量 (L/h)
W_dash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における節湯補正給湯量 (L/h)
W_dash_b2_d_t(ndarray): 1時間当たりの浴槽追焚時における節湯補正給湯量 (L/h)
theta_ex_d_Ave_d: 日平均外気温度 (℃)
L_dashdash_ba2_d_t(ndarray): 1時間当たりの浴槽追焚時における太陽熱補正給湯熱負荷 (MJ/h)
Returns:
ndarray: 1時間当たりの給湯機の消費電力量 (kWh/h)
"""
    # Auxiliary electric power consumption during standby and faucet hot water supply (Eq. 2)
E_E_hs_aux1_d_t = get_E_E_hs_aux1_d_t(W_dash_k_d_t, W_dash_s_d_t, W_dash_w_d_t, W_dash_b1_d_t, W_dash_ba1_d_t,
theta_ex_d_Ave_d)
    # Auxiliary electric power consumption while filling the bathtub (Eq. 3)
E_E_hs_aux2_d_t = calc_E_E_hs_aux2_d_t(W_dash_b2_d_t)
    # Auxiliary electric power consumption while keeping the bath warm (Eq. 4)
E_E_hs_aux3_d_t = calc_E_E_hs_aux3_d_t(L_dashdash_ba2_d_t)
    # Hourly electric power consumption of the water heater (Eq. 1)
E_E_hs_d_t = E_E_hs_aux1_d_t + E_E_hs_aux2_d_t + E_E_hs_aux3_d_t
return E_E_hs_d_t
| 5,339,621
|
def keras_quantile_loss(q):
"""Return keras loss for quantile `q`."""
func = functools.partial(_tilted_loss_scalar, q)
    func.__name__ = f'quantile loss, q={q}'
return func
| 5,339,622
|
def writeSolverSettingsToHDF5(
solver: AmiciSolver,
file: Union[str, object],
location: Optional[str] = 'solverSettings'
) -> None:
"""
Convenience wrapper for :fun:`amici.writeSolverSettingsToHDF5`
:param file: hdf5 filename, can also be object created by
:fun:`amici.createOrOpenForWriting`
:param solver: Solver instance from which settings will stored
:param location: location of solver settings in hdf5 file
"""
if isinstance(solver, amici.SolverPtr):
amici.writeSolverSettingsToHDF5(solver.get(), file, location)
else:
amici.writeSolverSettingsToHDF5(solver, file, location)
| 5,339,623
|
def MACD(DF, a, b, c):
"""function to calculate MACD
typical values a = 12; b =26, c =9"""
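# The MACD() snippet above has no body. Below is a minimal sketch (an assumption, not the
# original implementation) of what such a function typically computes: it assumes DF is a
# pandas DataFrame with an "Adj Close" column; the helper name macd_sketch and the column
# name are hypothetical.
def macd_sketch(DF, a=12, b=26, c=9):
    """Return a copy of DF with MACD and signal-line columns added (illustrative sketch)."""
    df = DF.copy()
    df["ma_fast"] = df["Adj Close"].ewm(span=a, min_periods=a).mean()  # fast EMA
    df["ma_slow"] = df["Adj Close"].ewm(span=b, min_periods=b).mean()  # slow EMA
    df["macd"] = df["ma_fast"] - df["ma_slow"]                         # MACD line
    df["signal"] = df["macd"].ewm(span=c, min_periods=c).mean()        # signal line
    return df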
| 5,339,624
|
def _load_template_file() -> Dict:
"""
Read and validate the registration definition template file, located in the
same directory as this source file
Returns
-------
Dict
Contents of the registration definition template file JSON, converted to
a Python dictionary
"""
src_path: str = f"{SCRIPT_PATH}/{TEMPLATE_FILENAME}"
    if not os.path.exists(src_path):
        print("Error: Cannot find registration definition template file")
        return None
with open(src_path) as registration_definition_template_file:
try:
data = registration_definition_template_file.read()
except:
print("Error: Cannot read registration definition template file")
return None
# Reconstruct the dict object
readin = json.loads(data)
if type(readin) != dict:
print("Error: Corrupted registration definition template file")
return None
return readin
| 5,339,625
|
def _reactions_table(reaction: reaction_pb2.Reaction, dataset_id: str) -> Mapping[str, Union[str, bytes, None]]:
"""Adds a Reaction to the 'reactions' table.
Args:
reaction: Reaction proto.
dataset_id: Dataset ID.
Returns:
Dict mapping string column names to values.
"""
values = {
"dataset_id": dataset_id,
"reaction_id": reaction.reaction_id,
"serialized": reaction.SerializeToString().hex(),
}
try:
reaction_smiles = message_helpers.get_reaction_smiles(reaction, generate_if_missing=True)
# Control for REACTION_CXSMILES.
values["reaction_smiles"] = reaction_smiles.split()[0]
except ValueError:
values["reaction_smiles"] = None
if reaction.provenance.doi:
values["doi"] = reaction.provenance.doi
else:
values["doi"] = None
return values
| 5,339,626
|
def get_incidents_for_alert(**kwargs) -> list:
"""
Return List of incidents for alert.
:param kwargs: Contains all required arguments.
:return: Incident List for alert.
"""
incidents: List[Dict[str, Any]] = []
headers = {
'X-FeApi-Token': kwargs['client'].get_api_token(),
'Accept': CONTENT_TYPE_JSON
}
params = {
'start_time': time.strftime(API_SUPPORT_DATE_FORMAT, time.localtime(kwargs['start_time'])),
'duration': '48_hours'
}
if kwargs['malware_type']:
params['malware_type'] = kwargs['malware_type']
# http call
resp = kwargs['client'].http_request(method="GET", url_suffix=URL_SUFFIX['GET_ALERTS'], params=params,
headers=headers)
total_records = resp.get('alertsCount', 0)
if total_records > 0:
if kwargs['replace_alert_url']:
replace_alert_url_key_domain_to_instance_url(resp.get('alert', []), kwargs['instance_url'])
count = kwargs['fetch_count']
for alert in resp.get('alert', []):
# set incident
context_alert = remove_empty_entities(alert)
context_alert['incidentType'] = ALERT_INCIDENT_TYPE
if count >= kwargs['fetch_limit']:
break
occurred_date = dateparser.parse(context_alert.get('occurred', ''))
assert occurred_date is not None
incident = {
'name': context_alert.get('name', ''),
'occurred': occurred_date.strftime(
DATE_FORMAT_WITH_MICROSECOND),
'rawJSON': json.dumps(context_alert)
}
if not kwargs['is_test'] and alert.get('uuid', '') and kwargs['fetch_artifacts']:
set_attachment_file(client=kwargs['client'], incident=incident, uuid=alert.get('uuid', ''),
headers=headers)
remove_nulls_from_dictionary(incident)
incidents.append(incident)
count += 1
return incidents
| 5,339,627
|
def pbar(*args, **kwargs):
"""
Progress bar.
This function is an alias of :func:`dh.thirdparty.tqdm.tqdm()`.
"""
return dh.thirdparty.tqdm.tqdm(*args, **kwargs)
| 5,339,628
|
def calc_driver_mask(n_nodes, driver_nodes: set, device='cpu', dtype=torch.float):
"""
Calculates a binary vector mask over graph nodes with unit value on the drive indeces.
:param n_nodes: numeber of driver nodes in graph
:param driver_nodes: driver node indeces.
:param device: the device of the `torch.Tensor`
:param dtype: the data type of the `torch.Tensor`
:return: the driver mask vector.
"""
driver_mask = torch.zeros(n_nodes, device=device, dtype=dtype)
driver_mask[list(driver_nodes)] = 1
return driver_mask
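# Usage sketch (added): a five-node graph with nodes 0 and 3 as drivers; assumes torch is importable.
import torch

_mask = calc_driver_mask(5, {0, 3})
# _mask -> tensor([1., 0., 0., 1., 0.])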
| 5,339,629
|
def all_fermions(fields: List[Field]) -> bool:
"""Checks if all fields are fermions."""
boolean = True
for f in fields:
boolean = boolean and f.is_fermion
return boolean
| 5,339,630
|
def open_file(name):
"""
Return an open file object.
"""
return open(name, 'r')
| 5,339,631
|
def test_init(mocker, mock_nncli):
"""test nominal initialization"""
nn_obj = nncli.nncli.Nncli(False)
nn_obj.config.get_config.assert_called_once()
nn_obj.ndb.set_update_view.assert_called_once()
assert os.mkdir.call_count == 0
| 5,339,632
|
def _bit_length(n):
"""Return the number of bits necessary to store the number in binary."""
try:
return n.bit_length()
except AttributeError: # pragma: no cover (Python 2.6 only)
import math
return int(math.log(n, 2)) + 1
| 5,339,633
|
def read_dwd_percentile_old(filename):
"""
Read data from .txt file into Iris cube
:param str filename: file to process
:returns: cube
"""
# use header to hard code the final array shapes
longitudes = np.arange(-179.5, 180.5, 1.)
latitudes = np.arange(89.5, -90.5, -1.)
data = np.ma.zeros((latitudes.shape[0], longitudes.shape[0]))
    # read in the data
indata = np.genfromtxt(filename, dtype=(float))
this_lat = []
tl = 0
# process each row, append until have complete latitude band
for row in indata:
this_lat += [row]
if len(this_lat) == longitudes.shape[0]:
# copy into final array and reset
data[tl, :] = this_lat
tl += 1
this_lat = []
# mask the missing values
data = np.ma.masked_where(data <= -999.000, data)
cube = utils.make_iris_cube_2d(data, latitudes, longitudes, "R90p", "%")
return cube
| 5,339,634
|
def assert_allclose(actual: numpy.float64, desired: float, atol: float, err_msg: str):
"""
usage.scipy: 4
"""
...
| 5,339,635
|
def media_post():
"""API call to store new media on the BiBli"""
data = request.get_json()
fname = "%s/%s" % (MUSIC_DIR, data["name"])
with open(fname, "wb") as file:
file.write(base64.decodestring(data["b64"]))
audiofile = MP3(fname)
track = {"file": data["name"], "title": "", "artist": "?"}
tags = audiofile.tags
if tags:
track["artist"] = tags["artist"][0] if "artist" in tags else "?"
track["title"] = tags["title"][0] if "title" in tags else None
if audiofile.info:
seconds = int(audiofile.info.length)
        minutes = seconds // 60
seconds = seconds % 60
track["duration"] = "%s:%02d" % (minutes, seconds)
# make sure there's a title
if not track["title"]:
track["title"] = fname.replace(".mp3", "")
return jsonify({"music": track})
| 5,339,636
|
def obtain_stores_path(options, ensure_existence=True) -> Path:
"""
Gets the store path if present in options or asks the user to input it
if not present between parsed_args.
:param options: the parsed arguments
    :param ensure_existence: whether to abort if the path does not exist
:return: the store path
"""
path = Path(get_option_or_default(options, Options.STORE_PATH, DEFAULT_SECRETS_PATH))
if ensure_existence and not path.exists():
abort(f"Error: path does not exist ({path})")
return path
| 5,339,637
|
def data(interface, obj, _):
"""Communicate the object data. """
kind = xc_type(obj)
if kind in ['C', 'I', 'N', 'R']:
interface.data(obj)
elif kind == 'W':
interface.trans_num(len(obj))
interface.trans_name(obj)
else:
COMMUNICATE_DUMP[type(obj)](data, interface, obj, "")
| 5,339,638
|
def extract_res_from_files(exp_dir_base):
"""Takes a directory (or directories) and searches recursively for
    subdirs that have a test, train, and settings file
(meaning a complete experiment was conducted).
Returns:
A list of dictionaries where each element in the list
is an experiment and the dictionary has the following form:
data_dict = {"train_df": df1, "test_df":df2,
"settings":settings, "path": path}
"""
if isinstance(exp_dir_base, str):
exp_dirs = [exp_dir_base]
elif isinstance(exp_dir_base, list):
exp_dirs = exp_dir_base
else:
raise ValueError("exp_dir_base must be a string or a list")
TEST = "test.csv"
TRAIN = "train.csv"
SETTINGS = "settings.txt"
results = []
for exp_dir_base in exp_dirs:
for path, subdirs, files in os.walk(exp_dir_base):
test, train, settings = None, None, None
for name in files:
if fnmatch(name, TEST):
test = os.path.join(path, name)
elif fnmatch(name, TRAIN):
train = os.path.join(path, name)
elif fnmatch(name, SETTINGS):
settings = os.path.join(path, name)
if settings and not test and not train:
test, train = [], []
for name in files:
if fnmatch(name, "*test.csv"):
test.append(os.path.join(path, name))
elif fnmatch(name, "*train.csv"):
train.append(os.path.join(path, name))
if test and train and settings:
if isinstance(test, list):
dftest = []
for fp in test:
dftest.append(pd.read_csv(fp))
dftrain = []
for fp in train:
dftrain.append(pd.read_csv(fp))
else:
dftest = pd.read_csv(test)
dftrain = pd.read_csv(train)
with open(settings, "rb") as f:
settings = pickle.load(f)
model_data = get_model_specific_data(settings, path)
DA_data, mean_DF, last_df = get_DA_info(path, "mse_DA")
data_dict = {"train_df": dftrain,
"test_df":dftest,
"test_DA_df_final": last_df,
"DA_mean_DF": mean_DF,
"settings":settings,
"path": path,
"model_data": model_data,}
results.append(data_dict)
print("{} experiments conducted".format(len(results)))
sort_res = sorted(results, key = lambda x: x['path'])
return sort_res
| 5,339,639
|
def compute_roc(distrib_noise, distrib_signal):
"""compute ROC given the two distribributions
assuming the distributions are the output of np.histogram
example:
dist_l, _ = np.histogram(acts_l, bins=n_bins, range=histrange)
dist_r, _ = np.histogram(acts_r, bins=n_bins, range=histrange)
tprs, fprs = compute_roc(dist_l, dist_r)
Parameters
----------
distrib_noise : 1d array
the noise distribution
distrib_signal : 1d array
the noise+signal distribution
Returns
-------
1d array, 1d array
the roc curve: true positive rate, and false positive rate
"""
# assert len(distrib_noise) == len(distrib_signal)
# assert np.sum(distrib_noise) == np.sum(distrib_signal)
n_pts = len(distrib_noise)
tpr, fpr = np.zeros(n_pts), np.zeros(n_pts)
# slide the decision boundary from left to right
for b in range(n_pts):
fn, tp = np.sum(distrib_signal[:b]), np.sum(distrib_signal[b:])
tn, fp = np.sum(distrib_noise[:b]), np.sum(distrib_noise[b:])
# calculate TP rate and FP rate
tpr[b] = tp / (tp + fn)
fpr[b] = fp / (tn + fp)
return tpr, fpr
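# Usage sketch (added), following the docstring's own recipe: build two histograms over a
# shared range and compute the ROC curve; assumes numpy is available and uses synthetic data.
import numpy as np

_rng = np.random.RandomState(0)
_acts_noise = _rng.normal(0.0, 1.0, size=1000)
_acts_signal = _rng.normal(1.0, 1.0, size=1000)
_histrange = (-4, 5)
_dist_noise, _ = np.histogram(_acts_noise, bins=50, range=_histrange)
_dist_signal, _ = np.histogram(_acts_signal, bins=50, range=_histrange)
_tprs, _fprs = compute_roc(_dist_noise, _dist_signal)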
| 5,339,640
|
def main(args):
"""
Please run the train and test methods with the correct arguments to train and test the models.
We have both Machine learning models and Transformer models. Machine learning models support two pre-processing types,
one is `tfidf` and the other is `word embeddings`.
    Use the following syntax to train a model with either pre-processing.
    The embedding pre-processing is not supported for the Naive Bayes model: Naive Bayes assumes the data
    has no negative values, but our GloVe embeddings contain negative values.
train_ml_models(model,
train_data=train_data,
model_name=model_name,
pre_processing_fun=(tfidf_process|tokenize_and_transform),
pre_processing="tfidf|embedding")
Example
train_ml_models(naive_bayes, train_data, 'NaiveBayes', pre_processing_fun=tfidf_process, pre_processing='embedding')
"""
data = load_data()
train_data, test_data = create_test_set(data)
train_data = create_folds(train_data)
tfidf_process = TFIDFTokenize()
tfidf_process.fit(train_data[['description', 'title']], train_data["level"])
# Train sklearn models.
if args["model"] in sklearn_models.keys() and \
'pre_process' in args and \
args["pre_process"] == "tfidf":
train_ml_models(sklearn_models[args["model"]], train_data, args["model"],
pre_processing_fun=tfidf_process, pre_processing='tfidf')
elif args["model"] in sklearn_models.keys() and \
'pre_process' in args and \
args["pre_process"] == "embedding":
train_ml_models(sklearn_models[args["model"]], train_data, args["model"],
pre_processing_fun=tokenize_and_transform, pre_processing='embedding')
elif args["model"] in transformer_models.keys():
# Train Transformer models
if args["model"] == "roberta":
roberta_tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
train_transformer(transformer_models[args["model"]],
train_data=train_data,
tokenizer=roberta_tokenizer,
learning_rate=1e-3, epochs=50,
model_name=args["model"])
elif args["model"] == "bert":
distilled_bert_tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-cased')
train_transformer(transformer_models[args["model"]],
train_data=train_data,
tokenizer=distilled_bert_tokenizer,
learning_rate=1e-3, epochs=50,
model_name=args["model"])
elif "tune" in args:
# Hyper parameter tuning
tune(XGBClassifier, train_data, tokenize_and_transform)
elif "test" in args:
test_model(args["model"],
test_data=test_data,
pre_processing_fun=tokenize_and_transform)
| 5,339,641
|
def pmcfg(cfg):
"""
    prints out the core config file to the prompt with a nice staggered look
    for readability
Args:
cfg: dict of dict in a hierarchy of {section, {items, values}}
"""
for sec in cfg.keys():
print(repr(sec))
for item in cfg[sec].keys():
print('\t' + item)
obj = cfg[sec][item]
for att in ['type', 'options', 'default', 'description']:
value = getattr(obj, att)
print('\t\t' + att)
if isinstance(value, list):
out = ", ".join(value)
else:
out = value
print('\t\t\t' + repr(out))
| 5,339,642
|
def zoom(clip, screensize, show_full_height=False):
"""Zooms preferably image clip for clip duration a little
To make slideshow more movable
Parameters
---------
clip
ImageClip on which to work with duration
screensize
Wanted (width, height) tuple
show_full_height
        Should this image be shown in full height. This is useful when 4:3
images are shown in 16:9 video and need to be shown in full.
Otherwise they are shown in full width and top and bottom is cut off.
Returns
------
VideoClip in desired size
"""
    #We need to resize high images differently
if clip.h > clip.w or show_full_height:
clip_resized = (clip.fx(resize, width=screensize[0]*2)
.fx(resize, lambda t : 1+0.02*t)
.set_position(('center', 'center'))
)
clip_composited = CompositeVideoClip([clip_resized]) \
.fx(resize, height=screensize[1])
else:
clip_resized = (clip.fx(resize, height=screensize[1]*2)
.fx(resize, lambda t : 1+0.02*t)
.set_position(('center', 'center'))
)
clip_composited = CompositeVideoClip([clip_resized]) \
.fx(resize, width=screensize[0])
vid = CompositeVideoClip([clip_composited.set_position(('center', 'center'))],
size=screensize)
return vid
| 5,339,643
|
def create(
host_address: str,
topics: typing.Sequence[str]) -> Subscriber:
"""
Create a subscriber.
:param host_address: The server notify_server address
:param topics: The topics to subscribe to.
:return: A Subscriber instance.
"""
return Subscriber(create_subscriber(host_address, topics))
| 5,339,644
|
def solution(n):
"""
Return the product of a,b,c which are Pythagorean Triplet that satisfies
the following:
1. a < b < c
2. a**2 + b**2 = c**2
3. a + b + c = 1000
>>> solution(1000)
31875000
"""
product = -1
d = 0
for a in range(1, n // 3):
"""Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
"""
b = (n * n - 2 * a * n) // (2 * n - 2 * a)
c = n - a - b
if c * c == (a * a + b * b):
d = a * b * c
if d >= product:
product = d
return product
| 5,339,645
|
def python2_binary():
"""Tries to find a python 2 executable."""
# Using [0] instead of .major here to support Python 2.6.
if sys.version_info[0] == 2:
return sys.executable or "python"
else:
return "python2"
| 5,339,646
|
def write_submission_csv(output_dict, blank_submission_file_path, destination_path):
"""
    Write output dictionary in a csv submission file
"""
print "Writing predictions to csv file.. ",
df = pandas.read_csv(blank_submission_file_path)
df["Class"] = df["Class"].astype("float")
for line in range(len(df)):
row_file_name = df.get_value(line, "File")
try:
df = df.set_value(line, "Class", output_dict[row_file_name])
except:
print "File name not in dictionary"
continue
df.to_csv(destination_path, index=False)
print "done."
| 5,339,647
|
def with_metaclass(meta, *bases):
"""A Python 2/3 compatible way of declaring a metaclass.
Taken from `Jinja 2 <https://github.com/mitsuhiko/jinja2/blob/master/jinja2
/_compat.py>`_ via `python-future <http://python-future.org>`_. License:
BSD.
Use it like this::
class MyClass(with_metaclass(MyMetaClass, BaseClass)):
pass
"""
class _Metaclass(meta):
"""Inner class"""
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, attrs):
if this_bases is None:
return type.__new__(cls, name, (), attrs)
return meta(name, bases, attrs)
return _Metaclass(str('temporary_class'), None, {})
| 5,339,648
|
def energy(particles):
"""total kinetic energy up to a constant multiplier"""
return np.sum([particle.size ** 2 * np.linalg.norm(particle.speed) ** 2 for particle in particles])
| 5,339,649
|
def post_adaptation_non_linear_response_compression_matrix(P_2, a, b):
"""
Returns the post adaptation non linear response compression matrix.
Parameters
----------
P_2 : numeric or array_like
Point :math:`P_2`.
a : numeric or array_like
Opponent colour dimension :math:`a`.
b : numeric or array_like
Opponent colour dimension :math:`b`.
Returns
-------
ndarray
Points :math:`P`.
Examples
--------
>>> P_2 = 24.2372054671
>>> a = -0.000624112068243
>>> b = -0.000506270106773
>>> post_adaptation_non_linear_response_compression_matrix(P_2, a, b)
... # doctest: +ELLIPSIS
array([ 7.9463202..., 7.9471152..., 7.9489959...])
"""
P_2 = as_float_array(P_2)
a = as_float_array(a)
b = as_float_array(b)
R_a = (460 * P_2 + 451 * a + 288 * b) / 1403
G_a = (460 * P_2 - 891 * a - 261 * b) / 1403
B_a = (460 * P_2 - 220 * a - 6300 * b) / 1403
RGB_a = tstack([R_a, G_a, B_a])
return RGB_a
| 5,339,650
|
async def submissions_search(self, chan, src, msg):
"""
:name: redditsearch
:hook: cmd
:help: search for posts on reddit.
:args: subreddit:str keywords:list
:aliases: rds
"""
await self.msg(modname, chan, ["searching..."])
        args = msg.split(" ", 1)
        keywords = args[1] if len(args) > 1 else ""
        sub = args[0]
if sub.startswith("r/"):
sub = sub[2:]
posts = reddit.search_by_keywords(keywords, subreddit=sub)
if len(posts) == 0:
await self.msg(modname, chan, ["no results found"])
return
results = []
for result in posts:
# no nsfw please
if result["over_18"]:
continue
formatted = reddit.fmt_post_info(result)
results.append(formatted)
await self.msg(modname, chan, results)
| 5,339,651
|
def forward(network, x):
"""
入力信号を出力に変換する関数
Args:
network: ネットワークのDict
x: Inputの配列
Returns:
出力信号
"""
w1, w2, w3 = network['W1'], network['W2'], network['W3']
b1, b2, b3 = network['B1'], network['B2'], network['B3']
    # layer 1
a1 = np.dot(x, w1) + b1
z1 = sigmoid(a1)
    # layer 2
a2 = np.dot(z1, w2) + b2
z2 = sigmoid(a2)
    # layer 3
a3 = np.dot(z2, w3) + b3
y = identity(a3)
return y
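# Usage sketch (added): a tiny 2-3-2-1 network pushed through forward(). The snippet assumes
# module-level sigmoid() and identity() helpers and numpy as np; minimal versions are defined
# here so the sketch is self-contained.
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def identity(x):
    return x

_network = {
    'W1': np.random.randn(2, 3), 'B1': np.zeros(3),
    'W2': np.random.randn(3, 2), 'B2': np.zeros(2),
    'W3': np.random.randn(2, 1), 'B3': np.zeros(1),
}
_y = forward(_network, np.array([1.0, 0.5]))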
| 5,339,652
|
def join_with_and(words: List[str]) -> str:
"""Joins list of strings with "and" between the last two."""
if len(words) > 2:
return ", ".join(words[:-1]) + ", and " + words[-1]
elif len(words) == 2:
return " and ".join(words)
elif len(words) == 1:
return words[0]
else:
return ""
| 5,339,653
|
def condition_generator(single_sub_data, params_name, duration = 2):
"""Build a bunch to show the relationship between each onset and parameter
Build a bunch for make a design matrix for next analysis. This bunch is for describing the relationship
between each onset and parameter.
Args:
single_sub_data: A pandas DataFrame which contains data for one subject.
It must contains the information about run, onsets, and parameters.
        params_name: A list of names of the parameters which you want to analyze.
            The order of the names will be carried over to the design matrix next.
duration: The duration of a TR.
Returns:
        subject_info: A list of Bunch objects which can be resolved by the SpecifySPMModel interface in nipype.
"""
from nipype.interfaces.base import Bunch
run_num = set(single_sub_data.run)
subject_info = []
for i in run_num:
tmp_table = single_sub_data[single_sub_data.run == i]
tmp_onset = tmp_table.onset.values.tolist()
pmod_names = []
pmod_params = []
pmod_poly = []
for param in params_name:
pmod_params.append(tmp_table[param].values.tolist())
pmod_names.append(param)
pmod_poly.append(1)
tmp_Bunch = Bunch(conditions=["trial_onset_run"+str(i)], onsets=[tmp_onset], durations=[[duration]],
pmod=[Bunch(name = pmod_names, poly = pmod_poly, param = pmod_params)])
subject_info.append(tmp_Bunch)
return subject_info
| 5,339,654
|
def create_pkg(
meta: Dict, fpaths: Iterable[_res_t], basepath: _path_t = "", infer=True
):
"""Create a datapackage from metadata and resources.
If ``resources`` point to files that exist, their schema are inferred and
added to the package. If ``basepath`` is a non empty string, it is treated
as the parent directory, and all resource file paths are checked relative
to it.
Parameters
----------
meta : Dict
A dictionary with package metadata.
fpaths : Iterable[Union[str, Path, Dict]]
An iterator over different resources. Resources are paths to files,
relative to ``basepath``.
basepath : str (default: empty string)
Directory where the package files are located
infer : bool (default: True)
Whether to infer resource schema
Returns
-------
Package
A datapackage with inferred schema for all the package resources
"""
# for an interesting discussion on type hints with unions, see:
# https://stackoverflow.com/q/60235477/289784
# TODO: filter out and handle non-tabular (custom) data
existing = glom(meta.get("resources", []), Iter("path").map(Path).all())
basepath = basepath if basepath else getattr(meta, "basepath", basepath)
pkg = Package(resolve_licenses(meta), basepath=str(basepath))
def keep(res: _path_t) -> bool:
if Path(res) in existing:
return False
full_path = Path(basepath) / res
if not full_path.exists():
logger.warning(f"{full_path}: skipped, doesn't exist")
return False
return True
for res in fpaths:
spec = res if isinstance(res, dict) else {"path": res}
if not keep(spec["path"]):
continue
# NOTE: noop when Resource
_res = resource_(spec, basepath=basepath, infer=infer)
pkg.add_resource(_res)
return _ensure_posix(pkg)
| 5,339,655
|
def one_hot(
encoding_size: int, mapping_fn: Callable[[Any], int] = None, dtype="bool"
) -> DatasetTransformFn:
"""Transform data into a one-hot encoded label.
Arguments:
encoding_size {int} -- The size of the encoding
mapping_fn {Callable[[Any], int]} -- A function transforming the input data to an integer label. If not specified, labels are automatically inferred from the data.
Returns:
DatasetTransformFn -- A function to be passed to the Dataset.transform()
"""
mem, maxcount = {}, -1
def auto_label(x: Any) -> int:
nonlocal mem, maxcount, encoding_size
h = hash(str(x))
if not h in mem.keys():
maxcount += 1
if maxcount >= encoding_size:
raise ValueError(
"More unique labels found than were specified by the encoding size ({} given)".format(
encoding_size
)
)
mem[h] = maxcount
return mem[h]
label_fn = mapping_fn or auto_label
def encode(x):
nonlocal encoding_size, dtype, label_fn
o = np.zeros(encoding_size, dtype=dtype)
o[label_fn(x)] = True
return o
return _dataset_element_transforming(fn=encode)
| 5,339,656
|
def test_event_manager_installed():
"""Test that EventManager is installed on the Flask app"""
app = create_ctfd()
assert type(app.events_manager) == EventManager
destroy_ctfd(app)
| 5,339,657
|
def test_hash_evoked():
"""Test evoked hashing
"""
ave = read_evokeds(fname, 0)
ave_2 = read_evokeds(fname, 0)
assert_equal(hash(ave), hash(ave_2))
# do NOT use assert_equal here, failing output is terrible
assert_true(pickle.dumps(ave) == pickle.dumps(ave_2))
ave_2.data[0, 0] -= 1
assert_not_equal(hash(ave), hash(ave_2))
| 5,339,658
|
def compute_perplexity(result):
"""Compute and add the perplexity to the LogReport.
:param dict result: The current observations
"""
# Routine to rewrite the result dictionary of LogReport to add perplexity values
result["perplexity"] = np.exp(result["main/nll"] / result["main/count"])
if "validation/main/nll" in result:
result["val_perplexity"] = np.exp(
result["validation/main/nll"] / result["validation/main/count"]
)
| 5,339,659
|
def is_url_relative(url):
"""
True if a URL is relative, False otherwise.
"""
    return url.startswith("/") and not url.startswith("//")
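# Quick illustration (added): relative vs. protocol-relative vs. absolute URLs.
assert is_url_relative("/docs/index.html")
assert not is_url_relative("//cdn.example.com/app.js")
assert not is_url_relative("https://example.com/")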
| 5,339,660
|
def distance_matrix(lats, lons):
"""Compute distance matrix using great-circle distance formula
https://en.wikipedia.org/wiki/Great-circle_distance#Formulae
Parameters
----------
lats : array
Latitudes
lons : array
Longitudes
Returns
-------
dists : matrix
Entry `(i, j)` shows the great-circle distance between
point `i` and `j`, i.e. distance between `(lats[i], lons[i])`
and `(lats[j], lons[j])`.
"""
R = 6372795.477598
lats = np.array(lats)
lons = np.array(lons)
assert len(lats) == len(lons), "lats and lons should be of the same size"
assert not any(np.isnan(lats)), "nan in lats"
assert not any(np.isnan(lons)), "nan in lons"
# convert degree to radian
lats = lats * np.pi / 180.0
lons = lons * np.pi / 180.0
sins = np.sin(lats)
sin_matrix = sins.reshape(-1, 1) @ sins.reshape(1, -1)
coss = np.cos(lats)
cos_matrix = coss.reshape(-1, 1) @ coss.reshape(1, -1)
lons_matrix = lons * np.ones((len(lons), len(lons)))
lons_diff = lons_matrix - lons_matrix.T
lons_diff = np.cos(lons_diff)
# TODO: make this function more efficient
dists = R * np.arccos(sin_matrix + cos_matrix * lons_diff)
dists[np.isnan(dists)] = 0
return dists
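# Usage sketch (added): pairwise great-circle distances between three cities
# (approximate, made-up coordinates); assumes numpy is available.
import numpy as np

_lats = [52.52, 48.86, 40.71]   # roughly Berlin, Paris, New York
_lons = [13.40, 2.35, -74.01]
_dists = distance_matrix(_lats, _lons)  # 3x3 symmetric matrix of distances in metres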
| 5,339,661
|
def _split_link_ends(link_ends):
"""
Examples
--------
>>> from landlab.grid.unstructured.links import _split_link_ends
>>> _split_link_ends(((0, 1, 2), (3, 4, 5)))
(array([0, 1, 2]), array([3, 4, 5]))
>>> _split_link_ends([(0, 3), (1, 4), (2, 5)])
(array([0, 1, 2]), array([3, 4, 5]))
>>> _split_link_ends((0, 3))
(array([0]), array([3]))
"""
    links = np.array(list(link_ends), ndmin=2, dtype=int)
if len(links) != 2:
links = links.transpose()
if links.size == 0:
        return (np.array([], dtype=int), np.array([], dtype=int))
else:
return links[0], links[1]
| 5,339,662
|
def test_inpschema_dict(data_regression, schema_version):
"""
Test the produced inputschema dicts
"""
from masci_tools.io.parsers.fleur.fleur_schema import InputSchemaDict
inputschema = InputSchemaDict.fromVersion(version=schema_version)
data_regression.check(clean_for_reg_dump(inputschema.get_unlocked()))
| 5,339,663
|
def dijkstra(vertex_count: int, source: int, edges):
"""Uses Dijkstra's algorithm to find the shortest path in a graph.
Args:
vertex_count: The number of vertices.
source : Vertex number (0-indexed).
edges : List of (cost, edge) (0-indexed).
Returns:
costs : List of the shortest distance.
parents: List of parent vertices.
Landau notation: O(|Edges|log|Vertices|).
See:
https://atcoder.jp/contests/abc191/submissions/19964078
https://atcoder.jp/contests/abc191/submissions/19966232
"""
from heapq import heappop, heappush
hq = [(0, source)] # weight, vertex number (0-indexed)
costs = [float("inf") for _ in range(vertex_count)]
costs[source] = 0
pending = -1
parents = [pending for _ in range(vertex_count)]
while hq:
cost, vertex = heappop(hq)
if cost > costs[vertex]:
continue
for weight, edge in edges[vertex]:
new_cost = cost + weight
if new_cost < costs[edge]:
costs[edge] = new_cost
parents[edge] = vertex
heappush(hq, (new_cost, edge))
return costs, parents
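# Usage sketch (added): shortest paths on a small directed graph with 4 vertices.
# edges[v] holds (weight, neighbour) pairs, matching what the function expects.
_edges = [
    [(1, 1), (4, 2)],   # from vertex 0
    [(2, 2), (6, 3)],   # from vertex 1
    [(3, 3)],           # from vertex 2
    [],                 # from vertex 3
]
_costs, _parents = dijkstra(4, 0, _edges)
# _costs == [0, 1, 3, 6] and _parents == [-1, 0, 1, 2]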
| 5,339,664
|
def save_canvas_images(images, names):
"""
Saves the canvas images
"""
for image, name in zip(images, names):
cv2.imwrite(name, image)
| 5,339,665
|
def save_picture(form_picture):
"""
function for saving the path to the profile picture
"""
app = create_app(config_name=os.getenv('APP_SETTINGS'))
# random hex to be usedin storing the file name to avoid clashes
random_hex = secrets.token_hex(8)
    # splitext separates the file extension from the rest of the filename
    _, pic_ext = os.path.splitext(form_picture.filename)
# pic_fn = picture filename which is a concatanation of the filename(hex name) and file extension
pic_fn = random_hex + pic_ext
# path to picture from the root to the profile_pics folder
pic_path = os.path.join(app.root_path, 'static/profile_pics', pic_fn)
output_size = (512, 512)
img = Image.open(form_picture)
img.thumbnail(output_size)
img.save(pic_path) # save the picture path to the file system
return pic_fn
| 5,339,666
|
def plot_labels(labels, lattice=None, coords_are_cartesian=False, ax=None, **kwargs):
"""
Adds labels to a matplotlib Axes
Args:
labels: dict containing the label as a key and the coordinates as value.
lattice: Lattice object used to convert from reciprocal to Cartesian coordinates
coords_are_cartesian: Set to True if you are providing.
coordinates in Cartesian coordinates. Defaults to False.
Requires lattice if False.
ax: matplotlib :class:`Axes` or None if a new figure should be created.
kwargs: kwargs passed to the matplotlib function 'text'. Color defaults to blue
and size to 25.
Returns:
matplotlib figure and matplotlib ax
"""
ax, fig, plt = get_ax3d_fig_plt(ax)
if "color" not in kwargs:
kwargs["color"] = "b"
if "size" not in kwargs:
kwargs["size"] = 25
for k, coords in labels.items():
label = k
if k.startswith("\\") or k.find("_") != -1:
label = "$" + k + "$"
off = 0.01
if coords_are_cartesian:
coords = np.array(coords)
else:
if lattice is None:
raise ValueError("coords_are_cartesian False requires the lattice")
coords = lattice.get_cartesian_coords(coords)
ax.text(*(coords + off), s=label, **kwargs)
return fig, ax
| 5,339,667
|
def test_nov_sample_problem():
"""Test the sample problem given in the Novendstern correlation paper;
parameters are all calculated and shown in the paper, just using them
to demonstrate that I get the same result with the implemented corr."""
# Dummy class to mock DASSH Coolant, Subchannel, RegionRodded objects
class Dummy(object):
def __init__(self, **kwargs):
for k in kwargs.keys():
setattr(self, k, kwargs[k])
# Dummy Coolant object
coolant_properties = {
'viscosity': 0.677 * 0.00041337887, # lb/hrft --> kg/m-s
'density': 53.5 * 16.0185} # lb/ft3 --> kg/m3
coolant = Dummy(**coolant_properties)
# Dummy Subchannel object
subchannel = Dummy(**{'n_sc': {'coolant': {'interior': 384,
'edge': 48,
'corner': 6,
'total': 438}}})
# Dummy Region object
fftf = {
'n_ring': 9,
'n_pin': 217,
'duct_ftf': [[4.335 * 2.54 / 100, 4.835 * 2.54 / 100]],
'pin_diameter': 0.23 * 2.54 / 100,
'pin_pitch': 0.2879 * 2.54 / 100,
'wire_diameter': 0.056 * 2.54 / 100,
'wire_pitch': 12 * 2.54 / 100,
'coolant': coolant,
'subchannel': subchannel,
'params': {'area': np.array([0.0139 * 2.54 * 2.54 / 100 / 100,
0.0278 * 2.54 * 2.54 / 100 / 100,
0.0099 * 2.54 * 2.54 / 100 / 100]),
'de': np.array([0.124 * 2.54 / 100,
0.151 * 2.54 / 100,
0.114 * 2.54 / 100])},
'bundle_params': {'area': 6.724 * 2.54 * 2.54 / 100 / 100,
'de': 0.128 * 2.54 / 100},
'int_flow_rate': 183000 * 0.000125998 # lb/hr --> kg/s
}
asm = Dummy(**fftf)
# Calculate the necessary coolant flow parameters: velocity, Re; then
# assign to the dummy assembly
v_tot = asm.int_flow_rate / asm.coolant.density / asm.bundle_params['area']
Re = (asm.coolant.density
* v_tot
* asm.bundle_params['de']
/ asm.coolant.viscosity)
asm.coolant_int_params = {'Re': Re, 'vel': v_tot}
# Calculate friction factor, use to determine pressure drop / L
ff = dassh.correlations.friction_nov.calculate_bundle_friction_factor(asm)
dp = ff * asm.coolant.density * v_tot**2 / 2 / asm.bundle_params['de']
ans = 4.64 * 6894.76 / 0.3048
diff = ans - dp
rel_diff = diff / ans
assert rel_diff < 0.002
| 5,339,668
|
def contains(poly0, poly1):
""" Does poly0 contain poly1?
As an initial implementation, returns True if any vertex of poly1 is within
poly0.
"""
# check for bounding box overlap
bb0 = (min(p[0] for p in poly0), min(p[1] for p in poly0),
max(p[0] for p in poly0), max(p[1] for p in poly0))
bb1 = (min(p[0] for p in poly1), min(p[1] for p in poly1),
max(p[0] for p in poly1), max(p[1] for p in poly1))
if ((bb0[0] > bb1[2])
or (bb0[2] < bb1[0])
or (bb0[1] > bb1[3])
or (bb0[3] < bb1[1])):
return False
# check each vertex
def _isleft(p, p0, p1):
return ((p1[0]-p0[0])*(p[1]-p0[1]) - (p[0]-p0[0])*(p1[1]-p0[1])) > 0
for p in poly1:
wn = 0
for i in range(len(poly0)-1):
p0 = poly0[i]
p1 = poly0[i+1]
if p0[1] <= p[1] < p1[1]: # upward crossing
if _isleft(p, p0, p1):
wn += 1
elif p0[1] >= p[1] > p1[1]:
if not _isleft(p, p0, p1):
wn -= 1
if wn != 0:
return True
return False
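# Quick self-check of the winding-number test above (added for illustration,
# not part of the original source). Polygons are passed as closed rings,
# i.e. the first vertex is repeated at the end, which is what the edge loop
# in contains() iterates over.
outer_ring = [(0, 0), (4, 0), (4, 4), (0, 4), (0, 0)]
inner_ring = [(1, 1), (2, 1), (2, 2), (1, 1)]
assert contains(outer_ring, inner_ring)      # a vertex of the triangle lies in the square
assert not contains(inner_ring, outer_ring)  # no square vertex lies in the triangle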
| 5,339,669
|
def get_ids_in_annotations(scene, frame, quality):
"""
Returns a set of all ids found in annotations.
"""
annotations_path = os.path.join(scene, '%sPose3d_stage1' % quality,
'body3DScene_%s.json' % frame)
if not os.path.exists(annotations_path):
return set()
with open(annotations_path, 'r') as f:
annots = json.load(f)
return set([b['id'] for b in annots['bodies']])
| 5,339,670
|
def new_halberd(game_state):
"""
    A composite component representing a Halberd item.
"""
c = Composite()
set_item_components(c, game_state)
set_melee_weapon_component(c)
c.set_child(Description("Halberd",
"A long stick with a with an axe-head at one end."
"It's a useful weapon when you want to keep danger at bay."))
c.set_child(GraphicChar(None, colors.GRAY, icon.HALBERD))
c.set_child(DataPoint(DataTypes.WEIGHT, 8))
c.set_child(accuracy_item_stat(10))
c.set_child(damage_item_stat(1, 5))
c.set_child(CritChanceBonusEffect(0.1))
c.set_child(crit_multiplier_item_stat(2))
c.set_child(DefenciveAttackEffect(0.75))
c.set_child(OffenciveAttackEffect(0.20))
return c
| 5,339,671
|
def get_regional_services(service_list: List[AWSService] = None) -> List[AWSService]:
"""List all services which are tied to specific regions."""
services = service_list or get_services()
return [s for s in services if s.is_regional]
| 5,339,672
|
def request_master(msys, mode=MasterMode.NORMAL,
timeout=CONF.pypowervm_job_request_timeout):
"""Request master mode for the provided Managed System.
:param msys: Managed System wrapper requesting master mode
:param mode: The requested master mode type.
There are 2 options:
MasterMode.NORMAL ("norm"): default
MasterMode.TEMP ("temp"): when released, the original
master is immediately restored
:param timeout: maximum number of seconds for job to complete
"""
resp = msys.adapter.read(ms.System.schema_type, msys.uuid,
suffix_type=c.SUFFIX_TYPE_DO,
suffix_parm=_SUFFIX_PARM_REQUEST_MASTER)
job_wrapper = job.Job.wrap(resp.entry)
job_parms = [job_wrapper.create_job_parameter(CO_MGMT_MASTER_STATUS,
mode)]
job_wrapper.run_job(msys.uuid, job_parms=job_parms, timeout=timeout)
| 5,339,673
|
def _ReapUntilProcessExits(monitored_pid):
"""Reap processes until |monitored_pid| exits, then return its exit status.
This will also reap any other processes ready to be reaped immediately after
|monitored_pid| is reaped.
"""
pid_status = None
options = 0
while True:
try:
(pid, status, _) = os.wait3(options)
# Capture status of monitored_pid so we can return it.
if pid == monitored_pid:
pid_status = status
# Switch to nohang so we can churn through the zombies w/out getting
# stuck on live orphaned processes.
options = os.WNOHANG
# There may be some more child processes still running, but none of them
# have exited/finished. Don't wait for those as we'll throw an error in
# the caller.
if pid_status is not None and pid == 0 and status == 0:
break
except OSError as e:
if e.errno == errno.ECHILD:
break
elif e.errno != errno.EINTR:
raise
return pid_status
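# Minimal POSIX-only demonstration (added for illustration, not part of the
# original source): fork a child that exits with a known status and recover
# that status through the reaper above. Relies only on the os module, which
# the function already requires.
if __name__ == '__main__':
    child_pid = os.fork()
    if child_pid == 0:
        os._exit(7)  # child exits immediately with status 7
    exit_status = _ReapUntilProcessExits(child_pid)
    print(os.WEXITSTATUS(exit_status))  # prints 7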
| 5,339,674
|
def lookup_entry(override_values):
"""Retrieves Data Catalog entry for the given Google Cloud Platform resource."""
# [START datacatalog_lookup_dataset]
# [START data_catalog_lookup_entry]
from google.cloud import datacatalog_v1
datacatalog = datacatalog_v1.DataCatalogClient()
bigquery_project_id = "my_bigquery_project"
dataset_id = "my_dataset"
table_id = "my_table"
pubsub_project_id = "my_pubsub_project"
topic_id = "my_topic"
# [END data_catalog_lookup_entry]
# To facilitate testing, we replace values with alternatives
# provided by the testing harness.
bigquery_project_id = override_values.get(
"bigquery_project_id", bigquery_project_id
)
dataset_id = override_values.get("dataset_id", dataset_id)
table_id = override_values.get("table_id", table_id)
pubsub_project_id = override_values.get("pubsub_project_id", pubsub_project_id)
topic_id = override_values.get("topic_id", topic_id)
# [START data_catalog_lookup_entry]
# BigQuery Dataset via linked_resource
resource_name = f"//bigquery.googleapis.com/projects/{bigquery_project_id}/datasets/{dataset_id}"
entry = datacatalog.lookup_entry(request={"linked_resource": resource_name})
print(
f"Retrieved entry {entry.name} for BigQuery Dataset resource {entry.linked_resource}"
)
# BigQuery Dataset via sql_resource
sql_resource = f"bigquery.dataset.`{bigquery_project_id}`.`{dataset_id}`"
entry = datacatalog.lookup_entry(request={"sql_resource": sql_resource})
print(
f"Retrieved entry {entry.name} for BigQuery Dataset resource {entry.linked_resource}"
)
# BigQuery Table via linked_resource
resource_name = (
f"//bigquery.googleapis.com/projects/{bigquery_project_id}/datasets/{dataset_id}"
f"/tables/{table_id}"
)
entry = datacatalog.lookup_entry(request={"linked_resource": resource_name})
print(f"Retrieved entry {entry.name} for BigQuery Table {entry.linked_resource}")
# BigQuery Table via sql_resource
sql_resource = f"bigquery.table.`{bigquery_project_id}`.`{dataset_id}`.`{table_id}`"
entry = datacatalog.lookup_entry(request={"sql_resource": sql_resource})
print(
f"Retrieved entry {entry.name} for BigQuery Table resource {entry.linked_resource}"
)
# Pub/Sub Topic via linked_resource
resource_name = (
f"//pubsub.googleapis.com/projects/{pubsub_project_id}/topics/{topic_id}"
)
entry = datacatalog.lookup_entry(request={"linked_resource": resource_name})
print(
f"Retrieved entry {entry.name} for Pub/Sub Topic resource {entry.linked_resource}"
)
# Pub/Sub Topic via sql_resource
sql_resource = f"pubsub.topic.`{pubsub_project_id}`.`{topic_id}`"
entry = datacatalog.lookup_entry(request={"sql_resource": sql_resource})
print(
f"Retrieved entry {entry.name} for Pub/Sub Topic resource {entry.linked_resource}"
)
# [END data_catalog_lookup_entry]
# [END datacatalog_lookup_dataset]
| 5,339,675
|
def f_setup_config(v_config_filename):
"""This function read the configuration file"""
df_conf_file = pd.read_csv(v_config_filename, delimiter="|", header=0)
api_key = df_conf_file[df_conf_file.CONFIG_VAR == 'API_KEY']['VALUE'].values[0]
data_dir = df_conf_file[df_conf_file.CONFIG_VAR == 'DATA_DIR']['VALUE'].values[0]
json_log_dir = df_conf_file[df_conf_file.CONFIG_VAR == 'JSON_DIR']['VALUE'].values[0]
gcs_bucket = df_conf_file[df_conf_file.CONFIG_VAR == 'GCP_BUCKET']['VALUE'].values[0]
# gcs_service_account_key = df_conf_file[df_conf_file.CONFIG_VAR == 'GCP_SERVICE_ACOUNT_KEY']['VALUE'].values[0]
# aws_key = df_conf_file[df_conf_file.CONFIG_VAR == 'AWS_ACCESS_KEY']['VALUE'].values[0]
# aws_secret_key = df_conf_file[df_conf_file.CONFIG_VAR == 'AWS_SECRET_ASSES_KEY']['VALUE'].values[0]
aws_s3 = df_conf_file[df_conf_file.CONFIG_VAR == 'AWS_S3_BUCKET']['VALUE'].values[0]
export_csv = df_conf_file[df_conf_file.CONFIG_VAR == 'EXPORT_CSV']['VALUE'].values[0]
cleanup_days = df_conf_file[df_conf_file.CONFIG_VAR == 'CLEANUP_DAYS']['VALUE'].values[0]
# return api_key, gcs_bucket, gcs_service_account_key, aws_key, aws_secret_key, \
# aws_s3, data_dir, json_log_dir, export_csv, cleanup_days
return api_key, gcs_bucket, aws_s3, data_dir, json_log_dir, export_csv, cleanup_days
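# Expected layout of the pipe-delimited configuration file (illustrative
# values only; the variable names mirror the CONFIG_VAR lookups above):
#
#     CONFIG_VAR|VALUE
#     API_KEY|abc123
#     DATA_DIR|/data
#     JSON_DIR|/logs/json
#     GCP_BUCKET|my-gcs-bucket
#     AWS_S3_BUCKET|my-s3-bucket
#     EXPORT_CSV|Y
#     CLEANUP_DAYS|30
#
#     api_key, gcs_bucket, aws_s3, data_dir, json_log_dir, export_csv, cleanup_days = \
#         f_setup_config('config.cfg')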
| 5,339,676
|
def choose_username(email):
"""
Chooses a unique username for the provided user.
    Sets the username to the email parameter unmodified if
possible, otherwise adds a numerical suffix to the email.
"""
def get_suffix(number):
return "" if number == 1 else "_"+str(number).zfill(3)
user_model = get_user_model()
num = 1
while user_model.objects.filter(username=email+get_suffix(num)).exists():
num += 1
return email + get_suffix(num)
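# Behaviour sketch (added for illustration; Django's get_user_model is
# assumed to be imported, as the function above already requires):
#
#     choose_username('user@example.com')
#     # -> 'user@example.com' if free,
#     #    otherwise 'user@example.com_002', 'user@example.com_003', ...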
| 5,339,677
|
def lms2rgb(image):
"""
    Convert an array of pixels from the LMS colorspace to the RGB colorspace. This function assumes
    that each pixel in the array is a set of LMS values.
:param image: An np.ndarray containing the image data
:return: An np.ndarray containing the transformed image data
"""
return np.clip(apply_matrix_to_image(lms_matrix_inverse, image), 0.0, 1.0)
| 5,339,678
|
def __apply_to_property_set (f, property_set):
""" Transform property_set by applying f to each component property.
"""
properties = feature.split (property_set)
return '/'.join (f (properties))
| 5,339,679
|
def make_boxplot(ratios, facecolor, position):
"""Add box and whisker plot of the 300 overconfidence ratios.
Args:
ratios ([300] numpy array): overconfidence ratios on
300 points in middle segment of diagonal line.
facecolor (string): color of boxplot bar
position (float): horizontal location of boxplot
"""
boxplot = plt.boxplot(ratios,
whis=[0, 100], # whiskers at min and max of data
positions=[position],
patch_artist=True)
boxplot['boxes'][0].set_facecolor(facecolor)
boxplot['medians'][0].set_color('k')
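# Illustrative call (not part of the original source); assumes matplotlib is
# imported as plt, as the function above already requires, and substitutes
# synthetic lognormal ratios for the real overconfidence data.
import numpy as np
import matplotlib.pyplot as plt
synthetic_ratios = np.random.default_rng(0).lognormal(mean=0.0, sigma=0.3, size=300)
make_boxplot(synthetic_ratios, facecolor='lightblue', position=1.0)
plt.xlim(0.5, 1.5)
plt.savefig('overconfidence_boxplot.png')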
| 5,339,680
|
def _zip_index(job_context: Dict) -> Dict:
"""Zips the index directory into a single .tar.gz file.
This makes uploading and retrieving the index easier since it will
only be a single file along with compressing the size of the file
during storage.
"""
temp_post_path = job_context["gtf_file"].get_temp_post_path(job_context["job_dir_prefix"])
try:
with tarfile.open(temp_post_path, "w:gz") as tar:
tar.add(job_context["output_dir"],
arcname=os.path.basename(job_context["output_dir"]))
    except Exception:
logger.exception("Exception caught while zipping index directory %s",
temp_post_path,
processor_job=job_context["job_id"],
batch=job_context["batches"][0].id)
job_context["gtf_file"].remove_temp_directory(job_context["job_dir_prefix"])
failure_template = "Exception caught while zipping index directory {}"
job_context["job"].failure_reason = failure_template.format(temp_post_path)
job_context["success"] = False
return job_context
job_context["files_to_upload"] = [job_context["gtf_file"]]
job_context["success"] = True
return job_context
| 5,339,681
|
def _return_feature_statistics(feature_number: int, feature_value: float, names: list):
"""
Arguments:
feature_number (int) -- number of the feature
feature_value (float) -- value of the feature (used to compute color)
names (list) -- list of feature names
    Returns:
        percentile score (int), hex color string, and the category of the feature
"""
percentile_score = int(
stats.percentileofscore(TRAIN_DATA.T[feature_number], feature_value)
)
color = matplotlib.colors.to_hex(MAPPABLE.to_rgba(percentile_score))
# ToDo: Maybe not only return the category but also the color which we used in the article
return percentile_score, color, feature_cat_dict[names[feature_number]]
| 5,339,682
|
def calc_eta_FC(Q_load_W, Q_design_W, phi_threshold, approach_call):
"""
Efficiency for operation of a SOFC (based on LHV of NG) including all auxiliary losses
Valid for Q_load in range of 1-10 [kW_el]
Modeled after:
- **Approach A (NREL Approach)**:
http://energy.gov/eere/fuelcells/distributedstationary-fuel-cell-systems
and
NREL : p.5 of [M. Zolot et al., 2004]_
- **Approach B (Empiric Approach)**: [Iain Staffell]_
:type Q_load_W : float
:param Q_load_W: Load at each time step
:type Q_design_W : float
:param Q_design_W: Design Load of FC
:type phi_threshold : float
:param phi_threshold: where Maximum Efficiency is reached, used for Approach A
:type approach_call : string
    :param approach_call: choose "A" or "B": A = NREL-Approach, B = Empiric Approach
:rtype eta_el : float
:returns eta_el: electric efficiency of FC (Lower Heating Value), in abs. numbers
    :rtype eta_therm : float
    :returns eta_therm: thermal efficiency of FC (Lower Heating Value), in abs. numbers
..[M. Zolot et al., 2004] M. Zolot et al., Analysis of Fuel Cell Hybridization and Implications for Energy Storage
Devices, NREL, 4th International Advanced Automotive Battery.
http://www.nrel.gov/vehiclesandfuels/energystorage/pdfs/36169.pdf
..[Iain Staffell, 2009] Iain Staffell, For Domestic Heat and Power: Are They Worth It?, PhD Thesis, Birmingham:
University of Birmingham. http://etheses.bham.ac.uk/641/1/Staffell10PhD.pdf
"""
phi = 0.0
## Approach A - NREL Approach
if approach_call == "A":
phi = float(Q_load_W) / float(Q_design_W)
eta_max = 0.425 # from energy.gov
if phi >= phi_threshold: # from NREL-Shape
eta_el = eta_max - ((1 / 6.0 * eta_max) / (1.0 - phi_threshold)) * abs(phi - phi_threshold)
if phi < phi_threshold:
if phi <= 118 / 520.0 * phi_threshold:
eta_el = eta_max * 2 / 3 * (phi / (phi_threshold * 118 / 520.0))
if phi < 0.5 * phi_threshold and phi >= 118 / 520.0 * phi_threshold:
eta_el = eta_max * 2 / 3.0 + \
eta_max * 0.25 * (phi - phi_threshold * 118 / 520.0) / (phi_threshold * (0.5 - 118 / 520.0))
            if phi >= 0.5 * phi_threshold and phi < phi_threshold:
eta_el = eta_max * (2 / 3.0 + 0.25) + \
1 / 12.0 * eta_max * (phi - phi_threshold * 0.5) / (phi_threshold * (1 - 0.5))
eta_therm_max = 0.45 # constant, after energy.gov
if phi < phi_threshold:
eta_therm = 0.5 * eta_therm_max * (phi / phi_threshold)
else:
eta_therm = 0.5 * eta_therm_max * (1 + eta_therm_max * ((phi - phi_threshold) / (1 - phi_threshold)))
## Approach B - Empiric Approach
if approach_call == "B":
if Q_design_W > 0:
phi = float(Q_load_W) / float(Q_design_W)
else:
phi = 0
eta_el_max = 0.39
eta_therm_max = 0.58 # * 1.11 as this source gives eff. of HHV
eta_el_score = -0.220 + 5.277 * phi - 9.127 * phi ** 2 + 7.172 * phi ** 3 - 2.103 * phi ** 4
eta_therm_score = 0.9 - 0.07 * phi + 0.17 * phi ** 2
eta_el = eta_el_max * eta_el_score
eta_therm = eta_therm_max * eta_therm_score
if phi < 0.2:
eta_el = 0
return eta_el, eta_therm
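# Example call (added for illustration): part-load efficiencies of a
# 10 kW_el fuel cell running at 5 kW_el with the empiric approach "B";
# phi_threshold is only used by approach "A".
eta_el_demo, eta_therm_demo = calc_eta_FC(
    Q_load_W=5000.0, Q_design_W=10000.0, phi_threshold=0.5, approach_call="B")
print(eta_el_demo, eta_therm_demo)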
| 5,339,683
|
def test_storage_profiling():
"""
This test tests the saving and loading of profiles
into HDF5 through pypesto.store.ProfileResultHDF5Writer
and pypesto.store.ProfileResultHDF5Reader. Tests all entries
aside from times and message.
"""
objective = pypesto.Objective(
fun=so.rosen, grad=so.rosen_der, hess=so.rosen_hess
)
dim_full = 10
lb = -5 * np.ones((dim_full, 1))
ub = 5 * np.ones((dim_full, 1))
n_starts = 5
startpoints = pypesto.startpoint.latin_hypercube(
n_starts=n_starts, lb=lb, ub=ub
)
problem = pypesto.Problem(
objective=objective, lb=lb, ub=ub, x_guesses=startpoints
)
optimizer = optimize.ScipyOptimizer()
result_optimization = optimize.minimize(
problem=problem,
optimizer=optimizer,
n_starts=n_starts,
filename=None,
progress_bar=False,
)
profile_original = profile.parameter_profile(
problem=problem,
result=result_optimization,
profile_index=[0],
optimizer=optimizer,
filename=None,
progress_bar=False,
)
fn = 'test_file.hdf5'
try:
pypesto_profile_writer = ProfileResultHDF5Writer(fn)
pypesto_profile_writer.write(profile_original)
pypesto_profile_reader = ProfileResultHDF5Reader(fn)
profile_read = pypesto_profile_reader.read()
for key in profile_original.profile_result.list[0][0].keys():
if (
            profile_original.profile_result.list[0][0][key] is None
or key == 'time_path'
):
continue
elif isinstance(
profile_original.profile_result.list[0][0][key], np.ndarray
):
np.testing.assert_array_equal(
profile_original.profile_result.list[0][0][key],
profile_read.profile_result.list[0][0][key],
)
elif isinstance(
profile_original.profile_result.list[0][0][key], int
):
assert (
profile_original.profile_result.list[0][0][key]
== profile_read.profile_result.list[0][0][key]
)
finally:
if os.path.exists(fn):
os.remove(fn)
| 5,339,684
|
def erdos_renyi(
num_genes: int, prob_conn: float, spec_rad: float = 0.8
) -> Tuple[np.ndarray, float]:
"""Initialize an Erdos Renyi network as in Sun–Taylor–Bollt 2015.
If the spectral radius is positive, the matrix is normalized
to a spectral radius of spec_rad and the scale shows the
normalization. If the spectral radius is zero, the returned
matrix will have entries of 0, 1, and -1, and the scale is set
to zero.
Args:
num_genes: Number of genes/nodes.
prob_conn: Probability of connection.
spec_rad: The desired spectral radius.
Returns:
Adjacency matrix and its scale.
"""
signed_edges = erdos_renyi_ternary(num_genes, prob_conn)
return scale_by_spec_rad(signed_edges, spec_rad)
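# Hypothetical call (erdos_renyi_ternary and scale_by_spec_rad are helpers
# assumed to be defined elsewhere in this module):
#
#     adjacency, scale = erdos_renyi(num_genes=20, prob_conn=0.1, spec_rad=0.8)
#     # adjacency has shape (20, 20); scale is 0 when spec_rad == 0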
| 5,339,685
|
def new_window(window_name):
"""Create a new window in a byobu session and give it a name.
Parameters
----------
window_name : str
Name for the new byobu window.
"""
subprocess.call(f"byobu new-window", shell=True)
subprocess.call(f"byobu rename-window '{window_name}'", shell=True)
return
| 5,339,686
|
def IMG_LoadTextureTyped_RW(renderer, src, freesrc, type):
"""Loads an image file from a file object to a texture as a specific format.
This function allows you to explicitly specify the format type of the image
to load. The different possible format strings are listed in the
documentation for :func:`IMG_LoadTyped_RW`.
See :func:`IMG_LoadTexture` for more information.
Args:
renderer (:obj:`SDL_Renderer`): The SDL rendering context with which to
create the texture.
src (:obj:`SDL_RWops`): The file object from which to load the image.
freesrc (int): If non-zero, the input file object will be closed and
freed after it has been read.
type (bytes): A bytestring indicating the image format with which the
file object should be loaded.
Returns:
POINTER(:obj:`SDL_Texture`): A pointer to the new texture containing
the image, or a null pointer if there was an error.
"""
return _ctypes["IMG_LoadTextureTyped_RW"](renderer, src, freesrc, type)
| 5,339,687
|
def process_one_email(q, count, id_val, dt, email):
""" Submit an email address to the Full Contact Person API and process
the responses
Process the response object based on the return status code
Parameters
----------
q : an instance of a Priority Queue
count : the count from the original placement in the queue
    id_val : id associated with email address
dt : datetime when the id was created
email : email address
Returns
-------
    None
"""
# import global
from fc import (OUT_DIR,
RETRY_TIME)
dt = dt.split()[0]
logger.info(('Post | email: {_email} id: {_id}'
' | {_email} posted to the Full Contact Person API')
.format(_email=email, _id=id_val))
# blocking operation - not to worry as each request is
# its own thread
r = query_person('email', email)
# log results
# if status code is not in 200, 202, 404 then the
# header values are not available
if r.status_code in (200, 202, 404):
post_msg = ('Return | email: {_email} id: {_id}'
' | return status code: {_status}'
' | datetime: {_dt}'
' | rate limit: {_rl} calls / 60 seconds'
' | rate limit remaining: '
'{_rlrem} calls / {_rlres} seconds')
post_msg = post_msg.format(_email=email,
_id=id_val,
_status=r.status_code,
_dt=r.headers['Date'],
_rl=r.headers['X-Rate-Limit-Limit'],
_rlrem=r.headers['X-Rate-Limit-Remaining'],
_rlres=r.headers['X-Rate-Limit-Reset'])
else:
post_msg = ('Return | email: {_email} id: {_id}'
' | return status code: {_status}')
post_msg = post_msg.format(_email=email,
_id=id_val,
_status=r.status_code)
logger.info(post_msg)
out_file = join(OUT_DIR,
'{_dt}_{_id}.json'.format(_dt=dt,
_id=id_val))
logging_desc = ('Results | email: {_email} id: {_id}'
' | status {_status}')
logging_desc = logging_desc.format(_email=email,
_id=id_val,
_status=r.status_code)
# process responses
if r.status_code == 200:
logging_desc += ' | success | writing to {_dt}_{_id}.json'
logging_desc = \
            logging_desc.format(_dt=dt, _id=id_val)
logger.info(logging_desc)
utils.write_json(r.json(), out_file)
elif r.status_code == 202:
logging_desc += (' | request is being processed'
' | adding email: {_email} id: {_id}'
' back to the queue and waiting {_retry}'
' seconds before resubmitting')
logging_desc = logging_desc.format(_email=email,
_id=id_val,
_retry=RETRY_TIME)
logger.info(logging_desc)
utils.write_json(r.json(), out_file)
# adding back to the queue
execute_time = time.time() + RETRY_TIME
q.put((execute_time, count, id_val, dt, email))
elif r.status_code == 400:
logging_desc += ' | bad / malformed request'
logger.info(logging_desc)
utils.write_json(r.json(), out_file)
elif r.status_code == 403:
logging_desc += (' | forbidden'
' | api key is invalid, missing, or exceeded quota')
logger.info(logging_desc)
utils.write_json(r.json(), out_file)
elif r.status_code == 404:
logging_desc += (' | not found'
' | person searched in the past 24 hours'
' and nothing was found')
logger.info(logging_desc)
utils.write_json(r.json(), out_file)
elif r.status_code == 405:
logging_desc += (' | method not allowed'
' | queried the API with an unsupported HTTP method')
logger.info(logging_desc)
utils.write_json(r.json(), out_file)
elif r.status_code == 410:
logging_desc += ' | gone | the resource cannot be found'
logger.info(logging_desc)
utils.write_json(r.json(), out_file)
elif r.status_code == 422:
logging_desc += ' | invalid ==> invalid or missing API query parameter'
logger.info(logging_desc)
utils.write_json(r.json(), out_file)
elif r.status_code == 500:
logging_desc += (' | internal server error'
                         ' | an unexpected error at Full Contact; please contact '
                         'support@fullcontact.com')
logger.info(logging_desc)
utils.write_json(r.json(), out_file)
elif r.status_code == 503:
logging_desc += (' | service temporarily down'
' | check the Retry-After header')
logger.info(logging_desc)
utils.write_json(r.json(), out_file)
| 5,339,688
|
def addOverride(cls, override):
"""Override the serializer to use 'override' as the identifier for instances of 'cls'
    This is primarily to shorten the amount of data in the representation and to allow the
representation to remain constant even if classes are moving around or changing names.
override may not be a tuple
"""
assert cls not in locationTypeOverrides_
assert not isinstance(override, tuple)
locationTypeOverrides_[cls] = override
locationTypes_[override] = cls
| 5,339,689
|
def soft_l1(z: np.ndarray, f_scale):
"""
rho(z) = 2 * ((1 + z)**0.5 - 1)
The smooth approximation of l1 (absolute value) loss. Usually a good choice for robust least squares.
:param z: z = f(x)**2
:param f_scale: rho_(f**2) = C**2 * rho(f**2 / C**2), where C is f_scale
    :return: array of shape (3, len(z)) with rho(z), its first and second derivatives as rows
"""
loss = np.empty((3, z.shape[0]), dtype=np.float64)
c2 = f_scale * f_scale
ic2 = 1.0 / c2
z = ic2 * z
sqrt_1pz = np.sqrt(z + 1)
loss[0, :] = c2 * 2 * (sqrt_1pz - 1)
loss[1, :] = 1 / sqrt_1pz
loss[2, :] = -ic2 * 0.5 * np.power(loss[1, :], 3)
return loss
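# Numeric sanity check (added for illustration): for large z the loss grows
# like 2*sqrt(z), i.e. roughly linearly in |f(x)|, which is the robust
# "soft l1" behaviour.
z_demo = np.array([0.0, 1.0, 100.0])
rho, drho, d2rho = soft_l1(z_demo, f_scale=1.0)
print(rho)   # [0.0, ~0.828, ~18.1]
print(drho)  # first-derivative factors used by the least-squares solver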
| 5,339,690
|
def calculate_hit_box_points_detailed(image: Image, hit_box_detail: float = 4.5):
"""
Given an image, this returns points that make up a hit box around it. Attempts
to trim out transparent pixels.
:param Image image: Image get hit box from.
    :param float hit_box_detail: How detailed to make the hit box. There's a
trade-off in number of points vs. accuracy.
:Returns: List of points
"""
def sample_func(sample_point):
""" Method used to sample image. """
if sample_point[0] < 0 \
or sample_point[1] < 0 \
or sample_point[0] >= image.width \
or sample_point[1] >= image.height:
return 0
point_tuple = sample_point[0], sample_point[1]
color = image.getpixel(point_tuple)
if color[3] > 0:
return 255
else:
return 0
# Do a quick check if it is a full tile
p1 = 0, 0
p2 = 0, image.height - 1
p3 = image.width - 1, image.height - 1
p4 = image.width - 1, 0
if sample_func(p1) and sample_func(p2) and sample_func(p3) and sample_func(p4):
# Do a quick check if it is a full tile
p1 = (-image.width / 2, -image.height / 2)
p2 = (image.width / 2, -image.height / 2)
p3 = (image.width / 2, image.height / 2)
p4 = (-image.width / 2, image.height / 2)
return p1, p2, p3, p4
# Get the bounding box
logo_bb = pymunk.BB(-1, -1, image.width, image.height)
# Set of lines that trace the image
line_set = pymunk.autogeometry.PolylineSet()
# How often to sample?
downres = 1
horizontal_samples = int(image.width / downres)
vertical_samples = int(image.height / downres)
# Run the trace
# Get back one or more sets of lines covering stuff.
line_sets = pymunk.autogeometry.march_soft(
logo_bb,
horizontal_samples, vertical_samples,
99,
sample_func)
if len(line_sets) == 0:
return []
selected_line_set = line_sets[0]
selected_range = None
    if len(line_sets) > 1:
        # We have more than one line set. Try and find one that covers most of
        # the sprite.
        for line in line_sets:
min_x = None
min_y = None
max_x = None
max_y = None
for point in line:
if min_x is None or point.x < min_x:
min_x = point.x
if max_x is None or point.x > max_x:
max_x = point.x
if min_y is None or point.y < min_y:
min_y = point.y
if max_y is None or point.y > max_y:
max_y = point.y
if min_x is None or max_x is None or min_y is None or max_y is None:
raise ValueError("No points in bounding box.")
my_range = max_x - min_x + max_y + min_y
if selected_range is None or my_range > selected_range:
selected_range = my_range
selected_line_set = line
# Reduce number of vertices
# original_points = len(selected_line_set)
selected_line_set = pymunk.autogeometry.simplify_curves(selected_line_set,
hit_box_detail)
# downsampled_points = len(selected_line_set)
    # Convert to normal points, offset so 0,0 is the center, flip the y
hh = image.height / 2
hw = image.width / 2
points = []
for vec2 in selected_line_set:
point = round(vec2.x - hw), round(image.height - (vec2.y - hh) - image.height)
points.append(point)
if len(points) > 1 and points[0] == points[-1]:
points.pop()
# print(f"{sprite.texture.name} Line-sets={len(line_set)}, Original points={original_points}, Downsampled points={downsampled_points}")
return points
| 5,339,691
|
def generate_from_template(file_path, template, **kwargs):
# type: (str, Template, Any[str]) -> None
"""
Generates file according to the template with given arguments
"""
with open(file_path, 'w') as cmake_lists:
target_cmake_lists = template.render(**kwargs)
cmake_lists.write(target_cmake_lists)
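# Minimal usage sketch (added for illustration); assumes the Template type in
# the annotation above is jinja2.Template.
from jinja2 import Template

cmake_template = Template("cmake_minimum_required(VERSION 3.10)\nproject({{ project_name }})\n")
generate_from_template("CMakeLists.txt", cmake_template, project_name="demo_project")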
| 5,339,692
|
async def open_local_endpoint(
host="0.0.0.0", port=0, *, queue_size=None, **kwargs
):
"""Open and return a local datagram endpoint.
An optional queue size argument can be provided.
Extra keyword arguments are forwarded to `loop.create_datagram_endpoint`.
"""
return await open_datagram_endpoint(
host,
port,
remote=False,
endpoint_factory=lambda: LocalEndpoint(queue_size),
**kwargs
)
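# Hypothetical usage inside a coroutine (open_datagram_endpoint and
# LocalEndpoint are helpers assumed to be defined alongside this function;
# the receive()/send() names below belong to that assumed LocalEndpoint API):
#
#     local = await open_local_endpoint(port=8888)
#     data, addr = await local.receive()
#     local.send(b'ack', addr)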
| 5,339,693
|
def polySplitEdge(q=1,e=1,op="int",cch=1,ch=1,n="string",nds="int"):
"""
http://help.autodesk.com/cloudhelp/2019/ENU/Maya-Tech-Docs/CommandsPython/polySplitEdge.html
-----------------------------------------
polySplitEdge is undoable, queryable, and editable.
Split Edges.
There are two operations for this command depending on the value of the
-operation flag.
If -operation is set to 1 then this command will split apart faces along all
selected manifold edges.
If -operation is set to 0 then this command will split non-manifold edges so
as to make them manifold edges. It creates the minimum number of edges that
can be created to make the edge manifold.
The default value for -operation is 1, operate on manifold edges.
Resulting mesh may have extra vertices or edges to ensure geometry is valid.
-----------------------------------------
Return Value:
string The node name.
In query mode, return type is based on queried flag.
-----------------------------------------
Flags:
-----------------------------------------
op : operation [int] ['query', 'edit']
0 means use a Non-Manifold method, 1 means use a Manifold method
-----------------------------------------
cch : caching [boolean] ['query', 'edit']
Toggle caching for all attributes so that no recomputation is needed
-----------------------------------------
ch : constructionHistory [boolean] ['query']
Turn the construction history on or off (where applicable). If construction history is on then the corresponding node will be inserted into the history chain for the mesh. If construction history is off then the operation will be performed directly on the object. Note: If the object already has construction history then this flag is ignored and the node will always be inserted into the history chain.
-----------------------------------------
n : name [string] []
Give a name to the resulting node.
-----------------------------------------
nds : nodeState [int]
Maya dependency nodes have 6 possible states. The Normal (0), HasNoEffect (1), and Blocking (2) states can be used to alter how the graph is evaluated. The Waiting-Normal (3), Waiting-HasNoEffect (4), Waiting-Blocking (5) are for internal use only. They temporarily shut off parts of the graph during interaction (e.g., manipulation). The understanding is that once the operation is done, the state will be reset appropriately, e.g. Waiting-Blocking will reset back to Blocking. The Normal and Blocking cases apply to all nodes, while HasNoEffect is node specific; many nodes do not support this option. Plug-ins store state in the MPxNode::state attribute. Anyone can set it or check this attribute. Additional details about each of these 3 states follow. | State | Description
"""
| 5,339,694
|
def parse_bool(value: Union[str, bool]) -> bool:
"""Parse a string value into a boolean.
Uses the sets ``CONSIDERED_TRUE`` and ``CONSIDERED_FALSE`` to determine the boolean value of the string.
Args:
value (Union[str, bool]): the string to parse (is converted to lowercase and stripped of surrounding whitespace)
Raises:
ValueError: if the string cannot reliably be determined true or false
Returns:
bool: the parsed result
"""
if value is True or value is False:
return value
val = value.strip().lower()
if val in CONSIDERED_TRUE:
return True
if val in CONSIDERED_FALSE:
return False
raise ValueError(f"Value {value} is not compatible with boolean!")
| 5,339,695
|
def test_merge_tag_handler_error_handling() -> None:
"""Merge tag should correctly handle errors."""
_check_merge_tag(
ListField(StringField()),
'!merge scalar_value',
expected_error=ErrorCode.UNEXPECTED_NODE_TYPE
)
_check_merge_tag(
ListField(StringField()),
'!merge {}',
expected_error=ErrorCode.UNEXPECTED_NODE_TYPE
)
_check_merge_tag(
StringField(),
'!merge [scalar_value]',
expected_error=ErrorCode.VALUE_ERROR
)
| 5,339,696
|
def download_progress_hook(count, blockSize, totalSize):
"""A hook to report the progress of a download. This is mostly intended for users with
slow internet connections. Reports every 5% change in download progress.
"""
global last_percent_reported
percent = int(count * blockSize * 100 / totalSize)
if last_percent_reported != percent:
if percent % 5 == 0:
sys.stdout.write("%s%%" % percent)
sys.stdout.flush()
else:
sys.stdout.write(".")
sys.stdout.flush()
last_percent_reported = percent
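# Intended use as a urllib reporthook (added for illustration; the URL and
# filename are placeholders). The module-level last_percent_reported global
# must exist before the first call.
#
#     from urllib.request import urlretrieve
#     last_percent_reported = None
#     urlretrieve(url, local_filename, reporthook=download_progress_hook)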
| 5,339,697
|
def send_rocketchat_notification(text: str, exc_info: Exception) -> dict:
""" Sends message with specified text to configured Rocketchat channel.
We don't want this method to raise any exceptions, as we don't want to
unintentionally break any kind of error management flow. (We only use
rocket chat notification when something goes wrong).
If you want to know if this method worked or not, you'll have to inspect
the response.
"""
full_message = f"{datetime.now(tz=timezone.utc).isoformat()}\n{text}\n\
{config.get('HOSTNAME')}: {exc_info}\n\
{traceback.format_exception(etype=type(exc_info),value=exc_info,tb=exc_info.__traceback__)}"
result = None
try:
response = requests.post(
config.get('ROCKET_URL_POST_MESSAGE'),
headers={
'X-Auth-Token': config.get('ROCKET_AUTH_TOKEN'),
'X-User-Id': config.get('ROCKET_USER_ID'),
'Content-Type': 'application/json'
},
json={
'channel': config.get('ROCKET_CHANNEL'),
'text': full_message
}
)
result = response.json()
except Exception as exception: # pylint: disable=broad-except
# not doing exc_info=exception - as this causes a lot of noise, and we're more interested
# in the main code!
logger.error('failed to send rocket chat notification %s', exception)
return result
| 5,339,698
|
def get_number_of_forms_all_domains_in_couch():
"""
Return number of non-error, non-log forms total across all domains
specifically as stored in couch.
(Can't rewrite to pull from ES or SQL; this function is used as a point
of comparison between row counts in other stores.)
"""
all_forms = (
XFormInstance.get_db().view('couchforms/by_xmlns').one()
or {'value': 0}
)['value']
device_logs = (
XFormInstance.get_db().view('couchforms/by_xmlns',
key=DEVICE_LOG_XMLNS).one()
or {'value': 0}
)['value']
return all_forms - device_logs
| 5,339,699
|