| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def tweet_words(tweet):
"""Return the words in a tweet."""
return extract_words(tweet_text(tweet))
| 5,343,900
|
def meter_statistics(meter_id,api_endpoint,token,meter_list,web,**kwargs):
"""
Get the statistics for the specified meter.
Args:
meter_id(string): The meter name.
api_endpoint(string): The api endpoint for the ceilometer service.
token(string): X-Auth-token.
meter_list(list): The list of available meters.
web(bool): If False, prompt the user interactively for the query, period and group-by options; if True, build the request body from kwargs.
**kwargs: Must contain 'q' (query string); may contain 'period' when web is True.
Returns:
bool: True if successful, False otherwise.
list: The list with the meter statistics.
"""
meter_stat = [None]
headers = {
#'Accept': 'application/json',
'Content-Type': 'application/json;',
'Accept': 'application/json',
'X-Auth-Token': token
}
path = "/v2/meters/"+meter_id+"/statistics?"
q=kwargs.pop('q')
target = urlparse(api_endpoint+path+q)
method = 'GET'
logger.info('Inside meter-statistics: Path is %s',target)
if(web==False):
from_date,to_date,from_time,to_time,resource_id,user_id,status_q=query()
if(status_q==True):
q=set_query(from_date,to_date,from_time,to_time,resource_id,user_id,status_q)
body="{"+q
period=raw_input("Do you want to define a time period? Enter 'Y' if yes, 'N' if no.")
if(period=="Y"):
period_def=raw_input("Enter the desired time period in seconds: ")
body=body+',"period":'+period_def
groupby=raw_input("Do you want to define a group by value? Enter 'Y' if yes, 'N' if no.")
if (groupby=="Y") :
rid=raw_input("Do you want to group by the resource id? If yes, enter 'Y', else enter 'N'. ")
if(rid=="Y"):
groupby_def=',"groupby":['
groupby_def=groupby_def+'"resource_id"'
pid=raw_input("Do you want to group by the project id? If yes, enter 'Y', else enter 'N'. ")
if(pid=="Y"):
groupby_def=groupby_def+',"project_id"'
groupby_def=groupby_def+']'
body=body+groupby_def
else:
pid=raw_input("Do you want to group by the project id? If yes, enter 'Y', else enter 'N'. ")
if(pid=="Y"):
groupby_def=',"groupby":['
groupby_def=groupby_def+'"project_id"'
groupby_def=groupby_def+']'
body=body+groupby_def
body=body+"}"
else:
body="{"
period=raw_input("Do you want to define a time period? Enter 'Y' if yes, 'N' if no.")
if(period=="Y"):
period_def=raw_input("Enter the desired time period in seconds: ")
body=body+'"period":'+period_def
rid=raw_input("Do you want to group by the resource id? If yes, enter 'Y', else enter 'N'. ")
if(rid=="Y"):
groupby_def=',"groupby":['
groupby_def=groupby_def+'"resource_id"'
pid=raw_input("Do you want to group by the project id? If yes, enter 'Y', else enter 'N'. ")
if(pid=="Y"):
groupby_def=groupby_def+',"project_id"'
groupby_def=groupby_def+']'
body=body+groupby_def
else:
pid=raw_input("Do you want to group by the project id? If yes, enter 'Y', else enter 'N'. ")
if(pid=="Y"):
groupby_def=',"groupby":['
groupby_def=groupby_def+'"project_id"'
groupby_def=groupby_def+']'
body=body+groupby_def
body=body+"}"
else:
rid=raw_input("Do you want to group by the resource id? If yes, enter 'Y', else enter 'N'. ")
if(rid=="Y"):
groupby_def='"groupby":['
groupby_def=groupby_def+'"resource_id"'
pid=raw_input("Do you want to group by the project id? If yes, enter 'Y', else enter 'N'. ")
if(pid=="Y"):
groupby_def=groupby_def+',"project_id"'
groupby_def=groupby_def+']'
body=body+groupby_def
else:
pid=raw_input("Do you want to group by the project id? If yes, enter 'Y', else enter 'N'. ")
if(pid=="Y"):
groupby_def='"groupby":['
groupby_def=groupby_def+'"project_id"'
groupby_def=groupby_def+']'
body=body+groupby_def
body=body+"}"
else:
#q=kwargs.pop('q')
if 'period' in kwargs:
period=kwargs.pop('period')
body="{"+q
body=body+',"period":'+period+"}"
else:
body="{"+q+"}"
if is_in_mlist(meter_id,meter_list):
logger.info('Inside meter_statistics: body is %s',body)
h = http.Http()
#print method
#print body
#print headers
#print target.geturl()
#response, content = h.request(target.geturl(),method,body,headers)
response, content = h.request(target.geturl(),method,'',headers)
#print response
header = json.dumps(response)
#print header
json_header = json.loads(header)
#print json_header
server_response = json_header["status"]
if server_response not in {'200'}:
print "Inside meter_statistics(): Something went wrong!"
logger.warn('Inside meter_statistics: not a valid response ')
return False, meter_stat
else:
logger.info('Getting the meter statistics \n')
data = json.loads(content)
#print content
#print data
#print "========================="
meter_stat = [None]*len(data)
for i in range(len(data)):
meter_stat[i]={}
meter_stat[i]["average"] = data[i]["avg"]
meter_stat[i]["count"] = data[i]["count"]
meter_stat[i]["duration"] = data[i]["duration"]
meter_stat[i]["duration-end"] = data[i]["duration_end"]
meter_stat[i]["duration-start"] = data[i]["duration_start"]
meter_stat[i]["max"] = data[i]["max"]
meter_stat[i]["min"] = data[i]["min"]
meter_stat[i]["period"] = data[i]["period"]
meter_stat[i]["period-end"] = data[i]["period_end"]
meter_stat[i]["period-start"] = data[i]["period_start"]
meter_stat[i]["sum"] = data[i]["sum"]
meter_stat[i]["unit"] = data[i]["unit"]
meter_stat[i]["group-by"] = data[i]["groupby"]
return True, meter_stat
else:
logger.warn("Inside meter statistics: not an existing meter name")
print "Choose a meter from the meter list!"
return False,meter_stat
| 5,343,901
|
def parse_image_size(image_size: Union[Text, int, Tuple[int, int]]):
"""Parse the image size and return (height, width).
Args:
image_size: An integer, a (height, width) tuple, or a string in WxH format.
Returns:
A tuple of integer (height, width).
"""
if isinstance(image_size, int):
# image_size is integer, with the same width and height.
return (image_size, image_size)
if isinstance(image_size, str):
# image_size is a string with format WxH
width, height = image_size.lower().split("x")
return (int(height), int(width))
if isinstance(image_size, tuple):
return image_size
raise ValueError(
"image_size must be an int, WxH string, or (height, width)"
"tuple. Was %r" % image_size
)
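# Illustrative usage (not part of the dataset entry; assumes parse_image_size is in scope):
assert parse_image_size(512) == (512, 512)
assert parse_image_size("640x480") == (480, 640)   # "WxH" string -> (height, width)
assert parse_image_size((480, 640)) == (480, 640)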
| 5,343,902
|
def find_last_match(view, what, start, end, flags=0):
"""Find last occurrence of `what` between `start`, `end`.
"""
match = view.find(what, start, flags)
new_match = None
while match:
new_match = view.find(what, match.end(), flags)
if new_match and new_match.end() <= end:
match = new_match
else:
return match
| 5,343,903
|
def convert_metrics_per_batch_to_per_sample(metrics, target_masks):
"""
Args:
metrics: list of len(num_batches), each element: list of len(num_metrics), each element: (num_active_in_batch,) metric per element
target_masks: list of len(num_batches), each element: (batch_size, seq_len, feat_dim) boolean mask: 1s active, 0s ignore
Returns:
metrics_array = list of len(num_batches), each element: (batch_size, num_metrics) metric per sample
"""
metrics_array = []
for b, batch_target_masks in enumerate(target_masks):
num_active_per_sample = np.sum(batch_target_masks, axis=(1, 2))
batch_metrics = np.stack(metrics[b], axis=1) # (num_active_in_batch, num_metrics)
ind = 0
metrics_per_sample = np.zeros((len(num_active_per_sample), batch_metrics.shape[1])) # (batch_size, num_metrics)
for n, num_active in enumerate(num_active_per_sample):
new_ind = ind + num_active
metrics_per_sample[n, :] = np.sum(batch_metrics[ind:new_ind, :], axis=0)
ind = new_ind
metrics_array.append(metrics_per_sample)
return metrics_array
| 5,343,904
|
async def get_series(database, series_id):
"""Get a series."""
series_query = """
select series.id, series.played, series_metadata.name, tournaments.id as tournament_id,
tournaments.name as tournament_name, events.id as event_id, events.name as event_name
from series join rounds on series.round_id=rounds.id join series_metadata on series.id=series_metadata.series_id
join tournaments on rounds.tournament_id=tournaments.id
join events on tournaments.event_id=events.id
where series.id=:id
"""
participants_query = 'select series_id, name, score, winner from participants where series_id=:id'
matches_query = 'select id, series_id from matches where series_id=:id'
values = {'id': series_id}
series, participants, matches = await asyncio.gather(
database.fetch_one(series_query, values=values),
database.fetch_all(participants_query, values=values),
database.fetch_all(matches_query, values=values)
)
return dict(
series,
participants=list(map(dict, participants)),
match_ids=list(map(lambda m: m['id'], matches)),
tournament=dict(
id=series['tournament_id'],
name=series['tournament_name'],
event=dict(
id=series['event_id'],
name=series['event_name']
)
)
)
| 5,343,905
|
def n_mpjpe(predicted, target):
"""
Normalized MPJPE (scale only), adapted from:
https://github.com/hrhodin/UnsupervisedGeometryAwareRepresentationLearning/blob/master/losses/poses.py
"""
assert predicted.shape == target.shape
norm_predicted = np.mean(np.sum(predicted**2, axis=2, keepdims=True), axis=1, keepdims=True)
norm_target = np.mean(np.sum(target*predicted, axis=2, keepdims=True), axis=1, keepdims=True)
scale = norm_target / norm_predicted
return euclidean_distance_3D(scale * predicted, target)
| 5,343,906
|
def calc_distance(p1, p2):
""" calculates a distance on a 2d euclidean space, between two points"""
dist = math.sqrt((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2)
return dist
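# Quick sanity check (illustrative, not part of the dataset entry): the 3-4-5 right triangle.
import math
assert math.isclose(calc_distance((0, 0), (3, 4)), 5.0)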
| 5,343,907
|
def rgb2ycbcr(img, range=255., only_y=True):
"""same as matlab rgb2ycbcr, please use bgr2ycbcr when using cv2.imread
img: shape=[h, w, 3]
range: the data range
only_y: only return Y channel
"""
in_img_type = img.dtype
img = img.astype(np.float32)
range_scale = 255. / range
img *= range_scale
# convert
if only_y:
rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
else:
rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
[24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
rlt /= range_scale
if in_img_type == np.uint8:
rlt = rlt.round()
return rlt.astype(in_img_type)
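# Illustrative check (not part of the dataset entry; relies on the function above):
# a pure white uint8 pixel maps to Y = 235 in the MATLAB-style limited range.
import numpy as np
white = np.full((1, 1, 3), 255, dtype=np.uint8)
assert int(rgb2ycbcr(white)[0, 0]) == 235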
| 5,343,908
|
def _create_xctest_bundle(name, actions, binary):
"""Creates an `.xctest` bundle that contains the given binary.
Args:
name: The name of the target being built, which will be used as the
basename of the bundle (followed by the .xctest bundle extension).
actions: The context's actions object.
binary: The binary that will be copied into the test bundle.
Returns:
A `File` (tree artifact) representing the `.xctest` bundle.
"""
xctest_bundle = derived_files.xctest_bundle(
actions = actions,
target_name = name,
)
args = actions.args()
args.add(xctest_bundle.path)
args.add(binary)
actions.run_shell(
arguments = [args],
command = (
'mkdir -p "$1/Contents/MacOS" && ' +
'cp "$2" "$1/Contents/MacOS"'
),
inputs = [binary],
mnemonic = "SwiftCreateTestBundle",
outputs = [xctest_bundle],
progress_message = "Creating test bundle for {}".format(name),
)
return xctest_bundle
| 5,343,909
|
def doFile(path_, *args, **kwargs):
"""Execute a given file from path with arguments."""
result, reason = loadfile(path_)
if result:
data = result(*args, **kwargs)
if data:
return data[1]
error(data[1])
error(reason)
| 5,343,910
|
def setEcnEnabled(enabled):
""" Enable/disable threshold-based ECN marking. """
val = 'true' if enabled else 'false'
for src in xrange(1, NUM_RACKS + 1):
for dst in xrange(1, NUM_RACKS + 1):
clickWriteHandler('hybrid_switch/q{}{}/q'.format(src, dst),
'marking_enabled', val)
time.sleep(0.1)
| 5,343,911
|
def pformat(obj, verbose=False):
"""
Prettyprint an object. Either use the `pretty` library or the
builtin `pprint`.
"""
try:
from pretty import pretty
return pretty(obj, verbose=verbose)
except ImportError:
from pprint import pformat
return pformat(obj)
| 5,343,912
|
def reanalyze_function(*args):
"""
reanalyze_function(func_t pfn, ea_t ea1=0, ea_t ea2=BADADDR, bool analyze_parents=False)
reanalyze_function(func_t pfn, ea_t ea1=0, ea_t ea2=BADADDR)
reanalyze_function(func_t pfn, ea_t ea1=0)
reanalyze_function(func_t pfn)
"""
return _idaapi.reanalyze_function(*args)
| 5,343,913
|
def categorical_sample_logits(logits):
"""
Samples (symbolically) from a categorical distribution, where logits is an NxK
matrix specifying N categorical distributions with K categories each.
Specifically, exp(logits) / sum(exp(logits), axis=1) gives the
probabilities of the different classes.
Cleverly uses the Gumbel-max trick, based on
https://github.com/tensorflow/tensorflow/issues/456
"""
U = tf.random_uniform(tf.shape(logits))
return tf.argmax(logits - tf.log(-tf.log(U)), dimension=1)
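# Hedged NumPy sketch (an assumption; the function itself uses TF1 ops) showing why the
# Gumbel-max trick works: argmax(logits - log(-log(U))) reproduces the softmax probabilities.
import numpy as np

rng = np.random.default_rng(0)
logits = np.array([1.0, 0.0, -1.0])
probs = np.exp(logits) / np.exp(logits).sum()          # ~[0.665, 0.245, 0.090]
u = rng.uniform(size=(100_000, logits.size))
samples = np.argmax(logits - np.log(-np.log(u)), axis=1)
empirical = np.bincount(samples, minlength=logits.size) / samples.size
assert np.allclose(empirical, probs, atol=0.01)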
| 5,343,914
|
def mass(d, r):
""" computes the right hand side of the differential equation of mass continuity
"""
return 4 * pi * d * r * r
| 5,343,915
|
def bord(u):
"""
execution trace of bord("undébutuntructrucundébut")
i suffix estPréfixe
23 ndébutuntructrucundébut False
22 débutuntructrucundébut False
21 ébutuntructrucundébut False
20 butuntructrucundébut False
19 utuntructrucundébut False
18 tuntructrucundébut False
17 untructrucundébut False
16 ntructrucundébut False
15 tructrucundébut False
14 ructrucundébut False
13 uctrucundébut False
12 ctrucundébut False
11 trucundébut False
10 rucundébut False
9 ucundébut False
8 cundébut False
7 undébut True
"""
suffix = ""
for i in reversed(range(0, len(u))):
suffix = u[len(u)-i:len(u)]
if estPréfixe(u, suffix):
break
return suffix
| 5,343,916
|
def is_unique_n_bit_vector(string: str) -> bool:
"""
Similar to the dict solution, it just uses a bit vector instead of a dict or array.
"""
vector = 0
for letter in string:
if vector & 1 << ord(letter):
return False
vector |= 1 << ord(letter)
return True
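# Illustrative usage (not part of the dataset entry): each character sets one bit,
# so a repeated character is detected without any dict or array.
assert is_unique_n_bit_vector("abcdef")
assert not is_unique_n_bit_vector("hello")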
| 5,343,917
|
def print_error(err_str: str, once: bool = False) -> None:
"""Print info about an error along with pertinent context state.
category: General Utility Functions
Prints all positional arguments provided along with various info about the
current context.
Pass the keyword 'once' as True if you want the call to only happen
one time from an exact calling location.
"""
import traceback
try:
# If we're only printing once and already have, bail.
if once:
if not _ba.do_once():
return
# Most tracebacks are gonna have ugly long install directories in them;
# let's strip those out when we can.
print('ERROR:', err_str)
_ba.print_context()
# Basically the output of traceback.print_stack() slightly prettified:
stackstr = ''.join(traceback.format_stack())
for path in sys.path:
stackstr = stackstr.replace(path + '/', '')
print(stackstr, end='')
except Exception:
print('ERROR: exception in ba.print_error():')
traceback.print_exc()
| 5,343,918
|
def to_int_cmd(ctx, data: str):
"""
Convert ASCII string to a number.
"""
echo(NeoData.to_int(data))
| 5,343,919
|
def seq2msk(isq):
"""
Convert seqhis into mskhis
OpticksPhoton.h uses a mask but seq uses the index for bit-brevity::
3 enum
4 {
5 CERENKOV = 0x1 << 0,
6 SCINTILLATION = 0x1 << 1,
7 MISS = 0x1 << 2,
8 BULK_ABSORB = 0x1 << 3,
9 BULK_REEMIT = 0x1 << 4,
"""
ifl = np.zeros_like(isq)
for n in range(16):
msk = 0xf << (4*n) ## nibble mask
nib = ( isq & msk ) >> (4*n) ## pick the nibble and shift to pole position
flg = 1 << ( nib[nib>0] - 1 ) ## convert flag bit index into flag mask
ifl[nib>0] |= flg
pass
return ifl
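# Worked example (illustrative, not part of the dataset entry): seqhis 0x21 packs
# nibble values 1 (CERENKOV index) and 2 (SCINTILLATION index), so the resulting
# mask is 0x1 | 0x2 == 0x3.
import numpy as np
assert seq2msk(np.array([0x21]))[0] == 0x3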
| 5,343,920
|
def split(ich):
""" Split a multi-component InChI into InChIs for each of its components.
(fix this for /s [which should be removed in split/join operations]
and /m, which is joined as /m0110.. with no separators)
:param ich: InChI string
:type ich: str
:rtype: tuple(str)
"""
fml_slyr = formula_sublayer(ich)
main_dct = main_sublayers(ich)
char_dct = charge_sublayers(ich)
ste_dct = stereo_sublayers(ich)
iso_dct = isotope_sublayers(ich)
fml_slyrs = _split_sublayer_string(
fml_slyr, count_sep_ptt='', sep_ptt=app.escape('.'))
count = len(fml_slyrs)
main_dcts = _split_sublayers(main_dct, count)
char_dcts = _split_sublayers(char_dct, count)
ste_dcts = _split_sublayers(ste_dct, count)
iso_dcts = _split_sublayers(iso_dct, count)
ichs = tuple(from_data(fml_slyr=fml_slyr,
main_lyr_dct=main_dct,
char_lyr_dct=char_dct,
ste_lyr_dct=ste_dct,
iso_lyr_dct=iso_dct)
for fml_slyr, main_dct, char_dct, ste_dct, iso_dct
in zip(fml_slyrs, main_dcts, char_dcts, ste_dcts, iso_dcts))
return ichs
| 5,343,921
|
def crtb_cb(client, crtb):
"""Wait for the crtb to have the userId populated"""
def cb():
c = client.reload(crtb)
return c.userId is not None
return cb
| 5,343,922
|
def test_basic():
"""Another test, to show how it appears in the results"""
pass
| 5,343,923
|
def create_network(network_input, n_alphabets):
""" create the structure of the neural network """
model = Sequential()
model.add(LSTM(512,input_shape=(network_input.shape[1], network_input.shape[2]),return_sequences=True))
model.add(Dropout(0.3))
model.add(Bidirectional(LSTM(512, return_sequences=True)))
model.add(Dropout(0.3))
model.add(Bidirectional(LSTM(512)))
model.add(Dense(256))
model.add(Dropout(0.3))
model.add(Dense(n_alphabets))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
return model
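# Hedged usage sketch (assumes the same Keras imports as the function above,
# e.g. Sequential, LSTM, Dropout, Bidirectional, Dense, Activation): only the
# shape of `network_input` matters when building the model.
import numpy as np
network_input = np.zeros((100, 50, 1))   # (samples, timesteps, features) -- hypothetical sizes
model = create_network(network_input, n_alphabets=30)
model.summary()                          # stacked LSTM blocks followed by dense + softmax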
| 5,343,924
|
def create_derivative_graph(f, xrange, n):
Takes a function as an input with a specific interval xrange, then creates a list with the output
y-points for the nth derivative of f.
:param f: Input function that we wish to take the derivative of.
:type f: lambda
:param xrange: The interval on which to evaluate f^n(x).
:type xrange: list
:param n: The derivative (1st, 2nd, 3rd, etc)
:type n: int
:return: A list of all f^n(x) points for all x in xrange.
:rtype: list of floats
"""
plot_points = []
for x in xrange:
plot_points.append(nth_derivative(f, x, n))
return plot_points
| 5,343,925
|
def _matching_not_matching(on, **kwargs):
"""
Change the text for matching/not matching
"""
text = "matching" if not on else "not matching"
classname = "colour-off" if not on else "colour-on"
return text, classname
| 5,343,926
|
def prepare_testenv(config=None, template=None, args=None):
"""
prepare an engine-ready environment for a test
This utility method is used to provide an `RelengEngine` instance ready for
execution on an interim working directory.
Args:
config (optional): dictionary of options to mock for arguments
template (optional): the folder holding a template project to copy into
the prepared working directory
args (optional): additional arguments to add to the "forwarded options"
Yields:
the engine
"""
class MockArgs(object):
def __getattr__(self, name):
# return the configured value if present, otherwise None
return self.__dict__.get(name)
if config is None:
config = {}
with generate_temp_dir() as work_dir, interim_working_dir(work_dir):
# force root directory to temporary directory; or configure all working
# content based off the generated temporary directory
if 'root_dir' not in config:
config['root_dir'] = work_dir
else:
if 'cache_dir' not in config:
config['cache_dir'] = os.path.join(work_dir, 'cache')
if 'dl_dir' not in config:
config['dl_dir'] = os.path.join(work_dir, 'dl')
if 'out_dir' not in config:
config['out_dir'] = os.path.join(work_dir, 'out')
if template:
templates_dir = os.path.join(find_test_base(), 'templates')
template_dir = os.path.join(templates_dir, template)
if not path_copy(template_dir, work_dir, critical=False):
assert False, 'failed to setup template into workdir'
# build arguments instance
test_args = MockArgs()
for k, v in config.items():
setattr(test_args, k, v)
# prepare engine options and build an engine instance
opts = RelengEngineOptions(args=test_args, forward_args=args)
engine = RelengEngine(opts)
yield engine
| 5,343,927
|
def sample(model, x, steps, temperature=1.0, sample=False, top_k=None):
"""
take a conditioning sequence of indices in x (of shape (b,t)) and predict the next token in
the sequence, feeding the predictions back into the model each time. Clearly the sampling
has quadratic complexity unlike an RNN that is only linear, and has a finite context window
of block_size, unlike an RNN that has an infinite context window.
"""
block_size = model.block_size
for k in range(steps):
x_cond = x if x.shape[1] <= block_size else x[:, -block_size:] # crop context if needed
logits = model(x_cond)
# pluck the logits at the final step and scale by temperature
logits = logits[:, -1, :] / temperature
# optionally crop probabilities to only the top k options
if top_k is not None:
logits = top_k_logits(logits, top_k)
# apply softmax to convert to probabilities
probs = tf.nn.softmax(logits, axis=-1)
# sample from the distribution or take the most likely
if sample:
ix = tf.random.categorical(logits,1,dtype=tf.int32)
else:
_, ix = tf.math.top_k(probs, k=1)
# append to the sequence and continue
x = tf.concat((x,ix), axis=1)
return x
| 5,343,928
|
def vstack(arg_list):
"""Wrapper on vstack to ensure list argument.
"""
return Vstack(*arg_list)
| 5,343,929
|
def metadata_to_list(metadata):
"""Transform a metadata dictionary retrieved from Cassandra to a list of
tuples. If metadata items are lists they are split into multiple pairs in
the result list
:param metadata: dict"""
res = []
for k, v in metadata.iteritems():
try:
val_json = json.loads(v)
val = val_json.get('json', '')
# If the value is a list we create several pairs in the result
if isinstance(val, list):
for el in val:
res.append((k, el))
else:
if val:
res.append((k, val))
except ValueError:
if v:
res.append((k, v))
return res
| 5,343,930
|
def GetSetUpAndResponse():
"""
This method is called by an API acting as a client while performing the PSI protocol.
This method initialises a server object. (This API acts as server)
This method uses the PSI Request ID given by the calling API to identify the corresponding node list from the server directory.
This method then encrypts all elements in the node list using a newly generated secret key and creates a server set up message (serialized protobuf) with the encrypted elements.
This method converts the byte stream of data received to a serialized protobuf.
This protobuf is the request message created by the client object in the calling API.
This request message contains a list of nodes that are encrypted by the client's secret key.
This method then encrypts each client-encrypted node with the server secret key generated earlier and creates a response message (Protobuf form).
Both the set up message and response message are converted to JSON and returned to the calling API.
---
parameters:
client_id:
type: string (API ID)
description: API ID of the API that has called this method. (API ID of the client)
required: True
set_size:
type: int
description: Size of client node list.
required: True
psi_request_id:
type: string (Request ID)
description: Request ID of the PSI protocol that is currently occurring. This is used to retrieve the corresponding node list from the server directory.
required: True
data:
description: Request message generated by the client object of the calling API. This message has been serialized to a bytestream from a protobuf format.
This message contains a list of nodes that have been encrypted by the secret key of the client object created by the calling API.
responses:
200:
description: Returns a set up and response message in JSON format.
schema:
setup:
type: JSON
description: Protobuf message containing a list of server nodes encrypted using the server's secret key. (Converted to JSON format)
resp:
type: JSON
description: Protobuf message containing a list of client nodes encrypted using both the client and server's secret key. (Converted to JSON format)
"""
PrintInfo("Server setup request received...")
PrintInfo("Client ID: " + str(request.args.get("client_id")))
ClientSetSize = int(request.args.get("set_size"))
ClientRequestMessage = request.data
PrintDebug("Client Set Size: " + str(ClientSetSize))
dstReq = psi.Request()
dstReq.ParseFromString(ClientRequestMessage)
ClientRequest = dstReq
fpr = 1.0 / (1000000000)
s = psi.server.CreateWithNewKey(True)
PsiRequestID = request.args.get("psi_request_id")
ServerSet = ServerDirectory.get(PsiRequestID)
setup = s.CreateSetupMessage(fpr, ClientSetSize, ServerSet)
resp = s.ProcessRequest(ClientRequest)
setupJson = MessageToJson(setup)
respJson = MessageToJson(resp)
DisposeServerSet(PsiRequestID)
return {"setup": setupJson, "resp": respJson}
| 5,343,931
|
def load_separator(
model_str_or_path: str = "umxhq",
niter: int = 1,
residual: bool = False,
slicq_wiener: bool = False,
wiener_win_len: Optional[int] = 300,
device: Union[str, torch.device] = "cpu",
pretrained: bool = True,
):
"""Separator loader
Args:
model_str_or_path (str): Model name or path to model _parent_ directory
E.g. The following files are assumed to present when
loading `model_str_or_path='mymodel', targets=['vocals']`
'mymodel/separator.json', mymodel/vocals.pth', 'mymodel/vocals.json'.
Defaults to `umxhq`.
niter (int): Number of filtering iterations, defaults to 1
residual (bool): Whether to add a residual target, defaults to False
slicq_wiener (bool): If True, apply Wiener filtering in the sliCQ domain
instead of the STFT domain, defaults to False
wiener_win_len (int or None): Maximum Wiener filter window length, defaults to 300
device (str): torch device, defaults to `cpu`
pretrained (bool): determines if loading pre-trained weights
"""
model_path = Path(model_str_or_path).expanduser()
# when path exists, we assume its a custom model saved locally
if model_path.exists():
with open(Path(model_path, "separator.json"), "r") as stream:
enc_conf = json.load(stream)
xumx_model, model_nsgt, jagged_slicq_sample = load_target_models(
model_str_or_path=model_path, pretrained=pretrained, sample_rate=enc_conf["sample_rate"], device=device
)
separator = model.Separator(
xumx_model,
model_nsgt,
jagged_slicq_sample,
stft_wiener=not slicq_wiener,
sample_rate=enc_conf["sample_rate"],
nb_channels=enc_conf["nb_channels"],
).to(device)
return separator
| 5,343,932
|
def IntermediateParticleConst_get_decorator_type_name():
"""IntermediateParticleConst_get_decorator_type_name() -> std::string"""
return _RMF.IntermediateParticleConst_get_decorator_type_name()
| 5,343,933
|
def multiply_add_plain_with_delta(ct, pt, context_data):
"""Add plaintext to ciphertext.
Args:
ct (Ciphertext): ct is pre-computed carrier polynomial where we can add pt data.
pt (Plaintext): A plaintext representation of integer data to be encrypted.
context_data (Context): Context data for extracting encryption parameters.
Returns:
A Ciphertext object with the encrypted result of encryption process.
"""
ct_param_id = ct.param_id
coeff_modulus = context_data.param.coeff_modulus
pt = pt.data
plain_coeff_count = len(pt)
delta = context_data.coeff_div_plain_modulus
ct0, ct1 = ct.data # here ct = pk * u * e
# Coefficients of plain m multiplied by coeff_modulus q, divided by plain_modulus t,
# and rounded to the nearest integer (rounded up in case of a tie).
for i in range(plain_coeff_count):
for j in range(len(coeff_modulus)):
temp = round(delta[j] * pt[i]) % coeff_modulus[j]
ct0[j][i] = (ct0[j][i] + temp) % coeff_modulus[j]
return CipherText([ct0, ct1], ct_param_id)
| 5,343,934
|
def ginput(n=1, timeout=30, debug=False):
"""
Simple functional call for physicists. This will wait for n clicks
from the user and
return a list of the coordinates of each click.
"""
x = GaelInput()
return x(n, timeout, debug)
| 5,343,935
|
def retrieve_form_data(form, submission_type="solution"):
"""Quick utility function that groups together the processing of request data. Allows for easier handling of exceptions
Takes request object as argument
On Success, returns hashmap of processed data...otherwise raise an exception"""
if submission_type == "solution":
processed_data = {}
try:
print("FCD =>", form.cleaned_data)
processed_data["prob_id"] = int(form.cleaned_data.get("problem_id"))
processed_data["uid"] = int(form.cleaned_data.get("user_id"))
processed_data["code_data"] = form.cleaned_data.get("solution")
processed_data["course_id"] = form.cleaned_data.get("course_id", None)
except Exception as e:
print("POST NOT OK: Error during intial processing of uploaded data - {0}".format(str(e)))
return Response(ERROR_CODES["Form Submission Error"], status=status.HTTP_400_BAD_REQUEST)
return processed_data
elif submission_type == "problem_upload":
data = form.cleaned_data
processed_data = {}
try:
processed_data["author_id"] = int(data.get("author_id"))
processed_data["category"] = data.get("category")
processed_data["target_file"] = data.get("target_file", None)
processed_data["data_file"] = data.get("data_file", None)
processed_data["course_id"] = data.get("course_id", None)
if processed_data["data_file"] is not None:
processed_data["data_file"].seek(0)
processed_data["init_data"] = processed_data["data_file"].read().decode("utf-8")
try:
json.loads(processed_data["init_data"])
except Exception as e:
raise Exception("Invalid JSON in init_data_file! - {0}".format(str(e)))
else:
processed_data["init_data"] = None
processed_data["name"] = data.get("name").replace("(", "[").replace(")", "]")
if "(" in processed_data["name"] or ")" in processed_data["name"]:
print("POST NOT OK: Problem Name cannot contain parnetheses!")
return Response(ERROR_CODES["Form Submission Error"], status=status.HTTP_400_BAD_REQUEST)
description = data.get("description")
processed_data["program_file"] = data.get("program")
processed_data["code"] = [line.decode("utf-8") for line in processed_data["program_file"].read().splitlines()]
processed_data["metadata"] = data.get("meta_file")
processed_data["metadata"]["description"] = description
processed_data["date_submitted"] = datetime.now()
processed_data["inputs"] = data.get("inputs", None)
if processed_data["category"] == "file_io":
processed_data["metadata"]["inputs"] = "file"
else:
processed_data["metadata"]["inputs"] = True if processed_data["inputs"] is not None else False
processed_data["metadata"]["init_data"] = True if processed_data["init_data"] is not None else False
except Exception as e:
print("POST NOT OK: Error during intial processing of uploaded data - {0}".format(str(e)))
return Response(ERROR_CODES["Form Submission Error"], status=status.HTTP_400_BAD_REQUEST)
return processed_data
| 5,343,936
|
def existing_file(fname):
"""
Check if the file exists. If not raise an error
Parameters
----------
fname: string
file name to parse
Returns
-------
fname : string
"""
if os.path.isfile(fname):
return fname
else:
msg = "The file '{}' does not exist".format(fname)
raise ap.ArgumentTypeError(msg)
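# Hedged usage sketch (assumes `ap` above is the argparse module): the function is
# intended as an argparse `type=` validator for file arguments.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("config", type=existing_file, help="path to an existing file")
# parser.parse_args(["missing.ini"])  # would exit reporting that the file does not exist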
| 5,343,937
|
async def test_update_with_failed_get(hass, caplog):
"""Test attributes get extracted from a XML result with bad xml."""
respx.get(
"http://localhost",
status_code=200,
headers={"content-type": "text/xml"},
content="",
)
assert await async_setup_component(
hass,
"sensor",
{
"sensor": {
"platform": "rest",
"resource": "http://localhost",
"method": "GET",
"value_template": "{{ value_json.toplevel.master_value }}",
"json_attributes": ["key"],
"name": "foo",
"unit_of_measurement": DATA_MEGABYTES,
"verify_ssl": "true",
"timeout": 30,
}
},
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
state = hass.states.get("sensor.foo")
assert state.state == STATE_UNKNOWN
assert "Erroneous XML" in caplog.text
assert "Empty reply" in caplog.text
| 5,343,938
|
def keep_english_for_spacy_nn(df):
"""This function takes the DataFrame for songs
and keep songs with english as main language
for english version of spacy neural network for word processing"""
#Keep only english for spacy NN English preprocessing words
#Network for other languages like french, spanish, portuguese are also available
df = df.loc[df['Main Language'] == 'en',:]
# Drop the translation column, which is not used for lyrics in English
df.drop(['English Translation Lyrics'],axis =1,inplace = True)
return df
| 5,343,939
|
def make_dealer_cards_more_fun(deck, dealer):
"""
Re-deal the dealer's cards to make the game more fun by letting the dealer win more often.
:param deck: the remaining deck
:param dealer: the dealer's current cards
:return: the (possibly re-dealt) dealer cards
Note: the repeated re-dealing may incur a lot of memory/CPU work.
"""
dealercards = card_sorting_dealer(dealer)
count = 0
if jokbo(dealercards) == 0 or jokbo(dealercards) == 1 or jokbo(dealercards) == 2:
deck.append({"suit": dealer[0]["suit"], "rank": dealer[0]["rank"]})
deck.append({"suit": dealer[1]["suit"], "rank": dealer[1]["rank"]})
deck.append({"suit": dealer[2]["suit"], "rank": dealer[2]["rank"]})
deck.append({"suit": dealer[3]["suit"], "rank": dealer[3]["rank"]})
deck.append({"suit": dealer[4]["suit"], "rank": dealer[4]["rank"]})
if jokbo(dealercards) == 0: # high card
while True:
count += 1
dealer = []
card, deck = hit(deck)
dealer.append(card)
card, deck = hit(deck)
dealer.append(card)
card, deck = hit(deck)
dealer.append(card)
card, deck = hit(deck)
dealer.append(card)
card, deck = hit(deck)
dealer.append(card)
dealercards = card_sorting_dealer(dealer)
if count == 30: break
if jokbo(dealercards) != 0 and jokbo(dealercards) != 1:
break
else:
deck.append({"suit": dealer[0]["suit"], "rank": dealer[0]["rank"]})
deck.append({"suit": dealer[1]["suit"], "rank": dealer[1]["rank"]})
deck.append({"suit": dealer[2]["suit"], "rank": dealer[2]["rank"]})
deck.append({"suit": dealer[3]["suit"], "rank": dealer[3]["rank"]})
deck.append({"suit": dealer[4]["suit"], "rank": dealer[4]["rank"]})
continue
return dealer
elif jokbo(dealercards) == 1: # one pair
while True:
count += 1
dealer = []
card, deck = hit(deck)
dealer.append(card)
card, deck = hit(deck)
dealer.append(card)
card, deck = hit(deck)
dealer.append(card)
card, deck = hit(deck)
dealer.append(card)
card, deck = hit(deck)
dealer.append(card)
dealercards = card_sorting_dealer(dealer)
if count == 30: break
if jokbo(dealercards) != 0 and jokbo(dealercards) != 1:
break
else:
deck.append({"suit": dealer[0]["suit"], "rank": dealer[0]["rank"]})
deck.append({"suit": dealer[1]["suit"], "rank": dealer[1]["rank"]})
deck.append({"suit": dealer[2]["suit"], "rank": dealer[2]["rank"]})
deck.append({"suit": dealer[3]["suit"], "rank": dealer[3]["rank"]})
deck.append({"suit": dealer[4]["suit"], "rank": dealer[4]["rank"]})
continue
return dealer
else:
return dealer
| 5,343,940
|
def subinit1_initpaths_config_log():
"""
Initializes the paths (stored in global __PATHS):
1 Finds the project location
2 Reads config.ini
3 Reads the paths defined in config.ini
4 Checks that the paths exist
"""
# -------------------------------------------------------------------------------- 1) FUNDAMENTAL PATHS
# - project root + directory
PATHS = {'Proj': os.path.dirname(os.path.abspath(__file__)),}
# Proj directory is well-defined. All paths are relative to the root (Paths['Proj'])
toAbsPath = lambda PATHS_key, relpath: os.path.join(PATHS[PATHS_key], relpath)
# ............................................................... a) Subdirs of root
PATHS['Inputs and resources'] = toAbsPath('Proj', 'Inputs and resources')
PATHS['Results'] = toAbsPath('Proj', 'Results')
PATHS['To WRF'] = toAbsPath('Proj', 'To WRF')
# ............................................................... b) Subdirs of Inputs and resources
PATHS['Fundamentals'] = toAbsPath('Inputs and resources', 'Fundamentals')
PATHS['Gen Parameters'] = toAbsPath('Inputs and resources', 'Gen Parameters')
PATHS['Other Parameters'] = toAbsPath('Inputs and resources', 'Other Parameters')
PATHS['Fundamentals', 'fuels'] = toAbsPath('Fundamentals', 'fuels')
PATHS['Gen Parameters', 'efficiency curves'] = toAbsPath('Gen Parameters', 'efficiency curves')
# ............................................................... c) Subdirs of To WRF
PATHS['WRF resources'] = toAbsPath('To WRF', 'Resources')
# -------------------------------------------------------------------------------- 2) Read CONFIG
PATHS['config'] = toAbsPath('Inputs and resources', 'config.ini')
config = configparser.ConfigParser()
config.read(PATHS['config'])
# -------------------------------------------------------------------------------- 3) Start log
PATHS['log'] = toAbsPath('Inputs and resources', config['log']['file_name'])
logging.basicConfig(filename=PATHS['log'], level=eval(config['log']['level']), filemode='w')
logging.info("[PROGRAM START] at {}.\nInitialization commencing. \n "
"---------------------------------------------------------------".format(log_time()))
# -------------------------------------------------------------------------------- 4) __PATHS from CONFIG
# Q: Why is the metadata file configurable?
# A: If all inputs are configurable, and the metadata is part of the input, then rightfully so.
PATHS['PP database'] = toAbsPath('Inputs and resources', config['paths']['fp_powerplant_database'])
PATHS['metadata'] = toAbsPath('Inputs and resources', config['paths']['fp_metadata'])
PATHS['pint defn'] = toAbsPath('Inputs and resources', config['data import settings']['pint_unitdefn'])
# -------------------------------------------------------------------------------- 5) Check that all dir/file exists
donotexist = tuple(key for key, fp in PATHS.items() if not os.path.exists(fp))
if donotexist:
strgen = ("\t{}: '{}'".format(key, PATHS[key]) for key in donotexist)
raise FileNotFoundError("The ff. paths or files were not found: \n{}\n\nPls. double check that "
"config.ini (section 'paths') points to these required paths in the project "
"directory, and that the project directory system was not altered.".format(
'\n'.join(strgen)))
return PATHS, config
| 5,343,941
|
def update_draft(
url: str,
access_key: str,
dataset_id: str,
draft_number: int,
*,
status: Optional[str] = None,
title: Optional[str] = None,
description: Optional[str] = None,
) -> None:
"""Execute the OpenAPI `PATCH /v1/datasets{id}/drafts{draftNumber}`.
Arguments:
url: The URL of the graviti website.
access_key: User's access key.
dataset_id: Dataset ID.
draft_number: The updated draft number.
status: The updated draft status which could be "CLOSED" or None.
Where None means no change in status.
title: The draft title.
description: The draft description.
Examples:
Update the title or description of the draft:
>>> update_draft(
... "https://gas.graviti.com/",
... "ACCESSKEY-********",
... "2bc95d506db2401b898067f1045d7f68",
... 2,
... title="draft-3"
... )
Close the draft:
>>> update_draft(
... "https://gas.graviti.com/",
... "ACCESSKEY-********",
... "2bc95d506db2401b898067f1045d7f68",
... 2,
... status="CLOSED"
... )
"""
url = urljoin(url, f"{URL_PATH_PREFIX}/datasets/{dataset_id}/drafts/{draft_number}")
patch_data: Dict[str, Any] = {"draftNumber": draft_number}
if status:
patch_data["status"] = status
if title is not None:
patch_data["title"] = title
if description is not None:
patch_data["description"] = description
open_api_do(url, access_key, "PATCH", json=patch_data)
| 5,343,942
|
def data(request):
"""Returns available albums from the database. Can be optionally filtered by year.
This is called from templates/albums/album/index.html when the year input is changed.
"""
year = request.GET.get('year')
if year:
try:
year = int(year)
except (ValueError, TypeError):
return HttpResponseBadRequest('invalid year parameter')
else:
year = None
return JsonResponse(list(get_albums(year)), safe=False)
| 5,343,943
|
def yaw_to_quaternion3d(yaw: float) -> Tuple[float,float,float,float]:
"""
Args:
- yaw: rotation about the z-axis
Returns:
- qx,qy,qz,qw: quaternion coefficients
"""
qx,qy,qz,qw = Rotation.from_euler('z', yaw).as_quat()
return qx,qy,qz,qw
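# Illustrative check (assumes `Rotation` above is scipy.spatial.transform.Rotation):
# a 90-degree yaw gives (qx, qy, qz, qw) = (0, 0, sin(pi/4), cos(pi/4)).
import numpy as np
assert np.allclose(yaw_to_quaternion3d(np.pi / 2), [0.0, 0.0, np.sqrt(2) / 2, np.sqrt(2) / 2])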
| 5,343,944
|
def typecheck(session: Session) -> None:
"""Run type checking (using mypy)."""
args = session.posargs or locations
session.install("types-requests", "types-termcolor", "types-colorama")
session.install("mypy")
session.run("mypy", *args)
| 5,343,945
|
def parse_coap_response_code(response_code):
"""
Parse the binary code from CoAP response and return the response code as a float.
See also https://tools.ietf.org/html/rfc7252#section-5.9 for response code definitions.
:rtype float
"""
response_code_class = response_code // 32
response_code_detail = response_code % 32
# Compose response code
return response_code_class + response_code_detail / 100
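# Worked example (illustrative, not part of the dataset entry): CoAP 2.05 "Content"
# is transported as 0b010_00101 = 69 and 4.04 "Not Found" as 0b100_00100 = 132.
assert round(parse_coap_response_code(69), 2) == 2.05
assert round(parse_coap_response_code(132), 2) == 4.04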
| 5,343,946
|
def modified_partial_sum_product(
sum_op, prod_op, factors, eliminate=frozenset(), plate_to_step=dict()
):
"""
Generalization of the tensor variable elimination algorithm of
:func:`funsor.sum_product.partial_sum_product` to handle markov dimensions
in addition to plate dimensions. Markov dimensions in transition factors
are eliminated efficiently using the parallel-scan algorithm in
:func:`funsor.sum_product.sequential_sum_product`. The resulting factors are then
combined with the initial factors and final states are eliminated. Therefore,
when a Markov dimension is eliminated, ``factors`` has to contain pairs of
initial factors and transition factors.
:param ~funsor.ops.AssociativeOp sum_op: A semiring sum operation.
:param ~funsor.ops.AssociativeOp prod_op: A semiring product operation.
:param factors: A collection of funsors.
:type factors: tuple or list
:param frozenset eliminate: A set of free variables to eliminate,
including both sum variables and product variable.
:param dict plate_to_step: A dict mapping markov dimensions to
``step`` collections that contain ordered sequences of Markov variable names
(e.g., ``{"time": frozenset({("x_0", "x_prev", "x_curr")})}``).
Plates are passed with an empty ``step``.
:return: a list of partially contracted Funsors.
:rtype: list
"""
assert callable(sum_op)
assert callable(prod_op)
assert isinstance(factors, (tuple, list))
assert all(isinstance(f, Funsor) for f in factors)
assert isinstance(eliminate, frozenset)
assert isinstance(plate_to_step, dict)
# process plate_to_step
plate_to_step = plate_to_step.copy()
prev_to_init = {}
for key, step in plate_to_step.items():
# map prev to init; works for any history > 0
for chain in step:
init, prev = chain[: len(chain) // 2], chain[len(chain) // 2 : -1]
prev_to_init.update(zip(prev, init))
# convert step to dict type required for MarkovProduct
plate_to_step[key] = {chain[1]: chain[2] for chain in step}
plates = frozenset(plate_to_step.keys())
sum_vars = eliminate - plates
prod_vars = eliminate.intersection(plates)
markov_sum_vars = frozenset()
for step in plate_to_step.values():
markov_sum_vars |= frozenset(step.keys()) | frozenset(step.values())
markov_sum_vars &= sum_vars
markov_prod_vars = frozenset(
k for k, v in plate_to_step.items() if v and k in eliminate
)
markov_sum_to_prod = defaultdict(set)
for markov_prod in markov_prod_vars:
for k, v in plate_to_step[markov_prod].items():
markov_sum_to_prod[k].add(markov_prod)
markov_sum_to_prod[v].add(markov_prod)
var_to_ordinal = {}
ordinal_to_factors = defaultdict(list)
for f in factors:
ordinal = plates.intersection(f.inputs)
ordinal_to_factors[ordinal].append(f)
for var in sum_vars.intersection(f.inputs):
var_to_ordinal[var] = var_to_ordinal.get(var, ordinal) & ordinal
ordinal_to_vars = defaultdict(set)
for var, ordinal in var_to_ordinal.items():
ordinal_to_vars[ordinal].add(var)
results = []
while ordinal_to_factors:
leaf = max(ordinal_to_factors, key=len)
leaf_factors = ordinal_to_factors.pop(leaf)
leaf_reduce_vars = ordinal_to_vars[leaf]
for (group_factors, group_vars) in _partition(
leaf_factors, leaf_reduce_vars | markov_prod_vars
):
# eliminate non markov vars
nonmarkov_vars = group_vars - markov_sum_vars - markov_prod_vars
f = reduce(prod_op, group_factors).reduce(sum_op, nonmarkov_vars)
# eliminate markov vars
markov_vars = group_vars.intersection(markov_sum_vars)
if markov_vars:
markov_prod_var = [markov_sum_to_prod[var] for var in markov_vars]
assert all(p == markov_prod_var[0] for p in markov_prod_var)
if len(markov_prod_var[0]) != 1:
raise ValueError("intractable!")
time = next(iter(markov_prod_var[0]))
for v in sum_vars.intersection(f.inputs):
if time in var_to_ordinal[v] and var_to_ordinal[v] < leaf:
raise ValueError("intractable!")
time_var = Variable(time, f.inputs[time])
group_step = {
k: v for (k, v) in plate_to_step[time].items() if v in markov_vars
}
f = MarkovProduct(sum_op, prod_op, f, time_var, group_step)
f = f.reduce(sum_op, frozenset(group_step.values()))
f = f(**prev_to_init)
remaining_sum_vars = sum_vars.intersection(f.inputs)
if not remaining_sum_vars:
results.append(f.reduce(prod_op, leaf & prod_vars - markov_prod_vars))
else:
new_plates = frozenset().union(
*(var_to_ordinal[v] for v in remaining_sum_vars)
)
if new_plates == leaf:
raise ValueError("intractable!")
f = f.reduce(prod_op, leaf - new_plates - markov_prod_vars)
ordinal_to_factors[new_plates].append(f)
return results
| 5,343,947
|
def load_scicar_cell_lines(test=False):
"""Download sci-CAR cell lines data from GEO."""
if test:
adata = load_scicar_cell_lines(test=False)
adata = subset_joint_data(adata)
return adata
return load_scicar(
rna_url,
rna_cells_url,
rna_genes_url,
atac_url,
atac_cells_url,
atac_genes_url,
)
| 5,343,948
|
def ExtrudeNA(chain):
"""Computes ribbons for DNA/RNA"""
coord = []
coord.append(chain.DNARes[0].atoms[0].coords)
NA_type = chain.DNARes[0].type.strip()
atoms = chain.DNARes[0].atoms
missingAts = False
normal = numpy.array([0.,1.,0.])
if NA_type in ['A', 'G']:
listesel = ['N9.*','C8.*','C4.*']
listeCoord,missingAts = getAtsRes(atoms,listesel)
if not missingAts :
N9 = listeCoord[0]#numpy.array(atoms.objectsFromString('N9.*')[0].coords)
C8 = listeCoord[1]#numpy.array(atoms.objectsFromString('C8.*')[0].coords)
C4 = listeCoord[2]#numpy.array(atoms.objectsFromString('C4.*')[0].coords)
N9_C8 = C8-N9
N9_C4 = C4-N9
normal = numpy.array(crossProduct(N9_C8, N9_C4, normal=True))
else:
listesel = ['N1.*','C2.*','C6.*']
listeCoord = []
listeCoord,missingAts = getAtsRes(atoms,listesel)
if not missingAts :
N1 = listeCoord[0]#numpy.array(atoms.objectsFromString('N1.*')[0].coords)
C2 = listeCoord[1]#numpy.array(atoms.objectsFromString('C2.*')[0].coords)
C6 = listeCoord[2]#numpy.array(atoms.objectsFromString('C6.*')[0].coords)
N1_C2 = C2-N1
N1_C6 = C6-N1
normal = numpy.array(crossProduct(N1_C2, N1_C6, normal=True))
base_normal = numpy.array(chain.DNARes[0].atoms[0].coords)
coord.append((base_normal + normal).tolist())
for res in chain.DNARes[1:]:
normal = numpy.array([0.,1.,0.])
if res.atoms.objectsFromString('P.*'):
P_coord = res.atoms.objectsFromString('P.*')[0].coords
coord.append(P_coord)
else: # this in case last residue does not have P
P_coord = res.atoms[0].coords
NA_type = res.type.strip()
atoms = res.atoms
if NA_type in ['A', 'G']:
listesel = ['N9.*','C8.*','C4.*']
listeCoord,missingAts = getAtsRes(atoms,listesel)
if not missingAts :
N9 = listeCoord[0]#numpy.array(atoms.objectsFromString('N9.*')[0].coords)
C8 = listeCoord[1]#numpy.array(atoms.objectsFromString('C8.*')[0].coords)
C4 = listeCoord[2]#numpy.array(atoms.objectsFromString('C4.*')[0].coords)
N9_C8 = C8-N9
N9_C4 = C4-N9
normal = numpy.array(crossProduct(N9_C8, N9_C4, normal=True))
else:
listesel = ['N1.*','C2.*','C6.*']
listeCoord = []
listeCoord,missingAts = getAtsRes(atoms,listesel)
if not missingAts :
N1 = listeCoord[0]#numpy.array(atoms.objectsFromString('N1.*')[0].coords)
C2 = listeCoord[1]#numpy.array(atoms.objectsFromString('C2.*')[0].coords)
C6 = listeCoord[2]#numpy.array(atoms.objectsFromString('C6.*')[0].coords)
N1_C2 = C2-N1
N1_C6 = C6-N1
normal = numpy.array(crossProduct(N1_C2, N1_C6, normal=True))
base_normal = numpy.array(P_coord)
coord.append((base_normal + normal).tolist())
chain.sheet2D['ssSheet2D'] = Sheet2D()
chain.sheet2D['ssSheet2D'].compute(coord, len(chain.DNARes)*(False,),
width = 2.0,off_c = 0.9,offset=0.0, nbchords=4)
chain.sheet2D['ssSheet2D'].resInSheet = chain.DNARes
| 5,343,949
|
def binary_search(sorted_list, item):
"""
Implements a Binary Search, O(log n).
If item is in the list, returns the number of steps taken.
If item not in list, returns None.
"""
steps = 0
start = 0
end = len(sorted_list)
while start < end:
steps += 1
mid = (start + end) // 2
# print("#", mid)
if sorted_list[mid] == item:
return steps
# If the item is lesser than the list
# item == 3 and sorted_list == [1, 2, 3, 4, 5, 6, 8]
# the END of my list becomes the middle (4), excluding all items from the middle to the end
# end == 4
# next time, when mid = (start + end) // 2 executes, mid == 2
if sorted_list[mid] > item:
end = mid
# If the item is bigger than the list
# item == 8 and sorted_list == [1, 2, 3, 4, 5, 6, 8]
# the START of my list will be the middle (4) plus 1, excluding all items from the middle to the begginning
# start == 5
# next time, when mid = (start + end) // 2 executes, mid == 8
if sorted_list[mid] < item:
start = mid + 1
return None
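# Illustrative usage (not part of the dataset entry): the return value is the number
# of probes taken, not the index of the item; None signals a miss.
assert binary_search([1, 2, 3, 4, 5, 6, 8], 4) == 1   # middle element, found on the first probe
assert binary_search([1, 2, 3, 4, 5, 6, 8], 7) is None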
| 5,343,950
|
def _process_split(
pipeline,
*,
filename_template: naming.ShardedFileTemplate,
out_dir: utils.ReadWritePath,
file_infos: List[naming.FilenameInfo],
):
"""Process a single split."""
beam = lazy_imports_lib.lazy_imports.apache_beam
# Use unpack syntax on set to implicitly check that all values are the same
split_name, = {f.split for f in file_infos}
# Check that all the file-info from the given split are consistent
# (no missing file)
shard_ids = sorted(f.shard_index for f in file_infos)
num_shards, = {f.num_shards for f in file_infos}
assert shard_ids == list(range(num_shards)), 'Missing shard files.'
# Check that the file extension is correct.
file_suffix, = {f.filetype_suffix for f in file_infos}
file_format = file_adapters.file_format_from_suffix(file_suffix)
adapter = file_adapters.ADAPTER_FOR_FORMAT[file_format]
data_dir = utils.as_path(filename_template.data_dir)
# Build the pipeline to process one split
return (pipeline
| beam.Create(file_infos)
| beam.Map(_process_shard, data_dir=data_dir, adapter=adapter)
# Group everything in a single elem (_ShardInfo -> List[_ShardInfo])
| _group_all() # pytype: disable=missing-parameter # pylint: disable=no-value-for-parameter
| beam.Map(_merge_shard_info, filename_template=filename_template)
| beam.Map(_split_info_to_json_str)
| beam.io.WriteToText( # pytype: disable=missing-parameter
os.fspath(out_dir / _out_filename(split_name)),
num_shards=1,
shard_name_template='',
))
| 5,343,951
|
def _scale_by(number, should_fail=False):
"""
A helper function that creates a scaling policy and scales by the given
number, if the number is not zero. Otherwise, just triggers convergence.
:param int number: The number to scale by.
:param bool should_fail: Whether or not the policy execution should fail.
:return: A function that can be passed to :func:`_oob_disable_then` as the
``then`` parameter.
"""
def _then(helper, rcs, group):
policy = ScalingPolicy(scale_by=number, scaling_group=group)
return (policy.start(rcs, helper.test_case)
.addCallback(policy.execute,
success_codes=[403] if should_fail else [202]))
return _then
| 5,343,952
|
def test_read_response_error_received(handshake):
"""
Test the response returned by the database is an error.
"""
handshake.next_state = Mock()
handshake.state = HandshakeState.INITIAL_RESPONSE
with pytest.raises(ValueError):
handshake.next_message(bytes("ERROR", "utf-8"))
assert handshake.next_state.called is False
| 5,343,953
|
def selectgender(value):
"""格式化为是/否
:param value:M/F,
:return: 男/女
"""
absent = {"M": u'男', "F": u'女'}
try:
if value:
return absent[value]
return ""
except:
traceback.print_exc()
| 5,343,954
|
def Clifford_twirl_channel_one_qubit(K, rho, sys=1, dim=[2]):
"""
Twirls the given channel with Kraus operators in K by the one-qubit
Clifford group on the given subsystem (specified by sys).
"""
n = int(np.log2(np.sum([d for d in dim])))
C1 = eye(2**n)
C2 = Rx_i(sys, np.pi, n)
C3 = Rx_i(sys, np.pi / 2.0, n)
C4 = Rx_i(sys, -np.pi / 2.0, n)
C5 = Rz_i(sys, np.pi, n)
C6 = Rx_i(sys, np.pi, n) * Rz_i(sys, np.pi, n)
C7 = Rx_i(sys, np.pi / 2.0, n) * Rz_i(sys, np.pi, n)
C8 = Rx_i(sys, -np.pi / 2.0, n) * Rz_i(sys, np.pi, n)
C9 = Rz_i(sys, np.pi / 2.0, n)
C10 = Ry_i(sys, np.pi, n) * Rz_i(sys, np.pi / 2.0, n)
C11 = Ry_i(sys, -np.pi / 2.0, n) * Rz_i(sys, np.pi / 2.0, n)
C12 = Ry_i(sys, np.pi / 2.0, n) * Rz_i(sys, np.pi / 2.0, n)
C13 = Rz_i(sys, -np.pi / 2.0, n)
C14 = Ry_i(sys, np.pi, n) * Rz_i(sys, -np.pi / 2.0, n)
C15 = Ry_i(sys, -np.pi / 2.0, n) * Rz_i(sys, -np.pi / 2.0, n)
C16 = Ry_i(sys, np.pi / 2.0, n) * Rz_i(sys, -np.pi / 2.0, n)
C17 = (
Rz_i(sys, -np.pi / 2.0, n)
* Rx_i(sys, np.pi / 2.0, n)
* Rz_i(sys, np.pi / 2.0, n)
)
C18 = (
Rz_i(sys, np.pi / 2.0, n)
* Rx_i(sys, np.pi / 2.0, n)
* Rz_i(sys, np.pi / 2.0, n)
)
C19 = Rz_i(sys, np.pi, n) * Rx_i(sys, np.pi / 2.0, n) * Rz_i(sys, np.pi / 2.0, n)
C20 = Rx_i(sys, np.pi / 2.0, n) * Rz_i(sys, np.pi / 2.0, n)
C21 = (
Rz_i(sys, np.pi / 2.0, n)
* Rx_i(sys, -np.pi / 2.0, n)
* Rz_i(sys, np.pi / 2.0, n)
)
C22 = (
Rz_i(sys, -np.pi / 2.0, n)
* Rx_i(sys, -np.pi / 2.0, n)
* Rz_i(sys, np.pi / 2.0, n)
)
C23 = Rx_i(sys, -np.pi / 2.0, n) * Rz_i(sys, np.pi / 2.0, n)
C24 = Rx_i(sys, np.pi, n) * Rx_i(sys, -np.pi / 2.0, n) * Rz_i(sys, np.pi / 2.0, n)
C = [
C1,
C2,
C3,
C4,
C5,
C6,
C7,
C8,
C9,
C10,
C11,
C12,
C13,
C14,
C15,
C16,
C17,
C18,
C19,
C20,
C21,
C22,
C23,
C24,
]
rho_twirl = 0
for i in range(len(C)):
rho_twirl += (
(1.0 / 24.0)
* C[i]
@ apply_channel(K, dag(C[i]) @ rho @ C[i], sys, dim)
@ dag(C[i])
)
return rho_twirl, C
| 5,343,955
|
def unpack_batch(batch, use_cuda=False):
""" Unpack a batch from the data loader. """
input_ids = batch[0]
input_mask = batch[1]
segment_ids = batch[2]
boundary_ids = batch[3]
pos_ids = batch[4]
rel_ids = batch[5]
knowledge_feature = batch[6]
bio_ids = batch[1]
# knowledge_adjoin_matrix = batch[7]
# know_segment_ids = batch[6]
# know_input_ids = batch[7]
# know_input_mask = batch[8]
# knowledge_feature = (batch[6], batch[7], batch[8])
return input_ids, input_mask, segment_ids, boundary_ids, pos_ids, rel_ids, knowledge_feature,bio_ids
| 5,343,956
|
def truncate(text: str, length: int = 255, end: str = "...") -> str:
"""Truncate text.
Parameters
---------
text : str
length : int, default 255
Max text length.
end : str, default "..."
The characters that come at the end of the text.
Returns
-------
truncated text : str
Examples
--------
.. code-block:: html
<meta property="og:title" content="^^ truncate(title, 30) ^^">"""
return f"{text[:length]}{end}"
| 5,343,957
|
def write_matchcat(cat1,cat2,outfile,rmatch,c1fluxcol,c2fluxcol):
"""
Writes an output file in the format of the file produced by catcomb.c.
*** NOTE *** The catalog matching (with the find_match function in
matchcat.py) has to have been run before running this code.
Inputs:
cat1 - first catalog used in the matching
cat2 - second catalog used in the matching (needs to be fixed for more)
outfile - output file
rmatch - match radius used for the matching - used only to put info
into the output file.
"""
""" Get info on the matched objects """
c1d = cat1.data
c1id = n.arange(1,c1d.size+1)
c1mi = cat1.indmatch.copy()
ra1 = cat1.ra
dec1 = cat1.dec[cat1.mask]
ct1 = (n.arange(1,cat1.data.size+1))[cat1.mask]
c1m = cat1.data[cat1.mask]
c2m = cat2.data[cat2.mask]
dx = cat1.matchdx[cat1.mask]
dy = cat1.matchdy[cat1.mask]
dpos = n.sqrt(dx**2 + dy**2)
""" Write match info to output file """
ofile = open(outfile,'w')
#
# Need to fix format here to match matchcat.c
#
for i in range(cat1.ra.size):
c1dat = cat1.data[i]
c1flux = c1dat['f%d'% c1fluxcol]
if c1mi[i]>-1:
c2dat = cat2.data[c1mi[i]]
ind2 = c2dat['f0']
flux2 = c2dat['f%d' % c2fluxcol]
dpos = n.sqrt(cat1.matchdx[i]**2 + cat1.matchdy[i]**2)
else:
ind2 = 0
flux2 = 0.
dpos = 0.
ofile.write('%05d %11.7f %+11.7f 0.00 0.00 %5d 0.00 %7.4f ' % \
(c1id[i],cat1.ra[i],cat1.dec[i],c1id[i],c1flux))
ofile.write('%5d %8.2f %6.2f' % (ind2,dpos,flux2))
ofile.write('\n')
ofile.close()
| 5,343,958
|
def create_logger(log_dir, log_file, level="info"):
"""
Function used to create logger object based on log directory
and log file name
"""
handler = RotatingFileHandler(filename=path.join(log_dir, log_file),
mode='a', maxBytes=5000000, backupCount=10)
formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(filename)s, %(lineno)d [%(name)s]: %(message)s', '%d-%b-%y %H:%M:%S')
handler.setFormatter(formatter)
logger = logging.getLogger(__name__)
log_level = _get_log_level(level)
logger.setLevel(log_level)
logger.addHandler(handler)
return logger
| 5,343,959
|
def test_github_recon_return_none_with_none_input():
"""Test recon function return None with None input."""
assert github_recon(None) is None
| 5,343,960
|
def gaussian_slice(x, sigma, mu):
"""
return a slice of x in which the gaussian is significant
exp(-0.5 * ((x - mu) / sigma) ** 2) < given_threshold
"""
r = sigma * sp.sqrt(-2.0 * sp.log(small_thr))
x_lo = bisect_left(x, mu - r)
x_hi = bisect_right(x, mu + r)
return slice(x_lo, x_hi)
| 5,343,961
|
def preview(delivery_id):
"""
Print preview
:param delivery_id:
:return:
"""
delivery_info = get_delivery_row_by_id(delivery_id)
# Check that the resource exists
if not delivery_info:
abort(404)
# Check whether the resource has been deleted
if delivery_info.status_delete == STATUS_DEL_OK:
abort(410)
delivery_print_date = time_utc_to_local(delivery_info.update_time).strftime('%Y-%m-%d')
delivery_code = '%s%s' % (g.ENQUIRIES_PREFIX, time_utc_to_local(delivery_info.create_time).strftime('%y%m%d%H%M%S'))
# Get the customer company info
customer_info = get_customer_row_by_id(delivery_info.customer_cid)
# Get the customer contact info
customer_contact_info = get_customer_contact_row_by_id(delivery_info.customer_contact_id)
# Get the delivery user info
user_info = get_user_row_by_id(delivery_info.uid)
delivery_items = get_delivery_items_rows(delivery_id=delivery_id)
# Document info
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('delivery preview')
template_name = 'delivery/preview.html'
return render_template(
template_name,
delivery_id=delivery_id,
delivery_info=delivery_info,
customer_info=customer_info,
customer_contact_info=customer_contact_info,
user_info=user_info,
delivery_items=delivery_items,
delivery_print_date=delivery_print_date,
delivery_code=delivery_code,
**document_info
)
| 5,343,962
|
def request_with_json(json_payload):
"""
POST `json_payload` to the interpolation service (INTERP_URL) and return the
JSON response (used to load interpolations into the DB)
"""
test_response = requests.post(INTERP_URL, json=json_payload)
test_response_json = test_response.json()
return test_response_json
| 5,343,963
|
def correlation(df, rowvar=False):
"""
Calculate column-wise Pearson correlations using ``numpy.ma.corrcoef``
Input data is masked to ignore NaNs when calculating correlations. Data is returned as
a Pandas ``DataFrame`` of column_n x column_n dimensions, with column index copied to
both axes.
:param df: Pandas DataFrame
:param rowvar: passed through to ``numpy.ma.corrcoef``; keep the default (False)
    so that the returned labels, taken from df.columns, match the correlation axes
:return: Pandas DataFrame (n_columns x n_columns) of column-wise correlations
"""
# Create a correlation matrix for all correlations
# of the columns (filled with na for all values)
df = df.copy()
maskv = np.ma.masked_where(np.isnan(df.values), df.values)
cdf = np.ma.corrcoef(maskv, rowvar=rowvar)
cdf = pd.DataFrame(np.array(cdf))
cdf.columns = df.columns
cdf.index = df.columns
cdf = cdf.sort_index(level=0, axis=1)
cdf = cdf.sort_index(level=0)
return cdf
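# Usage sketch: the NaN in column 'a' is masked rather than propagated.
import numpy as np
import pandas as pd
df_example = pd.DataFrame({
    'a': [1.0, 2.0, 3.0, np.nan],
    'b': [2.0, 4.0, 6.0, 8.0],
    'c': [1.0, 0.0, 1.0, 0.0],
})
cdf_example = correlation(df_example)   # 3 x 3 DataFrame labelled by column names
# cdf_example.loc['a', 'b'] should be 1.0 despite the missing value in 'a'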
| 5,343,964
|
def cost_zpk_fit(zpk_args, f, x,
error_func=kontrol.core.math.log_mse,
error_func_kwargs={}):
"""The cost function for fitting a frequency series with zero-pole-gain.
Parameters
----------
zpk_args: array
A 1-D list of zeros, poles, and gain.
Zeros and poles are in unit of Hz.
f: array
The frequency axis.
x: array
The frequency series data.
error_func: func(x1: array, x2: array) -> float
The function that evaluate the error between arrays x1 and x2.
Defaults to kontrol.core.math.log_mse, which evaluates the
logarithmic mean square error.
error_func_kwargs: dict, optional
Keyword arguments passed to the error function.
Defaults to {}.
Returns
-------
cost: float
The cost.
"""
x_zpk = abs(
kontrol.frequency_series.conversion.args2zpk(f=f, zpk_args=zpk_args))
cost = error_func(x, x_zpk, **error_func_kwargs)
return cost
| 5,343,965
|
def getTrainPredictions(img,subImgSize,model):
"""Makes a prediction for an image.
Takes an input of any size, crops it to specified size, makes
predictions for each cropped window, and stitches output together.
Parameters
----------
img : np.array (n x m x 3)
Image to be transformed
subImgSize : np.array (a x b)
Input size for model
model: keras.model
Keras model used to make predictions
Returns
-------
pred: np.array (n x m)
Prediction from image
"""
# get the size of the input image
l,w,_ = np.shape(img)
# init array for new image
pred = np.zeros(shape = (l,w))
r = l//subImgSize[0]
c = w//subImgSize[1]
roffset = 0
coffset = 0
if l%subImgSize[0] != 0:
roffset = 1
if w%subImgSize[1] != 0:
coffset = 1
x1 = 0
predX1 = 0
# Crop the image
for j in range(r + roffset):
y1 = 0
predY1 = 0
x2 = (j+1)*subImgSize[0]
if x2 > l:
x2 = l
x1 = l - subImgSize[0]
for k in range(c + coffset):
# find upper bounds of window
y2 = (k+1)*subImgSize[1]
# if outer dimension is larger than image size, adjust
if y2 > w:
y2 = w
y1 = w - subImgSize[1]
# crop area of picture
croppedArea = img[x1:x2,y1:y2,:]
# make prediction using model
modelPrediction = model.predict(np.expand_dims(croppedArea,axis = 0))
# update prediction image
pred[predX1:x2,predY1:y2] = modelPrediction[0,(predX1-x1):,(predY1-y1):,0]
# update the bounds
y1 = y2
predY1 = y1
# update the lower x bound
x1 = x2
predX1 = x1
return pred
| 5,343,966
|
def use_bcbio_variation_recall(algs):
"""Processing uses bcbio-variation-recall. Avoids core requirement if not used.
"""
for alg in algs:
jointcaller = alg.get("jointcaller", [])
if not isinstance(jointcaller, (tuple, list)):
jointcaller = [jointcaller]
for caller in jointcaller:
if caller not in set(["gatk-haplotype-joint", None, False]):
return True
return False
| 5,343,967
|
def _sa_model_info(Model: type, types: AttributeType) -> Mapping[str, AttributeInfo]:
""" Get the full information about the model
This function gets full, cacheable information about the model's `types` attributes, once.
sa_model_info() can then filter it the way it likes, without polluting the cache.
"""
# Get a list of all available InfoClasses
info_classes = [
InfoClass
for InfoClass in AttributeInfo.all_implementations()
if InfoClass.extracts() & types # only enabled types
]
# Apply InfoClasses' extraction to every attribute
# If there is any weird attribute that is not supported, it is silently ignored.
return {
name: InfoClass.extract(attribute)
for name, attribute in all_sqlalchemy_model_attributes(Model).items()
for InfoClass in info_classes
if InfoClass.matches(attribute, types)
}
| 5,343,968
|
def bll6_models(estimators, cv_search={}, transform_search={}):
"""
Provides good defaults for the cv_search and transform_search arguments to models()
Args:
    estimators: list of estimators as accepted by models()
    cv_search: optional cross-validation arguments to override the defaults
    transform_search: optional LeadTransform arguments to override the defaults
"""
cvd = dict(
year=range(2011, 2014+1),
month=1,
day=1,
train_years=[6],
train_query=[None],
)
cvd.update(cv_search)
transformd = dict(
wic_sample_weight=[0],
aggregations=aggregations.args,
outcome_expr=['max_bll0 >= 6']
)
transformd.update(transform_search)
return models(estimators, cvd, transformd)
| 5,343,969
|
def bgColor(col):
""" Return a background color for a given column title """
# Auto-generated columns
if col in ColumnList._COLUMNS_GEN:
return BG_GEN
# KiCad protected columns
elif col in ColumnList._COLUMNS_PROTECTED:
return BG_KICAD
# Additional user columns
else:
return BG_USER
| 5,343,970
|
def _check_conv_shapes(name, lhs_shape, rhs_shape, window_strides):
"""Check that conv shapes are valid and are consistent with window_strides."""
if len(lhs_shape) != len(rhs_shape):
msg = "Arguments to {} must have same rank, got {} and {}."
raise TypeError(msg.format(name, len(lhs_shape), len(rhs_shape)))
if len(lhs_shape) < 2:
msg = "Arguments to {} must have rank at least 2, got {} and {}."
raise TypeError(msg.format(name, len(lhs_shape), len(rhs_shape)))
if lhs_shape[1] != rhs_shape[1]:
msg = "Arguments to {} must agree on input feature size, got {} and {}."
raise TypeError(msg.format(name, lhs_shape[1], rhs_shape[1]))
lax._check_shapelike(name, "window_strides", window_strides)
if not np.all(np.greater(window_strides, 0)):
msg = "All elements of window_strides must be positive, got {}."
raise TypeError(msg.format(window_strides))
if len(window_strides) != len(lhs_shape) - 2:
msg = "{} window_strides has wrong length: expected {}, got {}."
expected_length = len(lhs_shape) - 2
raise TypeError(msg.format(name, expected_length, len(window_strides)))
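# Illustration only (private JAX-style helper): NCHW / OIHW shape conventions,
# batch=8, in_features=3, out_features=16, 5x5 kernel, unit strides.
lhs_shape = (8, 3, 32, 32)
rhs_shape = (16, 3, 5, 5)
_check_conv_shapes("conv", lhs_shape, rhs_shape, window_strides=(1, 1))  # passes silently
# _check_conv_shapes("conv", lhs_shape, (16, 4, 5, 5), (1, 1))  -> TypeError (feature sizes disagree)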
| 5,343,971
|
def merge_hedge_positions(df, hedge):
"""
将一个表中的多条记录进行合并,然后对冲
:param self:
:param df:
:return:
"""
# 临时使用,主要是因为i1709.与i1709一类在分组时会出问题,i1709.是由api中查询得到
if df.empty:
return df
df['Symbol'] = df['InstrumentID']
# Merge
df = df.groupby(by=['Symbol', 'InstrumentID', 'HedgeFlag', 'Side'])[
'Position'].sum().to_frame().reset_index()
# print(df)
# Net (hedge)
if hedge:
df['Net'] = df['Side'] * df['Position']
df = df.groupby(by=['Symbol', 'InstrumentID', 'HedgeFlag'])['Net'].sum().to_frame().reset_index()
df['Position'] = abs(df['Net'])
df['Side'] = df['Net'] / df['Position']
df = df[df['Position'] != 0]
df = df[['Symbol', 'InstrumentID', 'HedgeFlag', 'Side', 'Position']]
# print(df)
return df
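# Usage sketch, assuming Side is encoded numerically (+1 long, -1 short):
import pandas as pd
positions = pd.DataFrame({
    'InstrumentID': ['i1709', 'i1709', 'rb1710'],
    'HedgeFlag':    ['1', '1', '1'],
    'Side':         [1, -1, 1],
    'Position':     [5, 3, 2],
})
net = merge_hedge_positions(positions, hedge=True)
# i1709 nets to Side=1, Position=2; rb1710 is unchanged (Side=1, Position=2)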
| 5,343,972
|
def BackwardSubTri(U,y):
"""
usage: x = BackwardSubTri(U,y)
Row-oriented backward substitution to solve the upper-triangular, 'tridiagonal'
linear system
U x = y
This function does not ensure that U has the correct nonzero structure. It does,
however, attempt to catch the case where U is singular.
Inputs:
U - square n-by-n matrix (assumed upper triangular and 'tridiagonal')
y - right-hand side vector (n-by-1)
Outputs:
x - solution vector (n-by-1)
"""
# check inputs
m, n = numpy.shape(U)
if (m != n):
raise ValueError("BackwardSubTri error: matrix must be square")
p = numpy.size(y)
if (p != n):
raise ValueError("BackwardSubTri error: right-hand side vector has incorrect dimensions")
if (numpy.min(numpy.abs(numpy.diag(U))) < 100*numpy.finfo(float).eps):
raise ValueError("BackwardSubTri error: matrix is [close to] singular")
# create output vector
x = y.copy()
# perform backward-substitution algorithm
for i in range(n-1,-1,-1):
if (i<n-1):
x[i] -= U[i,i+1]*x[i+1]
x[i] /= U[i,i]
return x
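# Quick check on a 3x3 upper-triangular, tridiagonal system:
import numpy
U_example = numpy.array([[2.0, 1.0, 0.0],
                         [0.0, 3.0, 1.0],
                         [0.0, 0.0, 4.0]])
x_true = numpy.array([1.0, 2.0, 3.0])
y_example = U_example @ x_true
x_solved = BackwardSubTri(U_example, y_example)   # recovers [1.0, 2.0, 3.0]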
| 5,343,973
|
def discrete_model(parents, lookup_table):
"""
Create CausalAssignmentModel based on a lookup table.
The lookup_table maps input values to weights of the output values.
The actual output values are sampled from a discrete distribution
of integers with probability proportional to the weights.
The lookup_table has the form:
Dict[Tuple(input_values): (output_weights)]
Arguments
---------
parents: list
variable names of parents
lookup_table: dict
lookup table
Returns
-------
model: CausalAssignmentModel
"""
assert len(parents) > 0
# create input/output mapping
inputs, weights = zip(*lookup_table.items())
output_length = len(weights[0])
assert all(len(w) == output_length for w in weights)
outputs = np.arange(output_length)
ps = [np.array(w) / sum(w) for w in weights]
def model(**kwargs):
n_samples = kwargs["n_samples"]
a = np.vstack([kwargs[p] for p in parents]).T
b = np.zeros(n_samples) * np.nan
for m, p in zip(inputs, ps):
b = np.where(
(a == m).all(axis=1),
np.random.choice(outputs, size=n_samples, p=p), b)
if np.isnan(b).any():
raise ValueError("It looks like an input was provided which doesn't have a lookup.")
return b
return CausalAssignmentModel(model, parents)
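# Usage sketch: a two-valued output driven by one binary parent 'x'.
# How the returned CausalAssignmentModel is invoked depends on that class; the
# wrapped callable itself expects the parent values plus n_samples, e.g.
#   inner_model(x=np.array([0, 1, 1, 0]), n_samples=4)
m = discrete_model(
    parents=['x'],
    lookup_table={
        (0,): (0.9, 0.1),   # x = 0 -> P(out=0)=0.9, P(out=1)=0.1
        (1,): (0.2, 0.8),   # x = 1 -> P(out=0)=0.2, P(out=1)=0.8
    })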
| 5,343,974
|
def retrieve(datafile, provider):
"""
Retrieve a file from the remote provider
:param datafile:
:param provider:
:return: the path to a temporary file containing the data, or None
"""
r = _connect(provider)
try:
data = base64.b64decode(json.loads(r.get(datafile.storage_key))['data'])
with tempfile.NamedTemporaryFile(delete=False) as tmpfile:
tmpfile.write(data)
tmpfilename = tmpfile.name
return tmpfilename
except Exception:
print('Download failed: %s' % traceback.format_exc())
return None
| 5,343,975
|
def _insert_default_stacors(session,network_code,station_code):
"""
inserts default station corrections for horizontal channels: 0.0 for ml and 1.0 for me
"""
statement = text("select net, sta, seedchan, location, min(ondate), max(offdate) "
"from channel_data where seedchan in ('SNN', 'SNE', 'BNN', 'BNE', "
"'ENN', 'ENE', 'HNN', 'HNE', 'EHN', 'EHE', 'BHN', 'BHE', 'HHN', "
"'HHE', 'EH1','EH2','BH1','BH2','HH1','HH2') and "
"samprate in (20, 40, 50, 80, 100, 200) and net=:net and sta=:sta "
"group by net, sta, seedchan,location")
statement = statement.columns(Channel.net,Channel.sta,Channel.seedchan,Channel.location,Channel.ondate,Channel.offdate)
logging.debug(statement)
try:
horizontals = session.query(Channel).from_statement(statement).params(net=network_code,sta=station_code).all()
for chan in horizontals:
for corr_type in ['ml', 'me']:
scor = StaCorrection()
scor.net = chan.net
scor.sta = chan.sta
scor.seedchan = chan.seedchan
scor.location = chan.location
scor.ondate = chan.ondate
scor.offdate = chan.offdate
scor.auth="UW"
scor.corr_flag="C"
if corr_type == "ml":
scor.corr = 0.
else:
scor.corr = 1.
scor.corr_type=corr_type
session.add(scor)
except Exception as e:
logging.error("Unable to create default station correction for {}.{}: {}".format(network_code, station_code,e))
try:
session.commit()
except Exception as e:
logging.error("Unable to commit station correction: {}".format(e))
return
| 5,343,976
|
def edf_parse_message(EDFFILE):
"""Return message info."""
message = edf_get_event_data(EDFFILE).contents
time = message.sttime
message = string_at(byref(message.message[0]), message.message.contents.len + 1)[2:]
message = message.decode('UTF-8')
return (time, message)
| 5,343,977
|
def test_items_is_none_or_empty():
"""
:return:
"""
with pytest.raises(ValueError) as _:
Keyboa(items=list())
with pytest.raises(ValueError) as _:
Keyboa(items=None)
| 5,343,978
|
def read_abbrevs_and_add_to_db(abbrevs_path: str,
db: Connection) -> Dict[str, int]:
"""Add abbreviations from `abbrevs_path` to `idx` and `defns`."""
with open(abbrevs_path, 'rt') as ab:
abbrevs = read_abbrevs(ab)
abbrev_nid = add_abbrevs_to_db(abbrevs, db)
logging.info('Added %d abbreviations.', len(abbrevs))
return abbrev_nid
| 5,343,979
|
def is_encrypted(input_file: str) -> bool:
"""Checks if the inputted file is encrypted using PyPDF4 library"""
with open(input_file, 'rb') as pdf_file:
pdf_reader = PdfFileReader(pdf_file, strict=False)
return pdf_reader.isEncrypted
| 5,343,980
|
def get_parameter_by_name(device, name):
""" Find the given device's parameter that belongs to the given name """
for i in device.parameters:
if i.original_name == name:
return i
return
| 5,343,981
|
def LogPrint(email: str, fileName: str, materialType: str, printWeight: float, printPurpose: str, msdNumber: Optional[str], paymentOwed: bool) -> bool:
"""Logs a print. Returns if the task was successful.
:param email: Email of the user exporting the print.
:param fileName: Name of the file that was exported.
:param materialType: Type of the material being used.
:param printWeight: Weight of the print being exported.
:param printPurpose: Purpose of the print being exported.
:param msdNumber: MSD Number of the print being exported.
:param paymentOwed: Whether the payment is owed or not.
"""
# Get the hashed id and return if there is none.
hashedId = getUniversityIdHash(email)
if hashedId is None:
return False
# Check if this is a Senior Design print.
msd = printPurpose == "Senior Design Project (Reimbursed)"
# Create the payload.
arguments = {
"hashedId": hashedId,
"fileName": fileName,
"material": materialType,
"weight": printWeight,
"purpose": printPurpose,
"billTo": msdNumber if msd else None,
"owed": paymentOwed,
}
# Send the request and return the result.
printResult = requests.post(getHost() + "/print/add", json=arguments).json()
return "status" in printResult.keys() and printResult["status"] == "success"
| 5,343,982
|
def lun_ops() -> None:
"""Interface Operations"""
print()
print("THE FOLLOWING SCRIPT SHOWS LUN OPERATIONS USING REST API PYTHON CLIENT LIBRARY:- ")
print("=================================================================================")
print()
lunbool = input(
"Choose the LUN Operation would you like to do? [list/create/update/delete] ")
if lunbool == 'list':
list_lun()
if lunbool == 'create':
create_lun()
if lunbool == 'update':
patch_lun()
if lunbool == 'delete':
delete_lun()
| 5,343,983
|
def is_requirement(line):
"""
Return True if the requirement line is a package requirement;
that is, it is not blank, a comment, a nested requirements file (-r),
an editable install (-e), or a direct git+ URL.
# Remove whitespace at the start/end of the line
line = line.strip()
# Skip blank lines, comments, and editable installs
return not (
line == '' or
line.startswith('-r') or
line.startswith('#') or
line.startswith('-e') or
line.startswith('git+')
)
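# Example lines:
assert is_requirement("requests>=2.28")
assert not is_requirement("")                                     # blank
assert not is_requirement("# pinned for CI")                      # comment
assert not is_requirement("-e .")                                 # editable install
assert not is_requirement("-r base.txt")                          # nested requirements file
assert not is_requirement("git+https://github.com/org/repo.git")  # direct VCS URL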
| 5,343,984
|
def print_trajectory_results(alg, results, s, v, out):
"""Prints a formatted version of one repetition of trajectory reconstruction experiments"""
out(f"Algorithm: {alg}")
if alg in ['CRiSP', 'OC_SVM']:
out(f"s: {s}\t v: {v}")
if results['rmse_orientation']:
out(f"\t\tRMSE ori: {results['rmse_orientation']:7.6f} ± {results['var_orientation']:7.6f}")
out(f"\t\tRMSE pos: {results['rmse_position']:7.6f} ± {results['var_position']:7.6f}\n"
f"\t\tRMSE: {results['rmse']:7.6f} ± {results['var']:7.6f}")
if 'bias_error' in results:
out("#Biased model results#")
if results['bias_error']['rmse_orientation']:
out(f"\t\tRMSE ori: {results['bias_error']['rmse_orientation']:7.6f} ± {results['bias_error']['var_orientation']:7.6f}")
out(f"\t\tRMSE pos: {results['bias_error']['rmse_position']:7.6f} ± {results['bias_error']['var_position']:7.6f}\n"
f"\t\tRMSE: {results['bias_error']['rmse']:7.6f} ± {results['bias_error']['var']:7.6f}\n")
print("")
| 5,343,985
|
def read_pose_txt(pose_txt):
"""
Read the pose txt file and return a 4x4 rigid transformation.
"""
with open(pose_txt, "r") as f:
lines = f.readlines()
pose = np.zeros((4, 4))
for line_idx, line in enumerate(lines):
items = line.split(" ")
for i in range(4):
pose[line_idx, i] = float(items[i])
return pose
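# Usage sketch: the file must hold four lines of four space-separated numbers
# (the file name here is hypothetical).
import numpy as np
with open("pose_example.txt", "w") as f:
    f.write("1 0 0 0.5\n0 1 0 0.0\n0 0 1 1.2\n0 0 0 1\n")
pose = read_pose_txt("pose_example.txt")
assert pose.shape == (4, 4) and np.isclose(pose[0, 3], 0.5)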
| 5,343,986
|
def gen_new_random_graph(nodecount: int, prob: float) -> None:
"""
Generate a new random graph using binomial generation.
Will save new network to file.
"""
newgraph = nx.binomial_graph(nodecount, prob)
np.save('./test/testnetwork.npy', nx.adjacency_matrix(newgraph).todense())
| 5,343,987
|
def get_futures(race_ids=list(range(1, 13000))):
"""Get Futures for all BikeReg race pages with given race_ids."""
session = FuturesSession(max_workers=8)
return [session.get(f'https://results.bikereg.com/race/{race_id}')
for race_id in race_ids if race_id not in BAD_IDS]
| 5,343,988
|
def sum_of_proper_divisors(number: int):
"""
Let d(n) be defined as the sum of proper divisors of n
(numbers less than n which divide evenly into n).
:param number: n
:return: d(n), the sum of the proper divisors of n
"""
divisors = []
for n in range(1, number):
if number % n == 0:
divisors.append(n)
return sum(divisors)
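# For example, 220 and 284 form the classic amicable pair:
assert sum_of_proper_divisors(220) == 284   # 1+2+4+5+10+11+20+22+44+55+110
assert sum_of_proper_divisors(284) == 220   # 1+2+4+71+142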
| 5,343,989
|
def coddington_meridional(p, q, theta):
""" return radius of curvature """
f = p * q / (p + q)
R = 2 * f / np.sin(theta)
return R
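# With p == q the focal length is p/2, so at normal incidence (theta = pi/2) R == p:
import numpy as np
assert np.isclose(coddington_meridional(p=2.0, q=2.0, theta=np.pi / 2), 2.0)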
| 5,343,990
|
def read_analysis_file(timestamp=None, filepath=None, data_dict=None,
file_id=None, ana_file=None, close_file=True, mode='r'):
"""
Creates a data_dict from an AnalysisResults file as generated by analysis_v3
:param timestamp: str with a measurement timestamp
:param filepath: (str) path to file
:param data_dict: dict where to store the file entries
:param file_id: suffix to the usual HDF measurement file found from giving
a measurement timestamp. Defaults to '_AnalysisResults', the standard
suffix created by analysis_v3
:param ana_file: HDF file instance
:param close_file: whether to close the HDF file at the end
:param mode: str specifying the HDF read mode (if ana_file is None)
:return: the data dictionary
"""
if data_dict is None:
data_dict = {}
try:
if ana_file is None:
if filepath is None:
if file_id is None:
file_id = '_AnalysisResults'
folder = a_tools.get_folder(timestamp)
filepath = a_tools.measurement_filename(folder, file_id=file_id)
ana_file = h5py.File(filepath, mode)
read_from_hdf(data_dict, ana_file)
if close_file:
ana_file.close()
except Exception as e:
if close_file:
ana_file.close()
raise e
return data_dict
| 5,343,991
|
def get_player_macro_econ_df(rpl: sc2reader.resources.Replay,
pid: int) -> pd.DataFrame:
"""This function organises the records of a player's major
macroeconomic performance indicators.
The function uses a player's PlayerStatsEvents contained in a Replay
object to compose a DataFrame. In the DataFrame, each column points to
a particular indicator. Each row points to the records of all
indicators at a specific moment during the game.
*Arguments*
- rpl (sc2reader.resources.Replay)
Replay object generated with sc2reader containing a match's
data.
- pid (int)
A player's id number distinguishes them from the other
players in a match. It can be extracted from a Participant
object through the pid attribute.
*Returns*
- pd.DataFrame
This DataFrame contains all the time series that illustrate the
changes of each attribute during a match. Each column alludes
to an attribute, each row to a moment during the match.
"""
columns_names =[
'second',
'minerals_current',
'vespene_current',
'minerals_used_active_forces',
'vespene_used_active_forces',
'minerals_collection_rate',
'vespene_collection_rate',
'workers_active_count',
'minerals_used_in_progress',
'vespene_used_in_progress',
'resources_used_in_progress',
'minerals_used_current',
'vespene_used_current',
'resources_used_current',
'minerals_lost',
'vespene_lost',
'resources_lost',
'minerals_killed',
'vespene_killed',
'resources_killed',
'food_used',
'food_made'
]
# Generate a DataFrame with the columns listed above
pstatse_list = get_pstatse(rpl, pid)
pstatse_dicts_list = [event.__dict__ for event in pstatse_list]
pstatse_df = pd.DataFrame(pstatse_dicts_list, columns= columns_names)
# Complete the DataFrame with the real_time, unspent_rsrc columns and
# army_value.
# Also, eliminate possible duplicate last record.
return complete_pstatse_df(rpl, pstatse_df)
| 5,343,992
|
def test_collect_ref_info_return_list():
""" test fields in a reference file """
#GIVEN the variant info fields in input file
valid_variant = "0.00119999998;SNP;p.Glu17Lys"
allele_freq, variant_type, aa_hgvs = valid_variant.split(';')
valid_info_variant = "VARIANT_TYPE=SNP;AA_HGVS=p.Glu17Lys;AF=1e-05"
#WHEN calling the reference file
info = collect_ref_info(valid_variant)
#THEN it should return a list of info fields
assert valid_info_variant in info
| 5,343,993
|
def get_seconds_from_duration(time_str: str) -> int:
"""
This function will convert the TM1 time to seconds
:param time_str: P0DT00H01M43S
:return: int
"""
import re
pattern = re.compile(r'\w(\d+)\w\w(\d+)\w(\d+)\w(\d+)\w')
matches = pattern.search(time_str)
d, h, m, s = matches.groups()
seconds = (int(d) * 86400) + (int(h) * 3600) + (int(m) * 60) + int(s)
return seconds
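# Examples:
assert get_seconds_from_duration("P0DT00H01M43S") == 103     # 1 min 43 s
assert get_seconds_from_duration("P1DT02H00M05S") == 93605   # 86400 + 7200 + 5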
| 5,343,994
|
def get_protecteds(object: Object) -> Dictionary:
"""Gets the protected namespaces of an object."""
return object.__protecteds__
| 5,343,995
|
def pah2area(_position, angle, height, shape):
"""Calculates area from position, angle, height depending on shape."""
if shape == "PseudoVoigt":
fwhm = np.tan(angle) * height
area = (height * (fwhm * np.sqrt(np.pi / ln2))
/ (1 + np.sqrt(1 / (np.pi * ln2))))
return area
elif shape == "DoniachSunjic":
fwhm = np.tan(angle) * height
area = height / pure_ds(0, amplitude=1, center=0, fwhm=fwhm, asym=0.5)
return area
elif shape == "Voigt":
fwhm = np.tan(angle) * height
area = height / voigt(0, amplitude=1, center=0, fwhm=fwhm, fwhm_l=0.5)
return area
raise NotImplementedError
| 5,343,996
|
def test_equals_identical():
"""Test comparing an conflict with it self."""
conflict = make_conflict()
assert conflict == conflict
| 5,343,997
|
def caching_query_s3(
s3_url: str,
query_fun: Callable,
force_query=False,
df_save_fun: Callable = lambda df, loc: df.to_pickle(loc, compression="gzip"),
df_load_fun: Callable = lambda loc: pd.read_pickle(loc, compression="gzip"),
):
"""
Retrieve cached data if available, query and cache otherwise.
Args:
s3_url (str): Location of the saved data.
query_fun (Callable): Function to query the data
force_query (Optional[bool]): Whether to force a query. Defaults to False.
df_save_fun (Optional[Callable]): Function to save the data. Defaults to pickling to a gzip file.
df_load_fun (Optional[Callable]): Function to load the data. Defaults to unpickling from a gzip file.
"""
# generate a unique one to avoid collisions when running in parallel
tmp_file_name = str(uuid4())
if file_exists_in_s3(s3_url) and not force_query:
get_file_from_s3(s3_url, tmp_file_name)
df = df_load_fun(tmp_file_name)
os.remove(tmp_file_name)
logging.info(f"File {s3_url} exists, loaded it")
else:
print(f"Didn't find {s3_url} so calling query_fun")
df = query_fun()
df_save_fun(df, tmp_file_name)
save_file_to_s3(tmp_file_name, s3_url)
os.remove(tmp_file_name)
logging.info(f"File {s3_url} didn't exist, created it")
return df
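# Usage sketch (bucket/key and the query are hypothetical; assumes the s3 helper
# functions imported by this module are configured with valid credentials):
import pandas as pd

def expensive_query():
    # stand-in for a long-running database or API query
    return pd.DataFrame({"value": [1, 2, 3]})

cached_df = caching_query_s3(
    s3_url="s3://my-bucket/cache/expensive_query.pkl.gz",
    query_fun=expensive_query,
)
# first call runs expensive_query and uploads the pickle; later calls download the cache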
| 5,343,998
|
def config(key, values, axis=None):
"""Class decorator to parameterize the Chainer configuration.
This is a specialized form of `parameterize` decorator to parameterize
the Chainer configuration. For all `time_*` functions and `setup` function
in the class, this decorator wraps the function to be called inside the
context where specified Chainer configuration set.
This decorator adds parameter axis with the name of the configuration
by default. You can change the axis name by passing axis parameter.
You cannot apply `parameterize` decorator to the class already decorated
by this decorator. If you want to use `parameterize` along with this
decorator, make `parameterize` the most inner (i.e., the closest to the
class declaration) decorator.
Example of usage is as follows:
>>> @config('autotune', [True, False])
... class ConvolutionBenchmark(object):
... def time_benchmark(self):
... ...
"""
axis = key if axis is None else axis
def _wrap_class(klass):
assert isinstance(klass, type)
if not hasattr(chainer.config, key):
print(
'''Notice: Configuration '{}' unknown to this version of '''
'''Chainer'''.format(key))
return _inject_config(klass, axis, key, values)
return _wrap_class
| 5,343,999
|