| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def get_distributed_mean(value: Union[float, torch.Tensor]):
"""Computes distributed mean among all nodes."""
if check_torch_distributed_initialized():
# Fix for runtime warning:
# To copy construct from a tensor, it is recommended to use
# sourceTensor.clone().detach() or
# sourceTensor.clone().detach().requires_grad_(True),
# rather than torch.tensor(sourceTensor).
if torch.is_tensor(value):
value = (
value.clone()
.detach()
.to(device=f"cuda:{torch.cuda.current_device()}")
)
else:
value = torch.tensor(
value,
dtype=torch.float,
device=f"cuda:{torch.cuda.current_device()}",
requires_grad=False,
)
torch.distributed.all_reduce(value)
value = float(value.item() / torch.distributed.get_world_size())
return value
| 5,335,900
|
def js_div(A, B):
""" Jensen-Shannon divergence between two discrete probability
distributions, represented as numpy vectors """
norm_A = A / A.sum()
norm_B = B / B.sum()
M = (norm_A+norm_B)/2
return 0.5 * (kl_div(norm_A,M)+kl_div(norm_B,M))
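A minimal usage sketch; it assumes `kl_div` is a scalar KL-divergence helper along the lines shown below (the module's actual `kl_div` import is not visible here):

import numpy as np

def kl_div(p, q):
    # Scalar KL divergence (in bits) between two normalized discrete distributions.
    mask = p > 0
    return np.sum(p[mask] * np.log2(p[mask] / q[mask]))

A = np.array([9.0, 1.0])
B = np.array([1.0, 9.0])
print(js_div(A, B))  # ~0.53 bits; 0.0 when A and B are proportional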
| 5,335,901
|
def test_outputs():
"""
testing correct functionality and outputs of function
"""
df = pd.DataFrame({'x': [1, 2, 11, 22, 7], 'y': [100, 110, 120, 130, 140]})
out1 = pythcat.suscat(df, [0, 1], n=80)
out2 = pythcat.suscat(df, [0], num='number', n=2)
assert isinstance(out1, dict)
assert len(out1.keys()) == 2 and len(out1[1]) == 4
assert max(out1[0]) <= len(df.loc[:, 'x']) and min(out1[0]) >= 0
assert max(out1[1]) <= len(df.loc[:, 'y']) and min(out1[1]) >= 0
assert len(out2.keys()) == 1 and len(out2[0]) == 2
| 5,335,902
|
def _fixTimeStrings(scModel, gopLoader):
"""
Used to correct the use of the third ':', which indicated frames since the time.
This caused confusion: users often used the third ':' for milliseconds.
The journals are of course incorrect; that cannot be fixed here.
:param scModel:
:param gopLoader:
:return:
"""
from tool_set import getMilliSecondsAndFrameCount,getDurationStringFromMilliseconds
extractor = MetaDataExtractor(scModel.getGraph())
for frm, to in scModel.G.get_edges():
edge = scModel.G.get_edge(frm, to)
args = getValue(edge,'arguments',{})
try:
for k,v in args.iteritems():
if 'Time' in k and v.count(':') == 3:
m,f = getMilliSecondsAndFrameCount(v)
rate = extractor.getMetaDataLocator(frm).get_frame_rate()
if rate is not None:
m += int(f*1000.0/rate)
v = getDurationStringFromMilliseconds(m)
setPathValue(edge,'arguments.{}'.format(k),v)
except:
pass
| 5,335,903
|
def test_getextensible():
"""py.test for getextensible"""
data = (
([{u'format': [u'singleLine'],
u'group': u'Simulation Parameters',
u'idfobj': u'Version',
u'memo': [u'Specifies the EnergyPlus version of the IDF file.'],
u'unique-object': [u'']}, {}, {}, {}
],
None), # objidd, expected
([{u'extensible:2': [u'- repeat last two fields, remembering to remove ; from "inner" fields.'],
u'group': u'Schedules',
u'idfobj': u'Schedule:Day:Interval',
u'memo': [u'A Schedule:Day:Interval contains a full day of values with specified end times for each value',
u'Currently, is set up to allow for 10 minute intervals for an entire day.'],
u'min-fields': [u'5']}, {}, {}, {}
],
2), # objidd, expected
)
for objidd, expected in data:
result = idfreader.getextensible(objidd)
assert result == expected
| 5,335,904
|
def run_collector(db_host, db_port, db_name,
watcher_cls, watcher_kwargs, updater_cls, updater_kwargs,
num_updaters=1):
""" Run up instances of the watcher and updaters with the configured watcher and updaters.
"""
num_updaters = int(num_updaters)
log.info("Starting {} watcher and {} {} updaters".format(watcher_cls, num_updaters, updater_cls))
pid = os.fork()
if pid == 0:
# We're now the watcher
log.info("Starting watcher PID {}".format(os.getpid()))
conn = db.connect(db_host, db_port, db_name)
watcher = watcher_cls(conn, **watcher_kwargs)
watcher.run_forever(watcher.watch)
log.info("Watcher shutting down PID {}".format(os.getpid()))
sys.exit(0)
for i in xrange(num_updaters):
pid = os.fork()
if pid == 0:
# We're now the updater
log.info("Starting updater {} of {} PID: {}".format(i + 1, num_updaters, os.getpid()))
conn = db.connect(db_host, db_port, db_name)
updater = updater_cls(conn, **updater_kwargs)
updater.run_forever(updater.do_updates)
log.info("Updater shutting down PID {}".format(os.getpid()))
sys.exit(0)
| 5,335,905
|
def _build_request_url(
base: str,
params_dict: Dict[str, str]) -> str:
"""Returns an URL combined from base and parameters
:param base: base url
:type base: str
:param params_dict: dictionary of parameter names and values
:type params_dict: Dict[str, str]
:return: a complete url
:rtype: str
"""
parameters = "&".join([f"{k}={v}" for k, v in params_dict.items()])
url = base + "?" + parameters
return url
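A quick usage sketch with hypothetical values; note that parameters are joined verbatim, so values needing URL encoding should be quoted (e.g. with urllib.parse.quote) before being passed in:

url = _build_request_url("https://api.example.com/search", {"q": "cats", "page": "2"})
print(url)  # https://api.example.com/search?q=cats&page=2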
| 5,335,906
|
def test_is_ten_prime():
"""Is five successfully determined to be prime?"""
assert not is_prime(10)
| 5,335,907
|
def add_DX(self, timeperiod=14, type="line", color="secondary", **kwargs):
"""Directional Movement Index."""
if not (self.has_high and self.has_low and self.has_close):
raise Exception("High, low, and close data are required for DX.")
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if "kind" in kwargs:
type = kwargs["kind"]
name = "DX({})".format(str(timeperiod))
self.sec[name] = dict(type=type, color=color)
self.ind[name] = talib.DX(
self.df[self.hi].values,
self.df[self.lo].values,
self.df[self.cl].values,
timeperiod,
)
| 5,335,908
|
def aesDecrypt(key, data):
"""AES decryption fucnction
Args:
key (str): packed 128 bit key
data (str): packed encrypted data
Returns:
Packed decrypted data string
"""
cipher = python_AES.new(key)
return cipher.decrypt(data)
| 5,335,909
|
def store_preparation_emails(recipients, daylist, jokes):
"""
Takes a message, a list of recipients and a list of datetimes to store
emails including a timestamp as basis of a scheduled mail sender.
If the number of days is greater than the number of jokes, I don't want to spam
my colleagues with duplicate jokes :)
"""
number_of_jokes = len(jokes)
if number_of_jokes < len(daylist):
print('Info: Not enough jokes for so many days. reducing daycount to ' +
str(number_of_jokes))
joke_schedule = zip(daylist,jokes)
for sending_datetime, joke in joke_schedule:
for recipient in recipients:
msg = generate_email(recipient, sending_datetime, joke)
store(msg, sending_datetime)
| 5,335,910
|
def __format_number_input(number_input, language):
"""Formats the specified number input.
Args:
number_input (dict): A number input configuration to format.
language (dict): A language configuration used to help format the input configuration.
Returns:
dict: A formatted number input configuration.
"""
placeholder = number_input.get("placeholder")
if placeholder is not None:
number_input["placeholder"] = normalize_configuration_string(placeholder, language["default"])
return number_input
| 5,335,911
|
def get_players(picks):
"""Return the list of players in the team
"""
players = []
for rd in picks:
play = list(rd.keys())
players = players+play
players = list(set(players))
return players
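A minimal usage sketch with hypothetical pick data, where each round is a dict keyed by player name (the returned order is unspecified because a set is used):

picks = [{"Alice": 10, "Bob": 7}, {"Bob": 3, "Carol": 5}]
print(sorted(get_players(picks)))  # ['Alice', 'Bob', 'Carol']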
| 5,335,912
|
def verify_file_details_exists(device,
root_path,
file,
max_time=30,
check_interval=10):
""" Verify file details exists
Args:
device ('obj'): Device object
root_path ('str'): Root path for command
file ('str'): File name
max_time (`int`): Max time, default: 30
check_interval (`int`): Check interval, default: 10
Returns:
Boolean
Raises:
None
"""
timeout = Timeout(max_time, check_interval)
while timeout.iterate():
out = None
try:
out = device.parse(
'file list {root_path} detail'.format(root_path=root_path))
except SchemaEmptyParserError as e:
timeout.sleep()
continue
file_found = Dq(out).contains_key_value('file-name',
file,
value_regex=True)
if file_found:
return True
timeout.sleep()
return False
| 5,335,913
|
def generateVoID(g, dataset=None, res=None, distinctForPartitions=True):
"""
Returns a new graph with a VoID description of the passed dataset
For more info on Vocabulary of Interlinked Datasets (VoID), see:
http://vocab.deri.ie/void
This only makes two passes through the triples (once to detect the types
of things)
The tradeoff is that lots of temporary structures are built up in memory
meaning lots of memory may be consumed :)
I imagine at least a few copies of your original graph.
the distinctForPartitions parameter controls whether
distinctSubjects/objects are tracked for each class/propertyPartition
this requires more memory again
"""
typeMap = collections.defaultdict(set)
classes = collections.defaultdict(set)
for e, c in g.subject_objects(RDF.type):
classes[c].add(e)
typeMap[e].add(c)
triples = 0
subjects = set()
objects = set()
properties = set()
classCount = collections.defaultdict(int)
propCount = collections.defaultdict(int)
classProps = collections.defaultdict(set)
classObjects = collections.defaultdict(set)
propSubjects = collections.defaultdict(set)
propObjects = collections.defaultdict(set)
for s, p, o in g:
triples += 1
subjects.add(s)
properties.add(p)
objects.add(o)
# class partitions
if s in typeMap:
for c in typeMap[s]:
classCount[c] += 1
if distinctForPartitions:
classObjects[c].add(o)
classProps[c].add(p)
# property partitions
propCount[p] += 1
if distinctForPartitions:
propObjects[p].add(o)
propSubjects[p].add(s)
if not dataset:
dataset = URIRef("http://example.org/Dataset")
if not res:
res = Graph()
res.add((dataset, RDF.type, VOID.Dataset))
# basic stats
res.add((dataset, VOID.triples, Literal(triples)))
res.add((dataset, VOID.classes, Literal(len(classes))))
res.add((dataset, VOID.distinctObjects, Literal(len(objects))))
res.add((dataset, VOID.distinctSubjects, Literal(len(subjects))))
res.add((dataset, VOID.properties, Literal(len(properties))))
for i, c in enumerate(classes):
part = URIRef(dataset + "_class%d" % i)
res.add((dataset, VOID.classPartition, part))
res.add((part, RDF.type, VOID.Dataset))
res.add((part, VOID.triples, Literal(classCount[c])))
res.add((part, VOID.classes, Literal(1)))
res.add((part, VOID["class"], c))
res.add((part, VOID.entities, Literal(len(classes[c]))))
res.add((part, VOID.distinctSubjects, Literal(len(classes[c]))))
if distinctForPartitions:
res.add(
(part, VOID.properties, Literal(len(classProps[c]))))
res.add((part, VOID.distinctObjects,
Literal(len(classObjects[c]))))
for i, p in enumerate(properties):
part = URIRef(dataset + "_property%d" % i)
res.add((dataset, VOID.propertyPartition, part))
res.add((part, RDF.type, VOID.Dataset))
res.add((part, VOID.triples, Literal(propCount[p])))
res.add((part, VOID.properties, Literal(1)))
res.add((part, VOID.property, p))
if distinctForPartitions:
entities = 0
propClasses = set()
for s in propSubjects[p]:
if s in typeMap:
entities += 1
for c in typeMap[s]:
propClasses.add(c)
res.add((part, VOID.entities, Literal(entities)))
res.add((part, VOID.classes, Literal(len(propClasses))))
res.add((part, VOID.distinctSubjects,
Literal(len(propSubjects[p]))))
res.add((part, VOID.distinctObjects,
Literal(len(propObjects[p]))))
return res, dataset
| 5,335,914
|
def add(filename, destination, number, time):
"""
Request the route data and add the route.
"""
if os.path.exists(filename):
routes = load_routes(filename)
else:
routes = []
routes.append(
{
'destination': destination,
'number': number,
'time': time,
}
)
try:
datetime.strptime(time, "%H:%M")
except ValueError:
print("Неправильный формат времени", file=sys.stderr)
exit(1)
with open(filename, "w", encoding="utf-8") as fl:
json.dump(routes, fl, ensure_ascii=False, indent=4)
click.secho("Маршрут добавлен")
| 5,335,915
|
def extract_screen_name_from_twitter_url(url):
"""
Function returning the screen_name from a given Twitter url.
Args:
url (str) : Url from which we extract the screen_name if found.
Returns:
str : screen_name if the url is a valid twitter url, None otherwise.
"""
parsed_twitter_url = parse_twitter_url(url)
if isinstance(parsed_twitter_url, TwitterUser):
return parsed_twitter_url.screen_name
if isinstance(parsed_twitter_url, TwitterTweet):
return parsed_twitter_url.user_screen_name
return None
| 5,335,916
|
def sub_vectors(a, b):
"""Subtracts two vectors.
Args:
a (tuple[int]): first position
b (tuple[int]): second position
Returns:
tuple[int]: element wise subtraction
Examples:
>>> sub_vectors((1,4,6), (1,3,7))
(0, 1, -1)
"""
return tuple(a[i] - b[i] for i in range(3))
| 5,335,917
|
def infer_on_stream(args, client):
"""
Initialize the inference network, stream video to network,
and output stats and video.
:param args: Command line arguments parsed by `build_argparser()`
:param client: MQTT client
:return: None
"""
# Initialize the Inference Engine
infer_network = Network()
# Set Probability threshold for detections
prob_threshold = args.prob_threshold
# Load the model through `infer_network`
infer_network.load_model(args.model, args.device, CPU_EXTENSION, num_requests=0)
# Get the input blob shape
_, _, in_h, in_w = infer_network.get_input_shape()
# Get the output blob name
_ = infer_network.get_output_name()
# Handle the input stream
try:
cap = cv2.VideoCapture(args.input)
except FileNotFoundError:
print("Cannot locate video file: "+ args.input)
except Exception as e:
print("Something else went wrong with the video file: ", e)
cap.open(args.input)
_, frame = cap.read()
people_total_count = 0
people_in_a_frame = 0
g_elapsed = 0
entre_ROI_xmin = 400
entre_ROI_ymin = 450
exit_ROI_xmin = 550
exit_ROI_ymin = 410
fps = FPS().start()
# Process frames until the video ends, or process is exited
while cap.isOpened():
# Read the next frame
flag, frame = cap.read()
if not flag:
break
fh = frame.shape[0]
fw = frame.shape[1]
key_pressed = cv2.waitKey(50)
image_resize = cv2.resize(frame, (in_w, in_h), interpolation = cv2.INTER_AREA)
image = np.moveaxis(image_resize, -1, 0)
# Perform inference on the frame
infer_network.exec_net(image, request_id=0)
# Get the output of inference
if infer_network.wait(request_id=0) == 0:
result = infer_network.get_output(request_id=0)
for box in result[0][0]: # Output shape is 1x1x100x7
conf = box[2]
if conf >= prob_threshold:
xmin = int(box[3] * fw)
ymin = int(box[4] * fh)
xmax = int(box[5] * fw)
ymax = int(box[6] * fh)
cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 0, 255), 3)
if xmin < entre_ROI_xmin and ymax < entre_ROI_ymin:
if fsm.current == "empty":
# Count a people
people_in_a_frame += 1
people_total_count += 1
# Start the timer
start_time = time.perf_counter()
# Person entered a room - fsm state change
fsm.enter()
print(xmax, ymax)
if args.output == "WEB":
# Publish people_count messages to the MQTT server
client.publish("person", json.dumps({"count": people_in_a_frame}))
log.info("#########################")
log.info("Person entered into frame")
log.info("#########################")
if xmin > exit_ROI_xmin and ymax < exit_ROI_ymin:
if fsm.current == "standing":
# Change the state to exit - fsm state change
fsm.exit()
stop_time = time.perf_counter()
elapsed = stop_time - start_time
# Update average time
log.info("elapsed time = {:.12f} seconds".format(elapsed))
g_elapsed = (g_elapsed + elapsed) / people_total_count
log.info("g_elapsed time = {:.12f} seconds".format(g_elapsed))
people_in_a_frame = 0
if args.output == "WEB":
# Publish duration messages to the MQTT server
client.publish("person/duration", json.dumps({"duration": g_elapsed}))
client.publish("person", json.dumps({"count": people_in_a_frame}))
log.info("#########################")
log.info("Person exited from frame")
log.info("#########################")
log.info("xmin:{} xmax:{} ymin:{} ymax:{}".format(xmin, xmax, ymin, ymax))
if args.output != "WEB":
# Update info on frame
info = [
("people_ccount", people_total_count),
]
# loop over the info tuples and draw them on our frame
for (i, (k, v)) in enumerate(info):
text = "{}: {}".format(k, v)
cv2.putText(frame, text, (10, fh - ((i * 20) + 20)),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2)
if args.output == "WEB":
# Push to FFmpeg server
sys.stdout.buffer.write(frame)
sys.stdout.flush()
else:
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
#Break if escape key pressed
if key_pressed == 27:
break
fps.update()
# Release the out writer, capture, and destroy any OpenCV windows
cap.release()
if args.output == "WEB":
client.disconnect()
else:
cv2.destroyAllWindows()
fps.stop()
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
| 5,335,918
|
def get_prediction_info(predicted_one_hot, predicted_int, y_test, PLOTS_DIR, filename = "test_file"):
"""
Saves useful information for error analysis in plots directory
:param predicted_one_hot:
:param predicted_int:
:param y_test:
:param PLOTS_DIR:
:param filename:
:return:
"""
def get_info_for_label(label):
false_dict = {}
number = 0
if label == False:
number = 1
for i in range(len(predicted_one_hot)):
false_dict[i] = predicted_one_hot[i][number]
temp_dict = false_dict
sorted_index = sorted(false_dict, key=false_dict.get, reverse=True)
file = str(label) + "\n"
file += "Index;probability;correct?\n"
for i in range(len(sorted_index)):
correct = "No"
index = sorted_index[i]
if predicted_int[index] == y_test[index]:
correct = "Yes"
file += str(index) + ";" + str(temp_dict[index]) + ";" + correct + "\n"
print(sorted_index[:5])
return file, sorted_index
file = "Predictions True;Predictions False;Correctly predicted?\n"
max_true_value = 0.0
max_false_value = 0.0
max_true_index = -1
worst_true_index = -1
max_false_index = -1
worst_false_index = -1
for i, pred in enumerate(predicted_one_hot):
correctly_pred = -1
if predicted_int[i] == y_test[i]:
correctly_pred = "Yes"
else:
correctly_pred = "No"
file += str(pred[0]) + ";" + str(pred[1]) + ";" + str(correctly_pred) + "\n"
if pred[0] > max_true_value:
max_true_value = pred[0]
max_true_index = i
if predicted_int[i] != y_test[i]:
worst_true_index = i
if pred[1] > max_false_value:
max_false_value = pred[1]
max_false_index = i
if predicted_int[i] != y_test[i]:
worst_false_index = i
file += "\nStatistics\n"
file += "max_true_value: " + str(max_true_value) + "\n"
file += "max_true_index: " + str(max_true_index) + "\n"
file += "max_false_value: " + str(max_false_value) + "\n"
file += "max_false_index: " + str(max_false_index) + "\n"
file += "worst_true_index: " + str(worst_true_index) + "\n"
file += "worst_false_index: " + str(worst_false_index) + "\n"
file += "===================================================\n"
file += "===================================================\n"
info_false, sorted_false = get_info_for_label(False)
info_true, sorted_true = get_info_for_label(True)
with open(PLOTS_DIR + filename+".txt", "w+") as text_file:
text_file.write(file + info_false + info_true)
return sorted_true, sorted_false, worst_true_index, worst_false_index
| 5,335,919
|
def save_excel_file():
"""File save dialog for an excel file.
Returns:
str: file path
"""
return pick_excel_file(save=True)
| 5,335,920
|
def load_app_paths(file_path=None, dir_path=None, user_file_path=None,
user_dir_path=None, default=None, paths=None, **kwargs):
"""Parse and merge user and app config files
User config will have precedence
:param file_path: Path to the base config file
:param dir_path: Path to the extension config directory
:param user_file_path: Path to the user base config file
:param user_dir_path: Path to the user extension config directory
:param default: Path to be prepended as the default config file embedded
in the app
:param paths: Extra paths to add to the parsing after the defaults
:param force_extension: only read files with given extension.
:returns: Single dict with all the loaded config
"""
files = [default, file_path, dir_path, user_file_path, user_dir_path]
files += (paths or [])
return load_paths([path for path in files if path], **kwargs)
| 5,335,921
|
def search_playlists(spotify_token, playlist):
"""
:param spotify_token:
:param playlist:
:return:
"""
return _search(spotify_token, query=playlist, type='playlist', limit=9, market='ES', offset=0)
| 5,335,922
|
def text_pre_process(result):
""" 이미지에서 인식된 글자를 정제 합니다.
특수문자 제거, 1-2단어 제거, 줄바꿈 및 공백 제거
:param result: 이미지에서 인식된 글자
:return: 문자를 전처리한 결과
"""
copy = str(result)
copy2 = copy.replace("\n", "")
copy3 = re.sub('[^ㄱ-힗]', '', copy2)
# re.sub('[^A-Za-z0-9]', '', copy2)
result = re.sub('[-=+,#}/\{:^$.@*\※~&%ㆍ!『「』\\‘|\(\)\[_ ""\]\<\>`\'…》]', '', copy3)
# shortword = re.compile(r'\W*\b\w{1,2}\b')
# shortword.sub('', result)
# text2 = re.sub(r'\d','',result)
if result is not None and len(result) > 3:
# print(result)
return result
| 5,335,923
|
def test_invalid_index(trimatrix):
"""Test various invalid indices."""
# Too many
with pytest.raises(ValueError):
trimatrix[:, :, :]
# Float scalar
with pytest.raises(TypeError):
trimatrix[0.5]
# Float array
with pytest.raises(ValueError):
trimatrix[np.linspace(0, 10)]
| 5,335,924
|
def pandas_data_get():
"""pandas api进行值的获取"""
df2 = pd.DataFrame({
'A': 1.,
# series对象作为一列
'B': pd.Series(1, index=list(range(4)), dtype='float32'),
'C': np.array([3] * 4, dtype='int32'),
'd': pd.Timestamp('20130102'),
# 单值也会与其他列长度保持一致
'F': 'foo',
'E': pd.Categorical(["test", "train", "test", "train"]),
'G': [2, 4, 6, 8]
})
# 获取单列的值,此写法不如df2['A']来的直观
print(df2.A)
dates = pd.date_range('20210101', periods=6)
df3 = pd.DataFrame(np.random.randn(6, 3), index=dates, columns=list('ABC'))
# 按标签获取某行数据,通过索引列进行获取整行数据,类似关系型数据库中的按id获取
print(df3.loc[dates[0]])
# 用标签切片,包括行切片和列切片
print(df3.loc['20210101':'20210102', ['A', 'B']])
# 提取标量值
print(df3.loc['20210101', 'A'])
# 按位置切片,通过行索引排序值
print(df3.iloc[3])
# 按位置切片,通过行列的索引的排序值
print(df3.iloc[3:5, 0:2])
# 通过类似坐标定位获取单元格值
print(df3.iloc[0, 0])
# 布尔筛选,通过单列条件提取值行数据
print(df2[df2['G'] > 4])
| 5,335,925
|
def push_thread_callback(app: Flask):
"""Process outstanding MDM commands by issuing a push to device(s).
TODO: A push with no response needs an exponential backoff time.
Commands that are ready to send must satisfy these criteria:
- Command is in Queued state.
- Command.after is null.
- Command.ttl is not zero.
- Device is enrolled (is_enrolled)
"""
while not push_thread_stopped.wait(push_time):
app.logger.info('Push Thread checking for outstanding commands...')
with app.app_context():
pending: Tuple[Device, int] = db.session.query(Device, func.Count(Command.id)).\
filter(Device.id == Command.device_id).\
filter(Command.status == CommandStatus.Queued).\
filter(Command.ttl > 0).\
filter(Command.after == None).\
filter(Device.is_enrolled == True).\
group_by(Device.id).\
all()
for d, c in pending:
app.logger.info('PENDING: %d command(s) for device UDID %s', c, d.udid)
if d.token is None or d.push_magic is None:
app.logger.warn('Cannot request push on a device that has no device token or push magic')
continue
try:
response = push_to_device(d)
except ssl.SSLError:
return stop()
app.logger.info("[APNS2 Response] Status: %d, Reason: %s, APNS ID: %s, Timestamp",
response.status_code, response.reason, response.apns_id.decode('utf-8'))
d.last_push_at = datetime.utcnow()
if response.status_code == 200:
d.last_apns_id = response.apns_id
db.session.commit()
| 5,335,926
|
def get_conventional_std_cell(atoms):
"""Given an ASE atoms object, return the ASE atoms object in the conventional standard cell.
It uses symmetries to find the conventional standard cell.
In particular, it gives a structure with a conventional cell according to the standard defined in
W. Setyawan, and S. Curtarolo, Comput. Mater. Sci.49(2), 299-312 (2010). \n
This is simply a wrapper around the pymatgen implementation:
http://pymatgen.org/_modules/pymatgen/symmetry/analyzer.html
Parameters:
atoms: `ase.Atoms` object
Atomic structure.
Returns:
`ase.Atoms` object
Return the structure in a conventional cell.
.. seealso:: To create a standard cell that it is independent from symmetry operations use
:py:mod:`ai4materials.utils.utils_crystals.get_conventional_std_cell_no_sym`
.. codeauthor:: Angelo Ziletti <angelo.ziletti@gmail.com>
"""
# save atoms.info dict otherwise it gets lost in the conversion
atoms_info = atoms.info
mg_structure = AseAtomsAdaptor.get_structure(atoms)
finder = SpacegroupAnalyzer(mg_structure)
mg_structure = finder.get_conventional_standard_structure()
conventional_standard_atoms = AseAtomsAdaptor.get_atoms(mg_structure)
conventional_standard_atoms.info = atoms_info
return conventional_standard_atoms
| 5,335,927
|
def close_ind_lst(ind_lst):
"""
Closes index objects supplied in input parameter list
"""
for index in ind_lst:
index.close()
| 5,335,928
|
def get_if_rcnn(inputs: Tensor):
"""
:param inputs: Tensor from Input Layer
:return:
"""
# get backbone outputs
if_backbones_out = backbones(inputs)
return if_backbones_out
| 5,335,929
|
def method_3(num_chars: int):
"""
Pythonicish way of generating random password
Args:
num_chars (int): Number of Characters the password will be
Returns:
string: The generated password
"""
chars = string.ascii_letters + string.digits + string.punctuation
password = "".join((secrets.choice(chars) for i in range(num_chars)))
return password
| 5,335,930
|
def get_animation_for_block(
block_start: int,
frame_num: int,
total_frames: int,
duration: int=5,
):
"""Generate CSS to pop a block from gray to red at the right frame
block_start: int
frame_num: int
total_frames: int
duration: int # seconds"""
animation_function = gray_red_blue
return animation_function(block_start, frame_num, total_frames, duration)
| 5,335,931
|
def get_all_pokemon_stats():
"""
Get all the pokemon and their stats by an API call.
"""
database_handler = DatabaseAPIHandler()
# There are 807 pokemon callable in the API.
for pokemon_number in range(1, 808):
pokemon_status_data = database_handler.get_pokemon_status_data(pokemon_number)
database_handler.save_pokemon_status_data(pokemon_status_data)
| 5,335,932
|
def find_student_by_username(usuario_id, test=False):
"""Consulta toda la información de un estudiante según su usuario."""
query = 'SELECT * FROM estudiante WHERE id_usuario = %s'
return execute_sql(query, args=[usuario_id], rows=1, test=test)
| 5,335,933
|
def download_rt_files(dst_dir, fs=None, date="2021-08-01", glob_path=None):
"""Download all files for an GTFS RT feed (or multiple feeds)
If date is specified, downloads daily data for all feeds. Otherwise, if
glob_path is specified, downloads data for a single feed.
Parameters:
date: date of desired feeds to download data from (e.g. 2021-09-01)
glob_path: if specified, the path (including a wildcard) for downloading a
single feed.
"""
if fs is None:
raise NotImplementedError("Must specify fs")
# {date}T{timestamp}/{itp_id}/{url_number}
all_files = fs.glob(glob_path) if glob_path else fs.glob(f"{RT_BUCKET_FOLDER}/{date}*/*/*/*")
to_copy = []
out_feeds = defaultdict(lambda: [])
for src_path in all_files:
dt, itp_id, url_number, src_fname = src_path.split("/")[-4:]
if glob_path:
dst_parent = Path(dst_dir)
else:
# if we are downloading multiple feeds, make each feed a subdir
dst_parent = Path(dst_dir) / itp_id / url_number
dst_parent.mkdir(parents=True, exist_ok=True)
out_fname = build_pb_validator_name(dt, itp_id, url_number, src_fname)
dst_name = str(dst_parent / out_fname)
to_copy.append([src_path, dst_name])
out_feeds[(itp_id, url_number)].append(dst_name)
print(f"Copying {len(to_copy)} files")
src_files, dst_files = zip(*to_copy)
fs.get(list(src_files), list(dst_files))
| 5,335,934
|
def reduce_mem_usage(df, use_float16=False):
"""
Iterate through all the columns of a dataframe and modify the data type to reduce memory usage.
"""
start_mem = df.memory_usage().sum() / 1024 ** 2
print("Memory usage of dataframe is {:.2f} MB".format(start_mem))
for col in df.columns:
if is_datetime(df[col]) or is_categorical_dtype(df[col]):
continue
col_type = df[col].dtype
if col_type != object:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == "int":
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if use_float16 and c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
else:
df[col] = df[col].astype("category")
end_mem = df.memory_usage().sum() / 1024 ** 2
print("Memory usage after optimization is: {:.2f} MB".format(end_mem))
print("Decreased by {:.1f}%".format(100 * (start_mem - end_mem) / start_mem))
return df
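A small usage sketch, assuming pandas and numpy are imported and `is_datetime` / `is_categorical_dtype` are the usual pandas dtype helpers, as the snippet implies:

import numpy as np
import pandas as pd

df = pd.DataFrame({
    "small_int": np.arange(1000),    # fits in int16 after downcasting
    "ratio": np.random.rand(1000),   # downcast to float32 (float16 only if use_float16=True)
    "city": ["A", "B"] * 500,        # object column converted to category
})
df = reduce_mem_usage(df)
print(df.dtypes)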
| 5,335,935
|
def ParseCommandYAML():
"""Function for parsing command line arguments for input to YAML HDIprep"""
# if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--im", nargs='*')
parser.add_argument("--pars")
parser.add_argument("--out_dir")
args = parser.parse_args()
# Create a dictionary object to pass to the next function
# (avoid shadowing the built-in name `dict`)
arg_dict = {"im": args.im, "pars": args.pars, "out_dir": args.out_dir}
# Print the dictionary object
print(arg_dict)
# Return the dictionary
return arg_dict
| 5,335,936
|
def silence_flask_startup() -> None:
# pylint: disable=line-too-long
"""Calling this function monkey patches the function flask.cli.show_server_banner
(https://github.com/pallets/flask/blob/a3f07829ca03bf312b12b3732e917498299fa82d/src/flask/cli.py#L657-L683)
which by default outputs something like:
* Serving Flask app "webviz_app" (lazy loading)
* Environment: production
WARNING: This is a development server. Do not use it in a production deployment.
Use a production WSGI server instead.
* Debug mode: off
This warning is confusing to new users of flask and webviz-config, which thinks
something is wrong (even though having development/debug mode turned off, and
limit availability to localhost, is best practice wrt. security).
After calling this function the exact lines above are not shown
(all other information/output from the flask instance is untouched).
"""
def silent_function(*_args: Any, **_kwargs: Any) -> None:
pass
flask.cli.show_server_banner = silent_function
| 5,335,937
|
def creature_ability_093(field, player, opponent, virtual, target, itself):
"""
Last Words: Give Ward to a random allied follower.
"""
length = len(field.get_creature_location()[player.player_num])
if length > 0:
target_id = random.randint(0, length - 1)
target_creature = field.card_location[player.player_num][target_id]
add_ability_to_creature(field, player, target_creature, virtual, add_ability=[KeywordAbility.WARD.value])
| 5,335,938
|
def cat_files(src, dst, no_redundant_header=False, is_header_f=return_false):
"""Concatenate files in src and save to dst.
src --- source file names in a list
dst --- destination file name
no_redundant_header --- Only keep headers in the 1st file, skip others.
is_header_f --- return True if a line is a header, otherwise, False
"""
if src is None or len(src) == 0:
raise ValueError("src should contain at least one file.")
if dst in src:
raise IOError("Unable to cat a file and save to itself.")
with open(real_ppath(dst), 'w') as writer:
for idx, src_f in enumerate(src):
with open(real_ppath(src_f), 'r') as reader:
for line in reader:
if idx != 0 and no_redundant_header and is_header_f(line):
continue
writer.write(line.rstrip() + '\n')
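A minimal usage sketch with hypothetical file names; only the header of the first file is kept when `no_redundant_header` is set (`real_ppath` is assumed to resolve paths as in the original module):

def is_header(line):
    return line.startswith("#")

cat_files(src=["part1.tsv", "part2.tsv"], dst="merged.tsv",
          no_redundant_header=True, is_header_f=is_header)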
| 5,335,939
|
def create_global_step() -> tf.Variable:
"""Creates a `tf.Variable` suitable for use as a global step counter.
Creating and managing a global step variable may be necessary for
`AbstractTrainer` subclasses that perform multiple parameter updates per
`Controller` "step", or use different optimizers on different steps.
In these cases, an `optimizer.iterations` property generally can't be used
directly, since it would correspond to parameter updates instead of iterations
in the `Controller`'s training loop. Such use cases should simply call
`step.assign_add(1)` at the end of each step.
Returns:
A non-trainable scalar `tf.Variable` of dtype `tf.int64`, with only the
first replica's value retained when synchronizing across replicas in
a distributed setting.
"""
return tf.Variable(
0,
dtype=tf.int64,
name="global_step",
trainable=False,
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
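A short usage sketch inside a hand-written training loop, incrementing the counter once per Controller step as the docstring suggests:

step = create_global_step()
for _ in range(3):
    # ... run one training step here ...
    step.assign_add(1)
print(int(step.numpy()))  # 3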
| 5,335,940
|
def convert_part_merging(argstuple):
"""
Convert a corpus part into plain text and merging multiple word entries.
Args:
argstuple: Tuple of methods arguments (``inpath`` (*str*): Path to this processes' corpus part / ``dir_outpath``
(*str*): Path to this processes' output / ``log_path`` (*str*): Path to this processes' log / ``interval``
(*int*): Logging interval in seconds)
"""
inpath, dir_outpath, log_path, interval = argstuple
@log_time(log_path, interval)
def _convert_part_merging(inpath, dir_outpath, log_path):
with codecs.open(log_path, "a", "utf-8") as log_file:
process_name = multiprocessing.current_process().name
log_file.write(alt("%s: Start logging processing of\n\t%s to \n\t%s...\n" % (process_name, inpath, dir_outpath)))
file_n = get_file_number(inpath)
outpath = dir_outpath + 'decow%s_out.txt' %(str(file_n))
with gz.open(inpath, 'rb') as infile, codecs.open(outpath, 'wb', 'utf-8') as outfile:
sentence = []
line, lcount = infile.readline().strip().decode("utf-8"), 1
while line != "":
if lcount % 100000 == 0:
log_file.write(alt("%s: Processing line nr. %i...\n" % (process_name, lcount)))
ne = extract_named_entity(line) # Extract possible named entity
if line.startswith(u'<s'):
outfile.write('%s\n' %(' '.join(sentence)))
sentence = []
# If there was a named entity found, try to complete it if it's a multi-word expression
elif ne is not None:
while True:
next_line = infile.readline().strip().decode("utf-8")
lcount += 1
if not contains_tag(next_line):
next_ne = extract_named_entity(next_line)
if next_ne is not None and next_ne[1] == ne[1]:
ne = ("%s_%s" %(ne[0], next_ne[0]), ne[1])
else:
break
else:
break
sentence.append(ne[0])
line = next_line
continue
elif not line.startswith(u'<'):
sentence.append(line.split('\t')[0])
line, lcount = infile.readline().strip().decode("utf-8"), lcount + 1
_convert_part_merging(inpath, dir_outpath, log_path)
| 5,335,941
|
def rl_label_weights(name=None):
"""Returns the weight for importance."""
with tf.variable_scope(name, 'rl_op_selection'):
num_classes = get_src_num_classes()
num_choices = FLAGS.num_choices
logits = tf.get_variable(
name='logits_rl_w',
initializer=tf.initializers.zeros(),
shape=[num_classes, num_choices],
dtype=tf.float32)
dist_logits_list = logits.value()
dist = tfp.distributions.Categorical(logits=logits)
dist_entropy = tf.reduce_sum(dist.entropy())
sample = dist.sample()
sample_masks = 1. * tf.cast(sample, tf.float32) / num_choices
sample_log_prob = tf.reduce_mean(dist.log_prob(sample))
return (dist_logits_list, dist_entropy, sample_masks, sample_log_prob)
| 5,335,942
|
def run_de_test(dataset1: Dataset, dataset2,
test_cells: List[str], control_cells: List[List[str]],
test_label: str = None, control_group_labels: list = None,
exp_frac_thresh: float = 0.25, log2_fc_thresh: float = 1,
qval_thresh: float = 0.05, tqdm_msg: str = '') -> pd.DataFrame:
"""
Identifies differentially expressed genes using Mann Whitney U test.
:param dataset1: nabo.Dataset instance
:param dataset2: nabo.Dataset instance or None
:param test_cells: list of cells for which markers have to be found.
These could be cells from a cluster, cells with high
mapping score, etc.
:param control_cells: List of cell groups against which markers need to
be found. This could be just one group of cells or
multiple groups of cells.
:param test_label: Label for test cells.
:param control_group_labels: Labels of control cell groups
:param exp_frac_thresh: Fraction of cells that should have a non zero
value for a gene.
:param log2_fc_thresh: Threshold for log2 fold change
:param qval_thresh: Threshold for adjusted p value
:param tqdm_msg: Message to print while displaying progress
:return: pd.Dataframe
"""
from scipy.stats import mannwhitneyu
from statsmodels.sandbox.stats.multicomp import multipletests
test_cells_idx = [dataset1.cellIdx[x] for x in test_cells]
control_cells_idx_group = []
for i in control_cells:
if dataset2 is None:
control_cells_idx_group.append([dataset1.cellIdx[x] for x in i])
else:
control_cells_idx_group.append([dataset2.cellIdx[x] for x in i])
if test_label is None:
test_label = 'Test group'
if control_group_labels is None:
control_group_labels = ['Ctrl group %d' % x for x in range(len(
control_cells_idx_group))]
num_test_cells = len(test_cells_idx)
num_groups = len(control_cells_idx_group)
min_n = [min(num_test_cells, len(x)) for x in control_cells_idx_group]
n1n2 = [num_test_cells * x for x in min_n]
if dataset2 is None:
valid_genes = {dataset1.genes[x]: None for x in dataset1.keepGenesIdx}
else:
valid_genes = {}
control_gene_list = {x: None for x in dataset2.genes}
for i in dataset1.keepGenesIdx:
gene = dataset1.genes[i]
if gene in control_gene_list:
valid_genes[gene] = None
del control_gene_list
de = []
for gene in tqdm(valid_genes, bar_format=tqdm_bar, desc=tqdm_msg):
rbc, mw_p, log_fc = 0, 1, 0
all_vals = dataset1.get_norm_exp(gene)
test_vals = all_vals[test_cells_idx]
ef = np.nonzero(test_vals)[0].shape[0] / num_test_cells
if ef < exp_frac_thresh:
continue
if dataset2 is None:
all_control_vals = all_vals
else:
all_control_vals = dataset2.get_norm_exp(gene)
log_mean_test_vals = np.log2(test_vals.mean())
for i in range(num_groups):
control_vals = all_control_vals[control_cells_idx_group[i]]
control_vals.sort()
control_vals = control_vals[-min_n[i]:]
mean_control_vals = control_vals.mean()
if mean_control_vals == 0:
log_fc = np.inf
else:
log_fc = log_mean_test_vals - np.log2(mean_control_vals)
if log_fc < log2_fc_thresh:
continue
try:
u, mw_p = mannwhitneyu(test_vals, control_vals)
except ValueError:
pass
else:
rbc = 1 - ((2 * u) / n1n2[i])
de.append((gene, ef, control_group_labels[i], rbc, log_fc, mw_p))
de = pd.DataFrame(de, columns=['gene', 'exp_frac', 'versus_group',
'rbc', 'log2_fc', 'pval'])
if de.shape[0] > 1:
de['qval'] = multipletests(de['pval'].values, method='fdr_bh')[1]
else:
de['qval'] = [np.nan for _ in range(de.shape[0])]
de['test_group'] = [test_label for _ in range(de.shape[0])]
out_order = ['gene', 'exp_frac', 'test_group', 'versus_group',
'rbc', 'log2_fc', 'pval', 'qval']
de = de[out_order].sort_values(by='qval')
return de[(de.qval < qval_thresh)].reset_index().drop(columns=['index'])
| 5,335,943
|
def say_hello_twice(subject):
"""Says hello twice using `say_hello`."""
return say_hello(subject) + " " + say_hello(subject)
| 5,335,944
|
def get_zones(ec2):
"""
Return all available zones in the region
"""
zones = []
try:
aws_zones = ec2.describe_availability_zones()['AvailabilityZones']
except ClientError as e:
print(e.response['Error']['Message'])
return None
for zone in aws_zones:
if zone['State'] == 'available':
zones.append(zone['ZoneName'])
return zones
| 5,335,945
|
def main():
"""Main function"""
parser = argparse.ArgumentParser()
parser.add_argument('-Dir', type=str)
opt = parser.parse_args()
Dataset(opt)
| 5,335,946
|
def x_gate():
"""
Pauli x
"""
return torch.tensor([[0, 1], [1, 0]]) + 0j
| 5,335,947
|
def merge_dictionaries(default_dictionary, user_input_dictionary, path=None):
"""Merges user_input_dictionary into default dictionary;
default values will be overwritten by users input."""
return {**default_dictionary, **user_input_dictionary}
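A quick usage sketch; user values take precedence and the unused `path` argument can be ignored:

defaults = {"host": "localhost", "port": 8080}
user_input = {"port": 9000}
print(merge_dictionaries(defaults, user_input))  # {'host': 'localhost', 'port': 9000}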
| 5,335,948
|
def create_frequencyvector(T_end, f_max_requested):
""" A function to create the vector of frequencies we need to solve using the reflectivity
method, to achieve the desired length of time and highest modelled frequency.
NOTE: Because we require the number of frequencies to be odd, the maximum frequency may
change.
Returns the frequency vector and the corresponding time step dt.
"""
# T_end : End time of simulation
# f_max_requested : Maximum desired frequency to be modelled
# Minimum modelled frequency (always 0 for now)
f_min = 0
# Frequency resolution
df = 1 / T_end
# Number of frequencies (round up if needed), + 1 for the first frequency (zero)
n_f = np.ceil((f_max_requested - f_min) / df) + 1
n_f = n_f.astype(int)
# Make sure the number of frequencies is odd
if n_f % 2 != 1:
n_f += 1
# Maximum modelled frequency (accurate), -1 for the first frequency which is zero
f_max_actual = (n_f - 1) * df
assert f_max_actual >= f_max_requested, 'Actual frequency too low'
dt = 1 / (2 * f_max_actual)
freq = np.linspace(0, f_max_actual, n_f)
return freq, dt
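A worked example: with T_end = 2 s and f_max_requested = 50 Hz, df = 1/2 = 0.5 Hz, n_f = ceil(50 / 0.5) + 1 = 101 (already odd), so f_max_actual = 50 Hz and dt = 1 / (2 * 50) = 0.01 s:

freq, dt = create_frequencyvector(T_end=2.0, f_max_requested=50.0)
print(len(freq), freq[-1], dt)  # 101 50.0 0.01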
| 5,335,949
|
def get_fiber_protein_intake(
nutrients_lower_lists, nutrients_middle_lists,nutrients_upper_lists):
"""Gets financial class-wise fibee and protein intake data."""
lower_fiber_prot = nutrients_lower_lists.map(lambda x: (x[1], x[3]))
middle_fiber_prot = nutrients_middle_lists.map(lambda x: (x[1], x[3]))
upper_fiber_prot = nutrients_upper_lists.map(lambda x: (x[1], x[3]))
return lower_fiber_prot, middle_fiber_prot, upper_fiber_prot
| 5,335,950
|
def _add_fvar(font, axes, instances, axis_map):
"""
Add 'fvar' table to font.
axes is a dictionary mapping axis-id to axis (min,default,max)
coordinate values.
instances is list of dictionary objects with 'location', 'stylename',
and possibly 'postscriptfontname' entries.
axisMap is dictionary mapping axis-id to (axis-tag, axis-name).
"""
assert "fvar" not in font
font['fvar'] = fvar = newTable('fvar')
nameTable = font['name']
for iden in sorted(axes.keys(), key=lambda k: axis_map[k][0]):
axis = Axis()
axis.axisTag = Tag(axis_map[iden][0])
axis.minValue, axis.defaultValue, axis.maxValue = axes[iden]
axisName = tounicode(axis_map[iden][1])
axis.axisNameID = nameTable.addName(axisName)
fvar.axes.append(axis)
for instance in instances:
coordinates = instance['location']
name = tounicode(instance['stylename'])
psname = instance.get('postscriptfontname')
inst = NamedInstance()
inst.subfamilyNameID = nameTable.addName(name)
if psname is not None:
psname = tounicode(psname)
inst.postscriptNameID = nameTable.addName(psname)
inst.coordinates = {axis_map[k][0]:v for k,v in coordinates.items()}
fvar.instances.append(inst)
return fvar
| 5,335,951
|
def init_nornir(username, password):
"""INITIALIZES NORNIR SESSIONS"""
nr = InitNornir(
config_file="network_automation/topology_builder/graphviz/config/config.yml"
)
nr.inventory.defaults.username = username
nr.inventory.defaults.password = password
managed_devs = nr.filter(F(groups__contains="ios_devices") | F(groups__contains="nxos_devices"))
with tqdm(total=len(managed_devs.inventory.hosts)) as progress_bar:
results = managed_devs.run(task=get_data_task, progress_bar=progress_bar)
hosts_failed = list(results.failed_hosts.keys())
if hosts_failed != []:
auth_fail_list = list(results.failed_hosts.keys())
for dev in auth_fail_list:
dev_auth_fail_list.add(dev)
print(f"Authentication Failed: {auth_fail_list}")
print(
f"{len(list(results.failed_hosts.keys()))}/{len(managed_devs.inventory.hosts)} devices failed authentication..."
)
return managed_devs, results, dev_auth_fail_list
| 5,335,952
|
def _format_rest_url(host: str, append: str = "") -> str:
"""Return URL used for rest commands."""
return f"http://{host}:8001/api/v2/{append}"
| 5,335,953
|
def create(config, directory):
"""Add evaluation files to client storage"""
for path in directory.iterdir():
upload(config['name'], path)
# Add listening test files to client storage
if 'listening_test' in config:
upload(config['name'], reseval.LISTENING_TEST_DIRECTORY)
| 5,335,954
|
def load(name: str,
device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu",
jit: bool = False,
download_root: str = None):
"""Load a CLIP model
Parameters
----------
name : str
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
device : Union[str, torch.device]
The device to put the loaded model
jit : bool
Whether to load the optimized JIT model or more hackable non-JIT model (default).
download_root: str
path to download the model files; by default, it uses "~/.cache/clip"
Returns
-------
model : torch.nn.Module
The CLIP model
preprocess : Callable[[PIL.Image], torch.Tensor]
A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
"""
if name in _MODELS:
model_path = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/clip"))
elif os.path.isfile(name):
model_path = name
else:
raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
try:
# loading JIT archive
model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
state_dict = None
except RuntimeError:
# loading saved state dict
if jit:
warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
jit = False
state_dict = torch.load(model_path, map_location="cpu")
if not jit:
model = build_model(state_dict or model.state_dict()).to(device)
if str(device) == "cpu":
model.float()
return model, _transform(model.visual.input_resolution)
# patch the device names
device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)),
example_inputs=[])
device_node = [
n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)
][-1]
def patch_device(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("prim::Constant"):
if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
node.copyAttributes(device_node)
model.apply(patch_device)
patch_device(model.encode_image)
patch_device(model.encode_text)
# patch dtype to float32 on CPU
if str(device) == "cpu":
float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
float_node = float_input.node()
def patch_float(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("aten::to"):
inputs = list(node.inputs())
for i in [1, 2]: # dtype can be the second or third argument to aten::to()
if inputs[i].node()["value"] == 5:
inputs[i].node().copyAttributes(float_node)
model.apply(patch_float)
patch_float(model.encode_image)
patch_float(model.encode_text)
model.float()
return model, _transform(model.input_resolution.item())
| 5,335,955
|
def show_components(im, comps, npixels=128, fig=None, vmax=None, vmin=None, title=''):
""" Show components against an image
:param im:
:param comps:
:param npixels:
:param fig:
:return:
"""
import matplotlib.pyplot as plt
if vmax is None:
vmax = numpy.max(im.data[0, 0, ...])
if vmin is None:
vmin = numpy.min(im.data[0, 0, ...])
if not fig:
fig = plt.figure()
plt.clf()
for isc, sc in enumerate(comps):
newim = copy_image(im)
plt.subplot(111, projection=newim.wcs.sub([1, 2]))
centre = numpy.round(skycoord_to_pixel(sc.direction, newim.wcs, 1, 'wcs')).astype('int')
newim.data = newim.data[:, :,
(centre[1] - npixels // 2):(centre[1] + npixels // 2),
(centre[0] - npixels // 2):(centre[0] + npixels // 2)]
newim.wcs.wcs.crpix[0] -= centre[0] - npixels // 2
newim.wcs.wcs.crpix[1] -= centre[1] - npixels // 2
plt.imshow(newim.data[0, 0, ...], origin='lower', cmap='Greys', vmax=vmax, vmin=vmin)
x, y = skycoord_to_pixel(sc.direction, newim.wcs, 0, 'wcs')
plt.plot(x, y, marker='+', color='red')
plt.title('Name = %s, flux = %s' % (sc.name, sc.flux))
plt.show()
| 5,335,956
|
def add_h(mol: oechem.OEMol):
"""Add explicit hydrogens to a molecule"""
for atom in mol.GetAtoms():
oechem.OEAddExplicitHydrogens(mol, atom)
| 5,335,957
|
def test_provision_no_vmx():
"""Test provisioning."""
mock_inst = MagicMock()
mock_inst.vmx = None
mock_inst.provider = 'vmware'
with raises(SystemExit, match=r"Need to provide vmx.*"):
mech.utils.provision(instance=mock_inst, show=None)
| 5,335,958
|
def _handle_sample(edf, res):
"""SAMPLE_TYPE"""
e = edf_get_sample_data(edf).contents
off = res['offsets']['sample']
res['samples'][:, off] = _to_list(e, res['edf_sample_fields'],
res['eye_idx'])
res['offsets']['sample'] += 1
| 5,335,959
|
def get_synonyms(prefix: str) -> Optional[Set[str]]:
"""Get the synonyms for a given prefix, if available."""
entry = get_resource(prefix)
if entry is None:
return None
return entry.get_synonyms()
| 5,335,960
|
def results(request):
""" Returns the actual body of the search results, for AJAX stuff """
query = request.GET.get("q", "")
if len(query) >= 4:
ctx = _search_context(query, request.user)
return TemplateResponse(request, "search/results.html", ctx)
return TemplateResponse(request, "search/too_short.html", {})
| 5,335,961
|
def is_correlated(corr_matrix, feature_pairs, rho_threshold=0.8):
"""
Returns a dict where the keys are the feature pairs and the values
are booleans indicating whether each pair is linearly correlated above
the given threshold.
"""
results = {}
for pair in feature_pairs:
f1, f2 = pair.split("__")
corr = corr_matrix[f1][f2]
results[pair] = round(corr, 3) >= rho_threshold
return results
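A minimal usage sketch, assuming `corr_matrix` is a pandas correlation matrix and pair names follow the "feat1__feat2" convention used by the split on '__':

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3, 4], "b": [2, 4, 6, 8], "c": [4, 1, 3, 2]})
corr_matrix = df.corr()
print(is_correlated(corr_matrix, ["a__b", "a__c"]))  # {'a__b': True, 'a__c': False}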
| 5,335,962
|
def find_password(liste, login):
""" """
for user in liste:
if user[0] == login:
return user[1]
return None
| 5,335,963
|
def sample_weather_scenario():
"""
Generate a weather scenario with known values for the wind condition.
"""
times = pd.date_range('1/1/2000', periods=72, freq='6H')
latitude = np.linspace(0, 10, 11)
longitude = np.linspace(0, 10, 11)
wsp_vals = np.full((72, 11, 11), 10.0)
wdi_vals = np.full((72, 11, 11), 0.0)
cusp_vals = np.full((72, 11, 11), 0.0)
cudi_vals = np.full((72, 11, 11), 0.0)
wadi_vals = np.full((72, 11, 11), 0.0)
wahi_vals = np.full((72, 11, 11), 0.0)
wisp = xr.DataArray(wsp_vals, dims=['time', 'lon_b', 'lat_b'],
coords={'time': times,
'lon_b': longitude,
'lat_b': latitude})
widi = xr.DataArray(wdi_vals, dims=['time', 'lon_b', 'lat_b'],
coords={'time': times,
'lon_b': longitude,
'lat_b': latitude})
cusp = xr.DataArray(cusp_vals, dims=['time', 'lon_b', 'lat_b'],
coords={'time': times,
'lon_b': longitude,
'lat_b': latitude})
cudi = xr.DataArray(cudi_vals, dims=['time', 'lon_b', 'lat_b'],
coords={'time': times,
'lon_b': longitude,
'lat_b': latitude})
wahi = xr.DataArray(wahi_vals, dims=['time', 'lon_b', 'lat_b'],
coords={'time': times,
'lon_b': longitude,
'lat_b': latitude})
wadi = xr.DataArray(wadi_vals, dims=['time', 'lon_b', 'lat_b'],
coords={'time': times,
'lon_b': longitude,
'lat_b': latitude})
return wisp, widi, cusp, cudi, wahi, wadi
| 5,335,964
|
def parse_csv_file(csv_filepath, expect_negative_correlation = False, STDev_cutoff = 1.0, headers_start_with = 'ID', comments_start_with = None, separator = ','):
"""
Analyzes a CSV file.
Expects a CSV file with a header line starting with headers_start_with e.g. "ID,experimental value, prediction 1 value, prediction 2 value,"
Record IDs are expected in the first column.
Experimental values are expected in the second column.
Predicted values are expected in the subsequent columns.
:param csv_filepath: The path to a CSV file containing experimental and predicted data for some dataset.
:param expect_negative_correlation: See parse_csv.
:param STDev_cutoff: See parse_csv.
:param headers_start_with: See parse_csv.
:param comments_start_with: See parse_csv.
:param separator: See parse_csv.
"""
assert (os.path.exists(csv_filepath))
return parse_csv(get_file_lines(csv_filepath),
expect_negative_correlation = expect_negative_correlation, STDev_cutoff = STDev_cutoff, headers_start_with = headers_start_with,
comments_start_with = comments_start_with, separator = separator)
| 5,335,965
|
def ToTensor(pic):
"""Converts a PIL.Image or numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].
"""
if isinstance(pic, np.ndarray):
img = torch.from_numpy(pic.transpose((2, 0, 1)))
return img.float().div(255)
if pic.mode == "I":
img = torch.from_numpy(np.array(pic, np.int32, copy=False))
elif pic.mode == "I;16":
img = torch.from_numpy(np.array(pic, np.int16, copy=False))
else:
img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
if pic.mode == "YCbCr":
nchannel = 3
elif pic.mode == "I;16":
nchannel = 1
else:
nchannel = len(pic.mode)
img = img.view(pic.size[1], pic.size[0], nchannel)
img = img.transpose(0, 1).transpose(0, 2).contiguous()
if isinstance(img, torch.ByteTensor):
return img.float().div(255)
else:
return img
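A quick usage sketch with a numpy image: an (H, W, C) uint8 array comes back as a (C, H, W) float tensor scaled to [0, 1]:

import numpy as np

img = np.random.randint(0, 256, size=(32, 48, 3), dtype=np.uint8)
tensor = ToTensor(img)
print(tensor.shape)                              # torch.Size([3, 32, 48])
print(float(tensor.min()), float(tensor.max()))  # both within [0.0, 1.0]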
| 5,335,966
|
def test_get_url_content_compressed(request):
"""should automatically decompress compressed URL content"""
url = 'https://www.bible.com/bible/59/psa.23'
gzip_buf = StringIO()
with GzipFile(fileobj=gzip_buf, mode='wb') as gzip_file:
gzip_file.write(html_content)
gzipped_content = gzip_buf.getvalue()
response_mock = NonCallableMock(
read=Mock(return_value=gzipped_content),
info=Mock(return_value=NonCallableMock(
get=Mock(return_value='gzip'))))
with patch('urllib2.urlopen', return_value=response_mock):
url_content = web.get_url_content(url).encode('utf-8')
nose.assert_equal(url_content, html_content)
| 5,335,967
|
def part2(data):
"""
>>> part2([[43, 19], [2, 29, 14]])
105
>>> part2([[9, 2, 6, 3, 1], [5, 8, 4, 7, 10]])
291
>>> part2(read_input())
32528
"""
deck_one = tuple(data[0])
deck_two = tuple(data[1])
_, winning_deck = combat(deck_one, deck_two)
return score(winning_deck)
| 5,335,968
|
def write_traces(directory, traces):
"""
Write traces locally to files
"""
for trace in traces:
traceid = trace["traceID"]
path = directory + "/" + traceid + ".json"
with open(path, 'w') as fd:
fd.write(json.dumps(trace))
| 5,335,969
|
def validate_recording(
ai_file_path, ephys_ap_data_path, debug=False, sampling_rate=30000
):
"""
Checks that an ephys recording and bonsai behavior recording
are correctly syncd. To do this:
1. check that number of recording sync signal pulses is the same for both sources
Arguments:
ai_file_pat: str. Path to .bin analog inputs file
ephys_ap_data_path: str. Path to .bin with AP ephys data.
"""
name = Path(ai_file_path).name
logger.debug(f"\nRunning validate RECORDING on {name}")
# load analog from bonsai
bonsai_probe_sync = load_or_open(
ephys_ap_data_path, "bonsai", ai_file_path, 3
)
# load data from ephys (from local file if possible)
ephys_probe_sync = load_or_open(
ephys_ap_data_path,
"ephys",
get_recording_local_copy(ephys_ap_data_path),
-1,
order="F",
dtype="int16",
nsigs=385,
)
# check for aberrant signals in bonsai
errors = np.where((bonsai_probe_sync < -0.1) | (bonsai_probe_sync > 5.1))[
0
]
if len(errors):
logger.info(
f"Found {len(errors)} samples with too high values in bonsai probe signal"
)
if len(errors) > 1000:
logger.warning(f"This value seems to long, retuirning gailure")
return False, 0, 0, "too_many_errors_in_behavior_sync_signal"
bonsai_probe_sync[errors] = bonsai_probe_sync[errors - 1]
# check for aberrant signals in ephys
errors = np.where(ephys_probe_sync > 75)[0]
if len(errors):
logger.warning(
f"Found {len(errors)} samples with too high values in probe signal"
)
if len(errors) > 1000:
return False, 0, 0, "too_many_errors_in_ephys_sync_signal"
ephys_probe_sync[errors] = ephys_probe_sync[errors - 1]
# find probe sync pulses in both
(
is_ok,
bonsai_sync_onsets,
bonsai_sync_offsets,
ephys_sync_onsets,
ephys_sync_offsets,
) = get_onsets_offsets(bonsai_probe_sync, ephys_probe_sync, sampling_rate)
# get time scaling factor
time_scaling_factor = 1 / (
(ephys_sync_onsets[-1] - ephys_sync_onsets[0])
/ (bonsai_sync_onsets[-1] - bonsai_sync_onsets[0])
)
# debugging plots
if debug or not is_ok:
plot_recording_triggers(
bonsai_probe_sync,
ephys_probe_sync,
bonsai_sync_onsets,
bonsai_sync_offsets,
ephys_sync_onsets,
ephys_sync_offsets,
sampling_rate,
time_scaling_factor,
)
# plt.show()
return is_ok, ephys_sync_onsets[0], time_scaling_factor, "nothing"
| 5,335,970
|
def manual(no_usb_hardware):
"""
Starts the manual mode application
"""
if no_usb_hardware:
print("Running in test mode with no USB hardware attached.")
options = {'hardware': "false"}
else:
print("USB hardware attached!")
options = {'hardware': "true"}
_roslaunch('manual_control.launch',options)
| 5,335,971
|
def dehyphenate(string):
"""Remove hyphenated linebreaks from 'string'."""
return hyphen_newline_re.sub("", string)
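A minimal sketch, assuming `hyphen_newline_re` is a pattern along the lines of the one below (the module's actual regex is not shown here):

import re

hyphen_newline_re = re.compile(r"-\n")
print(dehyphenate("experi-\nment"))  # experiment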
| 5,335,972
|
def grounder(img, dtype=None):
"""Tries to remove absolute offset
'img' must be a 3 colors image"""
shape = img.shape
"""
# Reshape
a = img.reshape((shape[0] * shape[1], 3))
min = np.zeros(a.shape)
max = np.zeros(a.shape)
# Minima / maxima
min[:,0] = min[:,1] = min[:,2] = a.min(axis=1)
max[:,0] = max[:,1] = max[:,2] = a.max(axis=1)
# Reshape back
min = min.reshape(shape)
max = max.reshape(shape)
# Bring back down to the ground level
grounded = img - min
# return (grounded / max).astype(np.float32)
return (grounded / 255.0).astype(np.float32)
"""#"""
min = coloroffset(img)
grounded = img - min
if dtype is not None:
grounded = grounded.astype(dtype)
return grounded
| 5,335,973
|
def create_folders(path):
""" recursively create folders """
if not os.path.isdir(path):
        while True:
            try:
                os.makedirs(path)
            except OSError:
                # creation can fail transiently (e.g. a race with another process); retry
                time.sleep(1)
            else:
                break
| 5,335,974
|
def arch_prob(arch, dims, **kwds):
""" Returns the combined probability of for arch given values """
values = dict(kwds)
dimkeys = list(dims.keys())
    assert isinstance(arch, (tuple, list)), "Architecture must be tuple or list"
serial = isinstance(arch, list)
probs = [None] * len(arch)
for i, subarch in enumerate(arch):
keyset = subarch.keylist
vals = {key: values[key] for key in dimkeys if key in keyset}
subdims = {key: dims[key] for key in dimkeys if key in keyset}
probs[i] = subarch.eval_prob(vals, subdims)
if serial:
return probs[-1]
pscales = [subarch.pscale for subarch in arch]
prob, pscale = prod_rule(*tuple(probs), pscales=pscales)
return prob
| 5,335,975
|
def test_has_permission_no_subscription(
api_rf, enable_premium_requirement, method, entry_factory
):
"""
If the owner of the journal entry does not have an active premium
subscription, a 404 error should be raised.
"""
entry = entry_factory(km_user__user__has_premium=False)
api_rf.user = entry.km_user.user
request = api_rf.generic(method, "/")
view = mock.Mock(name="Mock View")
view.kwargs = {"pk": entry.pk}
permission = permissions.HasEntryCommentListPermissions()
with pytest.raises(Http404):
permission.has_permission(request, view)
| 5,335,976
|
def get_bin_values(base_dataset, bin_value):
"""Gets the values to be used when sorting into bins for the given dataset, from the configured options."""
values = None
if bin_value == "results":
values = base_dataset.get_output()
elif bin_value == "all":
# We set all values to 0, assuming single bin will also set its value to 0.
values = [0] * base_dataset.get_number_of_samples()
else:
raise Exception(f"Invalid bin value configured: {bin_value}")
return values
| 5,335,977
|
def scan(
ws_spec: io.TextIOWrapper,
par_name: str,
lower_bound: Optional[float],
upper_bound: Optional[float],
n_steps: int,
asimov: bool,
figfolder: str,
) -> None:
"""Performs and visualizes a likelihood scan over a parameter.
Parameter bounds are determined automatically, unless both the ``lower_bound`` and
``upper_bound`` parameters are provided.
WS_SPEC: path to workspace used in fit
PAR_NAME: name of parameter to scan over
"""
_set_logging()
par_range: Optional[Tuple[float, float]]
if (lower_bound is not None) and (upper_bound is not None):
# both bounds specified
par_range = (lower_bound, upper_bound)
elif (lower_bound is not None) or (upper_bound is not None):
# mixed case not supported
raise ValueError(
"Need to either specify both lower_bound and upper_bound, or neither."
)
else:
# no bounds specified
par_range = None
ws = json.load(ws_spec)
model, data = cabinetry_model_utils.model_and_data(ws, asimov=asimov)
scan_results = cabinetry_fit.scan(
model, data, par_name, par_range=par_range, n_steps=n_steps
)
cabinetry_visualize.scan(scan_results, figure_folder=figfolder)
| 5,335,978
|
def get_reviews(revision_range):
"""Returns the list of reviews found in the commits in the revision range.
"""
log = check_output(['git',
'--no-pager',
'log',
'--no-color',
'--reverse',
revision_range]).strip()
review_ids = []
for line in log.split('\n'):
pos = line.find('Review: ')
if pos != -1:
pattern = re.compile('Review: ({url})$'.format(
url=os.path.join(REVIEWBOARD_URL, 'r', '[0-9]+')))
match = pattern.search(line.strip().strip('/'))
if match is None:
print "\nInvalid ReviewBoard URL: '{}'".format(line[pos:])
sys.exit(1)
url = match.group(1)
review_ids.append(os.path.basename(url))
return review_ids
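
# Usage sketch: the revision range uses git's "<base>..<tip>" syntax; REVIEWBOARD_URL
# is a module-level constant not shown in this snippet.
review_ids = get_reviews('origin/master..HEAD')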
| 5,335,979
|
def send_mail(sender_email, receiver_email, password, message):
"""
TODO: documentation
"""
port = 465 # For SSL
smtp_server = "smtp.gmail.com"
# Create a secure SSL context
context = ssl.create_default_context()
with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:
server.login(sender_email, password)
        # Send the email
server.sendmail(sender_email, receiver_email, message)
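
# Usage sketch with placeholder addresses: the message string is handed to
# smtplib.sendmail as-is, so headers such as Subject belong in the string itself.
example_message = "Subject: Pipeline finished\n\nThe nightly job completed without errors."
# send_mail("sender@gmail.com", "receiver@example.com", "app-password", example_message)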
| 5,335,980
|
def get_lines(filename: str) -> Generator[str, None, None]:
"""
Yield each line of the given .bz2 compressed log file
"""
with bz2.open(filename, 'rb') as fh:
for line_bytes in fh:
line = line_bytes.decode('utf-8', 'backslashreplace')
line = line.rstrip('\r\n')
yield line
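
# Usage sketch (the path is a placeholder): lines are decoded and yielded lazily,
# so even large compressed logs can be scanned without loading them into memory.
for line in get_lines("server.log.bz2"):
    if "ERROR" in line:
        print(line)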
| 5,335,981
|
def create_cert_req(keyType=crypto.TYPE_RSA,
bits=1024,
messageDigest="md5"):
"""
Create certificate request.
Returns: certificate request PEM text, private key PEM text
"""
# Create certificate request
req = crypto.X509Req()
# Generate private key
pkey = crypto.PKey()
pkey.generate_key(keyType, bits)
req.set_pubkey(pkey)
req.sign(pkey, messageDigest)
return (crypto.dump_certificate_request(crypto.FILETYPE_ASN1, req),
crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
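
# Usage sketch: with the dump calls above the request comes back as ASN.1/DER bytes and
# the key as PEM bytes, so both are written in binary mode; sha256 is used here instead
# of the weak md5 default.
csr_der, key_pem = create_cert_req(bits=2048, messageDigest="sha256")
with open("request.csr", "wb") as fd:
    fd.write(csr_der)
with open("request.key", "wb") as fd:
    fd.write(key_pem)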
| 5,335,982
|
def large_xyz_to_lab_star(large_xyz, white=const_d50_large_xyz):
"""
# 概要
L*a*b* から XYZ値を算出する
# 入力データ
numpy形式。shape = (N, M, 3)
# 参考
https://en.wikipedia.org/wiki/Lab_color_space
"""
if not common.is_img_shape(large_xyz):
raise TypeError('large_xyz shape must be (N, M, 3)')
x, y, z = np.dsplit(large_xyz, 3)
    white = [w / white[1] for w in white]  # normalize the white point so that Y == 1
l = 116 * _func_t(y/white[1]) - 16
a = 500 * (_func_t(x/white[0]) - _func_t(y/white[1]))
b = 200 * (_func_t(y/white[1]) - _func_t(z/white[2]))
return np.dstack((l, a, b))
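
# Usage sketch: an image filled with the D50 white point should map to a* ~= 0 and
# b* ~= 0 (and L* ~= 100 when the white point is scaled so that Y == 1); both
# `const_d50_large_xyz` and `_func_t` are defined elsewhere in this module.
xyz_img = np.ones((2, 2, 3)) * np.array(const_d50_large_xyz)
lab_img = large_xyz_to_lab_star(xyz_img)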
| 5,335,983
|
def saveData(datalist,savepath):
"""
创建excel表格并将爬取到的数据写入excel
:param datalist:爬取到的数据的列表
:param savepath: 保存数据的excel表格的路径
:return:
"""
logger.info("开始保存书籍信息...")
    workbook = xlwt.Workbook(encoding="utf-8")  # create the workbook
worksheet = workbook.add_sheet('微信读书Top20',cell_overwrite_ok=True)
col = ['书名',"作者名","简介","评分","今日阅读人数","书籍封面"]
for i in range(0,len(col)):
worksheet.write(0,i,col[i])
for i in range(0,len(datalist)):
        logger.info('Saving book %d...' % i)
data = datalist[i]
for j in range(0,len(col)):
worksheet.write(i+1,j,data[j])
workbook.save(savepath)
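
# Usage sketch: each record must line up with the `col` headers above, i.e.
# [title, author, synopsis, rating, readers today, cover image URL].
example_rows = [
    ["Example title", "Example author", "Short synopsis", "8.9", "12034", "https://example.com/cover.jpg"],
]
# saveData(example_rows, "weread_top20.xls")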
| 5,335,984
|
def return_value(value: Any) -> ObservableBase:
"""Returns an observable sequence that contains a single element,
using the specified scheduler to send out observer messages.
There is an alias called 'just'.
example
res = rx.Observable.return(42)
res = rx.Observable.return(42, rx.Scheduler.timeout)
Keyword arguments:
value -- Single element in the resulting observable sequence.
Returns an observable sequence containing the single specified
element.
"""
def subscribe(observer, scheduler=None):
scheduler = scheduler or current_thread_scheduler
def action(scheduler, state=None):
observer.on_next(value)
observer.on_completed()
return scheduler.schedule(action)
return AnonymousObservable(subscribe)
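
# Usage sketch: the sequence emits the single value and then completes. The
# callback-style subscription below assumes the surrounding Rx library accepts an
# on_next callable, as RxPY does.
source = return_value(42)
source.subscribe(lambda value: print("got", value))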
| 5,335,985
|
def compare_policies(current_policy, new_policy):
""" Compares the existing policy and the updated policy
Returns True if there is a difference between policies.
"""
return set(_hashable_policy(new_policy, [])) != set(_hashable_policy(current_policy, []))
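
# Usage sketch: the policies are plain dicts (e.g. parsed IAM policy documents); the
# `_hashable_policy` helper defined elsewhere normalises them before comparison.
old_policy = {"Version": "2012-10-17",
              "Statement": [{"Effect": "Allow", "Action": "s3:GetObject", "Resource": "*"}]}
new_policy = {"Version": "2012-10-17",
              "Statement": [{"Effect": "Allow", "Action": "s3:PutObject", "Resource": "*"}]}
policies_differ = compare_policies(old_policy, new_policy)  # True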
| 5,335,986
|
def meanPSD(d0,win=np.hanning,dx=1.,axis=0,irregular=False,returnInd=False,minpx=10):
"""Return the 1D PSD averaged over a surface.
Axis indicates the axis over which to FFT
If irregular is True, each slice will be stripped
and then the power spectra
interpolated to common frequency grid
Presume image has already been interpolated internally
If returnInd is true, return array of power spectra
Ignores slices with less than minpx non-nans
"""
#Handle which axis is transformed
if axis==0:
d0 = np.transpose(d0)
#Create list of slices
if irregular is True:
d0 = [stripnans(di) for di in d0]
else:
d0 = [di for di in d0]
#Create power spectra from each slice
pows = [realPSD(s,win=win,dx=dx,minpx=minpx) for s in d0 \
if np.sum(~np.isnan(s)) >= minpx]
#Interpolate onto common frequency grid of shortest slice
if irregular is True:
#Determine smallest frequency grid
ln = [len(s[0]) for s in pows]
freq = pows[np.argmin(ln)][0]
#Interpolate
pp = [griddata(p[0],p[1],freq) for p in pows]
else:
pp = [p[1] for p in pows]
freq = pows[0][0]
#Average
pa = np.mean(pp,axis=0)
if returnInd is True:
return freq,pp
return freq,pa
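
# Usage sketch: average the 1D PSD of a synthetic surface along the chosen axis with
# unit sample spacing; `realPSD` (and `stripnans` for the irregular case) come from
# this module.
surface = np.random.standard_normal((128, 256))
freq, psd = meanPSD(surface, dx=1.0, axis=0)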
| 5,335,987
|
async def get_temperatures(obj):
"""Get temperatures as read by the thermostat."""
return await obj["madoka"].temperatures.query()
| 5,335,988
|
def system_check():
"""
function: collected OS information
input : dataFileName
output: Successfully collected OS information
"""
g_logger.debug("Collecting OS information.")
g_jobInfo.jobName = "Collecting OS information"
dataFileName = "%s/systemfiles/OS_information_%s.txt" % (
g_resultdir, datetime.datetime.now().strftime("%Y%m%d_%H%M%S%f"))
cmds = []
# Add information to the document
cmds.append(
"echo '************************************\n* OS information"
" for host' > %s 2>&1" % dataFileName)
cmds.append("hostname >> %s 2>&1" % dataFileName)
cmds.append("echo '************************************' >> %s 2>&1" %
dataFileName)
appendCommand(cmds, "ps ux", dataFileName)
appendCommand(cmds, "iostat -xm 2 3", dataFileName)
appendCommand(cmds, "free -m", dataFileName)
# Executes each query command and redirects the results to the specified
# file
for cmd in cmds:
(status, output) = subprocess.getstatusoutput(cmd)
if ">>" in cmd:
cmd = cmd.split(">>")[0]
cmd = cmd.replace("\n", " ")
if "echo" in cmd:
continue
if (status != 0):
if "Permission denied" in output:
output = "can not print info to file: Permission denied"
g_jobInfo.failedTask[cmd] = replaceInvalidStr(output)
g_logger.debug(
"Failed to collect OS information. Error:\n%s" % output)
else:
g_jobInfo.successTask.append(cmd)
basic_info_check()
# Modify the file permissions
os.chmod(dataFileName, DefaultValue.FILE_MODE_PERMISSION)
g_logger.log(json.dumps(g_jobInfo.__dict__))
g_logger.debug("Successfully collected OS information.")
| 5,335,989
|
def get_zero_to_2pi_input(label, required, placeholder=None, initial=None, validators=()):
"""
Method to get a custom positive float number field
:param label: String label of the field
:param required: Boolean to define whether the field is required or not
:param placeholder: Placeholder to appear in the field
:param initial: Default input value for the field
:param validators: validators that should be attached with the field
    :return: A custom floating number field that accepts only numbers greater than zero and less than 2*pi (2 * math.pi)
"""
default_validators = [validate_positive_float, validate_less_than_2pi, ]
return CustomFloatField(
label=label,
required=required,
initial=initial,
placeholder=placeholder,
validators=list(itertools.chain(default_validators, validators)),
)
| 5,335,990
|
def compile_math(math):
""" Compile a mathematical expression
Args:
math (:obj:`str`): mathematical expression
Returns:
:obj:`_ast.Expression`: compiled expression
"""
    math_node = evalidate.evalidate(math,
                                    addnodes=[
                                        'Eq', 'NotEq', 'Gt', 'Lt', 'GtE', 'LtE',
                                        'Sub', 'Mult', 'Div', 'Pow',
                                        'And', 'Or', 'Not',
                                        'BitAnd', 'BitOr', 'BitXor',
                                        'Call',
                                    ],
                                    funcs=MATHEMATICAL_FUNCTIONS.keys())
compiled_math = compile(math_node, '<math>', 'eval')
return compiled_math
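
# Usage sketch: the compiled expression is evaluated with eval(); which expressions
# validate depends on evalidate's default node whitelist plus the addnodes above, and
# the variable name `x` here is illustrative.
expr = compile_math("x > 2")
result = eval(expr, {}, {"x": 3.5})  # -> True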
| 5,335,991
|
def test_z_to_cMpc_redshift_array():
"""
Test using a redshift array returns an array of distances
"""
redshift_array = np.array([0.0, 1.0])
# Must return an array
expected_distance_array = np.array([0.0, 3395.905416665515])
calculated_distance_array = pyxcosmo.z_to_cMpc(redshift_array).value
    assert np.isclose(expected_distance_array, calculated_distance_array).all()
| 5,335,992
|
def start_command(update: Update, context: CallbackContext = None) -> None:
"""Send a message when the command /start is issued."""
user = update.effective_user
update.message.reply_markdown_v2(
fr'Hallo {user.mention_markdown_v2()}\! Silahkan masukan artikel/topik yang hendak dianalisa',
reply_markup=ForceReply(selective=True),
)
| 5,335,993
|
def save_urdf(dir, urdfName,
meshName,
objMass=0.1,
x_scale=1, y_scale=1, z_scale=1):
"""
    #* Save URDF file at the specified path with the given name. Uses the provided mass (default 0.1 kg), a small fixed inertia, and a single base link.
"""
# Write to an URDF file
f = open(dir + urdfName + '.urdf', "w+")
f.write("<?xml version=\"1.0\" ?>\n")
f.write("<robot name=\"%s.urdf\">\n" % urdfName)
f.write("\t<link name=\"baseLink\">\n")
f.write("\t\t<inertial>\n")
f.write("\t\t\t<origin rpy=\"0 0 0\" xyz=\"0 0 0\"/>\n")
f.write("\t\t\t\t<mass value=\"%.1f\"/>\n" % objMass)
f.write("\t\t\t\t<inertia ixx=\"6e-5\" ixy=\"0\" ixz=\"0\" iyy=\"6e-5\" iyz=\"0\" izz=\"6e-5\"/>\n")
f.write("\t\t</inertial>\n")
f.write("\t\t<visual>\n")
f.write("\t\t\t<origin rpy=\"0 0 0\" xyz=\"0 0 0\"/>\n")
f.write("\t\t\t<geometry>\n")
f.write("\t\t\t\t<mesh filename=\"%s\" scale=\"%.2f %.2f %.2f\"/>\n" % (meshName, x_scale, y_scale, z_scale))
f.write("\t\t\t</geometry>\n")
f.write("\t\t\t<material name=\"yellow\">\n")
f.write("\t\t\t\t<color rgba=\"0.98 0.84 0.35 1\"/>\n")
f.write("\t\t\t</material>\n")
f.write("\t\t</visual>\n")
f.write("\t\t<collision>\n")
f.write("\t\t\t<origin rpy=\"0 0 0\" xyz=\"0 0 0\"/>\n")
f.write("\t\t\t<geometry>\n")
f.write("\t\t\t\t<mesh filename=\"%s\" scale=\"%.2f %.2f %.2f\"/>\n" % (meshName, x_scale, y_scale, z_scale))
f.write("\t\t\t</geometry>\n")
f.write("\t\t</collision>\n")
f.write("\t</link>\n")
f.write("</robot>\n")
f.close()
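
# Usage sketch with placeholder paths: writes /tmp/objects/mug.urdf referencing mug.obj,
# scaled down uniformly. Note that `dir` is concatenated directly, so it needs a trailing slash.
save_urdf('/tmp/objects/', 'mug', 'mug.obj', objMass=0.2, x_scale=0.5, y_scale=0.5, z_scale=0.5)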
| 5,335,994
|
def colfilter(
data,
skip: Optional[Union[str, List[str]]] = None,
only: Optional[Union[str, List[str]]] = None,
):
"""
Remove some variables (skip) or keep only certain variables (only)
Parameters
----------
data: pd.DataFrame
The DataFrame to be processed and returned
skip: str, list or None (default is None)
List of variables to remove
only: str, list or None (default is None)
List of variables to keep
Returns
-------
data: pd.DataFrame
The filtered DataFrame
Examples
--------
>>> import clarite
>>> female_logBMI = clarite.modify.colfilter(nhanes, only=['BMXBMI', 'female'])
================================================================================
Running colfilter
--------------------------------------------------------------------------------
Keeping 2 of 945 variables:
0 of 0 binary variables
0 of 0 categorical variables
2 of 945 continuous variables
0 of 0 unknown variables
================================================================================
"""
boolean_keep = _validate_skip_only(data, skip, only)
dtypes = _get_dtypes(data)
click.echo(f"Keeping {boolean_keep.sum():,} of {len(data.columns):,} variables:")
for kind in ["binary", "categorical", "continuous", "unknown"]:
is_kind = dtypes == kind
is_kept = is_kind & boolean_keep
click.echo(f"\t{is_kept.sum():,} of {is_kind.sum():,} {kind} variables")
return data.loc[:, boolean_keep]
| 5,335,995
|
def choice(seq: Sequence[Any]) -> Any:
"""
Returns a random element from the non-empty sequence ``seq``.
If ``seq`` is empty, raises ``IndexError``.
"""
| 5,335,996
|
def test_global_and_user_rate_limiter():
"""
Both Global/User Rate Limiter Test
+-----+------------------+--------------------+--------+
| No. | Quota Remainings | API Request Time | Result |
| +---------+--------+ | |
| | Global | User | | |
+-----+---------+--------+--------------------+--------+
| 001 | 5 | 3 | 16412735450.55083 | True |
| 002 | 5 | 2 | 16412735450.551521 | True |
| 003 | 5 | 1 | 16412735450.551832 | True |
| 004 | 5 | 0 | 16412735450.55211 | False |
| | quota exhausted. | request-004 is denied. |
+-----+---------+--------+--------------------+--------+
| 005 | 5 | 3 | 16412735451.55554 | True |
| 006 | 5 | 2 | 16412735451.556108 | True |
| 007 | 5 | 1 | 16412735451.556301 | True |
| 008 | 5 | 0 | 16412735451.55655 | False |
| | quota exhausted. | request-008 is denied. |
+-----+---------+--------+--------------------+--------+
| 009 | 5 | 3 | 16412735452.583208 | True |
| 010 | 5 | 2 | 16412735452.58446 | True |
+-----+---------+--------+--------------------+--------+
"""
user_id = "shawn"
clock = FakeClock(factor=10.0)
rate_limiter = RateLimiter(clock)
res = True
rate_limiter.configure_global_limit(rps=5)
rate_limiter.configure_limit(user_id=user_id, rps=3)
print("\n Both Global/User Rate Limiter Test\n")
print("+-----+------------------+--------------------+--------+")
print("| No. | Quota Remainings | API Request Time | Result |")
print("| +---------+--------+ | |")
print("| | Global | User | | |")
print("+-----+---------+--------+--------------------+--------+")
for i in range(10):
g_r = rate_limiter.cur_remaining(Level.GLOBAL)
u_r = rate_limiter.cur_remaining(user_id)
stime = clock.stime()
print(f"| {i+1:03} | {g_r:5} | {u_r:4} | {stime} |", end=" ")
res = rate_limiter.process_request(user_id)
print(f"{res!s:>5} |")
if not res and rate_limiter.is_exhausted(user_id):
print("| | quota exhausted. |" +
f" request-{i+1:03} is denied. |")
print("+-----+---------+--------+--------------------+--------+")
clock.sleep(1.0)
if res:
print("+-----+---------+--------+--------------------+--------+")
| 5,335,997
|
def acos(x):
"""
"""
return math.acos(x)
| 5,335,998
|
def get_callable_from_string(f_name):
"""Takes a string containing a function name (optionally module qualified) and returns a callable object"""
try:
mod_name, func_name = get_mod_func(f_name)
if mod_name == "" and func_name == "":
raise AttributeError("%s couldn't be converted to a module or function name" % f_name)
module = __import__(mod_name)
if func_name == "":
func_name = mod_name # The common case is an eponymous class
return getattr(module, func_name)
    except (ImportError, AttributeError) as exc:
raise RuntimeError("Unable to create a callable object for '%s': %s" % (f_name, exc))
| 5,335,999
|