| max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
|---|---|---|---|---|---|---|
machine/exceptions/machine.py
|
kraglik/machine
| 0
|
12776551
|
from abc import ABC
class MachineError(Exception, ABC):
message = "Abstract error"
status_code = 500
def __init__(self) -> None:
super().__init__(self.message)
def __str__(self) -> str:
return self.message
class MachineSuspiciousOperationError(MachineError):
message = "Suspicious operation"
status_code = 500
class DisallowedHostError(MachineSuspiciousOperationError):
message = "Disallowed host"
status_code = 403
class UnexpectedContentType(MachineSuspiciousOperationError):
message = "Unexpected content type"
status_code = 403
class ResourceNotFound(MachineError):
message = "Resource not found"
status_code = 404
| 2.765625
| 3
|
SemiSupervisedLearning/Autoencoder.py
|
martahal/DeepLearning
| 0
|
12776552
|
#from Encoder import Encoder
#from Decoder import Decoder
#from Trainer import Trainer
#from Dataloader import load_fashion_mnist
from torch import nn
class Autoencoder(nn.Module):
def __init__(self,
encoder,
decoder,
reconstructed_image_shape):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.reconstructed_image_shape = reconstructed_image_shape
def forward(self, x):
"""
Performs the forward pass of the autoencoder
:param x: Input image, shape: [batch_size, image channels, width, height]
:return: reconstructed images, shape [batch_size, image channels, width, height]
latent vectors, shape [batch size, latent vector size]
"""
latent_vector = self.encoder(x)
reconstructed_image = self.decoder(latent_vector)
#self._test_correct_output(reconstructed_image)
return reconstructed_image, latent_vector
def _test_correct_output(self, output):
batch_size = output.shape[0]
        expected_shape = (batch_size, *self.reconstructed_image_shape)
        assert output.shape == expected_shape, \
            f"Expected output of forward pass to be: {expected_shape}, but got: {output.shape}"
#def main():
# epochs = 10
# batch_size = 16
# learning_rate = 0.0002
# loss_function = 'binary_cross_entropy'
# optimizer = 'SGD'
#
# # Loading FashionMNIST
# num_classes = 10
# dataloaders = load_fashion_mnist(batch_size, D1_fraction=0.8, validation_fraction=0.1, test_fraction=0.1)
# input_size = (1,28,28) # TODO this should be detected on the fly when we determine which dataset to run
# latent_vector_size = 256
#
# encoder = Encoder(
# input_shape=input_size,
# num_filters=4,
# latent_vector_size=latent_vector_size)
# decoder = Decoder(
# input_size=latent_vector_size,
# encoder_last_layer_dim=encoder.last_layer_dim,
# output_size=input_size)
# Autoencoder_model = Autoencoder(encoder, decoder, input_size)
#
# SSN_trainer = Trainer(batch_size=batch_size,
# lr=learning_rate,
# epochs= epochs,
# model= Autoencoder_model,
# dataloaders=dataloaders,
# loss_function=loss_function,
# optimizer=optimizer)
#
# SSN_trainer.do_autoencoder_train()
#
#if __name__ == '__main__':
# main()
| 2.71875
| 3
|
brml/multpots.py
|
herupraptono/pybrml
| 136
|
12776553
|
#!/usr/bin/env python
"""
MULTPOTS Multiply potentials into a single potential
newpot = multpots(pots)
multiply potentials : pots is a cell of potentials
potentials with empty tables are ignored
if a table of type 'zero' is encountered, the result is a table of type
'zero' with table 0, and empty variables.
"""
def multpots(pots):
# import copy
newpot = pots[0]
for i in range(1, len(pots)): # loop over all the potentials
#FIX ME: did not check dimension consistency
newpot = newpot*pots[i]
return newpot
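# Illustrative usage sketch (not part of the original module): any potential objects
# that implement __mul__ can be combined, e.g.
#
#   joint = multpots([p_a, p_b_given_a, p_c_given_b])
#
# which is simply p_a * p_b_given_a * p_c_given_b accumulated left to right.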
| 2.859375
| 3
|
solrSource.py
|
o19s/solr_dump
| 6
|
12776554
|
import pysolr
class InvalidPagingConfigError(RuntimeError):
def __init__(self, message):
super(RuntimeError, self).__init__(message)
class _SolrCursorIter:
""" Cursor-based iteration, most performant. Requires a sort on id somewhere
in required "sort" argument.
This is recommended approach for iterating docs in a Solr collection
"""
def __init__(self, solr_conn, query, sort='id desc', **options):
self.query = query
self.solr_conn = solr_conn
self.lastCursorMark = ''
self.cursorMark = '*'
self.sort = sort
try:
self.rows = options['rows']
del options['rows']
except KeyError:
self.rows = 0
self.options = options
self.max = None
self.docs = None
def __iter__(self):
response = self.solr_conn.search(self.query, rows=0, **self.options)
self.max = response.hits
return self
def __next__(self):
try:
if self.docs is not None:
try:
return next(self.docs)
except StopIteration:
self.docs = None
if self.docs is None:
if self.lastCursorMark != self.cursorMark:
response = self.solr_conn.search(self.query, rows=self.rows,
cursorMark=self.cursorMark,
sort=self.sort,
**self.options)
self.docs = iter(response.docs)
self.lastCursorMark = self.cursorMark
self.cursorMark = response.nextCursorMark
return next(self.docs)
else:
raise StopIteration()
except pysolr.SolrError as e:
print(e)
if "Cursor" in e.message:
raise InvalidPagingConfigError(e.message)
raise e
class _SolrPagingIter:
""" Traditional search paging, most flexible but will
gradually get slower on each request due to deep-paging
See graph here:
http://opensourceconnections.com/blog/2014/07/13/reindexing-collections-with-solrs-cursor-support/
"""
def __init__(self, solr_conn, query, **options):
self.current = 0
self.query = query
self.solr_conn = solr_conn
try:
self.rows = options['rows']
del options['rows']
except KeyError:
self.rows = 0
self.options = options
self.max = None
self.docs = None
def __iter__(self):
response = self.solr_conn.search(self.query, rows=0, **self.options)
self.max = response.hits
return self
def __next__(self):
if self.docs is not None:
try:
                return next(self.docs)
except StopIteration:
self.docs = None
if self.docs is None:
if self.current * self.rows < self.max:
self.current += 1
response = self.solr_conn.search(self.query, rows=self.rows,
start=(self.current - 1) * self.rows,
**self.options)
self.docs = iter(response.docs)
return next(self.docs)
else:
raise StopIteration()
SolrDocs = _SolrCursorIter # recommended, see note for SolrCursorIter
SlowSolrDocs = _SolrPagingIter
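# Illustrative usage sketch (not part of the original script); the Solr URL and
# collection name below are placeholders:
#
#   conn = pysolr.Solr('http://localhost:8983/solr/my_collection')
#   for doc in SolrDocs(conn, '*:*', rows=500, sort='id desc'):
#       ...  # each doc is a plain dict taken from response.docs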
def parse_args():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('solr_url',
type=str)
parser.add_argument('--query',
type=str,
default='*:*')
parser.add_argument('--sort',
type=str,
default='id desc')
parser.add_argument('--fields',
type=str,
default='')
parser.add_argument('--batch_size',
type=int,
default=500)
parser.add_argument('--dest',
type=argparse.FileType('w'))
return vars(parser.parse_args())
if __name__ == "__main__":
args = parse_args()
solr_conn = pysolr.Solr(args['solr_url'])
solr_fields = args['fields'].split() if args['fields'] else ''
solr_itr = SolrDocs(solr_conn, args['query'], rows=args['batch_size'], sort=args['sort'], fl=solr_fields)
destFile = args['dest']
import json
numDocs = 0
for doc in solr_itr:
destFile.write(json.dumps(doc) + "\n")
numDocs += 1
if (numDocs % 1000 == 0):
print("Wrote %s docs" % numDocs)
print("Wrote %s docs" % numDocs)
| 2.765625
| 3
|
2015/day17/day17.py
|
e-jameson/aoc
| 0
|
12776555
|
import sys
from itertools import combinations
from helpers import as_list_ints
containers = as_list_ints('2015/day17/input.txt')
# containers = as_list_ints('2015/day17/example-input.txt')
total = 150
count = 0
min_containers = sys.maxsize
min_count = 0
for i in range(1, len(containers) + 1):  # include the case where every container is used
for c in combinations(containers, i):
liquid = sum(c)
if liquid == total:
if i <= min_containers:
min_containers = i
min_count += 1
count += 1
print('2015 Day 17 Part 1')
print(count)
print('2015 Day 17 Part 2')
print(min_containers, min_count)
| 3.09375
| 3
|
development/db_setup/process_frames.py
|
ocean-data-factory-sweden/koster_lab_development
| 0
|
12776556
|
import os, io, csv, json
import requests, argparse
import pandas as pd
import numpy as np
from ast import literal_eval
from datetime import datetime
from panoptes_client import Project, Panoptes
from collections import OrderedDict, Counter
from sklearn.cluster import DBSCAN
import kso_utils.db_utils as db_utils
from kso_utils.zooniverse_utils import auth_session
def bb_iou(boxA, boxB):
# Compute edges
temp_boxA = boxA.copy()
temp_boxB = boxB.copy()
temp_boxA[2], temp_boxA[3] = (
temp_boxA[0] + temp_boxA[2],
temp_boxA[1] + temp_boxA[3],
)
temp_boxB[2], temp_boxB[3] = (
temp_boxB[0] + temp_boxB[2],
temp_boxB[1] + temp_boxB[3],
)
# determine the (x, y)-coordinates of the intersection rectangle
xA = max(temp_boxA[0], temp_boxB[0])
yA = max(temp_boxA[1], temp_boxB[1])
xB = min(temp_boxA[2], temp_boxB[2])
yB = min(temp_boxA[3], temp_boxB[3])
# compute the area of intersection rectangle
interArea = abs(max((xB - xA, 0)) * max((yB - yA), 0))
if interArea == 0:
return 1
# compute the area of both the prediction and ground-truth
# rectangles
boxAArea = abs((temp_boxA[2] - temp_boxA[0]) * (temp_boxA[3] - temp_boxA[1]))
boxBArea = abs((temp_boxB[2] - temp_boxB[0]) * (temp_boxB[3] - temp_boxB[1]))
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the intersection area
iou = interArea / float(boxAArea + boxBArea - interArea)
# return the intersection over union value
return 1 - iou
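# Worked example (illustrative only): for boxes given as [x, y, w, h],
#   bb_iou([0, 0, 10, 10], [5, 0, 10, 10])
# has intersection 5 x 10 = 50 and box areas of 100 each, so IoU = 50 / 150 = 1/3
# and the returned distance is 1 - 1/3 ~= 0.667. This distance is what DBSCAN
# consumes as its `metric` in filter_bboxes below.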
def filter_bboxes(total_users, users, bboxes, obj, eps, iua):
    # Keep this frame only if at least a fraction obj of the users who saw it marked an object
user_count = pd.Series(users).nunique()
if user_count / total_users >= obj:
# Get clusters of annotation boxes based on iou criterion
cluster_ids = DBSCAN(min_samples=1, metric=bb_iou, eps=eps).fit_predict(bboxes)
# Count the number of users within each cluster
counter_dict = Counter(cluster_ids)
# Accept a cluster assignment if at least 80% of users agree on annotation
passing_ids = [k for k, v in counter_dict.items() if v / user_count >= iua]
indices = np.isin(cluster_ids, passing_ids)
final_boxes = []
for i in passing_ids:
# Compute median over all accepted bounding boxes
boxes = np.median(np.array(bboxes)[np.where(cluster_ids == i)], axis=0)
final_boxes.append(boxes)
return indices, final_boxes
else:
return [], bboxes
def main():
"Handles argument parsing and launches the correct function."
parser = argparse.ArgumentParser()
parser.add_argument(
"--user", "-u", help="Zooniverse username", type=str, required=True
)
parser.add_argument(
"--password", "-p", help="Zooniverse password", type=str, required=True
)
parser.add_argument(
"-db",
"--db_path",
type=str,
help="the absolute path to the database file",
default=r"koster_lab.db",
required=True,
)
parser.add_argument(
"-obj",
"--object_thresh",
type=float,
help="Agreement threshold required among different users",
default=0.8,
)
parser.add_argument(
"-zw",
"--zoo_workflow",
type=float,
help="Number of the Zooniverse workflow of interest",
default=12852,
required=False,
)
parser.add_argument(
"-zwv",
"--zoo_workflow_version",
type=float,
help="Version number of the Zooniverse workflow of interest",
default=21.85,
required=False,
)
parser.add_argument(
"-eps",
"--iou_epsilon",
type=float,
help="threshold of iou for clustering",
default=0.5,
)
parser.add_argument(
"-iua",
"--inter_user_agreement",
type=float,
help="proportion of users agreeing on clustering",
default=0.8,
)
parser.add_argument(
"-nu",
"--n_users",
type=float,
help="Minimum number of different Zooniverse users required per clip",
default=5,
required=False,
)
parser.add_argument(
"-du",
"--duplicates_file_id",
help="Google drive id of list of duplicated subjects",
type=str,
required=False,
)
args = parser.parse_args()
project = auth_session(args.user, args.password)
# Get the export classifications
export = project.get_export("classifications")
# Save the response as pandas data frame
rawdata = pd.read_csv(
io.StringIO(export.content.decode("utf-8")),
usecols=[
"user_name",
"subject_ids",
"subject_data",
"classification_id",
"workflow_id",
"workflow_version",
"created_at",
"annotations",
],
)
# Filter w2 classifications
w2_data = rawdata[
(rawdata.workflow_id == args.zoo_workflow)
& (rawdata.workflow_version >= args.zoo_workflow_version)
].reset_index()
# Clear duplicated subjects
if args.duplicates_file_id:
w2_data = db_utils.combine_duplicates(w2_data, args.duplicates_file_id)
#Drop NaN columns
w2_data = w2_data.drop(['dupl_subject_id', 'single_subject_id'], 1)
## Check if subjects have been uploaded
# Get species id for each species
conn = db_utils.create_connection(args.db_path)
# Get subject table
uploaded_subjects = pd.read_sql_query(
"SELECT id FROM subjects WHERE subject_type='frame'", conn
)
# Add frame subjects to db that have not been uploaded
new_subjects = w2_data[(~w2_data.subject_ids.isin(uploaded_subjects))]
new_subjects["subject_dict"] = new_subjects["subject_data"].apply(lambda x: [v["retired"] for k,v in json.loads(x).items()][0])
new_subjects = new_subjects[~new_subjects.subject_dict.isnull()].drop("subject_dict", 1)
if len(new_subjects) > 0 and args.zoo_workflow_version > 30:
# Get info of subjects uploaded to the project
export = project.get_export("subjects")
# Save the subjects info as pandas data frame
subjects_df = pd.read_csv(
io.StringIO(export.content.decode("utf-8")),
usecols=["subject_id", "subject_set_id", "created_at"],
)
new_subjects = pd.merge(
new_subjects,
subjects_df,
how="left",
left_on="subject_ids",
right_on="subject_id",
)
# Extract the video filename and annotation details
new_subjects[
[
"frame_number",
"frame_exp_sp_id",
"movie_id",
"classifications_count",
"created_at",
"retired_at",
"retirement_reason",
]
] = pd.DataFrame(
new_subjects["subject_data"]
.apply(
lambda x: [
{
"frame_number": v["frame_number"],
"frame_exp_sp_id": v["frame_exp_sp_id"],
"movie_id": v["movie_id"],
"classifications_count": v["retired"]["classifications_count"],
"created_at": v["retired"]["created_at"],
"retired_at": v["retired"]["retired_at"],
"retirement_reason": v["retired"]["retirement_reason"],
}
for k, v in json.loads(x).items()
][0]
)
.tolist()
)
new_subjects["subject_type"] = "frame"
movies_df = pd.read_sql_query("SELECT id, filename FROM movies", conn)
movies_df = movies_df.rename(
columns={"id": "movie_id", "filename": "movie_filename"}
)
new_subjects = pd.merge(new_subjects, movies_df, how="left", on="movie_id")
new_subjects["filename"] = new_subjects.apply(
lambda x: x["movie_filename"] + "_" + str(x["frame_number"]) + ".jpg",
axis=1,
)
# Set irrelevant columns to None
new_subjects["clip_start_time"] = None
new_subjects["clip_end_time"] = None
new_subjects = new_subjects[
[
"subject_ids",
"subject_type",
"filename",
"clip_start_time",
"clip_end_time",
"frame_exp_sp_id",
"frame_number",
"workflow_id",
"subject_set_id",
"classifications_count",
"retired_at",
"retirement_reason",
"created_at",
"movie_id",
]
]
new_subjects = new_subjects.drop_duplicates(subset="subject_ids")
db_utils.test_table(new_subjects, "subjects", keys=["movie_id"])
# Add values to subjects
db_utils.add_to_table(
args.db_path, "subjects", [tuple(i) for i in new_subjects.values], 14
)
# Calculate the number of users that classified each subject
w2_data["n_users"] = w2_data.groupby("subject_ids")["classification_id"].transform(
"nunique"
)
# Select frames with at least n different user classifications
w2_data = w2_data[w2_data.n_users >= args.n_users]
# Drop workflow and n_users columns
w2_data = w2_data.drop(
columns=[
"workflow_id",
"workflow_version",
"n_users",
"created_at",
]
)
# Extract the video filename and annotation details
subject_data_df = pd.DataFrame(
w2_data["subject_data"]
.apply(
lambda x: [
{
"movie_id": v["movie_id"],
"frame_number": v["frame_number"],
"label": v["label"],
}
for k, v in json.loads(x).items() # if v['retired']
][0],
1,
)
.tolist()
)
w2_data = pd.concat(
[w2_data.reset_index().drop("index", 1), subject_data_df],
axis=1,
ignore_index=True,
)
w2_data = w2_data[w2_data.columns[1:]]
pd.set_option('display.max_columns', None)
w2_data.columns = [
"classification_id",
"user_name",
"annotations",
"subject_data",
"subject_ids",
"movie_id",
"frame_number",
"label",
]
movies_df = pd.read_sql_query("SELECT id, filename FROM movies", conn)
movies_df = movies_df.rename(columns={"id": "movie_id"})
w2_data = pd.merge(w2_data, movies_df, how="left", on="movie_id")
# Convert to dictionary entries
w2_data["movie_id"] = w2_data["movie_id"].apply(lambda x: {"movie_id": x})
w2_data["frame_number"] = w2_data["frame_number"].apply(
lambda x: {"frame_number": x}
)
w2_data["label"] = w2_data["label"].apply(lambda x: {"label": x})
w2_data["user_name"] = w2_data["user_name"].apply(lambda x: {"user_name": x})
w2_data["subject_id"] = w2_data["subject_ids"].apply(lambda x: {"subject_id": x})
w2_data["annotation"] = w2_data["annotations"].apply(
lambda x: literal_eval(x)[0]["value"], 1
)
# Extract annotation metadata
w2_data["annotation"] = w2_data[
["movie_id", "frame_number", "label", "annotation", "user_name", "subject_id"]
].apply(
lambda x: [
OrderedDict(
list(x["movie_id"].items())
+ list(x["frame_number"].items())
+ list(x["label"].items())
+ list(x["annotation"][i].items())
+ list(x["user_name"].items())
+ list(x["subject_id"].items())
)
for i in range(len(x["annotation"]))
]
if len(x["annotation"]) > 0
else [
OrderedDict(
list(x["movie_id"].items())
+ list(x["frame_number"].items())
+ list(x["label"].items())
+ list(x["user_name"].items())
+ list(x["subject_id"].items())
)
],
1,
)
# Convert annotation to format which the tracker expects
ds = [
OrderedDict(
{
"user": i["user_name"],
"movie_id": i["movie_id"],
"label": i["label"],
"start_frame": i["frame_number"],
"x": int(i["x"]) if "x" in i else None,
"y": int(i["y"]) if "y" in i else None,
"w": int(i["width"]) if "width" in i else None,
"h": int(i["height"]) if "height" in i else None,
"subject_id": i["subject_id"] if "subject_id" in i else None,
}
)
for i in w2_data.annotation.explode()
if i is not None and i is not np.nan
]
# Get prepared annotations
w2_full = pd.DataFrame(ds)
w2_annotations = w2_full[w2_full["x"].notnull()]
new_rows = []
final_indices = []
for name, group in w2_annotations.groupby(["movie_id", "label", "start_frame"]):
movie_id, label, start_frame = name
total_users = w2_full[
(w2_full.movie_id == movie_id)
& (w2_full.label == label)
& (w2_full.start_frame == start_frame)
]["user"].nunique()
# Filter bboxes using IOU metric (essentially a consensus metric)
# Keep only bboxes where mean overlap exceeds this threshold
indices, new_group = filter_bboxes(
total_users=total_users,
users=[i[0] for i in group.values],
bboxes=[np.array((i[4], i[5], i[6], i[7])) for i in group.values],
obj=args.object_thresh,
eps=args.iou_epsilon,
iua=args.inter_user_agreement,
)
subject_ids = [i[8] for i in group.values[indices]]
for ix, box in zip(subject_ids, new_group):
new_rows.append(
(
movie_id,
label,
start_frame,
ix,
)
+ tuple(box)
)
w2_annotations = pd.DataFrame(
new_rows,
columns=[
"movie_id",
"label",
"start_frame",
"subject_id",
"x",
"y",
"w",
"h",
],
)
# Get species id for each species
conn = db_utils.create_connection(args.db_path)
# Get subject table
subjects_df = pd.read_sql_query("SELECT id, frame_exp_sp_id FROM subjects", conn)
subjects_df = subjects_df.rename(
columns={"id": "subject_id", "frame_exp_sp_id": "species_id"}
)
w2_annotations = pd.merge(
w2_annotations,
subjects_df,
how="left",
left_on="subject_id",
right_on="subject_id",
validate="many_to_one",
)
# Filter out invalid movies
w2_annotations = w2_annotations[w2_annotations["movie_id"].notnull()][
["species_id", "x", "y", "w", "h", "subject_id"]
]
# Add values to agg_annotations_frame
db_utils.add_to_table(
args.db_path,
"agg_annotations_frame",
[(None,) + tuple(i) for i in w2_annotations.values],
7,
)
print(f"Frame Aggregation Complete: {len(w2_annotations)} annotations added")
if __name__ == "__main__":
main()
| 2.171875
| 2
|
Python/budget_app/budget.py
|
nehera/tutorials
| 0
|
12776557
|
class Category:
def __init__(self, name):
self.name = name
self.ledger = []
def __str__(self):
l = len(self.name)
n1 = 15-int(l/2)
n2 = 30-(n1+l)
title = "*"*n1+self.name+"*"*n2+"\n"
summary = ""
for item in self.ledger:
a= format(item["amount"], '.2f')[0:6]
d = item["description"][0:23]
sl = 30 - len(a)-len(d)
summary = summary + d + " " * sl + a + "\n"
total = "Total: " + str(self.get_balance())
summary = title + summary + total
return summary
def deposit(self, amount, description=""):
self.ledger.append({"amount": amount, "description": description})
def withdraw(self, amount, description=""):
if amount <= self.get_balance():
self.ledger.append({"amount": amount*-1, "description": description})
return True
else: return False
def get_balance(self):
nums = [d["amount"] for d in self.ledger]
return float(sum(nums))
def transfer(self, amount, category): # transfers $ from this category to another
if self.check_funds(amount) == True:
wd = "Transfer to " + category.name
dd = "Transfer from " + self.name
self.withdraw(amount, wd)
category.deposit(amount, dd)
return True
else: return False
def check_funds(self, amount):
b = self.get_balance()
if amount > b :
return False
else:
return True
def check_spending(self):
nums = [d["amount"] for d in self.ledger]
spen = [num for num in nums if num < 0]
spending = -sum(spen)
return format(spending, '.2f')
def create_spend_chart(categories):
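    # Builds a text bar chart: a percentage axis from 100 down to 0, an "o" marker per
    # category at each level up to its share of total spending (rounded down to the
    # nearest 10%), a dashed baseline, and the category names written vertically.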
title = "Percentage spent by category"
dashes = " "*4 + "-"*(len(categories)*3+1)
width = 4+len(dashes)
y = reversed(range(0,101,10))
spending = []
name_max = 0
for category in categories:
num = float(category.check_spending())
spending.append(num)
if len(category.name) > name_max:
name_max = len(category.name)
total_spending = sum(spending)
chart = title + "\n"
for num in y:
nl = len(str(num))
p1 = " "*(3-nl)+str(num)+"| "
p2 = ""
for s in spending:
percent = int(s/total_spending*100/10)*10
if percent >= num:
p2 = p2 + "o "
else:
p2 = p2 + " "
line = p1 + p2 + "\n"
chart = chart + line
chart = chart + dashes + "\n"
for i in range(0, name_max):
line = " "*5
for c in categories:
if i > (len(c.name)-1):
line = line + " "
else:
l = c.name[i]
pi = l + " "
line = line + pi
if i < (name_max-1):
chart = chart + line + "\n"
else:
chart = chart + line
return chart
| 3.203125
| 3
|
configs/deepim/lmPbrSO/FlowNet512_1.5AugCosyAAEGray_AggressiveV2_Flat_lmPbr_SO/cfg_1_to_all.py
|
THU-DA-6D-Pose-Group/self6dpp
| 33
|
12776558
|
from mmcv import Config
import os.path as osp
import os
from tqdm import tqdm
cur_dir = osp.normpath(osp.dirname(osp.abspath(__file__)))
base_cfg_name = "FlowNet512_1.5AugCosyAAEGray_AggressiveV2_Flat_Pbr_01_ape.py"
base_id_obj = "01_ape"
base_obj_name = "ape"
# -----------------------------------------------------------------
id2obj = {
1: "ape",
2: "benchvise",
3: "bowl",
4: "camera",
5: "can",
6: "cat",
7: "cup",
8: "driller",
9: "duck",
10: "eggbox",
11: "glue",
12: "holepuncher",
13: "iron",
14: "lamp",
15: "phone",
}
obj2id = {_name: _id for _id, _name in id2obj.items()}
def main():
base_cfg_path = osp.join(cur_dir, base_cfg_name)
assert osp.exists(base_cfg_path), base_cfg_path # make sure base cfg is in this dir
cfg = Config.fromfile(base_cfg_path)
for obj_id, obj_name in tqdm(id2obj.items()):
if obj_name in [base_obj_name, "bowl", "cup"]: # NOTE: ignore base_obj and some unwanted objs
continue
print(obj_name)
# NOTE: what fields should be updated ---------------------------
new_cfg_dict = dict(
_base_="./{}".format(base_cfg_name),
OUTPUT_DIR=cfg.OUTPUT_DIR.replace(base_obj_name, obj_name),
DATASETS=dict(
TRAIN=("lm_pbr_{}_train".format(obj_name),),
TEST=("lm_real_{}_test".format(obj_name),),
),
)
# ----------------------------------------------------------------------
new_cfg_path = osp.join(cur_dir, base_cfg_name.replace(base_id_obj, f"{obj_id:02d}_{obj_name}"))
if osp.exists(new_cfg_path):
raise RuntimeError("new cfg exists!")
new_cfg = Config(new_cfg_dict)
with open(new_cfg_path, "w") as f:
f.write(new_cfg.pretty_text)
# re-format
os.system("black -l 120 {}".format(cur_dir))
if __name__ == "__main__":
main()
| 2.109375
| 2
|
blog/signals.py
|
Vicky-Rathod/django-blog
| 0
|
12776559
|
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
from .models import Post
from .utils import random_string_generator
@receiver(post_save, sender=Post)
def create_user_profile(sender, instance, created, **kwargs):
if not instance.slug:
instance.slug = random_string_generator(size=50)
instance.save()
| 2.359375
| 2
|
tests/meh.py
|
awlange/brainsparks
| 3
|
12776560
|
"""
Script entry point
"""
from src.sandbox.network import Network
from src.sandbox.dense import Dense
import src.sandbox.linalg as linalg
import numpy as np
import time
def main():
n = 6000
v = [x for x in range(n)]
m = [[x for x in range(n)] for _ in range(n)]
time_start = time.time()
for _ in range(3):
linalg.mdotv(m, v)
print(time.time() - time_start)
def main2():
n = 8000
v = np.asarray([x for x in range(n)])
m = np.asarray([[x for x in range(n)] for _ in range(n)])
time_start = time.time()
z = None
for _ in range(3):
z = m.dot(v)
print(z.sum())
print(time.time() - time_start)
if __name__ == "__main__":
# main()
main2()
| 2.671875
| 3
|
scripts/light_server.py
|
jing-vision/lightnet
| 83
|
12776561
|
'''
pip install flask gevent requests pillow
https://github.com/jrosebr1/simple-keras-rest-api
https://gist.github.com/kylehounslow/767fb72fde2ebdd010a0bf4242371594
'''
''' Usage
python ..\scripts\classifier.py --socket=5000 --weights=weights\obj_last.weights
curl -X POST -F image=@dog.png http://localhost:5000/training/begin?plan=testplan
'''
import threading
import time
import csv
import datetime
import flask
import traceback
import sys
import os
import cv2 as cv
import argparse
import lightnet
import darknet
import socket
import requests
import get_ar_plan
import logging
logger = logging.getLogger(__name__)
app = flask.Flask(__name__)
from os.path import join
args = None
nets = []
metas = []
args_groups = []
csv_file = None
csv_writer = None
cap = None
gpu_lock = threading.Lock()
host_ip = 'localhost'
#
server_state_idle = 0
server_state_training = 1
server_state = None
server_training_status = {
'plan_name': '',
'percentage': 0,
}
server_training_status_internal = {
'folders': [],
}
def get_Host_name_IP():
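    # Determine the outward-facing IP address by opening a TCP connection to a
    # public host and reading the local address of that socket.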
try:
global host_ip
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("baidu.com", 80))
host_ip, _ = s.getsockname()
print("http://%s:5000" % host_ip)
except:
print("Unable to get Hostname and IP")
@app.route("/", methods=["GET"])
def index_get():
data = vars(args)
data['usage'] = "curl -X POST -F image=@dog.png http://%s:5000/predict" % (
host_ip)
return flask.jsonify(data)
def go_idle():
global server_state, server_training_status, server_training_status_internal
server_state = server_state_idle
server_training_status['plan_name'] = ''
server_training_status['percentage'] = 0
server_training_status_internal['folders'] = []
@app.route("/training/status", methods=["GET"])
def training_status():
return flask.jsonify(server_training_status)
def training_thread_function(training_folders):
global server_state, server_training_status, server_training_status_internal
server_training_status_internal['folders'] = training_folders
import subprocess
idx = 1 # start from 1
for folder in training_folders:
bat_file = join(folder, 'train.bat')
logging.info("%s: starting", bat_file)
p = subprocess.Popen(bat_file, shell=True, stdout = subprocess.PIPE)
stdout, stderr = p.communicate()
print(p.returncode) # is 0 if success
logging.info("%s: finishing", bat_file)
server_training_status['percentage'] = idx * 100 / len(training_folders)
idx += 1
go_idle()
@app.route("/training/begin", methods=["GET"])
def training_begin():
global server_state, server_training_status
if server_state != server_state_idle:
result = {
'errCode': 'Busy', # 'OK/Busy/Error'
'errMsg': 'Server is busy training %s' % server_training_status['plan_name']
}
return flask.jsonify(result)
try:
server_state = server_state_training
plan = flask.request.args.get("plan")
print(plan)
server_training_status['plan_name'] = plan
server_training_status['percentage'] = 0
url = 'http://localhost:8800/api/Training/plan?plan=%s' % plan
response = requests.get(url)
plan_json = response.json()
# return flask.jsonify(result)
training_folders = get_ar_plan.prepare_training_folders(plan_json)
x = threading.Thread(target=training_thread_function, args=(training_folders,))
x.start()
result = {
'errCode': 'OK', # 'OK/Busy/Error'
'errMsg': ''
}
except:
error_callstack = traceback.format_exc()
print(error_callstack)
result = {
'errCode': 'Error', # or 'Error'
'errMsg': error_callstack
}
go_idle()
return flask.jsonify(result)
def main():
# lightnet.set_cwd(dir)
global nets, metas, args, cap, args_groups
global server_state
server_state = server_state_idle
def add_bool_arg(parser, name, default=False):
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('--' + name, dest=name, action='store_true')
group.add_argument('--no-' + name, dest=name, action='store_false')
parser.set_defaults(**{name: default})
parser = argparse.ArgumentParser()
parser.add_argument('--group', default='default')
parser.add_argument('--cfg', default='obj.cfg')
parser.add_argument('--weights', default='weights/obj_last.weights')
parser.add_argument('--names', default='obj.names')
parser.add_argument('--socket', type=int, default=5000)
parser.add_argument('--top_k', type=int, default=5)
parser.add_argument('--gold_confidence', type=float, default=0.95)
parser.add_argument('--threshold', type=float, default=0.5)
add_bool_arg(parser, 'debug')
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
# flask routine
print('=========================================')
get_Host_name_IP()
print('=========================================')
app.run(host='0.0.0.0', port=args.socket, threaded=True)
if __name__ == "__main__":
main()
| 2.15625
| 2
|
tests/test_manage.py
|
rosshamish/classtime-implementation
| 1
|
12776562
|
from __future__ import absolute_import
import unittest
import manage
class Arguments(object): # pylint: disable=R0903
def __init__(self, command, term, startfrom):
self.command = command
self.term = term
self.startfrom = startfrom
class TestManageDatabase(unittest.TestCase): # pylint: disable=R0904
@classmethod
def setup_class(cls):
manage.delete_db()
@classmethod
def teardown_class(cls):
pass
def test_seed_db(self): #pylint: disable=R0201
args = Arguments('seed_db', '1490', None)
manage.seed_db(args)
assert_valid_terms()
assert_valid_courses()
def assert_valid_terms():
import classtime.models as models
for term_model in models.Term.query.all():
assert term_model.term is not None
assert term_model.termTitle is not None
assert term_model.courses is not None
def assert_valid_courses():
import classtime.models as models
for course_model in models.Course.query.all():
assert course_model.term is not None
assert course_model.course is not None
assert course_model.asString is not None
assert course_model.sections is not None
| 2.515625
| 3
|
Chapter06/Ch6/demo/indexing.py
|
henrryyanez/Tkinter-GUI-Programming-by-Example
| 127
|
12776563
|
import tkinter as tk
win = tk.Tk()
current_index = tk.StringVar()
text = tk.Text(win, bg="white", fg="black")
lab = tk.Label(win, textvar=current_index)
def update_index(event=None):
cursor_position = text.index(tk.INSERT)
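    # tk.Text indices are "line.column" strings, e.g. "3.15" for line 3, character 15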
cursor_position_pieces = str(cursor_position).split('.')
cursor_line = cursor_position_pieces[0]
cursor_char = cursor_position_pieces[1]
current_index.set('line: ' + cursor_line + ' char: ' + cursor_char + ' index: ' + str(cursor_position))
text.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
lab.pack(side=tk.BOTTOM, fill=tk.X, expand=1)
text.bind('<KeyRelease>', update_index)
win.mainloop()
| 3.65625
| 4
|
MobileRevelator/python/android_quizkampen.py
|
ohunecker/MR
| 98
|
12776564
|
#Pluginname="Quizkampen (Android)"
#Filename="quizkampen"
#Type=App
import os
import struct
import xml.etree.ElementTree
import tempfile
def convertdata(db):
#ctx.gui_clearData()
ctx.gui_setMainLabel("Quizkampen: Extracting userid");
tmpdir = tempfile.mkdtemp()
outuid = os.path.join(tmpdir, "userid")
uid=""
filenames=["/data/se.feomedia.quizkampen.pl.lite/shared_prefs/PREF_SETTINGS_NAME.xml","/se.feomedia.quizkampen.pl.lite/shared_prefs/PREF_SETTINGS_NAME.xml"]
for f in filenames:
if ctx.fs_file_extract(f,outuid):
ctx.gui_add_report_relevant_file(f)
e = xml.etree.ElementTree.parse(outuid).getroot()
for atype in e.findall("long"):
if atype.get("name")=="current_user":
uid=atype.get("value")
print("Userid: "+uid+"\n")
os.remove(outuid)
break;
ctx.gui_setMainLabel("Quizkampen: Extracting users");
waconn=ctx.sqlite_run_cmd(db,"SELECT DISTINCT id, name from qk_users;")
if (waconn==-1):
print ("Error: "+ctx.sqlite_last_error(db))
return
contacts={}
if waconn!=-1:
rows=ctx.sqlite_get_data_size(waconn)[0]
for i in range(0,rows):
id=str(ctx.sqlite_get_data(waconn,i,0))
name=str(ctx.sqlite_get_data(waconn,i,1))
if (id not in contacts):
if name != None:
contacts[id]=name
else:
contacts[id]=""
#print(contacts)
ctx.gui_setMainLabel("Quizkampen: Extracting messages");
ctx.sqlite_cmd_close(waconn)
conn=ctx.sqlite_run_cmd(db,"select rowid, to_id, from_id, text, datetime, is_message_read, is_deleted from qk_messages;")
rows=ctx.sqlite_get_data_size(conn)[0]
oldpos=0
r=0
for i in range(0,rows):
newpos=int(i/rows*100)
if (oldpos<newpos):
oldpos=newpos
ctx.gui_setMainProgressBar(oldpos)
rowid=ctx.sqlite_get_data(conn,i,0)
to_id=str(ctx.sqlite_get_data(conn,i,1))
to_id_alias=""
if to_id in contacts:
to_id_alias=contacts[to_id]
from_id=str(ctx.sqlite_get_data(conn,i,2))
from_id_alias=""
if from_id in contacts:
from_id_alias=contacts[from_id]
text=ctx.sqlite_get_data(conn,i,3)
timestamp=ctx.sqlite_get_data(conn,i,4)
timestamp=str(timestamp[:-3])
is_message_read=ctx.sqlite_get_data(conn,i,5)
is_deleted=ctx.sqlite_get_data(conn,i,6)
ctx.gui_set_data(r,0,rowid)
ctx.gui_set_data(r,1,to_id)
ctx.gui_set_data(r,2,to_id_alias)
ctx.gui_set_data(r,3,from_id)
ctx.gui_set_data(r,4,from_id_alias)
ctx.gui_set_data(r,5,text)
print(timestamp)
ctx.gui_set_data(r,6,timestamp)
ctx.gui_set_data(r,7,is_message_read)
ctx.gui_set_data(r,8,is_deleted)
if (uid==from_id):
from_me="yes"
else:
from_me="no"
if (uid==""):
from_me="unknown"
ctx.gui_set_data(r,9,from_me)
r+=1
ctx.sqlite_cmd_close(conn)
def main():
headers=["rowid (int)","to_id (QString)", "to_id_alias (QString)", "from_id (QString)", "from_id_alias (QString)", "text (QString)","timestamp (int)","is_message_read (QString)","is_deleted (QString)", "is_from_me (QString)"]
ctx.gui_set_headers(headers)
ctx.gui_setMainLabel("Quizkampen: Parsing Strings");
ctx.gui_setMainProgressBar(0)
db=ctx.sqlite_open("gui",True)
convertdata(db)
ctx.gui_update()
ctx.gui_setMainLabel("Status: Idle.")
ctx.gui_setMainProgressBar(0)
ctx.sqlite_close(db)
return "Finished running plugin."
| 2.40625
| 2
|
groups/bal/baljsn/generate_baljsn_encoder_testtypes.py
|
eddiepierce/bde
| 1
|
12776565
|
#!/opt/bb/bin/python3.7
"""This module defines a program that generates the 'baljsn_encoder_testtypes'
component and replace all uses of 'bdes' with 'bsls' within its files.
"""
from asyncio import create_subprocess_exec as aio_create_subprocess_exec
from asyncio import run as aio_run
from asyncio import subprocess as aio_subprocess
from mmap import mmap as mm_mmap
from re import compile as re_compile
from re import finditer as re_finditer
from sys import exit as sys_exit
from sys import version_info as sys_version_info
from typing import AsyncGenerator as ty_AsyncGenerator
from typing import TypeVar as ty_TypeVar
from typing import Union as ty_Union
from typing import cast as ty_cast
T = ty_TypeVar('T')
def not_none_cast(x: ty_Union[T, None]) -> T:
"""Return the specified `x` cast to the specified `T` type.
Args:
x (typing.Union[T, None]): the value to return
Returns:
T: the specified `x` cast to `T`
"""
return ty_cast(T, x)
async def generate_components() -> ty_AsyncGenerator[str, None]:
"""Generate the `baljsn_encoder_testtypes` components.
Spawn a subprocess that generates the C++ code for the
`baljsn_encoder_testtypes.xsd` schema. Return an async generator `G` that
yields each line of output from the subprocess as it is received and that
returns upon termination of the process. Note that this function does not
have a dependency on the shell of the user, but does depend on the user's
executable search path, since it directly executes `bas_codegen.pl`.
Returns:
typing.AsyncGenerator[str, None]: `G`
"""
process = await aio_create_subprocess_exec(
'bas_codegen.pl',
'--mode',
'msg',
'--noAggregateConversion',
'--noExternalization',
'--msgComponent=encoder_testtypes',
'--package=baljsn',
'baljsn_encoder_testtypes.xsd',
stdout=aio_subprocess.PIPE,
stderr=aio_subprocess.STDOUT)
stdout = not_none_cast(process.stdout)
while not stdout.at_eof():
line: bytes = await stdout.readline()
        if not stdout.at_eof() or len(line) != 0:
yield line.decode()
await process.communicate()
def rewrite_bdes_ident_to_bsls(file: str) -> None:
"""Replace all occurrences of "bdes_ident" with "bsls_ident" in the
specified `file`.
Args:
file (str): an absolute or relative path to a file
Returns:
None
"""
with open(file, "r+b") as f, mm_mmap(f.fileno(), 0) as filemap:
regex = b'(?P<LOWER_CASE>bdes_ident)|(?P<UPPER_CASE>BDES_IDENT)'
compiled_regex = re_compile(regex)
# mmap objects satisfy the bytearray interface
filemap_bytearray = ty_cast(bytearray, filemap)
for match in re_finditer(compiled_regex, filemap_bytearray):
group = match.lastgroup
if group == 'LOWER_CASE':
filemap[match.start():match.end()] = b'bsls_ident'
else:
assert group == 'UPPER_CASE'
filemap[match.start():match.end()] = b'BSLS_IDENT'
filemap.flush()
async def main() -> None:
"""Asynchronously generate the 'baljsn_encdoer_testypes' components and
replace all occurrences of "bdes_ident" with "bsls_ident" within them.
Return:
None
"""
print("Generating files with bas_codegen.pl")
lines = generate_components()
async for line in lines:
print(line.strip('\n'))
print("Replacing 'bdes_ident' with 'bsls_ident' in " +
"baljsn_encoder_testtypes.h")
rewrite_bdes_ident_to_bsls('./baljsn_encoder_testtypes.h')
print("Replacing 'bdes_ident' with 'bsls_ident' in " +
"baljsn_encoder_testtypes.cpp")
rewrite_bdes_ident_to_bsls('./baljsn_encoder_testtypes.cpp')
if __name__ == '__main__':
    if not (sys_version_info.major == 3 and sys_version_info.minor >= 6):
print("This program requires Python 3.6 or higher")
sys_exit(1)
aio_run(main())
# ----------------------------------------------------------------------------
# Copyright 2020 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------- END-OF-FILE ----------------------------------
| 2.265625
| 2
|
ownblock/ownblock/apps/amenities/serializers.py
|
danjac/ownblock
| 3
|
12776566
|
from django.db.models import Q
from django.utils import timezone
from rest_framework import serializers
from ..accounts.serializers import UserSerializer
from .models import Amenity, Booking
class AmenityRelatedField(serializers.RelatedField):
def to_native(self, value):
return {
'id': value.id,
'name': value.name,
}
class BookingSerializer(serializers.ModelSerializer):
resident = UserSerializer(read_only=True)
amenity_detail = AmenityRelatedField('amenity', read_only=True)
is_editable = serializers.SerializerMethodField('is_obj_editable')
is_removable = serializers.SerializerMethodField('is_obj_removable')
class Meta:
model = Booking
fields = ('id',
'resident',
'amenity',
'amenity_detail',
'reserved_from',
'reserved_to',
'is_editable',
'is_removable')
def is_obj_editable(self, obj):
return obj.has_permission(self.context['request'].user,
'amenities.change_booking')
def is_obj_removable(self, obj):
return obj.has_permission(self.context['request'].user,
'amenities.delete_booking')
def validate_amenity(self, attrs, source):
value = attrs[source]
if not value.is_available:
raise serializers.ValidationError("Amenity not available")
        if value not in self.context['request'].building.amenity_set.all():
raise serializers.ValidationError("Amenity not found")
return attrs
def validate_reserved_from(self, attrs, source):
value = attrs[source]
if value < timezone.now():
raise serializers.ValidationError("'From' date must be in future")
return attrs
def validate(self, attrs):
if attrs['reserved_from'] > attrs['reserved_to']:
raise serializers.ValidationError(
"The 'from' date is after the 'to' date")
bookings = attrs['amenity'].booking_set.all()
date_range = (attrs['reserved_from'], attrs['reserved_to'])
qs = bookings.filter(
Q(reserved_from__range=date_range) |
Q(reserved_to__range=date_range))
booking_id = self.init_data.get('id')
if booking_id:
qs = qs.exclude(pk=booking_id)
if qs.exists():
raise serializers.ValidationError("Booking conflict")
return attrs
class AmenitySerializer(serializers.ModelSerializer):
class Meta:
model = Amenity
fields = ('id', 'name', 'is_available', )
| 2.078125
| 2
|
tests/examples/web_driver_wait/web_driver_wait_test.py
|
bbornhau/python-opensdk
| 38
|
12776567
|
# Copyright 2020 TestProject (https://testproject.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
# Notice we import WebDriverWait from SDK classes!
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from src.testproject.classes import WebDriverWait
from src.testproject.sdk.drivers import webdriver
from selenium.webdriver.support import expected_conditions as ec
from tests.pageobjects.web import LoginPage, ProfilePage
@pytest.fixture
def driver():
driver = webdriver.Chrome()
yield driver
driver.quit()
@pytest.fixture()
def wait(driver):
wait = WebDriverWait(driver, 2) # Notice the imports, using WebDriverWait from 'src.testproject.classes'
yield wait
def test_wait_with_ec_invisible(driver, wait):
# Driver command will fail because element will not be found but this is the expected result so this step actually
    # passes and will be reported as passed as well.
LoginPage(driver).open().login_as("<NAME>", "12345")
# Check successful login.
assert ProfilePage(driver).greetings_are_displayed() is True
ProfilePage(driver).logout()
# Greeting label shouldn't be shown anymore after logout.
textlabel_greetings = (By.CSS_SELECTOR, "#greetings")
element_not_present = wait.until(ec.invisibility_of_element_located(textlabel_greetings))
assert element_not_present
# This step will fail because the example page's title is not the one we give below, step will be reported as failed
    # and a TimeoutException will be raised by the WebDriverWait instance.
try:
wait.until(ec.title_is("Title that is definitely not this one."))
except TimeoutException:
pass
| 2.21875
| 2
|
app/cascade/session.py
|
jillmnolan/cascade-server
| 0
|
12776568
|
# NOTICE
#
# This software was produced for the U. S. Government
# under Basic Contract No. W15P7T-13-C-A802, and is
# subject to the Rights in Noncommercial Computer Software
# and Noncommercial Computer Software Documentation
# Clause 252.227-7014 (FEB 2012)
#
# (C) 2017 The MITRE Corporation.
from __future__ import print_function
import json
import logging
from gevent.queue import Queue
from mongoengine.document import Document, EmbeddedDocument
from mongoengine.fields import EmbeddedDocumentField, StringField, ReferenceField, ListField
from analytics import AnalyticResult, Analytic, AnalyticConfiguration
from .database import DateRange, DisjointSet
from query_layers import DataModelQueryLayer
from .data_model.event import DataModelEvent
from .. import async
logger = logging.getLogger(__name__)
class QueryContext(DataModelQueryLayer):
def __init__(self, query_layer, time_range=None, session=None):
"""
:param DataModelQueryLayer query_layer: The query layer (likely the user object)
:param DateRange time_range: Time range if there is no session context
:param Session session: The session object to be queried over
"""
self.session = session
self.query_layer = query_layer
self._range = time_range
def _get_range(self):
""" :rtype: DateRange """
if self.session is not None:
return self.session.range
else:
return self._range
def query(self, expression, baseline=False, **kwargs):
new_args, expression = self._update_defaults(kwargs, expression)
results = self.query_layer.query(expression, **new_args)
return results
def external_analytics(self):
        # external analytics don't make sense in a session-scoped query context
raise NotImplementedError()
def _update_defaults(self, kwargs, expression):
new_args = kwargs.copy()
time_range = self._get_range()
new_args['start'], new_args['end'] = time_range.constrain(start=kwargs.get('start'), end=kwargs.get('end'))
return new_args, expression
class SessionState(EmbeddedDocument):
analytics = ListField(EmbeddedDocumentField(AnalyticConfiguration))
class SessionStream(object):
__queues = {}
def __init__(self, session):
self.count = 0
self.queues = {}
self.session = session
@classmethod
def get_queue(cls, session):
s_id = str(session.id)
if str(session.id) not in cls.__queues:
cls.__queues[s_id] = cls(session)
return cls.__queues[s_id]
def add(self, item):
""" :type item: DataModelEvent """
for q in self.queues.values():
if isinstance(q, Queue):
q.put(json.dumps(item))
def stream(self):
self.count += 1
queue_id = self.count
if not async.enabled:
# print('WARNING! Stream functionality will not work without gevent')
raise StopIteration()
q = Queue()
self.queues[self.count] = q
try:
# Return all ready events
# yield 'stored', jsonify(self.session.events())
# Return events as they are placed into the queues
for item in q:
yield item
except GeneratorExit:
self.queues.pop(queue_id)
q.put(StopIteration)
class Session(Document):
__sessions = {}
domain = StringField()
range = EmbeddedDocumentField(DateRange, required=True)
name = StringField(required=True)
state = EmbeddedDocumentField(SessionState)
def __init__(self, *args, **kwargs):
super(Session, self).__init__(*args, **kwargs)
self._queue = None
def query_context(self, user):
return QueryContext(self, user)
@property
def queue(self):
""":rtype SessionStream """
if self._queue is None:
self._queue = SessionStream.get_queue(self)
return self._queue
def get_clusters(self):
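        # Treat this session's events and analytic results as an undirected graph
        # (event<->event links plus result->event edges) and return its connected
        # components, each as a {'events': [...], 'results': [...]} dict.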
events = list(DataModelEvent.objects(sessions=self).no_dereference())
results = list(AnalyticResult.objects(session=self).no_dereference())
event_keys = set(_.id for _ in events)
def get_neighbors(node):
neighbors = []
if isinstance(node, AnalyticResult):
neighbors.extend(event for event in node.events if event.id in event_keys)
elif isinstance(node, DataModelEvent):
# TODO: Error check to handle for events outside of current session
neighbors.extend(event for event in node.links if event.id in event_keys)
neighbors.extend(event for event in node.reverse_links if event.id in event_keys)
return neighbors
uptree = DisjointSet(events + results, get_neighbors)
clusters = []
for cluster in uptree.clusters():
new_cluster = {'events': [], 'results': []}
for item in cluster:
if isinstance(item, AnalyticResult):
new_cluster['results'].append(item)
elif isinstance(item, DataModelEvent):
new_cluster['events'].append(item)
clusters.append(new_cluster)
return clusters
| 1.992188
| 2
|
tests/integration/controller/test_stationelement_controller.py
|
faysal-ishtiaq/climsoft-api
| 0
|
12776569
|
from datetime import datetime
import json
import pytest
from sqlalchemy.orm.session import Session
from opencdms.models.climsoft import v4_1_1_core as climsoft_models
from climsoft_api.api.stationelement import schema as stationelement_schema
from tests.datagen import (
stationelement as climsoft_station_element,
obsscheduleclass as climsoft_obsscheduleclass,
obselement as climsoft_obselement,
station as climsoft_station,
instrument as climsoft_instrument,
)
from fastapi.testclient import TestClient
@pytest.fixture
def get_station(session: Session):
station = climsoft_models.Station(
**climsoft_station.get_valid_station_input().dict()
)
session.add(station)
session.commit()
yield station
session.close()
@pytest.fixture
def get_obs_schedule_class(get_station: climsoft_models.Station, session: Session):
obs_schedule_class = climsoft_models.Obsscheduleclas(
**climsoft_obsscheduleclass.get_valid_obs_schedule_class_input(
station_id=get_station.stationId
).dict()
)
session.add(obs_schedule_class)
session.commit()
yield obs_schedule_class
session.close()
@pytest.fixture
def get_obselement(session: Session):
obselement = climsoft_models.Obselement(
**climsoft_obselement.get_valid_obselement_input().dict()
)
session.add(obselement)
session.commit()
yield obselement
session.close()
@pytest.fixture
def get_instrument(get_station: climsoft_models.Station, session: Session):
instrument = climsoft_models.Instrument(
**climsoft_instrument.get_valid_instrument_input(
station_id=get_station.stationId
).dict()
)
session.add(instrument)
session.commit()
yield instrument
session.close()
@pytest.fixture
def get_station_element(
get_station: climsoft_models.Station,
get_instrument: climsoft_models.Instrument,
get_obselement: climsoft_models.Obselement,
get_obs_schedule_class: climsoft_models.Obsscheduleclas,
session: Session,
):
station_element = climsoft_models.Stationelement(
**climsoft_station_element.get_valid_station_element_input(
station_id=get_station.stationId,
instrument_id=get_instrument.instrumentId,
element_id=get_obselement.elementId,
schedule_class=get_obs_schedule_class.scheduleClass,
).dict()
)
session.add(station_element)
session.commit()
yield station_element
session.close()
@pytest.fixture
def get_station_elements(session: Session):
for _ in range(1, 11):
station = climsoft_models.Station(
**climsoft_station.get_valid_station_input().dict()
)
session.add(station)
session.flush()
obs_element = climsoft_models.Obselement(
**climsoft_obselement.get_valid_obselement_input().dict()
)
session.add(obs_element)
session.flush()
obs_schedule_class = climsoft_models.Obsscheduleclas(
**climsoft_obsscheduleclass.get_valid_obs_schedule_class_input(
station_id=station.stationId
).dict()
)
session.add(obs_schedule_class)
session.flush()
instrument = climsoft_models.Instrument(
**climsoft_instrument.get_valid_instrument_input(
station_id=station.stationId
).dict()
)
session.add(instrument)
session.flush()
session.add(
climsoft_models.Stationelement(
**climsoft_station_element.get_valid_station_element_input(
station_id=station.stationId,
instrument_id=instrument.instrumentId,
element_id=obs_element.elementId,
schedule_class=obs_schedule_class.scheduleClass,
).dict()
)
)
session.commit()
def test_should_return_first_five_station_elements(
client: TestClient, get_station_elements
):
response = client.get(
"/v1/station-elements",
params={"limit": 5},
)
assert response.status_code == 200
response_data = response.json()
assert len(response_data["result"]) == 5
for s in response_data["result"]:
isinstance(s, stationelement_schema.StationElement)
def test_should_return_single_station_element(
client: TestClient,
get_station_element: climsoft_models.Stationelement,
):
response = client.get(
f"/v1/station-elements/{get_station_element.recordedFrom}/{get_station_element.describedBy}/{get_station_element.recordedWith}/{get_station_element.beginDate}",
)
assert response.status_code == 200
response_data = response.json()
assert len(response_data["result"]) == 1
for s in response_data["result"]:
isinstance(s, stationelement_schema.StationElement)
def test_should_create_a_station_element(
client: TestClient,
get_station: climsoft_models.Station,
get_instrument,
get_obselement,
get_obs_schedule_class,
):
station_element_data = climsoft_station_element.get_valid_station_element_input(
station_id=get_station.stationId,
element_id=get_obselement.elementId,
schedule_class=get_obs_schedule_class.scheduleClass,
instrument_id=get_instrument.instrumentId,
).dict(by_alias=True)
response = client.post(
"/v1/station-elements",
data=json.dumps(station_element_data, default=str),
)
assert response.status_code == 200
response_data = response.json()
assert len(response_data["result"]) == 1
for s in response_data["result"]:
isinstance(s, stationelement_schema.StationElement)
def test_should_raise_validation_error(
client: TestClient,
get_station: climsoft_models.Station,
get_instrument,
get_obselement,
get_obs_schedule_class,
):
response = client.post(
"/v1/station-elements",
data=json.dumps({"end_date": datetime.utcnow()}, default=str),
)
assert response.status_code == 422
def test_should_update_station_element(
client: TestClient,
get_station_element: climsoft_models.Stationelement,
):
station_element_data = climsoft_station_element.get_valid_station_element_input(
station_id=get_station_element.recordedFrom,
element_id=get_station_element.describedBy,
schedule_class=get_station_element.scheduledFor,
instrument_id=get_station_element.recordedWith,
).dict(
by_alias=True,
exclude={"beginDate", "describedBy", "recordedFrom", "recordedWith"},
)
updates = {**station_element_data, "height": 100}
response = client.put(
f"/v1/station-elements/{get_station_element.recordedFrom}/{get_station_element.describedBy}/{get_station_element.recordedWith}/{get_station_element.beginDate}",
data=json.dumps(updates, default=str),
)
response_data = response.json()
assert response.status_code == 200
assert response_data["result"][0]["height"] == updates["height"]
def test_should_delete_station_element(
client: TestClient,
get_station_element: climsoft_models.Stationelement,
):
response = client.delete(
f"/v1/station-elements/{get_station_element.recordedFrom}/{get_station_element.describedBy}/{get_station_element.recordedWith}/{get_station_element.beginDate}",
)
assert response.status_code == 200
response = client.get(
f"/v1/station-elements/{get_station_element.recordedFrom}/{get_station_element.describedBy}/{get_station_element.recordedWith}/{get_station_element.beginDate}",
)
assert response.status_code == 404
| 2.21875
| 2
|
py/Logical/min_number.py
|
antoniotorresz/python
| 0
|
12776570
|
# Min value from a list
numbers = []
length = int(input("Enter list length...\n"))
for i in range(length):
    numbers.append(float(input("Enter an integer or decimal number...\n")))
print("The min value is: " + str(min(numbers)))
| 4.21875
| 4
|
Crypto/Encryption.py
|
alouks/utilities
| 0
|
12776571
|
import base64
import hashlib
from Crypto import Random
from Crypto.Cipher import AES
'''
Encryption
@description For arbitrary encryption and decryption of data
@author <NAME>
Usage:
e = Encryption()
encrypted_string = e.encrypt("Encrypt me!", "password")
decrypted = e.decrypt(encrypted_string, "password")
'''
class Encryption:
def __init__(self):
self.bs = 16
    def encrypt(self, plaintext, password):
        data = self.pad(plaintext.encode())
        iv = Random.new().read(self.bs)
        # Derive a 32-byte AES-256 key from the password
        key = hashlib.sha256(password.encode()).hexdigest()[:32].encode()
        cipher = AES.new(key, AES.MODE_CBC, iv)
        return base64.b64encode(iv + cipher.encrypt(data))
    def decrypt(self, ciphertext, password):
        key = hashlib.sha256(password.encode()).hexdigest()[:32].encode()
        ciphertext = base64.b64decode(ciphertext)
        iv = ciphertext[:16]
        cipher = AES.new(key, AES.MODE_CBC, iv)
        return self.unpad(cipher.decrypt(ciphertext[16:])).decode()
    def pad(self, s):
        # PKCS#7-style padding: append n copies of byte n, where n = bs - len(s) % bs
        n = self.bs - len(s) % self.bs
        return s + bytes([n]) * n
    def unpad(self, s):
        # strip as many bytes as the value of the final padding byte
        return s[:-s[-1]]
| 3.4375
| 3
|
Lab_10/all_users_jaccard.py
|
Bartosz-Gorka-Archive/processing-massive-datasets
| 5
|
12776572
|
import csv
from itertools import combinations
from heapq import heappush, heappushpop
SOURCE_FILE_NAME = 'facts3.csv'
RESULTS_FILE_NAME = 'results.txt'
NEAREST_NEIGHBOR_SIZE = 100
def sort_by_similarity(similarity_list):
return sorted(similarity_list, key=lambda record: (record[0], record[1]), reverse=True)
def nearest_neighbors(similarity):
f = open(RESULTS_FILE_NAME, 'w+')
for user_id in sorted(similarity.keys()):
list_of_partners_similarity = similarity[user_id]
f.write(f'User = {user_id}\n')
f.write('{:8d} 1.00000\n'.format(user_id)) # I know - this is a hack
[f.write('{:8d} {:7.5f}\n'.format(record[1], record[0])) for record in sort_by_similarity(list_of_partners_similarity)[0:NEAREST_NEIGHBOR_SIZE-1]]
f.close()
def main():
with open(SOURCE_FILE_NAME, 'r') as f:
reader = csv.reader(f)
# Skip header with fields
next(reader, None)
songs_groups = {}
user_song_count = {}
hist_dict = {}
similarity = {}
print('START')
for record in reader:
song_id = int(record[1])
if song_id in songs_groups:
songs_groups[song_id].add(int(record[0]))
else:
songs_groups[song_id] = {int(record[0])}
print('CALCULATE HITS')
print(len(songs_groups))
row = 0
for song_id, unique_users_ids in songs_groups.items():
print(row, len(unique_users_ids))
row += 1
for user_id in unique_users_ids:
# Store +1 in user's songs
user_song_count[user_id] = user_song_count.get(user_id, 0) + 1
for (user_1, user_2) in combinations(unique_users_ids, 2):
if user_1 > 100 and user_2 > 100:
continue
if user_1 < user_2:
key = f'{user_1}-{user_2}'
else:
key = f'{user_2}-{user_1}'
hist_dict[key] = hist_dict.get(key, 0) + 1
print('SIMILARITY')
for (key, hits) in hist_dict.items():
user_ids = key.split('-')
user_1 = int(user_ids[0])
user_2 = int(user_ids[1])
total = user_song_count[user_1] + user_song_count[user_2]
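            # Jaccard similarity: |A ∩ B| / |A ∪ B| = shared songs / (songs_1 + songs_2 - shared songs)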
value = hits / (total - hits)
similar = similarity.get(user_1, [])
if len(similar) < NEAREST_NEIGHBOR_SIZE:
heappush(similar, [value, user_2])
else:
heappushpop(similar, [value, user_2])
similarity[user_1] = similar
similar = similarity.get(user_2, [])
if len(similar) < NEAREST_NEIGHBOR_SIZE:
heappush(similar, [value, user_1])
else:
heappushpop(similar, [value, user_1])
similarity[user_2] = similar
print('SAVE')
nearest_neighbors(similarity)
print('FINISH')
if __name__ == '__main__':
main()
| 3.28125
| 3
|
judi/utils.py
|
johanneskoester/JUDI
| 0
|
12776573
|
import pandas as pd
import os
def ensure_dir(file_path):
directory = os.path.dirname(file_path)
if directory and not os.path.exists(directory):
print("Creating new directory", directory)
os.makedirs(directory)
import json
def get_cfg_str(x):
# json.dumps(r.to_dict(), sort_keys=True, separators = (',', '~'))[1:-1]
# It seems DoIt does not allow equal (=) char in task name
return ",".join(['{}~{}'.format(k,v) for (k,v) in sorted(x.to_dict().items()) if k not in ['JUDI', 'name']])
def combine_csvs_base(params, infiles, outfile):
df = pd.DataFrame()
for indx, r in params.assign(infile = infiles).iterrows():
tmp = pd.read_csv(r['infile'])
for col in params.columns:
tmp[col] = r[col]
df = df.append(tmp, ignore_index=True)
df.to_csv(outfile, index=False)
def combine_csvs(big, small):
infiles = big['path'].tolist()
outfile = small['path'].tolist()[0]
params = big.drop(columns=['name', 'path'])
combine_csvs_base(params, infiles, outfile)
from PyPDF2 import PdfFileMerger
def merge_pdfs_base(infiles, outfile):
merger = PdfFileMerger()
for pdf in infiles:
merger.append(open(pdf, 'rb'))
with open(outfile, 'wb') as fout:
merger.write(fout)
def merge_pdfs(big, small):
infiles = big['path'].tolist()
outfile = small['path'].tolist()[0]
merge_pdfs_base(infiles, outfile)
| 2.671875
| 3
|
patch_manager_sdk/api/patch_task/create_task_pb2.py
|
easyopsapis/easyops-api-python
| 5
|
12776574
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: create_task.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='create_task.proto',
package='patch_task',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x11\x63reate_task.proto\x12\npatch_task\"\xa8\x01\n\x16\x43reatePatchTaskRequest\x12;\n\x07request\x18\x01 \x03(\x0b\x32*.patch_task.CreatePatchTaskRequest.Request\x12\x11\n\tgroupSize\x18\x02 \x01(\x05\x1a>\n\x07Request\x12\x0e\n\x06hostId\x18\x01 \x01(\t\x12\x0e\n\x06hostIp\x18\x02 \x01(\t\x12\x13\n\x0bpatchIdList\x18\x03 \x03(\t\")\n\x17\x43reatePatchTaskResponse\x12\x0e\n\x06taskId\x18\x01 \x01(\t\"\x85\x01\n\x1e\x43reatePatchTaskResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12\x31\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32#.patch_task.CreatePatchTaskResponseb\x06proto3')
)
_CREATEPATCHTASKREQUEST_REQUEST = _descriptor.Descriptor(
name='Request',
full_name='patch_task.CreatePatchTaskRequest.Request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='hostId', full_name='patch_task.CreatePatchTaskRequest.Request.hostId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hostIp', full_name='patch_task.CreatePatchTaskRequest.Request.hostIp', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='patchIdList', full_name='patch_task.CreatePatchTaskRequest.Request.patchIdList', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=140,
serialized_end=202,
)
_CREATEPATCHTASKREQUEST = _descriptor.Descriptor(
name='CreatePatchTaskRequest',
full_name='patch_task.CreatePatchTaskRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request', full_name='patch_task.CreatePatchTaskRequest.request', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='groupSize', full_name='patch_task.CreatePatchTaskRequest.groupSize', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_CREATEPATCHTASKREQUEST_REQUEST, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=34,
serialized_end=202,
)
_CREATEPATCHTASKRESPONSE = _descriptor.Descriptor(
name='CreatePatchTaskResponse',
full_name='patch_task.CreatePatchTaskResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='taskId', full_name='patch_task.CreatePatchTaskResponse.taskId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=204,
serialized_end=245,
)
_CREATEPATCHTASKRESPONSEWRAPPER = _descriptor.Descriptor(
name='CreatePatchTaskResponseWrapper',
full_name='patch_task.CreatePatchTaskResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='patch_task.CreatePatchTaskResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='patch_task.CreatePatchTaskResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='patch_task.CreatePatchTaskResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='patch_task.CreatePatchTaskResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=248,
serialized_end=381,
)
_CREATEPATCHTASKREQUEST_REQUEST.containing_type = _CREATEPATCHTASKREQUEST
_CREATEPATCHTASKREQUEST.fields_by_name['request'].message_type = _CREATEPATCHTASKREQUEST_REQUEST
_CREATEPATCHTASKRESPONSEWRAPPER.fields_by_name['data'].message_type = _CREATEPATCHTASKRESPONSE
DESCRIPTOR.message_types_by_name['CreatePatchTaskRequest'] = _CREATEPATCHTASKREQUEST
DESCRIPTOR.message_types_by_name['CreatePatchTaskResponse'] = _CREATEPATCHTASKRESPONSE
DESCRIPTOR.message_types_by_name['CreatePatchTaskResponseWrapper'] = _CREATEPATCHTASKRESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CreatePatchTaskRequest = _reflection.GeneratedProtocolMessageType('CreatePatchTaskRequest', (_message.Message,), {
'Request' : _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), {
'DESCRIPTOR' : _CREATEPATCHTASKREQUEST_REQUEST,
'__module__' : 'create_task_pb2'
# @@protoc_insertion_point(class_scope:patch_task.CreatePatchTaskRequest.Request)
})
,
'DESCRIPTOR' : _CREATEPATCHTASKREQUEST,
'__module__' : 'create_task_pb2'
# @@protoc_insertion_point(class_scope:patch_task.CreatePatchTaskRequest)
})
_sym_db.RegisterMessage(CreatePatchTaskRequest)
_sym_db.RegisterMessage(CreatePatchTaskRequest.Request)
CreatePatchTaskResponse = _reflection.GeneratedProtocolMessageType('CreatePatchTaskResponse', (_message.Message,), {
'DESCRIPTOR' : _CREATEPATCHTASKRESPONSE,
'__module__' : 'create_task_pb2'
# @@protoc_insertion_point(class_scope:patch_task.CreatePatchTaskResponse)
})
_sym_db.RegisterMessage(CreatePatchTaskResponse)
CreatePatchTaskResponseWrapper = _reflection.GeneratedProtocolMessageType('CreatePatchTaskResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _CREATEPATCHTASKRESPONSEWRAPPER,
'__module__' : 'create_task_pb2'
# @@protoc_insertion_point(class_scope:patch_task.CreatePatchTaskResponseWrapper)
})
_sym_db.RegisterMessage(CreatePatchTaskResponseWrapper)
# @@protoc_insertion_point(module_scope)
| 1.171875
| 1
|
stamps/includes/W_hotbox/Rules/Stamps/001.py
|
adrianpueyo/stamps
| 18
|
12776575
|
<filename>stamps/includes/W_hotbox/Rules/Stamps/001.py
#----------------------------------------------------------------------------------------------------------
#
# AUTOMATICALLY GENERATED FILE TO BE USED BY W_HOTBOX
#
# NAME: Reconnect by Title
# COLOR: #6b4930
#
#----------------------------------------------------------------------------------------------------------
ns = [n for n in nuke.selectedNodes() if n.knob("identifier")]
for n in ns:
try:
n["reconnect_by_title_this"].execute()
except:
pass
| 2.28125
| 2
|
federation/utils/text.py
|
hoseinfzad/federation
| 0
|
12776576
|
<gh_stars>0
import re
from urllib.parse import urlparse
def decode_if_bytes(text):
try:
return text.decode("utf-8")
except AttributeError:
return text
def encode_if_text(text):
try:
return bytes(text, encoding="utf-8")
except TypeError:
return text
def get_path_from_url(url: str) -> str:
    """
    Return only the path part of a URL.
    """
parsed = urlparse(url)
return parsed.path
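# Example: get_path_from_url("https://example.com/a/b?x=1") -> "/a/b"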
def validate_handle(handle):
"""
Very basic handle validation as per
https://diaspora.github.io/diaspora_federation/federation/types.html#diaspora-id
"""
return re.match(r"[a-z0-9\-_.]+@[^@/]+\.[^@/]+", handle, flags=re.IGNORECASE) is not None
def with_slash(url):
if url.endswith('/'):
return url
return f"{url}/"
| 3
| 3
|
venv/lib/python2.7/site-packages/image/views.py
|
deandunbar/html2bwml
| 0
|
12776577
|
<filename>venv/lib/python2.7/site-packages/image/views.py<gh_stars>0
# -*- coding: UTF-8 -*-
from django.core.files.base import ContentFile
from encodings.base64_codec import base64_decode
import os
import urllib
import traceback
from django.http import HttpResponse, QueryDict
from django.http.response import Http404
from django.utils import timezone
from django.utils.encoding import smart_unicode
from image.settings import IMAGE_CACHE_HTTP_EXPIRATION, IMAGE_CACHE_ROOT
from image.storage import IMAGE_CACHE_STORAGE, MEDIA_STORAGE, STATIC_STORAGE
from image.utils import scale, scaleAndCrop, IMAGE_DEFAULT_FORMAT, IMAGE_DEFAULT_QUALITY,\
image_create_token
from image.videothumbs import generate_thumb
def image(request, path, token, autogen=False):
is_admin = False
if ("is_admin=true" in token and request and request.user.has_perm('admin')) or autogen:
parameters = token
is_admin = True
if autogen:
token = image_create_token(parameters)
else:
parameters = request.session.get(token, token)
cached_image_file = os.path.join(path, token)
now = timezone.now()
expire_offset = timezone.timedelta(seconds=IMAGE_CACHE_HTTP_EXPIRATION)
response = HttpResponse()
response['Content-type'] = 'image/jpeg'
response['Expires'] = (now + expire_offset).strftime("%a, %d %b %Y %T GMT")
response['Last-Modified'] = now.strftime("%a, %d %b %Y %T GMT")
response['Cache-Control'] = 'max-age=3600, must-revalidate'
response.status_code = 200
# If we already have the cache we send it instead of recreating it
if IMAGE_CACHE_STORAGE.exists(cached_image_file):
if autogen:
return 'Already generated'
try:
f = IMAGE_CACHE_STORAGE.open(cached_image_file, "r")
except IOError:
raise Http404()
response.write(f.read())
f.close()
response['Last-Modified'] = IMAGE_CACHE_STORAGE.modified_time(cached_image_file).strftime("%a, %d %b %Y %T GMT")
return response
if parameters == token and not is_admin:
return HttpResponse("Forbidden", status=403)
qs = QueryDict(parameters)
file_storage = MEDIA_STORAGE
if qs.get('static', '') == "true":
file_storage = STATIC_STORAGE
format = qs.get('format', IMAGE_DEFAULT_FORMAT)
quality = int(qs.get('quality', IMAGE_DEFAULT_QUALITY))
mask = qs.get('mask', None)
mask_source = qs.get('mask_source', None)
if mask is not None:
format = "PNG"
fill = qs.get('fill', None)
background = qs.get('background', None)
tint = qs.get('tint', None)
center = qs.get('center', ".5,.5")
mode = qs.get('mode', "crop")
overlays = qs.getlist('overlay')
overlay_sources = qs.getlist('overlay_source')
overlay_tints = qs.getlist('overlay_tint')
overlay_sizes = qs.getlist('overlay_size')
overlay_positions = qs.getlist('overlay_position')
width = int(qs.get('width', None))
height = int(qs.get('height', None))
try:
padding = float(qs.get('padding',None))
except TypeError:
padding = 0.0
if "video" in qs:
data, http_response = generate_thumb(file_storage, smart_unicode(path), width=width, height=height)
response.status_code = http_response
else:
try:
try:
f = urllib.urlopen(qs['url'])
data = f.read()
f.close()
except KeyError:
f = file_storage.open(path)
data = f.read()
f.close()
except IOError:
response.status_code = 404
data = ""
if data:
try:
if mode == "scale":
output_data = scale(data, width, height, path, padding=padding, overlays=overlays, overlay_sources=overlay_sources, overlay_tints=overlay_tints, overlay_positions=overlay_positions, overlay_sizes=overlay_sizes, mask=mask, mask_source=mask_source, format=format, quality=quality, fill=fill, background=background, tint=tint)
else:
output_data = scaleAndCrop(data, width, height, path, True, padding=padding, overlays=overlays, overlay_sources=overlay_sources, overlay_tints=overlay_tints, overlay_positions=overlay_positions, overlay_sizes=overlay_sizes, mask=mask, mask_source=mask_source, center=center, format=format, quality=quality, fill=fill, background=background, tint=tint)
except IOError:
traceback.print_exc()
response.status_code = 500
output_data = ""
else:
output_data = data
if response.status_code == 200:
IMAGE_CACHE_STORAGE.save(cached_image_file, ContentFile(output_data))
if autogen:
return 'Generated ' + str(response.status_code)
else:
if autogen:
return 'Failed ' + cached_image_file
response.write(output_data)
return response
def crosshair(request):
response = HttpResponse()
response['Content-type'] = 'image/png'
response['Expires'] = 'Fri, 09 Dec 2327 08:34:31 GMT'
response['Last-Modified'] = 'Fri, 24 Sep 2010 11:36:29 GMT'
output, length = base64_decode('<KEY>')
response.write(output)
return response
| 2.21875
| 2
|
bin/tower_api.py
|
coreywan/splunk-alert_ansible-tower
| 0
|
12776578
|
#!/usr/bin/python
import sys, json, os, datetime
import logging, logging.handlers
import splunk.entity as entity
import splunk
import requests
# Tower Connect
#
# This script is used as wrapper to connect to Ansible Tower API.
## Original from:
# __author__ = "<NAME>"
# __email__ = "<EMAIL>"
# __version__ = "1.0"
# Refactored By:
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__version__ = "1.0"
# Setup Logger
def setup_logging():
logger = logging.getLogger('splunk.tower_api')
SPLUNK_HOME = os.environ['SPLUNK_HOME']
LOGGING_DEFAULT_CONFIG_FILE = os.path.join(SPLUNK_HOME, 'etc', 'log.cfg')
LOGGING_LOCAL_CONFIG_FILE = os.path.join(SPLUNK_HOME, 'etc', 'log-local.cfg')
LOGGING_STANZA_NAME = 'python'
LOGGING_FILE_NAME = "tower_api.log"
BASE_LOG_PATH = os.path.join('var', 'log', 'splunk')
LOGGING_FORMAT = "%(asctime)s %(levelname)-s\t%(module)s:%(lineno)d - %(message)s"
splunk_log_handler = logging.handlers.RotatingFileHandler(os.path.join(SPLUNK_HOME, BASE_LOG_PATH, LOGGING_FILE_NAME), mode='a')
splunk_log_handler.setFormatter(logging.Formatter(LOGGING_FORMAT))
logger.addHandler(splunk_log_handler)
splunk.setupSplunkLogger(logger, LOGGING_DEFAULT_CONFIG_FILE, LOGGING_LOCAL_CONFIG_FILE, LOGGING_STANZA_NAME)
return logger
#Securely retrieve Ansible Tower Credentials from Splunk REST API password endpoint
def getCredentials(sessionKey,realm):
''' Get Tower Credentials from Splunk '''
myapp = 'splunk-alert_ansible-tower-master'
try:
# list all credentials
entities = entity.getEntities(['admin', 'passwords'], namespace=myapp,
owner='nobody', sessionKey=sessionKey)
except Exception as e:
logger.error("Could not get %s credentials from splunk. Error: %s"
% (myapp, str(e)))
raise e
# return first set of credentials
for i, c in entities.items():
if c.get('realm') == realm:
return c['username'], c['clear_password']
logger.error("ERROR: No credentials have been found")
def tower_get_job_launch_link(hostname,username,password,job_name):
''' Get Job Launch Link from Tower API based on Name '''
logger.info("Job Name: {}".format(job_name))
#Attempt to get launch link
try:
req = requests.get(
url = 'https://{}/api/v2/unified_job_templates/?name={}'.format(hostname,job_name),
headers = {
"Content-Type": "application/json",
},
verify = False,
auth = (username, password),
)
req.raise_for_status()
results = req.json()
logger.info("Unified Jobs Found: {}".format(results))
if results['count'] != 1:
            logger.warn('There were {} templates found with the name of {}'.format(results['count'], job_name))
launch_link = results['results'][0]['related']['launch']
logger.info("Launch Link: {}".format(launch_link))
return launch_link
except Exception as error:
logger.error(error)
raise error
def tower_launch(hostname,username,password,job_name,extra_vars):
''' Launch Tower Job '''
launch_link = tower_get_job_launch_link(hostname, username, password, job_name)
post_data = {
"url": "https://{}{}".format(hostname,launch_link),
"headers": {
"Content-Type": "application/json",
"Accept": "application/json",
},
"verify": False,
"auth": (username, password),
}
if extra_vars != None:
data = {}
data['extra_vars'] = json.loads(extra_vars)
post_data['data'] = json.dumps(data)
#Attempt to Launch Ansible Tower Job Template
try:
req = requests.post(**post_data)
results = req.json()
logger.info("Job Info: {}".format(results))
req.raise_for_status()
except Exception as error:
logger.error(error)
raise error
def main(payload):
#Setup Logger
global logger
#Retrieve session key from payload to authenticate to Splunk REST API for secure credential retrieval
sessionKey = payload.get('session_key')
#Retrieve Ansible Tower Hostname from Payload configuration
hostname = payload['configuration'].get('hostname')
#Retrieve Ansible Tower Job Template ID from Payload configuration
job_name = payload['configuration'].get('job_name')
#Retrieve realm from Payload configuration
realm = payload['configuration'].get('realm')
    #Retrieve Ansible Tower Credentials from Splunk REST API
username, password = getCredentials(sessionKey,realm)
    #Retrieve extra variables from the payload configuration (future: extend extra variable support)
extra_vars = payload['configuration'].get('extra_var')
#Submit Ansible Tower Job
tower_launch(hostname,username,password,job_name,extra_vars)
if __name__ == "__main__":
logger = setup_logging()
# Check if script initiated with --execute
if len(sys.argv) < 2 or sys.argv[1] != "--execute":
#print >> sys.stderr, "FATAL Unsupported execution mode (expected --execute flag)"
sys.exit(1)
else:
#Get Payload
payload = json.loads(sys.stdin.read())
logger.info("Job Started")
        #Pass payload to main function
main(payload)
logger.info("Job Completed")
| 2.3125
| 2
|
optimus/data/__init__.py
|
IanTayler/tao-exercises
| 3
|
12776579
|
"""Module for manipulation of physical data. Mostly conveniences for reading and writing files."""
| 1.1875
| 1
|
models.py
|
magicwenli/db-generater
| 0
|
12776580
|
import os
from dotenv import load_dotenv
from sqlalchemy import create_engine, Column, String, Date, Numeric, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
load_dotenv(verbose=True)
db_link = os.getenv("DB_LINK")
db = create_engine(db_link)
base = declarative_base()
class JS020(base):
__tablename__ = 'js020'
sno = Column(String(10), primary_key=True, nullable=False, index=True)
sname = Column(String(8), nullable=False)
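    # the default '男' is Chinese for "male"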
sex = Column(String(3), nullable=False, default='男')
bdate = Column(Date, nullable=False, default='1970-01-01')
height = Column(Numeric(3, 2), nullable=False, default=0)
dorm = Column(String(15))
class JC020(base):
__tablename__ = 'jc020'
cno = Column(String(12), primary_key=True, nullable=False, index=True)
cname = Column(String(30), nullable=False)
period = Column(Numeric(4, 1), nullable=False, default=0)
credit = Column(Numeric(2, 1), nullable=False, default=0)
teacher = Column(String(10), nullable=False)
class JSC020(base):
__tablename__ = 'jsc020'
sno = Column(String(10), ForeignKey('js020.sno'), primary_key=True, nullable=False)
cno = Column(String(12), ForeignKey('jc020.cno'), primary_key=True, nullable=False)
grade = Column(Numeric(4, 1), nullable=True)
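
# A minimal bootstrap sketch (not part of the original module): assuming the
# DB_LINK engine configured above is reachable, this creates the three tables
# declared here.
if __name__ == "__main__":
    base.metadata.create_all(db)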
| 2.4375
| 2
|
tests/test_cloudfoundryutil.py
|
jan-randis/php-db2-mysql-buildpack
| 0
|
12776581
|
<reponame>jan-randis/php-db2-mysql-buildpack
from nose.tools import eq_
from build_pack_utils.cloudfoundry import CloudFoundryUtil
from build_pack_utils import utils
import tempfile
import shutil
import os
def buildpack_directory():
directory = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
return os.path.abspath(directory)
def create_manifest_file(manifest_filename, contents):
file = open(manifest_filename,'w+')
file.write(contents)
file.close()
class TestCloudFoundryUtil(object):
def setUp(self):
self.buildpack_dir = buildpack_directory()
self.manifest_dir = tempfile.mkdtemp()
        self.manifest_file = os.path.join(self.manifest_dir, 'manifest.yml')
def tearDown(self):
shutil.rmtree(self.manifest_dir)
def test_default_versions_are_updated(self):
input_dict = utils.FormattedDict()
input_dict['BP_DIR'] = buildpack_directory()
create_manifest_file(self.manifest_file, GOOD_MANIFEST)
output_dict = CloudFoundryUtil.update_default_version('php', self.manifest_file, input_dict)
# keys exist
eq_('PHP_VERSION' in output_dict, True)
eq_('PHP_DOWNLOAD_URL' in output_dict, True)
eq_('PHP_MODULES_PATTERN' in output_dict, True)
# have correct value
eq_(output_dict['PHP_VERSION'], '9.9.99')
# output_dict['PHP_VERSION'] + output_dict['MODULE_NAME'] are interpolated into the strings returned
# from the dict, so:
output_dict['MODULE_NAME'] = 'test_default_versions'
eq_(output_dict['PHP_MODULES_PATTERN'], '/php/9.9.99/php-test_default_versions-9.9.99.tar.gz')
eq_(output_dict['PHP_DOWNLOAD_URL'], '/php/9.9.99/php-9.9.99.tar.gz')
def test_default_version_is_not_in_manifest(self):
exception = None
input_dict = utils.FormattedDict()
input_dict['BP_DIR'] = buildpack_directory()
create_manifest_file(self.manifest_file, BAD_MANIFEST)
try:
CloudFoundryUtil.update_default_version('php', self.manifest_file, input_dict)
except RuntimeError as e:
exception = e
eq_("Error detecting PHP default version", str(exception))
BAD_MANIFEST = '''\
---
language: php
default_versions:
- name: php
version: 9.9.777
dependencies:
- name: php
version: 5.6.23
uri: https://buildpacks.cloudfoundry.org/dependencies/php/php-5.6.23-linux-x64-1469767807.tgz
md5: 9ffbd67e557f4569de8d876664a6bd33
- name: php
version: 5.6.24
uri: https://buildpacks.cloudfoundry.org/dependencies/php/php-5.6.24-linux-x64-1469768750.tgz
md5: 35b5e1ccce1f2ca7e55c81b11f278a3f
- name: php
version: 7.0.8
uri: https://buildpacks.cloudfoundry.org/dependencies/php7/php7-7.0.8-linux-x64-1469764417.tgz
md5: a479fec08ac8400ca9d775a88ddb2962
- name: php
version: 7.0.9
uri: https://buildpacks.cloudfoundry.org/dependencies/php7/php7-7.0.9-linux-x64-1469765150.tgz
cf_stacks:
- cflinuxfs2
md5: 19e8318e1cee3fa9fd8fdcc358f01076
'''
GOOD_MANIFEST = '''\
---
language: php
default_versions:
- name: php
version: 9.9.99
dependencies:
- name: php
version: 9.9.99
uri: https://buildpacks.cloudfoundry.org/dependencies/php/php-9.9.99-linux-x64-1469766236.tgz
md5: f31b1e164e29b0782eae9bd3bb6a288a
- name: php
version: 5.6.23
uri: https://buildpacks.cloudfoundry.org/dependencies/php/php-5.6.23-linux-x64-1469767807.tgz
md5: 9ffbd67e557f4569de8d876664a6bd33
- name: php
version: 5.6.24
uri: https://buildpacks.cloudfoundry.org/dependencies/php/php-5.6.24-linux-x64-1469768750.tgz
md5: 35b5e1ccce1f2ca7e55c81b11f278a3f
- name: php
version: 7.0.8
uri: https://buildpacks.cloudfoundry.org/dependencies/php7/php7-7.0.8-linux-x64-1469764417.tgz
md5: a479fec08ac8400ca9d775a88ddb2962
- name: php
version: 7.0.9
uri: https://buildpacks.cloudfoundry.org/dependencies/php7/php7-7.0.9-linux-x64-1469765150.tgz
cf_stacks:
- cflinuxfs2
md5: 19e8318e1cee3fa9fd8fdcc358f01076
'''
| 2.28125
| 2
|
LotteryResult.py
|
emmmmmmmmmmmmmmmmm/bilibili-live-tools
| 1
|
12776582
|
from bilibili import bilibili
import requests
import asyncio
import time
class LotteryResult(bilibili):
async def query(self):
while 1:
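            # The Chinese strings printed below translate as: "检查抽奖结果" = "checking
            # raffle results", "网页端活动抽奖结果" = "web activity raffle result",
            # "小电视道具抽奖结果" = "small-TV gift raffle result", "房间" = "room".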
print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())), "检查抽奖结果")
# print(self.activity_raffleid_list)
if self.activity_raffleid_list:
for i in range(0,len(self.activity_roomid_list)):
url = "http://api.live.bilibili.com/activity/v1/Raffle/notice?roomid="+str(self.activity_roomid_list[0])+"&raffleId="+str(self.activity_raffleid_list[0])
headers = {
'Accept': 'application/json, text/plain, */*',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
'Accept-Language': 'zh-CN,zh;q=0.9',
'accept-encoding': 'gzip, deflate',
'Host': 'api.live.bilibili.com',
'cookie': self.cookie,
}
response = requests.get(url, headers=headers)
try:
print("# 房间", str(self.activity_roomid_list[0]).center(9), "网页端活动抽奖结果:", response.json()['data']['gift_name']+"x"+str(response.json()['data']['gift_num']))
del self.activity_roomid_list[0]
del self.activity_raffleid_list[0]
del self.activity_time_list[0]
except:
pass
# print(self.TV_raffleid_list)
if self.TV_raffleid_list:
for i in range(0, len(self.TV_roomid_list)):
url="http://api.live.bilibili.com/gift/v2/smalltv/notice?roomid="+str(self.TV_roomid_list[0])+"&raffleId="+str(self.TV_raffleid_list[0])
headers = {
'Accept': 'application/json, text/plain, */*',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
'Accept-Language': 'zh-CN,zh;q=0.9',
'accept-encoding': 'gzip, deflate',
'Host': 'api.live.bilibili.com',
'cookie': self.cookie,
}
response = requests.get(url, headers=headers)
if response.json()['data']['gift_name'] != "":
try:
print("# 房间", str(self.TV_roomid_list[0]).center(9), "小电视道具抽奖结果:", (response.json()['data']['gift_name'])+"x"+str(response.json()['data']['gift_num']))
del self.TV_roomid_list[0]
del self.TV_raffleid_list[0]
del self.TV_time_list[0]
except:
pass
await asyncio.sleep(60)
| 2.609375
| 3
|
python/234_Palindrome_Linked_List.py
|
dvlpsh/leetcode-1
| 4,416
|
12776583
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
# def __init__(self):
# self.curr_head = None
#
# def isPalindrome(self, head):
# """
# :type head: ListNode
# :rtype: bool
# """
# self.curr_head = head
# return self.check(head)
#
# def check(self, node):
# if node is None:
# return True
# isPal = self.check(node.next) and (self.curr_head.val == node.val)
# self.curr_head = self.curr_head.next
# return isPal
def isPalindrome(self, head):
# p2 is 2 times faster than p3
        # p1 and pre are used to reverse the first half of the list
# so when the first while is over
# p1 is in the middle
# p3 is in middle + 1
# p2 is in the end
if head is None:
return True
p1, p2 = head, head
p3, pre = p1.next, p1
while p2.next is not None and p2.next.next is not None:
p2 = p2.next.next
pre = p1
p1 = p3
p3 = p3.next
p1.next = pre
if p2.next is None:
p1 = p1.next
while p3 is not None:
if p1.val != p3.val:
return False
p1 = p1.next
p3 = p3.next
return True
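# A minimal self-check sketch (hypothetical; the ListNode class below mirrors the
# commented-out definition above and is not part of the LeetCode harness).
if __name__ == '__main__':
    class ListNode(object):
        def __init__(self, x):
            self.val = x
            self.next = None
    # Build the list 1 -> 2 -> 2 -> 1, which should be reported as a palindrome.
    head = ListNode(1)
    head.next = ListNode(2)
    head.next.next = ListNode(2)
    head.next.next.next = ListNode(1)
    assert Solution().isPalindrome(head) is True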
| 3.9375
| 4
|
pymcdyes.py
|
pudquick/pyMCdyes
| 5
|
12776584
|
<gh_stars>1-10
import zipfile, sys, os.path, exceptions, time
from collections import namedtuple
from itertools import imap
col = namedtuple('col', 'r g b')
color_map, base_colors, base_mods = None, None, None
try:
from itertools import combinations_with_replacement
except:
# Replacement recipe for python 2.6
def combinations_with_replacement(iterable, r):
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
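# hc parses an '#RRGGBB' hex string into a col(r, g, b) tuple; ch is its inverse.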
def hc(s):
return col(*map(lambda x: int(x,16), map(''.join, zip(*[iter(s.strip().lstrip('#'))]*2))))
def ch(c):
return ('#' + hex(c.r)[2:].rjust(2,'0') + hex(c.g)[2:].rjust(2,'0') + hex(c.b)[2:].rjust(2,'0')).upper()
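# avg_color returns the component-wise integer average of a list of colors;
# passing l=1 makes it return the component-wise sum instead.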
def avg_color(colors, l=None):
if not l:
l = len(colors)
return col(sum([c.r for c in colors])//l,sum([c.g for c in colors])//l,sum([c.b for c in colors])//l)
dyes = {hc('#191919'): 'Ink Sac',
hc('#CC4C4C'): 'Rose Red',
hc('#667F33'): 'Cactus Green',
hc('#7F664C'): 'Cocoa Beans',
hc('#3366CC'): 'Lapis Lazuli',
hc('#B266E5'): 'Purple Dye',
hc('#4C99B2'): 'Cyan Dye',
hc('#999999'): 'Light Gray Dye',
hc('#4C4C4C'): 'Gray Dye',
hc('#F2B2CC'): 'Pink Dye',
hc('#7FCC19'): 'Lime Dye',
hc('#E5E533'): 'Dandelion Yellow',
hc('#99B2F2'): 'Light Blue Dye',
hc('#E57FD8'): 'Magenta Dye',
hc('#F2B233'): 'Orange Dye',
hc('#FFFFFF'): 'Bone Meal'}
def init_bases(dye_dict):
start = time.time()
new_colors, new_mods = dict(), [dict() for x in range(9)]
print "Generating map key (SLOW - takes a minute) ..."
for count in range(8,0,-1):
print "... Level %s of 8 ..." % count
for dye_set in combinations_with_replacement(dye_dict.items(), count):
new_color_name = '[' + '+'.join(imap(lambda x: x[1], dye_set)) + ']'
new_color_avg = avg_color(list(imap(lambda x: x[0], dye_set)))
new_color_sum = avg_color(list(imap(lambda x: x[0], dye_set)), 1)
new_colors[new_color_avg] = new_color_name
new_mods[count][new_color_sum] = new_color_name
print "... Sorting ..."
for count in range(9):
new_mods[count] = tuple(sorted(new_mods[count].items(), key=lambda x: x[0]))
new_colors = tuple(sorted(new_colors.items(), key=lambda x: x[0]))
print "... Done!"
stop = time.time()
# Offer to save time by caching
print "NOTE: This process took %0.2f seconds to complete." % (stop - start)
print "For the cost of ~95MB of storage, would you like to cache these results?"
print "(Average speedup time for loading cached results: 5x times faster - or more!)"
response = raw_input("[N/y]: ").strip().lower()
if (response in ["yes", "y"]):
print "Saving base_colors.cache ..."
f = open('base_colors.cache','wb')
for x_c, x_n in new_colors:
f.write("%s\t%s\t%s\t%s\n" % (x_c.r, x_c.g, x_c.b,x_n))
f.close()
print "Saving base_mods.cache ..."
f = open('base_mods.cache','wb')
for i in range(8,0,-1):
for x_c, x_n in new_mods[i]:
f.write("%s\t%s\t%s\t%s\t%s\n" % (i,x_c.r, x_c.g, x_c.b,x_n))
f.close()
print "... Done!"
else:
print "Skipped caching."
return [new_colors, new_mods]
def init_cached_bases():
print "Reading cached map key (SPEEDY-ISH - takes a few seconds) ..."
cached_base_colors = []
f = open('base_colors.cache','r')
for line in f.xreadlines():
r,g,b,n = line.split('\t')
cached_base_colors.append((col(int(r),int(g),int(b)), n[:-1]))
f.close()
cached_base_colors = tuple(cached_base_colors)
cached_base_mods = [[] for x in range(9)]
f = open('base_mods.cache','r')
old_level = ""
for line in f.xreadlines():
i,r,g,b,n = line.split('\t')
if old_level != i:
print "... Level %s of 8 ..." % i
old_level = i
cached_base_mods[int(i)].append((col(int(r),int(g),int(b)), n[:-1]))
f.close()
for i in range(9):
cached_base_mods[i] = tuple(cached_base_mods[i])
print "... Validating ..."
if (len(cached_base_colors) == 327842) and (len(cached_base_mods[8]) == 414081):
print "... Done!"
return [cached_base_colors, cached_base_mods]
else:
print "... ERROR in cache! ... rebuilding ..."
raise exceptions.Exception('Cache Mismatch')
def init_color_map():
print "Loading color map (FAST) ..."
archive = zipfile.ZipFile('color_map.zip', 'r')
f_map = archive.open('color_map.bytearray')
new_map = bytearray(f_map.read())
f_map.close()
archive.close()
print "... Done!"
return new_map
def get_color_data(target_c):
global color_map
if (target_c.r < 25) or (target_c.g < 25) or (target_c.b < 25):
return (col(0,0,0),0,1)
i = ((target_c.r-25) + (target_c.g-25)*231 + (target_c.b-25)*231*231)*6
source_c = col(*color_map[i:i+3])
mod_level = ((color_map[i+3] & 0xe0) >> 5) + 1
mod_0a = color_map[i+3] & 0x1f
mod_i = (mod_0a << 16) | (color_map[i+4] << 8) | (color_map[i+5])
return (source_c, mod_i, mod_level)
def get_color_ancestry(target_c):
global color_map, base_colors, base_mods
INVALID_COLOR = col(0,0,0)
BASE_COLOR = col(1,1,1)
parent_c, mod_i, mod_level = get_color_data(target_c)
if (parent_c == INVALID_COLOR):
return []
elif (parent_c == BASE_COLOR):
return [(base_colors[mod_i][0], base_colors[mod_i][1])]
else:
ancestry = get_color_ancestry(parent_c)
if ancestry:
return ancestry + [(base_mods[mod_level][mod_i][0], base_mods[mod_level][mod_i][1])]
return ancestry
def verify_ancestry(target_c):
a_chain = get_color_ancestry(target_c)
new_c = a_chain[0][0]
for dye_set, dye_name in a_chain[1:]:
new_c = avg_color([new_c, dye_set], dye_name.count('+') + 2)
return (new_c, target_c)
def color_exists(target_c):
global color_map
if (target_c.r < 25) or (target_c.g < 25) or (target_c.b < 25): return False
i = ((target_c.r-25) + (target_c.g-25)*231 + (target_c.b-25)*231*231)*6
return bool(color_map[i])
def vector_projection(target_c):
scalar = sum(map(lambda x: x*0.57735, target_c))
new_coord = min(int(0.57735*scalar+0.5), 255)
if new_coord > 24:
return col(new_coord,new_coord,new_coord)
return None
def dist_3d(target_c, new_c):
return int(sum(map(lambda x: (x[1] - x[0])**2, zip(target_c, new_c)))**0.5 + 1)
def next_pixel_in_3d(source_c, dest_c):
x1,y1,z1 = pixel = list(source_c)
x2,y2,z2 = dest_c
dx,dy,dz = x2-x1, y2-y1, z2-z1
x_inc = ((dx < 0) and -1) or 1
y_inc = ((dy < 0) and -1) or 1
z_inc = ((dz < 0) and -1) or 1
l,m,n = abs(dx), abs(dy), abs(dz)
dx2,dy2,dz2 = l << 1, m << 1, n << 1
if (l >= m) and (l >= n):
err_1, err_2 = dy2-l, dz2-l
for i in range(l):
if (color_exists(col(*pixel))):
return col(*pixel)
if (err_1 > 0):
pixel[1] += y_inc
err_1 -= dx2
if (err_2 > 0):
pixel[2] += z_inc
err_2 -= dx2
err_1 += dy2
err_2 += dz2
pixel[0] += x_inc
elif (m >= l) and (m >= n):
err_1, err_2 = dx2-m, dz2-m
for i in range(m):
if (color_exists(col(*pixel))):
return col(*pixel)
if (err_1 > 0):
pixel[0] += x_inc
err_1 -= dy2
if (err_2 > 0):
pixel[2] += z_inc
err_2 -= dy2
err_1 += dx2
err_2 += dz2
pixel[1] += y_inc
else:
err_1, err_2 = dy2-n, dx2-n
for i in range(n):
if (color_exists(col(*pixel))):
return col(*pixel)
if (err_1 > 0):
pixel[1] += y_inc
err_1 -= dz2
if (err_2 > 0):
pixel[0] += x_inc
err_2 -= dz2
err_1 += dy2
err_2 += dx2
pixel[2] += z_inc
if (color_exists(col(*pixel))):
return col(*pixel)
return None
def cube_search(target_c, max_dist=255):
print "\nSearching for the closest colors in RGB color space ..."
r, best_dist, best_cs = 1, 500, []
# double max dist passed
max_dist = 2*max_dist
while (r <= max_dist):
# search sides in y-axis:
for dy in ([0] + [j for i in zip(range(1,r),range(-1,-r,-1)) for j in i]):
# This generates a list like [0, 1, -1, 2, -2, ... r-1, -r+1]
y_g = target_c.g + dy
if 24 < y_g < 256:
# side 1 & 2
for x_r in [target_c.r + r, target_c.r - r]:
if 24 < x_r < 256:
for z_b in range(target_c.b - r, target_c.b + r + 1):
if 24 < z_b < 256:
t_c = col(x_r, y_g, z_b)
if color_exists(t_c):
# Found a color, update the max distance range
t_dist = dist_3d(target_c, t_c)
if t_dist < best_dist:
max_dist = min(max_dist, 2*t_dist)
best_dist = t_dist
best_cs = [t_c]
elif t_dist == best_dist:
best_cs.append(t_c)
# side 3 & 4
for z_b in [target_c.b + r, target_c.b - r]:
if 24 < z_b < 256:
for x_r in range(target_c.r - r + 1, target_c.r + r):
if 24 < x_r < 256:
t_c = col(x_r, y_g, z_b)
if color_exists(t_c):
# Found a color, update the max distance range
t_dist = dist_3d(target_c, t_c)
if t_dist < best_dist:
max_dist = min(max_dist, 2*t_dist)
best_dist = t_dist
best_cs = [t_c]
elif t_dist == best_dist:
best_cs.append(t_c)
# search top & bottom in y-axis
for y_g in [target_c.g + r, target_c.g - r]:
if 24 < y_g < 256:
for x_r in range(target_c.r - r, target_c.r + r + 1):
if 24 < x_r < 256:
for z_b in range(target_c.b - r, target_c.b + r + 1):
if 24 < z_b < 256:
t_c = col(x_r, y_g, z_b)
if color_exists(t_c):
# Found a color, update the max distance range
t_dist = dist_3d(target_c, t_c)
if t_dist < best_dist:
max_dist = min(max_dist, 2*t_dist)
best_dist = t_dist
best_cs = [t_c]
elif t_dist == best_dist:
best_cs.append(t_c)
r += 1
if best_cs:
return (best_dist, best_cs)
return None
def try_offer_alternative(target_c):
instr = raw_input("Look for a closest match? [Y/n]: ").strip().lower()
if instr not in ['n', 'no', 'quit', 'q']:
print "Suggested closest colors:\n-------------------------"
max_dist = 256
t_c = next_pixel_in_3d(target_c, col(127,127,127))
if t_c:
print "Towards gray: %s - %s" % (ch(t_c), t_c)
max_dist = min(dist_3d(t_c, target_c), max_dist)
t_c = next_pixel_in_3d(target_c, col(255,255,255))
if t_c:
print "Towards white: %s - %s" % (ch(t_c), t_c)
max_dist = min(dist_3d(t_c, target_c), max_dist)
v_c = vector_projection(target_c)
if v_c:
t_c = next_pixel_in_3d(target_c, v_c)
if t_c:
print "Projection mapped to black->white: %s - %s" % (ch(t_c), t_c)
max_dist = min(dist_3d(t_c, target_c), max_dist)
t_c = next_pixel_in_3d(target_c, col(25,25,25))
if t_c:
print "Towards black: %s - %s" % (ch(t_c), t_c)
max_dist = min(dist_3d(t_c, target_c), max_dist)
closest_c = cube_search(target_c, max_dist)
if closest_c:
c_dist, t_cs = closest_c
print "The following", len(t_cs), "closest colors were found at a distance of:", c_dist
print ",\n".join([(" " + ", ".join(map(lambda x: "%s - %s" % (ch(x), x), t_cs[i:i+2]))) for i in range(0, len(t_cs), 2)])
print ""
def pprint_ancestry(target_c, DEBUG=False):
print "Goal color: %s - %s" % (ch(target_c), target_c)
print "Checking ..."
ancestry = get_color_ancestry(target_c)
if not ancestry:
print "... Sorry, but this color is not reachable with this map!\n"
try_offer_alternative(target_c)
else:
print "... FOUND!\n"
print "(Apply these dye sets, in order, starting with a new leather item!)"
print "\nRecipe for color:\n-----------------"
for rgb, dyes in ancestry:
if DEBUG:
print "- %s \\\\ %s - %s" % (dyes, ch(rgb), rgb)
else:
print "- %s" % dyes
result,target = verify_ancestry(target_c)
if (target == result):
print "\nRecipe Verified: GOOD\n"
else:
print "\nRecipe Verified: ERROR!! (please let pudquick@github-or-r/minecraft know!)"
print "Problem color in question:", target_c
sys.exit(1)
def init_main():
global color_map, base_colors, base_mods
color_map = init_color_map()
if (os.path.exists('base_colors.cache') and os.path.exists('base_mods.cache')):
try:
# Attempt loading cache
base_colors, base_mods = init_cached_bases()
except:
# Cache mismatch, redo
base_colors, base_mods = init_bases(dyes)
else:
base_colors, base_mods = init_bases(dyes)
print "[4,001,584 color recipes loaded]\n"
def main():
init_main()
while True:
instr = raw_input("[Enter RRGGBB hex color to find or Q to quit]: ").strip().lower()
if instr in ['q','quit','exit']:
break
else:
print
try:
target_c = hc(instr)
pprint_ancestry(target_c)
except:
print "... Whoops! Something went wrong there, let's try again ..."
if __name__ == "__main__":
main()
| 2.546875
| 3
|
lib/telegram/message.py
|
yosit/kinnernet_bot
| 0
|
12776585
|
#!/usr/bin/env python
from telegram import TelegramObject
class Message(TelegramObject):
def __init__(self,
message_id,
from_user,
date,
chat,
forward_from=None,
forward_date=None,
reply_to_message=None,
text=None,
audio=None,
document=None,
photo=None,
sticker=None,
video=None,
contact=None,
location=None,
new_chat_participant=None,
left_chat_participant=None,
new_chat_title=None,
new_chat_photo=None,
delete_chat_photo=None,
group_chat_created=None):
self.message_id = message_id
self.from_user = from_user
self.date = date
self.chat = chat
self.forward_from = forward_from
self.forward_date = forward_date
self.reply_to_message = reply_to_message
self.text = text
self.audio = audio
self.document = document
self.photo = photo
self.sticker = sticker
self.video = video
self.contact = contact
self.location = location
self.new_chat_participant = new_chat_participant
self.left_chat_participant = left_chat_participant
self.new_chat_title = new_chat_title
self.new_chat_photo = new_chat_photo
self.delete_chat_photo = delete_chat_photo
self.group_chat_created = group_chat_created
@property
def chat_id(self):
return self.chat.id
@staticmethod
def de_json(data):
if 'from' in data: # from is a reserved word, use from_user instead.
from telegram import User
from_user = User.de_json(data['from'])
else:
from_user = None
if 'chat' in data:
if 'first_name' in data['chat']:
from telegram import User
chat = User.de_json(data['chat'])
if 'title' in data['chat']:
from telegram import GroupChat
chat = GroupChat.de_json(data['chat'])
else:
chat = None
if 'forward_from' in data:
from telegram import User
forward_from = User.de_json(data['forward_from'])
else:
forward_from = None
if 'reply_to_message' in data:
reply_to_message = Message.de_json(data['reply_to_message'])
else:
reply_to_message = None
if 'text' in data:
text = data['text']
else:
text = None
if 'audio' in data:
from telegram import Audio
audio = Audio.de_json(data['audio'])
else:
audio = None
if 'document' in data:
from telegram import Document
document = Document.de_json(data['document'])
else:
document = None
if 'photo' in data:
from telegram import PhotoSize
photo = [PhotoSize.de_json(x) for x in data['photo']]
else:
photo = None
if 'sticker' in data:
from telegram import Sticker
sticker = Sticker.de_json(data['sticker'])
else:
sticker = None
if 'video' in data:
from telegram import Video
video = Video.de_json(data['video'])
else:
video = None
if 'contact' in data:
from telegram import Contact
contact = Contact.de_json(data['contact'])
else:
contact = None
if 'location' in data:
from telegram import Location
location = Location.de_json(data['location'])
else:
location = None
if 'new_chat_participant' in data:
from telegram import User
new_chat_participant = User.de_json(data['new_chat_participant'])
else:
new_chat_participant = None
if 'left_chat_participant' in data:
from telegram import User
left_chat_participant = User.de_json(data['left_chat_participant'])
else:
left_chat_participant = None
return Message(message_id=data.get('message_id', None),
from_user=from_user,
date=data.get('date', None),
chat=chat,
forward_from=forward_from,
forward_date=data.get('forward_date', None),
reply_to_message=reply_to_message,
text=text,
audio=audio,
document=document,
photo=photo,
sticker=sticker,
video=video,
contact=contact,
location=location,
new_chat_participant=new_chat_participant,
left_chat_participant=left_chat_participant,
new_chat_title=data.get('new_chat_title', None),
new_chat_photo=data.get('new_chat_photo', None),
delete_chat_photo=data.get('delete_chat_photo', None),
group_chat_created=data.get('group_chat_created', None))
def to_dict(self):
data = {'message_id': self.message_id,
'from': self.from_user.to_dict(),
'date': self.date,
'chat': self.chat.to_dict()}
if self.forward_from:
data['forward_from'] = self.forward_from
if self.forward_date:
data['forward_date'] = self.forward_date
if self.reply_to_message:
data['reply_to_message'] = self.reply_to_message
if self.text:
data['text'] = self.text
if self.audio:
data['audio'] = self.audio.to_dict()
if self.document:
data['document'] = self.document.to_dict()
if self.photo:
data['photo'] = [p.to_dict() for p in self.photo]
if self.sticker:
data['sticker'] = self.sticker.to_dict()
if self.video:
data['video'] = self.video.to_dict()
if self.contact:
data['contact'] = self.contact.to_dict()
if self.location:
data['location'] = self.location.to_dict()
if self.new_chat_participant:
data['new_chat_participant'] = self.new_chat_participant
if self.left_chat_participant:
data['left_chat_participant'] = self.left_chat_participant
if self.new_chat_title:
data['new_chat_title'] = self.new_chat_title
if self.new_chat_photo:
data['new_chat_photo'] = self.new_chat_photo
if self.delete_chat_photo:
data['delete_chat_photo'] = self.delete_chat_photo
if self.group_chat_created:
data['group_chat_created'] = self.group_chat_created
return data
| 2.734375
| 3
|
cyanobyte/validator.py
|
isabella232/cyanobyte
| 70
|
12776586
|
<filename>cyanobyte/validator.py
"""CyanoByte Validator
The CyanoByte validator is used to ensure that a CyanoByte
document meets the specification.
"""
import sys
import json
import click
import os
import os.path as path
import yaml
from yaml.constructor import ConstructorError
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
from jsonschema import validate, ValidationError
class CyanobyteValidationError:
def __init__(self, input_file, err):
self.input_file = input_file
self.err = err
# See https://gist.github.com/pypt/94d747fe5180851196eb
def no_duplicates_constructor(loader, node, deep=False):
"""Check for duplicate keys."""
mapping = {}
for key_node, value_node in node.value:
key = loader.construct_object(key_node, deep=deep)
value = loader.construct_object(value_node, deep=deep)
if key in mapping:
raise ConstructorError("while constructing a mapping", node.start_mark,
"found duplicate key (%s)" % key, key_node.start_mark)
mapping[key] = value
return loader.construct_mapping(node, deep)
yaml.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, no_duplicates_constructor, Loader=Loader)
def cyanobyte_validate(input_files):
"""
Validate a list of CyanoByte documents.
Args:
input_files: A list of CyanoByte documents to validate.
"""
    # Load the JSON Schema file
    schema_path = "cyanobyte-spec/cyanobyte.schema.json"
    try:
        import pkg_resources
        schema_path = pkg_resources.resource_filename('cyanobyte-spec', 'cyanobyte.schema.json')
    except:
        pass
    with open(schema_path, "r") as schema_json:
schema = json.load(schema_json)
# Validate each document against the schema
errors = []
for input_file in input_files:
with open(input_file, "r") as document_yaml:
try:
document_dict = yaml.load(document_yaml, Loader=Loader)
validate(instance=document_dict, schema=schema)
print('✓ ' + input_file)
except (ConstructorError, ValidationError) as err:
print('✘ ' + input_file)
errors.append(CyanobyteValidationError(input_file, err))
# Dump all errors here
print('')
for e in errors:
print(e.input_file + ':')
print(e.err)
def unittest(input_files):
# Load the python-unittest template file
template = "cyanobyte-templates/python-unittest.py"
try:
import pkg_resources
template = pkg_resources.resource_filename('cyanobyte-templates', 'python-unittest.py')
    except:
        pass
if _DEBUG:
print('cyanobyte-codegen \
-c \
-o ./tmp/ \
-t ' + template + ' ' + ' \
'.join(input_files))
os.system('cyanobyte-codegen \
-c \
-o ./tmp/ \
-t ' + template + ' ' + ' \
'.join(input_files))
for i in input_files:
# Now execute each unittest
file = i.replace('.yaml', '.py')
file = path.basename(file)
# NOTE: Won't work for different package names
if _DEBUG:
print('python3 -m unittest tmp/com/cyanobyte/' + file)
os.system('python3 -m unittest tmp/com/cyanobyte/' + file)
@click.command()
@click.option("-d", "--debug", "debug", default=False)
@click.option("--validate-schema", "flag_schema", is_flag=True)
@click.option("--unit-test", "flag_unittest", is_flag=True)
@click.argument("input_files", type=click.Path(exists=True), nargs=-1)
def click_validate(input_files, flag_schema, flag_unittest, debug=False):
"""
Main command line entrypoint
Args:
input_files: A list of CyanoByte documents to validate.
"""
#pylint: disable=global-statement
global _DEBUG
_DEBUG = debug
run_schema = True
run_unittest = True
# Default all modes to true unless there's a flag
if flag_schema or flag_unittest:
# Then we only allow a few modes
if not flag_schema:
run_schema = False
if not flag_unittest:
run_unittest = False
if run_schema:
cyanobyte_validate(input_files)
if run_unittest:
unittest(input_files)
if __name__ == "__main__":
click_validate(sys.argv[1:])
| 3.078125
| 3
|
src/nn_job_processor.py
|
jsphweid/annhouga
| 1
|
12776587
|
<reponame>jsphweid/annhouga<filename>src/nn_job_processor.py
import boto3, json
from nn_processors import basic_nn_processor
sqs = boto3.resource('sqs')
nn_job_queue = sqs.get_queue_by_name(QueueName='annhouga-nn-jobs')
rds_job_queue = sqs.get_queue_by_name(QueueName='annhouga-rds-jobs')
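# Long-poll the NN job queue, run each message through basic_nn_processor, push
# the result onto the RDS job queue, and only then delete the original message.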
while 1:
print('[*] Waiting for messages. To exit press CTRL+C')
messages = nn_job_queue.receive_messages(WaitTimeSeconds=20, MaxNumberOfMessages=1)
for message in messages:
print("Message received: {0}".format(message.body))
result = basic_nn_processor(json.loads(message.body))
response = rds_job_queue.send_message(MessageBody=json.dumps(result))
if response:
message.delete()
| 2.25
| 2
|
setup.py
|
steveharwell1/linearGraph-flask
| 0
|
12776588
|
from setuptools import setup
setup(
name='graphs',
packages=['graphs'],
include_package_data=True,
install_requires=[
'flask',
'matplotlib',
]
)
| 1.078125
| 1
|
idpy/LBM/SCThermo.py
|
lullimat/idea.deploy
| 1
|
12776589
|
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2020 <NAME> (lullimat/idea.deploy), <EMAIL>"
__credits__ = ["<NAME>"]
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
'''
Provides classes for the computation of the thermodynamic quantities related
to the Shan-Chen model
'''
import scipy.integrate as integrate
from scipy.optimize import fsolve, bisect
from numpy import linspace
from sympy import Rational, diff, simplify
from sympy import lambdify as sp_lambdify
from sympy import symbols as sp_symbols
from sympy import exp as sympy_exp
from sympy.solvers.solveset import nonlinsolve
from sympy.solvers import solve
from functools import reduce
import math
from idpy.LBM.SCFStencils import SCFStencils
from idpy.Utils.ManageData import ManageData
def FindSingleZeroRange(func, x_init, delta_val):
old_val, new_val = func(x_init), func(x_init)
while old_val * new_val > 0:
old_val = new_val
x_init += delta_val
new_val = func(x_init)
return (x_init - delta_val, x_init)
def FindZeroRanges(func, n_range, n_bins, n_delta, debug_flag = False):
zero_ranges = []
old_val, new_val = 0, 0
# Here I can use linspace
for n_i in range(n_bins):
new_val = func(n_range[0] + n_delta * n_i)
if debug_flag:
print(n_bins, n_i, n_range[0] + n_delta * n_i, new_val, old_val)
print(n_i > 0, old_val * new_val < 0, n_i > 0 and old_val * new_val < 0)
print()
if n_i > 0 and old_val * new_val < 0:
zero_ranges.append((n_range[0] + n_delta * (n_i - 1),
n_range[0] + n_delta * n_i))
old_val = new_val
return zero_ranges
def FindExtrema(func, f_arg, arg_range = (0.01,3.), arg_bins = 256):
d_func = lambda f_arg_: diff(func,f_arg).subs(f_arg, f_arg_)
arg_delta = (arg_range[1] - arg_range[0])/arg_bins
zero_ranges = FindZeroRanges(d_func, arg_range, arg_bins, arg_delta)
print("zero_ranges: ", zero_ranges)
extrema = []
for z_range in zero_ranges:
# Initialization point from LEFT -> z_range[0] NOT z_range[1]
arg_swap = bisect(d_func, z_range[0], z_range[1])
f_swap = func.subs(f_arg, arg_swap)
extrema.append((arg_swap,f_swap))
return extrema
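# Minimal usage sketch (illustrative only): with the default psi = exp(-1/n) the
# constructor locates the critical point (G_c, n_c) and, when G_val lies below
# the critical coupling, the coexistence densities as well.
#
#   sc = ShanChen(G_val=-3.6)
#   print(sc.G_c, sc.n_c)           # critical coupling and critical density
#   print(sc.coexistence_range)     # (n, P) pairs bounding coexistence, if it exists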
class ShanChen:
# Symbols should be safe here
n, G, theta, psi, d_psi, e2 = \
sp_symbols("n G \\theta \\psi \\psi' e_{2}")
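    # Shan-Chen bulk equation of state: P = theta*n + (G*e2/2) * psi(n)**2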
P = theta*n + Rational('1/2')*G*e2*psi**2
def __init__(self,
psi_f = None,
G_val = -3.6, theta_val = 1., e2_val = 1.,
n_eps = 0.01):
# Variables Init
self.psi_f = sympy_exp(-1/self.n) if psi_f is None else psi_f
#print(self.psi_f)
self.G_val, self.theta_val, self.e2_val = G_val, theta_val, e2_val
self.n_eps = n_eps
self.d_psi_f = diff(self.psi_f, self.n)
self.P_subs = self.P.subs(self.psi, self.psi_f).subs(self.G, self.G_val)
self.P_subs = self.P_subs.subs(self.theta, self.theta_val).subs(self.e2, self.e2_val)
self.P_subs_lamb = sp_lambdify(self.n, self.P_subs)
# Find Critical Point
## This substitution leaves both n and G free
P_subs_swap = self.P.subs(self.psi, self.psi_f)
P_subs_swap = P_subs_swap.subs(self.theta, self.theta_val)
P_subs_swap = P_subs_swap.subs(self.e2, self.e2_val)
self.d_P = diff(P_subs_swap, self.n)
self.dd_P = diff(self.d_P, self.n)
#print([self.d_P, self.dd_P])
self.critical_point = solve([self.d_P, self.dd_P], [self.G, self.n])
self.G_c, self.n_c = float(self.critical_point[0][0]), float(self.critical_point[0][1])
self.P_c = P_subs_swap.subs(self.n, self.n_c).subs(self.G, self.G_c)
if self.G_val * self.e2_val > self.G_c * self.e2_val:
print("The value of G: %f is above the critical point G_c: %f for the chosen %s" % (self.G_val, self.G_c, str(self.psi) + " = " + str(self.psi_f)))
print("-> No phase separation")
else:
# Find Extrema
lambda_tmp = sp_lambdify(self.n, self.P_subs - self.P_c)
            ## Here I want to find the value of the density that corresponds to the critical
## pressure because by construction this value of the density is larger than
## any coexistence extreme, and there is only one
self.range_ext = FindSingleZeroRange(lambda_tmp, self.n_eps, self.n_eps)[1]
## Hence we can look for extrema starting from self.n_eps to self.range_ext
## Cannot begin from zero because for some choices of \psi the derivative
## might be singular
print("Extrema:", self.range_ext)
self.extrema = FindExtrema(self.P_subs, self.n,
arg_range = (self.n_eps, self.range_ext))
self.coexistence_range = self.FindCoexistenceRange()
print("Coexistence range (n, P): ", self.coexistence_range)
print()
### Init Ends
def PressureTensorInit(self, py_stencil):
self.PTensor = self.PressureTensor(py_stencil)
def FlatInterfaceProperties(self, which_sol = 0, eps_val = None):
self.FInterface = self.FlatInterface(self, self.PTensor, which_sol, eps_val)
def FindCoexistenceRange(self):
coexistence_range = []
'''
With this check we can manage values of the coupling for which one has
negative pressures
'''
if self.extrema[1][1] > 0:
func_f = lambda f_arg_: (self.P_subs.subs(self.n, f_arg_) - self.extrema[1][1])
# Looking for the LEFT limit starting from ZERO
# and ending after the first stationary point
arg_swap = bisect(func_f, self.n_eps, self.extrema[0][0])
p_swap = self.P_subs.subs(self.n, arg_swap)
coexistence_range.append((arg_swap, p_swap))
else:
coexistence_range.append((0, 0))
# Looking for the RIGHT limit starting from the RIGHT extremum
# that is certainly at the LEFT of the value we are looking for
func_f = lambda f_arg_: (self.P_subs.subs(self.n, f_arg_) - self.extrema[0][1])
arg_swap = bisect(func_f, self.extrema[1][0] + self.n_eps, self.range_ext + self.n_eps)
p_swap = self.P_subs.subs(self.n, arg_swap)
coexistence_range.append((arg_swap, p_swap))
return coexistence_range
####################################################################################
### Subclass: FlatInterface
####################################################################################
class FlatInterface:
def __init__(self, SC, PTensor, which_sol, eps_val):
self.SC, self.PTensor = SC, PTensor
# defining epsilon
if eps_val is None:
self.eps_val = \
PTensor.p_consts_wf['\epsilon'](self.PTensor.py_stencil.w_sol[which_sol])
else:
self.eps_val = eps_val
print("eps_val:", self.eps_val)
self.beta_val = self.PTensor.p_consts_wf['\beta'](self.PTensor.py_stencil.w_sol[which_sol])
self.sigma_c_val = self.PTensor.p_consts_wf['\sigma_c'](self.PTensor.py_stencil.w_sol[which_sol])
self.tolman_c_val = self.PTensor.p_consts_wf['t_c'](self.PTensor.py_stencil.w_sol[which_sol])
self.dndx = None
# defining symbols
self.p_0, self.n_g, self.n_l, self.n_p, self.d_n = \
sp_symbols("p_0 n_g n_l n' \\frac{dn}{dr}")
self.eps = self.PTensor.p_consts_sym['\epsilon']
self.beta = self.PTensor.p_consts_sym['\beta']
self.sigma_c = self.PTensor.p_consts_sym['\sigma_c']
self.tolman_c = self.PTensor.p_consts_sym['t_c']
# Defining the integrand
self.integrand = (self.p_0 - self.SC.P)*self.SC.d_psi_f/(self.SC.psi_f**(1 + self.eps))
# Substituting \theta and e_2 and psi and eps and G
self.integrand = self.integrand.subs(self.SC.theta, self.SC.theta_val)
self.integrand = self.integrand.subs(self.SC.e2, 1)
self.integrand = self.integrand.subs(self.SC.psi, self.SC.psi_f)
self.integrand = self.integrand.subs(self.eps, self.eps_val)
self.integrand = self.integrand.subs(self.SC.G, self.SC.G_val)
# Make a function of n and p_0
self.integrand_np = \
(lambda n_, p_ :
self.integrand.subs(self.SC.n, n_).subs(self.p_0, p_).evalf())
# Numerical value of the Maxwell Construction's Integral
self.maxwell_integral = \
(lambda target_values:
integrate.quad((lambda n_ : self.integrand_np(n_, target_values[0][1])),
target_values[0][0], target_values[1][0])[0])
# Numerical value as a function of the delta density
self.maxwell_integral_delta = \
(lambda delta_: self.maxwell_integral(self.GuessDensitiesFlat(delta_)))
def GuessDensitiesFlat(self, delta):
target_values = []
arg_init = self.SC.coexistence_range[0][0] + delta
func_init = self.SC.P_subs.subs(self.SC.n, arg_init)
target_values.append((arg_init, func_init))
arg_range, arg_bins = [arg_init, self.SC.coexistence_range[1][0]], 2 ** 10
arg_delta = (arg_range[1] - arg_range[0])/arg_bins
delta_func_f = (lambda arg_:
(self.SC.P_subs.subs(self.SC.n, arg_) -
self.SC.P_subs.subs(self.SC.n, arg_range[0])))
zero_ranges = FindZeroRanges(delta_func_f, arg_range, arg_bins, arg_delta,
debug_flag = False)
# Always pick the last range for the stable solution: -1
#print("zero_ranges:", zero_ranges)
#print(bisect(delta_func_f, zero_ranges[0][0], zero_ranges[0][1]))
#print(bisect(delta_func_f, zero_ranges[-1][0], zero_ranges[-1][1]))
solution = bisect(delta_func_f, zero_ranges[-1][0], zero_ranges[-1][1])
arg_swap = solution
func_swap = self.SC.P_subs.subs(self.SC.n, arg_swap)
target_values.append((arg_swap, func_swap))
return target_values
def MechanicEquilibrium(self, n_bins = 32):
# Need to find the zero of self.maxwell_integral_delta
# Delta can vary between (0, and the difference between the gas maximum
# and the beginning of the coexistence region
'''
search_range = \
[self.SC.n_eps,
self.SC.extrema[0][0] - self.SC.coexistence_range[0][0] - self.SC.n_eps]
'''
search_range = \
[self.SC.n_eps,
self.SC.extrema[0][0] - self.SC.coexistence_range[0][0]]
search_delta = (search_range[1] - search_range[0])/n_bins
mech_eq_range = FindZeroRanges(self.maxwell_integral_delta,
search_range, n_bins, search_delta,
debug_flag = False)
mech_eq_delta = bisect(self.maxwell_integral_delta,
mech_eq_range[0][0], mech_eq_range[0][1])
self.mech_eq_zero = self.maxwell_integral_delta(mech_eq_delta)
self.mech_eq_target = self.GuessDensitiesFlat(mech_eq_delta)
print(self.mech_eq_target)
def DNDXLambda(self, rho_g):
prefactor = 24 * ((self.SC.psi_f)**self.eps)/(self.beta * self.SC.G * (self.SC.d_psi_f)**2)
prefactor = prefactor.subs(self.beta, self.beta_val)
prefactor = prefactor.subs(self.SC.G, self.SC.G_val)
prefactor = prefactor.subs(self.eps, self.eps_val)
prefactor_n = lambda n_: prefactor.subs(self.SC.n, n_).evalf()
self.dndx = lambda n_: math.sqrt(prefactor_n(n_) * self.maxwell_integral([rho_g, [n_, rho_g[1]]]))
def SurfaceTension(self, mech_eq_target):
self.DNDXLambda(mech_eq_target[0])
prefactor = self.SC.G_val * self.sigma_c_val
integrand_n = lambda n_: self.dndx(n_) * (self.SC.d_psi_f**2).subs(self.SC.n, n_).evalf()
integral = integrate.quad(integrand_n, mech_eq_target[0][0], mech_eq_target[1][0])
self.sigma_f = prefactor * integral[0]
return self.sigma_f
####################################################################################
### Subclass: PressureTensor
####################################################################################
class PressureTensor:
def __init__(self, py_stencil):
# One stencil at the time
self.py_stencil = py_stencil
# Associating weights symbols
self.w_sym = self.py_stencil.w_sym
self.w_sym_list = self.py_stencil.w_sym_list
# Get e_expr
if not hasattr(self.py_stencil, 'e_expr'):
self.py_stencil.GetWolfEqs()
if not hasattr(self.py_stencil, 'typ_eq_s'):
self.py_stencil.GetTypEqs()
self.e_expr = self.py_stencil.e_expr
self.typ_eq_s = self.py_stencil.typ_eq_s
self.B2q_expr = self.py_stencil.B2q_expr
self.B2n_expr = self.py_stencil.B2n_expr
# Initializing Pressure Tensor symbols
self.PConstants()
self.InitPCoeff()
self.PExpressW()
def GetExprValues(self, w_sol = None):
## Need to add the new constants: Chi/Lambda
if w_sol is None:
w_sol = self.py_stencil.w_sol[0]
print(self.e_expr)
print("Isotropy constants")
for elem in self.e_expr:
w_i = 0
swap_expr = self.e_expr[elem]
for w in self.w_sym_list:
swap_expr = swap_expr.subs(w, w_sol[w_i])
w_i += 1
print(self.e_expr[elem], swap_expr)
print("\n")
print("Pressure Tensor Constants")
for elem in self.p_consts_sym:
w_i = 0
swap_expr = self.p_consts_w[elem]
for w in self.w_sym_list:
swap_expr = swap_expr.subs(w, w_sol[w_i])
w_i += 1
print(self.p_consts_sym[elem], swap_expr)
print("\n")
print("Typical Equations")
for elem in self.typ_eq_s:
for eq in self.typ_eq_s[elem]:
swap_expr = eq
w_i = 0
for w_sym in self.w_sym_list:
swap_expr = swap_expr.subs(w_sym, w_sol[w_i])
w_i += 1
print(elem, self.typ_eq_s[elem], swap_expr)
print("\n")
print("Wolfram Equations: B2q")
for elem in self.B2n_expr:
for eq in self.B2n_expr[elem]:
swap_expr = eq
w_i = 0
for w_sym in self.w_sym_list:
swap_expr = swap_expr.subs(w_sym, w_sol[w_i])
w_i += 1
print(elem, self.B2n_expr[elem], swap_expr)
print("\n")
print("Wolfram Equations: B2q")
for elem in self.B2q_expr:
for eq in self.B2q_expr[elem]:
swap_expr = eq
w_i = 0
for w_sym in self.w_sym_list:
swap_expr = swap_expr.subs(w_sym, w_sol[w_i])
w_i += 1
print(elem, self.B2q_expr[elem], swap_expr)
def InitPCoeff(self):
# List of coefficients for the pressure tensor constants
# Need to do this because each stencil can have a different
# number of groups: for now: no more than the first 5!
self.alpha_c, self.beta_c, self.gamma_c, self.eta_c, self.kappa_c, self.lambda_c = \
[0] * 25, [0] * 25, [0] * 25, [0] * 25, [0] * 25, [0] * 25
self.sigma_c_c, self.tolman_c_c = [0] * 25, [0] * 25
self.lambda_i_c, self.lambda_t_c, self.lambda_n_c = [0] * 25, [0] * 25, [0] * 25
self.chi_i_c, self.chi_t_c, self.chi_n_c = [0] * 25, [0] * 25, [0] * 25
# alpha
self.alpha_c[4], self.alpha_c[5], self.alpha_c[8] = 2, 4, 4
self.alpha_c[9], self.alpha_c[10] = 12, 24
self.alpha_c[13], self.alpha_c[16], self.alpha_c[17] = Rational(88, 3), 40, 80
# beta
self.beta_c[1], self.beta_c[2], self.beta_c[4], self.beta_c[5], self.beta_c[8] = \
Rational("1/2"), 1, 6, 13, 12
self.beta_c[9], self.beta_c[10] = Rational(57, 2), 58
self.beta_c[13], self.beta_c[16], self.beta_c[17] = Rational(203, 3), 88, 177
# gamma
self.gamma_c[5], self.gamma_c[8], self.gamma_c[10] = 1, 4, Rational(8, 3)
self.gamma_c[13], self.gamma_c[17] = Rational(68, 3), 5
# eta
self.eta_c[2], self.eta_c[5], self.eta_c[8], self.eta_c[10] = 1, 7, 12, Rational(46,3)
self.eta_c[13], self.eta_c[17] = Rational(148, 3), 27
# kappa
self.kappa_c[5], self.kappa_c[8] = 4, 8
# lambda
self.lambda_c[2], self.lambda_c[5], self.lambda_c[8] = 2, 12, 24
# sigma_c
self.sigma_c_c[1], self.sigma_c_c[4], self.sigma_c_c[5] = -6, -96, -108
self.sigma_c_c[9], self.sigma_c_c[10] = -486, -768
self.sigma_c_c[13], self.sigma_c_c[16], self.sigma_c_c[17] = -300, -1536, 2700
# tolman_c
self.tolman_c_c[1], self.tolman_c_c[4], self.tolman_c_c[5] = \
-Rational('1/2'), -6, -6
# Lambda_s
self.lambda_i_c[1], self.lambda_i_c[2], self.lambda_i_c[4] = Rational('1/2'), -2, 6
self.lambda_i_c[5], self.lambda_i_c[8] = -6, -24
self.lambda_t_c[2], self.lambda_t_c[5], self.lambda_t_c[8] = 2, 12, 24
self.lambda_n_c[2], self.lambda_n_c[5], self.lambda_n_c[8] = 1, 7, 12
# chi_s
self.chi_i_c[4], self.chi_i_c[5], self.chi_i_c[8] = 2, -1, -8
self.chi_t_c[5], self.chi_t_c[8] = 4, 8
self.chi_n_c[5], self.chi_n_c[8] = 1, 4
def PConstants(self):
# Defining symbols
self.p_consts_sym = {}
self.p_consts_sym['\alpha'] = sp_symbols('\\alpha')
self.p_consts_sym['\beta'] = sp_symbols('\\beta')
self.p_consts_sym['\gamma'] = sp_symbols('\\gamma')
self.p_consts_sym['\eta'] = sp_symbols('\\eta')
self.p_consts_sym['\kappa'] = sp_symbols('\\kappa')
self.p_consts_sym['\lambda'] = sp_symbols('\\lambda')
self.p_consts_sym['\epsilon'] = sp_symbols('\\epsilon')
self.p_consts_sym['\sigma_c'] = sp_symbols('\\sigma_c')
self.p_consts_sym['t_c'] = sp_symbols('t_c')
# These symbols are not good anymore for higher order expansions
self.p_consts_sym['\Lambda_{N}'] = sp_symbols('\\Lambda_{N}')
self.p_consts_sym['\Lambda_{T}'] = sp_symbols('\\Lambda_{T}')
self.p_consts_sym['\Lambda_{I}'] = sp_symbols('\\Lambda_{I}')
self.p_consts_sym['\chi_{N}'] = sp_symbols('\\chi_{N}')
self.p_consts_sym['\chi_{T}'] = sp_symbols('\\chi_{T}')
self.p_consts_sym['\chi_{I}'] = sp_symbols('\\chi_{I}')
def PExpressW(self):
# Defining expressions: e
# Should use a dictionary for the coefficients
self.p_consts_w = {}
self.p_consts_w['\alpha'] = 0
for len2 in self.py_stencil.len_2s:
self.p_consts_w['\alpha'] += -12*self.alpha_c[len2] * self.w_sym[len2]
self.p_consts_w['\beta'] = 0
for len2 in self.py_stencil.len_2s:
self.p_consts_w['\beta'] += 12*self.beta_c[len2] * self.w_sym[len2]
self.p_consts_w['\gamma'] = 0
for len2 in self.py_stencil.len_2s:
self.p_consts_w['\gamma'] += -4*self.gamma_c[len2] * self.w_sym[len2]
self.p_consts_w['\eta'] = 0
for len2 in self.py_stencil.len_2s:
self.p_consts_w['\eta'] += 4*self.eta_c[len2] * self.w_sym[len2]
self.p_consts_w['\kappa'] = 0
for len2 in self.py_stencil.len_2s:
self.p_consts_w['\kappa'] += self.kappa_c[len2] * self.w_sym[len2]
self.p_consts_w['\lambda'] = 0
for len2 in self.py_stencil.len_2s:
                self.p_consts_w['\lambda'] += self.lambda_c[len2] * self.w_sym[len2]
self.p_consts_w['\sigma_c'] = 0
for len2 in self.py_stencil.len_2s:
self.p_consts_w['\sigma_c'] += self.sigma_c_c[len2] * self.w_sym[len2]/12
self.p_consts_w['t_c'] = 0
for len2 in self.py_stencil.len_2s:
self.p_consts_w['t_c'] += self.tolman_c_c[len2] * self.w_sym[len2]
# Lambdas, Chis
self.p_consts_w['\Lambda_{I}'] = 0
for len2 in self.py_stencil.len_2s:
self.p_consts_w['\Lambda_{I}'] += self.lambda_i_c[len2] * self.w_sym[len2]
self.p_consts_w['\Lambda_{T}'] = 0
for len2 in self.py_stencil.len_2s:
self.p_consts_w['\Lambda_{T}'] += self.lambda_t_c[len2] * self.w_sym[len2]
self.p_consts_w['\Lambda_{N}'] = 0
for len2 in self.py_stencil.len_2s:
self.p_consts_w['\Lambda_{N}'] += self.lambda_n_c[len2] * self.w_sym[len2]
self.p_consts_w['\chi_{I}'] = 0
for len2 in self.py_stencil.len_2s:
self.p_consts_w['\chi_{I}'] += self.chi_i_c[len2] * self.w_sym[len2]
self.p_consts_w['\chi_{T}'] = 0
for len2 in self.py_stencil.len_2s:
self.p_consts_w['\chi_{T}'] += self.chi_t_c[len2] * self.w_sym[len2]
self.p_consts_w['\chi_{N}'] = 0
for len2 in self.py_stencil.len_2s:
self.p_consts_w['\chi_{N}'] += self.chi_n_c[len2] * self.w_sym[len2]
self.p_consts_w['\epsilon'] = -2*self.p_consts_w['\alpha']/self.p_consts_w['\beta']
# Defining Lambdas
self.p_consts_wf = {}
for elem in self.p_consts_w:
self.p_consts_wf[str(elem)] = \
sp_lambdify([self.w_sym_list],
self.p_consts_w[str(elem)])
class ShanChanEquilibriumCache(ManageData):
def __init__(self,
stencil = None,
G = None, c2 = None, psi_f = None,
dump_file = 'SCEqCache'):
ManageData.__init__(self, dump_file = dump_file)
if stencil is None:
raise Exception("Missing argument stencil")
if G is None:
raise Exception("Missing argument G")
if c2 is None:
raise Exception("Missing argument c2")
if psi_f is None:
raise Exception("Missing argument psi_f")
'''
Looking for the file and data
'''
self.is_file, self.is_key = ManageData.Read(self), False
self.dict_string = (str(psi_f) + "_" + str(float(G)) + "_" +
str(c2) + "_" + str(stencil.w_sol[0]))
if self.is_file:
if self.dict_string in ManageData.WhichData(self):
self.data = ManageData.PullData(self, self.dict_string)
self.is_key = True
if self.is_key is False:
'''
I need to do this until I write a new pressure tensor class
that also computes the Taylor expansion for the flat interface
and consequently the expression for \varepsilon
'''
w1, w2, w4, w5, w8 = sp_symbols("w(1) w(2) w(4) w(5) w(8)")
w9, w10, w13, w16, w17 = sp_symbols("w(9) w(10) w(13) w(16) w(17)")
w_sym_list = [w1, w2, w4, w5, w8, w9, w10, w13, w16, w17]
_eps_expr = (+ 48*w4 + 96*w5 + 96*w8
+ 288*w9 + 576*w10 + 704*w13 + 960*w16 + 1920*w17)
_eps_expr /= (+ 6*w1 + 12*w2 + 72*w4 + 156*w5 + 144*w8
+ 342*w9 + 696*w10 + 812*w13 + 1056*w16 + 2124*w17)
self.eps_lambda = sp_lambdify([w_sym_list], _eps_expr)
_e2_expr = stencil.e_expr[2]
self.e2_lambda = sp_lambdify([w_sym_list], _e2_expr)
_weights_list = None
if len(stencil.w_sol[0]) != 10:
len_diff = 10 - len(stencil.w_sol[0])
if len_diff < 0:
raise Exception("The number of weights must be 5 at most!")
_weights_list = stencil.w_sol[0] + [0 for i in range(len_diff)]
else:
_weights_list = stencil.w_sol[0]
_shan_chen = \
ShanChen(psi_f = psi_f, G_val = G,
theta_val = c2,
e2_val = self.e2_lambda(_weights_list))
_shan_chen.PressureTensorInit(stencil)
_shan_chen.FlatInterfaceProperties()
_shan_chen.FInterface.MechanicEquilibrium()
_mech_eq_target = _shan_chen.FInterface.mech_eq_target
_sigma_f = \
_shan_chen.FInterface.SurfaceTension(_mech_eq_target)
_n_g = _shan_chen.FInterface.mech_eq_target[0][0]
_n_l = _shan_chen.FInterface.mech_eq_target[1][0]
_p_0 = _shan_chen.FInterface.mech_eq_target[1][1]
_n_c = _shan_chen.n_c
_G_c = _shan_chen.G_c
_data_dict = {'G_c': _G_c, 'n_c': _n_c,
'n_l': _n_l, 'n_g': _n_g,
'p_0': _p_0, 'sigma_f': _sigma_f}
self.PushData(data = _data_dict,
key = self.dict_string)
self.Dump()
def GetFromCache(self):
return ManageData.PullData(self, key = self.dict_string)
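# A minimal, self-contained sketch of the root-bracketing helpers defined at the
# top of this module (FindZeroRanges plus scipy's bisect), run on a toy function
# f(x) = x**2 - 2 rather than on the Shan-Chen pressure; the range and bin count
# below are arbitrary illustrative choices.
if __name__ == "__main__":
    toy_f = lambda x_: x_ ** 2 - 2.0
    toy_range, toy_bins = (0.0, 2.0), 64
    toy_delta = (toy_range[1] - toy_range[0]) / toy_bins
    # Bracket sign changes on a regular grid, then refine each bracket by bisection
    toy_brackets = FindZeroRanges(toy_f, toy_range, toy_bins, toy_delta)
    toy_roots = [bisect(toy_f, a, b) for (a, b) in toy_brackets]
    print("brackets:", toy_brackets)
    print("roots:", toy_roots)  # expected: a single root close to sqrt(2) ~ 1.41421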
| 1.460938
| 1
|
app/main/views.py
|
Kadas36/NEWS-App
| 0
|
12776590
|
from flask import render_template,request,redirect,url_for
from . import main
from ..requests import get_news, get_articles
from ..models import Source, Article
# Views
@main.route('/')
def index():
'''
Function that returns the index page and its data
'''
general_list = get_news('general')
health_list = get_news('health')
business_list = get_news('business')
technology_list = get_news('technology')
sports_list = get_news('sports')
entertainment_list = get_news('entertainment')
return render_template('index.html', general=general_list, health=health_list, business=business_list, sports=sports_list, technology=technology_list, entertainment=entertainment_list)
@main.route('/news/<id>')
def news (id):
'''
Returns the news article from a highlight
'''
news_args = get_articles(id)
return render_template("articles.html", news=news_args)
| 2.640625
| 3
|
src/units/__init__.py
|
sunoru/units
| 1
|
12776591
|
# -*- coding:utf-8 -*-
# filename: units/__init__.py
# by スノル
__version__ = "0.2.0"
__all__ = []
| 0.945313
| 1
|
inst/python/rpytools/help.py
|
flyaflya/reticulate
| 1,476
|
12776592
|
import sys
import types
import inspect
def isstring(s):
# if we use Python 3
if (sys.version_info[0] >= 3):
return isinstance(s, str)
# we use Python 2
return isinstance(s, basestring)
def normalize_func(func):
# return None for builtins
if (inspect.isbuiltin(func)):
return None
return func
def get_doc(func):
doc = inspect.getdoc(func)
if doc is None:
func = normalize_func(func)
if func is None:
return None
else:
doc = inspect.getdoc(func)
return doc
def get_property_doc(target, prop):
for name, obj in inspect.getmembers(type(target), inspect.isdatadescriptor):
if (isinstance(obj, property) and name == prop):
return inspect.getdoc(obj.fget)
return None
def get_argspec(func):
try:
if sys.version_info[0] >= 3:
return inspect.getfullargspec(func)
else:
return inspect.getargspec(func)
except TypeError:
return None
def get_arguments(func):
func = normalize_func(func)
if func is None:
return None
argspec = get_argspec(func)
if argspec is None:
return None
args = argspec.args
if 'self' in args:
args.remove('self')
return args
def get_r_representation(default):
if callable(default) and hasattr(default, '__name__'):
arg_value = default.__name__
else:
if default is None:
arg_value = "NULL"
elif type(default) == type(True):
if default == True:
arg_value = "TRUE"
else:
arg_value = "FALSE"
elif isstring(default):
arg_value = "\"%s\"" % default
elif isinstance(default, int):
arg_value = "%rL" % default
elif isinstance(default, float):
arg_value = "%r" % default
elif isinstance(default, list):
arg_value = "c("
for i, item in enumerate(default):
if i is (len(default) - 1):
arg_value += "%s)" % get_r_representation(item)
else:
arg_value += "%s, " % get_r_representation(item)
elif isinstance(default, (tuple, set)):
arg_value = "list("
for i, item in enumerate(default):
if i is (len(default) - 1):
arg_value += "%s)" % get_r_representation(item)
else:
arg_value += "%s, " % get_r_representation(item)
elif isinstance(default, dict):
arg_value = "list("
for i in range(len(default)):
i_arg_value = "%s = %s" % \
(default.keys()[i], get_r_representation(default.values()[i]))
if i is (len(default) - 1):
arg_value += "%s)" % i_arg_value
else:
arg_value += "%s, " % i_arg_value
else:
arg_value = "%r" % default
# if the value starts with "tf." then convert to $ usage
if (arg_value.startswith("tf.")):
arg_value = arg_value.replace(".", "$")
return(arg_value)
def generate_signature_for_function(func):
"""Given a function, returns a string representing its args."""
func = normalize_func(func)
if func is None:
return None
args_list = []
argspec = get_argspec(func)
if argspec is None:
return None
first_arg_with_default = (
len(argspec.args or []) - len(argspec.defaults or []))
for arg in argspec.args[:first_arg_with_default]:
if arg == "self":
# Python documentation typically skips `self` when printing method
# signatures.
continue
args_list.append(arg)
if argspec.varargs == "args" and hasattr(argspec, 'keywords') and argspec.keywords == "kwds":
original_func = func.__closure__[0].cell_contents
return generate_signature_for_function(original_func)
if argspec.defaults:
for arg, default in zip(
argspec.args[first_arg_with_default:], argspec.defaults):
arg_value = get_r_representation(default)
args_list.append("%s = %s" % (arg, arg_value))
if argspec.varargs:
args_list.append("...")
if hasattr(argspec, 'keywords') and argspec.keywords:
args_list.append("...")
return "(" + ", ".join(args_list) + ")"
| 2.65625
| 3
|
day2-4.py
|
Par1Na/Twowaits
| 2
|
12776593
|
# Twowaits
# Twowaits Problem
def upPattern(n):
for i in range(n):
for j in range(i,n):
print('* ',end='')
for m in range(0,4*i+1):
print(end=' ')
for p in range(i,n):
print('* ',end='')
print('\r')
def lowPattern(n):
s=4*n-3
for i in range(n):
for j in range(i+1):
print('* ',end='')
for k in range(s):
print(end=' ')
s=s-4
for m in range(i+1):
print('* ',end='')
print('\r')
def comPattern(n):
upPattern(n)
lowPattern(n)
comPattern(5)
| 3.375
| 3
|
bibliopixel/commands/devices.py
|
rec/leds
| 253
|
12776594
|
<reponame>rec/leds
"""
Find serial devices and update serial device IDs
"""
from .. util import log
CONNECT_MESSAGE = """
Connect just one Serial device (AllPixel) and press enter..."""
def run(args):
from ..drivers.serial.driver import Serial
from ..drivers.serial.devices import Devices
import serial
run = True
log.printer("Press Ctrl+C any time to exit.")
try:
while run:
try:
input(CONNECT_MESSAGE)
devices = Devices(args.hardware_id, args.baud)
ports = devices.find_serial_devices()
if not ports:
log.printer("No serial devices found. Please connect one.")
continue
port = sorted(ports.items())[0][1][0]
id = devices.get_device_id(port)
log.printer("Device ID of {}: {}".format(port, id))
newID = input("Input new ID (enter to skip): ")
if newID != '':
try:
newID = int(newID)
if newID < 0 or newID > 255:
raise ValueError()
devices.set_device_id(port, newID)
id = devices.get_device_id(port)
log.printer("Device ID set to: %s" % id)
except ValueError:
log.printer("Please enter a number between 0 and 255.")
except serial.SerialException as e:
log.printer("Problem connecting to serial device. %s" % e)
except Exception as e:
log.printer('Programmer error with exception %s' % e)
except KeyboardInterrupt:
pass
def add_arguments(parser):
parser.set_defaults(run=run)
parser.add_argument('--hardware-id', default='1D50:60AB',
help='USB Vendor ID : Product ID of device. '
'Defaults to VID:PID for AllPixel')
parser.add_argument('--baud', default=921600, type=int,
help='Serial baud rate.')
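# Illustrative wiring only: this module is normally mounted as a sub-command of
# the bibliopixel CLI, and the `subparsers` name below is an assumption.
#
#     sub = subparsers.add_parser('devices', help=__doc__)
#     add_arguments(sub)
#     # later: args = top_level_parser.parse_args(); args.run(args)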
| 3.0625
| 3
|
ufit/models/peaks.py
|
McStasMcXtrace/ufit
| 0
|
12776595
|
# -*- coding: utf-8 -*-
# *****************************************************************************
# ufit, a universal scattering fitting suite
#
# Copyright (c) 2013-2019, <NAME> and contributors. All rights reserved.
# Licensed under a 2-clause BSD license, see LICENSE.
# *****************************************************************************
"""Models for different peak shapes."""
from numpy import exp, log, sqrt, sin, cos, pi
from scipy.special import wofz
from ufit.models import Model
__all__ = ['Gauss', 'GaussInt', 'Lorentz', 'LorentzInt',
'Voigt', 'PseudoVoigt', 'DHO']
class Gauss(Model):
"""Gaussian peak
Parameters:
* `pos` - Peak center position
* `ampl` - Amplitude at center
* `fwhm` - Full width at half maximum
"""
param_names = ['pos', 'ampl', 'fwhm']
def __init__(self, name='', pos=None, ampl=None, fwhm=None):
pp, pa, pf = self._init_params(name, self.param_names, locals())
# amplitude and fwhm should be positive
self.params[1].finalize = abs
self.params[2].finalize = abs
self.fcn = lambda p, x: \
abs(p[pa]) * exp(-(x - p[pp])**2/p[pf]**2 * 4*log(2))
pick_points = ['peak', 'width']
def convert_pick(self, p, w):
return {
self.params[0].name: p[0], # position
self.params[1].name: p[1], # peak amplitude
self.params[2].name: 2*abs(w[0] - p[0]), # FWHM
}
class GaussInt(Model):
"""Gaussian peak with integrated intensity parameter
Parameters:
* `pos` - Peak center position
* `int` - Integrated intensity
* `fwhm` - Full width at half maximum
"""
param_names = ['pos', 'int', 'fwhm']
def __init__(self, name='', pos=None, int=None, fwhm=None):
pp, pint, pf = self._init_params(name, self.param_names, locals())
# integration and fwhm should be positive
self.params[1].finalize = abs
self.params[2].finalize = abs
self.fcn = lambda p, x: \
abs(p[pint]) / (abs(p[pf]) * sqrt(pi/(4 * log(2)))) * \
exp(-(x - p[pp])**2/p[pf]**2 * 4*log(2))
pick_points = ['peak', 'width']
def convert_pick(self, p, w):
fwhm = 2*abs(w[0] - p[0])
return {
self.params[0].name: p[0], # position
self.params[1].name: p[1] * fwhm * sqrt(2*pi), # peak intensity (integrated)
self.params[2].name: fwhm, # FWHM
}
class Lorentz(Model):
"""Lorentzian peak
Parameters:
* `pos` - Peak center position
* `ampl` - Amplitude at center
* `fwhm` - Full width at half maximum
"""
param_names = ['pos', 'ampl', 'fwhm']
def __init__(self, name='', pos=None, ampl=None, fwhm=None):
pp, pa, pf = self._init_params(name, self.param_names, locals())
# amplitude and fwhm should be positive
self.params[1].finalize = abs
self.params[2].finalize = abs
self.fcn = lambda p, x: abs(p[pa]) / (1 + 4*(x - p[pp])**2/p[pf]**2)
pick_points = ['peak', 'width']
def convert_pick(self, p, w):
return {
self.params[0].name: p[0], # position
self.params[1].name: p[1], # peak amplitude
self.params[2].name: 2*abs(w[0] - p[0]), # FWHM
}
class LorentzInt(Model):
"""Lorentzian peak with integrated intensity parameter
Parameters:
* `pos` - Peak center position
* `int` - Integrated intensity
* `fwhm` - Full width at half maximum
"""
param_names = ['pos', 'int', 'fwhm']
def __init__(self, name='', pos=None, int=None, fwhm=None):
pp, pint, pf = self._init_params(name, self.param_names, locals())
# integration and fwhm should be positive
self.params[1].finalize = abs
self.params[2].finalize = abs
self.fcn = lambda p, x: 2 * abs(p[pint]) / (pi * p[pf]) / (1 + 4*(x - p[pp])**2/p[pf]**2)
pick_points = ['peak', 'width']
def convert_pick(self, p, w):
fwhm = 2*abs(w[0] - p[0])
return {
self.params[0].name: p[0], # position
self.params[1].name: p[1] * fwhm * pi/2, # integrated intensity
self.params[2].name: fwhm, # FWHM
}
class Voigt(Model):
"""Voigt peak
A convolution of a Gaussian and a Lorentzian.
Parameters:
* `pos` - Peak center position
* `ampl` - Amplitude at center
* `fwhm` - Full width at half maximum of the Gauss part
* `shape` - Lorentz contribution
"""
param_names = ['pos', 'ampl', 'fwhm', 'shape']
def __init__(self, name='', pos=None, ampl=None, fwhm=None, shape=None):
pp, pa, pf, psh = self._init_params(name, self.param_names, locals())
# amplitude and fwhms should be positive
self.params[1].finalize = abs
self.params[2].finalize = abs
self.params[3].finalize = abs
self.fcn = lambda p, x: \
p[pa] / wofz(1j*sqrt(log(2))*p[psh]).real * \
wofz(2*sqrt(log(2)) * (x-p[pp])/p[pf] + 1j*sqrt(log(2))*p[psh]).real
pick_points = ['peak', 'width']
def convert_pick(self, p, w):
return {
self.params[0].name: p[0], # position
self.params[1].name: p[1], # peak amplitude
self.params[2].name: 2*abs(w[0] - p[0]), # FWHM of Gauss
self.params[3].name: 0,
}
class PseudoVoigt(Model):
"""Pseudo-Voigt peak
A pseudo-convolution of a Gaussian and a Lorentzian.
Parameters:
* `pos` - Peak center position
* `ampl` - Amplitude at center
* `fwhm` - Full width at half maximum
* `eta` - Lorentzicity
"""
param_names = ['pos', 'ampl', 'fwhm', 'eta']
def __init__(self, name='', pos=None, ampl=None, fwhm=None, eta=0.5):
pp, pa, pf, pe = self._init_params(name, self.param_names, locals())
# amplitude and fwhm should be positive
self.params[1].finalize = abs
self.params[2].finalize = abs
# eta should be between 0 and 1
self.params[3].finalize = lambda e: e % 1.0
self.fcn = lambda p, x: abs(p[pa]) * \
((p[pe] % 1.0) / (1 + 4*(x - p[pp])**2/p[pf]**2) +
(1-(p[pe] % 1.0)) * exp(-(x - p[pp])**2/p[pf]**2 * 4*log(2)))
pick_points = ['peak', 'width']
def convert_pick(self, p, w):
return {
self.params[0].name: p[0], # position
self.params[1].name: p[1], # peak amplitude
self.params[2].name: 2*abs(w[0] - p[0]), # FWHM
}
class DHO(Model):
"""Damped Harmonic Oscillator
Two Lorentzians centered around zero with a common width and amplitude,
respecting the Bose factor.
Parameters:
* `center` - Energy zero
* `pos` - omega_0
* `ampl` - Amplitude
* `gamma` - Damping
* `tt` - Temperature in K
"""
param_names = ['center', 'pos', 'ampl', 'gamma', 'tt']
def __init__(self, name='',
center=0, pos=None, ampl=None, gamma=None, tt=None):
pc, pp, pa, pg, ptt = self._init_params(name, self.param_names,
locals())
# pos, amplitude and gamma should be positive
self.params[1].finalize = abs
self.params[2].finalize = abs
self.params[3].finalize = abs
self.fcn = lambda p, x: x / (1. - exp(-11.6045*(x+0.00001) / p[ptt])) * \
abs(p[pa]) * abs(p[pg]) / \
((p[pp]**2 - (x - p[pc])**2)**2 + (p[pg]*(x - p[pc]))**2)
pick_points = ['left peak', 'width of left peak', 'right peak']
def convert_pick(self, p1, w, p2):
return {
self.params[0].name: 0.5*(p1[0] + p2[0]), # center
self.params[1].name: 0.5*abs(p1[0] - p2[0]), # position
self.params[2].name: p1[1] * 0.01, # peak amplitude
self.params[3].name: 2*abs(w[0] - p1[0]), # gamma
}
class Gauss2D(Model):
"""Gaussian peak in two dimensions
Parameters:
* `bkgd` - Background
* `pos_x` - X center position
* `pos_y` - Y center position
* `ampl` - amplitude
* `fwhm_x` - Full width in X direction
* `fwhm_y` - Full width in Y direction
* `theta` - rotation of Gaussian in radians
"""
param_names = ['bkgd', 'pos_x', 'pos_y', 'ampl', 'fwhm_x', 'fwhm_y', 'theta']
def __init__(self, name='', bkgd=None, pos_x=None, pos_y=None, ampl=None,
fwhm_x=None, fwhm_y=None, theta=None):
pb, ppx, ppy, pa, pfx, pfy, pth = self._init_params(
name, self.param_names, locals())
self.params[3].finalize = abs
self.params[4].finalize = abs
self.params[5].finalize = abs
def fcn(p, x):
# rotate coordinate system by theta
c, s = cos(p[pth]), sin(p[pth])
x1 = (x[:, 0] - p[ppx])*c - (x[:, 1] - p[ppy])*s
y1 = (x[:, 0] - p[ppx])*s + (x[:, 1] - p[ppy])*c
return abs(p[pb]) + abs(p[pa]) * \
exp(-x1**2/p[pfx]**2 * 4*log(2)) * \
exp(-y1**2/p[pfy]**2 * 4*log(2))
self.fcn = fcn
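# A quick standalone numeric check of the FWHM parameterization shared by the
# peak shapes above (it does not go through the ufit Model machinery; the
# numbers are arbitrary): at x = pos +/- fwhm/2 a Gauss profile drops to half
# its amplitude.
if __name__ == "__main__":
    pos, ampl, fwhm = 0.0, 2.0, 1.5
    x = pos + fwhm / 2
    value = ampl * exp(-(x - pos)**2 / fwhm**2 * 4 * log(2))
    print(value, ampl / 2)  # both should print 1.0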
| 2.25
| 2
|
tanker/cli.py
|
bertrandchenal/tanker
| 1
|
12776596
|
<filename>tanker/cli.py
import argparse
import csv
import os
import sys
from .utils import logger, __version__, yaml_load, ctx
from .context import connect, create_tables
from .view import View
from .table import Table
def cli():
parser = argparse.ArgumentParser(description='Tanker CLI')
parser.add_argument(
'action', help='info, read, write, delete or version', nargs=1
)
parser.add_argument('table', help='Table to query', nargs='*')
parser.add_argument(
'--config',
help='Config file (defaults to ".tk.yaml")',
default='.tk.yaml',
)
parser.add_argument(
'-D', '--db-uri',
help='Database URI (override config file value)',
)
parser.add_argument(
'-l', '--limit', help='Limit number of results', type=int
)
parser.add_argument('-o', '--offset', help='Offset results', type=int)
parser.add_argument(
'-F', '--filter', action='append', help='Add filter', default=[]
)
parser.add_argument(
'-p', '--purge', help='Purge table after write', action='store_true'
)
parser.add_argument(
'-s', '--sort', action='append', help='Sort results', default=[]
)
parser.add_argument(
'-f', '--file', help='Read/Write to file ' '(instead of stdin/stdout)'
)
parser.add_argument(
'--yaml',
        help='Enable YAML input / output ' '(defaults to csv)',
action='store_true',
)
parser.add_argument(
'--ascii-table',
'-t',
help='Enable ascii table output',
action='store_true',
)
parser.add_argument('--vbar', help='Vertical bar plot', action='store_true')
parser.add_argument('--tic', help='Tic character to use for plot')
parser.add_argument(
'-d', '--debug', help='Enable debugging', action='store_true'
)
parser.add_argument(
'-H', '--hide-headers', help='Hide headers', action='store_true'
)
args = parser.parse_args()
if args.debug:
logger.setLevel('DEBUG')
if args.action[0] == 'version':
print(__version__)
return
if os.path.exists(args.config):
cfg = yaml_load(open(args.config))
else:
cfg = {}
if args.db_uri:
cfg['db_uri'] = args.db_uri
if cfg.get('schema'):
cfg['schema'] = yaml_load(open(os.path.expanduser(cfg['schema'])))
with connect(cfg):
cli_main(args)
def ascii_table(rows, headers=None, sep=' '):
# Convert content as strings
rows = [list(map(str, row)) for row in rows]
# Compute lengths
lengths = (len(h) for h in (headers or rows[0]))
for row in rows:
lengths = map(max, (len(i) for i in row), lengths)
lengths = list(lengths)
# Define row formatter
fmt = lambda xs: sep.join(x.ljust(l) for x, l in zip(xs, lengths)) + '\n'
# Output content
if headers:
top = fmt(headers)
yield top
yield fmt('-' * l for l in lengths)
for row in rows:
yield fmt(row)
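# Illustrative use of ascii_table (the rows and headers below are made up):
#
#     rows = [['alice', 3], ['bob', 12]]
#     for chunk in ascii_table(rows, headers=['name', 'count']):
#         sys.stdout.write(chunk)
#
# which writes a left-justified table with a dashed separator under the header.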
def vbar(rows, fields, plot_width=80, tic=None):
tic = tic or '•'
if not rows:
return
if not isinstance(rows[0][-1], (float, int)):
err = 'Last column must be numeric'
logger.error(err)
return
labels, values = zip(*((r[:-1], r[-1]) for r in rows))
labels = [str(' / '.join(map(str, l))) for l in labels]
label_len = max(len(l) for l in labels)
value_max = max(max(v for v in values), 0)
value_min = min(min(v for v in values), 0)
value_width = max(len(f' {value_min:.2f}'), len(f'{value_max:.2f}'))
delta = (value_max - value_min) or 1
scale = delta / plot_width
if value_min < 0:
left_pane = round(-value_min / scale)
else:
left_pane = 0
for label, value in zip(labels, values):
yield f'{label:<{label_len}} {value:>{value_width}.2f} '
if value < 0:
nb_tics = -round(value / scale)
line = ' ' * (left_pane - nb_tics) + tic * nb_tics + '|\n'
yield line
else:
pos = round(value / scale)
yield ' ' * left_pane + '|' + tic * pos + '\n'
yield ''
def cli_input_data(args):
fields = args.table[1:] or None
fh = None
if args.file:
fh = open(args.file)
    elif args.action[0] in ('write', 'delete'):
fh = sys.stdin
if not fh:
return fields, None
if args.yaml:
data = yaml_load(fh)
else:
reader = csv.reader(fh)
data = list(reader)
    # If no fields are given we infer them from the data
if not fields and data:
if args.yaml:
fields = data[0].keys()
else:
fields = data[0]
data = data[1:]
return fields, data
def cli_main(args):
action = args.action[0]
table = args.table[0] if args.table else None
order = map(lambda x: x.split(':') if ':' in x else x, args.sort)
fields, data = cli_input_data(args)
if action == 'read':
view = View(table, fields)
res = view.read(
args.filter,
order=list(order),
limit=args.limit,
offset=args.offset,
)
if args.file:
fh = open(args.file, 'w')
else:
fh = sys.stdout
if args.yaml:
import yaml
fh.write(yaml.dump(list(res.dict()), default_flow_style=False))
elif args.ascii_table:
headers = (
None if args.hide_headers else [f.name for f in view.fields]
)
for line in ascii_table(res, headers=headers):
fh.write(line)
elif args.vbar:
for line in vbar(list(res), view.fields, tic=args.tic):
fh.write(line)
else:
writer = csv.writer(fh)
if not args.hide_headers:
writer.writerow([f.name for f in view.fields])
writer.writerows(res.all())
elif action == 'delete':
View(table, fields).delete(filters=args.filter, data=data)
elif action == 'write':
# Extract data
fields, data = cli_input_data(args)
View(table, fields).write(data, purge=args.purge)
elif action == 'info':
if table:
columns = sorted(Table.get(table).columns, key=lambda x: x.name)
for col in columns:
if col.ctype in ('M2O', 'O2M'):
details = '%s -> %s' % (col.ctype, col.fk)
else:
details = col.ctype
print('%s (%s)' % (col.name, details))
else:
for name in sorted(ctx.registry):
print(name)
elif action == 'init':
create_tables()
else:
print('Action "%s" not supported' % action)
if __name__ == '__main__':
cli()
| 2.59375
| 3
|
dataikuapi/dss/admin.py
|
dataiku/dataiku-api-client-python
| 28
|
12776597
|
<reponame>dataiku/dataiku-api-client-python
from .future import DSSFuture
import json, warnings
class DSSConnectionInfo(dict):
"""A class holding read-only information about a connection.
This class should not be created directly. Instead, use :meth:`DSSConnection.get_info`
The main use case of this class is to retrieve the decrypted credentials for a connection,
if allowed by the connection permissions.
Depending on the connection kind, the credential may be available using :meth:`get_basic_credential`
or :meth:`get_aws_credential`
"""
def __init__(self, data):
"""Do not call this directly, use :meth:`DSSConnection.get_info`"""
super(DSSConnectionInfo, self).__init__(data)
def get_type(self):
"""Returns the type of the connection"""
return self["type"]
def get_params(self):
"""Returns the parameters of the connection, as a dict"""
return self["params"]
def get_basic_credential(self):
"""
Returns the basic credential (user/password pair) for this connection, if available
:returns: the credential, as a dict containing "user" and "password"
:rtype dict
"""
if not "resolvedBasicCredential" in self:
raise ValueError("No basic credential available")
return self["resolvedBasicCredential"]
def get_aws_credential(self):
"""
Returns the AWS credential for this connection, if available.
The AWS credential can either be a keypair or a STS token triplet
:returns: the credential, as a dict containing "accessKey", "secretKey", and "sessionToken" (only in the case of STS token)
:rtype dict
"""
if not "resolvedAWSCredential" in self:
raise ValueError("No AWS credential available")
return self["resolvedAWSCredential"]
class DSSConnection(object):
"""
A connection on the DSS instance.
"""
def __init__(self, client, name):
"""Do not call this directly, use :meth:`dataikuapi.DSSClient.get_connection`"""
self.client = client
self.name = name
########################################################
# Location info
########################################################
def get_location_info(self):
"""Deprecated, use get_info"""
warnings.warn("DSSConnection.get_location_info is deprecated, please use get_info", DeprecationWarning)
return self.get_info()
def get_info(self):
"""
Gets information about this connection.
Note: this call requires permissions to read connection details
:returns: a :class:`DSSConnectionInfo` containing connection information
"""
return DSSConnectionInfo(self.client._perform_json(
"GET", "/connections/%s/info" % self.name))
########################################################
# Connection deletion
########################################################
def delete(self):
"""
Delete the connection
"""
return self.client._perform_empty(
"DELETE", "/admin/connections/%s" % self.name)
def get_definition(self):
"""
Get the connection's definition (type, name, params, usage restrictions)
:returns: The connection definition, as a dict.
The exact structure of the returned dict is not documented and depends on the connection
type. Create connections using the DSS UI and call :meth:`get_definition` to see the
fields that are in it.
"""
return self.client._perform_json(
"GET", "/admin/connections/%s" % self.name)
def set_definition(self, description):
"""
Set the connection's definition.
You should only :meth:`set_definition` using an object that you obtained through :meth:`get_definition`,
not create a new dict.
        :param dict description: the definition for the connection, as a dict.
"""
return self.client._perform_json(
"PUT", "/admin/connections/%s" % self.name,
body = description)
########################################################
# Security
########################################################
def sync_root_acls(self):
"""
Resync root permissions on this connection path. This is only useful for HDFS connections
when DSS has User Isolation activated with "DSS-managed HDFS ACL"
:returns: a :class:`~dataikuapi.dss.future.DSSFuture` handle to the task of resynchronizing the permissions
"""
future_response = self.client._perform_json(
"POST", "/admin/connections/%s/sync" % self.name,
body = {'root':True})
return DSSFuture(self.client, future_response.get('jobId', None), future_response)
def sync_datasets_acls(self):
"""
Resync permissions on datasets in this connection path. This is only useful for HDFS connections
when DSS has User Isolation activated with "DSS-managed HDFS ACL"
:returns: a :class:`~dataikuapi.dss.future.DSSFuture` handle to the task of resynchronizing the permissions
"""
future_response = self.client._perform_json(
"POST", "/admin/connections/%s/sync" % self.name,
body = {'root':True})
return DSSFuture(self.client, future_response.get('jobId', None), future_response)
class DSSUser(object):
"""
A handle for a user on the DSS instance.
Do not create this directly, use :meth:`dataikuapi.DSSClient.get_user`
"""
def __init__(self, client, login):
"""Do not call this directly, use :meth:`dataikuapi.DSSClient.get_user`"""
self.client = client
self.login = login
def delete(self):
"""
Deletes the user
"""
return self.client._perform_empty(
"DELETE", "/admin/users/%s" % self.login)
def get_settings(self):
"""
Gets the settings of the user
:rtype: :class:`DSSUserSettings`
"""
raw = self.client._perform_json("GET", "/admin/users/%s" % self.login)
return DSSUserSettings(self.client, self.login, raw)
########################################################
# Legacy
########################################################
def get_definition(self):
"""
Deprecated, use get_settings instead
Get the user's definition (login, type, display name, permissions, ...)
:return: the user's definition, as a dict
"""
warnings.warn("DSSUser.get_definition is deprecated, please use get_settings", DeprecationWarning)
return self.client._perform_json("GET", "/admin/users/%s" % self.login)
def set_definition(self, definition):
"""
Deprecated, use get_settings instead
Set the user's definition.
Note: this call requires an API key with admin rights
You should only :meth:`set_definition` using an object that you obtained through :meth:`get_definition`,
not create a new dict.
The fields that may be changed in a user definition are:
* email
* displayName
* groups
* userProfile
* password
:param dict definition: the definition for the user, as a dict
"""
warnings.warn("DSSUser.set_definition is deprecated, please use get_settings", DeprecationWarning)
return self.client._perform_json("PUT", "/admin/users/%s" % self.login, body = definition)
def get_client_as(self):
"""
Gets a :class:`dataikuapi.DSSClient` that has the permissions of this user.
This allows administrators to impersonate actions on behalf of other users, in order to perform
actions on their behalf
"""
from dataikuapi.dssclient import DSSClient
if self.client.api_key is not None:
return DSSClient(self.client.host, self.client.api_key, extra_headers={"X-DKU-ProxyUser": self.login})
elif self.client.internal_ticket is not None:
return DSSClient(self.client.host, internal_ticket = self.client.internal_ticket,
extra_headers={"X-DKU-ProxyUser": self.login})
else:
raise ValueError("Don't know how to proxy this client")
class DSSOwnUser(object):
"""
A handle to interact with your own user
Do not create this directly, use :meth:`dataikuapi.DSSClient.get_own_user`
"""
def __init__(self, client):
self.client = client
def get_settings(self):
"""
Get your own settings
:rtype: :class:`DSSOwnUserSettings`
"""
raw = self.client._perform_json("GET", "/current-user")
return DSSOwnUserSettings(self.client, raw)
class DSSUserSettingsBase(object):
"""Settings for a DSS user"""
def __init__(self, settings):
"""Do not call this directly, use :meth:`DSSUser.get_settings` or :meth:`DSSOwnUser.get_settings` """
self.settings = settings
def get_raw(self):
"""
:return: the raw settings of the user, as a dict. Modifications made to the returned object
are reflected when saving
:rtype: dict
"""
return self.settings
def add_secret(self, name, value):
"""
Adds a user secret.
If there was already a secret with the same name, it is replaced
"""
self.remove_secret(name)
return self.settings["secrets"].append({"name": name, "value": value, "secret": True})
def remove_secret(self, name):
"""Removes a user secret based on its name"""
self.settings["secrets"] = [x for x in self.settings["secrets"] if x["name"] != name]
@property
def user_properties(self):
"""
The user properties (editable by the user) for this user. Do not set this property, modify the dict in place
:rtype dict
"""
return self.settings["userProperties"]
def set_basic_connection_credential(self, connection, user, password):
"""Sets per-user-credentials for a connection that takes a user/password pair"""
self.settings["credentials"][connection] = {
"type": "BASIC",
"user": user,
"password": password
}
def remove_connection_credential(self,connection):
"""Removes per-user-credentials for a connection"""
if connection in self.settings["credentials"]:
del self.settings["credentials"][connection]
def set_basic_plugin_credential(self, plugin_id, param_set_id, preset_id, param_name, user, password):
"""Sets per-user-credentials for a plugin preset that takes a user/password pair"""
name = json.dumps(["PLUGIN", plugin_id, param_set_id, preset_id, param_name])[1:-1]
self.settings["credentials"][name] = {
"type": "BASIC",
"user": user,
"password": password
}
def set_oauth2_plugin_credential(self, plugin_id, param_set_id, preset_id, param_name, refresh_token):
"""Sets per-user-credentials for a plugin preset that takes a OAuth refresh token"""
name = json.dumps(["PLUGIN", plugin_id, param_set_id, preset_id, param_name])[1:-1]
self.settings["credentials"][name] = {
"type": "OAUTH_REFRESH_TOKEN",
"refreshToken": refresh_token
}
def remove_plugin_credential(self, plugin_id, param_set_id, preset_id, param_name):
"""Removes per-user-credentials for a plugin preset"""
name = json.dumps(["PLUGIN", plugin_id, param_set_id, preset_id, param_name])[1:-1]
if name in self.settings["credentials"]:
del self.settings["credentials"][name]
class DSSUserSettings(DSSUserSettingsBase):
"""Settings for a DSS user"""
def __init__(self, client, login, settings):
"""Do not call this directly, use :meth:`DSSUser.get_settings`"""
super(DSSUserSettings, self).__init__(settings)
self.client = client
self.login = login
@property
def admin_properties(self):
"""
The user properties (not editable by the user) for this user. Do not set this property, modify the dict in place
:rtype dict
"""
return self.settings["adminProperties"]
@property
def enabled(self):
"""
Whether this user is enabled
:rtype boolean
"""
return self.settings["enabled"]
@enabled.setter
def enabled(self, new_value):
self.settings["enabled"] = new_value
def save(self):
"""Saves the settings"""
self.client._perform_json("PUT", "/admin/users/%s" % self.login, body = self.settings)
class DSSOwnUserSettings(DSSUserSettingsBase):
"""Settings for the current DSS user"""
def __init__(self, client, settings):
"""Do not call this directly, use :meth:`dataikuapi.DSSClient.get_own_user`"""
super(DSSOwnUserSettings, self).__init__(settings)
self.client = client
def save(self):
"""Saves the settings"""
self.client._perform_empty("PUT", "/current-user", body = self.settings)
class DSSGroup(object):
"""
A group on the DSS instance.
Do not create this directly, use :meth:`dataikuapi.DSSClient.get_group`
"""
def __init__(self, client, name):
"""Do not call this directly, use :meth:`dataikuapi.DSSClient.get_group`"""
self.client = client
self.name = name
########################################################
# Group deletion
########################################################
def delete(self):
"""
Deletes the group
"""
return self.client._perform_empty(
"DELETE", "/admin/groups/%s" % self.name)
def get_definition(self):
"""
Get the group's definition (name, description, admin abilities, type, ldap name mapping)
:return: the group's definition, as a dict
"""
return self.client._perform_json(
"GET", "/admin/groups/%s" % self.name)
def set_definition(self, definition):
"""
Set the group's definition.
You should only :meth:`set_definition` using an object that you obtained through :meth:`get_definition`,
not create a new dict.
Args:
definition: the definition for the group, as a dict
"""
return self.client._perform_json(
"PUT", "/admin/groups/%s" % self.name,
body = definition)
class DSSGeneralSettings(object):
"""
The general settings of the DSS instance.
Do not create this directly, use :meth:`dataikuapi.DSSClient.get_general_settings`
"""
def __init__(self, client):
"""Do not call this directly, use :meth:`dataikuapi.DSSClient.get_general_settings`"""
self.client = client
self.settings = self.client._perform_json("GET", "/admin/general-settings")
########################################################
# Update settings on instance
########################################################
def save(self):
"""
Save the changes that were made to the settings on the DSS instance
Note: this call requires an API key with admin rights
"""
return self.client._perform_empty("PUT", "/admin/general-settings", body = self.settings)
########################################################
# Value accessors
########################################################
def get_raw(self):
"""
Get the settings as a dictionary
"""
return self.settings
def add_impersonation_rule(self, rule, is_user_rule=True):
"""
Add a rule to the impersonation settings
:param rule: an impersonation rule, either a :class:`dataikuapi.dss.admin.DSSUserImpersonationRule`
or a :class:`dataikuapi.dss.admin.DSSGroupImpersonationRule`, or a plain dict
:param is_user_rule: when the rule parameter is a dict, whether the rule is for users or groups
"""
rule_raw = rule
if isinstance(rule, DSSUserImpersonationRule):
rule_raw = rule.raw
is_user_rule = True
elif isinstance(rule, DSSGroupImpersonationRule):
rule_raw = rule.raw
is_user_rule = False
impersonation = self.settings['impersonation']
if is_user_rule:
impersonation['userRules'].append(rule_raw)
else:
impersonation['groupRules'].append(rule_raw)
def get_impersonation_rules(self, dss_user=None, dss_group=None, unix_user=None, hadoop_user=None, project_key=None, scope=None, rule_type=None, is_user=None):
"""
Retrieve the user or group impersonation rules that matches the parameters
:param dss_user: a DSS user or regular expression to match DSS user names
:param dss_group: a DSS group or regular expression to match DSS groups
:param unix_user: a name to match the target UNIX user
:param hadoop_user: a name to match the target Hadoop user
:param project_key: a project_key
:param scope: project-scoped ('PROJECT') or global ('GLOBAL')
        :param rule_type: the rule user or group matching method ('IDENTITY', 'SINGLE_MAPPING', 'REGEXP_RULE')
:param is_user: True if only user-level rules should be considered, False for only group-level rules, None to consider both
"""
user_matches = self.settings['impersonation']['userRules'] if is_user == None or is_user == True else []
if dss_user is not None:
user_matches = [m for m in user_matches if dss_user == m.get('dssUser', None)]
if unix_user is not None:
user_matches = [m for m in user_matches if unix_user == m.get('targetUnix', None)]
if hadoop_user is not None:
user_matches = [m for m in user_matches if hadoop_user == m.get('targetHadoop', None)]
if project_key is not None:
user_matches = [m for m in user_matches if project_key == m.get('projectKey', None)]
if rule_type is not None:
user_matches = [m for m in user_matches if rule_type == m.get('type', None)]
if scope is not None:
user_matches = [m for m in user_matches if scope == m.get('scope', None)]
group_matches = self.settings['impersonation']['groupRules'] if is_user == None or is_user == False else []
if dss_group is not None:
group_matches = [m for m in group_matches if dss_group == m.get('dssGroup', None)]
if unix_user is not None:
group_matches = [m for m in group_matches if unix_user == m.get('targetUnix', None)]
if hadoop_user is not None:
group_matches = [m for m in group_matches if hadoop_user == m.get('targetHadoop', None)]
if rule_type is not None:
group_matches = [m for m in group_matches if rule_type == m.get('type', None)]
all_matches = []
for m in user_matches:
all_matches.append(DSSUserImpersonationRule(m))
for m in group_matches:
all_matches.append(DSSGroupImpersonationRule(m))
return all_matches
def remove_impersonation_rules(self, dss_user=None, dss_group=None, unix_user=None, hadoop_user=None, project_key=None, scope=None, rule_type=None, is_user=None):
"""
Remove the user or group impersonation rules that matches the parameters from the settings
:param dss_user: a DSS user or regular expression to match DSS user names
:param dss_group: a DSS group or regular expression to match DSS groups
:param unix_user: a name to match the target UNIX user
:param hadoop_user: a name to match the target Hadoop user
:param project_key: a project_key
:param scope: project-scoped ('PROJECT') or global ('GLOBAL')
        :param rule_type: the rule user or group matching method ('IDENTITY', 'SINGLE_MAPPING', 'REGEXP_RULE')
:param is_user: True if only user-level rules should be considered, False for only group-level rules, None to consider both
"""
for m in self.get_impersonation_rules(dss_user, dss_group, unix_user, hadoop_user, project_key, scope, rule_type, is_user):
if isinstance(m, DSSUserImpersonationRule):
self.settings['impersonation']['userRules'].remove(m.raw)
elif isinstance(m, DSSGroupImpersonationRule):
self.settings['impersonation']['groupRules'].remove(m.raw)
########################################################
# Admin actions
########################################################
def push_container_exec_base_images(self):
"""
Push the container exec base images to their repository
"""
resp = self.client._perform_json("POST", "/admin/container-exec/actions/push-base-images")
if resp is None:
raise Exception('Container exec base image push returned no data')
if resp.get('messages', {}).get('error', False):
raise Exception('Container exec base image push failed : %s' % (json.dumps(resp.get('messages', {}).get('messages', {}))))
return resp
class DSSUserImpersonationRule(object):
"""
Helper to build user-level rule items for the impersonation settings
"""
def __init__(self, raw=None):
self.raw = raw if raw is not None else {'scope':'GLOBAL','type':'IDENTITY'}
def scope_global(self):
"""
Make the rule apply to all projects
"""
self.raw['scope'] = 'GLOBAL'
return self
def scope_project(self, project_key):
"""
Make the rule apply to a given project
Args:
project_key : the project this rule applies to
"""
self.raw['scope'] = 'PROJECT'
self.raw['projectKey'] = project_key
return self
def user_identity(self):
"""
Make the rule map each DSS user to a UNIX user of the same name
"""
self.raw['type'] = 'IDENTITY'
return self
def user_single(self, dss_user, unix_user, hadoop_user=None):
"""
Make the rule map a given DSS user to a given UNIX user
Args:
dss_user : a DSS user
unix_user : a UNIX user
hadoop_user : a Hadoop user (optional, defaults to unix_user)
"""
self.raw['type'] = 'SINGLE_MAPPING'
self.raw['dssUser'] = dss_user
self.raw['targetUnix'] = unix_user
self.raw['targetHadoop'] = hadoop_user
return self
def user_regexp(self, regexp, unix_user, hadoop_user=None):
"""
        Make the rule map DSS users matching a given regular expression to a given UNIX user
Args:
regexp : a regular expression to match DSS user names
unix_user : a UNIX user
hadoop_user : a Hadoop user (optional, defaults to unix_user)
"""
self.raw['type'] = 'REGEXP_RULE'
self.raw['ruleFrom'] = regexp
self.raw['targetUnix'] = unix_user
self.raw['targetHadoop'] = hadoop_user
return self
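# A minimal usage sketch of this builder together with
# DSSGeneralSettings.add_impersonation_rule (requires an admin DSSClient instance):
#
#     settings = client.get_general_settings()
#     rule = DSSUserImpersonationRule().scope_global().user_identity()
#     settings.add_impersonation_rule(rule)
#     settings.save()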
class DSSGroupImpersonationRule(object):
"""
Helper to build group-level rule items for the impersonation settings
"""
def __init__(self, raw=None):
self.raw = raw if raw is not None else {'type':'IDENTITY'}
def group_identity(self):
"""
Make the rule map each DSS user to a UNIX user of the same name
"""
self.raw['type'] = 'IDENTITY'
return self
def group_single(self, dss_group, unix_user, hadoop_user=None):
"""
        Make the rule map a given DSS group to a given UNIX user
Args:
dss_group : a DSS group
unix_user : a UNIX user
hadoop_user : a Hadoop user (optional, defaults to unix_user)
"""
self.raw['type'] = 'SINGLE_MAPPING'
self.raw['dssGroup'] = dss_group
self.raw['targetUnix'] = unix_user
self.raw['targetHadoop'] = hadoop_user
return self
def group_regexp(self, regexp, unix_user, hadoop_user=None):
"""
        Make the rule map DSS groups matching a given regular expression to a given UNIX user
Args:
regexp : a regular expression to match DSS groups
unix_user : a UNIX user
hadoop_user : a Hadoop user (optional, defaults to unix_user)
"""
self.raw['type'] = 'REGEXP_RULE'
self.raw['ruleFrom'] = regexp
self.raw['targetUnix'] = unix_user
self.raw['targetHadoop'] = hadoop_user
return self
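# Illustrative sketch (not part of the original source): a group-level rule mapping DSS
# groups whose name matches a regular expression to a shared UNIX account, appended the
# same way as the user-level example above. Names are placeholders.
#
#   rule = DSSGroupImpersonationRule().group_regexp("analyst.*", "svc_analysts")
#   general_settings.settings['impersonation']['groupRules'].append(rule.raw)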
class DSSCodeEnv(object):
"""
A code env on the DSS instance.
Do not create this directly, use :meth:`dataikuapi.DSSClient.get_code_env`
"""
def __init__(self, client, env_lang, env_name):
self.client = client
self.env_lang = env_lang
self.env_name = env_name
########################################################
# Env deletion
########################################################
def delete(self):
"""
Delete the code env
Note: this call requires an API key with admin rights
"""
resp = self.client._perform_json(
"DELETE", "/admin/code-envs/%s/%s" % (self.env_lang, self.env_name))
if resp is None:
raise Exception('Env deletion returned no data')
if resp.get('messages', {}).get('error', False):
raise Exception('Env deletion failed : %s' % (json.dumps(resp.get('messages', {}).get('messages', {}))))
return resp
########################################################
# Code env description
########################################################
def get_definition(self):
"""
Get the code env's definition
Note: this call requires an API key with admin rights
:returns: the code env definition, as a dict
"""
return self.client._perform_json(
"GET", "/admin/code-envs/%s/%s" % (self.env_lang, self.env_name))
def set_definition(self, env):
"""
Set the code env's definition. The definition should come from a call to :meth:`get_definition`
Fields that can be updated in design node:
* env.permissions, env.usableByAll, env.desc.owner
* env.specCondaEnvironment, env.specPackageList, env.externalCondaEnvName, env.desc.installCorePackages,
env.desc.installJupyterSupport, env.desc.yarnPythonBin
Fields that can be updated in automation node (where {version} is the updated version):
* env.permissions, env.usableByAll, env.owner
* env.{version}.specCondaEnvironment, env.{version}.specPackageList, env.{version}.externalCondaEnvName,
env.{version}.desc.installCorePackages, env.{version}.desc.installJupyterSupport, env.{version}.desc.yarnPythonBin
Note: this call requires an API key with admin rights
        :param dict env: a code env definition
:return: the updated code env definition, as a dict
"""
return self.client._perform_json(
"PUT", "/admin/code-envs/%s/%s" % (self.env_lang, self.env_name), body=env)
def get_version_for_project(self, project_key):
"""
Resolve the code env version for a given project
Note: version will only be non-empty for versioned code envs actually used by the project
:returns: the code env reference, with a version field
"""
return self.client._perform_json(
"GET", "/admin/code-envs/%s/%s/%s/version" % (self.env_lang, self.env_name, project_key))
########################################################
# Code env actions
########################################################
def set_jupyter_support(self, active):
"""
Update the code env jupyter support
Note: this call requires an API key with admin rights
:param active: True to activate jupyter support, False to deactivate
"""
resp = self.client._perform_json(
"POST", "/admin/code-envs/%s/%s/jupyter" % (self.env_lang, self.env_name),
params = {'active':active})
if resp is None:
raise Exception('Env update returned no data')
if resp.get('messages', {}).get('error', False):
raise Exception('Env update failed : %s' % (json.dumps(resp.get('messages', {}).get('messages', {}))))
return resp
def update_packages(self, force_rebuild_env=False):
"""
Update the code env packages so that it matches its spec
Note: this call requires an API key with admin rights
"""
resp = self.client._perform_json(
"POST", "/admin/code-envs/%s/%s/packages" % (self.env_lang, self.env_name),
params={"forceRebuildEnv": force_rebuild_env})
if resp is None:
raise Exception('Env update returned no data')
if resp.get('messages', {}).get('error', False):
raise Exception('Env update failed : %s' % (json.dumps(resp.get('messages', {}).get('messages', {}))))
return resp
def update_images(self, env_version=None):
"""
Rebuild the docker image of the code env
Note: this call requires an API key with admin rights
"""
resp = self.client._perform_json(
"POST", "/admin/code-envs/%s/%s/images" % (self.env_lang, self.env_name),
params={"envVersion": env_version})
if resp is None:
raise Exception('Env image build returned no data')
if resp.get('messages', {}).get('error', False):
raise Exception('Env image build failed : %s' % (json.dumps(resp.get('messages', {}).get('messages', {}))))
return resp
def list_usages(self, env_version=None):
"""
List usages of the code env in the instance
:return: a list of objects where the code env is used
"""
return self.client._perform_json(
"GET", "/admin/code-envs/%s/%s/usages" % (self.env_lang, self.env_name), params={"envVersion": env_version})
def list_logs(self, env_version=None):
"""
List logs of the code env in the instance
:return: a list of log descriptions
"""
return self.client._perform_json(
"GET", "/admin/code-envs/%s/%s/logs" % (self.env_lang, self.env_name), params={"envVersion": env_version})
def get_log(self, log_name):
"""
Get the logs of the code env
Args:
log_name: name of the log to fetch
Returns:
the log, as a string
"""
return self.client._perform_text(
"GET", "/admin/code-envs/%s/%s/logs/%s" % (self.env_lang, self.env_name, log_name))
class DSSGlobalApiKey(object):
"""
A global API key on the DSS instance
"""
def __init__(self, client, key):
self.client = client
self.key = key
########################################################
# Key deletion
########################################################
def delete(self):
"""
Delete the api key
Note: this call requires an API key with admin rights
"""
return self.client._perform_empty(
"DELETE", "/admin/globalAPIKeys/%s" % self.key)
########################################################
# Key description
########################################################
def get_definition(self):
"""
Get the API key's definition
Note: this call requires an API key with admin rights
Returns:
            the API key definition, as a JSON object
"""
return self.client._perform_json(
"GET", "/admin/globalAPIKeys/%s" % (self.key))
def set_definition(self, definition):
"""
Set the API key's definition.
Note: this call requires an API key with admin rights
Args:
definition: the definition for the API key, as a JSON object.
"""
return self.client._perform_empty(
"PUT", "/admin/globalAPIKeys/%s" % self.key,
body = definition)
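# Illustrative sketch (not part of the original source): reading a global API key's
# definition, editing it and writing it back. The client accessor name and the "label"
# field are assumptions used only for illustration.
#
#   key = client.get_global_api_key("<key>")
#   definition = key.get_definition()
#   definition["label"] = "CI key"
#   key.set_definition(definition)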
class DSSCluster(object):
"""
A handle to interact with a cluster on the DSS instance
"""
def __init__(self, client, cluster_id):
"""Do not call that directly, use :meth:`dataikuapi.DSSClient.get_cluster`"""
self.client = client
self.cluster_id = cluster_id
########################################################
# Cluster deletion
########################################################
def delete(self):
"""
        Deletes the cluster. This does not stop it first.
"""
self.client._perform_empty(
"DELETE", "/admin/clusters/%s" % (self.cluster_id))
########################################################
# Cluster description
########################################################
def get_settings(self):
"""
Get the cluster's settings. This includes opaque data for the cluster if this is
a started managed cluster.
The returned object can be used to save settings.
:returns: a :class:`DSSClusterSettings` object to interact with cluster settings
:rtype: :class:`DSSClusterSettings`
"""
settings = self.client._perform_json(
"GET", "/admin/clusters/%s" % (self.cluster_id))
return DSSClusterSettings(self.client, self.cluster_id, settings)
def set_definition(self, cluster):
"""
Set the cluster's definition. The definition should come from a call to the get_definition()
method.
:param cluster: a cluster definition
Returns:
the updated cluster definition, as a JSON object
"""
return self.client._perform_json(
"PUT", "/admin/clusters/%s" % (self.cluster_id), body=cluster)
def get_status(self):
"""
Get the cluster's status and usage
:returns: The cluster status, as a :class:`DSSClusterStatus` object
:rtype: :class:`DSSClusterStatus`
"""
status = self.client._perform_json("GET", "/admin/clusters/%s/status" % (self.cluster_id))
return DSSClusterStatus(self.client, self.cluster_id, status)
########################################################
# Cluster actions
########################################################
def start(self):
"""
Starts or attaches the cluster.
This operation is only valid for a managed cluster.
"""
resp = self.client._perform_json(
"POST", "/admin/clusters/%s/actions/start" % (self.cluster_id))
if resp is None:
raise Exception('Cluster operation returned no data')
if resp.get('messages', {}).get('error', False):
raise Exception('Cluster operation failed : %s' % (json.dumps(resp.get('messages', {}).get('messages', {}))))
return resp
def stop(self, terminate=True):
"""
Stops or detaches the cluster
This operation is only valid for a managed cluster.
:param boolean terminate: whether to delete the cluster after stopping it
"""
resp = self.client._perform_json(
"POST", "/admin/clusters/%s/actions/stop" % (self.cluster_id),
params = {'terminate':terminate})
if resp is None:
            raise Exception('Cluster operation returned no data')
if resp.get('messages', {}).get('error', False):
raise Exception('Cluster operation failed : %s' % (json.dumps(resp.get('messages', {}).get('messages', {}))))
return resp
class DSSClusterSettings(object):
"""
The settings of a cluster
"""
def __init__(self, client, cluster_id, settings):
"""Do not call directly, use :meth:`DSSCluster.get_settings`"""
self.client = client
self.cluster_id = cluster_id
self.settings = settings
def get_raw(self):
"""
Gets all settings as a raw dictionary. This returns a reference to the raw settings, not a copy,
so changes made to the returned object will be reflected when saving.
Fields that can be updated:
- permissions, usableByAll, owner
- params
"""
return self.settings
def get_plugin_data(self):
"""
If this is a managed attached cluster, returns the opaque data returned by the cluster's start
operation. Else, returns None.
You should generally not modify this
"""
return self.settings.get("data", None)
def save(self):
"""Saves back the settings to the cluster"""
return self.client._perform_json(
"PUT", "/admin/clusters/%s" % (self.cluster_id), body=self.settings)
class DSSClusterStatus(object):
"""
The status of a cluster
"""
def __init__(self, client, cluster_id, status):
"""Do not call directly, use :meth:`DSSCluster.get_Status`"""
self.client = client
self.cluster_id = cluster_id
self.status = status
def get_raw(self):
"""
Gets the whole status as a raw dictionary.
"""
return self.status
| 2.96875
| 3
|
app/myCarApp/migrations/0007_auto_20200312_1505.py
|
irokas/myCarApp
| 0
|
12776598
|
# Generated by Django 2.1 on 2020-03-12 15:05
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('myCarApp', '0006_auto_20200312_1502'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='groups',
),
migrations.RemoveField(
model_name='user',
name='user_permissions',
),
migrations.RenameField(
model_name='lesseeprofile',
old_name='User',
new_name='user',
),
migrations.RenameField(
model_name='lessorprofile',
old_name='User',
new_name='user',
),
migrations.DeleteModel(
name='User',
),
]
| 1.617188
| 2
|
computations/plot_teacher_action.py
|
matthiasgruber/supervisor
| 3
|
12776599
|
import numpy as np
from skopt.space import Space
from skopt.sampler import Grid
import matplotlib.pyplot as plt
import seaborn as sns
def plot_teacher_action():
space = Space([(-1., 1.), (-1., 1.)])
grid = Grid(border="include", use_full_layout=False)
action_manipulated = grid.generate(space.dimensions, 160)
action_manipulated = np.array(action_manipulated)
action_manipulated2 = \
np.append(action_manipulated[(action_manipulated[:, 0] < -0.3) * (action_manipulated[:, 1] < -0.3), :],
action_manipulated[(action_manipulated[:, 0] > 0.3) * (action_manipulated[:, 1] > 0.3), :],
axis=0)
action_manipulated2 = \
np.append(action_manipulated2,
action_manipulated[(action_manipulated[:, 0] > 0.3) * (action_manipulated[:, 1] < -0.3), :],
axis=0)
action_manipulated2 = \
np.append(action_manipulated2,
action_manipulated[(action_manipulated[:, 0] < -0.3) * (action_manipulated[:, 1] > 0.3), :],
axis=0)
action_manipulated = np.array([[-0.1, 0],
[0.1, 0],
[0, 0.1],
[0, -0.1],
[-0.25, 0],
[0.25, 0],
[0, 0.25],
[0, -0.25],
[-0.1, 0.1],
[0.1, 0.1],
[-0.1, -0.1],
[0.1, -0.1],
[-0.25, 0.25],
[0.25, 0.25],
[-0.25, -0.25],
[0.25, -0.25],
[0.1, 0.05],
[0.05, 0.1],
[0.05, -0.1],
[-0.25, 0.1],
[0.25, 0.8],
[0.6, 0.25],
[0.3, -0.25],
[-0.1, 0.7],
[0.9, 0.1],
[-0.1, -1],
[1, -0.1],
[-0.2, 0.75],
[0.5, 0.5],
[-0.5, -0.5],
[0.75, 0],
[0.15, 0.05],
[0.6, 0.1],
[0.4, -0.1],
[-0.25, 0.15],
[0.25, 0.9],
[-0.35, 0.25],
[0.5, -0.25],
[-0.19, 0.19],
[1, 1],
[-1, -1],
[0, 1],
[-1, 0],
[0.2, 0.75],
[-0.8, 0],
[0, -0.58]])
plt.figure()
sns.set_style("whitegrid", {'axes.grid': True, 'axes.edgecolor': 'black'})
sns.scatterplot(data=action_manipulated, x=action_manipulated[:, 0], y=action_manipulated[:, 1])
plt.xlabel('velocity x')
plt.ylabel('velocity y')
plt.ylim(bottom=-1.05, top=1.05)
plt.xlim(-1.05, 1.05)
plt.savefig("art/plots/teacher_action_random.png", dpi=100, transparent=True)
plt.show()
plt.figure()
sns.set_style("whitegrid", {'axes.grid': True, 'axes.edgecolor': 'black'})
sns.scatterplot(data=action_manipulated2, x=action_manipulated2[:, 0], y=action_manipulated2[:, 1])
plt.xlabel('velocity x')
plt.ylabel('velocity y')
plt.ylim(bottom=-1.05, top=1.05)
plt.xlim(-1.05, 1.05)
plt.savefig("art/plots/teacher_action_grid.png", dpi=100, transparent=True)
plt.show()
| 2.28125
| 2
|
src/adversaries/momentum_fgsm_transfer.py
|
googleinterns/out-of-distribution
| 0
|
12776600
|
import json
import os
import torch
from torch import nn
from root import from_root
from src.adversaries.adversary import Adversary, AdversaryOutput
from src.experiments.config import create_resnet
from src.misc.collection_object import DictObject
from src.misc.utils import model_device
class MomentumFgsmTransfer(Adversary):
"""
    Implements the Momentum Iterative FGSM method for generating adversarial examples in the context of a black-box
    transfer-based attack, i.e. perturbations are generated on the surrogate model passed to the constructor.
"""
surrogate_model: nn.Module
epsilon: float
n_iters: int
decay_factor: float
def __init__(self, surrogate_cfg_filepath: str, epsilon: float, n_iters: int, decay_factor: float):
self.init_surrogate_model(surrogate_cfg_filepath)
self.epsilon = epsilon
self.n_iters = n_iters
self.decay_factor = decay_factor
def init_surrogate_model(self, surrogate_cfg_filepath: str) -> None:
with open(from_root(surrogate_cfg_filepath), "r") as file:
cfg = DictObject(json.load(file))
self.surrogate_model = create_resnet(cfg)
self.surrogate_model = self.surrogate_model.to(cfg.model.device)
best_epoch_filepath = os.path.join(from_root(cfg.out_dirpath), "checkpoints/best_epoch.txt")
with open(best_epoch_filepath, "r") as file:
epoch = int(file.read())
checkpoint_filepath = os.path.join(from_root(cfg.out_dirpath), f"checkpoints/checkpoint_{epoch}.pth")
checkpoint = torch.load(checkpoint_filepath, map_location=model_device(self.surrogate_model))
self.surrogate_model.load_state_dict(checkpoint["model_state_dict"])
self.surrogate_model.eval()
for param in self.surrogate_model.parameters():
param.requires_grad = False
def __call__(self, model: nn.Module, images: torch.Tensor, labels: torch.Tensor) -> AdversaryOutput:
step_size = self.epsilon / self.n_iters
velocity = torch.zeros_like(images)
result = images.clone()
result.requires_grad = True
for _ in range(self.n_iters):
if result.grad is not None:
result.grad.detach_()
result.grad.zero_()
loss = self.compute_objective(self.surrogate_model, result, labels, "mean")
loss.backward()
velocity = self.decay_factor * velocity + result.grad / torch.norm(result.grad, p=1)
with torch.no_grad():
result += step_size * torch.sign(velocity)
result.clamp_(0, 1)
result.requires_grad = False
return AdversaryOutput(result, result - images)
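# Illustrative usage sketch (not part of the original source). The config path, attack
# budget and the way the AdversaryOutput fields are named are assumptions based on the
# constructor calls above; images are expected in [0, 1] as enforced by the clamp.
#
#   adversary = MomentumFgsmTransfer("configs/surrogate_resnet.json", epsilon=8 / 255,
#                                    n_iters=10, decay_factor=1.0)
#   output = adversary(target_model, images, labels)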
| 2.234375
| 2
|
core/BuildRelationModel.py
|
jakelever/VERSE
| 14
|
12776601
|
import sys
import fileinput
import argparse
import time
import itertools
import pickle
import random
import codecs
from collections import defaultdict
from sklearn import svm
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction import DictVectorizer
from scipy.sparse import coo_matrix, hstack, vstack
import numpy as np
import json
from ClassifierStuff import *
from SentenceModel import *
from CandidateBuilder import generateRelationCandidates,findTrigger
def createRelationClassifier(sentenceAndEventData,targetRelations,targetArguments,parameters=None,generateClassifier=True,sentenceRange=0,doFiltering=False):
classes,examples,relTypes = generateRelationCandidates(sentenceAndEventData,targetRelations,targetArguments,sentenceRange,doFiltering)
assert min(classes) == 0, "Expecting negative cases in relation examples"
assert max(classes) > 0, "Expecting positive cases in relation examples"
vectors,vectorizer,featureSelector = buildVectorizer(classes,examples,parameters)
classifier = None
if generateClassifier:
classifier = buildClassifierFromVectors(classes,vectors,parameters)
data = (classes,examples,vectors,relTypes)
return data,vectorizer,featureSelector,classifier
# It's the main bit. Yay!
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='VERSE Relation Extraction tool')
parser.add_argument('--trainingFile', required=True, type=str, help='Parsed-text file containing the training data')
parser.add_argument('--relationDescriptions', required=True, type=str, help='Description file containing list of relation types with arguments to predict')
parser.add_argument('--parameters', type=str, help='Parameters to use for feature construction, selection and classification')
parser.add_argument('--modelFile', type=str, help='Output filename for data with predicted modifications')
args = parser.parse_args()
parameters = {}
if args.parameters:
for arg in args.parameters.split(';'):
name,value = arg.strip().split(":")
parameters[name.strip()] = value.strip()
sentenceRange = 0
if "sentenceRange" in parameters:
sentenceRange = int(parameters["sentenceRange"])
trainFilename = args.trainingFile
with open(trainFilename, 'r') as f:
trainingSentenceAndEventData = pickle.load(f)
print "Loaded " + trainFilename
tmpTargetRelations = set()
for filename,data in trainingSentenceAndEventData.iteritems():
sentenceData = data[0]
relations = data[1]
for (relName,id1,id2) in relations:
sentenceid1,locs1 = findTrigger(sentenceData,id1)
sentenceid2,locs2 = findTrigger(sentenceData,id2)
type1 = sentenceData[sentenceid1].locsToTriggerTypes[tuple(locs1)]
type2 = sentenceData[sentenceid2].locsToTriggerTypes[tuple(locs2)]
tmpTargetRelations.add((relName,type1,type2))
print "#"*30
for relName,type1,type2 in tmpTargetRelations:
print "%s\t%s\t%s" % (relName,type1,type2)
print "#"*30
doFiltering = False
if 'doFiltering' in parameters and parameters['doFiltering'] == 'True':
doFiltering = True
#targetRelations = []
targetRelations,targetArguments = set(),set()
#typeLookup = {}
with open(args.relationDescriptions,'r') as f:
for line in f:
nameAndArgs,type1,type2 = line.strip().split('\t')
# Pull out the name of arguments and sort by the argument names
nameAndArgsSplit = nameAndArgs.split(';')
# Basically don't do anything if we aren't given the argument names
if len(nameAndArgsSplit) == 1:
targetRelations.add(tuple(nameAndArgsSplit))
targetArguments.add((type1,type2))
else: # Or do sort by argument names (if they are provided)
relName,argName1,argName2 = nameAndArgs.split(';')
relArgs = [(argName1,type1),(argName2,type2)]
relArgs = sorted(relArgs)
targetRelations.add((relName,relArgs[0][0],relArgs[1][0]))
targetArguments.add((relArgs[0][1],relArgs[1][1]))
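    # Illustrative (hypothetical) examples of relationDescriptions lines, tab-separated,
    # based on the parsing above: either
    #   "Binding<TAB>Protein<TAB>Protein"                        (no argument names)
    # or
    #   "Regulation;Cause;Theme<TAB>Protein<TAB>Gene_expression" (name plus the two argument names)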
targetRelations = list(targetRelations)
targetRelations = sorted(targetRelations)
targetRelationsToIDs = { arg:i+1 for i,arg in enumerate(targetRelations) }
print "-"*30
for targetRelation in targetRelations:
print targetRelation
print "-"*30
for targetArgument in targetArguments:
print targetArgument
print "-"*30
relData,argVec,argFS,argClf = createRelationClassifier(trainingSentenceAndEventData,targetRelationsToIDs,targetArguments,parameters,True,sentenceRange,doFiltering)
model = {}
model['parameters'] = parameters;
model['targetRelations'] = targetRelations;
model['targetRelationsToIDs'] = targetRelationsToIDs;
model['targetArguments'] = targetArguments;
model['argVec'] = argVec;
model['argFS'] = argFS;
model['argClf'] = argClf;
with open(args.modelFile,'w') as f:
pickle.dump(model,f)
| 2.390625
| 2
|
myclass/class_mysql_backup_to_zip_files.py
|
ysh329/mysql-backup
| 0
|
12776602
|
# -*- coding: utf-8 -*-
# !/usr/bin/python
################################### PART0 DESCRIPTION #################################
# Filename: class_mysql_backup_to_zip_files.py
# Description:
# Author: <NAME>
# E-mail: <EMAIL>
# Create: 2015-9-24 17:50:39
# Last:
__author__ = 'yuens'
################################### PART1 IMPORT ######################################
import MySQLdb
import os
################################ PART2 CLASS && FUNCTION ##############################
class get_back_database_name_list(object):
def __init__(self, mysql_user, mysql_passwd):
self.con = MySQLdb.connect(host = 'localhost',
user = mysql_user,#'root',
passwd = <PASSWORD>,#'<PASSWORD>',
#db = '',
charset = 'utf8')
def __del__(self):
self.con.close()
def get_all_database_name_list(self):
cursor = self.con.cursor()
cursor.execute("""SHOW DATABASES""")
database_name_2d_tuple = cursor.fetchall()
all_database_name_list = map(lambda database_name_tuple: database_name_tuple[0], database_name_2d_tuple)
return all_database_name_list
def filter_database_name_list(self, backup_database_name_list, all_database_name_list, default_existed_database_name_list):
if backup_database_name_list == []:
backup_database_name_list = list(set(all_database_name_list) - set(default_existed_database_name_list))
else:
backup_database_name_list = list(set(backup_database_name_list) - (set(backup_database_name_list) - set(all_database_name_list)))
return backup_database_name_list
def backup_backup_database_name_list(self, backup_database_name_list):
for database_name in iter(backup_database_name_list):
os.system("".format())
############################ PART3 CLASS && FUNCTION TEST #############################
default_existed_database_name_list = ['information_schema', 'performance_schema']
backup_database_name_list = []
Backup2ZipFile = get_back_database_name_list(mysql_user = 'root',
mysql_passwd = '<PASSWORD>')
all_database_name_list = Backup2ZipFile.get_all_database_name_list()
print "all_database_name_list:%s" % all_database_name_list
backup_database_name_list = Backup2ZipFile.filter_database_name_list(backup_database_name_list = backup_database_name_list,
all_database_name_list = all_database_name_list,
default_existed_database_name_list = default_existed_database_name_list)
print "backup_database_name_list:%s" % backup_database_name_list
| 2.84375
| 3
|
Exercicios/ex045.py
|
MateusBarboza99/Python-03-
| 0
|
12776603
|
from random import randint
from time import sleep
itens = ('Pedra', 'Papel','Tesoura')
computador = randint(0, 2)
print('''\033[1;31mSuas opções\033[m:
[ 0 ] PEDRA
[ 1 ] PAPEL
[ 2 ] TESOURA''')
jogador = int(input('\033[1;34mQual é a sua Jogada?\033[m '))
print('\033[1;30mJO\033[m')
sleep(1)
print('\033[1;34mKEN\033[m')
sleep(1)
print('\033[1;33mPO!!\033[m')
sleep(1)
print('\033[35m-=\033[m' * 11)
print('\033[1;32mComputador jogou\033[m ' ' \033[1;35m{}\033[m'.format(itens[computador]))
print('\033[1;36mJogador jogou\033[m ' ' \033[1;32m{}\033[m'. format(itens[jogador]))
print('\033[35m-=\033[m' * 11)
if computador == 0:  # computer played PEDRA (rock)
if jogador == 0:
print('\033[1;37mEMPATE\033[m')
elif jogador == 1:
print('\033[1;43mJOGADOR VENCEU\033[m')
elif jogador == 2:
print('\033[1;31mCOMPUTADOR VENCEU\033[m')
else:
print('\033[4;33;40mJOGADA INVÁLIDA\033[m!')
elif computador == 1:  # computer played PAPEL (paper)
if jogador == 0:
print('\033[1;31mCOMPUTADOR VENCEU\033[m')
elif jogador == 1:
print('\033[1;37mEMPATE\033[m')
elif jogador == 2:
print('\033[1;34mJOGADOR VENCEU\033[m')
else:
        print('\033[4;33;40mJOGADA INVÁLIDA\033[m!')
elif computador == 2:  # computer played TESOURA (scissors)
if jogador == 0:
print('\033[1;34mJOGADOR VENCEU\033[m')
elif jogador == 1:
print('\033[1;31mCOMPUTADOR VENCEU\033[m')
elif jogador == 2:
print('\033[1;37mEMPATE\033[m')
else:
        print('\033[4;33;40mJOGADA INVÁLIDA\033[m!')
| 3.5
| 4
|
setup.py
|
TylerTemp/docpie
| 18
|
12776604
|
# from distutils.core import setup
from setuptools import setup
import os
from docpie import __version__
setup(
name="docpie",
packages=["docpie"],
package_data={
'': [
'README.rst',
'LICENSE',
'CHANGELOG.md'
],
'docpie': [
'example/*.py',
'example/git/*.py'
],
},
version=__version__,
author="TylerTemp",
author_email="<EMAIL>",
url="http://docpie.comes.today/",
download_url="https://github.com/TylerTemp/docpie/tarball/%s/" % __version__,
license='MIT',
description=("An easy and Pythonic way to create "
"your POSIX command line interface"),
keywords='option arguments parsing optparse argparse getopt docopt',
long_description=open(
os.path.join(os.path.dirname(__file__), 'README.rst')).read(),
platforms='any',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'License :: OSI Approved :: MIT License',
'Topic :: Utilities',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: PyPy',
],
)
| 1.429688
| 1
|
venv/Lib/site-packages/PIL/_version.py
|
deerajnagothu/pyenf_extraction
| 6
|
12776605
|
# Master version for Pillow
__version__ = '5.3.0'
| 1.0625
| 1
|
custom_components/reef_pi/sensor.py
|
tdragon/reef-pi-hass-custom
| 3
|
12776606
|
<gh_stars>1-10
"""Platform for reef-pi sensor integration."""
from homeassistant.const import (
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
DEGREE)
from homeassistant.components.sensor import SensorDeviceClass, SensorStateClass
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from homeassistant.components.sensor import SensorEntity
from homeassistant.helpers.typing import StateType
from .const import _LOGGER, DOMAIN, MANUFACTURER
from datetime import datetime
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Add an temperature entity from a config_entry."""
coordinator = hass.data[DOMAIN][config_entry.entry_id]["coordinator"]
base_name = coordinator.info["name"] + ": "
sensors = [
ReefPiTemperature(id, base_name + tcs["name"], coordinator)
for id, tcs in coordinator.tcs.items()
]
ph_sensors = [
ReefPiPh(id, base_name + ph["name"], coordinator)
for id, ph in coordinator.ph.items()
]
pumps = [
ReefPiPump(id, base_name + "pump_" + id, coordinator)
for id in coordinator.pumps.keys()
]
atos = [
ReefPiATO(id, base_name + ato["name"] + " Last Run", False, coordinator)
for id, ato in coordinator.ato.items()
] + [
ReefPiATO(id, base_name + ato["name"] + " Duration", True, coordinator)
for id, ato in coordinator.ato.items()
]
_LOGGER.debug("sensor base name: %s, temperature: %d, pH: %d", base_name, len(sensors), len(ph_sensors))
async_add_entities(sensors)
async_add_entities(ph_sensors)
async_add_entities([ReefPiBaicInfo(coordinator)])
async_add_entities(pumps)
async_add_entities(atos)
class ReefPiBaicInfo(CoordinatorEntity, SensorEntity):
_attr_native_unit_of_measurement = TEMP_CELSIUS
def __init__(self, coordinator):
"""Initialize the sensor."""
super().__init__(coordinator)
self.api = coordinator
_attr_device_class = SensorDeviceClass.TEMPERATURE
_attr_state_class = SensorStateClass.MEASUREMENT
@property
def device_info(self):
info = {
'identifiers': {
(DOMAIN, self.coordinator.unique_id)
},
'default_name': self.api.default_name,
'default_manufacturer': MANUFACTURER,
"default_model" : "Reef PI",
"configuration_url": self.api.configuration_url
}
if self.api.info:
info['model'] = self.api.info["model"]
info['sw_version'] = self.api.info["version"]
info['name'] = self.name
return info
@property
def icon(self):
return "mdi:fishbowl-outline"
@property
def name(self):
"""Return the name of the sensor"""
if not self.api.info or not "name" in self.api.info:
return "ReefPiBaicInfo"
return self.api.info["name"]
@property
def unique_id(self):
"""Return a unique_id for this entity."""
return f"{self.coordinator.unique_id}_info"
@property
def available(self):
"""Return if teperature"""
return self.api.info and "name" in self.api.info
@property
def native_value(self) -> StateType:
"""Return the state of the sensor."""
return self.api.info["cpu_temperature"]
@property
def extra_state_attributes(self):
if self.api.info:
return self.api.info
return {}
class ReefPiTemperature(CoordinatorEntity, SensorEntity):
def __init__(self, id, name, coordinator):
"""Initialize the sensor."""
super().__init__(coordinator)
self._id = id
self._name = name
self.api = coordinator
_attr_device_class = SensorDeviceClass.TEMPERATURE
_attr_state_class = SensorStateClass.MEASUREMENT
@property
def device_info(self):
return {
'identifiers': {
(DOMAIN, self.coordinator.unique_id)
}}
@property
def name(self):
"""Return the name of the sensor"""
return self._name
@property
def unique_id(self):
"""Return a unique_id for this entity."""
return f"{self.coordinator.unique_id}_tcs_{self._id}"
@property
def native_unit_of_measurement(self):
"""Return the unit of measurement."""
if self.available and self.api.tcs[self._id]["fahrenheit"]:
return TEMP_FAHRENHEIT
return TEMP_CELSIUS
@property
def available(self):
"""Return if available"""
return self._id in self.api.tcs.keys()
@property
def native_value(self) -> StateType:
"""Return the state of the sensor."""
return self.api.tcs[self._id]["temperature"]
@property
def extra_state_attributes(self):
return self.api.tcs[self._id]["attributes"]
class ReefPiPh(CoordinatorEntity, SensorEntity):
def __init__(self, id, name, coordinator):
"""Initialize the sensor."""
super().__init__(coordinator)
self._id = id
self._name = name
self.api = coordinator
@property
def device_info(self):
return {
'identifiers': {
(DOMAIN, self.coordinator.unique_id)
}}
@property
def icon(self):
return "mdi:ph"
@property
def name(self):
"""Return the name of the sensor"""
return self._name
@property
def unique_id(self):
"""Return a unique_id for this entity."""
return f"{self.coordinator.unique_id}_ph_{self._id}"
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return DEGREE
@property
def available(self):
"""Return if available"""
return self._id in self.api.ph.keys() and self.api.ph[self._id]["value"]
@property
def state(self):
"""Return the state of the sensor."""
return self.api.ph[self._id]["value"]
@property
def extra_state_attributes(self):
return self.api.ph[self._id]["attributes"]
class ReefPiPump(CoordinatorEntity, SensorEntity):
def __init__(self, id, name, coordinator):
"""Initialize the sensor."""
super().__init__(coordinator)
self._id = id
self._name = name
self.api = coordinator
_attr_device_class = SensorDeviceClass.TIMESTAMP
@property
def device_info(self):
return {
'identifiers': {
(DOMAIN, self.coordinator.unique_id)
}}
@property
def name(self):
"""Return the name of the sensor"""
return self._name
@property
def unique_id(self):
"""Return a unique_id for this entity."""
return f"{self.coordinator.unique_id}_pump_{self._id}"
@property
def available(self):
"""Return if available"""
return self._id in self.api.pumps.keys() and self.api.pumps[self._id]["time"] != datetime.fromtimestamp(0)
@property
def state(self):
"""Return the state of the sensor."""
return self.api.pumps[self._id]["time"].isoformat()
@property
def extra_state_attributes(self):
return self.api.pumps[self._id]["attributes"]
class ReefPiATO(CoordinatorEntity, SensorEntity):
def __init__(self, id, name, show_pump, coordinator):
"""Initialize the sensor."""
super().__init__(coordinator)
self._id = id
self._name = name
self._show_pump = show_pump
self.api = coordinator
@property
def device_class(self):
if not self._show_pump:
return SensorDeviceClass.TIMESTAMP
return None
@property
def device_info(self):
return {
'identifiers': {
(DOMAIN, self.coordinator.unique_id)
}}
@property
def name(self):
"""Return the name of the sensor"""
return self._name
@property
def unique_id(self):
"""Return a unique_id for this entity."""
if self._show_pump:
return f"{self.coordinator.unique_id}_ato_{self._id}_duration"
else:
return f"{self.coordinator.unique_id}_ato_{self._id}_last_run"
@property
def available(self):
"""Return if available"""
return self._id in self.api.ato_states.keys() and self.api.ato_states[self._id]["ts"] != datetime.fromtimestamp(0)
@property
def state(self):
"""Return the state of the sensor."""
if self._show_pump:
return self.api.ato_states[self._id]["pump"]
else:
return self.api.ato_states[self._id]["ts"].isoformat()
@property
def extra_state_attributes(self):
return self.api.ato_states[self._id]
| 2.03125
| 2
|
generated-libraries/python/netapp/sis/sis_chkpoint_op_type.py
|
radekg/netapp-ontap-lib-get
| 2
|
12776607
|
<filename>generated-libraries/python/netapp/sis/sis_chkpoint_op_type.py
class SisChkpointOpType(basestring):
"""
Checkpoint type
Possible values:
<ul>
<li> "scan" - Scanning volume for fingerprints,
<li> "start" - Starting a storage efficiency
operation,
<li> "check" - Checking for stale data in the
fingerprint database,
<li> "undo" - Undoing storage efficiency on the
volume,
<li> "downgrade" - Storage efficiency operations necessary
for downgrade activity,
<li> "post_transfer" - Starting a storage efficiency operation
post mirror transfer
</ul>
"""
@staticmethod
def get_api_name():
return "sis-chkpoint-op-type"
| 1.898438
| 2
|
octotribble/Convolution/Test_convolution_between_different_resolutions.py
|
jason-neal/equanimous-octo-tribble
| 1
|
12776608
|
# Test convolving to different resolutions
# Test the effect of convolving straight to 20000 and convolving first to an intermediate resolution say 80000.
import matplotlib.pyplot as plt
import numpy as np
from IP_multi_Convolution import ip_convolution, unitary_Gauss
def main():
# fwhm = lambda/R
fwhm = 2046 / 100000
# Starting spectrum
wav = np.linspace(2040, 2050, 20000)
flux = (np.ones_like(wav) - unitary_Gauss(wav, 2045, fwhm) -
unitary_Gauss(wav, 2047, fwhm))
    # range in which to have the convolved values. Be careful of the edges!
chip_limits = [2042, 2049]
# Convolution to 80k
R = 80000
wav_80k, flux_80k = ip_convolution(wav, flux, chip_limits, R,
fwhm_lim=5.0, plot=False, verbose=True)
# Convolution to 50k
R = 50000
wav_50k, flux_50k = ip_convolution(wav, flux, chip_limits, R,
fwhm_lim=5.0, plot=False, verbose=True)
wav_80k_50k, flux_80k_50k = ip_convolution(wav_80k, flux_80k, chip_limits, R,
fwhm_lim=5.0, plot=False, verbose=True)
# Convolution to 20k
R = 20000
wav_80k_20k, flux_80k_20k = ip_convolution(wav_80k, flux_80k, chip_limits, R,
fwhm_lim=5.0, plot=False, verbose=True)
wav_50k_20k, flux_50k_20k = ip_convolution(wav_50k, flux_50k, chip_limits, R,
fwhm_lim=5.0, plot=False, verbose=True)
wav_80k_50k_20k, flux_80k_50k_20k = ip_convolution(wav_80k_50k, flux_80k_50k,
chip_limits, R, fwhm_lim=5.0,
plot=False, verbose=True)
# Convolution straight to 20000
wav_20k, flux_20k = ip_convolution(wav, flux, chip_limits, R, fwhm_lim=5.0,
plot=False, verbose=True)
# Plot the results
plt.figure(1)
plt.xlabel(r"wavelength [nm])")
plt.ylabel(r"flux [counts] ")
plt.plot(wav, flux / np.max(flux), color='k',
linestyle="-", label="Original spectra")
    plt.plot(wav_80k, flux_80k / np.max(flux_80k), color='r', linestyle="-.", label="R=80k")
plt.plot(wav_50k, flux_50k / np.max(flux_50k), color='b', linestyle="--", label="R=50k")
plt.plot(wav_80k_20k, flux_80k_20k / np.max(flux_80k_20k), color='r',
linestyle="-", label="R=80k-20k")
plt.plot(wav_50k_20k, flux_50k_20k / np.max(flux_50k_20k), color='b',
linestyle="-", label="R=50k20k")
plt.plot(wav_80k_50k_20k, flux_80k_50k_20k / np.max(flux_80k_50k_20k), color='m',
linestyle="-", label="R=80k-50k-20k")
plt.plot(wav_20k, flux_20k / np.max(flux_20k), color='c', linestyle="-", label="R=20k")
plt.legend(loc='best')
plt.title(r"Convolution by different Instrument Profiles")
plt.show()
if __name__ == "__main__":
    # The ip_convolution fails if it is not run inside __name__ == "__main__"
main()
| 2.71875
| 3
|
keras/train.py
|
TheWh1teRose/AI-Robot-for-industrial-automation
| 0
|
12776609
|
<reponame>TheWh1teRose/AI-Robot-for-industrial-automation
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import normalize
import glob
import CNN_utils as cnn
import pickle
import time
import datetime
import functools
import VGG_modells
import keras
import os.path
import gc
path = 'F:/Dokumente/Programmieren/RoboPen/UnitySimulation/AIRobot_Simulation/DataProcessing/traindata/pre/data_30x30/diff3/*.data'
file = glob.glob(path)
data = None
print(file)
for f in file:
if data is None:
print("loaded: " + f)
data = pickle.load(open(f, "rb"))
gc.collect()
X = data[0]
y = data[1]
else:
data = pickle.load(open(f, "rb"))
gc.collect()
print("loaded: " + f)
X = np.concatenate((X, data[0]))
y = np.concatenate((y, data[1]))
x_min = X.min(axis=(1,2,3,4), keepdims=True)
x_max = X.max(axis=(1,2,3,4), keepdims=True)
X = (X - x_min)/(x_max - x_min)
#X = X[:,:,:,:3]
print(X.shape)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, shuffle=True)
image_width = 30
image_height = 30
image_depth = 3
num_lable = 8
batch_size = 265
num_epochs = 5000
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0])
config = tf.ConfigProto( device_count = {'GPU': 1, 'CPU': 56} )
sess = tf.Session(config=config)
keras.backend.set_session(sess)
checkpointer = keras.callbacks.ModelCheckpoint(filepath=os.path.join('ckpts', 'LRCN' + '_{epoch:02d}_{val_loss:.2f}.hdf5'), verbose=1, save_best_only=True)
tb = keras.callbacks.TensorBoard(log_dir=os.path.join('statistics', 'LRCN'))
early_stopper = keras.callbacks.EarlyStopping(patience=7)
timestamp = time.time()
csv_logger = keras.callbacks.CSVLogger(os.path.join('logs', 'LRCN' + '-' + 'training-' + str(timestamp) + '.log'))
modell = VGG_modells.VGG_A(0.2, 7)
optimizer = keras.optimizers.Adam(lr=1e-4, decay=1e-5)
metrics = ['accuracy']
modell.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=metrics)
modell.fit(X_train,
y_train,
batch_size=batch_size,
validation_data=(X_test, y_test),
verbose=1,
callbacks=[tb, csv_logger, checkpointer],
epochs=num_epochs, shuffle=True)
| 2.5
| 2
|
api/v1/views/index.py
|
ricarhincapie/Torre_Dev
| 0
|
12776610
|
#!/usr/bin/env python3
""" Module to define API routes
"""
from api.v1.views import app_views
from flask import jsonify, request, abort, make_response
from engine.score_engine import score_engine
import requests
@app_views.route('/status', methods=['GET'], strict_slashes=False)
def status():
""" Status of API """
return jsonify({"status": "OK"})
@app_views.route('/<user_id>', methods=['GET'], strict_slashes=False)
def user_fetch(user_id):
""" Takes Torre username, fetches API and returns points """
req_string = "https://bio.torre.co/api/bios/" + str(user_id)
response = requests.get(req_string)
if response.status_code > 399:
return jsonify({"status": "error"})
my_response = response.json()
my_dict = my_response.get('stats')
my_user = my_response.get('person').get('name')
my_headline = my_response.get('person').get('professionalHeadline')
result = score_engine(**my_dict)
result['name'] = my_user
result['headline'] = my_headline
return jsonify(result)
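# Illustrative example of the endpoint's behaviour (the username and the exact score
# fields returned by score_engine are placeholders/unknown here):
#
#   GET /api/v1/<torre_username>  ->  {"name": "...", "headline": "...", ...score fields...}
#   An upstream error from bio.torre.co makes the route return {"status": "error"}.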
| 2.953125
| 3
|
tests/models/test_trading_account.py
|
ppawel/tastyworks_api
| 6
|
12776611
|
<filename>tests/models/test_trading_account.py
import datetime
import unittest
from decimal import Decimal
from tastyworks.models import option, order, underlying, trading_account
class TestTradingAccount(unittest.TestCase):
def setUp(self):
self.order_details = order.OrderDetails(
type=order.OrderType.LIMIT,
price=Decimal(400),
price_effect=order.OrderPriceEffect.CREDIT,
)
self.order_details.legs = [
option.Option(
ticker='AKS',
expiry=datetime.date(2018, 8, 31),
strike=Decimal('3.5'),
option_type=option.OptionType.CALL,
underlying_type=underlying.UnderlyingType.EQUITY,
quantity=1
)
]
self.test_order = order.Order(self.order_details)
def test_get_execute_order_json(self):
res = trading_account._get_execute_order_json(self.test_order)
expected_result = {
'source': 'WBT',
'order-type': 'Limit',
'price': '400.00',
'price-effect': 'Credit',
'time-in-force': 'Day',
'legs': [
{
'instrument-type': 'Equity Option',
'symbol': 'AKS 180831C00003500',
'quantity': 1,
'action': 'Sell to Open'
}
]
}
self.assertDictEqual(res, expected_result)
| 2.484375
| 2
|
lib/models/RefineNet.py
|
TheRevanchist/DeepWatershedDetection
| 0
|
12776612
|
<gh_stars>0
import tensorflow as tf
from tensorflow.contrib import slim
import models.resnet_v1 as resnet_v1
import os, sys
def Upsampling(inputs,scale):
return tf.image.resize_bilinear(inputs, size=[tf.shape(inputs)[1]*scale, tf.shape(inputs)[2]*scale])
def ConvBlock(inputs, n_filters, kernel_size=[3, 3]):
"""
Basic conv block for Encoder-Decoder
    Apply successively Convolution, BatchNormalization, ReLU nonlinearity
"""
net = slim.conv2d(inputs, n_filters, kernel_size, activation_fn=None, normalizer_fn=None)
net = tf.nn.relu(slim.batch_norm(net))
return net
def ConvUpscaleBlock(inputs, n_filters, kernel_size=[3, 3], scale=2):
"""
Basic conv transpose block for Encoder-Decoder upsampling
    Apply successively Transposed Convolution, BatchNormalization, ReLU nonlinearity
"""
net = slim.conv2d_transpose(inputs, n_filters, kernel_size=[3, 3], stride=[2, 2], activation_fn=None)
net = tf.nn.relu(slim.batch_norm(net))
return net
def ResidualConvUnit(inputs,n_filters=256,kernel_size=3):
"""
A local residual unit designed to fine-tune the pretrained ResNet weights
Arguments:
inputs: The input tensor
n_filters: Number of output feature maps for each conv
kernel_size: Size of convolution kernel
Returns:
Output of local residual block
"""
net=tf.nn.relu(inputs)
net=slim.conv2d(net, n_filters, kernel_size, activation_fn=None)
net=tf.nn.relu(net)
net=slim.conv2d(net,n_filters,kernel_size, activation_fn=None)
net=tf.add(net,inputs)
return net
def ChainedResidualPooling(inputs,n_filters=256):
"""
Chained residual pooling aims to capture background
context from a large image region. This component is
built as a chain of 2 pooling blocks, each consisting
of one max-pooling layer and one convolution layer. One pooling
block takes the output of the previous pooling block as
input. The output feature maps of all pooling blocks are
fused together with the input feature map through summation
of residual connections.
Arguments:
inputs: The input tensor
n_filters: Number of output feature maps for each conv
Returns:
Double-pooled feature maps
"""
net_relu=tf.nn.relu(inputs)
net=slim.max_pool2d(net_relu, [5, 5],stride=1,padding='SAME')
net=slim.conv2d(net,n_filters,3, activation_fn=None)
net_sum_1=tf.add(net,net_relu)
net = slim.max_pool2d(net_relu, [5, 5], stride=1, padding='SAME')
net = slim.conv2d(net, n_filters, 3, activation_fn=None)
net_sum_2=tf.add(net,net_sum_1)
return net_sum_2
def MultiResolutionFusion(high_inputs=None,low_inputs=None,n_filters=256):
"""
Fuse together all path inputs. This block first applies convolutions
for input adaptation, which generate feature maps of the same feature dimension
(the smallest one among the inputs), and then up-samples all (smaller) feature maps to
the largest resolution of the inputs. Finally, all features maps are fused by summation.
Arguments:
high_inputs: The input tensors that have the higher resolution
low_inputs: The input tensors that have the lower resolution
n_filters: Number of output feature maps for each conv
Returns:
Fused feature maps at higher resolution
"""
    if high_inputs is None:  # RefineNet block 4
rcu_low_1 = low_inputs[0]
rcu_low_2 = low_inputs[1]
rcu_low_1 = slim.conv2d(rcu_low_1, n_filters, 3, activation_fn=None)
rcu_low_2 = slim.conv2d(rcu_low_2, n_filters, 3, activation_fn=None)
return tf.add(rcu_low_1,rcu_low_2)
else:
rcu_low_1 = low_inputs[0]
rcu_low_2 = low_inputs[1]
rcu_low_1 = slim.conv2d(rcu_low_1, n_filters, 3, activation_fn=None)
rcu_low_2 = slim.conv2d(rcu_low_2, n_filters, 3, activation_fn=None)
rcu_low = tf.add(rcu_low_1,rcu_low_2)
rcu_high_1 = high_inputs[0]
rcu_high_2 = high_inputs[1]
rcu_high_1 = Upsampling(slim.conv2d(rcu_high_1, n_filters, 3, activation_fn=None),2)
rcu_high_2 = Upsampling(slim.conv2d(rcu_high_2, n_filters, 3, activation_fn=None),2)
rcu_high = tf.add(rcu_high_1,rcu_high_2)
return tf.add(rcu_low, rcu_high)
def RefineBlock(high_inputs=None,low_inputs=None):
"""
A RefineNet Block which combines together the ResidualConvUnits,
fuses the feature maps using MultiResolutionFusion, and then gets
large-scale context with the ResidualConvUnit.
Arguments:
high_inputs: The input tensors that have the higher resolution
low_inputs: The input tensors that have the lower resolution
Returns:
RefineNet block for a single path i.e one resolution
"""
if high_inputs is None: # block 4
rcu_low_1= ResidualConvUnit(low_inputs, n_filters=256)
rcu_low_2 = ResidualConvUnit(low_inputs, n_filters=256)
rcu_low = [rcu_low_1, rcu_low_2]
fuse = MultiResolutionFusion(high_inputs=None, low_inputs=rcu_low, n_filters=256)
fuse_pooling = ChainedResidualPooling(fuse, n_filters=256)
output = ResidualConvUnit(fuse_pooling, n_filters=256)
return output
else:
rcu_low_1 = ResidualConvUnit(low_inputs, n_filters=256)
rcu_low_2 = ResidualConvUnit(low_inputs, n_filters=256)
rcu_low = [rcu_low_1, rcu_low_2]
rcu_high_1 = ResidualConvUnit(high_inputs, n_filters=256)
rcu_high_2 = ResidualConvUnit(high_inputs, n_filters=256)
rcu_high = [rcu_high_1, rcu_high_2]
fuse = MultiResolutionFusion(rcu_high, rcu_low,n_filters=256)
fuse_pooling = ChainedResidualPooling(fuse, n_filters=256)
output = ResidualConvUnit(fuse_pooling, n_filters=256)
return output
def build_refinenet(inputs, num_classes= None, preset_model='RefineNet-Res101', weight_decay=1e-5, is_training=True, upscaling_method="bilinear", pretrained_dir="models",substract_mean = True,
individual_upsamp="False"):
"""
Builds the RefineNet model.
Arguments:
inputs: The input tensor
preset_model: Which model you want to use. Select which ResNet model to use for feature extraction
num_classes: Number of classes
Returns:
RefineNet model
"""
if substract_mean:
inputs = mean_image_subtraction(inputs)
if preset_model == 'RefineNet-Res50':
with slim.arg_scope(resnet_v1.resnet_arg_scope(weight_decay=weight_decay)):
logits, end_points = resnet_v1.resnet_v1_50(inputs, is_training=is_training, scope='resnet_v1_50')
# RefineNet requires pre-trained ResNet weights
init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v1_50.ckpt'), slim.get_model_variables('resnet_v1_50'))
elif preset_model == 'RefineNet-Res101':
with slim.arg_scope(resnet_v1.resnet_arg_scope(weight_decay=weight_decay)):
logits, end_points = resnet_v1.resnet_v1_101(inputs, is_training=is_training, scope='resnet_v1_101')
# RefineNet requires pre-trained ResNet weights
init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v1_101.ckpt'), slim.get_model_variables('resnet_v1_101'))
elif preset_model == 'RefineNet-Res152':
with slim.arg_scope(resnet_v1.resnet_arg_scope(weight_decay=weight_decay)):
logits, end_points = resnet_v1.resnet_v1_152(inputs, is_training=is_training, scope='resnet_v1_152')
# RefineNet requires pre-trained ResNet weights
init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v1_152.ckpt'), slim.get_model_variables('resnet_v1_152'))
else:
raise ValueError("Unsupported ResNet model '%s'. This function only supports ResNet 101 and ResNet 152" % (preset_model))
net_name = list(end_points.keys())[0].split("/")[0]
if individual_upsamp == "True":
f = [end_points['pool5'], end_points['pool4'],
end_points['pool3'], end_points['pool2']]
us_stages = ["energy", "classes", "bbox"]
g_list = list()
for stage in us_stages:
g = [None, None, None, None]
h = [None, None, None, None]
for i in range(4):
h[i] = slim.conv2d(f[i], 256, 1)
g[0] = RefineBlock(high_inputs=None, low_inputs=h[0])
g[1] = RefineBlock(g[0], h[1])
g[2] = RefineBlock(g[1], h[2])
g[3] = RefineBlock(g[2], h[3])
g[3] = Upsampling(g[3], scale=4)
g_list.append(g)
return g_list, init_fn
else:
f = [end_points['pool5'], end_points['pool4'],
end_points['pool3'], end_points['pool2']]
g = [None, None, None, None]
h = [None, None, None, None]
for i in range(4):
h[i] = slim.conv2d(f[i], 256, 1)
g[0] = RefineBlock(high_inputs=None, low_inputs=h[0])
g[1] = RefineBlock(g[0], h[1])
g[2] = RefineBlock(g[1], h[2])
g[3] = RefineBlock(g[2], h[3])
g[3] = Upsampling(g[3], scale=4)
# if upscaling_method.lower() == "conv":
# net = ConvUpscaleBlock(net, 256, kernel_size=[3, 3], scale=2)
# net = ConvBlock(net, 256)
# net = ConvUpscaleBlock(net, 128, kernel_size=[3, 3], scale=2)
# net = ConvBlock(net, 128)
# net = ConvUpscaleBlock(net, 64, kernel_size=[3, 3], scale=2)
# net = ConvBlock(net, 64)
# elif upscaling_method.lower() == "bilinear":
# net = Upsampling(net, label_size)
if num_classes is not None:
net = slim.conv2d(g[3], num_classes, [1, 1], activation_fn=None, scope='logits')
return net, init_fn
else:
return g, init_fn
def mean_image_subtraction(inputs, means=[123.68, 116.78, 103.94]):
inputs=tf.to_float(inputs)
num_channels = inputs.get_shape().as_list()[-1]
if len(means) != num_channels:
raise ValueError('len(means) must match the number of channels')
channels = tf.split(axis=3, num_or_size_splits=num_channels, value=inputs)
for i in range(num_channels):
channels[i] -= means[i]
return tf.concat(axis=3, values=channels)
| 2.765625
| 3
|
labelme_tools/create_text_sample.py
|
dikers/ocr-train-data-generator
| 0
|
12776613
|
import random
import time
random.seed(time.time())
def create_zero(count):
char_list = '0Oo***.\、.----。、~!O@0o/L#$%0/LOg/Lo^./L**&00.00*()0。g/L、、--/L---+|/0Oo[]#%$¥0~-/L--!/L@#oo*~~~¥0O%&*OO。[]0Oog/L'
lines = ''
for i in range(count):
# print("{} random {}".format(i, random.randint(3, 10)))
line_length = random.randint(3, 10)
line = ''
for j in range(line_length):
start = random.randint(0, len(char_list) -2)
line += char_list[start: start+2]
#print(line)
lines += (line +'\n')
return lines
def create_char(count):
start_char = '#%!~*-^*/+#%*、,。.*.。*'
char_list = 'ABCDEFGHIJKLMNOPQRSTUVWXYZA'
lines = ''
for i in range(count):
line_length = random.randint(3, 8)
line = start_char[random.randint(0, len(start_char)-1)]
for j in range(line_length):
line += char_list[random.randint(0, len(char_list)-1)]
#print(line)
lines += (line +'\n')
return lines
def create_method_1(count):
char_split = ['--', '~', '--', '%', '/L', 'g/L', 'mg/L', 'L/L', '^', '=>', '<=', '*', '、', '。']
lines = ''
for i in range(count):
a = random.randint(10, 100000) / 1000
b = random.randint(10, 100000) / 1000
lines += "{}{}{}\n".format(a, char_split[random.randint(0, len(char_split)-1)], b)
return lines
def create_number_1(count):
char_list = '.。,壹贰叁肆伍陆柒捌玖拾佰仟.。,一二三四五六七八九十元百千万亿.。/,1234567890.。,、**%~##'
lines = ''
for i in range(count):
line_length = random.randint(3, 8)
line = ''
for j in range(line_length):
line += char_list[random.randint(0, len(char_list)-1)]
#print(line)
lines += (line +'\n')
return lines
def create_number_2(count):
char_list = '+-*/%¥¥¥$$$***... 。。。、、、~~~***--%%%***、~~=@#'
lines = ''
for i in range(count):
line = '{}{}{}'.format(random.randint(0,100000)/1000.0,
char_list[random.randint(0, len(char_list)-1)],
random.randint(0,100000)/1000.0)
lines += (line +'\n')
return lines
if __name__ == "__main__":
labels_file = '../output/spec_chars_02.txt'
total_lines = ''
#total_lines += create_number_2(200)
total_lines += create_zero(3000)
#total_lines += create_char(200)
total_lines += create_method_1(2000)
# print(total_lines)
lines = total_lines.split('\n')
print("length : {} ".format(len(lines)))
line_list = []
for line in lines:
if len(line) < 1:
continue
line_list.append(line)
line_list = list(set(line_list))
random.shuffle(line_list)
lines = '\n'.join(line_list)
#print(lines)
with open(labels_file, "w") as f:
f.write(lines)
    print('[Output] Generated file. Output path: {}, item count: {}.'.format(labels_file, len(line_list)))
| 3.453125
| 3
|
ospath/ospath_abspath.py
|
dineshkumar2509/learning-python
| 86
|
12776614
|
#!/usr/bin/env python
# encoding: utf-8
"""Compute an absolute path from a relative path.
"""
import os.path
for path in ['.', '..', './one/two/three', '../one/two/three']:
print '"%s" : "%s"' % (path, os.path.abspath(path))
| 2.796875
| 3
|
ch10/recipe2/recognize_action_tfhub.py
|
ArjunVarma39/Tensorflow-2.0-Computer-Vision-Cookbook
| 1
|
12776615
|
import os
import random
import re
import ssl
import tempfile
from urllib import request
import cv2
import imageio
import numpy as np
import tensorflow as tf
import tensorflow_hub as tfhub
UCF_ROOT = 'https://www.crcv.ucf.edu/THUMOS14/UCF101/UCF101/'
KINETICS_URL = ('https://raw.githubusercontent.com/deepmind/'
'kinetics-i3d/master/data/label_map.txt')
CACHE_DIR = tempfile.mkdtemp()
UNVERIFIED_CONTEXT = ssl._create_unverified_context()
def fetch_ucf_videos():
index = \
(request
.urlopen(UCF_ROOT, context=UNVERIFIED_CONTEXT)
.read()
.decode('utf-8'))
videos = re.findall('(v_[\w]+\.avi)', index)
return sorted(set(videos))
def fetch_kinetics_labels():
with request.urlopen(KINETICS_URL) as f:
labels = [line.decode('utf-8').strip()
for line in f.readlines()]
return labels
def fetch_random_video(videos_list):
video_name = random.choice(videos_list)
cache_path = os.path.join(CACHE_DIR, video_name)
if not os.path.exists(cache_path):
url = request.urljoin(UCF_ROOT, video_name)
response = (request
.urlopen(url,
context=UNVERIFIED_CONTEXT)
.read())
with open(cache_path, 'wb') as f:
f.write(response)
return cache_path
def crop_center(frame):
height, width = frame.shape[:2]
smallest_dimension = min(width, height)
x_start = (width // 2) - (smallest_dimension // 2)
x_end = x_start + smallest_dimension
y_start = (height // 2) - (smallest_dimension // 2)
y_end = y_start + smallest_dimension
roi = frame[y_start:y_end, x_start:x_end]
return roi
def read_video(path, max_frames=32, resize=(224, 224)):
capture = cv2.VideoCapture(path)
frames = []
while len(frames) <= max_frames:
frame_read, frame = capture.read()
if not frame_read:
break
frame = crop_center(frame)
frame = cv2.resize(frame, resize)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frames.append(frame)
capture.release()
frames = np.array(frames)
return frames / 255.
def predict(model, labels, sample_video):
model_input = tf.constant(sample_video,
dtype=tf.float32)
model_input = model_input[tf.newaxis, ...]
logits = model(model_input)['default'][0]
probabilities = tf.nn.softmax(logits)
print('Top 5 actions:')
for i in np.argsort(probabilities)[::-1][:5]:
print(f'{labels[i]}: {probabilities[i] * 100:5.2f}%')
def save_as_gif(images, video_name):
converted_images = np.clip(images * 255, 0, 255)
converted_images = converted_images.astype(np.uint8)
imageio.mimsave(f'./{video_name}.gif',
converted_images,
fps=25)
VIDEO_LIST = fetch_ucf_videos()
LABELS = fetch_kinetics_labels()
video_path = fetch_random_video(VIDEO_LIST)
sample_video = read_video(video_path)
model_path = 'https://tfhub.dev/deepmind/i3d-kinetics-400/1'
model = tfhub.load(model_path)
model = model.signatures['default']
predict(model, LABELS, sample_video)
video_name = video_path.rsplit('/', maxsplit=1)[1][:-4]
save_as_gif(sample_video, video_name)
| 2.390625
| 2
|
cracking-the-code-interview/trees/02-minimal-tree.py
|
vtemian/interviews-prep
| 8
|
12776616
|
from typing import List
class BST:
def __init__(self, val: int, left: 'BST', right: 'BST'):
self.val = val
self.left = left
self.right = right
def __str__(self) -> str:
if not self.val:
return ""
return " {} {} {} ".format(self.left, self.val, self.right)
def _height(node: BST):
if not node:
return 0
return 1 + max(_height(node.left), _height(node.right))
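# Using the middle element of the (sorted) input as the root and recursing on the
# two halves keeps both subtrees balanced, so the resulting BST reaches the
# minimal possible height of ceil(log2(n + 1)) nodes.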
def build(nodes: List[int]) -> BST:
def _build(start: int, end: int):
if start > end:
return
mid = (start + end) // 2
return BST(nodes[mid], _build(start, mid - 1), _build(mid + 1, end))
return _build(0, len(nodes) - 1)
for use_case, expected_result in [
[[1, 2, 3, 4, 5, 6, 7, 8], 4],
[[1], 1],
[[1, 2, 3], 2],
[[1, 2, 3, 5], 3],
]:
bst = build(use_case)
assert _height(bst) == expected_result
| 3.765625
| 4
|
scripts/parse_results2.py
|
pfritzgerald/nusassifi
| 2
|
12776617
|
<reponame>pfritzgerald/nusassifi<filename>scripts/parse_results2.py<gh_stars>1-10
###################################################################################
# Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###################################################################################
import os, sys, re, string, math, datetime, time, pkgutil
from optparse import OptionParser
import common_params as cp
import specific_params as sp
import common_functions as cf
import sqlite3
#import matplotlib.pyplot as plt
import numpy as np
results_app_table = {} # app, igid, bfm, outcome,
inj_types = ["inst","rf"]
###############################################################################
# inst_fraction contains the fraction of IADD, FADD, IMAD, FFMA, ISETP, etc.
# instructions per application
###############################################################################
inst_fraction = {}
inst_count = {}
def parse_results_file(app, igid, bfm, c):
if injection_mode == "interval":
results_f_name = sp.app_log_dir[app] + "results-igid" + str(igid) + ".bfm" + str(bfm) + ".interval.txt"
elif injection_mode == "pc":
results_f_name = sp.app_log_dir[app] + "results-igid" + str(igid) + ".bfm" + str(bfm) + "." +\
str(sp.NUM_INJECTIONS) + ".pc.txt"
else:
results_f_name = sp.app_log_dir[app] + "results-igid" + str(igid) + ".bfm" + str(bfm) + "." + str(sp.NUM_INJECTIONS) + ".txt"
try:
rf = open(results_f_name, "r")
except IOError:
print "app=%s, igid=%d, bfm=%d " %(app, igid, bfm),
print "NOT OPEN: " + results_f_name
return
suite = sp.apps[app][0]
print "file is " + rf.name
num_lines = 0
for line in rf: # for each injection site
# print "-------LINE: " + str(num_lines) + "---------------"
#Example line: _Z22bpnn_layerforward_CUDAPfS_S_S_ii-0-26605491-0.506809798834-0.560204950825:..:MOV:773546:17:0.759537:3:dmesg,
#kname-kcount-iid-allIId-opid-bid:pc:opcode:tid:injBID:runtime_sec:outcome_category:dmesg
words = line.split(":")
inj_site_info = words[0].split("-")
if injection_mode == "interval":
[interval_size, interval_id] = [int(inj_site_info[2]), int(inj_site_info[3])]
inst_id = int(inj_site_info[4])
opIdSeed = inj_site_info[5]
bIdSeed = inj_site_info[6]
[opcode, injBID, runtime, outcome] = \
[words[5], int(words[7]), float(words[8]), int(words[9])]
elif injection_mode == "pc":
[opIdSeed, bIdSeed, pc_text, pc_count] = [inj_site_info[3], inj_site_info[4],\
str(inj_site_info[5]), int(inj_site_info[6])]
[bb_id, global_inst_id, app_dyn_inst_id, opcode, tId, injBID, runtime, outcome] = \
[int(words[1]), int(words[2]), int(words[3]), words[4], int(words[5]), int(words[6]),\
float(words[7]), int(words[8])]
else:
[kname, invocation_index, opcode, injBID, runtime, outcome] = \
[inj_site_info[0], int(inj_site_info[1]), words[5], int(words[7]), float(words[8]), int(words[9])]
inst_id = int(inj_site_info[2])
opIdSeed = inj_site_info[3]
bIdSeed = inj_site_info[4]
# print "words[1]: "+ str(words[1]),
if injection_mode != "pc":
pc_text = '0x'+str(words[1])
bb_id = int(words[2])
global_inst_id = int(words[3])
app_dyn_inst_id = int(words[4])
tId = int(words[6])
if pc_text == '0x':
pc_text = "0x0"
# print "PC text: " + " => " + pc_text
# pc = int(pc_text,0)
if injection_mode == "interval":
c.execute('INSERT OR IGNORE INTO Results '\
'VALUES(NULL, \'%s\',\'%s\',%d,\'%s\', \'%s\', %d, %d,'\
' %d, %d, \'%s\', %d, %d, %d, \'%s\', %d, %d, %f, %d)'
%(suite,app, interval_size, opIdSeed, bIdSeed, igid, bfm,
interval_id, inst_id, pc_text, bb_id,
global_inst_id, app_dyn_inst_id, opcode, tId,
injBID, runtime, (outcome-1)))
elif injection_mode == "pc":
c.execute('INSERT OR IGNORE INTO Results '\
'VALUES(NULL, \'%s\', \'%s\', \'%s\', \'%s\', %d, %d, \'%s\', %d, %d, '\
'%d, %d, \'%s\', %d, %d, %f, %d)'
% (suite, app, opIdSeed, bIdSeed, igid, bfm, pc_text, pc_count, bb_id,
global_inst_id, app_dyn_inst_id, opcode, tId, injBID, runtime, (outcome-1)))
else:
c.execute('INSERT OR IGNORE INTO Results '\
'VALUES(NULL, \'%s\',\'%s\',\'%s\',\'%s\', \'%s\''\
', %d, %d, %d, %d, \'%s\', %d, %d, %d, \'%s\', %d, %d, %f, %d)'
%(suite,app, kname, opIdSeed, bIdSeed, igid, bfm,
invocation_index, inst_id, pc_text,
bb_id, global_inst_id, app_dyn_inst_id, opcode,
tId, injBID, runtime, (outcome-1)))
num_lines += 1
rf.close()
if num_lines == 0 and app in results_app_table and os.stat(sp.app_log_dir[app] +
"injection-list/igid" + str(igid) + ".bfm" + str(bfm) + "." +
str(sp.NUM_INJECTIONS) + ".txt").st_size != 0:
print "%s, igid=%d, bfm=%d not done" %(app, igid, bfm)
def parse_mem_accesses(app, c):
try:
rf = open(sp.app_dir[app] + "global_gpr_insts.txt", "r")
except IOError:
print "NOT OPEN: " + sp.app_dir[app] + "global_gpr_insts.txt"
return
suite = sp.apps[app][0]
print "file is " + rf.name
kName = ""
invocation_id=0
for line in rf: # for each mem access (or new kernel and invocation)
words = line.split(",")
if words[0] == "INTERVAL":
interval_id = int(words[1])
global_loads = int(words[3])
global_stores = int(words[5])
nonglobal_loads = int(words[7])
nonglobal_stores = int(words[9])
c.execute('INSERT OR IGNORE INTO MemAccesses '\
'VALUES(NULL, \'%s\',%d, %d, %d, %d, %d)'
%(app, interval_id, global_loads, global_stores,
nonglobal_loads, nonglobal_stores))
def parse_pupcs(app, c):
try:
rf = open(sp.app_dir[app] + "pupcs.txt", "r")
except IOError:
print "PUPC - NOT OPEN: " + sp.app_dir[app] + "pupcs.txt"
return
suite = sp.apps[app][0]
print "PUPC - file is " + rf.name
for line in rf: # for each mem access (or new kernel and invocation)
words = line.split(",")
if words[0] == "PUPC":
pupc = '0x' + words[1]
bb_id = int(words[3])
fnName = words[5]
opcode = words[7]
is_mem = int(words[9])
is_dest_reg = int(words[11])
weight = int(words[13])
num_gpr_srcs = int(words[15])
gpr_srcs = ""
gpr_srcs = ",".join(map(str, words[17:17+num_gpr_srcs]))
num_gpr_dsts = int(words[18+num_gpr_srcs])
gpr_dsts = ",".join(map(str, words[20+num_gpr_srcs:18+num_gpr_srcs+num_gpr_dsts]))
c.execute('INSERT OR IGNORE INTO PUPCs '\
'VALUES(NULL, \'%s\', \'%s\', '\
'%d,%d,\'%s\',\'%s\', %d, %d, %d, \'%s\', %d,\'%s\')'
%(app, pupc, weight, bb_id, fnName, opcode, is_mem,is_dest_reg, num_gpr_srcs,
gpr_srcs, num_gpr_dsts, gpr_dsts))
def parse_bb_interval_executions(app, c):
try:
rf = open(sp.app_dir[app] + "basic_block_insts.txt", "r")
except IOError:
print "NOT OPEN: " + sp.app_dir[app] + "basic_block_insts.txt"
return
suite = sp.apps[app][0]
print "file is " + rf.name
kName = ""
invocation_id=0
for line in rf: # for each mem access (or new kernel and invocation)
if "kernel," in line:
words = line.split(",")
kName = words[1]
invocation_id = int(words[3])
interval_size = int(words[5])
elif "INTERVAL," in line:
words = line.split(",")
interval_id = int(words[1])
num_gpr_insts = int(words[3])
c.execute('INSERT OR IGNORE INTO BBVIntervalSizes '\
'VALUES(NULL, \'%s\', %d, %d, %d);'
%(app, interval_size, interval_id,num_gpr_insts))
else:
words = line.split(",")
basic_block_id = int(words[0])
num_insts = int(words[2])
func_name = words[1]
inst_interval =int(words[3])
bb_num_execs = int(words[4])
num_succs = int(words[5])
succs = ",".join(map(str, words[6:6+num_succs]))
# print 'INSERT OR IGNORE INTO BBProfile '\
# 'VALUES(NULL, \'%s\',\'%s\', %d, %d, %d, %d, \'%s\', %d,'\
# '%d, \'%s\');' %(app, kName, invocation_id, inst_interval, basic_block_id, num_insts,
# func_name, bb_num_execs, num_succs, succs)
c.execute('INSERT OR IGNORE INTO BBProfile '\
'VALUES(NULL, \'%s\',\'%s\', %d, %d, %d, %d, \'%s\', %d,'\
'%d, \'%s\');'
%(app, kName, invocation_id, inst_interval, basic_block_id, num_insts,
func_name, bb_num_execs, num_succs, succs))
def parse_bb_executions(app, c):
try:
rf = open(sp.app_dir[app] + "bb_profile.txt", "r")
except IOError:
print "BB Profiling - NOT OPEN: " + sp.app_dir[app] + "bb_profile.txt"
return
suite = sp.apps[app][0]
print "BB Profiling - file is " + rf.name
kName = ""
invocation_id=0
for line in rf: # for each mem access (or new kernel and invocation)
if "kName," in line:
words = line.split(",")
kName = words[1]
continue
elif "BBId," in line:
words = line.split(",")
basic_block_id = int(words[1])
num_insts = int(words[5])
bb_num_execs = int(words[3])
is_entry = int(words[7])
is_exit = int(words[9])
num_succ = int(words[23])
succs = ",".join(map(str, words[25:25+num_succ]))
c.execute('INSERT OR IGNORE INTO BBExecutions '\
'VALUES(NULL, \'%s\',\'%s\', %d, %d, %d, %d, %d, %d, \'%s\');'
%(app, kName, basic_block_id, bb_num_execs, num_insts, is_entry,
is_exit, num_succ, succs))
def parse_path_executions(app, c):
try:
rf = open(sp.app_dir[app] + "path_profile.txt", "r")
except IOError:
print "NOT OPEN: " + sp.app_dir[app] + "path_profile.txt"
return
suite = sp.apps[app][0]
kInvocation = {}
print "file is " + rf.name
kName = ""
invocation_id=0
for line in rf: # for each path (or new kernel and invocation)
if "kernel," in line:
words = line.strip().split(",")
kName = words[1]
if kName not in kInvocation:
kInvocation[kName] = 0
else:
kInvocation[kName]+=1
elif "path_id," in line:
words = line.strip().split(",")
kernel = words[1]
invocation_id = kInvocation[kName]
path_id = int(words[3])
bb_start = int(words[5])
bb_end = int(words[7])
count = int(words[9])
c.execute('INSERT OR IGNORE INTO PathProfile '\
'VALUES(NULL, \'%s\',\'%s\', %d, %d, %d, %d, %d);'
%(app, kernel, invocation_id, path_id, bb_start, bb_end, count))
def parse_path_incs(app, c):
try:
rf = open(sp.app_dir[app] + "cfgs.txt", "r")
except IOError:
print "NOT OPEN: " + sp.app_dir[app] + "cfgs.txt"
return
suite = sp.apps[app][0]
print "file is " + rf.name
kName = ""
num_kernels = int(rf.readline())
print "num kernels in app: " + app + " is " + str(num_kernels)
for kernel in range(0,num_kernels): # for each path inc (or new kernel and invocation)
kname=rf.readline().strip()
num_incs=int(rf.readline())
for inc in range(0,num_incs):
[bb_from,bb_to,inc_value] = map(int, rf.readline().split())
c.execute('INSERT OR IGNORE INTO PathIncs '\
'VALUES(NULL, \'%s\',\'%s\', %d, %d, %d);'
%(app, kname, bb_from, bb_to, inc_value))
def parse_full_paths(app, c):
try:
rf = open(sp.app_dir[app] + "full_paths.txt", "r")
except IOError:
print "NOT OPEN: " + sp.app_dir[app] + "full_paths.txt"
return
print "FILE OPEN: " + rf.name
kInvocation = {}
kName = ""
invocation_id = 0
for line in rf:
if "kernel," in line:
words = line.strip().split(",")
kName = words[1]
if kName not in kInvocation:
kInvocation[kName] = 0
else:
kInvocation[kName] += 1
elif "WARP" in line:
words=line.strip().split("=>")
warp_id = int(words[0].split()[1])
full_path = words[1][:-1]
invocation_id = kInvocation[kName]
c.execute('INSERT OR IGNORE INTO FullPaths '\
'VALUES(NULL, \'%s\', \'%s\', \'%s\');'
% (app, kName, full_path))
full_path_id = c.execute('SELECT ID FROM FullPaths WHERE App IS \'%s\' AND kName IS \'%s\' '\
'AND FullPath IS \'%s\';'
%(app,kName, full_path)).fetchone()[0]
c.execute('INSERT OR IGNORE INTO FullPathExecs '\
'VALUES(NULL, \'%s\',\'%s\',%d,%d,%d);'
% (app, kName, invocation_id, warp_id, full_path_id))
def parse_fipoints(app, c):
try:
rf = open(sp.app_dir[app] + "interval.txt", "r")
except IOError:
print "NOT OPEN: " + sp.app_dir[app] + "interval.txt"
return
print "file is " + rf.name
next(rf)
next(rf)
next(rf)
for line in rf:
line = line.split(":")
[intervalId, intervalFreq] = [int(line[0]), float(line[2])]
c.execute('INSERT OR IGNORE INTO FiPointClusters '\
'VALUES(NULL, \'%s\', %d, %f);'
% (app, intervalId, intervalFreq))
###################################################################################
# Parse results files and populate summary to results table
###################################################################################
def parse_results_apps(typ,c):
for app in sp.parse_apps:
print app
if typ == "inst":
for igid in sp.parse_igid_bfm_map:
for bfm in sp.parse_igid_bfm_map[igid]:
parse_results_file(app, igid, bfm, c)
else:
for bfm in sp.parse_rf_bfm_list:
parse_results_file(app, "rf", bfm, c)
parse_mem_accesses(app, c)
parse_pupcs(app, c)
#parse_bb_executions(app,c)
parse_bb_interval_executions(app,c)
#parse_path_executions(app,c)
#parse_path_incs(app, c)
#parse_full_paths(app,c)
if injection_mode == "interval":
parse_fipoints(app, c)
def parse_options():
parser = OptionParser()
parser.add_option("-t", "--type", dest="inj_type",
help="Injection Type <inst/rf>", metavar="INJ_TYPE")
parser.add_option("-d", "--database", dest="database_file",
help="Database file where our data is")
parser.add_option("-a", "--app", dest="application",
help="Application to analyze")
parser.add_option("-m", "--mode", dest="injection_mode", default="normal",
help="Mode of injection - normal or interval (fipoint)")
# Create a database if one was not passed.
(options, args) = parser.parse_args()
if options.inj_type:
if options.inj_type not in inj_types:
parser.error("inj_type should be one of: %s - provided:%s"
% (inj_types,options.inj_type))
else:
options.inj_type = "inst"
if not options.database_file:
options.database_file = "data.db"
return options.database_file, options.inj_type, options.application, options.injection_mode
def print_usage():
print "Usage: \n python parse_results.py rf/inst"
exit(1)
def CreateNewDB(c):
print "creating data DB"
if injection_mode == "interval":
c.execute('CREATE TABLE IF NOT EXISTS '\
'Results(ID INTEGER PRIMARY KEY, Suite TEXT, App TEXT, IntervalSize INTEGER, '\
'OpIdSeed TEXT, BIDSeed TEXT, IgId INTEGER, '\
                  'BFM INTEGER, IntervalId INTEGER, InstId INTEGER, PC TEXT, BBId '\
'INTEGER, GlobalInstId INTEGER, AppDynInstId INTEGER, '\
'Opcode TEXT, TId INTEGER, InjBId INTEGER, Runtime INTEGER, OutcomeID INTEGER)')
elif injection_mode == "pc":
c.execute('CREATE TABLE IF NOT EXISTS '\
'Results(ID INTEGER PRIMARY KEY, Suite TEXT, App TEXT, OpIdSeed TEXT, '\
'BIDSeed TEXT, IgId INTEGER, BFM INTEGER, PC TEXT, PCCount INTEGER, BBId INTEGER, '\
'GlobalInstId INTEGER, AppDynInstId INTEGER, Opcode TEXT, TId INTEGER, '\
'InjBId INTEGER, Runtime INTEGER, OutcomeId INTEGER)')
else:
c.execute('CREATE TABLE IF NOT EXISTS '\
'Results(ID INTEGER PRIMARY KEY, Suite TEXT, App TEXT, kName TEXT, '\
'OpIdSeed TEXT, BIDSeed TEXT, IgId INTEGER, '\
                  'BFM INTEGER, InvocationIdx INTEGER, InstId INTEGER, PC TEXT, BBId '\
'INTEGER, GlobalInstId INTEGER, AppDynInstId INTEGER, '\
'Opcode TEXT, TId INTEGER, InjBId INTEGER, Runtime INTEGER, OutcomeID INTEGER)')
c.execute('CREATE TABLE IF NOT EXISTS '\
'OutcomeMap(ID INTEGER PRIMARY KEY, Description TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS '\
'IgIdMap(ID INTEGER PRIMARY KEY, IDNum INTEGER, Description TEXT, App TEXT,'\
' InstCount INTEGER)')
c.execute('CREATE TABLE IF NOT EXISTS '\
'BFMMap(ID INTEGER PRIMARY KEY, IDNum INTEGER, Description TEXT, App TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS '\
'OpcodeMap(ID INTEGER PRIMARY KEY, Description TEXT, App TEXT, InstCount INTEGER)')
c.execute('CREATE TABLE IF NOT EXISTS '\
'Kernels(ID INTEGER PRIMARY KEY, Application TEXT, kName TEXT, '\
'InvocationIdx INTEGER, InvInstCount INTEGER, AppInstCount INTEGER)')
c.execute('CREATE TABLE IF NOT EXISTS '\
'MemAccesses(ID INTEGER PRIMARY KEY, App TEXT, IntervalId INTEGER, '\
'GlobalLoads INTEGER, GlobalStores INTEGER, '\
'NonGlobalLoads INTEGER, NonGlobalStores INTEGER)')
c.execute('CREATE TABLE IF NOT EXISTS '\
'BBProfile(ID INTEGER PRIMARY KEY, App TEXT, KName TEXT, '\
'InvocationIdx INTEGER, InstIntervalId INTEGER, '\
' BasicBlockId INTEGER, BBNumInsts INTEGER, FuncName TEXT, BBNumExecs INTEGER,'\
'numSuccs INTEGER, Succs TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS '\
'BBExecutions(ID INTEGER PRIMARY KEY, App TEXT, KName TEXT, '\
'BasicBlockId INTEGER, BBNumExecs INTEGER, BBNumInsts INTEGER,'\
'isEntry INTEGER, isExit INTEGER, numSuccs INTEGER, Succs TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS '\
'PathProfile(ID INTEGER PRIMARY KEY, App TEXT, kName TEXT, '\
'InvocationIdx INTEGER, PathId INTEGER, BBStart INTEGER,'\
'BBEnd INTEGER, Count INTEGER)')
c.execute('CREATE TABLE IF NOT EXISTS '\
'PathIncs(ID INTEGER PRIMARY KEY, App TEXT, kName TEXT, BBFrom INTEGER, '\
'BBTo INTEGER, Inc INTEGER)')
c.execute('CREATE TABLE IF NOT EXISTS '\
'FullPaths(ID INTEGER PRIMARY KEY, App TEXT, kName TEXT, FullPath TEXT, UNIQUE(App,kName,FullPath))')
c.execute('CREATE TABLE IF NOT EXISTS '\
'FullPathExecs(ID INTEGER PRIMARY KEY, App TEXT, kName TEXT, InvocationIdx INTEGER, '\
'WarpId INTEGER, FullPathID INTEGER)')
c.execute('CREATE TABLE IF NOT EXISTS '\
'BBVIntervalSizes(ID INTEGER PRIMARY KEY, App TEXT, IntervalSize INTEGER,'\
' IntervalId INTEGER, NumGPRInsts INTEGER)')
c.execute('CREATE TABLE IF NOT EXISTS '\
              'PUPCs(ID INTEGER PRIMARY KEY, App TEXT, PUPC TEXT, Weight INTEGER, BBId INTEGER, '\
'FnName TEXT, Opcode TEXT, IsMem INTEGER, IsDestReg INTEGER, NumGPRSrcs INTEGER, GPRSrcs TEXT, '\
'NumGPRDsts INTEGER, GPRDsts TEXT)')
if injection_mode == "interval":
c.execute('CREATE TABLE IF NOT EXISTS '\
'FIPointClusters(ID INTEGER PRIMARY KEY, App TEXT, IntervalId INTEGER,'\
' IntervalFrequency INTEGER)')
######
# fill up OutcomeMap table
#########
for cat in range(cp.NUM_CATS-1):
# print "cat %d cat_str %s " % (cat, cp.CAT_STR[cat])
c.execute('INSERT OR IGNORE INTO OutcomeMap '\
'VALUES(%d, \'%s\')' % (cat, cp.CAT_STR[cat]))
##########
# Filling up IgIdMap
#########
for app in sp.apps:
countList = cf.read_inst_counts(sp.app_dir[app],app)
#print countList
for igid in range(cp.NUM_INST_TYPES):
igid_inst_count = 0
for l in countList:
igid_inst_count += int(l[igid+2])
c.execute('INSERT OR IGNORE INTO IgIdMap '\
'VALUES(NULL, %d, \'%s\', \'%s\',%d)' % (igid,cp.IGID_STR[igid], app, igid_inst_count))
##########
# Filling up BitFlipModelMap (BFMMap)
#########
for app in sp.apps:
countList = cf.read_inst_counts(sp.app_dir[app],app)
#print countList
for bfm in range(len(cp.EM_STR)):
c.execute('INSERT OR IGNORE INTO BFMMap '\
'VALUES(NULL, %d, \'%s\', \'%s\')'
%(bfm,cp.EM_STR[bfm], app))
###########
# Filling up OpcodeMap
###########
opcode_list_str = "ATOM:ATOMS:B2R:BAR:BFE:BFI:BPT:BRA:BRK:BRX:CAL:CAS:CCTL:CCTLL:CCTLT:CONT:CS2R:CSET:CSETP:DADD:DEPBAR:DFMA:DMNMX:DMUL:DSET:DSETP:EXIT:F2F:F2I:FADD:FADD32I:FCHK:FCMP:FFMA:FFMA32I:FLO:FMNMX:FMUL:FMUL32I:FSET:FSETP:FSWZ:FSWZADD:I2F:I2I:IADD:IADD3:IADD32I:ICMP:IMAD:IMAD32I:IMADSP:IMNMX:IMUL:IMUL32I:ISAD:ISCADD:ISCADD32I:ISET:ISETP:JCAL:JMX:LD:LDC:LDG:LDL:LDLK:LDS:LDSLK:LDS_LDU:LDU:LD_LDU:LEA:LEPC:LONGJMP:LOP:LOP3:LOP32I:MEMBAR:MOV:MUFU:NOP:P2R:PBK:PCNT:PEXIT:PLONGJMP:POPC:PRET:PRMT:PSET:PSETP:R2B:R2P:RED:RET:RRO:S2R:SEL:SHF:SHFL:SHL:SHR:SSY:ST:STG:STL:STS:STSCUL:STSUL:STUL:SUATOM:SUBFM:SUCLAMP:SUEAU:SULD:SULDGA:SULEA:SUQ:SURED:SUST:SUSTGA:SYNC:TEX:TEXDEPBAR:TEXS:TLD:TLD4:TLD4S:TLDS:TXQ:UNMAPPED:USER_DEFINED:VMNMX:VOTE:XMAD"
opcode_list = opcode_list_str.split(":")
# print "OPCODE LIST: " + str(opcode_list)
for app in sp.apps:
countList = cf.read_inst_counts(sp.app_dir[app], app)
total_count = cf.get_total_counts(countList)
for i in range(len(opcode_list)):
c.execute('INSERT OR IGNORE INTO OpcodeMap '\
'VALUES(NULL, \'%s\', \'%s\',%d)' %(opcode_list[i], app, total_count[i+cp.NUM_INST_TYPES+1]))
# print "len total counts " + str(len(total_count))
# print "len opcode_list: " + str(len(opcode_list))
for app in sp.apps:
# print "App: " + app
countList = cf.read_inst_counts(sp.app_dir[app], app)
#print "countList: " + str(countList)
for l in countList:
total_inst_count = 0
for i in range(cp.NUM_INST_TYPES+3, len(countList[0])): # 3: 1 for kname, 1 for kcount and 1 for WILL NOT EXECUTE instruction count
total_inst_count += int(l[i])
kernel_name = str(l[0])
invocation_idx = int(l[1])
app_inst_count = cf.get_total_insts(countList)
c.execute('INSERT OR IGNORE INTO Kernels '\
'VALUES(NULL, \'%s\',\'%s\', %d, %d, %d)'
% (app, kernel_name, invocation_idx, total_inst_count, app_inst_count))
###############################################################################
# Main function that processes files, analyzes results and prints them to an
# xlsx file
###############################################################################
def main():
global injection_mode
db_file, inj_type, application, injection_mode = parse_options()
print "DB file is : " + db_file
conn = sqlite3.connect(db_file)
c = conn.cursor()
if db_file == "data.db":
CreateNewDB(c)
# total_count = cf.get_total_insts(countList)
parse_results_apps(inj_type, c) # parse sassifi results into local data structures
conn.commit()
conn.close()
if __name__ == "__main__":
main()
| 1.28125
| 1
|
htm_rl/htm_rl/envs/biogwlab/wrappers/entity_map_provider.py
|
cog-isa/htm-rl
| 1
|
12776618
|
<reponame>cog-isa/htm-rl
import numpy as np
from htm_rl.envs.biogwlab.environment import Environment
from htm_rl.envs.biogwlab.module import EntityType
from htm_rl.envs.env import Wrapper
class EntityMapProvider(Wrapper):
root_env: Environment
entities: dict[EntityType, np.array]
def __init__(self, entities: dict[EntityType, np.array], env):
super().__init__(env)
self.entities = entities
def get_info(self) -> dict:
info = super(EntityMapProvider, self).get_info()
mask_map = dict()
for entity, entity_flag in self.entities.items():
mask_map[entity] = self.root_env.aggregated_mask[entity_flag]
info['map'] = mask_map
return info
| 2.15625
| 2
|
tests/multipart_test.py
|
SanthoshBala18/filestack-python
| 0
|
12776619
|
import io
import json
from collections import defaultdict
from unittest.mock import patch
import responses
from httmock import HTTMock, response, urlmatch
from tests.helpers import DummyHttpResponse
from filestack import Client
from filestack.config import MULTIPART_START_URL
from filestack.uploads.multipart import upload_chunk, Chunk
APIKEY = 'APIKEY'
HANDLE = 'SOMEHANDLE'
URL = 'https://cdn.filestackcontent.com/{}'.format(HANDLE)
def chunk_put_callback(request):
body = {'url': URL}
return 200, {'ETag': 'someetags'}, json.dumps(body)
@responses.activate
def test_upload_filepath():
client = Client(APIKEY)
# add the different HTTP responses that are called during the multipart upload
responses.add(
responses.POST, MULTIPART_START_URL, status=200, content_type='application/json',
json={'region': 'us-east-1', 'upload_id': 'someuuid', 'uri': 'someuri', 'location_url': 'fs-uploads.com'}
)
responses.add(
responses.POST, 'https://fs-uploads.com/multipart/upload',
status=200, content_type='application/json', json={'url': URL, 'headers': {}}
)
responses.add_callback(responses.PUT, URL, callback=chunk_put_callback)
responses.add(
responses.POST, 'https://fs-uploads.com/multipart/complete', status=200,
content_type='application/json', json={'url': URL, 'handle': HANDLE}
)
new_filelink = client.upload(filepath='tests/data/doom.mp4')
assert new_filelink.handle == HANDLE
@patch('filestack.uploads.multipart.requests.put')
@patch('filestack.uploads.multipart.requests.post')
def test_upload_file_obj(post_mock, put_mock):
start_response = defaultdict(str)
start_response['location_url'] = 'fs.api'
post_mock.side_effect = [
DummyHttpResponse(json_dict=start_response),
DummyHttpResponse(json_dict=defaultdict(str)),
DummyHttpResponse(json_dict={'handle': 'bytesHandle'})
]
put_mock.return_value = DummyHttpResponse(
json_dict=defaultdict(str), headers={'ETag': 'etag-1'}
)
file_content = b'file bytes'
filelink = Client(APIKEY).upload(file_obj=io.BytesIO(file_content))
assert filelink.handle == 'bytesHandle'
put_args, put_kwargs = put_mock.call_args
assert put_kwargs['data'] == file_content
def test_upload_chunk():
@urlmatch(netloc=r'fsuploads\.com', path='/multipart/upload', method='post', scheme='https')
def fs_backend_mock(url, request):
return {
'status_code': 200,
'content': json.dumps({
'url': 'https://amazon.com/upload', 'headers': {'one': 'two'}
})
}
@urlmatch(netloc=r'amazon\.com', path='/upload', method='put', scheme='https')
def amazon_mock(url, request):
return response(200, b'', {'ETag': 'etagX'}, reason=None, elapsed=0, request=request)
chunk = Chunk(num=123, seek_point=0, filepath='tests/data/doom.mp4')
start_response = defaultdict(str)
start_response['location_url'] = 'fsuploads.com'
with HTTMock(fs_backend_mock), HTTMock(amazon_mock):
upload_result = upload_chunk('apikey', 'filename', 's3', start_response, chunk)
assert upload_result == {'part_number': 123, 'etag': 'etagX'}
| 2.375
| 2
|
rocksmith/sng.py
|
0x0L/rocksmith
| 25
|
12776620
|
from construct import (
Float32l,
Float64l,
If,
Int8sl,
Int16sl,
Int16ul,
Int32sl,
Int32ul,
PaddedString,
Padding,
PrefixedArray,
Struct,
len_,
this,
)
def array(subcon):
return PrefixedArray(Int32ul, subcon)
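# The variable-length blocks in the SNG payload are stored as a 32-bit
# little-endian element count followed by the packed records, which is what
# PrefixedArray(Int32ul, subcon) parses and builds.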
Bend = Struct("time" / Float32l, "step" / Float32l, Padding(3), "UNK" / Int8sl)
Beat = Struct(
"time" / Float32l,
"measure" / Int16ul,
"beat" / Int16ul,
"phraseIteration" / Int32ul,
"mask" / Int32ul,
)
Phrase = Struct(
"solo" / Int8sl,
"disparity" / Int8sl,
"ignore" / Int8sl,
Padding(1),
"maxDifficulty" / Int32ul,
"phraseIterationLinks" / Int32ul,
"name" / PaddedString(32, encoding="utf8"),
)
ChordTemplate = Struct(
"mask" / Int32ul,
"frets" / Int8sl[6],
"fingers" / Int8sl[6],
"notes" / Int32sl[6],
"name" / PaddedString(32, encoding="utf8"),
)
ChordNote = Struct(
"mask" / Int32ul[6],
"bends" / Struct("bendValues" / Bend[32], "count" / Int32ul)[6],
"slideTo" / Int8sl[6],
"slideUnpitchTo" / Int8sl[6],
"vibrato" / Int16sl[6],
)
Vocal = Struct(
"time" / Float32l,
"note" / Int32sl,
"length" / Float32l,
"lyrics" / PaddedString(48, encoding="utf8"),
)
Texture = Struct(
"fontpath" / PaddedString(128, encoding="ascii"),
"fontpathLength" / Int32ul,
Padding(4),
"width" / Int32ul,
"height" / Int32ul,
)
BoundingBox = Struct("y0" / Float32l, "x0" / Float32l, "y1" / Float32l, "x1" / Float32l)
SymbolDef = Struct(
"name" / PaddedString(12, encoding="utf8"),
"outerRect" / BoundingBox,
"innerRect" / BoundingBox,
)
Symbols = Struct(
"header" / array(Int32sl[8]),
"texture" / array(Texture),
"definition" / array(SymbolDef),
)
PhraseIteration = Struct(
"phraseId" / Int32ul,
"time" / Float32l,
"endTime" / Float32l,
"difficulty" / Int32ul[3],
)
PhraseExtraInfo = Struct(
"phraseId" / Int32ul,
"difficulty" / Int32ul,
"empty" / Int32ul,
"levelJump" / Int8sl,
"redundant" / Int16sl,
Padding(1),
)
LinkedDiff = Struct("levelBreak" / Int32sl, "nld_phrase" / array(Int32ul))
Action = Struct("time" / Float32l, "name" / PaddedString(256, encoding="ascii"))
Event = Struct("time" / Float32l, "name" / PaddedString(256, encoding="ascii"))
Tone = Struct("time" / Float32l, "id" / Int32ul)
DNA = Struct("time" / Float32l, "id" / Int32ul)
Section = Struct(
"name" / PaddedString(32, encoding="utf8"),
"number" / Int32ul,
"startTime" / Float32l,
"endTime" / Float32l,
"startPhraseIterationId" / Int32ul,
"endPhraseIterationId" / Int32ul,
"stringMask" / Int8sl[36],
)
Anchor = Struct(
"time" / Float32l,
"endTime" / Float32l,
"UNK_time" / Float32l,
"UNK_time2" / Float32l,
"fret" / Int32sl,
"width" / Int32sl,
"phraseIterationId" / Int32ul,
)
AnchorExtension = Struct("time" / Float32l, "fret" / Int8sl, Padding(7))
FingerPrint = Struct(
"chordId" / Int32ul,
"startTime" / Float32l,
"endTime" / Float32l,
"UNK_startTime" / Float32l,
"UNK_endTime" / Float32l,
)
Note = Struct(
"mask" / Int32ul,
"flags" / Int32ul,
"hash" / Int32ul,
"time" / Float32l,
"string" / Int8sl,
"fret" / Int8sl,
"anchorFret" / Int8sl,
"anchorWidth" / Int8sl,
"chordId" / Int32ul,
"chordNoteId" / Int32ul,
"phraseId" / Int32ul,
"phraseIterationId" / Int32ul,
"fingerPrintId" / Int16ul[2],
"nextIterNote" / Int16ul,
"prevIterNote" / Int16ul,
"parentPrevNote" / Int16ul,
"slideTo" / Int8sl,
"slideUnpitchTo" / Int8sl,
"leftHand" / Int8sl,
"tap" / Int8sl,
"pickDirection" / Int8sl,
"slap" / Int8sl,
"pluck" / Int8sl,
"vibrato" / Int16sl,
"sustain" / Float32l,
"bend_time" / Float32l,
"bends" / array(Bend),
)
Level = Struct(
"difficulty" / Int32ul,
"anchors" / array(Anchor),
"anchor_extensions" / array(AnchorExtension),
"fingerprints" / array(FingerPrint)[2],
"notes" / array(Note),
"averageNotesPerIter" / array(Float32l),
"notesInIterCountNoIgnored" / array(Int32ul),
"notesInIterCount" / array(Int32ul),
)
Metadata = Struct(
"maxScores" / Float64l,
"maxNotes" / Float64l,
"maxNotesNoIgnored" / Float64l,
"pointsPerNote" / Float64l,
"firstBeatLength" / Float32l,
"startTime" / Float32l,
"capo" / Int8sl,
"lastConversionDateTime" / PaddedString(32, encoding="ascii"),
"part" / Int16sl,
"songLength" / Float32l,
"tuning" / array(Int16sl),
"firstNoteTime" / Float32l,
"firstNoteTime2" / Float32l,
"maxDifficulty" / Int32sl,
)
Song = Struct(
"beats" / array(Beat),
"phrases" / array(Phrase),
"chordTemplates" / array(ChordTemplate),
"chordNotes" / array(ChordNote),
"vocals" / array(Vocal),
"symbols" / If(len_(this.vocals) > 0, Symbols),
"phraseIterations" / array(PhraseIteration),
"phraseExtraInfos" / array(PhraseExtraInfo),
"newLinkedDiffs" / array(LinkedDiff),
"actions" / array(Action),
"events" / array(Event),
"tones" / array(Tone),
"dna" / array(DNA),
"sections" / array(Section),
"levels" / array(Level),
"metadata" / Metadata,
)
| 2.265625
| 2
|
azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/identify_candidate_py3.py
|
JonathanGailliez/azure-sdk-for-python
| 1
|
12776621
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class IdentifyCandidate(Model):
"""All possible faces that may qualify.
All required parameters must be populated in order to send to Azure.
:param person_id: Required. Id of candidate
:type person_id: str
:param confidence: Required. Confidence threshold of identification, used
to judge whether one face belong to one person. The range of
confidenceThreshold is [0, 1] (default specified by algorithm).
:type confidence: float
"""
_validation = {
'person_id': {'required': True},
'confidence': {'required': True},
}
_attribute_map = {
'person_id': {'key': 'personId', 'type': 'str'},
'confidence': {'key': 'confidence', 'type': 'float'},
}
def __init__(self, *, person_id: str, confidence: float, **kwargs) -> None:
super(IdentifyCandidate, self).__init__(**kwargs)
self.person_id = person_id
self.confidence = confidence
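# Illustrative construction only (not part of the generated SDK file); the values
# are placeholders:
#
#   candidate = IdentifyCandidate(person_id='<person-id-guid>', confidence=0.92)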
| 2.328125
| 2
|
chapter13/asyncgmaps.py
|
lixin940207/expert_python_programming
| 189
|
12776622
|
<reponame>lixin940207/expert_python_programming<gh_stars>100-1000
# -*- coding: utf-8 -*-
import aiohttp
session = aiohttp.ClientSession()
async def geocode(place):
params = {
'sensor': 'false',
'address': place
}
async with session.get(
'https://maps.googleapis.com/maps/api/geocode/json',
params=params
) as response:
result = await response.json()
return result['results']
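# Minimal driver sketch (not part of the recipe itself); the address is only an
# example and the coroutine must be run from an event loop:
#
#   import asyncio
#   results = asyncio.get_event_loop().run_until_complete(geocode('Warsaw'))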
| 2.609375
| 3
|
AC_TD3_code/utils/__init__.py
|
Jiang-HB/AC_CDQ
| 7
|
12776623
|
from .attr_dict import AttrDict
from .commons import *
from .options import opts
from .recorder import Recoder
from .replay_buffer import ReplayBuffer
from .eval_policy import eval_policy
from .run import run
| 0.957031
| 1
|
03_join.py
|
Madhav2204/Python-vs-code
| 0
|
12776624
|
<filename>03_join.py
l = ["Camera", "Laptop", "Phone", "ipad", "Hard Disk", "Nvidia Graphic 3080 card"]
# sentence = "~~".join(l)
# sentence = "==".join(l)
sentence = "\n".join(l)
print(sentence)
print(type(sentence))
| 2.53125
| 3
|
enqueuer_thread.py
|
shriya999/MultiStage-ActionDetection
| 0
|
12776625
|
# coding=utf-8
"""Given the dataset object, make a multithread enqueuer"""
import os
import queue
import threading
import contextlib
import multiprocessing
import time
import random
import sys
import utils
import traceback
import cv2
# modified from keras
class DatasetEnqueuer(object):
def __init__(
self,
dataset,
prefetch=5,
num_workers=1,
start=True, # start the dataset get thread when init
shuffle=False,
# whether to break down each mini-batch for each gpu
is_multi_gpu=False,
last_full_batch=False, # make sure the last batch is full
):
self.dataset = dataset
self.prefetch = prefetch # how many batch to save in queue
self.max_queue_size = int(self.prefetch * dataset.batch_size)
self.is_multi_gpu = is_multi_gpu
self.last_full_batch = last_full_batch
self.workers = num_workers
self.queue = None
self.run_thread = None # the thread to spawn others
self.stop_signal = None
self.cur_batch_count = 0
self.shuffle = shuffle
if start:
self.start()
def is_running(self):
return self.stop_signal is not None and not self.stop_signal.is_set()
def start(self):
self.queue = queue.Queue(self.max_queue_size)
self.stop_signal = threading.Event()
self.run_thread = threading.Thread(target=self._run)
self.run_thread.daemon = True
self.run_thread.start()
def stop(self):
# print("stop called")
if self.is_running():
self._stop()
def _stop(self):
# print("_stop called")
self.stop_signal.set()
with self.queue.mutex:
self.queue.queue.clear()
self.queue.unfinished_tasks = 0
self.queue.not_full.notify()
self.run_thread.join(0)
def __del__(self):
if self.is_running():
self._stop()
# thread to start getting batches into queue
def _run(self):
batch_idxs = list(self.dataset.valid_idxs) * self.dataset.num_epochs
if self.shuffle:
batch_idxs = random.sample(batch_idxs, len(batch_idxs))
batch_idxs = random.sample(batch_idxs, len(batch_idxs))
if self.last_full_batch:
# make sure the batch_idxs are multiplier of batch_size
batch_idxs += [
batch_idxs[-1]
for _ in range(
self.dataset.batch_size - len(batch_idxs) % self.dataset.batch_size
)
]
while True:
with contextlib.closing(
multiprocessing.pool.ThreadPool(self.workers)
) as executor:
for idx in batch_idxs:
if self.stop_signal.is_set():
return
# block until not full
self.queue.put(
executor.apply_async(self.dataset.get_sample, (idx,)),
block=True,
)
self._wait_queue()
if self.stop_signal.is_set():
# We're done
return
# iterator to get batch from the queue
def get(self):
if not self.is_running():
self.start()
try:
while self.is_running():
if self.cur_batch_count == self.dataset.num_batches:
self._stop()
return
samples = []
for i in range(self.dataset.batch_size):
                    # the first get() returns the ApplyResult object; the second
                    # get() blocks until the worker has produced the actual sample
sample = self.queue.get(block=True).get()
self.queue.task_done()
samples.append(sample)
# break the mini-batch into mini-batches for multi-gpu
if self.is_multi_gpu:
batches = []
# a list of [frames, boxes, labels_arr, ori_boxes, box_keys]
this_batch_idxs = range(len(samples))
# pack these batches for each gpu
this_batch_idxs_gpus = utils.grouper(
this_batch_idxs, self.dataset.batch_size_per_gpu
)
for this_batch_idxs_per_gpu in this_batch_idxs_gpus:
batches.append(
self.dataset.collect_batch(samples, this_batch_idxs_per_gpu)
)
batch = batches
else:
batch = self.dataset.collect_batch(samples)
self.cur_batch_count += 1
yield batch
except Exception as e: # pylint: disable=broad-except
self._stop()
_type, _value, _traceback = sys.exc_info()
print("Exception in enqueuer.get: %s" % e)
traceback.print_tb(_traceback)
raise Exception
def _wait_queue(self):
"""Wait for the queue to be empty."""
while True:
time.sleep(0.1)
if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():
return
def count_frame_get(total_frame, frame_gap):
count = 0
cur_frame = 0
while cur_frame < total_frame:
if cur_frame % frame_gap != 0:
cur_frame += 1
continue
count += 1
cur_frame += 1
return count
class VideoEnqueuer(object):
def __init__(
self,
cfg,
vcap,
num_frame,
frame_gap=1,
prefetch=5,
start=True, # start the dataset get thread when init
is_moviepy=False,
batch_size=4,
):
self.cfg = cfg
self.vcap = vcap
self.num_frame = num_frame
self.frame_gap = frame_gap
self.is_moviepy = is_moviepy
self.batch_size = batch_size
self.prefetch = prefetch # how many batch to save in queue
self.max_queue_size = int(self.prefetch * batch_size)
self.queue = None
self.run_thread = None # the thread to spawn others
self.stop_signal = None
# how many frames we are actually gonna get due to frame gap
self.get_num_frame = count_frame_get(self.num_frame, self.frame_gap)
        # compute the number of batches we will produce so get() knows when to stop;
        # the last batch may contain fewer than batch_size frames
self.num_batches = self.get_num_frame // batch_size + int(
self.get_num_frame % batch_size != 0
)
self.cur_batch_count = 0
if start:
self.start()
def is_running(self):
return self.stop_signal is not None and not self.stop_signal.is_set()
def start(self):
self.queue = queue.Queue(self.max_queue_size)
self.stop_signal = threading.Event()
self.run_thread = threading.Thread(target=self._run)
self.run_thread.daemon = True
self.run_thread.start()
def stop(self):
# print("stop called")
if self.is_running():
self._stop()
def _stop(self):
# print("_stop called")
self.stop_signal.set()
with self.queue.mutex:
self.queue.queue.clear()
self.queue.unfinished_tasks = 0
self.queue.not_full.notify()
self.run_thread.join(0)
def __del__(self):
if self.is_running():
self._stop()
# thread to start getting batches into queue
def _run(self):
cfg = self.cfg
frame_count = 0
while frame_count < self.num_frame:
if self.stop_signal.is_set():
return
if self.is_moviepy:
suc = True
frame = next(self.vcap)
else:
suc, frame = self.vcap.read()
if not suc:
frame_count += 1
continue
if frame_count % self.frame_gap != 0:
frame_count += 1
continue
# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
im = frame.astype("float32")
resized_image = cv2.resize(im, (cfg.short_edge_size, cfg.max_size))
scale = (
resized_image.shape[0] * 1.0 / im.shape[0]
+ resized_image.shape[1] * 1.0 / im.shape[1]
) / 2.0
self.queue.put((resized_image, scale, frame_count), block=True)
frame_count += 1
self._wait_queue()
if self.stop_signal.is_set():
# We're done
return
# iterator to get batch from the queue
def get(self):
if not self.is_running():
self.start()
try:
while self.is_running():
if self.cur_batch_count == self.num_batches:
self._stop()
return
batch_size = self.batch_size
# last batch
if (self.cur_batch_count == self.num_batches - 1) and (
self.get_num_frame % batch_size != 0
):
batch_size = self.get_num_frame % batch_size
samples = []
for i in range(batch_size):
sample = self.queue.get(block=True)
self.queue.task_done()
samples.append(sample)
batch = samples
self.cur_batch_count += 1
yield batch
except Exception as e: # pylint: disable=broad-except
self._stop()
_type, _value, _traceback = sys.exc_info()
print("Exception in enqueuer.get: %s" % e)
traceback.print_tb(_traceback)
raise Exception
def _wait_queue(self):
"""Wait for the queue to be empty."""
while True:
time.sleep(0.1)
if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():
return
| 3.0625
| 3
|
tests/unit/test_events.py
|
ChristChurchMayfair/ccm-assistant
| 0
|
12776626
|
import unittest
import events
from tests.testing_utils import is_valid_response, ValidResponseObjectTester
class TestEvents(unittest.TestCase):
def test_on_launch(self):
response = events.on_launch()
self.assertTrue(is_valid_response(response))
response_tester = ValidResponseObjectTester(response)
self.assertEqual(response_tester.get_speech_plain(),
('I can read you the Bible passage for a service or play you a past sermon. What would you '
'like? '))
self.assertFalse(response_tester.is_session_ending())
self.assertEqual(response_tester.get_card_text(), ('Hello! Ask me for the bible reading '
'for a service or a past sermon.'))
self.assertEqual(response_tester.get_card_title(), 'Christ Church Mayfair')
| 3.03125
| 3
|
run.py
|
LauryneL/Pipographe-v2
| 1
|
12776627
|
<reponame>LauryneL/Pipographe-v2<filename>run.py
from app.app import config_app
if __name__ == "__main__":
app = config_app()
app.run(debug=True)
| 1.320313
| 1
|
passenger_wsgi.py
|
fraigo/python-cors-proxy
| 0
|
12776628
|
import imp
import os
import sys
from corsproxy.wsgi import application
sys.path.insert(0, os.path.dirname(__file__))
# wsgi = imp.load_source('wsgi', 'passenger_wsgi.py')
# application = wsgi.application
| 1.601563
| 2
|
t10/A10httprest/ta.py
|
THS-on/AttestationEngine
| 7
|
12776629
|
<gh_stars>1-10
# Copyright 2021 Nokia
# Licensed under the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
from flask import Flask, request, jsonify
from endpoints.tpm2.tpm2_endpoint import tpm2_endpoint
from endpoints.uefi.uefi_endpoint import uefi_endpoint
import sys
import os
VERSION = "0.3"
ta = Flask(__name__)
ta.register_blueprint(tpm2_endpoint, url_prefix="/tpm2")
ta.register_blueprint(uefi_endpoint, url_prefix="/uefi")
@ta.route("/", methods=["GET"])
def status_homepage():
services = [r.rule for r in ta.url_map.iter_rules()]
rc = {
"title": "T10 Trust Agent",
"version": VERSION,
"services": str(services),
"platform": sys.platform,
"os": os.name,
"pid": os.getpid(),
}
return jsonify(rc), 200
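# main() serves the trust agent over HTTPS when both a certificate and a key are
# supplied, and falls back to plain HTTP otherwise; host, port and debug settings
# come from the Flask config file (ta_config.cfg by default).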
def main(cert, key, config_filename="ta_config.cfg"):
for rule in ta.url_map.iter_rules():
print(rule)
ta.config.from_pyfile(config_filename)
if cert and key:
ta.run(
debug=ta.config["FLASKDEBUG"],
threaded=ta.config["FLASKTHREADED"],
host=ta.config["DEFAULTHOST"],
port=ta.config["DEFAULTPORT"],
ssl_context=(cert, key),
)
else:
ta.run(
debug=ta.config["FLASKDEBUG"],
threaded=ta.config["FLASKTHREADED"],
host=ta.config["DEFAULTHOST"],
port=ta.config["DEFAULTPORT"],
)
if __name__ == "__main__":
print("TA Starting")
main("", "")
| 2.078125
| 2
|
LeetCode/234 Palindrome Linked List.py
|
gesuwen/Algorithms
| 0
|
12776630
|
# Linked List, Two Pointers
# Given a singly linked list, determine if it is a palindrome.
#
# Example 1:
#
# Input: 1->2
# Output: false
# Example 2:
#
# Input: 1->2->2->1
# Output: true
# Follow up:
# Could you do it in O(n) time and O(1) space?
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def isPalindrome(self, head):
"""
:type head: ListNode
:rtype: bool
"""
if head is None or head.next is None:
return True
slow, fast = head, head
# get the middle element
while fast != None and fast.next != None:
slow = slow.next
fast = fast.next.next
# reverse the first half of the linked list
p1 = None
p2 = head
while p2 != slow:
p3 = p2.next
p2.next = p1
p1 = p2
p2 = p3
# odd number case
if fast != None:
slow = slow.next
# check first half and second half
while p1 != None and slow != None:
if p1.val != slow.val:
return False
p1 = p1.next
slow = slow.next
return True
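# Quick self-check sketch (not part of the LeetCode submission); it defines a
# throwaway ListNode matching the commented stub above.
if __name__ == '__main__':
    class ListNode(object):
        def __init__(self, x):
            self.val = x
            self.next = None
    def build(values):
        head = None
        for v in reversed(values):
            node = ListNode(v)
            node.next = head
            head = node
        return head
    assert Solution().isPalindrome(build([1, 2, 2, 1])) is True
    assert Solution().isPalindrome(build([1, 2])) is False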
| 4.125
| 4
|
3.7.0/lldb-3.7.0.src/test/functionalities/inline-stepping/TestInlineStepping.py
|
androm3da/clang_sles
| 3
|
12776631
|
<gh_stars>1-10
"""Test stepping over and into inlined functions."""
import os, time, sys
import unittest2
import lldb
import lldbutil
from lldbtest import *
class TestInlineStepping(TestBase):
mydir = TestBase.compute_mydir(__file__)
@skipUnlessDarwin
@python_api_test
@dsym_test
def test_with_dsym_and_python_api(self):
"""Test stepping over and into inlined functions."""
self.buildDsym()
self.inline_stepping()
@python_api_test
@dwarf_test
@expectedFailureFreeBSD('llvm.org/pr17214')
@expectedFailureIcc # Not really a bug. ICC combines two inlined functions.
@expectedFailureAll("llvm.org/pr23139", oslist=["linux"], compiler="gcc", compiler_version=[">=","4.9"], archs=["i386"])
# failed 1/365 dosep runs, (i386-clang), TestInlineStepping.py:237 failed to stop at first breakpoint in main
@expectedFailureAll(oslist=["linux"], archs=["i386"])
def test_with_dwarf_and_python_api(self):
"""Test stepping over and into inlined functions."""
self.buildDwarf()
self.inline_stepping()
@skipUnlessDarwin
@python_api_test
@dsym_test
def test_step_over_with_dsym_and_python_api(self):
"""Test stepping over and into inlined functions."""
self.buildDsym()
self.inline_stepping_step_over()
@python_api_test
@dwarf_test
@expectedFailureAll("llvm.org/pr23139", oslist=["linux"], compiler="gcc", compiler_version=[">=","4.9"], archs=["i386"])
def test_step_over_with_dwarf_and_python_api(self):
"""Test stepping over and into inlined functions."""
self.buildDwarf()
self.inline_stepping_step_over()
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line numbers that we will step to in main:
self.main_source = "calling.cpp"
self.source_lines = {}
functions = ['caller_ref_1', 'caller_ref_2', 'inline_ref_1', 'inline_ref_2', 'called_by_inline_ref', 'caller_trivial_1', 'caller_trivial_2', 'inline_trivial_1', 'inline_trivial_2', 'called_by_inline_trivial' ]
for name in functions:
self.source_lines[name] = line_number(self.main_source, "// In " + name + ".")
self.main_source_spec = lldb.SBFileSpec (self.main_source)
def do_step(self, step_type, destination_line_entry, test_stack_depth):
expected_stack_depth = self.thread.GetNumFrames()
if step_type == "into":
expected_stack_depth += 1
self.thread.StepInto()
elif step_type == "out":
expected_stack_depth -= 1
self.thread.StepOut()
elif step_type == "over":
self.thread.StepOver()
else:
self.fail ("Unrecognized step type: " + step_type)
threads = lldbutil.get_stopped_threads (self.process, lldb.eStopReasonPlanComplete)
if len(threads) != 1:
destination_description = lldb.SBStream()
destination_line_entry.GetDescription(destination_description)
self.fail ("Failed to stop due to step " + step_type + " operation stepping to: " + destination_description.GetData())
self.thread = threads[0]
stop_line_entry = self.thread.GetFrameAtIndex(0).GetLineEntry()
self.assertTrue (stop_line_entry.IsValid(), "Stop line entry was not valid.")
# Don't use the line entry equal operator because we don't care about the column number.
stop_at_right_place = (stop_line_entry.GetFileSpec() == destination_line_entry.GetFileSpec() and stop_line_entry.GetLine() == destination_line_entry.GetLine())
if stop_at_right_place == False:
destination_description = lldb.SBStream()
destination_line_entry.GetDescription(destination_description)
actual_description = lldb.SBStream()
stop_line_entry.GetDescription(actual_description)
self.fail ("Step " + step_type + " stopped at wrong place: expected: " + destination_description.GetData() + " got: " + actual_description.GetData() + ".")
real_stack_depth = self.thread.GetNumFrames()
if test_stack_depth and real_stack_depth != expected_stack_depth:
destination_description = lldb.SBStream()
destination_line_entry.GetDescription(destination_description)
self.fail ("Step %s to %s got wrong number of frames, should be: %d was: %d."%(step_type, destination_description.GetData(), expected_stack_depth, real_stack_depth))
def run_step_sequence(self, step_sequence):
"""This function takes a list of duples instructing how to run the program. The first element in each duple is
a source pattern for the target location, and the second is the operation that will take you from the current
source location to the target location. It will then run all the steps in the sequence.
It will check that you arrived at the expected source location at each step, and that the stack depth changed
correctly for the operation in the sequence."""
target_line_entry = lldb.SBLineEntry()
target_line_entry.SetFileSpec(self.main_source_spec)
test_stack_depth = True
# Work around for <rdar://problem/16363195>, the darwin unwinder seems flakey about whether it duplicates the first frame
# or not, which makes counting stack depth unreliable.
if self.platformIsDarwin():
test_stack_depth = False
for step_pattern in step_sequence:
step_stop_line = line_number (self.main_source, step_pattern[0])
target_line_entry.SetLine(step_stop_line)
self.do_step (step_pattern[1], target_line_entry, test_stack_depth)
def inline_stepping(self):
"""Use Python APIs to test stepping over and hitting breakpoints."""
exe = os.path.join(os.getcwd(), "a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
break_1_in_main = target.BreakpointCreateBySourceRegex ('// Stop here and step over to set up stepping over.', self.main_source_spec)
self.assertTrue(break_1_in_main, VALID_BREAKPOINT)
# Now launch the process, and do not stop at entry point.
self.process = target.LaunchSimple (None, None, self.get_process_working_directory())
self.assertTrue(self.process, PROCESS_IS_VALID)
# The stop reason of the thread should be breakpoint.
threads = lldbutil.get_threads_stopped_at_breakpoint (self.process, break_1_in_main)
if len(threads) != 1:
self.fail ("Failed to stop at first breakpoint in main.")
self.thread = threads[0]
# Step over the inline_value = 0 line to get us to inline_trivial_1 called from main. Doing it this way works
# around a bug in lldb where the breakpoint on the containing line of an inlined function with no return value
# gets set past the insertion line in the function.
# Then test stepping over a simple inlined function. Note, to test all the parts of the inlined stepping
# the calls inline_stepping_1 and inline_stepping_2 should line up at the same address, that way we will test
# the "virtual" stepping.
# FIXME: Put in a check to see if that is true and warn if it is not.
step_sequence = [["// At inline_trivial_1 called from main.", "over"],
["// At first call of caller_trivial_1 in main.", "over"]]
self.run_step_sequence(step_sequence)
# Now step from caller_ref_1 all the way into called_by_inline_trivial
step_sequence = [["// In caller_trivial_1.", "into"],
["// In caller_trivial_2.", "into"],
["// In inline_trivial_1.", "into"],
["// In inline_trivial_2.", "into"],
["// At caller_by_inline_trivial in inline_trivial_2.", "over"],
["// In called_by_inline_trivial.", "into"]]
self.run_step_sequence(step_sequence)
# Now run to the inline_trivial_1 just before the immediate step into inline_trivial_2:
break_2_in_main = target.BreakpointCreateBySourceRegex ('// At second call of caller_trivial_1 in main.', self.main_source_spec)
self.assertTrue(break_2_in_main, VALID_BREAKPOINT)
threads = lldbutil.continue_to_breakpoint (self.process, break_2_in_main)
self.assertTrue (len(threads) == 1, "Successfully ran to call site of second caller_trivial_1 call.")
self.thread = threads[0]
step_sequence = [["// In caller_trivial_1.", "into"],
["// In caller_trivial_2.", "into"],
["// In inline_trivial_1.", "into"]]
self.run_step_sequence(step_sequence)
# Then call some trivial function, and make sure we end up back where we were in the inlined call stack:
frame = self.thread.GetFrameAtIndex(0)
before_line_entry = frame.GetLineEntry()
value = frame.EvaluateExpression ("function_to_call()")
after_line_entry = frame.GetLineEntry()
self.assertTrue (before_line_entry.GetLine() == after_line_entry.GetLine(), "Line entry before and after function calls are the same.")
# Now make sure stepping OVER in the middle of the stack works, and then check finish from the inlined frame:
step_sequence = [["// At increment in inline_trivial_1.", "over"],
["// At increment in caller_trivial_2.", "out"]]
self.run_step_sequence(step_sequence)
# Now run to the place in main just before the first call to caller_ref_1:
break_3_in_main = target.BreakpointCreateBySourceRegex ('// At first call of caller_ref_1 in main.', self.main_source_spec)
self.assertTrue(break_3_in_main, VALID_BREAKPOINT)
threads = lldbutil.continue_to_breakpoint (self.process, break_3_in_main)
self.assertTrue (len(threads) == 1, "Successfully ran to call site of first caller_ref_1 call.")
self.thread = threads[0]
step_sequence = [["// In caller_ref_1.", "into"],
["// In caller_ref_2.", "into"],
["// In inline_ref_1.", "into"],
["// In inline_ref_2.", "into"],
["// In called_by_inline_ref.", "into"],
["// In inline_ref_2.", "out"],
["// In inline_ref_1.", "out"],
["// At increment in inline_ref_1.", "over"],
["// In caller_ref_2.", "out"],
["// At increment in caller_ref_2.", "over"]]
self.run_step_sequence (step_sequence)
def inline_stepping_step_over(self):
"""Use Python APIs to test stepping over and hitting breakpoints."""
exe = os.path.join(os.getcwd(), "a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
break_1_in_main = target.BreakpointCreateBySourceRegex ('// At second call of caller_ref_1 in main.', self.main_source_spec)
self.assertTrue(break_1_in_main, VALID_BREAKPOINT)
# Now launch the process, and do not stop at entry point.
self.process = target.LaunchSimple (None, None, self.get_process_working_directory())
self.assertTrue(self.process, PROCESS_IS_VALID)
# The stop reason of the thread should be breakpoint.
threads = lldbutil.get_threads_stopped_at_breakpoint (self.process, break_1_in_main)
if len(threads) != 1:
self.fail ("Failed to stop at first breakpoint in main.")
self.thread = threads[0]
step_sequence = [["// In caller_ref_1.", "into"],
["// In caller_ref_2.", "into"],
["// At increment in caller_ref_2.", "over"]]
self.run_step_sequence (step_sequence)
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
| 2
| 2
|
src/shootadoc/cli.py
|
akaihola/shootadoc
| 0
|
12776632
|
#!/usr/bin/env python
from dataclasses import dataclass
from math import log2
from typing import Callable, Optional, Tuple, Union
import click
import PIL.Image
import PIL.ImageMath
from PIL.Image import Image
from PIL.ImageChops import darker, lighter
def _normalize_offset(offset: int, size: int) -> int:
    # negative offsets count from the end, matching normal Python slice semantics
    return offset if offset >= 0 else size + offset
@dataclass
class ImageSlicer:
image: Image
def _get_absolute_range(
self, item: Union[slice, int], axis: int
) -> Tuple[int, int]:
size = self.image.size[axis]
if item is None:
return 0, size
if isinstance(item, slice):
assert item.step is None
return (
0 if item.start is None else _normalize_offset(item.start, size),
size if item.stop is None else _normalize_offset(item.stop, size),
)
offset = _normalize_offset(item, size)
return offset, offset + 1
def __getitem__(
self, item: Tuple[Union[slice, int, None], Union[slice, int, None]]
) -> Image:
x, y = item
x1, x2 = self._get_absolute_range(x, 0)
y1, y2 = self._get_absolute_range(y, 1)
return self.image.crop((x1, y1, x2, y2))
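# get_brightest_neighbor overlays the image with copies of itself shifted by
# `shift` pixels down, right and diagonally, keeping the per-pixel extreme
# (lighter by default, darker when a different aggregate is passed). Repeating
# this with doubling shifts in get_extreme approximates a local max/min filter
# over a block of roughly 2**steps pixels.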
def get_brightest_neighbor(image: Image, shift: int, aggregate=lighter) -> Image:
slicer = ImageSlicer(image)
orig = slicer[:-shift, :-shift]
down = slicer[:-shift, shift:]
right = slicer[shift:, :-shift]
diag = slicer[shift:, shift:]
return aggregate(aggregate(orig, down), aggregate(right, diag))
def fill(image: Image, direction: int, x: int = None, y: int = None) -> None:
def get_filler_dimension(offset: Optional[int], size: int) -> int:
if offset is None:
return size
return offset if direction == -1 else size - offset - 1
def get_filler_offset(offset: Optional[int]) -> int:
return 0 if offset is None or direction == -1 else offset + 1
slicer = ImageSlicer(image)
filler = slicer[x, y].resize(
(get_filler_dimension(x, image.width), get_filler_dimension(y, image.height))
)
image.paste(filler, (get_filler_offset(x), get_filler_offset(y)))
def get_extreme(
image: Image, steps: int, mode: Callable[[Image, Image], Image]
) -> Image:
out = PIL.Image.new(image.mode, image.size)
assert steps > 0
for step in range(steps):
shift = 2 ** step
image = get_brightest_neighbor(image, shift, mode)
out.paste(image, (shift, shift))
fill(out, direction=-1, y=shift)
fill(out, direction=1, y=out.height - shift)
fill(out, direction=-1, x=shift)
fill(out, direction=1, x=out.width - shift)
return out
@click.command()
@click.argument("input_path")
@click.argument("output_path")
@click.option("-b", "--block-size", default=0)
@click.option("-w", "--white-level", default=192)
def handle_image(input_path, output_path, block_size, white_level):
image = PIL.Image.open(input_path).convert("L")
if not block_size:
block_size = int(log2(min(image.size))) - 1
adjusted_image = PIL.ImageMath.eval(
"255 * float(image - darkest) / float(brightest - darkest) / gain",
image=image,
darkest=get_extreme(image, block_size, PIL.ImageChops.darker),
brightest=get_extreme(image, block_size, PIL.ImageChops.lighter),
gain=white_level / 255.0,
)
adjusted_image.convert("L").save(output_path)
if __name__ == "__main__":
handle_image()
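# Hypothetical CLI invocation (file names are placeholders, not from the source):
#   python cli.py scanned_page.jpg cleaned_page.png --block-size 8 --white-level 200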
| 2.5625
| 3
|
server_dev/example_app/schemes/__init__.py
|
elishakrasz1/effort
| 0
|
12776633
|
from .user import UserCreateMutation, UserUpdateMutation
| 1.109375
| 1
|
lib/schemas.py
|
xjdrlabs/rcs_demo_code
| 0
|
12776634
|
<filename>lib/schemas.py
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class SchemaMode:
Backend = "BE"
Frontend = "FE"
def make_type(typename, required):
if required:
return [typename]
else:
return ["null", typename]
def add_field(schema, name, field_type):
# schema["fields"].append({"name": name, "type": field_type, "default": "NONE"})
schema["fields"].append({"name": name, "type": field_type})
def add_string(schema, name, required):
add_field(schema, name, make_type("string", required))
def add_array(schema, name, typename, required):
add_field(schema, name, make_type({"type": "array", "items": typename}, required))
def add_int64_array(schema, name, required):
add_array(schema, name, "long", required)
def add_float64_array(schema, name, required):
add_array(schema, name, "double", required)
def add_string_array(schema, name, required):
add_array(schema, name, "string", required)
# add_field(schema, name, ["null", {"type": "array", "items": "string"}])
def add_bool(schema, name, required):
add_field(schema, name, make_type("boolean", required))
def add_int64(schema, name, required):
add_field(schema, name, make_type("long", required))
def add_timestamp(schema, name, required):
add_field(schema, name, make_type("string", required))
def add_fields(schema, table_columns):
debug = []
for table_column in table_columns:
ignore_field = False
required = False
if len(table_column) == 4:
func, col, required, ignore_field = table_column
elif len(table_column) == 3:
func, col, required = table_column
else:
func, col = table_column[:2]
debug.append((func, col, required, ignore_field))
if not ignore_field:
func(schema, col, required)
def oms_retail_schema(name, fields):
schema = dict()
schema["namespace"] = "google.retail.oms"
schema["type"] = "record"
schema["name"] = name
schema["fields"] = list()
add_fields(schema, fields)
return schema
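# Illustrative example (added, not from the original source): a minimal record
# built with oms_retail_schema; per make_type(), a required field keeps a bare
# type list while an optional field becomes the union ["null", <type>].
#   oms_retail_schema("example", [(add_string, "id", True), (add_int64, "count")])
#   # -> {"namespace": "google.retail.oms", "type": "record", "name": "example",
#   #     "fields": [{"name": "id", "type": ["string"]},
#   #                {"name": "count", "type": ["null", "long"]}]}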
def get_product_schema(mode=SchemaMode.Backend):
fields = [
(add_string, "id", True, mode == SchemaMode.Frontend), # NOFE
(add_string, "sku", mode == SchemaMode.Frontend),
(add_string, "title", mode == SchemaMode.Frontend),
(add_string, "name", mode == SchemaMode.Frontend),
(add_string, "description", mode == SchemaMode.Frontend),
(add_string, "pdp_link", mode == SchemaMode.Frontend),
(add_string, "main_image_link"),
(add_string_array, "additional_images"),
(add_int64, "gtin"),
(add_string, "mpn"),
(add_bool, "identifier_exists"),
(add_float64_array, "features", False, mode == SchemaMode.Frontend), # NOFE
(add_float64_array, "memory", False, mode == SchemaMode.Frontend), # NOFE
(add_string_array, "filters"),
(add_string_array, "item_groups", mode == SchemaMode.Frontend),
(add_timestamp, "created_at"),
(add_timestamp, "expires_at"),
(add_timestamp, "last_updated"),
(add_timestamp, "commit_timestamp", True, mode == SchemaMode.Frontend), # NOFE
]
return oms_retail_schema("product", fields)
| 1.929688
| 2
|
queens_puzzle/database/__init__.py
|
mauricio-chavez/queens_puzzle_solver
| 0
|
12776635
|
<filename>queens_puzzle/database/__init__.py
"""Project database manager"""
from .sessions import _SessionFactory, Base, engine
from .models import Solution, SolutionQuery
def session_factory():
"""Retrives sessions"""
Base.metadata.create_all(engine)
return _SessionFactory()
def create_solution(n, solution):
"""Inserts a solution row into database"""
session = session_factory()
solution = Solution(n, solution)
session.add(solution)
session.commit()
session.close()
def create_solution_query(n, status):
"""Inserts a solution query row into database"""
session = session_factory()
solution_query = SolutionQuery(n, status)
session.add(solution_query)
session.commit()
session.close()
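# Minimal usage sketch (added; assumes the Solution/SolutionQuery models in
# .models accept an integer board size plus a serialized solution or status string):
#   create_solution_query(8, "SOLVED")
#   create_solution(8, "[0, 4, 7, 5, 2, 6, 1, 3]")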
| 2.5625
| 3
|
example/tasks/__init__.py
|
FranciscoCarbonell/flask-task
| 0
|
12776636
|
from flask_task import Task
from models import *
import time
task = Task()
print(task)
@task.decorator
def proceso():
users = Users.query.all()
print(users)
time.sleep(7)
| 2.25
| 2
|
jaseci_serv/jaseci_serv/jac_api/tests/test_jac_admin_api.py
|
seed4600/jaseci
| 0
|
12776637
|
<reponame>seed4600/jaseci<filename>jaseci_serv/jaseci_serv/jac_api/tests/test_jac_admin_api.py
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
from jaseci.utils.utils import TestCaseHelper
from django.test import TestCase
class PrivateJacAdminApiTests(TestCaseHelper, TestCase):
"""Test the authorized user node API"""
def setUp(self):
super().setUp()
# First user is always super,
self.user = get_user_model().objects.create_user(
'<EMAIL>',
'password'
)
self.nonadmin = get_user_model().objects.create_user(
'<EMAIL>',
'password'
)
self.client = APIClient()
self.client.force_authenticate(self.user)
self.notadminc = APIClient()
self.notadminc.force_authenticate(self.nonadmin)
self.master = self.user.get_master()
def tearDown(self):
super().tearDown()
def test_jac_api_config_index_has_core(self):
payload = {'op': 'config_index'}
res = self.client.post(
reverse(f'jac_api:{payload["op"]}'), payload, format='json')
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertGreater(len(res.data), 2)
self.assertIn('ACTION_SETS', res.data)
def test_jac_api_create_config(self):
"""Test API for creating a config"""
payload = {'op': 'config_set', 'name': 'EMAIL_HOST_USER',
'value': 'val1', 'do_check': False}
res = self.client.post(
reverse(f'jac_api:{payload["op"]}'), payload, format='json')
self.assertEqual(res.status_code, status.HTTP_200_OK)
payload = {'op': 'config_get', 'name': 'EMAIL_HOST_USER'}
res = self.client.post(
reverse(f'jac_api:{payload["op"]}'), payload, format='json')
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, "val1")
def test_jac_api_create_config_needs_force(self):
"""Test API for creating a config"""
payload = {'op': 'config_delete', 'name': 'TEST'}
res = self.client.post(
reverse(f'jac_api:{payload["op"]}'), payload, format='json')
payload = {'op': 'config_set',
'name': 'TEST', 'value': 'val1'}
res = self.client.post(
reverse(f'jac_api:{payload["op"]}'), payload, format='json')
self.assertEqual(res.status_code, status.HTTP_200_OK)
payload = {'op': 'config_get', 'name': 'TEST'}
res = self.client.post(
reverse(f'jac_api:{payload["op"]}'), payload, format='json')
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertNotEqual(res.data, "val1")
def test_jac_api_create_config_nonadmin_fails(self):
"""Test API for creating a config"""
payload = {'op': 'config_set',
'name': 'EMAIL_HOST_USER', 'value': 'val1'}
res = self.notadminc.post(
reverse(f'jac_api:{payload["op"]}'), payload, format='json')
self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)
def test_jac_api_create_config_list(self):
"""Test API for creating a config"""
payload = {'op': 'config_set', 'name': 'EMAIL_HOST_USER',
'value': 'val1', 'do_check': False}
res = self.client.post(
reverse(f'jac_api:{payload["op"]}'), payload, format='json')
self.assertEqual(res.status_code, status.HTTP_200_OK)
payload = {'op': 'config_set', 'name': 'EMAIL_HOST_PASSWORD',
'value': 'val2', 'do_check': False}
res = self.client.post(
reverse(f'jac_api:{payload["op"]}'), payload, format='json')
self.assertEqual(res.status_code, status.HTTP_200_OK)
payload = {'op': 'config_set', 'name': 'EMAIL_DEFAULT_FROM',
'value': 'val3', 'do_check': False}
res = self.client.post(
reverse(f'jac_api:{payload["op"]}'), payload, format='json')
self.assertEqual(res.status_code, status.HTTP_200_OK)
payload = {'op': 'config_set', 'name': 'EMAIL_BACKEND',
'value': 'val4', 'do_check': False}
res = self.client.post(
reverse(f'jac_api:{payload["op"]}'), payload, format='json')
self.assertEqual(res.status_code, status.HTTP_200_OK)
payload = {'op': 'config_list'}
res = self.client.post(
reverse(f'jac_api:{payload["op"]}'), payload, format='json')
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 4)
self.assertIn('EMAIL_DEFAULT_FROM', res.data)
| 2.296875
| 2
|
websocks/rule.py
|
abersheeran/websocks
| 91
|
12776638
|
<filename>websocks/rule.py<gh_stars>10-100
import os
import base64
import typing
import logging
from urllib import request
from .utils import Singleton
root = os.path.dirname(os.path.abspath(__file__))
if not os.path.exists(root):
os.makedirs(root)
gfwlist_path = os.path.join(root, "gfwlist.txt")
whitelist_path = os.path.join(root, "whitelist.txt")
cache: typing.Set[str] = set()
logger = logging.getLogger(__name__)
class FilterRule(metaclass=Singleton):
def __init__(self, yourself_s: typing.Sequence[str] = []) -> None:
self.yourself_s = list(yourself_s)
@staticmethod
def download_gfwlist(
url: str = "https://cdn.jsdelivr.net/gh/gfwlist/gfwlist/gfwlist.txt",
) -> None:
if url is None:
print("gfwlist url is None, nothing to do.", flush=True)
return
req = request.Request(url, method="GET")
resp = request.urlopen(req)
with open(gfwlist_path, "wb+") as file:
base64.decode(resp, file)
@staticmethod
def download_whitelist(
url: str = "https://cdn.jsdelivr.net/gh/abersheeran/websocks/websocks/whitelist.txt",
) -> None:
if url is None:
print("whitelist url is None, nothing to do.", flush=True)
return
req = request.Request(url, method="GET")
resp = request.urlopen(req)
with open(whitelist_path, "wb+") as file:
file.write(resp.read())
@staticmethod
def open(filepath: str) -> typing.Generator:
try:
with open(filepath, "r") as file:
for line in file.readlines():
yield line.strip()
except FileNotFoundError:
pass
def judge(self, host: str) -> typing.Optional[bool]:
"""
Return False if an exception rule matches, True if a blocking rule matches.
Return None if the host is not covered by any rule.
"""
result = self._judge_yourself(host)
if result is not None:
return result
result = self._judge_whitelist(host)
if result is not None:
return result
result = self._judge_gfwlist(host)
if result is not None:
return result
def _judge_whitelist(self, host: str) -> typing.Optional[bool]:
"""
Match against the whitelist
"""
return self._judge_from_file(whitelist_path, host)
def _judge_gfwlist(self, host: str) -> typing.Optional[bool]:
"""
Match against the GFWList
"""
return self._judge_from_file(gfwlist_path, host)
def _judge_yourself(self, host: str) -> typing.Optional[bool]:
"""
Match against the user-defined rule files
"""
for filepath in self.yourself_s:
result = self._judge_from_file(filepath, host)
if result is not None:
return result
def _judge_from_file(self, filepath: str, host: str) -> typing.Optional[bool]:
for line in self.open(filepath):
line = line.strip()
if not line:
continue
result = self._judge(line, host)
if result is not None:
return result
def _judge(self, line: str, host: str) -> typing.Optional[bool]:
if line.startswith("!"):
return None
if line[:2] == "||":
if host.endswith(line[2:]):
return True
elif line[0] == ".":
if host.endswith(line) or host == line[1:]:
return True
elif line.startswith("@@"):
_ = self._judge(line[2:], host)
if _ is not None:
return not _
else:
if host.startswith(line):
return True
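# Illustrative rule examples (added commentary, mirroring _judge above):
#   "||example.com"    -> True for any host ending in "example.com"
#   ".example.com"     -> True for "sub.example.com" and for "example.com" itself
#   "@@||example.com"  -> exception rule: returns False where the inner rule matches
#   "ads."             -> True for hosts starting with "ads."
#   "! comment"        -> ignored (returns None)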
def judge(host: str) -> typing.Optional[bool]:
"""检查是否需要走代理"""
if host in cache:
return True
result = FilterRule().judge(host)
if result is True:
cache.add(host)
return result
def add(host: str) -> None:
"""增加新的 host 进加速名单"""
cache.add(host)
| 2.421875
| 2
|
CalibrateTransfer/data_preprocess.py
|
IMBINGO95/FairMOT
| 0
|
12776639
|
<filename>CalibrateTransfer/data_preprocess.py<gh_stars>0
import cv2
import json
import codecs
import os
import torch
import time
import shutil
import re
import argparse
# from M2Det.utils.core import print_info
from CalibrateTransfer.class_set import *
from CalibrateTransfer.img_operation import ScreenSHot
# from Config_ReID import cfg as ReIDCfg
# from ReID_model.modeling import ReID_Model
from ReID_model.utils.dataset_loader import ReID_imgs_load_by_home_and_away
#
from utils_BINGO.K_Means import k_means
def make_dir(root_path,index,Secondary_directory='visualization'):
'''Use the os module to make a dir.
index: the index of the action entry that this action corresponds to.
'''
visualization_dir = os.path.join(root_path, '{}/{}'.format(Secondary_directory,index))
if os.path.exists(visualization_dir):
shutil.rmtree(visualization_dir)
os.makedirs(visualization_dir)
return visualization_dir
def for_football_detection(all_img_points,index,save_dir,video_parameter,setting_parameter):
visualization_dir = make_dir(save_dir, index)
for count,(action_time,img_point) in enumerate(all_img_points):
Message = ScreenSHot(img_point, action_time=action_time, video_parameter=video_parameter,
setting_parameter=setting_parameter)
if Message[0] == True:
cv2.imwrite(os.path.join(visualization_dir,'{}.jpg'.format(count)),Message[1])
def read_subdata(sub_data,Videoparameters):
'''
read each item of sub data
:param sub_data:
:param Videoparameters:
:return:
'''
channel = sub_data['channel']
# action_time = sub_data['action_time'] - Videoparameters[channel]['delta_t']
action_time = sub_data['action_time']
img_point = [sub_data['image_x'], sub_data['image_y']]
video_parameter = Videoparameters[channel]
return channel,action_time,img_point,video_parameter
def regular_videoName(root_path):
'''
Given a string regular-expression rule, sort the video names by that rule.
:param root_path: the path that includes the videos
:return: the target videoname dictionary.
'''
# build the regular expression pattern
re_videoName = re.compile(r'(c|C)h0\w*.mp4')
videoNames = {}
for videoName in os.listdir(root_path):
if re_videoName.match(videoName):
videoNames[videoName[3]] = videoName
return videoNames
def read_data_from_json_file(root_path,file_name, args):
'''
:param root_path: root path of the target game
:param file_name: the file name of the target json file
:param args:
:return:
'''
# read data from json file that operator get.
with codecs.open(os.path.join(root_path, file_name), 'r', 'utf-8-sig') as f:
data = json.load(f)
# given a string regular-expression rule, sort the video names by this rule
videoNames = regular_videoName(root_path)
parameter = data['params']
action_data = data['data']
# manually set by the operator
setting_parameter = {'Output_size': args.ImgSize, 'bias': args.bias, 'radius': args.radius}
Videoparameters = {}
channel_list = []
for channel in parameter:
channel_list.append(channel)
Videoparameter = {}
Videoparameter['CalibrateParameter'] = dict_To_CalibrateParameter(parameter[channel]['section']['section1'])
# read in video by channel id.
videoName = channel[-1]
if channel[-1] in videoNames:
videoName = videoNames[channel[-1]]
else:
raise ValueError('Target video {} does not exist'.format(videoName))
videoName = os.path.join(root_path,videoName)
capture = cv2.VideoCapture(videoName)
if capture.isOpened():
Videoparameter['video'] = capture
else:
raise ValueError('{} cannot be opened'.format(videoName))
Videoparameter['delta_t'] = parameter[channel]['delta_t'] / 1000
Videoparameters[channel] = Videoparameter
return Videoparameters,setting_parameter,action_data,channel_list,parameter
def read_data_from_json_file_v2(root_path, file_name, args):
'''
The v2 version does not read the video files.
:param root_path: root path of the target game
:param file_name: the file name of the target json file
:param args:
:return:
'''
# read data from json file that operator get.
with codecs.open(os.path.join(root_path, file_name), 'r', 'utf-8-sig') as f:
data = json.load(f)
parameter = data['params']
action_data = data['data']
# manually set by the operator
setting_parameter = {'Output_size': args.ImgSize, 'bias': args.bias, 'radius': args.radius}
Videoparameters = {}
channel_list = []
for channel in parameter:
channel_list.append(channel)
Videoparameter = {}
Videoparameter['CalibrateParameter'] = dict_To_CalibrateParameter(parameter[channel]['section']['section1'])
Videoparameter['delta_t'] = parameter[channel]['delta_t'] / 1000
Videoparameters[channel] = Videoparameter
return Videoparameters, setting_parameter, action_data, channel_list, parameter
def write_data_to_json_file(root_path,file_name,action_data,parameter, file_save_name='result_'):
'''write data to json '''
with codecs.open(os.path.join(root_path, file_name), 'r', 'utf-8-sig') as f:
data = json.load(f)
data['data'] = action_data
with open(os.path.join(root_path,file_save_name+file_name),'w') as f:
json.dump(data,f)
def read_stack_data(root, re_format,max_index=0):
'''Calculate where the action detection stopped.'''
for file in os.listdir(root):
groups = re_format.match(file)
if groups!= None:
max_index = max(max_index,int(groups[1]))
return max_index
def mk_cluster_dirs(save_dir,num_cls):
for i in range(num_cls):
dir = os.path.join(save_dir,str(i))
if not os.path.exists(dir):
os.makedirs(os.path.join(dir,'True'))
os.makedirs(os.path.join(dir,'False'))
def cluster_main_imgs(ReID, ReIDCfg, main_img_dir, action_datas, save_dir, num_cls):
'''
:param ReID: ReID model
:param ReIDCfg: ReID configuration
:param main_img_dir: the dir holding the imgs that the programme wants to cluster
:param action_datas:
:param save_dir:
:param num_cls: how many classes the programme wants
:return:
'''
# make directories to save the clustered imgs.
mk_cluster_dirs(save_dir, num_cls)
'''Preprocess the imgs before ReID'''
if not os.path.exists(main_img_dir):
raise ValueError("The main_img_dir is not exits")
imgs_arrays_all, img_names_all = ReID_imgs_load_by_home_and_away(ReIDCfg, main_img_dir, action_datas['data'])
t1 = time.time()
cls_res_all = {'Home':0,'Away':2}
for TeanIndex,TeamType in enumerate(['Home','Away']):
print('TeamType============================================',TeamType)
all_feats = []
imgs_arrays = imgs_arrays_all[TeamType]
img_names = img_names_all[TeamType]
cls_res = cls_res_all[TeamType]
with torch.no_grad():
if torch.cuda.device_count() >= 1:
print('torch.cuda.device.count = {}'.format(torch.cuda.device_count()))
for imgs_array in imgs_arrays:
imgs_array = imgs_array.to('cuda')
feats = ReID(imgs_array).cpu().numpy().tolist()
all_feats.extend(feats)
length = len(all_feats)
print('There are {} actions to be dealt with.'.format(length))
t1 = time.time()
assignments, dataset = k_means(all_feats, 2)
# feats = torch.cat(all_feats,dim=0)
# feats = torch.nn.functional.normalize(feats,dim=1,p=2)
for index, cls in enumerate(assignments):
cls += cls_res
# Is the number of this img detected ?
if int(action_datas['data'][int(img_names[index])]['num']) == -1 or action_datas['data'][int(img_names[index])]['num'] == None:
IsNumPredited = False
shutil.copyfile(os.path.join(main_img_dir, img_names[index] + '.jpg'),
os.path.join(save_dir,
'{}'.format(cls),
'{}_.jpg'.format(img_names[index])))
else:
IsNumPredited = True
shutil.copyfile(os.path.join(main_img_dir, img_names[index] + '.jpg'),
os.path.join(save_dir,
'{}'.format(cls),
'{}_{}.jpg'.format(img_names[index],
action_datas['data'][int(img_names[index])]['num'])))
action_datas['data'][int(img_names[index])]['team'] = str(cls)
t2 = time.time()
print('time = {}'.format(t2 - t1))
return action_datas
def cal_the_accuracy(file):
with codecs.open(file, 'r', 'utf-8-sig') as f:
action_datas = json.load(f)
correct = 0
wrong_10 = 0
wrong_minus1 = 0
for index, item in enumerate(action_datas['data']):
if item['num'] == "10":
wrong_10 += 1
elif item['num'] == "-1":
wrong_minus1 += 1
else:
correct += 1
print('wrong_10 number = {}, wrong_minus1 number = {}, correct number = {}'.format(
wrong_10, wrong_minus1, correct))
if __name__ == "__main__":
os.environ["CUDA_VISIBLE_DEVICES"] = '1,5'
main_img_dir = '/datanew/hwb/data/Football/SoftWare/2/main_imgs'
json_file = '/datanew/hwb/data/Football/SoftWare/2/with_VOT_test_1.json'
json_file_after_cluster = '/datanew/hwb/data/Football/SoftWare/2/with_VOT_test_1.json'
# with codecs.open(json_file, 'r', 'utf-8-sig') as f:
# action_datas = json.load(f)
#
# parser = argparse.ArgumentParser(description='detection and tracking!')
# # set for ReID classification
# parser.add_argument(
# "--ReIDCfg",
# default="/datanew/hwb/Re-ID_models/reid-strong-baseline-master/configs/softmax_triplet_with_center.yml",
# help="path to config file", type=str)
# args = parser.parse_args()
#
# ####################################
# # Load in ReID Classification model#
# ####################################
# print_info('===> Start to constructing and loading ReID model', ['yellow', 'bold'])
# if args.ReIDCfg != "":
# ReIDCfg.merge_from_file(args.ReIDCfg)
# ReIDCfg.freeze()
#
# # imgs_arrays, img_names = ReID_imgs_load(ReIDCfg,main_img_dir)
#
#
# ReID = ReID_Model(ReIDCfg)
#
# # ReID = torch.nn.DataParallel(ReID)
#
# print_info('===> Finished constructing and loading ReID model', ['blue', 'bold'])
#
# data_after = cluster_main_imgs(ReID, ReIDCfg, main_img_dir, action_datas, main_img_dir, 4)
#
# with codecs.open(json_file_after_cluster, 'w', 'utf-8-sig') as f:
# json.dump(data_after,f)
| 2.359375
| 2
|
infobot/storage/file.py
|
otuk/infobot
| 0
|
12776640
|
<filename>infobot/storage/file.py
import os
import yaml
import infobot.konstants as K
from infobot.storage.template import Admin
from infobot.config import Admin as ConfigAdm
from infobot.brains import Brains
class FileAdminConf():
def __init__(self, fileadmindetails):
"Configurations Object for File based storage admin"
self.directory = Brains.expand_home(
fileadmindetails[K.dataDirectoryKey])
self.counterfile = Brains.expand_home(
fileadmindetails[K.counterKey])
self.indexFileFormat = fileadmindetails[K.indexFileFormatKey]
class FileAdmin(Admin):
def __init__(self, config, fileadmindetails):
"""
This is a file system based storage admin.
It stores each information to be posted as a separate file
It also keeps track of the number of items to be posted
"""
super().__init__(config)
self._details = FileAdminConf(fileadmindetails)
self._directory = self._details.directory
self._counterfile = self._details.counterfile
self._indexFileFormat = self._details.indexFileFormat
def status(self):
print("File Based Storage Admin is active")
print(" Reading posts from: {}".format(
self._directory
))
print(" Counter file at: {}".format(
self._counterfile
))
start, last, previous = self.get_counters()
print(" There are {} posts, last post index is: {}".format(
str(last - start + 1),
str(last)
))
def format_index(self, topic, num):
"""
returns formatted filename
"""
return self._indexFileFormat.format(topic, str(num))
def read_from(self, index):
"""
Returns the postable data at the index location
"""
fullpath = os.path.join(self._directory, index)
with open(fullpath) as postfile:
postdata = postfile.read()
return postdata
def get_counters(self):
counterData = ConfigAdm.read_yaml(self._counterfile)
return (int(counterData[K.startKey]),
int(counterData[K.lastKey]),
int(counterData[K.previousKey]))
def increment_last(self):
with open(self._counterfile, "r") as f:
data = yaml.safe_load(f)
cl = int(data[K.lastKey])
data[K.lastKey] = cl + 1
with open(self._counterfile, 'w') as f:
yaml.safe_dump(data, f, default_flow_style=False)
def store_all(self, topic, fromdir):
print(("Will move files with name containing {}\n"
"from: {}\n"
"into: {}")
.format(self.config.topic.name,
fromdir,
self._directory))
fromdir = Brains.expand_home(fromdir)
for f in os.listdir(fromdir):
sourceFile = os.path.join(fromdir, f)
print(sourceFile)
if os.path.isdir(sourceFile) or (topic not in str(f)):
continue
with open(sourceFile) as fd:
data = fd.read()
self.store(topic, data)
os.remove(sourceFile)
def store(self, topic, data):
_, last, _ = self.get_counters()
num = last+1
filename = self.format_index(topic, num)
fullpath = os.path.join(self._directory, filename)
with open(fullpath, "w") as f:
f.write(data)
self.increment_last()
def get_header(self, socialNetwork, topic, num):
header = FileAdmin.templates[socialNetwork][K.headerKey]
return header.format(topic, num)
def get_footer(self, socialNetwork, topic, num):
footer = FileAdmin.templates[socialNetwork][K.footerKey]
return footer.format(topic, num)
templates = {
K.fakeKey:
{
K.headerKey: """
#{}-{}""",
K.footerKey: """
------
Please reply to report error"""
},
K.mastodonKey:
{
K.headerKey: "#{}-{}\n",
K.footerKey: ""
}
}
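# Minimal usage sketch (added; keys and paths are illustrative, `config` is the
# infobot configuration object used elsewhere, and the counter file is expected
# to already contain the start/last/previous entries):
#   admin = FileAdmin(config, {K.dataDirectoryKey: "~/posts",
#                              K.counterKey: "~/posts/counter.yaml",
#                              K.indexFileFormatKey: "{}-{}.txt"})
#   admin.store("python", "post body")
#   admin.status()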
| 2.734375
| 3
|
test_fields_ip.py
|
kezabelle/django-strictmodels
| 2
|
12776641
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.core.exceptions import ValidationError
from django.forms.models import model_to_dict, modelform_factory
from model_mommy.mommy import Mommy
import pytest
from fakeapp.models import GenericIPAddressFieldModel
from strictmodels import MODEL_MOMMY_MAPPING
def test_StrictGenericIPAddressField_no_args():
value = GenericIPAddressFieldModel()
@pytest.mark.django_db
def test_StrictGenericIPAddressField_save():
x = GenericIPAddressFieldModel(field='127.0.0.1')
x.save()
assert model_to_dict(x) == model_to_dict(GenericIPAddressFieldModel.objects.get(pk=x.pk))
@pytest.mark.django_db
def test_StrictGenericIPAddressField_mommy():
mommy = Mommy(model=GenericIPAddressFieldModel)
mommy.type_mapping.update(MODEL_MOMMY_MAPPING)
mommy.prepare()
mommy.make()
@pytest.mark.django_db
def test_StrictGenericIPAddressField_form_with_instance_valid():
x = GenericIPAddressFieldModel(field='127.0.0.1')
form_class = modelform_factory(model=GenericIPAddressFieldModel, fields=['field'])
form = form_class(data={'field': '255.255.255.255'}, instance=x)
assert form.is_valid() is True
assert form.errors == {}
assert form.save().field == '255.255.255.255'
def test_StrictGenericIPAddressField_form_with_instance_invalid():
x = GenericIPAddressFieldModel(field='127.0.0.1')
form_class = modelform_factory(model=GenericIPAddressFieldModel, fields=['field'])
form = form_class(data={'field': 'ghost'}, instance=x)
assert form.is_valid() is False
assert form.errors == {'field': ['Enter a valid IPv4 or IPv6 address.']}
@pytest.mark.django_db
def test_StrictGenericIPAddressField_form_without_instance_valid():
form_class = modelform_factory(model=GenericIPAddressFieldModel, fields=['field'])
form = form_class(data={'field': '255.255.255.255'})
assert form.is_valid() is True
assert form.errors == {}
assert form.save().field == '255.255.255.255'
def test_StrictGenericIPAddressField_form_without_instance_invalid():
form_class = modelform_factory(model=GenericIPAddressFieldModel, fields=['field'])
form = form_class(data={'field': 'shark'})
assert form.is_valid() is False
assert form.errors == {'field': ['Enter a valid IPv4 or IPv6 address.']}
def test_StrictGenericIPAddressField_descriptor_doesnt_disappear():
"""
don't clobber the descriptor
"""
value = GenericIPAddressFieldModel(field='127.0.0.1')
assert value.field == '127.0.0.1'
value.field = '127.0.0.2'
assert value.field == '127.0.0.2'
with pytest.raises(ValidationError):
value.field = 'v'*256
assert value.field == '127.0.0.2'
value.field = '192.168.0.1'
assert value.field == '192.168.0.1'
value.field = None
def test_StrictGenericIPAddressField_values_error_length():
"""
Once an input is too long, error loudly.
ValidationError: Ensure this value has at most 255 characters (it has 256)
"""
ok = '2001:0db8:85a3:0042:1000:8a2e:0370:7334'
notok = '2001:0db8:85a3:0042:1000:8a2e:0370:7334a'
assert GenericIPAddressFieldModel(field=ok).field == '2001:db8:85a3:42:1000:8a2e:370:7334' # noqa
with pytest.raises(ValidationError):
GenericIPAddressFieldModel(field=notok)
def test_StrictGenericIPAddressField_null_skips_cleaning():
GenericIPAddressFieldModel(field=None)
def test_StrictGenericIPAddressField_ok_until_changed():
"""
Ensure this value cannot change to an invalid state after being set
"""
model = GenericIPAddressFieldModel(field='2001:0::0:01')
with pytest.raises(ValidationError):
model.field = 't'*256
@pytest.mark.django_db
def test_StrictGenericIPAddressField_create_via_queryset():
"""
ValidationError: Ensure this value has at most 255 characters (it has 256)
"""
assert GenericIPAddressFieldModel.objects.count() == 0
with pytest.raises(ValidationError):
GenericIPAddressFieldModel.objects.create(field='t'*256)
assert GenericIPAddressFieldModel.objects.count() == 0
@pytest.mark.django_db
def test_StrictGenericIPAddressField_update_via_queryset_invalid_then_get():
model = GenericIPAddressFieldModel.objects.create(field='127.0.0.1')
model.__class__.objects.filter(pk=model.pk).update(field='2.2.2.2.2.2.2.2')
with pytest.raises(ValidationError):
model.__class__.objects.get(pk=model.pk)
| 2.03125
| 2
|
database.py
|
dorlneylon/itfy-feed-to-chat
| 7
|
12776642
|
from peewee import SqliteDatabase, Model
from peewee import IntegerField, CharField, PrimaryKeyField, TimestampField
from pathlib import Path
from configparser import ConfigParser
config = ConfigParser()
config.read("config.ini", encoding="utf-8")
db = SqliteDatabase(Path.cwd() / config.get('main', 'database_file'))
class BaseModel(Model):
class Meta:
database = db
class Topic(BaseModel):
id = PrimaryKeyField(null=False)
title = CharField()
link = CharField()
ext_id = IntegerField()
saved_on = TimestampField()
announced_on = TimestampField()
class Meta:
db_table = 'topics'
db.connect()
if not Topic.table_exists():
Topic.create_table()
| 2.765625
| 3
|
src/utils.py
|
SpirinEgor/gnn_pretraining
| 2
|
12776643
|
from warnings import filterwarnings
PAD = "<PAD>"
UNK = "<UNK>"
MASK = "<MASK>"
BOS = "<BOS>"
EOS = "<EOS>"
def filter_warnings():
# "The dataloader does not have many workers which may be a bottleneck."
filterwarnings("ignore", category=UserWarning, module="pytorch_lightning.trainer.data_loading", lineno=102)
filterwarnings("ignore", category=UserWarning, module="pytorch_lightning.utilities.data", lineno=41)
# "Please also save or load the state of the optimizer when saving or loading the scheduler."
filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler", lineno=216) # save
filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler", lineno=234) # load
| 2.421875
| 2
|
_cihub/auth.py
|
gocept/cihub
| 1
|
12776644
|
<filename>_cihub/auth.py
from _cihub.config import config
from starlette.authentication import AuthCredentials
from starlette.authentication import AuthenticationBackend
from starlette.authentication import AuthenticationError
from starlette.authentication import SimpleUser
from starlette.responses import PlainTextResponse
import base64
import binascii
class BasicAuthBackend(AuthenticationBackend):
async def authenticate(self, request):
if "Authorization" not in request.headers:
raise AuthenticationError('You have to authenticate.')
auth = request.headers["Authorization"]
try:
scheme, credentials = auth.split()
if scheme.lower() != 'basic':
return
decoded = base64.b64decode(credentials).decode("ascii")
except (ValueError, UnicodeDecodeError, binascii.Error):
raise AuthenticationError('Invalid basic auth credentials')
username, _, password = decoded.partition(":")
if username == config('username') and password == config('password'):
return AuthCredentials(["authenticated"]), SimpleUser(username)
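# Example of the header this backend parses (added commentary):
#   "Authorization: Basic dXNlcjpwYXNz" base64-decodes to "user:pass",
#   which is then checked against config('username') / config('password').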
def on_auth_error(request, exc):
return PlainTextResponse(
str(exc),
headers={'WWW-Authenticate': 'Basic realm=cihub'},
status_code=401)
| 2.328125
| 2
|
first.py
|
HelloEI/accountbook.py
| 5
|
12776645
|
#!/usr/bin/env python3
import mysql.connector
class Student(object):
def aMethod(name):
print("hello world!,my name is %s"%name)
def doubleNum(number,n):
total = 0
while n > 1:
total = ( number * number ) if total==0 else (total * number)
n = n - 1
return total
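# Note (added): doubleNum(number, n) computes number ** n by repeated
# multiplication, e.g. doubleNum(3, 3) == 27.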
def getData():
conn = mysql.connector.connect(user="root",password="<PASSWORD>",host="127.0.0.1",database="bluemountain")
cursor = conn.cursor()
cursor.execute("SELECT PRODUCT_ID,PRODUCT_NAME FROM PRODUCT WHERE IS_VARIANT='N'")
result = cursor.fetchall()
for productid,name in result:
print("商品ID:%s ,商品名称:%s" % (productid,name))
#aMethod("Sven")
#print(doubleNum(3,3))
| 3.546875
| 4
|
scripts/eval_obj_stats.py
|
albert-yue/objectnav
| 15
|
12776646
|
<reponame>albert-yue/objectnav
#%%
# This notebook analyzes misc episode-level statistics; i.e. reproduces Fig A.1.
import numpy as np
import pandas as pd
import os
import os.path as osp
import json
import matplotlib.pyplot as plt
import seaborn as sns
import PIL.Image
import torch
from obj_consts import get_variant_labels, get_obj_label
from fp_finder import load_variant
from analyze_utils import prep_plt, load_stats
variant = 'base-full'
ckpt = 31
# variant = 'base4-full'
# ckpt = 34
# ckpt = 33
variant = 'split_clamp-full'
ckpt = 31
variant = 'split_rednet-full'
ckpt = 38
is_eval = True
# is_eval = False
is_gt = False
# is_gt = True
# meta_df, title = load_stats(variant, ckpt, is_gt=is_gt)
meta_df, title = load_stats(variant, ckpt, override_fn=f'{variant}/{ckpt}/eval_gt_False_21.pth')
meta_df['variant'] = 'Tethered'
print("Success\t", meta_df['success'].mean())
print("Coverage\t", meta_df['coverage'].mean())
print("Coverage on Success\t", meta_df[meta_df['success'] == 1.0]['coverage'].mean())
variant = 'split_clamp-full'
ckpt = 31
meta_df_2, _ = load_stats(variant, ckpt, is_gt=is_gt)
meta_df_2['variant'] = 'Base'
meta_df = pd.concat([meta_df, meta_df_2])
# meta_df['obj_d2g'] = meta_df.groupby('obj_cat')['geodesic'].transform('mean')
# meta_df = meta_df.sort_values('obj_d2g', ascending=True)
meta_df['scene'] = pd.Categorical(meta_df.scene)
#%%
#%%
# * Success vs Obj Goal / Scene
prep_plt()
y = "success"
# y = "coverage"
# y = "steps"
# y = "spl"
palette = sns.color_palette(n_colors=2)
x = 'obj_cat'
# x = 'scene'
if x == 'obj_cat':
meta_df = meta_df.sort_values('obj_freq', ascending=False)
ax = sns.barplot(x=x, y=y, data=meta_df, ci=None, hue='variant', palette=palette)
if x == 'scene':
meta_df['scene_diff'] = meta_df.groupby('scene')['success'].transform('mean')
meta_df = meta_df.sort_values('scene_diff', ascending=False)
# Hmm, sort doesn't seem to work by default
scene_diff_order = meta_df['scene'].unique()
# print(scene_diff_order)
ax = sns.barplot(x=x, y=y, data=meta_df, ci=None, hue='variant', palette=palette, order=scene_diff_order)
# ax = sns.barplot(x="obj_cat", y="success", data=meta_df, ci=None)
# ax.set_xlabel("Goal Category in Ascending Average Distance")
# ax.set_xlabel("Goal Category in Descending Frequency")
# ax.set_ylim(0.0, 0.85)
sns.despine(ax=ax)
ax.set_ylabel(f"{y} Ratio")
ax.set_ylabel(f"Average Success")
if x == "obj_cat":
ax.set_xlabel("Goals (Descending Frequency)")
elif x == 'scene':
ax.set_xlabel("Scene")
ax.set_title("")
ax.legend(["Tethered", "Base"], frameon=False, fontsize=16)
# ax.text(8, 0.7, "Tethered", color=palette[0], size=16)
# ax.text(8, 0.64, "Base", color=palette[1], size=16)
strs = map(lambda label: label._text, ax.get_xticklabels())
if x == 'obj_cat':
mapper = get_obj_label
elif x == 'scene':
mapper = lambda x: x.split('/')[-2][:5]
ax.set_xticklabels(map(mapper, strs), rotation=45, horizontalalignment='right')
plt.savefig('test.pdf', dpi=150, bbox_inches="tight")
#%%
meta_df_dummy = meta_df_2.copy(deep=True)
meta_df_dummy['success'] = 1
meta_df_dummy['variant'] = 'Total Episodes'
df3 = pd.concat([meta_df_dummy, meta_df])
df3 = df3[df3['success'] == 1]
def plot_success_vs_geodesic(df, cat=None, scene=None, ax=None):
plot_df = df
if cat is not None:
plot_df = df[df['obj_cat'] == cat]
if scene is not None:
plot_df = df[df['scene'] == scene]
# prep_plt()
# sns.despine(ax=ax)
g = sns.displot(
data=plot_df,
x="geodesic",
hue="variant",
# hue="success",
multiple="dodge",
# col='variant',
ax=ax,
bins=np.arange(0, 30, 2)
)
g.set_axis_labels('Goal Geodesic Distance', 'Success Count')
g.legend.set_title("")
g.legend.set_bbox_to_anchor((0.7, 0.7))
# ax.set_xlabel("Geodesic distance")
# ax.set_title(f"Base")
# ax.set_title(f"Tethered")
# ax.set_title(f"{title}")
# fig = plt.figure()
# ax = fig.add_subplot(111)
# plot_success_vs_geodesic(meta_df, ax=ax)
plot_success_vs_geodesic(df3)
plt.savefig('test.pdf', bbox_inches='tight')
# plot_success_vs_geodesic(meta_df, cat="chair")
# plot_success_vs_geodesic(meta_df, cat="table")
# plot_success_vs_geodesic(meta_df, cat="cushion")
# plot_success_vs_geodesic(meta_df, cat="cabinet")
#%%
# Other random plots below
success = 1.0
success = 0.0
success_df = meta_df[meta_df['success'] == success]
# plot_success_vs_geodesic(meta_df, ax=plt.gca())
ax = sns.histplot(
data=success_df,
x="geodesic",
hue="cat_or_rare",
multiple="stack",
ax=plt.gca(),
shrink=0.5,
bins=np.arange(30),
)
import matplotlib as mpl
legends = [c for c in ax.get_children() if isinstance(c, mpl.legend.Legend)]
legends[0].set_title("Category")
plt.ylim(0, 300)
plt.xlabel("Geodesic Distance")
plt.ylabel(f"{'Failure' if success == 0.0 else 'Success'} Count")
# plt.ylabel("Success Count")
#%%
ax = sns.countplot(data=meta_df, x="obj_cat")
ax.set_xticklabels(ax.get_xticklabels(), rotation=45, horizontalalignment='right')
ax.set_xlabel("Category")
ax.set_ylabel("Count")
ax.set_title(f"GT Category Distribution {'EVAL' if is_eval else 'TRAIN'}")
#%%
# ax = sns.barplot(data=meta_df, x="success", y="geodesic", hue="obj_cat")
# ax = sns.barplot(data=meta_df, x="obj_cat", y="geodesic")
ax = sns.barplot(data=meta_df, x="obj_cat", y="geodesic", hue="success")
ax.set_xticklabels(ax.get_xticklabels(), rotation=45, horizontalalignment='right')
ax.set_xlabel("Category")
ax.set_ylabel("Geodesic Distance")
ax.set_title(f"{title} Distance per Category")
| 1.554688
| 2
|
merlin/spec/override.py
|
robinson96/merlin
| 0
|
12776647
|
import logging
import re
import yaml
LOG = logging.getLogger(__name__)
def error_override_vars(override_vars, spec_filepath):
"""
Warn user if any given variable name isn't found in the original spec file.
"""
if override_vars is None:
return
original_text = open(spec_filepath, "r").read()
for variable in override_vars.keys():
if variable not in original_text:
raise ValueError(
f"Command line override variable '{variable}' not found in spec file '{spec_filepath}'."
)
def replace_override_vars(full_text, env, override_vars):
"""
Given the full text of a yaml spec, return the full
text with user variable overrides in the 'env' block.
The env yaml block looks like:
env:
variables:
.......
.......
The regex will find and match to the above yaml block.
"""
updated_env = dict(env)
if override_vars is not None:
for key, val in env.items():
updated_env[key].update(override_vars)
updated_env = {"env": updated_env}
dump = yaml.dump(updated_env, default_flow_style=False, sort_keys=False)
updated_env_text = f"\n{dump}\n"
env_block_pattern = r"\nenv\s*:\s*(\n+( |\t)+.*)+\n*"
regex = re.compile(env_block_pattern)
updated_full_text = re.sub(regex, updated_env_text, full_text)
return updated_full_text
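# Illustrative example (added, not from the original source): with
#   env = {"variables": {"N_SAMPLES": 10}} and override_vars = {"N_SAMPLES": 99},
# the regex above replaces the whole "env:" block of the spec text with
#   env:
#     variables:
#       N_SAMPLES: 99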
def dump_with_overrides(spec, override_vars):
dumped_text = spec.dump()
if override_vars is None:
return dumped_text
result = replace_override_vars(
full_text=dumped_text, env=spec.environment, override_vars=override_vars
)
return result
| 3.15625
| 3
|
src/oci/oda/models/resource_type_metadata.py
|
pabs3/oci-python-sdk
| 0
|
12776648
|
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ResourceTypeMetadata(object):
"""
Describes resources of a given type within a package.
"""
def __init__(self, **kwargs):
"""
Initializes a new ResourceTypeMetadata object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param resource_type:
The value to assign to the resource_type property of this ResourceTypeMetadata.
:type resource_type: str
:param properties:
The value to assign to the properties property of this ResourceTypeMetadata.
:type properties: list[oci.oda.models.MetadataProperty]
"""
self.swagger_types = {
'resource_type': 'str',
'properties': 'list[MetadataProperty]'
}
self.attribute_map = {
'resource_type': 'resourceType',
'properties': 'properties'
}
self._resource_type = None
self._properties = None
@property
def resource_type(self):
"""
Gets the resource_type of this ResourceTypeMetadata.
The type of the resource described by this metadata object.
:return: The resource_type of this ResourceTypeMetadata.
:rtype: str
"""
return self._resource_type
@resource_type.setter
def resource_type(self, resource_type):
"""
Sets the resource_type of this ResourceTypeMetadata.
The type of the resource described by this metadata object.
:param resource_type: The resource_type of this ResourceTypeMetadata.
:type: str
"""
self._resource_type = resource_type
@property
def properties(self):
"""
Gets the properties of this ResourceTypeMetadata.
Any properties needed to describe the content and its usage for this resource type, and within the containing package.
:return: The properties of this ResourceTypeMetadata.
:rtype: list[oci.oda.models.MetadataProperty]
"""
return self._properties
@properties.setter
def properties(self, properties):
"""
Sets the properties of this ResourceTypeMetadata.
Any properties needed to describe the content and its usage for this resource type, and within the containing package.
:param properties: The properties of this ResourceTypeMetadata.
:type: list[oci.oda.models.MetadataProperty]
"""
self._properties = properties
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 2.03125
| 2
|
stinger_client.py
|
wisdark/pystinger
| 973
|
12776649
|
<gh_stars>100-1000
# -*- coding: utf-8 -*-
# @File : client.py
# @Date : 2019/8/28
# @Desc :
# @license : Copyright(C), funnywolf
# @Author: funnywolf
# @Contact : github.com/FunnyWolf
import argparse
import struct
import threading
import time
from socket import AF_INET, SOCK_STREAM
from threading import Thread
import ipaddr
from config import *
try:
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
except Exception as E:
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
global globalClientCenter
class ClientCenter(threading.Thread):
def __init__(self):
self.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
"Connection": "keep-alive",
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
"Accept-Language": "zh-CN,zh;q=0.8",
'Accept-Encoding': 'gzip',
}
self.proxy = None
self.CACHE_CONNS = {}
self.MIRROR_CHCHE_CONNS = {}
# {
# "conn": self.request,
# "targetaddr": TARGET_ADDR,
# "new": True,
# }
# socket parameters
self.LOCAL_ADDR = None
self.READ_BUFF_SIZE = 11200
self.POST_RETRY_COUNT = 10  # maximum number of retries for a POST request
# logging parameters
self.LOG_LEVEL = "INFO"
self.logger = get_logger(level=self.LOG_LEVEL, name="StreamLogger")
# webshell parameters
self.WEBSHELL = None
self.REMOTE_SERVER = None
self.SINGLE_MODE = False
# mirror
self.SOCKET_TIMEOUT = DEFAULT_SOCKET_TIMEOUT
self.TARGET_IP = "127.0.0.1"
self.TARGET_PORT = 60020
# cache variables
self.die_client_address = []
self.mirror_die_client_address = []
self.session = requests.session()
self.session.verify = False
# multithreading variables
self.post_send_data = {}
self.post_return_data = {}
threading.Thread.__init__(self)
def custom_header(self, inputstr):
try:
str_headers = inputstr.split(",")
for str_header in str_headers:
header_type = str_header.split(":")[0].strip()
header_value = str_header.split(":")[1].strip()
self.headers[header_type] = header_value
except Exception as E:
self.logger.exception(E)
return False
self.logger.info("------------ Custom Http Request Header ------------")
self.logger.info(self.headers)
self.logger.info("\n")
return True
def custom_proxy(self, proxy):
self.proxy = {'http': proxy, 'https': proxy}
self.session.proxies = self.proxy
self.logger.info("------------ Custom Http Request Proxy ------------")
self.logger.info(self.proxy)
self.logger.info("\n")
return True
def recv_socks_data(self, client_address):
"""socks数据接收"""
client_socket_conn = self.CACHE_CONNS.get(client_address).get("conn")
try:
tcp_recv_data = client_socket_conn.recv(self.READ_BUFF_SIZE)
self.logger.debug("CLIENT_ADDRESS:{} TCP_RECV_DATA:{}".format(client_address, tcp_recv_data))
if len(tcp_recv_data) > 0:
has_data = True
self.logger.info("CLIENT_ADDRESS:{} TCP_RECV_LEN:{}".format(client_address, len(tcp_recv_data)))
except Exception as err:
tcp_recv_data = b""
self.logger.debug("TCP_RECV_NONE")
# due to encoding constraints, the data (the TCP payload) needs an extra round of base64 encoding
client_socket_targetaddr = self.CACHE_CONNS.get(client_address).get("targetaddr")
# data structure for each client_address
client_address_one_data = {
"data": base64.b64encode(tcp_recv_data),
"targetaddr": client_socket_targetaddr,
}
self.post_send_data[client_address] = client_address_one_data
def send_socks_data(self, client_address):
# send the returned data to the client TCP connection
# read the data returned by the server
try:
client_socket_conn = self.CACHE_CONNS.get(client_address).get("conn")
server_tcp_send_data = base64.b64decode(self.post_return_data.get(client_address).get("data"))
except Exception as E:
if self.SINGLE_MODE is True:
self.logger.warning(
"CLIENT_ADDRESS:{} server socket not in client socket list".format(client_address))
self.logger.warning("SINGLE_MODE: {} ,remove is conn from server".format(self.SINGLE_MODE))
self.die_client_address.append(client_address)
return
if server_tcp_send_data == "": # 无数据返回继续下一个连接
return
# send the returned data to the client TCP connection
try:
client_socket_conn.send(server_tcp_send_data)
self.logger.debug("CLIENT_ADDRESS:{} TCP_SEND_DATA:{}".format(client_address, server_tcp_send_data))
except Exception as E:
self.logger.warning("CLIENT_ADDRESS:{} Client socket send failed".format(client_address))
self.die_client_address.append(client_address)
try:
self.CACHE_CONNS.pop(client_address)
client_socket_conn.close()
except Exception as E:
pass
def _post_data(self, url, data={}):
"""发送数据到webshell"""
payload = {
"Remoteserver": self.REMOTE_SERVER,
"Endpoint": url,
"SENDDATA": diyEncode(data)
}
self.logger.debug(payload)
for i in range(self.POST_RETRY_COUNT):
try:
# the timeout must be larger than the POST timeout used in the server-side script
r = self.session.post(self.WEBSHELL, data=payload, verify=False, timeout=15, headers=self.headers)
except Exception as E:
self.logger.warning("Post data to WEBSHELL failed")
self.logger.exception(E)
time.sleep(3)  # delay after an error
continue
try:
web_return_data = diyDecode(r.content)
if isinstance(web_return_data, dict) and web_return_data.get(ERROR_CODE) is not None:
self.logger.error(web_return_data.get(ERROR_CODE))
self.logger.warning(r.content)
return None
else:
return web_return_data
except Exception as E:
self.logger.warning("WEBSHELL return wrong data")
self.logger.debug(r.content)
time.sleep(3)  # delay after an error
continue
# exit after the retry count is exceeded
return None
def run(self):
self.logger.warning("LoopThread start")
while True:
self._sync_data()
def _sync_data(self):
has_data = False
# remove invalid clients
for client_address in self.die_client_address:
try:
one = self.CACHE_CONNS.pop(client_address)
one.get("conn").close()
self.logger.warning("CLIENT_ADDRESS:{} close client in die_client_address".format(client_address))
except Exception as E:
self.logger.warning(
"CLIENT_ADDRESS:{} close client close client in die_client_address error".format(client_address))
# read data from the TCP connections
thread_list = []
self.post_send_data = {}
for client_address in list(self.CACHE_CONNS.keys()):
temp = Thread(target=self.recv_socks_data,
args=(client_address,))
thread_list.append(temp)
for temp in thread_list:
temp.start()
for temp in thread_list:
temp.join()
# read data from the TCP connections (mirror)
mirror_post_send_data = {}
for mirror_client_address in list(self.MIRROR_CHCHE_CONNS.keys()):
client_socket_conn = self.MIRROR_CHCHE_CONNS.get(mirror_client_address).get("conn")
try:
tcp_recv_data = client_socket_conn.recv(self.READ_BUFF_SIZE)
self.logger.debug("CLIENT_ADDRESS:{} TCP_RECV_DATA:{}".format(mirror_client_address, tcp_recv_data))
if len(tcp_recv_data) > 0:
has_data = True
self.logger.info(
"MIRROR_CLIENT_ADDRESS:{} CLIENT_TCP_RECV_LEN:{}".format(mirror_client_address,
len(tcp_recv_data)))
except Exception as err:
tcp_recv_data = b""
self.logger.debug("TCP_RECV_NONE")
# data structure for each client_address
client_address_one_data = {
# due to encoding constraints, the data (the TCP payload) needs an extra round of base64 encoding
"data": base64.b64encode(tcp_recv_data),
}
mirror_post_send_data[mirror_client_address] = client_address_one_data
# assemble the payload
payload = {}
payload[DATA_TAG] = self.post_send_data  # data to send
payload[DIE_CLIENT_ADDRESS_TAG] = self.die_client_address  # connections to be cleaned up
payload[MIRROR_DATA_TAG] = mirror_post_send_data  # data to send
payload[MIRROR_DIE_CLIENT_ADDRESS_TAG] = self.mirror_die_client_address  # connections to be cleaned up
# send the collected data to the webshell
return_data = self._post_data(URL_STINGER_SYNC, data=payload)
if return_data is None:  # failed to get data; abort this sync round
return
# process the data returned by the POST request
# read the data returned by the server
self.post_return_data = return_data.get(RETURN_DATA)
self.die_client_address = []
thread_list = []
for client_address in list(self.post_return_data.keys()):
temp = Thread(target=self.send_socks_data,
args=(client_address,))
thread_list.append(temp)
for temp in thread_list:
temp.start()
for temp in thread_list:
temp.join()
# check for clients that are not in the list returned by the server
for client_address in list(self.CACHE_CONNS.keys()):
if self.post_return_data.get(client_address) is None:
if self.CACHE_CONNS.get(client_address).get("new") is True:
self.CACHE_CONNS[client_address]["new"] = False
pass
else:
self.logger.warning(
"CLIENT_ADDRESS:{} remove client not in server CHCHE_CONNS".format(client_address)
)
self.logger.warning("CLIENT_ADDRESS:{} append in die_client_address".format(client_address))
self.die_client_address.append(client_address)
# mirror handling
mirror_post_return_data = return_data.get(MIRROR_RETURN_DATA)
self.mirror_die_client_address = []
for mirror_client_address in list(mirror_post_return_data.keys()):
# handle the socket connection
if self.MIRROR_CHCHE_CONNS.get(mirror_client_address) is None:
# create a new connection
try:
server_socket_conn = socket.socket(AF_INET, SOCK_STREAM)
server_socket_conn.settimeout(self.SOCKET_TIMEOUT)
server_socket_conn.connect((self.TARGET_IP, self.TARGET_PORT), )  # JSON does not support tuples; they are converted to lists automatically
self.MIRROR_CHCHE_CONNS[mirror_client_address] = {"conn": server_socket_conn}
self.logger.info("MIRROR_CLIENT_ADDRESS:{} Create new tcp socket, TARGET_ADDRESS:{}:{}".format(
mirror_client_address, self.TARGET_IP, self.TARGET_PORT))
except Exception as E:
self.logger.warning(
"MIRROR_CLIENT_ADDRESS:{} TARGET_ADDR:{}:{} Create new socket failed. {}".format(
mirror_client_address,
self.TARGET_IP,
self.TARGET_PORT, E))
self.mirror_die_client_address.append(mirror_client_address)
continue
else:
server_socket_conn = self.MIRROR_CHCHE_CONNS.get(mirror_client_address).get("conn")
# read the data returned by the server
try:
server_tcp_send_data = base64.b64decode(mirror_post_return_data.get(mirror_client_address).get("data"))
server_socket_conn.send(server_tcp_send_data)
self.logger.debug("MIRROR_CLIENT_ADDRESS:{} SERVER_TCP_SEND_DATA:{}".format(mirror_client_address,
server_tcp_send_data))
if len(server_tcp_send_data) > 0:
self.logger.info(
"MIRROR_CLIENT_ADDRESS:{} SERVER_TCP_SEND_LEN:{}".format(mirror_client_address,
len(server_tcp_send_data)))
except Exception as E:
self.logger.info(
"MIRROR_CLIENT_ADDRESS:{} socket send data failed. {}".format(mirror_client_address, E))
self.mirror_die_client_address.append(mirror_client_address)
one = self.MIRROR_CHCHE_CONNS.pop(mirror_client_address)
one.get("conn").close()
continue
# check for clients that are not in the list returned by the server
for mirror_client_address in list(self.MIRROR_CHCHE_CONNS.keys()):
if mirror_post_return_data.get(mirror_client_address) is None:
self.logger.warning(
"MIRROR_CLIENT_ADDRESS:{} remove client not in server MIRROR_CHCHE_CONNS".format(
mirror_client_address)
)
# self.mirror_die_client_address.append(mirror_client_address)
one = self.MIRROR_CHCHE_CONNS.pop(mirror_client_address)
one.get("conn").close()
# wait time
if has_data:
wait = 0
else:
wait = return_data.get(WAIT_TIME)
time.sleep(wait)
def setc_webshell(self, WEBSHELL):
try:
r = requests.get(WEBSHELL, verify=False, timeout=3, headers=self.headers, proxies=self.proxy)
if b"UTF-8" in r.content:
self.WEBSHELL = WEBSHELL
return True
else:
return False
except requests.exceptions.ProxyError as proxyError:
self.logger.error("Connet to proxy failed : {}".format(self.proxy))
return False
except Exception as E:
self.logger.exception(E)
return False
def setc_remoteserver(self, remote_server=None):
if remote_server is None:
for port in CONTROL_PORT:
for i in range(2):
self.REMOTE_SERVER = "http://{}:{}".format(LOCALADDR, port)
result = self._post_data(URL_CHECK)
if result is None:  # roll back on failure
self.REMOTE_SERVER = None
continue
else:
return result
return None
self.REMOTE_SERVER = remote_server
result = self._post_data(URL_CHECK)
if result is None:  # roll back on failure
self.REMOTE_SERVER = None
return result
def setc_localaddr(self, ip, port):
if port_is_used(port, ip):
return False
else:
self.LOCAL_ADDR = "{}:{}".format(ip, port)
return True
def sets_config(self, tag, data):
payload = {CONFIG_TAG: tag, CONFIG_DATA: data}
web_return_data = self._post_data(URL_SET_CONFIG, payload)
return web_return_data
def send_cmd(self, tag, data=None):
payload = {CONFIG_TAG: tag, CONFIG_DATA: data}
web_return_data = self._post_data(URL_CMD, payload)
return web_return_data
class ClientRequest(object):
'''Represents a client SOCKS4 request'''
def __init__(self, data):
'''Construct a new ClientRequeset from the given raw SOCKS request'''
self.invalid = False
# Client requests must be at least 9 bytes to hold all necessary data
if len(data) < 9:
self.invalid = True
return
# Version number (VN)
self.parse_vn(data)
# SOCKS command code (CD)
self.parse_cd(data)
# Destination port
self.parse_dst_port(data)
# Destination IP / Domain name (if specified)
self.parse_ip(data)
# Userid
self.parse_userid(data)
@classmethod
def parse_fixed(cls, data):
'''Parse and return the fixed-length part of a SOCKS request
Returns a tuple containing (vn, cd, dst_port, dst_ip) given the raw
socks request
'''
return struct.unpack('>BBHL', data[:8])
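# Layout of the fixed-length part unpacked above ('>BBHL', big-endian):
# 1 byte version (VN), 1 byte command code (CD), 2 bytes destination port,
# 4 bytes destination IPv4 address.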
def parse_vn(self, data):
'''Parse and store the version number given the raw SOCKS request'''
vn, _, _, _ = ClientRequest.parse_fixed(data)
if (vn != CLIENT_VN):
self.invalid = True
def parse_dst_port(self, data):
'''Parse and store the destination port given the raw SOCKS request'''
_, _, dst_port, _ = ClientRequest.parse_fixed(data)
self.dst_port = dst_port
def parse_cd(self, data):
'''Parse and store the request code given the raw SOCKS request'''
_, cd, _, _ = ClientRequest.parse_fixed(data)
if (cd == REQUEST_CD_CONNECT or cd == REQUEST_CD_BIND):
self.cd = cd
else:
self.invalid = True
def parse_ip(self, data):
'''Parse and store the destination ip given the raw SOCKS request
If the IP is of the form 0.0.0.(1-255), attempt to resolve the domain
name specified, then store the resolved ip as the destination ip.
'''
_, _, _, dst_ip = ClientRequest.parse_fixed(data)
ip = ipaddr.IPv4Address(dst_ip)
o1, o2, o3, o4 = ip.packed
# Invalid ip address specifying that we must resolve the domain
# specified in data (As specified in SOCKS4a)
if (o1, o2, o3) == (0, 0, 0) and o4 != 0:
try:
# Variable length part of the request containing the userid
# and domain (8th byte onwards)
userid_and_domain = data[8:]
# Extract the domain to resolve
_, domain, _ = userid_and_domain.split(b'\x00')
except ValueError:
# Error parsing request
self.invalid = True
return
try:
resolved_ip = socket.gethostbyname(domain)
except socket.gaierror:
# Domain name not found
self.invalid = True
return
self.dst_ip = resolved_ip
else:
self.dst_ip = ip.exploded
def parse_userid(self, data):
'''Parse and store the userid given the raw SOCKS request'''
try:
index = data.index(b'\x00')
self.userid = data[8:index]
except ValueError:
self.invalid = True
except IndexError:
self.invalid = True
def isInvalid(self):
'''Returns true if this request is invalid, false otherwise'''
return self.invalid
class Socks4aProxy(threading.Thread):
'''A SOCKS4a Proxy'''
def __init__(self, host="127.0.0.1", port=-1, timeout=0.05, bufsize=BUFSIZE):
'''Create a new SOCKS4 proxy on the specified port'''
self._host = host
self._port = port
self._bufsize = bufsize
self._backlog = BACKLOG
self._timeout = timeout
self.logger = logging.getLogger("StreamLogger")
threading.Thread.__init__(self)
@staticmethod
def build_socks_reply(cd, dst_port=0x0000, dst_ip='0.0.0.0'):
'''
Build a SOCKS4 reply with the specified reply code, destination port and
destination ip.
'''
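        # The reply mirrors the fixed part of the request:
        #   VN (1, always 0 in replies) | CD (1) | DSTPORT (2) | DSTIP (4)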
# dst_ip_bytes = ipaddress.IPv4Address(dst_ip).packed
dst_ip_bytes = ipaddr.IPv4Address(dst_ip).packed
dst_ip_raw, = struct.unpack('>L', dst_ip_bytes)
return struct.pack('>BBHL', SERVER_VN, cd, dst_port, dst_ip_raw)
def run(self):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((self._host, self._port))
s.listen(self._backlog)
self.logger.warning("socks4a server start on {}:{}".format(self._host, self._port))
except Exception as E:
self.logger.exception(E)
self.logger.error(
"start socks4a server failed on {}:{}, maybe port is using by other process".format(self._host,
self._port))
return False
self.logger.warning("Socks4a ready to accept")
while True:
try:
conn, addr = s.accept()
conn.settimeout(self._timeout)
data = conn.recv(self._bufsize)
# Got a connection, handle it with process_request()
self._process_request(data, conn, addr)
self.logger.info("Socks4a process_request finish")
except KeyboardInterrupt as ki:
self.logger.warning('Caught KeyboardInterrupt, exiting')
s.close()
sys.exit(0)
except Exception as E:
self.logger.exception(E)
try:
conn.close()
except Exception as E:
pass
def _process_request(self, data, client_conn, addr):
'''Process a general SOCKS request'''
client_request = ClientRequest(data)
# Handle invalid requests
if client_request.isInvalid():
client_conn.send(self.build_socks_reply(RESPONSE_CD_REQUEST_REJECTED))
client_conn.close()
return
if client_request.cd == REQUEST_CD_CONNECT:
globalClientCenter.logger.warning('Got connection from {}'.format(addr))
key = "{}:{}".format(addr[0], addr[1])
globalClientCenter.CACHE_CONNS[key] = {
"conn": client_conn,
"targetaddr": (client_request.dst_ip, client_request.dst_port),
"new": True, # 新的连接,第一次检查略过
}
client_conn.settimeout(self._timeout)
            client_conn.send(self.build_socks_reply(RESPONSE_CD_REQUEST_GRANTED))  # handshake complete, the real connection starts
else:
self.logger.warning("Socks4a do not support bind request")
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Make sure the stinger_server is running on webserver "
"(stinger_server will listen 127.0.0.1:60010 127.0.0.1:60020)")
parser.add_argument('-w', '--webshell', metavar='http://192.168.3.10:8080/proxy.jsp',
help="webshell url",
required=True)
parser.add_argument('--header', metavar='Authorization: XXX,Cookie: XXX',
help="custom http request header",
default=None)
parser.add_argument('--proxy', metavar='socks5://127.0.0.1:1080',
help="Connect webshell through proxy",
default=None)
parser.add_argument('-l', '--locallistenaddress', metavar='127.0.0.1/0.0.0.0',
help="local listen address for socks4",
default='127.0.0.1')
parser.add_argument('-p', '--locallistenport',
default=10800,
metavar='N',
type=int,
help="local listen port for socks4",
)
parser.add_argument('-st', '--sockettimeout', default=0.2,
metavar="N",
type=float,
help="socket timeout value,the biger the timeout, the slower the transmission speed",
)
parser.add_argument('-ti', '--targetipaddress', metavar='127.0.0.1',
help="reverse proxy target ipaddress",
required=False)
parser.add_argument('-tp', '--targetport', metavar='60020',
help="reverse proxy target port",
required=False)
parser.add_argument('-c', '--cleansockst', default=False,
nargs='?',
metavar="true",
type=bool,
help="clean server exist socket(this will kill other client connect)",
)
parser.add_argument('-sm', '--singlemode', default=False,
nargs='?',
metavar="true",
type=bool,
help="clean server exist socket(this will kill other client connect)",
)
args = parser.parse_args()
WEBSHELL = args.webshell
LISTEN_ADDR = args.locallistenaddress
LISTEN_PORT = args.locallistenport
CLEAN_SOCKET = args.cleansockst
    CLEAN_SOCKET = CLEAN_SOCKET is not False  # nargs='?' yields None when the flag is given without a value
    # Handle the custom header argument
globalClientCenter = ClientCenter()
header = args.header
if header is not None:
flag = globalClientCenter.custom_header(header)
if flag is not True:
sys.exit(1)
    # Handle the proxy argument
proxy = args.proxy
if proxy is not None:
flag = globalClientCenter.custom_proxy(proxy)
if flag is not True:
sys.exit(1)
    # Handle the singlemode argument
SINGLE_MODE = args.singlemode
if SINGLE_MODE is not False:
SINGLE_MODE = True
globalClientCenter.SINGLE_MODE = SINGLE_MODE
globalClientCenter.logger.info("SINGLE_MODE : {}".format(SINGLE_MODE))
else:
SINGLE_MODE = False
    # Check the local listen address and port
globalClientCenter.logger.info("------------------- Local check -------------------")
flag = globalClientCenter.setc_localaddr(LISTEN_ADDR, LISTEN_PORT)
if flag:
globalClientCenter.logger.info("Local listen check : pass")
else:
globalClientCenter.logger.error(
"Local listen check failed, please check if {}:{} is available".format(LISTEN_ADDR, LISTEN_PORT))
globalClientCenter.logger.error(WEBSHELL)
sys.exit(1)
    # Check whether the webshell is reachable
webshell_alive = globalClientCenter.setc_webshell(WEBSHELL)
if webshell_alive:
globalClientCenter.logger.info("WEBSHELL check : pass")
globalClientCenter.logger.info("WEBSHELL: {}".format(WEBSHELL))
else:
globalClientCenter.logger.error("WEBSHELL check failed!")
globalClientCenter.logger.error(WEBSHELL)
sys.exit(1)
    # Check whether stinger_server is reachable
result = globalClientCenter.setc_remoteserver()
if result is None:
globalClientCenter.logger.error("Read REMOTE_SERVER failed,please check whether server is running")
sys.exit(1)
else:
MIRROR_LISTEN = "127.0.0.1:60020"
globalClientCenter.logger.info("REMOTE_SERVER check : pass")
globalClientCenter.logger.info("\n")
globalClientCenter.logger.info("------------------- Get Sever Config -------------------")
for key in result:
globalClientCenter.logger.info("{} : {}".format(key, result.get(key)))
if key == "MIRROR_LISTEN":
MIRROR_LISTEN = result.get(key)
globalClientCenter.logger.info("\n")
globalClientCenter.logger.info("------------------- Set Server Config -------------------")
    # Optionally clean up existing connections on the server
if CLEAN_SOCKET:
flag = globalClientCenter.send_cmd("CLEAN_SOCKET")
globalClientCenter.logger.info("CLEAN_SOCKET cmd : {}".format(flag))
    # Timeout for the server to establish intranet TCP connections
sockettimeout = args.sockettimeout
if sockettimeout != DEFAULT_SOCKET_TIMEOUT:
flag = globalClientCenter.sets_config("SOCKET_TIMEOUT", sockettimeout)
globalClientCenter.logger.info("Set server SOCKET_TIMEOUT => {}".format(flag))
globalClientCenter.SOCKET_TIMEOUT = sockettimeout
globalClientCenter.logger.info("\n")
    # Address mapped to the local side (reverse proxy target IP)
TARGET_IP = args.targetipaddress
if TARGET_IP is None:
globalClientCenter.TARGET_IP = MIRROR_LISTEN.split(":")[0]
else:
globalClientCenter.TARGET_IP = TARGET_IP
    # Port mapped to the local side (reverse proxy target port)
TARGET_PORT = args.targetport
if TARGET_PORT is None:
globalClientCenter.TARGET_PORT = int(MIRROR_LISTEN.split(":")[1])
else:
globalClientCenter.TARGET_PORT = int(TARGET_PORT)
globalClientCenter.logger.info("------------------! RAT Config !------------------")
globalClientCenter.logger.info("Socks4a on {}:{}".format(LISTEN_ADDR, LISTEN_PORT))
globalClientCenter.logger.info(
"Handler/LISTENER should listen on {}:{}".format(globalClientCenter.TARGET_IP, globalClientCenter.TARGET_PORT))
globalClientCenter.logger.info(
"Payload should connect to {}".format(MIRROR_LISTEN))
globalClientCenter.logger.info("------------------! RAT Config !------------------\n")
    # Run the worker threads as daemons
globalClientCenter.setDaemon(True)
t2 = Socks4aProxy(host=args.locallistenaddress, port=args.locallistenport, timeout=sockettimeout)
t2.setDaemon(True)
    # Start the services
globalClientCenter.start()
t2.start()
    # Keep the program running and handle the exit signal
while True:
try:
time.sleep(10)
except KeyboardInterrupt as ki:
print('Caught KeyboardInterrupt, exiting')
sys.exit(1)
| 2.21875
| 2
|
project/core/renderer.py
|
MarkKoz/code-jam-3
| 1
|
12776650
|
import pygame
import pyscroll
from project.entities.player import Player
from .constants import FONTS, SCREEN_SCALE
from .world import World
class Renderer:
def __init__(self, width: int, height: int):
self.screen: pygame.Surface = None
self.surface: pygame.Surface = None
self._set_screen(width, height)
self.map_layer = None
self.group = None
def load_world(self, world: World):
w, h = self.screen.get_size()
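        # The map is rendered at 1/SCREEN_SCALE of the window size; clamp_camera
        # keeps the view from scrolling past the map edges.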
self.map_layer = pyscroll.BufferedRenderer(world.map_data,
(w / SCREEN_SCALE, h / SCREEN_SCALE),
clamp_camera=True)
self.group = pyscroll.PyscrollGroup(map_layer=self.map_layer, default_layer=4)
self.group.add(world.player)
self.group.add(world.lemons)
self.group.add(world.juice, layer=5)
def draw(self, player: Player):
# Prevents the camera from tracking the player when moving left
camera_pos = list(player.rect.center)
camera_pos[0] = max(camera_pos[0], player.max_x)
self.group.center(camera_pos)
self.group.draw(self.surface)
def draw_score(self, score):
text = f'Lemons: {score}'
font = pygame.font.Font(FONTS['monogram'], 16)
font_surface: pygame.Surface = font.render(text, False, pygame.Color('white'))
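        # Right-align the score with a 4 px margin from the top-right corner.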
x = self.surface.get_size()[0] - font_surface.get_width()
self.surface.blit(font_surface, (x - 4, 4))
def resize(self, width, height):
self._set_screen(width, height)
self.map_layer.set_size((width / SCREEN_SCALE, height / SCREEN_SCALE))
def _draw_debug_info(self, player: Player, col_event):
# TODO: Move somewhere else?
text = repr(player).split('\n')
if col_event:
text.extend((
f'Collision: {col_event.collision}',
f'Position: {col_event.position} (offset: {col_event.offset})',
f'Surface: {col_event.surface}'
))
font = pygame.font.Font(FONTS['monogram'], 16)
height = 0
for line in text:
font_surface: pygame.Surface = font.render(line, False, pygame.Color('white'))
bg_surface: pygame.Surface = pygame.Surface(font_surface.get_size(), pygame.SRCALPHA, 32)
bg_surface.fill((51, 51, 51, 159))
bg_surface.blit(font_surface, (0, 0))
self.surface.blit(bg_surface, (0, height))
height += font_surface.get_height()
def update(self, player: Player, score: int, debug: bool, col_event):
self.draw(player)
self.draw_score(score)
if debug:
self._draw_debug_info(player, col_event)
        # Scale the render surface up to the window size and draw it onto the screen.
pygame.transform.scale(self.surface, self.screen.get_size(), self.screen)
pygame.display.flip() # Updates the display.
def _set_screen(self, width, height):
"""Simple wrapper to keep the screen resizeable."""
self.screen = pygame.display.set_mode((width, height), pygame.RESIZABLE)
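        # The render surface is SCREEN_SCALE times smaller than the window and is
        # scaled back up to the window size in update().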
self.surface = pygame.Surface((width / SCREEN_SCALE, height / SCREEN_SCALE)).convert()
| 2.9375
| 3
|